aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorfedor-miron <fedor-miron@yandex-team.com>2023-11-15 20:42:00 +0300
committerfedor-miron <fedor-miron@yandex-team.com>2023-11-15 21:52:48 +0300
commit7434cc06afd2ef8e9297f65c03d1fee169119949 (patch)
tree6839a4345f92d7ccd2ed040350db931b7dc49b8d
parentcb57bb54c10bb28c7623fa3f6e4d928172d9c17e (diff)
downloadydb-7434cc06afd2ef8e9297f65c03d1fee169119949.tar.gz
YQL-17123: move pgrun to ydb/library/yql/tools
-rw-r--r--.mapping.json5
-rw-r--r--contrib/python/patch/.dist-info/METADATA15
-rw-r--r--contrib/python/patch/.dist-info/top_level.txt1
-rw-r--r--contrib/python/patch/patch.py1203
-rw-r--r--contrib/python/patch/ya.make20
-rw-r--r--ydb/library/yql/tests/postgresql/README.md10
-rw-r--r--ydb/library/yql/tests/postgresql/cases/aggregates.out5
-rw-r--r--ydb/library/yql/tests/postgresql/cases/aggregates.sql5
-rw-r--r--ydb/library/yql/tests/postgresql/cases/alter_table.out18
-rw-r--r--ydb/library/yql/tests/postgresql/cases/alter_table.sql18
-rw-r--r--ydb/library/yql/tests/postgresql/cases/arrays.out12
-rw-r--r--ydb/library/yql/tests/postgresql/cases/arrays.sql12
-rw-r--r--ydb/library/yql/tests/postgresql/cases/bit.out10
-rw-r--r--ydb/library/yql/tests/postgresql/cases/bit.sql10
-rw-r--r--ydb/library/yql/tests/postgresql/cases/boolean.out307
-rw-r--r--ydb/library/yql/tests/postgresql/cases/boolean.sql94
-rw-r--r--ydb/library/yql/tests/postgresql/cases/case.out34
-rw-r--r--ydb/library/yql/tests/postgresql/cases/case.sql29
-rw-r--r--ydb/library/yql/tests/postgresql/cases/char.out15
-rw-r--r--ydb/library/yql/tests/postgresql/cases/char.sql14
-rw-r--r--ydb/library/yql/tests/postgresql/cases/comments.out65
-rw-r--r--ydb/library/yql/tests/postgresql/cases/comments.sql35
-rw-r--r--ydb/library/yql/tests/postgresql/cases/create_misc.out12
-rw-r--r--ydb/library/yql/tests/postgresql/cases/create_misc.sql12
-rw-r--r--ydb/library/yql/tests/postgresql/cases/create_table.out208
-rw-r--r--ydb/library/yql/tests/postgresql/cases/create_table.sql202
-rw-r--r--ydb/library/yql/tests/postgresql/cases/date.out20
-rw-r--r--ydb/library/yql/tests/postgresql/cases/date.sql20
-rw-r--r--ydb/library/yql/tests/postgresql/cases/dbsize.out188
-rw-r--r--ydb/library/yql/tests/postgresql/cases/dbsize.sql57
-rw-r--r--ydb/library/yql/tests/postgresql/cases/delete.out5
-rw-r--r--ydb/library/yql/tests/postgresql/cases/delete.sql5
-rw-r--r--ydb/library/yql/tests/postgresql/cases/expressions.out61
-rw-r--r--ydb/library/yql/tests/postgresql/cases/expressions.sql36
-rw-r--r--ydb/library/yql/tests/postgresql/cases/float4.out194
-rw-r--r--ydb/library/yql/tests/postgresql/cases/float4.sql56
-rw-r--r--ydb/library/yql/tests/postgresql/cases/float8.out480
-rw-r--r--ydb/library/yql/tests/postgresql/cases/float8.sql126
-rw-r--r--ydb/library/yql/tests/postgresql/cases/functional_deps.out17
-rw-r--r--ydb/library/yql/tests/postgresql/cases/functional_deps.sql17
-rw-r--r--ydb/library/yql/tests/postgresql/cases/horology.out400
-rw-r--r--ydb/library/yql/tests/postgresql/cases/horology.sql114
-rw-r--r--ydb/library/yql/tests/postgresql/cases/insert.out12
-rw-r--r--ydb/library/yql/tests/postgresql/cases/insert.sql12
-rw-r--r--ydb/library/yql/tests/postgresql/cases/int2.out308
-rw-r--r--ydb/library/yql/tests/postgresql/cases/int2.sql75
-rw-r--r--ydb/library/yql/tests/postgresql/cases/int4.out439
-rw-r--r--ydb/library/yql/tests/postgresql/cases/int4.sql120
-rw-r--r--ydb/library/yql/tests/postgresql/cases/int8.out39
-rw-r--r--ydb/library/yql/tests/postgresql/cases/int8.sql18
-rw-r--r--ydb/library/yql/tests/postgresql/cases/interval.out623
-rw-r--r--ydb/library/yql/tests/postgresql/cases/interval.sql156
-rw-r--r--ydb/library/yql/tests/postgresql/cases/join.out35
-rw-r--r--ydb/library/yql/tests/postgresql/cases/join.sql35
-rw-r--r--ydb/library/yql/tests/postgresql/cases/json.out301
-rw-r--r--ydb/library/yql/tests/postgresql/cases/json.sql72
-rw-r--r--ydb/library/yql/tests/postgresql/cases/json_encoding.out255
-rw-r--r--ydb/library/yql/tests/postgresql/cases/json_encoding.sql55
-rw-r--r--ydb/library/yql/tests/postgresql/cases/jsonb.out302
-rw-r--r--ydb/library/yql/tests/postgresql/cases/jsonb.sql78
-rw-r--r--ydb/library/yql/tests/postgresql/cases/jsonb_jsonpath.out30
-rw-r--r--ydb/library/yql/tests/postgresql/cases/jsonb_jsonpath.sql5
-rw-r--r--ydb/library/yql/tests/postgresql/cases/jsonpath.out896
-rw-r--r--ydb/library/yql/tests/postgresql/cases/jsonpath.sql153
-rw-r--r--ydb/library/yql/tests/postgresql/cases/jsonpath_encoding.out173
-rw-r--r--ydb/library/yql/tests/postgresql/cases/jsonpath_encoding.sql42
-rw-r--r--ydb/library/yql/tests/postgresql/cases/limit.out14
-rw-r--r--ydb/library/yql/tests/postgresql/cases/limit.sql12
-rw-r--r--ydb/library/yql/tests/postgresql/cases/name.out126
-rw-r--r--ydb/library/yql/tests/postgresql/cases/name.sql30
-rw-r--r--ydb/library/yql/tests/postgresql/cases/numeric.out728
-rw-r--r--ydb/library/yql/tests/postgresql/cases/numeric.sql582
-rw-r--r--ydb/library/yql/tests/postgresql/cases/numerology.out22
-rw-r--r--ydb/library/yql/tests/postgresql/cases/numerology.sql22
-rw-r--r--ydb/library/yql/tests/postgresql/cases/oid.out61
-rw-r--r--ydb/library/yql/tests/postgresql/cases/oid.sql26
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select.out16
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select.sql9
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select_distinct.out5
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select_distinct.sql5
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select_having.out15
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select_having.sql15
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select_implicit.out19
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select_implicit.sql19
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select_into.out4
-rw-r--r--ydb/library/yql/tests/postgresql/cases/select_into.sql4
-rw-r--r--ydb/library/yql/tests/postgresql/cases/strings.out152
-rw-r--r--ydb/library/yql/tests/postgresql/cases/strings.sql46
-rw-r--r--ydb/library/yql/tests/postgresql/cases/subselect.out13
-rw-r--r--ydb/library/yql/tests/postgresql/cases/subselect.sql3
-rw-r--r--ydb/library/yql/tests/postgresql/cases/text.out18
-rw-r--r--ydb/library/yql/tests/postgresql/cases/text.sql8
-rw-r--r--ydb/library/yql/tests/postgresql/cases/time.out15
-rw-r--r--ydb/library/yql/tests/postgresql/cases/time.sql15
-rw-r--r--ydb/library/yql/tests/postgresql/cases/timestamp.out114
-rw-r--r--ydb/library/yql/tests/postgresql/cases/timestamp.sql99
-rw-r--r--ydb/library/yql/tests/postgresql/cases/timestamptz.out117
-rw-r--r--ydb/library/yql/tests/postgresql/cases/timestamptz.sql101
-rw-r--r--ydb/library/yql/tests/postgresql/cases/timetz.out16
-rw-r--r--ydb/library/yql/tests/postgresql/cases/timetz.sql16
-rw-r--r--ydb/library/yql/tests/postgresql/cases/truncate.out4
-rw-r--r--ydb/library/yql/tests/postgresql/cases/truncate.sql4
-rw-r--r--ydb/library/yql/tests/postgresql/cases/unicode.out31
-rw-r--r--ydb/library/yql/tests/postgresql/cases/unicode.sql15
-rw-r--r--ydb/library/yql/tests/postgresql/cases/update.out27
-rw-r--r--ydb/library/yql/tests/postgresql/cases/update.sql27
-rw-r--r--ydb/library/yql/tests/postgresql/cases/varchar.out4
-rw-r--r--ydb/library/yql/tests/postgresql/cases/varchar.sql5
-rw-r--r--ydb/library/yql/tests/postgresql/cases/window.out20
-rw-r--r--ydb/library/yql/tests/postgresql/cases/window.sql20
-rw-r--r--ydb/library/yql/tests/postgresql/cases/xml.out6
-rw-r--r--ydb/library/yql/tests/postgresql/cases/xml.sql6
-rw-r--r--ydb/library/yql/tests/postgresql/common/__init__.py90
-rw-r--r--ydb/library/yql/tests/postgresql/common/differ.py91
-rw-r--r--ydb/library/yql/tests/postgresql/common/ya.make8
-rwxr-xr-xydb/library/yql/tests/postgresql/make-tests.sh11
-rwxr-xr-xydb/library/yql/tests/postgresql/original.sh2
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/aggregates.out2815
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/aggregates.sql1247
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/alter_table.out4593
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/alter_table.sql3030
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/arrays.out2436
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/arrays.sql740
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/bit.out748
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/bit.sql231
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/boolean.out559
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/boolean.sql262
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/case.out419
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/case.sql265
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/char.out122
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/char.sql75
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/char_1.out122
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/char_2.out122
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/comments.out65
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/comments.sql42
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/create_misc.out151
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/create_misc.sql217
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/create_table.out1321
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/create_table.sql977
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/date.out1497
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/date.sql366
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/dbsize.out188
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/dbsize.sql67
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/delete.out33
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/delete.sql25
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/expressions.out381
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/expressions.sql205
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/float4.out961
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/float4.sql354
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/float8.out1382
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/float8.sql500
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/functional_deps.out232
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/functional_deps.sql210
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/horology.out3344
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/horology.sql570
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/insert.out1004
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/insert.sql626
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/int2.out312
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/int2.sql118
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/int4.out439
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/int4.sql178
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/int8.out934
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/int8.sql252
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/interval.out1045
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/interval.sql357
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/join.out6628
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/join.sql2325
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/json.out2638
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/json.sql852
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/json_encoding.out262
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/json_encoding.sql78
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/jsonb.out5511
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/jsonb.sql1484
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/jsonb_jsonpath.out2539
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/jsonb_jsonpath.sql587
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/jsonpath.out964
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/jsonpath.sql181
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/jsonpath_encoding.out180
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/jsonpath_encoding.sql59
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/limit.out694
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/limit.sql201
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/name.out197
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/name.sql87
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/numeric.out3226
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/numeric.sql1365
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/numerology.out136
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/numerology.sql98
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/oid.out122
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/oid.sql43
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select.out973
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select.sql269
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_distinct.out308
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_distinct.sql137
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_distinct_on.out75
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_distinct_on.sql19
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_having.out93
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_having.sql50
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_having_1.out93
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_having_2.out93
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_implicit.out338
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_implicit.sql156
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_implicit_1.out338
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_implicit_2.out338
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_into.out222
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/select_into.sql138
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/strings.out2272
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/strings.sql761
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/subselect.out1859
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/subselect.sql939
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/text.out440
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/text.sql118
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/time.out200
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/time.sql72
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/timestamp.out2081
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/timestamp.sql386
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/timestamptz.out2991
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/timestamptz.sql567
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/timetz.out233
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/timetz.sql79
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/truncate.out594
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/truncate.sql329
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/unicode.out89
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/unicode.sql34
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/union.out1434
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/union.sql542
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/update.out1028
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/update.sql669
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/uuid.out159
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/uuid.sql85
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/varchar.out111
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/varchar.sql66
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/varchar_1.out111
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/varchar_2.out111
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/window.out4065
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/window.sql1330
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/xml.out1570
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/xml.sql619
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/xml_1.out1417
-rw-r--r--ydb/library/yql/tests/postgresql/original/cases/xml_2.out1550
-rw-r--r--ydb/library/yql/tests/postgresql/patches/alter_table.out.patch112
-rw-r--r--ydb/library/yql/tests/postgresql/patches/alter_table.sql.patch73
-rw-r--r--ydb/library/yql/tests/postgresql/patches/create_table.out.patch96
-rw-r--r--ydb/library/yql/tests/postgresql/patches/create_table.sql.patch71
-rw-r--r--ydb/library/yql/tests/postgresql/patches/json_encoding.out.patch16
-rw-r--r--ydb/library/yql/tests/postgresql/patches/json_encoding.sql.patch17
-rw-r--r--ydb/library/yql/tests/postgresql/patches/jsonpath_encoding.out.patch16
-rw-r--r--ydb/library/yql/tests/postgresql/patches/jsonpath_encoding.sql.patch17
-rw-r--r--ydb/library/yql/tests/postgresql/patches/unicode.out.patch10
-rw-r--r--ydb/library/yql/tests/postgresql/patches/unicode.sql.patch11
-rw-r--r--ydb/library/yql/tests/postgresql/pg_tests.csv54
-rw-r--r--ydb/library/yql/tests/postgresql/status.md61
-rw-r--r--ydb/library/yql/tests/postgresql/status.old61
-rwxr-xr-xydb/library/yql/tests/postgresql/test.sh2
-rw-r--r--ydb/library/yql/tests/postgresql/test_postgres.py14
-rw-r--r--ydb/library/yql/tests/postgresql/test_postgres_original.py14
-rwxr-xr-xydb/library/yql/tests/postgresql/update-status.sh3
-rw-r--r--ydb/library/yql/tests/postgresql/ya.make41
-rw-r--r--ydb/library/yql/tests/ya.make1
-rw-r--r--ydb/library/yql/tools/CMakeLists.txt1
-rw-r--r--ydb/library/yql/tools/pg-make-test/README.md10
-rw-r--r--ydb/library/yql/tools/pg-make-test/__main__.py439
-rw-r--r--ydb/library/yql/tools/pg-make-test/update-test-status/__main__.py88
-rw-r--r--ydb/library/yql/tools/pg-make-test/update-test-status/ya.make11
-rw-r--r--ydb/library/yql/tools/pg-make-test/ya.make21
-rw-r--r--ydb/library/yql/tools/pgrun/CMakeLists.darwin-x86_64.txt76
-rw-r--r--ydb/library/yql/tools/pgrun/CMakeLists.linux-aarch64.txt80
-rw-r--r--ydb/library/yql/tools/pgrun/CMakeLists.linux-x86_64.txt81
-rw-r--r--ydb/library/yql/tools/pgrun/CMakeLists.txt17
-rw-r--r--ydb/library/yql/tools/pgrun/CMakeLists.windows-x86_64.txt69
-rw-r--r--ydb/library/yql/tools/pgrun/pgrun.cpp1099
-rw-r--r--ydb/library/yql/tools/pgrun/ya.make46
-rw-r--r--ydb/library/yql/tools/ya.make2
272 files changed, 113116 insertions, 0 deletions
diff --git a/.mapping.json b/.mapping.json
index 7a93a54fdb..b1236d6218 100644
--- a/.mapping.json
+++ b/.mapping.json
@@ -8518,6 +8518,11 @@
"ydb/library/yql/tools/mrjob/CMakeLists.linux-x86_64.txt":"",
"ydb/library/yql/tools/mrjob/CMakeLists.txt":"",
"ydb/library/yql/tools/mrjob/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/yql/tools/pgrun/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/yql/tools/pgrun/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/yql/tools/pgrun/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/yql/tools/pgrun/CMakeLists.txt":"",
+ "ydb/library/yql/tools/pgrun/CMakeLists.windows-x86_64.txt":"",
"ydb/library/yql/tools/sql2yql/CMakeLists.darwin-x86_64.txt":"",
"ydb/library/yql/tools/sql2yql/CMakeLists.linux-aarch64.txt":"",
"ydb/library/yql/tools/sql2yql/CMakeLists.linux-x86_64.txt":"",
diff --git a/contrib/python/patch/.dist-info/METADATA b/contrib/python/patch/.dist-info/METADATA
new file mode 100644
index 0000000000..033c752a09
--- /dev/null
+++ b/contrib/python/patch/.dist-info/METADATA
@@ -0,0 +1,15 @@
+Metadata-Version: 2.1
+Name: patch
+Version: 1.16
+Summary: Patch utility to apply unified diffs
+Home-page: https://github.com/techtonik/python-patch/
+Author: anatoly techtonik <techtonik@gmail.com>
+Author-email: UNKNOWN
+License: MIT
+Platform: UNKNOWN
+Classifier: Classifier: Programming Language :: Python :: 2
+Classifier: Classifier: Programming Language :: Python :: 3
+
+UNKNOWN
+
+
diff --git a/contrib/python/patch/.dist-info/top_level.txt b/contrib/python/patch/.dist-info/top_level.txt
new file mode 100644
index 0000000000..9eb7b90ed5
--- /dev/null
+++ b/contrib/python/patch/.dist-info/top_level.txt
@@ -0,0 +1 @@
+patch
diff --git a/contrib/python/patch/patch.py b/contrib/python/patch/patch.py
new file mode 100644
index 0000000000..45c5a48658
--- /dev/null
+++ b/contrib/python/patch/patch.py
@@ -0,0 +1,1203 @@
+#!/usr/bin/env python
+"""
+ Patch utility to apply unified diffs
+
+ Brute-force line-by-line non-recursive parsing
+
+ Copyright (c) 2008-2016 anatoly techtonik
+ Available under the terms of MIT license
+
+ https://github.com/techtonik/python-patch/
+
+"""
+from __future__ import print_function
+
+__author__ = "anatoly techtonik <techtonik@gmail.com>"
+__version__ = "1.16"
+
+import copy
+import logging
+import re
+
+# cStringIO doesn't support unicode in 2.5
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import BytesIO as StringIO # python 3
+try:
+ import urllib2 as urllib_request
+except ImportError:
+ import urllib.request as urllib_request
+
+from os.path import exists, isfile, abspath
+import os
+import posixpath
+import shutil
+import sys
+
+
+PY3K = sys.version_info >= (3, 0)
+
+# PEP 3114
+if not PY3K:
+ compat_next = lambda gen: gen.next()
+else:
+ compat_next = lambda gen: gen.__next__()
+
+def tostr(b):
+ """ Python 3 bytes encoder. Used to print filename in
+ diffstat output. Assumes that filenames are in utf-8.
+ """
+ if not PY3K:
+ return b
+
+ # [ ] figure out how to print non-utf-8 filenames without
+ # information loss
+ return b.decode('utf-8')
+
+
+#------------------------------------------------
+# Logging is controlled by logger named after the
+# module name (e.g. 'patch' for patch.py module)
+
+logger = logging.getLogger(__name__)
+
+debug = logger.debug
+info = logger.info
+warning = logger.warning
+
+class NullHandler(logging.Handler):
+ """ Copied from Python 2.7 to avoid getting
+ `No handlers could be found for logger "patch"`
+ http://bugs.python.org/issue16539
+ """
+ def handle(self, record):
+ pass
+ def emit(self, record):
+ pass
+ def createLock(self):
+ self.lock = None
+
+streamhandler = logging.StreamHandler()
+
+# initialize logger itself
+logger.addHandler(NullHandler())
+
+debugmode = False
+
+def setdebug():
+ global debugmode, streamhandler
+
+ debugmode = True
+ loglevel = logging.DEBUG
+ logformat = "%(levelname)8s %(message)s"
+ logger.setLevel(loglevel)
+
+ if streamhandler not in logger.handlers:
+ # when used as a library, streamhandler is not added
+ # by default
+ logger.addHandler(streamhandler)
+
+ streamhandler.setFormatter(logging.Formatter(logformat))
+
+
+#------------------------------------------------
+# Constants for Patch/PatchSet types
+
+DIFF = PLAIN = "plain"
+GIT = "git"
+HG = MERCURIAL = "mercurial"
+SVN = SUBVERSION = "svn"
+# mixed type is only actual when PatchSet contains
+# Patches of different type
+MIXED = MIXED = "mixed"
+
+
+#------------------------------------------------
+# Helpers (these could come with Python stdlib)
+
+# x...() function are used to work with paths in
+# cross-platform manner - all paths use forward
+# slashes even on Windows.
+
+def xisabs(filename):
+ """ Cross-platform version of `os.path.isabs()`
+ Returns True if `filename` is absolute on
+ Linux, OS X or Windows.
+ """
+ if filename.startswith(b'/'): # Linux/Unix
+ return True
+ elif filename.startswith(b'\\'): # Windows
+ return True
+ elif re.match(b'\\w:[\\\\/]', filename): # Windows
+ return True
+ return False
+
+def xnormpath(path):
+ """ Cross-platform version of os.path.normpath """
+ # replace escapes and Windows slashes
+ normalized = posixpath.normpath(path).replace(b'\\', b'/')
+ # fold the result
+ return posixpath.normpath(normalized)
+
+def xstrip(filename):
+ """ Make relative path out of absolute by stripping
+ prefixes used on Linux, OS X and Windows.
+
+ This function is critical for security.
+ """
+ while xisabs(filename):
+ # strip windows drive with all slashes
+ if re.match(b'\\w:[\\\\/]', filename):
+ filename = re.sub(b'^\\w+:[\\\\/]+', b'', filename)
+ # strip all slashes
+ elif re.match(b'[\\\\/]', filename):
+ filename = re.sub(b'^[\\\\/]+', b'', filename)
+ return filename
+
+#-----------------------------------------------
+# Main API functions
+
+def fromfile(filename):
+ """ Parse patch file. If successful, returns
+ PatchSet() object. Otherwise returns False.
+ """
+ patchset = PatchSet()
+ debug("reading %s" % filename)
+ fp = open(filename, "rb")
+ res = patchset.parse(fp)
+ fp.close()
+ if res == True:
+ return patchset
+ return False
+
+
+def fromstring(s):
+ """ Parse text string and return PatchSet()
+ object (or False if parsing fails)
+ """
+ ps = PatchSet( StringIO(s) )
+ if ps.errors == 0:
+ return ps
+ return False
+
+
+def fromurl(url):
+ """ Parse patch from an URL, return False
+ if an error occured. Note that this also
+ can throw urlopen() exceptions.
+ """
+ ps = PatchSet( urllib_request.urlopen(url) )
+ if ps.errors == 0:
+ return ps
+ return False
+
+
+# --- Utility functions ---
+# [ ] reuse more universal pathsplit()
+def pathstrip(path, n):
+ """ Strip n leading components from the given path """
+ pathlist = [path]
+ while os.path.dirname(pathlist[0]) != b'':
+ pathlist[0:1] = os.path.split(pathlist[0])
+ return b'/'.join(pathlist[n:])
+# --- /Utility function ---
+
+
+class Hunk(object):
+ """ Parsed hunk data container (hunk starts with @@ -R +R @@) """
+
+ def __init__(self):
+ self.startsrc=None #: line count starts with 1
+ self.linessrc=None
+ self.starttgt=None
+ self.linestgt=None
+ self.invalid=False
+ self.desc=''
+ self.text=[]
+
+# def apply(self, estream):
+# """ write hunk data into enumerable stream
+# return strings one by one until hunk is
+# over
+#
+# enumerable stream are tuples (lineno, line)
+# where lineno starts with 0
+# """
+# pass
+
+
+class Patch(object):
+ """ Patch for a single file.
+ If used as an iterable, returns hunks.
+ """
+ def __init__(self):
+ self.source = None
+ self.target = None
+ self.hunks = []
+ self.hunkends = []
+ self.header = []
+
+ self.type = None
+
+ def __iter__(self):
+ for h in self.hunks:
+ yield h
+
+
+class PatchSet(object):
+ """ PatchSet is a patch parser and container.
+ When used as an iterable, returns patches.
+ """
+
+ def __init__(self, stream=None):
+ # --- API accessible fields ---
+
+ # name of the PatchSet (filename or ...)
+ self.name = None
+ # patch set type - one of constants
+ self.type = None
+
+ # list of Patch objects
+ self.items = []
+
+ self.errors = 0 # fatal parsing errors
+ self.warnings = 0 # non-critical warnings
+ # --- /API ---
+
+ if stream:
+ self.parse(stream)
+
+ def __len__(self):
+ return len(self.items)
+
+ def __iter__(self):
+ for i in self.items:
+ yield i
+
+ def parse(self, stream):
+ """ parse unified diff
+ return True on success
+ """
+ lineends = dict(lf=0, crlf=0, cr=0)
+ nexthunkno = 0 #: even if index starts with 0 user messages number hunks from 1
+
+ p = None
+ hunk = None
+ # hunkactual variable is used to calculate hunk lines for comparison
+ hunkactual = dict(linessrc=None, linestgt=None)
+
+
+ class wrapumerate(enumerate):
+ """Enumerate wrapper that uses boolean end of stream status instead of
+ StopIteration exception, and properties to access line information.
+ """
+
+ def __init__(self, *args, **kwargs):
+ # we don't call parent, it is magically created by __new__ method
+
+ self._exhausted = False
+ self._lineno = False # after end of stream equal to the num of lines
+ self._line = False # will be reset to False after end of stream
+
+ def next(self):
+ """Try to read the next line and return True if it is available,
+ False if end of stream is reached."""
+ if self._exhausted:
+ return False
+
+ try:
+ self._lineno, self._line = compat_next(super(wrapumerate, self))
+ except StopIteration:
+ self._exhausted = True
+ self._line = False
+ return False
+ return True
+
+ @property
+ def is_empty(self):
+ return self._exhausted
+
+ @property
+ def line(self):
+ return self._line
+
+ @property
+ def lineno(self):
+ return self._lineno
+
+ # define states (possible file regions) that direct parse flow
+ headscan = True # start with scanning header
+ filenames = False # lines starting with --- and +++
+
+ hunkhead = False # @@ -R +R @@ sequence
+ hunkbody = False #
+ hunkskip = False # skipping invalid hunk mode
+
+ hunkparsed = False # state after successfully parsed hunk
+
+ # regexp to match start of hunk, used groups - 1,3,4,6
+ re_hunk_start = re.compile(b"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@")
+
+ self.errors = 0
+ # temp buffers for header and filenames info
+ header = []
+ srcname = None
+ tgtname = None
+
+ # start of main cycle
+ # each parsing block already has line available in fe.line
+ fe = wrapumerate(stream)
+ while fe.next():
+
+ # -- deciders: these only switch state to decide who should process
+ # -- line fetched at the start of this cycle
+ if hunkparsed:
+ hunkparsed = False
+ if re_hunk_start.match(fe.line):
+ hunkhead = True
+ elif fe.line.startswith(b"--- "):
+ filenames = True
+ else:
+ headscan = True
+ # -- ------------------------------------
+
+ # read out header
+ if headscan:
+ while not fe.is_empty and not fe.line.startswith(b"--- "):
+ header.append(fe.line)
+ fe.next()
+ if fe.is_empty:
+ if p == None:
+ debug("no patch data found") # error is shown later
+ self.errors += 1
+ else:
+ info("%d unparsed bytes left at the end of stream" % len(b''.join(header)))
+ self.warnings += 1
+ # TODO check for \No new line at the end..
+ # TODO test for unparsed bytes
+ # otherwise error += 1
+ # this is actually a loop exit
+ continue
+
+ headscan = False
+ # switch to filenames state
+ filenames = True
+
+ line = fe.line
+ lineno = fe.lineno
+
+
+ # hunkskip and hunkbody code skipped until definition of hunkhead is parsed
+ if hunkbody:
+ # [x] treat empty lines inside hunks as containing single space
+ # (this happens when diff is saved by copy/pasting to editor
+ # that strips trailing whitespace)
+ if line.strip(b"\r\n") == b"":
+ debug("expanding empty line in a middle of hunk body")
+ self.warnings += 1
+ line = b' ' + line
+
+ # process line first
+ if re.match(b"^[- \\+\\\\]", line):
+ # gather stats about line endings
+ if line.endswith(b"\r\n"):
+ p.hunkends["crlf"] += 1
+ elif line.endswith(b"\n"):
+ p.hunkends["lf"] += 1
+ elif line.endswith(b"\r"):
+ p.hunkends["cr"] += 1
+
+ if line.startswith(b"-"):
+ hunkactual["linessrc"] += 1
+ elif line.startswith(b"+"):
+ hunkactual["linestgt"] += 1
+ elif not line.startswith(b"\\"):
+ hunkactual["linessrc"] += 1
+ hunkactual["linestgt"] += 1
+ hunk.text.append(line)
+ # todo: handle \ No newline cases
+ else:
+ warning("invalid hunk no.%d at %d for target file %s" % (nexthunkno, lineno+1, p.target))
+ # add hunk status node
+ hunk.invalid = True
+ p.hunks.append(hunk)
+ self.errors += 1
+ # switch to hunkskip state
+ hunkbody = False
+ hunkskip = True
+
+ # check exit conditions
+ if hunkactual["linessrc"] > hunk.linessrc or hunkactual["linestgt"] > hunk.linestgt:
+ warning("extra lines for hunk no.%d at %d for target %s" % (nexthunkno, lineno+1, p.target))
+ # add hunk status node
+ hunk.invalid = True
+ p.hunks.append(hunk)
+ self.errors += 1
+ # switch to hunkskip state
+ hunkbody = False
+ hunkskip = True
+ elif hunk.linessrc == hunkactual["linessrc"] and hunk.linestgt == hunkactual["linestgt"]:
+ # hunk parsed successfully
+ p.hunks.append(hunk)
+ # switch to hunkparsed state
+ hunkbody = False
+ hunkparsed = True
+
+ # detect mixed window/unix line ends
+ ends = p.hunkends
+ if ((ends["cr"]!=0) + (ends["crlf"]!=0) + (ends["lf"]!=0)) > 1:
+ warning("inconsistent line ends in patch hunks for %s" % p.source)
+ self.warnings += 1
+ if debugmode:
+ debuglines = dict(ends)
+ debuglines.update(file=p.target, hunk=nexthunkno)
+ debug("crlf: %(crlf)d lf: %(lf)d cr: %(cr)d\t - file: %(file)s hunk: %(hunk)d" % debuglines)
+ # fetch next line
+ continue
+
+ if hunkskip:
+ if re_hunk_start.match(line):
+ # switch to hunkhead state
+ hunkskip = False
+ hunkhead = True
+ elif line.startswith(b"--- "):
+ # switch to filenames state
+ hunkskip = False
+ filenames = True
+ if debugmode and len(self.items) > 0:
+ debug("- %2d hunks for %s" % (len(p.hunks), p.source))
+
+ if filenames:
+ if line.startswith(b"--- "):
+ if srcname != None:
+ # XXX testcase
+ warning("skipping false patch for %s" % srcname)
+ srcname = None
+ # XXX header += srcname
+ # double source filename line is encountered
+ # attempt to restart from this second line
+ re_filename = b"^--- ([^\t]+)"
+ match = re.match(re_filename, line)
+ # todo: support spaces in filenames
+ if match:
+ srcname = match.group(1).strip()
+ else:
+ warning("skipping invalid filename at line %d" % (lineno+1))
+ self.errors += 1
+ # XXX p.header += line
+ # switch back to headscan state
+ filenames = False
+ headscan = True
+ elif not line.startswith(b"+++ "):
+ if srcname != None:
+ warning("skipping invalid patch with no target for %s" % srcname)
+ self.errors += 1
+ srcname = None
+ # XXX header += srcname
+ # XXX header += line
+ else:
+ # this should be unreachable
+ warning("skipping invalid target patch")
+ filenames = False
+ headscan = True
+ else:
+ if tgtname != None:
+ # XXX seems to be a dead branch
+ warning("skipping invalid patch - double target at line %d" % (lineno+1))
+ self.errors += 1
+ srcname = None
+ tgtname = None
+ # XXX header += srcname
+ # XXX header += tgtname
+ # XXX header += line
+ # double target filename line is encountered
+ # switch back to headscan state
+ filenames = False
+ headscan = True
+ else:
+ re_filename = b"^\+\+\+ ([^\t]+)"
+ match = re.match(re_filename, line)
+ if not match:
+ warning("skipping invalid patch - no target filename at line %d" % (lineno+1))
+ self.errors += 1
+ srcname = None
+ # switch back to headscan state
+ filenames = False
+ headscan = True
+ else:
+ if p: # for the first run p is None
+ self.items.append(p)
+ p = Patch()
+ p.source = srcname
+ srcname = None
+ p.target = match.group(1).strip()
+ p.header = header
+ header = []
+ # switch to hunkhead state
+ filenames = False
+ hunkhead = True
+ nexthunkno = 0
+ p.hunkends = lineends.copy()
+ continue
+
+ if hunkhead:
+ match = re.match(b"^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@(.*)", line)
+ if not match:
+ if not p.hunks:
+ warning("skipping invalid patch with no hunks for file %s" % p.source)
+ self.errors += 1
+ # XXX review switch
+ # switch to headscan state
+ hunkhead = False
+ headscan = True
+ continue
+ else:
+ # TODO review condition case
+ # switch to headscan state
+ hunkhead = False
+ headscan = True
+ else:
+ hunk = Hunk()
+ hunk.startsrc = int(match.group(1))
+ hunk.linessrc = 1
+ if match.group(3): hunk.linessrc = int(match.group(3))
+ hunk.starttgt = int(match.group(4))
+ hunk.linestgt = 1
+ if match.group(6): hunk.linestgt = int(match.group(6))
+ hunk.invalid = False
+ hunk.desc = match.group(7)[1:].rstrip()
+ hunk.text = []
+
+ hunkactual["linessrc"] = hunkactual["linestgt"] = 0
+
+ # switch to hunkbody state
+ hunkhead = False
+ hunkbody = True
+ nexthunkno += 1
+ continue
+
+ # /while fe.next()
+
+ if p:
+ self.items.append(p)
+
+ if not hunkparsed:
+ if hunkskip:
+ warning("warning: finished with errors, some hunks may be invalid")
+ elif headscan:
+ if len(self.items) == 0:
+ warning("error: no patch data found!")
+ return False
+ else: # extra data at the end of file
+ pass
+ else:
+ warning("error: patch stream is incomplete!")
+ self.errors += 1
+ if len(self.items) == 0:
+ return False
+
+ if debugmode and len(self.items) > 0:
+ debug("- %2d hunks for %s" % (len(p.hunks), p.source))
+
+ # XXX fix total hunks calculation
+ debug("total files: %d total hunks: %d" % (len(self.items),
+ sum(len(p.hunks) for p in self.items)))
+
+ # ---- detect patch and patchset types ----
+ for idx, p in enumerate(self.items):
+ self.items[idx].type = self._detect_type(p)
+
+ types = set([p.type for p in self.items])
+ if len(types) > 1:
+ self.type = MIXED
+ else:
+ self.type = types.pop()
+ # --------
+
+ self._normalize_filenames()
+
+ return (self.errors == 0)
+
+ def _detect_type(self, p):
+ """ detect and return type for the specified Patch object
+ analyzes header and filenames info
+
+ NOTE: must be run before filenames are normalized
+ """
+
+ # check for SVN
+ # - header starts with Index:
+ # - next line is ===... delimiter
+ # - filename is followed by revision number
+ # TODO add SVN revision
+ if (len(p.header) > 1 and p.header[-2].startswith(b"Index: ")
+ and p.header[-1].startswith(b"="*67)):
+ return SVN
+
+ # common checks for both HG and GIT
+ DVCS = ((p.source.startswith(b'a/') or p.source == b'/dev/null')
+ and (p.target.startswith(b'b/') or p.target == b'/dev/null'))
+
+ # GIT type check
+ # - header[-2] is like "diff --git a/oldname b/newname"
+ # - header[-1] is like "index <hash>..<hash> <mode>"
+ # TODO add git rename diffs and add/remove diffs
+ # add git diff with spaced filename
+ # TODO http://www.kernel.org/pub/software/scm/git/docs/git-diff.html
+
+ # Git patch header len is 2 min
+ if len(p.header) > 1:
+ # detect the start of diff header - there might be some comments before
+ for idx in reversed(range(len(p.header))):
+ if p.header[idx].startswith(b"diff --git"):
+ break
+ if p.header[idx].startswith(b'diff --git a/'):
+ if (idx+1 < len(p.header)
+ and re.match(b'index \\w{7}..\\w{7} \\d{6}', p.header[idx+1])):
+ if DVCS:
+ return GIT
+
+ # HG check
+ #
+ # - for plain HG format header is like "diff -r b2d9961ff1f5 filename"
+ # - for Git-style HG patches it is "diff --git a/oldname b/newname"
+ # - filename starts with a/, b/ or is equal to /dev/null
+ # - exported changesets also contain the header
+ # # HG changeset patch
+ # # User name@example.com
+ # ...
+ # TODO add MQ
+ # TODO add revision info
+ if len(p.header) > 0:
+ if DVCS and re.match(b'diff -r \\w{12} .*', p.header[-1]):
+ return HG
+ if DVCS and p.header[-1].startswith(b'diff --git a/'):
+ if len(p.header) == 1: # native Git patch header len is 2
+ return HG
+ elif p.header[0].startswith(b'# HG changeset patch'):
+ return HG
+
+ return PLAIN
+
+
+ def _normalize_filenames(self):
+ """ sanitize filenames, normalizing paths, i.e.:
+ 1. strip a/ and b/ prefixes from GIT and HG style patches
+ 2. remove all references to parent directories (with warning)
+ 3. translate any absolute paths to relative (with warning)
+
+ [x] always use forward slashes to be crossplatform
+ (diff/patch were born as a unix utility after all)
+
+ return None
+ """
+ if debugmode:
+ debug("normalize filenames")
+ for i,p in enumerate(self.items):
+ if debugmode:
+ debug(" patch type = " + p.type)
+ debug(" source = " + p.source)
+ debug(" target = " + p.target)
+ if p.type in (HG, GIT):
+ # TODO: figure out how to deal with /dev/null entries
+ debug("stripping a/ and b/ prefixes")
+ if p.source != '/dev/null':
+ if not p.source.startswith(b"a/"):
+ warning("invalid source filename")
+ else:
+ p.source = p.source[2:]
+ if p.target != '/dev/null':
+ if not p.target.startswith(b"b/"):
+ warning("invalid target filename")
+ else:
+ p.target = p.target[2:]
+
+ p.source = xnormpath(p.source)
+ p.target = xnormpath(p.target)
+
+ sep = b'/' # sep value can be hardcoded, but it looks nice this way
+
+ # references to parent are not allowed
+ if p.source.startswith(b".." + sep):
+ warning("error: stripping parent path for source file patch no.%d" % (i+1))
+ self.warnings += 1
+ while p.source.startswith(b".." + sep):
+ p.source = p.source.partition(sep)[2]
+ if p.target.startswith(b".." + sep):
+ warning("error: stripping parent path for target file patch no.%d" % (i+1))
+ self.warnings += 1
+ while p.target.startswith(b".." + sep):
+ p.target = p.target.partition(sep)[2]
+ # absolute paths are not allowed
+ if xisabs(p.source) or xisabs(p.target):
+ warning("error: absolute paths are not allowed - file no.%d" % (i+1))
+ self.warnings += 1
+ if xisabs(p.source):
+ warning("stripping absolute path from source name '%s'" % p.source)
+ p.source = xstrip(p.source)
+ if xisabs(p.target):
+ warning("stripping absolute path from target name '%s'" % p.target)
+ p.target = xstrip(p.target)
+
+ self.items[i].source = p.source
+ self.items[i].target = p.target
+
+
+ def diffstat(self):
+ """ calculate diffstat and return as a string
+ Notes:
+ - original diffstat ouputs target filename
+ - single + or - shouldn't escape histogram
+ """
+ names = []
+ insert = []
+ delete = []
+ delta = 0 # size change in bytes
+ namelen = 0
+ maxdiff = 0 # max number of changes for single file
+ # (for histogram width calculation)
+ for patch in self.items:
+ i,d = 0,0
+ for hunk in patch.hunks:
+ for line in hunk.text:
+ if line.startswith(b'+'):
+ i += 1
+ delta += len(line)-1
+ elif line.startswith(b'-'):
+ d += 1
+ delta -= len(line)-1
+ names.append(patch.target)
+ insert.append(i)
+ delete.append(d)
+ namelen = max(namelen, len(patch.target))
+ maxdiff = max(maxdiff, i+d)
+ output = ''
+ statlen = len(str(maxdiff)) # stats column width
+ for i,n in enumerate(names):
+ # %-19s | %-4d %s
+ format = " %-" + str(namelen) + "s | %" + str(statlen) + "s %s\n"
+
+ hist = ''
+ # -- calculating histogram --
+ width = len(format % ('', '', ''))
+ histwidth = max(2, 80 - width)
+ if maxdiff < histwidth:
+ hist = "+"*insert[i] + "-"*delete[i]
+ else:
+ iratio = (float(insert[i]) / maxdiff) * histwidth
+ dratio = (float(delete[i]) / maxdiff) * histwidth
+
+ # make sure every entry gets at least one + or -
+ iwidth = 1 if 0 < iratio < 1 else int(iratio)
+ dwidth = 1 if 0 < dratio < 1 else int(dratio)
+ #print(iratio, dratio, iwidth, dwidth, histwidth)
+ hist = "+"*int(iwidth) + "-"*int(dwidth)
+ # -- /calculating +- histogram --
+ output += (format % (tostr(names[i]), str(insert[i] + delete[i]), hist))
+
+ output += (" %d files changed, %d insertions(+), %d deletions(-), %+d bytes"
+ % (len(names), sum(insert), sum(delete), delta))
+ return output
+
+
+ def findfile(self, old, new):
+ """ return name of file to be patched or None """
+ if exists(old):
+ return old
+ elif exists(new):
+ return new
+ else:
+ # [w] Google Code generates broken patches with its online editor
+ debug("broken patch from Google Code, stripping prefixes..")
+ if old.startswith(b'a/') and new.startswith(b'b/'):
+ old, new = old[2:], new[2:]
+ debug(" %s" % old)
+ debug(" %s" % new)
+ if exists(old):
+ return old
+ elif exists(new):
+ return new
+ return None
+
+
+ def apply(self, strip=0, root=None):
+ """ Apply parsed patch, optionally stripping leading components
+ from file paths. `root` parameter specifies working dir.
+ return True on success
+ """
+ if root:
+ prevdir = os.getcwd()
+ os.chdir(root)
+
+ total = len(self.items)
+ errors = 0
+ if strip:
+ # [ ] test strip level exceeds nesting level
+ # [ ] test the same only for selected files
+ # [ ] test if files end up being on the same level
+ try:
+ strip = int(strip)
+ except ValueError:
+ errors += 1
+ warning("error: strip parameter '%s' must be an integer" % strip)
+ strip = 0
+
+ #for fileno, filename in enumerate(self.source):
+ for i,p in enumerate(self.items):
+ if strip:
+ debug("stripping %s leading component(s) from:" % strip)
+ debug(" %s" % p.source)
+ debug(" %s" % p.target)
+ old = pathstrip(p.source, strip)
+ new = pathstrip(p.target, strip)
+ else:
+ old, new = p.source, p.target
+
+ filename = self.findfile(old, new)
+
+ if not filename:
+ warning("source/target file does not exist:\n --- %s\n +++ %s" % (old, new))
+ errors += 1
+ continue
+ if not isfile(filename):
+ warning("not a file - %s" % filename)
+ errors += 1
+ continue
+
+ # [ ] check absolute paths security here
+ debug("processing %d/%d:\t %s" % (i+1, total, filename))
+
+ # validate before patching
+ f2fp = open(filename, 'rb')
+ hunkno = 0
+ hunk = p.hunks[hunkno]
+ hunkfind = []
+ hunkreplace = []
+ validhunks = 0
+ canpatch = False
+ for lineno, line in enumerate(f2fp):
+ if lineno+1 < hunk.startsrc:
+ continue
+ elif lineno+1 == hunk.startsrc:
+ hunkfind = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" -"]
+ hunkreplace = [x[1:].rstrip(b"\r\n") for x in hunk.text if x[0] in b" +"]
+ #pprint(hunkreplace)
+ hunklineno = 0
+
+ # todo \ No newline at end of file
+
+ # check hunks in source file
+ if lineno+1 < hunk.startsrc+len(hunkfind)-1:
+ if line.rstrip(b"\r\n") == hunkfind[hunklineno]:
+ hunklineno+=1
+ else:
+ info("file %d/%d:\t %s" % (i+1, total, filename))
+ info(" hunk no.%d doesn't match source file at line %d" % (hunkno+1, lineno+1))
+ info(" expected: %s" % hunkfind[hunklineno])
+ info(" actual : %s" % line.rstrip(b"\r\n"))
+ # not counting this as error, because file may already be patched.
+ # check if file is already patched is done after the number of
+ # invalid hunks if found
+ # TODO: check hunks against source/target file in one pass
+ # API - check(stream, srchunks, tgthunks)
+ # return tuple (srcerrs, tgterrs)
+
+ # continue to check other hunks for completeness
+ hunkno += 1
+ if hunkno < len(p.hunks):
+ hunk = p.hunks[hunkno]
+ continue
+ else:
+ break
+
+ # check if processed line is the last line
+ if lineno+1 == hunk.startsrc+len(hunkfind)-1:
+ debug(" hunk no.%d for file %s -- is ready to be patched" % (hunkno+1, filename))
+ hunkno+=1
+ validhunks+=1
+ if hunkno < len(p.hunks):
+ hunk = p.hunks[hunkno]
+ else:
+ if validhunks == len(p.hunks):
+ # patch file
+ canpatch = True
+ break
+ else:
+ if hunkno < len(p.hunks):
+ warning("premature end of source file %s at hunk %d" % (filename, hunkno+1))
+ errors += 1
+
+ f2fp.close()
+
+ if validhunks < len(p.hunks):
+ if self._match_file_hunks(filename, p.hunks):
+ warning("already patched %s" % filename)
+ else:
+ warning("source file is different - %s" % filename)
+ errors += 1
+ if canpatch:
+ backupname = filename+b".orig"
+ if exists(backupname):
+ warning("can't backup original file to %s - aborting" % backupname)
+ else:
+ import shutil
+ shutil.move(filename, backupname)
+ if self.write_hunks(backupname, filename, p.hunks):
+ info("successfully patched %d/%d:\t %s" % (i+1, total, filename))
+ os.unlink(backupname)
+ else:
+ errors += 1
+ warning("error patching file %s" % filename)
+ shutil.copy(filename, filename+".invalid")
+ warning("invalid version is saved to %s" % filename+".invalid")
+ # todo: proper rejects
+ shutil.move(backupname, filename)
+
+ if root:
+ os.chdir(prevdir)
+
+ # todo: check for premature eof
+ return (errors == 0)
+
+
+ def _reverse(self):
+ """ reverse patch direction (this doesn't touch filenames) """
+ for p in self.items:
+ for h in p.hunks:
+ h.startsrc, h.starttgt = h.starttgt, h.startsrc
+ h.linessrc, h.linestgt = h.linestgt, h.linessrc
+ for i,line in enumerate(h.text):
+ # need to use line[0:1] here, because line[0]
+ # returns int instead of bytes on Python 3
+ if line[0:1] == b'+':
+ h.text[i] = b'-' + line[1:]
+ elif line[0:1] == b'-':
+ h.text[i] = b'+' +line[1:]
+
+ def revert(self, strip=0, root=None):
+ """ apply patch in reverse order """
+ reverted = copy.deepcopy(self)
+ reverted._reverse()
+ return reverted.apply(strip, root)
+
+
+ def can_patch(self, filename):
+ """ Check if specified filename can be patched. Returns None if file can
+ not be found among source filenames. False if patch can not be applied
+ clearly. True otherwise.
+
+ :returns: True, False or None
+ """
+ filename = abspath(filename)
+ for p in self.items:
+ if filename == abspath(p.source):
+ return self._match_file_hunks(filename, p.hunks)
+ return None
+
+
+ def _match_file_hunks(self, filepath, hunks):
+ matched = True
+ fp = open(abspath(filepath), 'rb')
+
+ class NoMatch(Exception):
+ pass
+
+ lineno = 1
+ line = fp.readline()
+ hno = None
+ try:
+ for hno, h in enumerate(hunks):
+ # skip to first line of the hunk
+ while lineno < h.starttgt:
+ if not len(line): # eof
+ debug("check failed - premature eof before hunk: %d" % (hno+1))
+ raise NoMatch
+ line = fp.readline()
+ lineno += 1
+ for hline in h.text:
+ if hline.startswith(b"-"):
+ continue
+ if not len(line):
+ debug("check failed - premature eof on hunk: %d" % (hno+1))
+ # todo: \ No newline at the end of file
+ raise NoMatch
+ if line.rstrip(b"\r\n") != hline[1:].rstrip(b"\r\n"):
+ debug("file is not patched - failed hunk: %d" % (hno+1))
+ raise NoMatch
+ line = fp.readline()
+ lineno += 1
+
+ except NoMatch:
+ matched = False
+ # todo: display failed hunk, i.e. expected/found
+
+ fp.close()
+ return matched
+
+
+ def patch_stream(self, instream, hunks):
+ """ Generator that yields stream patched with hunks iterable
+
+ Converts lineends in hunk lines to the best suitable format
+ autodetected from input
+ """
+
+ # todo: At the moment substituted lineends may not be the same
+ # at the start and at the end of patching. Also issue a
+ # warning/throw about mixed lineends (is it really needed?)
+
+ hunks = iter(hunks)
+
+ srclineno = 1
+
+ lineends = {b'\n':0, b'\r\n':0, b'\r':0}
+ def get_line():
+ """
+ local utility function - return line from source stream
+ collecting line end statistics on the way
+ """
+ line = instream.readline()
+ # 'U' mode works only with text files
+ if line.endswith(b"\r\n"):
+ lineends[b"\r\n"] += 1
+ elif line.endswith(b"\n"):
+ lineends[b"\n"] += 1
+ elif line.endswith(b"\r"):
+ lineends[b"\r"] += 1
+ return line
+
+ for hno, h in enumerate(hunks):
+ debug("hunk %d" % (hno+1))
+ # skip to line just before hunk starts
+ while srclineno < h.startsrc:
+ yield get_line()
+ srclineno += 1
+
+ for hline in h.text:
+ # todo: check \ No newline at the end of file
+ if hline.startswith(b"-") or hline.startswith(b"\\"):
+ get_line()
+ srclineno += 1
+ continue
+ else:
+ if not hline.startswith(b"+"):
+ get_line()
+ srclineno += 1
+ line2write = hline[1:]
+ # detect if line ends are consistent in source file
+ if sum([bool(lineends[x]) for x in lineends]) == 1:
+ newline = [x for x in lineends if lineends[x] != 0][0]
+ yield line2write.rstrip(b"\r\n")+newline
+ else: # newlines are mixed
+ yield line2write
+
+ for line in instream:
+ yield line
+
+
+ def write_hunks(self, srcname, tgtname, hunks):
+ src = open(srcname, "rb")
+ tgt = open(tgtname, "wb")
+
+ debug("processing target file %s" % tgtname)
+
+ tgt.writelines(self.patch_stream(src, hunks))
+
+ tgt.close()
+ src.close()
+ # [ ] TODO: add test for permission copy
+ shutil.copymode(srcname, tgtname)
+ return True
+
+
+ def dump(self):
+ for p in self.items:
+ for headline in p.header:
+ print(headline.rstrip('\n'))
+ print('--- ' + p.source)
+ print('+++ ' + p.target)
+ for h in p.hunks:
+ print('@@ -%s,%s +%s,%s @@' % (h.startsrc, h.linessrc, h.starttgt, h.linestgt))
+ for line in h.text:
+ print(line.rstrip('\n'))
+
+
+def main():
+ from optparse import OptionParser
+ from os.path import exists
+ import sys
+
+ opt = OptionParser(usage="1. %prog [options] unified.diff\n"
+ " 2. %prog [options] http://host/patch\n"
+ " 3. %prog [options] -- < unified.diff",
+ version="python-patch %s" % __version__)
+ opt.add_option("-q", "--quiet", action="store_const", dest="verbosity",
+ const=0, help="print only warnings and errors", default=1)
+ opt.add_option("-v", "--verbose", action="store_const", dest="verbosity",
+ const=2, help="be verbose")
+ opt.add_option("--debug", action="store_true", dest="debugmode", help="debug mode")
+ opt.add_option("--diffstat", action="store_true", dest="diffstat",
+ help="print diffstat and exit")
+ opt.add_option("-d", "--directory", metavar='DIR',
+ help="specify root directory for applying patch")
+ opt.add_option("-p", "--strip", type="int", metavar='N', default=0,
+ help="strip N path components from filenames")
+ opt.add_option("--revert", action="store_true",
+ help="apply patch in reverse order (unpatch)")
+ (options, args) = opt.parse_args()
+
+ if not args and sys.argv[-1:] != ['--']:
+ opt.print_version()
+ opt.print_help()
+ sys.exit()
+ readstdin = (sys.argv[-1:] == ['--'] and not args)
+
+ verbosity_levels = {0:logging.WARNING, 1:logging.INFO, 2:logging.DEBUG}
+ loglevel = verbosity_levels[options.verbosity]
+ logformat = "%(message)s"
+ logger.setLevel(loglevel)
+ streamhandler.setFormatter(logging.Formatter(logformat))
+
+ if options.debugmode:
+ setdebug() # this sets global debugmode variable
+
+ if readstdin:
+ patch = PatchSet(sys.stdin)
+ else:
+ patchfile = args[0]
+ urltest = patchfile.split(':')[0]
+ if (':' in patchfile and urltest.isalpha()
+ and len(urltest) > 1): # one char before : is a windows drive letter
+ patch = fromurl(patchfile)
+ else:
+ if not exists(patchfile) or not isfile(patchfile):
+ sys.exit("patch file does not exist - %s" % patchfile)
+ patch = fromfile(patchfile)
+
+ if options.diffstat:
+ print(patch.diffstat())
+ sys.exit(0)
+
+ #pprint(patch)
+ if options.revert:
+ patch.revert(options.strip, root=options.directory) or sys.exit(-1)
+ else:
+ patch.apply(options.strip, root=options.directory) or sys.exit(-1)
+
+ # todo: document and test line ends handling logic - patch.py detects proper line-endings
+ # for inserted hunks and issues a warning if patched file has incosistent line ends
+
+
+if __name__ == "__main__":
+ main()
+
+# Legend:
+# [ ] - some thing to be done
+# [w] - official wart, external or internal that is unlikely to be fixed
+
+# [ ] API break (2.x) wishlist
+# PatchSet.items --> PatchSet.patches
+
+# [ ] run --revert test for all dataset items
+# [ ] run .parse() / .dump() test for dataset
diff --git a/contrib/python/patch/ya.make b/contrib/python/patch/ya.make
new file mode 100644
index 0000000000..40e0074701
--- /dev/null
+++ b/contrib/python/patch/ya.make
@@ -0,0 +1,20 @@
+PY3_LIBRARY()
+
+VERSION(1.16)
+
+LICENSE(MIT)
+
+NO_LINT()
+
+PY_SRCS(
+ TOP_LEVEL
+ patch.py
+)
+
+RESOURCE_FILES(
+ PREFIX contrib/python/patch/
+ .dist-info/METADATA
+ .dist-info/top_level.txt
+)
+
+END()
diff --git a/ydb/library/yql/tests/postgresql/README.md b/ydb/library/yql/tests/postgresql/README.md
new file mode 100644
index 0000000000..a269d01b82
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/README.md
@@ -0,0 +1,10 @@
+Original regression Postgresql tests are in original/cases
+Extracts of original tests used as pre-commit tests are in cases
+
+test.sh runs pre-commit tests. Run ya make in yql/tools/pg-make-test beforehand
+original.sh runs original tests
+
+Run make-tests.sh to build pre-commit tests from original tests. pg_tests.csv file is a report file indicating
+share of SQL statements per testcase, which run successfully.
+
+Run update-status.sh to update status.md from pg_tests.csv
diff --git a/ydb/library/yql/tests/postgresql/cases/aggregates.out b/ydb/library/yql/tests/postgresql/cases/aggregates.out
new file mode 100644
index 0000000000..fa46df67fe
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/aggregates.out
@@ -0,0 +1,5 @@
+--
+-- AGGREGATES
+--
+-- avoid bit-exact output here because operations may not be bit-exact.
+SET extra_float_digits = 0;
diff --git a/ydb/library/yql/tests/postgresql/cases/aggregates.sql b/ydb/library/yql/tests/postgresql/cases/aggregates.sql
new file mode 100644
index 0000000000..fa46df67fe
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/aggregates.sql
@@ -0,0 +1,5 @@
+--
+-- AGGREGATES
+--
+-- avoid bit-exact output here because operations may not be bit-exact.
+SET extra_float_digits = 0;
diff --git a/ydb/library/yql/tests/postgresql/cases/alter_table.out b/ydb/library/yql/tests/postgresql/cases/alter_table.out
new file mode 100644
index 0000000000..6d5b65872f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/alter_table.out
@@ -0,0 +1,18 @@
+--
+-- ALTER_TABLE
+--
+-- Clean up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+RESET client_min_messages;
+--
+-- add attribute
+--
+CREATE TABLE attmp (initial int4);
+INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t,
+ v, w, x, y, z)
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'c',
+ 314159, '(1,1)', '512',
+ '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
+ '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)',
+ 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}');
diff --git a/ydb/library/yql/tests/postgresql/cases/alter_table.sql b/ydb/library/yql/tests/postgresql/cases/alter_table.sql
new file mode 100644
index 0000000000..6d5b65872f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/alter_table.sql
@@ -0,0 +1,18 @@
+--
+-- ALTER_TABLE
+--
+-- Clean up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+RESET client_min_messages;
+--
+-- add attribute
+--
+CREATE TABLE attmp (initial int4);
+INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t,
+ v, w, x, y, z)
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'c',
+ 314159, '(1,1)', '512',
+ '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
+ '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)',
+ 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}');
diff --git a/ydb/library/yql/tests/postgresql/cases/arrays.out b/ydb/library/yql/tests/postgresql/cases/arrays.out
new file mode 100644
index 0000000000..2133c8660a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/arrays.out
@@ -0,0 +1,12 @@
+--
+-- ARRAYS
+--
+CREATE TABLE arrtest (
+ a int2[],
+ b int4[][][],
+ c name[],
+ d text[][],
+ e float8[],
+ f char(5)[],
+ g varchar(5)[]
+);
diff --git a/ydb/library/yql/tests/postgresql/cases/arrays.sql b/ydb/library/yql/tests/postgresql/cases/arrays.sql
new file mode 100644
index 0000000000..2133c8660a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/arrays.sql
@@ -0,0 +1,12 @@
+--
+-- ARRAYS
+--
+CREATE TABLE arrtest (
+ a int2[],
+ b int4[][][],
+ c name[],
+ d text[][],
+ e float8[],
+ f char(5)[],
+ g varchar(5)[]
+);
diff --git a/ydb/library/yql/tests/postgresql/cases/bit.out b/ydb/library/yql/tests/postgresql/cases/bit.out
new file mode 100644
index 0000000000..12dc4f985f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/bit.out
@@ -0,0 +1,10 @@
+--
+-- BIT types
+--
+--
+-- Build tables for testing
+--
+CREATE TABLE BIT_TABLE(b BIT(11));
+INSERT INTO BIT_TABLE VALUES (B'00000000000');
+INSERT INTO BIT_TABLE VALUES (B'11011000000');
+INSERT INTO BIT_TABLE VALUES (B'01010101010');
diff --git a/ydb/library/yql/tests/postgresql/cases/bit.sql b/ydb/library/yql/tests/postgresql/cases/bit.sql
new file mode 100644
index 0000000000..12dc4f985f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/bit.sql
@@ -0,0 +1,10 @@
+--
+-- BIT types
+--
+--
+-- Build tables for testing
+--
+CREATE TABLE BIT_TABLE(b BIT(11));
+INSERT INTO BIT_TABLE VALUES (B'00000000000');
+INSERT INTO BIT_TABLE VALUES (B'11011000000');
+INSERT INTO BIT_TABLE VALUES (B'01010101010');
diff --git a/ydb/library/yql/tests/postgresql/cases/boolean.out b/ydb/library/yql/tests/postgresql/cases/boolean.out
new file mode 100644
index 0000000000..50c6859e93
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/boolean.out
@@ -0,0 +1,307 @@
+--
+-- BOOLEAN
+--
+--
+-- sanity check - if this fails go insane!
+--
+SELECT 1 AS one;
+ one
+-----
+ 1
+(1 row)
+
+-- ******************testing built-in type bool********************
+-- check bool input syntax
+SELECT true AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT false AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 't' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool ' f ' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'true' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'test' AS error;
+ERROR: invalid input syntax for type boolean: "test"
+LINE 1: SELECT bool 'test' AS error;
+ ^
+SELECT bool 'false' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'foo' AS error;
+ERROR: invalid input syntax for type boolean: "foo"
+LINE 1: SELECT bool 'foo' AS error;
+ ^
+SELECT bool 'y' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'yes' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'yeah' AS error;
+ERROR: invalid input syntax for type boolean: "yeah"
+LINE 1: SELECT bool 'yeah' AS error;
+ ^
+SELECT bool 'n' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'no' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'nay' AS error;
+ERROR: invalid input syntax for type boolean: "nay"
+LINE 1: SELECT bool 'nay' AS error;
+ ^
+SELECT bool 'on' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'off' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'of' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'o' AS error;
+ERROR: invalid input syntax for type boolean: "o"
+LINE 1: SELECT bool 'o' AS error;
+ ^
+SELECT bool 'on_' AS error;
+ERROR: invalid input syntax for type boolean: "on_"
+LINE 1: SELECT bool 'on_' AS error;
+ ^
+SELECT bool 'off_' AS error;
+ERROR: invalid input syntax for type boolean: "off_"
+LINE 1: SELECT bool 'off_' AS error;
+ ^
+SELECT bool '1' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool '11' AS error;
+ERROR: invalid input syntax for type boolean: "11"
+LINE 1: SELECT bool '11' AS error;
+ ^
+SELECT bool '0' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool '000' AS error;
+ERROR: invalid input syntax for type boolean: "000"
+LINE 1: SELECT bool '000' AS error;
+ ^
+SELECT bool '' AS error;
+ERROR: invalid input syntax for type boolean: ""
+LINE 1: SELECT bool '' AS error;
+ ^
+-- and, or, not in qualifications
+SELECT bool 't' or bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 't' and bool 'f' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT not bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 't' = bool 'f' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 't' <> bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 't' > bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 't' >= bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'f' < bool 't' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'f' <= bool 't' AS true;
+ true
+------
+ t
+(1 row)
+
+-- explicit casts to/from text
+SELECT 'TrUe'::text::boolean AS true, 'fAlse'::text::boolean AS false;
+ true | false
+------+-------
+ t | f
+(1 row)
+
+SELECT ' true '::text::boolean AS true,
+ ' FALSE'::text::boolean AS false;
+ true | false
+------+-------
+ t | f
+(1 row)
+
+SELECT true::boolean::text AS true, false::boolean::text AS false;
+ true | false
+------+-------
+ true | false
+(1 row)
+
+SELECT ' tru e '::text::boolean AS invalid; -- error
+ERROR: invalid input syntax for type boolean: " tru e "
+SELECT ''::text::boolean AS invalid; -- error
+ERROR: invalid input syntax for type boolean: ""
+CREATE TABLE BOOLTBL1 (f1 bool);
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 't');
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'True');
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'true');
+-- BOOLTBL1 should be full of true's at this point
+SELECT BOOLTBL1.* FROM BOOLTBL1;
+ f1
+----
+ t
+ t
+ t
+(3 rows)
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 = bool 'true';
+ f1
+----
+ t
+ t
+ t
+(3 rows)
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 <> bool 'false';
+ f1
+----
+ t
+ t
+ t
+(3 rows)
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE booleq(bool 'false', f1);
+ f1
+----
+(0 rows)
+
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'f');
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 = bool 'false';
+ f1
+----
+ f
+(1 row)
+
+CREATE TABLE BOOLTBL2 (f1 bool);
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'f');
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'false');
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'False');
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'FALSE');
+-- This is now an invalid expression
+-- For pre-v6.3 this evaluated to false - thomas 1997-10-23
+INSERT INTO BOOLTBL2 (f1)
+ VALUES (bool 'XXX');
+ERROR: invalid input syntax for type boolean: "XXX"
+LINE 2: VALUES (bool 'XXX');
+ ^
+-- BOOLTBL2 should be full of false's at this point
+SELECT BOOLTBL2.* FROM BOOLTBL2;
+ f1
+----
+ f
+ f
+ f
+ f
+(4 rows)
+
+--
+-- Tests for BooleanTest
+--
+CREATE TABLE BOOLTBL3 (d text, b bool, o int);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('true', true, 1);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('false', false, 2);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('null', null, 3);
+-- Test to make sure short-circuiting and NULL handling is
+-- correct. Use a table as source to prevent constant simplification
+-- to interfer.
+CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool);
+INSERT INTO booltbl4 VALUES (false, true, null);
+\pset null '(null)'
diff --git a/ydb/library/yql/tests/postgresql/cases/boolean.sql b/ydb/library/yql/tests/postgresql/cases/boolean.sql
new file mode 100644
index 0000000000..efe99cf80e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/boolean.sql
@@ -0,0 +1,94 @@
+--
+-- BOOLEAN
+--
+--
+-- sanity check - if this fails go insane!
+--
+SELECT 1 AS one;
+-- ******************testing built-in type bool********************
+-- check bool input syntax
+SELECT true AS true;
+SELECT false AS false;
+SELECT bool 't' AS true;
+SELECT bool ' f ' AS false;
+SELECT bool 'true' AS true;
+SELECT bool 'test' AS error;
+SELECT bool 'false' AS false;
+SELECT bool 'foo' AS error;
+SELECT bool 'y' AS true;
+SELECT bool 'yes' AS true;
+SELECT bool 'yeah' AS error;
+SELECT bool 'n' AS false;
+SELECT bool 'no' AS false;
+SELECT bool 'nay' AS error;
+SELECT bool 'on' AS true;
+SELECT bool 'off' AS false;
+SELECT bool 'of' AS false;
+SELECT bool 'o' AS error;
+SELECT bool 'on_' AS error;
+SELECT bool 'off_' AS error;
+SELECT bool '1' AS true;
+SELECT bool '11' AS error;
+SELECT bool '0' AS false;
+SELECT bool '000' AS error;
+SELECT bool '' AS error;
+-- and, or, not in qualifications
+SELECT bool 't' or bool 'f' AS true;
+SELECT bool 't' and bool 'f' AS false;
+SELECT not bool 'f' AS true;
+SELECT bool 't' = bool 'f' AS false;
+SELECT bool 't' <> bool 'f' AS true;
+SELECT bool 't' > bool 'f' AS true;
+SELECT bool 't' >= bool 'f' AS true;
+SELECT bool 'f' < bool 't' AS true;
+SELECT bool 'f' <= bool 't' AS true;
+-- explicit casts to/from text
+SELECT 'TrUe'::text::boolean AS true, 'fAlse'::text::boolean AS false;
+SELECT ' true '::text::boolean AS true,
+ ' FALSE'::text::boolean AS false;
+SELECT true::boolean::text AS true, false::boolean::text AS false;
+SELECT ' tru e '::text::boolean AS invalid; -- error
+SELECT ''::text::boolean AS invalid; -- error
+CREATE TABLE BOOLTBL1 (f1 bool);
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 't');
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'True');
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'true');
+-- BOOLTBL1 should be full of true's at this point
+SELECT BOOLTBL1.* FROM BOOLTBL1;
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 = bool 'true';
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 <> bool 'false';
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE booleq(bool 'false', f1);
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'f');
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 = bool 'false';
+CREATE TABLE BOOLTBL2 (f1 bool);
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'f');
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'false');
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'False');
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'FALSE');
+-- This is now an invalid expression
+-- For pre-v6.3 this evaluated to false - thomas 1997-10-23
+INSERT INTO BOOLTBL2 (f1)
+ VALUES (bool 'XXX');
+-- BOOLTBL2 should be full of false's at this point
+SELECT BOOLTBL2.* FROM BOOLTBL2;
+--
+-- Tests for BooleanTest
+--
+CREATE TABLE BOOLTBL3 (d text, b bool, o int);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('true', true, 1);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('false', false, 2);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('null', null, 3);
+-- Test to make sure short-circuiting and NULL handling is
+-- correct. Use a table as source to prevent constant simplification
+-- to interfer.
+CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool);
+INSERT INTO booltbl4 VALUES (false, true, null);
+\pset null '(null)'
diff --git a/ydb/library/yql/tests/postgresql/cases/case.out b/ydb/library/yql/tests/postgresql/cases/case.out
new file mode 100644
index 0000000000..8a8de88ebe
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/case.out
@@ -0,0 +1,34 @@
+--
+-- CASE
+-- Test the case statement
+--
+CREATE TABLE CASE_TBL (
+ i integer,
+ f double precision
+);
+CREATE TABLE CASE2_TBL (
+ i integer,
+ j integer
+);
+INSERT INTO CASE_TBL VALUES (1, 10.1);
+INSERT INTO CASE_TBL VALUES (2, 20.2);
+INSERT INTO CASE_TBL VALUES (3, -30.3);
+INSERT INTO CASE_TBL VALUES (4, NULL);
+INSERT INTO CASE2_TBL VALUES (1, -1);
+INSERT INTO CASE2_TBL VALUES (2, -2);
+INSERT INTO CASE2_TBL VALUES (3, -3);
+INSERT INTO CASE2_TBL VALUES (2, -4);
+INSERT INTO CASE2_TBL VALUES (1, NULL);
+INSERT INTO CASE2_TBL VALUES (NULL, -6);
+--
+-- Simplest examples without tables
+--
+SELECT '3' AS "One",
+ CASE
+ WHEN 1 < 2 THEN 3
+ END AS "Simple WHEN";
+ One | Simple WHEN
+-----+-------------
+ 3 | 3
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/case.sql b/ydb/library/yql/tests/postgresql/cases/case.sql
new file mode 100644
index 0000000000..6c713de309
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/case.sql
@@ -0,0 +1,29 @@
+--
+-- CASE
+-- Test the case statement
+--
+CREATE TABLE CASE_TBL (
+ i integer,
+ f double precision
+);
+CREATE TABLE CASE2_TBL (
+ i integer,
+ j integer
+);
+INSERT INTO CASE_TBL VALUES (1, 10.1);
+INSERT INTO CASE_TBL VALUES (2, 20.2);
+INSERT INTO CASE_TBL VALUES (3, -30.3);
+INSERT INTO CASE_TBL VALUES (4, NULL);
+INSERT INTO CASE2_TBL VALUES (1, -1);
+INSERT INTO CASE2_TBL VALUES (2, -2);
+INSERT INTO CASE2_TBL VALUES (3, -3);
+INSERT INTO CASE2_TBL VALUES (2, -4);
+INSERT INTO CASE2_TBL VALUES (1, NULL);
+INSERT INTO CASE2_TBL VALUES (NULL, -6);
+--
+-- Simplest examples without tables
+--
+SELECT '3' AS "One",
+ CASE
+ WHEN 1 < 2 THEN 3
+ END AS "Simple WHEN";
diff --git a/ydb/library/yql/tests/postgresql/cases/char.out b/ydb/library/yql/tests/postgresql/cases/char.out
new file mode 100644
index 0000000000..4336c6f955
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/char.out
@@ -0,0 +1,15 @@
+--
+-- CHAR
+--
+-- fixed-length by value
+-- internally passed by value if <= 4 bytes in storage
+SELECT char 'c' = char 'c' AS true;
+ true
+------
+ t
+(1 row)
+
+--
+-- Build a table for testing
+--
+CREATE TABLE CHAR_TBL(f1 char);
diff --git a/ydb/library/yql/tests/postgresql/cases/char.sql b/ydb/library/yql/tests/postgresql/cases/char.sql
new file mode 100644
index 0000000000..5e9d757b19
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/char.sql
@@ -0,0 +1,14 @@
+--
+-- CHAR
+--
+
+-- fixed-length by value
+-- internally passed by value if <= 4 bytes in storage
+
+SELECT char 'c' = char 'c' AS true;
+
+--
+-- Build a table for testing
+--
+
+CREATE TABLE CHAR_TBL(f1 char);
diff --git a/ydb/library/yql/tests/postgresql/cases/comments.out b/ydb/library/yql/tests/postgresql/cases/comments.out
new file mode 100644
index 0000000000..33f612e633
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/comments.out
@@ -0,0 +1,65 @@
+--
+-- COMMENTS
+--
+SELECT 'trailing' AS first; -- trailing single line
+ first
+----------
+ trailing
+(1 row)
+
+SELECT /* embedded single line */ 'embedded' AS second;
+ second
+----------
+ embedded
+(1 row)
+
+SELECT /* both embedded and trailing single line */ 'both' AS third; -- trailing single line
+ third
+-------
+ both
+(1 row)
+
+SELECT 'before multi-line' AS fourth;
+ fourth
+-------------------
+ before multi-line
+(1 row)
+
+/* This is an example of SQL which should not execute:
+ * select 'multi-line';
+ */
+SELECT 'after multi-line' AS fifth;
+ fifth
+------------------
+ after multi-line
+(1 row)
+
+--
+-- Nested comments
+--
+/*
+SELECT 'trailing' as x1; -- inside block comment
+*/
+/* This block comment surrounds a query which itself has a block comment...
+SELECT /* embedded single line */ 'embedded' AS x2;
+*/
+SELECT -- continued after the following block comments...
+/* Deeply nested comment.
+ This includes a single apostrophe to make sure we aren't decoding this part as a string.
+SELECT 'deep nest' AS n1;
+/* Second level of nesting...
+SELECT 'deeper nest' as n2;
+/* Third level of nesting...
+SELECT 'deepest nest' as n3;
+*/
+Hoo boy. Still two deep...
+*/
+Now just one deep...
+*/
+'deeply nested example' AS sixth;
+ sixth
+-----------------------
+ deeply nested example
+(1 row)
+
+/* and this is the end of the file */
diff --git a/ydb/library/yql/tests/postgresql/cases/comments.sql b/ydb/library/yql/tests/postgresql/cases/comments.sql
new file mode 100644
index 0000000000..7aec12fac7
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/comments.sql
@@ -0,0 +1,35 @@
+--
+-- COMMENTS
+--
+SELECT 'trailing' AS first; -- trailing single line
+SELECT /* embedded single line */ 'embedded' AS second;
+SELECT /* both embedded and trailing single line */ 'both' AS third; -- trailing single line
+SELECT 'before multi-line' AS fourth;
+/* This is an example of SQL which should not execute:
+ * select 'multi-line';
+ */
+SELECT 'after multi-line' AS fifth;
+--
+-- Nested comments
+--
+/*
+SELECT 'trailing' as x1; -- inside block comment
+*/
+/* This block comment surrounds a query which itself has a block comment...
+SELECT /* embedded single line */ 'embedded' AS x2;
+*/
+SELECT -- continued after the following block comments...
+/* Deeply nested comment.
+ This includes a single apostrophe to make sure we aren't decoding this part as a string.
+SELECT 'deep nest' AS n1;
+/* Second level of nesting...
+SELECT 'deeper nest' as n2;
+/* Third level of nesting...
+SELECT 'deepest nest' as n3;
+*/
+Hoo boy. Still two deep...
+*/
+Now just one deep...
+*/
+'deeply nested example' AS sixth;
+/* and this is the end of the file */
diff --git a/ydb/library/yql/tests/postgresql/cases/create_misc.out b/ydb/library/yql/tests/postgresql/cases/create_misc.out
new file mode 100644
index 0000000000..b6ae432650
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/create_misc.out
@@ -0,0 +1,12 @@
+--
+-- for internal portal (cursor) tests
+--
+CREATE TABLE iportaltest (
+ i int4,
+ d float4,
+ p polygon
+);
+INSERT INTO iportaltest (i, d, p)
+ VALUES (1, 3.567, '(3.0,1.0),(4.0,2.0)'::polygon);
+INSERT INTO iportaltest (i, d, p)
+ VALUES (2, 89.05, '(4.0,2.0),(3.0,1.0)'::polygon);
diff --git a/ydb/library/yql/tests/postgresql/cases/create_misc.sql b/ydb/library/yql/tests/postgresql/cases/create_misc.sql
new file mode 100644
index 0000000000..b6ae432650
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/create_misc.sql
@@ -0,0 +1,12 @@
+--
+-- for internal portal (cursor) tests
+--
+CREATE TABLE iportaltest (
+ i int4,
+ d float4,
+ p polygon
+);
+INSERT INTO iportaltest (i, d, p)
+ VALUES (1, 3.567, '(3.0,1.0),(4.0,2.0)'::polygon);
+INSERT INTO iportaltest (i, d, p)
+ VALUES (2, 89.05, '(4.0,2.0),(3.0,1.0)'::polygon);
diff --git a/ydb/library/yql/tests/postgresql/cases/create_table.out b/ydb/library/yql/tests/postgresql/cases/create_table.out
new file mode 100644
index 0000000000..40c55dc27c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/create_table.out
@@ -0,0 +1,208 @@
+--
+-- CREATE_TABLE
+--
+--
+-- CLASS DEFINITIONS
+--
+CREATE TABLE hobbies_r (
+ name text,
+ person text
+);
+CREATE TABLE equipment_r (
+ name text,
+ hobby text
+);
+CREATE TABLE onek (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+CREATE TABLE tenk1 (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+CREATE TABLE tenk2 (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+CREATE TABLE person (
+ name text,
+ age int4,
+ location point
+);
+CREATE TABLE city (
+ name name,
+ location box,
+ budget city_budget
+);
+CREATE TABLE dept (
+ dname name,
+ mgrname text
+);
+CREATE TABLE slow_emp4000 (
+ home_base box
+);
+CREATE TABLE fast_emp4000 (
+ home_base box
+);
+CREATE TABLE road (
+ name text,
+ thepath path
+);
+CREATE TABLE real_city (
+ pop int4,
+ cname text,
+ outline path
+);
+--
+-- test the "star" operators a bit more thoroughly -- this time,
+-- throw in lots of NULL fields...
+--
+-- a is the type root
+-- b and c inherit from a (one-level single inheritance)
+-- d inherits from b and c (two-level multiple inheritance)
+-- e inherits from c (two-level single inheritance)
+-- f inherits from e (three-level single inheritance)
+--
+CREATE TABLE a_star (
+ class char,
+ a int4
+);
+CREATE TABLE aggtest (
+ a int2,
+ b float4
+);
+CREATE TABLE hash_i4_heap (
+ seqno int4,
+ random int4
+);
+CREATE TABLE hash_name_heap (
+ seqno int4,
+ random name
+);
+CREATE TABLE hash_txt_heap (
+ seqno int4,
+ random text
+);
+CREATE TABLE hash_f8_heap (
+ seqno int4,
+ random float8
+);
+-- don't include the hash_ovfl_heap stuff in the distribution
+-- the data set is too large for what it's worth
+--
+-- CREATE TABLE hash_ovfl_heap (
+-- x int4,
+-- y int4
+-- );
+CREATE TABLE bt_i4_heap (
+ seqno int4,
+ random int4
+);
+CREATE TABLE bt_name_heap (
+ seqno name,
+ random int4
+);
+CREATE TABLE bt_txt_heap (
+ seqno text,
+ random int4
+);
+CREATE TABLE bt_f8_heap (
+ seqno float8,
+ random int4
+);
+CREATE TABLE array_op_test (
+ seqno int4,
+ i int4[],
+ t text[]
+);
+CREATE TABLE array_index_op_test (
+ seqno int4,
+ i int4[],
+ t text[]
+);
+CREATE TABLE testjsonb (
+ j jsonb
+);
+CREATE TABLE IF NOT EXISTS test_tsvector(
+ t text,
+ a tsvector
+);
+CREATE TEMP TABLE explicitly_temp (a int primary key); -- also OK
+-- check that tables with oids cannot be created anymore
+CREATE TABLE withoid() WITH OIDS;
+ERROR: syntax error at or near "OIDS"
+LINE 1: CREATE TABLE withoid() WITH OIDS;
+ ^
+-- but explicitly not adding oids is still supported
+CREATE TEMP TABLE withoutoid() WITHOUT OIDS;
+DROP TABLE withoutoid;
+-- Verify that subtransaction rollback restores rd_createSubid.
+BEGIN;
+CREATE TABLE remember_create_subid (c int);
+SAVEPOINT q;
+DROP TABLE remember_create_subid;
+ROLLBACK TO q;
+COMMIT;
+-- Verify that subtransaction rollback restores rd_firstRelfilenodeSubid.
+CREATE TABLE remember_node_subid (c int);
+BEGIN;
+SAVEPOINT q;
+DROP TABLE remember_node_subid;
+ROLLBACK TO q;
+COMMIT;
+-- syntax does not allow empty list of values for list partitions
+CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES IN ();
+ERROR: syntax error at or near ")"
+LINE 1: ...E TABLE fail_part PARTITION OF list_parted FOR VALUES IN ();
+ ^
+-- check if compatible with the specified parent
+-- cannot create as partition of a non-partitioned table
+CREATE TABLE unparted (
+ a int
+);
+DROP TABLE unparted;
+create table defcheck_def (a int, c int, b int);
+-- test that complex default partition constraints are enforced correctly
+insert into defcheck_def values (0, 0);
diff --git a/ydb/library/yql/tests/postgresql/cases/create_table.sql b/ydb/library/yql/tests/postgresql/cases/create_table.sql
new file mode 100644
index 0000000000..2f597f9645
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/create_table.sql
@@ -0,0 +1,202 @@
+--
+-- CREATE_TABLE
+--
+--
+-- CLASS DEFINITIONS
+--
+CREATE TABLE hobbies_r (
+ name text,
+ person text
+);
+CREATE TABLE equipment_r (
+ name text,
+ hobby text
+);
+CREATE TABLE onek (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+CREATE TABLE tenk1 (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+CREATE TABLE tenk2 (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+CREATE TABLE person (
+ name text,
+ age int4,
+ location point
+);
+CREATE TABLE city (
+ name name,
+ location box,
+ budget city_budget
+);
+CREATE TABLE dept (
+ dname name,
+ mgrname text
+);
+CREATE TABLE slow_emp4000 (
+ home_base box
+);
+CREATE TABLE fast_emp4000 (
+ home_base box
+);
+CREATE TABLE road (
+ name text,
+ thepath path
+);
+CREATE TABLE real_city (
+ pop int4,
+ cname text,
+ outline path
+);
+--
+-- test the "star" operators a bit more thoroughly -- this time,
+-- throw in lots of NULL fields...
+--
+-- a is the type root
+-- b and c inherit from a (one-level single inheritance)
+-- d inherits from b and c (two-level multiple inheritance)
+-- e inherits from c (two-level single inheritance)
+-- f inherits from e (three-level single inheritance)
+--
+CREATE TABLE a_star (
+ class char,
+ a int4
+);
+CREATE TABLE aggtest (
+ a int2,
+ b float4
+);
+CREATE TABLE hash_i4_heap (
+ seqno int4,
+ random int4
+);
+CREATE TABLE hash_name_heap (
+ seqno int4,
+ random name
+);
+CREATE TABLE hash_txt_heap (
+ seqno int4,
+ random text
+);
+CREATE TABLE hash_f8_heap (
+ seqno int4,
+ random float8
+);
+-- don't include the hash_ovfl_heap stuff in the distribution
+-- the data set is too large for what it's worth
+--
+-- CREATE TABLE hash_ovfl_heap (
+-- x int4,
+-- y int4
+-- );
+CREATE TABLE bt_i4_heap (
+ seqno int4,
+ random int4
+);
+CREATE TABLE bt_name_heap (
+ seqno name,
+ random int4
+);
+CREATE TABLE bt_txt_heap (
+ seqno text,
+ random int4
+);
+CREATE TABLE bt_f8_heap (
+ seqno float8,
+ random int4
+);
+CREATE TABLE array_op_test (
+ seqno int4,
+ i int4[],
+ t text[]
+);
+CREATE TABLE array_index_op_test (
+ seqno int4,
+ i int4[],
+ t text[]
+);
+CREATE TABLE testjsonb (
+ j jsonb
+);
+CREATE TABLE IF NOT EXISTS test_tsvector(
+ t text,
+ a tsvector
+);
+CREATE TEMP TABLE explicitly_temp (a int primary key); -- also OK
+-- check that tables with oids cannot be created anymore
+CREATE TABLE withoid() WITH OIDS;
+-- but explicitly not adding oids is still supported
+CREATE TEMP TABLE withoutoid() WITHOUT OIDS;
+DROP TABLE withoutoid;
+-- Verify that subtransaction rollback restores rd_createSubid.
+BEGIN;
+CREATE TABLE remember_create_subid (c int);
+SAVEPOINT q;
+DROP TABLE remember_create_subid;
+ROLLBACK TO q;
+COMMIT;
+-- Verify that subtransaction rollback restores rd_firstRelfilenodeSubid.
+CREATE TABLE remember_node_subid (c int);
+BEGIN;
+SAVEPOINT q;
+DROP TABLE remember_node_subid;
+ROLLBACK TO q;
+COMMIT;
+-- syntax does not allow empty list of values for list partitions
+CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES IN ();
+-- check if compatible with the specified parent
+-- cannot create as partition of a non-partitioned table
+CREATE TABLE unparted (
+ a int
+);
+DROP TABLE unparted;
+create table defcheck_def (a int, c int, b int);
+-- test that complex default partition constraints are enforced correctly
+insert into defcheck_def values (0, 0);
diff --git a/ydb/library/yql/tests/postgresql/cases/date.out b/ydb/library/yql/tests/postgresql/cases/date.out
new file mode 100644
index 0000000000..e73c7f0de9
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/date.out
@@ -0,0 +1,20 @@
+--
+-- DATE
+--
+CREATE TABLE DATE_TBL (f1 date);
+INSERT INTO DATE_TBL VALUES ('1957-04-09');
+INSERT INTO DATE_TBL VALUES ('1957-06-13');
+INSERT INTO DATE_TBL VALUES ('1996-02-28');
+INSERT INTO DATE_TBL VALUES ('1996-02-29');
+INSERT INTO DATE_TBL VALUES ('1996-03-01');
+INSERT INTO DATE_TBL VALUES ('1996-03-02');
+INSERT INTO DATE_TBL VALUES ('1997-02-28');
+INSERT INTO DATE_TBL VALUES ('1997-03-01');
+INSERT INTO DATE_TBL VALUES ('1997-03-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-01');
+INSERT INTO DATE_TBL VALUES ('2000-04-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-03');
+INSERT INTO DATE_TBL VALUES ('2038-04-08');
+INSERT INTO DATE_TBL VALUES ('2039-04-09');
+INSERT INTO DATE_TBL VALUES ('2040-04-10');
+INSERT INTO DATE_TBL VALUES ('2040-04-10 BC');
diff --git a/ydb/library/yql/tests/postgresql/cases/date.sql b/ydb/library/yql/tests/postgresql/cases/date.sql
new file mode 100644
index 0000000000..e73c7f0de9
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/date.sql
@@ -0,0 +1,20 @@
+--
+-- DATE
+--
+CREATE TABLE DATE_TBL (f1 date);
+INSERT INTO DATE_TBL VALUES ('1957-04-09');
+INSERT INTO DATE_TBL VALUES ('1957-06-13');
+INSERT INTO DATE_TBL VALUES ('1996-02-28');
+INSERT INTO DATE_TBL VALUES ('1996-02-29');
+INSERT INTO DATE_TBL VALUES ('1996-03-01');
+INSERT INTO DATE_TBL VALUES ('1996-03-02');
+INSERT INTO DATE_TBL VALUES ('1997-02-28');
+INSERT INTO DATE_TBL VALUES ('1997-03-01');
+INSERT INTO DATE_TBL VALUES ('1997-03-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-01');
+INSERT INTO DATE_TBL VALUES ('2000-04-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-03');
+INSERT INTO DATE_TBL VALUES ('2038-04-08');
+INSERT INTO DATE_TBL VALUES ('2039-04-09');
+INSERT INTO DATE_TBL VALUES ('2040-04-10');
+INSERT INTO DATE_TBL VALUES ('2040-04-10 BC');
diff --git a/ydb/library/yql/tests/postgresql/cases/dbsize.out b/ydb/library/yql/tests/postgresql/cases/dbsize.out
new file mode 100644
index 0000000000..29804aee8b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/dbsize.out
@@ -0,0 +1,188 @@
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10::bigint), (1000::bigint), (1000000::bigint),
+ (1000000000::bigint), (1000000000000::bigint),
+ (1000000000000000::bigint)) x(size);
+ size | pg_size_pretty | pg_size_pretty
+------------------+----------------+----------------
+ 10 | 10 bytes | -10 bytes
+ 1000 | 1000 bytes | -1000 bytes
+ 1000000 | 977 kB | -977 kB
+ 1000000000 | 954 MB | -954 MB
+ 1000000000000 | 931 GB | -931 GB
+ 1000000000000000 | 909 TB | -909 TB
+(6 rows)
+
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10::numeric), (1000::numeric), (1000000::numeric),
+ (1000000000::numeric), (1000000000000::numeric),
+ (1000000000000000::numeric),
+ (10.5::numeric), (1000.5::numeric), (1000000.5::numeric),
+ (1000000000.5::numeric), (1000000000000.5::numeric),
+ (1000000000000000.5::numeric)) x(size);
+ size | pg_size_pretty | pg_size_pretty
+--------------------+----------------+----------------
+ 10 | 10 bytes | -10 bytes
+ 1000 | 1000 bytes | -1000 bytes
+ 1000000 | 977 kB | -977 kB
+ 1000000000 | 954 MB | -954 MB
+ 1000000000000 | 931 GB | -931 GB
+ 1000000000000000 | 909 TB | -909 TB
+ 10.5 | 10.5 bytes | -10.5 bytes
+ 1000.5 | 1000.5 bytes | -1000.5 bytes
+ 1000000.5 | 977 kB | -977 kB
+ 1000000000.5 | 954 MB | -954 MB
+ 1000000000000.5 | 931 GB | -931 GB
+ 1000000000000000.5 | 909 TB | -909 TB
+(12 rows)
+
+-- test where units change up
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10239::bigint), (10240::bigint),
+ (10485247::bigint), (10485248::bigint),
+ (10736893951::bigint), (10736893952::bigint),
+ (10994579406847::bigint), (10994579406848::bigint),
+ (11258449312612351::bigint), (11258449312612352::bigint)) x(size);
+ size | pg_size_pretty | pg_size_pretty
+-------------------+----------------+----------------
+ 10239 | 10239 bytes | -10239 bytes
+ 10240 | 10 kB | -10 kB
+ 10485247 | 10239 kB | -10239 kB
+ 10485248 | 10 MB | -10 MB
+ 10736893951 | 10239 MB | -10239 MB
+ 10736893952 | 10 GB | -10 GB
+ 10994579406847 | 10239 GB | -10239 GB
+ 10994579406848 | 10 TB | -10 TB
+ 11258449312612351 | 10239 TB | -10239 TB
+ 11258449312612352 | 10240 TB | -10240 TB
+(10 rows)
+
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10239::numeric), (10240::numeric),
+ (10485247::numeric), (10485248::numeric),
+ (10736893951::numeric), (10736893952::numeric),
+ (10994579406847::numeric), (10994579406848::numeric),
+ (11258449312612351::numeric), (11258449312612352::numeric)) x(size);
+ size | pg_size_pretty | pg_size_pretty
+-------------------+----------------+----------------
+ 10239 | 10239 bytes | -10239 bytes
+ 10240 | 10 kB | -10 kB
+ 10485247 | 10239 kB | -10239 kB
+ 10485248 | 10 MB | -10 MB
+ 10736893951 | 10239 MB | -10239 MB
+ 10736893952 | 10 GB | -10 GB
+ 10994579406847 | 10239 GB | -10239 GB
+ 10994579406848 | 10 TB | -10 TB
+ 11258449312612351 | 10239 TB | -10239 TB
+ 11258449312612352 | 10240 TB | -10240 TB
+(10 rows)
+
+-- pg_size_bytes() tests
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('1'), ('123bytes'), ('1kB'), ('1MB'), (' 1 GB'), ('1.5 GB '),
+ ('1TB'), ('3000 TB'), ('1e6 MB')) x(size);
+ size | pg_size_bytes
+----------+------------------
+ 1 | 1
+ 123bytes | 123
+ 1kB | 1024
+ 1MB | 1048576
+ 1 GB | 1073741824
+ 1.5 GB | 1610612736
+ 1TB | 1099511627776
+ 3000 TB | 3298534883328000
+ 1e6 MB | 1048576000000
+(9 rows)
+
+-- case-insensitive units are supported
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('1'), ('123bYteS'), ('1kb'), ('1mb'), (' 1 Gb'), ('1.5 gB '),
+ ('1tb'), ('3000 tb'), ('1e6 mb')) x(size);
+ size | pg_size_bytes
+----------+------------------
+ 1 | 1
+ 123bYteS | 123
+ 1kb | 1024
+ 1mb | 1048576
+ 1 Gb | 1073741824
+ 1.5 gB | 1610612736
+ 1tb | 1099511627776
+ 3000 tb | 3298534883328000
+ 1e6 mb | 1048576000000
+(9 rows)
+
+-- negative numbers are supported
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('-1'), ('-123bytes'), ('-1kb'), ('-1mb'), (' -1 Gb'), ('-1.5 gB '),
+ ('-1tb'), ('-3000 TB'), ('-10e-1 MB')) x(size);
+ size | pg_size_bytes
+-----------+-------------------
+ -1 | -1
+ -123bytes | -123
+ -1kb | -1024
+ -1mb | -1048576
+ -1 Gb | -1073741824
+ -1.5 gB | -1610612736
+ -1tb | -1099511627776
+ -3000 TB | -3298534883328000
+ -10e-1 MB | -1048576
+(9 rows)
+
+-- different cases with allowed points
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('-1.'), ('-1.kb'), ('-1. kb'), ('-0. gb'),
+ ('-.1'), ('-.1kb'), ('-.1 kb'), ('-.0 gb')) x(size);
+ size | pg_size_bytes
+--------+---------------
+ -1. | -1
+ -1.kb | -1024
+ -1. kb | -1024
+ -0. gb | 0
+ -.1 | 0
+ -.1kb | -102
+ -.1 kb | -102
+ -.0 gb | 0
+(8 rows)
+
+-- invalid inputs
+SELECT pg_size_bytes('1 AB');
+ERROR: invalid size: "1 AB"
+DETAIL: Invalid size unit: "AB".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('1 AB A');
+ERROR: invalid size: "1 AB A"
+DETAIL: Invalid size unit: "AB A".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('1 AB A ');
+ERROR: invalid size: "1 AB A "
+DETAIL: Invalid size unit: "AB A".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('9223372036854775807.9');
+ERROR: bigint out of range
+SELECT pg_size_bytes('1e100');
+ERROR: bigint out of range
+SELECT pg_size_bytes('1e1000000000000000000');
+ERROR: value overflows numeric format
+SELECT pg_size_bytes('1 byte'); -- the singular "byte" is not supported
+ERROR: invalid size: "1 byte"
+DETAIL: Invalid size unit: "byte".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('');
+ERROR: invalid size: ""
+SELECT pg_size_bytes('kb');
+ERROR: invalid size: "kb"
+SELECT pg_size_bytes('..');
+ERROR: invalid size: ".."
+SELECT pg_size_bytes('-.');
+ERROR: invalid size: "-."
+SELECT pg_size_bytes('-.kb');
+ERROR: invalid size: "-.kb"
+SELECT pg_size_bytes('-. kb');
+ERROR: invalid size: "-. kb"
+SELECT pg_size_bytes('.+912');
+ERROR: invalid size: ".+912"
+SELECT pg_size_bytes('+912+ kB');
+ERROR: invalid size: "+912+ kB"
+DETAIL: Invalid size unit: "+ kB".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('++123 kB');
+ERROR: invalid size: "++123 kB"
diff --git a/ydb/library/yql/tests/postgresql/cases/dbsize.sql b/ydb/library/yql/tests/postgresql/cases/dbsize.sql
new file mode 100644
index 0000000000..1ae573e05e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/dbsize.sql
@@ -0,0 +1,57 @@
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10::bigint), (1000::bigint), (1000000::bigint),
+ (1000000000::bigint), (1000000000000::bigint),
+ (1000000000000000::bigint)) x(size);
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10::numeric), (1000::numeric), (1000000::numeric),
+ (1000000000::numeric), (1000000000000::numeric),
+ (1000000000000000::numeric),
+ (10.5::numeric), (1000.5::numeric), (1000000.5::numeric),
+ (1000000000.5::numeric), (1000000000000.5::numeric),
+ (1000000000000000.5::numeric)) x(size);
+-- test where units change up
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10239::bigint), (10240::bigint),
+ (10485247::bigint), (10485248::bigint),
+ (10736893951::bigint), (10736893952::bigint),
+ (10994579406847::bigint), (10994579406848::bigint),
+ (11258449312612351::bigint), (11258449312612352::bigint)) x(size);
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10239::numeric), (10240::numeric),
+ (10485247::numeric), (10485248::numeric),
+ (10736893951::numeric), (10736893952::numeric),
+ (10994579406847::numeric), (10994579406848::numeric),
+ (11258449312612351::numeric), (11258449312612352::numeric)) x(size);
+-- pg_size_bytes() tests
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('1'), ('123bytes'), ('1kB'), ('1MB'), (' 1 GB'), ('1.5 GB '),
+ ('1TB'), ('3000 TB'), ('1e6 MB')) x(size);
+-- case-insensitive units are supported
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('1'), ('123bYteS'), ('1kb'), ('1mb'), (' 1 Gb'), ('1.5 gB '),
+ ('1tb'), ('3000 tb'), ('1e6 mb')) x(size);
+-- negative numbers are supported
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('-1'), ('-123bytes'), ('-1kb'), ('-1mb'), (' -1 Gb'), ('-1.5 gB '),
+ ('-1tb'), ('-3000 TB'), ('-10e-1 MB')) x(size);
+-- different cases with allowed points
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('-1.'), ('-1.kb'), ('-1. kb'), ('-0. gb'),
+ ('-.1'), ('-.1kb'), ('-.1 kb'), ('-.0 gb')) x(size);
+-- invalid inputs
+SELECT pg_size_bytes('1 AB');
+SELECT pg_size_bytes('1 AB A');
+SELECT pg_size_bytes('1 AB A ');
+SELECT pg_size_bytes('9223372036854775807.9');
+SELECT pg_size_bytes('1e100');
+SELECT pg_size_bytes('1e1000000000000000000');
+SELECT pg_size_bytes('1 byte'); -- the singular "byte" is not supported
+SELECT pg_size_bytes('');
+SELECT pg_size_bytes('kb');
+SELECT pg_size_bytes('..');
+SELECT pg_size_bytes('-.');
+SELECT pg_size_bytes('-.kb');
+SELECT pg_size_bytes('-. kb');
+SELECT pg_size_bytes('.+912');
+SELECT pg_size_bytes('+912+ kB');
+SELECT pg_size_bytes('++123 kB');
diff --git a/ydb/library/yql/tests/postgresql/cases/delete.out b/ydb/library/yql/tests/postgresql/cases/delete.out
new file mode 100644
index 0000000000..e0469409bf
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/delete.out
@@ -0,0 +1,5 @@
+CREATE TABLE delete_test (
+ id SERIAL PRIMARY KEY,
+ a INT,
+ b text
+);
diff --git a/ydb/library/yql/tests/postgresql/cases/delete.sql b/ydb/library/yql/tests/postgresql/cases/delete.sql
new file mode 100644
index 0000000000..e0469409bf
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/delete.sql
@@ -0,0 +1,5 @@
+CREATE TABLE delete_test (
+ id SERIAL PRIMARY KEY,
+ a INT,
+ b text
+);
diff --git a/ydb/library/yql/tests/postgresql/cases/expressions.out b/ydb/library/yql/tests/postgresql/cases/expressions.out
new file mode 100644
index 0000000000..42a99dbaaf
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/expressions.out
@@ -0,0 +1,61 @@
+--
+-- expression evaluation tests that don't fit into a more specific file
+--
+--
+-- Tests for SQLVAlueFunction
+--
+-- current_date (always matches because of transactional behaviour)
+SELECT date(now())::text = current_date::text;
+ ?column?
+----------
+ t
+(1 row)
+
+-- current_time / localtime
+SELECT now()::timetz::text = current_time::text;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT now()::timetz(4)::text = current_time(4)::text;
+ ?column?
+----------
+ t
+(1 row)
+
+-- current_timestamp / localtimestamp (always matches because of transactional behaviour)
+SELECT current_timestamp = NOW();
+ ?column?
+----------
+ t
+(1 row)
+
+-- precision
+SELECT length(current_timestamp::text) >= length(current_timestamp(0)::text);
+ ?column?
+----------
+ t
+(1 row)
+
+--
+-- Test parsing of a no-op cast to a type with unspecified typmod
+--
+begin;
+create table numeric_tbl (f1 numeric(18,3), f2 numeric);
+-- bpchar, lacking planner support for its length coercion function,
+-- could behave differently
+create table bpchar_tbl (f1 character(16) unique, f2 bpchar);
+rollback;
+--
+-- Tests for ScalarArrayOpExpr with a hashfn
+--
+-- create a stable function so that the tests below are not
+-- evaluated using the planner's constant folding.
+begin;
+rollback;
+-- Test with non-strict equality function.
+-- We need to create our own type for this.
+begin;
+create table inttest (a myint);
+rollback;
diff --git a/ydb/library/yql/tests/postgresql/cases/expressions.sql b/ydb/library/yql/tests/postgresql/cases/expressions.sql
new file mode 100644
index 0000000000..1bacf1999b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/expressions.sql
@@ -0,0 +1,36 @@
+--
+-- expression evaluation tests that don't fit into a more specific file
+--
+--
+-- Tests for SQLVAlueFunction
+--
+-- current_date (always matches because of transactional behaviour)
+SELECT date(now())::text = current_date::text;
+-- current_time / localtime
+SELECT now()::timetz::text = current_time::text;
+SELECT now()::timetz(4)::text = current_time(4)::text;
+-- current_timestamp / localtimestamp (always matches because of transactional behaviour)
+SELECT current_timestamp = NOW();
+-- precision
+SELECT length(current_timestamp::text) >= length(current_timestamp(0)::text);
+--
+-- Test parsing of a no-op cast to a type with unspecified typmod
+--
+begin;
+create table numeric_tbl (f1 numeric(18,3), f2 numeric);
+-- bpchar, lacking planner support for its length coercion function,
+-- could behave differently
+create table bpchar_tbl (f1 character(16) unique, f2 bpchar);
+rollback;
+--
+-- Tests for ScalarArrayOpExpr with a hashfn
+--
+-- create a stable function so that the tests below are not
+-- evaluated using the planner's constant folding.
+begin;
+rollback;
+-- Test with non-strict equality function.
+-- We need to create our own type for this.
+begin;
+create table inttest (a myint);
+rollback;
diff --git a/ydb/library/yql/tests/postgresql/cases/float4.out b/ydb/library/yql/tests/postgresql/cases/float4.out
new file mode 100644
index 0000000000..fd954860f4
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/float4.out
@@ -0,0 +1,194 @@
+--
+-- FLOAT4
+--
+CREATE TABLE FLOAT4_TBL (f1 float4);
+INSERT INTO FLOAT4_TBL(f1) VALUES (' 0.0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 ');
+INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 ');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20');
+-- test for over and under flow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
+ERROR: "10e70" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70');
+ERROR: "-10e70" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70');
+ERROR: "10e-70" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70');
+ERROR: "-10e-70" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'::float8);
+ERROR: value out of range: overflow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'::float8);
+ERROR: value out of range: overflow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'::float8);
+ERROR: value out of range: underflow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'::float8);
+ERROR: value out of range: underflow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400');
+ERROR: "10e400" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e400');
+ERROR: "-10e400" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e400');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400');
+ERROR: "10e-400" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400');
+ERROR: "-10e-400" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400');
+ ^
+-- bad input
+INSERT INTO FLOAT4_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type real: ""
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type real: " "
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz');
+ERROR: invalid input syntax for type real: "xyz"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0');
+ERROR: invalid input syntax for type real: "5.0.0"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0');
+ERROR: invalid input syntax for type real: "5 . 0"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0');
+ERROR: invalid input syntax for type real: "5. 0"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0');
+ERROR: invalid input syntax for type real: " - 3.0"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5');
+ERROR: invalid input syntax for type real: "123 5"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5');
+ ^
+-- bad special inputs
+SELECT 'N A N'::float4;
+ERROR: invalid input syntax for type real: "N A N"
+LINE 1: SELECT 'N A N'::float4;
+ ^
+SELECT 'NaN x'::float4;
+ERROR: invalid input syntax for type real: "NaN x"
+LINE 1: SELECT 'NaN x'::float4;
+ ^
+SELECT ' INFINITY x'::float4;
+ERROR: invalid input syntax for type real: " INFINITY x"
+LINE 1: SELECT ' INFINITY x'::float4;
+ ^
+SELECT '42'::float4 / 'Infinity'::float4;
+ ?column?
+----------
+ 0
+(1 row)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 = '1004.3';
+ f1
+--------
+ 1004.3
+(1 row)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' > f.f1;
+ f1
+---------------
+ 0
+ -34.84
+ 1.2345679e-20
+(3 rows)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 < '1004.3';
+ f1
+---------------
+ 0
+ -34.84
+ 1.2345679e-20
+(3 rows)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' >= f.f1;
+ f1
+---------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345679e-20
+(4 rows)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <= '1004.3';
+ f1
+---------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345679e-20
+(4 rows)
+
+-- test divide by zero
+SELECT f.f1 / '0.0' from FLOAT4_TBL f;
+ERROR: division by zero
+-- test edge-case coercions to integer
+SELECT '32767.4'::float4::int2;
+ int2
+-------
+ 32767
+(1 row)
+
+SELECT '32767.6'::float4::int2;
+ERROR: smallint out of range
+SELECT '-32768.4'::float4::int2;
+ int2
+--------
+ -32768
+(1 row)
+
+SELECT '-32768.6'::float4::int2;
+ERROR: smallint out of range
+SELECT '2147483520'::float4::int4;
+ int4
+------------
+ 2147483520
+(1 row)
+
+SELECT '2147483647'::float4::int4;
+ERROR: integer out of range
+SELECT '-2147483648.5'::float4::int4;
+ int4
+-------------
+ -2147483648
+(1 row)
+
+SELECT '-2147483900'::float4::int4;
+ERROR: integer out of range
+SELECT '9223369837831520256'::float4::int8;
+ int8
+---------------------
+ 9223369837831520256
+(1 row)
+
+SELECT '9223372036854775807'::float4::int8;
+ERROR: bigint out of range
+SELECT '-9223372036854775808.5'::float4::int8;
+ int8
+----------------------
+ -9223372036854775808
+(1 row)
+
+SELECT '-9223380000000000000'::float4::int8;
+ERROR: bigint out of range
diff --git a/ydb/library/yql/tests/postgresql/cases/float4.sql b/ydb/library/yql/tests/postgresql/cases/float4.sql
new file mode 100644
index 0000000000..12d1da606d
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/float4.sql
@@ -0,0 +1,56 @@
+--
+-- FLOAT4
+--
+CREATE TABLE FLOAT4_TBL (f1 float4);
+INSERT INTO FLOAT4_TBL(f1) VALUES (' 0.0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 ');
+INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 ');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20');
+-- test for over and under flow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'::float8);
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'::float8);
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'::float8);
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'::float8);
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e400');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400');
+-- bad input
+INSERT INTO FLOAT4_TBL(f1) VALUES ('');
+INSERT INTO FLOAT4_TBL(f1) VALUES (' ');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0');
+INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5');
+-- bad special inputs
+SELECT 'N A N'::float4;
+SELECT 'NaN x'::float4;
+SELECT ' INFINITY x'::float4;
+SELECT '42'::float4 / 'Infinity'::float4;
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 = '1004.3';
+SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' > f.f1;
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 < '1004.3';
+SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' >= f.f1;
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <= '1004.3';
+-- test divide by zero
+SELECT f.f1 / '0.0' from FLOAT4_TBL f;
+-- test edge-case coercions to integer
+SELECT '32767.4'::float4::int2;
+SELECT '32767.6'::float4::int2;
+SELECT '-32768.4'::float4::int2;
+SELECT '-32768.6'::float4::int2;
+SELECT '2147483520'::float4::int4;
+SELECT '2147483647'::float4::int4;
+SELECT '-2147483648.5'::float4::int4;
+SELECT '-2147483900'::float4::int4;
+SELECT '9223369837831520256'::float4::int8;
+SELECT '9223372036854775807'::float4::int8;
+SELECT '-9223372036854775808.5'::float4::int8;
+SELECT '-9223380000000000000'::float4::int8;
diff --git a/ydb/library/yql/tests/postgresql/cases/float8.out b/ydb/library/yql/tests/postgresql/cases/float8.out
new file mode 100644
index 0000000000..18127b354e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/float8.out
@@ -0,0 +1,480 @@
+--
+-- FLOAT8
+--
+CREATE TABLE FLOAT8_TBL(f1 float8);
+INSERT INTO FLOAT8_TBL(f1) VALUES (' 0.0 ');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30 ');
+INSERT INTO FLOAT8_TBL(f1) VALUES (' -34.84');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e+200');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e-200');
+-- test for underflow and overflow handling
+SELECT '10e400'::float8;
+ERROR: "10e400" is out of range for type double precision
+LINE 1: SELECT '10e400'::float8;
+ ^
+SELECT '-10e400'::float8;
+ERROR: "-10e400" is out of range for type double precision
+LINE 1: SELECT '-10e400'::float8;
+ ^
+SELECT '10e-400'::float8;
+ERROR: "10e-400" is out of range for type double precision
+LINE 1: SELECT '10e-400'::float8;
+ ^
+SELECT '-10e-400'::float8;
+ERROR: "-10e-400" is out of range for type double precision
+LINE 1: SELECT '-10e-400'::float8;
+ ^
+-- bad input
+INSERT INTO FLOAT8_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type double precision: ""
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type double precision: " "
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz');
+ERROR: invalid input syntax for type double precision: "xyz"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0');
+ERROR: invalid input syntax for type double precision: "5.0.0"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0');
+ERROR: invalid input syntax for type double precision: "5 . 0"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0');
+ERROR: invalid input syntax for type double precision: "5. 0"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3');
+ERROR: invalid input syntax for type double precision: " - 3"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5');
+ERROR: invalid input syntax for type double precision: "123 5"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5');
+ ^
+-- bad special inputs
+SELECT 'N A N'::float8;
+ERROR: invalid input syntax for type double precision: "N A N"
+LINE 1: SELECT 'N A N'::float8;
+ ^
+SELECT 'NaN x'::float8;
+ERROR: invalid input syntax for type double precision: "NaN x"
+LINE 1: SELECT 'NaN x'::float8;
+ ^
+SELECT ' INFINITY x'::float8;
+ERROR: invalid input syntax for type double precision: " INFINITY x"
+LINE 1: SELECT ' INFINITY x'::float8;
+ ^
+SELECT '42'::float8 / 'Infinity'::float8;
+ ?column?
+----------
+ 0
+(1 row)
+
+SELECT * FROM FLOAT8_TBL;
+ f1
+----------------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345678901234e+200
+ 1.2345678901234e-200
+(5 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <> '1004.3';
+ f1
+----------------------
+ 0
+ -34.84
+ 1.2345678901234e+200
+ 1.2345678901234e-200
+(4 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 = '1004.3';
+ f1
+--------
+ 1004.3
+(1 row)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' > f.f1;
+ f1
+----------------------
+ 0
+ -34.84
+ 1.2345678901234e-200
+(3 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 < '1004.3';
+ f1
+----------------------
+ 0
+ -34.84
+ 1.2345678901234e-200
+(3 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' >= f.f1;
+ f1
+----------------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345678901234e-200
+(4 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3';
+ f1
+----------------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345678901234e-200
+(4 rows)
+
+SELECT f.f1, f.f1 * '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+----------------------+-----------------------
+ 1004.3 | -10043
+ 1.2345678901234e+200 | -1.2345678901234e+201
+ 1.2345678901234e-200 | -1.2345678901234e-199
+(3 rows)
+
+SELECT f.f1, f.f1 + '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+----------------------+----------------------
+ 1004.3 | 994.3
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | -10
+(3 rows)
+
+SELECT f.f1, f.f1 / '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+----------------------+-----------------------
+ 1004.3 | -100.42999999999999
+ 1.2345678901234e+200 | -1.2345678901234e+199
+ 1.2345678901234e-200 | -1.2345678901234e-201
+(3 rows)
+
+SELECT f.f1, f.f1 - '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+----------------------+----------------------
+ 1004.3 | 1014.3
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | 10
+(3 rows)
+
+SELECT f.f1 ^ '2.0' AS square_f1
+ FROM FLOAT8_TBL f where f.f1 = '1004.3';
+ square_f1
+--------------------
+ 1008618.4899999999
+(1 row)
+
+-- absolute value
+SELECT f.f1, @f.f1 AS abs_f1
+ FROM FLOAT8_TBL f;
+ f1 | abs_f1
+----------------------+----------------------
+ 0 | 0
+ 1004.3 | 1004.3
+ -34.84 | 34.84
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | 1.2345678901234e-200
+(5 rows)
+
+-- truncate
+SELECT f.f1, trunc(f.f1) AS trunc_f1
+ FROM FLOAT8_TBL f;
+ f1 | trunc_f1
+----------------------+----------------------
+ 0 | 0
+ 1004.3 | 1004
+ -34.84 | -34
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | 0
+(5 rows)
+
+-- round
+SELECT f.f1, round(f.f1) AS round_f1
+ FROM FLOAT8_TBL f;
+ f1 | round_f1
+----------------------+----------------------
+ 0 | 0
+ 1004.3 | 1004
+ -34.84 | -35
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | 0
+(5 rows)
+
+-- avoid bit-exact output here because operations may not be bit-exact.
+SET extra_float_digits = 0;
+-- square root
+SELECT sqrt(float8 '64') AS eight;
+ eight
+-------
+ 8
+(1 row)
+
+SELECT |/ float8 '64' AS eight;
+ eight
+-------
+ 8
+(1 row)
+
+-- power
+SELECT power(float8 '144', float8 '0.5');
+ power
+-------
+ 12
+(1 row)
+
+SELECT power(float8 '1', float8 'NaN');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 'NaN', float8 '0');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 'inf', float8 '0');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '-inf', float8 '0');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '0', float8 'inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 '0', float8 '-inf');
+ERROR: zero raised to a negative power is undefined
+SELECT power(float8 '1', float8 'inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '1', float8 '-inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '-1', float8 'inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '-1', float8 '-inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '0.1', float8 'inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 '-0.1', float8 'inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 '1.1', float8 '-inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 '-1.1', float8 '-inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 'inf', float8 '-2');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 'inf', float8 '-inf');
+ power
+-------
+ 0
+(1 row)
+
+-- Intel's icc misoptimizes the code that controls the sign of this result,
+-- even with -mp1. Pending a fix for that, only test for "is it zero".
+SELECT power(float8 '-inf', float8 '-2') = '0';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT power(float8 '-inf', float8 '-3');
+ power
+-------
+ -0
+(1 row)
+
+SELECT power(float8 '-inf', float8 '3.5');
+ERROR: a negative number raised to a non-integer power yields a complex result
+SELECT power(float8 '-inf', float8 '-inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT * FROM FLOAT8_TBL;
+ f1
+----------------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345678901234e+200
+ 1.2345678901234e-200
+(5 rows)
+
+SELECT f.f1 * '1e200' from FLOAT8_TBL f;
+ERROR: value out of range: overflow
+SELECT f.f1 ^ '1e200' from FLOAT8_TBL f;
+ERROR: value out of range: overflow
+SELECT 0 ^ 0 + 0 ^ 1 + 0 ^ 0.0 + 0 ^ 0.5;
+ ?column?
+----------
+ 2
+(1 row)
+
+SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 = '0.0' ;
+ERROR: cannot take logarithm of zero
+SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0' ;
+ERROR: cannot take logarithm of a negative number
+SELECT f.f1 / '0.0' from FLOAT8_TBL f;
+ERROR: division by zero
+SELECT asinh(float8 '1');
+ asinh
+-------------------
+ 0.881373587019543
+(1 row)
+
+SELECT tanh(float8 'infinity');
+ tanh
+------
+ 1
+(1 row)
+
+SELECT tanh(float8 '-infinity');
+ tanh
+------
+ -1
+(1 row)
+
+-- acosh(Inf) should be Inf, but some mingw versions produce NaN, so skip test
+-- SELECT acosh(float8 'infinity');
+SELECT acosh(float8 '-infinity');
+ERROR: input is out of range
+SELECT atanh(float8 'infinity');
+ERROR: input is out of range
+SELECT atanh(float8 '-infinity');
+ERROR: input is out of range
+RESET extra_float_digits;
+-- test for over- and underflow
+INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
+ERROR: "10e400" is out of range for type double precision
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400');
+ERROR: "-10e400" is out of range for type double precision
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400');
+ERROR: "10e-400" is out of range for type double precision
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400');
+ERROR: "-10e-400" is out of range for type double precision
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('0.0');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-34.84');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
+-- test edge-case coercions to integer
+SELECT '32767.4'::float8::int2;
+ int2
+-------
+ 32767
+(1 row)
+
+SELECT '32767.6'::float8::int2;
+ERROR: smallint out of range
+SELECT '-32768.4'::float8::int2;
+ int2
+--------
+ -32768
+(1 row)
+
+SELECT '-32768.6'::float8::int2;
+ERROR: smallint out of range
+SELECT '2147483647.4'::float8::int4;
+ int4
+------------
+ 2147483647
+(1 row)
+
+SELECT '2147483647.6'::float8::int4;
+ERROR: integer out of range
+SELECT '-2147483648.4'::float8::int4;
+ int4
+-------------
+ -2147483648
+(1 row)
+
+SELECT '-2147483648.6'::float8::int4;
+ERROR: integer out of range
+SELECT '9223372036854773760'::float8::int8;
+ int8
+---------------------
+ 9223372036854773760
+(1 row)
+
+SELECT '9223372036854775807'::float8::int8;
+ERROR: bigint out of range
+SELECT '-9223372036854775808.5'::float8::int8;
+ int8
+----------------------
+ -9223372036854775808
+(1 row)
+
+SELECT '-9223372036854780000'::float8::int8;
+ERROR: bigint out of range
diff --git a/ydb/library/yql/tests/postgresql/cases/float8.sql b/ydb/library/yql/tests/postgresql/cases/float8.sql
new file mode 100644
index 0000000000..334a80f800
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/float8.sql
@@ -0,0 +1,126 @@
+--
+-- FLOAT8
+--
+CREATE TABLE FLOAT8_TBL(f1 float8);
+INSERT INTO FLOAT8_TBL(f1) VALUES (' 0.0 ');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30 ');
+INSERT INTO FLOAT8_TBL(f1) VALUES (' -34.84');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e+200');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e-200');
+-- test for underflow and overflow handling
+SELECT '10e400'::float8;
+SELECT '-10e400'::float8;
+SELECT '10e-400'::float8;
+SELECT '-10e-400'::float8;
+-- bad input
+INSERT INTO FLOAT8_TBL(f1) VALUES ('');
+INSERT INTO FLOAT8_TBL(f1) VALUES (' ');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0');
+INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5');
+-- bad special inputs
+SELECT 'N A N'::float8;
+SELECT 'NaN x'::float8;
+SELECT ' INFINITY x'::float8;
+SELECT '42'::float8 / 'Infinity'::float8;
+SELECT * FROM FLOAT8_TBL;
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <> '1004.3';
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 = '1004.3';
+SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' > f.f1;
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 < '1004.3';
+SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' >= f.f1;
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3';
+SELECT f.f1, f.f1 * '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+SELECT f.f1, f.f1 + '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+SELECT f.f1, f.f1 / '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+SELECT f.f1, f.f1 - '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+SELECT f.f1 ^ '2.0' AS square_f1
+ FROM FLOAT8_TBL f where f.f1 = '1004.3';
+-- absolute value
+SELECT f.f1, @f.f1 AS abs_f1
+ FROM FLOAT8_TBL f;
+-- truncate
+SELECT f.f1, trunc(f.f1) AS trunc_f1
+ FROM FLOAT8_TBL f;
+-- round
+SELECT f.f1, round(f.f1) AS round_f1
+ FROM FLOAT8_TBL f;
+-- avoid bit-exact output here because operations may not be bit-exact.
+SET extra_float_digits = 0;
+-- square root
+SELECT sqrt(float8 '64') AS eight;
+SELECT |/ float8 '64' AS eight;
+-- power
+SELECT power(float8 '144', float8 '0.5');
+SELECT power(float8 '1', float8 'NaN');
+SELECT power(float8 'NaN', float8 '0');
+SELECT power(float8 'inf', float8 '0');
+SELECT power(float8 '-inf', float8 '0');
+SELECT power(float8 '0', float8 'inf');
+SELECT power(float8 '0', float8 '-inf');
+SELECT power(float8 '1', float8 'inf');
+SELECT power(float8 '1', float8 '-inf');
+SELECT power(float8 '-1', float8 'inf');
+SELECT power(float8 '-1', float8 '-inf');
+SELECT power(float8 '0.1', float8 'inf');
+SELECT power(float8 '-0.1', float8 'inf');
+SELECT power(float8 '1.1', float8 '-inf');
+SELECT power(float8 '-1.1', float8 '-inf');
+SELECT power(float8 'inf', float8 '-2');
+SELECT power(float8 'inf', float8 '-inf');
+-- Intel's icc misoptimizes the code that controls the sign of this result,
+-- even with -mp1. Pending a fix for that, only test for "is it zero".
+SELECT power(float8 '-inf', float8 '-2') = '0';
+SELECT power(float8 '-inf', float8 '-3');
+SELECT power(float8 '-inf', float8 '3.5');
+SELECT power(float8 '-inf', float8 '-inf');
+SELECT * FROM FLOAT8_TBL;
+SELECT f.f1 * '1e200' from FLOAT8_TBL f;
+SELECT f.f1 ^ '1e200' from FLOAT8_TBL f;
+SELECT 0 ^ 0 + 0 ^ 1 + 0 ^ 0.0 + 0 ^ 0.5;
+SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 = '0.0' ;
+SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0' ;
+SELECT f.f1 / '0.0' from FLOAT8_TBL f;
+SELECT asinh(float8 '1');
+SELECT tanh(float8 'infinity');
+SELECT tanh(float8 '-infinity');
+-- acosh(Inf) should be Inf, but some mingw versions produce NaN, so skip test
+-- SELECT acosh(float8 'infinity');
+SELECT acosh(float8 '-infinity');
+SELECT atanh(float8 'infinity');
+SELECT atanh(float8 '-infinity');
+RESET extra_float_digits;
+-- test for over- and underflow
+INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('0.0');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-34.84');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
+-- test edge-case coercions to integer
+SELECT '32767.4'::float8::int2;
+SELECT '32767.6'::float8::int2;
+SELECT '-32768.4'::float8::int2;
+SELECT '-32768.6'::float8::int2;
+SELECT '2147483647.4'::float8::int4;
+SELECT '2147483647.6'::float8::int4;
+SELECT '-2147483648.4'::float8::int4;
+SELECT '-2147483648.6'::float8::int4;
+SELECT '9223372036854773760'::float8::int8;
+SELECT '9223372036854775807'::float8::int8;
+SELECT '-9223372036854775808.5'::float8::int8;
+SELECT '-9223372036854780000'::float8::int8;
diff --git a/ydb/library/yql/tests/postgresql/cases/functional_deps.out b/ydb/library/yql/tests/postgresql/cases/functional_deps.out
new file mode 100644
index 0000000000..bea079082e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/functional_deps.out
@@ -0,0 +1,17 @@
+-- from http://www.depesz.com/index.php/2010/04/19/getting-unique-elements/
+CREATE TEMP TABLE articles (
+ id int CONSTRAINT articles_pkey PRIMARY KEY,
+ keywords text,
+ title text UNIQUE NOT NULL,
+ body text UNIQUE,
+ created date
+);
+CREATE TEMP TABLE articles_in_category (
+ article_id int,
+ category_id int,
+ changed date,
+ PRIMARY KEY (article_id, category_id)
+);
+-- example from documentation
+CREATE TEMP TABLE products (product_id int, name text, price numeric);
+CREATE TEMP TABLE sales (product_id int, units int);
diff --git a/ydb/library/yql/tests/postgresql/cases/functional_deps.sql b/ydb/library/yql/tests/postgresql/cases/functional_deps.sql
new file mode 100644
index 0000000000..bea079082e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/functional_deps.sql
@@ -0,0 +1,17 @@
+-- from http://www.depesz.com/index.php/2010/04/19/getting-unique-elements/
+CREATE TEMP TABLE articles (
+ id int CONSTRAINT articles_pkey PRIMARY KEY,
+ keywords text,
+ title text UNIQUE NOT NULL,
+ body text UNIQUE,
+ created date
+);
+CREATE TEMP TABLE articles_in_category (
+ article_id int,
+ category_id int,
+ changed date,
+ PRIMARY KEY (article_id, category_id)
+);
+-- example from documentation
+CREATE TEMP TABLE products (product_id int, name text, price numeric);
+CREATE TEMP TABLE sales (product_id int, units int);
diff --git a/ydb/library/yql/tests/postgresql/cases/horology.out b/ydb/library/yql/tests/postgresql/cases/horology.out
new file mode 100644
index 0000000000..a67dd635b5
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/horology.out
@@ -0,0 +1,400 @@
+-- should fail in mdy mode:
+SELECT timestamp with time zone '27/12/2001 04:05:06.789-08';
+ERROR: date/time field value out of range: "27/12/2001 04:05:06.789-08"
+LINE 1: SELECT timestamp with time zone '27/12/2001 04:05:06.789-08'...
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+-- As of 7.4, allow time without time zone having a time zone specified
+SELECT time without time zone '040506.789+08';
+ time
+--------------
+ 04:05:06.789
+(1 row)
+
+SELECT time without time zone '040506.789-08';
+ time
+--------------
+ 04:05:06.789
+(1 row)
+
+SELECT time without time zone 'T040506.789+08';
+ time
+--------------
+ 04:05:06.789
+(1 row)
+
+SELECT time without time zone 'T040506.789-08';
+ time
+--------------
+ 04:05:06.789
+(1 row)
+
+SELECT time with time zone '040506.789+08';
+ timetz
+-----------------
+ 04:05:06.789+08
+(1 row)
+
+SELECT time with time zone '040506.789-08';
+ timetz
+-----------------
+ 04:05:06.789-08
+(1 row)
+
+SELECT time with time zone 'T040506.789+08';
+ timetz
+-----------------
+ 04:05:06.789+08
+(1 row)
+
+SELECT time with time zone 'T040506.789-08';
+ timetz
+-----------------
+ 04:05:06.789-08
+(1 row)
+
+SELECT time with time zone 'T040506.789 +08';
+ timetz
+-----------------
+ 04:05:06.789+08
+(1 row)
+
+SELECT time with time zone 'T040506.789 -08';
+ timetz
+-----------------
+ 04:05:06.789-08
+(1 row)
+
+-- Shorthand values
+-- Not directly usable for regression testing since these are not constants.
+-- So, just try to test parser and hope for the best - thomas 97/04/26
+SELECT (timestamp without time zone 'today' = (timestamp without time zone 'yesterday' + interval '1 day')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'today' = (timestamp without time zone 'tomorrow' - interval '1 day')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'today 10:30' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '10:30 today' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'tomorrow' = (timestamp without time zone 'yesterday' + interval '2 days')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'tomorrow 16:00:00' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '16:00:00 tomorrow' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'yesterday 12:34:56' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '12:34:56 yesterday' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'tomorrow' > 'now') as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone 'today' = (timestamp with time zone 'tomorrow' - interval '1 day')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone 'tomorrow' = (timestamp with time zone 'yesterday' + interval '2 days')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone 'tomorrow' > 'now') as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT CAST(interval '02:03' AS time) AS "02:03:00";
+ 02:03:00
+----------
+ 02:03:00
+(1 row)
+
+SELECT time '01:30' + interval '02:01' AS "03:31:00";
+ 03:31:00
+----------
+ 03:31:00
+(1 row)
+
+SELECT time '01:30' - interval '02:01' AS "23:29:00";
+ 23:29:00
+----------
+ 23:29:00
+(1 row)
+
+SELECT time '02:30' + interval '36:01' AS "14:31:00";
+ 14:31:00
+----------
+ 14:31:00
+(1 row)
+
+SELECT time '03:30' + interval '1 month 04:01' AS "07:31:00";
+ 07:31:00
+----------
+ 07:31:00
+(1 row)
+
+SELECT time with time zone '01:30-08' - interval '02:01' AS "23:29:00-08";
+ 23:29:00-08
+-------------
+ 23:29:00-08
+(1 row)
+
+SELECT time with time zone '02:30-08' + interval '36:01' AS "14:31:00-08";
+ 14:31:00-08
+-------------
+ 14:31:00-08
+(1 row)
+
+-- These two tests cannot be used because they default to current timezone,
+-- which may be either -08 or -07 depending on the time of year.
+-- SELECT time with time zone '01:30' + interval '02:01' AS "03:31:00-08";
+-- SELECT time with time zone '03:30' + interval '1 month 04:01' AS "07:31:00-08";
+-- Try the following two tests instead, as a poor substitute
+SELECT CAST(CAST(date 'today' + time with time zone '05:30'
+ + interval '02:01' AS time with time zone) AS time) AS "07:31:00";
+ 07:31:00
+----------
+ 07:31:00
+(1 row)
+
+SELECT CAST(cast(date 'today' + time with time zone '03:30'
+ + interval '1 month 04:01' as timestamp without time zone) AS time) AS "07:31:00";
+ 07:31:00
+----------
+ 07:31:00
+(1 row)
+
+-- SQL9x OVERLAPS operator
+-- test with time zone
+SELECT (timestamp with time zone '2000-11-27', timestamp with time zone '2000-11-28')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone '2000-11-26', timestamp with time zone '2000-11-27')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "False";
+ False
+-------
+ f
+(1 row)
+
+-- test without time zone
+SELECT (timestamp without time zone '2000-11-27', timestamp without time zone '2000-11-28')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '2000-11-26', timestamp without time zone '2000-11-27')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "False";
+ False
+-------
+ f
+(1 row)
+
+-- test time and interval
+SELECT (time '00:00', time '01:00')
+ OVERLAPS (time '00:30', time '01:30') AS "True";
+ True
+------
+ t
+(1 row)
+
+CREATE TABLE TEMP_TIMESTAMP (f1 timestamp with time zone);
+DROP TABLE TEMP_TIMESTAMP;
+--
+-- Comparisons between datetime types, especially overflow cases
+---
+SELECT '2202020-10-05'::date::timestamp; -- fail
+ERROR: date out of range for timestamp
+SELECT '2202020-10-05'::date > '2020-10-05'::timestamp as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '2020-10-05'::timestamp > '2202020-10-05'::date as f;
+ f
+---
+ f
+(1 row)
+
+SELECT '2202020-10-05'::date::timestamptz; -- fail
+ERROR: date out of range for timestamp
+SELECT '2202020-10-05'::date > '2020-10-05'::timestamptz as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '2020-10-05'::timestamptz > '2202020-10-05'::date as f;
+ f
+---
+ f
+(1 row)
+
+SELECT '4714-11-24 BC'::date < '2020-10-05'::timestamptz as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::date as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '4714-11-24 BC'::timestamp < '2020-10-05'::timestamptz as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::timestamp as t;
+ t
+---
+ t
+(1 row)
+
+SELECT to_timestamp('97/Feb/16', 'YYMonDD');
+ERROR: invalid value "/Feb/16" for "Mon"
+DETAIL: The given value did not match any of the allowed values for this field.
+SELECT to_timestamp('2011-12-18 11:38 PST', 'YYYY-MM-DD HH12:MI TZ'); -- NYI
+ERROR: formatting field "TZ" is only supported in to_char
+SELECT to_timestamp('2000 + + JUN', 'YYYY MON');
+ERROR: invalid value "+" for "MON"
+DETAIL: The given value did not match any of the allowed values for this field.
+SELECT to_date('2011 x12 x18', 'YYYYxMMxDD');
+ERROR: invalid value "x1" for "MM"
+DETAIL: Value must be an integer.
+--
+-- Check errors for some incorrect usages of to_timestamp() and to_date()
+--
+-- Mixture of date conventions (ISO week and Gregorian):
+SELECT to_timestamp('2005527', 'YYYYIWID');
+ERROR: invalid combination of date conventions
+HINT: Do not mix Gregorian and ISO week date conventions in a formatting template.
+-- Insufficient characters in the source string:
+SELECT to_timestamp('19971', 'YYYYMMDD');
+ERROR: source string too short for "MM" formatting field
+DETAIL: Field requires 2 characters, but only 1 remain.
+HINT: If your source string is not fixed-width, try using the "FM" modifier.
+-- Insufficient digit characters for a single node:
+SELECT to_timestamp('19971)24', 'YYYYMMDD');
+ERROR: invalid value "1)" for "MM"
+DETAIL: Field requires 2 characters, but only 1 could be parsed.
+HINT: If your source string is not fixed-width, try using the "FM" modifier.
+-- We don't accept full-length day or month names if short form is specified:
+SELECT to_timestamp('Friday 1-January-1999', 'DY DD MON YYYY');
+ERROR: invalid value "da" for "DD"
+DETAIL: Value must be an integer.
+SELECT to_timestamp('Fri 1-January-1999', 'DY DD MON YYYY');
+ERROR: invalid value "uary" for "YYYY"
+DETAIL: Value must be an integer.
+-- Value clobbering:
+SELECT to_timestamp('1997-11-Jan-16', 'YYYY-MM-Mon-DD');
+ERROR: conflicting values for "Mon" field in formatting string
+DETAIL: This value contradicts a previous setting for the same field type.
+-- Non-numeric input:
+SELECT to_timestamp('199711xy', 'YYYYMMDD');
+ERROR: invalid value "xy" for "DD"
+DETAIL: Value must be an integer.
+-- Input that doesn't fit in an int:
+SELECT to_timestamp('10000000000', 'FMYYYY');
+ERROR: value for "YYYY" in source string is out of range
+DETAIL: Value must be in the range -2147483648 to 2147483647.
+-- Out-of-range and not-quite-out-of-range fields:
+SELECT to_timestamp('2016-06-13 25:00:00', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-06-13 25:00:00"
+SELECT to_timestamp('2016-06-13 15:60:00', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-06-13 15:60:00"
+SELECT to_timestamp('2016-06-13 15:50:60', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-06-13 15:50:60"
+SELECT to_timestamp('2016-06-13 15:50:55', 'YYYY-MM-DD HH:MI:SS');
+ERROR: hour "15" is invalid for the 12-hour clock
+HINT: Use the 24-hour clock, or give an hour between 1 and 12.
+SELECT to_timestamp('2016-13-01 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-13-01 15:50:55"
+SELECT to_timestamp('2016-02-30 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-02-30 15:50:55"
+SELECT to_timestamp('2015-02-29 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2015-02-29 15:50:55"
+SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSS');
+ERROR: date/time field value out of range: "2015-02-11 86400"
+SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSSS');
+ERROR: date/time field value out of range: "2015-02-11 86400"
+SELECT to_date('2016-13-10', 'YYYY-MM-DD');
+ERROR: date/time field value out of range: "2016-13-10"
+SELECT to_date('2016-02-30', 'YYYY-MM-DD');
+ERROR: date/time field value out of range: "2016-02-30"
+SELECT to_date('2015-02-29', 'YYYY-MM-DD');
+ERROR: date/time field value out of range: "2015-02-29"
+SELECT to_date('2015 366', 'YYYY DDD');
+ERROR: date/time field value out of range: "2015 366"
+SELECT to_date('2016 367', 'YYYY DDD');
+ERROR: date/time field value out of range: "2016 367"
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSS');
+ to_char
+------------------
+ 2012-12-12 43200
+(1 row)
+
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSSS');
+ to_char
+------------------
+ 2012-12-12 43200
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/horology.sql b/ydb/library/yql/tests/postgresql/cases/horology.sql
new file mode 100644
index 0000000000..20f516ef80
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/horology.sql
@@ -0,0 +1,114 @@
+-- should fail in mdy mode:
+SELECT timestamp with time zone '27/12/2001 04:05:06.789-08';
+-- As of 7.4, allow time without time zone having a time zone specified
+SELECT time without time zone '040506.789+08';
+SELECT time without time zone '040506.789-08';
+SELECT time without time zone 'T040506.789+08';
+SELECT time without time zone 'T040506.789-08';
+SELECT time with time zone '040506.789+08';
+SELECT time with time zone '040506.789-08';
+SELECT time with time zone 'T040506.789+08';
+SELECT time with time zone 'T040506.789-08';
+SELECT time with time zone 'T040506.789 +08';
+SELECT time with time zone 'T040506.789 -08';
+-- Shorthand values
+-- Not directly usable for regression testing since these are not constants.
+-- So, just try to test parser and hope for the best - thomas 97/04/26
+SELECT (timestamp without time zone 'today' = (timestamp without time zone 'yesterday' + interval '1 day')) as "True";
+SELECT (timestamp without time zone 'today' = (timestamp without time zone 'tomorrow' - interval '1 day')) as "True";
+SELECT (timestamp without time zone 'today 10:30' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True";
+SELECT (timestamp without time zone '10:30 today' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True";
+SELECT (timestamp without time zone 'tomorrow' = (timestamp without time zone 'yesterday' + interval '2 days')) as "True";
+SELECT (timestamp without time zone 'tomorrow 16:00:00' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True";
+SELECT (timestamp without time zone '16:00:00 tomorrow' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True";
+SELECT (timestamp without time zone 'yesterday 12:34:56' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True";
+SELECT (timestamp without time zone '12:34:56 yesterday' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True";
+SELECT (timestamp without time zone 'tomorrow' > 'now') as "True";
+SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True";
+SELECT (timestamp with time zone 'today' = (timestamp with time zone 'tomorrow' - interval '1 day')) as "True";
+SELECT (timestamp with time zone 'tomorrow' = (timestamp with time zone 'yesterday' + interval '2 days')) as "True";
+SELECT (timestamp with time zone 'tomorrow' > 'now') as "True";
+SELECT CAST(interval '02:03' AS time) AS "02:03:00";
+SELECT time '01:30' + interval '02:01' AS "03:31:00";
+SELECT time '01:30' - interval '02:01' AS "23:29:00";
+SELECT time '02:30' + interval '36:01' AS "14:31:00";
+SELECT time '03:30' + interval '1 month 04:01' AS "07:31:00";
+SELECT time with time zone '01:30-08' - interval '02:01' AS "23:29:00-08";
+SELECT time with time zone '02:30-08' + interval '36:01' AS "14:31:00-08";
+-- These two tests cannot be used because they default to current timezone,
+-- which may be either -08 or -07 depending on the time of year.
+-- SELECT time with time zone '01:30' + interval '02:01' AS "03:31:00-08";
+-- SELECT time with time zone '03:30' + interval '1 month 04:01' AS "07:31:00-08";
+-- Try the following two tests instead, as a poor substitute
+SELECT CAST(CAST(date 'today' + time with time zone '05:30'
+ + interval '02:01' AS time with time zone) AS time) AS "07:31:00";
+SELECT CAST(cast(date 'today' + time with time zone '03:30'
+ + interval '1 month 04:01' as timestamp without time zone) AS time) AS "07:31:00";
+-- SQL9x OVERLAPS operator
+-- test with time zone
+SELECT (timestamp with time zone '2000-11-27', timestamp with time zone '2000-11-28')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "True";
+SELECT (timestamp with time zone '2000-11-26', timestamp with time zone '2000-11-27')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "False";
+-- test without time zone
+SELECT (timestamp without time zone '2000-11-27', timestamp without time zone '2000-11-28')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "True";
+SELECT (timestamp without time zone '2000-11-26', timestamp without time zone '2000-11-27')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "False";
+-- test time and interval
+SELECT (time '00:00', time '01:00')
+ OVERLAPS (time '00:30', time '01:30') AS "True";
+CREATE TABLE TEMP_TIMESTAMP (f1 timestamp with time zone);
+DROP TABLE TEMP_TIMESTAMP;
+--
+-- Comparisons between datetime types, especially overflow cases
+---
+SELECT '2202020-10-05'::date::timestamp; -- fail
+SELECT '2202020-10-05'::date > '2020-10-05'::timestamp as t;
+SELECT '2020-10-05'::timestamp > '2202020-10-05'::date as f;
+SELECT '2202020-10-05'::date::timestamptz; -- fail
+SELECT '2202020-10-05'::date > '2020-10-05'::timestamptz as t;
+SELECT '2020-10-05'::timestamptz > '2202020-10-05'::date as f;
+SELECT '4714-11-24 BC'::date < '2020-10-05'::timestamptz as t;
+SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::date as t;
+SELECT '4714-11-24 BC'::timestamp < '2020-10-05'::timestamptz as t;
+SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::timestamp as t;
+SELECT to_timestamp('97/Feb/16', 'YYMonDD');
+SELECT to_timestamp('2011-12-18 11:38 PST', 'YYYY-MM-DD HH12:MI TZ'); -- NYI
+SELECT to_timestamp('2000 + + JUN', 'YYYY MON');
+SELECT to_date('2011 x12 x18', 'YYYYxMMxDD');
+--
+-- Check errors for some incorrect usages of to_timestamp() and to_date()
+--
+-- Mixture of date conventions (ISO week and Gregorian):
+SELECT to_timestamp('2005527', 'YYYYIWID');
+-- Insufficient characters in the source string:
+SELECT to_timestamp('19971', 'YYYYMMDD');
+-- Insufficient digit characters for a single node:
+SELECT to_timestamp('19971)24', 'YYYYMMDD');
+-- We don't accept full-length day or month names if short form is specified:
+SELECT to_timestamp('Friday 1-January-1999', 'DY DD MON YYYY');
+SELECT to_timestamp('Fri 1-January-1999', 'DY DD MON YYYY');
+-- Value clobbering:
+SELECT to_timestamp('1997-11-Jan-16', 'YYYY-MM-Mon-DD');
+-- Non-numeric input:
+SELECT to_timestamp('199711xy', 'YYYYMMDD');
+-- Input that doesn't fit in an int:
+SELECT to_timestamp('10000000000', 'FMYYYY');
+-- Out-of-range and not-quite-out-of-range fields:
+SELECT to_timestamp('2016-06-13 25:00:00', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2016-06-13 15:60:00', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2016-06-13 15:50:60', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2016-06-13 15:50:55', 'YYYY-MM-DD HH:MI:SS');
+SELECT to_timestamp('2016-13-01 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2016-02-30 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2015-02-29 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSS');
+SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSSS');
+SELECT to_date('2016-13-10', 'YYYY-MM-DD');
+SELECT to_date('2016-02-30', 'YYYY-MM-DD');
+SELECT to_date('2015-02-29', 'YYYY-MM-DD');
+SELECT to_date('2015 366', 'YYYY DDD');
+SELECT to_date('2016 367', 'YYYY DDD');
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSS');
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSSS');
diff --git a/ydb/library/yql/tests/postgresql/cases/insert.out b/ydb/library/yql/tests/postgresql/cases/insert.out
new file mode 100644
index 0000000000..5159fb1def
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/insert.out
@@ -0,0 +1,12 @@
+create table inserttest (f1 int, f2 int[],
+ f3 insert_test_type, f4 insert_test_type[]);
+-- also check reverse-listing
+create table inserttest2 (f1 bigint, f2 text);
+drop table inserttest2;
+drop table inserttest;
+create table mlparted2 (b int not null, a int not null);
+create table mlparted5a (a int not null, c text, b int not null);
+create table mlparted5_b (d int, b int, c text, a int);
+create table donothingbrtrig_test1 (b text, a int);
+create table donothingbrtrig_test2 (c text, b text, a int);
+create table returningwrtest2 (b text, c int, a int);
diff --git a/ydb/library/yql/tests/postgresql/cases/insert.sql b/ydb/library/yql/tests/postgresql/cases/insert.sql
new file mode 100644
index 0000000000..5159fb1def
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/insert.sql
@@ -0,0 +1,12 @@
+create table inserttest (f1 int, f2 int[],
+ f3 insert_test_type, f4 insert_test_type[]);
+-- also check reverse-listing
+create table inserttest2 (f1 bigint, f2 text);
+drop table inserttest2;
+drop table inserttest;
+create table mlparted2 (b int not null, a int not null);
+create table mlparted5a (a int not null, c text, b int not null);
+create table mlparted5_b (d int, b int, c text, a int);
+create table donothingbrtrig_test1 (b text, a int);
+create table donothingbrtrig_test2 (c text, b text, a int);
+create table returningwrtest2 (b text, c int, a int);
diff --git a/ydb/library/yql/tests/postgresql/cases/int2.out b/ydb/library/yql/tests/postgresql/cases/int2.out
new file mode 100644
index 0000000000..55ea7202cd
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/int2.out
@@ -0,0 +1,308 @@
+--
+-- INT2
+--
+CREATE TABLE INT2_TBL(f1 int2);
+INSERT INTO INT2_TBL(f1) VALUES ('0 ');
+INSERT INTO INT2_TBL(f1) VALUES (' 1234 ');
+INSERT INTO INT2_TBL(f1) VALUES (' -1234');
+INSERT INTO INT2_TBL(f1) VALUES ('34.5');
+ERROR: invalid input syntax for type smallint: "34.5"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('34.5');
+ ^
+-- largest and smallest values
+INSERT INTO INT2_TBL(f1) VALUES ('32767');
+INSERT INTO INT2_TBL(f1) VALUES ('-32767');
+-- bad input values -- should give errors
+INSERT INTO INT2_TBL(f1) VALUES ('100000');
+ERROR: value "100000" is out of range for type smallint
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('100000');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('asdf');
+ERROR: invalid input syntax for type smallint: "asdf"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('asdf');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type smallint: " "
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('- 1234');
+ERROR: invalid input syntax for type smallint: "- 1234"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('- 1234');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('4 444');
+ERROR: invalid input syntax for type smallint: "4 444"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('4 444');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('123 dt');
+ERROR: invalid input syntax for type smallint: "123 dt"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('123 dt');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type smallint: ""
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('');
+ ^
+SELECT * FROM INT2_TBL;
+ f1
+--------
+ 0
+ 1234
+ -1234
+ 32767
+ -32767
+(5 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int2 '0';
+ f1
+--------
+ 1234
+ -1234
+ 32767
+ -32767
+(4 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int4 '0';
+ f1
+--------
+ 1234
+ -1234
+ 32767
+ -32767
+(4 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 = int2 '0';
+ f1
+----
+ 0
+(1 row)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 = int4 '0';
+ f1
+----
+ 0
+(1 row)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 < int2 '0';
+ f1
+--------
+ -1234
+ -32767
+(2 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 < int4 '0';
+ f1
+--------
+ -1234
+ -32767
+(2 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int2 '0';
+ f1
+--------
+ 0
+ -1234
+ -32767
+(3 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int4 '0';
+ f1
+--------
+ 0
+ -1234
+ -32767
+(3 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 > int2 '0';
+ f1
+-------
+ 1234
+ 32767
+(2 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 > int4 '0';
+ f1
+-------
+ 1234
+ 32767
+(2 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int2 '0';
+ f1
+-------
+ 0
+ 1234
+ 32767
+(3 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int4 '0';
+ f1
+-------
+ 0
+ 1234
+ 32767
+(3 rows)
+
+-- positive odds
+SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
+ f1
+-------
+ 32767
+(1 row)
+
+-- any evens
+SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
+ f1
+-------
+ 0
+ 1234
+ -1234
+(3 rows)
+
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i;
+ERROR: smallint out of range
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i
+WHERE abs(f1) < 16384;
+ f1 | x
+-------+-------
+ 0 | 0
+ 1234 | 2468
+ -1234 | -2468
+(3 rows)
+
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | 0
+ 1234 | 2468
+ -1234 | -2468
+ 32767 | 65534
+ -32767 | -65534
+(5 rows)
+
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i;
+ERROR: smallint out of range
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i
+WHERE f1 < 32766;
+ f1 | x
+--------+--------
+ 0 | 2
+ 1234 | 1236
+ -1234 | -1232
+ -32767 | -32765
+(4 rows)
+
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | 2
+ 1234 | 1236
+ -1234 | -1232
+ 32767 | 32769
+ -32767 | -32765
+(5 rows)
+
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i;
+ERROR: smallint out of range
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i
+WHERE f1 > -32767;
+ f1 | x
+-------+-------
+ 0 | -2
+ 1234 | 1232
+ -1234 | -1236
+ 32767 | 32765
+(4 rows)
+
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | -2
+ 1234 | 1232
+ -1234 | -1236
+ 32767 | 32765
+ -32767 | -32769
+(5 rows)
+
+SELECT i.f1, i.f1 / int2 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | 0
+ 1234 | 617
+ -1234 | -617
+ 32767 | 16383
+ -32767 | -16383
+(5 rows)
+
+SELECT i.f1, i.f1 / int4 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | 0
+ 1234 | 617
+ -1234 | -617
+ 32767 | 16383
+ -32767 | -16383
+(5 rows)
+
+-- corner cases
+SELECT (-1::int2<<15)::text;
+ text
+--------
+ -32768
+(1 row)
+
+SELECT ((-1::int2<<15)+1::int2)::text;
+ text
+--------
+ -32767
+(1 row)
+
+-- check sane handling of INT16_MIN overflow cases
+SELECT (-32768)::int2 * (-1)::int2;
+ERROR: smallint out of range
+SELECT (-32768)::int2 / (-1)::int2;
+ERROR: smallint out of range
+SELECT (-32768)::int2 % (-1)::int2;
+ ?column?
+----------
+ 0
+(1 row)
+
+-- check rounding when casting from float
+SELECT x, x::int2 AS int2_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+ x | int2_value
+------+------------
+ -2.5 | -2
+ -1.5 | -2
+ -0.5 | 0
+ 0 | 0
+ 0.5 | 0
+ 1.5 | 2
+ 2.5 | 2
+(7 rows)
+
+-- check rounding when casting from numeric
+SELECT x, x::int2 AS int2_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
+ x | int2_value
+------+------------
+ -2.5 | -3
+ -1.5 | -2
+ -0.5 | -1
+ 0.0 | 0
+ 0.5 | 1
+ 1.5 | 2
+ 2.5 | 3
+(7 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/int2.sql b/ydb/library/yql/tests/postgresql/cases/int2.sql
new file mode 100644
index 0000000000..8373a26d95
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/int2.sql
@@ -0,0 +1,75 @@
+--
+-- INT2
+--
+CREATE TABLE INT2_TBL(f1 int2);
+INSERT INTO INT2_TBL(f1) VALUES ('0 ');
+INSERT INTO INT2_TBL(f1) VALUES (' 1234 ');
+INSERT INTO INT2_TBL(f1) VALUES (' -1234');
+INSERT INTO INT2_TBL(f1) VALUES ('34.5');
+-- largest and smallest values
+INSERT INTO INT2_TBL(f1) VALUES ('32767');
+INSERT INTO INT2_TBL(f1) VALUES ('-32767');
+-- bad input values -- should give errors
+INSERT INTO INT2_TBL(f1) VALUES ('100000');
+INSERT INTO INT2_TBL(f1) VALUES ('asdf');
+INSERT INTO INT2_TBL(f1) VALUES (' ');
+INSERT INTO INT2_TBL(f1) VALUES ('- 1234');
+INSERT INTO INT2_TBL(f1) VALUES ('4 444');
+INSERT INTO INT2_TBL(f1) VALUES ('123 dt');
+INSERT INTO INT2_TBL(f1) VALUES ('');
+SELECT * FROM INT2_TBL;
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int2 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int4 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 = int2 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 = int4 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 < int2 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 < int4 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int2 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int4 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 > int2 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 > int4 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int2 '0';
+SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int4 '0';
+-- positive odds
+SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
+-- any evens
+SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i;
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i
+WHERE abs(f1) < 16384;
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT2_TBL i;
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i;
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i
+WHERE f1 < 32766;
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT2_TBL i;
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i;
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i
+WHERE f1 > -32767;
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT2_TBL i;
+SELECT i.f1, i.f1 / int2 '2' AS x FROM INT2_TBL i;
+SELECT i.f1, i.f1 / int4 '2' AS x FROM INT2_TBL i;
+-- corner cases
+SELECT (-1::int2<<15)::text;
+SELECT ((-1::int2<<15)+1::int2)::text;
+-- check sane handling of INT16_MIN overflow cases
+SELECT (-32768)::int2 * (-1)::int2;
+SELECT (-32768)::int2 / (-1)::int2;
+SELECT (-32768)::int2 % (-1)::int2;
+-- check rounding when casting from float
+SELECT x, x::int2 AS int2_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+-- check rounding when casting from numeric
+SELECT x, x::int2 AS int2_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
diff --git a/ydb/library/yql/tests/postgresql/cases/int4.out b/ydb/library/yql/tests/postgresql/cases/int4.out
new file mode 100644
index 0000000000..9d20b3380f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/int4.out
@@ -0,0 +1,439 @@
+--
+-- INT4
+--
+CREATE TABLE INT4_TBL(f1 int4);
+INSERT INTO INT4_TBL(f1) VALUES (' 0 ');
+INSERT INTO INT4_TBL(f1) VALUES ('123456 ');
+INSERT INTO INT4_TBL(f1) VALUES (' -123456');
+INSERT INTO INT4_TBL(f1) VALUES ('34.5');
+ERROR: invalid input syntax for type integer: "34.5"
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('34.5');
+ ^
+-- largest and smallest values
+INSERT INTO INT4_TBL(f1) VALUES ('2147483647');
+INSERT INTO INT4_TBL(f1) VALUES ('-2147483647');
+-- bad input values -- should give errors
+INSERT INTO INT4_TBL(f1) VALUES ('1000000000000');
+ERROR: value "1000000000000" is out of range for type integer
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('1000000000000');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES ('asdf');
+ERROR: invalid input syntax for type integer: "asdf"
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('asdf');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type integer: " "
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES (' asdf ');
+ERROR: invalid input syntax for type integer: " asdf "
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' asdf ');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES ('- 1234');
+ERROR: invalid input syntax for type integer: "- 1234"
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('- 1234');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES ('123 5');
+ERROR: invalid input syntax for type integer: "123 5"
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('123 5');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type integer: ""
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('');
+ ^
+SELECT * FROM INT4_TBL;
+ f1
+-------------
+ 0
+ 123456
+ -123456
+ 2147483647
+ -2147483647
+(5 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int2 '0';
+ f1
+-------------
+ 123456
+ -123456
+ 2147483647
+ -2147483647
+(4 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int4 '0';
+ f1
+-------------
+ 123456
+ -123456
+ 2147483647
+ -2147483647
+(4 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 = int2 '0';
+ f1
+----
+ 0
+(1 row)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 = int4 '0';
+ f1
+----
+ 0
+(1 row)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 < int2 '0';
+ f1
+-------------
+ -123456
+ -2147483647
+(2 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 < int4 '0';
+ f1
+-------------
+ -123456
+ -2147483647
+(2 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int2 '0';
+ f1
+-------------
+ 0
+ -123456
+ -2147483647
+(3 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int4 '0';
+ f1
+-------------
+ 0
+ -123456
+ -2147483647
+(3 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 > int2 '0';
+ f1
+------------
+ 123456
+ 2147483647
+(2 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 > int4 '0';
+ f1
+------------
+ 123456
+ 2147483647
+(2 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 >= int2 '0';
+ f1
+------------
+ 0
+ 123456
+ 2147483647
+(3 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 >= int4 '0';
+ f1
+------------
+ 0
+ 123456
+ 2147483647
+(3 rows)
+
+-- positive odds
+SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
+ f1
+------------
+ 2147483647
+(1 row)
+
+-- any evens
+SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
+ f1
+---------
+ 0
+ 123456
+ -123456
+(3 rows)
+
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i
+WHERE abs(f1) < 1073741824;
+ f1 | x
+---------+---------
+ 0 | 0
+ 123456 | 246912
+ -123456 | -246912
+(3 rows)
+
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i
+WHERE abs(f1) < 1073741824;
+ f1 | x
+---------+---------
+ 0 | 0
+ 123456 | 246912
+ -123456 | -246912
+(3 rows)
+
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i
+WHERE f1 < 2147483646;
+ f1 | x
+-------------+-------------
+ 0 | 2
+ 123456 | 123458
+ -123456 | -123454
+ -2147483647 | -2147483645
+(4 rows)
+
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i
+WHERE f1 < 2147483646;
+ f1 | x
+-------------+-------------
+ 0 | 2
+ 123456 | 123458
+ -123456 | -123454
+ -2147483647 | -2147483645
+(4 rows)
+
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i
+WHERE f1 > -2147483647;
+ f1 | x
+------------+------------
+ 0 | -2
+ 123456 | 123454
+ -123456 | -123458
+ 2147483647 | 2147483645
+(4 rows)
+
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i
+WHERE f1 > -2147483647;
+ f1 | x
+------------+------------
+ 0 | -2
+ 123456 | 123454
+ -123456 | -123458
+ 2147483647 | 2147483645
+(4 rows)
+
+SELECT i.f1, i.f1 / int2 '2' AS x FROM INT4_TBL i;
+ f1 | x
+-------------+-------------
+ 0 | 0
+ 123456 | 61728
+ -123456 | -61728
+ 2147483647 | 1073741823
+ -2147483647 | -1073741823
+(5 rows)
+
+SELECT i.f1, i.f1 / int4 '2' AS x FROM INT4_TBL i;
+ f1 | x
+-------------+-------------
+ 0 | 0
+ 123456 | 61728
+ -123456 | -61728
+ 2147483647 | 1073741823
+ -2147483647 | -1073741823
+(5 rows)
+
+--
+-- more complex expressions
+--
+-- variations on unary minus parsing
+SELECT -2+3 AS one;
+ one
+-----
+ 1
+(1 row)
+
+SELECT 4-2 AS two;
+ two
+-----
+ 2
+(1 row)
+
+SELECT 2- -1 AS three;
+ three
+-------
+ 3
+(1 row)
+
+SELECT 2 - -2 AS four;
+ four
+------
+ 4
+(1 row)
+
+SELECT int2 '2' * int2 '2' = int2 '16' / int2 '4' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT int4 '2' * int2 '2' = int2 '16' / int4 '4' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT int2 '2' * int4 '2' = int4 '16' / int2 '4' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT int4 '1000' < int4 '999' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 AS ten;
+ ten
+-----
+ 10
+(1 row)
+
+SELECT 2 + 2 / 2 AS three;
+ three
+-------
+ 3
+(1 row)
+
+SELECT (2 + 2) / 2 AS two;
+ two
+-----
+ 2
+(1 row)
+
+-- corner case
+SELECT (-1::int4<<31)::text;
+ text
+-------------
+ -2147483648
+(1 row)
+
+SELECT ((-1::int4<<31)+1)::text;
+ text
+-------------
+ -2147483647
+(1 row)
+
+-- check sane handling of INT_MIN overflow cases
+SELECT (-2147483648)::int4 * (-1)::int4;
+ERROR: integer out of range
+SELECT (-2147483648)::int4 / (-1)::int4;
+ERROR: integer out of range
+SELECT (-2147483648)::int4 % (-1)::int4;
+ ?column?
+----------
+ 0
+(1 row)
+
+SELECT (-2147483648)::int4 * (-1)::int2;
+ERROR: integer out of range
+SELECT (-2147483648)::int4 / (-1)::int2;
+ERROR: integer out of range
+SELECT (-2147483648)::int4 % (-1)::int2;
+ ?column?
+----------
+ 0
+(1 row)
+
+-- check rounding when casting from float
+SELECT x, x::int4 AS int4_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+ x | int4_value
+------+------------
+ -2.5 | -2
+ -1.5 | -2
+ -0.5 | 0
+ 0 | 0
+ 0.5 | 0
+ 1.5 | 2
+ 2.5 | 2
+(7 rows)
+
+-- check rounding when casting from numeric
+SELECT x, x::int4 AS int4_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
+ x | int4_value
+------+------------
+ -2.5 | -3
+ -1.5 | -2
+ -0.5 | -1
+ 0.0 | 0
+ 0.5 | 1
+ 1.5 | 2
+ 2.5 | 3
+(7 rows)
+
+-- test gcd()
+SELECT a, b, gcd(a, b), gcd(a, -b), gcd(b, a), gcd(-b, a)
+FROM (VALUES (0::int4, 0::int4),
+ (0::int4, 6410818::int4),
+ (61866666::int4, 6410818::int4),
+ (-61866666::int4, 6410818::int4),
+ ((-2147483648)::int4, 1::int4),
+ ((-2147483648)::int4, 2147483647::int4),
+ ((-2147483648)::int4, 1073741824::int4)) AS v(a, b);
+ a | b | gcd | gcd | gcd | gcd
+-------------+------------+------------+------------+------------+------------
+ 0 | 0 | 0 | 0 | 0 | 0
+ 0 | 6410818 | 6410818 | 6410818 | 6410818 | 6410818
+ 61866666 | 6410818 | 1466 | 1466 | 1466 | 1466
+ -61866666 | 6410818 | 1466 | 1466 | 1466 | 1466
+ -2147483648 | 1 | 1 | 1 | 1 | 1
+ -2147483648 | 2147483647 | 1 | 1 | 1 | 1
+ -2147483648 | 1073741824 | 1073741824 | 1073741824 | 1073741824 | 1073741824
+(7 rows)
+
+SELECT gcd((-2147483648)::int4, 0::int4); -- overflow
+ERROR: integer out of range
+SELECT gcd((-2147483648)::int4, (-2147483648)::int4); -- overflow
+ERROR: integer out of range
+-- test lcm()
+SELECT a, b, lcm(a, b), lcm(a, -b), lcm(b, a), lcm(-b, a)
+FROM (VALUES (0::int4, 0::int4),
+ (0::int4, 42::int4),
+ (42::int4, 42::int4),
+ (330::int4, 462::int4),
+ (-330::int4, 462::int4),
+ ((-2147483648)::int4, 0::int4)) AS v(a, b);
+ a | b | lcm | lcm | lcm | lcm
+-------------+-----+------+------+------+------
+ 0 | 0 | 0 | 0 | 0 | 0
+ 0 | 42 | 0 | 0 | 0 | 0
+ 42 | 42 | 42 | 42 | 42 | 42
+ 330 | 462 | 2310 | 2310 | 2310 | 2310
+ -330 | 462 | 2310 | 2310 | 2310 | 2310
+ -2147483648 | 0 | 0 | 0 | 0 | 0
+(6 rows)
+
+SELECT lcm((-2147483648)::int4, 1::int4); -- overflow
+ERROR: integer out of range
+SELECT lcm(2147483647::int4, 2147483646::int4); -- overflow
+ERROR: integer out of range
diff --git a/ydb/library/yql/tests/postgresql/cases/int4.sql b/ydb/library/yql/tests/postgresql/cases/int4.sql
new file mode 100644
index 0000000000..701e02dbf5
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/int4.sql
@@ -0,0 +1,120 @@
+--
+-- INT4
+--
+CREATE TABLE INT4_TBL(f1 int4);
+INSERT INTO INT4_TBL(f1) VALUES (' 0 ');
+INSERT INTO INT4_TBL(f1) VALUES ('123456 ');
+INSERT INTO INT4_TBL(f1) VALUES (' -123456');
+INSERT INTO INT4_TBL(f1) VALUES ('34.5');
+-- largest and smallest values
+INSERT INTO INT4_TBL(f1) VALUES ('2147483647');
+INSERT INTO INT4_TBL(f1) VALUES ('-2147483647');
+-- bad input values -- should give errors
+INSERT INTO INT4_TBL(f1) VALUES ('1000000000000');
+INSERT INTO INT4_TBL(f1) VALUES ('asdf');
+INSERT INTO INT4_TBL(f1) VALUES (' ');
+INSERT INTO INT4_TBL(f1) VALUES (' asdf ');
+INSERT INTO INT4_TBL(f1) VALUES ('- 1234');
+INSERT INTO INT4_TBL(f1) VALUES ('123 5');
+INSERT INTO INT4_TBL(f1) VALUES ('');
+SELECT * FROM INT4_TBL;
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int2 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int4 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 = int2 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 = int4 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 < int2 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 < int4 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int2 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int4 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 > int2 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 > int4 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 >= int2 '0';
+SELECT i.* FROM INT4_TBL i WHERE i.f1 >= int4 '0';
+-- positive odds
+SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
+-- any evens
+SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i;
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i
+WHERE abs(f1) < 1073741824;
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i;
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i
+WHERE abs(f1) < 1073741824;
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i;
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i
+WHERE f1 < 2147483646;
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i;
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i
+WHERE f1 < 2147483646;
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i;
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i
+WHERE f1 > -2147483647;
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i;
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i
+WHERE f1 > -2147483647;
+SELECT i.f1, i.f1 / int2 '2' AS x FROM INT4_TBL i;
+SELECT i.f1, i.f1 / int4 '2' AS x FROM INT4_TBL i;
+--
+-- more complex expressions
+--
+-- variations on unary minus parsing
+SELECT -2+3 AS one;
+SELECT 4-2 AS two;
+SELECT 2- -1 AS three;
+SELECT 2 - -2 AS four;
+SELECT int2 '2' * int2 '2' = int2 '16' / int2 '4' AS true;
+SELECT int4 '2' * int2 '2' = int2 '16' / int4 '4' AS true;
+SELECT int2 '2' * int4 '2' = int4 '16' / int2 '4' AS true;
+SELECT int4 '1000' < int4 '999' AS false;
+SELECT 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 AS ten;
+SELECT 2 + 2 / 2 AS three;
+SELECT (2 + 2) / 2 AS two;
+-- corner case
+SELECT (-1::int4<<31)::text;
+SELECT ((-1::int4<<31)+1)::text;
+-- check sane handling of INT_MIN overflow cases
+SELECT (-2147483648)::int4 * (-1)::int4;
+SELECT (-2147483648)::int4 / (-1)::int4;
+SELECT (-2147483648)::int4 % (-1)::int4;
+SELECT (-2147483648)::int4 * (-1)::int2;
+SELECT (-2147483648)::int4 / (-1)::int2;
+SELECT (-2147483648)::int4 % (-1)::int2;
+-- check rounding when casting from float
+SELECT x, x::int4 AS int4_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+-- check rounding when casting from numeric
+SELECT x, x::int4 AS int4_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
+-- test gcd()
+SELECT a, b, gcd(a, b), gcd(a, -b), gcd(b, a), gcd(-b, a)
+FROM (VALUES (0::int4, 0::int4),
+ (0::int4, 6410818::int4),
+ (61866666::int4, 6410818::int4),
+ (-61866666::int4, 6410818::int4),
+ ((-2147483648)::int4, 1::int4),
+ ((-2147483648)::int4, 2147483647::int4),
+ ((-2147483648)::int4, 1073741824::int4)) AS v(a, b);
+SELECT gcd((-2147483648)::int4, 0::int4); -- overflow
+SELECT gcd((-2147483648)::int4, (-2147483648)::int4); -- overflow
+-- test lcm()
+SELECT a, b, lcm(a, b), lcm(a, -b), lcm(b, a), lcm(-b, a)
+FROM (VALUES (0::int4, 0::int4),
+ (0::int4, 42::int4),
+ (42::int4, 42::int4),
+ (330::int4, 462::int4),
+ (-330::int4, 462::int4),
+ ((-2147483648)::int4, 0::int4)) AS v(a, b);
+SELECT lcm((-2147483648)::int4, 1::int4); -- overflow
+SELECT lcm(2147483647::int4, 2147483646::int4); -- overflow
diff --git a/ydb/library/yql/tests/postgresql/cases/int8.out b/ydb/library/yql/tests/postgresql/cases/int8.out
new file mode 100644
index 0000000000..5450ee2b42
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/int8.out
@@ -0,0 +1,39 @@
+--
+-- INT8
+-- Test int8 64-bit integers.
+--
+CREATE TABLE INT8_TBL(q1 int8, q2 int8);
+INSERT INTO INT8_TBL VALUES(' 123 ',' 456');
+INSERT INTO INT8_TBL VALUES('123 ','4567890123456789');
+INSERT INTO INT8_TBL VALUES('4567890123456789','123');
+INSERT INTO INT8_TBL VALUES(+4567890123456789,'4567890123456789');
+INSERT INTO INT8_TBL VALUES('+4567890123456789','-4567890123456789');
+-- bad inputs
+INSERT INTO INT8_TBL(q1) VALUES (' ');
+ERROR: invalid input syntax for type bigint: " "
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' ');
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('xxx');
+ERROR: invalid input syntax for type bigint: "xxx"
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('xxx');
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('3908203590239580293850293850329485');
+ERROR: value "3908203590239580293850293850329485" is out of range for type bigint
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('39082035902395802938502938...
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340329840934');
+ERROR: value "-1204982019841029840928340329840934" is out of range for type bigint
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340...
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('- 123');
+ERROR: invalid input syntax for type bigint: "- 123"
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('- 123');
+ ^
+INSERT INTO INT8_TBL(q1) VALUES (' 345 5');
+ERROR: invalid input syntax for type bigint: " 345 5"
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' 345 5');
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('');
+ERROR: invalid input syntax for type bigint: ""
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('');
+ ^
diff --git a/ydb/library/yql/tests/postgresql/cases/int8.sql b/ydb/library/yql/tests/postgresql/cases/int8.sql
new file mode 100644
index 0000000000..8d2efcb2dd
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/int8.sql
@@ -0,0 +1,18 @@
+--
+-- INT8
+-- Test int8 64-bit integers.
+--
+CREATE TABLE INT8_TBL(q1 int8, q2 int8);
+INSERT INTO INT8_TBL VALUES(' 123 ',' 456');
+INSERT INTO INT8_TBL VALUES('123 ','4567890123456789');
+INSERT INTO INT8_TBL VALUES('4567890123456789','123');
+INSERT INTO INT8_TBL VALUES(+4567890123456789,'4567890123456789');
+INSERT INTO INT8_TBL VALUES('+4567890123456789','-4567890123456789');
+-- bad inputs
+INSERT INTO INT8_TBL(q1) VALUES (' ');
+INSERT INTO INT8_TBL(q1) VALUES ('xxx');
+INSERT INTO INT8_TBL(q1) VALUES ('3908203590239580293850293850329485');
+INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340329840934');
+INSERT INTO INT8_TBL(q1) VALUES ('- 123');
+INSERT INTO INT8_TBL(q1) VALUES (' 345 5');
+INSERT INTO INT8_TBL(q1) VALUES ('');
diff --git a/ydb/library/yql/tests/postgresql/cases/interval.out b/ydb/library/yql/tests/postgresql/cases/interval.out
new file mode 100644
index 0000000000..72ff3e4c0a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/interval.out
@@ -0,0 +1,623 @@
+-- check acceptance of "time zone style"
+SELECT INTERVAL '01:00' AS "One hour";
+ One hour
+----------
+ 01:00:00
+(1 row)
+
+SELECT INTERVAL '+02:00' AS "Two hours";
+ Two hours
+-----------
+ 02:00:00
+(1 row)
+
+SELECT INTERVAL '-08:00' AS "Eight hours";
+ Eight hours
+-------------
+ -08:00:00
+(1 row)
+
+SELECT INTERVAL '-1 +02:03' AS "22 hours ago...";
+ 22 hours ago...
+-------------------
+ -1 days +02:03:00
+(1 row)
+
+SELECT INTERVAL '-1 days +02:03' AS "22 hours ago...";
+ 22 hours ago...
+-------------------
+ -1 days +02:03:00
+(1 row)
+
+SELECT INTERVAL '1.5 weeks' AS "Ten days twelve hours";
+ Ten days twelve hours
+-----------------------
+ 10 days 12:00:00
+(1 row)
+
+SELECT INTERVAL '1.5 months' AS "One month 15 days";
+ One month 15 days
+-------------------
+ 1 mon 15 days
+(1 row)
+
+SELECT INTERVAL '10 years -11 month -12 days +13:14' AS "9 years...";
+ 9 years...
+----------------------------------
+ 9 years 1 mon -12 days +13:14:00
+(1 row)
+
+CREATE TABLE INTERVAL_TBL (f1 interval);
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 1 minute');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 5 hour');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 10 day');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 34 year');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 3 months');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 14 seconds ago');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('1 day 2 hours 3 minutes 4 seconds');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('6 years');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months 12 hours');
+-- badly formatted interval
+INSERT INTO INTERVAL_TBL (f1) VALUES ('badly formatted interval');
+ERROR: invalid input syntax for type interval: "badly formatted interval"
+LINE 1: INSERT INTO INTERVAL_TBL (f1) VALUES ('badly formatted inter...
+ ^
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago');
+ERROR: invalid input syntax for type interval: "@ 30 eons ago"
+LINE 1: INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago');
+ ^
+-- test interval operators
+SELECT * FROM INTERVAL_TBL;
+ f1
+-----------------
+ 00:01:00
+ 05:00:00
+ 10 days
+ 34 years
+ 3 mons
+ -00:00:14
+ 1 day 02:03:04
+ 6 years
+ 5 mons
+ 5 mons 12:00:00
+(10 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 <> interval '@ 10 days';
+ f1
+-----------------
+ 00:01:00
+ 05:00:00
+ 34 years
+ 3 mons
+ -00:00:14
+ 1 day 02:03:04
+ 6 years
+ 5 mons
+ 5 mons 12:00:00
+(9 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 <= interval '@ 5 hours';
+ f1
+-----------
+ 00:01:00
+ 05:00:00
+ -00:00:14
+(3 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 < interval '@ 1 day';
+ f1
+-----------
+ 00:01:00
+ 05:00:00
+ -00:00:14
+(3 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 = interval '@ 34 years';
+ f1
+----------
+ 34 years
+(1 row)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 >= interval '@ 1 month';
+ f1
+-----------------
+ 34 years
+ 3 mons
+ 6 years
+ 5 mons
+ 5 mons 12:00:00
+(5 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 > interval '@ 3 seconds ago';
+ f1
+-----------------
+ 00:01:00
+ 05:00:00
+ 10 days
+ 34 years
+ 3 mons
+ 1 day 02:03:04
+ 6 years
+ 5 mons
+ 5 mons 12:00:00
+(9 rows)
+
+-- Test intervals that are large enough to overflow 64 bits in comparisons
+CREATE TEMP TABLE INTERVAL_TBL_OF (f1 interval);
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES
+ ('2147483647 days 2147483647 months'),
+ ('2147483647 days -2147483648 months'),
+ ('1 year'),
+ ('-2147483648 days 2147483647 months'),
+ ('-2147483648 days -2147483648 months');
+-- these should fail as out-of-range
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days');
+ERROR: interval field value out of range: "2147483648 days"
+LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days');
+ ^
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days');
+ERROR: interval field value out of range: "-2147483649 days"
+LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days')...
+ ^
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years');
+ERROR: interval out of range
+LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years')...
+ ^
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years');
+ERROR: interval out of range
+LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years'...
+ ^
+-- Test edge-case overflow detection in interval multiplication
+select extract(epoch from '256 microseconds'::interval * (2^55)::float8);
+ERROR: interval out of range
+CREATE INDEX ON INTERVAL_TBL_OF USING btree (f1);
+DROP TABLE INTERVAL_TBL_OF;
+-- Test multiplication and division with intervals.
+-- Floating point arithmetic rounding errors can lead to unexpected results,
+-- though the code attempts to do the right thing and round up to days and
+-- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'.
+-- Note that it is expected for some day components to be greater than 29 and
+-- some time components be greater than 23:59:59 due to how intervals are
+-- stored internally.
+CREATE TABLE INTERVAL_MULDIV_TBL (span interval);
+DROP TABLE INTERVAL_MULDIV_TBL;
+SELECT '1 millisecond'::interval, '1 microsecond'::interval,
+ '500 seconds 99 milliseconds 51 microseconds'::interval;
+ interval | interval | interval
+--------------+-----------------+-----------------
+ 00:00:00.001 | 00:00:00.000001 | 00:08:20.099051
+(1 row)
+
+SELECT '3 days 5 milliseconds'::interval;
+ interval
+---------------------
+ 3 days 00:00:00.005
+(1 row)
+
+SELECT '1 second 2 seconds'::interval; -- error
+ERROR: invalid input syntax for type interval: "1 second 2 seconds"
+LINE 1: SELECT '1 second 2 seconds'::interval;
+ ^
+SELECT '10 milliseconds 20 milliseconds'::interval; -- error
+ERROR: invalid input syntax for type interval: "10 milliseconds 20 milliseconds"
+LINE 1: SELECT '10 milliseconds 20 milliseconds'::interval;
+ ^
+SELECT '5.5 seconds 3 milliseconds'::interval; -- error
+ERROR: invalid input syntax for type interval: "5.5 seconds 3 milliseconds"
+LINE 1: SELECT '5.5 seconds 3 milliseconds'::interval;
+ ^
+SELECT '1:20:05 5 microseconds'::interval; -- error
+ERROR: invalid input syntax for type interval: "1:20:05 5 microseconds"
+LINE 1: SELECT '1:20:05 5 microseconds'::interval;
+ ^
+SELECT '1 day 1 day'::interval; -- error
+ERROR: invalid input syntax for type interval: "1 day 1 day"
+LINE 1: SELECT '1 day 1 day'::interval;
+ ^
+SELECT interval '1-2'; -- SQL year-month literal
+ interval
+---------------
+ 1 year 2 mons
+(1 row)
+
+SELECT interval '999' second; -- oversize leading field is ok
+ interval
+----------
+ 00:16:39
+(1 row)
+
+SELECT interval '999' minute;
+ interval
+----------
+ 16:39:00
+(1 row)
+
+SELECT interval '999' hour;
+ interval
+-----------
+ 999:00:00
+(1 row)
+
+SELECT interval '999' day;
+ interval
+----------
+ 999 days
+(1 row)
+
+SELECT interval '999' month;
+ interval
+-----------------
+ 83 years 3 mons
+(1 row)
+
+-- test SQL-spec syntaxes for restricted field sets
+SELECT interval '1' year;
+ interval
+----------
+ 1 year
+(1 row)
+
+SELECT interval '2' month;
+ interval
+----------
+ 2 mons
+(1 row)
+
+SELECT interval '3' day;
+ interval
+----------
+ 3 days
+(1 row)
+
+SELECT interval '4' hour;
+ interval
+----------
+ 04:00:00
+(1 row)
+
+SELECT interval '5' minute;
+ interval
+----------
+ 00:05:00
+(1 row)
+
+SELECT interval '6' second;
+ interval
+----------
+ 00:00:06
+(1 row)
+
+SELECT interval '1' year to month;
+ interval
+----------
+ 1 mon
+(1 row)
+
+SELECT interval '1-2' year to month;
+ interval
+---------------
+ 1 year 2 mons
+(1 row)
+
+SELECT interval '1 2' day to hour;
+ interval
+----------------
+ 1 day 02:00:00
+(1 row)
+
+SELECT interval '1 2:03' day to hour;
+ interval
+----------------
+ 1 day 02:00:00
+(1 row)
+
+SELECT interval '1 2:03:04' day to hour;
+ interval
+----------------
+ 1 day 02:00:00
+(1 row)
+
+SELECT interval '1 2' day to minute;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' day to minute;
+ ^
+SELECT interval '1 2:03' day to minute;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03:04' day to minute;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2' day to second;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' day to second;
+ ^
+SELECT interval '1 2:03' day to second;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03:04' day to second;
+ interval
+----------------
+ 1 day 02:03:04
+(1 row)
+
+SELECT interval '1 2' hour to minute;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' hour to minute;
+ ^
+SELECT interval '1 2:03' hour to minute;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03:04' hour to minute;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2' hour to second;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' hour to second;
+ ^
+SELECT interval '1 2:03' hour to second;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03:04' hour to second;
+ interval
+----------------
+ 1 day 02:03:04
+(1 row)
+
+SELECT interval '1 2' minute to second;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' minute to second;
+ ^
+SELECT interval '1 2:03' minute to second;
+ interval
+----------------
+ 1 day 00:02:03
+(1 row)
+
+SELECT interval '1 2:03:04' minute to second;
+ interval
+----------------
+ 1 day 02:03:04
+(1 row)
+
+SELECT interval '1 +2:03' minute to second;
+ interval
+----------------
+ 1 day 00:02:03
+(1 row)
+
+SELECT interval '1 +2:03:04' minute to second;
+ interval
+----------------
+ 1 day 02:03:04
+(1 row)
+
+SELECT interval '1 -2:03' minute to second;
+ interval
+-----------------
+ 1 day -00:02:03
+(1 row)
+
+SELECT interval '1 -2:03:04' minute to second;
+ interval
+-----------------
+ 1 day -02:03:04
+(1 row)
+
+SELECT interval '123 11' day to hour; -- ok
+ interval
+-------------------
+ 123 days 11:00:00
+(1 row)
+
+SELECT interval '123 11' day; -- not ok
+ERROR: invalid input syntax for type interval: "123 11"
+LINE 1: SELECT interval '123 11' day;
+ ^
+SELECT interval '123 11'; -- not ok, too ambiguous
+ERROR: invalid input syntax for type interval: "123 11"
+LINE 1: SELECT interval '123 11';
+ ^
+SELECT interval '123 2:03 -2:04'; -- not ok, redundant hh:mm fields
+ERROR: invalid input syntax for type interval: "123 2:03 -2:04"
+LINE 1: SELECT interval '123 2:03 -2:04';
+ ^
+-- test syntaxes for restricted precision
+SELECT interval(0) '1 day 01:23:45.6789';
+ interval
+----------------
+ 1 day 01:23:46
+(1 row)
+
+SELECT interval(2) '1 day 01:23:45.6789';
+ interval
+-------------------
+ 1 day 01:23:45.68
+(1 row)
+
+SELECT interval '12:34.5678' minute to second(2); -- per SQL spec
+ interval
+-------------
+ 00:12:34.57
+(1 row)
+
+SELECT interval '1.234' second;
+ interval
+--------------
+ 00:00:01.234
+(1 row)
+
+SELECT interval '1.234' second(2);
+ interval
+-------------
+ 00:00:01.23
+(1 row)
+
+SELECT interval '1 2.345' day to second(2);
+ERROR: invalid input syntax for type interval: "1 2.345"
+LINE 1: SELECT interval '1 2.345' day to second(2);
+ ^
+SELECT interval '1 2:03' day to second(2);
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03.4567' day to second(2);
+ interval
+-------------------
+ 1 day 00:02:03.46
+(1 row)
+
+SELECT interval '1 2:03:04.5678' day to second(2);
+ interval
+-------------------
+ 1 day 02:03:04.57
+(1 row)
+
+SELECT interval '1 2.345' hour to second(2);
+ERROR: invalid input syntax for type interval: "1 2.345"
+LINE 1: SELECT interval '1 2.345' hour to second(2);
+ ^
+SELECT interval '1 2:03.45678' hour to second(2);
+ interval
+-------------------
+ 1 day 00:02:03.46
+(1 row)
+
+SELECT interval '1 2:03:04.5678' hour to second(2);
+ interval
+-------------------
+ 1 day 02:03:04.57
+(1 row)
+
+SELECT interval '1 2.3456' minute to second(2);
+ERROR: invalid input syntax for type interval: "1 2.3456"
+LINE 1: SELECT interval '1 2.3456' minute to second(2);
+ ^
+SELECT interval '1 2:03.5678' minute to second(2);
+ interval
+-------------------
+ 1 day 00:02:03.57
+(1 row)
+
+SELECT interval '1 2:03:04.5678' minute to second(2);
+ interval
+-------------------
+ 1 day 02:03:04.57
+(1 row)
+
+SELECT interval '+1 -1:00:00',
+ interval '-1 +1:00:00',
+ interval '+1-2 -3 +4:05:06.789',
+ interval '-1-2 +3 -4:05:06.789';
+ interval | interval | interval | interval
+-----------------+-------------------+-------------------------------------+----------------------------------------
+ 1 day -01:00:00 | -1 days +01:00:00 | 1 year 2 mons -3 days +04:05:06.789 | -1 years -2 mons +3 days -04:05:06.789
+(1 row)
+
+select interval 'P00021015T103020' AS "ISO8601 Basic Format",
+ interval 'P0002-10-15T10:30:20' AS "ISO8601 Extended Format";
+ ISO8601 Basic Format | ISO8601 Extended Format
+----------------------------------+----------------------------------
+ 2 years 10 mons 15 days 10:30:20 | 2 years 10 mons 15 days 10:30:20
+(1 row)
+
+-- Make sure optional ISO8601 alternative format fields are optional.
+select interval 'P0002' AS "year only",
+ interval 'P0002-10' AS "year month",
+ interval 'P0002-10-15' AS "year month day",
+ interval 'P0002T1S' AS "year only plus time",
+ interval 'P0002-10T1S' AS "year month plus time",
+ interval 'P0002-10-15T1S' AS "year month day plus time",
+ interval 'PT10' AS "hour only",
+ interval 'PT10:30' AS "hour minute";
+ year only | year month | year month day | year only plus time | year month plus time | year month day plus time | hour only | hour minute
+-----------+-----------------+-------------------------+---------------------+--------------------------+----------------------------------+-----------+-------------
+ 2 years | 2 years 10 mons | 2 years 10 mons 15 days | 2 years 00:00:01 | 2 years 10 mons 00:00:01 | 2 years 10 mons 15 days 00:00:01 | 10:00:00 | 10:30:00
+(1 row)
+
+-- check that '30 days' equals '1 month' according to the hash function
+select '30 days'::interval = '1 month'::interval as t;
+ t
+---
+ t
+(1 row)
+
+select interval_hash('30 days'::interval) = interval_hash('1 month'::interval) as t;
+ t
+---
+ t
+(1 row)
+
+SELECT EXTRACT(FORTNIGHT FROM INTERVAL '2 days'); -- error
+ERROR: interval units "fortnight" not recognized
+SELECT EXTRACT(TIMEZONE FROM INTERVAL '2 days'); -- error
+ERROR: interval units "timezone" not supported
+SELECT EXTRACT(DECADE FROM INTERVAL '100 y');
+ extract
+---------
+ 10
+(1 row)
+
+SELECT EXTRACT(DECADE FROM INTERVAL '99 y');
+ extract
+---------
+ 9
+(1 row)
+
+SELECT EXTRACT(DECADE FROM INTERVAL '-99 y');
+ extract
+---------
+ -9
+(1 row)
+
+SELECT EXTRACT(DECADE FROM INTERVAL '-100 y');
+ extract
+---------
+ -10
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '100 y');
+ extract
+---------
+ 1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '-100 y');
+ extract
+---------
+ -1
+(1 row)
+
+-- internal overflow test case
+SELECT extract(epoch from interval '1000000000 days');
+ extract
+-----------------------
+ 86400000000000.000000
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/interval.sql b/ydb/library/yql/tests/postgresql/cases/interval.sql
new file mode 100644
index 0000000000..582efe4cb6
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/interval.sql
@@ -0,0 +1,156 @@
+-- check acceptance of "time zone style"
+SELECT INTERVAL '01:00' AS "One hour";
+SELECT INTERVAL '+02:00' AS "Two hours";
+SELECT INTERVAL '-08:00' AS "Eight hours";
+SELECT INTERVAL '-1 +02:03' AS "22 hours ago...";
+SELECT INTERVAL '-1 days +02:03' AS "22 hours ago...";
+SELECT INTERVAL '1.5 weeks' AS "Ten days twelve hours";
+SELECT INTERVAL '1.5 months' AS "One month 15 days";
+SELECT INTERVAL '10 years -11 month -12 days +13:14' AS "9 years...";
+CREATE TABLE INTERVAL_TBL (f1 interval);
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 1 minute');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 5 hour');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 10 day');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 34 year');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 3 months');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 14 seconds ago');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('1 day 2 hours 3 minutes 4 seconds');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('6 years');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months 12 hours');
+-- badly formatted interval
+INSERT INTO INTERVAL_TBL (f1) VALUES ('badly formatted interval');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago');
+-- test interval operators
+SELECT * FROM INTERVAL_TBL;
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 <> interval '@ 10 days';
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 <= interval '@ 5 hours';
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 < interval '@ 1 day';
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 = interval '@ 34 years';
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 >= interval '@ 1 month';
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 > interval '@ 3 seconds ago';
+-- Test intervals that are large enough to overflow 64 bits in comparisons
+CREATE TEMP TABLE INTERVAL_TBL_OF (f1 interval);
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES
+ ('2147483647 days 2147483647 months'),
+ ('2147483647 days -2147483648 months'),
+ ('1 year'),
+ ('-2147483648 days 2147483647 months'),
+ ('-2147483648 days -2147483648 months');
+-- these should fail as out-of-range
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days');
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days');
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years');
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years');
+-- Test edge-case overflow detection in interval multiplication
+select extract(epoch from '256 microseconds'::interval * (2^55)::float8);
+CREATE INDEX ON INTERVAL_TBL_OF USING btree (f1);
+DROP TABLE INTERVAL_TBL_OF;
+-- Test multiplication and division with intervals.
+-- Floating point arithmetic rounding errors can lead to unexpected results,
+-- though the code attempts to do the right thing and round up to days and
+-- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'.
+-- Note that it is expected for some day components to be greater than 29 and
+-- some time components be greater than 23:59:59 due to how intervals are
+-- stored internally.
+CREATE TABLE INTERVAL_MULDIV_TBL (span interval);
+DROP TABLE INTERVAL_MULDIV_TBL;
+SELECT '1 millisecond'::interval, '1 microsecond'::interval,
+ '500 seconds 99 milliseconds 51 microseconds'::interval;
+SELECT '3 days 5 milliseconds'::interval;
+SELECT '1 second 2 seconds'::interval; -- error
+SELECT '10 milliseconds 20 milliseconds'::interval; -- error
+SELECT '5.5 seconds 3 milliseconds'::interval; -- error
+SELECT '1:20:05 5 microseconds'::interval; -- error
+SELECT '1 day 1 day'::interval; -- error
+SELECT interval '1-2'; -- SQL year-month literal
+SELECT interval '999' second; -- oversize leading field is ok
+SELECT interval '999' minute;
+SELECT interval '999' hour;
+SELECT interval '999' day;
+SELECT interval '999' month;
+-- test SQL-spec syntaxes for restricted field sets
+SELECT interval '1' year;
+SELECT interval '2' month;
+SELECT interval '3' day;
+SELECT interval '4' hour;
+SELECT interval '5' minute;
+SELECT interval '6' second;
+SELECT interval '1' year to month;
+SELECT interval '1-2' year to month;
+SELECT interval '1 2' day to hour;
+SELECT interval '1 2:03' day to hour;
+SELECT interval '1 2:03:04' day to hour;
+SELECT interval '1 2' day to minute;
+SELECT interval '1 2:03' day to minute;
+SELECT interval '1 2:03:04' day to minute;
+SELECT interval '1 2' day to second;
+SELECT interval '1 2:03' day to second;
+SELECT interval '1 2:03:04' day to second;
+SELECT interval '1 2' hour to minute;
+SELECT interval '1 2:03' hour to minute;
+SELECT interval '1 2:03:04' hour to minute;
+SELECT interval '1 2' hour to second;
+SELECT interval '1 2:03' hour to second;
+SELECT interval '1 2:03:04' hour to second;
+SELECT interval '1 2' minute to second;
+SELECT interval '1 2:03' minute to second;
+SELECT interval '1 2:03:04' minute to second;
+SELECT interval '1 +2:03' minute to second;
+SELECT interval '1 +2:03:04' minute to second;
+SELECT interval '1 -2:03' minute to second;
+SELECT interval '1 -2:03:04' minute to second;
+SELECT interval '123 11' day to hour; -- ok
+SELECT interval '123 11' day; -- not ok
+SELECT interval '123 11'; -- not ok, too ambiguous
+SELECT interval '123 2:03 -2:04'; -- not ok, redundant hh:mm fields
+-- test syntaxes for restricted precision
+SELECT interval(0) '1 day 01:23:45.6789';
+SELECT interval(2) '1 day 01:23:45.6789';
+SELECT interval '12:34.5678' minute to second(2); -- per SQL spec
+SELECT interval '1.234' second;
+SELECT interval '1.234' second(2);
+SELECT interval '1 2.345' day to second(2);
+SELECT interval '1 2:03' day to second(2);
+SELECT interval '1 2:03.4567' day to second(2);
+SELECT interval '1 2:03:04.5678' day to second(2);
+SELECT interval '1 2.345' hour to second(2);
+SELECT interval '1 2:03.45678' hour to second(2);
+SELECT interval '1 2:03:04.5678' hour to second(2);
+SELECT interval '1 2.3456' minute to second(2);
+SELECT interval '1 2:03.5678' minute to second(2);
+SELECT interval '1 2:03:04.5678' minute to second(2);
+SELECT interval '+1 -1:00:00',
+ interval '-1 +1:00:00',
+ interval '+1-2 -3 +4:05:06.789',
+ interval '-1-2 +3 -4:05:06.789';
+select interval 'P00021015T103020' AS "ISO8601 Basic Format",
+ interval 'P0002-10-15T10:30:20' AS "ISO8601 Extended Format";
+-- Make sure optional ISO8601 alternative format fields are optional.
+select interval 'P0002' AS "year only",
+ interval 'P0002-10' AS "year month",
+ interval 'P0002-10-15' AS "year month day",
+ interval 'P0002T1S' AS "year only plus time",
+ interval 'P0002-10T1S' AS "year month plus time",
+ interval 'P0002-10-15T1S' AS "year month day plus time",
+ interval 'PT10' AS "hour only",
+ interval 'PT10:30' AS "hour minute";
+-- check that '30 days' equals '1 month' according to the hash function
+select '30 days'::interval = '1 month'::interval as t;
+select interval_hash('30 days'::interval) = interval_hash('1 month'::interval) as t;
+SELECT EXTRACT(FORTNIGHT FROM INTERVAL '2 days'); -- error
+SELECT EXTRACT(TIMEZONE FROM INTERVAL '2 days'); -- error
+SELECT EXTRACT(DECADE FROM INTERVAL '100 y');
+SELECT EXTRACT(DECADE FROM INTERVAL '99 y');
+SELECT EXTRACT(DECADE FROM INTERVAL '-99 y');
+SELECT EXTRACT(DECADE FROM INTERVAL '-100 y');
+SELECT EXTRACT(CENTURY FROM INTERVAL '100 y');
+SELECT EXTRACT(CENTURY FROM INTERVAL '-100 y');
+-- internal overflow test case
+SELECT extract(epoch from interval '1000000000 days');
diff --git a/ydb/library/yql/tests/postgresql/cases/join.out b/ydb/library/yql/tests/postgresql/cases/join.out
new file mode 100644
index 0000000000..e22646412a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/join.out
@@ -0,0 +1,35 @@
+--
+-- JOIN
+-- Test JOIN clauses
+--
+CREATE TABLE J1_TBL (
+ i integer,
+ j integer,
+ t text
+);
+CREATE TABLE J2_TBL (
+ i integer,
+ k integer
+);
+INSERT INTO J1_TBL VALUES (1, 4, 'one');
+INSERT INTO J1_TBL VALUES (2, 3, 'two');
+INSERT INTO J1_TBL VALUES (3, 2, 'three');
+INSERT INTO J1_TBL VALUES (4, 1, 'four');
+INSERT INTO J1_TBL VALUES (5, 0, 'five');
+INSERT INTO J1_TBL VALUES (6, 6, 'six');
+INSERT INTO J1_TBL VALUES (7, 7, 'seven');
+INSERT INTO J1_TBL VALUES (8, 8, 'eight');
+INSERT INTO J1_TBL VALUES (0, NULL, 'zero');
+INSERT INTO J1_TBL VALUES (NULL, NULL, 'null');
+INSERT INTO J1_TBL VALUES (NULL, 0, 'zero');
+INSERT INTO J2_TBL VALUES (1, -1);
+INSERT INTO J2_TBL VALUES (2, 2);
+INSERT INTO J2_TBL VALUES (3, -3);
+INSERT INTO J2_TBL VALUES (2, 4);
+INSERT INTO J2_TBL VALUES (5, -5);
+INSERT INTO J2_TBL VALUES (5, -5);
+INSERT INTO J2_TBL VALUES (0, NULL);
+INSERT INTO J2_TBL VALUES (NULL, NULL);
+INSERT INTO J2_TBL VALUES (NULL, 0);
+-- useful in some tests below
+create temp table onerow();
diff --git a/ydb/library/yql/tests/postgresql/cases/join.sql b/ydb/library/yql/tests/postgresql/cases/join.sql
new file mode 100644
index 0000000000..e22646412a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/join.sql
@@ -0,0 +1,35 @@
+--
+-- JOIN
+-- Test JOIN clauses
+--
+CREATE TABLE J1_TBL (
+ i integer,
+ j integer,
+ t text
+);
+CREATE TABLE J2_TBL (
+ i integer,
+ k integer
+);
+INSERT INTO J1_TBL VALUES (1, 4, 'one');
+INSERT INTO J1_TBL VALUES (2, 3, 'two');
+INSERT INTO J1_TBL VALUES (3, 2, 'three');
+INSERT INTO J1_TBL VALUES (4, 1, 'four');
+INSERT INTO J1_TBL VALUES (5, 0, 'five');
+INSERT INTO J1_TBL VALUES (6, 6, 'six');
+INSERT INTO J1_TBL VALUES (7, 7, 'seven');
+INSERT INTO J1_TBL VALUES (8, 8, 'eight');
+INSERT INTO J1_TBL VALUES (0, NULL, 'zero');
+INSERT INTO J1_TBL VALUES (NULL, NULL, 'null');
+INSERT INTO J1_TBL VALUES (NULL, 0, 'zero');
+INSERT INTO J2_TBL VALUES (1, -1);
+INSERT INTO J2_TBL VALUES (2, 2);
+INSERT INTO J2_TBL VALUES (3, -3);
+INSERT INTO J2_TBL VALUES (2, 4);
+INSERT INTO J2_TBL VALUES (5, -5);
+INSERT INTO J2_TBL VALUES (5, -5);
+INSERT INTO J2_TBL VALUES (0, NULL);
+INSERT INTO J2_TBL VALUES (NULL, NULL);
+INSERT INTO J2_TBL VALUES (NULL, 0);
+-- useful in some tests below
+create temp table onerow();
diff --git a/ydb/library/yql/tests/postgresql/cases/json.out b/ydb/library/yql/tests/postgresql/cases/json.out
new file mode 100644
index 0000000000..8d39d1b21f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/json.out
@@ -0,0 +1,301 @@
+-- Strings.
+SELECT '""'::json; -- OK.
+ json
+------
+ ""
+(1 row)
+
+SELECT $$''$$::json; -- ERROR, single quotes are not allowed
+ERROR: invalid input syntax for type json
+LINE 1: SELECT $$''$$::json;
+ ^
+DETAIL: Token "'" is invalid.
+CONTEXT: JSON data, line 1: '...
+SELECT '"abc"'::json; -- OK
+ json
+-------
+ "abc"
+(1 row)
+
+SELECT '"abc'::json; -- ERROR, quotes not closed
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"abc'::json;
+ ^
+DETAIL: Token ""abc" is invalid.
+CONTEXT: JSON data, line 1: "abc
+SELECT '"abc
+def"'::json; -- ERROR, unescaped newline in string constant
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"abc
+ ^
+DETAIL: Character with value 0x0a must be escaped.
+CONTEXT: JSON data, line 1: "abc
+SELECT '"\n\"\\"'::json; -- OK, legal escapes
+ json
+----------
+ "\n\"\\"
+(1 row)
+
+SELECT '"\v"'::json; -- ERROR, not a valid JSON escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\v"'::json;
+ ^
+DETAIL: Escape sequence "\v" is invalid.
+CONTEXT: JSON data, line 1: "\v...
+-- see json_encoding test for input with unicode escapes
+-- Numbers.
+SELECT '1'::json; -- OK
+ json
+------
+ 1
+(1 row)
+
+SELECT '0'::json; -- OK
+ json
+------
+ 0
+(1 row)
+
+SELECT '01'::json; -- ERROR, not valid according to JSON spec
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '01'::json;
+ ^
+DETAIL: Token "01" is invalid.
+CONTEXT: JSON data, line 1: 01
+SELECT '0.1'::json; -- OK
+ json
+------
+ 0.1
+(1 row)
+
+SELECT '9223372036854775808'::json; -- OK, even though it's too large for int8
+ json
+---------------------
+ 9223372036854775808
+(1 row)
+
+SELECT '1e100'::json; -- OK
+ json
+-------
+ 1e100
+(1 row)
+
+SELECT '1.3e100'::json; -- OK
+ json
+---------
+ 1.3e100
+(1 row)
+
+SELECT '1f2'::json; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '1f2'::json;
+ ^
+DETAIL: Token "1f2" is invalid.
+CONTEXT: JSON data, line 1: 1f2
+SELECT '0.x1'::json; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '0.x1'::json;
+ ^
+DETAIL: Token "0.x1" is invalid.
+CONTEXT: JSON data, line 1: 0.x1
+SELECT '1.3ex100'::json; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '1.3ex100'::json;
+ ^
+DETAIL: Token "1.3ex100" is invalid.
+CONTEXT: JSON data, line 1: 1.3ex100
+-- Arrays.
+SELECT '[]'::json; -- OK
+ json
+------
+ []
+(1 row)
+
+SELECT '[1,2]'::json; -- OK
+ json
+-------
+ [1,2]
+(1 row)
+
+SELECT '[1,2,]'::json; -- ERROR, trailing comma
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,2,]'::json;
+ ^
+DETAIL: Expected JSON value, but found "]".
+CONTEXT: JSON data, line 1: [1,2,]
+SELECT '[1,2'::json; -- ERROR, no closing bracket
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,2'::json;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1: [1,2
+SELECT '[1,[2]'::json; -- ERROR, no closing bracket
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,[2]'::json;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1: [1,[2]
+-- Objects.
+SELECT '{}'::json; -- OK
+ json
+------
+ {}
+(1 row)
+
+SELECT '{"abc"}'::json; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"}'::json;
+ ^
+DETAIL: Expected ":", but found "}".
+CONTEXT: JSON data, line 1: {"abc"}
+SELECT '{"abc":1}'::json; -- OK
+ json
+-----------
+ {"abc":1}
+(1 row)
+
+SELECT '{1:"abc"}'::json; -- ERROR, keys must be strings
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{1:"abc"}'::json;
+ ^
+DETAIL: Expected string or "}", but found "1".
+CONTEXT: JSON data, line 1: {1...
+SELECT '{"abc",1}'::json; -- ERROR, wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc",1}'::json;
+ ^
+DETAIL: Expected ":", but found ",".
+CONTEXT: JSON data, line 1: {"abc",...
+SELECT '{"abc"=1}'::json; -- ERROR, totally wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"=1}'::json;
+ ^
+DETAIL: Token "=" is invalid.
+CONTEXT: JSON data, line 1: {"abc"=...
+SELECT '{"abc"::1}'::json; -- ERROR, another wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"::1}'::json;
+ ^
+DETAIL: Expected JSON value, but found ":".
+CONTEXT: JSON data, line 1: {"abc"::...
+SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::json; -- OK
+ json
+---------------------------------------------------------
+ {"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}
+(1 row)
+
+SELECT '{"abc":1:2}'::json; -- ERROR, colon in wrong spot
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc":1:2}'::json;
+ ^
+DETAIL: Expected "," or "}", but found ":".
+CONTEXT: JSON data, line 1: {"abc":1:...
+SELECT '{"abc":1,3}'::json; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc":1,3}'::json;
+ ^
+DETAIL: Expected string, but found "3".
+CONTEXT: JSON data, line 1: {"abc":1,3...
+SELECT repeat('[', 10000)::json;
+ERROR: stack depth limit exceeded
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+SELECT repeat('{"a":', 10000)::json;
+ERROR: stack depth limit exceeded
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+-- Miscellaneous stuff.
+SELECT 'true'::json; -- OK
+ json
+------
+ true
+(1 row)
+
+SELECT 'false'::json; -- OK
+ json
+-------
+ false
+(1 row)
+
+SELECT 'null'::json; -- OK
+ json
+------
+ null
+(1 row)
+
+SELECT ' true '::json; -- OK, even with extra whitespace
+ json
+--------
+ true
+(1 row)
+
+SELECT 'true false'::json; -- ERROR, too many values
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'true false'::json;
+ ^
+DETAIL: Expected end of input, but found "false".
+CONTEXT: JSON data, line 1: true false
+SELECT 'true, false'::json; -- ERROR, too many values
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'true, false'::json;
+ ^
+DETAIL: Expected end of input, but found ",".
+CONTEXT: JSON data, line 1: true,...
+SELECT 'truf'::json; -- ERROR, not a keyword
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'truf'::json;
+ ^
+DETAIL: Token "truf" is invalid.
+CONTEXT: JSON data, line 1: truf
+SELECT 'trues'::json; -- ERROR, not a keyword
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'trues'::json;
+ ^
+DETAIL: Token "trues" is invalid.
+CONTEXT: JSON data, line 1: trues
+SELECT ''::json; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT ''::json;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1:
+SELECT ' '::json; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT ' '::json;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1:
+SELECT '{
+ "one": 1,
+ "two":,"two", -- ERROR extraneous comma before field "two"
+ "three":
+ true}'::json;
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{
+ ^
+DETAIL: Expected JSON value, but found ",".
+CONTEXT: JSON data, line 3: "two":,...
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::json;
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{
+ ^
+DETAIL: Expected JSON value, but found "}".
+CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":}
+SELECT array_to_json('{{1,5},{99,100}}'::int[]);
+ array_to_json
+------------------
+ [[1,5],[99,100]]
+(1 row)
+
+BEGIN;
+COMMIT;
+-- json extraction functions
+CREATE TEMP TABLE test_json (
+ json_type text,
+ test_json json
+);
+INSERT INTO test_json VALUES
+('scalar','"a scalar"'),
+('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
+('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
diff --git a/ydb/library/yql/tests/postgresql/cases/json.sql b/ydb/library/yql/tests/postgresql/cases/json.sql
new file mode 100644
index 0000000000..8eca8cf477
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/json.sql
@@ -0,0 +1,72 @@
+-- Strings.
+SELECT '""'::json; -- OK.
+SELECT $$''$$::json; -- ERROR, single quotes are not allowed
+SELECT '"abc"'::json; -- OK
+SELECT '"abc'::json; -- ERROR, quotes not closed
+SELECT '"abc
+def"'::json; -- ERROR, unescaped newline in string constant
+SELECT '"\n\"\\"'::json; -- OK, legal escapes
+SELECT '"\v"'::json; -- ERROR, not a valid JSON escape
+-- see json_encoding test for input with unicode escapes
+-- Numbers.
+SELECT '1'::json; -- OK
+SELECT '0'::json; -- OK
+SELECT '01'::json; -- ERROR, not valid according to JSON spec
+SELECT '0.1'::json; -- OK
+SELECT '9223372036854775808'::json; -- OK, even though it's too large for int8
+SELECT '1e100'::json; -- OK
+SELECT '1.3e100'::json; -- OK
+SELECT '1f2'::json; -- ERROR
+SELECT '0.x1'::json; -- ERROR
+SELECT '1.3ex100'::json; -- ERROR
+-- Arrays.
+SELECT '[]'::json; -- OK
+SELECT '[1,2]'::json; -- OK
+SELECT '[1,2,]'::json; -- ERROR, trailing comma
+SELECT '[1,2'::json; -- ERROR, no closing bracket
+SELECT '[1,[2]'::json; -- ERROR, no closing bracket
+-- Objects.
+SELECT '{}'::json; -- OK
+SELECT '{"abc"}'::json; -- ERROR, no value
+SELECT '{"abc":1}'::json; -- OK
+SELECT '{1:"abc"}'::json; -- ERROR, keys must be strings
+SELECT '{"abc",1}'::json; -- ERROR, wrong separator
+SELECT '{"abc"=1}'::json; -- ERROR, totally wrong separator
+SELECT '{"abc"::1}'::json; -- ERROR, another wrong separator
+SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::json; -- OK
+SELECT '{"abc":1:2}'::json; -- ERROR, colon in wrong spot
+SELECT '{"abc":1,3}'::json; -- ERROR, no value
+SELECT repeat('[', 10000)::json;
+SELECT repeat('{"a":', 10000)::json;
+-- Miscellaneous stuff.
+SELECT 'true'::json; -- OK
+SELECT 'false'::json; -- OK
+SELECT 'null'::json; -- OK
+SELECT ' true '::json; -- OK, even with extra whitespace
+SELECT 'true false'::json; -- ERROR, too many values
+SELECT 'true, false'::json; -- ERROR, too many values
+SELECT 'truf'::json; -- ERROR, not a keyword
+SELECT 'trues'::json; -- ERROR, not a keyword
+SELECT ''::json; -- ERROR, no value
+SELECT ' '::json; -- ERROR, no value
+SELECT '{
+ "one": 1,
+ "two":,"two", -- ERROR extraneous comma before field "two"
+ "three":
+ true}'::json;
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::json;
+SELECT array_to_json('{{1,5},{99,100}}'::int[]);
+BEGIN;
+COMMIT;
+-- json extraction functions
+CREATE TEMP TABLE test_json (
+ json_type text,
+ test_json json
+);
+INSERT INTO test_json VALUES
+('scalar','"a scalar"'),
+('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
+('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
diff --git a/ydb/library/yql/tests/postgresql/cases/json_encoding.out b/ydb/library/yql/tests/postgresql/cases/json_encoding.out
new file mode 100644
index 0000000000..8025d09624
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/json_encoding.out
@@ -0,0 +1,255 @@
+--
+-- encoding-sensitive tests for json and jsonb
+--
+SELECT getdatabaseencoding(); -- just to label the results files
+ getdatabaseencoding
+---------------------
+ UTF8
+(1 row)
+
+-- first json
+-- basic unicode input
+SELECT '"\u"'::json; -- ERROR, incomplete escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u"'::json;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u"
+SELECT '"\u00"'::json; -- ERROR, incomplete escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u00"'::json;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u00"
+SELECT '"\u000g"'::json; -- ERROR, g is not a hex digit
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u000g"'::json;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u000g...
+SELECT '"\u0000"'::json; -- OK, legal escape
+ json
+----------
+ "\u0000"
+(1 row)
+
+SELECT '"\uaBcD"'::json; -- OK, uppercase and lower case both OK
+ json
+----------
+ "\uaBcD"
+(1 row)
+
+-- handling of unicode surrogate pairs
+select json '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a' as correct_in_utf8;
+ correct_in_utf8
+----------------------------
+ "\ud83d\ude04\ud83d\udc36"
+(1 row)
+
+select json '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
+ERROR: invalid input syntax for type json
+DETAIL: Unicode high surrogate must not follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+select json '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
+ERROR: invalid input syntax for type json
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+select json '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
+ERROR: invalid input syntax for type json
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+select json '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
+ERROR: invalid input syntax for type json
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+--handling of simple unicode escapes
+select json '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
+ correct_in_utf8
+---------------------------------------
+ { "a": "the Copyright \u00a9 sign" }
+(1 row)
+
+select json '{ "a": "dollar \u0024 character" }' as correct_everywhere;
+ correct_everywhere
+-------------------------------------
+ { "a": "dollar \u0024 character" }
+(1 row)
+
+select json '{ "a": "dollar \\u0024 character" }' as not_an_escape;
+ not_an_escape
+--------------------------------------
+ { "a": "dollar \\u0024 character" }
+(1 row)
+
+select json '{ "a": "null \u0000 escape" }' as not_unescaped;
+ not_unescaped
+--------------------------------
+ { "a": "null \u0000 escape" }
+(1 row)
+
+select json '{ "a": "null \\u0000 escape" }' as not_an_escape;
+ not_an_escape
+---------------------------------
+ { "a": "null \\u0000 escape" }
+(1 row)
+
+select json '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8;
+ correct_in_utf8
+----------------------
+ the Copyright © sign
+(1 row)
+
+select json '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere;
+ correct_everywhere
+--------------------
+ dollar $ character
+(1 row)
+
+select json '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
+ not_an_escape
+-------------------------
+ dollar \u0024 character
+(1 row)
+
+select json '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
+ERROR: unsupported Unicode escape sequence
+DETAIL: \u0000 cannot be converted to text.
+CONTEXT: JSON data, line 1: { "a":...
+select json '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
+ not_an_escape
+--------------------
+ null \u0000 escape
+(1 row)
+
+-- then jsonb
+-- basic unicode input
+SELECT '"\u"'::jsonb; -- ERROR, incomplete escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u"'::jsonb;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u"
+SELECT '"\u00"'::jsonb; -- ERROR, incomplete escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u00"'::jsonb;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u00"
+SELECT '"\u000g"'::jsonb; -- ERROR, g is not a hex digit
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u000g"'::jsonb;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u000g...
+SELECT '"\u0045"'::jsonb; -- OK, legal escape
+ jsonb
+-------
+ "E"
+(1 row)
+
+SELECT '"\u0000"'::jsonb; -- ERROR, we don't support U+0000
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT '"\u0000"'::jsonb;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+CONTEXT: JSON data, line 1: ...
+-- use octet_length here so we don't get an odd unicode char in the
+-- output
+SELECT octet_length('"\uaBcD"'::jsonb::text); -- OK, uppercase and lower case both OK
+ octet_length
+--------------
+ 5
+(1 row)
+
+-- handling of unicode surrogate pairs
+SELECT octet_length((jsonb '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a')::text) AS correct_in_utf8;
+ correct_in_utf8
+-----------------
+ 10
+(1 row)
+
+SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
+ERROR: invalid input syntax for type json
+LINE 1: SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a';
+ ^
+DETAIL: Unicode high surrogate must not follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
+ERROR: invalid input syntax for type json
+LINE 1: SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a';
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
+ERROR: invalid input syntax for type json
+LINE 1: SELECT jsonb '{ "a": "\ud83dX" }' -> 'a';
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
+ERROR: invalid input syntax for type json
+LINE 1: SELECT jsonb '{ "a": "\ude04X" }' -> 'a';
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+-- handling of simple unicode escapes
+SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
+ correct_in_utf8
+-------------------------------
+ {"a": "the Copyright © sign"}
+(1 row)
+
+SELECT jsonb '{ "a": "dollar \u0024 character" }' as correct_everywhere;
+ correct_everywhere
+-----------------------------
+ {"a": "dollar $ character"}
+(1 row)
+
+SELECT jsonb '{ "a": "dollar \\u0024 character" }' as not_an_escape;
+ not_an_escape
+-----------------------------------
+ {"a": "dollar \\u0024 character"}
+(1 row)
+
+SELECT jsonb '{ "a": "null \u0000 escape" }' as fails;
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' as fails;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "null \\u0000 escape" }' as not_an_escape;
+ not_an_escape
+------------------------------
+ {"a": "null \\u0000 escape"}
+(1 row)
+
+SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8;
+ correct_in_utf8
+----------------------
+ the Copyright © sign
+(1 row)
+
+SELECT jsonb '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere;
+ correct_everywhere
+--------------------
+ dollar $ character
+(1 row)
+
+SELECT jsonb '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
+ not_an_escape
+-------------------------
+ dollar \u0024 character
+(1 row)
+
+SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fai...
+ ^
+DETAIL: \u0000 cannot be converted to text.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
+ not_an_escape
+--------------------
+ null \u0000 escape
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/json_encoding.sql b/ydb/library/yql/tests/postgresql/cases/json_encoding.sql
new file mode 100644
index 0000000000..f07f8986b2
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/json_encoding.sql
@@ -0,0 +1,55 @@
+--
+-- encoding-sensitive tests for json and jsonb
+--
+SELECT getdatabaseencoding(); -- just to label the results files
+-- first json
+-- basic unicode input
+SELECT '"\u"'::json; -- ERROR, incomplete escape
+SELECT '"\u00"'::json; -- ERROR, incomplete escape
+SELECT '"\u000g"'::json; -- ERROR, g is not a hex digit
+SELECT '"\u0000"'::json; -- OK, legal escape
+SELECT '"\uaBcD"'::json; -- OK, uppercase and lower case both OK
+-- handling of unicode surrogate pairs
+select json '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a' as correct_in_utf8;
+select json '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
+select json '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
+select json '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
+select json '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
+--handling of simple unicode escapes
+select json '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
+select json '{ "a": "dollar \u0024 character" }' as correct_everywhere;
+select json '{ "a": "dollar \\u0024 character" }' as not_an_escape;
+select json '{ "a": "null \u0000 escape" }' as not_unescaped;
+select json '{ "a": "null \\u0000 escape" }' as not_an_escape;
+select json '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8;
+select json '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere;
+select json '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
+select json '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
+select json '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
+-- then jsonb
+-- basic unicode input
+SELECT '"\u"'::jsonb; -- ERROR, incomplete escape
+SELECT '"\u00"'::jsonb; -- ERROR, incomplete escape
+SELECT '"\u000g"'::jsonb; -- ERROR, g is not a hex digit
+SELECT '"\u0045"'::jsonb; -- OK, legal escape
+SELECT '"\u0000"'::jsonb; -- ERROR, we don't support U+0000
+-- use octet_length here so we don't get an odd unicode char in the
+-- output
+SELECT octet_length('"\uaBcD"'::jsonb::text); -- OK, uppercase and lower case both OK
+-- handling of unicode surrogate pairs
+SELECT octet_length((jsonb '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a')::text) AS correct_in_utf8;
+SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
+SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
+SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
+SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
+-- handling of simple unicode escapes
+SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
+SELECT jsonb '{ "a": "dollar \u0024 character" }' as correct_everywhere;
+SELECT jsonb '{ "a": "dollar \\u0024 character" }' as not_an_escape;
+SELECT jsonb '{ "a": "null \u0000 escape" }' as fails;
+SELECT jsonb '{ "a": "null \\u0000 escape" }' as not_an_escape;
+SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8;
+SELECT jsonb '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere;
+SELECT jsonb '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
+SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
+SELECT jsonb '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
diff --git a/ydb/library/yql/tests/postgresql/cases/jsonb.out b/ydb/library/yql/tests/postgresql/cases/jsonb.out
new file mode 100644
index 0000000000..3d767135a7
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/jsonb.out
@@ -0,0 +1,302 @@
+-- Strings.
+SELECT '""'::jsonb; -- OK.
+ jsonb
+-------
+ ""
+(1 row)
+
+SELECT $$''$$::jsonb; -- ERROR, single quotes are not allowed
+ERROR: invalid input syntax for type json
+LINE 1: SELECT $$''$$::jsonb;
+ ^
+DETAIL: Token "'" is invalid.
+CONTEXT: JSON data, line 1: '...
+SELECT '"abc"'::jsonb; -- OK
+ jsonb
+-------
+ "abc"
+(1 row)
+
+SELECT '"abc'::jsonb; -- ERROR, quotes not closed
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"abc'::jsonb;
+ ^
+DETAIL: Token ""abc" is invalid.
+CONTEXT: JSON data, line 1: "abc
+SELECT '"abc
+def"'::jsonb; -- ERROR, unescaped newline in string constant
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"abc
+ ^
+DETAIL: Character with value 0x0a must be escaped.
+CONTEXT: JSON data, line 1: "abc
+SELECT '"\n\"\\"'::jsonb; -- OK, legal escapes
+ jsonb
+----------
+ "\n\"\\"
+(1 row)
+
+SELECT '"\v"'::jsonb; -- ERROR, not a valid JSON escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\v"'::jsonb;
+ ^
+DETAIL: Escape sequence "\v" is invalid.
+CONTEXT: JSON data, line 1: "\v...
+-- see json_encoding test for input with unicode escapes
+-- Numbers.
+SELECT '1'::jsonb; -- OK
+ jsonb
+-------
+ 1
+(1 row)
+
+SELECT '0'::jsonb; -- OK
+ jsonb
+-------
+ 0
+(1 row)
+
+SELECT '01'::jsonb; -- ERROR, not valid according to JSON spec
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '01'::jsonb;
+ ^
+DETAIL: Token "01" is invalid.
+CONTEXT: JSON data, line 1: 01
+SELECT '0.1'::jsonb; -- OK
+ jsonb
+-------
+ 0.1
+(1 row)
+
+SELECT '9223372036854775808'::jsonb; -- OK, even though it's too large for int8
+ jsonb
+---------------------
+ 9223372036854775808
+(1 row)
+
+SELECT '1f2'::jsonb; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '1f2'::jsonb;
+ ^
+DETAIL: Token "1f2" is invalid.
+CONTEXT: JSON data, line 1: 1f2
+SELECT '0.x1'::jsonb; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '0.x1'::jsonb;
+ ^
+DETAIL: Token "0.x1" is invalid.
+CONTEXT: JSON data, line 1: 0.x1
+SELECT '1.3ex100'::jsonb; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '1.3ex100'::jsonb;
+ ^
+DETAIL: Token "1.3ex100" is invalid.
+CONTEXT: JSON data, line 1: 1.3ex100
+-- Arrays.
+SELECT '[]'::jsonb; -- OK
+ jsonb
+-------
+ []
+(1 row)
+
+SELECT '[1,2]'::jsonb; -- OK
+ jsonb
+--------
+ [1, 2]
+(1 row)
+
+SELECT '[1,2,]'::jsonb; -- ERROR, trailing comma
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,2,]'::jsonb;
+ ^
+DETAIL: Expected JSON value, but found "]".
+CONTEXT: JSON data, line 1: [1,2,]
+SELECT '[1,2'::jsonb; -- ERROR, no closing bracket
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,2'::jsonb;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1: [1,2
+SELECT '[1,[2]'::jsonb; -- ERROR, no closing bracket
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,[2]'::jsonb;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1: [1,[2]
+-- Objects.
+SELECT '{}'::jsonb; -- OK
+ jsonb
+-------
+ {}
+(1 row)
+
+SELECT '{"abc"}'::jsonb; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"}'::jsonb;
+ ^
+DETAIL: Expected ":", but found "}".
+CONTEXT: JSON data, line 1: {"abc"}
+SELECT '{"abc":1}'::jsonb; -- OK
+ jsonb
+------------
+ {"abc": 1}
+(1 row)
+
+SELECT '{1:"abc"}'::jsonb; -- ERROR, keys must be strings
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{1:"abc"}'::jsonb;
+ ^
+DETAIL: Expected string or "}", but found "1".
+CONTEXT: JSON data, line 1: {1...
+SELECT '{"abc",1}'::jsonb; -- ERROR, wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc",1}'::jsonb;
+ ^
+DETAIL: Expected ":", but found ",".
+CONTEXT: JSON data, line 1: {"abc",...
+SELECT '{"abc"=1}'::jsonb; -- ERROR, totally wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"=1}'::jsonb;
+ ^
+DETAIL: Token "=" is invalid.
+CONTEXT: JSON data, line 1: {"abc"=...
+SELECT '{"abc"::1}'::jsonb; -- ERROR, another wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"::1}'::jsonb;
+ ^
+DETAIL: Expected JSON value, but found ":".
+CONTEXT: JSON data, line 1: {"abc"::...
+SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::jsonb; -- OK
+ jsonb
+--------------------------------------------------------------------
+ {"abc": 1, "def": 2, "ghi": [3, 4], "hij": {"klm": 5, "nop": [6]}}
+(1 row)
+
+SELECT '{"abc":1:2}'::jsonb; -- ERROR, colon in wrong spot
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc":1:2}'::jsonb;
+ ^
+DETAIL: Expected "," or "}", but found ":".
+CONTEXT: JSON data, line 1: {"abc":1:...
+SELECT '{"abc":1,3}'::jsonb; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc":1,3}'::jsonb;
+ ^
+DETAIL: Expected string, but found "3".
+CONTEXT: JSON data, line 1: {"abc":1,3...
+SELECT repeat('[', 10000)::jsonb;
+ERROR: stack depth limit exceeded
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+SELECT repeat('{"a":', 10000)::jsonb;
+ERROR: stack depth limit exceeded
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+-- Miscellaneous stuff.
+SELECT 'true'::jsonb; -- OK
+ jsonb
+-------
+ true
+(1 row)
+
+SELECT 'false'::jsonb; -- OK
+ jsonb
+-------
+ false
+(1 row)
+
+SELECT 'null'::jsonb; -- OK
+ jsonb
+-------
+ null
+(1 row)
+
+SELECT ' true '::jsonb; -- OK, even with extra whitespace
+ jsonb
+-------
+ true
+(1 row)
+
+SELECT 'true false'::jsonb; -- ERROR, too many values
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'true false'::jsonb;
+ ^
+DETAIL: Expected end of input, but found "false".
+CONTEXT: JSON data, line 1: true false
+SELECT 'true, false'::jsonb; -- ERROR, too many values
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'true, false'::jsonb;
+ ^
+DETAIL: Expected end of input, but found ",".
+CONTEXT: JSON data, line 1: true,...
+SELECT 'truf'::jsonb; -- ERROR, not a keyword
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'truf'::jsonb;
+ ^
+DETAIL: Token "truf" is invalid.
+CONTEXT: JSON data, line 1: truf
+SELECT 'trues'::jsonb; -- ERROR, not a keyword
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'trues'::jsonb;
+ ^
+DETAIL: Token "trues" is invalid.
+CONTEXT: JSON data, line 1: trues
+SELECT ''::jsonb; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT ''::jsonb;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1:
+SELECT ' '::jsonb; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT ' '::jsonb;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1:
+-- Multi-line JSON input to check ERROR reporting
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "three":
+ true}'::jsonb; -- OK
+ jsonb
+-----------------------------------------
+ {"one": 1, "two": "two", "three": true}
+(1 row)
+
+SELECT '{
+ "one": 1,
+ "two":,"two", -- ERROR extraneous comma before field "two"
+ "three":
+ true}'::jsonb;
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{
+ ^
+DETAIL: Expected JSON value, but found ",".
+CONTEXT: JSON data, line 3: "two":,...
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::jsonb;
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{
+ ^
+DETAIL: Expected JSON value, but found "}".
+CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":}
+-- ERROR missing value for last field
+-- make sure jsonb is passed through json generators without being escaped
+SELECT array_to_json(ARRAY [jsonb '{"a":1}', jsonb '{"b":[2,3]}']);
+ array_to_json
+--------------------------
+ [{"a": 1},{"b": [2, 3]}]
+(1 row)
+
+BEGIN;
+COMMIT;
+-- jsonb extraction functions
+CREATE TEMP TABLE test_jsonb (
+ json_type text,
+ test_json jsonb
+);
+INSERT INTO test_jsonb VALUES
+('scalar','"a scalar"'),
+('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
+('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
diff --git a/ydb/library/yql/tests/postgresql/cases/jsonb.sql b/ydb/library/yql/tests/postgresql/cases/jsonb.sql
new file mode 100644
index 0000000000..6280eb2ee5
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/jsonb.sql
@@ -0,0 +1,78 @@
+-- Strings.
+SELECT '""'::jsonb; -- OK.
+SELECT $$''$$::jsonb; -- ERROR, single quotes are not allowed
+SELECT '"abc"'::jsonb; -- OK
+SELECT '"abc'::jsonb; -- ERROR, quotes not closed
+SELECT '"abc
+def"'::jsonb; -- ERROR, unescaped newline in string constant
+SELECT '"\n\"\\"'::jsonb; -- OK, legal escapes
+SELECT '"\v"'::jsonb; -- ERROR, not a valid JSON escape
+-- see json_encoding test for input with unicode escapes
+-- Numbers.
+SELECT '1'::jsonb; -- OK
+SELECT '0'::jsonb; -- OK
+SELECT '01'::jsonb; -- ERROR, not valid according to JSON spec
+SELECT '0.1'::jsonb; -- OK
+SELECT '9223372036854775808'::jsonb; -- OK, even though it's too large for int8
+SELECT '1f2'::jsonb; -- ERROR
+SELECT '0.x1'::jsonb; -- ERROR
+SELECT '1.3ex100'::jsonb; -- ERROR
+-- Arrays.
+SELECT '[]'::jsonb; -- OK
+SELECT '[1,2]'::jsonb; -- OK
+SELECT '[1,2,]'::jsonb; -- ERROR, trailing comma
+SELECT '[1,2'::jsonb; -- ERROR, no closing bracket
+SELECT '[1,[2]'::jsonb; -- ERROR, no closing bracket
+-- Objects.
+SELECT '{}'::jsonb; -- OK
+SELECT '{"abc"}'::jsonb; -- ERROR, no value
+SELECT '{"abc":1}'::jsonb; -- OK
+SELECT '{1:"abc"}'::jsonb; -- ERROR, keys must be strings
+SELECT '{"abc",1}'::jsonb; -- ERROR, wrong separator
+SELECT '{"abc"=1}'::jsonb; -- ERROR, totally wrong separator
+SELECT '{"abc"::1}'::jsonb; -- ERROR, another wrong separator
+SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::jsonb; -- OK
+SELECT '{"abc":1:2}'::jsonb; -- ERROR, colon in wrong spot
+SELECT '{"abc":1,3}'::jsonb; -- ERROR, no value
+SELECT repeat('[', 10000)::jsonb;
+SELECT repeat('{"a":', 10000)::jsonb;
+-- Miscellaneous stuff.
+SELECT 'true'::jsonb; -- OK
+SELECT 'false'::jsonb; -- OK
+SELECT 'null'::jsonb; -- OK
+SELECT ' true '::jsonb; -- OK, even with extra whitespace
+SELECT 'true false'::jsonb; -- ERROR, too many values
+SELECT 'true, false'::jsonb; -- ERROR, too many values
+SELECT 'truf'::jsonb; -- ERROR, not a keyword
+SELECT 'trues'::jsonb; -- ERROR, not a keyword
+SELECT ''::jsonb; -- ERROR, no value
+SELECT ' '::jsonb; -- ERROR, no value
+-- Multi-line JSON input to check ERROR reporting
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "three":
+ true}'::jsonb; -- OK
+SELECT '{
+ "one": 1,
+ "two":,"two", -- ERROR extraneous comma before field "two"
+ "three":
+ true}'::jsonb;
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::jsonb;
+-- ERROR missing value for last field
+-- make sure jsonb is passed through json generators without being escaped
+SELECT array_to_json(ARRAY [jsonb '{"a":1}', jsonb '{"b":[2,3]}']);
+BEGIN;
+COMMIT;
+-- jsonb extraction functions
+CREATE TEMP TABLE test_jsonb (
+ json_type text,
+ test_json jsonb
+);
+INSERT INTO test_jsonb VALUES
+('scalar','"a scalar"'),
+('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
+('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
diff --git a/ydb/library/yql/tests/postgresql/cases/jsonb_jsonpath.out b/ydb/library/yql/tests/postgresql/cases/jsonb_jsonpath.out
new file mode 100644
index 0000000000..cd22b75a63
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/jsonb_jsonpath.out
@@ -0,0 +1,30 @@
+select jsonb '{"a": 12}' @? '$';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": 12}' @? '1';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": 12}' @? '$.a.b';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": 12}' @? '$.b';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": 12}' @? '$.a + 2';
+ ?column?
+----------
+ t
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/jsonb_jsonpath.sql b/ydb/library/yql/tests/postgresql/cases/jsonb_jsonpath.sql
new file mode 100644
index 0000000000..16d4ede54e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/jsonb_jsonpath.sql
@@ -0,0 +1,5 @@
+select jsonb '{"a": 12}' @? '$';
+select jsonb '{"a": 12}' @? '1';
+select jsonb '{"a": 12}' @? '$.a.b';
+select jsonb '{"a": 12}' @? '$.b';
+select jsonb '{"a": 12}' @? '$.a + 2';
diff --git a/ydb/library/yql/tests/postgresql/cases/jsonpath.out b/ydb/library/yql/tests/postgresql/cases/jsonpath.out
new file mode 100644
index 0000000000..bdbff16a19
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/jsonpath.out
@@ -0,0 +1,896 @@
+--jsonpath io
+select ''::jsonpath;
+ERROR: invalid input syntax for type jsonpath: ""
+LINE 1: select ''::jsonpath;
+ ^
+select '$'::jsonpath;
+ jsonpath
+----------
+ $
+(1 row)
+
+select 'strict $'::jsonpath;
+ jsonpath
+----------
+ strict $
+(1 row)
+
+select 'lax $'::jsonpath;
+ jsonpath
+----------
+ $
+(1 row)
+
+select '$.a'::jsonpath;
+ jsonpath
+----------
+ $."a"
+(1 row)
+
+select '$.a.v'::jsonpath;
+ jsonpath
+-----------
+ $."a"."v"
+(1 row)
+
+select '$.a.*'::jsonpath;
+ jsonpath
+----------
+ $."a".*
+(1 row)
+
+select '$.*[*]'::jsonpath;
+ jsonpath
+----------
+ $.*[*]
+(1 row)
+
+select '$.a[*]'::jsonpath;
+ jsonpath
+----------
+ $."a"[*]
+(1 row)
+
+select '$.a[*][*]'::jsonpath;
+ jsonpath
+-------------
+ $."a"[*][*]
+(1 row)
+
+select '$[*]'::jsonpath;
+ jsonpath
+----------
+ $[*]
+(1 row)
+
+select '$[0]'::jsonpath;
+ jsonpath
+----------
+ $[0]
+(1 row)
+
+select '$[*][0]'::jsonpath;
+ jsonpath
+----------
+ $[*][0]
+(1 row)
+
+select '$[*].a'::jsonpath;
+ jsonpath
+----------
+ $[*]."a"
+(1 row)
+
+select '$[*][0].a.b'::jsonpath;
+ jsonpath
+-----------------
+ $[*][0]."a"."b"
+(1 row)
+
+select '$.a.**.b'::jsonpath;
+ jsonpath
+--------------
+ $."a".**."b"
+(1 row)
+
+select '$.a.**{2}.b'::jsonpath;
+ jsonpath
+-----------------
+ $."a".**{2}."b"
+(1 row)
+
+select '$.a.**{2 to 2}.b'::jsonpath;
+ jsonpath
+-----------------
+ $."a".**{2}."b"
+(1 row)
+
+select '$.a.**{2 to 5}.b'::jsonpath;
+ jsonpath
+----------------------
+ $."a".**{2 to 5}."b"
+(1 row)
+
+select '$.a.**{0 to 5}.b'::jsonpath;
+ jsonpath
+----------------------
+ $."a".**{0 to 5}."b"
+(1 row)
+
+select '$.a.**{5 to last}.b'::jsonpath;
+ jsonpath
+-------------------------
+ $."a".**{5 to last}."b"
+(1 row)
+
+select '$.a.**{last}.b'::jsonpath;
+ jsonpath
+--------------------
+ $."a".**{last}."b"
+(1 row)
+
+select '$.a.**{last to 5}.b'::jsonpath;
+ jsonpath
+-------------------------
+ $."a".**{last to 5}."b"
+(1 row)
+
+select '$+1'::jsonpath;
+ jsonpath
+----------
+ ($ + 1)
+(1 row)
+
+select '$-1'::jsonpath;
+ jsonpath
+----------
+ ($ - 1)
+(1 row)
+
+select '$--+1'::jsonpath;
+ jsonpath
+----------
+ ($ - -1)
+(1 row)
+
+select '$.a/+-1'::jsonpath;
+ jsonpath
+--------------
+ ($."a" / -1)
+(1 row)
+
+select '1 * 2 + 4 % -3 != false'::jsonpath;
+ jsonpath
+---------------------------
+ (1 * 2 + 4 % -3 != false)
+(1 row)
+
+select '"\b\f\r\n\t\v\"\''\\"'::jsonpath;
+ jsonpath
+-------------------------
+ "\b\f\r\n\t\u000b\"'\\"
+(1 row)
+
+select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath;
+ jsonpath
+----------
+ "PgSQL"
+(1 row)
+
+select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath;
+ jsonpath
+---------------------
+ $."fooPgSQL\t\"bar"
+(1 row)
+
+select '"\z"'::jsonpath; -- unrecognized escape is just the literal char
+ jsonpath
+----------
+ "z"
+(1 row)
+
+select '$.g ? ($.a == 1)'::jsonpath;
+ jsonpath
+--------------------
+ $."g"?($."a" == 1)
+(1 row)
+
+select '$.g ? (@ == 1)'::jsonpath;
+ jsonpath
+----------------
+ $."g"?(@ == 1)
+(1 row)
+
+select '$.g ? (@.a == 1)'::jsonpath;
+ jsonpath
+--------------------
+ $."g"?(@."a" == 1)
+(1 row)
+
+select '$.g ? (@.a == 1 || @.a == 4)'::jsonpath;
+ jsonpath
+----------------------------------
+ $."g"?(@."a" == 1 || @."a" == 4)
+(1 row)
+
+select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath;
+ jsonpath
+----------------------------------
+ $."g"?(@."a" == 1 && @."a" == 4)
+(1 row)
+
+select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath;
+ jsonpath
+------------------------------------------------
+ $."g"?(@."a" == 1 || @."a" == 4 && @."b" == 7)
+(1 row)
+
+select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath;
+ jsonpath
+---------------------------------------------------
+ $."g"?(@."a" == 1 || !(@."a" == 4) && @."b" == 7)
+(1 row)
+
+select '$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath;
+ jsonpath
+-------------------------------------------------------------------
+ $."g"?(@."a" == 1 || !(@."x" >= 123 || @."a" == 4) && @."b" == 7)
+(1 row)
+
+select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath;
+ jsonpath
+---------------------------------------
+ $."g"?(@."x" >= @[*]?(@."a" > "abc"))
+(1 row)
+
+select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath;
+ jsonpath
+-------------------------------------------------
+ $."g"?((@."x" >= 123 || @."a" == 4) is unknown)
+(1 row)
+
+select '$.g ? (exists (@.x))'::jsonpath;
+ jsonpath
+------------------------
+ $."g"?(exists (@."x"))
+(1 row)
+
+select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath;
+ jsonpath
+----------------------------------
+ $."g"?(exists (@."x"?(@ == 14)))
+(1 row)
+
+select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath;
+ jsonpath
+------------------------------------------------------------------
+ $."g"?((@."x" >= 123 || @."a" == 4) && exists (@."x"?(@ == 14)))
+(1 row)
+
+select '$.g ? (+@.x >= +-(+@.a + 2))'::jsonpath;
+ jsonpath
+------------------------------------
+ $."g"?(+@."x" >= +(-(+@."a" + 2)))
+(1 row)
+
+select '$a'::jsonpath;
+ jsonpath
+----------
+ $"a"
+(1 row)
+
+select '$a.b'::jsonpath;
+ jsonpath
+----------
+ $"a"."b"
+(1 row)
+
+select '$a[*]'::jsonpath;
+ jsonpath
+----------
+ $"a"[*]
+(1 row)
+
+select '$.g ? (@.zip == $zip)'::jsonpath;
+ jsonpath
+---------------------------
+ $."g"?(@."zip" == $"zip")
+(1 row)
+
+select '$.a[1,2, 3 to 16]'::jsonpath;
+ jsonpath
+--------------------
+ $."a"[1,2,3 to 16]
+(1 row)
+
+select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath;
+ jsonpath
+----------------------------------------
+ $."a"[$"a" + 1,$"b"[*] to -($[0] * 2)]
+(1 row)
+
+select '$.a[$.a.size() - 3]'::jsonpath;
+ jsonpath
+-------------------------
+ $."a"[$."a".size() - 3]
+(1 row)
+
+select 'last'::jsonpath;
+ERROR: LAST is allowed only in array subscripts
+LINE 1: select 'last'::jsonpath;
+ ^
+select '"last"'::jsonpath;
+ jsonpath
+----------
+ "last"
+(1 row)
+
+select '$.last'::jsonpath;
+ jsonpath
+----------
+ $."last"
+(1 row)
+
+select '$ ? (last > 0)'::jsonpath;
+ERROR: LAST is allowed only in array subscripts
+LINE 1: select '$ ? (last > 0)'::jsonpath;
+ ^
+select '$[last]'::jsonpath;
+ jsonpath
+----------
+ $[last]
+(1 row)
+
+select '$[$[0] ? (last > 0)]'::jsonpath;
+ jsonpath
+--------------------
+ $[$[0]?(last > 0)]
+(1 row)
+
+select 'null.type()'::jsonpath;
+ jsonpath
+-------------
+ null.type()
+(1 row)
+
+select '1.type()'::jsonpath;
+ jsonpath
+----------
+ 1.type()
+(1 row)
+
+select '(1).type()'::jsonpath;
+ jsonpath
+----------
+ 1.type()
+(1 row)
+
+select '1.2.type()'::jsonpath;
+ jsonpath
+------------
+ 1.2.type()
+(1 row)
+
+select '"aaa".type()'::jsonpath;
+ jsonpath
+--------------
+ "aaa".type()
+(1 row)
+
+select 'true.type()'::jsonpath;
+ jsonpath
+-------------
+ true.type()
+(1 row)
+
+select '$.double().floor().ceiling().abs()'::jsonpath;
+ jsonpath
+------------------------------------
+ $.double().floor().ceiling().abs()
+(1 row)
+
+select '$.keyvalue().key'::jsonpath;
+ jsonpath
+--------------------
+ $.keyvalue()."key"
+(1 row)
+
+select '$.datetime()'::jsonpath;
+ jsonpath
+--------------
+ $.datetime()
+(1 row)
+
+select '$.datetime("datetime template")'::jsonpath;
+ jsonpath
+---------------------------------
+ $.datetime("datetime template")
+(1 row)
+
+select '$ ? (@ starts with "abc")'::jsonpath;
+ jsonpath
+-------------------------
+ $?(@ starts with "abc")
+(1 row)
+
+select '$ ? (@ starts with $var)'::jsonpath;
+ jsonpath
+--------------------------
+ $?(@ starts with $"var")
+(1 row)
+
+select '$ ? (@ like_regex "(invalid pattern")'::jsonpath;
+ERROR: invalid regular expression: parentheses () not balanced
+LINE 1: select '$ ? (@ like_regex "(invalid pattern")'::jsonpath;
+ ^
+select '$ ? (@ like_regex "pattern")'::jsonpath;
+ jsonpath
+----------------------------
+ $?(@ like_regex "pattern")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "")'::jsonpath;
+ jsonpath
+----------------------------
+ $?(@ like_regex "pattern")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath;
+ jsonpath
+-------------------------------------
+ $?(@ like_regex "pattern" flag "i")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath;
+ jsonpath
+--------------------------------------
+ $?(@ like_regex "pattern" flag "is")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "isim")'::jsonpath;
+ jsonpath
+---------------------------------------
+ $?(@ like_regex "pattern" flag "ism")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath;
+ERROR: XQuery "x" flag (expanded regular expressions) is not implemented
+LINE 1: select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath;
+ ^
+select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath;
+ jsonpath
+-------------------------------------
+ $?(@ like_regex "pattern" flag "q")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath;
+ jsonpath
+--------------------------------------
+ $?(@ like_regex "pattern" flag "iq")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath;
+ jsonpath
+-----------------------------------------
+ $?(@ like_regex "pattern" flag "ismxq")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath;
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath;
+ ^
+DETAIL: unrecognized flag character "a" in LIKE_REGEX predicate
+select '$ < 1'::jsonpath;
+ jsonpath
+----------
+ ($ < 1)
+(1 row)
+
+select '($ < 1) || $.a.b <= $x'::jsonpath;
+ jsonpath
+------------------------------
+ ($ < 1 || $."a"."b" <= $"x")
+(1 row)
+
+select '@ + 1'::jsonpath;
+ERROR: @ is not allowed in root expressions
+LINE 1: select '@ + 1'::jsonpath;
+ ^
+select '($).a.b'::jsonpath;
+ jsonpath
+-----------
+ $."a"."b"
+(1 row)
+
+select '($.a.b).c.d'::jsonpath;
+ jsonpath
+-------------------
+ $."a"."b"."c"."d"
+(1 row)
+
+select '($.a.b + -$.x.y).c.d'::jsonpath;
+ jsonpath
+----------------------------------
+ ($."a"."b" + -$."x"."y")."c"."d"
+(1 row)
+
+select '(-+$.a.b).c.d'::jsonpath;
+ jsonpath
+-------------------------
+ (-(+$."a"."b"))."c"."d"
+(1 row)
+
+select '1 + ($.a.b + 2).c.d'::jsonpath;
+ jsonpath
+-------------------------------
+ (1 + ($."a"."b" + 2)."c"."d")
+(1 row)
+
+select '1 + ($.a.b > 2).c.d'::jsonpath;
+ jsonpath
+-------------------------------
+ (1 + ($."a"."b" > 2)."c"."d")
+(1 row)
+
+select '($)'::jsonpath;
+ jsonpath
+----------
+ $
+(1 row)
+
+select '(($))'::jsonpath;
+ jsonpath
+----------
+ $
+(1 row)
+
+select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath;
+ jsonpath
+-------------------------------------------------
+ (($ + 1)."a" + 2."b"?(@ > 1 || exists (@."c")))
+(1 row)
+
+select '$ ? (@.a < 1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < -1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < -1)
+(1 row)
+
+select '$ ? (@.a < +1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < 0.1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 0.1)
+(1 row)
+
+select '$ ? (@.a < -0.1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < -0.1)
+(1 row)
+
+select '$ ? (@.a < +0.1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 0.1)
+(1 row)
+
+select '$ ? (@.a < 10.1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 10.1)
+(1 row)
+
+select '$ ? (@.a < -10.1)'::jsonpath;
+ jsonpath
+-------------------
+ $?(@."a" < -10.1)
+(1 row)
+
+select '$ ? (@.a < +10.1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 10.1)
+(1 row)
+
+select '$ ? (@.a < 1e1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < 10)
+(1 row)
+
+select '$ ? (@.a < -1e1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < -10)
+(1 row)
+
+select '$ ? (@.a < +1e1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < 10)
+(1 row)
+
+select '$ ? (@.a < 0.1e1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < -0.1e1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < -1)
+(1 row)
+
+select '$ ? (@.a < +0.1e1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < 10.1e1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 101)
+(1 row)
+
+select '$ ? (@.a < -10.1e1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < -101)
+(1 row)
+
+select '$ ? (@.a < +10.1e1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 101)
+(1 row)
+
+select '$ ? (@.a < 1e-1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 0.1)
+(1 row)
+
+select '$ ? (@.a < -1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < -0.1)
+(1 row)
+
+select '$ ? (@.a < +1e-1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 0.1)
+(1 row)
+
+select '$ ? (@.a < 0.1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 0.01)
+(1 row)
+
+select '$ ? (@.a < -0.1e-1)'::jsonpath;
+ jsonpath
+-------------------
+ $?(@."a" < -0.01)
+(1 row)
+
+select '$ ? (@.a < +0.1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 0.01)
+(1 row)
+
+select '$ ? (@.a < 10.1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 1.01)
+(1 row)
+
+select '$ ? (@.a < -10.1e-1)'::jsonpath;
+ jsonpath
+-------------------
+ $?(@."a" < -1.01)
+(1 row)
+
+select '$ ? (@.a < +10.1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 1.01)
+(1 row)
+
+select '$ ? (@.a < 1e+1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < 10)
+(1 row)
+
+select '$ ? (@.a < -1e+1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < -10)
+(1 row)
+
+select '$ ? (@.a < +1e+1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < 10)
+(1 row)
+
+select '$ ? (@.a < 0.1e+1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < -0.1e+1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < -1)
+(1 row)
+
+select '$ ? (@.a < +0.1e+1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < 10.1e+1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 101)
+(1 row)
+
+select '$ ? (@.a < -10.1e+1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < -101)
+(1 row)
+
+select '$ ? (@.a < +10.1e+1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 101)
+(1 row)
+
+select '0'::jsonpath;
+ jsonpath
+----------
+ 0
+(1 row)
+
+select '0.0'::jsonpath;
+ jsonpath
+----------
+ 0.0
+(1 row)
+
+select '0.000'::jsonpath;
+ jsonpath
+----------
+ 0.000
+(1 row)
+
+select '0.000e1'::jsonpath;
+ jsonpath
+----------
+ 0.00
+(1 row)
+
+select '0.000e2'::jsonpath;
+ jsonpath
+----------
+ 0.0
+(1 row)
+
+select '0.000e3'::jsonpath;
+ jsonpath
+----------
+ 0
+(1 row)
+
+select '0.0010'::jsonpath;
+ jsonpath
+----------
+ 0.0010
+(1 row)
+
+select '0.0010e-1'::jsonpath;
+ jsonpath
+----------
+ 0.00010
+(1 row)
+
+select '0.0010e+1'::jsonpath;
+ jsonpath
+----------
+ 0.010
+(1 row)
+
+select '0.0010e+2'::jsonpath;
+ jsonpath
+----------
+ 0.10
+(1 row)
+
+select '1e'::jsonpath;
+ERROR: invalid floating point number at or near "1e" of jsonpath input
+LINE 1: select '1e'::jsonpath;
+ ^
+select '1.e'::jsonpath;
+ jsonpath
+----------
+ 1."e"
+(1 row)
+
+select '1.2e'::jsonpath;
+ERROR: invalid floating point number at or near "1.2e" of jsonpath input
+LINE 1: select '1.2e'::jsonpath;
+ ^
+select '1.2.e'::jsonpath;
+ jsonpath
+----------
+ 1.2."e"
+(1 row)
+
+select '(1.2).e'::jsonpath;
+ jsonpath
+----------
+ 1.2."e"
+(1 row)
+
+select '1e3'::jsonpath;
+ jsonpath
+----------
+ 1000
+(1 row)
+
+select '1.e3'::jsonpath;
+ jsonpath
+----------
+ 1."e3"
+(1 row)
+
+select '1.e3.e'::jsonpath;
+ jsonpath
+------------
+ 1."e3"."e"
+(1 row)
+
+select '1.e3.e4'::jsonpath;
+ jsonpath
+-------------
+ 1."e3"."e4"
+(1 row)
+
+select '1.2e3'::jsonpath;
+ jsonpath
+----------
+ 1200
+(1 row)
+
+select '1.2.e3'::jsonpath;
+ jsonpath
+----------
+ 1.2."e3"
+(1 row)
+
+select '(1.2).e3'::jsonpath;
+ jsonpath
+----------
+ 1.2."e3"
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/jsonpath.sql b/ydb/library/yql/tests/postgresql/cases/jsonpath.sql
new file mode 100644
index 0000000000..9c3a92a2fb
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/jsonpath.sql
@@ -0,0 +1,153 @@
+--jsonpath io
+select ''::jsonpath;
+select '$'::jsonpath;
+select 'strict $'::jsonpath;
+select 'lax $'::jsonpath;
+select '$.a'::jsonpath;
+select '$.a.v'::jsonpath;
+select '$.a.*'::jsonpath;
+select '$.*[*]'::jsonpath;
+select '$.a[*]'::jsonpath;
+select '$.a[*][*]'::jsonpath;
+select '$[*]'::jsonpath;
+select '$[0]'::jsonpath;
+select '$[*][0]'::jsonpath;
+select '$[*].a'::jsonpath;
+select '$[*][0].a.b'::jsonpath;
+select '$.a.**.b'::jsonpath;
+select '$.a.**{2}.b'::jsonpath;
+select '$.a.**{2 to 2}.b'::jsonpath;
+select '$.a.**{2 to 5}.b'::jsonpath;
+select '$.a.**{0 to 5}.b'::jsonpath;
+select '$.a.**{5 to last}.b'::jsonpath;
+select '$.a.**{last}.b'::jsonpath;
+select '$.a.**{last to 5}.b'::jsonpath;
+select '$+1'::jsonpath;
+select '$-1'::jsonpath;
+select '$--+1'::jsonpath;
+select '$.a/+-1'::jsonpath;
+select '1 * 2 + 4 % -3 != false'::jsonpath;
+select '"\b\f\r\n\t\v\"\''\\"'::jsonpath;
+select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath;
+select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath;
+select '"\z"'::jsonpath; -- unrecognized escape is just the literal char
+select '$.g ? ($.a == 1)'::jsonpath;
+select '$.g ? (@ == 1)'::jsonpath;
+select '$.g ? (@.a == 1)'::jsonpath;
+select '$.g ? (@.a == 1 || @.a == 4)'::jsonpath;
+select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath;
+select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath;
+select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath;
+select '$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath;
+select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath;
+select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath;
+select '$.g ? (exists (@.x))'::jsonpath;
+select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath;
+select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath;
+select '$.g ? (+@.x >= +-(+@.a + 2))'::jsonpath;
+select '$a'::jsonpath;
+select '$a.b'::jsonpath;
+select '$a[*]'::jsonpath;
+select '$.g ? (@.zip == $zip)'::jsonpath;
+select '$.a[1,2, 3 to 16]'::jsonpath;
+select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath;
+select '$.a[$.a.size() - 3]'::jsonpath;
+select 'last'::jsonpath;
+select '"last"'::jsonpath;
+select '$.last'::jsonpath;
+select '$ ? (last > 0)'::jsonpath;
+select '$[last]'::jsonpath;
+select '$[$[0] ? (last > 0)]'::jsonpath;
+select 'null.type()'::jsonpath;
+select '1.type()'::jsonpath;
+select '(1).type()'::jsonpath;
+select '1.2.type()'::jsonpath;
+select '"aaa".type()'::jsonpath;
+select 'true.type()'::jsonpath;
+select '$.double().floor().ceiling().abs()'::jsonpath;
+select '$.keyvalue().key'::jsonpath;
+select '$.datetime()'::jsonpath;
+select '$.datetime("datetime template")'::jsonpath;
+select '$ ? (@ starts with "abc")'::jsonpath;
+select '$ ? (@ starts with $var)'::jsonpath;
+select '$ ? (@ like_regex "(invalid pattern")'::jsonpath;
+select '$ ? (@ like_regex "pattern")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "isim")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath;
+select '$ < 1'::jsonpath;
+select '($ < 1) || $.a.b <= $x'::jsonpath;
+select '@ + 1'::jsonpath;
+select '($).a.b'::jsonpath;
+select '($.a.b).c.d'::jsonpath;
+select '($.a.b + -$.x.y).c.d'::jsonpath;
+select '(-+$.a.b).c.d'::jsonpath;
+select '1 + ($.a.b + 2).c.d'::jsonpath;
+select '1 + ($.a.b > 2).c.d'::jsonpath;
+select '($)'::jsonpath;
+select '(($))'::jsonpath;
+select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath;
+select '$ ? (@.a < 1)'::jsonpath;
+select '$ ? (@.a < -1)'::jsonpath;
+select '$ ? (@.a < +1)'::jsonpath;
+select '$ ? (@.a < 0.1)'::jsonpath;
+select '$ ? (@.a < -0.1)'::jsonpath;
+select '$ ? (@.a < +0.1)'::jsonpath;
+select '$ ? (@.a < 10.1)'::jsonpath;
+select '$ ? (@.a < -10.1)'::jsonpath;
+select '$ ? (@.a < +10.1)'::jsonpath;
+select '$ ? (@.a < 1e1)'::jsonpath;
+select '$ ? (@.a < -1e1)'::jsonpath;
+select '$ ? (@.a < +1e1)'::jsonpath;
+select '$ ? (@.a < 0.1e1)'::jsonpath;
+select '$ ? (@.a < -0.1e1)'::jsonpath;
+select '$ ? (@.a < +0.1e1)'::jsonpath;
+select '$ ? (@.a < 10.1e1)'::jsonpath;
+select '$ ? (@.a < -10.1e1)'::jsonpath;
+select '$ ? (@.a < +10.1e1)'::jsonpath;
+select '$ ? (@.a < 1e-1)'::jsonpath;
+select '$ ? (@.a < -1e-1)'::jsonpath;
+select '$ ? (@.a < +1e-1)'::jsonpath;
+select '$ ? (@.a < 0.1e-1)'::jsonpath;
+select '$ ? (@.a < -0.1e-1)'::jsonpath;
+select '$ ? (@.a < +0.1e-1)'::jsonpath;
+select '$ ? (@.a < 10.1e-1)'::jsonpath;
+select '$ ? (@.a < -10.1e-1)'::jsonpath;
+select '$ ? (@.a < +10.1e-1)'::jsonpath;
+select '$ ? (@.a < 1e+1)'::jsonpath;
+select '$ ? (@.a < -1e+1)'::jsonpath;
+select '$ ? (@.a < +1e+1)'::jsonpath;
+select '$ ? (@.a < 0.1e+1)'::jsonpath;
+select '$ ? (@.a < -0.1e+1)'::jsonpath;
+select '$ ? (@.a < +0.1e+1)'::jsonpath;
+select '$ ? (@.a < 10.1e+1)'::jsonpath;
+select '$ ? (@.a < -10.1e+1)'::jsonpath;
+select '$ ? (@.a < +10.1e+1)'::jsonpath;
+select '0'::jsonpath;
+select '0.0'::jsonpath;
+select '0.000'::jsonpath;
+select '0.000e1'::jsonpath;
+select '0.000e2'::jsonpath;
+select '0.000e3'::jsonpath;
+select '0.0010'::jsonpath;
+select '0.0010e-1'::jsonpath;
+select '0.0010e+1'::jsonpath;
+select '0.0010e+2'::jsonpath;
+select '1e'::jsonpath;
+select '1.e'::jsonpath;
+select '1.2e'::jsonpath;
+select '1.2.e'::jsonpath;
+select '(1.2).e'::jsonpath;
+select '1e3'::jsonpath;
+select '1.e3'::jsonpath;
+select '1.e3.e'::jsonpath;
+select '1.e3.e4'::jsonpath;
+select '1.2e3'::jsonpath;
+select '1.2.e3'::jsonpath;
+select '(1.2).e3'::jsonpath;
diff --git a/ydb/library/yql/tests/postgresql/cases/jsonpath_encoding.out b/ydb/library/yql/tests/postgresql/cases/jsonpath_encoding.out
new file mode 100644
index 0000000000..2b57de4a06
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/jsonpath_encoding.out
@@ -0,0 +1,173 @@
+--
+-- encoding-sensitive tests for jsonpath
+--
+SELECT getdatabaseencoding(); -- just to label the results files
+ getdatabaseencoding
+---------------------
+ UTF8
+(1 row)
+
+-- checks for double-quoted values
+-- basic unicode input
+SELECT '"\u"'::jsonpath; -- ERROR, incomplete escape
+ERROR: invalid unicode sequence at or near "\u" of jsonpath input
+LINE 1: SELECT '"\u"'::jsonpath;
+ ^
+SELECT '"\u00"'::jsonpath; -- ERROR, incomplete escape
+ERROR: invalid unicode sequence at or near "\u00" of jsonpath input
+LINE 1: SELECT '"\u00"'::jsonpath;
+ ^
+SELECT '"\u000g"'::jsonpath; -- ERROR, g is not a hex digit
+ERROR: invalid unicode sequence at or near "\u000" of jsonpath input
+LINE 1: SELECT '"\u000g"'::jsonpath;
+ ^
+SELECT '"\u0000"'::jsonpath; -- OK, legal escape
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT '"\u0000"'::jsonpath;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+SELECT '"\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK
+ jsonpath
+----------
+ "ꯍ"
+(1 row)
+
+-- handling of unicode surrogate pairs
+select '"\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8;
+ correct_in_utf8
+-----------------
+ "😄🐶"
+(1 row)
+
+select '"\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '"\ud83d\ud83d"'::jsonpath;
+ ^
+DETAIL: Unicode high surrogate must not follow a high surrogate.
+select '"\ude04\ud83d"'::jsonpath; -- surrogates in wrong order
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '"\ude04\ud83d"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+select '"\ud83dX"'::jsonpath; -- orphan high surrogate
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '"\ud83dX"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+select '"\ude04X"'::jsonpath; -- orphan low surrogate
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '"\ude04X"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+--handling of simple unicode escapes
+select '"the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8;
+ correct_in_utf8
+------------------------
+ "the Copyright © sign"
+(1 row)
+
+select '"dollar \u0024 character"'::jsonpath as correct_everywhere;
+ correct_everywhere
+----------------------
+ "dollar $ character"
+(1 row)
+
+select '"dollar \\u0024 character"'::jsonpath as not_an_escape;
+ not_an_escape
+----------------------------
+ "dollar \\u0024 character"
+(1 row)
+
+select '"null \u0000 escape"'::jsonpath as not_unescaped;
+ERROR: unsupported Unicode escape sequence
+LINE 1: select '"null \u0000 escape"'::jsonpath as not_unescaped;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+select '"null \\u0000 escape"'::jsonpath as not_an_escape;
+ not_an_escape
+-----------------------
+ "null \\u0000 escape"
+(1 row)
+
+-- checks for quoted key names
+-- basic unicode input
+SELECT '$."\u"'::jsonpath; -- ERROR, incomplete escape
+ERROR: invalid unicode sequence at or near "\u" of jsonpath input
+LINE 1: SELECT '$."\u"'::jsonpath;
+ ^
+SELECT '$."\u00"'::jsonpath; -- ERROR, incomplete escape
+ERROR: invalid unicode sequence at or near "\u00" of jsonpath input
+LINE 1: SELECT '$."\u00"'::jsonpath;
+ ^
+SELECT '$."\u000g"'::jsonpath; -- ERROR, g is not a hex digit
+ERROR: invalid unicode sequence at or near "\u000" of jsonpath input
+LINE 1: SELECT '$."\u000g"'::jsonpath;
+ ^
+SELECT '$."\u0000"'::jsonpath; -- OK, legal escape
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT '$."\u0000"'::jsonpath;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+SELECT '$."\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK
+ jsonpath
+----------
+ $."ꯍ"
+(1 row)
+
+-- handling of unicode surrogate pairs
+select '$."\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8;
+ correct_in_utf8
+-----------------
+ $."😄🐶"
+(1 row)
+
+select '$."\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$."\ud83d\ud83d"'::jsonpath;
+ ^
+DETAIL: Unicode high surrogate must not follow a high surrogate.
+select '$."\ude04\ud83d"'::jsonpath; -- surrogates in wrong order
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$."\ude04\ud83d"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+select '$."\ud83dX"'::jsonpath; -- orphan high surrogate
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$."\ud83dX"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+select '$."\ude04X"'::jsonpath; -- orphan low surrogate
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$."\ude04X"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+--handling of simple unicode escapes
+select '$."the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8;
+ correct_in_utf8
+--------------------------
+ $."the Copyright © sign"
+(1 row)
+
+select '$."dollar \u0024 character"'::jsonpath as correct_everywhere;
+ correct_everywhere
+------------------------
+ $."dollar $ character"
+(1 row)
+
+select '$."dollar \\u0024 character"'::jsonpath as not_an_escape;
+ not_an_escape
+------------------------------
+ $."dollar \\u0024 character"
+(1 row)
+
+select '$."null \u0000 escape"'::jsonpath as not_unescaped;
+ERROR: unsupported Unicode escape sequence
+LINE 1: select '$."null \u0000 escape"'::jsonpath as not_unescaped;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+select '$."null \\u0000 escape"'::jsonpath as not_an_escape;
+ not_an_escape
+-------------------------
+ $."null \\u0000 escape"
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/jsonpath_encoding.sql b/ydb/library/yql/tests/postgresql/cases/jsonpath_encoding.sql
new file mode 100644
index 0000000000..597c153f5c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/jsonpath_encoding.sql
@@ -0,0 +1,42 @@
+--
+-- encoding-sensitive tests for jsonpath
+--
+SELECT getdatabaseencoding(); -- just to label the results files
+-- checks for double-quoted values
+-- basic unicode input
+SELECT '"\u"'::jsonpath; -- ERROR, incomplete escape
+SELECT '"\u00"'::jsonpath; -- ERROR, incomplete escape
+SELECT '"\u000g"'::jsonpath; -- ERROR, g is not a hex digit
+SELECT '"\u0000"'::jsonpath; -- OK, legal escape
+SELECT '"\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK
+-- handling of unicode surrogate pairs
+select '"\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8;
+select '"\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row
+select '"\ude04\ud83d"'::jsonpath; -- surrogates in wrong order
+select '"\ud83dX"'::jsonpath; -- orphan high surrogate
+select '"\ude04X"'::jsonpath; -- orphan low surrogate
+--handling of simple unicode escapes
+select '"the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8;
+select '"dollar \u0024 character"'::jsonpath as correct_everywhere;
+select '"dollar \\u0024 character"'::jsonpath as not_an_escape;
+select '"null \u0000 escape"'::jsonpath as not_unescaped;
+select '"null \\u0000 escape"'::jsonpath as not_an_escape;
+-- checks for quoted key names
+-- basic unicode input
+SELECT '$."\u"'::jsonpath; -- ERROR, incomplete escape
+SELECT '$."\u00"'::jsonpath; -- ERROR, incomplete escape
+SELECT '$."\u000g"'::jsonpath; -- ERROR, g is not a hex digit
+SELECT '$."\u0000"'::jsonpath; -- OK, legal escape
+SELECT '$."\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK
+-- handling of unicode surrogate pairs
+select '$."\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8;
+select '$."\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row
+select '$."\ude04\ud83d"'::jsonpath; -- surrogates in wrong order
+select '$."\ud83dX"'::jsonpath; -- orphan high surrogate
+select '$."\ude04X"'::jsonpath; -- orphan low surrogate
+--handling of simple unicode escapes
+select '$."the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8;
+select '$."dollar \u0024 character"'::jsonpath as correct_everywhere;
+select '$."dollar \\u0024 character"'::jsonpath as not_an_escape;
+select '$."null \u0000 escape"'::jsonpath as not_unescaped;
+select '$."null \\u0000 escape"'::jsonpath as not_an_escape;
diff --git a/ydb/library/yql/tests/postgresql/cases/limit.out b/ydb/library/yql/tests/postgresql/cases/limit.out
new file mode 100644
index 0000000000..c70d432bf1
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/limit.out
@@ -0,0 +1,14 @@
+-- Test assorted cases involving backwards fetch from a LIMIT plan node
+begin;
+rollback;
+-- SKIP LOCKED and WITH TIES are incompatible
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 1 ROW WITH TIES FOR UPDATE SKIP LOCKED;
+ERROR: SKIP LOCKED and WITH TIES options cannot be used together
+-- should fail
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 50
+ FETCH FIRST 2 ROW WITH TIES;
+ERROR: WITH TIES cannot be specified without ORDER BY clause
+-- leave these views
diff --git a/ydb/library/yql/tests/postgresql/cases/limit.sql b/ydb/library/yql/tests/postgresql/cases/limit.sql
new file mode 100644
index 0000000000..8af04870f5
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/limit.sql
@@ -0,0 +1,12 @@
+-- Test assorted cases involving backwards fetch from a LIMIT plan node
+begin;
+rollback;
+-- SKIP LOCKED and WITH TIES are incompatible
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 1 ROW WITH TIES FOR UPDATE SKIP LOCKED;
+-- should fail
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 50
+ FETCH FIRST 2 ROW WITH TIES;
+-- leave these views
diff --git a/ydb/library/yql/tests/postgresql/cases/name.out b/ydb/library/yql/tests/postgresql/cases/name.out
new file mode 100644
index 0000000000..cb19d6de6e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/name.out
@@ -0,0 +1,126 @@
+--
+-- NAME
+-- all inputs are silently truncated at NAMEDATALEN-1 (63) characters
+--
+-- fixed-length by reference
+SELECT name 'name string' = name 'name string' AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT name 'name string' = name 'name string ' AS "False";
+ False
+-------
+ f
+(1 row)
+
+--
+--
+--
+CREATE TABLE NAME_TBL(f1 name);
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR');
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqr');
+INSERT INTO NAME_TBL(f1) VALUES ('asdfghjkl;');
+INSERT INTO NAME_TBL(f1) VALUES ('343f%2a');
+INSERT INTO NAME_TBL(f1) VALUES ('d34aaasdf');
+INSERT INTO NAME_TBL(f1) VALUES ('');
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ');
+SELECT * FROM NAME_TBL;
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(7 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <> '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+
+(5 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(2 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 < '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+----
+
+(1 row)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(3 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 > '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+(4 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 >= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(6 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(7 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 !~ '.*';
+ f1
+----
+(0 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '[0-9]';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ 343f%2a
+ d34aaasdf
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(5 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*asdf.*';
+ f1
+------------
+ asdfghjkl;
+ d34aaasdf
+(2 rows)
+
+DROP TABLE NAME_TBL;
diff --git a/ydb/library/yql/tests/postgresql/cases/name.sql b/ydb/library/yql/tests/postgresql/cases/name.sql
new file mode 100644
index 0000000000..964d6a72cb
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/name.sql
@@ -0,0 +1,30 @@
+--
+-- NAME
+-- all inputs are silently truncated at NAMEDATALEN-1 (63) characters
+--
+-- fixed-length by reference
+SELECT name 'name string' = name 'name string' AS "True";
+SELECT name 'name string' = name 'name string ' AS "False";
+--
+--
+--
+CREATE TABLE NAME_TBL(f1 name);
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR');
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqr');
+INSERT INTO NAME_TBL(f1) VALUES ('asdfghjkl;');
+INSERT INTO NAME_TBL(f1) VALUES ('343f%2a');
+INSERT INTO NAME_TBL(f1) VALUES ('d34aaasdf');
+INSERT INTO NAME_TBL(f1) VALUES ('');
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ');
+SELECT * FROM NAME_TBL;
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <> '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 < '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 > '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 >= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*';
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 !~ '.*';
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '[0-9]';
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*asdf.*';
+DROP TABLE NAME_TBL;
diff --git a/ydb/library/yql/tests/postgresql/cases/numeric.out b/ydb/library/yql/tests/postgresql/cases/numeric.out
new file mode 100644
index 0000000000..8ff77c3c01
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/numeric.out
@@ -0,0 +1,728 @@
+--
+-- NUMERIC
+--
+CREATE TABLE num_data (id int4, val numeric(210,10));
+CREATE TABLE num_exp_add (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_sub (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_div (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_mul (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_sqrt (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_ln (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_log10 (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_power_10_ln (id int4, expected numeric(210,10));
+CREATE TABLE num_result (id1 int4, id2 int4, result numeric(210,10));
+-- ******************************
+-- * The following EXPECTED results are computed by bc(1)
+-- * with a scale of 200
+-- ******************************
+BEGIN TRANSACTION;
+INSERT INTO num_exp_add VALUES (0,0,'0');
+INSERT INTO num_exp_sub VALUES (0,0,'0');
+INSERT INTO num_exp_mul VALUES (0,0,'0');
+INSERT INTO num_exp_div VALUES (0,0,'NaN');
+INSERT INTO num_exp_add VALUES (0,1,'0');
+INSERT INTO num_exp_sub VALUES (0,1,'0');
+INSERT INTO num_exp_mul VALUES (0,1,'0');
+INSERT INTO num_exp_div VALUES (0,1,'NaN');
+INSERT INTO num_exp_add VALUES (0,2,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (0,2,'34338492.215397047');
+INSERT INTO num_exp_mul VALUES (0,2,'0');
+INSERT INTO num_exp_div VALUES (0,2,'0');
+INSERT INTO num_exp_add VALUES (0,3,'4.31');
+INSERT INTO num_exp_sub VALUES (0,3,'-4.31');
+INSERT INTO num_exp_mul VALUES (0,3,'0');
+INSERT INTO num_exp_div VALUES (0,3,'0');
+INSERT INTO num_exp_add VALUES (0,4,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (0,4,'-7799461.4119');
+INSERT INTO num_exp_mul VALUES (0,4,'0');
+INSERT INTO num_exp_div VALUES (0,4,'0');
+INSERT INTO num_exp_add VALUES (0,5,'16397.038491');
+INSERT INTO num_exp_sub VALUES (0,5,'-16397.038491');
+INSERT INTO num_exp_mul VALUES (0,5,'0');
+INSERT INTO num_exp_div VALUES (0,5,'0');
+INSERT INTO num_exp_add VALUES (0,6,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (0,6,'-93901.57763026');
+INSERT INTO num_exp_mul VALUES (0,6,'0');
+INSERT INTO num_exp_div VALUES (0,6,'0');
+INSERT INTO num_exp_add VALUES (0,7,'-83028485');
+INSERT INTO num_exp_sub VALUES (0,7,'83028485');
+INSERT INTO num_exp_mul VALUES (0,7,'0');
+INSERT INTO num_exp_div VALUES (0,7,'0');
+INSERT INTO num_exp_add VALUES (0,8,'74881');
+INSERT INTO num_exp_sub VALUES (0,8,'-74881');
+INSERT INTO num_exp_mul VALUES (0,8,'0');
+INSERT INTO num_exp_div VALUES (0,8,'0');
+INSERT INTO num_exp_add VALUES (0,9,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (0,9,'24926804.045047420');
+INSERT INTO num_exp_mul VALUES (0,9,'0');
+INSERT INTO num_exp_div VALUES (0,9,'0');
+INSERT INTO num_exp_add VALUES (1,0,'0');
+INSERT INTO num_exp_sub VALUES (1,0,'0');
+INSERT INTO num_exp_mul VALUES (1,0,'0');
+INSERT INTO num_exp_div VALUES (1,0,'NaN');
+INSERT INTO num_exp_add VALUES (1,1,'0');
+INSERT INTO num_exp_sub VALUES (1,1,'0');
+INSERT INTO num_exp_mul VALUES (1,1,'0');
+INSERT INTO num_exp_div VALUES (1,1,'NaN');
+INSERT INTO num_exp_add VALUES (1,2,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (1,2,'34338492.215397047');
+INSERT INTO num_exp_mul VALUES (1,2,'0');
+INSERT INTO num_exp_div VALUES (1,2,'0');
+INSERT INTO num_exp_add VALUES (1,3,'4.31');
+INSERT INTO num_exp_sub VALUES (1,3,'-4.31');
+INSERT INTO num_exp_mul VALUES (1,3,'0');
+INSERT INTO num_exp_div VALUES (1,3,'0');
+INSERT INTO num_exp_add VALUES (1,4,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (1,4,'-7799461.4119');
+INSERT INTO num_exp_mul VALUES (1,4,'0');
+INSERT INTO num_exp_div VALUES (1,4,'0');
+INSERT INTO num_exp_add VALUES (1,5,'16397.038491');
+INSERT INTO num_exp_sub VALUES (1,5,'-16397.038491');
+INSERT INTO num_exp_mul VALUES (1,5,'0');
+INSERT INTO num_exp_div VALUES (1,5,'0');
+INSERT INTO num_exp_add VALUES (1,6,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (1,6,'-93901.57763026');
+INSERT INTO num_exp_mul VALUES (1,6,'0');
+INSERT INTO num_exp_div VALUES (1,6,'0');
+INSERT INTO num_exp_add VALUES (1,7,'-83028485');
+INSERT INTO num_exp_sub VALUES (1,7,'83028485');
+INSERT INTO num_exp_mul VALUES (1,7,'0');
+INSERT INTO num_exp_div VALUES (1,7,'0');
+INSERT INTO num_exp_add VALUES (1,8,'74881');
+INSERT INTO num_exp_sub VALUES (1,8,'-74881');
+INSERT INTO num_exp_mul VALUES (1,8,'0');
+INSERT INTO num_exp_div VALUES (1,8,'0');
+INSERT INTO num_exp_add VALUES (1,9,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (1,9,'24926804.045047420');
+INSERT INTO num_exp_mul VALUES (1,9,'0');
+INSERT INTO num_exp_div VALUES (1,9,'0');
+INSERT INTO num_exp_add VALUES (2,0,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (2,0,'-34338492.215397047');
+INSERT INTO num_exp_mul VALUES (2,0,'0');
+INSERT INTO num_exp_div VALUES (2,0,'NaN');
+INSERT INTO num_exp_add VALUES (2,1,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (2,1,'-34338492.215397047');
+INSERT INTO num_exp_mul VALUES (2,1,'0');
+INSERT INTO num_exp_div VALUES (2,1,'NaN');
+INSERT INTO num_exp_add VALUES (2,2,'-68676984.430794094');
+INSERT INTO num_exp_sub VALUES (2,2,'0');
+INSERT INTO num_exp_mul VALUES (2,2,'1179132047626883.596862135856320209');
+INSERT INTO num_exp_div VALUES (2,2,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (2,3,'-34338487.905397047');
+INSERT INTO num_exp_sub VALUES (2,3,'-34338496.525397047');
+INSERT INTO num_exp_mul VALUES (2,3,'-147998901.44836127257');
+INSERT INTO num_exp_div VALUES (2,3,'-7967167.56737750510440835266');
+INSERT INTO num_exp_add VALUES (2,4,'-26539030.803497047');
+INSERT INTO num_exp_sub VALUES (2,4,'-42137953.627297047');
+INSERT INTO num_exp_mul VALUES (2,4,'-267821744976817.8111137106593');
+INSERT INTO num_exp_div VALUES (2,4,'-4.40267480046830116685');
+INSERT INTO num_exp_add VALUES (2,5,'-34322095.176906047');
+INSERT INTO num_exp_sub VALUES (2,5,'-34354889.253888047');
+INSERT INTO num_exp_mul VALUES (2,5,'-563049578578.769242506736077');
+INSERT INTO num_exp_div VALUES (2,5,'-2094.18866914563535496429');
+INSERT INTO num_exp_add VALUES (2,6,'-34244590.637766787');
+INSERT INTO num_exp_sub VALUES (2,6,'-34432393.793027307');
+INSERT INTO num_exp_mul VALUES (2,6,'-3224438592470.18449811926184222');
+INSERT INTO num_exp_div VALUES (2,6,'-365.68599891479766440940');
+INSERT INTO num_exp_add VALUES (2,7,'-117366977.215397047');
+INSERT INTO num_exp_sub VALUES (2,7,'48689992.784602953');
+INSERT INTO num_exp_mul VALUES (2,7,'2851072985828710.485883795');
+INSERT INTO num_exp_div VALUES (2,7,'.41357483778485235518');
+INSERT INTO num_exp_add VALUES (2,8,'-34263611.215397047');
+INSERT INTO num_exp_sub VALUES (2,8,'-34413373.215397047');
+INSERT INTO num_exp_mul VALUES (2,8,'-2571300635581.146276407');
+INSERT INTO num_exp_div VALUES (2,8,'-458.57416721727870888476');
+INSERT INTO num_exp_add VALUES (2,9,'-59265296.260444467');
+INSERT INTO num_exp_sub VALUES (2,9,'-9411688.170349627');
+INSERT INTO num_exp_mul VALUES (2,9,'855948866655588.453741509242968740');
+INSERT INTO num_exp_div VALUES (2,9,'1.37757299946438931811');
+INSERT INTO num_exp_add VALUES (3,0,'4.31');
+INSERT INTO num_exp_sub VALUES (3,0,'4.31');
+INSERT INTO num_exp_mul VALUES (3,0,'0');
+INSERT INTO num_exp_div VALUES (3,0,'NaN');
+INSERT INTO num_exp_add VALUES (3,1,'4.31');
+INSERT INTO num_exp_sub VALUES (3,1,'4.31');
+INSERT INTO num_exp_mul VALUES (3,1,'0');
+INSERT INTO num_exp_div VALUES (3,1,'NaN');
+INSERT INTO num_exp_add VALUES (3,2,'-34338487.905397047');
+INSERT INTO num_exp_sub VALUES (3,2,'34338496.525397047');
+INSERT INTO num_exp_mul VALUES (3,2,'-147998901.44836127257');
+INSERT INTO num_exp_div VALUES (3,2,'-.00000012551512084352');
+INSERT INTO num_exp_add VALUES (3,3,'8.62');
+INSERT INTO num_exp_sub VALUES (3,3,'0');
+INSERT INTO num_exp_mul VALUES (3,3,'18.5761');
+INSERT INTO num_exp_div VALUES (3,3,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (3,4,'7799465.7219');
+INSERT INTO num_exp_sub VALUES (3,4,'-7799457.1019');
+INSERT INTO num_exp_mul VALUES (3,4,'33615678.685289');
+INSERT INTO num_exp_div VALUES (3,4,'.00000055260225961552');
+INSERT INTO num_exp_add VALUES (3,5,'16401.348491');
+INSERT INTO num_exp_sub VALUES (3,5,'-16392.728491');
+INSERT INTO num_exp_mul VALUES (3,5,'70671.23589621');
+INSERT INTO num_exp_div VALUES (3,5,'.00026285234387695504');
+INSERT INTO num_exp_add VALUES (3,6,'93905.88763026');
+INSERT INTO num_exp_sub VALUES (3,6,'-93897.26763026');
+INSERT INTO num_exp_mul VALUES (3,6,'404715.7995864206');
+INSERT INTO num_exp_div VALUES (3,6,'.00004589912234457595');
+INSERT INTO num_exp_add VALUES (3,7,'-83028480.69');
+INSERT INTO num_exp_sub VALUES (3,7,'83028489.31');
+INSERT INTO num_exp_mul VALUES (3,7,'-357852770.35');
+INSERT INTO num_exp_div VALUES (3,7,'-.00000005190989574240');
+INSERT INTO num_exp_add VALUES (3,8,'74885.31');
+INSERT INTO num_exp_sub VALUES (3,8,'-74876.69');
+INSERT INTO num_exp_mul VALUES (3,8,'322737.11');
+INSERT INTO num_exp_div VALUES (3,8,'.00005755799201399553');
+INSERT INTO num_exp_add VALUES (3,9,'-24926799.735047420');
+INSERT INTO num_exp_sub VALUES (3,9,'24926808.355047420');
+INSERT INTO num_exp_mul VALUES (3,9,'-107434525.43415438020');
+INSERT INTO num_exp_div VALUES (3,9,'-.00000017290624149854');
+INSERT INTO num_exp_add VALUES (4,0,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (4,0,'7799461.4119');
+INSERT INTO num_exp_mul VALUES (4,0,'0');
+INSERT INTO num_exp_div VALUES (4,0,'NaN');
+INSERT INTO num_exp_add VALUES (4,1,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (4,1,'7799461.4119');
+INSERT INTO num_exp_mul VALUES (4,1,'0');
+INSERT INTO num_exp_div VALUES (4,1,'NaN');
+INSERT INTO num_exp_add VALUES (4,2,'-26539030.803497047');
+INSERT INTO num_exp_sub VALUES (4,2,'42137953.627297047');
+INSERT INTO num_exp_mul VALUES (4,2,'-267821744976817.8111137106593');
+INSERT INTO num_exp_div VALUES (4,2,'-.22713465002993920385');
+INSERT INTO num_exp_add VALUES (4,3,'7799465.7219');
+INSERT INTO num_exp_sub VALUES (4,3,'7799457.1019');
+INSERT INTO num_exp_mul VALUES (4,3,'33615678.685289');
+INSERT INTO num_exp_div VALUES (4,3,'1809619.81714617169373549883');
+INSERT INTO num_exp_add VALUES (4,4,'15598922.8238');
+INSERT INTO num_exp_sub VALUES (4,4,'0');
+INSERT INTO num_exp_mul VALUES (4,4,'60831598315717.14146161');
+INSERT INTO num_exp_div VALUES (4,4,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (4,5,'7815858.450391');
+INSERT INTO num_exp_sub VALUES (4,5,'7783064.373409');
+INSERT INTO num_exp_mul VALUES (4,5,'127888068979.9935054429');
+INSERT INTO num_exp_div VALUES (4,5,'475.66281046305802686061');
+INSERT INTO num_exp_add VALUES (4,6,'7893362.98953026');
+INSERT INTO num_exp_sub VALUES (4,6,'7705559.83426974');
+INSERT INTO num_exp_mul VALUES (4,6,'732381731243.745115764094');
+INSERT INTO num_exp_div VALUES (4,6,'83.05996138436129499606');
+INSERT INTO num_exp_add VALUES (4,7,'-75229023.5881');
+INSERT INTO num_exp_sub VALUES (4,7,'90827946.4119');
+INSERT INTO num_exp_mul VALUES (4,7,'-647577464846017.9715');
+INSERT INTO num_exp_div VALUES (4,7,'-.09393717604145131637');
+INSERT INTO num_exp_add VALUES (4,8,'7874342.4119');
+INSERT INTO num_exp_sub VALUES (4,8,'7724580.4119');
+INSERT INTO num_exp_mul VALUES (4,8,'584031469984.4839');
+INSERT INTO num_exp_div VALUES (4,8,'104.15808298366741897143');
+INSERT INTO num_exp_add VALUES (4,9,'-17127342.633147420');
+INSERT INTO num_exp_sub VALUES (4,9,'32726265.456947420');
+INSERT INTO num_exp_mul VALUES (4,9,'-194415646271340.1815956522980');
+INSERT INTO num_exp_div VALUES (4,9,'-.31289456112403769409');
+INSERT INTO num_exp_add VALUES (5,0,'16397.038491');
+INSERT INTO num_exp_sub VALUES (5,0,'16397.038491');
+INSERT INTO num_exp_mul VALUES (5,0,'0');
+INSERT INTO num_exp_div VALUES (5,0,'NaN');
+INSERT INTO num_exp_add VALUES (5,1,'16397.038491');
+INSERT INTO num_exp_sub VALUES (5,1,'16397.038491');
+INSERT INTO num_exp_mul VALUES (5,1,'0');
+INSERT INTO num_exp_div VALUES (5,1,'NaN');
+INSERT INTO num_exp_add VALUES (5,2,'-34322095.176906047');
+INSERT INTO num_exp_sub VALUES (5,2,'34354889.253888047');
+INSERT INTO num_exp_mul VALUES (5,2,'-563049578578.769242506736077');
+INSERT INTO num_exp_div VALUES (5,2,'-.00047751189505192446');
+INSERT INTO num_exp_add VALUES (5,3,'16401.348491');
+INSERT INTO num_exp_sub VALUES (5,3,'16392.728491');
+INSERT INTO num_exp_mul VALUES (5,3,'70671.23589621');
+INSERT INTO num_exp_div VALUES (5,3,'3804.41728329466357308584');
+INSERT INTO num_exp_add VALUES (5,4,'7815858.450391');
+INSERT INTO num_exp_sub VALUES (5,4,'-7783064.373409');
+INSERT INTO num_exp_mul VALUES (5,4,'127888068979.9935054429');
+INSERT INTO num_exp_div VALUES (5,4,'.00210232958726897192');
+INSERT INTO num_exp_add VALUES (5,5,'32794.076982');
+INSERT INTO num_exp_sub VALUES (5,5,'0');
+INSERT INTO num_exp_mul VALUES (5,5,'268862871.275335557081');
+INSERT INTO num_exp_div VALUES (5,5,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (5,6,'110298.61612126');
+INSERT INTO num_exp_sub VALUES (5,6,'-77504.53913926');
+INSERT INTO num_exp_mul VALUES (5,6,'1539707782.76899778633766');
+INSERT INTO num_exp_div VALUES (5,6,'.17461941433576102689');
+INSERT INTO num_exp_add VALUES (5,7,'-83012087.961509');
+INSERT INTO num_exp_sub VALUES (5,7,'83044882.038491');
+INSERT INTO num_exp_mul VALUES (5,7,'-1361421264394.416135');
+INSERT INTO num_exp_div VALUES (5,7,'-.00019748690453643710');
+INSERT INTO num_exp_add VALUES (5,8,'91278.038491');
+INSERT INTO num_exp_sub VALUES (5,8,'-58483.961509');
+INSERT INTO num_exp_mul VALUES (5,8,'1227826639.244571');
+INSERT INTO num_exp_div VALUES (5,8,'.21897461960978085228');
+INSERT INTO num_exp_add VALUES (5,9,'-24910407.006556420');
+INSERT INTO num_exp_sub VALUES (5,9,'24943201.083538420');
+INSERT INTO num_exp_mul VALUES (5,9,'-408725765384.257043660243220');
+INSERT INTO num_exp_div VALUES (5,9,'-.00065780749354660427');
+INSERT INTO num_exp_add VALUES (6,0,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (6,0,'93901.57763026');
+INSERT INTO num_exp_mul VALUES (6,0,'0');
+INSERT INTO num_exp_div VALUES (6,0,'NaN');
+INSERT INTO num_exp_add VALUES (6,1,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (6,1,'93901.57763026');
+INSERT INTO num_exp_mul VALUES (6,1,'0');
+INSERT INTO num_exp_div VALUES (6,1,'NaN');
+INSERT INTO num_exp_add VALUES (6,2,'-34244590.637766787');
+INSERT INTO num_exp_sub VALUES (6,2,'34432393.793027307');
+INSERT INTO num_exp_mul VALUES (6,2,'-3224438592470.18449811926184222');
+INSERT INTO num_exp_div VALUES (6,2,'-.00273458651128995823');
+INSERT INTO num_exp_add VALUES (6,3,'93905.88763026');
+INSERT INTO num_exp_sub VALUES (6,3,'93897.26763026');
+INSERT INTO num_exp_mul VALUES (6,3,'404715.7995864206');
+INSERT INTO num_exp_div VALUES (6,3,'21786.90896293735498839907');
+INSERT INTO num_exp_add VALUES (6,4,'7893362.98953026');
+INSERT INTO num_exp_sub VALUES (6,4,'-7705559.83426974');
+INSERT INTO num_exp_mul VALUES (6,4,'732381731243.745115764094');
+INSERT INTO num_exp_div VALUES (6,4,'.01203949512295682469');
+INSERT INTO num_exp_add VALUES (6,5,'110298.61612126');
+INSERT INTO num_exp_sub VALUES (6,5,'77504.53913926');
+INSERT INTO num_exp_mul VALUES (6,5,'1539707782.76899778633766');
+INSERT INTO num_exp_div VALUES (6,5,'5.72674008674192359679');
+INSERT INTO num_exp_add VALUES (6,6,'187803.15526052');
+INSERT INTO num_exp_sub VALUES (6,6,'0');
+INSERT INTO num_exp_mul VALUES (6,6,'8817506281.4517452372676676');
+INSERT INTO num_exp_div VALUES (6,6,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (6,7,'-82934583.42236974');
+INSERT INTO num_exp_sub VALUES (6,7,'83122386.57763026');
+INSERT INTO num_exp_mul VALUES (6,7,'-7796505729750.37795610');
+INSERT INTO num_exp_div VALUES (6,7,'-.00113095617281538980');
+INSERT INTO num_exp_add VALUES (6,8,'168782.57763026');
+INSERT INTO num_exp_sub VALUES (6,8,'19020.57763026');
+INSERT INTO num_exp_mul VALUES (6,8,'7031444034.53149906');
+INSERT INTO num_exp_div VALUES (6,8,'1.25401073209839612184');
+INSERT INTO num_exp_add VALUES (6,9,'-24832902.467417160');
+INSERT INTO num_exp_sub VALUES (6,9,'25020705.622677680');
+INSERT INTO num_exp_mul VALUES (6,9,'-2340666225110.29929521292692920');
+INSERT INTO num_exp_div VALUES (6,9,'-.00376709254265256789');
+INSERT INTO num_exp_add VALUES (7,0,'-83028485');
+INSERT INTO num_exp_sub VALUES (7,0,'-83028485');
+INSERT INTO num_exp_mul VALUES (7,0,'0');
+INSERT INTO num_exp_div VALUES (7,0,'NaN');
+INSERT INTO num_exp_add VALUES (7,1,'-83028485');
+INSERT INTO num_exp_sub VALUES (7,1,'-83028485');
+INSERT INTO num_exp_mul VALUES (7,1,'0');
+INSERT INTO num_exp_div VALUES (7,1,'NaN');
+INSERT INTO num_exp_add VALUES (7,2,'-117366977.215397047');
+INSERT INTO num_exp_sub VALUES (7,2,'-48689992.784602953');
+INSERT INTO num_exp_mul VALUES (7,2,'2851072985828710.485883795');
+INSERT INTO num_exp_div VALUES (7,2,'2.41794207151503385700');
+INSERT INTO num_exp_add VALUES (7,3,'-83028480.69');
+INSERT INTO num_exp_sub VALUES (7,3,'-83028489.31');
+INSERT INTO num_exp_mul VALUES (7,3,'-357852770.35');
+INSERT INTO num_exp_div VALUES (7,3,'-19264149.65197215777262180974');
+INSERT INTO num_exp_add VALUES (7,4,'-75229023.5881');
+INSERT INTO num_exp_sub VALUES (7,4,'-90827946.4119');
+INSERT INTO num_exp_mul VALUES (7,4,'-647577464846017.9715');
+INSERT INTO num_exp_div VALUES (7,4,'-10.64541262725136247686');
+INSERT INTO num_exp_add VALUES (7,5,'-83012087.961509');
+INSERT INTO num_exp_sub VALUES (7,5,'-83044882.038491');
+INSERT INTO num_exp_mul VALUES (7,5,'-1361421264394.416135');
+INSERT INTO num_exp_div VALUES (7,5,'-5063.62688881730941836574');
+INSERT INTO num_exp_add VALUES (7,6,'-82934583.42236974');
+INSERT INTO num_exp_sub VALUES (7,6,'-83122386.57763026');
+INSERT INTO num_exp_mul VALUES (7,6,'-7796505729750.37795610');
+INSERT INTO num_exp_div VALUES (7,6,'-884.20756174009028770294');
+INSERT INTO num_exp_add VALUES (7,7,'-166056970');
+INSERT INTO num_exp_sub VALUES (7,7,'0');
+INSERT INTO num_exp_mul VALUES (7,7,'6893729321395225');
+INSERT INTO num_exp_div VALUES (7,7,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (7,8,'-82953604');
+INSERT INTO num_exp_sub VALUES (7,8,'-83103366');
+INSERT INTO num_exp_mul VALUES (7,8,'-6217255985285');
+INSERT INTO num_exp_div VALUES (7,8,'-1108.80577182462841041118');
+INSERT INTO num_exp_add VALUES (7,9,'-107955289.045047420');
+INSERT INTO num_exp_sub VALUES (7,9,'-58101680.954952580');
+INSERT INTO num_exp_mul VALUES (7,9,'2069634775752159.035758700');
+INSERT INTO num_exp_div VALUES (7,9,'3.33089171198810413382');
+INSERT INTO num_exp_add VALUES (8,0,'74881');
+INSERT INTO num_exp_sub VALUES (8,0,'74881');
+INSERT INTO num_exp_mul VALUES (8,0,'0');
+INSERT INTO num_exp_div VALUES (8,0,'NaN');
+INSERT INTO num_exp_add VALUES (8,1,'74881');
+INSERT INTO num_exp_sub VALUES (8,1,'74881');
+INSERT INTO num_exp_mul VALUES (8,1,'0');
+INSERT INTO num_exp_div VALUES (8,1,'NaN');
+INSERT INTO num_exp_add VALUES (8,2,'-34263611.215397047');
+INSERT INTO num_exp_sub VALUES (8,2,'34413373.215397047');
+INSERT INTO num_exp_mul VALUES (8,2,'-2571300635581.146276407');
+INSERT INTO num_exp_div VALUES (8,2,'-.00218067233500788615');
+INSERT INTO num_exp_add VALUES (8,3,'74885.31');
+INSERT INTO num_exp_sub VALUES (8,3,'74876.69');
+INSERT INTO num_exp_mul VALUES (8,3,'322737.11');
+INSERT INTO num_exp_div VALUES (8,3,'17373.78190255220417633410');
+INSERT INTO num_exp_add VALUES (8,4,'7874342.4119');
+INSERT INTO num_exp_sub VALUES (8,4,'-7724580.4119');
+INSERT INTO num_exp_mul VALUES (8,4,'584031469984.4839');
+INSERT INTO num_exp_div VALUES (8,4,'.00960079113741758956');
+INSERT INTO num_exp_add VALUES (8,5,'91278.038491');
+INSERT INTO num_exp_sub VALUES (8,5,'58483.961509');
+INSERT INTO num_exp_mul VALUES (8,5,'1227826639.244571');
+INSERT INTO num_exp_div VALUES (8,5,'4.56673929509287019456');
+INSERT INTO num_exp_add VALUES (8,6,'168782.57763026');
+INSERT INTO num_exp_sub VALUES (8,6,'-19020.57763026');
+INSERT INTO num_exp_mul VALUES (8,6,'7031444034.53149906');
+INSERT INTO num_exp_div VALUES (8,6,'.79744134113322314424');
+INSERT INTO num_exp_add VALUES (8,7,'-82953604');
+INSERT INTO num_exp_sub VALUES (8,7,'83103366');
+INSERT INTO num_exp_mul VALUES (8,7,'-6217255985285');
+INSERT INTO num_exp_div VALUES (8,7,'-.00090187120721280172');
+INSERT INTO num_exp_add VALUES (8,8,'149762');
+INSERT INTO num_exp_sub VALUES (8,8,'0');
+INSERT INTO num_exp_mul VALUES (8,8,'5607164161');
+INSERT INTO num_exp_div VALUES (8,8,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (8,9,'-24851923.045047420');
+INSERT INTO num_exp_sub VALUES (8,9,'25001685.045047420');
+INSERT INTO num_exp_mul VALUES (8,9,'-1866544013697.195857020');
+INSERT INTO num_exp_div VALUES (8,9,'-.00300403532938582735');
+INSERT INTO num_exp_add VALUES (9,0,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (9,0,'-24926804.045047420');
+INSERT INTO num_exp_mul VALUES (9,0,'0');
+INSERT INTO num_exp_div VALUES (9,0,'NaN');
+INSERT INTO num_exp_add VALUES (9,1,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (9,1,'-24926804.045047420');
+INSERT INTO num_exp_mul VALUES (9,1,'0');
+INSERT INTO num_exp_div VALUES (9,1,'NaN');
+INSERT INTO num_exp_add VALUES (9,2,'-59265296.260444467');
+INSERT INTO num_exp_sub VALUES (9,2,'9411688.170349627');
+INSERT INTO num_exp_mul VALUES (9,2,'855948866655588.453741509242968740');
+INSERT INTO num_exp_div VALUES (9,2,'.72591434384152961526');
+INSERT INTO num_exp_add VALUES (9,3,'-24926799.735047420');
+INSERT INTO num_exp_sub VALUES (9,3,'-24926808.355047420');
+INSERT INTO num_exp_mul VALUES (9,3,'-107434525.43415438020');
+INSERT INTO num_exp_div VALUES (9,3,'-5783481.21694835730858468677');
+INSERT INTO num_exp_add VALUES (9,4,'-17127342.633147420');
+INSERT INTO num_exp_sub VALUES (9,4,'-32726265.456947420');
+INSERT INTO num_exp_mul VALUES (9,4,'-194415646271340.1815956522980');
+INSERT INTO num_exp_div VALUES (9,4,'-3.19596478892958416484');
+INSERT INTO num_exp_add VALUES (9,5,'-24910407.006556420');
+INSERT INTO num_exp_sub VALUES (9,5,'-24943201.083538420');
+INSERT INTO num_exp_mul VALUES (9,5,'-408725765384.257043660243220');
+INSERT INTO num_exp_div VALUES (9,5,'-1520.20159364322004505807');
+INSERT INTO num_exp_add VALUES (9,6,'-24832902.467417160');
+INSERT INTO num_exp_sub VALUES (9,6,'-25020705.622677680');
+INSERT INTO num_exp_mul VALUES (9,6,'-2340666225110.29929521292692920');
+INSERT INTO num_exp_div VALUES (9,6,'-265.45671195426965751280');
+INSERT INTO num_exp_add VALUES (9,7,'-107955289.045047420');
+INSERT INTO num_exp_sub VALUES (9,7,'58101680.954952580');
+INSERT INTO num_exp_mul VALUES (9,7,'2069634775752159.035758700');
+INSERT INTO num_exp_div VALUES (9,7,'.30021990699995814689');
+INSERT INTO num_exp_add VALUES (9,8,'-24851923.045047420');
+INSERT INTO num_exp_sub VALUES (9,8,'-25001685.045047420');
+INSERT INTO num_exp_mul VALUES (9,8,'-1866544013697.195857020');
+INSERT INTO num_exp_div VALUES (9,8,'-332.88556569820675471748');
+INSERT INTO num_exp_add VALUES (9,9,'-49853608.090094840');
+INSERT INTO num_exp_sub VALUES (9,9,'0');
+INSERT INTO num_exp_mul VALUES (9,9,'621345559900192.420120630048656400');
+INSERT INTO num_exp_div VALUES (9,9,'1.00000000000000000000');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_sqrt VALUES (0,'0');
+INSERT INTO num_exp_sqrt VALUES (1,'0');
+INSERT INTO num_exp_sqrt VALUES (2,'5859.90547836712524903505');
+INSERT INTO num_exp_sqrt VALUES (3,'2.07605394920266944396');
+INSERT INTO num_exp_sqrt VALUES (4,'2792.75158435189147418923');
+INSERT INTO num_exp_sqrt VALUES (5,'128.05092147657509145473');
+INSERT INTO num_exp_sqrt VALUES (6,'306.43364311096782703406');
+INSERT INTO num_exp_sqrt VALUES (7,'9111.99676251039939975230');
+INSERT INTO num_exp_sqrt VALUES (8,'273.64392922189960397542');
+INSERT INTO num_exp_sqrt VALUES (9,'4992.67503899937593364766');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_ln VALUES (0,'NaN');
+INSERT INTO num_exp_ln VALUES (1,'NaN');
+INSERT INTO num_exp_ln VALUES (2,'17.35177750493897715514');
+INSERT INTO num_exp_ln VALUES (3,'1.46093790411565641971');
+INSERT INTO num_exp_ln VALUES (4,'15.86956523951936572464');
+INSERT INTO num_exp_ln VALUES (5,'9.70485601768871834038');
+INSERT INTO num_exp_ln VALUES (6,'11.45000246622944403127');
+INSERT INTO num_exp_ln VALUES (7,'18.23469429965478772991');
+INSERT INTO num_exp_ln VALUES (8,'11.22365546576315513668');
+INSERT INTO num_exp_ln VALUES (9,'17.03145425013166006962');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_log10 VALUES (0,'NaN');
+INSERT INTO num_exp_log10 VALUES (1,'NaN');
+INSERT INTO num_exp_log10 VALUES (2,'7.53578122160797276459');
+INSERT INTO num_exp_log10 VALUES (3,'.63447727016073160075');
+INSERT INTO num_exp_log10 VALUES (4,'6.89206461372691743345');
+INSERT INTO num_exp_log10 VALUES (5,'4.21476541614777768626');
+INSERT INTO num_exp_log10 VALUES (6,'4.97267288886207207671');
+INSERT INTO num_exp_log10 VALUES (7,'7.91922711353275546914');
+INSERT INTO num_exp_log10 VALUES (8,'4.87437163556421004138');
+INSERT INTO num_exp_log10 VALUES (9,'7.39666659961986567059');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_power_10_ln VALUES (0,'NaN');
+INSERT INTO num_exp_power_10_ln VALUES (1,'NaN');
+INSERT INTO num_exp_power_10_ln VALUES (2,'224790267919917955.13261618583642653184');
+INSERT INTO num_exp_power_10_ln VALUES (3,'28.90266599445155957393');
+INSERT INTO num_exp_power_10_ln VALUES (4,'7405685069594999.07733999469386277636');
+INSERT INTO num_exp_power_10_ln VALUES (5,'5068226527.32127265408584640098');
+INSERT INTO num_exp_power_10_ln VALUES (6,'281839893606.99372343357047819067');
+INSERT INTO num_exp_power_10_ln VALUES (7,'1716699575118597095.42330819910640247627');
+INSERT INTO num_exp_power_10_ln VALUES (8,'167361463828.07491320069016125952');
+INSERT INTO num_exp_power_10_ln VALUES (9,'107511333880052007.04141124673540337457');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_data VALUES (0, '0');
+INSERT INTO num_data VALUES (1, '0');
+INSERT INTO num_data VALUES (2, '-34338492.215397047');
+INSERT INTO num_data VALUES (3, '4.31');
+INSERT INTO num_data VALUES (4, '7799461.4119');
+INSERT INTO num_data VALUES (5, '16397.038491');
+INSERT INTO num_data VALUES (6, '93901.57763026');
+INSERT INTO num_data VALUES (7, '-83028485');
+INSERT INTO num_data VALUES (8, '74881');
+INSERT INTO num_data VALUES (9, '-24926804.045047420');
+COMMIT TRANSACTION;
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_add t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 10) as expected
+ FROM num_result t1, num_exp_add t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 10);
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_sub t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 40)
+ FROM num_result t1, num_exp_sub t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 40);
+ id1 | id2 | result | round
+-----+-----+--------+-------
+(0 rows)
+
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_mul t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 30) as expected
+ FROM num_result t1, num_exp_mul t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 30);
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_div t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 80) as expected
+ FROM num_result t1, num_exp_div t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 80);
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+INSERT INTO num_result SELECT id, 0, SQRT(ABS(val))
+ FROM num_data;
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_sqrt t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+ id1 | result | expected
+-----+--------+----------
+(0 rows)
+
+INSERT INTO num_result SELECT id, 0, LN(ABS(val))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_ln t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+ id1 | result | expected
+-----+--------+----------
+(0 rows)
+
+INSERT INTO num_result SELECT id, 0, LOG(numeric '10', ABS(val))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_log10 t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+ id1 | result | expected
+-----+--------+----------
+(0 rows)
+
+INSERT INTO num_result SELECT id, 0, POWER(numeric '10', LN(ABS(round(val,200))))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_power_10_ln t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+ id1 | result | expected
+-----+--------+----------
+(0 rows)
+
+SELECT 'inf'::numeric / '0';
+ERROR: division by zero
+SELECT '-inf'::numeric / '0';
+ERROR: division by zero
+SELECT 'nan'::numeric / '0';
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT '0'::numeric / '0';
+ERROR: division by zero
+SELECT 'inf'::numeric % '0';
+ERROR: division by zero
+SELECT '-inf'::numeric % '0';
+ERROR: division by zero
+SELECT 'nan'::numeric % '0';
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT '0'::numeric % '0';
+ERROR: division by zero
+SELECT div('inf'::numeric, '0');
+ERROR: division by zero
+SELECT div('-inf'::numeric, '0');
+ERROR: division by zero
+SELECT div('nan'::numeric, '0');
+ div
+-----
+ NaN
+(1 row)
+
+SELECT div('0'::numeric, '0');
+ERROR: division by zero
+-- the large values fall into the numeric abbreviation code's maximal classes
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('1e340'),('-1e340'),
+ ('inf'),('-inf'),('nan'),
+ ('inf'),('-inf'),('nan'))
+SELECT substring(x::text, 1, 32)
+FROM v ORDER BY x;
+ substring
+----------------------------------
+ -Infinity
+ -Infinity
+ -1000000000000000000000000000000
+ -7.777
+ -1
+ 0
+ 1
+ 4.2
+ 10000000000000000000000000000000
+ Infinity
+ Infinity
+ NaN
+ NaN
+(13 rows)
+
+SELECT sqrt('-1'::numeric);
+ERROR: cannot take square root of a negative number
+SELECT sqrt('-inf'::numeric);
+ERROR: cannot take square root of a negative number
+SELECT ln('0'::numeric);
+ERROR: cannot take logarithm of zero
+SELECT ln('-1'::numeric);
+ERROR: cannot take logarithm of a negative number
+SELECT ln('-inf'::numeric);
+ERROR: cannot take logarithm of a negative number
+SELECT log('0'::numeric, '10');
+ERROR: cannot take logarithm of zero
+SELECT log('10'::numeric, '0');
+ERROR: cannot take logarithm of zero
+SELECT log('-inf'::numeric, '10');
+ERROR: cannot take logarithm of a negative number
+SELECT log('10'::numeric, '-inf');
+ERROR: cannot take logarithm of a negative number
+SELECT log('inf'::numeric, '0');
+ERROR: cannot take logarithm of zero
+SELECT log('inf'::numeric, '-inf');
+ERROR: cannot take logarithm of a negative number
+SELECT log('-inf'::numeric, 'inf');
+ERROR: cannot take logarithm of a negative number
+SELECT power('0'::numeric, '-1');
+ERROR: zero raised to a negative power is undefined
+SELECT power('0'::numeric, '-inf');
+ERROR: zero raised to a negative power is undefined
+SELECT power('-1'::numeric, 'inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power('-2'::numeric, '3');
+ power
+---------------------
+ -8.0000000000000000
+(1 row)
+
+SELECT power('-2'::numeric, '3.3');
+ERROR: a negative number raised to a non-integer power yields a complex result
+SELECT power('-2'::numeric, '-1');
+ power
+---------------------
+ -0.5000000000000000
+(1 row)
+
+SELECT power('-2'::numeric, '-1.5');
+ERROR: a negative number raised to a non-integer power yields a complex result
+SELECT power('-2'::numeric, 'inf');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power('-inf'::numeric, '2');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power('-inf'::numeric, '3');
+ power
+-----------
+ -Infinity
+(1 row)
+
+SELECT power('-inf'::numeric, '4.5');
+ERROR: a negative number raised to a non-integer power yields a complex result
+SELECT power('-inf'::numeric, '0');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power('-inf'::numeric, 'inf');
+ power
+----------
+ Infinity
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/numeric.sql b/ydb/library/yql/tests/postgresql/cases/numeric.sql
new file mode 100644
index 0000000000..fbc848306a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/numeric.sql
@@ -0,0 +1,582 @@
+--
+-- NUMERIC
+--
+CREATE TABLE num_data (id int4, val numeric(210,10));
+CREATE TABLE num_exp_add (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_sub (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_div (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_mul (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_sqrt (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_ln (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_log10 (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_power_10_ln (id int4, expected numeric(210,10));
+CREATE TABLE num_result (id1 int4, id2 int4, result numeric(210,10));
+-- ******************************
+-- * The following EXPECTED results are computed by bc(1)
+-- * with a scale of 200
+-- ******************************
+BEGIN TRANSACTION;
+INSERT INTO num_exp_add VALUES (0,0,'0');
+INSERT INTO num_exp_sub VALUES (0,0,'0');
+INSERT INTO num_exp_mul VALUES (0,0,'0');
+INSERT INTO num_exp_div VALUES (0,0,'NaN');
+INSERT INTO num_exp_add VALUES (0,1,'0');
+INSERT INTO num_exp_sub VALUES (0,1,'0');
+INSERT INTO num_exp_mul VALUES (0,1,'0');
+INSERT INTO num_exp_div VALUES (0,1,'NaN');
+INSERT INTO num_exp_add VALUES (0,2,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (0,2,'34338492.215397047');
+INSERT INTO num_exp_mul VALUES (0,2,'0');
+INSERT INTO num_exp_div VALUES (0,2,'0');
+INSERT INTO num_exp_add VALUES (0,3,'4.31');
+INSERT INTO num_exp_sub VALUES (0,3,'-4.31');
+INSERT INTO num_exp_mul VALUES (0,3,'0');
+INSERT INTO num_exp_div VALUES (0,3,'0');
+INSERT INTO num_exp_add VALUES (0,4,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (0,4,'-7799461.4119');
+INSERT INTO num_exp_mul VALUES (0,4,'0');
+INSERT INTO num_exp_div VALUES (0,4,'0');
+INSERT INTO num_exp_add VALUES (0,5,'16397.038491');
+INSERT INTO num_exp_sub VALUES (0,5,'-16397.038491');
+INSERT INTO num_exp_mul VALUES (0,5,'0');
+INSERT INTO num_exp_div VALUES (0,5,'0');
+INSERT INTO num_exp_add VALUES (0,6,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (0,6,'-93901.57763026');
+INSERT INTO num_exp_mul VALUES (0,6,'0');
+INSERT INTO num_exp_div VALUES (0,6,'0');
+INSERT INTO num_exp_add VALUES (0,7,'-83028485');
+INSERT INTO num_exp_sub VALUES (0,7,'83028485');
+INSERT INTO num_exp_mul VALUES (0,7,'0');
+INSERT INTO num_exp_div VALUES (0,7,'0');
+INSERT INTO num_exp_add VALUES (0,8,'74881');
+INSERT INTO num_exp_sub VALUES (0,8,'-74881');
+INSERT INTO num_exp_mul VALUES (0,8,'0');
+INSERT INTO num_exp_div VALUES (0,8,'0');
+INSERT INTO num_exp_add VALUES (0,9,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (0,9,'24926804.045047420');
+INSERT INTO num_exp_mul VALUES (0,9,'0');
+INSERT INTO num_exp_div VALUES (0,9,'0');
+INSERT INTO num_exp_add VALUES (1,0,'0');
+INSERT INTO num_exp_sub VALUES (1,0,'0');
+INSERT INTO num_exp_mul VALUES (1,0,'0');
+INSERT INTO num_exp_div VALUES (1,0,'NaN');
+INSERT INTO num_exp_add VALUES (1,1,'0');
+INSERT INTO num_exp_sub VALUES (1,1,'0');
+INSERT INTO num_exp_mul VALUES (1,1,'0');
+INSERT INTO num_exp_div VALUES (1,1,'NaN');
+INSERT INTO num_exp_add VALUES (1,2,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (1,2,'34338492.215397047');
+INSERT INTO num_exp_mul VALUES (1,2,'0');
+INSERT INTO num_exp_div VALUES (1,2,'0');
+INSERT INTO num_exp_add VALUES (1,3,'4.31');
+INSERT INTO num_exp_sub VALUES (1,3,'-4.31');
+INSERT INTO num_exp_mul VALUES (1,3,'0');
+INSERT INTO num_exp_div VALUES (1,3,'0');
+INSERT INTO num_exp_add VALUES (1,4,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (1,4,'-7799461.4119');
+INSERT INTO num_exp_mul VALUES (1,4,'0');
+INSERT INTO num_exp_div VALUES (1,4,'0');
+INSERT INTO num_exp_add VALUES (1,5,'16397.038491');
+INSERT INTO num_exp_sub VALUES (1,5,'-16397.038491');
+INSERT INTO num_exp_mul VALUES (1,5,'0');
+INSERT INTO num_exp_div VALUES (1,5,'0');
+INSERT INTO num_exp_add VALUES (1,6,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (1,6,'-93901.57763026');
+INSERT INTO num_exp_mul VALUES (1,6,'0');
+INSERT INTO num_exp_div VALUES (1,6,'0');
+INSERT INTO num_exp_add VALUES (1,7,'-83028485');
+INSERT INTO num_exp_sub VALUES (1,7,'83028485');
+INSERT INTO num_exp_mul VALUES (1,7,'0');
+INSERT INTO num_exp_div VALUES (1,7,'0');
+INSERT INTO num_exp_add VALUES (1,8,'74881');
+INSERT INTO num_exp_sub VALUES (1,8,'-74881');
+INSERT INTO num_exp_mul VALUES (1,8,'0');
+INSERT INTO num_exp_div VALUES (1,8,'0');
+INSERT INTO num_exp_add VALUES (1,9,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (1,9,'24926804.045047420');
+INSERT INTO num_exp_mul VALUES (1,9,'0');
+INSERT INTO num_exp_div VALUES (1,9,'0');
+INSERT INTO num_exp_add VALUES (2,0,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (2,0,'-34338492.215397047');
+INSERT INTO num_exp_mul VALUES (2,0,'0');
+INSERT INTO num_exp_div VALUES (2,0,'NaN');
+INSERT INTO num_exp_add VALUES (2,1,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (2,1,'-34338492.215397047');
+INSERT INTO num_exp_mul VALUES (2,1,'0');
+INSERT INTO num_exp_div VALUES (2,1,'NaN');
+INSERT INTO num_exp_add VALUES (2,2,'-68676984.430794094');
+INSERT INTO num_exp_sub VALUES (2,2,'0');
+INSERT INTO num_exp_mul VALUES (2,2,'1179132047626883.596862135856320209');
+INSERT INTO num_exp_div VALUES (2,2,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (2,3,'-34338487.905397047');
+INSERT INTO num_exp_sub VALUES (2,3,'-34338496.525397047');
+INSERT INTO num_exp_mul VALUES (2,3,'-147998901.44836127257');
+INSERT INTO num_exp_div VALUES (2,3,'-7967167.56737750510440835266');
+INSERT INTO num_exp_add VALUES (2,4,'-26539030.803497047');
+INSERT INTO num_exp_sub VALUES (2,4,'-42137953.627297047');
+INSERT INTO num_exp_mul VALUES (2,4,'-267821744976817.8111137106593');
+INSERT INTO num_exp_div VALUES (2,4,'-4.40267480046830116685');
+INSERT INTO num_exp_add VALUES (2,5,'-34322095.176906047');
+INSERT INTO num_exp_sub VALUES (2,5,'-34354889.253888047');
+INSERT INTO num_exp_mul VALUES (2,5,'-563049578578.769242506736077');
+INSERT INTO num_exp_div VALUES (2,5,'-2094.18866914563535496429');
+INSERT INTO num_exp_add VALUES (2,6,'-34244590.637766787');
+INSERT INTO num_exp_sub VALUES (2,6,'-34432393.793027307');
+INSERT INTO num_exp_mul VALUES (2,6,'-3224438592470.18449811926184222');
+INSERT INTO num_exp_div VALUES (2,6,'-365.68599891479766440940');
+INSERT INTO num_exp_add VALUES (2,7,'-117366977.215397047');
+INSERT INTO num_exp_sub VALUES (2,7,'48689992.784602953');
+INSERT INTO num_exp_mul VALUES (2,7,'2851072985828710.485883795');
+INSERT INTO num_exp_div VALUES (2,7,'.41357483778485235518');
+INSERT INTO num_exp_add VALUES (2,8,'-34263611.215397047');
+INSERT INTO num_exp_sub VALUES (2,8,'-34413373.215397047');
+INSERT INTO num_exp_mul VALUES (2,8,'-2571300635581.146276407');
+INSERT INTO num_exp_div VALUES (2,8,'-458.57416721727870888476');
+INSERT INTO num_exp_add VALUES (2,9,'-59265296.260444467');
+INSERT INTO num_exp_sub VALUES (2,9,'-9411688.170349627');
+INSERT INTO num_exp_mul VALUES (2,9,'855948866655588.453741509242968740');
+INSERT INTO num_exp_div VALUES (2,9,'1.37757299946438931811');
+INSERT INTO num_exp_add VALUES (3,0,'4.31');
+INSERT INTO num_exp_sub VALUES (3,0,'4.31');
+INSERT INTO num_exp_mul VALUES (3,0,'0');
+INSERT INTO num_exp_div VALUES (3,0,'NaN');
+INSERT INTO num_exp_add VALUES (3,1,'4.31');
+INSERT INTO num_exp_sub VALUES (3,1,'4.31');
+INSERT INTO num_exp_mul VALUES (3,1,'0');
+INSERT INTO num_exp_div VALUES (3,1,'NaN');
+INSERT INTO num_exp_add VALUES (3,2,'-34338487.905397047');
+INSERT INTO num_exp_sub VALUES (3,2,'34338496.525397047');
+INSERT INTO num_exp_mul VALUES (3,2,'-147998901.44836127257');
+INSERT INTO num_exp_div VALUES (3,2,'-.00000012551512084352');
+INSERT INTO num_exp_add VALUES (3,3,'8.62');
+INSERT INTO num_exp_sub VALUES (3,3,'0');
+INSERT INTO num_exp_mul VALUES (3,3,'18.5761');
+INSERT INTO num_exp_div VALUES (3,3,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (3,4,'7799465.7219');
+INSERT INTO num_exp_sub VALUES (3,4,'-7799457.1019');
+INSERT INTO num_exp_mul VALUES (3,4,'33615678.685289');
+INSERT INTO num_exp_div VALUES (3,4,'.00000055260225961552');
+INSERT INTO num_exp_add VALUES (3,5,'16401.348491');
+INSERT INTO num_exp_sub VALUES (3,5,'-16392.728491');
+INSERT INTO num_exp_mul VALUES (3,5,'70671.23589621');
+INSERT INTO num_exp_div VALUES (3,5,'.00026285234387695504');
+INSERT INTO num_exp_add VALUES (3,6,'93905.88763026');
+INSERT INTO num_exp_sub VALUES (3,6,'-93897.26763026');
+INSERT INTO num_exp_mul VALUES (3,6,'404715.7995864206');
+INSERT INTO num_exp_div VALUES (3,6,'.00004589912234457595');
+INSERT INTO num_exp_add VALUES (3,7,'-83028480.69');
+INSERT INTO num_exp_sub VALUES (3,7,'83028489.31');
+INSERT INTO num_exp_mul VALUES (3,7,'-357852770.35');
+INSERT INTO num_exp_div VALUES (3,7,'-.00000005190989574240');
+INSERT INTO num_exp_add VALUES (3,8,'74885.31');
+INSERT INTO num_exp_sub VALUES (3,8,'-74876.69');
+INSERT INTO num_exp_mul VALUES (3,8,'322737.11');
+INSERT INTO num_exp_div VALUES (3,8,'.00005755799201399553');
+INSERT INTO num_exp_add VALUES (3,9,'-24926799.735047420');
+INSERT INTO num_exp_sub VALUES (3,9,'24926808.355047420');
+INSERT INTO num_exp_mul VALUES (3,9,'-107434525.43415438020');
+INSERT INTO num_exp_div VALUES (3,9,'-.00000017290624149854');
+INSERT INTO num_exp_add VALUES (4,0,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (4,0,'7799461.4119');
+INSERT INTO num_exp_mul VALUES (4,0,'0');
+INSERT INTO num_exp_div VALUES (4,0,'NaN');
+INSERT INTO num_exp_add VALUES (4,1,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (4,1,'7799461.4119');
+INSERT INTO num_exp_mul VALUES (4,1,'0');
+INSERT INTO num_exp_div VALUES (4,1,'NaN');
+INSERT INTO num_exp_add VALUES (4,2,'-26539030.803497047');
+INSERT INTO num_exp_sub VALUES (4,2,'42137953.627297047');
+INSERT INTO num_exp_mul VALUES (4,2,'-267821744976817.8111137106593');
+INSERT INTO num_exp_div VALUES (4,2,'-.22713465002993920385');
+INSERT INTO num_exp_add VALUES (4,3,'7799465.7219');
+INSERT INTO num_exp_sub VALUES (4,3,'7799457.1019');
+INSERT INTO num_exp_mul VALUES (4,3,'33615678.685289');
+INSERT INTO num_exp_div VALUES (4,3,'1809619.81714617169373549883');
+INSERT INTO num_exp_add VALUES (4,4,'15598922.8238');
+INSERT INTO num_exp_sub VALUES (4,4,'0');
+INSERT INTO num_exp_mul VALUES (4,4,'60831598315717.14146161');
+INSERT INTO num_exp_div VALUES (4,4,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (4,5,'7815858.450391');
+INSERT INTO num_exp_sub VALUES (4,5,'7783064.373409');
+INSERT INTO num_exp_mul VALUES (4,5,'127888068979.9935054429');
+INSERT INTO num_exp_div VALUES (4,5,'475.66281046305802686061');
+INSERT INTO num_exp_add VALUES (4,6,'7893362.98953026');
+INSERT INTO num_exp_sub VALUES (4,6,'7705559.83426974');
+INSERT INTO num_exp_mul VALUES (4,6,'732381731243.745115764094');
+INSERT INTO num_exp_div VALUES (4,6,'83.05996138436129499606');
+INSERT INTO num_exp_add VALUES (4,7,'-75229023.5881');
+INSERT INTO num_exp_sub VALUES (4,7,'90827946.4119');
+INSERT INTO num_exp_mul VALUES (4,7,'-647577464846017.9715');
+INSERT INTO num_exp_div VALUES (4,7,'-.09393717604145131637');
+INSERT INTO num_exp_add VALUES (4,8,'7874342.4119');
+INSERT INTO num_exp_sub VALUES (4,8,'7724580.4119');
+INSERT INTO num_exp_mul VALUES (4,8,'584031469984.4839');
+INSERT INTO num_exp_div VALUES (4,8,'104.15808298366741897143');
+INSERT INTO num_exp_add VALUES (4,9,'-17127342.633147420');
+INSERT INTO num_exp_sub VALUES (4,9,'32726265.456947420');
+INSERT INTO num_exp_mul VALUES (4,9,'-194415646271340.1815956522980');
+INSERT INTO num_exp_div VALUES (4,9,'-.31289456112403769409');
+INSERT INTO num_exp_add VALUES (5,0,'16397.038491');
+INSERT INTO num_exp_sub VALUES (5,0,'16397.038491');
+INSERT INTO num_exp_mul VALUES (5,0,'0');
+INSERT INTO num_exp_div VALUES (5,0,'NaN');
+INSERT INTO num_exp_add VALUES (5,1,'16397.038491');
+INSERT INTO num_exp_sub VALUES (5,1,'16397.038491');
+INSERT INTO num_exp_mul VALUES (5,1,'0');
+INSERT INTO num_exp_div VALUES (5,1,'NaN');
+INSERT INTO num_exp_add VALUES (5,2,'-34322095.176906047');
+INSERT INTO num_exp_sub VALUES (5,2,'34354889.253888047');
+INSERT INTO num_exp_mul VALUES (5,2,'-563049578578.769242506736077');
+INSERT INTO num_exp_div VALUES (5,2,'-.00047751189505192446');
+INSERT INTO num_exp_add VALUES (5,3,'16401.348491');
+INSERT INTO num_exp_sub VALUES (5,3,'16392.728491');
+INSERT INTO num_exp_mul VALUES (5,3,'70671.23589621');
+INSERT INTO num_exp_div VALUES (5,3,'3804.41728329466357308584');
+INSERT INTO num_exp_add VALUES (5,4,'7815858.450391');
+INSERT INTO num_exp_sub VALUES (5,4,'-7783064.373409');
+INSERT INTO num_exp_mul VALUES (5,4,'127888068979.9935054429');
+INSERT INTO num_exp_div VALUES (5,4,'.00210232958726897192');
+INSERT INTO num_exp_add VALUES (5,5,'32794.076982');
+INSERT INTO num_exp_sub VALUES (5,5,'0');
+INSERT INTO num_exp_mul VALUES (5,5,'268862871.275335557081');
+INSERT INTO num_exp_div VALUES (5,5,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (5,6,'110298.61612126');
+INSERT INTO num_exp_sub VALUES (5,6,'-77504.53913926');
+INSERT INTO num_exp_mul VALUES (5,6,'1539707782.76899778633766');
+INSERT INTO num_exp_div VALUES (5,6,'.17461941433576102689');
+INSERT INTO num_exp_add VALUES (5,7,'-83012087.961509');
+INSERT INTO num_exp_sub VALUES (5,7,'83044882.038491');
+INSERT INTO num_exp_mul VALUES (5,7,'-1361421264394.416135');
+INSERT INTO num_exp_div VALUES (5,7,'-.00019748690453643710');
+INSERT INTO num_exp_add VALUES (5,8,'91278.038491');
+INSERT INTO num_exp_sub VALUES (5,8,'-58483.961509');
+INSERT INTO num_exp_mul VALUES (5,8,'1227826639.244571');
+INSERT INTO num_exp_div VALUES (5,8,'.21897461960978085228');
+INSERT INTO num_exp_add VALUES (5,9,'-24910407.006556420');
+INSERT INTO num_exp_sub VALUES (5,9,'24943201.083538420');
+INSERT INTO num_exp_mul VALUES (5,9,'-408725765384.257043660243220');
+INSERT INTO num_exp_div VALUES (5,9,'-.00065780749354660427');
+INSERT INTO num_exp_add VALUES (6,0,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (6,0,'93901.57763026');
+INSERT INTO num_exp_mul VALUES (6,0,'0');
+INSERT INTO num_exp_div VALUES (6,0,'NaN');
+INSERT INTO num_exp_add VALUES (6,1,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (6,1,'93901.57763026');
+INSERT INTO num_exp_mul VALUES (6,1,'0');
+INSERT INTO num_exp_div VALUES (6,1,'NaN');
+INSERT INTO num_exp_add VALUES (6,2,'-34244590.637766787');
+INSERT INTO num_exp_sub VALUES (6,2,'34432393.793027307');
+INSERT INTO num_exp_mul VALUES (6,2,'-3224438592470.18449811926184222');
+INSERT INTO num_exp_div VALUES (6,2,'-.00273458651128995823');
+INSERT INTO num_exp_add VALUES (6,3,'93905.88763026');
+INSERT INTO num_exp_sub VALUES (6,3,'93897.26763026');
+INSERT INTO num_exp_mul VALUES (6,3,'404715.7995864206');
+INSERT INTO num_exp_div VALUES (6,3,'21786.90896293735498839907');
+INSERT INTO num_exp_add VALUES (6,4,'7893362.98953026');
+INSERT INTO num_exp_sub VALUES (6,4,'-7705559.83426974');
+INSERT INTO num_exp_mul VALUES (6,4,'732381731243.745115764094');
+INSERT INTO num_exp_div VALUES (6,4,'.01203949512295682469');
+INSERT INTO num_exp_add VALUES (6,5,'110298.61612126');
+INSERT INTO num_exp_sub VALUES (6,5,'77504.53913926');
+INSERT INTO num_exp_mul VALUES (6,5,'1539707782.76899778633766');
+INSERT INTO num_exp_div VALUES (6,5,'5.72674008674192359679');
+INSERT INTO num_exp_add VALUES (6,6,'187803.15526052');
+INSERT INTO num_exp_sub VALUES (6,6,'0');
+INSERT INTO num_exp_mul VALUES (6,6,'8817506281.4517452372676676');
+INSERT INTO num_exp_div VALUES (6,6,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (6,7,'-82934583.42236974');
+INSERT INTO num_exp_sub VALUES (6,7,'83122386.57763026');
+INSERT INTO num_exp_mul VALUES (6,7,'-7796505729750.37795610');
+INSERT INTO num_exp_div VALUES (6,7,'-.00113095617281538980');
+INSERT INTO num_exp_add VALUES (6,8,'168782.57763026');
+INSERT INTO num_exp_sub VALUES (6,8,'19020.57763026');
+INSERT INTO num_exp_mul VALUES (6,8,'7031444034.53149906');
+INSERT INTO num_exp_div VALUES (6,8,'1.25401073209839612184');
+INSERT INTO num_exp_add VALUES (6,9,'-24832902.467417160');
+INSERT INTO num_exp_sub VALUES (6,9,'25020705.622677680');
+INSERT INTO num_exp_mul VALUES (6,9,'-2340666225110.29929521292692920');
+INSERT INTO num_exp_div VALUES (6,9,'-.00376709254265256789');
+INSERT INTO num_exp_add VALUES (7,0,'-83028485');
+INSERT INTO num_exp_sub VALUES (7,0,'-83028485');
+INSERT INTO num_exp_mul VALUES (7,0,'0');
+INSERT INTO num_exp_div VALUES (7,0,'NaN');
+INSERT INTO num_exp_add VALUES (7,1,'-83028485');
+INSERT INTO num_exp_sub VALUES (7,1,'-83028485');
+INSERT INTO num_exp_mul VALUES (7,1,'0');
+INSERT INTO num_exp_div VALUES (7,1,'NaN');
+INSERT INTO num_exp_add VALUES (7,2,'-117366977.215397047');
+INSERT INTO num_exp_sub VALUES (7,2,'-48689992.784602953');
+INSERT INTO num_exp_mul VALUES (7,2,'2851072985828710.485883795');
+INSERT INTO num_exp_div VALUES (7,2,'2.41794207151503385700');
+INSERT INTO num_exp_add VALUES (7,3,'-83028480.69');
+INSERT INTO num_exp_sub VALUES (7,3,'-83028489.31');
+INSERT INTO num_exp_mul VALUES (7,3,'-357852770.35');
+INSERT INTO num_exp_div VALUES (7,3,'-19264149.65197215777262180974');
+INSERT INTO num_exp_add VALUES (7,4,'-75229023.5881');
+INSERT INTO num_exp_sub VALUES (7,4,'-90827946.4119');
+INSERT INTO num_exp_mul VALUES (7,4,'-647577464846017.9715');
+INSERT INTO num_exp_div VALUES (7,4,'-10.64541262725136247686');
+INSERT INTO num_exp_add VALUES (7,5,'-83012087.961509');
+INSERT INTO num_exp_sub VALUES (7,5,'-83044882.038491');
+INSERT INTO num_exp_mul VALUES (7,5,'-1361421264394.416135');
+INSERT INTO num_exp_div VALUES (7,5,'-5063.62688881730941836574');
+INSERT INTO num_exp_add VALUES (7,6,'-82934583.42236974');
+INSERT INTO num_exp_sub VALUES (7,6,'-83122386.57763026');
+INSERT INTO num_exp_mul VALUES (7,6,'-7796505729750.37795610');
+INSERT INTO num_exp_div VALUES (7,6,'-884.20756174009028770294');
+INSERT INTO num_exp_add VALUES (7,7,'-166056970');
+INSERT INTO num_exp_sub VALUES (7,7,'0');
+INSERT INTO num_exp_mul VALUES (7,7,'6893729321395225');
+INSERT INTO num_exp_div VALUES (7,7,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (7,8,'-82953604');
+INSERT INTO num_exp_sub VALUES (7,8,'-83103366');
+INSERT INTO num_exp_mul VALUES (7,8,'-6217255985285');
+INSERT INTO num_exp_div VALUES (7,8,'-1108.80577182462841041118');
+INSERT INTO num_exp_add VALUES (7,9,'-107955289.045047420');
+INSERT INTO num_exp_sub VALUES (7,9,'-58101680.954952580');
+INSERT INTO num_exp_mul VALUES (7,9,'2069634775752159.035758700');
+INSERT INTO num_exp_div VALUES (7,9,'3.33089171198810413382');
+INSERT INTO num_exp_add VALUES (8,0,'74881');
+INSERT INTO num_exp_sub VALUES (8,0,'74881');
+INSERT INTO num_exp_mul VALUES (8,0,'0');
+INSERT INTO num_exp_div VALUES (8,0,'NaN');
+INSERT INTO num_exp_add VALUES (8,1,'74881');
+INSERT INTO num_exp_sub VALUES (8,1,'74881');
+INSERT INTO num_exp_mul VALUES (8,1,'0');
+INSERT INTO num_exp_div VALUES (8,1,'NaN');
+INSERT INTO num_exp_add VALUES (8,2,'-34263611.215397047');
+INSERT INTO num_exp_sub VALUES (8,2,'34413373.215397047');
+INSERT INTO num_exp_mul VALUES (8,2,'-2571300635581.146276407');
+INSERT INTO num_exp_div VALUES (8,2,'-.00218067233500788615');
+INSERT INTO num_exp_add VALUES (8,3,'74885.31');
+INSERT INTO num_exp_sub VALUES (8,3,'74876.69');
+INSERT INTO num_exp_mul VALUES (8,3,'322737.11');
+INSERT INTO num_exp_div VALUES (8,3,'17373.78190255220417633410');
+INSERT INTO num_exp_add VALUES (8,4,'7874342.4119');
+INSERT INTO num_exp_sub VALUES (8,4,'-7724580.4119');
+INSERT INTO num_exp_mul VALUES (8,4,'584031469984.4839');
+INSERT INTO num_exp_div VALUES (8,4,'.00960079113741758956');
+INSERT INTO num_exp_add VALUES (8,5,'91278.038491');
+INSERT INTO num_exp_sub VALUES (8,5,'58483.961509');
+INSERT INTO num_exp_mul VALUES (8,5,'1227826639.244571');
+INSERT INTO num_exp_div VALUES (8,5,'4.56673929509287019456');
+INSERT INTO num_exp_add VALUES (8,6,'168782.57763026');
+INSERT INTO num_exp_sub VALUES (8,6,'-19020.57763026');
+INSERT INTO num_exp_mul VALUES (8,6,'7031444034.53149906');
+INSERT INTO num_exp_div VALUES (8,6,'.79744134113322314424');
+INSERT INTO num_exp_add VALUES (8,7,'-82953604');
+INSERT INTO num_exp_sub VALUES (8,7,'83103366');
+INSERT INTO num_exp_mul VALUES (8,7,'-6217255985285');
+INSERT INTO num_exp_div VALUES (8,7,'-.00090187120721280172');
+INSERT INTO num_exp_add VALUES (8,8,'149762');
+INSERT INTO num_exp_sub VALUES (8,8,'0');
+INSERT INTO num_exp_mul VALUES (8,8,'5607164161');
+INSERT INTO num_exp_div VALUES (8,8,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (8,9,'-24851923.045047420');
+INSERT INTO num_exp_sub VALUES (8,9,'25001685.045047420');
+INSERT INTO num_exp_mul VALUES (8,9,'-1866544013697.195857020');
+INSERT INTO num_exp_div VALUES (8,9,'-.00300403532938582735');
+INSERT INTO num_exp_add VALUES (9,0,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (9,0,'-24926804.045047420');
+INSERT INTO num_exp_mul VALUES (9,0,'0');
+INSERT INTO num_exp_div VALUES (9,0,'NaN');
+INSERT INTO num_exp_add VALUES (9,1,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (9,1,'-24926804.045047420');
+INSERT INTO num_exp_mul VALUES (9,1,'0');
+INSERT INTO num_exp_div VALUES (9,1,'NaN');
+INSERT INTO num_exp_add VALUES (9,2,'-59265296.260444467');
+INSERT INTO num_exp_sub VALUES (9,2,'9411688.170349627');
+INSERT INTO num_exp_mul VALUES (9,2,'855948866655588.453741509242968740');
+INSERT INTO num_exp_div VALUES (9,2,'.72591434384152961526');
+INSERT INTO num_exp_add VALUES (9,3,'-24926799.735047420');
+INSERT INTO num_exp_sub VALUES (9,3,'-24926808.355047420');
+INSERT INTO num_exp_mul VALUES (9,3,'-107434525.43415438020');
+INSERT INTO num_exp_div VALUES (9,3,'-5783481.21694835730858468677');
+INSERT INTO num_exp_add VALUES (9,4,'-17127342.633147420');
+INSERT INTO num_exp_sub VALUES (9,4,'-32726265.456947420');
+INSERT INTO num_exp_mul VALUES (9,4,'-194415646271340.1815956522980');
+INSERT INTO num_exp_div VALUES (9,4,'-3.19596478892958416484');
+INSERT INTO num_exp_add VALUES (9,5,'-24910407.006556420');
+INSERT INTO num_exp_sub VALUES (9,5,'-24943201.083538420');
+INSERT INTO num_exp_mul VALUES (9,5,'-408725765384.257043660243220');
+INSERT INTO num_exp_div VALUES (9,5,'-1520.20159364322004505807');
+INSERT INTO num_exp_add VALUES (9,6,'-24832902.467417160');
+INSERT INTO num_exp_sub VALUES (9,6,'-25020705.622677680');
+INSERT INTO num_exp_mul VALUES (9,6,'-2340666225110.29929521292692920');
+INSERT INTO num_exp_div VALUES (9,6,'-265.45671195426965751280');
+INSERT INTO num_exp_add VALUES (9,7,'-107955289.045047420');
+INSERT INTO num_exp_sub VALUES (9,7,'58101680.954952580');
+INSERT INTO num_exp_mul VALUES (9,7,'2069634775752159.035758700');
+INSERT INTO num_exp_div VALUES (9,7,'.30021990699995814689');
+INSERT INTO num_exp_add VALUES (9,8,'-24851923.045047420');
+INSERT INTO num_exp_sub VALUES (9,8,'-25001685.045047420');
+INSERT INTO num_exp_mul VALUES (9,8,'-1866544013697.195857020');
+INSERT INTO num_exp_div VALUES (9,8,'-332.88556569820675471748');
+INSERT INTO num_exp_add VALUES (9,9,'-49853608.090094840');
+INSERT INTO num_exp_sub VALUES (9,9,'0');
+INSERT INTO num_exp_mul VALUES (9,9,'621345559900192.420120630048656400');
+INSERT INTO num_exp_div VALUES (9,9,'1.00000000000000000000');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_sqrt VALUES (0,'0');
+INSERT INTO num_exp_sqrt VALUES (1,'0');
+INSERT INTO num_exp_sqrt VALUES (2,'5859.90547836712524903505');
+INSERT INTO num_exp_sqrt VALUES (3,'2.07605394920266944396');
+INSERT INTO num_exp_sqrt VALUES (4,'2792.75158435189147418923');
+INSERT INTO num_exp_sqrt VALUES (5,'128.05092147657509145473');
+INSERT INTO num_exp_sqrt VALUES (6,'306.43364311096782703406');
+INSERT INTO num_exp_sqrt VALUES (7,'9111.99676251039939975230');
+INSERT INTO num_exp_sqrt VALUES (8,'273.64392922189960397542');
+INSERT INTO num_exp_sqrt VALUES (9,'4992.67503899937593364766');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_ln VALUES (0,'NaN');
+INSERT INTO num_exp_ln VALUES (1,'NaN');
+INSERT INTO num_exp_ln VALUES (2,'17.35177750493897715514');
+INSERT INTO num_exp_ln VALUES (3,'1.46093790411565641971');
+INSERT INTO num_exp_ln VALUES (4,'15.86956523951936572464');
+INSERT INTO num_exp_ln VALUES (5,'9.70485601768871834038');
+INSERT INTO num_exp_ln VALUES (6,'11.45000246622944403127');
+INSERT INTO num_exp_ln VALUES (7,'18.23469429965478772991');
+INSERT INTO num_exp_ln VALUES (8,'11.22365546576315513668');
+INSERT INTO num_exp_ln VALUES (9,'17.03145425013166006962');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_log10 VALUES (0,'NaN');
+INSERT INTO num_exp_log10 VALUES (1,'NaN');
+INSERT INTO num_exp_log10 VALUES (2,'7.53578122160797276459');
+INSERT INTO num_exp_log10 VALUES (3,'.63447727016073160075');
+INSERT INTO num_exp_log10 VALUES (4,'6.89206461372691743345');
+INSERT INTO num_exp_log10 VALUES (5,'4.21476541614777768626');
+INSERT INTO num_exp_log10 VALUES (6,'4.97267288886207207671');
+INSERT INTO num_exp_log10 VALUES (7,'7.91922711353275546914');
+INSERT INTO num_exp_log10 VALUES (8,'4.87437163556421004138');
+INSERT INTO num_exp_log10 VALUES (9,'7.39666659961986567059');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_power_10_ln VALUES (0,'NaN');
+INSERT INTO num_exp_power_10_ln VALUES (1,'NaN');
+INSERT INTO num_exp_power_10_ln VALUES (2,'224790267919917955.13261618583642653184');
+INSERT INTO num_exp_power_10_ln VALUES (3,'28.90266599445155957393');
+INSERT INTO num_exp_power_10_ln VALUES (4,'7405685069594999.07733999469386277636');
+INSERT INTO num_exp_power_10_ln VALUES (5,'5068226527.32127265408584640098');
+INSERT INTO num_exp_power_10_ln VALUES (6,'281839893606.99372343357047819067');
+INSERT INTO num_exp_power_10_ln VALUES (7,'1716699575118597095.42330819910640247627');
+INSERT INTO num_exp_power_10_ln VALUES (8,'167361463828.07491320069016125952');
+INSERT INTO num_exp_power_10_ln VALUES (9,'107511333880052007.04141124673540337457');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_data VALUES (0, '0');
+INSERT INTO num_data VALUES (1, '0');
+INSERT INTO num_data VALUES (2, '-34338492.215397047');
+INSERT INTO num_data VALUES (3, '4.31');
+INSERT INTO num_data VALUES (4, '7799461.4119');
+INSERT INTO num_data VALUES (5, '16397.038491');
+INSERT INTO num_data VALUES (6, '93901.57763026');
+INSERT INTO num_data VALUES (7, '-83028485');
+INSERT INTO num_data VALUES (8, '74881');
+INSERT INTO num_data VALUES (9, '-24926804.045047420');
+COMMIT TRANSACTION;
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_add t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 10) as expected
+ FROM num_result t1, num_exp_add t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 10);
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_sub t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 40)
+ FROM num_result t1, num_exp_sub t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 40);
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_mul t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 30) as expected
+ FROM num_result t1, num_exp_mul t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 30);
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_div t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 80) as expected
+ FROM num_result t1, num_exp_div t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 80);
+INSERT INTO num_result SELECT id, 0, SQRT(ABS(val))
+ FROM num_data;
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_sqrt t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+INSERT INTO num_result SELECT id, 0, LN(ABS(val))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_ln t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+INSERT INTO num_result SELECT id, 0, LOG(numeric '10', ABS(val))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_log10 t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+INSERT INTO num_result SELECT id, 0, POWER(numeric '10', LN(ABS(round(val,200))))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_power_10_ln t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+SELECT 'inf'::numeric / '0';
+SELECT '-inf'::numeric / '0';
+SELECT 'nan'::numeric / '0';
+SELECT '0'::numeric / '0';
+SELECT 'inf'::numeric % '0';
+SELECT '-inf'::numeric % '0';
+SELECT 'nan'::numeric % '0';
+SELECT '0'::numeric % '0';
+SELECT div('inf'::numeric, '0');
+SELECT div('-inf'::numeric, '0');
+SELECT div('nan'::numeric, '0');
+SELECT div('0'::numeric, '0');
+-- the large values fall into the numeric abbreviation code's maximal classes
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('1e340'),('-1e340'),
+ ('inf'),('-inf'),('nan'),
+ ('inf'),('-inf'),('nan'))
+SELECT substring(x::text, 1, 32)
+FROM v ORDER BY x;
+SELECT sqrt('-1'::numeric);
+SELECT sqrt('-inf'::numeric);
+SELECT ln('0'::numeric);
+SELECT ln('-1'::numeric);
+SELECT ln('-inf'::numeric);
+SELECT log('0'::numeric, '10');
+SELECT log('10'::numeric, '0');
+SELECT log('-inf'::numeric, '10');
+SELECT log('10'::numeric, '-inf');
+SELECT log('inf'::numeric, '0');
+SELECT log('inf'::numeric, '-inf');
+SELECT log('-inf'::numeric, 'inf');
+SELECT power('0'::numeric, '-1');
+SELECT power('0'::numeric, '-inf');
+SELECT power('-1'::numeric, 'inf');
+SELECT power('-2'::numeric, '3');
+SELECT power('-2'::numeric, '3.3');
+SELECT power('-2'::numeric, '-1');
+SELECT power('-2'::numeric, '-1.5');
+SELECT power('-2'::numeric, 'inf');
+SELECT power('-inf'::numeric, '2');
+SELECT power('-inf'::numeric, '3');
+SELECT power('-inf'::numeric, '4.5');
+SELECT power('-inf'::numeric, '0');
+SELECT power('-inf'::numeric, 'inf');
diff --git a/ydb/library/yql/tests/postgresql/cases/numerology.out b/ydb/library/yql/tests/postgresql/cases/numerology.out
new file mode 100644
index 0000000000..6f4af2a006
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/numerology.out
@@ -0,0 +1,22 @@
+--
+-- NUMEROLOGY
+-- Test various combinations of numeric types and functions.
+--
+--
+-- Test implicit type conversions
+-- This fails for Postgres v6.1 (and earlier?)
+-- so let's try explicit conversions for now - tgl 97/05/07
+--
+CREATE TABLE TEMP_FLOAT (f1 FLOAT8);
+-- int4
+CREATE TABLE TEMP_INT4 (f1 INT4);
+-- int2
+CREATE TABLE TEMP_INT2 (f1 INT2);
+--
+-- Group-by combinations
+--
+CREATE TABLE TEMP_GROUP (f1 INT4, f2 INT4, f3 FLOAT8);
+DROP TABLE TEMP_INT2;
+DROP TABLE TEMP_INT4;
+DROP TABLE TEMP_FLOAT;
+DROP TABLE TEMP_GROUP;
diff --git a/ydb/library/yql/tests/postgresql/cases/numerology.sql b/ydb/library/yql/tests/postgresql/cases/numerology.sql
new file mode 100644
index 0000000000..6f4af2a006
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/numerology.sql
@@ -0,0 +1,22 @@
+--
+-- NUMEROLOGY
+-- Test various combinations of numeric types and functions.
+--
+--
+-- Test implicit type conversions
+-- This fails for Postgres v6.1 (and earlier?)
+-- so let's try explicit conversions for now - tgl 97/05/07
+--
+CREATE TABLE TEMP_FLOAT (f1 FLOAT8);
+-- int4
+CREATE TABLE TEMP_INT4 (f1 INT4);
+-- int2
+CREATE TABLE TEMP_INT2 (f1 INT2);
+--
+-- Group-by combinations
+--
+CREATE TABLE TEMP_GROUP (f1 INT4, f2 INT4, f3 FLOAT8);
+DROP TABLE TEMP_INT2;
+DROP TABLE TEMP_INT4;
+DROP TABLE TEMP_FLOAT;
+DROP TABLE TEMP_GROUP;
diff --git a/ydb/library/yql/tests/postgresql/cases/oid.out b/ydb/library/yql/tests/postgresql/cases/oid.out
new file mode 100644
index 0000000000..845af79e14
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/oid.out
@@ -0,0 +1,61 @@
+--
+-- OID
+--
+CREATE TABLE OID_TBL(f1 oid);
+INSERT INTO OID_TBL(f1) VALUES ('1234');
+INSERT INTO OID_TBL(f1) VALUES ('1235');
+INSERT INTO OID_TBL(f1) VALUES ('987');
+INSERT INTO OID_TBL(f1) VALUES ('-1040');
+INSERT INTO OID_TBL(f1) VALUES ('99999999');
+INSERT INTO OID_TBL(f1) VALUES ('5 ');
+INSERT INTO OID_TBL(f1) VALUES (' 10 ');
+-- leading/trailing hard tab is also allowed
+INSERT INTO OID_TBL(f1) VALUES (' 15 ');
+-- bad inputs
+INSERT INTO OID_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type oid: ""
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('');
+ ^
+INSERT INTO OID_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type oid: " "
+LINE 1: INSERT INTO OID_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('asdfasd');
+ERROR: invalid input syntax for type oid: "asdfasd"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('asdfasd');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('99asdfasd');
+ERROR: invalid input syntax for type oid: "99asdfasd"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('99asdfasd');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('5 d');
+ERROR: invalid input syntax for type oid: "5 d"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('5 d');
+ ^
+INSERT INTO OID_TBL(f1) VALUES (' 5d');
+ERROR: invalid input syntax for type oid: " 5d"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES (' 5d');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('5 5');
+ERROR: invalid input syntax for type oid: "5 5"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('5 5');
+ ^
+INSERT INTO OID_TBL(f1) VALUES (' - 500');
+ERROR: invalid input syntax for type oid: " - 500"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES (' - 500');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('32958209582039852935');
+ERROR: value "32958209582039852935" is out of range for type oid
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('32958209582039852935');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('-23582358720398502385');
+ERROR: value "-23582358720398502385" is out of range for type oid
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('-23582358720398502385');
+ ^
+SELECT o.* FROM OID_TBL o WHERE o.f1 = 1234;
+ f1
+------
+ 1234
+(1 row)
+
+DROP TABLE OID_TBL;
diff --git a/ydb/library/yql/tests/postgresql/cases/oid.sql b/ydb/library/yql/tests/postgresql/cases/oid.sql
new file mode 100644
index 0000000000..2b149953b1
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/oid.sql
@@ -0,0 +1,26 @@
+--
+-- OID
+--
+CREATE TABLE OID_TBL(f1 oid);
+INSERT INTO OID_TBL(f1) VALUES ('1234');
+INSERT INTO OID_TBL(f1) VALUES ('1235');
+INSERT INTO OID_TBL(f1) VALUES ('987');
+INSERT INTO OID_TBL(f1) VALUES ('-1040');
+INSERT INTO OID_TBL(f1) VALUES ('99999999');
+INSERT INTO OID_TBL(f1) VALUES ('5 ');
+INSERT INTO OID_TBL(f1) VALUES (' 10 ');
+-- leading/trailing hard tab is also allowed
+INSERT INTO OID_TBL(f1) VALUES (' 15 ');
+-- bad inputs
+INSERT INTO OID_TBL(f1) VALUES ('');
+INSERT INTO OID_TBL(f1) VALUES (' ');
+INSERT INTO OID_TBL(f1) VALUES ('asdfasd');
+INSERT INTO OID_TBL(f1) VALUES ('99asdfasd');
+INSERT INTO OID_TBL(f1) VALUES ('5 d');
+INSERT INTO OID_TBL(f1) VALUES (' 5d');
+INSERT INTO OID_TBL(f1) VALUES ('5 5');
+INSERT INTO OID_TBL(f1) VALUES (' - 500');
+INSERT INTO OID_TBL(f1) VALUES ('32958209582039852935');
+INSERT INTO OID_TBL(f1) VALUES ('-23582358720398502385');
+SELECT o.* FROM OID_TBL o WHERE o.f1 = 1234;
+DROP TABLE OID_TBL;
diff --git a/ydb/library/yql/tests/postgresql/cases/select.out b/ydb/library/yql/tests/postgresql/cases/select.out
new file mode 100644
index 0000000000..550b4ef9aa
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select.out
@@ -0,0 +1,16 @@
+-- VALUES is also legal as a standalone query or a set-operation member
+VALUES (1,2), (3,4+4), (7,77.7);
+ column1 | column2
+---------+---------
+ 1 | 2
+ 3 | 8
+ 7 | 77.7
+(3 rows)
+
+-- corner case: VALUES with no columns
+CREATE TEMP TABLE nocols();
+--
+-- Test ORDER BY options
+--
+CREATE TEMP TABLE foo (f1 int);
+INSERT INTO foo VALUES (42),(3),(10),(7),(null),(null),(1);
diff --git a/ydb/library/yql/tests/postgresql/cases/select.sql b/ydb/library/yql/tests/postgresql/cases/select.sql
new file mode 100644
index 0000000000..43f57c644e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select.sql
@@ -0,0 +1,9 @@
+-- VALUES is also legal as a standalone query or a set-operation member
+VALUES (1,2), (3,4+4), (7,77.7);
+-- corner case: VALUES with no columns
+CREATE TEMP TABLE nocols();
+--
+-- Test ORDER BY options
+--
+CREATE TEMP TABLE foo (f1 int);
+INSERT INTO foo VALUES (42),(3),(10),(7),(null),(null),(1);
diff --git a/ydb/library/yql/tests/postgresql/cases/select_distinct.out b/ydb/library/yql/tests/postgresql/cases/select_distinct.out
new file mode 100644
index 0000000000..3941704313
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select_distinct.out
@@ -0,0 +1,5 @@
+--
+-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
+-- very own regression file.
+--
+CREATE TEMP TABLE disttable (f1 integer);
diff --git a/ydb/library/yql/tests/postgresql/cases/select_distinct.sql b/ydb/library/yql/tests/postgresql/cases/select_distinct.sql
new file mode 100644
index 0000000000..3941704313
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select_distinct.sql
@@ -0,0 +1,5 @@
+--
+-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
+-- very own regression file.
+--
+CREATE TEMP TABLE disttable (f1 integer);
diff --git a/ydb/library/yql/tests/postgresql/cases/select_having.out b/ydb/library/yql/tests/postgresql/cases/select_having.out
new file mode 100644
index 0000000000..9d39b485f2
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select_having.out
@@ -0,0 +1,15 @@
+--
+-- SELECT_HAVING
+--
+-- load test data
+CREATE TABLE test_having (a int, b int, c char(8), d char);
+INSERT INTO test_having VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_having VALUES (1, 2, 'AAAA', 'b');
+INSERT INTO test_having VALUES (2, 2, 'AAAA', 'c');
+INSERT INTO test_having VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_having VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_having VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_having VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_having VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_having VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j');
diff --git a/ydb/library/yql/tests/postgresql/cases/select_having.sql b/ydb/library/yql/tests/postgresql/cases/select_having.sql
new file mode 100644
index 0000000000..9d39b485f2
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select_having.sql
@@ -0,0 +1,15 @@
+--
+-- SELECT_HAVING
+--
+-- load test data
+CREATE TABLE test_having (a int, b int, c char(8), d char);
+INSERT INTO test_having VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_having VALUES (1, 2, 'AAAA', 'b');
+INSERT INTO test_having VALUES (2, 2, 'AAAA', 'c');
+INSERT INTO test_having VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_having VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_having VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_having VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_having VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_having VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j');
diff --git a/ydb/library/yql/tests/postgresql/cases/select_implicit.out b/ydb/library/yql/tests/postgresql/cases/select_implicit.out
new file mode 100644
index 0000000000..d9faaa22d1
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select_implicit.out
@@ -0,0 +1,19 @@
+--
+-- SELECT_IMPLICIT
+-- Test cases for queries with ordering terms missing from the target list.
+-- This used to be called "junkfilter.sql".
+-- The parser uses the term "resjunk" to handle these cases.
+-- - thomas 1998-07-09
+--
+-- load test data
+CREATE TABLE test_missing_target (a int, b int, c char(8), d char);
+INSERT INTO test_missing_target VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_missing_target VALUES (1, 2, 'ABAB', 'b');
+INSERT INTO test_missing_target VALUES (2, 2, 'ABAB', 'c');
+INSERT INTO test_missing_target VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_missing_target VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_missing_target VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_missing_target VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_missing_target VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_missing_target VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_missing_target VALUES (9, 4, 'CCCC', 'j');
diff --git a/ydb/library/yql/tests/postgresql/cases/select_implicit.sql b/ydb/library/yql/tests/postgresql/cases/select_implicit.sql
new file mode 100644
index 0000000000..d9faaa22d1
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select_implicit.sql
@@ -0,0 +1,19 @@
+--
+-- SELECT_IMPLICIT
+-- Test cases for queries with ordering terms missing from the target list.
+-- This used to be called "junkfilter.sql".
+-- The parser uses the term "resjunk" to handle these cases.
+-- - thomas 1998-07-09
+--
+-- load test data
+CREATE TABLE test_missing_target (a int, b int, c char(8), d char);
+INSERT INTO test_missing_target VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_missing_target VALUES (1, 2, 'ABAB', 'b');
+INSERT INTO test_missing_target VALUES (2, 2, 'ABAB', 'c');
+INSERT INTO test_missing_target VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_missing_target VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_missing_target VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_missing_target VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_missing_target VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_missing_target VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_missing_target VALUES (9, 4, 'CCCC', 'j');
diff --git a/ydb/library/yql/tests/postgresql/cases/select_into.out b/ydb/library/yql/tests/postgresql/cases/select_into.out
new file mode 100644
index 0000000000..772dc0419b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select_into.out
@@ -0,0 +1,4 @@
+-- Tests for WITH NO DATA and column name consistency
+CREATE TABLE ctas_base (i int, j int);
+INSERT INTO ctas_base VALUES (1, 2);
+DROP TABLE ctas_base;
diff --git a/ydb/library/yql/tests/postgresql/cases/select_into.sql b/ydb/library/yql/tests/postgresql/cases/select_into.sql
new file mode 100644
index 0000000000..772dc0419b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/select_into.sql
@@ -0,0 +1,4 @@
+-- Tests for WITH NO DATA and column name consistency
+CREATE TABLE ctas_base (i int, j int);
+INSERT INTO ctas_base VALUES (1, 2);
+DROP TABLE ctas_base;
diff --git a/ydb/library/yql/tests/postgresql/cases/strings.out b/ydb/library/yql/tests/postgresql/cases/strings.out
new file mode 100644
index 0000000000..1de62436df
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/strings.out
@@ -0,0 +1,152 @@
+--
+-- STRINGS
+-- Test various data entry syntaxes.
+--
+-- SQL string continuation syntax
+-- E021-03 character string literals
+SELECT 'first line'
+' - next line'
+ ' - third line'
+ AS "Three lines to one";
+ Three lines to one
+-------------------------------------
+ first line - next line - third line
+(1 row)
+
+-- illegal string continuation syntax
+SELECT 'first line'
+' - next line' /* this comment is not allowed here */
+' - third line'
+ AS "Illegal comment within continuation";
+ERROR: syntax error at or near "' - third line'"
+LINE 3: ' - third line'
+ ^
+-- Unicode escapes
+SET standard_conforming_strings TO on;
+SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
+ data
+------
+ data
+(1 row)
+
+SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*';
+ dat\+000061
+-------------
+ dat\+000061
+(1 row)
+
+SELECT U&'a\\b' AS "a\b";
+ a\b
+-----
+ a\b
+(1 row)
+
+SELECT U&' \' UESCAPE '!' AS "tricky";
+ tricky
+--------
+ \
+(1 row)
+
+SELECT 'tricky' AS U&"\" UESCAPE '!';
+ \
+--------
+ tricky
+(1 row)
+
+SELECT U&'wrong: \061';
+ERROR: invalid Unicode escape
+LINE 1: SELECT U&'wrong: \061';
+ ^
+HINT: Unicode escapes must be \XXXX or \+XXXXXX.
+SELECT U&'wrong: \+0061';
+ERROR: invalid Unicode escape
+LINE 1: SELECT U&'wrong: \+0061';
+ ^
+HINT: Unicode escapes must be \XXXX or \+XXXXXX.
+SELECT U&'wrong: +0061' UESCAPE +;
+ERROR: UESCAPE must be followed by a simple string literal at or near "+"
+LINE 1: SELECT U&'wrong: +0061' UESCAPE +;
+ ^
+SELECT U&'wrong: +0061' UESCAPE '+';
+ERROR: invalid Unicode escape character at or near "'+'"
+LINE 1: SELECT U&'wrong: +0061' UESCAPE '+';
+ ^
+SELECT U&'wrong: \db99';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \db99';
+ ^
+SELECT U&'wrong: \db99xy';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \db99xy';
+ ^
+SELECT U&'wrong: \db99\\';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \db99\\';
+ ^
+SELECT U&'wrong: \db99\0061';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \db99\0061';
+ ^
+SELECT U&'wrong: \+00db99\+000061';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \+00db99\+000061';
+ ^
+SELECT U&'wrong: \+2FFFFF';
+ERROR: invalid Unicode escape value
+LINE 1: SELECT U&'wrong: \+2FFFFF';
+ ^
+-- while we're here, check the same cases in E-style literals
+SELECT E'd\u0061t\U00000061' AS "data";
+ data
+------
+ data
+(1 row)
+
+SELECT E'a\\b' AS "a\b";
+ a\b
+-----
+ a\b
+(1 row)
+
+SELECT E'wrong: \u061';
+ERROR: invalid Unicode escape
+LINE 1: SELECT E'wrong: \u061';
+ ^
+HINT: Unicode escapes must be \uXXXX or \UXXXXXXXX.
+SELECT E'wrong: \U0061';
+ERROR: invalid Unicode escape
+LINE 1: SELECT E'wrong: \U0061';
+ ^
+HINT: Unicode escapes must be \uXXXX or \UXXXXXXXX.
+SELECT E'wrong: \udb99';
+ERROR: invalid Unicode surrogate pair at or near "'"
+LINE 1: SELECT E'wrong: \udb99';
+ ^
+SELECT E'wrong: \udb99xy';
+ERROR: invalid Unicode surrogate pair at or near "x"
+LINE 1: SELECT E'wrong: \udb99xy';
+ ^
+SELECT E'wrong: \udb99\\';
+ERROR: invalid Unicode surrogate pair at or near "\"
+LINE 1: SELECT E'wrong: \udb99\\';
+ ^
+SELECT E'wrong: \udb99\u0061';
+ERROR: invalid Unicode surrogate pair at or near "\u0061"
+LINE 1: SELECT E'wrong: \udb99\u0061';
+ ^
+SELECT E'wrong: \U0000db99\U00000061';
+ERROR: invalid Unicode surrogate pair at or near "\U00000061"
+LINE 1: SELECT E'wrong: \U0000db99\U00000061';
+ ^
+SELECT E'wrong: \U002FFFFF';
+ERROR: invalid Unicode escape value at or near "\U002FFFFF"
+LINE 1: SELECT E'wrong: \U002FFFFF';
+ ^
+SET standard_conforming_strings TO off;
+SELECT 'tricky' AS U&"\" UESCAPE '!';
+ \
+--------
+ tricky
+(1 row)
+
+RESET standard_conforming_strings;
diff --git a/ydb/library/yql/tests/postgresql/cases/strings.sql b/ydb/library/yql/tests/postgresql/cases/strings.sql
new file mode 100644
index 0000000000..6bdab34ca3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/strings.sql
@@ -0,0 +1,46 @@
+--
+-- STRINGS
+-- Test various data entry syntaxes.
+--
+-- SQL string continuation syntax
+-- E021-03 character string literals
+SELECT 'first line'
+' - next line'
+ ' - third line'
+ AS "Three lines to one";
+-- illegal string continuation syntax
+SELECT 'first line'
+' - next line' /* this comment is not allowed here */
+' - third line'
+ AS "Illegal comment within continuation";
+-- Unicode escapes
+SET standard_conforming_strings TO on;
+SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
+SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*';
+SELECT U&'a\\b' AS "a\b";
+SELECT U&' \' UESCAPE '!' AS "tricky";
+SELECT 'tricky' AS U&"\" UESCAPE '!';
+SELECT U&'wrong: \061';
+SELECT U&'wrong: \+0061';
+SELECT U&'wrong: +0061' UESCAPE +;
+SELECT U&'wrong: +0061' UESCAPE '+';
+SELECT U&'wrong: \db99';
+SELECT U&'wrong: \db99xy';
+SELECT U&'wrong: \db99\\';
+SELECT U&'wrong: \db99\0061';
+SELECT U&'wrong: \+00db99\+000061';
+SELECT U&'wrong: \+2FFFFF';
+-- while we're here, check the same cases in E-style literals
+SELECT E'd\u0061t\U00000061' AS "data";
+SELECT E'a\\b' AS "a\b";
+SELECT E'wrong: \u061';
+SELECT E'wrong: \U0061';
+SELECT E'wrong: \udb99';
+SELECT E'wrong: \udb99xy';
+SELECT E'wrong: \udb99\\';
+SELECT E'wrong: \udb99\u0061';
+SELECT E'wrong: \U0000db99\U00000061';
+SELECT E'wrong: \U002FFFFF';
+SET standard_conforming_strings TO off;
+SELECT 'tricky' AS U&"\" UESCAPE '!';
+RESET standard_conforming_strings;
diff --git a/ydb/library/yql/tests/postgresql/cases/subselect.out b/ydb/library/yql/tests/postgresql/cases/subselect.out
new file mode 100644
index 0000000000..e5202b1d20
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/subselect.out
@@ -0,0 +1,13 @@
+-- Check grammar's handling of extra parens in assorted contexts
+SELECT * FROM (SELECT 1 AS x) ss;
+ x
+---
+ 1
+(1 row)
+
+SELECT * FROM ((SELECT 1 AS x)) ss;
+ x
+---
+ 1
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/cases/subselect.sql b/ydb/library/yql/tests/postgresql/cases/subselect.sql
new file mode 100644
index 0000000000..9598532095
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/subselect.sql
@@ -0,0 +1,3 @@
+-- Check grammar's handling of extra parens in assorted contexts
+SELECT * FROM (SELECT 1 AS x) ss;
+SELECT * FROM ((SELECT 1 AS x)) ss;
diff --git a/ydb/library/yql/tests/postgresql/cases/text.out b/ydb/library/yql/tests/postgresql/cases/text.out
new file mode 100644
index 0000000000..b70d550d03
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/text.out
@@ -0,0 +1,18 @@
+--
+-- TEXT
+--
+SELECT text 'this is a text string' = text 'this is a text string' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT text 'this is a text string' = text 'this is a text strin' AS false;
+ false
+-------
+ f
+(1 row)
+
+CREATE TABLE TEXT_TBL (f1 text);
+INSERT INTO TEXT_TBL VALUES ('doh!');
+INSERT INTO TEXT_TBL VALUES ('hi de ho neighbor');
diff --git a/ydb/library/yql/tests/postgresql/cases/text.sql b/ydb/library/yql/tests/postgresql/cases/text.sql
new file mode 100644
index 0000000000..596e424cb1
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/text.sql
@@ -0,0 +1,8 @@
+--
+-- TEXT
+--
+SELECT text 'this is a text string' = text 'this is a text string' AS true;
+SELECT text 'this is a text string' = text 'this is a text strin' AS false;
+CREATE TABLE TEXT_TBL (f1 text);
+INSERT INTO TEXT_TBL VALUES ('doh!');
+INSERT INTO TEXT_TBL VALUES ('hi de ho neighbor');
diff --git a/ydb/library/yql/tests/postgresql/cases/time.out b/ydb/library/yql/tests/postgresql/cases/time.out
new file mode 100644
index 0000000000..26e3161674
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/time.out
@@ -0,0 +1,15 @@
+--
+-- TIME
+--
+CREATE TABLE TIME_TBL (f1 time(2));
+INSERT INTO TIME_TBL VALUES ('00:00');
+INSERT INTO TIME_TBL VALUES ('01:00');
+-- as of 7.4, timezone spec should be accepted and ignored
+INSERT INTO TIME_TBL VALUES ('02:03 PST');
+INSERT INTO TIME_TBL VALUES ('11:59 EDT');
+INSERT INTO TIME_TBL VALUES ('12:00');
+INSERT INTO TIME_TBL VALUES ('12:01');
+INSERT INTO TIME_TBL VALUES ('23:59');
+INSERT INTO TIME_TBL VALUES ('11:59:59.99 PM');
+INSERT INTO TIME_TBL VALUES ('2003-03-07 15:36:39 America/New_York');
+INSERT INTO TIME_TBL VALUES ('2003-07-07 15:36:39 America/New_York');
diff --git a/ydb/library/yql/tests/postgresql/cases/time.sql b/ydb/library/yql/tests/postgresql/cases/time.sql
new file mode 100644
index 0000000000..26e3161674
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/time.sql
@@ -0,0 +1,15 @@
+--
+-- TIME
+--
+CREATE TABLE TIME_TBL (f1 time(2));
+INSERT INTO TIME_TBL VALUES ('00:00');
+INSERT INTO TIME_TBL VALUES ('01:00');
+-- as of 7.4, timezone spec should be accepted and ignored
+INSERT INTO TIME_TBL VALUES ('02:03 PST');
+INSERT INTO TIME_TBL VALUES ('11:59 EDT');
+INSERT INTO TIME_TBL VALUES ('12:00');
+INSERT INTO TIME_TBL VALUES ('12:01');
+INSERT INTO TIME_TBL VALUES ('23:59');
+INSERT INTO TIME_TBL VALUES ('11:59:59.99 PM');
+INSERT INTO TIME_TBL VALUES ('2003-03-07 15:36:39 America/New_York');
+INSERT INTO TIME_TBL VALUES ('2003-07-07 15:36:39 America/New_York');
diff --git a/ydb/library/yql/tests/postgresql/cases/timestamp.out b/ydb/library/yql/tests/postgresql/cases/timestamp.out
new file mode 100644
index 0000000000..55f6b42ace
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/timestamp.out
@@ -0,0 +1,114 @@
+--
+-- TIMESTAMP
+--
+CREATE TABLE TIMESTAMP_TBL (d1 timestamp(2) without time zone);
+-- Test shorthand input values
+-- We can't just "select" the results since they aren't constants; test for
+-- equality instead. We can do that by running the test inside a transaction
+-- block, within which the value of 'now' shouldn't change, and so these
+-- related values shouldn't either.
+BEGIN;
+INSERT INTO TIMESTAMP_TBL VALUES ('today');
+INSERT INTO TIMESTAMP_TBL VALUES ('yesterday');
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow');
+-- time zone should be ignored by this data type
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow EST');
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow zulu');
+COMMIT;
+-- Verify that 'now' *does* change over a reasonable interval such as 100 msec,
+-- and that it doesn't change over the same interval within a transaction block
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+BEGIN;
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+COMMIT;
+-- Special values
+INSERT INTO TIMESTAMP_TBL VALUES ('-infinity');
+INSERT INTO TIMESTAMP_TBL VALUES ('infinity');
+INSERT INTO TIMESTAMP_TBL VALUES ('epoch');
+-- Postgres v6.0 standard output format
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST');
+-- Variations on Postgres v6.1 standard output format
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST');
+-- ISO 8601 format
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02 03:04:05');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-08');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 -08:00');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 -0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 17:32:01 -07:00');
+INSERT INTO TIMESTAMP_TBL VALUES ('2001-09-22T18:19:20');
+-- POSIX format (note that the timezone abbrev is just decoration here)
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 08:14:01 GMT+8');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 13:14:02 GMT-1');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 12:14:03 GMT-2');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 03:14:04 PST+8');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 02:14:05 MST+7:00');
+-- Variations for acceptable input formats
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997 -0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 5:32PM 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997/02/10 17:32:01-0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('02-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('97FEB10 5:32:01PM UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('97/02/10 17:32:01 UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997.041 17:32:01 UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 America/New_York');
+-- Check date conversion and date arithmetic
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 18:32:01 PDT');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 11 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 12 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 13 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 14 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 15 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097 BC');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0597');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1697');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1797');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1897');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 2097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1999');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2000');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 2000');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2001');
diff --git a/ydb/library/yql/tests/postgresql/cases/timestamp.sql b/ydb/library/yql/tests/postgresql/cases/timestamp.sql
new file mode 100644
index 0000000000..97b743f46f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/timestamp.sql
@@ -0,0 +1,99 @@
+--
+-- TIMESTAMP
+--
+CREATE TABLE TIMESTAMP_TBL (d1 timestamp(2) without time zone);
+-- Test shorthand input values
+-- We can't just "select" the results since they aren't constants; test for
+-- equality instead. We can do that by running the test inside a transaction
+-- block, within which the value of 'now' shouldn't change, and so these
+-- related values shouldn't either.
+BEGIN;
+INSERT INTO TIMESTAMP_TBL VALUES ('today');
+INSERT INTO TIMESTAMP_TBL VALUES ('yesterday');
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow');
+-- time zone should be ignored by this data type
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow EST');
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow zulu');
+COMMIT;
+-- Verify that 'now' *does* change over a reasonable interval such as 100 msec,
+-- and that it doesn't change over the same interval within a transaction block
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+BEGIN;
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+COMMIT;
+-- Special values
+INSERT INTO TIMESTAMP_TBL VALUES ('-infinity');
+INSERT INTO TIMESTAMP_TBL VALUES ('infinity');
+INSERT INTO TIMESTAMP_TBL VALUES ('epoch');
+-- Postgres v6.0 standard output format
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST');
+-- Variations on Postgres v6.1 standard output format
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST');
+-- ISO 8601 format
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02 03:04:05');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-08');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 -08:00');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 -0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 17:32:01 -07:00');
+INSERT INTO TIMESTAMP_TBL VALUES ('2001-09-22T18:19:20');
+-- POSIX format (note that the timezone abbrev is just decoration here)
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 08:14:01 GMT+8');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 13:14:02 GMT-1');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 12:14:03 GMT-2');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 03:14:04 PST+8');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 02:14:05 MST+7:00');
+-- Variations for acceptable input formats
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997 -0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 5:32PM 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997/02/10 17:32:01-0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('02-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('97FEB10 5:32:01PM UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('97/02/10 17:32:01 UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997.041 17:32:01 UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 America/New_York');
+-- Check date conversion and date arithmetic
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 18:32:01 PDT');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 11 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 12 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 13 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 14 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 15 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097 BC');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0597');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1697');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1797');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1897');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 2097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1999');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2000');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 2000');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2001');
diff --git a/ydb/library/yql/tests/postgresql/cases/timestamptz.out b/ydb/library/yql/tests/postgresql/cases/timestamptz.out
new file mode 100644
index 0000000000..11c8cc7ca4
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/timestamptz.out
@@ -0,0 +1,117 @@
+--
+-- TIMESTAMPTZ
+--
+CREATE TABLE TIMESTAMPTZ_TBL (d1 timestamp(2) with time zone);
+-- Test shorthand input values
+-- We can't just "select" the results since they aren't constants; test for
+-- equality instead. We can do that by running the test inside a transaction
+-- block, within which the value of 'now' shouldn't change, and so these
+-- related values shouldn't either.
+BEGIN;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('today');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('yesterday');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow EST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow zulu');
+COMMIT;
+-- Verify that 'now' *does* change over a reasonable interval such as 100 msec,
+-- and that it doesn't change over the same interval within a transaction block
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+BEGIN;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+COMMIT;
+-- Special values
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('-infinity');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('infinity');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('epoch');
+-- Postgres v6.0 standard output format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST');
+-- Variations on Postgres v6.1 standard output format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST');
+-- ISO 8601 format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02 03:04:05');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-08');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 -08:00');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 -0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 17:32:01 -07:00');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2001-09-22T18:19:20');
+-- POSIX format (note that the timezone abbrev is just decoration here)
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 08:14:01 GMT+8');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 13:14:02 GMT-1');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 12:14:03 GMT-2');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 03:14:04 PST+8');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 02:14:05 MST+7:00');
+-- Variations for acceptable input formats
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997 -0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 5:32PM 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997/02/10 17:32:01-0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('02-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('97FEB10 5:32:01PM UTC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('97/02/10 17:32:01 UTC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997.041 17:32:01 UTC');
+-- timestamps at different timezones
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 America/New_York');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America/New_York');
+SELECT '19970710 173201' AT TIME ZONE 'America/Does_not_exist';
+ERROR: time zone "America/Does_not_exist" not recognized
+-- Check date conversion and date arithmetic
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 18:32:01 PDT');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 11 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 12 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 13 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 14 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 15 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097 BC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0597');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1697');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1797');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1897');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 2097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1999');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2000');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 2000');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2001');
diff --git a/ydb/library/yql/tests/postgresql/cases/timestamptz.sql b/ydb/library/yql/tests/postgresql/cases/timestamptz.sql
new file mode 100644
index 0000000000..2014211551
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/timestamptz.sql
@@ -0,0 +1,101 @@
+--
+-- TIMESTAMPTZ
+--
+CREATE TABLE TIMESTAMPTZ_TBL (d1 timestamp(2) with time zone);
+-- Test shorthand input values
+-- We can't just "select" the results since they aren't constants; test for
+-- equality instead. We can do that by running the test inside a transaction
+-- block, within which the value of 'now' shouldn't change, and so these
+-- related values shouldn't either.
+BEGIN;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('today');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('yesterday');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow EST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow zulu');
+COMMIT;
+-- Verify that 'now' *does* change over a reasonable interval such as 100 msec,
+-- and that it doesn't change over the same interval within a transaction block
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+BEGIN;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+COMMIT;
+-- Special values
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('-infinity');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('infinity');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('epoch');
+-- Postgres v6.0 standard output format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST');
+-- Variations on Postgres v6.1 standard output format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST');
+-- ISO 8601 format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02 03:04:05');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-08');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 -08:00');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 -0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 17:32:01 -07:00');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2001-09-22T18:19:20');
+-- POSIX format (note that the timezone abbrev is just decoration here)
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 08:14:01 GMT+8');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 13:14:02 GMT-1');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 12:14:03 GMT-2');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 03:14:04 PST+8');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 02:14:05 MST+7:00');
+-- Variations for acceptable input formats
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997 -0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 5:32PM 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997/02/10 17:32:01-0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('02-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('97FEB10 5:32:01PM UTC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('97/02/10 17:32:01 UTC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997.041 17:32:01 UTC');
+-- timestamps at different timezones
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 America/New_York');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America/New_York');
+SELECT '19970710 173201' AT TIME ZONE 'America/Does_not_exist';
+-- Check date conversion and date arithmetic
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 18:32:01 PDT');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 11 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 12 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 13 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 14 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 15 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097 BC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0597');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1697');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1797');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1897');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 2097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1999');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2000');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 2000');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2001');
diff --git a/ydb/library/yql/tests/postgresql/cases/timetz.out b/ydb/library/yql/tests/postgresql/cases/timetz.out
new file mode 100644
index 0000000000..be4b2dcee3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/timetz.out
@@ -0,0 +1,16 @@
+--
+-- TIMETZ
+--
+CREATE TABLE TIMETZ_TBL (f1 time(2) with time zone);
+INSERT INTO TIMETZ_TBL VALUES ('00:01 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('01:00 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('02:03 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('07:07 PST');
+INSERT INTO TIMETZ_TBL VALUES ('08:08 EDT');
+INSERT INTO TIMETZ_TBL VALUES ('11:59 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('12:00 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('12:01 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('23:59 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('11:59:59.99 PM PDT');
+INSERT INTO TIMETZ_TBL VALUES ('2003-03-07 15:36:39 America/New_York');
+INSERT INTO TIMETZ_TBL VALUES ('2003-07-07 15:36:39 America/New_York');
diff --git a/ydb/library/yql/tests/postgresql/cases/timetz.sql b/ydb/library/yql/tests/postgresql/cases/timetz.sql
new file mode 100644
index 0000000000..be4b2dcee3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/timetz.sql
@@ -0,0 +1,16 @@
+--
+-- TIMETZ
+--
+CREATE TABLE TIMETZ_TBL (f1 time(2) with time zone);
+INSERT INTO TIMETZ_TBL VALUES ('00:01 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('01:00 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('02:03 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('07:07 PST');
+INSERT INTO TIMETZ_TBL VALUES ('08:08 EDT');
+INSERT INTO TIMETZ_TBL VALUES ('11:59 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('12:00 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('12:01 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('23:59 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('11:59:59.99 PM PDT');
+INSERT INTO TIMETZ_TBL VALUES ('2003-03-07 15:36:39 America/New_York');
+INSERT INTO TIMETZ_TBL VALUES ('2003-07-07 15:36:39 America/New_York');
diff --git a/ydb/library/yql/tests/postgresql/cases/truncate.out b/ydb/library/yql/tests/postgresql/cases/truncate.out
new file mode 100644
index 0000000000..5ec9a033fd
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/truncate.out
@@ -0,0 +1,4 @@
+-- Test basic TRUNCATE functionality.
+CREATE TABLE truncate_a (col1 integer primary key);
+INSERT INTO truncate_a VALUES (1);
+INSERT INTO truncate_a VALUES (2);
diff --git a/ydb/library/yql/tests/postgresql/cases/truncate.sql b/ydb/library/yql/tests/postgresql/cases/truncate.sql
new file mode 100644
index 0000000000..5ec9a033fd
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/truncate.sql
@@ -0,0 +1,4 @@
+-- Test basic TRUNCATE functionality.
+CREATE TABLE truncate_a (col1 integer primary key);
+INSERT INTO truncate_a VALUES (1);
+INSERT INTO truncate_a VALUES (2);
diff --git a/ydb/library/yql/tests/postgresql/cases/unicode.out b/ydb/library/yql/tests/postgresql/cases/unicode.out
new file mode 100644
index 0000000000..17816cb963
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/unicode.out
@@ -0,0 +1,31 @@
+SELECT "normalize"('abc', 'def'); -- run-time error
+ERROR: invalid normalization form: def
+SELECT U&'\00E4\24D1c' IS NFC NORMALIZED AS test_nfc;
+ test_nfc
+----------
+ t
+(1 row)
+
+SELECT num, val,
+ val IS NFC NORMALIZED AS NFC,
+ val IS NFD NORMALIZED AS NFD,
+ val IS NFKC NORMALIZED AS NFKC,
+ val IS NFKD NORMALIZED AS NFKD
+FROM
+ (VALUES (1, U&'\00E4bc'),
+ (2, U&'\0061\0308bc'),
+ (3, U&'\00E4\24D1c'),
+ (4, U&'\0061\0308\24D1c'),
+ (5, '')) vals (num, val)
+ORDER BY num;
+ num | val | nfc | nfd | nfkc | nfkd
+-----+-----+-----+-----+------+------
+ 1 | äbc | t | f | t | f
+ 2 | äbc | f | t | f | t
+ 3 | äⓑc | t | f | f | f
+ 4 | äⓑc | f | t | f | f
+ 5 | | t | t | t | t
+(5 rows)
+
+SELECT is_normalized('abc', 'def'); -- run-time error
+ERROR: invalid normalization form: def
diff --git a/ydb/library/yql/tests/postgresql/cases/unicode.sql b/ydb/library/yql/tests/postgresql/cases/unicode.sql
new file mode 100644
index 0000000000..6e9bc22afb
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/unicode.sql
@@ -0,0 +1,15 @@
+SELECT "normalize"('abc', 'def'); -- run-time error
+SELECT U&'\00E4\24D1c' IS NFC NORMALIZED AS test_nfc;
+SELECT num, val,
+ val IS NFC NORMALIZED AS NFC,
+ val IS NFD NORMALIZED AS NFD,
+ val IS NFKC NORMALIZED AS NFKC,
+ val IS NFKD NORMALIZED AS NFKD
+FROM
+ (VALUES (1, U&'\00E4bc'),
+ (2, U&'\0061\0308bc'),
+ (3, U&'\00E4\24D1c'),
+ (4, U&'\0061\0308\24D1c'),
+ (5, '')) vals (num, val)
+ORDER BY num;
+SELECT is_normalized('abc', 'def'); -- run-time error
diff --git a/ydb/library/yql/tests/postgresql/cases/update.out b/ydb/library/yql/tests/postgresql/cases/update.out
new file mode 100644
index 0000000000..c5bdedc2c5
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/update.out
@@ -0,0 +1,27 @@
+CREATE TABLE upsert_test (
+ a INT PRIMARY KEY,
+ b TEXT
+);
+-- Test ON CONFLICT DO UPDATE
+INSERT INTO upsert_test VALUES(1, 'Boo'), (3, 'Zoo');
+DROP TABLE upsert_test;
+CREATE TABLE upsert_test_2 (b TEXT, a INT PRIMARY KEY);
+-- Create partitions intentionally in descending bound order, so as to test
+-- that update-row-movement works with the leaf partitions not in bound order.
+CREATE TABLE part_b_20_b_30 (e varchar, c numeric, a text, b bigint, d int);
+CREATE TABLE part_c_1_100 (e varchar, d int, c numeric, b bigint, a text);
+\set init_range_parted 'truncate range_parted; insert into range_parted VALUES (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)'
+\set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted ORDER BY 1, 2, 3, 4, 5, 6'
+-- Common table needed for multiple test scenarios.
+CREATE TABLE mintab(c1 int);
+INSERT into mintab VALUES (120);
+DROP TABLE mintab;
+CREATE TABLE sub_part1(b int, c int8, a numeric);
+CREATE TABLE sub_part2(b int, c int8, a numeric);
+CREATE TABLE list_part1(a numeric, b int, c int8);
+-- UPDATE partition-key with FROM clause. If join produces multiple output
+-- rows for the same row to be modified, we should tuple-route the row only
+-- once. There should not be any rows inserted.
+CREATE TABLE non_parted (id int);
+INSERT into non_parted VALUES (1), (1), (1), (2), (2), (2), (3), (3), (3);
+DROP TABLE non_parted;
diff --git a/ydb/library/yql/tests/postgresql/cases/update.sql b/ydb/library/yql/tests/postgresql/cases/update.sql
new file mode 100644
index 0000000000..c5bdedc2c5
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/update.sql
@@ -0,0 +1,27 @@
+CREATE TABLE upsert_test (
+ a INT PRIMARY KEY,
+ b TEXT
+);
+-- Test ON CONFLICT DO UPDATE
+INSERT INTO upsert_test VALUES(1, 'Boo'), (3, 'Zoo');
+DROP TABLE upsert_test;
+CREATE TABLE upsert_test_2 (b TEXT, a INT PRIMARY KEY);
+-- Create partitions intentionally in descending bound order, so as to test
+-- that update-row-movement works with the leaf partitions not in bound order.
+CREATE TABLE part_b_20_b_30 (e varchar, c numeric, a text, b bigint, d int);
+CREATE TABLE part_c_1_100 (e varchar, d int, c numeric, b bigint, a text);
+\set init_range_parted 'truncate range_parted; insert into range_parted VALUES (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)'
+\set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted ORDER BY 1, 2, 3, 4, 5, 6'
+-- Common table needed for multiple test scenarios.
+CREATE TABLE mintab(c1 int);
+INSERT into mintab VALUES (120);
+DROP TABLE mintab;
+CREATE TABLE sub_part1(b int, c int8, a numeric);
+CREATE TABLE sub_part2(b int, c int8, a numeric);
+CREATE TABLE list_part1(a numeric, b int, c int8);
+-- UPDATE partition-key with FROM clause. If join produces multiple output
+-- rows for the same row to be modified, we should tuple-route the row only
+-- once. There should not be any rows inserted.
+CREATE TABLE non_parted (id int);
+INSERT into non_parted VALUES (1), (1), (1), (2), (2), (2), (3), (3), (3);
+DROP TABLE non_parted;
diff --git a/ydb/library/yql/tests/postgresql/cases/varchar.out b/ydb/library/yql/tests/postgresql/cases/varchar.out
new file mode 100644
index 0000000000..b55b167625
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/varchar.out
@@ -0,0 +1,4 @@
+--
+-- VARCHAR
+--
+CREATE TABLE VARCHAR_TBL(f1 varchar(1));
diff --git a/ydb/library/yql/tests/postgresql/cases/varchar.sql b/ydb/library/yql/tests/postgresql/cases/varchar.sql
new file mode 100644
index 0000000000..8e0b0a47df
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/varchar.sql
@@ -0,0 +1,5 @@
+--
+-- VARCHAR
+--
+
+CREATE TABLE VARCHAR_TBL(f1 varchar(1));
diff --git a/ydb/library/yql/tests/postgresql/cases/window.out b/ydb/library/yql/tests/postgresql/cases/window.out
new file mode 100644
index 0000000000..9cdd9febaa
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/window.out
@@ -0,0 +1,20 @@
+--
+-- WINDOW FUNCTIONS
+--
+CREATE TEMPORARY TABLE empsalary (
+ depname varchar,
+ empno bigint,
+ salary int,
+ enroll_date date
+);
+INSERT INTO empsalary VALUES
+('develop', 10, 5200, '2007-08-01'),
+('sales', 1, 5000, '2006-10-01'),
+('personnel', 5, 3500, '2007-12-10'),
+('sales', 4, 4800, '2007-08-08'),
+('personnel', 2, 3900, '2006-12-23'),
+('develop', 7, 4200, '2008-01-01'),
+('develop', 9, 4500, '2008-01-01'),
+('sales', 3, 4800, '2007-08-01'),
+('develop', 8, 6000, '2006-10-01'),
+('develop', 11, 5200, '2007-08-15');
diff --git a/ydb/library/yql/tests/postgresql/cases/window.sql b/ydb/library/yql/tests/postgresql/cases/window.sql
new file mode 100644
index 0000000000..9cdd9febaa
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/window.sql
@@ -0,0 +1,20 @@
+--
+-- WINDOW FUNCTIONS
+--
+CREATE TEMPORARY TABLE empsalary (
+ depname varchar,
+ empno bigint,
+ salary int,
+ enroll_date date
+);
+INSERT INTO empsalary VALUES
+('develop', 10, 5200, '2007-08-01'),
+('sales', 1, 5000, '2006-10-01'),
+('personnel', 5, 3500, '2007-12-10'),
+('sales', 4, 4800, '2007-08-08'),
+('personnel', 2, 3900, '2006-12-23'),
+('develop', 7, 4200, '2008-01-01'),
+('develop', 9, 4500, '2008-01-01'),
+('sales', 3, 4800, '2007-08-01'),
+('develop', 8, 6000, '2006-10-01'),
+('develop', 11, 5200, '2007-08-15');
diff --git a/ydb/library/yql/tests/postgresql/cases/xml.out b/ydb/library/yql/tests/postgresql/cases/xml.out
new file mode 100644
index 0000000000..ab0012d138
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/xml.out
@@ -0,0 +1,6 @@
+CREATE TABLE xmltest (
+ id int,
+ data xml
+);
+INSERT INTO xmltest VALUES (1, '<value>one</value>');
+INSERT INTO xmltest VALUES (2, '<value>two</value>');
diff --git a/ydb/library/yql/tests/postgresql/cases/xml.sql b/ydb/library/yql/tests/postgresql/cases/xml.sql
new file mode 100644
index 0000000000..ab0012d138
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/cases/xml.sql
@@ -0,0 +1,6 @@
+CREATE TABLE xmltest (
+ id int,
+ data xml
+);
+INSERT INTO xmltest VALUES (1, '<value>one</value>');
+INSERT INTO xmltest VALUES (2, '<value>two</value>');
diff --git a/ydb/library/yql/tests/postgresql/common/__init__.py b/ydb/library/yql/tests/postgresql/common/__init__.py
new file mode 100644
index 0000000000..651d1b84fe
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/common/__init__.py
@@ -0,0 +1,90 @@
+import sys
+import logging
+from pathlib import Path
+import subprocess
+from .differ import Differ
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+def setup_logger():
+ options = dict(
+ level=logging.DEBUG,
+ format='%(levelname)s: %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S',
+ stream=sys.stderr
+ )
+
+ logging.basicConfig(**options)
+
+
+setup_logger()
+
+
+def find_sql_tests(path):
+ tests = []
+
+ for sql_file in Path(path).glob('*.sql'):
+ if not sql_file.is_file():
+ LOGGER.warning("'%s' is not a file", sql_file.absolute())
+ continue
+
+ out_files = list(get_out_files(sql_file))
+ if not out_files:
+ LOGGER.warning("No .out files found for '%s'", sql_file.absolute())
+ continue
+
+ tests.append((sql_file.stem, (sql_file, out_files)))
+
+ return tests
+
+
+def run_sql_test(sql, out, tmp_path, runner):
+ LOGGER.debug("Running %s '%s' -> [%s]", runner, sql, ', '.join("'{}'".format(a) for a in out))
+ with open(sql, 'rb') as f:
+ pi = subprocess.run([runner, "--datadir", tmp_path],
+ stdin=f, stdout=subprocess.PIPE, stderr=sys.stderr, check=True)
+
+ min_diff = sys.maxsize
+ best_match = out[0]
+ best_diff = ''
+
+ for out_file in out:
+ with open(out_file, 'rb') as f:
+ out_data = f.read()
+
+ last_diff = Differ.diff(pi.stdout, out_data)
+ diff_len = len(last_diff)
+
+ if diff_len == 0:
+ return
+
+ if diff_len < min_diff:
+ min_diff = diff_len
+ best_match = out_file
+ best_diff = last_diff
+
+ LOGGER.info("No exact match for '%s'. Best match is '%s'", sql, best_match)
+ for line in best_diff:
+ LOGGER.debug(line)
+
+ # We need assert to fail the test properly
+ assert min_diff == 0, \
+ f"pgrun output does not match out-file for {sql}. Diff:\n" + ''.join(d.decode('utf8') for d in best_diff)[:1024]
+
+
+def get_out_files(sql_file):
+ base_name = sql_file.stem
+ out_file = sql_file.with_suffix('.out')
+
+ if out_file.is_file():
+ yield out_file
+
+ for i in range(1, 10):
+ nth_out_file = out_file.with_stem('{}_{}'.format(base_name, i))
+
+ if not nth_out_file.is_file():
+ break
+
+ yield nth_out_file
diff --git a/ydb/library/yql/tests/postgresql/common/differ.py b/ydb/library/yql/tests/postgresql/common/differ.py
new file mode 100644
index 0000000000..3b29d2cdd5
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/common/differ.py
@@ -0,0 +1,91 @@
+import difflib
+import re
+
+
+class Differ:
+ @classmethod
+ def diff(cls, left, right):
+ left = cls.__remove_pg_error_msgs(left).splitlines(keepends=True)
+ right = cls.__remove_pg_error_msgs(right).splitlines(keepends=True)
+
+ cls.__unify_tables(left, right)
+
+ return list(difflib.diff_bytes(difflib.unified_diff, left, right, n=0, fromfile=b'sql', tofile=b'out'))
+
+ __reErr = re.compile(b'(^ERROR: [^\n]+)(?:\nLINE \\d+: [^\n]+(?:\n\\s*\\^\\s*)?)?(?:\n(?:HINT|DETAIL|CONTEXT): [^\n]+)*(?:\n|$)',
+ re.MULTILINE)
+
+ @classmethod
+ def __remove_pg_error_msgs(cls, s):
+ return cls.__reErr.sub(rb"\1", s)
+
+ __reUniversalTableMarker = re.compile(rb'^-{3,100}(?:\+-{3,100})*$')
+ __reTableEndMarker = re.compile(rb'^\(\d+ rows?\)$')
+
+ @classmethod
+ def __is_table_start(cls, pgrun_output: str, row_idx):
+ is_0_col_tbl_start = pgrun_output[row_idx] == b'--\n' and row_idx + 1 < len(pgrun_output) \
+ and cls.__reTableEndMarker.match(pgrun_output[row_idx + 1])
+ return is_0_col_tbl_start or cls.__reUniversalTableMarker.match(pgrun_output[row_idx])
+
+ @classmethod
+ def __reformat_table_row(cls, row, col_widths):
+ cells = [c.strip() for c in row[:-1].split(b'|')]
+ return b'|'.join(c.ljust(w) for (c, w) in zip(cells, col_widths))
+
+ @classmethod
+ def __remove_table_headers(cls, lines, header_line_numbers):
+ for i in reversed(header_line_numbers):
+ del lines[i]
+ del lines[i-1]
+
+ @classmethod
+ def __unify_tables(cls, left, right):
+ left_headers = []
+ right_headers = []
+ ucols = []
+
+ in_table = False
+ R = enumerate(right)
+ for i, l in enumerate(left):
+ if in_table:
+ if cls.__reTableEndMarker.match(l):
+ in_table = False
+
+ for (j, r) in R:
+ if cls.__reTableEndMarker.match(r):
+ break
+ right[j] = cls.__reformat_table_row(r, ucols)
+ else:
+ break
+
+ continue
+
+ left[i] = cls.__reformat_table_row(l, ucols)
+
+ continue
+
+ if cls.__is_table_start(left, i):
+ for (j, r) in R:
+ if cls.__is_table_start(right, j):
+ break
+ else:
+ continue
+ lcols = [len(c) for c in l[:-1].split(b'+')]
+ rcols = [len(c) for c in r[:-1].split(b'+')]
+
+ if left[i-1] == right[j-1]:
+ continue
+
+ if len(lcols) != len(rcols):
+ continue
+
+ ucols = [max(lw, rw) for lw, rw in zip(lcols, rcols)]
+
+ left_headers.append(i)
+ right_headers.append(j)
+
+ in_table = True
+
+ cls.__remove_table_headers(left, left_headers)
+ cls.__remove_table_headers(right, right_headers)
diff --git a/ydb/library/yql/tests/postgresql/common/ya.make b/ydb/library/yql/tests/postgresql/common/ya.make
new file mode 100644
index 0000000000..c354d69518
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/common/ya.make
@@ -0,0 +1,8 @@
+PY3_LIBRARY()
+
+PY_SRCS(
+ __init__.py
+ differ.py
+)
+
+END()
diff --git a/ydb/library/yql/tests/postgresql/make-tests.sh b/ydb/library/yql/tests/postgresql/make-tests.sh
new file mode 100755
index 0000000000..9f131b2a5e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/make-tests.sh
@@ -0,0 +1,11 @@
+#! /bin/sh
+../../tools/pg-make-test/pg-make-test \
+ --srcdir=original/cases \
+ --patchdir=patches \
+ --dstdir=cases \
+ --runner=../../tools/pgrun/pgrun \
+ --splitter="../../tools/pgrun/pgrun split-statements" \
+ --skip=uuid,char,varchar \
+ --report=pg_tests.csv \
+ $@
+
diff --git a/ydb/library/yql/tests/postgresql/original.sh b/ydb/library/yql/tests/postgresql/original.sh
new file mode 100755
index 0000000000..0f4d603122
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original.sh
@@ -0,0 +1,2 @@
+#! /bin/sh
+ya make -tt -DORIGINAL $@
diff --git a/ydb/library/yql/tests/postgresql/original/cases/aggregates.out b/ydb/library/yql/tests/postgresql/original/cases/aggregates.out
new file mode 100644
index 0000000000..5949996ebc
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/aggregates.out
@@ -0,0 +1,2815 @@
+--
+-- AGGREGATES
+--
+-- avoid bit-exact output here because operations may not be bit-exact.
+SET extra_float_digits = 0;
+SELECT avg(four) AS avg_1 FROM onek;
+ avg_1
+--------------------
+ 1.5000000000000000
+(1 row)
+
+SELECT avg(a) AS avg_32 FROM aggtest WHERE a < 100;
+ avg_32
+---------------------
+ 32.6666666666666667
+(1 row)
+
+-- In 7.1, avg(float4) is computed using float8 arithmetic.
+-- Round the result to 3 digits to avoid platform-specific results.
+SELECT avg(b)::numeric(10,3) AS avg_107_943 FROM aggtest;
+ avg_107_943
+-------------
+ 107.943
+(1 row)
+
+SELECT avg(gpa) AS avg_3_4 FROM ONLY student;
+ avg_3_4
+---------
+ 3.4
+(1 row)
+
+SELECT sum(four) AS sum_1500 FROM onek;
+ sum_1500
+----------
+ 1500
+(1 row)
+
+SELECT sum(a) AS sum_198 FROM aggtest;
+ sum_198
+---------
+ 198
+(1 row)
+
+SELECT sum(b) AS avg_431_773 FROM aggtest;
+ avg_431_773
+-------------
+ 431.773
+(1 row)
+
+SELECT sum(gpa) AS avg_6_8 FROM ONLY student;
+ avg_6_8
+---------
+ 6.8
+(1 row)
+
+SELECT max(four) AS max_3 FROM onek;
+ max_3
+-------
+ 3
+(1 row)
+
+SELECT max(a) AS max_100 FROM aggtest;
+ max_100
+---------
+ 100
+(1 row)
+
+SELECT max(aggtest.b) AS max_324_78 FROM aggtest;
+ max_324_78
+------------
+ 324.78
+(1 row)
+
+SELECT max(student.gpa) AS max_3_7 FROM student;
+ max_3_7
+---------
+ 3.7
+(1 row)
+
+SELECT stddev_pop(b) FROM aggtest;
+ stddev_pop
+-----------------
+ 131.10703231895
+(1 row)
+
+SELECT stddev_samp(b) FROM aggtest;
+ stddev_samp
+------------------
+ 151.389360803998
+(1 row)
+
+SELECT var_pop(b) FROM aggtest;
+ var_pop
+------------------
+ 17189.0539234823
+(1 row)
+
+SELECT var_samp(b) FROM aggtest;
+ var_samp
+------------------
+ 22918.7385646431
+(1 row)
+
+SELECT stddev_pop(b::numeric) FROM aggtest;
+ stddev_pop
+------------------
+ 131.107032862199
+(1 row)
+
+SELECT stddev_samp(b::numeric) FROM aggtest;
+ stddev_samp
+------------------
+ 151.389361431288
+(1 row)
+
+SELECT var_pop(b::numeric) FROM aggtest;
+ var_pop
+--------------------
+ 17189.054065929769
+(1 row)
+
+SELECT var_samp(b::numeric) FROM aggtest;
+ var_samp
+--------------------
+ 22918.738754573025
+(1 row)
+
+-- population variance is defined for a single tuple, sample variance
+-- is not
+SELECT var_pop(1.0::float8), var_samp(2.0::float8);
+ var_pop | var_samp
+---------+----------
+ 0 |
+(1 row)
+
+SELECT stddev_pop(3.0::float8), stddev_samp(4.0::float8);
+ stddev_pop | stddev_samp
+------------+-------------
+ 0 |
+(1 row)
+
+SELECT var_pop('inf'::float8), var_samp('inf'::float8);
+ var_pop | var_samp
+---------+----------
+ NaN |
+(1 row)
+
+SELECT stddev_pop('inf'::float8), stddev_samp('inf'::float8);
+ stddev_pop | stddev_samp
+------------+-------------
+ NaN |
+(1 row)
+
+SELECT var_pop('nan'::float8), var_samp('nan'::float8);
+ var_pop | var_samp
+---------+----------
+ NaN |
+(1 row)
+
+SELECT stddev_pop('nan'::float8), stddev_samp('nan'::float8);
+ stddev_pop | stddev_samp
+------------+-------------
+ NaN |
+(1 row)
+
+SELECT var_pop(1.0::float4), var_samp(2.0::float4);
+ var_pop | var_samp
+---------+----------
+ 0 |
+(1 row)
+
+SELECT stddev_pop(3.0::float4), stddev_samp(4.0::float4);
+ stddev_pop | stddev_samp
+------------+-------------
+ 0 |
+(1 row)
+
+SELECT var_pop('inf'::float4), var_samp('inf'::float4);
+ var_pop | var_samp
+---------+----------
+ NaN |
+(1 row)
+
+SELECT stddev_pop('inf'::float4), stddev_samp('inf'::float4);
+ stddev_pop | stddev_samp
+------------+-------------
+ NaN |
+(1 row)
+
+SELECT var_pop('nan'::float4), var_samp('nan'::float4);
+ var_pop | var_samp
+---------+----------
+ NaN |
+(1 row)
+
+SELECT stddev_pop('nan'::float4), stddev_samp('nan'::float4);
+ stddev_pop | stddev_samp
+------------+-------------
+ NaN |
+(1 row)
+
+SELECT var_pop(1.0::numeric), var_samp(2.0::numeric);
+ var_pop | var_samp
+---------+----------
+ 0 |
+(1 row)
+
+SELECT stddev_pop(3.0::numeric), stddev_samp(4.0::numeric);
+ stddev_pop | stddev_samp
+------------+-------------
+ 0 |
+(1 row)
+
+SELECT var_pop('inf'::numeric), var_samp('inf'::numeric);
+ var_pop | var_samp
+---------+----------
+ NaN |
+(1 row)
+
+SELECT stddev_pop('inf'::numeric), stddev_samp('inf'::numeric);
+ stddev_pop | stddev_samp
+------------+-------------
+ NaN |
+(1 row)
+
+SELECT var_pop('nan'::numeric), var_samp('nan'::numeric);
+ var_pop | var_samp
+---------+----------
+ NaN |
+(1 row)
+
+SELECT stddev_pop('nan'::numeric), stddev_samp('nan'::numeric);
+ stddev_pop | stddev_samp
+------------+-------------
+ NaN |
+(1 row)
+
+-- verify correct results for null and NaN inputs
+select sum(null::int4) from generate_series(1,3);
+ sum
+-----
+
+(1 row)
+
+select sum(null::int8) from generate_series(1,3);
+ sum
+-----
+
+(1 row)
+
+select sum(null::numeric) from generate_series(1,3);
+ sum
+-----
+
+(1 row)
+
+select sum(null::float8) from generate_series(1,3);
+ sum
+-----
+
+(1 row)
+
+select avg(null::int4) from generate_series(1,3);
+ avg
+-----
+
+(1 row)
+
+select avg(null::int8) from generate_series(1,3);
+ avg
+-----
+
+(1 row)
+
+select avg(null::numeric) from generate_series(1,3);
+ avg
+-----
+
+(1 row)
+
+select avg(null::float8) from generate_series(1,3);
+ avg
+-----
+
+(1 row)
+
+select sum('NaN'::numeric) from generate_series(1,3);
+ sum
+-----
+ NaN
+(1 row)
+
+select avg('NaN'::numeric) from generate_series(1,3);
+ avg
+-----
+ NaN
+(1 row)
+
+-- verify correct results for infinite inputs
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('1'), ('infinity')) v(x);
+ sum | avg | var_pop
+----------+----------+---------
+ Infinity | Infinity | NaN
+(1 row)
+
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('infinity'), ('1')) v(x);
+ sum | avg | var_pop
+----------+----------+---------
+ Infinity | Infinity | NaN
+(1 row)
+
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('infinity'), ('infinity')) v(x);
+ sum | avg | var_pop
+----------+----------+---------
+ Infinity | Infinity | NaN
+(1 row)
+
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('-infinity'), ('infinity')) v(x);
+ sum | avg | var_pop
+-----+-----+---------
+ NaN | NaN | NaN
+(1 row)
+
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('-infinity'), ('-infinity')) v(x);
+ sum | avg | var_pop
+-----------+-----------+---------
+ -Infinity | -Infinity | NaN
+(1 row)
+
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('1'), ('infinity')) v(x);
+ sum | avg | var_pop
+----------+----------+---------
+ Infinity | Infinity | NaN
+(1 row)
+
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('infinity'), ('1')) v(x);
+ sum | avg | var_pop
+----------+----------+---------
+ Infinity | Infinity | NaN
+(1 row)
+
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('infinity'), ('infinity')) v(x);
+ sum | avg | var_pop
+----------+----------+---------
+ Infinity | Infinity | NaN
+(1 row)
+
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('-infinity'), ('infinity')) v(x);
+ sum | avg | var_pop
+-----+-----+---------
+ NaN | NaN | NaN
+(1 row)
+
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('-infinity'), ('-infinity')) v(x);
+ sum | avg | var_pop
+-----------+-----------+---------
+ -Infinity | -Infinity | NaN
+(1 row)
+
+-- test accuracy with a large input offset
+SELECT avg(x::float8), var_pop(x::float8)
+FROM (VALUES (100000003), (100000004), (100000006), (100000007)) v(x);
+ avg | var_pop
+-----------+---------
+ 100000005 | 2.5
+(1 row)
+
+SELECT avg(x::float8), var_pop(x::float8)
+FROM (VALUES (7000000000005), (7000000000007)) v(x);
+ avg | var_pop
+---------------+---------
+ 7000000000006 | 1
+(1 row)
+
+-- SQL2003 binary aggregates
+SELECT regr_count(b, a) FROM aggtest;
+ regr_count
+------------
+ 4
+(1 row)
+
+SELECT regr_sxx(b, a) FROM aggtest;
+ regr_sxx
+----------
+ 5099
+(1 row)
+
+SELECT regr_syy(b, a) FROM aggtest;
+ regr_syy
+------------------
+ 68756.2156939293
+(1 row)
+
+SELECT regr_sxy(b, a) FROM aggtest;
+ regr_sxy
+------------------
+ 2614.51582155004
+(1 row)
+
+SELECT regr_avgx(b, a), regr_avgy(b, a) FROM aggtest;
+ regr_avgx | regr_avgy
+-----------+------------------
+ 49.5 | 107.943152273074
+(1 row)
+
+SELECT regr_r2(b, a) FROM aggtest;
+ regr_r2
+--------------------
+ 0.0194977982031803
+(1 row)
+
+SELECT regr_slope(b, a), regr_intercept(b, a) FROM aggtest;
+ regr_slope | regr_intercept
+-------------------+------------------
+ 0.512750700441271 | 82.5619926012309
+(1 row)
+
+SELECT covar_pop(b, a), covar_samp(b, a) FROM aggtest;
+ covar_pop | covar_samp
+-----------------+------------------
+ 653.62895538751 | 871.505273850014
+(1 row)
+
+SELECT corr(b, a) FROM aggtest;
+ corr
+-------------------
+ 0.139634516517873
+(1 row)
+
+-- check single-tuple behavior
+SELECT covar_pop(1::float8,2::float8), covar_samp(3::float8,4::float8);
+ covar_pop | covar_samp
+-----------+------------
+ 0 |
+(1 row)
+
+SELECT covar_pop(1::float8,'inf'::float8), covar_samp(3::float8,'inf'::float8);
+ covar_pop | covar_samp
+-----------+------------
+ NaN |
+(1 row)
+
+SELECT covar_pop(1::float8,'nan'::float8), covar_samp(3::float8,'nan'::float8);
+ covar_pop | covar_samp
+-----------+------------
+ NaN |
+(1 row)
+
+-- test accum and combine functions directly
+CREATE TABLE regr_test (x float8, y float8);
+INSERT INTO regr_test VALUES (10,150),(20,250),(30,350),(80,540),(100,200);
+SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
+FROM regr_test WHERE x IN (10,20,30,80);
+ count | sum | regr_sxx | sum | regr_syy | regr_sxy
+-------+-----+----------+------+----------+----------
+ 4 | 140 | 2900 | 1290 | 83075 | 15050
+(1 row)
+
+SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
+FROM regr_test;
+ count | sum | regr_sxx | sum | regr_syy | regr_sxy
+-------+-----+----------+------+----------+----------
+ 5 | 240 | 6280 | 1490 | 95080 | 8680
+(1 row)
+
+SELECT float8_accum('{4,140,2900}'::float8[], 100);
+ float8_accum
+--------------
+ {5,240,6280}
+(1 row)
+
+SELECT float8_regr_accum('{4,140,2900,1290,83075,15050}'::float8[], 200, 100);
+ float8_regr_accum
+------------------------------
+ {5,240,6280,1490,95080,8680}
+(1 row)
+
+SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
+FROM regr_test WHERE x IN (10,20,30);
+ count | sum | regr_sxx | sum | regr_syy | regr_sxy
+-------+-----+----------+-----+----------+----------
+ 3 | 60 | 200 | 750 | 20000 | 2000
+(1 row)
+
+SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
+FROM regr_test WHERE x IN (80,100);
+ count | sum | regr_sxx | sum | regr_syy | regr_sxy
+-------+-----+----------+-----+----------+----------
+ 2 | 180 | 200 | 740 | 57800 | -3400
+(1 row)
+
+SELECT float8_combine('{3,60,200}'::float8[], '{0,0,0}'::float8[]);
+ float8_combine
+----------------
+ {3,60,200}
+(1 row)
+
+SELECT float8_combine('{0,0,0}'::float8[], '{2,180,200}'::float8[]);
+ float8_combine
+----------------
+ {2,180,200}
+(1 row)
+
+SELECT float8_combine('{3,60,200}'::float8[], '{2,180,200}'::float8[]);
+ float8_combine
+----------------
+ {5,240,6280}
+(1 row)
+
+SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[],
+ '{0,0,0,0,0,0}'::float8[]);
+ float8_regr_combine
+---------------------------
+ {3,60,200,750,20000,2000}
+(1 row)
+
+SELECT float8_regr_combine('{0,0,0,0,0,0}'::float8[],
+ '{2,180,200,740,57800,-3400}'::float8[]);
+ float8_regr_combine
+-----------------------------
+ {2,180,200,740,57800,-3400}
+(1 row)
+
+SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[],
+ '{2,180,200,740,57800,-3400}'::float8[]);
+ float8_regr_combine
+------------------------------
+ {5,240,6280,1490,95080,8680}
+(1 row)
+
+DROP TABLE regr_test;
+-- test count, distinct
+SELECT count(four) AS cnt_1000 FROM onek;
+ cnt_1000
+----------
+ 1000
+(1 row)
+
+SELECT count(DISTINCT four) AS cnt_4 FROM onek;
+ cnt_4
+-------
+ 4
+(1 row)
+
+select ten, count(*), sum(four) from onek
+group by ten order by ten;
+ ten | count | sum
+-----+-------+-----
+ 0 | 100 | 100
+ 1 | 100 | 200
+ 2 | 100 | 100
+ 3 | 100 | 200
+ 4 | 100 | 100
+ 5 | 100 | 200
+ 6 | 100 | 100
+ 7 | 100 | 200
+ 8 | 100 | 100
+ 9 | 100 | 200
+(10 rows)
+
+select ten, count(four), sum(DISTINCT four) from onek
+group by ten order by ten;
+ ten | count | sum
+-----+-------+-----
+ 0 | 100 | 2
+ 1 | 100 | 4
+ 2 | 100 | 2
+ 3 | 100 | 4
+ 4 | 100 | 2
+ 5 | 100 | 4
+ 6 | 100 | 2
+ 7 | 100 | 4
+ 8 | 100 | 2
+ 9 | 100 | 4
+(10 rows)
+
+-- user-defined aggregates
+SELECT newavg(four) AS avg_1 FROM onek;
+ avg_1
+--------------------
+ 1.5000000000000000
+(1 row)
+
+SELECT newsum(four) AS sum_1500 FROM onek;
+ sum_1500
+----------
+ 1500
+(1 row)
+
+SELECT newcnt(four) AS cnt_1000 FROM onek;
+ cnt_1000
+----------
+ 1000
+(1 row)
+
+SELECT newcnt(*) AS cnt_1000 FROM onek;
+ cnt_1000
+----------
+ 1000
+(1 row)
+
+SELECT oldcnt(*) AS cnt_1000 FROM onek;
+ cnt_1000
+----------
+ 1000
+(1 row)
+
+SELECT sum2(q1,q2) FROM int8_tbl;
+ sum2
+-------------------
+ 18271560493827981
+(1 row)
+
+-- test for outer-level aggregates
+-- this should work
+select ten, sum(distinct four) from onek a
+group by ten
+having exists (select 1 from onek b where sum(distinct a.four) = b.four);
+ ten | sum
+-----+-----
+ 0 | 2
+ 2 | 2
+ 4 | 2
+ 6 | 2
+ 8 | 2
+(5 rows)
+
+-- this should fail because subquery has an agg of its own in WHERE
+select ten, sum(distinct four) from onek a
+group by ten
+having exists (select 1 from onek b
+ where sum(distinct a.four + b.four) = b.four);
+ERROR: aggregate functions are not allowed in WHERE
+LINE 4: where sum(distinct a.four + b.four) = b.four)...
+ ^
+-- Test handling of sublinks within outer-level aggregates.
+-- Per bug report from Daniel Grace.
+select
+ (select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1)))
+from tenk1 o;
+ max
+------
+ 9999
+(1 row)
+
+-- Test handling of Params within aggregate arguments in hashed aggregation.
+-- Per bug report from Jeevan Chalke.
+explain (verbose, costs off)
+select s1, s2, sm
+from generate_series(1, 3) s1,
+ lateral (select s2, sum(s1 + s2) sm
+ from generate_series(1, 3) s2 group by s2) ss
+order by 1, 2;
+ QUERY PLAN
+------------------------------------------------------------------
+ Sort
+ Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2)))
+ Sort Key: s1.s1, s2.s2
+ -> Nested Loop
+ Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2)))
+ -> Function Scan on pg_catalog.generate_series s1
+ Output: s1.s1
+ Function Call: generate_series(1, 3)
+ -> HashAggregate
+ Output: s2.s2, sum((s1.s1 + s2.s2))
+ Group Key: s2.s2
+ -> Function Scan on pg_catalog.generate_series s2
+ Output: s2.s2
+ Function Call: generate_series(1, 3)
+(14 rows)
+
+select s1, s2, sm
+from generate_series(1, 3) s1,
+ lateral (select s2, sum(s1 + s2) sm
+ from generate_series(1, 3) s2 group by s2) ss
+order by 1, 2;
+ s1 | s2 | sm
+----+----+----
+ 1 | 1 | 2
+ 1 | 2 | 3
+ 1 | 3 | 4
+ 2 | 1 | 3
+ 2 | 2 | 4
+ 2 | 3 | 5
+ 3 | 1 | 4
+ 3 | 2 | 5
+ 3 | 3 | 6
+(9 rows)
+
+explain (verbose, costs off)
+select array(select sum(x+y) s
+ from generate_series(1,3) y group by y order by s)
+ from generate_series(1,3) x;
+ QUERY PLAN
+-------------------------------------------------------------------
+ Function Scan on pg_catalog.generate_series x
+ Output: (SubPlan 1)
+ Function Call: generate_series(1, 3)
+ SubPlan 1
+ -> Sort
+ Output: (sum((x.x + y.y))), y.y
+ Sort Key: (sum((x.x + y.y)))
+ -> HashAggregate
+ Output: sum((x.x + y.y)), y.y
+ Group Key: y.y
+ -> Function Scan on pg_catalog.generate_series y
+ Output: y.y
+ Function Call: generate_series(1, 3)
+(13 rows)
+
+select array(select sum(x+y) s
+ from generate_series(1,3) y group by y order by s)
+ from generate_series(1,3) x;
+ array
+---------
+ {2,3,4}
+ {3,4,5}
+ {4,5,6}
+(3 rows)
+
+--
+-- test for bitwise integer aggregates
+--
+CREATE TEMPORARY TABLE bitwise_test(
+ i2 INT2,
+ i4 INT4,
+ i8 INT8,
+ i INTEGER,
+ x INT2,
+ y BIT(4)
+);
+-- empty case
+SELECT
+ BIT_AND(i2) AS "?",
+ BIT_OR(i4) AS "?",
+ BIT_XOR(i8) AS "?"
+FROM bitwise_test;
+ ? | ? | ?
+---+---+---
+ | |
+(1 row)
+
+COPY bitwise_test FROM STDIN NULL 'null';
+SELECT
+ BIT_AND(i2) AS "1",
+ BIT_AND(i4) AS "1",
+ BIT_AND(i8) AS "1",
+ BIT_AND(i) AS "?",
+ BIT_AND(x) AS "0",
+ BIT_AND(y) AS "0100",
+ BIT_OR(i2) AS "7",
+ BIT_OR(i4) AS "7",
+ BIT_OR(i8) AS "7",
+ BIT_OR(i) AS "?",
+ BIT_OR(x) AS "7",
+ BIT_OR(y) AS "1101",
+ BIT_XOR(i2) AS "5",
+ BIT_XOR(i4) AS "5",
+ BIT_XOR(i8) AS "5",
+ BIT_XOR(i) AS "?",
+ BIT_XOR(x) AS "7",
+ BIT_XOR(y) AS "1101"
+FROM bitwise_test;
+ 1 | 1 | 1 | ? | 0 | 0100 | 7 | 7 | 7 | ? | 7 | 1101 | 5 | 5 | 5 | ? | 7 | 1101
+---+---+---+---+---+------+---+---+---+---+---+------+---+---+---+---+---+------
+ 1 | 1 | 1 | 1 | 0 | 0100 | 7 | 7 | 7 | 3 | 7 | 1101 | 5 | 5 | 5 | 2 | 7 | 1101
+(1 row)
+
+--
+-- test boolean aggregates
+--
+-- first test all possible transition and final states
+SELECT
+ -- boolean and transitions
+ -- null because strict
+ booland_statefunc(NULL, NULL) IS NULL AS "t",
+ booland_statefunc(TRUE, NULL) IS NULL AS "t",
+ booland_statefunc(FALSE, NULL) IS NULL AS "t",
+ booland_statefunc(NULL, TRUE) IS NULL AS "t",
+ booland_statefunc(NULL, FALSE) IS NULL AS "t",
+ -- and actual computations
+ booland_statefunc(TRUE, TRUE) AS "t",
+ NOT booland_statefunc(TRUE, FALSE) AS "t",
+ NOT booland_statefunc(FALSE, TRUE) AS "t",
+ NOT booland_statefunc(FALSE, FALSE) AS "t";
+ t | t | t | t | t | t | t | t | t
+---+---+---+---+---+---+---+---+---
+ t | t | t | t | t | t | t | t | t
+(1 row)
+
+SELECT
+ -- boolean or transitions
+ -- null because strict
+ boolor_statefunc(NULL, NULL) IS NULL AS "t",
+ boolor_statefunc(TRUE, NULL) IS NULL AS "t",
+ boolor_statefunc(FALSE, NULL) IS NULL AS "t",
+ boolor_statefunc(NULL, TRUE) IS NULL AS "t",
+ boolor_statefunc(NULL, FALSE) IS NULL AS "t",
+ -- actual computations
+ boolor_statefunc(TRUE, TRUE) AS "t",
+ boolor_statefunc(TRUE, FALSE) AS "t",
+ boolor_statefunc(FALSE, TRUE) AS "t",
+ NOT boolor_statefunc(FALSE, FALSE) AS "t";
+ t | t | t | t | t | t | t | t | t
+---+---+---+---+---+---+---+---+---
+ t | t | t | t | t | t | t | t | t
+(1 row)
+
+CREATE TEMPORARY TABLE bool_test(
+ b1 BOOL,
+ b2 BOOL,
+ b3 BOOL,
+ b4 BOOL);
+-- empty case
+SELECT
+ BOOL_AND(b1) AS "n",
+ BOOL_OR(b3) AS "n"
+FROM bool_test;
+ n | n
+---+---
+ |
+(1 row)
+
+COPY bool_test FROM STDIN NULL 'null';
+SELECT
+ BOOL_AND(b1) AS "f",
+ BOOL_AND(b2) AS "t",
+ BOOL_AND(b3) AS "f",
+ BOOL_AND(b4) AS "n",
+ BOOL_AND(NOT b2) AS "f",
+ BOOL_AND(NOT b3) AS "t"
+FROM bool_test;
+ f | t | f | n | f | t
+---+---+---+---+---+---
+ f | t | f | | f | t
+(1 row)
+
+SELECT
+ EVERY(b1) AS "f",
+ EVERY(b2) AS "t",
+ EVERY(b3) AS "f",
+ EVERY(b4) AS "n",
+ EVERY(NOT b2) AS "f",
+ EVERY(NOT b3) AS "t"
+FROM bool_test;
+ f | t | f | n | f | t
+---+---+---+---+---+---
+ f | t | f | | f | t
+(1 row)
+
+SELECT
+ BOOL_OR(b1) AS "t",
+ BOOL_OR(b2) AS "t",
+ BOOL_OR(b3) AS "f",
+ BOOL_OR(b4) AS "n",
+ BOOL_OR(NOT b2) AS "f",
+ BOOL_OR(NOT b3) AS "t"
+FROM bool_test;
+ t | t | f | n | f | t
+---+---+---+---+---+---
+ t | t | f | | f | t
+(1 row)
+
+--
+-- Test cases that should be optimized into indexscans instead of
+-- the generic aggregate implementation.
+--
+-- Basic cases
+explain (costs off)
+ select min(unique1) from tenk1;
+ QUERY PLAN
+------------------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan using tenk1_unique1 on tenk1
+ Index Cond: (unique1 IS NOT NULL)
+(5 rows)
+
+select min(unique1) from tenk1;
+ min
+-----
+ 0
+(1 row)
+
+explain (costs off)
+ select max(unique1) from tenk1;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_unique1 on tenk1
+ Index Cond: (unique1 IS NOT NULL)
+(5 rows)
+
+select max(unique1) from tenk1;
+ max
+------
+ 9999
+(1 row)
+
+explain (costs off)
+ select max(unique1) from tenk1 where unique1 < 42;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_unique1 on tenk1
+ Index Cond: ((unique1 IS NOT NULL) AND (unique1 < 42))
+(5 rows)
+
+select max(unique1) from tenk1 where unique1 < 42;
+ max
+-----
+ 41
+(1 row)
+
+explain (costs off)
+ select max(unique1) from tenk1 where unique1 > 42;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_unique1 on tenk1
+ Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42))
+(5 rows)
+
+select max(unique1) from tenk1 where unique1 > 42;
+ max
+------
+ 9999
+(1 row)
+
+-- the planner may choose a generic aggregate here if parallel query is
+-- enabled, since that plan will be parallel safe and the "optimized"
+-- plan, which has almost identical cost, will not be. we want to test
+-- the optimized plan, so temporarily disable parallel query.
+begin;
+set local max_parallel_workers_per_gather = 0;
+explain (costs off)
+ select max(unique1) from tenk1 where unique1 > 42000;
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_unique1 on tenk1
+ Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42000))
+(5 rows)
+
+select max(unique1) from tenk1 where unique1 > 42000;
+ max
+-----
+
+(1 row)
+
+rollback;
+-- multi-column index (uses tenk1_thous_tenthous)
+explain (costs off)
+ select max(tenthous) from tenk1 where thousand = 33;
+ QUERY PLAN
+----------------------------------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_thous_tenthous on tenk1
+ Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL))
+(5 rows)
+
+select max(tenthous) from tenk1 where thousand = 33;
+ max
+------
+ 9033
+(1 row)
+
+explain (costs off)
+ select min(tenthous) from tenk1 where thousand = 33;
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan using tenk1_thous_tenthous on tenk1
+ Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL))
+(5 rows)
+
+select min(tenthous) from tenk1 where thousand = 33;
+ min
+-----
+ 33
+(1 row)
+
+-- check parameter propagation into an indexscan subquery
+explain (costs off)
+ select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt
+ from int4_tbl;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Seq Scan on int4_tbl
+ SubPlan 2
+ -> Result
+ InitPlan 1 (returns $1)
+ -> Limit
+ -> Index Only Scan using tenk1_unique1 on tenk1
+ Index Cond: ((unique1 IS NOT NULL) AND (unique1 > int4_tbl.f1))
+(7 rows)
+
+select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt
+ from int4_tbl;
+ f1 | gt
+-------------+----
+ 0 | 1
+ 123456 |
+ -123456 | 0
+ 2147483647 |
+ -2147483647 | 0
+(5 rows)
+
+-- check some cases that were handled incorrectly in 8.3.0
+explain (costs off)
+ select distinct max(unique2) from tenk1;
+ QUERY PLAN
+---------------------------------------------------------------------
+ HashAggregate
+ Group Key: $0
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_unique2 on tenk1
+ Index Cond: (unique2 IS NOT NULL)
+ -> Result
+(7 rows)
+
+select distinct max(unique2) from tenk1;
+ max
+------
+ 9999
+(1 row)
+
+explain (costs off)
+ select max(unique2) from tenk1 order by 1;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Sort
+ Sort Key: ($0)
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_unique2 on tenk1
+ Index Cond: (unique2 IS NOT NULL)
+ -> Result
+(7 rows)
+
+select max(unique2) from tenk1 order by 1;
+ max
+------
+ 9999
+(1 row)
+
+explain (costs off)
+ select max(unique2) from tenk1 order by max(unique2);
+ QUERY PLAN
+---------------------------------------------------------------------
+ Sort
+ Sort Key: ($0)
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_unique2 on tenk1
+ Index Cond: (unique2 IS NOT NULL)
+ -> Result
+(7 rows)
+
+select max(unique2) from tenk1 order by max(unique2);
+ max
+------
+ 9999
+(1 row)
+
+explain (costs off)
+ select max(unique2) from tenk1 order by max(unique2)+1;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Sort
+ Sort Key: (($0 + 1))
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_unique2 on tenk1
+ Index Cond: (unique2 IS NOT NULL)
+ -> Result
+(7 rows)
+
+select max(unique2) from tenk1 order by max(unique2)+1;
+ max
+------
+ 9999
+(1 row)
+
+explain (costs off)
+ select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Sort
+ Sort Key: (generate_series(1, 3)) DESC
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Index Only Scan Backward using tenk1_unique2 on tenk1
+ Index Cond: (unique2 IS NOT NULL)
+ -> ProjectSet
+ -> Result
+(8 rows)
+
+select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
+ max | g
+------+---
+ 9999 | 3
+ 9999 | 2
+ 9999 | 1
+(3 rows)
+
+-- interesting corner case: constant gets optimized into a seqscan
+explain (costs off)
+ select max(100) from tenk1;
+ QUERY PLAN
+----------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Result
+ One-Time Filter: (100 IS NOT NULL)
+ -> Seq Scan on tenk1
+(6 rows)
+
+select max(100) from tenk1;
+ max
+-----
+ 100
+(1 row)
+
+-- try it on an inheritance tree
+create table minmaxtest(f1 int);
+create table minmaxtest1() inherits (minmaxtest);
+create table minmaxtest2() inherits (minmaxtest);
+create table minmaxtest3() inherits (minmaxtest);
+create index minmaxtesti on minmaxtest(f1);
+create index minmaxtest1i on minmaxtest1(f1);
+create index minmaxtest2i on minmaxtest2(f1 desc);
+create index minmaxtest3i on minmaxtest3(f1) where f1 is not null;
+insert into minmaxtest values(11), (12);
+insert into minmaxtest1 values(13), (14);
+insert into minmaxtest2 values(15), (16);
+insert into minmaxtest3 values(17), (18);
+explain (costs off)
+ select min(f1), max(f1) from minmaxtest;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Result
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Merge Append
+ Sort Key: minmaxtest.f1
+ -> Index Only Scan using minmaxtesti on minmaxtest minmaxtest_1
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan using minmaxtest1i on minmaxtest1 minmaxtest_2
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan Backward using minmaxtest2i on minmaxtest2 minmaxtest_3
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan using minmaxtest3i on minmaxtest3 minmaxtest_4
+ InitPlan 2 (returns $1)
+ -> Limit
+ -> Merge Append
+ Sort Key: minmaxtest_5.f1 DESC
+ -> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_6
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest_7
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest_8
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest_9
+(23 rows)
+
+select min(f1), max(f1) from minmaxtest;
+ min | max
+-----+-----
+ 11 | 18
+(1 row)
+
+-- DISTINCT doesn't do anything useful here, but it shouldn't fail
+explain (costs off)
+ select distinct min(f1), max(f1) from minmaxtest;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Unique
+ InitPlan 1 (returns $0)
+ -> Limit
+ -> Merge Append
+ Sort Key: minmaxtest.f1
+ -> Index Only Scan using minmaxtesti on minmaxtest minmaxtest_1
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan using minmaxtest1i on minmaxtest1 minmaxtest_2
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan Backward using minmaxtest2i on minmaxtest2 minmaxtest_3
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan using minmaxtest3i on minmaxtest3 minmaxtest_4
+ InitPlan 2 (returns $1)
+ -> Limit
+ -> Merge Append
+ Sort Key: minmaxtest_5.f1 DESC
+ -> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_6
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest_7
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest_8
+ Index Cond: (f1 IS NOT NULL)
+ -> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest_9
+ -> Sort
+ Sort Key: ($0), ($1)
+ -> Result
+(26 rows)
+
+select distinct min(f1), max(f1) from minmaxtest;
+ min | max
+-----+-----
+ 11 | 18
+(1 row)
+
+drop table minmaxtest cascade;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to table minmaxtest1
+drop cascades to table minmaxtest2
+drop cascades to table minmaxtest3
+-- check for correct detection of nested-aggregate errors
+select max(min(unique1)) from tenk1;
+ERROR: aggregate function calls cannot be nested
+LINE 1: select max(min(unique1)) from tenk1;
+ ^
+select (select max(min(unique1)) from int8_tbl) from tenk1;
+ERROR: aggregate function calls cannot be nested
+LINE 1: select (select max(min(unique1)) from int8_tbl) from tenk1;
+ ^
+--
+-- Test removal of redundant GROUP BY columns
+--
+create temp table t1 (a int, b int, c int, d int, primary key (a, b));
+create temp table t2 (x int, y int, z int, primary key (x, y));
+create temp table t3 (a int, b int, c int, primary key(a, b) deferrable);
+-- Non-primary-key columns can be removed from GROUP BY
+explain (costs off) select * from t1 group by a,b,c,d;
+ QUERY PLAN
+----------------------
+ HashAggregate
+ Group Key: a, b
+ -> Seq Scan on t1
+(3 rows)
+
+-- No removal can happen if the complete PK is not present in GROUP BY
+explain (costs off) select a,c from t1 group by a,c,d;
+ QUERY PLAN
+----------------------
+ HashAggregate
+ Group Key: a, c, d
+ -> Seq Scan on t1
+(3 rows)
+
+-- Test removal across multiple relations
+explain (costs off) select *
+from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y
+group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.y,t2.z;
+ QUERY PLAN
+------------------------------------------------------
+ HashAggregate
+ Group Key: t1.a, t1.b, t2.x, t2.y
+ -> Hash Join
+ Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b))
+ -> Seq Scan on t2
+ -> Hash
+ -> Seq Scan on t1
+(7 rows)
+
+-- Test case where t1 can be optimized but not t2
+explain (costs off) select t1.*,t2.x,t2.z
+from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y
+group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.z;
+ QUERY PLAN
+------------------------------------------------------
+ HashAggregate
+ Group Key: t1.a, t1.b, t2.x, t2.z
+ -> Hash Join
+ Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b))
+ -> Seq Scan on t2
+ -> Hash
+ -> Seq Scan on t1
+(7 rows)
+
+-- Cannot optimize when PK is deferrable
+explain (costs off) select * from t3 group by a,b,c;
+ QUERY PLAN
+----------------------
+ HashAggregate
+ Group Key: a, b, c
+ -> Seq Scan on t3
+(3 rows)
+
+create temp table t1c () inherits (t1);
+-- Ensure we don't remove any columns when t1 has a child table
+explain (costs off) select * from t1 group by a,b,c,d;
+ QUERY PLAN
+-------------------------------------
+ HashAggregate
+ Group Key: t1.a, t1.b, t1.c, t1.d
+ -> Append
+ -> Seq Scan on t1 t1_1
+ -> Seq Scan on t1c t1_2
+(5 rows)
+
+-- Okay to remove columns if we're only querying the parent.
+explain (costs off) select * from only t1 group by a,b,c,d;
+ QUERY PLAN
+----------------------
+ HashAggregate
+ Group Key: a, b
+ -> Seq Scan on t1
+(3 rows)
+
+create temp table p_t1 (
+ a int,
+ b int,
+ c int,
+ d int,
+ primary key(a,b)
+) partition by list(a);
+create temp table p_t1_1 partition of p_t1 for values in(1);
+create temp table p_t1_2 partition of p_t1 for values in(2);
+-- Ensure we can remove non-PK columns for partitioned tables.
+explain (costs off) select * from p_t1 group by a,b,c,d;
+ QUERY PLAN
+--------------------------------
+ HashAggregate
+ Group Key: p_t1.a, p_t1.b
+ -> Append
+ -> Seq Scan on p_t1_1
+ -> Seq Scan on p_t1_2
+(5 rows)
+
+drop table t1 cascade;
+NOTICE: drop cascades to table t1c
+drop table t2;
+drop table t3;
+drop table p_t1;
+--
+-- Test GROUP BY matching of join columns that are type-coerced due to USING
+--
+create temp table t1(f1 int, f2 bigint);
+create temp table t2(f1 bigint, f22 bigint);
+select f1 from t1 left join t2 using (f1) group by f1;
+ f1
+----
+(0 rows)
+
+select f1 from t1 left join t2 using (f1) group by t1.f1;
+ f1
+----
+(0 rows)
+
+select t1.f1 from t1 left join t2 using (f1) group by t1.f1;
+ f1
+----
+(0 rows)
+
+-- only this one should fail:
+select t1.f1 from t1 left join t2 using (f1) group by f1;
+ERROR: column "t1.f1" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: select t1.f1 from t1 left join t2 using (f1) group by f1;
+ ^
+drop table t1, t2;
+--
+-- Test combinations of DISTINCT and/or ORDER BY
+--
+select array_agg(a order by b)
+ from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
+ array_agg
+-----------
+ {3,4,2,1}
+(1 row)
+
+select array_agg(a order by a)
+ from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
+ array_agg
+-----------
+ {1,2,3,4}
+(1 row)
+
+select array_agg(a order by a desc)
+ from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
+ array_agg
+-----------
+ {4,3,2,1}
+(1 row)
+
+select array_agg(b order by a desc)
+ from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
+ array_agg
+-----------
+ {2,1,3,4}
+(1 row)
+
+select array_agg(distinct a)
+ from (values (1),(2),(1),(3),(null),(2)) v(a);
+ array_agg
+--------------
+ {1,2,3,NULL}
+(1 row)
+
+select array_agg(distinct a order by a)
+ from (values (1),(2),(1),(3),(null),(2)) v(a);
+ array_agg
+--------------
+ {1,2,3,NULL}
+(1 row)
+
+select array_agg(distinct a order by a desc)
+ from (values (1),(2),(1),(3),(null),(2)) v(a);
+ array_agg
+--------------
+ {NULL,3,2,1}
+(1 row)
+
+select array_agg(distinct a order by a desc nulls last)
+ from (values (1),(2),(1),(3),(null),(2)) v(a);
+ array_agg
+--------------
+ {3,2,1,NULL}
+(1 row)
+
+-- multi-arg aggs, strict/nonstrict, distinct/order by
+select aggfstr(a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+ aggfstr
+---------------------------------------
+ {"(1,3,foo)","(2,2,bar)","(3,1,baz)"}
+(1 row)
+
+select aggfns(a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+ aggfns
+-----------------------------------------------
+ {"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"}
+(1 row)
+
+select aggfstr(distinct a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+ aggfstr
+---------------------------------------
+ {"(1,3,foo)","(2,2,bar)","(3,1,baz)"}
+(1 row)
+
+select aggfns(distinct a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+ aggfns
+-----------------------------------------------
+ {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
+(1 row)
+
+select aggfstr(distinct a,b,c order by b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+ aggfstr
+---------------------------------------
+ {"(3,1,baz)","(2,2,bar)","(1,3,foo)"}
+(1 row)
+
+select aggfns(distinct a,b,c order by b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+ aggfns
+-----------------------------------------------
+ {"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"}
+(1 row)
+
+-- test specific code paths
+select aggfns(distinct a,a,c order by c using ~<~,a)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+ aggfns
+------------------------------------------------
+ {"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"}
+(1 row)
+
+select aggfns(distinct a,a,c order by c using ~<~)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+ aggfns
+------------------------------------------------
+ {"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"}
+(1 row)
+
+select aggfns(distinct a,a,c order by a)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+ aggfns
+------------------------------------------------
+ {"(0,0,)","(1,1,foo)","(2,2,bar)","(3,3,baz)"}
+(1 row)
+
+select aggfns(distinct a,b,c order by a,c using ~<~,b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+ aggfns
+-----------------------------------------------
+ {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
+(1 row)
+
+-- check node I/O via view creation and usage, also deparsing logic
+create view agg_view1 as
+ select aggfns(a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+select * from agg_view1;
+ aggfns
+-----------------------------------------------
+ {"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"}
+(1 row)
+
+select pg_get_viewdef('agg_view1'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------
+ SELECT aggfns(v.a, v.b, v.c) AS aggfns +
+ FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
+(1 row)
+
+create or replace view agg_view1 as
+ select aggfns(distinct a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+select * from agg_view1;
+ aggfns
+-----------------------------------------------
+ {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
+(1 row)
+
+select pg_get_viewdef('agg_view1'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------
+ SELECT aggfns(DISTINCT v.a, v.b, v.c) AS aggfns +
+ FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+
+ generate_series(1, 3) i(i);
+(1 row)
+
+create or replace view agg_view1 as
+ select aggfns(distinct a,b,c order by b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+select * from agg_view1;
+ aggfns
+-----------------------------------------------
+ {"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"}
+(1 row)
+
+select pg_get_viewdef('agg_view1'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------
+ SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.b) AS aggfns +
+ FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+
+ generate_series(1, 3) i(i);
+(1 row)
+
+create or replace view agg_view1 as
+ select aggfns(a,b,c order by b+1)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+select * from agg_view1;
+ aggfns
+-----------------------------------------------
+ {"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"}
+(1 row)
+
+select pg_get_viewdef('agg_view1'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------
+ SELECT aggfns(v.a, v.b, v.c ORDER BY (v.b + 1)) AS aggfns +
+ FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
+(1 row)
+
+create or replace view agg_view1 as
+ select aggfns(a,a,c order by b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+select * from agg_view1;
+ aggfns
+------------------------------------------------
+ {"(3,3,baz)","(2,2,bar)","(1,1,foo)","(0,0,)"}
+(1 row)
+
+select pg_get_viewdef('agg_view1'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------
+ SELECT aggfns(v.a, v.a, v.c ORDER BY v.b) AS aggfns +
+ FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
+(1 row)
+
+create or replace view agg_view1 as
+ select aggfns(a,b,c order by c using ~<~)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+select * from agg_view1;
+ aggfns
+-----------------------------------------------
+ {"(2,2,bar)","(3,1,baz)","(1,3,foo)","(0,,)"}
+(1 row)
+
+select pg_get_viewdef('agg_view1'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------
+ SELECT aggfns(v.a, v.b, v.c ORDER BY v.c USING ~<~ NULLS LAST) AS aggfns +
+ FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c);
+(1 row)
+
+create or replace view agg_view1 as
+ select aggfns(distinct a,b,c order by a,c using ~<~,b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+select * from agg_view1;
+ aggfns
+-----------------------------------------------
+ {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"}
+(1 row)
+
+select pg_get_viewdef('agg_view1'::regclass);
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------
+ SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.a, v.c USING ~<~ NULLS LAST, v.b) AS aggfns +
+ FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+
+ generate_series(1, 2) i(i);
+(1 row)
+
+drop view agg_view1;
+-- incorrect DISTINCT usage errors
+select aggfns(distinct a,b,c order by i)
+ from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
+ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
+LINE 1: select aggfns(distinct a,b,c order by i)
+ ^
+select aggfns(distinct a,b,c order by a,b+1)
+ from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
+ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
+LINE 1: select aggfns(distinct a,b,c order by a,b+1)
+ ^
+select aggfns(distinct a,b,c order by a,b,i,c)
+ from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
+ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
+LINE 1: select aggfns(distinct a,b,c order by a,b,i,c)
+ ^
+select aggfns(distinct a,a,c order by a,b)
+ from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
+ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
+LINE 1: select aggfns(distinct a,a,c order by a,b)
+ ^
+-- string_agg tests
+select string_agg(a,',') from (values('aaaa'),('bbbb'),('cccc')) g(a);
+ string_agg
+----------------
+ aaaa,bbbb,cccc
+(1 row)
+
+select string_agg(a,',') from (values('aaaa'),(null),('bbbb'),('cccc')) g(a);
+ string_agg
+----------------
+ aaaa,bbbb,cccc
+(1 row)
+
+select string_agg(a,'AB') from (values(null),(null),('bbbb'),('cccc')) g(a);
+ string_agg
+------------
+ bbbbABcccc
+(1 row)
+
+select string_agg(a,',') from (values(null),(null)) g(a);
+ string_agg
+------------
+
+(1 row)
+
+-- check some implicit casting cases, as per bug #5564
+select string_agg(distinct f1, ',' order by f1) from varchar_tbl; -- ok
+ string_agg
+------------
+ a,ab,abcd
+(1 row)
+
+select string_agg(distinct f1::text, ',' order by f1) from varchar_tbl; -- not ok
+ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
+LINE 1: select string_agg(distinct f1::text, ',' order by f1) from v...
+ ^
+select string_agg(distinct f1, ',' order by f1::text) from varchar_tbl; -- not ok
+ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list
+LINE 1: select string_agg(distinct f1, ',' order by f1::text) from v...
+ ^
+select string_agg(distinct f1::text, ',' order by f1::text) from varchar_tbl; -- ok
+ string_agg
+------------
+ a,ab,abcd
+(1 row)
+
+-- string_agg bytea tests
+create table bytea_test_table(v bytea);
+select string_agg(v, '') from bytea_test_table;
+ string_agg
+------------
+
+(1 row)
+
+insert into bytea_test_table values(decode('ff','hex'));
+select string_agg(v, '') from bytea_test_table;
+ string_agg
+------------
+ \xff
+(1 row)
+
+insert into bytea_test_table values(decode('aa','hex'));
+select string_agg(v, '') from bytea_test_table;
+ string_agg
+------------
+ \xffaa
+(1 row)
+
+select string_agg(v, NULL) from bytea_test_table;
+ string_agg
+------------
+ \xffaa
+(1 row)
+
+select string_agg(v, decode('ee', 'hex')) from bytea_test_table;
+ string_agg
+------------
+ \xffeeaa
+(1 row)
+
+drop table bytea_test_table;
+-- FILTER tests
+select min(unique1) filter (where unique1 > 100) from tenk1;
+ min
+-----
+ 101
+(1 row)
+
+select sum(1/ten) filter (where ten > 0) from tenk1;
+ sum
+------
+ 1000
+(1 row)
+
+select ten, sum(distinct four) filter (where four::text ~ '123') from onek a
+group by ten;
+ ten | sum
+-----+-----
+ 0 |
+ 1 |
+ 2 |
+ 3 |
+ 4 |
+ 5 |
+ 6 |
+ 7 |
+ 8 |
+ 9 |
+(10 rows)
+
+select ten, sum(distinct four) filter (where four > 10) from onek a
+group by ten
+having exists (select 1 from onek b where sum(distinct a.four) = b.four);
+ ten | sum
+-----+-----
+ 0 |
+ 2 |
+ 4 |
+ 6 |
+ 8 |
+(5 rows)
+
+select max(foo COLLATE "C") filter (where (bar collate "POSIX") > '0')
+from (values ('a', 'b')) AS v(foo,bar);
+ max
+-----
+ a
+(1 row)
+
+-- outer reference in FILTER (PostgreSQL extension)
+select (select count(*)
+ from (values (1)) t0(inner_c))
+from (values (2),(3)) t1(outer_c); -- inner query is aggregation query
+ count
+-------
+ 1
+ 1
+(2 rows)
+
+select (select count(*) filter (where outer_c <> 0)
+ from (values (1)) t0(inner_c))
+from (values (2),(3)) t1(outer_c); -- outer query is aggregation query
+ count
+-------
+ 2
+(1 row)
+
+select (select count(inner_c) filter (where outer_c <> 0)
+ from (values (1)) t0(inner_c))
+from (values (2),(3)) t1(outer_c); -- inner query is aggregation query
+ count
+-------
+ 1
+ 1
+(2 rows)
+
+select
+ (select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1))
+ filter (where o.unique1 < 10))
+from tenk1 o; -- outer query is aggregation query
+ max
+------
+ 9998
+(1 row)
+
+-- subquery in FILTER clause (PostgreSQL extension)
+select sum(unique1) FILTER (WHERE
+ unique1 IN (SELECT unique1 FROM onek where unique1 < 100)) FROM tenk1;
+ sum
+------
+ 4950
+(1 row)
+
+-- exercise lots of aggregate parts with FILTER
+select aggfns(distinct a,b,c order by a,c using ~<~,b) filter (where a > 1)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+ aggfns
+---------------------------
+ {"(2,2,bar)","(3,1,baz)"}
+(1 row)
+
+-- check handling of bare boolean Var in FILTER
+select max(0) filter (where b1) from bool_test;
+ max
+-----
+ 0
+(1 row)
+
+select (select max(0) filter (where b1)) from bool_test;
+ max
+-----
+ 0
+(1 row)
+
+-- check for correct detection of nested-aggregate errors in FILTER
+select max(unique1) filter (where sum(ten) > 0) from tenk1;
+ERROR: aggregate functions are not allowed in FILTER
+LINE 1: select max(unique1) filter (where sum(ten) > 0) from tenk1;
+ ^
+select (select max(unique1) filter (where sum(ten) > 0) from int8_tbl) from tenk1;
+ERROR: aggregate function calls cannot be nested
+LINE 1: select (select max(unique1) filter (where sum(ten) > 0) from...
+ ^
+select max(unique1) filter (where bool_or(ten > 0)) from tenk1;
+ERROR: aggregate functions are not allowed in FILTER
+LINE 1: select max(unique1) filter (where bool_or(ten > 0)) from ten...
+ ^
+select (select max(unique1) filter (where bool_or(ten > 0)) from int8_tbl) from tenk1;
+ERROR: aggregate function calls cannot be nested
+LINE 1: select (select max(unique1) filter (where bool_or(ten > 0)) ...
+ ^
+-- ordered-set aggregates
+select p, percentile_cont(p) within group (order by x::float8)
+from generate_series(1,5) x,
+ (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
+group by p order by p;
+ p | percentile_cont
+------+-----------------
+ 0 | 1
+ 0.1 | 1.4
+ 0.25 | 2
+ 0.4 | 2.6
+ 0.5 | 3
+ 0.6 | 3.4
+ 0.75 | 4
+ 0.9 | 4.6
+ 1 | 5
+(9 rows)
+
+select p, percentile_cont(p order by p) within group (order by x) -- error
+from generate_series(1,5) x,
+ (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
+group by p order by p;
+ERROR: cannot use multiple ORDER BY clauses with WITHIN GROUP
+LINE 1: select p, percentile_cont(p order by p) within group (order ...
+ ^
+select p, sum() within group (order by x::float8) -- error
+from generate_series(1,5) x,
+ (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
+group by p order by p;
+ERROR: sum is not an ordered-set aggregate, so it cannot have WITHIN GROUP
+LINE 1: select p, sum() within group (order by x::float8)
+ ^
+select p, percentile_cont(p,p) -- error
+from generate_series(1,5) x,
+ (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
+group by p order by p;
+ERROR: WITHIN GROUP is required for ordered-set aggregate percentile_cont
+LINE 1: select p, percentile_cont(p,p)
+ ^
+select percentile_cont(0.5) within group (order by b) from aggtest;
+ percentile_cont
+------------------
+ 53.4485001564026
+(1 row)
+
+select percentile_cont(0.5) within group (order by b), sum(b) from aggtest;
+ percentile_cont | sum
+------------------+---------
+ 53.4485001564026 | 431.773
+(1 row)
+
+select percentile_cont(0.5) within group (order by thousand) from tenk1;
+ percentile_cont
+-----------------
+ 499.5
+(1 row)
+
+select percentile_disc(0.5) within group (order by thousand) from tenk1;
+ percentile_disc
+-----------------
+ 499
+(1 row)
+
+select rank(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
+ rank
+------
+ 5
+(1 row)
+
+select cume_dist(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
+ cume_dist
+-----------
+ 0.875
+(1 row)
+
+select percent_rank(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4),(5)) v(x);
+ percent_rank
+--------------
+ 0.5
+(1 row)
+
+select dense_rank(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
+ dense_rank
+------------
+ 3
+(1 row)
+
+select percentile_disc(array[0,0.1,0.25,0.5,0.75,0.9,1]) within group (order by thousand)
+from tenk1;
+ percentile_disc
+----------------------------
+ {0,99,249,499,749,899,999}
+(1 row)
+
+select percentile_cont(array[0,0.25,0.5,0.75,1]) within group (order by thousand)
+from tenk1;
+ percentile_cont
+-----------------------------
+ {0,249.75,499.5,749.25,999}
+(1 row)
+
+select percentile_disc(array[[null,1,0.5],[0.75,0.25,null]]) within group (order by thousand)
+from tenk1;
+ percentile_disc
+---------------------------------
+ {{NULL,999,499},{749,249,NULL}}
+(1 row)
+
+select percentile_cont(array[0,1,0.25,0.75,0.5,1,0.3,0.32,0.35,0.38,0.4]) within group (order by x)
+from generate_series(1,6) x;
+ percentile_cont
+------------------------------------------
+ {1,6,2.25,4.75,3.5,6,2.5,2.6,2.75,2.9,3}
+(1 row)
+
+select ten, mode() within group (order by string4) from tenk1 group by ten;
+ ten | mode
+-----+--------
+ 0 | HHHHxx
+ 1 | OOOOxx
+ 2 | VVVVxx
+ 3 | OOOOxx
+ 4 | HHHHxx
+ 5 | HHHHxx
+ 6 | OOOOxx
+ 7 | AAAAxx
+ 8 | VVVVxx
+ 9 | VVVVxx
+(10 rows)
+
+select percentile_disc(array[0.25,0.5,0.75]) within group (order by x)
+from unnest('{fred,jim,fred,jack,jill,fred,jill,jim,jim,sheila,jim,sheila}'::text[]) u(x);
+ percentile_disc
+-----------------
+ {fred,jill,jim}
+(1 row)
+
+-- check collation propagates up in suitable cases:
+select pg_collation_for(percentile_disc(1) within group (order by x collate "POSIX"))
+ from (values ('fred'),('jim')) v(x);
+ pg_collation_for
+------------------
+ "POSIX"
+(1 row)
+
+-- ordered-set aggs created with CREATE AGGREGATE
+select test_rank(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
+ test_rank
+-----------
+ 5
+(1 row)
+
+select test_percentile_disc(0.5) within group (order by thousand) from tenk1;
+ test_percentile_disc
+----------------------
+ 499
+(1 row)
+
+-- ordered-set aggs can't use ungrouped vars in direct args:
+select rank(x) within group (order by x) from generate_series(1,5) x;
+ERROR: column "x.x" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: select rank(x) within group (order by x) from generate_serie...
+ ^
+DETAIL: Direct arguments of an ordered-set aggregate must use only grouped columns.
+-- outer-level agg can't use a grouped arg of a lower level, either:
+select array(select percentile_disc(a) within group (order by x)
+ from (values (0.3),(0.7)) v(a) group by a)
+ from generate_series(1,5) g(x);
+ERROR: outer-level aggregate cannot contain a lower-level variable in its direct arguments
+LINE 1: select array(select percentile_disc(a) within group (order b...
+ ^
+-- agg in the direct args is a grouping violation, too:
+select rank(sum(x)) within group (order by x) from generate_series(1,5) x;
+ERROR: aggregate function calls cannot be nested
+LINE 1: select rank(sum(x)) within group (order by x) from generate_...
+ ^
+-- hypothetical-set type unification and argument-count failures:
+select rank(3) within group (order by x) from (values ('fred'),('jim')) v(x);
+ERROR: WITHIN GROUP types text and integer cannot be matched
+LINE 1: select rank(3) within group (order by x) from (values ('fred...
+ ^
+select rank(3) within group (order by stringu1,stringu2) from tenk1;
+ERROR: function rank(integer, name, name) does not exist
+LINE 1: select rank(3) within group (order by stringu1,stringu2) fro...
+ ^
+HINT: To use the hypothetical-set aggregate rank, the number of hypothetical direct arguments (here 1) must match the number of ordering columns (here 2).
+select rank('fred') within group (order by x) from generate_series(1,5) x;
+ERROR: invalid input syntax for type integer: "fred"
+LINE 1: select rank('fred') within group (order by x) from generate_...
+ ^
+select rank('adam'::text collate "C") within group (order by x collate "POSIX")
+ from (values ('fred'),('jim')) v(x);
+ERROR: collation mismatch between explicit collations "C" and "POSIX"
+LINE 1: ...adam'::text collate "C") within group (order by x collate "P...
+ ^
+-- hypothetical-set type unification successes:
+select rank('adam'::varchar) within group (order by x) from (values ('fred'),('jim')) v(x);
+ rank
+------
+ 1
+(1 row)
+
+select rank('3') within group (order by x) from generate_series(1,5) x;
+ rank
+------
+ 3
+(1 row)
+
+-- divide by zero check
+select percent_rank(0) within group (order by x) from generate_series(1,0) x;
+ percent_rank
+--------------
+ 0
+(1 row)
+
+-- deparse and multiple features:
+create view aggordview1 as
+select ten,
+ percentile_disc(0.5) within group (order by thousand) as p50,
+ percentile_disc(0.5) within group (order by thousand) filter (where hundred=1) as px,
+ rank(5,'AZZZZ',50) within group (order by hundred, string4 desc, hundred)
+ from tenk1
+ group by ten order by ten;
+select pg_get_viewdef('aggordview1');
+ pg_get_viewdef
+-------------------------------------------------------------------------------------------------------------------------------
+ SELECT tenk1.ten, +
+ percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY tenk1.thousand) AS p50, +
+ percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY tenk1.thousand) FILTER (WHERE (tenk1.hundred = 1)) AS px,+
+ rank(5, 'AZZZZ'::name, 50) WITHIN GROUP (ORDER BY tenk1.hundred, tenk1.string4 DESC, tenk1.hundred) AS rank +
+ FROM tenk1 +
+ GROUP BY tenk1.ten +
+ ORDER BY tenk1.ten;
+(1 row)
+
+select * from aggordview1 order by ten;
+ ten | p50 | px | rank
+-----+-----+-----+------
+ 0 | 490 | | 101
+ 1 | 491 | 401 | 101
+ 2 | 492 | | 101
+ 3 | 493 | | 101
+ 4 | 494 | | 101
+ 5 | 495 | | 67
+ 6 | 496 | | 1
+ 7 | 497 | | 1
+ 8 | 498 | | 1
+ 9 | 499 | | 1
+(10 rows)
+
+drop view aggordview1;
+-- variadic aggregates
+select least_agg(q1,q2) from int8_tbl;
+ least_agg
+-------------------
+ -4567890123456789
+(1 row)
+
+select least_agg(variadic array[q1,q2]) from int8_tbl;
+ least_agg
+-------------------
+ -4567890123456789
+(1 row)
+
+select cleast_agg(q1,q2) from int8_tbl;
+ cleast_agg
+-------------------
+ -4567890123456789
+(1 row)
+
+select cleast_agg(4.5,f1) from int4_tbl;
+ cleast_agg
+-------------
+ -2147483647
+(1 row)
+
+select cleast_agg(variadic array[4.5,f1]) from int4_tbl;
+ cleast_agg
+-------------
+ -2147483647
+(1 row)
+
+select pg_typeof(cleast_agg(variadic array[4.5,f1])) from int4_tbl;
+ pg_typeof
+-----------
+ numeric
+(1 row)
+
+-- test aggregates with common transition functions share the same states
+begin work;
+create type avg_state as (total bigint, count bigint);
+create or replace function avg_transfn(state avg_state, n int) returns avg_state as
+$$
+declare new_state avg_state;
+begin
+ raise notice 'avg_transfn called with %', n;
+ if state is null then
+ if n is not null then
+ new_state.total := n;
+ new_state.count := 1;
+ return new_state;
+ end if;
+ return null;
+ elsif n is not null then
+ state.total := state.total + n;
+ state.count := state.count + 1;
+ return state;
+ end if;
+
+ return null;
+end
+$$ language plpgsql;
+create function avg_finalfn(state avg_state) returns int4 as
+$$
+begin
+ if state is null then
+ return NULL;
+ else
+ return state.total / state.count;
+ end if;
+end
+$$ language plpgsql;
+create function sum_finalfn(state avg_state) returns int4 as
+$$
+begin
+ if state is null then
+ return NULL;
+ else
+ return state.total;
+ end if;
+end
+$$ language plpgsql;
+create aggregate my_avg(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn
+);
+create aggregate my_sum(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = sum_finalfn
+);
+-- aggregate state should be shared as aggs are the same.
+select my_avg(one),my_avg(one) from (values(1),(3)) t(one);
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 3
+ my_avg | my_avg
+--------+--------
+ 2 | 2
+(1 row)
+
+-- aggregate state should be shared as transfn is the same for both aggs.
+select my_avg(one),my_sum(one) from (values(1),(3)) t(one);
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 3
+ my_avg | my_sum
+--------+--------
+ 2 | 4
+(1 row)
+
+-- same as previous one, but with DISTINCT, which requires sorting the input.
+select my_avg(distinct one),my_sum(distinct one) from (values(1),(3),(1)) t(one);
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 3
+ my_avg | my_sum
+--------+--------
+ 2 | 4
+(1 row)
+
+-- shouldn't share states due to the distinctness not matching.
+select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one);
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 3
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 3
+ my_avg | my_sum
+--------+--------
+ 2 | 4
+(1 row)
+
+-- shouldn't share states due to the filter clause not matching.
+select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one);
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 3
+NOTICE: avg_transfn called with 3
+ my_avg | my_sum
+--------+--------
+ 3 | 4
+(1 row)
+
+-- this should not share the state due to different input columns.
+select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two);
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 2
+NOTICE: avg_transfn called with 3
+NOTICE: avg_transfn called with 4
+ my_avg | my_sum
+--------+--------
+ 2 | 6
+(1 row)
+
+-- exercise cases where OSAs share state
+select
+ percentile_cont(0.5) within group (order by a),
+ percentile_disc(0.5) within group (order by a)
+from (values(1::float8),(3),(5),(7)) t(a);
+ percentile_cont | percentile_disc
+-----------------+-----------------
+ 4 | 3
+(1 row)
+
+select
+ percentile_cont(0.25) within group (order by a),
+ percentile_disc(0.5) within group (order by a)
+from (values(1::float8),(3),(5),(7)) t(a);
+ percentile_cont | percentile_disc
+-----------------+-----------------
+ 2.5 | 3
+(1 row)
+
+-- these can't share state currently
+select
+ rank(4) within group (order by a),
+ dense_rank(4) within group (order by a)
+from (values(1),(3),(5),(7)) t(a);
+ rank | dense_rank
+------+------------
+ 3 | 3
+(1 row)
+
+-- test that aggs with the same sfunc and initcond share the same agg state
+create aggregate my_sum_init(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = sum_finalfn,
+ initcond = '(10,0)'
+);
+create aggregate my_avg_init(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn,
+ initcond = '(10,0)'
+);
+create aggregate my_avg_init2(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn,
+ initcond = '(4,0)'
+);
+-- state should be shared if INITCONDs are matching
+select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one);
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 3
+ my_sum_init | my_avg_init
+-------------+-------------
+ 14 | 7
+(1 row)
+
+-- Varying INITCONDs should cause the states not to be shared.
+select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one);
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 1
+NOTICE: avg_transfn called with 3
+NOTICE: avg_transfn called with 3
+ my_sum_init | my_avg_init2
+-------------+--------------
+ 14 | 4
+(1 row)
+
+rollback;
+-- test aggregate state sharing to ensure it works if one aggregate has a
+-- finalfn and the other one has none.
+begin work;
+create or replace function sum_transfn(state int4, n int4) returns int4 as
+$$
+declare new_state int4;
+begin
+ raise notice 'sum_transfn called with %', n;
+ if state is null then
+ if n is not null then
+ new_state := n;
+ return new_state;
+ end if;
+ return null;
+ elsif n is not null then
+ state := state + n;
+ return state;
+ end if;
+
+ return null;
+end
+$$ language plpgsql;
+create function halfsum_finalfn(state int4) returns int4 as
+$$
+begin
+ if state is null then
+ return NULL;
+ else
+ return state / 2;
+ end if;
+end
+$$ language plpgsql;
+create aggregate my_sum(int4)
+(
+ stype = int4,
+ sfunc = sum_transfn
+);
+create aggregate my_half_sum(int4)
+(
+ stype = int4,
+ sfunc = sum_transfn,
+ finalfunc = halfsum_finalfn
+);
+-- Agg state should be shared even though my_sum has no finalfn
+select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one);
+NOTICE: sum_transfn called with 1
+NOTICE: sum_transfn called with 2
+NOTICE: sum_transfn called with 3
+NOTICE: sum_transfn called with 4
+ my_sum | my_half_sum
+--------+-------------
+ 10 | 5
+(1 row)
+
+rollback;
+-- test that the aggregate transition logic correctly handles
+-- transition / combine functions returning NULL
+-- First test the case of a normal transition function returning NULL
+BEGIN;
+CREATE FUNCTION balkifnull(int8, int4)
+RETURNS int8
+STRICT
+LANGUAGE plpgsql AS $$
+BEGIN
+ IF $1 IS NULL THEN
+ RAISE 'erroneously called with NULL argument';
+ END IF;
+ RETURN NULL;
+END$$;
+CREATE AGGREGATE balk(int4)
+(
+ SFUNC = balkifnull(int8, int4),
+ STYPE = int8,
+ PARALLEL = SAFE,
+ INITCOND = '0'
+);
+SELECT balk(hundred) FROM tenk1;
+ balk
+------
+
+(1 row)
+
+ROLLBACK;
+-- Secondly test the case of a parallel aggregate combiner function
+-- returning NULL. For that use normal transition function, but a
+-- combiner function returning NULL.
+BEGIN;
+CREATE FUNCTION balkifnull(int8, int8)
+RETURNS int8
+PARALLEL SAFE
+STRICT
+LANGUAGE plpgsql AS $$
+BEGIN
+ IF $1 IS NULL THEN
+ RAISE 'erroneously called with NULL argument';
+ END IF;
+ RETURN NULL;
+END$$;
+CREATE AGGREGATE balk(int4)
+(
+ SFUNC = int4_sum(int8, int4),
+ STYPE = int8,
+ COMBINEFUNC = balkifnull(int8, int8),
+ PARALLEL = SAFE,
+ INITCOND = '0'
+);
+-- force use of parallelism
+ALTER TABLE tenk1 set (parallel_workers = 4);
+SET LOCAL parallel_setup_cost=0;
+SET LOCAL max_parallel_workers_per_gather=4;
+EXPLAIN (COSTS OFF) SELECT balk(hundred) FROM tenk1;
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Finalize Aggregate
+ -> Gather
+ Workers Planned: 4
+ -> Partial Aggregate
+ -> Parallel Index Only Scan using tenk1_hundred on tenk1
+(5 rows)
+
+SELECT balk(hundred) FROM tenk1;
+ balk
+------
+
+(1 row)
+
+ROLLBACK;
+-- test coverage for aggregate combine/serial/deserial functions
+BEGIN;
+SET parallel_setup_cost = 0;
+SET parallel_tuple_cost = 0;
+SET min_parallel_table_scan_size = 0;
+SET max_parallel_workers_per_gather = 4;
+SET parallel_leader_participation = off;
+SET enable_indexonlyscan = off;
+-- variance(int4) covers numeric_poly_combine
+-- sum(int8) covers int8_avg_combine
+-- regr_count(float8, float8) covers int8inc_float8_float8 and aggregates with > 1 arg
+EXPLAIN (COSTS OFF, VERBOSE)
+SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, unique1::float8)
+FROM (SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1) u;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Finalize Aggregate
+ Output: variance(tenk1.unique1), sum((tenk1.unique1)::bigint), regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision)
+ -> Gather
+ Output: (PARTIAL variance(tenk1.unique1)), (PARTIAL sum((tenk1.unique1)::bigint)), (PARTIAL regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision))
+ Workers Planned: 4
+ -> Partial Aggregate
+ Output: PARTIAL variance(tenk1.unique1), PARTIAL sum((tenk1.unique1)::bigint), PARTIAL regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision)
+ -> Parallel Append
+ -> Parallel Seq Scan on public.tenk1
+ Output: tenk1.unique1
+ -> Parallel Seq Scan on public.tenk1 tenk1_1
+ Output: tenk1_1.unique1
+ -> Parallel Seq Scan on public.tenk1 tenk1_2
+ Output: tenk1_2.unique1
+ -> Parallel Seq Scan on public.tenk1 tenk1_3
+ Output: tenk1_3.unique1
+(16 rows)
+
+SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, unique1::float8)
+FROM (SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1) u;
+ variance | sum | regr_count
+----------------------+-----------+------------
+ 8333541.588539713493 | 199980000 | 40000
+(1 row)
+
+-- variance(int8) covers numeric_combine
+-- avg(numeric) covers numeric_avg_combine
+EXPLAIN (COSTS OFF, VERBOSE)
+SELECT variance(unique1::int8), avg(unique1::numeric)
+FROM (SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1) u;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------
+ Finalize Aggregate
+ Output: variance((tenk1.unique1)::bigint), avg((tenk1.unique1)::numeric)
+ -> Gather
+ Output: (PARTIAL variance((tenk1.unique1)::bigint)), (PARTIAL avg((tenk1.unique1)::numeric))
+ Workers Planned: 4
+ -> Partial Aggregate
+ Output: PARTIAL variance((tenk1.unique1)::bigint), PARTIAL avg((tenk1.unique1)::numeric)
+ -> Parallel Append
+ -> Parallel Seq Scan on public.tenk1
+ Output: tenk1.unique1
+ -> Parallel Seq Scan on public.tenk1 tenk1_1
+ Output: tenk1_1.unique1
+ -> Parallel Seq Scan on public.tenk1 tenk1_2
+ Output: tenk1_2.unique1
+ -> Parallel Seq Scan on public.tenk1 tenk1_3
+ Output: tenk1_3.unique1
+(16 rows)
+
+SELECT variance(unique1::int8), avg(unique1::numeric)
+FROM (SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1) u;
+ variance | avg
+----------------------+-----------------------
+ 8333541.588539713493 | 4999.5000000000000000
+(1 row)
+
+ROLLBACK;
+-- test coverage for dense_rank
+SELECT dense_rank(x) WITHIN GROUP (ORDER BY x) FROM (VALUES (1),(1),(2),(2),(3),(3)) v(x) GROUP BY (x) ORDER BY 1;
+ dense_rank
+------------
+ 1
+ 1
+ 1
+(3 rows)
+
+-- Ensure that the STRICT checks for aggregates does not take NULLness
+-- of ORDER BY columns into account. See bug report around
+-- 2a505161-2727-2473-7c46-591ed108ac52@email.cz
+SELECT min(x ORDER BY y) FROM (VALUES(1, NULL)) AS d(x,y);
+ min
+-----
+ 1
+(1 row)
+
+SELECT min(x ORDER BY y) FROM (VALUES(1, 2)) AS d(x,y);
+ min
+-----
+ 1
+(1 row)
+
+-- check collation-sensitive matching between grouping expressions
+select v||'a', case v||'a' when 'aa' then 1 else 0 end, count(*)
+ from unnest(array['a','b']) u(v)
+ group by v||'a' order by 1;
+ ?column? | case | count
+----------+------+-------
+ aa | 1 | 1
+ ba | 0 | 1
+(2 rows)
+
+select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
+ from unnest(array['a','b']) u(v)
+ group by v||'a' order by 1;
+ ?column? | case | count
+----------+------+-------
+ aa | 1 | 1
+ ba | 0 | 1
+(2 rows)
+
+-- Make sure that generation of HashAggregate for uniqification purposes
+-- does not lead to array overflow due to unexpected duplicate hash keys
+-- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
+set enable_memoize to off;
+explain (costs off)
+ select 1 from tenk1
+ where (hundred, thousand) in (select twothousand, twothousand from onek);
+ QUERY PLAN
+-------------------------------------------------------------
+ Hash Join
+ Hash Cond: (tenk1.hundred = onek.twothousand)
+ -> Seq Scan on tenk1
+ Filter: (hundred = thousand)
+ -> Hash
+ -> HashAggregate
+ Group Key: onek.twothousand, onek.twothousand
+ -> Seq Scan on onek
+(8 rows)
+
+reset enable_memoize;
+--
+-- Hash Aggregation Spill tests
+--
+set enable_sort=false;
+set work_mem='64kB';
+select unique1, count(*), sum(twothousand) from tenk1
+group by unique1
+having sum(fivethous) > 4975
+order by sum(twothousand);
+ unique1 | count | sum
+---------+-------+------
+ 4976 | 1 | 976
+ 4977 | 1 | 977
+ 4978 | 1 | 978
+ 4979 | 1 | 979
+ 4980 | 1 | 980
+ 4981 | 1 | 981
+ 4982 | 1 | 982
+ 4983 | 1 | 983
+ 4984 | 1 | 984
+ 4985 | 1 | 985
+ 4986 | 1 | 986
+ 4987 | 1 | 987
+ 4988 | 1 | 988
+ 4989 | 1 | 989
+ 4990 | 1 | 990
+ 4991 | 1 | 991
+ 4992 | 1 | 992
+ 4993 | 1 | 993
+ 4994 | 1 | 994
+ 4995 | 1 | 995
+ 4996 | 1 | 996
+ 4997 | 1 | 997
+ 4998 | 1 | 998
+ 4999 | 1 | 999
+ 9976 | 1 | 1976
+ 9977 | 1 | 1977
+ 9978 | 1 | 1978
+ 9979 | 1 | 1979
+ 9980 | 1 | 1980
+ 9981 | 1 | 1981
+ 9982 | 1 | 1982
+ 9983 | 1 | 1983
+ 9984 | 1 | 1984
+ 9985 | 1 | 1985
+ 9986 | 1 | 1986
+ 9987 | 1 | 1987
+ 9988 | 1 | 1988
+ 9989 | 1 | 1989
+ 9990 | 1 | 1990
+ 9991 | 1 | 1991
+ 9992 | 1 | 1992
+ 9993 | 1 | 1993
+ 9994 | 1 | 1994
+ 9995 | 1 | 1995
+ 9996 | 1 | 1996
+ 9997 | 1 | 1997
+ 9998 | 1 | 1998
+ 9999 | 1 | 1999
+(48 rows)
+
+set work_mem to default;
+set enable_sort to default;
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+set work_mem='64kB';
+create table agg_data_2k as
+select g from generate_series(0, 1999) g;
+analyze agg_data_2k;
+create table agg_data_20k as
+select g from generate_series(0, 19999) g;
+analyze agg_data_20k;
+-- Produce results with sorting.
+set enable_hashagg = false;
+set jit_above_cost = 0;
+explain (costs off)
+select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from agg_data_20k group by g%10000;
+ QUERY PLAN
+--------------------------------------
+ GroupAggregate
+ Group Key: ((g % 10000))
+ -> Sort
+ Sort Key: ((g % 10000))
+ -> Seq Scan on agg_data_20k
+(5 rows)
+
+create table agg_group_1 as
+select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from agg_data_20k group by g%10000;
+create table agg_group_2 as
+select * from
+ (values (100), (300), (500)) as r(a),
+ lateral (
+ select (g/2)::numeric as c1,
+ array_agg(g::numeric) as c2,
+ count(*) as c3
+ from agg_data_2k
+ where g < r.a
+ group by g/2) as s;
+set jit_above_cost to default;
+create table agg_group_3 as
+select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
+ from agg_data_2k group by g/2;
+create table agg_group_4 as
+select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
+ from agg_data_2k group by g/2;
+-- Produce results with hash aggregation
+set enable_hashagg = true;
+set enable_sort = false;
+set jit_above_cost = 0;
+explain (costs off)
+select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from agg_data_20k group by g%10000;
+ QUERY PLAN
+--------------------------------
+ HashAggregate
+ Group Key: (g % 10000)
+ -> Seq Scan on agg_data_20k
+(3 rows)
+
+create table agg_hash_1 as
+select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from agg_data_20k group by g%10000;
+create table agg_hash_2 as
+select * from
+ (values (100), (300), (500)) as r(a),
+ lateral (
+ select (g/2)::numeric as c1,
+ array_agg(g::numeric) as c2,
+ count(*) as c3
+ from agg_data_2k
+ where g < r.a
+ group by g/2) as s;
+set jit_above_cost to default;
+create table agg_hash_3 as
+select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
+ from agg_data_2k group by g/2;
+create table agg_hash_4 as
+select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
+ from agg_data_2k group by g/2;
+set enable_sort = true;
+set work_mem to default;
+-- Compare group aggregation results to hash aggregation results
+(select * from agg_hash_1 except select * from agg_group_1)
+ union all
+(select * from agg_group_1 except select * from agg_hash_1);
+ c1 | c2 | c3
+----+----+----
+(0 rows)
+
+(select * from agg_hash_2 except select * from agg_group_2)
+ union all
+(select * from agg_group_2 except select * from agg_hash_2);
+ a | c1 | c2 | c3
+---+----+----+----
+(0 rows)
+
+(select * from agg_hash_3 except select * from agg_group_3)
+ union all
+(select * from agg_group_3 except select * from agg_hash_3);
+ c1 | c2 | c3
+----+----+----
+(0 rows)
+
+(select * from agg_hash_4 except select * from agg_group_4)
+ union all
+(select * from agg_group_4 except select * from agg_hash_4);
+ c1 | c2 | c3
+----+----+----
+(0 rows)
+
+drop table agg_group_1;
+drop table agg_group_2;
+drop table agg_group_3;
+drop table agg_group_4;
+drop table agg_hash_1;
+drop table agg_hash_2;
+drop table agg_hash_3;
+drop table agg_hash_4;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/aggregates.sql b/ydb/library/yql/tests/postgresql/original/cases/aggregates.sql
new file mode 100644
index 0000000000..7cf86465e9
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/aggregates.sql
@@ -0,0 +1,1247 @@
+--
+-- AGGREGATES
+--
+
+-- avoid bit-exact output here because operations may not be bit-exact.
+SET extra_float_digits = 0;
+
+SELECT avg(four) AS avg_1 FROM onek;
+
+SELECT avg(a) AS avg_32 FROM aggtest WHERE a < 100;
+
+-- In 7.1, avg(float4) is computed using float8 arithmetic.
+-- Round the result to 3 digits to avoid platform-specific results.
+
+SELECT avg(b)::numeric(10,3) AS avg_107_943 FROM aggtest;
+
+SELECT avg(gpa) AS avg_3_4 FROM ONLY student;
+
+
+SELECT sum(four) AS sum_1500 FROM onek;
+SELECT sum(a) AS sum_198 FROM aggtest;
+SELECT sum(b) AS avg_431_773 FROM aggtest;
+SELECT sum(gpa) AS avg_6_8 FROM ONLY student;
+
+SELECT max(four) AS max_3 FROM onek;
+SELECT max(a) AS max_100 FROM aggtest;
+SELECT max(aggtest.b) AS max_324_78 FROM aggtest;
+SELECT max(student.gpa) AS max_3_7 FROM student;
+
+SELECT stddev_pop(b) FROM aggtest;
+SELECT stddev_samp(b) FROM aggtest;
+SELECT var_pop(b) FROM aggtest;
+SELECT var_samp(b) FROM aggtest;
+
+SELECT stddev_pop(b::numeric) FROM aggtest;
+SELECT stddev_samp(b::numeric) FROM aggtest;
+SELECT var_pop(b::numeric) FROM aggtest;
+SELECT var_samp(b::numeric) FROM aggtest;
+
+-- population variance is defined for a single tuple, sample variance
+-- is not
+SELECT var_pop(1.0::float8), var_samp(2.0::float8);
+SELECT stddev_pop(3.0::float8), stddev_samp(4.0::float8);
+SELECT var_pop('inf'::float8), var_samp('inf'::float8);
+SELECT stddev_pop('inf'::float8), stddev_samp('inf'::float8);
+SELECT var_pop('nan'::float8), var_samp('nan'::float8);
+SELECT stddev_pop('nan'::float8), stddev_samp('nan'::float8);
+SELECT var_pop(1.0::float4), var_samp(2.0::float4);
+SELECT stddev_pop(3.0::float4), stddev_samp(4.0::float4);
+SELECT var_pop('inf'::float4), var_samp('inf'::float4);
+SELECT stddev_pop('inf'::float4), stddev_samp('inf'::float4);
+SELECT var_pop('nan'::float4), var_samp('nan'::float4);
+SELECT stddev_pop('nan'::float4), stddev_samp('nan'::float4);
+SELECT var_pop(1.0::numeric), var_samp(2.0::numeric);
+SELECT stddev_pop(3.0::numeric), stddev_samp(4.0::numeric);
+SELECT var_pop('inf'::numeric), var_samp('inf'::numeric);
+SELECT stddev_pop('inf'::numeric), stddev_samp('inf'::numeric);
+SELECT var_pop('nan'::numeric), var_samp('nan'::numeric);
+SELECT stddev_pop('nan'::numeric), stddev_samp('nan'::numeric);
+
+-- verify correct results for null and NaN inputs
+select sum(null::int4) from generate_series(1,3);
+select sum(null::int8) from generate_series(1,3);
+select sum(null::numeric) from generate_series(1,3);
+select sum(null::float8) from generate_series(1,3);
+select avg(null::int4) from generate_series(1,3);
+select avg(null::int8) from generate_series(1,3);
+select avg(null::numeric) from generate_series(1,3);
+select avg(null::float8) from generate_series(1,3);
+select sum('NaN'::numeric) from generate_series(1,3);
+select avg('NaN'::numeric) from generate_series(1,3);
+
+-- verify correct results for infinite inputs
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('1'), ('infinity')) v(x);
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('infinity'), ('1')) v(x);
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('infinity'), ('infinity')) v(x);
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('-infinity'), ('infinity')) v(x);
+SELECT sum(x::float8), avg(x::float8), var_pop(x::float8)
+FROM (VALUES ('-infinity'), ('-infinity')) v(x);
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('1'), ('infinity')) v(x);
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('infinity'), ('1')) v(x);
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('infinity'), ('infinity')) v(x);
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('-infinity'), ('infinity')) v(x);
+SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric)
+FROM (VALUES ('-infinity'), ('-infinity')) v(x);
+
+-- test accuracy with a large input offset
+SELECT avg(x::float8), var_pop(x::float8)
+FROM (VALUES (100000003), (100000004), (100000006), (100000007)) v(x);
+SELECT avg(x::float8), var_pop(x::float8)
+FROM (VALUES (7000000000005), (7000000000007)) v(x);
+
+-- SQL2003 binary aggregates
+SELECT regr_count(b, a) FROM aggtest;
+SELECT regr_sxx(b, a) FROM aggtest;
+SELECT regr_syy(b, a) FROM aggtest;
+SELECT regr_sxy(b, a) FROM aggtest;
+SELECT regr_avgx(b, a), regr_avgy(b, a) FROM aggtest;
+SELECT regr_r2(b, a) FROM aggtest;
+SELECT regr_slope(b, a), regr_intercept(b, a) FROM aggtest;
+SELECT covar_pop(b, a), covar_samp(b, a) FROM aggtest;
+SELECT corr(b, a) FROM aggtest;
+
+-- check single-tuple behavior
+SELECT covar_pop(1::float8,2::float8), covar_samp(3::float8,4::float8);
+SELECT covar_pop(1::float8,'inf'::float8), covar_samp(3::float8,'inf'::float8);
+SELECT covar_pop(1::float8,'nan'::float8), covar_samp(3::float8,'nan'::float8);
+
+-- test accum and combine functions directly
+CREATE TABLE regr_test (x float8, y float8);
+INSERT INTO regr_test VALUES (10,150),(20,250),(30,350),(80,540),(100,200);
+SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
+FROM regr_test WHERE x IN (10,20,30,80);
+SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
+FROM regr_test;
+SELECT float8_accum('{4,140,2900}'::float8[], 100);
+SELECT float8_regr_accum('{4,140,2900,1290,83075,15050}'::float8[], 200, 100);
+SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
+FROM regr_test WHERE x IN (10,20,30);
+SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
+FROM regr_test WHERE x IN (80,100);
+SELECT float8_combine('{3,60,200}'::float8[], '{0,0,0}'::float8[]);
+SELECT float8_combine('{0,0,0}'::float8[], '{2,180,200}'::float8[]);
+SELECT float8_combine('{3,60,200}'::float8[], '{2,180,200}'::float8[]);
+SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[],
+ '{0,0,0,0,0,0}'::float8[]);
+SELECT float8_regr_combine('{0,0,0,0,0,0}'::float8[],
+ '{2,180,200,740,57800,-3400}'::float8[]);
+SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[],
+ '{2,180,200,740,57800,-3400}'::float8[]);
+DROP TABLE regr_test;
+
+-- test count, distinct
+SELECT count(four) AS cnt_1000 FROM onek;
+SELECT count(DISTINCT four) AS cnt_4 FROM onek;
+
+select ten, count(*), sum(four) from onek
+group by ten order by ten;
+
+select ten, count(four), sum(DISTINCT four) from onek
+group by ten order by ten;
+
+-- user-defined aggregates
+SELECT newavg(four) AS avg_1 FROM onek;
+SELECT newsum(four) AS sum_1500 FROM onek;
+SELECT newcnt(four) AS cnt_1000 FROM onek;
+SELECT newcnt(*) AS cnt_1000 FROM onek;
+SELECT oldcnt(*) AS cnt_1000 FROM onek;
+SELECT sum2(q1,q2) FROM int8_tbl;
+
+-- test for outer-level aggregates
+
+-- this should work
+select ten, sum(distinct four) from onek a
+group by ten
+having exists (select 1 from onek b where sum(distinct a.four) = b.four);
+
+-- this should fail because subquery has an agg of its own in WHERE
+select ten, sum(distinct four) from onek a
+group by ten
+having exists (select 1 from onek b
+ where sum(distinct a.four + b.four) = b.four);
+
+-- Test handling of sublinks within outer-level aggregates.
+-- Per bug report from Daniel Grace.
+select
+ (select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1)))
+from tenk1 o;
+
+-- Test handling of Params within aggregate arguments in hashed aggregation.
+-- Per bug report from Jeevan Chalke.
+explain (verbose, costs off)
+select s1, s2, sm
+from generate_series(1, 3) s1,
+ lateral (select s2, sum(s1 + s2) sm
+ from generate_series(1, 3) s2 group by s2) ss
+order by 1, 2;
+select s1, s2, sm
+from generate_series(1, 3) s1,
+ lateral (select s2, sum(s1 + s2) sm
+ from generate_series(1, 3) s2 group by s2) ss
+order by 1, 2;
+
+explain (verbose, costs off)
+select array(select sum(x+y) s
+ from generate_series(1,3) y group by y order by s)
+ from generate_series(1,3) x;
+select array(select sum(x+y) s
+ from generate_series(1,3) y group by y order by s)
+ from generate_series(1,3) x;
+
+--
+-- test for bitwise integer aggregates
+--
+CREATE TEMPORARY TABLE bitwise_test(
+ i2 INT2,
+ i4 INT4,
+ i8 INT8,
+ i INTEGER,
+ x INT2,
+ y BIT(4)
+);
+
+-- empty case
+SELECT
+ BIT_AND(i2) AS "?",
+ BIT_OR(i4) AS "?",
+ BIT_XOR(i8) AS "?"
+FROM bitwise_test;
+
+COPY bitwise_test FROM STDIN NULL 'null';
+1 1 1 1 1 B0101
+3 3 3 null 2 B0100
+7 7 7 3 4 B1100
+\.
+
+SELECT
+ BIT_AND(i2) AS "1",
+ BIT_AND(i4) AS "1",
+ BIT_AND(i8) AS "1",
+ BIT_AND(i) AS "?",
+ BIT_AND(x) AS "0",
+ BIT_AND(y) AS "0100",
+
+ BIT_OR(i2) AS "7",
+ BIT_OR(i4) AS "7",
+ BIT_OR(i8) AS "7",
+ BIT_OR(i) AS "?",
+ BIT_OR(x) AS "7",
+ BIT_OR(y) AS "1101",
+
+ BIT_XOR(i2) AS "5",
+ BIT_XOR(i4) AS "5",
+ BIT_XOR(i8) AS "5",
+ BIT_XOR(i) AS "?",
+ BIT_XOR(x) AS "7",
+ BIT_XOR(y) AS "1101"
+FROM bitwise_test;
+
+--
+-- test boolean aggregates
+--
+-- first test all possible transition and final states
+
+SELECT
+ -- boolean and transitions
+ -- null because strict
+ booland_statefunc(NULL, NULL) IS NULL AS "t",
+ booland_statefunc(TRUE, NULL) IS NULL AS "t",
+ booland_statefunc(FALSE, NULL) IS NULL AS "t",
+ booland_statefunc(NULL, TRUE) IS NULL AS "t",
+ booland_statefunc(NULL, FALSE) IS NULL AS "t",
+ -- and actual computations
+ booland_statefunc(TRUE, TRUE) AS "t",
+ NOT booland_statefunc(TRUE, FALSE) AS "t",
+ NOT booland_statefunc(FALSE, TRUE) AS "t",
+ NOT booland_statefunc(FALSE, FALSE) AS "t";
+
+SELECT
+ -- boolean or transitions
+ -- null because strict
+ boolor_statefunc(NULL, NULL) IS NULL AS "t",
+ boolor_statefunc(TRUE, NULL) IS NULL AS "t",
+ boolor_statefunc(FALSE, NULL) IS NULL AS "t",
+ boolor_statefunc(NULL, TRUE) IS NULL AS "t",
+ boolor_statefunc(NULL, FALSE) IS NULL AS "t",
+ -- actual computations
+ boolor_statefunc(TRUE, TRUE) AS "t",
+ boolor_statefunc(TRUE, FALSE) AS "t",
+ boolor_statefunc(FALSE, TRUE) AS "t",
+ NOT boolor_statefunc(FALSE, FALSE) AS "t";
+
+CREATE TEMPORARY TABLE bool_test(
+ b1 BOOL,
+ b2 BOOL,
+ b3 BOOL,
+ b4 BOOL);
+
+-- empty case
+SELECT
+ BOOL_AND(b1) AS "n",
+ BOOL_OR(b3) AS "n"
+FROM bool_test;
+
+COPY bool_test FROM STDIN NULL 'null';
+TRUE null FALSE null
+FALSE TRUE null null
+null TRUE FALSE null
+\.
+
+SELECT
+ BOOL_AND(b1) AS "f",
+ BOOL_AND(b2) AS "t",
+ BOOL_AND(b3) AS "f",
+ BOOL_AND(b4) AS "n",
+ BOOL_AND(NOT b2) AS "f",
+ BOOL_AND(NOT b3) AS "t"
+FROM bool_test;
+
+SELECT
+ EVERY(b1) AS "f",
+ EVERY(b2) AS "t",
+ EVERY(b3) AS "f",
+ EVERY(b4) AS "n",
+ EVERY(NOT b2) AS "f",
+ EVERY(NOT b3) AS "t"
+FROM bool_test;
+
+SELECT
+ BOOL_OR(b1) AS "t",
+ BOOL_OR(b2) AS "t",
+ BOOL_OR(b3) AS "f",
+ BOOL_OR(b4) AS "n",
+ BOOL_OR(NOT b2) AS "f",
+ BOOL_OR(NOT b3) AS "t"
+FROM bool_test;
+
+--
+-- Test cases that should be optimized into indexscans instead of
+-- the generic aggregate implementation.
+--
+
+-- Basic cases
+explain (costs off)
+ select min(unique1) from tenk1;
+select min(unique1) from tenk1;
+explain (costs off)
+ select max(unique1) from tenk1;
+select max(unique1) from tenk1;
+explain (costs off)
+ select max(unique1) from tenk1 where unique1 < 42;
+select max(unique1) from tenk1 where unique1 < 42;
+explain (costs off)
+ select max(unique1) from tenk1 where unique1 > 42;
+select max(unique1) from tenk1 where unique1 > 42;
+
+-- the planner may choose a generic aggregate here if parallel query is
+-- enabled, since that plan will be parallel safe and the "optimized"
+-- plan, which has almost identical cost, will not be. we want to test
+-- the optimized plan, so temporarily disable parallel query.
+begin;
+set local max_parallel_workers_per_gather = 0;
+explain (costs off)
+ select max(unique1) from tenk1 where unique1 > 42000;
+select max(unique1) from tenk1 where unique1 > 42000;
+rollback;
+
+-- multi-column index (uses tenk1_thous_tenthous)
+explain (costs off)
+ select max(tenthous) from tenk1 where thousand = 33;
+select max(tenthous) from tenk1 where thousand = 33;
+explain (costs off)
+ select min(tenthous) from tenk1 where thousand = 33;
+select min(tenthous) from tenk1 where thousand = 33;
+
+-- check parameter propagation into an indexscan subquery
+explain (costs off)
+ select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt
+ from int4_tbl;
+select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt
+ from int4_tbl;
+
+-- check some cases that were handled incorrectly in 8.3.0
+explain (costs off)
+ select distinct max(unique2) from tenk1;
+select distinct max(unique2) from tenk1;
+explain (costs off)
+ select max(unique2) from tenk1 order by 1;
+select max(unique2) from tenk1 order by 1;
+explain (costs off)
+ select max(unique2) from tenk1 order by max(unique2);
+select max(unique2) from tenk1 order by max(unique2);
+explain (costs off)
+ select max(unique2) from tenk1 order by max(unique2)+1;
+select max(unique2) from tenk1 order by max(unique2)+1;
+explain (costs off)
+ select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
+select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
+
+-- interesting corner case: constant gets optimized into a seqscan
+explain (costs off)
+ select max(100) from tenk1;
+select max(100) from tenk1;
+
+-- try it on an inheritance tree
+create table minmaxtest(f1 int);
+create table minmaxtest1() inherits (minmaxtest);
+create table minmaxtest2() inherits (minmaxtest);
+create table minmaxtest3() inherits (minmaxtest);
+create index minmaxtesti on minmaxtest(f1);
+create index minmaxtest1i on minmaxtest1(f1);
+create index minmaxtest2i on minmaxtest2(f1 desc);
+create index minmaxtest3i on minmaxtest3(f1) where f1 is not null;
+
+insert into minmaxtest values(11), (12);
+insert into minmaxtest1 values(13), (14);
+insert into minmaxtest2 values(15), (16);
+insert into minmaxtest3 values(17), (18);
+
+explain (costs off)
+ select min(f1), max(f1) from minmaxtest;
+select min(f1), max(f1) from minmaxtest;
+
+-- DISTINCT doesn't do anything useful here, but it shouldn't fail
+explain (costs off)
+ select distinct min(f1), max(f1) from minmaxtest;
+select distinct min(f1), max(f1) from minmaxtest;
+
+drop table minmaxtest cascade;
+
+-- check for correct detection of nested-aggregate errors
+select max(min(unique1)) from tenk1;
+select (select max(min(unique1)) from int8_tbl) from tenk1;
+
+--
+-- Test removal of redundant GROUP BY columns
+--
+
+create temp table t1 (a int, b int, c int, d int, primary key (a, b));
+create temp table t2 (x int, y int, z int, primary key (x, y));
+create temp table t3 (a int, b int, c int, primary key(a, b) deferrable);
+
+-- Non-primary-key columns can be removed from GROUP BY
+explain (costs off) select * from t1 group by a,b,c,d;
+
+-- No removal can happen if the complete PK is not present in GROUP BY
+explain (costs off) select a,c from t1 group by a,c,d;
+
+-- Test removal across multiple relations
+explain (costs off) select *
+from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y
+group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.y,t2.z;
+
+-- Test case where t1 can be optimized but not t2
+explain (costs off) select t1.*,t2.x,t2.z
+from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y
+group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.z;
+
+-- Cannot optimize when PK is deferrable
+explain (costs off) select * from t3 group by a,b,c;
+
+create temp table t1c () inherits (t1);
+
+-- Ensure we don't remove any columns when t1 has a child table
+explain (costs off) select * from t1 group by a,b,c,d;
+
+-- Okay to remove columns if we're only querying the parent.
+explain (costs off) select * from only t1 group by a,b,c,d;
+
+create temp table p_t1 (
+ a int,
+ b int,
+ c int,
+ d int,
+ primary key(a,b)
+) partition by list(a);
+create temp table p_t1_1 partition of p_t1 for values in(1);
+create temp table p_t1_2 partition of p_t1 for values in(2);
+
+-- Ensure we can remove non-PK columns for partitioned tables.
+explain (costs off) select * from p_t1 group by a,b,c,d;
+
+drop table t1 cascade;
+drop table t2;
+drop table t3;
+drop table p_t1;
+
+--
+-- Test GROUP BY matching of join columns that are type-coerced due to USING
+--
+
+create temp table t1(f1 int, f2 bigint);
+create temp table t2(f1 bigint, f22 bigint);
+
+select f1 from t1 left join t2 using (f1) group by f1;
+select f1 from t1 left join t2 using (f1) group by t1.f1;
+select t1.f1 from t1 left join t2 using (f1) group by t1.f1;
+-- only this one should fail:
+select t1.f1 from t1 left join t2 using (f1) group by f1;
+
+drop table t1, t2;
+
+--
+-- Test combinations of DISTINCT and/or ORDER BY
+--
+
+select array_agg(a order by b)
+ from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
+select array_agg(a order by a)
+ from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
+select array_agg(a order by a desc)
+ from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
+select array_agg(b order by a desc)
+ from (values (1,4),(2,3),(3,1),(4,2)) v(a,b);
+
+select array_agg(distinct a)
+ from (values (1),(2),(1),(3),(null),(2)) v(a);
+select array_agg(distinct a order by a)
+ from (values (1),(2),(1),(3),(null),(2)) v(a);
+select array_agg(distinct a order by a desc)
+ from (values (1),(2),(1),(3),(null),(2)) v(a);
+select array_agg(distinct a order by a desc nulls last)
+ from (values (1),(2),(1),(3),(null),(2)) v(a);
+
+-- multi-arg aggs, strict/nonstrict, distinct/order by
+
+select aggfstr(a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+select aggfns(a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+
+select aggfstr(distinct a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+select aggfns(distinct a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+
+select aggfstr(distinct a,b,c order by b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+select aggfns(distinct a,b,c order by b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+
+-- test specific code paths
+
+select aggfns(distinct a,a,c order by c using ~<~,a)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+select aggfns(distinct a,a,c order by c using ~<~)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+select aggfns(distinct a,a,c order by a)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+select aggfns(distinct a,b,c order by a,c using ~<~,b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+
+-- check node I/O via view creation and usage, also deparsing logic
+
+create view agg_view1 as
+ select aggfns(a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+
+select * from agg_view1;
+select pg_get_viewdef('agg_view1'::regclass);
+
+create or replace view agg_view1 as
+ select aggfns(distinct a,b,c)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+
+select * from agg_view1;
+select pg_get_viewdef('agg_view1'::regclass);
+
+create or replace view agg_view1 as
+ select aggfns(distinct a,b,c order by b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,3) i;
+
+select * from agg_view1;
+select pg_get_viewdef('agg_view1'::regclass);
+
+create or replace view agg_view1 as
+ select aggfns(a,b,c order by b+1)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+
+select * from agg_view1;
+select pg_get_viewdef('agg_view1'::regclass);
+
+create or replace view agg_view1 as
+ select aggfns(a,a,c order by b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+
+select * from agg_view1;
+select pg_get_viewdef('agg_view1'::regclass);
+
+create or replace view agg_view1 as
+ select aggfns(a,b,c order by c using ~<~)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c);
+
+select * from agg_view1;
+select pg_get_viewdef('agg_view1'::regclass);
+
+create or replace view agg_view1 as
+ select aggfns(distinct a,b,c order by a,c using ~<~,b)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+
+select * from agg_view1;
+select pg_get_viewdef('agg_view1'::regclass);
+
+drop view agg_view1;
+
+-- incorrect DISTINCT usage errors
+
+select aggfns(distinct a,b,c order by i)
+ from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
+select aggfns(distinct a,b,c order by a,b+1)
+ from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
+select aggfns(distinct a,b,c order by a,b,i,c)
+ from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
+select aggfns(distinct a,a,c order by a,b)
+ from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i;
+
+-- string_agg tests
+select string_agg(a,',') from (values('aaaa'),('bbbb'),('cccc')) g(a);
+select string_agg(a,',') from (values('aaaa'),(null),('bbbb'),('cccc')) g(a);
+select string_agg(a,'AB') from (values(null),(null),('bbbb'),('cccc')) g(a);
+select string_agg(a,',') from (values(null),(null)) g(a);
+
+-- check some implicit casting cases, as per bug #5564
+select string_agg(distinct f1, ',' order by f1) from varchar_tbl; -- ok
+select string_agg(distinct f1::text, ',' order by f1) from varchar_tbl; -- not ok
+select string_agg(distinct f1, ',' order by f1::text) from varchar_tbl; -- not ok
+select string_agg(distinct f1::text, ',' order by f1::text) from varchar_tbl; -- ok
+
+-- string_agg bytea tests
+create table bytea_test_table(v bytea);
+
+select string_agg(v, '') from bytea_test_table;
+
+insert into bytea_test_table values(decode('ff','hex'));
+
+select string_agg(v, '') from bytea_test_table;
+
+insert into bytea_test_table values(decode('aa','hex'));
+
+select string_agg(v, '') from bytea_test_table;
+select string_agg(v, NULL) from bytea_test_table;
+select string_agg(v, decode('ee', 'hex')) from bytea_test_table;
+
+drop table bytea_test_table;
+
+-- FILTER tests
+
+select min(unique1) filter (where unique1 > 100) from tenk1;
+
+select sum(1/ten) filter (where ten > 0) from tenk1;
+
+select ten, sum(distinct four) filter (where four::text ~ '123') from onek a
+group by ten;
+
+select ten, sum(distinct four) filter (where four > 10) from onek a
+group by ten
+having exists (select 1 from onek b where sum(distinct a.four) = b.four);
+
+select max(foo COLLATE "C") filter (where (bar collate "POSIX") > '0')
+from (values ('a', 'b')) AS v(foo,bar);
+
+-- outer reference in FILTER (PostgreSQL extension)
+select (select count(*)
+ from (values (1)) t0(inner_c))
+from (values (2),(3)) t1(outer_c); -- inner query is aggregation query
+select (select count(*) filter (where outer_c <> 0)
+ from (values (1)) t0(inner_c))
+from (values (2),(3)) t1(outer_c); -- outer query is aggregation query
+select (select count(inner_c) filter (where outer_c <> 0)
+ from (values (1)) t0(inner_c))
+from (values (2),(3)) t1(outer_c); -- inner query is aggregation query
+select
+ (select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1))
+ filter (where o.unique1 < 10))
+from tenk1 o; -- outer query is aggregation query
+
+-- subquery in FILTER clause (PostgreSQL extension)
+select sum(unique1) FILTER (WHERE
+ unique1 IN (SELECT unique1 FROM onek where unique1 < 100)) FROM tenk1;
+
+-- exercise lots of aggregate parts with FILTER
+select aggfns(distinct a,b,c order by a,c using ~<~,b) filter (where a > 1)
+ from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c),
+ generate_series(1,2) i;
+
+-- check handling of bare boolean Var in FILTER
+select max(0) filter (where b1) from bool_test;
+select (select max(0) filter (where b1)) from bool_test;
+
+-- check for correct detection of nested-aggregate errors in FILTER
+select max(unique1) filter (where sum(ten) > 0) from tenk1;
+select (select max(unique1) filter (where sum(ten) > 0) from int8_tbl) from tenk1;
+select max(unique1) filter (where bool_or(ten > 0)) from tenk1;
+select (select max(unique1) filter (where bool_or(ten > 0)) from int8_tbl) from tenk1;
+
+
+-- ordered-set aggregates
+
+select p, percentile_cont(p) within group (order by x::float8)
+from generate_series(1,5) x,
+ (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
+group by p order by p;
+
+select p, percentile_cont(p order by p) within group (order by x) -- error
+from generate_series(1,5) x,
+ (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
+group by p order by p;
+
+select p, sum() within group (order by x::float8) -- error
+from generate_series(1,5) x,
+ (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
+group by p order by p;
+
+select p, percentile_cont(p,p) -- error
+from generate_series(1,5) x,
+ (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p)
+group by p order by p;
+
+select percentile_cont(0.5) within group (order by b) from aggtest;
+select percentile_cont(0.5) within group (order by b), sum(b) from aggtest;
+select percentile_cont(0.5) within group (order by thousand) from tenk1;
+select percentile_disc(0.5) within group (order by thousand) from tenk1;
+select rank(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
+select cume_dist(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
+select percent_rank(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4),(5)) v(x);
+select dense_rank(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
+
+select percentile_disc(array[0,0.1,0.25,0.5,0.75,0.9,1]) within group (order by thousand)
+from tenk1;
+select percentile_cont(array[0,0.25,0.5,0.75,1]) within group (order by thousand)
+from tenk1;
+select percentile_disc(array[[null,1,0.5],[0.75,0.25,null]]) within group (order by thousand)
+from tenk1;
+select percentile_cont(array[0,1,0.25,0.75,0.5,1,0.3,0.32,0.35,0.38,0.4]) within group (order by x)
+from generate_series(1,6) x;
+
+select ten, mode() within group (order by string4) from tenk1 group by ten;
+
+select percentile_disc(array[0.25,0.5,0.75]) within group (order by x)
+from unnest('{fred,jim,fred,jack,jill,fred,jill,jim,jim,sheila,jim,sheila}'::text[]) u(x);
+
+-- check collation propagates up in suitable cases:
+select pg_collation_for(percentile_disc(1) within group (order by x collate "POSIX"))
+ from (values ('fred'),('jim')) v(x);
+
+-- ordered-set aggs created with CREATE AGGREGATE
+select test_rank(3) within group (order by x)
+from (values (1),(1),(2),(2),(3),(3),(4)) v(x);
+select test_percentile_disc(0.5) within group (order by thousand) from tenk1;
+
+-- ordered-set aggs can't use ungrouped vars in direct args:
+select rank(x) within group (order by x) from generate_series(1,5) x;
+
+-- outer-level agg can't use a grouped arg of a lower level, either:
+select array(select percentile_disc(a) within group (order by x)
+ from (values (0.3),(0.7)) v(a) group by a)
+ from generate_series(1,5) g(x);
+
+-- agg in the direct args is a grouping violation, too:
+select rank(sum(x)) within group (order by x) from generate_series(1,5) x;
+
+-- hypothetical-set type unification and argument-count failures:
+select rank(3) within group (order by x) from (values ('fred'),('jim')) v(x);
+select rank(3) within group (order by stringu1,stringu2) from tenk1;
+select rank('fred') within group (order by x) from generate_series(1,5) x;
+select rank('adam'::text collate "C") within group (order by x collate "POSIX")
+ from (values ('fred'),('jim')) v(x);
+-- hypothetical-set type unification successes:
+select rank('adam'::varchar) within group (order by x) from (values ('fred'),('jim')) v(x);
+select rank('3') within group (order by x) from generate_series(1,5) x;
+
+-- divide by zero check
+select percent_rank(0) within group (order by x) from generate_series(1,0) x;
+
+-- deparse and multiple features:
+create view aggordview1 as
+select ten,
+ percentile_disc(0.5) within group (order by thousand) as p50,
+ percentile_disc(0.5) within group (order by thousand) filter (where hundred=1) as px,
+ rank(5,'AZZZZ',50) within group (order by hundred, string4 desc, hundred)
+ from tenk1
+ group by ten order by ten;
+
+select pg_get_viewdef('aggordview1');
+select * from aggordview1 order by ten;
+drop view aggordview1;
+
+-- variadic aggregates
+select least_agg(q1,q2) from int8_tbl;
+select least_agg(variadic array[q1,q2]) from int8_tbl;
+
+select cleast_agg(q1,q2) from int8_tbl;
+select cleast_agg(4.5,f1) from int4_tbl;
+select cleast_agg(variadic array[4.5,f1]) from int4_tbl;
+select pg_typeof(cleast_agg(variadic array[4.5,f1])) from int4_tbl;
+
+-- test aggregates with common transition functions share the same states
+begin work;
+
+create type avg_state as (total bigint, count bigint);
+
+create or replace function avg_transfn(state avg_state, n int) returns avg_state as
+$$
+declare new_state avg_state;
+begin
+ raise notice 'avg_transfn called with %', n;
+ if state is null then
+ if n is not null then
+ new_state.total := n;
+ new_state.count := 1;
+ return new_state;
+ end if;
+ return null;
+ elsif n is not null then
+ state.total := state.total + n;
+ state.count := state.count + 1;
+ return state;
+ end if;
+
+ return null;
+end
+$$ language plpgsql;
+
+create function avg_finalfn(state avg_state) returns int4 as
+$$
+begin
+ if state is null then
+ return NULL;
+ else
+ return state.total / state.count;
+ end if;
+end
+$$ language plpgsql;
+
+create function sum_finalfn(state avg_state) returns int4 as
+$$
+begin
+ if state is null then
+ return NULL;
+ else
+ return state.total;
+ end if;
+end
+$$ language plpgsql;
+
+create aggregate my_avg(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn
+);
+
+create aggregate my_sum(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = sum_finalfn
+);
+
+-- aggregate state should be shared as aggs are the same.
+select my_avg(one),my_avg(one) from (values(1),(3)) t(one);
+
+-- aggregate state should be shared as transfn is the same for both aggs.
+select my_avg(one),my_sum(one) from (values(1),(3)) t(one);
+
+-- same as previous one, but with DISTINCT, which requires sorting the input.
+select my_avg(distinct one),my_sum(distinct one) from (values(1),(3),(1)) t(one);
+
+-- shouldn't share states due to the distinctness not matching.
+select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one);
+
+-- shouldn't share states due to the filter clause not matching.
+select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one);
+
+-- this should not share the state due to different input columns.
+select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two);
+
+-- exercise cases where OSAs share state
+select
+ percentile_cont(0.5) within group (order by a),
+ percentile_disc(0.5) within group (order by a)
+from (values(1::float8),(3),(5),(7)) t(a);
+
+select
+ percentile_cont(0.25) within group (order by a),
+ percentile_disc(0.5) within group (order by a)
+from (values(1::float8),(3),(5),(7)) t(a);
+
+-- these can't share state currently
+select
+ rank(4) within group (order by a),
+ dense_rank(4) within group (order by a)
+from (values(1),(3),(5),(7)) t(a);
+
+-- test that aggs with the same sfunc and initcond share the same agg state
+create aggregate my_sum_init(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = sum_finalfn,
+ initcond = '(10,0)'
+);
+
+create aggregate my_avg_init(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn,
+ initcond = '(10,0)'
+);
+
+create aggregate my_avg_init2(int4)
+(
+ stype = avg_state,
+ sfunc = avg_transfn,
+ finalfunc = avg_finalfn,
+ initcond = '(4,0)'
+);
+
+-- state should be shared if INITCONDs are matching
+select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one);
+
+-- Varying INITCONDs should cause the states not to be shared.
+select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one);
+
+rollback;
+
+-- test aggregate state sharing to ensure it works if one aggregate has a
+-- finalfn and the other one has none.
+begin work;
+
+create or replace function sum_transfn(state int4, n int4) returns int4 as
+$$
+declare new_state int4;
+begin
+ raise notice 'sum_transfn called with %', n;
+ if state is null then
+ if n is not null then
+ new_state := n;
+ return new_state;
+ end if;
+ return null;
+ elsif n is not null then
+ state := state + n;
+ return state;
+ end if;
+
+ return null;
+end
+$$ language plpgsql;
+
+create function halfsum_finalfn(state int4) returns int4 as
+$$
+begin
+ if state is null then
+ return NULL;
+ else
+ return state / 2;
+ end if;
+end
+$$ language plpgsql;
+
+create aggregate my_sum(int4)
+(
+ stype = int4,
+ sfunc = sum_transfn
+);
+
+create aggregate my_half_sum(int4)
+(
+ stype = int4,
+ sfunc = sum_transfn,
+ finalfunc = halfsum_finalfn
+);
+
+-- Agg state should be shared even though my_sum has no finalfn
+select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one);
+
+rollback;
+
+
+-- test that the aggregate transition logic correctly handles
+-- transition / combine functions returning NULL
+
+-- First test the case of a normal transition function returning NULL
+BEGIN;
+CREATE FUNCTION balkifnull(int8, int4)
+RETURNS int8
+STRICT
+LANGUAGE plpgsql AS $$
+BEGIN
+ IF $1 IS NULL THEN
+ RAISE 'erroneously called with NULL argument';
+ END IF;
+ RETURN NULL;
+END$$;
+
+CREATE AGGREGATE balk(int4)
+(
+ SFUNC = balkifnull(int8, int4),
+ STYPE = int8,
+ PARALLEL = SAFE,
+ INITCOND = '0'
+);
+
+SELECT balk(hundred) FROM tenk1;
+
+ROLLBACK;
+
+-- Secondly test the case of a parallel aggregate combiner function
+-- returning NULL. For that use normal transition function, but a
+-- combiner function returning NULL.
+BEGIN;
+CREATE FUNCTION balkifnull(int8, int8)
+RETURNS int8
+PARALLEL SAFE
+STRICT
+LANGUAGE plpgsql AS $$
+BEGIN
+ IF $1 IS NULL THEN
+ RAISE 'erroneously called with NULL argument';
+ END IF;
+ RETURN NULL;
+END$$;
+
+CREATE AGGREGATE balk(int4)
+(
+ SFUNC = int4_sum(int8, int4),
+ STYPE = int8,
+ COMBINEFUNC = balkifnull(int8, int8),
+ PARALLEL = SAFE,
+ INITCOND = '0'
+);
+
+-- force use of parallelism
+ALTER TABLE tenk1 set (parallel_workers = 4);
+SET LOCAL parallel_setup_cost=0;
+SET LOCAL max_parallel_workers_per_gather=4;
+
+EXPLAIN (COSTS OFF) SELECT balk(hundred) FROM tenk1;
+SELECT balk(hundred) FROM tenk1;
+
+ROLLBACK;
+
+-- test coverage for aggregate combine/serial/deserial functions
+BEGIN;
+
+SET parallel_setup_cost = 0;
+SET parallel_tuple_cost = 0;
+SET min_parallel_table_scan_size = 0;
+SET max_parallel_workers_per_gather = 4;
+SET parallel_leader_participation = off;
+SET enable_indexonlyscan = off;
+
+-- variance(int4) covers numeric_poly_combine
+-- sum(int8) covers int8_avg_combine
+-- regr_count(float8, float8) covers int8inc_float8_float8 and aggregates with > 1 arg
+EXPLAIN (COSTS OFF, VERBOSE)
+SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, unique1::float8)
+FROM (SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1) u;
+
+SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, unique1::float8)
+FROM (SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1) u;
+
+-- variance(int8) covers numeric_combine
+-- avg(numeric) covers numeric_avg_combine
+EXPLAIN (COSTS OFF, VERBOSE)
+SELECT variance(unique1::int8), avg(unique1::numeric)
+FROM (SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1) u;
+
+SELECT variance(unique1::int8), avg(unique1::numeric)
+FROM (SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1
+ UNION ALL SELECT * FROM tenk1) u;
+
+ROLLBACK;
+
+-- test coverage for dense_rank
+SELECT dense_rank(x) WITHIN GROUP (ORDER BY x) FROM (VALUES (1),(1),(2),(2),(3),(3)) v(x) GROUP BY (x) ORDER BY 1;
+
+
+-- Ensure that the STRICT checks for aggregates does not take NULLness
+-- of ORDER BY columns into account. See bug report around
+-- 2a505161-2727-2473-7c46-591ed108ac52@email.cz
+SELECT min(x ORDER BY y) FROM (VALUES(1, NULL)) AS d(x,y);
+SELECT min(x ORDER BY y) FROM (VALUES(1, 2)) AS d(x,y);
+
+-- check collation-sensitive matching between grouping expressions
+select v||'a', case v||'a' when 'aa' then 1 else 0 end, count(*)
+ from unnest(array['a','b']) u(v)
+ group by v||'a' order by 1;
+select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
+ from unnest(array['a','b']) u(v)
+ group by v||'a' order by 1;
+
+-- Make sure that generation of HashAggregate for uniqification purposes
+-- does not lead to array overflow due to unexpected duplicate hash keys
+-- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
+set enable_memoize to off;
+explain (costs off)
+ select 1 from tenk1
+ where (hundred, thousand) in (select twothousand, twothousand from onek);
+reset enable_memoize;
+
+--
+-- Hash Aggregation Spill tests
+--
+
+set enable_sort=false;
+set work_mem='64kB';
+
+select unique1, count(*), sum(twothousand) from tenk1
+group by unique1
+having sum(fivethous) > 4975
+order by sum(twothousand);
+
+set work_mem to default;
+set enable_sort to default;
+
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+
+set work_mem='64kB';
+
+create table agg_data_2k as
+select g from generate_series(0, 1999) g;
+analyze agg_data_2k;
+
+create table agg_data_20k as
+select g from generate_series(0, 19999) g;
+analyze agg_data_20k;
+
+-- Produce results with sorting.
+
+set enable_hashagg = false;
+
+set jit_above_cost = 0;
+
+explain (costs off)
+select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from agg_data_20k group by g%10000;
+
+create table agg_group_1 as
+select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from agg_data_20k group by g%10000;
+
+create table agg_group_2 as
+select * from
+ (values (100), (300), (500)) as r(a),
+ lateral (
+ select (g/2)::numeric as c1,
+ array_agg(g::numeric) as c2,
+ count(*) as c3
+ from agg_data_2k
+ where g < r.a
+ group by g/2) as s;
+
+set jit_above_cost to default;
+
+create table agg_group_3 as
+select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
+ from agg_data_2k group by g/2;
+
+create table agg_group_4 as
+select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
+ from agg_data_2k group by g/2;
+
+-- Produce results with hash aggregation
+
+set enable_hashagg = true;
+set enable_sort = false;
+
+set jit_above_cost = 0;
+
+explain (costs off)
+select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from agg_data_20k group by g%10000;
+
+create table agg_hash_1 as
+select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3
+ from agg_data_20k group by g%10000;
+
+create table agg_hash_2 as
+select * from
+ (values (100), (300), (500)) as r(a),
+ lateral (
+ select (g/2)::numeric as c1,
+ array_agg(g::numeric) as c2,
+ count(*) as c3
+ from agg_data_2k
+ where g < r.a
+ group by g/2) as s;
+
+set jit_above_cost to default;
+
+create table agg_hash_3 as
+select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3
+ from agg_data_2k group by g/2;
+
+create table agg_hash_4 as
+select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3
+ from agg_data_2k group by g/2;
+
+set enable_sort = true;
+set work_mem to default;
+
+-- Compare group aggregation results to hash aggregation results
+
+(select * from agg_hash_1 except select * from agg_group_1)
+ union all
+(select * from agg_group_1 except select * from agg_hash_1);
+
+(select * from agg_hash_2 except select * from agg_group_2)
+ union all
+(select * from agg_group_2 except select * from agg_hash_2);
+
+(select * from agg_hash_3 except select * from agg_group_3)
+ union all
+(select * from agg_group_3 except select * from agg_hash_3);
+
+(select * from agg_hash_4 except select * from agg_group_4)
+ union all
+(select * from agg_group_4 except select * from agg_hash_4);
+
+drop table agg_group_1;
+drop table agg_group_2;
+drop table agg_group_3;
+drop table agg_group_4;
+drop table agg_hash_1;
+drop table agg_hash_2;
+drop table agg_hash_3;
+drop table agg_hash_4;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/alter_table.out b/ydb/library/yql/tests/postgresql/original/cases/alter_table.out
new file mode 100644
index 0000000000..77c95d6051
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/alter_table.out
@@ -0,0 +1,4593 @@
+--
+-- ALTER_TABLE
+--
+-- Clean up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+DROP ROLE IF EXISTS regress_alter_table_user1;
+RESET client_min_messages;
+CREATE USER regress_alter_table_user1;
+--
+-- add attribute
+--
+CREATE TABLE attmp (initial int4);
+COMMENT ON TABLE attmp_wrong IS 'table comment';
+ERROR: relation "attmp_wrong" does not exist
+COMMENT ON TABLE attmp IS 'table comment';
+COMMENT ON TABLE attmp IS NULL;
+ALTER TABLE attmp ADD COLUMN xmin integer; -- fails
+ERROR: column name "xmin" conflicts with a system column name
+ALTER TABLE attmp ADD COLUMN a int4 default 3;
+ALTER TABLE attmp ADD COLUMN b name;
+ALTER TABLE attmp ADD COLUMN c text;
+ALTER TABLE attmp ADD COLUMN d float8;
+ALTER TABLE attmp ADD COLUMN e float4;
+ALTER TABLE attmp ADD COLUMN f int2;
+ALTER TABLE attmp ADD COLUMN g polygon;
+ALTER TABLE attmp ADD COLUMN i char;
+ALTER TABLE attmp ADD COLUMN k int4;
+ALTER TABLE attmp ADD COLUMN l tid;
+ALTER TABLE attmp ADD COLUMN m xid;
+ALTER TABLE attmp ADD COLUMN n oidvector;
+--ALTER TABLE attmp ADD COLUMN o lock;
+ALTER TABLE attmp ADD COLUMN p boolean;
+ALTER TABLE attmp ADD COLUMN q point;
+ALTER TABLE attmp ADD COLUMN r lseg;
+ALTER TABLE attmp ADD COLUMN s path;
+ALTER TABLE attmp ADD COLUMN t box;
+ALTER TABLE attmp ADD COLUMN v timestamp;
+ALTER TABLE attmp ADD COLUMN w interval;
+ALTER TABLE attmp ADD COLUMN x float8[];
+ALTER TABLE attmp ADD COLUMN y float4[];
+ALTER TABLE attmp ADD COLUMN z int2[];
+INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t,
+ v, w, x, y, z)
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'c',
+ 314159, '(1,1)', '512',
+ '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
+ '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)',
+ 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}');
+SELECT * FROM attmp;
+ initial | a | b | c | d | e | f | g | i | k | l | m | n | p | q | r | s | t | v | w | x | y | z
+---------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+-----------
+ | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | t | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4}
+(1 row)
+
+DROP TABLE attmp;
+-- the wolf bug - schema mods caused inconsistent row descriptors
+CREATE TABLE attmp (
+ initial int4
+);
+ALTER TABLE attmp ADD COLUMN a int4;
+ALTER TABLE attmp ADD COLUMN b name;
+ALTER TABLE attmp ADD COLUMN c text;
+ALTER TABLE attmp ADD COLUMN d float8;
+ALTER TABLE attmp ADD COLUMN e float4;
+ALTER TABLE attmp ADD COLUMN f int2;
+ALTER TABLE attmp ADD COLUMN g polygon;
+ALTER TABLE attmp ADD COLUMN i char;
+ALTER TABLE attmp ADD COLUMN k int4;
+ALTER TABLE attmp ADD COLUMN l tid;
+ALTER TABLE attmp ADD COLUMN m xid;
+ALTER TABLE attmp ADD COLUMN n oidvector;
+--ALTER TABLE attmp ADD COLUMN o lock;
+ALTER TABLE attmp ADD COLUMN p boolean;
+ALTER TABLE attmp ADD COLUMN q point;
+ALTER TABLE attmp ADD COLUMN r lseg;
+ALTER TABLE attmp ADD COLUMN s path;
+ALTER TABLE attmp ADD COLUMN t box;
+ALTER TABLE attmp ADD COLUMN v timestamp;
+ALTER TABLE attmp ADD COLUMN w interval;
+ALTER TABLE attmp ADD COLUMN x float8[];
+ALTER TABLE attmp ADD COLUMN y float4[];
+ALTER TABLE attmp ADD COLUMN z int2[];
+INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t,
+ v, w, x, y, z)
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'c',
+ 314159, '(1,1)', '512',
+ '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
+ '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)',
+ 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}');
+SELECT * FROM attmp;
+ initial | a | b | c | d | e | f | g | i | k | l | m | n | p | q | r | s | t | v | w | x | y | z
+---------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+-----------
+ | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | t | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4}
+(1 row)
+
+CREATE INDEX attmp_idx ON attmp (a, (d + e), b);
+ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000;
+ERROR: column number must be in range from 1 to 32767
+LINE 1: ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000;
+ ^
+ALTER INDEX attmp_idx ALTER COLUMN 1 SET STATISTICS 1000;
+ERROR: cannot alter statistics on non-expression column "a" of index "attmp_idx"
+HINT: Alter statistics on table column instead.
+ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS 1000;
+\d+ attmp_idx
+ Index "public.attmp_idx"
+ Column | Type | Key? | Definition | Storage | Stats target
+--------+------------------+------+------------+---------+--------------
+ a | integer | yes | a | plain |
+ expr | double precision | yes | (d + e) | plain | 1000
+ b | cstring | yes | b | plain |
+btree, for table "public.attmp"
+
+ALTER INDEX attmp_idx ALTER COLUMN 3 SET STATISTICS 1000;
+ERROR: cannot alter statistics on non-expression column "b" of index "attmp_idx"
+HINT: Alter statistics on table column instead.
+ALTER INDEX attmp_idx ALTER COLUMN 4 SET STATISTICS 1000;
+ERROR: column number 4 of relation "attmp_idx" does not exist
+ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS -1;
+DROP TABLE attmp;
+-- fails with incorrect object type
+CREATE VIEW at_v1 AS SELECT 1 as a;
+ALTER TABLE at_v1 ALTER COLUMN a SET STATISTICS 0;
+ERROR: "at_v1" is not a table, materialized view, index, partitioned index, or foreign table
+DROP VIEW at_v1;
+--
+-- rename - check on both non-temp and temp tables
+--
+CREATE TABLE attmp (regtable int);
+CREATE TEMP TABLE attmp (attmptable int);
+ALTER TABLE attmp RENAME TO attmp_new;
+SELECT * FROM attmp;
+ regtable
+----------
+(0 rows)
+
+SELECT * FROM attmp_new;
+ attmptable
+------------
+(0 rows)
+
+ALTER TABLE attmp RENAME TO attmp_new2;
+SELECT * FROM attmp; -- should fail
+ERROR: relation "attmp" does not exist
+LINE 1: SELECT * FROM attmp;
+ ^
+SELECT * FROM attmp_new;
+ attmptable
+------------
+(0 rows)
+
+SELECT * FROM attmp_new2;
+ regtable
+----------
+(0 rows)
+
+DROP TABLE attmp_new;
+DROP TABLE attmp_new2;
+-- check rename of partitioned tables and indexes also
+CREATE TABLE part_attmp (a int primary key) partition by range (a);
+CREATE TABLE part_attmp1 PARTITION OF part_attmp FOR VALUES FROM (0) TO (100);
+ALTER INDEX part_attmp_pkey RENAME TO part_attmp_index;
+ALTER INDEX part_attmp1_pkey RENAME TO part_attmp1_index;
+ALTER TABLE part_attmp RENAME TO part_at2tmp;
+ALTER TABLE part_attmp1 RENAME TO part_at2tmp1;
+SET ROLE regress_alter_table_user1;
+ALTER INDEX part_attmp_index RENAME TO fail;
+ERROR: must be owner of index part_attmp_index
+ALTER INDEX part_attmp1_index RENAME TO fail;
+ERROR: must be owner of index part_attmp1_index
+ALTER TABLE part_at2tmp RENAME TO fail;
+ERROR: must be owner of table part_at2tmp
+ALTER TABLE part_at2tmp1 RENAME TO fail;
+ERROR: must be owner of table part_at2tmp1
+RESET ROLE;
+DROP TABLE part_at2tmp;
+--
+-- check renaming to a table's array type's autogenerated name
+-- (the array type's name should get out of the way)
+--
+CREATE TABLE attmp_array (id int);
+CREATE TABLE attmp_array2 (id int);
+SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype;
+ typname
+--------------
+ _attmp_array
+(1 row)
+
+SELECT typname FROM pg_type WHERE oid = 'attmp_array2[]'::regtype;
+ typname
+---------------
+ _attmp_array2
+(1 row)
+
+ALTER TABLE attmp_array2 RENAME TO _attmp_array;
+SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype;
+ typname
+---------------
+ __attmp_array
+(1 row)
+
+SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype;
+ typname
+----------------
+ ___attmp_array
+(1 row)
+
+DROP TABLE _attmp_array;
+DROP TABLE attmp_array;
+-- renaming to table's own array type's name is an interesting corner case
+CREATE TABLE attmp_array (id int);
+SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype;
+ typname
+--------------
+ _attmp_array
+(1 row)
+
+ALTER TABLE attmp_array RENAME TO _attmp_array;
+SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype;
+ typname
+---------------
+ __attmp_array
+(1 row)
+
+DROP TABLE _attmp_array;
+-- ALTER TABLE ... RENAME on non-table relations
+-- renaming indexes (FIXME: this should probably test the index's functionality)
+ALTER INDEX IF EXISTS __onek_unique1 RENAME TO attmp_onek_unique1;
+NOTICE: relation "__onek_unique1" does not exist, skipping
+ALTER INDEX IF EXISTS __attmp_onek_unique1 RENAME TO onek_unique1;
+NOTICE: relation "__attmp_onek_unique1" does not exist, skipping
+ALTER INDEX onek_unique1 RENAME TO attmp_onek_unique1;
+ALTER INDEX attmp_onek_unique1 RENAME TO onek_unique1;
+SET ROLE regress_alter_table_user1;
+ALTER INDEX onek_unique1 RENAME TO fail; -- permission denied
+ERROR: must be owner of index onek_unique1
+RESET ROLE;
+-- rename statements with mismatching statement and object types
+CREATE TABLE alter_idx_rename_test (a INT);
+CREATE INDEX alter_idx_rename_test_idx ON alter_idx_rename_test (a);
+CREATE TABLE alter_idx_rename_test_parted (a INT) PARTITION BY LIST (a);
+CREATE INDEX alter_idx_rename_test_parted_idx ON alter_idx_rename_test_parted (a);
+BEGIN;
+ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2;
+ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2;
+SELECT relation::regclass, mode FROM pg_locks
+WHERE pid = pg_backend_pid() AND locktype = 'relation'
+ AND relation::regclass::text LIKE 'alter\_idx%'
+ORDER BY relation::regclass::text COLLATE "C";
+ relation | mode
+--------------------------------+---------------------
+ alter_idx_rename_test_2 | AccessExclusiveLock
+ alter_idx_rename_test_parted_2 | AccessExclusiveLock
+(2 rows)
+
+COMMIT;
+BEGIN;
+ALTER INDEX alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2;
+ALTER INDEX alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2;
+SELECT relation::regclass, mode FROM pg_locks
+WHERE pid = pg_backend_pid() AND locktype = 'relation'
+ AND relation::regclass::text LIKE 'alter\_idx%'
+ORDER BY relation::regclass::text COLLATE "C";
+ relation | mode
+------------------------------------+--------------------------
+ alter_idx_rename_test_idx_2 | ShareUpdateExclusiveLock
+ alter_idx_rename_test_parted_idx_2 | ShareUpdateExclusiveLock
+(2 rows)
+
+COMMIT;
+BEGIN;
+ALTER TABLE alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3;
+ALTER TABLE alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3;
+SELECT relation::regclass, mode FROM pg_locks
+WHERE pid = pg_backend_pid() AND locktype = 'relation'
+ AND relation::regclass::text LIKE 'alter\_idx%'
+ORDER BY relation::regclass::text COLLATE "C";
+ relation | mode
+------------------------------------+---------------------
+ alter_idx_rename_test_idx_3 | AccessExclusiveLock
+ alter_idx_rename_test_parted_idx_3 | AccessExclusiveLock
+(2 rows)
+
+COMMIT;
+DROP TABLE alter_idx_rename_test_2;
+-- renaming views
+CREATE VIEW attmp_view (unique1) AS SELECT unique1 FROM tenk1;
+ALTER TABLE attmp_view RENAME TO attmp_view_new;
+SET ROLE regress_alter_table_user1;
+ALTER VIEW attmp_view_new RENAME TO fail; -- permission denied
+ERROR: must be owner of view attmp_view_new
+RESET ROLE;
+-- hack to ensure we get an indexscan here
+set enable_seqscan to off;
+set enable_bitmapscan to off;
+-- 5 values, sorted
+SELECT unique1 FROM tenk1 WHERE unique1 < 5;
+ unique1
+---------
+ 0
+ 1
+ 2
+ 3
+ 4
+(5 rows)
+
+reset enable_seqscan;
+reset enable_bitmapscan;
+DROP VIEW attmp_view_new;
+-- toast-like relation name
+alter table stud_emp rename to pg_toast_stud_emp;
+alter table pg_toast_stud_emp rename to stud_emp;
+-- renaming index should rename constraint as well
+ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1);
+ALTER INDEX onek_unique1_constraint RENAME TO onek_unique1_constraint_foo;
+ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo;
+-- renaming constraint
+ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0);
+ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo;
+ALTER TABLE onek DROP CONSTRAINT onek_check_constraint_foo;
+-- renaming constraint should rename index as well
+ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1);
+DROP INDEX onek_unique1_constraint; -- to see whether it's there
+ERROR: cannot drop index onek_unique1_constraint because constraint onek_unique1_constraint on table onek requires it
+HINT: You can drop constraint onek_unique1_constraint on table onek instead.
+ALTER TABLE onek RENAME CONSTRAINT onek_unique1_constraint TO onek_unique1_constraint_foo;
+DROP INDEX onek_unique1_constraint_foo; -- to see whether it's there
+ERROR: cannot drop index onek_unique1_constraint_foo because constraint onek_unique1_constraint_foo on table onek requires it
+HINT: You can drop constraint onek_unique1_constraint_foo on table onek instead.
+ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo;
+-- renaming constraints vs. inheritance
+CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int);
+\d constraint_rename_test
+ Table "public.constraint_rename_test"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | integer | | |
+ c | integer | | |
+Check constraints:
+ "con1" CHECK (a > 0)
+
+CREATE TABLE constraint_rename_test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) INHERITS (constraint_rename_test);
+NOTICE: merging column "a" with inherited definition
+NOTICE: merging constraint "con1" with inherited definition
+\d constraint_rename_test2
+ Table "public.constraint_rename_test2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | integer | | |
+ c | integer | | |
+ d | integer | | |
+Check constraints:
+ "con1" CHECK (a > 0)
+Inherits: constraint_rename_test
+
+ALTER TABLE constraint_rename_test2 RENAME CONSTRAINT con1 TO con1foo; -- fail
+ERROR: cannot rename inherited constraint "con1"
+ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- fail
+ERROR: inherited constraint "con1" must be renamed in child tables too
+ALTER TABLE constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- ok
+\d constraint_rename_test
+ Table "public.constraint_rename_test"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | integer | | |
+ c | integer | | |
+Check constraints:
+ "con1foo" CHECK (a > 0)
+Number of child tables: 1 (Use \d+ to list them.)
+
+\d constraint_rename_test2
+ Table "public.constraint_rename_test2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | integer | | |
+ c | integer | | |
+ d | integer | | |
+Check constraints:
+ "con1foo" CHECK (a > 0)
+Inherits: constraint_rename_test
+
+ALTER TABLE constraint_rename_test ADD CONSTRAINT con2 CHECK (b > 0) NO INHERIT;
+ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con2 TO con2foo; -- ok
+ALTER TABLE constraint_rename_test RENAME CONSTRAINT con2foo TO con2bar; -- ok
+\d constraint_rename_test
+ Table "public.constraint_rename_test"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | integer | | |
+ c | integer | | |
+Check constraints:
+ "con1foo" CHECK (a > 0)
+ "con2bar" CHECK (b > 0) NO INHERIT
+Number of child tables: 1 (Use \d+ to list them.)
+
+\d constraint_rename_test2
+ Table "public.constraint_rename_test2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | integer | | |
+ c | integer | | |
+ d | integer | | |
+Check constraints:
+ "con1foo" CHECK (a > 0)
+Inherits: constraint_rename_test
+
+ALTER TABLE constraint_rename_test ADD CONSTRAINT con3 PRIMARY KEY (a);
+ALTER TABLE constraint_rename_test RENAME CONSTRAINT con3 TO con3foo; -- ok
+\d constraint_rename_test
+ Table "public.constraint_rename_test"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | not null |
+ b | integer | | |
+ c | integer | | |
+Indexes:
+ "con3foo" PRIMARY KEY, btree (a)
+Check constraints:
+ "con1foo" CHECK (a > 0)
+ "con2bar" CHECK (b > 0) NO INHERIT
+Number of child tables: 1 (Use \d+ to list them.)
+
+\d constraint_rename_test2
+ Table "public.constraint_rename_test2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | not null |
+ b | integer | | |
+ c | integer | | |
+ d | integer | | |
+Check constraints:
+ "con1foo" CHECK (a > 0)
+Inherits: constraint_rename_test
+
+DROP TABLE constraint_rename_test2;
+DROP TABLE constraint_rename_test;
+ALTER TABLE IF EXISTS constraint_not_exist RENAME CONSTRAINT con3 TO con3foo; -- ok
+NOTICE: relation "constraint_not_exist" does not exist, skipping
+ALTER TABLE IF EXISTS constraint_rename_test ADD CONSTRAINT con4 UNIQUE (a);
+NOTICE: relation "constraint_rename_test" does not exist, skipping
+-- renaming constraints with cache reset of target relation
+CREATE TABLE constraint_rename_cache (a int,
+ CONSTRAINT chk_a CHECK (a > 0),
+ PRIMARY KEY (a));
+ALTER TABLE constraint_rename_cache
+ RENAME CONSTRAINT chk_a TO chk_a_new;
+ALTER TABLE constraint_rename_cache
+ RENAME CONSTRAINT constraint_rename_cache_pkey TO constraint_rename_pkey_new;
+CREATE TABLE like_constraint_rename_cache
+ (LIKE constraint_rename_cache INCLUDING ALL);
+\d like_constraint_rename_cache
+ Table "public.like_constraint_rename_cache"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | not null |
+Indexes:
+ "like_constraint_rename_cache_pkey" PRIMARY KEY, btree (a)
+Check constraints:
+ "chk_a_new" CHECK (a > 0)
+
+DROP TABLE constraint_rename_cache;
+DROP TABLE like_constraint_rename_cache;
+-- FOREIGN KEY CONSTRAINT adding TEST
+CREATE TABLE attmp2 (a int primary key);
+CREATE TABLE attmp3 (a int, b int);
+CREATE TABLE attmp4 (a int, b int, unique(a,b));
+CREATE TABLE attmp5 (a int, b int);
+-- Insert rows into attmp2 (pktable)
+INSERT INTO attmp2 values (1);
+INSERT INTO attmp2 values (2);
+INSERT INTO attmp2 values (3);
+INSERT INTO attmp2 values (4);
+-- Insert rows into attmp3
+INSERT INTO attmp3 values (1,10);
+INSERT INTO attmp3 values (1,20);
+INSERT INTO attmp3 values (5,50);
+-- Try (and fail) to add constraint due to invalid source columns
+ALTER TABLE attmp3 add constraint attmpconstr foreign key(c) references attmp2 match full;
+ERROR: column "c" referenced in foreign key constraint does not exist
+-- Try (and fail) to add constraint due to invalid destination columns explicitly given
+ALTER TABLE attmp3 add constraint attmpconstr foreign key(a) references attmp2(b) match full;
+ERROR: column "b" referenced in foreign key constraint does not exist
+-- Try (and fail) to add constraint due to invalid data
+ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full;
+ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr"
+DETAIL: Key (a)=(5) is not present in table "attmp2".
+-- Delete failing row
+DELETE FROM attmp3 where a=5;
+-- Try (and succeed)
+ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full;
+ALTER TABLE attmp3 drop constraint attmpconstr;
+INSERT INTO attmp3 values (5,50);
+-- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate
+ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full NOT VALID;
+ALTER TABLE attmp3 validate constraint attmpconstr;
+ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr"
+DETAIL: Key (a)=(5) is not present in table "attmp2".
+-- Delete failing row
+DELETE FROM attmp3 where a=5;
+-- Try (and succeed) and repeat to show it works on already valid constraint
+ALTER TABLE attmp3 validate constraint attmpconstr;
+ALTER TABLE attmp3 validate constraint attmpconstr;
+-- Try a non-verified CHECK constraint
+ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail
+ERROR: check constraint "b_greater_than_ten" of relation "attmp3" is violated by some row
+ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails
+ERROR: check constraint "b_greater_than_ten" of relation "attmp3" is violated by some row
+DELETE FROM attmp3 WHERE NOT b > 10;
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds
+-- Test inherited NOT VALID CHECK constraints
+select * from attmp3;
+ a | b
+---+----
+ 1 | 20
+(1 row)
+
+CREATE TABLE attmp6 () INHERITS (attmp3);
+CREATE TABLE attmp7 () INHERITS (attmp3);
+INSERT INTO attmp6 VALUES (6, 30), (7, 16);
+ALTER TABLE attmp3 ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID;
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- fails
+ERROR: check constraint "b_le_20" of relation "attmp6" is violated by some row
+DELETE FROM attmp6 WHERE b > 20;
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- succeeds
+-- An already validated constraint must not be revalidated
+CREATE FUNCTION boo(int) RETURNS int IMMUTABLE STRICT LANGUAGE plpgsql AS $$ BEGIN RAISE NOTICE 'boo: %', $1; RETURN $1; END; $$;
+INSERT INTO attmp7 VALUES (8, 18);
+ALTER TABLE attmp7 ADD CONSTRAINT identity CHECK (b = boo(b));
+NOTICE: boo: 18
+ALTER TABLE attmp3 ADD CONSTRAINT IDENTITY check (b = boo(b)) NOT VALID;
+NOTICE: merging constraint "identity" with inherited definition
+ALTER TABLE attmp3 VALIDATE CONSTRAINT identity;
+NOTICE: boo: 20
+NOTICE: boo: 16
+-- A NO INHERIT constraint should not be looked for in children during VALIDATE CONSTRAINT
+create table parent_noinh_convalid (a int);
+create table child_noinh_convalid () inherits (parent_noinh_convalid);
+insert into parent_noinh_convalid values (1);
+insert into child_noinh_convalid values (1);
+alter table parent_noinh_convalid add constraint check_a_is_2 check (a = 2) no inherit not valid;
+-- fail, because of the row in parent
+alter table parent_noinh_convalid validate constraint check_a_is_2;
+ERROR: check constraint "check_a_is_2" of relation "parent_noinh_convalid" is violated by some row
+delete from only parent_noinh_convalid;
+-- ok (parent itself contains no violating rows)
+alter table parent_noinh_convalid validate constraint check_a_is_2;
+select convalidated from pg_constraint where conrelid = 'parent_noinh_convalid'::regclass and conname = 'check_a_is_2';
+ convalidated
+--------------
+ t
+(1 row)
+
+-- cleanup
+drop table parent_noinh_convalid, child_noinh_convalid;
+-- Try (and fail) to create constraint from attmp5(a) to attmp4(a) - unique constraint on
+-- attmp4 is a,b
+ALTER TABLE attmp5 add constraint attmpconstr foreign key(a) references attmp4(a) match full;
+ERROR: there is no unique constraint matching given keys for referenced table "attmp4"
+DROP TABLE attmp7;
+DROP TABLE attmp6;
+DROP TABLE attmp5;
+DROP TABLE attmp4;
+DROP TABLE attmp3;
+DROP TABLE attmp2;
+-- NOT VALID with plan invalidation -- ensure we don't use a constraint for
+-- exclusion until validated
+set constraint_exclusion TO 'partition';
+create table nv_parent (d date, check (false) no inherit not valid);
+-- not valid constraint added at creation time should automatically become valid
+\d nv_parent
+ Table "public.nv_parent"
+ Column | Type | Collation | Nullable | Default
+--------+------+-----------+----------+---------
+ d | date | | |
+Check constraints:
+ "nv_parent_check" CHECK (false) NO INHERIT
+
+create table nv_child_2010 () inherits (nv_parent);
+create table nv_child_2011 () inherits (nv_parent);
+alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid;
+alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid;
+explain (costs off) select * from nv_parent where d between '2011-08-01' and '2011-08-31';
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Append
+ -> Seq Scan on nv_parent nv_parent_1
+ Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
+ -> Seq Scan on nv_child_2010 nv_parent_2
+ Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
+ -> Seq Scan on nv_child_2011 nv_parent_3
+ Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
+(7 rows)
+
+create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent);
+explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date;
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Append
+ -> Seq Scan on nv_parent nv_parent_1
+ Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
+ -> Seq Scan on nv_child_2010 nv_parent_2
+ Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
+ -> Seq Scan on nv_child_2011 nv_parent_3
+ Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date))
+(7 rows)
+
+explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Append
+ -> Seq Scan on nv_parent nv_parent_1
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+ -> Seq Scan on nv_child_2010 nv_parent_2
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+ -> Seq Scan on nv_child_2011 nv_parent_3
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+ -> Seq Scan on nv_child_2009 nv_parent_4
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+(9 rows)
+
+-- after validation, the constraint should be used
+alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check;
+explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Append
+ -> Seq Scan on nv_parent nv_parent_1
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+ -> Seq Scan on nv_child_2010 nv_parent_2
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+ -> Seq Scan on nv_child_2009 nv_parent_3
+ Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date))
+(7 rows)
+
+-- add an inherited NOT VALID constraint
+alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid;
+\d nv_child_2009
+ Table "public.nv_child_2009"
+ Column | Type | Collation | Nullable | Default
+--------+------+-----------+----------+---------
+ d | date | | |
+Check constraints:
+ "nv_child_2009_d_check" CHECK (d >= '01-01-2009'::date AND d <= '12-31-2009'::date)
+ "nv_parent_d_check" CHECK (d >= '01-01-2001'::date AND d <= '12-31-2099'::date) NOT VALID
+Inherits: nv_parent
+
+-- we leave nv_parent and children around to help test pg_dump logic
+-- Foreign key adding test with mixed types
+-- Note: these tables are TEMP to avoid name conflicts when this test
+-- is run in parallel with foreign_key.sql.
+CREATE TEMP TABLE PKTABLE (ptest1 int PRIMARY KEY);
+INSERT INTO PKTABLE VALUES(42);
+CREATE TEMP TABLE FKTABLE (ftest1 inet);
+-- This next should fail, because int=inet does not exist
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
+ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
+DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: inet and integer.
+-- This should also fail for the same reason, but here we
+-- give the column name
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1);
+ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
+DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: inet and integer.
+DROP TABLE FKTABLE;
+-- This should succeed, even though they are different types,
+-- because int=int8 exists and is a member of the integer opfamily
+CREATE TEMP TABLE FKTABLE (ftest1 int8);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
+-- Check it actually works
+INSERT INTO FKTABLE VALUES(42); -- should succeed
+INSERT INTO FKTABLE VALUES(43); -- should fail
+ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
+DETAIL: Key (ftest1)=(43) is not present in table "pktable".
+DROP TABLE FKTABLE;
+-- This should fail, because we'd have to cast numeric to int which is
+-- not an implicit coercion (or use numeric=numeric, but that's not part
+-- of the integer opfamily)
+CREATE TEMP TABLE FKTABLE (ftest1 numeric);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
+ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented
+DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: numeric and integer.
+DROP TABLE FKTABLE;
+DROP TABLE PKTABLE;
+-- On the other hand, this should work because int implicitly promotes to
+-- numeric, and we allow promotion on the FK side
+CREATE TEMP TABLE PKTABLE (ptest1 numeric PRIMARY KEY);
+INSERT INTO PKTABLE VALUES(42);
+CREATE TEMP TABLE FKTABLE (ftest1 int);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
+-- Check it actually works
+INSERT INTO FKTABLE VALUES(42); -- should succeed
+INSERT INTO FKTABLE VALUES(43); -- should fail
+ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey"
+DETAIL: Key (ftest1)=(43) is not present in table "pktable".
+DROP TABLE FKTABLE;
+DROP TABLE PKTABLE;
+CREATE TEMP TABLE PKTABLE (ptest1 int, ptest2 inet,
+ PRIMARY KEY(ptest1, ptest2));
+-- This should fail, because we just chose really odd types
+CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) references pktable;
+ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
+DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: cidr and integer.
+DROP TABLE FKTABLE;
+-- Again, so should this...
+CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
+ references pktable(ptest1, ptest2);
+ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
+DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: cidr and integer.
+DROP TABLE FKTABLE;
+-- This fails because we mixed up the column ordering
+CREATE TEMP TABLE FKTABLE (ftest1 int, ftest2 inet);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
+ references pktable(ptest2, ptest1);
+ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented
+DETAIL: Key columns "ftest1" and "ptest2" are of incompatible types: integer and inet.
+-- As does this...
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest2, ftest1)
+ references pktable(ptest1, ptest2);
+ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented
+DETAIL: Key columns "ftest2" and "ptest1" are of incompatible types: inet and integer.
+DROP TABLE FKTABLE;
+DROP TABLE PKTABLE;
+-- Test that ALTER CONSTRAINT updates trigger deferrability properly
+CREATE TEMP TABLE PKTABLE (ptest1 int primary key);
+CREATE TEMP TABLE FKTABLE (ftest1 int);
+ALTER TABLE FKTABLE ADD CONSTRAINT fknd FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE;
+ALTER TABLE FKTABLE ADD CONSTRAINT fkdd FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED;
+ALTER TABLE FKTABLE ADD CONSTRAINT fkdi FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY IMMEDIATE;
+ALTER TABLE FKTABLE ADD CONSTRAINT fknd2 FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED;
+ALTER TABLE FKTABLE ALTER CONSTRAINT fknd2 NOT DEFERRABLE;
+ALTER TABLE FKTABLE ADD CONSTRAINT fkdd2 FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE;
+ALTER TABLE FKTABLE ALTER CONSTRAINT fkdd2 DEFERRABLE INITIALLY DEFERRED;
+ALTER TABLE FKTABLE ADD CONSTRAINT fkdi2 FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE;
+ALTER TABLE FKTABLE ALTER CONSTRAINT fkdi2 DEFERRABLE INITIALLY IMMEDIATE;
+SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred
+FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint
+WHERE tgrelid = 'pktable'::regclass
+ORDER BY 1,2,3;
+ conname | tgfoid | tgtype | tgdeferrable | tginitdeferred
+---------+------------------------+--------+--------------+----------------
+ fkdd | "RI_FKey_cascade_del" | 9 | f | f
+ fkdd | "RI_FKey_noaction_upd" | 17 | t | t
+ fkdd2 | "RI_FKey_cascade_del" | 9 | f | f
+ fkdd2 | "RI_FKey_noaction_upd" | 17 | t | t
+ fkdi | "RI_FKey_cascade_del" | 9 | f | f
+ fkdi | "RI_FKey_noaction_upd" | 17 | t | f
+ fkdi2 | "RI_FKey_cascade_del" | 9 | f | f
+ fkdi2 | "RI_FKey_noaction_upd" | 17 | t | f
+ fknd | "RI_FKey_cascade_del" | 9 | f | f
+ fknd | "RI_FKey_noaction_upd" | 17 | f | f
+ fknd2 | "RI_FKey_cascade_del" | 9 | f | f
+ fknd2 | "RI_FKey_noaction_upd" | 17 | f | f
+(12 rows)
+
+SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred
+FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint
+WHERE tgrelid = 'fktable'::regclass
+ORDER BY 1,2,3;
+ conname | tgfoid | tgtype | tgdeferrable | tginitdeferred
+---------+---------------------+--------+--------------+----------------
+ fkdd | "RI_FKey_check_ins" | 5 | t | t
+ fkdd | "RI_FKey_check_upd" | 17 | t | t
+ fkdd2 | "RI_FKey_check_ins" | 5 | t | t
+ fkdd2 | "RI_FKey_check_upd" | 17 | t | t
+ fkdi | "RI_FKey_check_ins" | 5 | t | f
+ fkdi | "RI_FKey_check_upd" | 17 | t | f
+ fkdi2 | "RI_FKey_check_ins" | 5 | t | f
+ fkdi2 | "RI_FKey_check_upd" | 17 | t | f
+ fknd | "RI_FKey_check_ins" | 5 | f | f
+ fknd | "RI_FKey_check_upd" | 17 | f | f
+ fknd2 | "RI_FKey_check_ins" | 5 | f | f
+ fknd2 | "RI_FKey_check_upd" | 17 | f | f
+(12 rows)
+
+-- temp tables should go away by themselves, need not drop them.
+-- test check constraint adding
+create table atacc1 ( test int );
+-- add a check constraint
+alter table atacc1 add constraint atacc_test1 check (test>3);
+-- should fail
+insert into atacc1 (test) values (2);
+ERROR: new row for relation "atacc1" violates check constraint "atacc_test1"
+DETAIL: Failing row contains (2).
+-- should succeed
+insert into atacc1 (test) values (4);
+drop table atacc1;
+-- let's do one where the check fails when added
+create table atacc1 ( test int );
+-- insert a soon to be failing row
+insert into atacc1 (test) values (2);
+-- add a check constraint (fails)
+alter table atacc1 add constraint atacc_test1 check (test>3);
+ERROR: check constraint "atacc_test1" of relation "atacc1" is violated by some row
+insert into atacc1 (test) values (4);
+drop table atacc1;
+-- let's do one where the check fails because the column doesn't exist
+create table atacc1 ( test int );
+-- add a check constraint (fails)
+alter table atacc1 add constraint atacc_test1 check (test1>3);
+ERROR: column "test1" does not exist
+HINT: Perhaps you meant to reference the column "atacc1.test".
+drop table atacc1;
+-- something a little more complicated
+create table atacc1 ( test int, test2 int, test3 int);
+-- add a check constraint (fails)
+alter table atacc1 add constraint atacc_test1 check (test+test2<test3*4);
+-- should fail
+insert into atacc1 (test,test2,test3) values (4,4,2);
+ERROR: new row for relation "atacc1" violates check constraint "atacc_test1"
+DETAIL: Failing row contains (4, 4, 2).
+-- should succeed
+insert into atacc1 (test,test2,test3) values (4,4,5);
+drop table atacc1;
+-- lets do some naming tests
+create table atacc1 (test int check (test>3), test2 int);
+alter table atacc1 add check (test2>test);
+-- should fail for $2
+insert into atacc1 (test2, test) values (3, 4);
+ERROR: new row for relation "atacc1" violates check constraint "atacc1_check"
+DETAIL: Failing row contains (4, 3).
+drop table atacc1;
+-- inheritance related tests
+create table atacc1 (test int);
+create table atacc2 (test2 int);
+create table atacc3 (test3 int) inherits (atacc1, atacc2);
+alter table atacc2 add constraint foo check (test2>0);
+-- fail and then succeed on atacc2
+insert into atacc2 (test2) values (-3);
+ERROR: new row for relation "atacc2" violates check constraint "foo"
+DETAIL: Failing row contains (-3).
+insert into atacc2 (test2) values (3);
+-- fail and then succeed on atacc3
+insert into atacc3 (test2) values (-3);
+ERROR: new row for relation "atacc3" violates check constraint "foo"
+DETAIL: Failing row contains (null, -3, null).
+insert into atacc3 (test2) values (3);
+drop table atacc3;
+drop table atacc2;
+drop table atacc1;
+-- same things with one created with INHERIT
+create table atacc1 (test int);
+create table atacc2 (test2 int);
+create table atacc3 (test3 int) inherits (atacc1, atacc2);
+alter table atacc3 no inherit atacc2;
+-- fail
+alter table atacc3 no inherit atacc2;
+ERROR: relation "atacc2" is not a parent of relation "atacc3"
+-- make sure it really isn't a child
+insert into atacc3 (test2) values (3);
+select test2 from atacc2;
+ test2
+-------
+(0 rows)
+
+-- fail due to missing constraint
+alter table atacc2 add constraint foo check (test2>0);
+alter table atacc3 inherit atacc2;
+ERROR: child table is missing constraint "foo"
+-- fail due to missing column
+alter table atacc3 rename test2 to testx;
+alter table atacc3 inherit atacc2;
+ERROR: child table is missing column "test2"
+-- fail due to mismatched data type
+alter table atacc3 add test2 bool;
+alter table atacc3 inherit atacc2;
+ERROR: child table "atacc3" has different type for column "test2"
+alter table atacc3 drop test2;
+-- succeed
+alter table atacc3 add test2 int;
+update atacc3 set test2 = 4 where test2 is null;
+alter table atacc3 add constraint foo check (test2>0);
+alter table atacc3 inherit atacc2;
+-- fail due to duplicates and circular inheritance
+alter table atacc3 inherit atacc2;
+ERROR: relation "atacc2" would be inherited from more than once
+alter table atacc2 inherit atacc3;
+ERROR: circular inheritance not allowed
+DETAIL: "atacc3" is already a child of "atacc2".
+alter table atacc2 inherit atacc2;
+ERROR: circular inheritance not allowed
+DETAIL: "atacc2" is already a child of "atacc2".
+-- test that we really are a child now (should see 4 not 3 and cascade should go through)
+select test2 from atacc2;
+ test2
+-------
+ 4
+(1 row)
+
+drop table atacc2 cascade;
+NOTICE: drop cascades to table atacc3
+drop table atacc1;
+-- adding only to a parent is allowed as of 9.2
+create table atacc1 (test int);
+create table atacc2 (test2 int) inherits (atacc1);
+-- ok:
+alter table atacc1 add constraint foo check (test>0) no inherit;
+-- check constraint is not there on child
+insert into atacc2 (test) values (-3);
+-- check constraint is there on parent
+insert into atacc1 (test) values (-3);
+ERROR: new row for relation "atacc1" violates check constraint "foo"
+DETAIL: Failing row contains (-3).
+insert into atacc1 (test) values (3);
+-- fail, violating row:
+alter table atacc2 add constraint foo check (test>0) no inherit;
+ERROR: check constraint "foo" of relation "atacc2" is violated by some row
+drop table atacc2;
+drop table atacc1;
+-- test unique constraint adding
+create table atacc1 ( test int ) ;
+-- add a unique constraint
+alter table atacc1 add constraint atacc_test1 unique (test);
+-- insert first value
+insert into atacc1 (test) values (2);
+-- should fail
+insert into atacc1 (test) values (2);
+ERROR: duplicate key value violates unique constraint "atacc_test1"
+DETAIL: Key (test)=(2) already exists.
+-- should succeed
+insert into atacc1 (test) values (4);
+-- try to create duplicates via alter table using - should fail
+alter table atacc1 alter column test type integer using 0;
+ERROR: could not create unique index "atacc_test1"
+DETAIL: Key (test)=(0) is duplicated.
+drop table atacc1;
+-- let's do one where the unique constraint fails when added
+create table atacc1 ( test int );
+-- insert soon to be failing rows
+insert into atacc1 (test) values (2);
+insert into atacc1 (test) values (2);
+-- add a unique constraint (fails)
+alter table atacc1 add constraint atacc_test1 unique (test);
+ERROR: could not create unique index "atacc_test1"
+DETAIL: Key (test)=(2) is duplicated.
+insert into atacc1 (test) values (3);
+drop table atacc1;
+-- let's do one where the unique constraint fails
+-- because the column doesn't exist
+create table atacc1 ( test int );
+-- add a unique constraint (fails)
+alter table atacc1 add constraint atacc_test1 unique (test1);
+ERROR: column "test1" named in key does not exist
+drop table atacc1;
+-- something a little more complicated
+create table atacc1 ( test int, test2 int);
+-- add a unique constraint
+alter table atacc1 add constraint atacc_test1 unique (test, test2);
+-- insert initial value
+insert into atacc1 (test,test2) values (4,4);
+-- should fail
+insert into atacc1 (test,test2) values (4,4);
+ERROR: duplicate key value violates unique constraint "atacc_test1"
+DETAIL: Key (test, test2)=(4, 4) already exists.
+-- should all succeed
+insert into atacc1 (test,test2) values (4,5);
+insert into atacc1 (test,test2) values (5,4);
+insert into atacc1 (test,test2) values (5,5);
+drop table atacc1;
+-- lets do some naming tests
+create table atacc1 (test int, test2 int, unique(test));
+alter table atacc1 add unique (test2);
+-- should fail for @@ second one @@
+insert into atacc1 (test2, test) values (3, 3);
+insert into atacc1 (test2, test) values (2, 3);
+ERROR: duplicate key value violates unique constraint "atacc1_test_key"
+DETAIL: Key (test)=(3) already exists.
+drop table atacc1;
+-- test primary key constraint adding
+create table atacc1 ( id serial, test int) ;
+-- add a primary key constraint
+alter table atacc1 add constraint atacc_test1 primary key (test);
+-- insert first value
+insert into atacc1 (test) values (2);
+-- should fail
+insert into atacc1 (test) values (2);
+ERROR: duplicate key value violates unique constraint "atacc_test1"
+DETAIL: Key (test)=(2) already exists.
+-- should succeed
+insert into atacc1 (test) values (4);
+-- inserting NULL should fail
+insert into atacc1 (test) values(NULL);
+ERROR: null value in column "test" of relation "atacc1" violates not-null constraint
+DETAIL: Failing row contains (4, null).
+-- try adding a second primary key (should fail)
+alter table atacc1 add constraint atacc_oid1 primary key(id);
+ERROR: multiple primary keys for table "atacc1" are not allowed
+-- drop first primary key constraint
+alter table atacc1 drop constraint atacc_test1 restrict;
+-- try adding a primary key on oid (should succeed)
+alter table atacc1 add constraint atacc_oid1 primary key(id);
+drop table atacc1;
+-- let's do one where the primary key constraint fails when added
+create table atacc1 ( test int );
+-- insert soon to be failing rows
+insert into atacc1 (test) values (2);
+insert into atacc1 (test) values (2);
+-- add a primary key (fails)
+alter table atacc1 add constraint atacc_test1 primary key (test);
+ERROR: could not create unique index "atacc_test1"
+DETAIL: Key (test)=(2) is duplicated.
+insert into atacc1 (test) values (3);
+drop table atacc1;
+-- let's do another one where the primary key constraint fails when added
+create table atacc1 ( test int );
+-- insert soon to be failing row
+insert into atacc1 (test) values (NULL);
+-- add a primary key (fails)
+alter table atacc1 add constraint atacc_test1 primary key (test);
+ERROR: column "test" of relation "atacc1" contains null values
+insert into atacc1 (test) values (3);
+drop table atacc1;
+-- let's do one where the primary key constraint fails
+-- because the column doesn't exist
+create table atacc1 ( test int );
+-- add a primary key constraint (fails)
+alter table atacc1 add constraint atacc_test1 primary key (test1);
+ERROR: column "test1" of relation "atacc1" does not exist
+drop table atacc1;
+-- adding a new column as primary key to a non-empty table.
+-- should fail unless the column has a non-null default value.
+create table atacc1 ( test int );
+insert into atacc1 (test) values (0);
+-- add a primary key column without a default (fails).
+alter table atacc1 add column test2 int primary key;
+ERROR: column "test2" of relation "atacc1" contains null values
+-- now add a primary key column with a default (succeeds).
+alter table atacc1 add column test2 int default 0 primary key;
+drop table atacc1;
+-- this combination used to have order-of-execution problems (bug #15580)
+create table atacc1 (a int);
+insert into atacc1 values(1);
+alter table atacc1
+ add column b float8 not null default random(),
+ add primary key(a);
+drop table atacc1;
+-- additionally, we've seen issues with foreign key validation not being
+-- properly delayed until after a table rewrite. Check that works ok.
+create table atacc1 (a int primary key);
+alter table atacc1 add constraint atacc1_fkey foreign key (a) references atacc1 (a) not valid;
+alter table atacc1 validate constraint atacc1_fkey, alter a type bigint;
+drop table atacc1;
+-- we've also seen issues with check constraints being validated at the wrong
+-- time when there's a pending table rewrite.
+create table atacc1 (a bigint, b int);
+insert into atacc1 values(1,1);
+alter table atacc1 add constraint atacc1_chk check(b = 1) not valid;
+alter table atacc1 validate constraint atacc1_chk, alter a type int;
+drop table atacc1;
+-- same as above, but ensure the constraint violation is detected
+create table atacc1 (a bigint, b int);
+insert into atacc1 values(1,2);
+alter table atacc1 add constraint atacc1_chk check(b = 1) not valid;
+alter table atacc1 validate constraint atacc1_chk, alter a type int;
+ERROR: check constraint "atacc1_chk" of relation "atacc1" is violated by some row
+drop table atacc1;
+-- something a little more complicated
+create table atacc1 ( test int, test2 int);
+-- add a primary key constraint
+alter table atacc1 add constraint atacc_test1 primary key (test, test2);
+-- try adding a second primary key - should fail
+alter table atacc1 add constraint atacc_test2 primary key (test);
+ERROR: multiple primary keys for table "atacc1" are not allowed
+-- insert initial value
+insert into atacc1 (test,test2) values (4,4);
+-- should fail
+insert into atacc1 (test,test2) values (4,4);
+ERROR: duplicate key value violates unique constraint "atacc_test1"
+DETAIL: Key (test, test2)=(4, 4) already exists.
+insert into atacc1 (test,test2) values (NULL,3);
+ERROR: null value in column "test" of relation "atacc1" violates not-null constraint
+DETAIL: Failing row contains (null, 3).
+insert into atacc1 (test,test2) values (3, NULL);
+ERROR: null value in column "test2" of relation "atacc1" violates not-null constraint
+DETAIL: Failing row contains (3, null).
+insert into atacc1 (test,test2) values (NULL,NULL);
+ERROR: null value in column "test" of relation "atacc1" violates not-null constraint
+DETAIL: Failing row contains (null, null).
+-- should all succeed
+insert into atacc1 (test,test2) values (4,5);
+insert into atacc1 (test,test2) values (5,4);
+insert into atacc1 (test,test2) values (5,5);
+drop table atacc1;
+-- lets do some naming tests
+create table atacc1 (test int, test2 int, primary key(test));
+-- only first should succeed
+insert into atacc1 (test2, test) values (3, 3);
+insert into atacc1 (test2, test) values (2, 3);
+ERROR: duplicate key value violates unique constraint "atacc1_pkey"
+DETAIL: Key (test)=(3) already exists.
+insert into atacc1 (test2, test) values (1, NULL);
+ERROR: null value in column "test" of relation "atacc1" violates not-null constraint
+DETAIL: Failing row contains (null, 1).
+drop table atacc1;
+-- alter table / alter column [set/drop] not null tests
+-- try altering system catalogs, should fail
+alter table pg_class alter column relname drop not null;
+ERROR: permission denied: "pg_class" is a system catalog
+alter table pg_class alter relname set not null;
+ERROR: permission denied: "pg_class" is a system catalog
+-- try altering non-existent table, should fail
+alter table non_existent alter column bar set not null;
+ERROR: relation "non_existent" does not exist
+alter table non_existent alter column bar drop not null;
+ERROR: relation "non_existent" does not exist
+-- test setting columns to null and not null and vice versa
+-- test checking for null values and primary key
+create table atacc1 (test int not null);
+alter table atacc1 add constraint "atacc1_pkey" primary key (test);
+alter table atacc1 alter column test drop not null;
+ERROR: column "test" is in a primary key
+alter table atacc1 drop constraint "atacc1_pkey";
+alter table atacc1 alter column test drop not null;
+insert into atacc1 values (null);
+alter table atacc1 alter test set not null;
+ERROR: column "test" of relation "atacc1" contains null values
+delete from atacc1;
+alter table atacc1 alter test set not null;
+-- try altering a non-existent column, should fail
+alter table atacc1 alter bar set not null;
+ERROR: column "bar" of relation "atacc1" does not exist
+alter table atacc1 alter bar drop not null;
+ERROR: column "bar" of relation "atacc1" does not exist
+-- try creating a view and altering that, should fail
+create view myview as select * from atacc1;
+alter table myview alter column test drop not null;
+ERROR: "myview" is not a table or foreign table
+alter table myview alter column test set not null;
+ERROR: "myview" is not a table or foreign table
+drop view myview;
+drop table atacc1;
+-- set not null verified by constraints
+create table atacc1 (test_a int, test_b int);
+insert into atacc1 values (null, 1);
+-- constraint not cover all values, should fail
+alter table atacc1 add constraint atacc1_constr_or check(test_a is not null or test_b < 10);
+alter table atacc1 alter test_a set not null;
+ERROR: column "test_a" of relation "atacc1" contains null values
+alter table atacc1 drop constraint atacc1_constr_or;
+-- not valid constraint, should fail
+alter table atacc1 add constraint atacc1_constr_invalid check(test_a is not null) not valid;
+alter table atacc1 alter test_a set not null;
+ERROR: column "test_a" of relation "atacc1" contains null values
+alter table atacc1 drop constraint atacc1_constr_invalid;
+-- with valid constraint
+update atacc1 set test_a = 1;
+alter table atacc1 add constraint atacc1_constr_a_valid check(test_a is not null);
+alter table atacc1 alter test_a set not null;
+delete from atacc1;
+insert into atacc1 values (2, null);
+alter table atacc1 alter test_a drop not null;
+-- test multiple set not null at same time
+-- test_a checked by atacc1_constr_a_valid, test_b should fail by table scan
+alter table atacc1 alter test_a set not null, alter test_b set not null;
+ERROR: column "test_b" of relation "atacc1" contains null values
+-- commands order has no importance
+alter table atacc1 alter test_b set not null, alter test_a set not null;
+ERROR: column "test_b" of relation "atacc1" contains null values
+-- valid one by table scan, one by check constraints
+update atacc1 set test_b = 1;
+alter table atacc1 alter test_b set not null, alter test_a set not null;
+alter table atacc1 alter test_a drop not null, alter test_b drop not null;
+-- both column has check constraints
+alter table atacc1 add constraint atacc1_constr_b_valid check(test_b is not null);
+alter table atacc1 alter test_b set not null, alter test_a set not null;
+drop table atacc1;
+-- test inheritance
+create table parent (a int);
+create table child (b varchar(255)) inherits (parent);
+alter table parent alter a set not null;
+insert into parent values (NULL);
+ERROR: null value in column "a" of relation "parent" violates not-null constraint
+DETAIL: Failing row contains (null).
+insert into child (a, b) values (NULL, 'foo');
+ERROR: null value in column "a" of relation "child" violates not-null constraint
+DETAIL: Failing row contains (null, foo).
+alter table parent alter a drop not null;
+insert into parent values (NULL);
+insert into child (a, b) values (NULL, 'foo');
+alter table only parent alter a set not null;
+ERROR: column "a" of relation "parent" contains null values
+alter table child alter a set not null;
+ERROR: column "a" of relation "child" contains null values
+delete from parent;
+alter table only parent alter a set not null;
+insert into parent values (NULL);
+ERROR: null value in column "a" of relation "parent" violates not-null constraint
+DETAIL: Failing row contains (null).
+alter table child alter a set not null;
+insert into child (a, b) values (NULL, 'foo');
+ERROR: null value in column "a" of relation "child" violates not-null constraint
+DETAIL: Failing row contains (null, foo).
+delete from child;
+alter table child alter a set not null;
+insert into child (a, b) values (NULL, 'foo');
+ERROR: null value in column "a" of relation "child" violates not-null constraint
+DETAIL: Failing row contains (null, foo).
+drop table child;
+drop table parent;
+-- test setting and removing default values
+create table def_test (
+ c1 int4 default 5,
+ c2 text default 'initial_default'
+);
+insert into def_test default values;
+alter table def_test alter column c1 drop default;
+insert into def_test default values;
+alter table def_test alter column c2 drop default;
+insert into def_test default values;
+alter table def_test alter column c1 set default 10;
+alter table def_test alter column c2 set default 'new_default';
+insert into def_test default values;
+select * from def_test;
+ c1 | c2
+----+-----------------
+ 5 | initial_default
+ | initial_default
+ |
+ 10 | new_default
+(4 rows)
+
+-- set defaults to an incorrect type: this should fail
+alter table def_test alter column c1 set default 'wrong_datatype';
+ERROR: invalid input syntax for type integer: "wrong_datatype"
+alter table def_test alter column c2 set default 20;
+-- set defaults on a non-existent column: this should fail
+alter table def_test alter column c3 set default 30;
+ERROR: column "c3" of relation "def_test" does not exist
+-- set defaults on views: we need to create a view, add a rule
+-- to allow insertions into it, and then alter the view to add
+-- a default
+create view def_view_test as select * from def_test;
+create rule def_view_test_ins as
+ on insert to def_view_test
+ do instead insert into def_test select new.*;
+insert into def_view_test default values;
+alter table def_view_test alter column c1 set default 45;
+insert into def_view_test default values;
+alter table def_view_test alter column c2 set default 'view_default';
+insert into def_view_test default values;
+select * from def_view_test;
+ c1 | c2
+----+-----------------
+ 5 | initial_default
+ | initial_default
+ |
+ 10 | new_default
+ |
+ 45 |
+ 45 | view_default
+(7 rows)
+
+drop rule def_view_test_ins on def_view_test;
+drop view def_view_test;
+drop table def_test;
+-- alter table / drop column tests
+-- try altering system catalogs, should fail
+alter table pg_class drop column relname;
+ERROR: permission denied: "pg_class" is a system catalog
+-- try altering non-existent table, should fail
+alter table nosuchtable drop column bar;
+ERROR: relation "nosuchtable" does not exist
+-- test dropping columns
+create table atacc1 (a int4 not null, b int4, c int4 not null, d int4);
+insert into atacc1 values (1, 2, 3, 4);
+alter table atacc1 drop a;
+alter table atacc1 drop a;
+ERROR: column "a" of relation "atacc1" does not exist
+-- SELECTs
+select * from atacc1;
+ b | c | d
+---+---+---
+ 2 | 3 | 4
+(1 row)
+
+select * from atacc1 order by a;
+ERROR: column "a" does not exist
+LINE 1: select * from atacc1 order by a;
+ ^
+select * from atacc1 order by "........pg.dropped.1........";
+ERROR: column "........pg.dropped.1........" does not exist
+LINE 1: select * from atacc1 order by "........pg.dropped.1........"...
+ ^
+select * from atacc1 group by a;
+ERROR: column "a" does not exist
+LINE 1: select * from atacc1 group by a;
+ ^
+select * from atacc1 group by "........pg.dropped.1........";
+ERROR: column "........pg.dropped.1........" does not exist
+LINE 1: select * from atacc1 group by "........pg.dropped.1........"...
+ ^
+select atacc1.* from atacc1;
+ b | c | d
+---+---+---
+ 2 | 3 | 4
+(1 row)
+
+select a from atacc1;
+ERROR: column "a" does not exist
+LINE 1: select a from atacc1;
+ ^
+select atacc1.a from atacc1;
+ERROR: column atacc1.a does not exist
+LINE 1: select atacc1.a from atacc1;
+ ^
+select b,c,d from atacc1;
+ b | c | d
+---+---+---
+ 2 | 3 | 4
+(1 row)
+
+select a,b,c,d from atacc1;
+ERROR: column "a" does not exist
+LINE 1: select a,b,c,d from atacc1;
+ ^
+select * from atacc1 where a = 1;
+ERROR: column "a" does not exist
+LINE 1: select * from atacc1 where a = 1;
+ ^
+select "........pg.dropped.1........" from atacc1;
+ERROR: column "........pg.dropped.1........" does not exist
+LINE 1: select "........pg.dropped.1........" from atacc1;
+ ^
+select atacc1."........pg.dropped.1........" from atacc1;
+ERROR: column atacc1.........pg.dropped.1........ does not exist
+LINE 1: select atacc1."........pg.dropped.1........" from atacc1;
+ ^
+select "........pg.dropped.1........",b,c,d from atacc1;
+ERROR: column "........pg.dropped.1........" does not exist
+LINE 1: select "........pg.dropped.1........",b,c,d from atacc1;
+ ^
+select * from atacc1 where "........pg.dropped.1........" = 1;
+ERROR: column "........pg.dropped.1........" does not exist
+LINE 1: select * from atacc1 where "........pg.dropped.1........" = ...
+ ^
+-- UPDATEs
+update atacc1 set a = 3;
+ERROR: column "a" of relation "atacc1" does not exist
+LINE 1: update atacc1 set a = 3;
+ ^
+update atacc1 set b = 2 where a = 3;
+ERROR: column "a" does not exist
+LINE 1: update atacc1 set b = 2 where a = 3;
+ ^
+update atacc1 set "........pg.dropped.1........" = 3;
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+LINE 1: update atacc1 set "........pg.dropped.1........" = 3;
+ ^
+update atacc1 set b = 2 where "........pg.dropped.1........" = 3;
+ERROR: column "........pg.dropped.1........" does not exist
+LINE 1: update atacc1 set b = 2 where "........pg.dropped.1........"...
+ ^
+-- INSERTs
+insert into atacc1 values (10, 11, 12, 13);
+ERROR: INSERT has more expressions than target columns
+LINE 1: insert into atacc1 values (10, 11, 12, 13);
+ ^
+insert into atacc1 values (default, 11, 12, 13);
+ERROR: INSERT has more expressions than target columns
+LINE 1: insert into atacc1 values (default, 11, 12, 13);
+ ^
+insert into atacc1 values (11, 12, 13);
+insert into atacc1 (a) values (10);
+ERROR: column "a" of relation "atacc1" does not exist
+LINE 1: insert into atacc1 (a) values (10);
+ ^
+insert into atacc1 (a) values (default);
+ERROR: column "a" of relation "atacc1" does not exist
+LINE 1: insert into atacc1 (a) values (default);
+ ^
+insert into atacc1 (a,b,c,d) values (10,11,12,13);
+ERROR: column "a" of relation "atacc1" does not exist
+LINE 1: insert into atacc1 (a,b,c,d) values (10,11,12,13);
+ ^
+insert into atacc1 (a,b,c,d) values (default,11,12,13);
+ERROR: column "a" of relation "atacc1" does not exist
+LINE 1: insert into atacc1 (a,b,c,d) values (default,11,12,13);
+ ^
+insert into atacc1 (b,c,d) values (11,12,13);
+insert into atacc1 ("........pg.dropped.1........") values (10);
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+LINE 1: insert into atacc1 ("........pg.dropped.1........") values (...
+ ^
+insert into atacc1 ("........pg.dropped.1........") values (default);
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+LINE 1: insert into atacc1 ("........pg.dropped.1........") values (...
+ ^
+insert into atacc1 ("........pg.dropped.1........",b,c,d) values (10,11,12,13);
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+LINE 1: insert into atacc1 ("........pg.dropped.1........",b,c,d) va...
+ ^
+insert into atacc1 ("........pg.dropped.1........",b,c,d) values (default,11,12,13);
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+LINE 1: insert into atacc1 ("........pg.dropped.1........",b,c,d) va...
+ ^
+-- DELETEs
+delete from atacc1 where a = 3;
+ERROR: column "a" does not exist
+LINE 1: delete from atacc1 where a = 3;
+ ^
+delete from atacc1 where "........pg.dropped.1........" = 3;
+ERROR: column "........pg.dropped.1........" does not exist
+LINE 1: delete from atacc1 where "........pg.dropped.1........" = 3;
+ ^
+delete from atacc1;
+-- try dropping a non-existent column, should fail
+alter table atacc1 drop bar;
+ERROR: column "bar" of relation "atacc1" does not exist
+-- try removing an oid column, should succeed (as it's nonexistent)
+alter table atacc1 SET WITHOUT OIDS;
+-- try adding an oid column, should fail (not supported)
+alter table atacc1 SET WITH OIDS;
+ERROR: syntax error at or near "WITH"
+LINE 1: alter table atacc1 SET WITH OIDS;
+ ^
+-- try dropping the xmin column, should fail
+alter table atacc1 drop xmin;
+ERROR: cannot drop system column "xmin"
+-- try creating a view and altering that, should fail
+create view myview as select * from atacc1;
+select * from myview;
+ b | c | d
+---+---+---
+(0 rows)
+
+alter table myview drop d;
+ERROR: "myview" is not a table, composite type, or foreign table
+drop view myview;
+-- test some commands to make sure they fail on the dropped column
+analyze atacc1(a);
+ERROR: column "a" of relation "atacc1" does not exist
+analyze atacc1("........pg.dropped.1........");
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+vacuum analyze atacc1(a);
+ERROR: column "a" of relation "atacc1" does not exist
+vacuum analyze atacc1("........pg.dropped.1........");
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+comment on column atacc1.a is 'testing';
+ERROR: column "a" of relation "atacc1" does not exist
+comment on column atacc1."........pg.dropped.1........" is 'testing';
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+alter table atacc1 alter a set storage plain;
+ERROR: column "a" of relation "atacc1" does not exist
+alter table atacc1 alter "........pg.dropped.1........" set storage plain;
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+alter table atacc1 alter a set statistics 0;
+ERROR: column "a" of relation "atacc1" does not exist
+alter table atacc1 alter "........pg.dropped.1........" set statistics 0;
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+alter table atacc1 alter a set default 3;
+ERROR: column "a" of relation "atacc1" does not exist
+alter table atacc1 alter "........pg.dropped.1........" set default 3;
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+alter table atacc1 alter a drop default;
+ERROR: column "a" of relation "atacc1" does not exist
+alter table atacc1 alter "........pg.dropped.1........" drop default;
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+alter table atacc1 alter a set not null;
+ERROR: column "a" of relation "atacc1" does not exist
+alter table atacc1 alter "........pg.dropped.1........" set not null;
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+alter table atacc1 alter a drop not null;
+ERROR: column "a" of relation "atacc1" does not exist
+alter table atacc1 alter "........pg.dropped.1........" drop not null;
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+alter table atacc1 rename a to x;
+ERROR: column "a" does not exist
+alter table atacc1 rename "........pg.dropped.1........" to x;
+ERROR: column "........pg.dropped.1........" does not exist
+alter table atacc1 add primary key(a);
+ERROR: column "a" of relation "atacc1" does not exist
+alter table atacc1 add primary key("........pg.dropped.1........");
+ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist
+alter table atacc1 add unique(a);
+ERROR: column "a" named in key does not exist
+alter table atacc1 add unique("........pg.dropped.1........");
+ERROR: column "........pg.dropped.1........" named in key does not exist
+alter table atacc1 add check (a > 3);
+ERROR: column "a" does not exist
+alter table atacc1 add check ("........pg.dropped.1........" > 3);
+ERROR: column "........pg.dropped.1........" does not exist
+create table atacc2 (id int4 unique);
+alter table atacc1 add foreign key (a) references atacc2(id);
+ERROR: column "a" referenced in foreign key constraint does not exist
+alter table atacc1 add foreign key ("........pg.dropped.1........") references atacc2(id);
+ERROR: column "........pg.dropped.1........" referenced in foreign key constraint does not exist
+alter table atacc2 add foreign key (id) references atacc1(a);
+ERROR: column "a" referenced in foreign key constraint does not exist
+alter table atacc2 add foreign key (id) references atacc1("........pg.dropped.1........");
+ERROR: column "........pg.dropped.1........" referenced in foreign key constraint does not exist
+drop table atacc2;
+create index "testing_idx" on atacc1(a);
+ERROR: column "a" does not exist
+create index "testing_idx" on atacc1("........pg.dropped.1........");
+ERROR: column "........pg.dropped.1........" does not exist
+-- test create as and select into
+insert into atacc1 values (21, 22, 23);
+create table attest1 as select * from atacc1;
+select * from attest1;
+ b | c | d
+----+----+----
+ 21 | 22 | 23
+(1 row)
+
+drop table attest1;
+select * into attest2 from atacc1;
+select * from attest2;
+ b | c | d
+----+----+----
+ 21 | 22 | 23
+(1 row)
+
+drop table attest2;
+-- try dropping all columns
+alter table atacc1 drop c;
+alter table atacc1 drop d;
+alter table atacc1 drop b;
+select * from atacc1;
+--
+(1 row)
+
+drop table atacc1;
+-- test constraint error reporting in presence of dropped columns
+create table atacc1 (id serial primary key, value int check (value < 10));
+insert into atacc1(value) values (100);
+ERROR: new row for relation "atacc1" violates check constraint "atacc1_value_check"
+DETAIL: Failing row contains (1, 100).
+alter table atacc1 drop column value;
+alter table atacc1 add column value int check (value < 10);
+insert into atacc1(value) values (100);
+ERROR: new row for relation "atacc1" violates check constraint "atacc1_value_check"
+DETAIL: Failing row contains (2, 100).
+insert into atacc1(id, value) values (null, 0);
+ERROR: null value in column "id" of relation "atacc1" violates not-null constraint
+DETAIL: Failing row contains (null, 0).
+drop table atacc1;
+-- test inheritance
+create table parent (a int, b int, c int);
+insert into parent values (1, 2, 3);
+alter table parent drop a;
+create table child (d varchar(255)) inherits (parent);
+insert into child values (12, 13, 'testing');
+select * from parent;
+ b | c
+----+----
+ 2 | 3
+ 12 | 13
+(2 rows)
+
+select * from child;
+ b | c | d
+----+----+---------
+ 12 | 13 | testing
+(1 row)
+
+alter table parent drop c;
+select * from parent;
+ b
+----
+ 2
+ 12
+(2 rows)
+
+select * from child;
+ b | d
+----+---------
+ 12 | testing
+(1 row)
+
+drop table child;
+drop table parent;
+-- check error cases for inheritance column merging
+create table parent (a float8, b numeric(10,4), c text collate "C");
+create table child (a float4) inherits (parent); -- fail
+NOTICE: merging column "a" with inherited definition
+ERROR: column "a" has a type conflict
+DETAIL: double precision versus real
+create table child (b decimal(10,7)) inherits (parent); -- fail
+NOTICE: moving and merging column "b" with inherited definition
+DETAIL: User-specified column moved to the position of the inherited column.
+ERROR: column "b" has a type conflict
+DETAIL: numeric(10,4) versus numeric(10,7)
+create table child (c text collate "POSIX") inherits (parent); -- fail
+NOTICE: moving and merging column "c" with inherited definition
+DETAIL: User-specified column moved to the position of the inherited column.
+ERROR: column "c" has a collation conflict
+DETAIL: "C" versus "POSIX"
+create table child (a double precision, b decimal(10,4)) inherits (parent);
+NOTICE: merging column "a" with inherited definition
+NOTICE: merging column "b" with inherited definition
+drop table child;
+drop table parent;
+-- test copy in/out
+create table attest (a int4, b int4, c int4);
+insert into attest values (1,2,3);
+alter table attest drop a;
+copy attest to stdout;
+2 3
+copy attest(a) to stdout;
+ERROR: column "a" of relation "attest" does not exist
+copy attest("........pg.dropped.1........") to stdout;
+ERROR: column "........pg.dropped.1........" of relation "attest" does not exist
+copy attest from stdin;
+ERROR: extra data after last expected column
+CONTEXT: COPY attest, line 1: "10 11 12"
+select * from attest;
+ b | c
+---+---
+ 2 | 3
+(1 row)
+
+copy attest from stdin;
+select * from attest;
+ b | c
+----+----
+ 2 | 3
+ 21 | 22
+(2 rows)
+
+copy attest(a) from stdin;
+ERROR: column "a" of relation "attest" does not exist
+copy attest("........pg.dropped.1........") from stdin;
+ERROR: column "........pg.dropped.1........" of relation "attest" does not exist
+copy attest(b,c) from stdin;
+select * from attest;
+ b | c
+----+----
+ 2 | 3
+ 21 | 22
+ 31 | 32
+(3 rows)
+
+drop table attest;
+-- test inheritance
+create table dropColumn (a int, b int, e int);
+create table dropColumnChild (c int) inherits (dropColumn);
+create table dropColumnAnother (d int) inherits (dropColumnChild);
+-- these two should fail
+alter table dropColumnchild drop column a;
+ERROR: cannot drop inherited column "a"
+alter table only dropColumnChild drop column b;
+ERROR: cannot drop inherited column "b"
+-- these three should work
+alter table only dropColumn drop column e;
+alter table dropColumnChild drop column c;
+alter table dropColumn drop column a;
+create table renameColumn (a int);
+create table renameColumnChild (b int) inherits (renameColumn);
+create table renameColumnAnother (c int) inherits (renameColumnChild);
+-- these three should fail
+alter table renameColumnChild rename column a to d;
+ERROR: cannot rename inherited column "a"
+alter table only renameColumnChild rename column a to d;
+ERROR: inherited column "a" must be renamed in child tables too
+alter table only renameColumn rename column a to d;
+ERROR: inherited column "a" must be renamed in child tables too
+-- these should work
+alter table renameColumn rename column a to d;
+alter table renameColumnChild rename column b to a;
+-- these should work
+alter table if exists doesnt_exist_tab rename column a to d;
+NOTICE: relation "doesnt_exist_tab" does not exist, skipping
+alter table if exists doesnt_exist_tab rename column b to a;
+NOTICE: relation "doesnt_exist_tab" does not exist, skipping
+-- this should work
+alter table renameColumn add column w int;
+-- this should fail
+alter table only renameColumn add column x int;
+ERROR: column must be added to child tables too
+-- Test corner cases in dropping of inherited columns
+create table p1 (f1 int, f2 int);
+create table c1 (f1 int not null) inherits(p1);
+NOTICE: merging column "f1" with inherited definition
+-- should be rejected since c1.f1 is inherited
+alter table c1 drop column f1;
+ERROR: cannot drop inherited column "f1"
+-- should work
+alter table p1 drop column f1;
+-- c1.f1 is still there, but no longer inherited
+select f1 from c1;
+ f1
+----
+(0 rows)
+
+alter table c1 drop column f1;
+select f1 from c1;
+ERROR: column "f1" does not exist
+LINE 1: select f1 from c1;
+ ^
+HINT: Perhaps you meant to reference the column "c1.f2".
+drop table p1 cascade;
+NOTICE: drop cascades to table c1
+create table p1 (f1 int, f2 int);
+create table c1 () inherits(p1);
+-- should be rejected since c1.f1 is inherited
+alter table c1 drop column f1;
+ERROR: cannot drop inherited column "f1"
+alter table p1 drop column f1;
+-- c1.f1 is dropped now, since there is no local definition for it
+select f1 from c1;
+ERROR: column "f1" does not exist
+LINE 1: select f1 from c1;
+ ^
+HINT: Perhaps you meant to reference the column "c1.f2".
+drop table p1 cascade;
+NOTICE: drop cascades to table c1
+create table p1 (f1 int, f2 int);
+create table c1 () inherits(p1);
+-- should be rejected since c1.f1 is inherited
+alter table c1 drop column f1;
+ERROR: cannot drop inherited column "f1"
+alter table only p1 drop column f1;
+-- c1.f1 is NOT dropped, but must now be considered non-inherited
+alter table c1 drop column f1;
+drop table p1 cascade;
+NOTICE: drop cascades to table c1
+create table p1 (f1 int, f2 int);
+create table c1 (f1 int not null) inherits(p1);
+NOTICE: merging column "f1" with inherited definition
+-- should be rejected since c1.f1 is inherited
+alter table c1 drop column f1;
+ERROR: cannot drop inherited column "f1"
+alter table only p1 drop column f1;
+-- c1.f1 is still there, but no longer inherited
+alter table c1 drop column f1;
+drop table p1 cascade;
+NOTICE: drop cascades to table c1
+create table p1(id int, name text);
+create table p2(id2 int, name text, height int);
+create table c1(age int) inherits(p1,p2);
+NOTICE: merging multiple inherited definitions of column "name"
+create table gc1() inherits (c1);
+select relname, attname, attinhcount, attislocal
+from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid)
+where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped
+order by relname, attnum;
+ relname | attname | attinhcount | attislocal
+---------+---------+-------------+------------
+ c1 | id | 1 | f
+ c1 | name | 2 | f
+ c1 | id2 | 1 | f
+ c1 | height | 1 | f
+ c1 | age | 0 | t
+ gc1 | id | 1 | f
+ gc1 | name | 1 | f
+ gc1 | id2 | 1 | f
+ gc1 | height | 1 | f
+ gc1 | age | 1 | f
+ p1 | id | 0 | t
+ p1 | name | 0 | t
+ p2 | id2 | 0 | t
+ p2 | name | 0 | t
+ p2 | height | 0 | t
+(15 rows)
+
+-- should work
+alter table only p1 drop column name;
+-- should work. Now c1.name is local and inhcount is 0.
+alter table p2 drop column name;
+-- should be rejected since its inherited
+alter table gc1 drop column name;
+ERROR: cannot drop inherited column "name"
+-- should work, and drop gc1.name along
+alter table c1 drop column name;
+-- should fail: column does not exist
+alter table gc1 drop column name;
+ERROR: column "name" of relation "gc1" does not exist
+-- should work and drop the attribute in all tables
+alter table p2 drop column height;
+-- IF EXISTS test
+create table dropColumnExists ();
+alter table dropColumnExists drop column non_existing; --fail
+ERROR: column "non_existing" of relation "dropcolumnexists" does not exist
+alter table dropColumnExists drop column if exists non_existing; --succeed
+NOTICE: column "non_existing" of relation "dropcolumnexists" does not exist, skipping
+select relname, attname, attinhcount, attislocal
+from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid)
+where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped
+order by relname, attnum;
+ relname | attname | attinhcount | attislocal
+---------+---------+-------------+------------
+ c1 | id | 1 | f
+ c1 | id2 | 1 | f
+ c1 | age | 0 | t
+ gc1 | id | 1 | f
+ gc1 | id2 | 1 | f
+ gc1 | age | 1 | f
+ p1 | id | 0 | t
+ p2 | id2 | 0 | t
+(8 rows)
+
+drop table p1, p2 cascade;
+NOTICE: drop cascades to 2 other objects
+DETAIL: drop cascades to table c1
+drop cascades to table gc1
+-- test attinhcount tracking with merged columns
+create table depth0();
+create table depth1(c text) inherits (depth0);
+create table depth2() inherits (depth1);
+alter table depth0 add c text;
+NOTICE: merging definition of column "c" for child "depth1"
+select attrelid::regclass, attname, attinhcount, attislocal
+from pg_attribute
+where attnum > 0 and attrelid::regclass in ('depth0', 'depth1', 'depth2')
+order by attrelid::regclass::text, attnum;
+ attrelid | attname | attinhcount | attislocal
+----------+---------+-------------+------------
+ depth0 | c | 0 | t
+ depth1 | c | 1 | t
+ depth2 | c | 1 | f
+(3 rows)
+
+-- test renumbering of child-table columns in inherited operations
+create table p1 (f1 int);
+create table c1 (f2 text, f3 int) inherits (p1);
+alter table p1 add column a1 int check (a1 > 0);
+alter table p1 add column f2 text;
+NOTICE: merging definition of column "f2" for child "c1"
+insert into p1 values (1,2,'abc');
+insert into c1 values(11,'xyz',33,0); -- should fail
+ERROR: new row for relation "c1" violates check constraint "p1_a1_check"
+DETAIL: Failing row contains (11, xyz, 33, 0).
+insert into c1 values(11,'xyz',33,22);
+select * from p1;
+ f1 | a1 | f2
+----+----+-----
+ 1 | 2 | abc
+ 11 | 22 | xyz
+(2 rows)
+
+update p1 set a1 = a1 + 1, f2 = upper(f2);
+select * from p1;
+ f1 | a1 | f2
+----+----+-----
+ 1 | 3 | ABC
+ 11 | 23 | XYZ
+(2 rows)
+
+drop table p1 cascade;
+NOTICE: drop cascades to table c1
+-- test that operations with a dropped column do not try to reference
+-- its datatype
+create domain mytype as text;
+create temp table foo (f1 text, f2 mytype, f3 text);
+insert into foo values('bb','cc','dd');
+select * from foo;
+ f1 | f2 | f3
+----+----+----
+ bb | cc | dd
+(1 row)
+
+drop domain mytype cascade;
+NOTICE: drop cascades to column f2 of table foo
+select * from foo;
+ f1 | f3
+----+----
+ bb | dd
+(1 row)
+
+insert into foo values('qq','rr');
+select * from foo;
+ f1 | f3
+----+----
+ bb | dd
+ qq | rr
+(2 rows)
+
+update foo set f3 = 'zz';
+select * from foo;
+ f1 | f3
+----+----
+ bb | zz
+ qq | zz
+(2 rows)
+
+select f3,max(f1) from foo group by f3;
+ f3 | max
+----+-----
+ zz | qq
+(1 row)
+
+-- Simple tests for alter table column type
+alter table foo alter f1 TYPE integer; -- fails
+ERROR: column "f1" cannot be cast automatically to type integer
+HINT: You might need to specify "USING f1::integer".
+alter table foo alter f1 TYPE varchar(10);
+create table anothertab (atcol1 serial8, atcol2 boolean,
+ constraint anothertab_chk check (atcol1 <= 3));
+insert into anothertab (atcol1, atcol2) values (default, true);
+insert into anothertab (atcol1, atcol2) values (default, false);
+select * from anothertab;
+ atcol1 | atcol2
+--------+--------
+ 1 | t
+ 2 | f
+(2 rows)
+
+alter table anothertab alter column atcol1 type boolean; -- fails
+ERROR: column "atcol1" cannot be cast automatically to type boolean
+HINT: You might need to specify "USING atcol1::boolean".
+alter table anothertab alter column atcol1 type boolean using atcol1::int; -- fails
+ERROR: result of USING clause for column "atcol1" cannot be cast automatically to type boolean
+HINT: You might need to add an explicit cast.
+alter table anothertab alter column atcol1 type integer;
+select * from anothertab;
+ atcol1 | atcol2
+--------+--------
+ 1 | t
+ 2 | f
+(2 rows)
+
+insert into anothertab (atcol1, atcol2) values (45, null); -- fails
+ERROR: new row for relation "anothertab" violates check constraint "anothertab_chk"
+DETAIL: Failing row contains (45, null).
+insert into anothertab (atcol1, atcol2) values (default, null);
+select * from anothertab;
+ atcol1 | atcol2
+--------+--------
+ 1 | t
+ 2 | f
+ 3 |
+(3 rows)
+
+alter table anothertab alter column atcol2 type text
+ using case when atcol2 is true then 'IT WAS TRUE'
+ when atcol2 is false then 'IT WAS FALSE'
+ else 'IT WAS NULL!' end;
+select * from anothertab;
+ atcol1 | atcol2
+--------+--------------
+ 1 | IT WAS TRUE
+ 2 | IT WAS FALSE
+ 3 | IT WAS NULL!
+(3 rows)
+
+alter table anothertab alter column atcol1 type boolean
+ using case when atcol1 % 2 = 0 then true else false end; -- fails
+ERROR: default for column "atcol1" cannot be cast automatically to type boolean
+alter table anothertab alter column atcol1 drop default;
+alter table anothertab alter column atcol1 type boolean
+ using case when atcol1 % 2 = 0 then true else false end; -- fails
+ERROR: operator does not exist: boolean <= integer
+HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
+alter table anothertab drop constraint anothertab_chk;
+alter table anothertab drop constraint anothertab_chk; -- fails
+ERROR: constraint "anothertab_chk" of relation "anothertab" does not exist
+alter table anothertab drop constraint IF EXISTS anothertab_chk; -- succeeds
+NOTICE: constraint "anothertab_chk" of relation "anothertab" does not exist, skipping
+alter table anothertab alter column atcol1 type boolean
+ using case when atcol1 % 2 = 0 then true else false end;
+select * from anothertab;
+ atcol1 | atcol2
+--------+--------------
+ f | IT WAS TRUE
+ t | IT WAS FALSE
+ f | IT WAS NULL!
+(3 rows)
+
+drop table anothertab;
+-- Test index handling in alter table column type (cf. bugs #15835, #15865)
+create table anothertab(f1 int primary key, f2 int unique,
+ f3 int, f4 int, f5 int);
+alter table anothertab
+ add exclude using btree (f3 with =);
+alter table anothertab
+ add exclude using btree (f4 with =) where (f4 is not null);
+alter table anothertab
+ add exclude using btree (f4 with =) where (f5 > 0);
+alter table anothertab
+ add unique(f1,f4);
+create index on anothertab(f2,f3);
+create unique index on anothertab(f4);
+\d anothertab
+ Table "public.anothertab"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ f1 | integer | | not null |
+ f2 | integer | | |
+ f3 | integer | | |
+ f4 | integer | | |
+ f5 | integer | | |
+Indexes:
+ "anothertab_pkey" PRIMARY KEY, btree (f1)
+ "anothertab_f1_f4_key" UNIQUE CONSTRAINT, btree (f1, f4)
+ "anothertab_f2_f3_idx" btree (f2, f3)
+ "anothertab_f2_key" UNIQUE CONSTRAINT, btree (f2)
+ "anothertab_f3_excl" EXCLUDE USING btree (f3 WITH =)
+ "anothertab_f4_excl" EXCLUDE USING btree (f4 WITH =) WHERE (f4 IS NOT NULL)
+ "anothertab_f4_excl1" EXCLUDE USING btree (f4 WITH =) WHERE (f5 > 0)
+ "anothertab_f4_idx" UNIQUE, btree (f4)
+
+alter table anothertab alter column f1 type bigint;
+alter table anothertab
+ alter column f2 type bigint,
+ alter column f3 type bigint,
+ alter column f4 type bigint;
+alter table anothertab alter column f5 type bigint;
+\d anothertab
+ Table "public.anothertab"
+ Column | Type | Collation | Nullable | Default
+--------+--------+-----------+----------+---------
+ f1 | bigint | | not null |
+ f2 | bigint | | |
+ f3 | bigint | | |
+ f4 | bigint | | |
+ f5 | bigint | | |
+Indexes:
+ "anothertab_pkey" PRIMARY KEY, btree (f1)
+ "anothertab_f1_f4_key" UNIQUE CONSTRAINT, btree (f1, f4)
+ "anothertab_f2_f3_idx" btree (f2, f3)
+ "anothertab_f2_key" UNIQUE CONSTRAINT, btree (f2)
+ "anothertab_f3_excl" EXCLUDE USING btree (f3 WITH =)
+ "anothertab_f4_excl" EXCLUDE USING btree (f4 WITH =) WHERE (f4 IS NOT NULL)
+ "anothertab_f4_excl1" EXCLUDE USING btree (f4 WITH =) WHERE (f5 > 0)
+ "anothertab_f4_idx" UNIQUE, btree (f4)
+
+drop table anothertab;
+-- test that USING expressions are parsed before column alter type / drop steps
+create table another (f1 int, f2 text, f3 text);
+insert into another values(1, 'one', 'uno');
+insert into another values(2, 'two', 'due');
+insert into another values(3, 'three', 'tre');
+select * from another;
+ f1 | f2 | f3
+----+-------+-----
+ 1 | one | uno
+ 2 | two | due
+ 3 | three | tre
+(3 rows)
+
+alter table another
+ alter f1 type text using f2 || ' and ' || f3 || ' more',
+ alter f2 type bigint using f1 * 10,
+ drop column f3;
+select * from another;
+ f1 | f2
+--------------------+----
+ one and uno more | 10
+ two and due more | 20
+ three and tre more | 30
+(3 rows)
+
+drop table another;
+-- Create an index that skips WAL, then perform a SET DATA TYPE that skips
+-- rewriting the index.
+begin;
+create table skip_wal_skip_rewrite_index (c varchar(10) primary key);
+alter table skip_wal_skip_rewrite_index alter c type varchar(20);
+commit;
+-- We disallow changing table's row type if it's used for storage
+create table at_tab1 (a int, b text);
+create table at_tab2 (x int, y at_tab1);
+alter table at_tab1 alter column b type varchar; -- fails
+ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type
+drop table at_tab2;
+-- Use of row type in an expression is defended differently
+create table at_tab2 (x int, y text, check((x,y)::at_tab1 = (1,'42')::at_tab1));
+alter table at_tab1 alter column b type varchar; -- allowed, but ...
+insert into at_tab2 values(1,'42'); -- ... this will fail
+ERROR: ROW() column has type text instead of type character varying
+drop table at_tab1, at_tab2;
+-- Check it for a partitioned table, too
+create table at_tab1 (a int, b text) partition by list(a);
+create table at_tab2 (x int, y at_tab1);
+alter table at_tab1 alter column b type varchar; -- fails
+ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type
+drop table at_tab1, at_tab2;
+-- Alter column type that's part of a partitioned index
+create table at_partitioned (a int, b text) partition by range (a);
+create table at_part_1 partition of at_partitioned for values from (0) to (1000);
+insert into at_partitioned values (512, '0.123');
+create table at_part_2 (b text, a int);
+insert into at_part_2 values ('1.234', 1024);
+create index on at_partitioned (b);
+create index on at_partitioned (a);
+\d at_part_1
+ Table "public.at_part_1"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | text | | |
+Partition of: at_partitioned FOR VALUES FROM (0) TO (1000)
+Indexes:
+ "at_part_1_a_idx" btree (a)
+ "at_part_1_b_idx" btree (b)
+
+\d at_part_2
+ Table "public.at_part_2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ b | text | | |
+ a | integer | | |
+
+alter table at_partitioned attach partition at_part_2 for values from (1000) to (2000);
+\d at_part_2
+ Table "public.at_part_2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ b | text | | |
+ a | integer | | |
+Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000)
+Indexes:
+ "at_part_2_a_idx" btree (a)
+ "at_part_2_b_idx" btree (b)
+
+alter table at_partitioned alter column b type numeric using b::numeric;
+\d at_part_1
+ Table "public.at_part_1"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | numeric | | |
+Partition of: at_partitioned FOR VALUES FROM (0) TO (1000)
+Indexes:
+ "at_part_1_a_idx" btree (a)
+ "at_part_1_b_idx" btree (b)
+
+\d at_part_2
+ Table "public.at_part_2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ b | numeric | | |
+ a | integer | | |
+Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000)
+Indexes:
+ "at_part_2_a_idx" btree (a)
+ "at_part_2_b_idx" btree (b)
+
+drop table at_partitioned;
+-- Alter column type when no table rewrite is required
+-- Also check that comments are preserved
+create table at_partitioned(id int, name varchar(64), unique (id, name))
+ partition by hash(id);
+comment on constraint at_partitioned_id_name_key on at_partitioned is 'parent constraint';
+comment on index at_partitioned_id_name_key is 'parent index';
+create table at_partitioned_0 partition of at_partitioned
+ for values with (modulus 2, remainder 0);
+comment on constraint at_partitioned_0_id_name_key on at_partitioned_0 is 'child 0 constraint';
+comment on index at_partitioned_0_id_name_key is 'child 0 index';
+create table at_partitioned_1 partition of at_partitioned
+ for values with (modulus 2, remainder 1);
+comment on constraint at_partitioned_1_id_name_key on at_partitioned_1 is 'child 1 constraint';
+comment on index at_partitioned_1_id_name_key is 'child 1 index';
+insert into at_partitioned values(1, 'foo');
+insert into at_partitioned values(3, 'bar');
+create temp table old_oids as
+ select relname, oid as oldoid, relfilenode as oldfilenode
+ from pg_class where relname like 'at_partitioned%';
+select relname,
+ c.oid = oldoid as orig_oid,
+ case relfilenode
+ when 0 then 'none'
+ when c.oid then 'own'
+ when oldfilenode then 'orig'
+ else 'OTHER'
+ end as storage,
+ obj_description(c.oid, 'pg_class') as desc
+ from pg_class c left join old_oids using (relname)
+ where relname like 'at_partitioned%'
+ order by relname;
+ relname | orig_oid | storage | desc
+------------------------------+----------+---------+---------------
+ at_partitioned | t | none |
+ at_partitioned_0 | t | own |
+ at_partitioned_0_id_name_key | t | own | child 0 index
+ at_partitioned_1 | t | own |
+ at_partitioned_1_id_name_key | t | own | child 1 index
+ at_partitioned_id_name_key | t | none | parent index
+(6 rows)
+
+select conname, obj_description(oid, 'pg_constraint') as desc
+ from pg_constraint where conname like 'at_partitioned%'
+ order by conname;
+ conname | desc
+------------------------------+--------------------
+ at_partitioned_0_id_name_key | child 0 constraint
+ at_partitioned_1_id_name_key | child 1 constraint
+ at_partitioned_id_name_key | parent constraint
+(3 rows)
+
+alter table at_partitioned alter column name type varchar(127);
+-- Note: these tests currently show the wrong behavior for comments :-(
+select relname,
+ c.oid = oldoid as orig_oid,
+ case relfilenode
+ when 0 then 'none'
+ when c.oid then 'own'
+ when oldfilenode then 'orig'
+ else 'OTHER'
+ end as storage,
+ obj_description(c.oid, 'pg_class') as desc
+ from pg_class c left join old_oids using (relname)
+ where relname like 'at_partitioned%'
+ order by relname;
+ relname | orig_oid | storage | desc
+------------------------------+----------+---------+--------------
+ at_partitioned | t | none |
+ at_partitioned_0 | t | own |
+ at_partitioned_0_id_name_key | f | own | parent index
+ at_partitioned_1 | t | own |
+ at_partitioned_1_id_name_key | f | own | parent index
+ at_partitioned_id_name_key | f | none | parent index
+(6 rows)
+
+select conname, obj_description(oid, 'pg_constraint') as desc
+ from pg_constraint where conname like 'at_partitioned%'
+ order by conname;
+ conname | desc
+------------------------------+-------------------
+ at_partitioned_0_id_name_key |
+ at_partitioned_1_id_name_key |
+ at_partitioned_id_name_key | parent constraint
+(3 rows)
+
+-- Don't remove this DROP, it exposes bug #15672
+drop table at_partitioned;
+-- disallow recursive containment of row types
+create temp table recur1 (f1 int);
+alter table recur1 add column f2 recur1; -- fails
+ERROR: composite type recur1 cannot be made a member of itself
+alter table recur1 add column f2 recur1[]; -- fails
+ERROR: composite type recur1 cannot be made a member of itself
+create domain array_of_recur1 as recur1[];
+alter table recur1 add column f2 array_of_recur1; -- fails
+ERROR: composite type recur1 cannot be made a member of itself
+create temp table recur2 (f1 int, f2 recur1);
+alter table recur1 add column f2 recur2; -- fails
+ERROR: composite type recur1 cannot be made a member of itself
+alter table recur1 add column f2 int;
+alter table recur1 alter column f2 type recur2; -- fails
+ERROR: composite type recur1 cannot be made a member of itself
+-- SET STORAGE may need to add a TOAST table
+create table test_storage (a text);
+alter table test_storage alter a set storage plain;
+alter table test_storage add b int default 0; -- rewrite table to remove its TOAST table
+alter table test_storage alter a set storage extended; -- re-add TOAST table
+select reltoastrelid <> 0 as has_toast_table
+from pg_class
+where oid = 'test_storage'::regclass;
+ has_toast_table
+-----------------
+ t
+(1 row)
+
+-- test that SET STORAGE propagates to index correctly
+create index test_storage_idx on test_storage (b, a);
+alter table test_storage alter column a set storage external;
+\d+ test_storage
+ Table "public.test_storage"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | external | |
+ b | integer | | | 0 | plain | |
+Indexes:
+ "test_storage_idx" btree (b, a)
+
+\d+ test_storage_idx
+ Index "public.test_storage_idx"
+ Column | Type | Key? | Definition | Storage | Stats target
+--------+---------+------+------------+----------+--------------
+ b | integer | yes | b | plain |
+ a | text | yes | a | external |
+btree, for table "public.test_storage"
+
+-- ALTER COLUMN TYPE with a check constraint and a child table (bug #13779)
+CREATE TABLE test_inh_check (a float check (a > 10.2), b float);
+CREATE TABLE test_inh_check_child() INHERITS(test_inh_check);
+\d test_inh_check
+ Table "public.test_inh_check"
+ Column | Type | Collation | Nullable | Default
+--------+------------------+-----------+----------+---------
+ a | double precision | | |
+ b | double precision | | |
+Check constraints:
+ "test_inh_check_a_check" CHECK (a > 10.2::double precision)
+Number of child tables: 1 (Use \d+ to list them.)
+
+\d test_inh_check_child
+ Table "public.test_inh_check_child"
+ Column | Type | Collation | Nullable | Default
+--------+------------------+-----------+----------+---------
+ a | double precision | | |
+ b | double precision | | |
+Check constraints:
+ "test_inh_check_a_check" CHECK (a > 10.2::double precision)
+Inherits: test_inh_check
+
+select relname, conname, coninhcount, conislocal, connoinherit
+ from pg_constraint c, pg_class r
+ where relname like 'test_inh_check%' and c.conrelid = r.oid
+ order by 1, 2;
+ relname | conname | coninhcount | conislocal | connoinherit
+----------------------+------------------------+-------------+------------+--------------
+ test_inh_check | test_inh_check_a_check | 0 | t | f
+ test_inh_check_child | test_inh_check_a_check | 1 | f | f
+(2 rows)
+
+ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric;
+\d test_inh_check
+ Table "public.test_inh_check"
+ Column | Type | Collation | Nullable | Default
+--------+------------------+-----------+----------+---------
+ a | numeric | | |
+ b | double precision | | |
+Check constraints:
+ "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
+Number of child tables: 1 (Use \d+ to list them.)
+
+\d test_inh_check_child
+ Table "public.test_inh_check_child"
+ Column | Type | Collation | Nullable | Default
+--------+------------------+-----------+----------+---------
+ a | numeric | | |
+ b | double precision | | |
+Check constraints:
+ "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
+Inherits: test_inh_check
+
+select relname, conname, coninhcount, conislocal, connoinherit
+ from pg_constraint c, pg_class r
+ where relname like 'test_inh_check%' and c.conrelid = r.oid
+ order by 1, 2;
+ relname | conname | coninhcount | conislocal | connoinherit
+----------------------+------------------------+-------------+------------+--------------
+ test_inh_check | test_inh_check_a_check | 0 | t | f
+ test_inh_check_child | test_inh_check_a_check | 1 | f | f
+(2 rows)
+
+-- also try noinherit, local, and local+inherited cases
+ALTER TABLE test_inh_check ADD CONSTRAINT bnoinherit CHECK (b > 100) NO INHERIT;
+ALTER TABLE test_inh_check_child ADD CONSTRAINT blocal CHECK (b < 1000);
+ALTER TABLE test_inh_check_child ADD CONSTRAINT bmerged CHECK (b > 1);
+ALTER TABLE test_inh_check ADD CONSTRAINT bmerged CHECK (b > 1);
+NOTICE: merging constraint "bmerged" with inherited definition
+\d test_inh_check
+ Table "public.test_inh_check"
+ Column | Type | Collation | Nullable | Default
+--------+------------------+-----------+----------+---------
+ a | numeric | | |
+ b | double precision | | |
+Check constraints:
+ "bmerged" CHECK (b > 1::double precision)
+ "bnoinherit" CHECK (b > 100::double precision) NO INHERIT
+ "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
+Number of child tables: 1 (Use \d+ to list them.)
+
+\d test_inh_check_child
+ Table "public.test_inh_check_child"
+ Column | Type | Collation | Nullable | Default
+--------+------------------+-----------+----------+---------
+ a | numeric | | |
+ b | double precision | | |
+Check constraints:
+ "blocal" CHECK (b < 1000::double precision)
+ "bmerged" CHECK (b > 1::double precision)
+ "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
+Inherits: test_inh_check
+
+select relname, conname, coninhcount, conislocal, connoinherit
+ from pg_constraint c, pg_class r
+ where relname like 'test_inh_check%' and c.conrelid = r.oid
+ order by 1, 2;
+ relname | conname | coninhcount | conislocal | connoinherit
+----------------------+------------------------+-------------+------------+--------------
+ test_inh_check | bmerged | 0 | t | f
+ test_inh_check | bnoinherit | 0 | t | t
+ test_inh_check | test_inh_check_a_check | 0 | t | f
+ test_inh_check_child | blocal | 0 | t | f
+ test_inh_check_child | bmerged | 1 | t | f
+ test_inh_check_child | test_inh_check_a_check | 1 | f | f
+(6 rows)
+
+ALTER TABLE test_inh_check ALTER COLUMN b TYPE numeric;
+NOTICE: merging constraint "bmerged" with inherited definition
+\d test_inh_check
+ Table "public.test_inh_check"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | numeric | | |
+ b | numeric | | |
+Check constraints:
+ "bmerged" CHECK (b::double precision > 1::double precision)
+ "bnoinherit" CHECK (b::double precision > 100::double precision) NO INHERIT
+ "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
+Number of child tables: 1 (Use \d+ to list them.)
+
+\d test_inh_check_child
+ Table "public.test_inh_check_child"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | numeric | | |
+ b | numeric | | |
+Check constraints:
+ "blocal" CHECK (b::double precision < 1000::double precision)
+ "bmerged" CHECK (b::double precision > 1::double precision)
+ "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision)
+Inherits: test_inh_check
+
+select relname, conname, coninhcount, conislocal, connoinherit
+ from pg_constraint c, pg_class r
+ where relname like 'test_inh_check%' and c.conrelid = r.oid
+ order by 1, 2;
+ relname | conname | coninhcount | conislocal | connoinherit
+----------------------+------------------------+-------------+------------+--------------
+ test_inh_check | bmerged | 0 | t | f
+ test_inh_check | bnoinherit | 0 | t | t
+ test_inh_check | test_inh_check_a_check | 0 | t | f
+ test_inh_check_child | blocal | 0 | t | f
+ test_inh_check_child | bmerged | 1 | t | f
+ test_inh_check_child | test_inh_check_a_check | 1 | f | f
+(6 rows)
+
+-- ALTER COLUMN TYPE with different schema in children
+-- Bug at https://postgr.es/m/20170102225618.GA10071@telsasoft.com
+CREATE TABLE test_type_diff (f1 int);
+CREATE TABLE test_type_diff_c (extra smallint) INHERITS (test_type_diff);
+ALTER TABLE test_type_diff ADD COLUMN f2 int;
+INSERT INTO test_type_diff_c VALUES (1, 2, 3);
+ALTER TABLE test_type_diff ALTER COLUMN f2 TYPE bigint USING f2::bigint;
+CREATE TABLE test_type_diff2 (int_two int2, int_four int4, int_eight int8);
+CREATE TABLE test_type_diff2_c1 (int_four int4, int_eight int8, int_two int2);
+CREATE TABLE test_type_diff2_c2 (int_eight int8, int_two int2, int_four int4);
+CREATE TABLE test_type_diff2_c3 (int_two int2, int_four int4, int_eight int8);
+ALTER TABLE test_type_diff2_c1 INHERIT test_type_diff2;
+ALTER TABLE test_type_diff2_c2 INHERIT test_type_diff2;
+ALTER TABLE test_type_diff2_c3 INHERIT test_type_diff2;
+INSERT INTO test_type_diff2_c1 VALUES (1, 2, 3);
+INSERT INTO test_type_diff2_c2 VALUES (4, 5, 6);
+INSERT INTO test_type_diff2_c3 VALUES (7, 8, 9);
+ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int8 USING int_four::int8;
+-- whole-row references are disallowed
+ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int4 USING (pg_column_size(test_type_diff2));
+ERROR: cannot convert whole-row table reference
+DETAIL: USING expression contains a whole-row table reference.
+-- check for rollback of ANALYZE corrupting table property flags (bug #11638)
+CREATE TABLE check_fk_presence_1 (id int PRIMARY KEY, t text);
+CREATE TABLE check_fk_presence_2 (id int REFERENCES check_fk_presence_1, t text);
+BEGIN;
+ALTER TABLE check_fk_presence_2 DROP CONSTRAINT check_fk_presence_2_id_fkey;
+ANALYZE check_fk_presence_2;
+ROLLBACK;
+\d check_fk_presence_2
+ Table "public.check_fk_presence_2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ id | integer | | |
+ t | text | | |
+Foreign-key constraints:
+ "check_fk_presence_2_id_fkey" FOREIGN KEY (id) REFERENCES check_fk_presence_1(id)
+
+DROP TABLE check_fk_presence_1, check_fk_presence_2;
+-- check column addition within a view (bug #14876)
+create table at_base_table(id int, stuff text);
+insert into at_base_table values (23, 'skidoo');
+create view at_view_1 as select * from at_base_table bt;
+create view at_view_2 as select *, to_json(v1) as j from at_view_1 v1;
+\d+ at_view_1
+ View "public.at_view_1"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+----------+-------------
+ id | integer | | | | plain |
+ stuff | text | | | | extended |
+View definition:
+ SELECT bt.id,
+ bt.stuff
+ FROM at_base_table bt;
+
+\d+ at_view_2
+ View "public.at_view_2"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+----------+-------------
+ id | integer | | | | plain |
+ stuff | text | | | | extended |
+ j | json | | | | extended |
+View definition:
+ SELECT v1.id,
+ v1.stuff,
+ to_json(v1.*) AS j
+ FROM at_view_1 v1;
+
+explain (verbose, costs off) select * from at_view_2;
+ QUERY PLAN
+----------------------------------------------------------
+ Seq Scan on public.at_base_table bt
+ Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff))
+(2 rows)
+
+select * from at_view_2;
+ id | stuff | j
+----+--------+----------------------------
+ 23 | skidoo | {"id":23,"stuff":"skidoo"}
+(1 row)
+
+create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt;
+\d+ at_view_1
+ View "public.at_view_1"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+----------+-------------
+ id | integer | | | | plain |
+ stuff | text | | | | extended |
+ more | integer | | | | plain |
+View definition:
+ SELECT bt.id,
+ bt.stuff,
+ 2 + 2 AS more
+ FROM at_base_table bt;
+
+\d+ at_view_2
+ View "public.at_view_2"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------+-----------+----------+---------+----------+-------------
+ id | integer | | | | plain |
+ stuff | text | | | | extended |
+ j | json | | | | extended |
+View definition:
+ SELECT v1.id,
+ v1.stuff,
+ to_json(v1.*) AS j
+ FROM at_view_1 v1;
+
+explain (verbose, costs off) select * from at_view_2;
+ QUERY PLAN
+----------------------------------------------------------------
+ Seq Scan on public.at_base_table bt
+ Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, NULL))
+(2 rows)
+
+select * from at_view_2;
+ id | stuff | j
+----+--------+----------------------------------------
+ 23 | skidoo | {"id":23,"stuff":"skidoo","more":null}
+(1 row)
+
+drop view at_view_2;
+drop view at_view_1;
+drop table at_base_table;
+-- check adding a column not iself requiring a rewrite, together with
+-- a column requiring a default (bug #16038)
+-- ensure that rewrites aren't silently optimized away, removing the
+-- value of the test
+CREATE FUNCTION check_ddl_rewrite(p_tablename regclass, p_ddl text)
+RETURNS boolean
+LANGUAGE plpgsql AS $$
+DECLARE
+ v_relfilenode oid;
+BEGIN
+ v_relfilenode := relfilenode FROM pg_class WHERE oid = p_tablename;
+
+ EXECUTE p_ddl;
+
+ RETURN v_relfilenode <> (SELECT relfilenode FROM pg_class WHERE oid = p_tablename);
+END;
+$$;
+CREATE TABLE rewrite_test(col text);
+INSERT INTO rewrite_test VALUES ('something');
+INSERT INTO rewrite_test VALUES (NULL);
+-- empty[12] don't need rewrite, but notempty[12]_rewrite will force one
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN empty1 text,
+ ADD COLUMN notempty1_rewrite serial;
+$$);
+ check_ddl_rewrite
+-------------------
+ t
+(1 row)
+
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN notempty2_rewrite serial,
+ ADD COLUMN empty2 text;
+$$);
+ check_ddl_rewrite
+-------------------
+ t
+(1 row)
+
+-- also check that fast defaults cause no problem, first without rewrite
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN empty3 text,
+ ADD COLUMN notempty3_norewrite int default 42;
+$$);
+ check_ddl_rewrite
+-------------------
+ f
+(1 row)
+
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN notempty4_norewrite int default 42,
+ ADD COLUMN empty4 text;
+$$);
+ check_ddl_rewrite
+-------------------
+ f
+(1 row)
+
+-- then with rewrite
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN empty5 text,
+ ADD COLUMN notempty5_norewrite int default 42,
+ ADD COLUMN notempty5_rewrite serial;
+$$);
+ check_ddl_rewrite
+-------------------
+ t
+(1 row)
+
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN notempty6_rewrite serial,
+ ADD COLUMN empty6 text,
+ ADD COLUMN notempty6_norewrite int default 42;
+$$);
+ check_ddl_rewrite
+-------------------
+ t
+(1 row)
+
+-- cleanup
+DROP FUNCTION check_ddl_rewrite(regclass, text);
+DROP TABLE rewrite_test;
+--
+-- lock levels
+--
+drop type lockmodes;
+ERROR: type "lockmodes" does not exist
+create type lockmodes as enum (
+ 'SIReadLock'
+,'AccessShareLock'
+,'RowShareLock'
+,'RowExclusiveLock'
+,'ShareUpdateExclusiveLock'
+,'ShareLock'
+,'ShareRowExclusiveLock'
+,'ExclusiveLock'
+,'AccessExclusiveLock'
+);
+drop view my_locks;
+ERROR: view "my_locks" does not exist
+create or replace view my_locks as
+select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode
+from pg_locks l join pg_class c on l.relation = c.oid
+where virtualtransaction = (
+ select virtualtransaction
+ from pg_locks
+ where transactionid = pg_current_xact_id()::xid)
+and locktype = 'relation'
+and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog')
+and c.relname != 'my_locks'
+group by c.relname;
+create table alterlock (f1 int primary key, f2 text);
+insert into alterlock values (1, 'foo');
+create table alterlock2 (f3 int primary key, f1 int);
+insert into alterlock2 values (1, 1);
+begin; alter table alterlock alter column f2 set statistics 150;
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+--------------------------
+ alterlock | ShareUpdateExclusiveLock
+(1 row)
+
+rollback;
+begin; alter table alterlock cluster on alterlock_pkey;
+select * from my_locks order by 1;
+ relname | max_lockmode
+----------------+--------------------------
+ alterlock | ShareUpdateExclusiveLock
+ alterlock_pkey | ShareUpdateExclusiveLock
+(2 rows)
+
+commit;
+begin; alter table alterlock set without cluster;
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+--------------------------
+ alterlock | ShareUpdateExclusiveLock
+(1 row)
+
+commit;
+begin; alter table alterlock set (fillfactor = 100);
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+--------------------------
+ alterlock | ShareUpdateExclusiveLock
+ pg_toast | ShareUpdateExclusiveLock
+(2 rows)
+
+commit;
+begin; alter table alterlock reset (fillfactor);
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+--------------------------
+ alterlock | ShareUpdateExclusiveLock
+ pg_toast | ShareUpdateExclusiveLock
+(2 rows)
+
+commit;
+begin; alter table alterlock set (toast.autovacuum_enabled = off);
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+--------------------------
+ alterlock | ShareUpdateExclusiveLock
+ pg_toast | ShareUpdateExclusiveLock
+(2 rows)
+
+commit;
+begin; alter table alterlock set (autovacuum_enabled = off);
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+--------------------------
+ alterlock | ShareUpdateExclusiveLock
+ pg_toast | ShareUpdateExclusiveLock
+(2 rows)
+
+commit;
+begin; alter table alterlock alter column f2 set (n_distinct = 1);
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+--------------------------
+ alterlock | ShareUpdateExclusiveLock
+(1 row)
+
+rollback;
+-- test that mixing options with different lock levels works as expected
+begin; alter table alterlock set (autovacuum_enabled = off, fillfactor = 80);
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+--------------------------
+ alterlock | ShareUpdateExclusiveLock
+ pg_toast | ShareUpdateExclusiveLock
+(2 rows)
+
+commit;
+begin; alter table alterlock alter column f2 set storage extended;
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+---------------------
+ alterlock | AccessExclusiveLock
+(1 row)
+
+rollback;
+begin; alter table alterlock alter column f2 set default 'x';
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+---------------------
+ alterlock | AccessExclusiveLock
+(1 row)
+
+rollback;
+begin;
+create trigger ttdummy
+ before delete or update on alterlock
+ for each row
+ execute procedure
+ ttdummy (1, 1);
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------+-----------------------
+ alterlock | ShareRowExclusiveLock
+(1 row)
+
+rollback;
+begin;
+select * from my_locks order by 1;
+ relname | max_lockmode
+---------+--------------
+(0 rows)
+
+alter table alterlock2 add foreign key (f1) references alterlock (f1);
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------------+-----------------------
+ alterlock | ShareRowExclusiveLock
+ alterlock2 | ShareRowExclusiveLock
+ alterlock2_pkey | AccessShareLock
+ alterlock_pkey | AccessShareLock
+(4 rows)
+
+rollback;
+begin;
+alter table alterlock2
+add constraint alterlock2nv foreign key (f1) references alterlock (f1) NOT VALID;
+select * from my_locks order by 1;
+ relname | max_lockmode
+------------+-----------------------
+ alterlock | ShareRowExclusiveLock
+ alterlock2 | ShareRowExclusiveLock
+(2 rows)
+
+commit;
+begin;
+alter table alterlock2 validate constraint alterlock2nv;
+select * from my_locks order by 1;
+ relname | max_lockmode
+-----------------+--------------------------
+ alterlock | RowShareLock
+ alterlock2 | ShareUpdateExclusiveLock
+ alterlock2_pkey | AccessShareLock
+ alterlock_pkey | AccessShareLock
+(4 rows)
+
+rollback;
+create or replace view my_locks as
+select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode
+from pg_locks l join pg_class c on l.relation = c.oid
+where virtualtransaction = (
+ select virtualtransaction
+ from pg_locks
+ where transactionid = pg_current_xact_id()::xid)
+and locktype = 'relation'
+and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog')
+and c.relname = 'my_locks'
+group by c.relname;
+-- raise exception
+alter table my_locks set (autovacuum_enabled = false);
+ERROR: unrecognized parameter "autovacuum_enabled"
+alter view my_locks set (autovacuum_enabled = false);
+ERROR: unrecognized parameter "autovacuum_enabled"
+alter table my_locks reset (autovacuum_enabled);
+alter view my_locks reset (autovacuum_enabled);
+begin;
+alter view my_locks set (security_barrier=off);
+select * from my_locks order by 1;
+ relname | max_lockmode
+----------+---------------------
+ my_locks | AccessExclusiveLock
+(1 row)
+
+alter view my_locks reset (security_barrier);
+rollback;
+-- this test intentionally applies the ALTER TABLE command against a view, but
+-- uses a view option so we expect this to succeed. This form of SQL is
+-- accepted for historical reasons, as shown in the docs for ALTER VIEW
+begin;
+alter table my_locks set (security_barrier=off);
+select * from my_locks order by 1;
+ relname | max_lockmode
+----------+---------------------
+ my_locks | AccessExclusiveLock
+(1 row)
+
+alter table my_locks reset (security_barrier);
+rollback;
+-- cleanup
+drop table alterlock2;
+drop table alterlock;
+drop view my_locks;
+drop type lockmodes;
+--
+-- alter function
+--
+create function test_strict(text) returns text as
+ 'select coalesce($1, ''got passed a null'');'
+ language sql returns null on null input;
+select test_strict(NULL);
+ test_strict
+-------------
+
+(1 row)
+
+alter function test_strict(text) called on null input;
+select test_strict(NULL);
+ test_strict
+-------------------
+ got passed a null
+(1 row)
+
+create function non_strict(text) returns text as
+ 'select coalesce($1, ''got passed a null'');'
+ language sql called on null input;
+select non_strict(NULL);
+ non_strict
+-------------------
+ got passed a null
+(1 row)
+
+alter function non_strict(text) returns null on null input;
+select non_strict(NULL);
+ non_strict
+------------
+
+(1 row)
+
+--
+-- alter object set schema
+--
+create schema alter1;
+create schema alter2;
+create table alter1.t1(f1 serial primary key, f2 int check (f2 > 0));
+create view alter1.v1 as select * from alter1.t1;
+create function alter1.plus1(int) returns int as 'select $1+1' language sql;
+create domain alter1.posint integer check (value > 0);
+create type alter1.ctype as (f1 int, f2 text);
+create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql
+as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2';
+create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype);
+create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as
+ operator 1 alter1.=(alter1.ctype, alter1.ctype);
+create conversion alter1.latin1_to_utf8 for 'latin1' to 'utf8' from iso8859_1_to_utf8;
+create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype);
+create text search configuration alter1.cfg(parser = alter1.prs);
+create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize);
+create text search dictionary alter1.dict(template = alter1.tmpl);
+insert into alter1.t1(f2) values(11);
+insert into alter1.t1(f2) values(12);
+alter table alter1.t1 set schema alter1; -- no-op, same schema
+alter table alter1.t1 set schema alter2;
+alter table alter1.v1 set schema alter2;
+alter function alter1.plus1(int) set schema alter2;
+alter domain alter1.posint set schema alter2;
+alter operator class alter1.ctype_hash_ops using hash set schema alter2;
+alter operator family alter1.ctype_hash_ops using hash set schema alter2;
+alter operator alter1.=(alter1.ctype, alter1.ctype) set schema alter2;
+alter function alter1.same(alter1.ctype, alter1.ctype) set schema alter2;
+alter type alter1.ctype set schema alter1; -- no-op, same schema
+alter type alter1.ctype set schema alter2;
+alter conversion alter1.latin1_to_utf8 set schema alter2;
+alter text search parser alter1.prs set schema alter2;
+alter text search configuration alter1.cfg set schema alter2;
+alter text search template alter1.tmpl set schema alter2;
+alter text search dictionary alter1.dict set schema alter2;
+-- this should succeed because nothing is left in alter1
+drop schema alter1;
+insert into alter2.t1(f2) values(13);
+insert into alter2.t1(f2) values(14);
+select * from alter2.t1;
+ f1 | f2
+----+----
+ 1 | 11
+ 2 | 12
+ 3 | 13
+ 4 | 14
+(4 rows)
+
+select * from alter2.v1;
+ f1 | f2
+----+----
+ 1 | 11
+ 2 | 12
+ 3 | 13
+ 4 | 14
+(4 rows)
+
+select alter2.plus1(41);
+ plus1
+-------
+ 42
+(1 row)
+
+-- clean up
+drop schema alter2 cascade;
+NOTICE: drop cascades to 13 other objects
+DETAIL: drop cascades to table alter2.t1
+drop cascades to view alter2.v1
+drop cascades to function alter2.plus1(integer)
+drop cascades to type alter2.posint
+drop cascades to type alter2.ctype
+drop cascades to function alter2.same(alter2.ctype,alter2.ctype)
+drop cascades to operator alter2.=(alter2.ctype,alter2.ctype)
+drop cascades to operator family alter2.ctype_hash_ops for access method hash
+drop cascades to conversion alter2.latin1_to_utf8
+drop cascades to text search parser alter2.prs
+drop cascades to text search configuration alter2.cfg
+drop cascades to text search template alter2.tmpl
+drop cascades to text search dictionary alter2.dict
+--
+-- composite types
+--
+CREATE TYPE test_type AS (a int);
+\d test_type
+ Composite type "public.test_type"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+
+ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails
+ERROR: relation "nosuchtype" does not exist
+ALTER TYPE test_type ADD ATTRIBUTE b text;
+\d test_type
+ Composite type "public.test_type"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | text | | |
+
+ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails
+ERROR: column "b" of relation "test_type" already exists
+ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar;
+\d test_type
+ Composite type "public.test_type"
+ Column | Type | Collation | Nullable | Default
+--------+-------------------+-----------+----------+---------
+ a | integer | | |
+ b | character varying | | |
+
+ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer;
+\d test_type
+ Composite type "public.test_type"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | integer | | |
+
+ALTER TYPE test_type DROP ATTRIBUTE b;
+\d test_type
+ Composite type "public.test_type"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+
+ALTER TYPE test_type DROP ATTRIBUTE c; -- fails
+ERROR: column "c" of relation "test_type" does not exist
+ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c;
+NOTICE: column "c" of relation "test_type" does not exist, skipping
+ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean;
+\d test_type
+ Composite type "public.test_type"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ d | boolean | | |
+
+ALTER TYPE test_type RENAME ATTRIBUTE a TO aa;
+ERROR: column "a" does not exist
+ALTER TYPE test_type RENAME ATTRIBUTE d TO dd;
+\d test_type
+ Composite type "public.test_type"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ dd | boolean | | |
+
+DROP TYPE test_type;
+CREATE TYPE test_type1 AS (a int, b text);
+CREATE TABLE test_tbl1 (x int, y test_type1);
+ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails
+ERROR: cannot alter type "test_type1" because column "test_tbl1.y" uses it
+CREATE TYPE test_type2 AS (a int, b text);
+CREATE TABLE test_tbl2 OF test_type2;
+CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2);
+\d test_type2
+ Composite type "public.test_type2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | text | | |
+
+\d test_tbl2
+ Table "public.test_tbl2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | text | | |
+Number of child tables: 1 (Use \d+ to list them.)
+Typed table of type: test_type2
+
+ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails
+ERROR: cannot alter type "test_type2" because it is the type of a typed table
+HINT: Use ALTER ... CASCADE to alter the typed tables too.
+ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE;
+\d test_type2
+ Composite type "public.test_type2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | text | | |
+ c | text | | |
+
+\d test_tbl2
+ Table "public.test_tbl2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | text | | |
+ c | text | | |
+Number of child tables: 1 (Use \d+ to list them.)
+Typed table of type: test_type2
+
+ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails
+ERROR: cannot alter type "test_type2" because it is the type of a typed table
+HINT: Use ALTER ... CASCADE to alter the typed tables too.
+ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE;
+\d test_type2
+ Composite type "public.test_type2"
+ Column | Type | Collation | Nullable | Default
+--------+-------------------+-----------+----------+---------
+ a | integer | | |
+ b | character varying | | |
+ c | text | | |
+
+\d test_tbl2
+ Table "public.test_tbl2"
+ Column | Type | Collation | Nullable | Default
+--------+-------------------+-----------+----------+---------
+ a | integer | | |
+ b | character varying | | |
+ c | text | | |
+Number of child tables: 1 (Use \d+ to list them.)
+Typed table of type: test_type2
+
+ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails
+ERROR: cannot alter type "test_type2" because it is the type of a typed table
+HINT: Use ALTER ... CASCADE to alter the typed tables too.
+ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE;
+\d test_type2
+ Composite type "public.test_type2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ c | text | | |
+
+\d test_tbl2
+ Table "public.test_tbl2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ c | text | | |
+Number of child tables: 1 (Use \d+ to list them.)
+Typed table of type: test_type2
+
+ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails
+ERROR: cannot alter type "test_type2" because it is the type of a typed table
+HINT: Use ALTER ... CASCADE to alter the typed tables too.
+ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE;
+\d test_type2
+ Composite type "public.test_type2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ aa | integer | | |
+ c | text | | |
+
+\d test_tbl2
+ Table "public.test_tbl2"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ aa | integer | | |
+ c | text | | |
+Number of child tables: 1 (Use \d+ to list them.)
+Typed table of type: test_type2
+
+\d test_tbl2_subclass
+ Table "public.test_tbl2_subclass"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ aa | integer | | |
+ c | text | | |
+Inherits: test_tbl2
+
+DROP TABLE test_tbl2_subclass;
+CREATE TYPE test_typex AS (a int, b text);
+CREATE TABLE test_tblx (x int, y test_typex check ((y).a > 0));
+ALTER TYPE test_typex DROP ATTRIBUTE a; -- fails
+ERROR: cannot drop column a of composite type test_typex because other objects depend on it
+DETAIL: constraint test_tblx_y_check on table test_tblx depends on column a of composite type test_typex
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE;
+NOTICE: drop cascades to constraint test_tblx_y_check on table test_tblx
+\d test_tblx
+ Table "public.test_tblx"
+ Column | Type | Collation | Nullable | Default
+--------+------------+-----------+----------+---------
+ x | integer | | |
+ y | test_typex | | |
+
+DROP TABLE test_tblx;
+DROP TYPE test_typex;
+-- This test isn't that interesting on its own, but the purpose is to leave
+-- behind a table to test pg_upgrade with. The table has a composite type
+-- column in it, and the composite type has a dropped attribute.
+CREATE TYPE test_type3 AS (a int);
+CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3;
+ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int;
+CREATE TYPE test_type_empty AS ();
+DROP TYPE test_type_empty;
+--
+-- typed tables: OF / NOT OF
+--
+CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2));
+ALTER TYPE tt_t0 DROP ATTRIBUTE z;
+CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK
+CREATE TABLE tt1 (x int, y bigint); -- wrong base type
+CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod
+CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order
+CREATE TABLE tt4 (x int); -- too few columns
+CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too few columns
+CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent
+CREATE TABLE tt7 (x int, q text, y numeric(8,2));
+ALTER TABLE tt7 DROP q; -- OK
+ALTER TABLE tt0 OF tt_t0;
+ALTER TABLE tt1 OF tt_t0;
+ERROR: table "tt1" has different type for column "y"
+ALTER TABLE tt2 OF tt_t0;
+ERROR: table "tt2" has different type for column "y"
+ALTER TABLE tt3 OF tt_t0;
+ERROR: table has column "y" where type requires "x"
+ALTER TABLE tt4 OF tt_t0;
+ERROR: table is missing column "y"
+ALTER TABLE tt5 OF tt_t0;
+ERROR: table has extra column "z"
+ALTER TABLE tt6 OF tt_t0;
+ERROR: typed tables cannot inherit
+ALTER TABLE tt7 OF tt_t0;
+CREATE TYPE tt_t1 AS (x int, y numeric(8,2));
+ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table
+ALTER TABLE tt7 NOT OF;
+\d tt7
+ Table "public.tt7"
+ Column | Type | Collation | Nullable | Default
+--------+--------------+-----------+----------+---------
+ x | integer | | |
+ y | numeric(8,2) | | |
+
+-- make sure we can drop a constraint on the parent but it remains on the child
+CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL));
+CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent);
+ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check";
+-- should fail
+INSERT INTO test_drop_constr_child (c) VALUES (NULL);
+ERROR: new row for relation "test_drop_constr_child" violates check constraint "test_drop_constr_parent_c_check"
+DETAIL: Failing row contains (null).
+DROP TABLE test_drop_constr_parent CASCADE;
+NOTICE: drop cascades to table test_drop_constr_child
+--
+-- IF EXISTS test
+--
+ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
+NOTICE: relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
+NOTICE: relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
+NOTICE: relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
+NOTICE: relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
+NOTICE: relation "tt8" does not exist, skipping
+ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
+NOTICE: relation "tt8" does not exist, skipping
+CREATE TABLE tt8(a int);
+CREATE SCHEMA alter2;
+ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
+ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
+ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
+ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
+ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
+ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
+\d alter2.tt8
+ Table "alter2.tt8"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ f1 | integer | | not null | 0
+Indexes:
+ "xxx" PRIMARY KEY, btree (f1)
+Check constraints:
+ "tt8_f_check" CHECK (f1 >= 0 AND f1 <= 10)
+
+DROP TABLE alter2.tt8;
+DROP SCHEMA alter2;
+--
+-- Check conflicts between index and CHECK constraint names
+--
+CREATE TABLE tt9(c integer);
+ALTER TABLE tt9 ADD CHECK(c > 1);
+ALTER TABLE tt9 ADD CHECK(c > 2); -- picks nonconflicting name
+ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 3);
+ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 4); -- fail, dup name
+ERROR: constraint "foo" for relation "tt9" already exists
+ALTER TABLE tt9 ADD UNIQUE(c);
+ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name
+ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key UNIQUE(c); -- fail, dup name
+ERROR: relation "tt9_c_key" already exists
+ALTER TABLE tt9 ADD CONSTRAINT foo UNIQUE(c); -- fail, dup name
+ERROR: constraint "foo" for relation "tt9" already exists
+ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key CHECK(c > 5); -- fail, dup name
+ERROR: constraint "tt9_c_key" for relation "tt9" already exists
+ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key2 CHECK(c > 6);
+ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name
+\d tt9
+ Table "public.tt9"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c | integer | | |
+Indexes:
+ "tt9_c_key" UNIQUE CONSTRAINT, btree (c)
+ "tt9_c_key1" UNIQUE CONSTRAINT, btree (c)
+ "tt9_c_key3" UNIQUE CONSTRAINT, btree (c)
+Check constraints:
+ "foo" CHECK (c > 3)
+ "tt9_c_check" CHECK (c > 1)
+ "tt9_c_check1" CHECK (c > 2)
+ "tt9_c_key2" CHECK (c > 6)
+
+DROP TABLE tt9;
+-- Check that comments on constraints and indexes are not lost at ALTER TABLE.
+CREATE TABLE comment_test (
+ id int,
+ positive_col int CHECK (positive_col > 0),
+ indexed_col int,
+ CONSTRAINT comment_test_pk PRIMARY KEY (id));
+CREATE INDEX comment_test_index ON comment_test(indexed_col);
+COMMENT ON COLUMN comment_test.id IS 'Column ''id'' on comment_test';
+COMMENT ON INDEX comment_test_index IS 'Simple index on comment_test';
+COMMENT ON CONSTRAINT comment_test_positive_col_check ON comment_test IS 'CHECK constraint on comment_test.positive_col';
+COMMENT ON CONSTRAINT comment_test_pk ON comment_test IS 'PRIMARY KEY constraint of comment_test';
+COMMENT ON INDEX comment_test_pk IS 'Index backing the PRIMARY KEY of comment_test';
+SELECT col_description('comment_test'::regclass, 1) as comment;
+ comment
+-----------------------------
+ Column 'id' on comment_test
+(1 row)
+
+SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2;
+ index | comment
+--------------------+-----------------------------------------------
+ comment_test_index | Simple index on comment_test
+ comment_test_pk | Index backing the PRIMARY KEY of comment_test
+(2 rows)
+
+SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2;
+ constraint | comment
+---------------------------------+-----------------------------------------------
+ comment_test_pk | PRIMARY KEY constraint of comment_test
+ comment_test_positive_col_check | CHECK constraint on comment_test.positive_col
+(2 rows)
+
+-- Change the datatype of all the columns. ALTER TABLE is optimized to not
+-- rebuild an index if the new data type is binary compatible with the old
+-- one. Check do a dummy ALTER TABLE that doesn't change the datatype
+-- first, to test that no-op codepath, and another one that does.
+ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE int;
+ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE text;
+ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int;
+ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text;
+ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE int;
+ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE bigint;
+-- Check that the comments are intact.
+SELECT col_description('comment_test'::regclass, 1) as comment;
+ comment
+-----------------------------
+ Column 'id' on comment_test
+(1 row)
+
+SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2;
+ index | comment
+--------------------+-----------------------------------------------
+ comment_test_index | Simple index on comment_test
+ comment_test_pk | Index backing the PRIMARY KEY of comment_test
+(2 rows)
+
+SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2;
+ constraint | comment
+---------------------------------+-----------------------------------------------
+ comment_test_pk | PRIMARY KEY constraint of comment_test
+ comment_test_positive_col_check | CHECK constraint on comment_test.positive_col
+(2 rows)
+
+-- Check compatibility for foreign keys and comments. This is done
+-- separately as rebuilding the column type of the parent leads
+-- to an error and would reduce the test scope.
+CREATE TABLE comment_test_child (
+ id text CONSTRAINT comment_test_child_fk REFERENCES comment_test);
+CREATE INDEX comment_test_child_fk ON comment_test_child(id);
+COMMENT ON COLUMN comment_test_child.id IS 'Column ''id'' on comment_test_child';
+COMMENT ON INDEX comment_test_child_fk IS 'Index backing the FOREIGN KEY of comment_test_child';
+COMMENT ON CONSTRAINT comment_test_child_fk ON comment_test_child IS 'FOREIGN KEY constraint of comment_test_child';
+-- Change column type of parent
+ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text;
+ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int USING id::integer;
+ERROR: foreign key constraint "comment_test_child_fk" cannot be implemented
+DETAIL: Key columns "id" and "id" are of incompatible types: text and integer.
+-- Comments should be intact
+SELECT col_description('comment_test_child'::regclass, 1) as comment;
+ comment
+-----------------------------------
+ Column 'id' on comment_test_child
+(1 row)
+
+SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test_child'::regclass ORDER BY 1, 2;
+ index | comment
+-----------------------+-----------------------------------------------------
+ comment_test_child_fk | Index backing the FOREIGN KEY of comment_test_child
+(1 row)
+
+SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test_child'::regclass ORDER BY 1, 2;
+ constraint | comment
+-----------------------+----------------------------------------------
+ comment_test_child_fk | FOREIGN KEY constraint of comment_test_child
+(1 row)
+
+-- Check that we map relation oids to filenodes and back correctly. Only
+-- display bad mappings so the test output doesn't change all the time. A
+-- filenode function call can return NULL for a relation dropped concurrently
+-- with the call's surrounding query, so ignore a NULL mapped_oid for
+-- relations that no longer exist after all calls finish.
+CREATE TEMP TABLE filenode_mapping AS
+SELECT
+ oid, mapped_oid, reltablespace, relfilenode, relname
+FROM pg_class,
+ pg_filenode_relation(reltablespace, pg_relation_filenode(oid)) AS mapped_oid
+WHERE relkind IN ('r', 'i', 'S', 't', 'm') AND mapped_oid IS DISTINCT FROM oid;
+SELECT m.* FROM filenode_mapping m LEFT JOIN pg_class c ON c.oid = m.oid
+WHERE c.oid IS NOT NULL OR m.mapped_oid IS NOT NULL;
+ oid | mapped_oid | reltablespace | relfilenode | relname
+-----+------------+---------------+-------------+---------
+(0 rows)
+
+-- Checks on creating and manipulation of user defined relations in
+-- pg_catalog.
+SHOW allow_system_table_mods;
+ allow_system_table_mods
+-------------------------
+ off
+(1 row)
+
+-- disallowed because of search_path issues with pg_dump
+CREATE TABLE pg_catalog.new_system_table();
+ERROR: permission denied to create "pg_catalog.new_system_table"
+DETAIL: System catalog modifications are currently disallowed.
+-- instead create in public first, move to catalog
+CREATE TABLE new_system_table(id serial primary key, othercol text);
+ALTER TABLE new_system_table SET SCHEMA pg_catalog;
+ALTER TABLE new_system_table SET SCHEMA public;
+ALTER TABLE new_system_table SET SCHEMA pg_catalog;
+-- will be ignored -- already there:
+ALTER TABLE new_system_table SET SCHEMA pg_catalog;
+ALTER TABLE new_system_table RENAME TO old_system_table;
+CREATE INDEX old_system_table__othercol ON old_system_table (othercol);
+INSERT INTO old_system_table(othercol) VALUES ('somedata'), ('otherdata');
+UPDATE old_system_table SET id = -id;
+DELETE FROM old_system_table WHERE othercol = 'somedata';
+TRUNCATE old_system_table;
+ALTER TABLE old_system_table DROP CONSTRAINT new_system_table_pkey;
+ALTER TABLE old_system_table DROP COLUMN othercol;
+DROP TABLE old_system_table;
+-- set logged
+CREATE UNLOGGED TABLE unlogged1(f1 SERIAL PRIMARY KEY, f2 TEXT);
+-- check relpersistence of an unlogged table
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
+UNION ALL
+SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
+UNION ALL
+SELECT 'toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1'
+ORDER BY relname;
+ relname | relkind | relpersistence
+------------------+---------+----------------
+ toast index | i | u
+ toast table | t | u
+ unlogged1 | r | u
+ unlogged1_f1_seq | S | p
+ unlogged1_pkey | i | u
+(5 rows)
+
+CREATE UNLOGGED TABLE unlogged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged1); -- foreign key
+CREATE UNLOGGED TABLE unlogged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged3); -- self-referencing foreign key
+ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key
+ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists
+ERROR: could not change table "unlogged2" to logged because it references unlogged table "unlogged1"
+ALTER TABLE unlogged1 SET LOGGED;
+-- check relpersistence of an unlogged table after changing to permanent
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
+UNION ALL
+SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
+UNION ALL
+SELECT 'toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1'
+ORDER BY relname;
+ relname | relkind | relpersistence
+------------------+---------+----------------
+ toast index | i | p
+ toast table | t | p
+ unlogged1 | r | p
+ unlogged1_f1_seq | S | p
+ unlogged1_pkey | i | p
+(5 rows)
+
+ALTER TABLE unlogged1 SET LOGGED; -- silently do nothing
+DROP TABLE unlogged3;
+DROP TABLE unlogged2;
+DROP TABLE unlogged1;
+-- set unlogged
+CREATE TABLE logged1(f1 SERIAL PRIMARY KEY, f2 TEXT);
+-- check relpersistence of a permanent table
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1'
+UNION ALL
+SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1'
+UNION ALL
+SELECT 'toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1'
+ORDER BY relname;
+ relname | relkind | relpersistence
+----------------+---------+----------------
+ logged1 | r | p
+ logged1_f1_seq | S | p
+ logged1_pkey | i | p
+ toast index | i | p
+ toast table | t | p
+(5 rows)
+
+CREATE TABLE logged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged1); -- foreign key
+CREATE TABLE logged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged3); -- self-referencing foreign key
+ALTER TABLE logged1 SET UNLOGGED; -- fails because a foreign key from a permanent table exists
+ERROR: could not change table "logged1" to unlogged because it references logged table "logged2"
+ALTER TABLE logged3 SET UNLOGGED; -- skip self-referencing foreign key
+ALTER TABLE logged2 SET UNLOGGED;
+ALTER TABLE logged1 SET UNLOGGED;
+-- check relpersistence of a permanent table after changing to unlogged
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1'
+UNION ALL
+SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1'
+UNION ALL
+SELECT 'toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1'
+ORDER BY relname;
+ relname | relkind | relpersistence
+----------------+---------+----------------
+ logged1 | r | u
+ logged1_f1_seq | S | p
+ logged1_pkey | i | u
+ toast index | i | u
+ toast table | t | u
+(5 rows)
+
+ALTER TABLE logged1 SET UNLOGGED; -- silently do nothing
+DROP TABLE logged3;
+DROP TABLE logged2;
+DROP TABLE logged1;
+-- test ADD COLUMN IF NOT EXISTS
+CREATE TABLE test_add_column(c1 integer);
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c1 | integer | | |
+
+ALTER TABLE test_add_column
+ ADD COLUMN c2 integer;
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c1 | integer | | |
+ c2 | integer | | |
+
+ALTER TABLE test_add_column
+ ADD COLUMN c2 integer; -- fail because c2 already exists
+ERROR: column "c2" of relation "test_add_column" already exists
+ALTER TABLE ONLY test_add_column
+ ADD COLUMN c2 integer; -- fail because c2 already exists
+ERROR: column "c2" of relation "test_add_column" already exists
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c1 | integer | | |
+ c2 | integer | | |
+
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
+NOTICE: column "c2" of relation "test_add_column" already exists, skipping
+ALTER TABLE ONLY test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
+NOTICE: column "c2" of relation "test_add_column" already exists, skipping
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c1 | integer | | |
+ c2 | integer | | |
+
+ALTER TABLE test_add_column
+ ADD COLUMN c2 integer, -- fail because c2 already exists
+ ADD COLUMN c3 integer primary key;
+ERROR: column "c2" of relation "test_add_column" already exists
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c1 | integer | | |
+ c2 | integer | | |
+
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+ ADD COLUMN c3 integer primary key;
+NOTICE: column "c2" of relation "test_add_column" already exists, skipping
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c1 | integer | | |
+ c2 | integer | | |
+ c3 | integer | | not null |
+Indexes:
+ "test_add_column_pkey" PRIMARY KEY, btree (c3)
+
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+ ADD COLUMN IF NOT EXISTS c3 integer primary key; -- skipping because c3 already exists
+NOTICE: column "c2" of relation "test_add_column" already exists, skipping
+NOTICE: column "c3" of relation "test_add_column" already exists, skipping
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c1 | integer | | |
+ c2 | integer | | |
+ c3 | integer | | not null |
+Indexes:
+ "test_add_column_pkey" PRIMARY KEY, btree (c3)
+
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+ ADD COLUMN IF NOT EXISTS c3 integer, -- skipping because c3 already exists
+ ADD COLUMN c4 integer REFERENCES test_add_column;
+NOTICE: column "c2" of relation "test_add_column" already exists, skipping
+NOTICE: column "c3" of relation "test_add_column" already exists, skipping
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c1 | integer | | |
+ c2 | integer | | |
+ c3 | integer | | not null |
+ c4 | integer | | |
+Indexes:
+ "test_add_column_pkey" PRIMARY KEY, btree (c3)
+Foreign-key constraints:
+ "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
+Referenced by:
+ TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
+
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c4 integer REFERENCES test_add_column;
+NOTICE: column "c4" of relation "test_add_column" already exists, skipping
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ c1 | integer | | |
+ c2 | integer | | |
+ c3 | integer | | not null |
+ c4 | integer | | |
+Indexes:
+ "test_add_column_pkey" PRIMARY KEY, btree (c3)
+Foreign-key constraints:
+ "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
+Referenced by:
+ TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
+
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 8);
+\d test_add_column
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------------------------------------------
+ c1 | integer | | |
+ c2 | integer | | |
+ c3 | integer | | not null |
+ c4 | integer | | |
+ c5 | integer | | not null | nextval('test_add_column_c5_seq'::regclass)
+Indexes:
+ "test_add_column_pkey" PRIMARY KEY, btree (c3)
+Check constraints:
+ "test_add_column_c5_check" CHECK (c5 > 8)
+Foreign-key constraints:
+ "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
+Referenced by:
+ TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
+
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 10);
+NOTICE: column "c5" of relation "test_add_column" already exists, skipping
+\d test_add_column*
+ Table "public.test_add_column"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------------------------------------------
+ c1 | integer | | |
+ c2 | integer | | |
+ c3 | integer | | not null |
+ c4 | integer | | |
+ c5 | integer | | not null | nextval('test_add_column_c5_seq'::regclass)
+Indexes:
+ "test_add_column_pkey" PRIMARY KEY, btree (c3)
+Check constraints:
+ "test_add_column_c5_check" CHECK (c5 > 8)
+Foreign-key constraints:
+ "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
+Referenced by:
+ TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3)
+
+ Sequence "public.test_add_column_c5_seq"
+ Type | Start | Minimum | Maximum | Increment | Cycles? | Cache
+---------+-------+---------+------------+-----------+---------+-------
+ integer | 1 | 1 | 2147483647 | 1 | no | 1
+Owned by: public.test_add_column.c5
+
+ Index "public.test_add_column_pkey"
+ Column | Type | Key? | Definition
+--------+---------+------+------------
+ c3 | integer | yes | c3
+primary key, btree, for table "public.test_add_column"
+
+DROP TABLE test_add_column;
+\d test_add_column*
+-- assorted cases with multiple ALTER TABLE steps
+CREATE TABLE ataddindex(f1 INT);
+INSERT INTO ataddindex VALUES (42), (43);
+CREATE UNIQUE INDEX ataddindexi0 ON ataddindex(f1);
+ALTER TABLE ataddindex
+ ADD PRIMARY KEY USING INDEX ataddindexi0,
+ ALTER f1 TYPE BIGINT;
+\d ataddindex
+ Table "public.ataddindex"
+ Column | Type | Collation | Nullable | Default
+--------+--------+-----------+----------+---------
+ f1 | bigint | | not null |
+Indexes:
+ "ataddindexi0" PRIMARY KEY, btree (f1)
+
+DROP TABLE ataddindex;
+CREATE TABLE ataddindex(f1 VARCHAR(10));
+INSERT INTO ataddindex(f1) VALUES ('foo'), ('a');
+ALTER TABLE ataddindex
+ ALTER f1 SET DATA TYPE TEXT,
+ ADD EXCLUDE ((f1 LIKE 'a') WITH =);
+\d ataddindex
+ Table "public.ataddindex"
+ Column | Type | Collation | Nullable | Default
+--------+------+-----------+----------+---------
+ f1 | text | | |
+Indexes:
+ "ataddindex_expr_excl" EXCLUDE USING btree ((f1 ~~ 'a'::text) WITH =)
+
+DROP TABLE ataddindex;
+CREATE TABLE ataddindex(id int, ref_id int);
+ALTER TABLE ataddindex
+ ADD PRIMARY KEY (id),
+ ADD FOREIGN KEY (ref_id) REFERENCES ataddindex;
+\d ataddindex
+ Table "public.ataddindex"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ id | integer | | not null |
+ ref_id | integer | | |
+Indexes:
+ "ataddindex_pkey" PRIMARY KEY, btree (id)
+Foreign-key constraints:
+ "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id)
+Referenced by:
+ TABLE "ataddindex" CONSTRAINT "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id)
+
+DROP TABLE ataddindex;
+CREATE TABLE ataddindex(id int, ref_id int);
+ALTER TABLE ataddindex
+ ADD UNIQUE (id),
+ ADD FOREIGN KEY (ref_id) REFERENCES ataddindex (id);
+\d ataddindex
+ Table "public.ataddindex"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ id | integer | | |
+ ref_id | integer | | |
+Indexes:
+ "ataddindex_id_key" UNIQUE CONSTRAINT, btree (id)
+Foreign-key constraints:
+ "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id)
+Referenced by:
+ TABLE "ataddindex" CONSTRAINT "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id)
+
+DROP TABLE ataddindex;
+-- unsupported constraint types for partitioned tables
+CREATE TABLE partitioned (
+ a int,
+ b int
+) PARTITION BY RANGE (a, (a+b+1));
+ALTER TABLE partitioned ADD EXCLUDE USING gist (a WITH &&);
+ERROR: exclusion constraints are not supported on partitioned tables
+LINE 1: ALTER TABLE partitioned ADD EXCLUDE USING gist (a WITH &&);
+ ^
+-- cannot drop column that is part of the partition key
+ALTER TABLE partitioned DROP COLUMN a;
+ERROR: cannot drop column "a" because it is part of the partition key of relation "partitioned"
+ALTER TABLE partitioned ALTER COLUMN a TYPE char(5);
+ERROR: cannot alter column "a" because it is part of the partition key of relation "partitioned"
+ALTER TABLE partitioned DROP COLUMN b;
+ERROR: cannot drop column "b" because it is part of the partition key of relation "partitioned"
+ALTER TABLE partitioned ALTER COLUMN b TYPE char(5);
+ERROR: cannot alter column "b" because it is part of the partition key of relation "partitioned"
+-- partitioned table cannot participate in regular inheritance
+CREATE TABLE nonpartitioned (
+ a int,
+ b int
+);
+ALTER TABLE partitioned INHERIT nonpartitioned;
+ERROR: cannot change inheritance of partitioned table
+ALTER TABLE nonpartitioned INHERIT partitioned;
+ERROR: cannot inherit from partitioned table "partitioned"
+-- cannot add NO INHERIT constraint to partitioned tables
+ALTER TABLE partitioned ADD CONSTRAINT chk_a CHECK (a > 0) NO INHERIT;
+ERROR: cannot add NO INHERIT constraint to partitioned table "partitioned"
+DROP TABLE partitioned, nonpartitioned;
+--
+-- ATTACH PARTITION
+--
+-- check that target table is partitioned
+CREATE TABLE unparted (
+ a int
+);
+CREATE TABLE fail_part (like unparted);
+ALTER TABLE unparted ATTACH PARTITION fail_part FOR VALUES IN ('a');
+ERROR: table "unparted" is not partitioned
+DROP TABLE unparted, fail_part;
+-- check that partition bound is compatible
+CREATE TABLE list_parted (
+ a int NOT NULL,
+ b char(2) COLLATE "C",
+ CONSTRAINT check_a CHECK (a > 0)
+) PARTITION BY LIST (a);
+CREATE TABLE fail_part (LIKE list_parted);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES FROM (1) TO (10);
+ERROR: invalid bound specification for a list partition
+LINE 1: ...list_parted ATTACH PARTITION fail_part FOR VALUES FROM (1) T...
+ ^
+DROP TABLE fail_part;
+-- check that the table being attached exists
+ALTER TABLE list_parted ATTACH PARTITION nonexistent FOR VALUES IN (1);
+ERROR: relation "nonexistent" does not exist
+-- check ownership of the source table
+CREATE ROLE regress_test_me;
+CREATE ROLE regress_test_not_me;
+CREATE TABLE not_owned_by_me (LIKE list_parted);
+ALTER TABLE not_owned_by_me OWNER TO regress_test_not_me;
+SET SESSION AUTHORIZATION regress_test_me;
+CREATE TABLE owned_by_me (
+ a int
+) PARTITION BY LIST (a);
+ALTER TABLE owned_by_me ATTACH PARTITION not_owned_by_me FOR VALUES IN (1);
+ERROR: must be owner of table not_owned_by_me
+RESET SESSION AUTHORIZATION;
+DROP TABLE owned_by_me, not_owned_by_me;
+DROP ROLE regress_test_not_me;
+DROP ROLE regress_test_me;
+-- check that the table being attached is not part of regular inheritance
+CREATE TABLE parent (LIKE list_parted);
+CREATE TABLE child () INHERITS (parent);
+ALTER TABLE list_parted ATTACH PARTITION child FOR VALUES IN (1);
+ERROR: cannot attach inheritance child as partition
+ALTER TABLE list_parted ATTACH PARTITION parent FOR VALUES IN (1);
+ERROR: cannot attach inheritance parent as partition
+DROP TABLE parent CASCADE;
+NOTICE: drop cascades to table child
+-- check any TEMP-ness
+CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a);
+CREATE TABLE perm_part (a int);
+ALTER TABLE temp_parted ATTACH PARTITION perm_part FOR VALUES IN (1);
+ERROR: cannot attach a permanent relation as partition of temporary relation "temp_parted"
+DROP TABLE temp_parted, perm_part;
+-- check that the table being attached is not a typed table
+CREATE TYPE mytype AS (a int);
+CREATE TABLE fail_part OF mytype;
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ERROR: cannot attach a typed table as partition
+DROP TYPE mytype CASCADE;
+NOTICE: drop cascades to table fail_part
+-- check that the table being attached has only columns present in the parent
+CREATE TABLE fail_part (like list_parted, c int);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ERROR: table "fail_part" contains column "c" not found in parent "list_parted"
+DETAIL: The new partition may contain only the columns present in parent.
+DROP TABLE fail_part;
+-- check that the table being attached has every column of the parent
+CREATE TABLE fail_part (a int NOT NULL);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ERROR: child table is missing column "b"
+DROP TABLE fail_part;
+-- check that columns match in type, collation and NOT NULL status
+CREATE TABLE fail_part (
+ b char(3),
+ a int NOT NULL
+);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ERROR: child table "fail_part" has different type for column "b"
+ALTER TABLE fail_part ALTER b TYPE char (2) COLLATE "POSIX";
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ERROR: child table "fail_part" has different collation for column "b"
+DROP TABLE fail_part;
+-- check that the table being attached has all constraints of the parent
+CREATE TABLE fail_part (
+ b char(2) COLLATE "C",
+ a int NOT NULL
+);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ERROR: child table is missing constraint "check_a"
+-- check that the constraint matches in definition with parent's constraint
+ALTER TABLE fail_part ADD CONSTRAINT check_a CHECK (a >= 0);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ERROR: child table "fail_part" has different definition for check constraint "check_a"
+DROP TABLE fail_part;
+-- check the attributes and constraints after partition is attached
+CREATE TABLE part_1 (
+ a int NOT NULL,
+ b char(2) COLLATE "C",
+ CONSTRAINT check_a CHECK (a > 0)
+);
+ALTER TABLE list_parted ATTACH PARTITION part_1 FOR VALUES IN (1);
+-- attislocal and conislocal are always false for merged attributes and constraints respectively.
+SELECT attislocal, attinhcount FROM pg_attribute WHERE attrelid = 'part_1'::regclass AND attnum > 0;
+ attislocal | attinhcount
+------------+-------------
+ f | 1
+ f | 1
+(2 rows)
+
+SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_1'::regclass AND conname = 'check_a';
+ conislocal | coninhcount
+------------+-------------
+ f | 1
+(1 row)
+
+-- check that the new partition won't overlap with an existing partition
+CREATE TABLE fail_part (LIKE part_1 INCLUDING CONSTRAINTS);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ERROR: partition "fail_part" would overlap partition "part_1"
+LINE 1: ...LE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ ^
+DROP TABLE fail_part;
+-- check that an existing table can be attached as a default partition
+CREATE TABLE def_part (LIKE list_parted INCLUDING CONSTRAINTS);
+ALTER TABLE list_parted ATTACH PARTITION def_part DEFAULT;
+-- check attaching default partition fails if a default partition already
+-- exists
+CREATE TABLE fail_def_part (LIKE part_1 INCLUDING CONSTRAINTS);
+ALTER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT;
+ERROR: partition "fail_def_part" conflicts with existing default partition "def_part"
+LINE 1: ...ER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT;
+ ^
+-- check validation when attaching list partitions
+CREATE TABLE list_parted2 (
+ a int,
+ b char
+) PARTITION BY LIST (a);
+-- check that violating rows are correctly reported
+CREATE TABLE part_2 (LIKE list_parted2);
+INSERT INTO part_2 VALUES (3, 'a');
+ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
+ERROR: partition constraint of relation "part_2" is violated by some row
+-- should be ok after deleting the bad row
+DELETE FROM part_2;
+ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
+-- check partition cannot be attached if default has some row for its values
+CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT;
+INSERT INTO list_parted2_def VALUES (11, 'z');
+CREATE TABLE part_3 (LIKE list_parted2);
+ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11);
+ERROR: updated partition constraint for default partition "list_parted2_def" would be violated by some row
+-- should be ok after deleting the bad row
+DELETE FROM list_parted2_def WHERE a = 11;
+ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11);
+-- adding constraints that describe the desired partition constraint
+-- (or more restrictive) will help skip the validation scan
+CREATE TABLE part_3_4 (
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IN (3))
+);
+-- however, if a list partition does not accept nulls, there should be
+-- an explicit NOT NULL constraint on the partition key column for the
+-- validation scan to be skipped;
+ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);
+-- adding a NOT NULL constraint will cause the scan to be skipped
+ALTER TABLE list_parted2 DETACH PARTITION part_3_4;
+ALTER TABLE part_3_4 ALTER a SET NOT NULL;
+ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);
+-- check if default partition scan skipped
+ALTER TABLE list_parted2_def ADD CONSTRAINT check_a CHECK (a IN (5, 6));
+CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66);
+-- check validation when attaching range partitions
+CREATE TABLE range_parted (
+ a int,
+ b int
+) PARTITION BY RANGE (a, b);
+-- check that violating rows are correctly reported
+CREATE TABLE part1 (
+ a int NOT NULL CHECK (a = 1),
+ b int NOT NULL CHECK (b >= 1 AND b <= 10)
+);
+INSERT INTO part1 VALUES (1, 10);
+-- Remember the TO bound is exclusive
+ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10);
+ERROR: partition constraint of relation "part1" is violated by some row
+-- should be ok after deleting the bad row
+DELETE FROM part1;
+ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10);
+-- adding constraints that describe the desired partition constraint
+-- (or more restrictive) will help skip the validation scan
+CREATE TABLE part2 (
+ a int NOT NULL CHECK (a = 1),
+ b int NOT NULL CHECK (b >= 10 AND b < 18)
+);
+ALTER TABLE range_parted ATTACH PARTITION part2 FOR VALUES FROM (1, 10) TO (1, 20);
+-- Create default partition
+CREATE TABLE partr_def1 PARTITION OF range_parted DEFAULT;
+-- Only one default partition is allowed, hence, following should give error
+CREATE TABLE partr_def2 (LIKE part1 INCLUDING CONSTRAINTS);
+ALTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT;
+ERROR: partition "partr_def2" conflicts with existing default partition "partr_def1"
+LINE 1: ...LTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT;
+ ^
+-- Overlapping partitions cannot be attached, hence, following should give error
+INSERT INTO partr_def1 VALUES (2, 10);
+CREATE TABLE part3 (LIKE range_parted);
+ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (2, 10) TO (2, 20);
+ERROR: updated partition constraint for default partition "partr_def1" would be violated by some row
+-- Attaching partitions should be successful when there are no overlapping rows
+ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (3, 10) TO (3, 20);
+-- check that leaf partitions are scanned when attaching a partitioned
+-- table
+CREATE TABLE part_5 (
+ LIKE list_parted2
+) PARTITION BY LIST (b);
+-- check that violating rows are correctly reported
+CREATE TABLE part_5_a PARTITION OF part_5 FOR VALUES IN ('a');
+INSERT INTO part_5_a (a, b) VALUES (6, 'a');
+ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
+ERROR: partition constraint of relation "part_5_a" is violated by some row
+-- delete the faulting row and also add a constraint to skip the scan
+DELETE FROM part_5_a WHERE a NOT IN (3);
+ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 5);
+ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
+ALTER TABLE list_parted2 DETACH PARTITION part_5;
+ALTER TABLE part_5 DROP CONSTRAINT check_a;
+-- scan should again be skipped, even though NOT NULL is now a column property
+ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IN (5)), ALTER a SET NOT NULL;
+ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
+-- Check the case where attnos of the partitioning columns in the table being
+-- attached differs from the parent. It should not affect the constraint-
+-- checking logic that allows to skip the scan.
+CREATE TABLE part_6 (
+ c int,
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 6)
+);
+ALTER TABLE part_6 DROP c;
+ALTER TABLE list_parted2 ATTACH PARTITION part_6 FOR VALUES IN (6);
+-- Similar to above, but the table being attached is a partitioned table
+-- whose partition has still different attnos for the root partitioning
+-- columns.
+CREATE TABLE part_7 (
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7)
+) PARTITION BY LIST (b);
+CREATE TABLE part_7_a_null (
+ c int,
+ d int,
+ e int,
+ LIKE list_parted2, -- 'a' will have attnum = 4
+ CONSTRAINT check_b CHECK (b IS NULL OR b = 'a'),
+ CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7)
+);
+ALTER TABLE part_7_a_null DROP c, DROP d, DROP e;
+ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN ('a', null);
+ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);
+-- Same example, but check this time that the constraint correctly detects
+-- violating rows
+ALTER TABLE list_parted2 DETACH PARTITION part_7;
+ALTER TABLE part_7 DROP CONSTRAINT check_a; -- thusly, scan won't be skipped
+INSERT INTO part_7 (a, b) VALUES (8, null), (9, 'a');
+SELECT tableoid::regclass, a, b FROM part_7 order by a;
+ tableoid | a | b
+---------------+---+---
+ part_7_a_null | 8 |
+ part_7_a_null | 9 | a
+(2 rows)
+
+ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);
+ERROR: partition constraint of relation "part_7_a_null" is violated by some row
+-- check that leaf partitions of default partition are scanned when
+-- attaching a partitioned table.
+ALTER TABLE part_5 DROP CONSTRAINT check_a;
+CREATE TABLE part5_def PARTITION OF part_5 DEFAULT PARTITION BY LIST(a);
+CREATE TABLE part5_def_p1 PARTITION OF part5_def FOR VALUES IN (5);
+INSERT INTO part5_def_p1 VALUES (5, 'y');
+CREATE TABLE part5_p1 (LIKE part_5);
+ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y');
+ERROR: updated partition constraint for default partition "part5_def_p1" would be violated by some row
+-- should be ok after deleting the bad row
+DELETE FROM part5_def_p1 WHERE b = 'y';
+ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y');
+-- check that the table being attached is not already a partition
+ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
+ERROR: "part_2" is already a partition
+-- check that circular inheritance is not allowed
+ALTER TABLE part_5 ATTACH PARTITION list_parted2 FOR VALUES IN ('b');
+ERROR: circular inheritance not allowed
+DETAIL: "part_5" is already a child of "list_parted2".
+ALTER TABLE list_parted2 ATTACH PARTITION list_parted2 FOR VALUES IN (0);
+ERROR: circular inheritance not allowed
+DETAIL: "list_parted2" is already a child of "list_parted2".
+-- If a partitioned table being created or an existing table being attached
+-- as a partition does not have a constraint that would allow validation scan
+-- to be skipped, but an individual partition does, then the partition's
+-- validation scan is skipped.
+CREATE TABLE quuux (a int, b text) PARTITION BY LIST (a);
+CREATE TABLE quuux_default PARTITION OF quuux DEFAULT PARTITION BY LIST (b);
+CREATE TABLE quuux_default1 PARTITION OF quuux_default (
+ CONSTRAINT check_1 CHECK (a IS NOT NULL AND a = 1)
+) FOR VALUES IN ('b');
+CREATE TABLE quuux1 (a int, b text);
+ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1); -- validate!
+CREATE TABLE quuux2 (a int, b text);
+ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2); -- skip validation
+DROP TABLE quuux1, quuux2;
+-- should validate for quuux1, but not for quuux2
+CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1);
+CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2);
+DROP TABLE quuux;
+-- check validation when attaching hash partitions
+-- Use hand-rolled hash functions and operator class to get predictable result
+-- on different machines. part_test_int4_ops is defined in insert.sql.
+-- check that the new partition won't overlap with an existing partition
+CREATE TABLE hash_parted (
+ a int,
+ b int
+) PARTITION BY HASH (a part_test_int4_ops);
+CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 4, REMAINDER 0);
+CREATE TABLE fail_part (LIKE hpart_1);
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 4);
+ERROR: partition "fail_part" would overlap partition "hpart_1"
+LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU...
+ ^
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 0);
+ERROR: partition "fail_part" would overlap partition "hpart_1"
+LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU...
+ ^
+DROP TABLE fail_part;
+-- check validation when attaching hash partitions
+-- check that violating rows are correctly reported
+CREATE TABLE hpart_2 (LIKE hash_parted);
+INSERT INTO hpart_2 VALUES (3, 0);
+ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1);
+ERROR: partition constraint of relation "hpart_2" is violated by some row
+-- should be ok after deleting the bad row
+DELETE FROM hpart_2;
+ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1);
+-- check that leaf partitions are scanned when attaching a partitioned
+-- table
+CREATE TABLE hpart_5 (
+ LIKE hash_parted
+) PARTITION BY LIST (b);
+-- check that violating rows are correctly reported
+CREATE TABLE hpart_5_a PARTITION OF hpart_5 FOR VALUES IN ('1', '2', '3');
+INSERT INTO hpart_5_a (a, b) VALUES (7, 1);
+ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2);
+ERROR: partition constraint of relation "hpart_5_a" is violated by some row
+-- should be ok after deleting the bad row
+DELETE FROM hpart_5_a;
+ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2);
+-- check that the table being attach is with valid modulus and remainder value
+CREATE TABLE fail_part(LIKE hash_parted);
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 0, REMAINDER 1);
+ERROR: modulus for hash partition must be an integer value greater than zero
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 8);
+ERROR: remainder for hash partition must be less than modulus
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 3, REMAINDER 2);
+ERROR: every hash partition modulus must be a factor of the next larger modulus
+DETAIL: The new modulus 3 is not a factor of 4, the modulus of existing partition "hpart_1".
+DROP TABLE fail_part;
+-- fails with incorrect object type
+CREATE VIEW at_v1 AS SELECT 1 as a;
+ALTER TABLE at_v1 ATTACH PARTITION dummy default;
+ERROR: "at_v1" is not a table or partitioned index
+DROP VIEW at_v1;
+--
+-- DETACH PARTITION
+--
+-- check that the table is partitioned at all
+CREATE TABLE regular_table (a int);
+ALTER TABLE regular_table DETACH PARTITION any_name;
+ERROR: table "regular_table" is not partitioned
+DROP TABLE regular_table;
+-- check that the partition being detached exists at all
+ALTER TABLE list_parted2 DETACH PARTITION part_4;
+ERROR: relation "part_4" does not exist
+ALTER TABLE hash_parted DETACH PARTITION hpart_4;
+ERROR: relation "hpart_4" does not exist
+-- check that the partition being detached is actually a partition of the parent
+CREATE TABLE not_a_part (a int);
+ALTER TABLE list_parted2 DETACH PARTITION not_a_part;
+ERROR: relation "not_a_part" is not a partition of relation "list_parted2"
+ALTER TABLE list_parted2 DETACH PARTITION part_1;
+ERROR: relation "part_1" is not a partition of relation "list_parted2"
+ALTER TABLE hash_parted DETACH PARTITION not_a_part;
+ERROR: relation "not_a_part" is not a partition of relation "hash_parted"
+DROP TABLE not_a_part;
+-- check that, after being detached, attinhcount/coninhcount is dropped to 0 and
+-- attislocal/conislocal is set to true
+ALTER TABLE list_parted2 DETACH PARTITION part_3_4;
+SELECT attinhcount, attislocal FROM pg_attribute WHERE attrelid = 'part_3_4'::regclass AND attnum > 0;
+ attinhcount | attislocal
+-------------+------------
+ 0 | t
+ 0 | t
+(2 rows)
+
+SELECT coninhcount, conislocal FROM pg_constraint WHERE conrelid = 'part_3_4'::regclass AND conname = 'check_a';
+ coninhcount | conislocal
+-------------+------------
+ 0 | t
+(1 row)
+
+DROP TABLE part_3_4;
+-- check that a detached partition is not dropped on dropping a partitioned table
+CREATE TABLE range_parted2 (
+ a int
+) PARTITION BY RANGE(a);
+CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100);
+ALTER TABLE range_parted2 DETACH PARTITION part_rp;
+DROP TABLE range_parted2;
+SELECT * from part_rp;
+ a
+---
+(0 rows)
+
+DROP TABLE part_rp;
+-- concurrent detach
+CREATE TABLE range_parted2 (
+ a int
+) PARTITION BY RANGE(a);
+CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100);
+BEGIN;
+-- doesn't work in a partition block
+ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
+ERROR: ALTER TABLE ... DETACH CONCURRENTLY cannot run inside a transaction block
+COMMIT;
+CREATE TABLE part_rpd PARTITION OF range_parted2 DEFAULT;
+-- doesn't work if there's a default partition
+ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
+ERROR: cannot detach partitions concurrently when a default partition exists
+-- doesn't work for the default partition
+ALTER TABLE range_parted2 DETACH PARTITION part_rpd CONCURRENTLY;
+ERROR: cannot detach partitions concurrently when a default partition exists
+DROP TABLE part_rpd;
+-- works fine
+ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
+\d+ range_parted2
+ Partitioned table "public.range_parted2"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | integer | | | | plain | |
+Partition key: RANGE (a)
+Number of partitions: 0
+
+-- constraint should be created
+\d part_rp
+ Table "public.part_rp"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+Check constraints:
+ "part_rp_a_check" CHECK (a IS NOT NULL AND a >= 0 AND a < 100)
+
+CREATE TABLE part_rp100 PARTITION OF range_parted2 (CHECK (a>=123 AND a<133 AND a IS NOT NULL)) FOR VALUES FROM (100) to (200);
+ALTER TABLE range_parted2 DETACH PARTITION part_rp100 CONCURRENTLY;
+-- redundant constraint should not be created
+\d part_rp100
+ Table "public.part_rp100"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+Check constraints:
+ "part_rp100_a_check" CHECK (a >= 123 AND a < 133 AND a IS NOT NULL)
+
+DROP TABLE range_parted2;
+-- Check ALTER TABLE commands for partitioned tables and partitions
+-- cannot add/drop column to/from *only* the parent
+ALTER TABLE ONLY list_parted2 ADD COLUMN c int;
+ERROR: column must be added to child tables too
+ALTER TABLE ONLY list_parted2 DROP COLUMN b;
+ERROR: cannot drop column from only the partitioned table when partitions exist
+HINT: Do not specify the ONLY keyword.
+-- cannot add a column to partition or drop an inherited one
+ALTER TABLE part_2 ADD COLUMN c text;
+ERROR: cannot add column to a partition
+ALTER TABLE part_2 DROP COLUMN b;
+ERROR: cannot drop inherited column "b"
+-- Nor rename, alter type
+ALTER TABLE part_2 RENAME COLUMN b to c;
+ERROR: cannot rename inherited column "b"
+ALTER TABLE part_2 ALTER COLUMN b TYPE text;
+ERROR: cannot alter inherited column "b"
+-- cannot add/drop NOT NULL or check constraints to *only* the parent, when
+-- partitions exist
+ALTER TABLE ONLY list_parted2 ALTER b SET NOT NULL;
+ERROR: constraint must be added to child tables too
+DETAIL: Column "b" of relation "part_2" is not already NOT NULL.
+HINT: Do not specify the ONLY keyword.
+ALTER TABLE ONLY list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz');
+ERROR: constraint must be added to child tables too
+ALTER TABLE list_parted2 ALTER b SET NOT NULL;
+ALTER TABLE ONLY list_parted2 ALTER b DROP NOT NULL;
+ERROR: cannot remove constraint from only the partitioned table when partitions exist
+HINT: Do not specify the ONLY keyword.
+ALTER TABLE list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz');
+ALTER TABLE ONLY list_parted2 DROP CONSTRAINT check_b;
+ERROR: cannot remove constraint from only the partitioned table when partitions exist
+HINT: Do not specify the ONLY keyword.
+-- It's alright though, if no partitions are yet created
+CREATE TABLE parted_no_parts (a int) PARTITION BY LIST (a);
+ALTER TABLE ONLY parted_no_parts ALTER a SET NOT NULL;
+ALTER TABLE ONLY parted_no_parts ADD CONSTRAINT check_a CHECK (a > 0);
+ALTER TABLE ONLY parted_no_parts ALTER a DROP NOT NULL;
+ALTER TABLE ONLY parted_no_parts DROP CONSTRAINT check_a;
+DROP TABLE parted_no_parts;
+-- cannot drop inherited NOT NULL or check constraints from partition
+ALTER TABLE list_parted2 ALTER b SET NOT NULL, ADD CONSTRAINT check_a2 CHECK (a > 0);
+ALTER TABLE part_2 ALTER b DROP NOT NULL;
+ERROR: column "b" is marked NOT NULL in parent table
+ALTER TABLE part_2 DROP CONSTRAINT check_a2;
+ERROR: cannot drop inherited constraint "check_a2" of relation "part_2"
+-- Doesn't make sense to add NO INHERIT constraints on partitioned tables
+ALTER TABLE list_parted2 add constraint check_b2 check (b <> 'zz') NO INHERIT;
+ERROR: cannot add NO INHERIT constraint to partitioned table "list_parted2"
+-- check that a partition cannot participate in regular inheritance
+CREATE TABLE inh_test () INHERITS (part_2);
+ERROR: cannot inherit from partition "part_2"
+CREATE TABLE inh_test (LIKE part_2);
+ALTER TABLE inh_test INHERIT part_2;
+ERROR: cannot inherit from a partition
+ALTER TABLE part_2 INHERIT inh_test;
+ERROR: cannot change inheritance of a partition
+-- cannot drop or alter type of partition key columns of lower level
+-- partitioned tables; for example, part_5, which is list_parted2's
+-- partition, is partitioned on b;
+ALTER TABLE list_parted2 DROP COLUMN b;
+ERROR: cannot drop column "b" because it is part of the partition key of relation "part_5"
+ALTER TABLE list_parted2 ALTER COLUMN b TYPE text;
+ERROR: cannot alter column "b" because it is part of the partition key of relation "part_5"
+-- dropping non-partition key columns should be allowed on the parent table.
+ALTER TABLE list_parted DROP COLUMN b;
+SELECT * FROM list_parted;
+ a
+---
+(0 rows)
+
+-- cleanup
+DROP TABLE list_parted, list_parted2, range_parted;
+DROP TABLE fail_def_part;
+DROP TABLE hash_parted;
+-- more tests for certain multi-level partitioning scenarios
+create table p (a int, b int) partition by range (a, b);
+create table p1 (b int, a int not null) partition by range (b);
+create table p11 (like p1);
+alter table p11 drop a;
+alter table p11 add a int;
+alter table p11 drop a;
+alter table p11 add a int not null;
+-- attnum for key attribute 'a' is different in p, p1, and p11
+select attrelid::regclass, attname, attnum
+from pg_attribute
+where attname = 'a'
+ and (attrelid = 'p'::regclass
+ or attrelid = 'p1'::regclass
+ or attrelid = 'p11'::regclass)
+order by attrelid::regclass::text;
+ attrelid | attname | attnum
+----------+---------+--------
+ p | a | 1
+ p1 | a | 2
+ p11 | a | 4
+(3 rows)
+
+alter table p1 attach partition p11 for values from (2) to (5);
+insert into p1 (a, b) values (2, 3);
+-- check that partition validation scan correctly detects violating rows
+alter table p attach partition p1 for values from (1, 2) to (1, 10);
+ERROR: partition constraint of relation "p11" is violated by some row
+-- cleanup
+drop table p;
+drop table p1;
+-- validate constraint on partitioned tables should only scan leaf partitions
+create table parted_validate_test (a int) partition by list (a);
+create table parted_validate_test_1 partition of parted_validate_test for values in (0, 1);
+alter table parted_validate_test add constraint parted_validate_test_chka check (a > 0) not valid;
+alter table parted_validate_test validate constraint parted_validate_test_chka;
+drop table parted_validate_test;
+-- test alter column options
+CREATE TABLE attmp(i integer);
+INSERT INTO attmp VALUES (1);
+ALTER TABLE attmp ALTER COLUMN i SET (n_distinct = 1, n_distinct_inherited = 2);
+ALTER TABLE attmp ALTER COLUMN i RESET (n_distinct_inherited);
+ANALYZE attmp;
+DROP TABLE attmp;
+DROP USER regress_alter_table_user1;
+-- check that violating rows are correctly reported when attaching as the
+-- default partition
+create table defpart_attach_test (a int) partition by list (a);
+create table defpart_attach_test1 partition of defpart_attach_test for values in (1);
+create table defpart_attach_test_d (b int, a int);
+alter table defpart_attach_test_d drop b;
+insert into defpart_attach_test_d values (1), (2);
+-- error because its constraint as the default partition would be violated
+-- by the row containing 1
+alter table defpart_attach_test attach partition defpart_attach_test_d default;
+ERROR: partition constraint of relation "defpart_attach_test_d" is violated by some row
+delete from defpart_attach_test_d where a = 1;
+alter table defpart_attach_test_d add check (a > 1);
+-- should be attached successfully and without needing to be scanned
+alter table defpart_attach_test attach partition defpart_attach_test_d default;
+-- check that attaching a partition correctly reports any rows in the default
+-- partition that should not be there for the new partition to be attached
+-- successfully
+create table defpart_attach_test_2 (like defpart_attach_test_d);
+alter table defpart_attach_test attach partition defpart_attach_test_2 for values in (2);
+ERROR: updated partition constraint for default partition "defpart_attach_test_d" would be violated by some row
+drop table defpart_attach_test;
+-- check combinations of temporary and permanent relations when attaching
+-- partitions.
+create table perm_part_parent (a int) partition by list (a);
+create temp table temp_part_parent (a int) partition by list (a);
+create table perm_part_child (a int);
+create temp table temp_part_child (a int);
+alter table temp_part_parent attach partition perm_part_child default; -- error
+ERROR: cannot attach a permanent relation as partition of temporary relation "temp_part_parent"
+alter table perm_part_parent attach partition temp_part_child default; -- error
+ERROR: cannot attach a temporary relation as partition of permanent relation "perm_part_parent"
+alter table temp_part_parent attach partition temp_part_child default; -- ok
+drop table perm_part_parent cascade;
+drop table temp_part_parent cascade;
+-- check that attaching partitions to a table while it is being used is
+-- prevented
+create table tab_part_attach (a int) partition by list (a);
+create or replace function func_part_attach() returns trigger
+ language plpgsql as $$
+ begin
+ execute 'create table tab_part_attach_1 (a int)';
+ execute 'alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)';
+ return null;
+ end $$;
+create trigger trig_part_attach before insert on tab_part_attach
+ for each statement execute procedure func_part_attach();
+insert into tab_part_attach values (1);
+ERROR: cannot ALTER TABLE "tab_part_attach" because it is being used by active queries in this session
+CONTEXT: SQL statement "alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)"
+PL/pgSQL function func_part_attach() line 4 at EXECUTE
+drop table tab_part_attach;
+drop function func_part_attach();
+-- test case where the partitioning operator is a SQL function whose
+-- evaluation results in the table's relcache being rebuilt partway through
+-- the execution of an ATTACH PARTITION command
+create function at_test_sql_partop (int4, int4) returns int language sql
+as $$ select case when $1 = $2 then 0 when $1 > $2 then 1 else -1 end; $$;
+create operator class at_test_sql_partop for type int4 using btree as
+ operator 1 < (int4, int4), operator 2 <= (int4, int4),
+ operator 3 = (int4, int4), operator 4 >= (int4, int4),
+ operator 5 > (int4, int4), function 1 at_test_sql_partop(int4, int4);
+create table at_test_sql_partop (a int) partition by range (a at_test_sql_partop);
+create table at_test_sql_partop_1 (a int);
+alter table at_test_sql_partop attach partition at_test_sql_partop_1 for values from (0) to (10);
+drop table at_test_sql_partop;
+drop operator class at_test_sql_partop using btree;
+drop function at_test_sql_partop;
+/* Test case for bug #16242 */
+-- We create a parent and child where the child has missing
+-- non-null attribute values, and arrange to pass them through
+-- tuple conversion from the child to the parent tupdesc
+create table bar1 (a integer, b integer not null default 1)
+ partition by range (a);
+create table bar2 (a integer);
+insert into bar2 values (1);
+alter table bar2 add column b integer not null default 1;
+-- (at this point bar2 contains tuple with natts=1)
+alter table bar1 attach partition bar2 default;
+-- this works:
+select * from bar1;
+ a | b
+---+---
+ 1 | 1
+(1 row)
+
+-- this exercises tuple conversion:
+create function xtrig()
+ returns trigger language plpgsql
+as $$
+ declare
+ r record;
+ begin
+ for r in select * from old loop
+ raise info 'a=%, b=%', r.a, r.b;
+ end loop;
+ return NULL;
+ end;
+$$;
+create trigger xtrig
+ after update on bar1
+ referencing old table as old
+ for each statement execute procedure xtrig();
+update bar1 set a = a + 1;
+INFO: a=1, b=1
+/* End test case for bug #16242 */
+/* Test case for bug #17409 */
+create table attbl (p1 int constraint pk_attbl primary key);
+create table atref (c1 int references attbl(p1));
+cluster attbl using pk_attbl;
+alter table attbl alter column p1 set data type bigint;
+alter table atref alter column c1 set data type bigint;
+drop table attbl, atref;
+create table attbl (p1 int constraint pk_attbl primary key);
+alter table attbl replica identity using index pk_attbl;
+create table atref (c1 int references attbl(p1));
+alter table attbl alter column p1 set data type bigint;
+alter table atref alter column c1 set data type bigint;
+drop table attbl, atref;
+/* End test case for bug #17409 */
+-- Test that ALTER TABLE rewrite preserves a clustered index
+-- for normal indexes and indexes on constraints.
+create table alttype_cluster (a int);
+alter table alttype_cluster add primary key (a);
+create index alttype_cluster_ind on alttype_cluster (a);
+alter table alttype_cluster cluster on alttype_cluster_ind;
+-- Normal index remains clustered.
+select indexrelid::regclass, indisclustered from pg_index
+ where indrelid = 'alttype_cluster'::regclass
+ order by indexrelid::regclass::text;
+ indexrelid | indisclustered
+----------------------+----------------
+ alttype_cluster_ind | t
+ alttype_cluster_pkey | f
+(2 rows)
+
+alter table alttype_cluster alter a type bigint;
+select indexrelid::regclass, indisclustered from pg_index
+ where indrelid = 'alttype_cluster'::regclass
+ order by indexrelid::regclass::text;
+ indexrelid | indisclustered
+----------------------+----------------
+ alttype_cluster_ind | t
+ alttype_cluster_pkey | f
+(2 rows)
+
+-- Constraint index remains clustered.
+alter table alttype_cluster cluster on alttype_cluster_pkey;
+select indexrelid::regclass, indisclustered from pg_index
+ where indrelid = 'alttype_cluster'::regclass
+ order by indexrelid::regclass::text;
+ indexrelid | indisclustered
+----------------------+----------------
+ alttype_cluster_ind | f
+ alttype_cluster_pkey | t
+(2 rows)
+
+alter table alttype_cluster alter a type int;
+select indexrelid::regclass, indisclustered from pg_index
+ where indrelid = 'alttype_cluster'::regclass
+ order by indexrelid::regclass::text;
+ indexrelid | indisclustered
+----------------------+----------------
+ alttype_cluster_ind | f
+ alttype_cluster_pkey | t
+(2 rows)
+
+drop table alttype_cluster;
+--
+-- Check that attaching or detaching a partitioned partition correctly leads
+-- to its partitions' constraint being updated to reflect the parent's
+-- newly added/removed constraint
+create table target_parted (a int, b int) partition by list (a);
+create table attach_parted (a int, b int) partition by list (b);
+create table attach_parted_part1 partition of attach_parted for values in (1);
+-- insert a row directly into the leaf partition so that its partition
+-- constraint is built and stored in the relcache
+insert into attach_parted_part1 values (1, 1);
+-- the following better invalidate the partition constraint of the leaf
+-- partition too...
+alter table target_parted attach partition attach_parted for values in (1);
+-- ...such that the following insert fails
+insert into attach_parted_part1 values (2, 1);
+ERROR: new row for relation "attach_parted_part1" violates partition constraint
+DETAIL: Failing row contains (2, 1).
+-- ...and doesn't when the partition is detached along with its own partition
+alter table target_parted detach partition attach_parted;
+insert into attach_parted_part1 values (2, 1);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/alter_table.sql b/ydb/library/yql/tests/postgresql/original/cases/alter_table.sql
new file mode 100644
index 0000000000..6ef9dd437b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/alter_table.sql
@@ -0,0 +1,3030 @@
+--
+-- ALTER_TABLE
+--
+
+-- Clean up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+DROP ROLE IF EXISTS regress_alter_table_user1;
+RESET client_min_messages;
+
+CREATE USER regress_alter_table_user1;
+
+--
+-- add attribute
+--
+
+CREATE TABLE attmp (initial int4);
+
+COMMENT ON TABLE attmp_wrong IS 'table comment';
+COMMENT ON TABLE attmp IS 'table comment';
+COMMENT ON TABLE attmp IS NULL;
+
+ALTER TABLE attmp ADD COLUMN xmin integer; -- fails
+
+ALTER TABLE attmp ADD COLUMN a int4 default 3;
+
+ALTER TABLE attmp ADD COLUMN b name;
+
+ALTER TABLE attmp ADD COLUMN c text;
+
+ALTER TABLE attmp ADD COLUMN d float8;
+
+ALTER TABLE attmp ADD COLUMN e float4;
+
+ALTER TABLE attmp ADD COLUMN f int2;
+
+ALTER TABLE attmp ADD COLUMN g polygon;
+
+ALTER TABLE attmp ADD COLUMN i char;
+
+ALTER TABLE attmp ADD COLUMN k int4;
+
+ALTER TABLE attmp ADD COLUMN l tid;
+
+ALTER TABLE attmp ADD COLUMN m xid;
+
+ALTER TABLE attmp ADD COLUMN n oidvector;
+
+--ALTER TABLE attmp ADD COLUMN o lock;
+ALTER TABLE attmp ADD COLUMN p boolean;
+
+ALTER TABLE attmp ADD COLUMN q point;
+
+ALTER TABLE attmp ADD COLUMN r lseg;
+
+ALTER TABLE attmp ADD COLUMN s path;
+
+ALTER TABLE attmp ADD COLUMN t box;
+
+ALTER TABLE attmp ADD COLUMN v timestamp;
+
+ALTER TABLE attmp ADD COLUMN w interval;
+
+ALTER TABLE attmp ADD COLUMN x float8[];
+
+ALTER TABLE attmp ADD COLUMN y float4[];
+
+ALTER TABLE attmp ADD COLUMN z int2[];
+
+INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t,
+ v, w, x, y, z)
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'c',
+ 314159, '(1,1)', '512',
+ '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
+ '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)',
+ 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}');
+
+SELECT * FROM attmp;
+
+DROP TABLE attmp;
+
+-- the wolf bug - schema mods caused inconsistent row descriptors
+CREATE TABLE attmp (
+ initial int4
+);
+
+ALTER TABLE attmp ADD COLUMN a int4;
+
+ALTER TABLE attmp ADD COLUMN b name;
+
+ALTER TABLE attmp ADD COLUMN c text;
+
+ALTER TABLE attmp ADD COLUMN d float8;
+
+ALTER TABLE attmp ADD COLUMN e float4;
+
+ALTER TABLE attmp ADD COLUMN f int2;
+
+ALTER TABLE attmp ADD COLUMN g polygon;
+
+ALTER TABLE attmp ADD COLUMN i char;
+
+ALTER TABLE attmp ADD COLUMN k int4;
+
+ALTER TABLE attmp ADD COLUMN l tid;
+
+ALTER TABLE attmp ADD COLUMN m xid;
+
+ALTER TABLE attmp ADD COLUMN n oidvector;
+
+--ALTER TABLE attmp ADD COLUMN o lock;
+ALTER TABLE attmp ADD COLUMN p boolean;
+
+ALTER TABLE attmp ADD COLUMN q point;
+
+ALTER TABLE attmp ADD COLUMN r lseg;
+
+ALTER TABLE attmp ADD COLUMN s path;
+
+ALTER TABLE attmp ADD COLUMN t box;
+
+ALTER TABLE attmp ADD COLUMN v timestamp;
+
+ALTER TABLE attmp ADD COLUMN w interval;
+
+ALTER TABLE attmp ADD COLUMN x float8[];
+
+ALTER TABLE attmp ADD COLUMN y float4[];
+
+ALTER TABLE attmp ADD COLUMN z int2[];
+
+INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t,
+ v, w, x, y, z)
+ VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)',
+ 'c',
+ 314159, '(1,1)', '512',
+ '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)',
+ '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)',
+ 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}');
+
+SELECT * FROM attmp;
+
+CREATE INDEX attmp_idx ON attmp (a, (d + e), b);
+
+ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000;
+
+ALTER INDEX attmp_idx ALTER COLUMN 1 SET STATISTICS 1000;
+
+ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS 1000;
+
+\d+ attmp_idx
+
+ALTER INDEX attmp_idx ALTER COLUMN 3 SET STATISTICS 1000;
+
+ALTER INDEX attmp_idx ALTER COLUMN 4 SET STATISTICS 1000;
+
+ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS -1;
+
+DROP TABLE attmp;
+
+-- fails with incorrect object type
+CREATE VIEW at_v1 AS SELECT 1 as a;
+ALTER TABLE at_v1 ALTER COLUMN a SET STATISTICS 0;
+DROP VIEW at_v1;
+
+--
+-- rename - check on both non-temp and temp tables
+--
+CREATE TABLE attmp (regtable int);
+CREATE TEMP TABLE attmp (attmptable int);
+
+ALTER TABLE attmp RENAME TO attmp_new;
+
+SELECT * FROM attmp;
+SELECT * FROM attmp_new;
+
+ALTER TABLE attmp RENAME TO attmp_new2;
+
+SELECT * FROM attmp; -- should fail
+SELECT * FROM attmp_new;
+SELECT * FROM attmp_new2;
+
+DROP TABLE attmp_new;
+DROP TABLE attmp_new2;
+
+-- check rename of partitioned tables and indexes also
+CREATE TABLE part_attmp (a int primary key) partition by range (a);
+CREATE TABLE part_attmp1 PARTITION OF part_attmp FOR VALUES FROM (0) TO (100);
+ALTER INDEX part_attmp_pkey RENAME TO part_attmp_index;
+ALTER INDEX part_attmp1_pkey RENAME TO part_attmp1_index;
+ALTER TABLE part_attmp RENAME TO part_at2tmp;
+ALTER TABLE part_attmp1 RENAME TO part_at2tmp1;
+SET ROLE regress_alter_table_user1;
+ALTER INDEX part_attmp_index RENAME TO fail;
+ALTER INDEX part_attmp1_index RENAME TO fail;
+ALTER TABLE part_at2tmp RENAME TO fail;
+ALTER TABLE part_at2tmp1 RENAME TO fail;
+RESET ROLE;
+DROP TABLE part_at2tmp;
+
+--
+-- check renaming to a table's array type's autogenerated name
+-- (the array type's name should get out of the way)
+--
+CREATE TABLE attmp_array (id int);
+CREATE TABLE attmp_array2 (id int);
+SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype;
+SELECT typname FROM pg_type WHERE oid = 'attmp_array2[]'::regtype;
+ALTER TABLE attmp_array2 RENAME TO _attmp_array;
+SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype;
+SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype;
+DROP TABLE _attmp_array;
+DROP TABLE attmp_array;
+
+-- renaming to table's own array type's name is an interesting corner case
+CREATE TABLE attmp_array (id int);
+SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype;
+ALTER TABLE attmp_array RENAME TO _attmp_array;
+SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype;
+DROP TABLE _attmp_array;
+
+-- ALTER TABLE ... RENAME on non-table relations
+-- renaming indexes (FIXME: this should probably test the index's functionality)
+ALTER INDEX IF EXISTS __onek_unique1 RENAME TO attmp_onek_unique1;
+ALTER INDEX IF EXISTS __attmp_onek_unique1 RENAME TO onek_unique1;
+
+ALTER INDEX onek_unique1 RENAME TO attmp_onek_unique1;
+ALTER INDEX attmp_onek_unique1 RENAME TO onek_unique1;
+
+SET ROLE regress_alter_table_user1;
+ALTER INDEX onek_unique1 RENAME TO fail; -- permission denied
+RESET ROLE;
+
+-- rename statements with mismatching statement and object types
+CREATE TABLE alter_idx_rename_test (a INT);
+CREATE INDEX alter_idx_rename_test_idx ON alter_idx_rename_test (a);
+CREATE TABLE alter_idx_rename_test_parted (a INT) PARTITION BY LIST (a);
+CREATE INDEX alter_idx_rename_test_parted_idx ON alter_idx_rename_test_parted (a);
+BEGIN;
+ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2;
+ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2;
+SELECT relation::regclass, mode FROM pg_locks
+WHERE pid = pg_backend_pid() AND locktype = 'relation'
+ AND relation::regclass::text LIKE 'alter\_idx%'
+ORDER BY relation::regclass::text COLLATE "C";
+COMMIT;
+BEGIN;
+ALTER INDEX alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2;
+ALTER INDEX alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2;
+SELECT relation::regclass, mode FROM pg_locks
+WHERE pid = pg_backend_pid() AND locktype = 'relation'
+ AND relation::regclass::text LIKE 'alter\_idx%'
+ORDER BY relation::regclass::text COLLATE "C";
+COMMIT;
+BEGIN;
+ALTER TABLE alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3;
+ALTER TABLE alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3;
+SELECT relation::regclass, mode FROM pg_locks
+WHERE pid = pg_backend_pid() AND locktype = 'relation'
+ AND relation::regclass::text LIKE 'alter\_idx%'
+ORDER BY relation::regclass::text COLLATE "C";
+COMMIT;
+DROP TABLE alter_idx_rename_test_2;
+
+-- renaming views
+CREATE VIEW attmp_view (unique1) AS SELECT unique1 FROM tenk1;
+ALTER TABLE attmp_view RENAME TO attmp_view_new;
+
+SET ROLE regress_alter_table_user1;
+ALTER VIEW attmp_view_new RENAME TO fail; -- permission denied
+RESET ROLE;
+
+-- hack to ensure we get an indexscan here
+set enable_seqscan to off;
+set enable_bitmapscan to off;
+-- 5 values, sorted
+SELECT unique1 FROM tenk1 WHERE unique1 < 5;
+reset enable_seqscan;
+reset enable_bitmapscan;
+
+DROP VIEW attmp_view_new;
+-- toast-like relation name
+alter table stud_emp rename to pg_toast_stud_emp;
+alter table pg_toast_stud_emp rename to stud_emp;
+
+-- renaming index should rename constraint as well
+ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1);
+ALTER INDEX onek_unique1_constraint RENAME TO onek_unique1_constraint_foo;
+ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo;
+
+-- renaming constraint
+ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0);
+ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo;
+ALTER TABLE onek DROP CONSTRAINT onek_check_constraint_foo;
+
+-- renaming constraint should rename index as well
+ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1);
+DROP INDEX onek_unique1_constraint; -- to see whether it's there
+ALTER TABLE onek RENAME CONSTRAINT onek_unique1_constraint TO onek_unique1_constraint_foo;
+DROP INDEX onek_unique1_constraint_foo; -- to see whether it's there
+ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo;
+
+-- renaming constraints vs. inheritance
+CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int);
+\d constraint_rename_test
+CREATE TABLE constraint_rename_test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) INHERITS (constraint_rename_test);
+\d constraint_rename_test2
+ALTER TABLE constraint_rename_test2 RENAME CONSTRAINT con1 TO con1foo; -- fail
+ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- fail
+ALTER TABLE constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- ok
+\d constraint_rename_test
+\d constraint_rename_test2
+ALTER TABLE constraint_rename_test ADD CONSTRAINT con2 CHECK (b > 0) NO INHERIT;
+ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con2 TO con2foo; -- ok
+ALTER TABLE constraint_rename_test RENAME CONSTRAINT con2foo TO con2bar; -- ok
+\d constraint_rename_test
+\d constraint_rename_test2
+ALTER TABLE constraint_rename_test ADD CONSTRAINT con3 PRIMARY KEY (a);
+ALTER TABLE constraint_rename_test RENAME CONSTRAINT con3 TO con3foo; -- ok
+\d constraint_rename_test
+\d constraint_rename_test2
+DROP TABLE constraint_rename_test2;
+DROP TABLE constraint_rename_test;
+ALTER TABLE IF EXISTS constraint_not_exist RENAME CONSTRAINT con3 TO con3foo; -- ok
+ALTER TABLE IF EXISTS constraint_rename_test ADD CONSTRAINT con4 UNIQUE (a);
+
+-- renaming constraints with cache reset of target relation
+CREATE TABLE constraint_rename_cache (a int,
+ CONSTRAINT chk_a CHECK (a > 0),
+ PRIMARY KEY (a));
+ALTER TABLE constraint_rename_cache
+ RENAME CONSTRAINT chk_a TO chk_a_new;
+ALTER TABLE constraint_rename_cache
+ RENAME CONSTRAINT constraint_rename_cache_pkey TO constraint_rename_pkey_new;
+CREATE TABLE like_constraint_rename_cache
+ (LIKE constraint_rename_cache INCLUDING ALL);
+\d like_constraint_rename_cache
+DROP TABLE constraint_rename_cache;
+DROP TABLE like_constraint_rename_cache;
+
+-- FOREIGN KEY CONSTRAINT adding TEST
+
+CREATE TABLE attmp2 (a int primary key);
+
+CREATE TABLE attmp3 (a int, b int);
+
+CREATE TABLE attmp4 (a int, b int, unique(a,b));
+
+CREATE TABLE attmp5 (a int, b int);
+
+-- Insert rows into attmp2 (pktable)
+INSERT INTO attmp2 values (1);
+INSERT INTO attmp2 values (2);
+INSERT INTO attmp2 values (3);
+INSERT INTO attmp2 values (4);
+
+-- Insert rows into attmp3
+INSERT INTO attmp3 values (1,10);
+INSERT INTO attmp3 values (1,20);
+INSERT INTO attmp3 values (5,50);
+
+-- Try (and fail) to add constraint due to invalid source columns
+ALTER TABLE attmp3 add constraint attmpconstr foreign key(c) references attmp2 match full;
+
+-- Try (and fail) to add constraint due to invalid destination columns explicitly given
+ALTER TABLE attmp3 add constraint attmpconstr foreign key(a) references attmp2(b) match full;
+
+-- Try (and fail) to add constraint due to invalid data
+ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full;
+
+-- Delete failing row
+DELETE FROM attmp3 where a=5;
+
+-- Try (and succeed)
+ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full;
+ALTER TABLE attmp3 drop constraint attmpconstr;
+
+INSERT INTO attmp3 values (5,50);
+
+-- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate
+ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full NOT VALID;
+ALTER TABLE attmp3 validate constraint attmpconstr;
+
+-- Delete failing row
+DELETE FROM attmp3 where a=5;
+
+-- Try (and succeed) and repeat to show it works on already valid constraint
+ALTER TABLE attmp3 validate constraint attmpconstr;
+ALTER TABLE attmp3 validate constraint attmpconstr;
+
+-- Try a non-verified CHECK constraint
+ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail
+ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails
+DELETE FROM attmp3 WHERE NOT b > 10;
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds
+
+-- Test inherited NOT VALID CHECK constraints
+select * from attmp3;
+CREATE TABLE attmp6 () INHERITS (attmp3);
+CREATE TABLE attmp7 () INHERITS (attmp3);
+
+INSERT INTO attmp6 VALUES (6, 30), (7, 16);
+ALTER TABLE attmp3 ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID;
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- fails
+DELETE FROM attmp6 WHERE b > 20;
+ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- succeeds
+
+-- An already validated constraint must not be revalidated
+CREATE FUNCTION boo(int) RETURNS int IMMUTABLE STRICT LANGUAGE plpgsql AS $$ BEGIN RAISE NOTICE 'boo: %', $1; RETURN $1; END; $$;
+INSERT INTO attmp7 VALUES (8, 18);
+ALTER TABLE attmp7 ADD CONSTRAINT identity CHECK (b = boo(b));
+ALTER TABLE attmp3 ADD CONSTRAINT IDENTITY check (b = boo(b)) NOT VALID;
+ALTER TABLE attmp3 VALIDATE CONSTRAINT identity;
+
+-- A NO INHERIT constraint should not be looked for in children during VALIDATE CONSTRAINT
+create table parent_noinh_convalid (a int);
+create table child_noinh_convalid () inherits (parent_noinh_convalid);
+insert into parent_noinh_convalid values (1);
+insert into child_noinh_convalid values (1);
+alter table parent_noinh_convalid add constraint check_a_is_2 check (a = 2) no inherit not valid;
+-- fail, because of the row in parent
+alter table parent_noinh_convalid validate constraint check_a_is_2;
+delete from only parent_noinh_convalid;
+-- ok (parent itself contains no violating rows)
+alter table parent_noinh_convalid validate constraint check_a_is_2;
+select convalidated from pg_constraint where conrelid = 'parent_noinh_convalid'::regclass and conname = 'check_a_is_2';
+-- cleanup
+drop table parent_noinh_convalid, child_noinh_convalid;
+
+-- Try (and fail) to create constraint from attmp5(a) to attmp4(a) - unique constraint on
+-- attmp4 is a,b
+
+ALTER TABLE attmp5 add constraint attmpconstr foreign key(a) references attmp4(a) match full;
+
+DROP TABLE attmp7;
+
+DROP TABLE attmp6;
+
+DROP TABLE attmp5;
+
+DROP TABLE attmp4;
+
+DROP TABLE attmp3;
+
+DROP TABLE attmp2;
+
+-- NOT VALID with plan invalidation -- ensure we don't use a constraint for
+-- exclusion until validated
+set constraint_exclusion TO 'partition';
+create table nv_parent (d date, check (false) no inherit not valid);
+-- not valid constraint added at creation time should automatically become valid
+\d nv_parent
+
+create table nv_child_2010 () inherits (nv_parent);
+create table nv_child_2011 () inherits (nv_parent);
+alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid;
+alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid;
+explain (costs off) select * from nv_parent where d between '2011-08-01' and '2011-08-31';
+create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent);
+explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date;
+explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
+-- after validation, the constraint should be used
+alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check;
+explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date;
+
+-- add an inherited NOT VALID constraint
+alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid;
+\d nv_child_2009
+-- we leave nv_parent and children around to help test pg_dump logic
+
+-- Foreign key adding test with mixed types
+
+-- Note: these tables are TEMP to avoid name conflicts when this test
+-- is run in parallel with foreign_key.sql.
+
+CREATE TEMP TABLE PKTABLE (ptest1 int PRIMARY KEY);
+INSERT INTO PKTABLE VALUES(42);
+CREATE TEMP TABLE FKTABLE (ftest1 inet);
+-- This next should fail, because int=inet does not exist
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
+-- This should also fail for the same reason, but here we
+-- give the column name
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1);
+DROP TABLE FKTABLE;
+-- This should succeed, even though they are different types,
+-- because int=int8 exists and is a member of the integer opfamily
+CREATE TEMP TABLE FKTABLE (ftest1 int8);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
+-- Check it actually works
+INSERT INTO FKTABLE VALUES(42); -- should succeed
+INSERT INTO FKTABLE VALUES(43); -- should fail
+DROP TABLE FKTABLE;
+-- This should fail, because we'd have to cast numeric to int which is
+-- not an implicit coercion (or use numeric=numeric, but that's not part
+-- of the integer opfamily)
+CREATE TEMP TABLE FKTABLE (ftest1 numeric);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
+DROP TABLE FKTABLE;
+DROP TABLE PKTABLE;
+-- On the other hand, this should work because int implicitly promotes to
+-- numeric, and we allow promotion on the FK side
+CREATE TEMP TABLE PKTABLE (ptest1 numeric PRIMARY KEY);
+INSERT INTO PKTABLE VALUES(42);
+CREATE TEMP TABLE FKTABLE (ftest1 int);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
+-- Check it actually works
+INSERT INTO FKTABLE VALUES(42); -- should succeed
+INSERT INTO FKTABLE VALUES(43); -- should fail
+DROP TABLE FKTABLE;
+DROP TABLE PKTABLE;
+
+CREATE TEMP TABLE PKTABLE (ptest1 int, ptest2 inet,
+ PRIMARY KEY(ptest1, ptest2));
+-- This should fail, because we just chose really odd types
+CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) references pktable;
+DROP TABLE FKTABLE;
+-- Again, so should this...
+CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
+ references pktable(ptest1, ptest2);
+DROP TABLE FKTABLE;
+-- This fails because we mixed up the column ordering
+CREATE TEMP TABLE FKTABLE (ftest1 int, ftest2 inet);
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
+ references pktable(ptest2, ptest1);
+-- As does this...
+ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest2, ftest1)
+ references pktable(ptest1, ptest2);
+DROP TABLE FKTABLE;
+DROP TABLE PKTABLE;
+
+-- Test that ALTER CONSTRAINT updates trigger deferrability properly
+
+CREATE TEMP TABLE PKTABLE (ptest1 int primary key);
+CREATE TEMP TABLE FKTABLE (ftest1 int);
+
+ALTER TABLE FKTABLE ADD CONSTRAINT fknd FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE;
+ALTER TABLE FKTABLE ADD CONSTRAINT fkdd FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED;
+ALTER TABLE FKTABLE ADD CONSTRAINT fkdi FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY IMMEDIATE;
+
+ALTER TABLE FKTABLE ADD CONSTRAINT fknd2 FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED;
+ALTER TABLE FKTABLE ALTER CONSTRAINT fknd2 NOT DEFERRABLE;
+ALTER TABLE FKTABLE ADD CONSTRAINT fkdd2 FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE;
+ALTER TABLE FKTABLE ALTER CONSTRAINT fkdd2 DEFERRABLE INITIALLY DEFERRED;
+ALTER TABLE FKTABLE ADD CONSTRAINT fkdi2 FOREIGN KEY(ftest1) REFERENCES pktable
+ ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE;
+ALTER TABLE FKTABLE ALTER CONSTRAINT fkdi2 DEFERRABLE INITIALLY IMMEDIATE;
+
+SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred
+FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint
+WHERE tgrelid = 'pktable'::regclass
+ORDER BY 1,2,3;
+SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred
+FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint
+WHERE tgrelid = 'fktable'::regclass
+ORDER BY 1,2,3;
+
+-- temp tables should go away by themselves, need not drop them.
+
+-- test check constraint adding
+
+create table atacc1 ( test int );
+-- add a check constraint
+alter table atacc1 add constraint atacc_test1 check (test>3);
+-- should fail
+insert into atacc1 (test) values (2);
+-- should succeed
+insert into atacc1 (test) values (4);
+drop table atacc1;
+
+-- let's do one where the check fails when added
+create table atacc1 ( test int );
+-- insert a soon to be failing row
+insert into atacc1 (test) values (2);
+-- add a check constraint (fails)
+alter table atacc1 add constraint atacc_test1 check (test>3);
+insert into atacc1 (test) values (4);
+drop table atacc1;
+
+-- let's do one where the check fails because the column doesn't exist
+create table atacc1 ( test int );
+-- add a check constraint (fails)
+alter table atacc1 add constraint atacc_test1 check (test1>3);
+drop table atacc1;
+
+-- something a little more complicated
+create table atacc1 ( test int, test2 int, test3 int);
+-- add a check constraint (fails)
+alter table atacc1 add constraint atacc_test1 check (test+test2<test3*4);
+-- should fail
+insert into atacc1 (test,test2,test3) values (4,4,2);
+-- should succeed
+insert into atacc1 (test,test2,test3) values (4,4,5);
+drop table atacc1;
+
+-- lets do some naming tests
+create table atacc1 (test int check (test>3), test2 int);
+alter table atacc1 add check (test2>test);
+-- should fail for $2
+insert into atacc1 (test2, test) values (3, 4);
+drop table atacc1;
+
+-- inheritance related tests
+create table atacc1 (test int);
+create table atacc2 (test2 int);
+create table atacc3 (test3 int) inherits (atacc1, atacc2);
+alter table atacc2 add constraint foo check (test2>0);
+-- fail and then succeed on atacc2
+insert into atacc2 (test2) values (-3);
+insert into atacc2 (test2) values (3);
+-- fail and then succeed on atacc3
+insert into atacc3 (test2) values (-3);
+insert into atacc3 (test2) values (3);
+drop table atacc3;
+drop table atacc2;
+drop table atacc1;
+
+-- same things with one created with INHERIT
+create table atacc1 (test int);
+create table atacc2 (test2 int);
+create table atacc3 (test3 int) inherits (atacc1, atacc2);
+alter table atacc3 no inherit atacc2;
+-- fail
+alter table atacc3 no inherit atacc2;
+-- make sure it really isn't a child
+insert into atacc3 (test2) values (3);
+select test2 from atacc2;
+-- fail due to missing constraint
+alter table atacc2 add constraint foo check (test2>0);
+alter table atacc3 inherit atacc2;
+-- fail due to missing column
+alter table atacc3 rename test2 to testx;
+alter table atacc3 inherit atacc2;
+-- fail due to mismatched data type
+alter table atacc3 add test2 bool;
+alter table atacc3 inherit atacc2;
+alter table atacc3 drop test2;
+-- succeed
+alter table atacc3 add test2 int;
+update atacc3 set test2 = 4 where test2 is null;
+alter table atacc3 add constraint foo check (test2>0);
+alter table atacc3 inherit atacc2;
+-- fail due to duplicates and circular inheritance
+alter table atacc3 inherit atacc2;
+alter table atacc2 inherit atacc3;
+alter table atacc2 inherit atacc2;
+-- test that we really are a child now (should see 4 not 3 and cascade should go through)
+select test2 from atacc2;
+drop table atacc2 cascade;
+drop table atacc1;
+
+-- adding only to a parent is allowed as of 9.2
+
+create table atacc1 (test int);
+create table atacc2 (test2 int) inherits (atacc1);
+-- ok:
+alter table atacc1 add constraint foo check (test>0) no inherit;
+-- check constraint is not there on child
+insert into atacc2 (test) values (-3);
+-- check constraint is there on parent
+insert into atacc1 (test) values (-3);
+insert into atacc1 (test) values (3);
+-- fail, violating row:
+alter table atacc2 add constraint foo check (test>0) no inherit;
+drop table atacc2;
+drop table atacc1;
+
+-- test unique constraint adding
+
+create table atacc1 ( test int ) ;
+-- add a unique constraint
+alter table atacc1 add constraint atacc_test1 unique (test);
+-- insert first value
+insert into atacc1 (test) values (2);
+-- should fail
+insert into atacc1 (test) values (2);
+-- should succeed
+insert into atacc1 (test) values (4);
+-- try to create duplicates via alter table using - should fail
+alter table atacc1 alter column test type integer using 0;
+drop table atacc1;
+
+-- let's do one where the unique constraint fails when added
+create table atacc1 ( test int );
+-- insert soon to be failing rows
+insert into atacc1 (test) values (2);
+insert into atacc1 (test) values (2);
+-- add a unique constraint (fails)
+alter table atacc1 add constraint atacc_test1 unique (test);
+insert into atacc1 (test) values (3);
+drop table atacc1;
+
+-- let's do one where the unique constraint fails
+-- because the column doesn't exist
+create table atacc1 ( test int );
+-- add a unique constraint (fails)
+alter table atacc1 add constraint atacc_test1 unique (test1);
+drop table atacc1;
+
+-- something a little more complicated
+create table atacc1 ( test int, test2 int);
+-- add a unique constraint
+alter table atacc1 add constraint atacc_test1 unique (test, test2);
+-- insert initial value
+insert into atacc1 (test,test2) values (4,4);
+-- should fail
+insert into atacc1 (test,test2) values (4,4);
+-- should all succeed
+insert into atacc1 (test,test2) values (4,5);
+insert into atacc1 (test,test2) values (5,4);
+insert into atacc1 (test,test2) values (5,5);
+drop table atacc1;
+
+-- lets do some naming tests
+create table atacc1 (test int, test2 int, unique(test));
+alter table atacc1 add unique (test2);
+-- should fail for @@ second one @@
+insert into atacc1 (test2, test) values (3, 3);
+insert into atacc1 (test2, test) values (2, 3);
+drop table atacc1;
+
+-- test primary key constraint adding
+
+create table atacc1 ( id serial, test int) ;
+-- add a primary key constraint
+alter table atacc1 add constraint atacc_test1 primary key (test);
+-- insert first value
+insert into atacc1 (test) values (2);
+-- should fail
+insert into atacc1 (test) values (2);
+-- should succeed
+insert into atacc1 (test) values (4);
+-- inserting NULL should fail
+insert into atacc1 (test) values(NULL);
+-- try adding a second primary key (should fail)
+alter table atacc1 add constraint atacc_oid1 primary key(id);
+-- drop first primary key constraint
+alter table atacc1 drop constraint atacc_test1 restrict;
+-- try adding a primary key on oid (should succeed)
+alter table atacc1 add constraint atacc_oid1 primary key(id);
+drop table atacc1;
+
+-- let's do one where the primary key constraint fails when added
+create table atacc1 ( test int );
+-- insert soon to be failing rows
+insert into atacc1 (test) values (2);
+insert into atacc1 (test) values (2);
+-- add a primary key (fails)
+alter table atacc1 add constraint atacc_test1 primary key (test);
+insert into atacc1 (test) values (3);
+drop table atacc1;
+
+-- let's do another one where the primary key constraint fails when added
+create table atacc1 ( test int );
+-- insert soon to be failing row
+insert into atacc1 (test) values (NULL);
+-- add a primary key (fails)
+alter table atacc1 add constraint atacc_test1 primary key (test);
+insert into atacc1 (test) values (3);
+drop table atacc1;
+
+-- let's do one where the primary key constraint fails
+-- because the column doesn't exist
+create table atacc1 ( test int );
+-- add a primary key constraint (fails)
+alter table atacc1 add constraint atacc_test1 primary key (test1);
+drop table atacc1;
+
+-- adding a new column as primary key to a non-empty table.
+-- should fail unless the column has a non-null default value.
+create table atacc1 ( test int );
+insert into atacc1 (test) values (0);
+-- add a primary key column without a default (fails).
+alter table atacc1 add column test2 int primary key;
+-- now add a primary key column with a default (succeeds).
+alter table atacc1 add column test2 int default 0 primary key;
+drop table atacc1;
+
+-- this combination used to have order-of-execution problems (bug #15580)
+create table atacc1 (a int);
+insert into atacc1 values(1);
+alter table atacc1
+ add column b float8 not null default random(),
+ add primary key(a);
+drop table atacc1;
+
+-- additionally, we've seen issues with foreign key validation not being
+-- properly delayed until after a table rewrite. Check that works ok.
+create table atacc1 (a int primary key);
+alter table atacc1 add constraint atacc1_fkey foreign key (a) references atacc1 (a) not valid;
+alter table atacc1 validate constraint atacc1_fkey, alter a type bigint;
+drop table atacc1;
+
+-- we've also seen issues with check constraints being validated at the wrong
+-- time when there's a pending table rewrite.
+create table atacc1 (a bigint, b int);
+insert into atacc1 values(1,1);
+alter table atacc1 add constraint atacc1_chk check(b = 1) not valid;
+alter table atacc1 validate constraint atacc1_chk, alter a type int;
+drop table atacc1;
+
+-- same as above, but ensure the constraint violation is detected
+create table atacc1 (a bigint, b int);
+insert into atacc1 values(1,2);
+alter table atacc1 add constraint atacc1_chk check(b = 1) not valid;
+alter table atacc1 validate constraint atacc1_chk, alter a type int;
+drop table atacc1;
+
+-- something a little more complicated
+create table atacc1 ( test int, test2 int);
+-- add a primary key constraint
+alter table atacc1 add constraint atacc_test1 primary key (test, test2);
+-- try adding a second primary key - should fail
+alter table atacc1 add constraint atacc_test2 primary key (test);
+-- insert initial value
+insert into atacc1 (test,test2) values (4,4);
+-- should fail
+insert into atacc1 (test,test2) values (4,4);
+insert into atacc1 (test,test2) values (NULL,3);
+insert into atacc1 (test,test2) values (3, NULL);
+insert into atacc1 (test,test2) values (NULL,NULL);
+-- should all succeed
+insert into atacc1 (test,test2) values (4,5);
+insert into atacc1 (test,test2) values (5,4);
+insert into atacc1 (test,test2) values (5,5);
+drop table atacc1;
+
+-- lets do some naming tests
+create table atacc1 (test int, test2 int, primary key(test));
+-- only first should succeed
+insert into atacc1 (test2, test) values (3, 3);
+insert into atacc1 (test2, test) values (2, 3);
+insert into atacc1 (test2, test) values (1, NULL);
+drop table atacc1;
+
+-- alter table / alter column [set/drop] not null tests
+-- try altering system catalogs, should fail
+alter table pg_class alter column relname drop not null;
+alter table pg_class alter relname set not null;
+
+-- try altering non-existent table, should fail
+alter table non_existent alter column bar set not null;
+alter table non_existent alter column bar drop not null;
+
+-- test setting columns to null and not null and vice versa
+-- test checking for null values and primary key
+create table atacc1 (test int not null);
+alter table atacc1 add constraint "atacc1_pkey" primary key (test);
+alter table atacc1 alter column test drop not null;
+alter table atacc1 drop constraint "atacc1_pkey";
+alter table atacc1 alter column test drop not null;
+insert into atacc1 values (null);
+alter table atacc1 alter test set not null;
+delete from atacc1;
+alter table atacc1 alter test set not null;
+
+-- try altering a non-existent column, should fail
+alter table atacc1 alter bar set not null;
+alter table atacc1 alter bar drop not null;
+
+-- try creating a view and altering that, should fail
+create view myview as select * from atacc1;
+alter table myview alter column test drop not null;
+alter table myview alter column test set not null;
+drop view myview;
+
+drop table atacc1;
+
+-- set not null verified by constraints
+create table atacc1 (test_a int, test_b int);
+insert into atacc1 values (null, 1);
+-- constraint not cover all values, should fail
+alter table atacc1 add constraint atacc1_constr_or check(test_a is not null or test_b < 10);
+alter table atacc1 alter test_a set not null;
+alter table atacc1 drop constraint atacc1_constr_or;
+-- not valid constraint, should fail
+alter table atacc1 add constraint atacc1_constr_invalid check(test_a is not null) not valid;
+alter table atacc1 alter test_a set not null;
+alter table atacc1 drop constraint atacc1_constr_invalid;
+-- with valid constraint
+update atacc1 set test_a = 1;
+alter table atacc1 add constraint atacc1_constr_a_valid check(test_a is not null);
+alter table atacc1 alter test_a set not null;
+delete from atacc1;
+
+insert into atacc1 values (2, null);
+alter table atacc1 alter test_a drop not null;
+-- test multiple set not null at same time
+-- test_a checked by atacc1_constr_a_valid, test_b should fail by table scan
+alter table atacc1 alter test_a set not null, alter test_b set not null;
+-- commands order has no importance
+alter table atacc1 alter test_b set not null, alter test_a set not null;
+
+-- valid one by table scan, one by check constraints
+update atacc1 set test_b = 1;
+alter table atacc1 alter test_b set not null, alter test_a set not null;
+
+alter table atacc1 alter test_a drop not null, alter test_b drop not null;
+-- both column has check constraints
+alter table atacc1 add constraint atacc1_constr_b_valid check(test_b is not null);
+alter table atacc1 alter test_b set not null, alter test_a set not null;
+drop table atacc1;
+
+-- test inheritance
+create table parent (a int);
+create table child (b varchar(255)) inherits (parent);
+
+alter table parent alter a set not null;
+insert into parent values (NULL);
+insert into child (a, b) values (NULL, 'foo');
+alter table parent alter a drop not null;
+insert into parent values (NULL);
+insert into child (a, b) values (NULL, 'foo');
+alter table only parent alter a set not null;
+alter table child alter a set not null;
+delete from parent;
+alter table only parent alter a set not null;
+insert into parent values (NULL);
+alter table child alter a set not null;
+insert into child (a, b) values (NULL, 'foo');
+delete from child;
+alter table child alter a set not null;
+insert into child (a, b) values (NULL, 'foo');
+drop table child;
+drop table parent;
+
+-- test setting and removing default values
+create table def_test (
+ c1 int4 default 5,
+ c2 text default 'initial_default'
+);
+insert into def_test default values;
+alter table def_test alter column c1 drop default;
+insert into def_test default values;
+alter table def_test alter column c2 drop default;
+insert into def_test default values;
+alter table def_test alter column c1 set default 10;
+alter table def_test alter column c2 set default 'new_default';
+insert into def_test default values;
+select * from def_test;
+
+-- set defaults to an incorrect type: this should fail
+alter table def_test alter column c1 set default 'wrong_datatype';
+alter table def_test alter column c2 set default 20;
+
+-- set defaults on a non-existent column: this should fail
+alter table def_test alter column c3 set default 30;
+
+-- set defaults on views: we need to create a view, add a rule
+-- to allow insertions into it, and then alter the view to add
+-- a default
+create view def_view_test as select * from def_test;
+create rule def_view_test_ins as
+ on insert to def_view_test
+ do instead insert into def_test select new.*;
+insert into def_view_test default values;
+alter table def_view_test alter column c1 set default 45;
+insert into def_view_test default values;
+alter table def_view_test alter column c2 set default 'view_default';
+insert into def_view_test default values;
+select * from def_view_test;
+
+drop rule def_view_test_ins on def_view_test;
+drop view def_view_test;
+drop table def_test;
+
+-- alter table / drop column tests
+-- try altering system catalogs, should fail
+alter table pg_class drop column relname;
+
+-- try altering non-existent table, should fail
+alter table nosuchtable drop column bar;
+
+-- test dropping columns
+create table atacc1 (a int4 not null, b int4, c int4 not null, d int4);
+insert into atacc1 values (1, 2, 3, 4);
+alter table atacc1 drop a;
+alter table atacc1 drop a;
+
+-- SELECTs
+select * from atacc1;
+select * from atacc1 order by a;
+select * from atacc1 order by "........pg.dropped.1........";
+select * from atacc1 group by a;
+select * from atacc1 group by "........pg.dropped.1........";
+select atacc1.* from atacc1;
+select a from atacc1;
+select atacc1.a from atacc1;
+select b,c,d from atacc1;
+select a,b,c,d from atacc1;
+select * from atacc1 where a = 1;
+select "........pg.dropped.1........" from atacc1;
+select atacc1."........pg.dropped.1........" from atacc1;
+select "........pg.dropped.1........",b,c,d from atacc1;
+select * from atacc1 where "........pg.dropped.1........" = 1;
+
+-- UPDATEs
+update atacc1 set a = 3;
+update atacc1 set b = 2 where a = 3;
+update atacc1 set "........pg.dropped.1........" = 3;
+update atacc1 set b = 2 where "........pg.dropped.1........" = 3;
+
+-- INSERTs
+insert into atacc1 values (10, 11, 12, 13);
+insert into atacc1 values (default, 11, 12, 13);
+insert into atacc1 values (11, 12, 13);
+insert into atacc1 (a) values (10);
+insert into atacc1 (a) values (default);
+insert into atacc1 (a,b,c,d) values (10,11,12,13);
+insert into atacc1 (a,b,c,d) values (default,11,12,13);
+insert into atacc1 (b,c,d) values (11,12,13);
+insert into atacc1 ("........pg.dropped.1........") values (10);
+insert into atacc1 ("........pg.dropped.1........") values (default);
+insert into atacc1 ("........pg.dropped.1........",b,c,d) values (10,11,12,13);
+insert into atacc1 ("........pg.dropped.1........",b,c,d) values (default,11,12,13);
+
+-- DELETEs
+delete from atacc1 where a = 3;
+delete from atacc1 where "........pg.dropped.1........" = 3;
+delete from atacc1;
+
+-- try dropping a non-existent column, should fail
+alter table atacc1 drop bar;
+
+-- try removing an oid column, should succeed (as it's nonexistent)
+alter table atacc1 SET WITHOUT OIDS;
+
+-- try adding an oid column, should fail (not supported)
+alter table atacc1 SET WITH OIDS;
+
+-- try dropping the xmin column, should fail
+alter table atacc1 drop xmin;
+
+-- try creating a view and altering that, should fail
+create view myview as select * from atacc1;
+select * from myview;
+alter table myview drop d;
+drop view myview;
+
+-- test some commands to make sure they fail on the dropped column
+analyze atacc1(a);
+analyze atacc1("........pg.dropped.1........");
+vacuum analyze atacc1(a);
+vacuum analyze atacc1("........pg.dropped.1........");
+comment on column atacc1.a is 'testing';
+comment on column atacc1."........pg.dropped.1........" is 'testing';
+alter table atacc1 alter a set storage plain;
+alter table atacc1 alter "........pg.dropped.1........" set storage plain;
+alter table atacc1 alter a set statistics 0;
+alter table atacc1 alter "........pg.dropped.1........" set statistics 0;
+alter table atacc1 alter a set default 3;
+alter table atacc1 alter "........pg.dropped.1........" set default 3;
+alter table atacc1 alter a drop default;
+alter table atacc1 alter "........pg.dropped.1........" drop default;
+alter table atacc1 alter a set not null;
+alter table atacc1 alter "........pg.dropped.1........" set not null;
+alter table atacc1 alter a drop not null;
+alter table atacc1 alter "........pg.dropped.1........" drop not null;
+alter table atacc1 rename a to x;
+alter table atacc1 rename "........pg.dropped.1........" to x;
+alter table atacc1 add primary key(a);
+alter table atacc1 add primary key("........pg.dropped.1........");
+alter table atacc1 add unique(a);
+alter table atacc1 add unique("........pg.dropped.1........");
+alter table atacc1 add check (a > 3);
+alter table atacc1 add check ("........pg.dropped.1........" > 3);
+create table atacc2 (id int4 unique);
+alter table atacc1 add foreign key (a) references atacc2(id);
+alter table atacc1 add foreign key ("........pg.dropped.1........") references atacc2(id);
+alter table atacc2 add foreign key (id) references atacc1(a);
+alter table atacc2 add foreign key (id) references atacc1("........pg.dropped.1........");
+drop table atacc2;
+create index "testing_idx" on atacc1(a);
+create index "testing_idx" on atacc1("........pg.dropped.1........");
+
+-- test create as and select into
+insert into atacc1 values (21, 22, 23);
+create table attest1 as select * from atacc1;
+select * from attest1;
+drop table attest1;
+select * into attest2 from atacc1;
+select * from attest2;
+drop table attest2;
+
+-- try dropping all columns
+alter table atacc1 drop c;
+alter table atacc1 drop d;
+alter table atacc1 drop b;
+select * from atacc1;
+
+drop table atacc1;
+
+-- test constraint error reporting in presence of dropped columns
+create table atacc1 (id serial primary key, value int check (value < 10));
+insert into atacc1(value) values (100);
+alter table atacc1 drop column value;
+alter table atacc1 add column value int check (value < 10);
+insert into atacc1(value) values (100);
+insert into atacc1(id, value) values (null, 0);
+drop table atacc1;
+
+-- test inheritance
+create table parent (a int, b int, c int);
+insert into parent values (1, 2, 3);
+alter table parent drop a;
+create table child (d varchar(255)) inherits (parent);
+insert into child values (12, 13, 'testing');
+
+select * from parent;
+select * from child;
+alter table parent drop c;
+select * from parent;
+select * from child;
+
+drop table child;
+drop table parent;
+
+-- check error cases for inheritance column merging
+create table parent (a float8, b numeric(10,4), c text collate "C");
+
+create table child (a float4) inherits (parent); -- fail
+create table child (b decimal(10,7)) inherits (parent); -- fail
+create table child (c text collate "POSIX") inherits (parent); -- fail
+create table child (a double precision, b decimal(10,4)) inherits (parent);
+
+drop table child;
+drop table parent;
+
+-- test copy in/out
+create table attest (a int4, b int4, c int4);
+insert into attest values (1,2,3);
+alter table attest drop a;
+copy attest to stdout;
+copy attest(a) to stdout;
+copy attest("........pg.dropped.1........") to stdout;
+copy attest from stdin;
+10 11 12
+\.
+select * from attest;
+copy attest from stdin;
+21 22
+\.
+select * from attest;
+copy attest(a) from stdin;
+copy attest("........pg.dropped.1........") from stdin;
+copy attest(b,c) from stdin;
+31 32
+\.
+select * from attest;
+drop table attest;
+
+-- test inheritance
+
+create table dropColumn (a int, b int, e int);
+create table dropColumnChild (c int) inherits (dropColumn);
+create table dropColumnAnother (d int) inherits (dropColumnChild);
+
+-- these two should fail
+alter table dropColumnchild drop column a;
+alter table only dropColumnChild drop column b;
+
+
+
+-- these three should work
+alter table only dropColumn drop column e;
+alter table dropColumnChild drop column c;
+alter table dropColumn drop column a;
+
+create table renameColumn (a int);
+create table renameColumnChild (b int) inherits (renameColumn);
+create table renameColumnAnother (c int) inherits (renameColumnChild);
+
+-- these three should fail
+alter table renameColumnChild rename column a to d;
+alter table only renameColumnChild rename column a to d;
+alter table only renameColumn rename column a to d;
+
+-- these should work
+alter table renameColumn rename column a to d;
+alter table renameColumnChild rename column b to a;
+
+-- these should work
+alter table if exists doesnt_exist_tab rename column a to d;
+alter table if exists doesnt_exist_tab rename column b to a;
+
+-- this should work
+alter table renameColumn add column w int;
+
+-- this should fail
+alter table only renameColumn add column x int;
+
+
+-- Test corner cases in dropping of inherited columns
+
+create table p1 (f1 int, f2 int);
+create table c1 (f1 int not null) inherits(p1);
+
+-- should be rejected since c1.f1 is inherited
+alter table c1 drop column f1;
+-- should work
+alter table p1 drop column f1;
+-- c1.f1 is still there, but no longer inherited
+select f1 from c1;
+alter table c1 drop column f1;
+select f1 from c1;
+
+drop table p1 cascade;
+
+create table p1 (f1 int, f2 int);
+create table c1 () inherits(p1);
+
+-- should be rejected since c1.f1 is inherited
+alter table c1 drop column f1;
+alter table p1 drop column f1;
+-- c1.f1 is dropped now, since there is no local definition for it
+select f1 from c1;
+
+drop table p1 cascade;
+
+create table p1 (f1 int, f2 int);
+create table c1 () inherits(p1);
+
+-- should be rejected since c1.f1 is inherited
+alter table c1 drop column f1;
+alter table only p1 drop column f1;
+-- c1.f1 is NOT dropped, but must now be considered non-inherited
+alter table c1 drop column f1;
+
+drop table p1 cascade;
+
+create table p1 (f1 int, f2 int);
+create table c1 (f1 int not null) inherits(p1);
+
+-- should be rejected since c1.f1 is inherited
+alter table c1 drop column f1;
+alter table only p1 drop column f1;
+-- c1.f1 is still there, but no longer inherited
+alter table c1 drop column f1;
+
+drop table p1 cascade;
+
+create table p1(id int, name text);
+create table p2(id2 int, name text, height int);
+create table c1(age int) inherits(p1,p2);
+create table gc1() inherits (c1);
+
+select relname, attname, attinhcount, attislocal
+from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid)
+where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped
+order by relname, attnum;
+
+-- should work
+alter table only p1 drop column name;
+-- should work. Now c1.name is local and inhcount is 0.
+alter table p2 drop column name;
+-- should be rejected since its inherited
+alter table gc1 drop column name;
+-- should work, and drop gc1.name along
+alter table c1 drop column name;
+-- should fail: column does not exist
+alter table gc1 drop column name;
+-- should work and drop the attribute in all tables
+alter table p2 drop column height;
+
+-- IF EXISTS test
+create table dropColumnExists ();
+alter table dropColumnExists drop column non_existing; --fail
+alter table dropColumnExists drop column if exists non_existing; --succeed
+
+select relname, attname, attinhcount, attislocal
+from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid)
+where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped
+order by relname, attnum;
+
+drop table p1, p2 cascade;
+
+-- test attinhcount tracking with merged columns
+
+create table depth0();
+create table depth1(c text) inherits (depth0);
+create table depth2() inherits (depth1);
+alter table depth0 add c text;
+
+select attrelid::regclass, attname, attinhcount, attislocal
+from pg_attribute
+where attnum > 0 and attrelid::regclass in ('depth0', 'depth1', 'depth2')
+order by attrelid::regclass::text, attnum;
+
+-- test renumbering of child-table columns in inherited operations
+
+create table p1 (f1 int);
+create table c1 (f2 text, f3 int) inherits (p1);
+
+alter table p1 add column a1 int check (a1 > 0);
+alter table p1 add column f2 text;
+
+insert into p1 values (1,2,'abc');
+insert into c1 values(11,'xyz',33,0); -- should fail
+insert into c1 values(11,'xyz',33,22);
+
+select * from p1;
+update p1 set a1 = a1 + 1, f2 = upper(f2);
+select * from p1;
+
+drop table p1 cascade;
+
+-- test that operations with a dropped column do not try to reference
+-- its datatype
+
+create domain mytype as text;
+create temp table foo (f1 text, f2 mytype, f3 text);
+
+insert into foo values('bb','cc','dd');
+select * from foo;
+
+drop domain mytype cascade;
+
+select * from foo;
+insert into foo values('qq','rr');
+select * from foo;
+update foo set f3 = 'zz';
+select * from foo;
+select f3,max(f1) from foo group by f3;
+
+-- Simple tests for alter table column type
+alter table foo alter f1 TYPE integer; -- fails
+alter table foo alter f1 TYPE varchar(10);
+
+create table anothertab (atcol1 serial8, atcol2 boolean,
+ constraint anothertab_chk check (atcol1 <= 3));
+
+insert into anothertab (atcol1, atcol2) values (default, true);
+insert into anothertab (atcol1, atcol2) values (default, false);
+select * from anothertab;
+
+alter table anothertab alter column atcol1 type boolean; -- fails
+alter table anothertab alter column atcol1 type boolean using atcol1::int; -- fails
+alter table anothertab alter column atcol1 type integer;
+
+select * from anothertab;
+
+insert into anothertab (atcol1, atcol2) values (45, null); -- fails
+insert into anothertab (atcol1, atcol2) values (default, null);
+
+select * from anothertab;
+
+alter table anothertab alter column atcol2 type text
+ using case when atcol2 is true then 'IT WAS TRUE'
+ when atcol2 is false then 'IT WAS FALSE'
+ else 'IT WAS NULL!' end;
+
+select * from anothertab;
+alter table anothertab alter column atcol1 type boolean
+ using case when atcol1 % 2 = 0 then true else false end; -- fails
+alter table anothertab alter column atcol1 drop default;
+alter table anothertab alter column atcol1 type boolean
+ using case when atcol1 % 2 = 0 then true else false end; -- fails
+alter table anothertab drop constraint anothertab_chk;
+alter table anothertab drop constraint anothertab_chk; -- fails
+alter table anothertab drop constraint IF EXISTS anothertab_chk; -- succeeds
+
+alter table anothertab alter column atcol1 type boolean
+ using case when atcol1 % 2 = 0 then true else false end;
+
+select * from anothertab;
+
+drop table anothertab;
+
+-- Test index handling in alter table column type (cf. bugs #15835, #15865)
+create table anothertab(f1 int primary key, f2 int unique,
+ f3 int, f4 int, f5 int);
+alter table anothertab
+ add exclude using btree (f3 with =);
+alter table anothertab
+ add exclude using btree (f4 with =) where (f4 is not null);
+alter table anothertab
+ add exclude using btree (f4 with =) where (f5 > 0);
+alter table anothertab
+ add unique(f1,f4);
+create index on anothertab(f2,f3);
+create unique index on anothertab(f4);
+
+\d anothertab
+alter table anothertab alter column f1 type bigint;
+alter table anothertab
+ alter column f2 type bigint,
+ alter column f3 type bigint,
+ alter column f4 type bigint;
+alter table anothertab alter column f5 type bigint;
+\d anothertab
+
+drop table anothertab;
+
+-- test that USING expressions are parsed before column alter type / drop steps
+create table another (f1 int, f2 text, f3 text);
+
+insert into another values(1, 'one', 'uno');
+insert into another values(2, 'two', 'due');
+insert into another values(3, 'three', 'tre');
+
+select * from another;
+
+alter table another
+ alter f1 type text using f2 || ' and ' || f3 || ' more',
+ alter f2 type bigint using f1 * 10,
+ drop column f3;
+
+select * from another;
+
+drop table another;
+
+-- Create an index that skips WAL, then perform a SET DATA TYPE that skips
+-- rewriting the index.
+begin;
+create table skip_wal_skip_rewrite_index (c varchar(10) primary key);
+alter table skip_wal_skip_rewrite_index alter c type varchar(20);
+commit;
+
+-- We disallow changing table's row type if it's used for storage
+create table at_tab1 (a int, b text);
+create table at_tab2 (x int, y at_tab1);
+alter table at_tab1 alter column b type varchar; -- fails
+drop table at_tab2;
+-- Use of row type in an expression is defended differently
+create table at_tab2 (x int, y text, check((x,y)::at_tab1 = (1,'42')::at_tab1));
+alter table at_tab1 alter column b type varchar; -- allowed, but ...
+insert into at_tab2 values(1,'42'); -- ... this will fail
+drop table at_tab1, at_tab2;
+-- Check it for a partitioned table, too
+create table at_tab1 (a int, b text) partition by list(a);
+create table at_tab2 (x int, y at_tab1);
+alter table at_tab1 alter column b type varchar; -- fails
+drop table at_tab1, at_tab2;
+
+-- Alter column type that's part of a partitioned index
+create table at_partitioned (a int, b text) partition by range (a);
+create table at_part_1 partition of at_partitioned for values from (0) to (1000);
+insert into at_partitioned values (512, '0.123');
+create table at_part_2 (b text, a int);
+insert into at_part_2 values ('1.234', 1024);
+create index on at_partitioned (b);
+create index on at_partitioned (a);
+\d at_part_1
+\d at_part_2
+alter table at_partitioned attach partition at_part_2 for values from (1000) to (2000);
+\d at_part_2
+alter table at_partitioned alter column b type numeric using b::numeric;
+\d at_part_1
+\d at_part_2
+drop table at_partitioned;
+
+-- Alter column type when no table rewrite is required
+-- Also check that comments are preserved
+create table at_partitioned(id int, name varchar(64), unique (id, name))
+ partition by hash(id);
+comment on constraint at_partitioned_id_name_key on at_partitioned is 'parent constraint';
+comment on index at_partitioned_id_name_key is 'parent index';
+create table at_partitioned_0 partition of at_partitioned
+ for values with (modulus 2, remainder 0);
+comment on constraint at_partitioned_0_id_name_key on at_partitioned_0 is 'child 0 constraint';
+comment on index at_partitioned_0_id_name_key is 'child 0 index';
+create table at_partitioned_1 partition of at_partitioned
+ for values with (modulus 2, remainder 1);
+comment on constraint at_partitioned_1_id_name_key on at_partitioned_1 is 'child 1 constraint';
+comment on index at_partitioned_1_id_name_key is 'child 1 index';
+insert into at_partitioned values(1, 'foo');
+insert into at_partitioned values(3, 'bar');
+
+create temp table old_oids as
+ select relname, oid as oldoid, relfilenode as oldfilenode
+ from pg_class where relname like 'at_partitioned%';
+
+select relname,
+ c.oid = oldoid as orig_oid,
+ case relfilenode
+ when 0 then 'none'
+ when c.oid then 'own'
+ when oldfilenode then 'orig'
+ else 'OTHER'
+ end as storage,
+ obj_description(c.oid, 'pg_class') as desc
+ from pg_class c left join old_oids using (relname)
+ where relname like 'at_partitioned%'
+ order by relname;
+
+select conname, obj_description(oid, 'pg_constraint') as desc
+ from pg_constraint where conname like 'at_partitioned%'
+ order by conname;
+
+alter table at_partitioned alter column name type varchar(127);
+
+-- Note: these tests currently show the wrong behavior for comments :-(
+
+select relname,
+ c.oid = oldoid as orig_oid,
+ case relfilenode
+ when 0 then 'none'
+ when c.oid then 'own'
+ when oldfilenode then 'orig'
+ else 'OTHER'
+ end as storage,
+ obj_description(c.oid, 'pg_class') as desc
+ from pg_class c left join old_oids using (relname)
+ where relname like 'at_partitioned%'
+ order by relname;
+
+select conname, obj_description(oid, 'pg_constraint') as desc
+ from pg_constraint where conname like 'at_partitioned%'
+ order by conname;
+
+-- Don't remove this DROP, it exposes bug #15672
+drop table at_partitioned;
+
+-- disallow recursive containment of row types
+create temp table recur1 (f1 int);
+alter table recur1 add column f2 recur1; -- fails
+alter table recur1 add column f2 recur1[]; -- fails
+create domain array_of_recur1 as recur1[];
+alter table recur1 add column f2 array_of_recur1; -- fails
+create temp table recur2 (f1 int, f2 recur1);
+alter table recur1 add column f2 recur2; -- fails
+alter table recur1 add column f2 int;
+alter table recur1 alter column f2 type recur2; -- fails
+
+-- SET STORAGE may need to add a TOAST table
+create table test_storage (a text);
+alter table test_storage alter a set storage plain;
+alter table test_storage add b int default 0; -- rewrite table to remove its TOAST table
+alter table test_storage alter a set storage extended; -- re-add TOAST table
+
+select reltoastrelid <> 0 as has_toast_table
+from pg_class
+where oid = 'test_storage'::regclass;
+
+-- test that SET STORAGE propagates to index correctly
+create index test_storage_idx on test_storage (b, a);
+alter table test_storage alter column a set storage external;
+\d+ test_storage
+\d+ test_storage_idx
+
+-- ALTER COLUMN TYPE with a check constraint and a child table (bug #13779)
+CREATE TABLE test_inh_check (a float check (a > 10.2), b float);
+CREATE TABLE test_inh_check_child() INHERITS(test_inh_check);
+\d test_inh_check
+\d test_inh_check_child
+select relname, conname, coninhcount, conislocal, connoinherit
+ from pg_constraint c, pg_class r
+ where relname like 'test_inh_check%' and c.conrelid = r.oid
+ order by 1, 2;
+ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric;
+\d test_inh_check
+\d test_inh_check_child
+select relname, conname, coninhcount, conislocal, connoinherit
+ from pg_constraint c, pg_class r
+ where relname like 'test_inh_check%' and c.conrelid = r.oid
+ order by 1, 2;
+-- also try noinherit, local, and local+inherited cases
+ALTER TABLE test_inh_check ADD CONSTRAINT bnoinherit CHECK (b > 100) NO INHERIT;
+ALTER TABLE test_inh_check_child ADD CONSTRAINT blocal CHECK (b < 1000);
+ALTER TABLE test_inh_check_child ADD CONSTRAINT bmerged CHECK (b > 1);
+ALTER TABLE test_inh_check ADD CONSTRAINT bmerged CHECK (b > 1);
+\d test_inh_check
+\d test_inh_check_child
+select relname, conname, coninhcount, conislocal, connoinherit
+ from pg_constraint c, pg_class r
+ where relname like 'test_inh_check%' and c.conrelid = r.oid
+ order by 1, 2;
+ALTER TABLE test_inh_check ALTER COLUMN b TYPE numeric;
+\d test_inh_check
+\d test_inh_check_child
+select relname, conname, coninhcount, conislocal, connoinherit
+ from pg_constraint c, pg_class r
+ where relname like 'test_inh_check%' and c.conrelid = r.oid
+ order by 1, 2;
+
+-- ALTER COLUMN TYPE with different schema in children
+-- Bug at https://postgr.es/m/20170102225618.GA10071@telsasoft.com
+CREATE TABLE test_type_diff (f1 int);
+CREATE TABLE test_type_diff_c (extra smallint) INHERITS (test_type_diff);
+ALTER TABLE test_type_diff ADD COLUMN f2 int;
+INSERT INTO test_type_diff_c VALUES (1, 2, 3);
+ALTER TABLE test_type_diff ALTER COLUMN f2 TYPE bigint USING f2::bigint;
+
+CREATE TABLE test_type_diff2 (int_two int2, int_four int4, int_eight int8);
+CREATE TABLE test_type_diff2_c1 (int_four int4, int_eight int8, int_two int2);
+CREATE TABLE test_type_diff2_c2 (int_eight int8, int_two int2, int_four int4);
+CREATE TABLE test_type_diff2_c3 (int_two int2, int_four int4, int_eight int8);
+ALTER TABLE test_type_diff2_c1 INHERIT test_type_diff2;
+ALTER TABLE test_type_diff2_c2 INHERIT test_type_diff2;
+ALTER TABLE test_type_diff2_c3 INHERIT test_type_diff2;
+INSERT INTO test_type_diff2_c1 VALUES (1, 2, 3);
+INSERT INTO test_type_diff2_c2 VALUES (4, 5, 6);
+INSERT INTO test_type_diff2_c3 VALUES (7, 8, 9);
+ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int8 USING int_four::int8;
+-- whole-row references are disallowed
+ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int4 USING (pg_column_size(test_type_diff2));
+
+-- check for rollback of ANALYZE corrupting table property flags (bug #11638)
+CREATE TABLE check_fk_presence_1 (id int PRIMARY KEY, t text);
+CREATE TABLE check_fk_presence_2 (id int REFERENCES check_fk_presence_1, t text);
+BEGIN;
+ALTER TABLE check_fk_presence_2 DROP CONSTRAINT check_fk_presence_2_id_fkey;
+ANALYZE check_fk_presence_2;
+ROLLBACK;
+\d check_fk_presence_2
+DROP TABLE check_fk_presence_1, check_fk_presence_2;
+
+-- check column addition within a view (bug #14876)
+create table at_base_table(id int, stuff text);
+insert into at_base_table values (23, 'skidoo');
+create view at_view_1 as select * from at_base_table bt;
+create view at_view_2 as select *, to_json(v1) as j from at_view_1 v1;
+\d+ at_view_1
+\d+ at_view_2
+explain (verbose, costs off) select * from at_view_2;
+select * from at_view_2;
+
+create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt;
+\d+ at_view_1
+\d+ at_view_2
+explain (verbose, costs off) select * from at_view_2;
+select * from at_view_2;
+
+drop view at_view_2;
+drop view at_view_1;
+drop table at_base_table;
+
+-- check adding a column not iself requiring a rewrite, together with
+-- a column requiring a default (bug #16038)
+
+-- ensure that rewrites aren't silently optimized away, removing the
+-- value of the test
+CREATE FUNCTION check_ddl_rewrite(p_tablename regclass, p_ddl text)
+RETURNS boolean
+LANGUAGE plpgsql AS $$
+DECLARE
+ v_relfilenode oid;
+BEGIN
+ v_relfilenode := relfilenode FROM pg_class WHERE oid = p_tablename;
+
+ EXECUTE p_ddl;
+
+ RETURN v_relfilenode <> (SELECT relfilenode FROM pg_class WHERE oid = p_tablename);
+END;
+$$;
+
+CREATE TABLE rewrite_test(col text);
+INSERT INTO rewrite_test VALUES ('something');
+INSERT INTO rewrite_test VALUES (NULL);
+
+-- empty[12] don't need rewrite, but notempty[12]_rewrite will force one
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN empty1 text,
+ ADD COLUMN notempty1_rewrite serial;
+$$);
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN notempty2_rewrite serial,
+ ADD COLUMN empty2 text;
+$$);
+-- also check that fast defaults cause no problem, first without rewrite
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN empty3 text,
+ ADD COLUMN notempty3_norewrite int default 42;
+$$);
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN notempty4_norewrite int default 42,
+ ADD COLUMN empty4 text;
+$$);
+-- then with rewrite
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN empty5 text,
+ ADD COLUMN notempty5_norewrite int default 42,
+ ADD COLUMN notempty5_rewrite serial;
+$$);
+SELECT check_ddl_rewrite('rewrite_test', $$
+ ALTER TABLE rewrite_test
+ ADD COLUMN notempty6_rewrite serial,
+ ADD COLUMN empty6 text,
+ ADD COLUMN notempty6_norewrite int default 42;
+$$);
+
+-- cleanup
+DROP FUNCTION check_ddl_rewrite(regclass, text);
+DROP TABLE rewrite_test;
+
+--
+-- lock levels
+--
+drop type lockmodes;
+create type lockmodes as enum (
+ 'SIReadLock'
+,'AccessShareLock'
+,'RowShareLock'
+,'RowExclusiveLock'
+,'ShareUpdateExclusiveLock'
+,'ShareLock'
+,'ShareRowExclusiveLock'
+,'ExclusiveLock'
+,'AccessExclusiveLock'
+);
+
+drop view my_locks;
+create or replace view my_locks as
+select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode
+from pg_locks l join pg_class c on l.relation = c.oid
+where virtualtransaction = (
+ select virtualtransaction
+ from pg_locks
+ where transactionid = pg_current_xact_id()::xid)
+and locktype = 'relation'
+and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog')
+and c.relname != 'my_locks'
+group by c.relname;
+
+create table alterlock (f1 int primary key, f2 text);
+insert into alterlock values (1, 'foo');
+create table alterlock2 (f3 int primary key, f1 int);
+insert into alterlock2 values (1, 1);
+
+begin; alter table alterlock alter column f2 set statistics 150;
+select * from my_locks order by 1;
+rollback;
+
+begin; alter table alterlock cluster on alterlock_pkey;
+select * from my_locks order by 1;
+commit;
+
+begin; alter table alterlock set without cluster;
+select * from my_locks order by 1;
+commit;
+
+begin; alter table alterlock set (fillfactor = 100);
+select * from my_locks order by 1;
+commit;
+
+begin; alter table alterlock reset (fillfactor);
+select * from my_locks order by 1;
+commit;
+
+begin; alter table alterlock set (toast.autovacuum_enabled = off);
+select * from my_locks order by 1;
+commit;
+
+begin; alter table alterlock set (autovacuum_enabled = off);
+select * from my_locks order by 1;
+commit;
+
+begin; alter table alterlock alter column f2 set (n_distinct = 1);
+select * from my_locks order by 1;
+rollback;
+
+-- test that mixing options with different lock levels works as expected
+begin; alter table alterlock set (autovacuum_enabled = off, fillfactor = 80);
+select * from my_locks order by 1;
+commit;
+
+begin; alter table alterlock alter column f2 set storage extended;
+select * from my_locks order by 1;
+rollback;
+
+begin; alter table alterlock alter column f2 set default 'x';
+select * from my_locks order by 1;
+rollback;
+
+begin;
+create trigger ttdummy
+ before delete or update on alterlock
+ for each row
+ execute procedure
+ ttdummy (1, 1);
+select * from my_locks order by 1;
+rollback;
+
+begin;
+select * from my_locks order by 1;
+alter table alterlock2 add foreign key (f1) references alterlock (f1);
+select * from my_locks order by 1;
+rollback;
+
+begin;
+alter table alterlock2
+add constraint alterlock2nv foreign key (f1) references alterlock (f1) NOT VALID;
+select * from my_locks order by 1;
+commit;
+begin;
+alter table alterlock2 validate constraint alterlock2nv;
+select * from my_locks order by 1;
+rollback;
+
+create or replace view my_locks as
+select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode
+from pg_locks l join pg_class c on l.relation = c.oid
+where virtualtransaction = (
+ select virtualtransaction
+ from pg_locks
+ where transactionid = pg_current_xact_id()::xid)
+and locktype = 'relation'
+and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog')
+and c.relname = 'my_locks'
+group by c.relname;
+
+-- raise exception
+alter table my_locks set (autovacuum_enabled = false);
+alter view my_locks set (autovacuum_enabled = false);
+alter table my_locks reset (autovacuum_enabled);
+alter view my_locks reset (autovacuum_enabled);
+
+begin;
+alter view my_locks set (security_barrier=off);
+select * from my_locks order by 1;
+alter view my_locks reset (security_barrier);
+rollback;
+
+-- this test intentionally applies the ALTER TABLE command against a view, but
+-- uses a view option so we expect this to succeed. This form of SQL is
+-- accepted for historical reasons, as shown in the docs for ALTER VIEW
+begin;
+alter table my_locks set (security_barrier=off);
+select * from my_locks order by 1;
+alter table my_locks reset (security_barrier);
+rollback;
+
+-- cleanup
+drop table alterlock2;
+drop table alterlock;
+drop view my_locks;
+drop type lockmodes;
+
+--
+-- alter function
+--
+create function test_strict(text) returns text as
+ 'select coalesce($1, ''got passed a null'');'
+ language sql returns null on null input;
+select test_strict(NULL);
+alter function test_strict(text) called on null input;
+select test_strict(NULL);
+
+create function non_strict(text) returns text as
+ 'select coalesce($1, ''got passed a null'');'
+ language sql called on null input;
+select non_strict(NULL);
+alter function non_strict(text) returns null on null input;
+select non_strict(NULL);
+
+--
+-- alter object set schema
+--
+
+create schema alter1;
+create schema alter2;
+
+create table alter1.t1(f1 serial primary key, f2 int check (f2 > 0));
+
+create view alter1.v1 as select * from alter1.t1;
+
+create function alter1.plus1(int) returns int as 'select $1+1' language sql;
+
+create domain alter1.posint integer check (value > 0);
+
+create type alter1.ctype as (f1 int, f2 text);
+
+create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql
+as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2';
+
+create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype);
+
+create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as
+ operator 1 alter1.=(alter1.ctype, alter1.ctype);
+
+create conversion alter1.latin1_to_utf8 for 'latin1' to 'utf8' from iso8859_1_to_utf8;
+
+create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype);
+create text search configuration alter1.cfg(parser = alter1.prs);
+create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize);
+create text search dictionary alter1.dict(template = alter1.tmpl);
+
+insert into alter1.t1(f2) values(11);
+insert into alter1.t1(f2) values(12);
+
+alter table alter1.t1 set schema alter1; -- no-op, same schema
+alter table alter1.t1 set schema alter2;
+alter table alter1.v1 set schema alter2;
+alter function alter1.plus1(int) set schema alter2;
+alter domain alter1.posint set schema alter2;
+alter operator class alter1.ctype_hash_ops using hash set schema alter2;
+alter operator family alter1.ctype_hash_ops using hash set schema alter2;
+alter operator alter1.=(alter1.ctype, alter1.ctype) set schema alter2;
+alter function alter1.same(alter1.ctype, alter1.ctype) set schema alter2;
+alter type alter1.ctype set schema alter1; -- no-op, same schema
+alter type alter1.ctype set schema alter2;
+alter conversion alter1.latin1_to_utf8 set schema alter2;
+alter text search parser alter1.prs set schema alter2;
+alter text search configuration alter1.cfg set schema alter2;
+alter text search template alter1.tmpl set schema alter2;
+alter text search dictionary alter1.dict set schema alter2;
+
+-- this should succeed because nothing is left in alter1
+drop schema alter1;
+
+insert into alter2.t1(f2) values(13);
+insert into alter2.t1(f2) values(14);
+
+select * from alter2.t1;
+
+select * from alter2.v1;
+
+select alter2.plus1(41);
+
+-- clean up
+drop schema alter2 cascade;
+
+--
+-- composite types
+--
+
+CREATE TYPE test_type AS (a int);
+\d test_type
+
+ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails
+
+ALTER TYPE test_type ADD ATTRIBUTE b text;
+\d test_type
+
+ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails
+
+ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar;
+\d test_type
+
+ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer;
+\d test_type
+
+ALTER TYPE test_type DROP ATTRIBUTE b;
+\d test_type
+
+ALTER TYPE test_type DROP ATTRIBUTE c; -- fails
+
+ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c;
+
+ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean;
+\d test_type
+
+ALTER TYPE test_type RENAME ATTRIBUTE a TO aa;
+ALTER TYPE test_type RENAME ATTRIBUTE d TO dd;
+\d test_type
+
+DROP TYPE test_type;
+
+CREATE TYPE test_type1 AS (a int, b text);
+CREATE TABLE test_tbl1 (x int, y test_type1);
+ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails
+
+CREATE TYPE test_type2 AS (a int, b text);
+CREATE TABLE test_tbl2 OF test_type2;
+CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2);
+\d test_type2
+\d test_tbl2
+
+ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails
+ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE;
+\d test_type2
+\d test_tbl2
+
+ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails
+ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE;
+\d test_type2
+\d test_tbl2
+
+ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails
+ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE;
+\d test_type2
+\d test_tbl2
+
+ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails
+ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE;
+\d test_type2
+\d test_tbl2
+\d test_tbl2_subclass
+
+DROP TABLE test_tbl2_subclass;
+
+CREATE TYPE test_typex AS (a int, b text);
+CREATE TABLE test_tblx (x int, y test_typex check ((y).a > 0));
+ALTER TYPE test_typex DROP ATTRIBUTE a; -- fails
+ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE;
+\d test_tblx
+DROP TABLE test_tblx;
+DROP TYPE test_typex;
+
+-- This test isn't that interesting on its own, but the purpose is to leave
+-- behind a table to test pg_upgrade with. The table has a composite type
+-- column in it, and the composite type has a dropped attribute.
+CREATE TYPE test_type3 AS (a int);
+CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3;
+ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int;
+
+CREATE TYPE test_type_empty AS ();
+DROP TYPE test_type_empty;
+
+--
+-- typed tables: OF / NOT OF
+--
+
+CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2));
+ALTER TYPE tt_t0 DROP ATTRIBUTE z;
+CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK
+CREATE TABLE tt1 (x int, y bigint); -- wrong base type
+CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod
+CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order
+CREATE TABLE tt4 (x int); -- too few columns
+CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too few columns
+CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent
+CREATE TABLE tt7 (x int, q text, y numeric(8,2));
+ALTER TABLE tt7 DROP q; -- OK
+
+ALTER TABLE tt0 OF tt_t0;
+ALTER TABLE tt1 OF tt_t0;
+ALTER TABLE tt2 OF tt_t0;
+ALTER TABLE tt3 OF tt_t0;
+ALTER TABLE tt4 OF tt_t0;
+ALTER TABLE tt5 OF tt_t0;
+ALTER TABLE tt6 OF tt_t0;
+ALTER TABLE tt7 OF tt_t0;
+
+CREATE TYPE tt_t1 AS (x int, y numeric(8,2));
+ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table
+ALTER TABLE tt7 NOT OF;
+\d tt7
+
+-- make sure we can drop a constraint on the parent but it remains on the child
+CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL));
+CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent);
+ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check";
+-- should fail
+INSERT INTO test_drop_constr_child (c) VALUES (NULL);
+DROP TABLE test_drop_constr_parent CASCADE;
+
+--
+-- IF EXISTS test
+--
+ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
+ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
+ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
+ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
+ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
+ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
+
+CREATE TABLE tt8(a int);
+CREATE SCHEMA alter2;
+
+ALTER TABLE IF EXISTS tt8 ADD COLUMN f int;
+ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f);
+ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10);
+ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
+ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
+ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
+
+\d alter2.tt8
+
+DROP TABLE alter2.tt8;
+DROP SCHEMA alter2;
+
+--
+-- Check conflicts between index and CHECK constraint names
+--
+CREATE TABLE tt9(c integer);
+ALTER TABLE tt9 ADD CHECK(c > 1);
+ALTER TABLE tt9 ADD CHECK(c > 2); -- picks nonconflicting name
+ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 3);
+ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 4); -- fail, dup name
+ALTER TABLE tt9 ADD UNIQUE(c);
+ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name
+ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key UNIQUE(c); -- fail, dup name
+ALTER TABLE tt9 ADD CONSTRAINT foo UNIQUE(c); -- fail, dup name
+ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key CHECK(c > 5); -- fail, dup name
+ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key2 CHECK(c > 6);
+ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name
+\d tt9
+DROP TABLE tt9;
+
+
+-- Check that comments on constraints and indexes are not lost at ALTER TABLE.
+CREATE TABLE comment_test (
+ id int,
+ positive_col int CHECK (positive_col > 0),
+ indexed_col int,
+ CONSTRAINT comment_test_pk PRIMARY KEY (id));
+CREATE INDEX comment_test_index ON comment_test(indexed_col);
+
+COMMENT ON COLUMN comment_test.id IS 'Column ''id'' on comment_test';
+COMMENT ON INDEX comment_test_index IS 'Simple index on comment_test';
+COMMENT ON CONSTRAINT comment_test_positive_col_check ON comment_test IS 'CHECK constraint on comment_test.positive_col';
+COMMENT ON CONSTRAINT comment_test_pk ON comment_test IS 'PRIMARY KEY constraint of comment_test';
+COMMENT ON INDEX comment_test_pk IS 'Index backing the PRIMARY KEY of comment_test';
+
+SELECT col_description('comment_test'::regclass, 1) as comment;
+SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2;
+SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2;
+
+-- Change the datatype of all the columns. ALTER TABLE is optimized to not
+-- rebuild an index if the new data type is binary compatible with the old
+-- one. Check do a dummy ALTER TABLE that doesn't change the datatype
+-- first, to test that no-op codepath, and another one that does.
+ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE int;
+ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE text;
+ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int;
+ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text;
+ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE int;
+ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE bigint;
+
+-- Check that the comments are intact.
+SELECT col_description('comment_test'::regclass, 1) as comment;
+SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2;
+SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2;
+
+-- Check compatibility for foreign keys and comments. This is done
+-- separately as rebuilding the column type of the parent leads
+-- to an error and would reduce the test scope.
+CREATE TABLE comment_test_child (
+ id text CONSTRAINT comment_test_child_fk REFERENCES comment_test);
+CREATE INDEX comment_test_child_fk ON comment_test_child(id);
+COMMENT ON COLUMN comment_test_child.id IS 'Column ''id'' on comment_test_child';
+COMMENT ON INDEX comment_test_child_fk IS 'Index backing the FOREIGN KEY of comment_test_child';
+COMMENT ON CONSTRAINT comment_test_child_fk ON comment_test_child IS 'FOREIGN KEY constraint of comment_test_child';
+
+-- Change column type of parent
+ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text;
+ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int USING id::integer;
+
+-- Comments should be intact
+SELECT col_description('comment_test_child'::regclass, 1) as comment;
+SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test_child'::regclass ORDER BY 1, 2;
+SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test_child'::regclass ORDER BY 1, 2;
+
+-- Check that we map relation oids to filenodes and back correctly. Only
+-- display bad mappings so the test output doesn't change all the time. A
+-- filenode function call can return NULL for a relation dropped concurrently
+-- with the call's surrounding query, so ignore a NULL mapped_oid for
+-- relations that no longer exist after all calls finish.
+CREATE TEMP TABLE filenode_mapping AS
+SELECT
+ oid, mapped_oid, reltablespace, relfilenode, relname
+FROM pg_class,
+ pg_filenode_relation(reltablespace, pg_relation_filenode(oid)) AS mapped_oid
+WHERE relkind IN ('r', 'i', 'S', 't', 'm') AND mapped_oid IS DISTINCT FROM oid;
+
+SELECT m.* FROM filenode_mapping m LEFT JOIN pg_class c ON c.oid = m.oid
+WHERE c.oid IS NOT NULL OR m.mapped_oid IS NOT NULL;
+
+-- Checks on creating and manipulation of user defined relations in
+-- pg_catalog.
+
+SHOW allow_system_table_mods;
+-- disallowed because of search_path issues with pg_dump
+CREATE TABLE pg_catalog.new_system_table();
+-- instead create in public first, move to catalog
+CREATE TABLE new_system_table(id serial primary key, othercol text);
+ALTER TABLE new_system_table SET SCHEMA pg_catalog;
+ALTER TABLE new_system_table SET SCHEMA public;
+ALTER TABLE new_system_table SET SCHEMA pg_catalog;
+-- will be ignored -- already there:
+ALTER TABLE new_system_table SET SCHEMA pg_catalog;
+ALTER TABLE new_system_table RENAME TO old_system_table;
+CREATE INDEX old_system_table__othercol ON old_system_table (othercol);
+INSERT INTO old_system_table(othercol) VALUES ('somedata'), ('otherdata');
+UPDATE old_system_table SET id = -id;
+DELETE FROM old_system_table WHERE othercol = 'somedata';
+TRUNCATE old_system_table;
+ALTER TABLE old_system_table DROP CONSTRAINT new_system_table_pkey;
+ALTER TABLE old_system_table DROP COLUMN othercol;
+DROP TABLE old_system_table;
+
+-- set logged
+CREATE UNLOGGED TABLE unlogged1(f1 SERIAL PRIMARY KEY, f2 TEXT);
+-- check relpersistence of an unlogged table
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
+UNION ALL
+SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
+UNION ALL
+SELECT 'toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1'
+ORDER BY relname;
+CREATE UNLOGGED TABLE unlogged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged1); -- foreign key
+CREATE UNLOGGED TABLE unlogged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged3); -- self-referencing foreign key
+ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key
+ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists
+ALTER TABLE unlogged1 SET LOGGED;
+-- check relpersistence of an unlogged table after changing to permanent
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
+UNION ALL
+SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
+UNION ALL
+SELECT 'toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1'
+ORDER BY relname;
+ALTER TABLE unlogged1 SET LOGGED; -- silently do nothing
+DROP TABLE unlogged3;
+DROP TABLE unlogged2;
+DROP TABLE unlogged1;
+-- set unlogged
+CREATE TABLE logged1(f1 SERIAL PRIMARY KEY, f2 TEXT);
+-- check relpersistence of a permanent table
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1'
+UNION ALL
+SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1'
+UNION ALL
+SELECT 'toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1'
+ORDER BY relname;
+CREATE TABLE logged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged1); -- foreign key
+CREATE TABLE logged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged3); -- self-referencing foreign key
+ALTER TABLE logged1 SET UNLOGGED; -- fails because a foreign key from a permanent table exists
+ALTER TABLE logged3 SET UNLOGGED; -- skip self-referencing foreign key
+ALTER TABLE logged2 SET UNLOGGED;
+ALTER TABLE logged1 SET UNLOGGED;
+-- check relpersistence of a permanent table after changing to unlogged
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1'
+UNION ALL
+SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1'
+UNION ALL
+SELECT 'toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1'
+ORDER BY relname;
+ALTER TABLE logged1 SET UNLOGGED; -- silently do nothing
+DROP TABLE logged3;
+DROP TABLE logged2;
+DROP TABLE logged1;
+
+-- test ADD COLUMN IF NOT EXISTS
+CREATE TABLE test_add_column(c1 integer);
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN c2 integer;
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN c2 integer; -- fail because c2 already exists
+ALTER TABLE ONLY test_add_column
+ ADD COLUMN c2 integer; -- fail because c2 already exists
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
+ALTER TABLE ONLY test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN c2 integer, -- fail because c2 already exists
+ ADD COLUMN c3 integer primary key;
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+ ADD COLUMN c3 integer primary key;
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+ ADD COLUMN IF NOT EXISTS c3 integer primary key; -- skipping because c3 already exists
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
+ ADD COLUMN IF NOT EXISTS c3 integer, -- skipping because c3 already exists
+ ADD COLUMN c4 integer REFERENCES test_add_column;
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c4 integer REFERENCES test_add_column;
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 8);
+\d test_add_column
+ALTER TABLE test_add_column
+ ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 10);
+\d test_add_column*
+DROP TABLE test_add_column;
+\d test_add_column*
+
+-- assorted cases with multiple ALTER TABLE steps
+CREATE TABLE ataddindex(f1 INT);
+INSERT INTO ataddindex VALUES (42), (43);
+CREATE UNIQUE INDEX ataddindexi0 ON ataddindex(f1);
+ALTER TABLE ataddindex
+ ADD PRIMARY KEY USING INDEX ataddindexi0,
+ ALTER f1 TYPE BIGINT;
+\d ataddindex
+DROP TABLE ataddindex;
+
+CREATE TABLE ataddindex(f1 VARCHAR(10));
+INSERT INTO ataddindex(f1) VALUES ('foo'), ('a');
+ALTER TABLE ataddindex
+ ALTER f1 SET DATA TYPE TEXT,
+ ADD EXCLUDE ((f1 LIKE 'a') WITH =);
+\d ataddindex
+DROP TABLE ataddindex;
+
+CREATE TABLE ataddindex(id int, ref_id int);
+ALTER TABLE ataddindex
+ ADD PRIMARY KEY (id),
+ ADD FOREIGN KEY (ref_id) REFERENCES ataddindex;
+\d ataddindex
+DROP TABLE ataddindex;
+
+CREATE TABLE ataddindex(id int, ref_id int);
+ALTER TABLE ataddindex
+ ADD UNIQUE (id),
+ ADD FOREIGN KEY (ref_id) REFERENCES ataddindex (id);
+\d ataddindex
+DROP TABLE ataddindex;
+
+-- unsupported constraint types for partitioned tables
+CREATE TABLE partitioned (
+ a int,
+ b int
+) PARTITION BY RANGE (a, (a+b+1));
+ALTER TABLE partitioned ADD EXCLUDE USING gist (a WITH &&);
+
+-- cannot drop column that is part of the partition key
+ALTER TABLE partitioned DROP COLUMN a;
+ALTER TABLE partitioned ALTER COLUMN a TYPE char(5);
+ALTER TABLE partitioned DROP COLUMN b;
+ALTER TABLE partitioned ALTER COLUMN b TYPE char(5);
+
+-- partitioned table cannot participate in regular inheritance
+CREATE TABLE nonpartitioned (
+ a int,
+ b int
+);
+ALTER TABLE partitioned INHERIT nonpartitioned;
+ALTER TABLE nonpartitioned INHERIT partitioned;
+
+-- cannot add NO INHERIT constraint to partitioned tables
+ALTER TABLE partitioned ADD CONSTRAINT chk_a CHECK (a > 0) NO INHERIT;
+
+DROP TABLE partitioned, nonpartitioned;
+
+--
+-- ATTACH PARTITION
+--
+
+-- check that target table is partitioned
+CREATE TABLE unparted (
+ a int
+);
+CREATE TABLE fail_part (like unparted);
+ALTER TABLE unparted ATTACH PARTITION fail_part FOR VALUES IN ('a');
+DROP TABLE unparted, fail_part;
+
+-- check that partition bound is compatible
+CREATE TABLE list_parted (
+ a int NOT NULL,
+ b char(2) COLLATE "C",
+ CONSTRAINT check_a CHECK (a > 0)
+) PARTITION BY LIST (a);
+CREATE TABLE fail_part (LIKE list_parted);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES FROM (1) TO (10);
+DROP TABLE fail_part;
+
+-- check that the table being attached exists
+ALTER TABLE list_parted ATTACH PARTITION nonexistent FOR VALUES IN (1);
+
+-- check ownership of the source table
+CREATE ROLE regress_test_me;
+CREATE ROLE regress_test_not_me;
+CREATE TABLE not_owned_by_me (LIKE list_parted);
+ALTER TABLE not_owned_by_me OWNER TO regress_test_not_me;
+SET SESSION AUTHORIZATION regress_test_me;
+CREATE TABLE owned_by_me (
+ a int
+) PARTITION BY LIST (a);
+ALTER TABLE owned_by_me ATTACH PARTITION not_owned_by_me FOR VALUES IN (1);
+RESET SESSION AUTHORIZATION;
+DROP TABLE owned_by_me, not_owned_by_me;
+DROP ROLE regress_test_not_me;
+DROP ROLE regress_test_me;
+
+-- check that the table being attached is not part of regular inheritance
+CREATE TABLE parent (LIKE list_parted);
+CREATE TABLE child () INHERITS (parent);
+ALTER TABLE list_parted ATTACH PARTITION child FOR VALUES IN (1);
+ALTER TABLE list_parted ATTACH PARTITION parent FOR VALUES IN (1);
+DROP TABLE parent CASCADE;
+
+-- check any TEMP-ness
+CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a);
+CREATE TABLE perm_part (a int);
+ALTER TABLE temp_parted ATTACH PARTITION perm_part FOR VALUES IN (1);
+DROP TABLE temp_parted, perm_part;
+
+-- check that the table being attached is not a typed table
+CREATE TYPE mytype AS (a int);
+CREATE TABLE fail_part OF mytype;
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+DROP TYPE mytype CASCADE;
+
+-- check that the table being attached has only columns present in the parent
+CREATE TABLE fail_part (like list_parted, c int);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+DROP TABLE fail_part;
+
+-- check that the table being attached has every column of the parent
+CREATE TABLE fail_part (a int NOT NULL);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+DROP TABLE fail_part;
+
+-- check that columns match in type, collation and NOT NULL status
+CREATE TABLE fail_part (
+ b char(3),
+ a int NOT NULL
+);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+ALTER TABLE fail_part ALTER b TYPE char (2) COLLATE "POSIX";
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+DROP TABLE fail_part;
+
+-- check that the table being attached has all constraints of the parent
+CREATE TABLE fail_part (
+ b char(2) COLLATE "C",
+ a int NOT NULL
+);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+
+-- check that the constraint matches in definition with parent's constraint
+ALTER TABLE fail_part ADD CONSTRAINT check_a CHECK (a >= 0);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+DROP TABLE fail_part;
+
+-- check the attributes and constraints after partition is attached
+CREATE TABLE part_1 (
+ a int NOT NULL,
+ b char(2) COLLATE "C",
+ CONSTRAINT check_a CHECK (a > 0)
+);
+ALTER TABLE list_parted ATTACH PARTITION part_1 FOR VALUES IN (1);
+-- attislocal and conislocal are always false for merged attributes and constraints respectively.
+SELECT attislocal, attinhcount FROM pg_attribute WHERE attrelid = 'part_1'::regclass AND attnum > 0;
+SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_1'::regclass AND conname = 'check_a';
+
+-- check that the new partition won't overlap with an existing partition
+CREATE TABLE fail_part (LIKE part_1 INCLUDING CONSTRAINTS);
+ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1);
+DROP TABLE fail_part;
+-- check that an existing table can be attached as a default partition
+CREATE TABLE def_part (LIKE list_parted INCLUDING CONSTRAINTS);
+ALTER TABLE list_parted ATTACH PARTITION def_part DEFAULT;
+-- check attaching default partition fails if a default partition already
+-- exists
+CREATE TABLE fail_def_part (LIKE part_1 INCLUDING CONSTRAINTS);
+ALTER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT;
+
+-- check validation when attaching list partitions
+CREATE TABLE list_parted2 (
+ a int,
+ b char
+) PARTITION BY LIST (a);
+
+-- check that violating rows are correctly reported
+CREATE TABLE part_2 (LIKE list_parted2);
+INSERT INTO part_2 VALUES (3, 'a');
+ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
+
+-- should be ok after deleting the bad row
+DELETE FROM part_2;
+ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
+
+-- check partition cannot be attached if default has some row for its values
+CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT;
+INSERT INTO list_parted2_def VALUES (11, 'z');
+CREATE TABLE part_3 (LIKE list_parted2);
+ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11);
+-- should be ok after deleting the bad row
+DELETE FROM list_parted2_def WHERE a = 11;
+ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11);
+
+-- adding constraints that describe the desired partition constraint
+-- (or more restrictive) will help skip the validation scan
+CREATE TABLE part_3_4 (
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IN (3))
+);
+
+-- however, if a list partition does not accept nulls, there should be
+-- an explicit NOT NULL constraint on the partition key column for the
+-- validation scan to be skipped;
+ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);
+
+-- adding a NOT NULL constraint will cause the scan to be skipped
+ALTER TABLE list_parted2 DETACH PARTITION part_3_4;
+ALTER TABLE part_3_4 ALTER a SET NOT NULL;
+ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4);
+
+-- check if default partition scan skipped
+ALTER TABLE list_parted2_def ADD CONSTRAINT check_a CHECK (a IN (5, 6));
+CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66);
+
+-- check validation when attaching range partitions
+CREATE TABLE range_parted (
+ a int,
+ b int
+) PARTITION BY RANGE (a, b);
+
+-- check that violating rows are correctly reported
+CREATE TABLE part1 (
+ a int NOT NULL CHECK (a = 1),
+ b int NOT NULL CHECK (b >= 1 AND b <= 10)
+);
+INSERT INTO part1 VALUES (1, 10);
+-- Remember the TO bound is exclusive
+ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10);
+
+-- should be ok after deleting the bad row
+DELETE FROM part1;
+ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10);
+
+-- adding constraints that describe the desired partition constraint
+-- (or more restrictive) will help skip the validation scan
+CREATE TABLE part2 (
+ a int NOT NULL CHECK (a = 1),
+ b int NOT NULL CHECK (b >= 10 AND b < 18)
+);
+ALTER TABLE range_parted ATTACH PARTITION part2 FOR VALUES FROM (1, 10) TO (1, 20);
+
+-- Create default partition
+CREATE TABLE partr_def1 PARTITION OF range_parted DEFAULT;
+
+-- Only one default partition is allowed, hence, following should give error
+CREATE TABLE partr_def2 (LIKE part1 INCLUDING CONSTRAINTS);
+ALTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT;
+
+-- Overlapping partitions cannot be attached, hence, following should give error
+INSERT INTO partr_def1 VALUES (2, 10);
+CREATE TABLE part3 (LIKE range_parted);
+ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (2, 10) TO (2, 20);
+
+-- Attaching partitions should be successful when there are no overlapping rows
+ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (3, 10) TO (3, 20);
+
+-- check that leaf partitions are scanned when attaching a partitioned
+-- table
+CREATE TABLE part_5 (
+ LIKE list_parted2
+) PARTITION BY LIST (b);
+
+-- check that violating rows are correctly reported
+CREATE TABLE part_5_a PARTITION OF part_5 FOR VALUES IN ('a');
+INSERT INTO part_5_a (a, b) VALUES (6, 'a');
+ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
+
+-- delete the faulting row and also add a constraint to skip the scan
+DELETE FROM part_5_a WHERE a NOT IN (3);
+ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 5);
+ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
+ALTER TABLE list_parted2 DETACH PARTITION part_5;
+ALTER TABLE part_5 DROP CONSTRAINT check_a;
+
+-- scan should again be skipped, even though NOT NULL is now a column property
+ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IN (5)), ALTER a SET NOT NULL;
+ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5);
+
+-- Check the case where attnos of the partitioning columns in the table being
+-- attached differs from the parent. It should not affect the constraint-
+-- checking logic that allows to skip the scan.
+CREATE TABLE part_6 (
+ c int,
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 6)
+);
+ALTER TABLE part_6 DROP c;
+ALTER TABLE list_parted2 ATTACH PARTITION part_6 FOR VALUES IN (6);
+
+-- Similar to above, but the table being attached is a partitioned table
+-- whose partition has still different attnos for the root partitioning
+-- columns.
+CREATE TABLE part_7 (
+ LIKE list_parted2,
+ CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7)
+) PARTITION BY LIST (b);
+CREATE TABLE part_7_a_null (
+ c int,
+ d int,
+ e int,
+ LIKE list_parted2, -- 'a' will have attnum = 4
+ CONSTRAINT check_b CHECK (b IS NULL OR b = 'a'),
+ CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7)
+);
+ALTER TABLE part_7_a_null DROP c, DROP d, DROP e;
+ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN ('a', null);
+ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);
+
+-- Same example, but check this time that the constraint correctly detects
+-- violating rows
+ALTER TABLE list_parted2 DETACH PARTITION part_7;
+ALTER TABLE part_7 DROP CONSTRAINT check_a; -- thusly, scan won't be skipped
+INSERT INTO part_7 (a, b) VALUES (8, null), (9, 'a');
+SELECT tableoid::regclass, a, b FROM part_7 order by a;
+ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7);
+
+-- check that leaf partitions of default partition are scanned when
+-- attaching a partitioned table.
+ALTER TABLE part_5 DROP CONSTRAINT check_a;
+CREATE TABLE part5_def PARTITION OF part_5 DEFAULT PARTITION BY LIST(a);
+CREATE TABLE part5_def_p1 PARTITION OF part5_def FOR VALUES IN (5);
+INSERT INTO part5_def_p1 VALUES (5, 'y');
+CREATE TABLE part5_p1 (LIKE part_5);
+ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y');
+-- should be ok after deleting the bad row
+DELETE FROM part5_def_p1 WHERE b = 'y';
+ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y');
+
+-- check that the table being attached is not already a partition
+ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2);
+
+-- check that circular inheritance is not allowed
+ALTER TABLE part_5 ATTACH PARTITION list_parted2 FOR VALUES IN ('b');
+ALTER TABLE list_parted2 ATTACH PARTITION list_parted2 FOR VALUES IN (0);
+
+-- If a partitioned table being created or an existing table being attached
+-- as a partition does not have a constraint that would allow validation scan
+-- to be skipped, but an individual partition does, then the partition's
+-- validation scan is skipped.
+CREATE TABLE quuux (a int, b text) PARTITION BY LIST (a);
+CREATE TABLE quuux_default PARTITION OF quuux DEFAULT PARTITION BY LIST (b);
+CREATE TABLE quuux_default1 PARTITION OF quuux_default (
+ CONSTRAINT check_1 CHECK (a IS NOT NULL AND a = 1)
+) FOR VALUES IN ('b');
+CREATE TABLE quuux1 (a int, b text);
+ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1); -- validate!
+CREATE TABLE quuux2 (a int, b text);
+ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2); -- skip validation
+DROP TABLE quuux1, quuux2;
+-- should validate for quuux1, but not for quuux2
+CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1);
+CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2);
+DROP TABLE quuux;
+
+-- check validation when attaching hash partitions
+
+-- Use hand-rolled hash functions and operator class to get predictable result
+-- on different machines. part_test_int4_ops is defined in insert.sql.
+
+-- check that the new partition won't overlap with an existing partition
+CREATE TABLE hash_parted (
+ a int,
+ b int
+) PARTITION BY HASH (a part_test_int4_ops);
+CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 4, REMAINDER 0);
+CREATE TABLE fail_part (LIKE hpart_1);
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 4);
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 0);
+DROP TABLE fail_part;
+
+-- check validation when attaching hash partitions
+
+-- check that violating rows are correctly reported
+CREATE TABLE hpart_2 (LIKE hash_parted);
+INSERT INTO hpart_2 VALUES (3, 0);
+ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1);
+
+-- should be ok after deleting the bad row
+DELETE FROM hpart_2;
+ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1);
+
+-- check that leaf partitions are scanned when attaching a partitioned
+-- table
+CREATE TABLE hpart_5 (
+ LIKE hash_parted
+) PARTITION BY LIST (b);
+
+-- check that violating rows are correctly reported
+CREATE TABLE hpart_5_a PARTITION OF hpart_5 FOR VALUES IN ('1', '2', '3');
+INSERT INTO hpart_5_a (a, b) VALUES (7, 1);
+ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2);
+
+-- should be ok after deleting the bad row
+DELETE FROM hpart_5_a;
+ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2);
+
+-- check that the table being attach is with valid modulus and remainder value
+CREATE TABLE fail_part(LIKE hash_parted);
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 0, REMAINDER 1);
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 8);
+ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 3, REMAINDER 2);
+DROP TABLE fail_part;
+
+-- fails with incorrect object type
+CREATE VIEW at_v1 AS SELECT 1 as a;
+ALTER TABLE at_v1 ATTACH PARTITION dummy default;
+DROP VIEW at_v1;
+
+--
+-- DETACH PARTITION
+--
+
+-- check that the table is partitioned at all
+CREATE TABLE regular_table (a int);
+ALTER TABLE regular_table DETACH PARTITION any_name;
+DROP TABLE regular_table;
+
+-- check that the partition being detached exists at all
+ALTER TABLE list_parted2 DETACH PARTITION part_4;
+ALTER TABLE hash_parted DETACH PARTITION hpart_4;
+
+-- check that the partition being detached is actually a partition of the parent
+CREATE TABLE not_a_part (a int);
+ALTER TABLE list_parted2 DETACH PARTITION not_a_part;
+ALTER TABLE list_parted2 DETACH PARTITION part_1;
+
+ALTER TABLE hash_parted DETACH PARTITION not_a_part;
+DROP TABLE not_a_part;
+
+-- check that, after being detached, attinhcount/coninhcount is dropped to 0 and
+-- attislocal/conislocal is set to true
+ALTER TABLE list_parted2 DETACH PARTITION part_3_4;
+SELECT attinhcount, attislocal FROM pg_attribute WHERE attrelid = 'part_3_4'::regclass AND attnum > 0;
+SELECT coninhcount, conislocal FROM pg_constraint WHERE conrelid = 'part_3_4'::regclass AND conname = 'check_a';
+DROP TABLE part_3_4;
+
+-- check that a detached partition is not dropped on dropping a partitioned table
+CREATE TABLE range_parted2 (
+ a int
+) PARTITION BY RANGE(a);
+CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100);
+ALTER TABLE range_parted2 DETACH PARTITION part_rp;
+DROP TABLE range_parted2;
+SELECT * from part_rp;
+DROP TABLE part_rp;
+
+-- concurrent detach
+CREATE TABLE range_parted2 (
+ a int
+) PARTITION BY RANGE(a);
+CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100);
+BEGIN;
+-- doesn't work in a partition block
+ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
+COMMIT;
+CREATE TABLE part_rpd PARTITION OF range_parted2 DEFAULT;
+-- doesn't work if there's a default partition
+ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
+-- doesn't work for the default partition
+ALTER TABLE range_parted2 DETACH PARTITION part_rpd CONCURRENTLY;
+DROP TABLE part_rpd;
+-- works fine
+ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
+\d+ range_parted2
+-- constraint should be created
+\d part_rp
+CREATE TABLE part_rp100 PARTITION OF range_parted2 (CHECK (a>=123 AND a<133 AND a IS NOT NULL)) FOR VALUES FROM (100) to (200);
+ALTER TABLE range_parted2 DETACH PARTITION part_rp100 CONCURRENTLY;
+-- redundant constraint should not be created
+\d part_rp100
+DROP TABLE range_parted2;
+
+-- Check ALTER TABLE commands for partitioned tables and partitions
+
+-- cannot add/drop column to/from *only* the parent
+ALTER TABLE ONLY list_parted2 ADD COLUMN c int;
+ALTER TABLE ONLY list_parted2 DROP COLUMN b;
+
+-- cannot add a column to partition or drop an inherited one
+ALTER TABLE part_2 ADD COLUMN c text;
+ALTER TABLE part_2 DROP COLUMN b;
+
+-- Nor rename, alter type
+ALTER TABLE part_2 RENAME COLUMN b to c;
+ALTER TABLE part_2 ALTER COLUMN b TYPE text;
+
+-- cannot add/drop NOT NULL or check constraints to *only* the parent, when
+-- partitions exist
+ALTER TABLE ONLY list_parted2 ALTER b SET NOT NULL;
+ALTER TABLE ONLY list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz');
+
+ALTER TABLE list_parted2 ALTER b SET NOT NULL;
+ALTER TABLE ONLY list_parted2 ALTER b DROP NOT NULL;
+ALTER TABLE list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz');
+ALTER TABLE ONLY list_parted2 DROP CONSTRAINT check_b;
+
+-- It's alright though, if no partitions are yet created
+CREATE TABLE parted_no_parts (a int) PARTITION BY LIST (a);
+ALTER TABLE ONLY parted_no_parts ALTER a SET NOT NULL;
+ALTER TABLE ONLY parted_no_parts ADD CONSTRAINT check_a CHECK (a > 0);
+ALTER TABLE ONLY parted_no_parts ALTER a DROP NOT NULL;
+ALTER TABLE ONLY parted_no_parts DROP CONSTRAINT check_a;
+DROP TABLE parted_no_parts;
+
+-- cannot drop inherited NOT NULL or check constraints from partition
+ALTER TABLE list_parted2 ALTER b SET NOT NULL, ADD CONSTRAINT check_a2 CHECK (a > 0);
+ALTER TABLE part_2 ALTER b DROP NOT NULL;
+ALTER TABLE part_2 DROP CONSTRAINT check_a2;
+
+-- Doesn't make sense to add NO INHERIT constraints on partitioned tables
+ALTER TABLE list_parted2 add constraint check_b2 check (b <> 'zz') NO INHERIT;
+
+-- check that a partition cannot participate in regular inheritance
+CREATE TABLE inh_test () INHERITS (part_2);
+CREATE TABLE inh_test (LIKE part_2);
+ALTER TABLE inh_test INHERIT part_2;
+ALTER TABLE part_2 INHERIT inh_test;
+
+-- cannot drop or alter type of partition key columns of lower level
+-- partitioned tables; for example, part_5, which is list_parted2's
+-- partition, is partitioned on b;
+ALTER TABLE list_parted2 DROP COLUMN b;
+ALTER TABLE list_parted2 ALTER COLUMN b TYPE text;
+
+-- dropping non-partition key columns should be allowed on the parent table.
+ALTER TABLE list_parted DROP COLUMN b;
+SELECT * FROM list_parted;
+
+-- cleanup
+DROP TABLE list_parted, list_parted2, range_parted;
+DROP TABLE fail_def_part;
+DROP TABLE hash_parted;
+
+-- more tests for certain multi-level partitioning scenarios
+create table p (a int, b int) partition by range (a, b);
+create table p1 (b int, a int not null) partition by range (b);
+create table p11 (like p1);
+alter table p11 drop a;
+alter table p11 add a int;
+alter table p11 drop a;
+alter table p11 add a int not null;
+-- attnum for key attribute 'a' is different in p, p1, and p11
+select attrelid::regclass, attname, attnum
+from pg_attribute
+where attname = 'a'
+ and (attrelid = 'p'::regclass
+ or attrelid = 'p1'::regclass
+ or attrelid = 'p11'::regclass)
+order by attrelid::regclass::text;
+
+alter table p1 attach partition p11 for values from (2) to (5);
+
+insert into p1 (a, b) values (2, 3);
+-- check that partition validation scan correctly detects violating rows
+alter table p attach partition p1 for values from (1, 2) to (1, 10);
+
+-- cleanup
+drop table p;
+drop table p1;
+
+-- validate constraint on partitioned tables should only scan leaf partitions
+create table parted_validate_test (a int) partition by list (a);
+create table parted_validate_test_1 partition of parted_validate_test for values in (0, 1);
+alter table parted_validate_test add constraint parted_validate_test_chka check (a > 0) not valid;
+alter table parted_validate_test validate constraint parted_validate_test_chka;
+drop table parted_validate_test;
+-- test alter column options
+CREATE TABLE attmp(i integer);
+INSERT INTO attmp VALUES (1);
+ALTER TABLE attmp ALTER COLUMN i SET (n_distinct = 1, n_distinct_inherited = 2);
+ALTER TABLE attmp ALTER COLUMN i RESET (n_distinct_inherited);
+ANALYZE attmp;
+DROP TABLE attmp;
+
+DROP USER regress_alter_table_user1;
+
+-- check that violating rows are correctly reported when attaching as the
+-- default partition
+create table defpart_attach_test (a int) partition by list (a);
+create table defpart_attach_test1 partition of defpart_attach_test for values in (1);
+create table defpart_attach_test_d (b int, a int);
+alter table defpart_attach_test_d drop b;
+insert into defpart_attach_test_d values (1), (2);
+
+-- error because its constraint as the default partition would be violated
+-- by the row containing 1
+alter table defpart_attach_test attach partition defpart_attach_test_d default;
+delete from defpart_attach_test_d where a = 1;
+alter table defpart_attach_test_d add check (a > 1);
+
+-- should be attached successfully and without needing to be scanned
+alter table defpart_attach_test attach partition defpart_attach_test_d default;
+
+-- check that attaching a partition correctly reports any rows in the default
+-- partition that should not be there for the new partition to be attached
+-- successfully
+create table defpart_attach_test_2 (like defpart_attach_test_d);
+alter table defpart_attach_test attach partition defpart_attach_test_2 for values in (2);
+
+drop table defpart_attach_test;
+
+-- check combinations of temporary and permanent relations when attaching
+-- partitions.
+create table perm_part_parent (a int) partition by list (a);
+create temp table temp_part_parent (a int) partition by list (a);
+create table perm_part_child (a int);
+create temp table temp_part_child (a int);
+alter table temp_part_parent attach partition perm_part_child default; -- error
+alter table perm_part_parent attach partition temp_part_child default; -- error
+alter table temp_part_parent attach partition temp_part_child default; -- ok
+drop table perm_part_parent cascade;
+drop table temp_part_parent cascade;
+
+-- check that attaching partitions to a table while it is being used is
+-- prevented
+create table tab_part_attach (a int) partition by list (a);
+create or replace function func_part_attach() returns trigger
+ language plpgsql as $$
+ begin
+ execute 'create table tab_part_attach_1 (a int)';
+ execute 'alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)';
+ return null;
+ end $$;
+create trigger trig_part_attach before insert on tab_part_attach
+ for each statement execute procedure func_part_attach();
+insert into tab_part_attach values (1);
+drop table tab_part_attach;
+drop function func_part_attach();
+
+-- test case where the partitioning operator is a SQL function whose
+-- evaluation results in the table's relcache being rebuilt partway through
+-- the execution of an ATTACH PARTITION command
+create function at_test_sql_partop (int4, int4) returns int language sql
+as $$ select case when $1 = $2 then 0 when $1 > $2 then 1 else -1 end; $$;
+create operator class at_test_sql_partop for type int4 using btree as
+ operator 1 < (int4, int4), operator 2 <= (int4, int4),
+ operator 3 = (int4, int4), operator 4 >= (int4, int4),
+ operator 5 > (int4, int4), function 1 at_test_sql_partop(int4, int4);
+create table at_test_sql_partop (a int) partition by range (a at_test_sql_partop);
+create table at_test_sql_partop_1 (a int);
+alter table at_test_sql_partop attach partition at_test_sql_partop_1 for values from (0) to (10);
+drop table at_test_sql_partop;
+drop operator class at_test_sql_partop using btree;
+drop function at_test_sql_partop;
+
+
+/* Test case for bug #16242 */
+
+-- We create a parent and child where the child has missing
+-- non-null attribute values, and arrange to pass them through
+-- tuple conversion from the child to the parent tupdesc
+create table bar1 (a integer, b integer not null default 1)
+ partition by range (a);
+create table bar2 (a integer);
+insert into bar2 values (1);
+alter table bar2 add column b integer not null default 1;
+-- (at this point bar2 contains tuple with natts=1)
+alter table bar1 attach partition bar2 default;
+
+-- this works:
+select * from bar1;
+
+-- this exercises tuple conversion:
+create function xtrig()
+ returns trigger language plpgsql
+as $$
+ declare
+ r record;
+ begin
+ for r in select * from old loop
+ raise info 'a=%, b=%', r.a, r.b;
+ end loop;
+ return NULL;
+ end;
+$$;
+create trigger xtrig
+ after update on bar1
+ referencing old table as old
+ for each statement execute procedure xtrig();
+
+update bar1 set a = a + 1;
+
+/* End test case for bug #16242 */
+
+/* Test case for bug #17409 */
+
+create table attbl (p1 int constraint pk_attbl primary key);
+create table atref (c1 int references attbl(p1));
+cluster attbl using pk_attbl;
+alter table attbl alter column p1 set data type bigint;
+alter table atref alter column c1 set data type bigint;
+drop table attbl, atref;
+
+create table attbl (p1 int constraint pk_attbl primary key);
+alter table attbl replica identity using index pk_attbl;
+create table atref (c1 int references attbl(p1));
+alter table attbl alter column p1 set data type bigint;
+alter table atref alter column c1 set data type bigint;
+drop table attbl, atref;
+
+/* End test case for bug #17409 */
+
+-- Test that ALTER TABLE rewrite preserves a clustered index
+-- for normal indexes and indexes on constraints.
+create table alttype_cluster (a int);
+alter table alttype_cluster add primary key (a);
+create index alttype_cluster_ind on alttype_cluster (a);
+alter table alttype_cluster cluster on alttype_cluster_ind;
+-- Normal index remains clustered.
+select indexrelid::regclass, indisclustered from pg_index
+ where indrelid = 'alttype_cluster'::regclass
+ order by indexrelid::regclass::text;
+alter table alttype_cluster alter a type bigint;
+select indexrelid::regclass, indisclustered from pg_index
+ where indrelid = 'alttype_cluster'::regclass
+ order by indexrelid::regclass::text;
+-- Constraint index remains clustered.
+alter table alttype_cluster cluster on alttype_cluster_pkey;
+select indexrelid::regclass, indisclustered from pg_index
+ where indrelid = 'alttype_cluster'::regclass
+ order by indexrelid::regclass::text;
+alter table alttype_cluster alter a type int;
+select indexrelid::regclass, indisclustered from pg_index
+ where indrelid = 'alttype_cluster'::regclass
+ order by indexrelid::regclass::text;
+drop table alttype_cluster;
+
+--
+-- Check that attaching or detaching a partitioned partition correctly leads
+-- to its partitions' constraint being updated to reflect the parent's
+-- newly added/removed constraint
+create table target_parted (a int, b int) partition by list (a);
+create table attach_parted (a int, b int) partition by list (b);
+create table attach_parted_part1 partition of attach_parted for values in (1);
+-- insert a row directly into the leaf partition so that its partition
+-- constraint is built and stored in the relcache
+insert into attach_parted_part1 values (1, 1);
+-- the following better invalidate the partition constraint of the leaf
+-- partition too...
+alter table target_parted attach partition attach_parted for values in (1);
+-- ...such that the following insert fails
+insert into attach_parted_part1 values (2, 1);
+-- ...and doesn't when the partition is detached along with its own partition
+alter table target_parted detach partition attach_parted;
+insert into attach_parted_part1 values (2, 1);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/arrays.out b/ydb/library/yql/tests/postgresql/original/cases/arrays.out
new file mode 100644
index 0000000000..4923cf36d6
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/arrays.out
@@ -0,0 +1,2436 @@
+--
+-- ARRAYS
+--
+CREATE TABLE arrtest (
+ a int2[],
+ b int4[][][],
+ c name[],
+ d text[][],
+ e float8[],
+ f char(5)[],
+ g varchar(5)[]
+);
+--
+-- only the 'e' array is 0-based, the others are 1-based.
+--
+INSERT INTO arrtest (a[1:5], b[1:1][1:2][1:2], c, d, f, g)
+ VALUES ('{1,2,3,4,5}', '{{{0,0},{1,2}}}', '{}', '{}', '{}', '{}');
+UPDATE arrtest SET e[0] = '1.1';
+UPDATE arrtest SET e[1] = '2.2';
+INSERT INTO arrtest (f)
+ VALUES ('{"too long"}');
+ERROR: value too long for type character(5)
+INSERT INTO arrtest (a, b[1:2][1:2], c, d, e, f, g)
+ VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}',
+ '{{"elt1", "elt2"}}', '{"3.4", "6.7"}',
+ '{"abc","abcde"}', '{"abc","abcde"}');
+INSERT INTO arrtest (a, b[1:2], c, d[1:2])
+ VALUES ('{}', '{3,4}', '{foo,bar}', '{bar,foo}');
+INSERT INTO arrtest (b[2]) VALUES(now()); -- error, type mismatch
+ERROR: subscripted assignment to "b" requires type integer but expression is of type timestamp with time zone
+LINE 1: INSERT INTO arrtest (b[2]) VALUES(now());
+ ^
+HINT: You will need to rewrite or cast the expression.
+INSERT INTO arrtest (b[1:2]) VALUES(now()); -- error, type mismatch
+ERROR: subscripted assignment to "b" requires type integer[] but expression is of type timestamp with time zone
+LINE 1: INSERT INTO arrtest (b[1:2]) VALUES(now());
+ ^
+HINT: You will need to rewrite or cast the expression.
+SELECT * FROM arrtest;
+ a | b | c | d | e | f | g
+-------------+-----------------+-----------+---------------+-----------------+-----------------+-------------
+ {1,2,3,4,5} | {{{0,0},{1,2}}} | {} | {} | [0:1]={1.1,2.2} | {} | {}
+ {11,12,23} | {{3,4},{4,5}} | {foobar} | {{elt1,elt2}} | {3.4,6.7} | {"abc ",abcde} | {abc,abcde}
+ {} | {3,4} | {foo,bar} | {bar,foo} | | |
+(3 rows)
+
+SELECT arrtest.a[1],
+ arrtest.b[1][1][1],
+ arrtest.c[1],
+ arrtest.d[1][1],
+ arrtest.e[0]
+ FROM arrtest;
+ a | b | c | d | e
+----+---+--------+------+-----
+ 1 | 0 | | | 1.1
+ 11 | | foobar | elt1 |
+ | | foo | |
+(3 rows)
+
+SELECT a[1], b[1][1][1], c[1], d[1][1], e[0]
+ FROM arrtest;
+ a | b | c | d | e
+----+---+--------+------+-----
+ 1 | 0 | | | 1.1
+ 11 | | foobar | elt1 |
+ | | foo | |
+(3 rows)
+
+SELECT a[1:3],
+ b[1:1][1:2][1:2],
+ c[1:2],
+ d[1:1][1:2]
+ FROM arrtest;
+ a | b | c | d
+------------+-----------------+-----------+---------------
+ {1,2,3} | {{{0,0},{1,2}}} | {} | {}
+ {11,12,23} | {} | {foobar} | {{elt1,elt2}}
+ {} | {} | {foo,bar} | {}
+(3 rows)
+
+SELECT array_ndims(a) AS a,array_ndims(b) AS b,array_ndims(c) AS c
+ FROM arrtest;
+ a | b | c
+---+---+---
+ 1 | 3 |
+ 1 | 2 | 1
+ | 1 | 1
+(3 rows)
+
+SELECT array_dims(a) AS a,array_dims(b) AS b,array_dims(c) AS c
+ FROM arrtest;
+ a | b | c
+-------+-----------------+-------
+ [1:5] | [1:1][1:2][1:2] |
+ [1:3] | [1:2][1:2] | [1:1]
+ | [1:2] | [1:2]
+(3 rows)
+
+-- returns nothing
+SELECT *
+ FROM arrtest
+ WHERE a[1] < 5 and
+ c = '{"foobar"}'::_name;
+ a | b | c | d | e | f | g
+---+---+---+---+---+---+---
+(0 rows)
+
+UPDATE arrtest
+ SET a[1:2] = '{16,25}'
+ WHERE NOT a = '{}'::_int2;
+UPDATE arrtest
+ SET b[1:1][1:1][1:2] = '{113, 117}',
+ b[1:1][1:2][2:2] = '{142, 147}'
+ WHERE array_dims(b) = '[1:1][1:2][1:2]';
+UPDATE arrtest
+ SET c[2:2] = '{"new_word"}'
+ WHERE array_dims(c) is not null;
+SELECT a,b,c FROM arrtest;
+ a | b | c
+---------------+-----------------------+-------------------
+ {16,25,3,4,5} | {{{113,142},{1,147}}} | {}
+ {} | {3,4} | {foo,new_word}
+ {16,25,23} | {{3,4},{4,5}} | {foobar,new_word}
+(3 rows)
+
+SELECT a[1:3],
+ b[1:1][1:2][1:2],
+ c[1:2],
+ d[1:1][2:2]
+ FROM arrtest;
+ a | b | c | d
+------------+-----------------------+-------------------+----------
+ {16,25,3} | {{{113,142},{1,147}}} | {} | {}
+ {} | {} | {foo,new_word} | {}
+ {16,25,23} | {} | {foobar,new_word} | {{elt2}}
+(3 rows)
+
+SELECT b[1:1][2][2],
+ d[1:1][2]
+ FROM arrtest;
+ b | d
+-----------------------+---------------
+ {{{113,142},{1,147}}} | {}
+ {} | {}
+ {} | {{elt1,elt2}}
+(3 rows)
+
+INSERT INTO arrtest(a) VALUES('{1,null,3}');
+SELECT a FROM arrtest;
+ a
+---------------
+ {16,25,3,4,5}
+ {}
+ {16,25,23}
+ {1,NULL,3}
+(4 rows)
+
+UPDATE arrtest SET a[4] = NULL WHERE a[2] IS NULL;
+SELECT a FROM arrtest WHERE a[2] IS NULL;
+ a
+-----------------
+ [4:4]={NULL}
+ {1,NULL,3,NULL}
+(2 rows)
+
+DELETE FROM arrtest WHERE a[2] IS NULL AND b IS NULL;
+SELECT a,b,c FROM arrtest;
+ a | b | c
+---------------+-----------------------+-------------------
+ {16,25,3,4,5} | {{{113,142},{1,147}}} | {}
+ {16,25,23} | {{3,4},{4,5}} | {foobar,new_word}
+ [4:4]={NULL} | {3,4} | {foo,new_word}
+(3 rows)
+
+-- test mixed slice/scalar subscripting
+select '{{1,2,3},{4,5,6},{7,8,9}}'::int[];
+ int4
+---------------------------
+ {{1,2,3},{4,5,6},{7,8,9}}
+(1 row)
+
+select ('{{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
+ int4
+---------------
+ {{1,2},{4,5}}
+(1 row)
+
+select '[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[];
+ int4
+--------------------------------------
+ [0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}
+(1 row)
+
+select ('[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
+ int4
+---------------
+ {{5,6},{8,9}}
+(1 row)
+
+--
+-- check subscription corner cases
+--
+-- More subscripts than MAXDIM (6)
+SELECT ('{}'::int[])[1][2][3][4][5][6][7];
+ERROR: number of array dimensions (7) exceeds the maximum allowed (6)
+-- NULL index yields NULL when selecting
+SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL][1];
+ int4
+------
+
+(1 row)
+
+SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL:1][1];
+ int4
+------
+
+(1 row)
+
+SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][1:NULL][1];
+ int4
+------
+
+(1 row)
+
+-- NULL index in assignment is an error
+UPDATE arrtest
+ SET c[NULL] = '{"can''t assign"}'
+ WHERE array_dims(c) is not null;
+ERROR: array subscript in assignment must not be null
+UPDATE arrtest
+ SET c[NULL:1] = '{"can''t assign"}'
+ WHERE array_dims(c) is not null;
+ERROR: array subscript in assignment must not be null
+UPDATE arrtest
+ SET c[1:NULL] = '{"can''t assign"}'
+ WHERE array_dims(c) is not null;
+ERROR: array subscript in assignment must not be null
+-- Un-subscriptable type
+SELECT (now())[1];
+ERROR: cannot subscript type timestamp with time zone because it does not support subscripting
+LINE 1: SELECT (now())[1];
+ ^
+-- test slices with empty lower and/or upper index
+CREATE TEMP TABLE arrtest_s (
+ a int2[],
+ b int2[][]
+);
+INSERT INTO arrtest_s VALUES ('{1,2,3,4,5}', '{{1,2,3}, {4,5,6}, {7,8,9}}');
+INSERT INTO arrtest_s VALUES ('[0:4]={1,2,3,4,5}', '[0:2][0:2]={{1,2,3}, {4,5,6}, {7,8,9}}');
+SELECT * FROM arrtest_s;
+ a | b
+-------------------+--------------------------------------
+ {1,2,3,4,5} | {{1,2,3},{4,5,6},{7,8,9}}
+ [0:4]={1,2,3,4,5} | [0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}
+(2 rows)
+
+SELECT a[:3], b[:2][:2] FROM arrtest_s;
+ a | b
+-----------+---------------------------
+ {1,2,3} | {{1,2},{4,5}}
+ {1,2,3,4} | {{1,2,3},{4,5,6},{7,8,9}}
+(2 rows)
+
+SELECT a[2:], b[2:][2:] FROM arrtest_s;
+ a | b
+-----------+---------------
+ {2,3,4,5} | {{5,6},{8,9}}
+ {3,4,5} | {{9}}
+(2 rows)
+
+SELECT a[:], b[:] FROM arrtest_s;
+ a | b
+-------------+---------------------------
+ {1,2,3,4,5} | {{1,2,3},{4,5,6},{7,8,9}}
+ {1,2,3,4,5} | {{1,2,3},{4,5,6},{7,8,9}}
+(2 rows)
+
+-- updates
+UPDATE arrtest_s SET a[:3] = '{11, 12, 13}', b[:2][:2] = '{{11,12}, {14,15}}'
+ WHERE array_lower(a,1) = 1;
+SELECT * FROM arrtest_s;
+ a | b
+-------------------+--------------------------------------
+ [0:4]={1,2,3,4,5} | [0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}
+ {11,12,13,4,5} | {{11,12,3},{14,15,6},{7,8,9}}
+(2 rows)
+
+UPDATE arrtest_s SET a[3:] = '{23, 24, 25}', b[2:][2:] = '{{25,26}, {28,29}}';
+SELECT * FROM arrtest_s;
+ a | b
+---------------------+---------------------------------------
+ [0:4]={1,2,3,23,24} | [0:2][0:2]={{1,2,3},{4,5,6},{7,8,25}}
+ {11,12,23,24,25} | {{11,12,3},{14,25,26},{7,28,29}}
+(2 rows)
+
+UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}';
+SELECT * FROM arrtest_s;
+ a | b
+------------------------+---------------------------------------
+ [0:4]={11,12,13,14,15} | [0:2][0:2]={{1,2,3},{4,5,6},{7,8,25}}
+ {11,12,13,14,15} | {{11,12,3},{14,25,26},{7,28,29}}
+(2 rows)
+
+UPDATE arrtest_s SET a[:] = '{23, 24, 25}'; -- fail, too small
+ERROR: source array too small
+INSERT INTO arrtest_s VALUES(NULL, NULL);
+UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}'; -- fail, no good with null
+ERROR: array slice subscript must provide both boundaries
+DETAIL: When assigning to a slice of an empty array value, slice boundaries must be fully specified.
+-- check with fixed-length-array type, such as point
+SELECT f1[0:1] FROM POINT_TBL;
+ERROR: slices of fixed-length arrays not implemented
+SELECT f1[0:] FROM POINT_TBL;
+ERROR: slices of fixed-length arrays not implemented
+SELECT f1[:1] FROM POINT_TBL;
+ERROR: slices of fixed-length arrays not implemented
+SELECT f1[:] FROM POINT_TBL;
+ERROR: slices of fixed-length arrays not implemented
+-- subscript assignments to fixed-width result in NULL if previous value is NULL
+UPDATE point_tbl SET f1[0] = 10 WHERE f1 IS NULL RETURNING *;
+ f1
+----
+
+(1 row)
+
+INSERT INTO point_tbl(f1[0]) VALUES(0) RETURNING *;
+ f1
+----
+
+(1 row)
+
+-- NULL assignments get ignored
+UPDATE point_tbl SET f1[0] = NULL WHERE f1::text = '(10,10)'::point::text RETURNING *;
+ f1
+---------
+ (10,10)
+(1 row)
+
+-- but non-NULL subscript assignments work
+UPDATE point_tbl SET f1[0] = -10, f1[1] = -10 WHERE f1::text = '(10,10)'::point::text RETURNING *;
+ f1
+-----------
+ (-10,-10)
+(1 row)
+
+-- but not to expand the range
+UPDATE point_tbl SET f1[3] = 10 WHERE f1::text = '(-10,-10)'::point::text RETURNING *;
+ERROR: array subscript out of range
+--
+-- test array extension
+--
+CREATE TEMP TABLE arrtest1 (i int[], t text[]);
+insert into arrtest1 values(array[1,2,null,4], array['one','two',null,'four']);
+select * from arrtest1;
+ i | t
+--------------+---------------------
+ {1,2,NULL,4} | {one,two,NULL,four}
+(1 row)
+
+update arrtest1 set i[2] = 22, t[2] = 'twenty-two';
+select * from arrtest1;
+ i | t
+---------------+----------------------------
+ {1,22,NULL,4} | {one,twenty-two,NULL,four}
+(1 row)
+
+update arrtest1 set i[5] = 5, t[5] = 'five';
+select * from arrtest1;
+ i | t
+-----------------+---------------------------------
+ {1,22,NULL,4,5} | {one,twenty-two,NULL,four,five}
+(1 row)
+
+update arrtest1 set i[8] = 8, t[8] = 'eight';
+select * from arrtest1;
+ i | t
+-----------------------------+-------------------------------------------------
+ {1,22,NULL,4,5,NULL,NULL,8} | {one,twenty-two,NULL,four,five,NULL,NULL,eight}
+(1 row)
+
+update arrtest1 set i[0] = 0, t[0] = 'zero';
+select * from arrtest1;
+ i | t
+-------------------------------------+------------------------------------------------------------
+ [0:8]={0,1,22,NULL,4,5,NULL,NULL,8} | [0:8]={zero,one,twenty-two,NULL,four,five,NULL,NULL,eight}
+(1 row)
+
+update arrtest1 set i[-3] = -3, t[-3] = 'minus-three';
+select * from arrtest1;
+ i | t
+---------------------------------------------------+-----------------------------------------------------------------------------------
+ [-3:8]={-3,NULL,NULL,0,1,22,NULL,4,5,NULL,NULL,8} | [-3:8]={minus-three,NULL,NULL,zero,one,twenty-two,NULL,four,five,NULL,NULL,eight}
+(1 row)
+
+update arrtest1 set i[0:2] = array[10,11,12], t[0:2] = array['ten','eleven','twelve'];
+select * from arrtest1;
+ i | t
+-----------------------------------------------------+---------------------------------------------------------------------------------
+ [-3:8]={-3,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,8} | [-3:8]={minus-three,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,eight}
+(1 row)
+
+update arrtest1 set i[8:10] = array[18,null,20], t[8:10] = array['p18',null,'p20'];
+select * from arrtest1;
+ i | t
+---------------------------------------------------------------+-----------------------------------------------------------------------------------------
+ [-3:10]={-3,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20} | [-3:10]={minus-three,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20}
+(1 row)
+
+update arrtest1 set i[11:12] = array[null,22], t[11:12] = array[null,'p22'];
+select * from arrtest1;
+ i | t
+-----------------------------------------------------------------------+--------------------------------------------------------------------------------------------------
+ [-3:12]={-3,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22} | [-3:12]={minus-three,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22}
+(1 row)
+
+update arrtest1 set i[15:16] = array[null,26], t[15:16] = array[null,'p26'];
+select * from arrtest1;
+ i | t
+-----------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------
+ [-3:16]={-3,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22,NULL,NULL,NULL,26} | [-3:16]={minus-three,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22,NULL,NULL,NULL,p26}
+(1 row)
+
+update arrtest1 set i[-5:-3] = array[-15,-14,-13], t[-5:-3] = array['m15','m14','m13'];
+select * from arrtest1;
+ i | t
+--------------------------------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------
+ [-5:16]={-15,-14,-13,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22,NULL,NULL,NULL,26} | [-5:16]={m15,m14,m13,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22,NULL,NULL,NULL,p26}
+(1 row)
+
+update arrtest1 set i[-7:-6] = array[-17,null], t[-7:-6] = array['m17',null];
+select * from arrtest1;
+ i | t
+-----------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------
+ [-7:16]={-17,NULL,-15,-14,-13,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22,NULL,NULL,NULL,26} | [-7:16]={m17,NULL,m15,m14,m13,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22,NULL,NULL,NULL,p26}
+(1 row)
+
+update arrtest1 set i[-12:-10] = array[-22,null,-20], t[-12:-10] = array['m22',null,'m20'];
+select * from arrtest1;
+ i | t
+-----------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------
+ [-12:16]={-22,NULL,-20,NULL,NULL,-17,NULL,-15,-14,-13,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22,NULL,NULL,NULL,26} | [-12:16]={m22,NULL,m20,NULL,NULL,m17,NULL,m15,m14,m13,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22,NULL,NULL,NULL,p26}
+(1 row)
+
+delete from arrtest1;
+insert into arrtest1 values(array[1,2,null,4], array['one','two',null,'four']);
+select * from arrtest1;
+ i | t
+--------------+---------------------
+ {1,2,NULL,4} | {one,two,NULL,four}
+(1 row)
+
+update arrtest1 set i[0:5] = array[0,1,2,null,4,5], t[0:5] = array['z','p1','p2',null,'p4','p5'];
+select * from arrtest1;
+ i | t
+------------------------+----------------------------
+ [0:5]={0,1,2,NULL,4,5} | [0:5]={z,p1,p2,NULL,p4,p5}
+(1 row)
+
+--
+-- array expressions and operators
+--
+-- table creation and INSERTs
+CREATE TEMP TABLE arrtest2 (i integer ARRAY[4], f float8[], n numeric[], t text[], d timestamp[]);
+INSERT INTO arrtest2 VALUES(
+ ARRAY[[[113,142],[1,147]]],
+ ARRAY[1.1,1.2,1.3]::float8[],
+ ARRAY[1.1,1.2,1.3],
+ ARRAY[[['aaa','aab'],['aba','abb'],['aca','acb']],[['baa','bab'],['bba','bbb'],['bca','bcb']]],
+ ARRAY['19620326','19931223','19970117']::timestamp[]
+);
+-- some more test data
+CREATE TEMP TABLE arrtest_f (f0 int, f1 text, f2 float8);
+insert into arrtest_f values(1,'cat1',1.21);
+insert into arrtest_f values(2,'cat1',1.24);
+insert into arrtest_f values(3,'cat1',1.18);
+insert into arrtest_f values(4,'cat1',1.26);
+insert into arrtest_f values(5,'cat1',1.15);
+insert into arrtest_f values(6,'cat2',1.15);
+insert into arrtest_f values(7,'cat2',1.26);
+insert into arrtest_f values(8,'cat2',1.32);
+insert into arrtest_f values(9,'cat2',1.30);
+CREATE TEMP TABLE arrtest_i (f0 int, f1 text, f2 int);
+insert into arrtest_i values(1,'cat1',21);
+insert into arrtest_i values(2,'cat1',24);
+insert into arrtest_i values(3,'cat1',18);
+insert into arrtest_i values(4,'cat1',26);
+insert into arrtest_i values(5,'cat1',15);
+insert into arrtest_i values(6,'cat2',15);
+insert into arrtest_i values(7,'cat2',26);
+insert into arrtest_i values(8,'cat2',32);
+insert into arrtest_i values(9,'cat2',30);
+-- expressions
+SELECT t.f[1][3][1] AS "131", t.f[2][2][1] AS "221" FROM (
+ SELECT ARRAY[[[111,112],[121,122],[131,132]],[[211,212],[221,122],[231,232]]] AS f
+) AS t;
+ 131 | 221
+-----+-----
+ 131 | 221
+(1 row)
+
+SELECT ARRAY[[[[[['hello'],['world']]]]]];
+ array
+---------------------------
+ {{{{{{hello},{world}}}}}}
+(1 row)
+
+SELECT ARRAY[ARRAY['hello'],ARRAY['world']];
+ array
+-------------------
+ {{hello},{world}}
+(1 row)
+
+SELECT ARRAY(select f2 from arrtest_f order by f2) AS "ARRAY";
+ ARRAY
+-----------------------------------------------
+ {1.15,1.15,1.18,1.21,1.24,1.26,1.26,1.3,1.32}
+(1 row)
+
+-- with nulls
+SELECT '{1,null,3}'::int[];
+ int4
+------------
+ {1,NULL,3}
+(1 row)
+
+SELECT ARRAY[1,NULL,3];
+ array
+------------
+ {1,NULL,3}
+(1 row)
+
+-- functions
+SELECT array_append(array[42], 6) AS "{42,6}";
+ {42,6}
+--------
+ {42,6}
+(1 row)
+
+SELECT array_prepend(6, array[42]) AS "{6,42}";
+ {6,42}
+--------
+ {6,42}
+(1 row)
+
+SELECT array_cat(ARRAY[1,2], ARRAY[3,4]) AS "{1,2,3,4}";
+ {1,2,3,4}
+-----------
+ {1,2,3,4}
+(1 row)
+
+SELECT array_cat(ARRAY[1,2], ARRAY[[3,4],[5,6]]) AS "{{1,2},{3,4},{5,6}}";
+ {{1,2},{3,4},{5,6}}
+---------------------
+ {{1,2},{3,4},{5,6}}
+(1 row)
+
+SELECT array_cat(ARRAY[[3,4],[5,6]], ARRAY[1,2]) AS "{{3,4},{5,6},{1,2}}";
+ {{3,4},{5,6},{1,2}}
+---------------------
+ {{3,4},{5,6},{1,2}}
+(1 row)
+
+SELECT array_position(ARRAY[1,2,3,4,5], 4);
+ array_position
+----------------
+ 4
+(1 row)
+
+SELECT array_position(ARRAY[5,3,4,2,1], 4);
+ array_position
+----------------
+ 3
+(1 row)
+
+SELECT array_position(ARRAY[[1,2],[3,4]], 3);
+ERROR: searching for elements in multidimensional arrays is not supported
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], 'mon');
+ array_position
+----------------
+ 2
+(1 row)
+
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], 'sat');
+ array_position
+----------------
+ 7
+(1 row)
+
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], NULL);
+ array_position
+----------------
+
+(1 row)
+
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu',NULL,'fri','sat'], NULL);
+ array_position
+----------------
+ 6
+(1 row)
+
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu',NULL,'fri','sat'], 'sat');
+ array_position
+----------------
+ 8
+(1 row)
+
+SELECT array_positions(NULL, 10);
+ array_positions
+-----------------
+
+(1 row)
+
+SELECT array_positions(NULL, NULL::int);
+ array_positions
+-----------------
+
+(1 row)
+
+SELECT array_positions(ARRAY[1,2,3,4,5,6,1,2,3,4,5,6], 4);
+ array_positions
+-----------------
+ {4,10}
+(1 row)
+
+SELECT array_positions(ARRAY[[1,2],[3,4]], 4);
+ERROR: searching for elements in multidimensional arrays is not supported
+SELECT array_positions(ARRAY[1,2,3,4,5,6,1,2,3,4,5,6], NULL);
+ array_positions
+-----------------
+ {}
+(1 row)
+
+SELECT array_positions(ARRAY[1,2,3,NULL,5,6,1,2,3,NULL,5,6], NULL);
+ array_positions
+-----------------
+ {4,10}
+(1 row)
+
+SELECT array_length(array_positions(ARRAY(SELECT 'AAAAAAAAAAAAAAAAAAAAAAAAA'::text || i % 10
+ FROM generate_series(1,100) g(i)),
+ 'AAAAAAAAAAAAAAAAAAAAAAAAA5'), 1);
+ array_length
+--------------
+ 10
+(1 row)
+
+DO $$
+DECLARE
+ o int;
+ a int[] := ARRAY[1,2,3,2,3,1,2];
+BEGIN
+ o := array_position(a, 2);
+ WHILE o IS NOT NULL
+ LOOP
+ RAISE NOTICE '%', o;
+ o := array_position(a, 2, o + 1);
+ END LOOP;
+END
+$$ LANGUAGE plpgsql;
+NOTICE: 2
+NOTICE: 4
+NOTICE: 7
+SELECT array_position('[2:4]={1,2,3}'::int[], 1);
+ array_position
+----------------
+ 2
+(1 row)
+
+SELECT array_positions('[2:4]={1,2,3}'::int[], 1);
+ array_positions
+-----------------
+ {2}
+(1 row)
+
+SELECT
+ array_position(ids, (1, 1)),
+ array_positions(ids, (1, 1))
+ FROM
+(VALUES
+ (ARRAY[(0, 0), (1, 1)]),
+ (ARRAY[(1, 1)])
+) AS f (ids);
+ array_position | array_positions
+----------------+-----------------
+ 2 | {2}
+ 1 | {1}
+(2 rows)
+
+-- operators
+SELECT a FROM arrtest WHERE b = ARRAY[[[113,142],[1,147]]];
+ a
+---------------
+ {16,25,3,4,5}
+(1 row)
+
+SELECT NOT ARRAY[1.1,1.2,1.3] = ARRAY[1.1,1.2,1.3] AS "FALSE";
+ FALSE
+-------
+ f
+(1 row)
+
+SELECT ARRAY[1,2] || 3 AS "{1,2,3}";
+ {1,2,3}
+---------
+ {1,2,3}
+(1 row)
+
+SELECT 0 || ARRAY[1,2] AS "{0,1,2}";
+ {0,1,2}
+---------
+ {0,1,2}
+(1 row)
+
+SELECT ARRAY[1,2] || ARRAY[3,4] AS "{1,2,3,4}";
+ {1,2,3,4}
+-----------
+ {1,2,3,4}
+(1 row)
+
+SELECT ARRAY[[['hello','world']]] || ARRAY[[['happy','birthday']]] AS "ARRAY";
+ ARRAY
+--------------------------------------
+ {{{hello,world}},{{happy,birthday}}}
+(1 row)
+
+SELECT ARRAY[[1,2],[3,4]] || ARRAY[5,6] AS "{{1,2},{3,4},{5,6}}";
+ {{1,2},{3,4},{5,6}}
+---------------------
+ {{1,2},{3,4},{5,6}}
+(1 row)
+
+SELECT ARRAY[0,0] || ARRAY[1,1] || ARRAY[2,2] AS "{0,0,1,1,2,2}";
+ {0,0,1,1,2,2}
+---------------
+ {0,0,1,1,2,2}
+(1 row)
+
+SELECT 0 || ARRAY[1,2] || 3 AS "{0,1,2,3}";
+ {0,1,2,3}
+-----------
+ {0,1,2,3}
+(1 row)
+
+SELECT ARRAY[1.1] || ARRAY[2,3,4];
+ ?column?
+-------------
+ {1.1,2,3,4}
+(1 row)
+
+SELECT array_agg(x) || array_agg(x) FROM (VALUES (ROW(1,2)), (ROW(3,4))) v(x);
+ ?column?
+-----------------------------------
+ {"(1,2)","(3,4)","(1,2)","(3,4)"}
+(1 row)
+
+SELECT ROW(1,2) || array_agg(x) FROM (VALUES (ROW(3,4)), (ROW(5,6))) v(x);
+ ?column?
+---------------------------
+ {"(1,2)","(3,4)","(5,6)"}
+(1 row)
+
+SELECT * FROM array_op_test WHERE i @> '{32}' ORDER BY seqno;
+ seqno | i | t
+-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------
+ 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657}
+ 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956}
+ 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066}
+ 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673}
+ 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845}
+ 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523}
+(6 rows)
+
+SELECT * FROM array_op_test WHERE i && '{32}' ORDER BY seqno;
+ seqno | i | t
+-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------
+ 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657}
+ 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956}
+ 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066}
+ 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673}
+ 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845}
+ 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523}
+(6 rows)
+
+SELECT * FROM array_op_test WHERE i @> '{17}' ORDER BY seqno;
+ seqno | i | t
+-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------
+ 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657}
+ 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576}
+ 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309}
+ 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938}
+ 53 | {38,17} | {AAAAAAAAAAA21658}
+ 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012}
+ 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066}
+ 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673}
+(8 rows)
+
+SELECT * FROM array_op_test WHERE i && '{17}' ORDER BY seqno;
+ seqno | i | t
+-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------
+ 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657}
+ 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576}
+ 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309}
+ 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938}
+ 53 | {38,17} | {AAAAAAAAAAA21658}
+ 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012}
+ 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066}
+ 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673}
+(8 rows)
+
+SELECT * FROM array_op_test WHERE i @> '{32,17}' ORDER BY seqno;
+ seqno | i | t
+-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------
+ 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657}
+ 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066}
+ 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673}
+(3 rows)
+
+SELECT * FROM array_op_test WHERE i && '{32,17}' ORDER BY seqno;
+ seqno | i | t
+-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------
+ 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657}
+ 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576}
+ 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309}
+ 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938}
+ 53 | {38,17} | {AAAAAAAAAAA21658}
+ 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012}
+ 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956}
+ 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066}
+ 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673}
+ 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845}
+ 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523}
+(11 rows)
+
+SELECT * FROM array_op_test WHERE i <@ '{38,34,32,89}' ORDER BY seqno;
+ seqno | i | t
+-------+---------------+----------------------------------------------------------------------------------------------------------------------------
+ 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623}
+ 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956}
+ 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845}
+ 101 | {} | {}
+(4 rows)
+
+SELECT * FROM array_op_test WHERE i = '{}' ORDER BY seqno;
+ seqno | i | t
+-------+----+----
+ 101 | {} | {}
+(1 row)
+
+SELECT * FROM array_op_test WHERE i @> '{}' ORDER BY seqno;
+ seqno | i | t
+-------+---------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 | {92,75,71,52,64,83} | {AAAAAAAA44066,AAAAAA1059,AAAAAAAAAAA176,AAAAAAA48038}
+ 2 | {3,6} | {AAAAAA98232,AAAAAAAA79710,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAAAAAAA55798,AAAAAAAAA12793}
+ 3 | {37,64,95,43,3,41,13,30,11,43} | {AAAAAAAAAA48845,AAAAA75968,AAAAA95309,AAA54451,AAAAAAAAAA22292,AAAAAAA99836,A96617,AA17009,AAAAAAAAAAAAAA95246}
+ 4 | {71,39,99,55,33,75,45} | {AAAAAAAAA53663,AAAAAAAAAAAAAAA67062,AAAAAAAAAA64777,AAA99043,AAAAAAAAAAAAAAAAAAA91804,39557}
+ 5 | {50,42,77,50,4} | {AAAAAAAAAAAAAAAAA26540,AAAAAAA79710,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA176,AAAAA95309,AAAAAAAAAAA46154,AAAAAA66777,AAAAAAAAA27249,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA70104}
+ 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657}
+ 7 | {12,51,88,64,8} | {AAAAAAAAAAAAAAAAAA12591,AAAAAAAAAAAAAAAAA50407,AAAAAAAAAAAA67946}
+ 8 | {60,84} | {AAAAAAA81898,AAAAAA1059,AAAAAAAAAAAA81511,AAAAA961,AAAAAAAAAAAAAAAA31334,AAAAA64741,AA6416,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAA50407}
+ 9 | {56,52,35,27,80,44,81,22} | {AAAAAAAAAAAAAAA73034,AAAAAAAAAAAAA7929,AAAAAAA66161,AA88409,39557,A27153,AAAAAAAA9523,AAAAAAAAAAA99000}
+ 10 | {71,5,45} | {AAAAAAAAAAA21658,AAAAAAAAAAAA21089,AAA54451,AAAAAAAAAAAAAAAAAA54141,AAAAAAAAAAAAAA28620,AAAAAAAAAAA21658,AAAAAAAAAAA74076,AAAAAAAAA27249}
+ 11 | {41,86,74,48,22,74,47,50} | {AAAAAAAA9523,AAAAAAAAAAAA37562,AAAAAAAAAAAAAAAA14047,AAAAAAAAAAA46154,AAAA41702,AAAAAAAAAAAAAAAAA764,AAAAA62737,39557}
+ 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576}
+ 13 | {3,52,34,23} | {AAAAAA98232,AAAA49534,AAAAAAAAAAA21658}
+ 14 | {78,57,19} | {AAAA8857,AAAAAAAAAAAAAAA73034,AAAAAAAA81587,AAAAAAAAAAAAAAA68526,AAAAA75968,AAAAAAAAAAAAAA65909,AAAAAAAAA10012,AAAAAAAAAAAAAA65909}
+ 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309}
+ 16 | {14,63,85,11} | {AAAAAA66777}
+ 17 | {7,10,81,85} | {AAAAAA43678,AAAAAAA12144,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAAAAA15356}
+ 18 | {1} | {AAAAAAAAAAA33576,AAAAA95309,64261,AAA59323,AAAAAAAAAAAAAA95246,55847,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAAAA64374}
+ 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938}
+ 20 | {72,89,70,51,54,37,8,49,79} | {AAAAAA58494}
+ 21 | {2,8,65,10,5,79,43} | {AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAAAAA91804,AAAAA64669,AAAAAAAAAAAAAAAA1443,AAAAAAAAAAAAAAAA23657,AAAAA12179,AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAA31334,AAAAAAAAAAAAAAAA41303,AAAAAAAAAAAAAAAAAAA85420}
+ 22 | {11,6,56,62,53,30} | {AAAAAAAA72908}
+ 23 | {40,90,5,38,72,40,30,10,43,55} | {A6053,AAAAAAAAAAA6119,AA44673,AAAAAAAAAAAAAAAAA764,AA17009,AAAAA17383,AAAAA70514,AAAAA33250,AAAAA95309,AAAAAAAAAAAA37562}
+ 24 | {94,61,99,35,48} | {AAAAAAAAAAA50956,AAAAAAAAAAA15165,AAAA85070,AAAAAAAAAAAAAAA36627,AAAAA961,AAAAAAAAAA55219}
+ 25 | {31,1,10,11,27,79,38} | {AAAAAAAAAAAAAAAAAA59334,45449}
+ 26 | {71,10,9,69,75} | {47735,AAAAAAA21462,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA91804,AAAAAAAAA72121,AAAAAAAAAAAAAAAAAAA1205,AAAAA41597,AAAA8857,AAAAAAAAAAAAAAAAAAA15356,AA17009}
+ 27 | {94} | {AA6416,A6053,AAAAAAA21462,AAAAAAA57334,AAAAAAAAAAAAAAAAAA12591,AA88409,AAAAAAAAAAAAA70254}
+ 28 | {14,33,6,34,14} | {AAAAAAAAAAAAAAA13198,AAAAAAAA69452,AAAAAAAAAAA82945,AAAAAAA12144,AAAAAAAAA72121,AAAAAAAAAA18601}
+ 29 | {39,21} | {AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA38885,AAAA85070,AAAAAAAAAAAAAAAAAAA70104,AAAAA66674,AAAAAAAAAAAAA62007,AAAAAAAA69452,AAAAAAA1242,AAAAAAAAAAAAAAAA1729,AAAA35194}
+ 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240}
+ 31 | {80,24,18,21,54} | {AAAAAAAAAAAAAAA13198,AAAAAAAAAAAAAAAAAAA70415,A27153,AAAAAAAAA53663,AAAAAAAAAAAAAAAAA50407,A68938}
+ 32 | {58,79,82,80,67,75,98,10,41} | {AAAAAAAAAAAAAAAAAA61286,AAA54451,AAAAAAAAAAAAAAAAAAA87527,A96617,51533}
+ 33 | {74,73} | {A85417,AAAAAAA56483,AAAAA17383,AAAAAAAAAAAAA62159,AAAAAAAAAAAA52814,AAAAAAAAAAAAA85723,AAAAAAAAAAAAAAAAAA55796}
+ 34 | {70,45} | {AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAA28620,AAAAAAAAAA55219,AAAAAAAA23648,AAAAAAAAAA22292,AAAAAAA1242}
+ 35 | {23,40} | {AAAAAAAAAAAA52814,AAAA48949,AAAAAAAAA34727,AAAA8857,AAAAAAAAAAAAAAAAAAA62179,AAAAAAAAAAAAAAA68526,AAAAAAA99836,AAAAAAAA50094,AAAA91194,AAAAAAAAAAAAA73084}
+ 36 | {79,82,14,52,30,5,79} | {AAAAAAAAA53663,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA89194,AA88409,AAAAAAAAAAAAAAA81326,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAA33598}
+ 37 | {53,11,81,39,3,78,58,64,74} | {AAAAAAAAAAAAAAAAAAA17075,AAAAAAA66161,AAAAAAAA23648,AAAAAAAAAAAAAA10611}
+ 38 | {59,5,4,95,28} | {AAAAAAAAAAA82945,A96617,47735,AAAAA12179,AAAAA64669,AAAAAA99807,AA74433,AAAAAAAAAAAAAAAAA59387}
+ 39 | {82,43,99,16,74} | {AAAAAAAAAAAAAAA67062,AAAAAAA57334,AAAAAAAAAAAAAA65909,A27153,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAA64777,AAAAAAAAAAAA81511,AAAAAAAAAAAAAA65909,AAAAAAAAAAAAAA28620}
+ 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623}
+ 41 | {19,26,63,12,93,73,27,94} | {AAAAAAA79710,AAAAAAAAAA55219,AAAA41702,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAAAAA63050,AAAAAAA99836,AAAAAAAAAAAAAA8666}
+ 42 | {15,76,82,75,8,91} | {AAAAAAAAAAA176,AAAAAA38063,45449,AAAAAA54032,AAAAAAA81898,AA6416,AAAAAAAAAAAAAAAAAAA62179,45449,AAAAA60038,AAAAAAAA81587}
+ 43 | {39,87,91,97,79,28} | {AAAAAAAAAAA74076,A96617,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAAAAA55796,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAA67946}
+ 44 | {40,58,68,29,54} | {AAAAAAA81898,AAAAAA66777,AAAAAA98232}
+ 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}
+ 46 | {53,24} | {AAAAAAAAAAA53908,AAAAAA54032,AAAAA17383,AAAA48949,AAAAAAAAAA18601,AAAAA64669,45449,AAAAAAAAAAA98051,AAAAAAAAAAAAAAAAAA71621}
+ 47 | {98,23,64,12,75,61} | {AAA59323,AAAAA95309,AAAAAAAAAAAAAAAA31334,AAAAAAAAA27249,AAAAA17383,AAAAAAAAAAAA37562,AAAAAA1059,A84822,55847,AAAAA70466}
+ 48 | {76,14} | {AAAAAAAAAAAAA59671,AAAAAAAAAAAAAAAAAAA91804,AAAAAA66777,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAA73084,AAAAAAA79710,AAAAAAAAAAAAAAA40402,AAAAAAAAAAAAAAAAAAA65037}
+ 49 | {56,5,54,37,49} | {AA21643,AAAAAAAAAAA92631,AAAAAAAA81587}
+ 50 | {20,12,37,64,93} | {AAAAAAAAAA5483,AAAAAAAAAAAAAAAAAAA1205,AA6416,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAAAA47955}
+ 51 | {47} | {AAAAAAAAAAAAAA96505,AAAAAAAAAAAAAAAAAA36842,AAAAA95309,AAAAAAAA81587,AA6416,AAAA91194,AAAAAA58494,AAAAAA1059,AAAAAAAA69452}
+ 52 | {89,0} | {AAAAAAAAAAAAAAAAAA47955,AAAAAAA48038,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAA73084,AAAAA70466,AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA46154,AA66862}
+ 53 | {38,17} | {AAAAAAAAAAA21658}
+ 54 | {70,47} | {AAAAAAAAAAAAAAAAAA54141,AAAAA40681,AAAAAAA48038,AAAAAAAAAAAAAAAA29150,AAAAA41597,AAAAAAAAAAAAAAAAAA59334,AA15322}
+ 55 | {47,79,47,64,72,25,71,24,93} | {AAAAAAAAAAAAAAAAAA55796,AAAAA62737}
+ 56 | {33,7,60,54,93,90,77,85,39} | {AAAAAAAAAAAAAAAAAA32918,AA42406}
+ 57 | {23,45,10,42,36,21,9,96} | {AAAAAAAAAAAAAAAAAAA70415}
+ 58 | {92} | {AAAAAAAAAAAAAAAA98414,AAAAAAAA23648,AAAAAAAAAAAAAAAAAA55796,AA25381,AAAAAAAAAAA6119}
+ 59 | {9,69,46,77} | {39557,AAAAAAA89932,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAAAAAA26540,AAA20874,AA6416,AAAAAAAAAAAAAAAAAA47955}
+ 60 | {62,2,59,38,89} | {AAAAAAA89932,AAAAAAAAAAAAAAAAAAA15356,AA99927,AA17009,AAAAAAAAAAAAAAA35875}
+ 61 | {72,2,44,95,54,54,13} | {AAAAAAAAAAAAAAAAAAA91804}
+ 62 | {83,72,29,73} | {AAAAAAAAAAAAA15097,AAAA8857,AAAAAAAAAAAA35809,AAAAAAAAAAAA52814,AAAAAAAAAAAAAAAAAAA38885,AAAAAAAAAAAAAAAAAA24183,AAAAAA43678,A96617}
+ 63 | {11,4,61,87} | {AAAAAAAAA27249,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAA13198,AAA20874,39557,51533,AAAAAAAAAAA53908,AAAAAAAAAAAAAA96505,AAAAAAAA78938}
+ 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240}
+ 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012}
+ 66 | {31,23,70,52,4,33,48,25} | {AAAAAAAAAAAAAAAAA69675,AAAAAAAA50094,AAAAAAAAAAA92631,AAAA35194,39557,AAAAAAA99836}
+ 67 | {31,94,7,10} | {AAAAAA38063,A96617,AAAA35194,AAAAAAAAAAAA67946}
+ 68 | {90,43,38} | {AA75092,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAA92631,AAAAAAAAA10012,AAAAAAAAAAAAA7929,AA21643}
+ 69 | {67,35,99,85,72,86,44} | {AAAAAAAAAAAAAAAAAAA1205,AAAAAAAA50094,AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAAAAAAA47955}
+ 70 | {56,70,83} | {AAAA41702,AAAAAAAAAAA82945,AA21643,AAAAAAAAAAA99000,A27153,AA25381,AAAAAAAAAAAAAA96505,AAAAAAA1242}
+ 71 | {74,26} | {AAAAAAAAAAA50956,AA74433,AAAAAAA21462,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAA70254,AAAAAAAAAA43419,39557}
+ 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407}
+ 73 | {88,25,96,78,65,15,29,19} | {AAA54451,AAAAAAAAA27249,AAAAAAA9228,AAAAAAAAAAAAAAA67062,AAAAAAAAAAAAAAAAAAA70415,AAAAA17383,AAAAAAAAAAAAAAAA33598}
+ 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956}
+ 75 | {12,96,83,24,71,89,55} | {AAAA48949,AAAAAAAA29716,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAA29150,AAA28075,AAAAAAAAAAAAAAAAA43052}
+ 76 | {92,55,10,7} | {AAAAAAAAAAAAAAA67062}
+ 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066}
+ 78 | {55,89,44,84,34} | {AAAAAAAAAAA6119,AAAAAAAAAAAAAA8666,AA99927,AA42406,AAAAAAA81898,AAAAAAA9228,AAAAAAAAAAA92631,AA21643,AAAAAAAAAAAAAA28620}
+ 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908}
+ 80 | {74,89,44,80,0} | {AAAA35194,AAAAAAAA79710,AAA20874,AAAAAAAAAAAAAAAAAAA70104,AAAAAAAAAAAAA73084,AAAAAAA57334,AAAAAAA9228,AAAAAAAAAAAAA62007}
+ 81 | {63,77,54,48,61,53,97} | {AAAAAAAAAAAAAAA81326,AAAAAAAAAA22292,AA25381,AAAAAAAAAAA74076,AAAAAAA81898,AAAAAAAAA72121}
+ 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104}
+ 83 | {14,10} | {AAAAAAAAAA22292,AAAAAAAAAAAAA70254,AAAAAAAAAAA6119}
+ 84 | {11,83,35,13,96,94} | {AAAAA95309,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAAA24183}
+ 85 | {39,60} | {AAAAAAAAAAAAAAAA55798,AAAAAAAAAA22292,AAAAAAA66161,AAAAAAA21462,AAAAAAAAAAAAAAAAAA12591,55847,AAAAAA98232,AAAAAAAAAAA46154}
+ 86 | {33,81,72,74,45,36,82} | {AAAAAAAA81587,AAAAAAAAAAAAAA96505,45449,AAAA80176}
+ 87 | {57,27,50,12,97,68} | {AAAAAAAAAAAAAAAAA26540,AAAAAAAAA10012,AAAAAAAAAAAA35809,AAAAAAAAAAAAAAAA29150,AAAAAAAAAAA82945,AAAAAA66777,31228,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAA96505}
+ 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433}
+ 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673}
+ 90 | {88,75} | {AAAAA60038,AAAAAAAA23648,AAAAAAAAAAA99000,AAAA41702,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAA68526}
+ 91 | {78} | {AAAAAAAAAAAAA62007,AAA99043}
+ 92 | {85,63,49,45} | {AAAAAAA89932,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA21089}
+ 93 | {11} | {AAAAAAAAAAA176,AAAAAAAAAAAAAA8666,AAAAAAAAAAAAAAA453,AAAAAAAAAAAAA85723,A68938,AAAAAAAAAAAAA9821,AAAAAAA48038,AAAAAAAAAAAAAAAAA59387,AA99927,AAAAA17383}
+ 94 | {98,9,85,62,88,91,60,61,38,86} | {AAAAAAAA81587,AAAAA17383,AAAAAAAA81587}
+ 95 | {47,77} | {AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA74076,AAAAAAAAAA18107,AAAAA40681,AAAAAAAAAAAAAAA35875,AAAAA60038,AAAAAAA56483}
+ 96 | {23,97,43} | {AAAAAAAAAA646,A87088}
+ 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643}
+ 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845}
+ 99 | {37,86} | {AAAAAAAAAAAAAAAAAA32918,AAAAA70514,AAAAAAAAA10012,AAAAAAAAAAAAAAAAA59387,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA15356}
+ 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523}
+ 101 | {} | {}
+ 102 | {NULL} | {NULL}
+(102 rows)
+
+SELECT * FROM array_op_test WHERE i && '{}' ORDER BY seqno;
+ seqno | i | t
+-------+---+---
+(0 rows)
+
+SELECT * FROM array_op_test WHERE i <@ '{}' ORDER BY seqno;
+ seqno | i | t
+-------+----+----
+ 101 | {} | {}
+(1 row)
+
+SELECT * FROM array_op_test WHERE i = '{NULL}' ORDER BY seqno;
+ seqno | i | t
+-------+--------+--------
+ 102 | {NULL} | {NULL}
+(1 row)
+
+SELECT * FROM array_op_test WHERE i @> '{NULL}' ORDER BY seqno;
+ seqno | i | t
+-------+---+---
+(0 rows)
+
+SELECT * FROM array_op_test WHERE i && '{NULL}' ORDER BY seqno;
+ seqno | i | t
+-------+---+---
+(0 rows)
+
+SELECT * FROM array_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
+ seqno | i | t
+-------+----+----
+ 101 | {} | {}
+(1 row)
+
+SELECT * FROM array_op_test WHERE t @> '{AAAAAAAA72908}' ORDER BY seqno;
+ seqno | i | t
+-------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------
+ 22 | {11,6,56,62,53,30} | {AAAAAAAA72908}
+ 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}
+ 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407}
+ 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908}
+(4 rows)
+
+SELECT * FROM array_op_test WHERE t && '{AAAAAAAA72908}' ORDER BY seqno;
+ seqno | i | t
+-------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------
+ 22 | {11,6,56,62,53,30} | {AAAAAAAA72908}
+ 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}
+ 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407}
+ 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908}
+(4 rows)
+
+SELECT * FROM array_op_test WHERE t @> '{AAAAAAAAAA646}' ORDER BY seqno;
+ seqno | i | t
+-------+------------------+--------------------------------------------------------------------
+ 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309}
+ 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908}
+ 96 | {23,97,43} | {AAAAAAAAAA646,A87088}
+(3 rows)
+
+SELECT * FROM array_op_test WHERE t && '{AAAAAAAAAA646}' ORDER BY seqno;
+ seqno | i | t
+-------+------------------+--------------------------------------------------------------------
+ 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309}
+ 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908}
+ 96 | {23,97,43} | {AAAAAAAAAA646,A87088}
+(3 rows)
+
+SELECT * FROM array_op_test WHERE t @> '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno;
+ seqno | i | t
+-------+------+--------------------------------------------------------------------
+ 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908}
+(1 row)
+
+SELECT * FROM array_op_test WHERE t && '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno;
+ seqno | i | t
+-------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------
+ 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309}
+ 22 | {11,6,56,62,53,30} | {AAAAAAAA72908}
+ 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}
+ 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407}
+ 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908}
+ 96 | {23,97,43} | {AAAAAAAAAA646,A87088}
+(6 rows)
+
+SELECT * FROM array_op_test WHERE t <@ '{AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}' ORDER BY seqno;
+ seqno | i | t
+-------+--------------------+-----------------------------------------------------------------------------------------------------------
+ 22 | {11,6,56,62,53,30} | {AAAAAAAA72908}
+ 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}
+ 101 | {} | {}
+(3 rows)
+
+SELECT * FROM array_op_test WHERE t = '{}' ORDER BY seqno;
+ seqno | i | t
+-------+----+----
+ 101 | {} | {}
+(1 row)
+
+SELECT * FROM array_op_test WHERE t @> '{}' ORDER BY seqno;
+ seqno | i | t
+-------+---------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 | {92,75,71,52,64,83} | {AAAAAAAA44066,AAAAAA1059,AAAAAAAAAAA176,AAAAAAA48038}
+ 2 | {3,6} | {AAAAAA98232,AAAAAAAA79710,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAAAAAAA55798,AAAAAAAAA12793}
+ 3 | {37,64,95,43,3,41,13,30,11,43} | {AAAAAAAAAA48845,AAAAA75968,AAAAA95309,AAA54451,AAAAAAAAAA22292,AAAAAAA99836,A96617,AA17009,AAAAAAAAAAAAAA95246}
+ 4 | {71,39,99,55,33,75,45} | {AAAAAAAAA53663,AAAAAAAAAAAAAAA67062,AAAAAAAAAA64777,AAA99043,AAAAAAAAAAAAAAAAAAA91804,39557}
+ 5 | {50,42,77,50,4} | {AAAAAAAAAAAAAAAAA26540,AAAAAAA79710,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA176,AAAAA95309,AAAAAAAAAAA46154,AAAAAA66777,AAAAAAAAA27249,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA70104}
+ 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657}
+ 7 | {12,51,88,64,8} | {AAAAAAAAAAAAAAAAAA12591,AAAAAAAAAAAAAAAAA50407,AAAAAAAAAAAA67946}
+ 8 | {60,84} | {AAAAAAA81898,AAAAAA1059,AAAAAAAAAAAA81511,AAAAA961,AAAAAAAAAAAAAAAA31334,AAAAA64741,AA6416,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAA50407}
+ 9 | {56,52,35,27,80,44,81,22} | {AAAAAAAAAAAAAAA73034,AAAAAAAAAAAAA7929,AAAAAAA66161,AA88409,39557,A27153,AAAAAAAA9523,AAAAAAAAAAA99000}
+ 10 | {71,5,45} | {AAAAAAAAAAA21658,AAAAAAAAAAAA21089,AAA54451,AAAAAAAAAAAAAAAAAA54141,AAAAAAAAAAAAAA28620,AAAAAAAAAAA21658,AAAAAAAAAAA74076,AAAAAAAAA27249}
+ 11 | {41,86,74,48,22,74,47,50} | {AAAAAAAA9523,AAAAAAAAAAAA37562,AAAAAAAAAAAAAAAA14047,AAAAAAAAAAA46154,AAAA41702,AAAAAAAAAAAAAAAAA764,AAAAA62737,39557}
+ 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576}
+ 13 | {3,52,34,23} | {AAAAAA98232,AAAA49534,AAAAAAAAAAA21658}
+ 14 | {78,57,19} | {AAAA8857,AAAAAAAAAAAAAAA73034,AAAAAAAA81587,AAAAAAAAAAAAAAA68526,AAAAA75968,AAAAAAAAAAAAAA65909,AAAAAAAAA10012,AAAAAAAAAAAAAA65909}
+ 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309}
+ 16 | {14,63,85,11} | {AAAAAA66777}
+ 17 | {7,10,81,85} | {AAAAAA43678,AAAAAAA12144,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAAAAA15356}
+ 18 | {1} | {AAAAAAAAAAA33576,AAAAA95309,64261,AAA59323,AAAAAAAAAAAAAA95246,55847,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAAAA64374}
+ 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938}
+ 20 | {72,89,70,51,54,37,8,49,79} | {AAAAAA58494}
+ 21 | {2,8,65,10,5,79,43} | {AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAAAAA91804,AAAAA64669,AAAAAAAAAAAAAAAA1443,AAAAAAAAAAAAAAAA23657,AAAAA12179,AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAA31334,AAAAAAAAAAAAAAAA41303,AAAAAAAAAAAAAAAAAAA85420}
+ 22 | {11,6,56,62,53,30} | {AAAAAAAA72908}
+ 23 | {40,90,5,38,72,40,30,10,43,55} | {A6053,AAAAAAAAAAA6119,AA44673,AAAAAAAAAAAAAAAAA764,AA17009,AAAAA17383,AAAAA70514,AAAAA33250,AAAAA95309,AAAAAAAAAAAA37562}
+ 24 | {94,61,99,35,48} | {AAAAAAAAAAA50956,AAAAAAAAAAA15165,AAAA85070,AAAAAAAAAAAAAAA36627,AAAAA961,AAAAAAAAAA55219}
+ 25 | {31,1,10,11,27,79,38} | {AAAAAAAAAAAAAAAAAA59334,45449}
+ 26 | {71,10,9,69,75} | {47735,AAAAAAA21462,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA91804,AAAAAAAAA72121,AAAAAAAAAAAAAAAAAAA1205,AAAAA41597,AAAA8857,AAAAAAAAAAAAAAAAAAA15356,AA17009}
+ 27 | {94} | {AA6416,A6053,AAAAAAA21462,AAAAAAA57334,AAAAAAAAAAAAAAAAAA12591,AA88409,AAAAAAAAAAAAA70254}
+ 28 | {14,33,6,34,14} | {AAAAAAAAAAAAAAA13198,AAAAAAAA69452,AAAAAAAAAAA82945,AAAAAAA12144,AAAAAAAAA72121,AAAAAAAAAA18601}
+ 29 | {39,21} | {AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA38885,AAAA85070,AAAAAAAAAAAAAAAAAAA70104,AAAAA66674,AAAAAAAAAAAAA62007,AAAAAAAA69452,AAAAAAA1242,AAAAAAAAAAAAAAAA1729,AAAA35194}
+ 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240}
+ 31 | {80,24,18,21,54} | {AAAAAAAAAAAAAAA13198,AAAAAAAAAAAAAAAAAAA70415,A27153,AAAAAAAAA53663,AAAAAAAAAAAAAAAAA50407,A68938}
+ 32 | {58,79,82,80,67,75,98,10,41} | {AAAAAAAAAAAAAAAAAA61286,AAA54451,AAAAAAAAAAAAAAAAAAA87527,A96617,51533}
+ 33 | {74,73} | {A85417,AAAAAAA56483,AAAAA17383,AAAAAAAAAAAAA62159,AAAAAAAAAAAA52814,AAAAAAAAAAAAA85723,AAAAAAAAAAAAAAAAAA55796}
+ 34 | {70,45} | {AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAA28620,AAAAAAAAAA55219,AAAAAAAA23648,AAAAAAAAAA22292,AAAAAAA1242}
+ 35 | {23,40} | {AAAAAAAAAAAA52814,AAAA48949,AAAAAAAAA34727,AAAA8857,AAAAAAAAAAAAAAAAAAA62179,AAAAAAAAAAAAAAA68526,AAAAAAA99836,AAAAAAAA50094,AAAA91194,AAAAAAAAAAAAA73084}
+ 36 | {79,82,14,52,30,5,79} | {AAAAAAAAA53663,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA89194,AA88409,AAAAAAAAAAAAAAA81326,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAA33598}
+ 37 | {53,11,81,39,3,78,58,64,74} | {AAAAAAAAAAAAAAAAAAA17075,AAAAAAA66161,AAAAAAAA23648,AAAAAAAAAAAAAA10611}
+ 38 | {59,5,4,95,28} | {AAAAAAAAAAA82945,A96617,47735,AAAAA12179,AAAAA64669,AAAAAA99807,AA74433,AAAAAAAAAAAAAAAAA59387}
+ 39 | {82,43,99,16,74} | {AAAAAAAAAAAAAAA67062,AAAAAAA57334,AAAAAAAAAAAAAA65909,A27153,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAA64777,AAAAAAAAAAAA81511,AAAAAAAAAAAAAA65909,AAAAAAAAAAAAAA28620}
+ 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623}
+ 41 | {19,26,63,12,93,73,27,94} | {AAAAAAA79710,AAAAAAAAAA55219,AAAA41702,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAAAAA63050,AAAAAAA99836,AAAAAAAAAAAAAA8666}
+ 42 | {15,76,82,75,8,91} | {AAAAAAAAAAA176,AAAAAA38063,45449,AAAAAA54032,AAAAAAA81898,AA6416,AAAAAAAAAAAAAAAAAAA62179,45449,AAAAA60038,AAAAAAAA81587}
+ 43 | {39,87,91,97,79,28} | {AAAAAAAAAAA74076,A96617,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAAAAA55796,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAA67946}
+ 44 | {40,58,68,29,54} | {AAAAAAA81898,AAAAAA66777,AAAAAA98232}
+ 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}
+ 46 | {53,24} | {AAAAAAAAAAA53908,AAAAAA54032,AAAAA17383,AAAA48949,AAAAAAAAAA18601,AAAAA64669,45449,AAAAAAAAAAA98051,AAAAAAAAAAAAAAAAAA71621}
+ 47 | {98,23,64,12,75,61} | {AAA59323,AAAAA95309,AAAAAAAAAAAAAAAA31334,AAAAAAAAA27249,AAAAA17383,AAAAAAAAAAAA37562,AAAAAA1059,A84822,55847,AAAAA70466}
+ 48 | {76,14} | {AAAAAAAAAAAAA59671,AAAAAAAAAAAAAAAAAAA91804,AAAAAA66777,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAA73084,AAAAAAA79710,AAAAAAAAAAAAAAA40402,AAAAAAAAAAAAAAAAAAA65037}
+ 49 | {56,5,54,37,49} | {AA21643,AAAAAAAAAAA92631,AAAAAAAA81587}
+ 50 | {20,12,37,64,93} | {AAAAAAAAAA5483,AAAAAAAAAAAAAAAAAAA1205,AA6416,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAAAA47955}
+ 51 | {47} | {AAAAAAAAAAAAAA96505,AAAAAAAAAAAAAAAAAA36842,AAAAA95309,AAAAAAAA81587,AA6416,AAAA91194,AAAAAA58494,AAAAAA1059,AAAAAAAA69452}
+ 52 | {89,0} | {AAAAAAAAAAAAAAAAAA47955,AAAAAAA48038,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAA73084,AAAAA70466,AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA46154,AA66862}
+ 53 | {38,17} | {AAAAAAAAAAA21658}
+ 54 | {70,47} | {AAAAAAAAAAAAAAAAAA54141,AAAAA40681,AAAAAAA48038,AAAAAAAAAAAAAAAA29150,AAAAA41597,AAAAAAAAAAAAAAAAAA59334,AA15322}
+ 55 | {47,79,47,64,72,25,71,24,93} | {AAAAAAAAAAAAAAAAAA55796,AAAAA62737}
+ 56 | {33,7,60,54,93,90,77,85,39} | {AAAAAAAAAAAAAAAAAA32918,AA42406}
+ 57 | {23,45,10,42,36,21,9,96} | {AAAAAAAAAAAAAAAAAAA70415}
+ 58 | {92} | {AAAAAAAAAAAAAAAA98414,AAAAAAAA23648,AAAAAAAAAAAAAAAAAA55796,AA25381,AAAAAAAAAAA6119}
+ 59 | {9,69,46,77} | {39557,AAAAAAA89932,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAAAAAA26540,AAA20874,AA6416,AAAAAAAAAAAAAAAAAA47955}
+ 60 | {62,2,59,38,89} | {AAAAAAA89932,AAAAAAAAAAAAAAAAAAA15356,AA99927,AA17009,AAAAAAAAAAAAAAA35875}
+ 61 | {72,2,44,95,54,54,13} | {AAAAAAAAAAAAAAAAAAA91804}
+ 62 | {83,72,29,73} | {AAAAAAAAAAAAA15097,AAAA8857,AAAAAAAAAAAA35809,AAAAAAAAAAAA52814,AAAAAAAAAAAAAAAAAAA38885,AAAAAAAAAAAAAAAAAA24183,AAAAAA43678,A96617}
+ 63 | {11,4,61,87} | {AAAAAAAAA27249,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAA13198,AAA20874,39557,51533,AAAAAAAAAAA53908,AAAAAAAAAAAAAA96505,AAAAAAAA78938}
+ 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240}
+ 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012}
+ 66 | {31,23,70,52,4,33,48,25} | {AAAAAAAAAAAAAAAAA69675,AAAAAAAA50094,AAAAAAAAAAA92631,AAAA35194,39557,AAAAAAA99836}
+ 67 | {31,94,7,10} | {AAAAAA38063,A96617,AAAA35194,AAAAAAAAAAAA67946}
+ 68 | {90,43,38} | {AA75092,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAA92631,AAAAAAAAA10012,AAAAAAAAAAAAA7929,AA21643}
+ 69 | {67,35,99,85,72,86,44} | {AAAAAAAAAAAAAAAAAAA1205,AAAAAAAA50094,AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAAAAAAA47955}
+ 70 | {56,70,83} | {AAAA41702,AAAAAAAAAAA82945,AA21643,AAAAAAAAAAA99000,A27153,AA25381,AAAAAAAAAAAAAA96505,AAAAAAA1242}
+ 71 | {74,26} | {AAAAAAAAAAA50956,AA74433,AAAAAAA21462,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAA70254,AAAAAAAAAA43419,39557}
+ 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407}
+ 73 | {88,25,96,78,65,15,29,19} | {AAA54451,AAAAAAAAA27249,AAAAAAA9228,AAAAAAAAAAAAAAA67062,AAAAAAAAAAAAAAAAAAA70415,AAAAA17383,AAAAAAAAAAAAAAAA33598}
+ 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956}
+ 75 | {12,96,83,24,71,89,55} | {AAAA48949,AAAAAAAA29716,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAA29150,AAA28075,AAAAAAAAAAAAAAAAA43052}
+ 76 | {92,55,10,7} | {AAAAAAAAAAAAAAA67062}
+ 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066}
+ 78 | {55,89,44,84,34} | {AAAAAAAAAAA6119,AAAAAAAAAAAAAA8666,AA99927,AA42406,AAAAAAA81898,AAAAAAA9228,AAAAAAAAAAA92631,AA21643,AAAAAAAAAAAAAA28620}
+ 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908}
+ 80 | {74,89,44,80,0} | {AAAA35194,AAAAAAAA79710,AAA20874,AAAAAAAAAAAAAAAAAAA70104,AAAAAAAAAAAAA73084,AAAAAAA57334,AAAAAAA9228,AAAAAAAAAAAAA62007}
+ 81 | {63,77,54,48,61,53,97} | {AAAAAAAAAAAAAAA81326,AAAAAAAAAA22292,AA25381,AAAAAAAAAAA74076,AAAAAAA81898,AAAAAAAAA72121}
+ 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104}
+ 83 | {14,10} | {AAAAAAAAAA22292,AAAAAAAAAAAAA70254,AAAAAAAAAAA6119}
+ 84 | {11,83,35,13,96,94} | {AAAAA95309,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAAA24183}
+ 85 | {39,60} | {AAAAAAAAAAAAAAAA55798,AAAAAAAAAA22292,AAAAAAA66161,AAAAAAA21462,AAAAAAAAAAAAAAAAAA12591,55847,AAAAAA98232,AAAAAAAAAAA46154}
+ 86 | {33,81,72,74,45,36,82} | {AAAAAAAA81587,AAAAAAAAAAAAAA96505,45449,AAAA80176}
+ 87 | {57,27,50,12,97,68} | {AAAAAAAAAAAAAAAAA26540,AAAAAAAAA10012,AAAAAAAAAAAA35809,AAAAAAAAAAAAAAAA29150,AAAAAAAAAAA82945,AAAAAA66777,31228,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAA96505}
+ 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433}
+ 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673}
+ 90 | {88,75} | {AAAAA60038,AAAAAAAA23648,AAAAAAAAAAA99000,AAAA41702,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAA68526}
+ 91 | {78} | {AAAAAAAAAAAAA62007,AAA99043}
+ 92 | {85,63,49,45} | {AAAAAAA89932,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA21089}
+ 93 | {11} | {AAAAAAAAAAA176,AAAAAAAAAAAAAA8666,AAAAAAAAAAAAAAA453,AAAAAAAAAAAAA85723,A68938,AAAAAAAAAAAAA9821,AAAAAAA48038,AAAAAAAAAAAAAAAAA59387,AA99927,AAAAA17383}
+ 94 | {98,9,85,62,88,91,60,61,38,86} | {AAAAAAAA81587,AAAAA17383,AAAAAAAA81587}
+ 95 | {47,77} | {AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA74076,AAAAAAAAAA18107,AAAAA40681,AAAAAAAAAAAAAAA35875,AAAAA60038,AAAAAAA56483}
+ 96 | {23,97,43} | {AAAAAAAAAA646,A87088}
+ 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643}
+ 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845}
+ 99 | {37,86} | {AAAAAAAAAAAAAAAAAA32918,AAAAA70514,AAAAAAAAA10012,AAAAAAAAAAAAAAAAA59387,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA15356}
+ 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523}
+ 101 | {} | {}
+ 102 | {NULL} | {NULL}
+(102 rows)
+
+SELECT * FROM array_op_test WHERE t && '{}' ORDER BY seqno;
+ seqno | i | t
+-------+---+---
+(0 rows)
+
+SELECT * FROM array_op_test WHERE t <@ '{}' ORDER BY seqno;
+ seqno | i | t
+-------+----+----
+ 101 | {} | {}
+(1 row)
+
+-- array casts
+SELECT ARRAY[1,2,3]::text[]::int[]::float8[] AS "{1,2,3}";
+ {1,2,3}
+---------
+ {1,2,3}
+(1 row)
+
+SELECT pg_typeof(ARRAY[1,2,3]::text[]::int[]::float8[]) AS "double precision[]";
+ double precision[]
+--------------------
+ double precision[]
+(1 row)
+
+SELECT ARRAY[['a','bc'],['def','hijk']]::text[]::varchar[] AS "{{a,bc},{def,hijk}}";
+ {{a,bc},{def,hijk}}
+---------------------
+ {{a,bc},{def,hijk}}
+(1 row)
+
+SELECT pg_typeof(ARRAY[['a','bc'],['def','hijk']]::text[]::varchar[]) AS "character varying[]";
+ character varying[]
+---------------------
+ character varying[]
+(1 row)
+
+SELECT CAST(ARRAY[[[[[['a','bb','ccc']]]]]] as text[]) as "{{{{{{a,bb,ccc}}}}}}";
+ {{{{{{a,bb,ccc}}}}}}
+----------------------
+ {{{{{{a,bb,ccc}}}}}}
+(1 row)
+
+SELECT NULL::text[]::int[] AS "NULL";
+ NULL
+------
+
+(1 row)
+
+-- scalar op any/all (array)
+select 33 = any ('{1,2,3}');
+ ?column?
+----------
+ f
+(1 row)
+
+select 33 = any ('{1,2,33}');
+ ?column?
+----------
+ t
+(1 row)
+
+select 33 = all ('{1,2,33}');
+ ?column?
+----------
+ f
+(1 row)
+
+select 33 >= all ('{1,2,33}');
+ ?column?
+----------
+ t
+(1 row)
+
+-- boundary cases
+select null::int >= all ('{1,2,33}');
+ ?column?
+----------
+
+(1 row)
+
+select null::int >= all ('{}');
+ ?column?
+----------
+ t
+(1 row)
+
+select null::int >= any ('{}');
+ ?column?
+----------
+ f
+(1 row)
+
+-- cross-datatype
+select 33.4 = any (array[1,2,3]);
+ ?column?
+----------
+ f
+(1 row)
+
+select 33.4 > all (array[1,2,3]);
+ ?column?
+----------
+ t
+(1 row)
+
+-- errors
+select 33 * any ('{1,2,3}');
+ERROR: op ANY/ALL (array) requires operator to yield boolean
+LINE 1: select 33 * any ('{1,2,3}');
+ ^
+select 33 * any (44);
+ERROR: op ANY/ALL (array) requires array on right side
+LINE 1: select 33 * any (44);
+ ^
+-- nulls
+select 33 = any (null::int[]);
+ ?column?
+----------
+
+(1 row)
+
+select null::int = any ('{1,2,3}');
+ ?column?
+----------
+
+(1 row)
+
+select 33 = any ('{1,null,3}');
+ ?column?
+----------
+
+(1 row)
+
+select 33 = any ('{1,null,33}');
+ ?column?
+----------
+ t
+(1 row)
+
+select 33 = all (null::int[]);
+ ?column?
+----------
+
+(1 row)
+
+select null::int = all ('{1,2,3}');
+ ?column?
+----------
+
+(1 row)
+
+select 33 = all ('{1,null,3}');
+ ?column?
+----------
+ f
+(1 row)
+
+select 33 = all ('{33,null,33}');
+ ?column?
+----------
+
+(1 row)
+
+-- nulls later in the bitmap
+SELECT -1 != ALL(ARRAY(SELECT NULLIF(g.i, 900) FROM generate_series(1,1000) g(i)));
+ ?column?
+----------
+
+(1 row)
+
+-- test indexes on arrays
+create temp table arr_tbl (f1 int[] unique);
+insert into arr_tbl values ('{1,2,3}');
+insert into arr_tbl values ('{1,2}');
+-- failure expected:
+insert into arr_tbl values ('{1,2,3}');
+ERROR: duplicate key value violates unique constraint "arr_tbl_f1_key"
+DETAIL: Key (f1)=({1,2,3}) already exists.
+insert into arr_tbl values ('{2,3,4}');
+insert into arr_tbl values ('{1,5,3}');
+insert into arr_tbl values ('{1,2,10}');
+set enable_seqscan to off;
+set enable_bitmapscan to off;
+select * from arr_tbl where f1 > '{1,2,3}' and f1 <= '{1,5,3}';
+ f1
+----------
+ {1,2,10}
+ {1,5,3}
+(2 rows)
+
+select * from arr_tbl where f1 >= '{1,2,3}' and f1 < '{1,5,3}';
+ f1
+----------
+ {1,2,3}
+ {1,2,10}
+(2 rows)
+
+-- test ON CONFLICT DO UPDATE with arrays
+create temp table arr_pk_tbl (pk int4 primary key, f1 int[]);
+insert into arr_pk_tbl values (1, '{1,2,3}');
+insert into arr_pk_tbl values (1, '{3,4,5}') on conflict (pk)
+ do update set f1[1] = excluded.f1[1], f1[3] = excluded.f1[3]
+ returning pk, f1;
+ pk | f1
+----+---------
+ 1 | {3,2,5}
+(1 row)
+
+insert into arr_pk_tbl(pk, f1[1:2]) values (1, '{6,7,8}') on conflict (pk)
+ do update set f1[1] = excluded.f1[1],
+ f1[2] = excluded.f1[2],
+ f1[3] = excluded.f1[3]
+ returning pk, f1;
+ pk | f1
+----+------------
+ 1 | {6,7,NULL}
+(1 row)
+
+-- note: if above selects don't produce the expected tuple order,
+-- then you didn't get an indexscan plan, and something is busted.
+reset enable_seqscan;
+reset enable_bitmapscan;
+-- test [not] (like|ilike) (any|all) (...)
+select 'foo' like any (array['%a', '%o']); -- t
+ ?column?
+----------
+ t
+(1 row)
+
+select 'foo' like any (array['%a', '%b']); -- f
+ ?column?
+----------
+ f
+(1 row)
+
+select 'foo' like all (array['f%', '%o']); -- t
+ ?column?
+----------
+ t
+(1 row)
+
+select 'foo' like all (array['f%', '%b']); -- f
+ ?column?
+----------
+ f
+(1 row)
+
+select 'foo' not like any (array['%a', '%b']); -- t
+ ?column?
+----------
+ t
+(1 row)
+
+select 'foo' not like all (array['%a', '%o']); -- f
+ ?column?
+----------
+ f
+(1 row)
+
+select 'foo' ilike any (array['%A', '%O']); -- t
+ ?column?
+----------
+ t
+(1 row)
+
+select 'foo' ilike all (array['F%', '%O']); -- t
+ ?column?
+----------
+ t
+(1 row)
+
+--
+-- General array parser tests
+--
+-- none of the following should be accepted
+select '{{1,{2}},{2,3}}'::text[];
+ERROR: malformed array literal: "{{1,{2}},{2,3}}"
+LINE 1: select '{{1,{2}},{2,3}}'::text[];
+ ^
+DETAIL: Unexpected "{" character.
+select '{{},{}}'::text[];
+ERROR: malformed array literal: "{{},{}}"
+LINE 1: select '{{},{}}'::text[];
+ ^
+DETAIL: Unexpected "}" character.
+select E'{{1,2},\\{2,3}}'::text[];
+ERROR: malformed array literal: "{{1,2},\{2,3}}"
+LINE 1: select E'{{1,2},\\{2,3}}'::text[];
+ ^
+DETAIL: Unexpected "\" character.
+select '{{"1 2" x},{3}}'::text[];
+ERROR: malformed array literal: "{{"1 2" x},{3}}"
+LINE 1: select '{{"1 2" x},{3}}'::text[];
+ ^
+DETAIL: Unexpected array element.
+select '{}}'::text[];
+ERROR: malformed array literal: "{}}"
+LINE 1: select '{}}'::text[];
+ ^
+DETAIL: Junk after closing right brace.
+select '{ }}'::text[];
+ERROR: malformed array literal: "{ }}"
+LINE 1: select '{ }}'::text[];
+ ^
+DETAIL: Junk after closing right brace.
+select array[];
+ERROR: cannot determine type of empty array
+LINE 1: select array[];
+ ^
+HINT: Explicitly cast to the desired type, for example ARRAY[]::integer[].
+-- none of the above should be accepted
+-- all of the following should be accepted
+select '{}'::text[];
+ text
+------
+ {}
+(1 row)
+
+select '{{{1,2,3,4},{2,3,4,5}},{{3,4,5,6},{4,5,6,7}}}'::text[];
+ text
+-----------------------------------------------
+ {{{1,2,3,4},{2,3,4,5}},{{3,4,5,6},{4,5,6,7}}}
+(1 row)
+
+select '{0 second ,0 second}'::interval[];
+ interval
+---------------
+ {"@ 0","@ 0"}
+(1 row)
+
+select '{ { "," } , { 3 } }'::text[];
+ text
+-------------
+ {{","},{3}}
+(1 row)
+
+select ' { { " 0 second " , 0 second } }'::text[];
+ text
+-------------------------------
+ {{" 0 second ","0 second"}}
+(1 row)
+
+select '{
+ 0 second,
+ @ 1 hour @ 42 minutes @ 20 seconds
+ }'::interval[];
+ interval
+------------------------------------
+ {"@ 0","@ 1 hour 42 mins 20 secs"}
+(1 row)
+
+select array[]::text[];
+ array
+-------
+ {}
+(1 row)
+
+select '[0:1]={1.1,2.2}'::float8[];
+ float8
+-----------------
+ [0:1]={1.1,2.2}
+(1 row)
+
+-- all of the above should be accepted
+-- tests for array aggregates
+CREATE TEMP TABLE arraggtest ( f1 INT[], f2 TEXT[][], f3 FLOAT[]);
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{1,2,3,4}','{{grey,red},{blue,blue}}','{1.6, 0.0}');
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{1,2,3}','{{grey,red},{grey,blue}}','{1.6}');
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+ max | min | max | min | max | min
+-----------+---------+--------------------------+--------------------------+---------+-------
+ {1,2,3,4} | {1,2,3} | {{grey,red},{grey,blue}} | {{grey,red},{blue,blue}} | {1.6,0} | {1.6}
+(1 row)
+
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{3,3,2,4,5,6}','{{white,yellow},{pink,orange}}','{2.1,3.3,1.8,1.7,1.6}');
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+ max | min | max | min | max | min
+---------------+---------+--------------------------------+--------------------------+-----------------------+-------
+ {3,3,2,4,5,6} | {1,2,3} | {{white,yellow},{pink,orange}} | {{grey,red},{blue,blue}} | {2.1,3.3,1.8,1.7,1.6} | {1.6}
+(1 row)
+
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{2}','{{black,red},{green,orange}}','{1.6,2.2,2.6,0.4}');
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+ max | min | max | min | max | min
+---------------+---------+--------------------------------+------------------------------+-----------------------+-------
+ {3,3,2,4,5,6} | {1,2,3} | {{white,yellow},{pink,orange}} | {{black,red},{green,orange}} | {2.1,3.3,1.8,1.7,1.6} | {1.6}
+(1 row)
+
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{4,2,6,7,8,1}','{{red},{black},{purple},{blue},{blue}}',NULL);
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+ max | min | max | min | max | min
+---------------+---------+--------------------------------+------------------------------+-----------------------+-------
+ {4,2,6,7,8,1} | {1,2,3} | {{white,yellow},{pink,orange}} | {{black,red},{green,orange}} | {2.1,3.3,1.8,1.7,1.6} | {1.6}
+(1 row)
+
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{}','{{pink,white,blue,red,grey,orange}}','{2.1,1.87,1.4,2.2}');
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+ max | min | max | min | max | min
+---------------+-----+--------------------------------+------------------------------+-----------------------+-------
+ {4,2,6,7,8,1} | {} | {{white,yellow},{pink,orange}} | {{black,red},{green,orange}} | {2.1,3.3,1.8,1.7,1.6} | {1.6}
+(1 row)
+
+-- A few simple tests for arrays of composite types
+create type comptype as (f1 int, f2 text);
+create table comptable (c1 comptype, c2 comptype[]);
+-- XXX would like to not have to specify row() construct types here ...
+insert into comptable
+ values (row(1,'foo'), array[row(2,'bar')::comptype, row(3,'baz')::comptype]);
+-- check that implicitly named array type _comptype isn't a problem
+create type _comptype as enum('fooey');
+select * from comptable;
+ c1 | c2
+---------+-----------------------
+ (1,foo) | {"(2,bar)","(3,baz)"}
+(1 row)
+
+select c2[2].f2 from comptable;
+ f2
+-----
+ baz
+(1 row)
+
+drop type _comptype;
+drop table comptable;
+drop type comptype;
+create or replace function unnest1(anyarray)
+returns setof anyelement as $$
+select $1[s] from generate_subscripts($1,1) g(s);
+$$ language sql immutable;
+create or replace function unnest2(anyarray)
+returns setof anyelement as $$
+select $1[s1][s2] from generate_subscripts($1,1) g1(s1),
+ generate_subscripts($1,2) g2(s2);
+$$ language sql immutable;
+select * from unnest1(array[1,2,3]);
+ unnest1
+---------
+ 1
+ 2
+ 3
+(3 rows)
+
+select * from unnest2(array[[1,2,3],[4,5,6]]);
+ unnest2
+---------
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+(6 rows)
+
+drop function unnest1(anyarray);
+drop function unnest2(anyarray);
+select array_fill(null::integer, array[3,3],array[2,2]);
+ array_fill
+-----------------------------------------------------------------
+ [2:4][2:4]={{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}}
+(1 row)
+
+select array_fill(null::integer, array[3,3]);
+ array_fill
+------------------------------------------------------
+ {{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}}
+(1 row)
+
+select array_fill(null::text, array[3,3],array[2,2]);
+ array_fill
+-----------------------------------------------------------------
+ [2:4][2:4]={{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}}
+(1 row)
+
+select array_fill(null::text, array[3,3]);
+ array_fill
+------------------------------------------------------
+ {{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}}
+(1 row)
+
+select array_fill(7, array[3,3],array[2,2]);
+ array_fill
+--------------------------------------
+ [2:4][2:4]={{7,7,7},{7,7,7},{7,7,7}}
+(1 row)
+
+select array_fill(7, array[3,3]);
+ array_fill
+---------------------------
+ {{7,7,7},{7,7,7},{7,7,7}}
+(1 row)
+
+select array_fill('juhu'::text, array[3,3],array[2,2]);
+ array_fill
+-----------------------------------------------------------------
+ [2:4][2:4]={{juhu,juhu,juhu},{juhu,juhu,juhu},{juhu,juhu,juhu}}
+(1 row)
+
+select array_fill('juhu'::text, array[3,3]);
+ array_fill
+------------------------------------------------------
+ {{juhu,juhu,juhu},{juhu,juhu,juhu},{juhu,juhu,juhu}}
+(1 row)
+
+select a, a = '{}' as is_eq, array_dims(a)
+ from (select array_fill(42, array[0]) as a) ss;
+ a | is_eq | array_dims
+----+-------+------------
+ {} | t |
+(1 row)
+
+select a, a = '{}' as is_eq, array_dims(a)
+ from (select array_fill(42, '{}') as a) ss;
+ a | is_eq | array_dims
+----+-------+------------
+ {} | t |
+(1 row)
+
+select a, a = '{}' as is_eq, array_dims(a)
+ from (select array_fill(42, '{}', '{}') as a) ss;
+ a | is_eq | array_dims
+----+-------+------------
+ {} | t |
+(1 row)
+
+-- raise exception
+select array_fill(1, null, array[2,2]);
+ERROR: dimension array or low bound array cannot be null
+select array_fill(1, array[2,2], null);
+ERROR: dimension array or low bound array cannot be null
+select array_fill(1, array[2,2], '{}');
+ERROR: wrong number of array subscripts
+DETAIL: Low bound array has different size than dimensions array.
+select array_fill(1, array[3,3], array[1,1,1]);
+ERROR: wrong number of array subscripts
+DETAIL: Low bound array has different size than dimensions array.
+select array_fill(1, array[1,2,null]);
+ERROR: dimension values cannot be null
+select array_fill(1, array[[1,2],[3,4]]);
+ERROR: wrong number of array subscripts
+DETAIL: Dimension array must be one dimensional.
+select string_to_array('1|2|3', '|');
+ string_to_array
+-----------------
+ {1,2,3}
+(1 row)
+
+select string_to_array('1|2|3|', '|');
+ string_to_array
+-----------------
+ {1,2,3,""}
+(1 row)
+
+select string_to_array('1||2|3||', '||');
+ string_to_array
+-----------------
+ {1,2|3,""}
+(1 row)
+
+select string_to_array('1|2|3', '');
+ string_to_array
+-----------------
+ {1|2|3}
+(1 row)
+
+select string_to_array('', '|');
+ string_to_array
+-----------------
+ {}
+(1 row)
+
+select string_to_array('1|2|3', NULL);
+ string_to_array
+-----------------
+ {1,|,2,|,3}
+(1 row)
+
+select string_to_array(NULL, '|') IS NULL;
+ ?column?
+----------
+ t
+(1 row)
+
+select string_to_array('abc', '');
+ string_to_array
+-----------------
+ {abc}
+(1 row)
+
+select string_to_array('abc', '', 'abc');
+ string_to_array
+-----------------
+ {NULL}
+(1 row)
+
+select string_to_array('abc', ',');
+ string_to_array
+-----------------
+ {abc}
+(1 row)
+
+select string_to_array('abc', ',', 'abc');
+ string_to_array
+-----------------
+ {NULL}
+(1 row)
+
+select string_to_array('1,2,3,4,,6', ',');
+ string_to_array
+-----------------
+ {1,2,3,4,"",6}
+(1 row)
+
+select string_to_array('1,2,3,4,,6', ',', '');
+ string_to_array
+------------------
+ {1,2,3,4,NULL,6}
+(1 row)
+
+select string_to_array('1,2,3,4,*,6', ',', '*');
+ string_to_array
+------------------
+ {1,2,3,4,NULL,6}
+(1 row)
+
+select v, v is null as "is null" from string_to_table('1|2|3', '|') g(v);
+ v | is null
+---+---------
+ 1 | f
+ 2 | f
+ 3 | f
+(3 rows)
+
+select v, v is null as "is null" from string_to_table('1|2|3|', '|') g(v);
+ v | is null
+---+---------
+ 1 | f
+ 2 | f
+ 3 | f
+ | f
+(4 rows)
+
+select v, v is null as "is null" from string_to_table('1||2|3||', '||') g(v);
+ v | is null
+-----+---------
+ 1 | f
+ 2|3 | f
+ | f
+(3 rows)
+
+select v, v is null as "is null" from string_to_table('1|2|3', '') g(v);
+ v | is null
+-------+---------
+ 1|2|3 | f
+(1 row)
+
+select v, v is null as "is null" from string_to_table('', '|') g(v);
+ v | is null
+---+---------
+(0 rows)
+
+select v, v is null as "is null" from string_to_table('1|2|3', NULL) g(v);
+ v | is null
+---+---------
+ 1 | f
+ | | f
+ 2 | f
+ | | f
+ 3 | f
+(5 rows)
+
+select v, v is null as "is null" from string_to_table(NULL, '|') g(v);
+ v | is null
+---+---------
+(0 rows)
+
+select v, v is null as "is null" from string_to_table('abc', '') g(v);
+ v | is null
+-----+---------
+ abc | f
+(1 row)
+
+select v, v is null as "is null" from string_to_table('abc', '', 'abc') g(v);
+ v | is null
+---+---------
+ | t
+(1 row)
+
+select v, v is null as "is null" from string_to_table('abc', ',') g(v);
+ v | is null
+-----+---------
+ abc | f
+(1 row)
+
+select v, v is null as "is null" from string_to_table('abc', ',', 'abc') g(v);
+ v | is null
+---+---------
+ | t
+(1 row)
+
+select v, v is null as "is null" from string_to_table('1,2,3,4,,6', ',') g(v);
+ v | is null
+---+---------
+ 1 | f
+ 2 | f
+ 3 | f
+ 4 | f
+ | f
+ 6 | f
+(6 rows)
+
+select v, v is null as "is null" from string_to_table('1,2,3,4,,6', ',', '') g(v);
+ v | is null
+---+---------
+ 1 | f
+ 2 | f
+ 3 | f
+ 4 | f
+ | t
+ 6 | f
+(6 rows)
+
+select v, v is null as "is null" from string_to_table('1,2,3,4,*,6', ',', '*') g(v);
+ v | is null
+---+---------
+ 1 | f
+ 2 | f
+ 3 | f
+ 4 | f
+ | t
+ 6 | f
+(6 rows)
+
+select array_to_string(NULL::int4[], ',') IS NULL;
+ ?column?
+----------
+ t
+(1 row)
+
+select array_to_string('{}'::int4[], ',');
+ array_to_string
+-----------------
+
+(1 row)
+
+select array_to_string(array[1,2,3,4,NULL,6], ',');
+ array_to_string
+-----------------
+ 1,2,3,4,6
+(1 row)
+
+select array_to_string(array[1,2,3,4,NULL,6], ',', '*');
+ array_to_string
+-----------------
+ 1,2,3,4,*,6
+(1 row)
+
+select array_to_string(array[1,2,3,4,NULL,6], NULL);
+ array_to_string
+-----------------
+
+(1 row)
+
+select array_to_string(array[1,2,3,4,NULL,6], ',', NULL);
+ array_to_string
+-----------------
+ 1,2,3,4,6
+(1 row)
+
+select array_to_string(string_to_array('1|2|3', '|'), '|');
+ array_to_string
+-----------------
+ 1|2|3
+(1 row)
+
+select array_length(array[1,2,3], 1);
+ array_length
+--------------
+ 3
+(1 row)
+
+select array_length(array[[1,2,3], [4,5,6]], 0);
+ array_length
+--------------
+
+(1 row)
+
+select array_length(array[[1,2,3], [4,5,6]], 1);
+ array_length
+--------------
+ 2
+(1 row)
+
+select array_length(array[[1,2,3], [4,5,6]], 2);
+ array_length
+--------------
+ 3
+(1 row)
+
+select array_length(array[[1,2,3], [4,5,6]], 3);
+ array_length
+--------------
+
+(1 row)
+
+select cardinality(NULL::int[]);
+ cardinality
+-------------
+
+(1 row)
+
+select cardinality('{}'::int[]);
+ cardinality
+-------------
+ 0
+(1 row)
+
+select cardinality(array[1,2,3]);
+ cardinality
+-------------
+ 3
+(1 row)
+
+select cardinality('[2:4]={5,6,7}'::int[]);
+ cardinality
+-------------
+ 3
+(1 row)
+
+select cardinality('{{1,2}}'::int[]);
+ cardinality
+-------------
+ 2
+(1 row)
+
+select cardinality('{{1,2},{3,4},{5,6}}'::int[]);
+ cardinality
+-------------
+ 6
+(1 row)
+
+select cardinality('{{{1,9},{5,6}},{{2,3},{3,4}}}'::int[]);
+ cardinality
+-------------
+ 8
+(1 row)
+
+-- array_agg(anynonarray)
+select array_agg(unique1) from (select unique1 from tenk1 where unique1 < 15 order by unique1) ss;
+ array_agg
+--------------------------------------
+ {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14}
+(1 row)
+
+select array_agg(ten) from (select ten from tenk1 where unique1 < 15 order by unique1) ss;
+ array_agg
+---------------------------------
+ {0,1,2,3,4,5,6,7,8,9,0,1,2,3,4}
+(1 row)
+
+select array_agg(nullif(ten, 4)) from (select ten from tenk1 where unique1 < 15 order by unique1) ss;
+ array_agg
+---------------------------------------
+ {0,1,2,3,NULL,5,6,7,8,9,0,1,2,3,NULL}
+(1 row)
+
+select array_agg(unique1) from tenk1 where unique1 < -15;
+ array_agg
+-----------
+
+(1 row)
+
+-- array_agg(anyarray)
+select array_agg(ar)
+ from (values ('{1,2}'::int[]), ('{3,4}'::int[])) v(ar);
+ array_agg
+---------------
+ {{1,2},{3,4}}
+(1 row)
+
+select array_agg(distinct ar order by ar desc)
+ from (select array[i / 2] from generate_series(1,10) a(i)) b(ar);
+ array_agg
+---------------------------
+ {{5},{4},{3},{2},{1},{0}}
+(1 row)
+
+select array_agg(ar)
+ from (select array_agg(array[i, i+1, i-1])
+ from generate_series(1,2) a(i)) b(ar);
+ array_agg
+---------------------
+ {{{1,2,0},{2,3,1}}}
+(1 row)
+
+select array_agg(array[i+1.2, i+1.3, i+1.4]) from generate_series(1,3) g(i);
+ array_agg
+---------------------------------------------
+ {{2.2,2.3,2.4},{3.2,3.3,3.4},{4.2,4.3,4.4}}
+(1 row)
+
+select array_agg(array['Hello', i::text]) from generate_series(9,11) g(i);
+ array_agg
+-----------------------------------
+ {{Hello,9},{Hello,10},{Hello,11}}
+(1 row)
+
+select array_agg(array[i, nullif(i, 3), i+1]) from generate_series(1,4) g(i);
+ array_agg
+--------------------------------------
+ {{1,1,2},{2,2,3},{3,NULL,4},{4,4,5}}
+(1 row)
+
+-- errors
+select array_agg('{}'::int[]) from generate_series(1,2);
+ERROR: cannot accumulate empty arrays
+select array_agg(null::int[]) from generate_series(1,2);
+ERROR: cannot accumulate null arrays
+select array_agg(ar)
+ from (values ('{1,2}'::int[]), ('{3}'::int[])) v(ar);
+ERROR: cannot accumulate arrays of different dimensionality
+select unnest(array[1,2,3]);
+ unnest
+--------
+ 1
+ 2
+ 3
+(3 rows)
+
+select * from unnest(array[1,2,3]);
+ unnest
+--------
+ 1
+ 2
+ 3
+(3 rows)
+
+select unnest(array[1,2,3,4.5]::float8[]);
+ unnest
+--------
+ 1
+ 2
+ 3
+ 4.5
+(4 rows)
+
+select unnest(array[1,2,3,4.5]::numeric[]);
+ unnest
+--------
+ 1
+ 2
+ 3
+ 4.5
+(4 rows)
+
+select unnest(array[1,2,3,null,4,null,null,5,6]);
+ unnest
+--------
+ 1
+ 2
+ 3
+
+ 4
+
+
+ 5
+ 6
+(9 rows)
+
+select unnest(array[1,2,3,null,4,null,null,5,6]::text[]);
+ unnest
+--------
+ 1
+ 2
+ 3
+
+ 4
+
+
+ 5
+ 6
+(9 rows)
+
+select abs(unnest(array[1,2,null,-3]));
+ abs
+-----
+ 1
+ 2
+
+ 3
+(4 rows)
+
+select array_remove(array[1,2,2,3], 2);
+ array_remove
+--------------
+ {1,3}
+(1 row)
+
+select array_remove(array[1,2,2,3], 5);
+ array_remove
+--------------
+ {1,2,2,3}
+(1 row)
+
+select array_remove(array[1,NULL,NULL,3], NULL);
+ array_remove
+--------------
+ {1,3}
+(1 row)
+
+select array_remove(array['A','CC','D','C','RR'], 'RR');
+ array_remove
+--------------
+ {A,CC,D,C}
+(1 row)
+
+select array_remove(array[1.0, 2.1, 3.3], 1);
+ array_remove
+--------------
+ {2.1,3.3}
+(1 row)
+
+select array_remove('{{1,2,2},{1,4,3}}', 2); -- not allowed
+ERROR: removing elements from multidimensional arrays is not supported
+select array_remove(array['X','X','X'], 'X') = '{}';
+ ?column?
+----------
+ t
+(1 row)
+
+select array_replace(array[1,2,5,4],5,3);
+ array_replace
+---------------
+ {1,2,3,4}
+(1 row)
+
+select array_replace(array[1,2,5,4],5,NULL);
+ array_replace
+---------------
+ {1,2,NULL,4}
+(1 row)
+
+select array_replace(array[1,2,NULL,4,NULL],NULL,5);
+ array_replace
+---------------
+ {1,2,5,4,5}
+(1 row)
+
+select array_replace(array['A','B','DD','B'],'B','CC');
+ array_replace
+---------------
+ {A,CC,DD,CC}
+(1 row)
+
+select array_replace(array[1,NULL,3],NULL,NULL);
+ array_replace
+---------------
+ {1,NULL,3}
+(1 row)
+
+select array_replace(array['AB',NULL,'CDE'],NULL,'12');
+ array_replace
+---------------
+ {AB,12,CDE}
+(1 row)
+
+-- array(select array-value ...)
+select array(select array[i,i/2] from generate_series(1,5) i);
+ array
+---------------------------------
+ {{1,0},{2,1},{3,1},{4,2},{5,2}}
+(1 row)
+
+select array(select array['Hello', i::text] from generate_series(9,11) i);
+ array
+-----------------------------------
+ {{Hello,9},{Hello,10},{Hello,11}}
+(1 row)
+
+-- Insert/update on a column that is array of composite
+create temp table t1 (f1 int8_tbl[]);
+insert into t1 (f1[5].q1) values(42);
+select * from t1;
+ f1
+-----------------
+ [5:5]={"(42,)"}
+(1 row)
+
+update t1 set f1[5].q2 = 43;
+select * from t1;
+ f1
+-------------------
+ [5:5]={"(42,43)"}
+(1 row)
+
+-- Check that arrays of composites are safely detoasted when needed
+create temp table src (f1 text);
+insert into src
+ select string_agg(random()::text,'') from generate_series(1,10000);
+create type textandtext as (c1 text, c2 text);
+create temp table dest (f1 textandtext[]);
+insert into dest select array[row(f1,f1)::textandtext] from src;
+select length(md5((f1[1]).c2)) from dest;
+ length
+--------
+ 32
+(1 row)
+
+delete from src;
+select length(md5((f1[1]).c2)) from dest;
+ length
+--------
+ 32
+(1 row)
+
+truncate table src;
+drop table src;
+select length(md5((f1[1]).c2)) from dest;
+ length
+--------
+ 32
+(1 row)
+
+drop table dest;
+drop type textandtext;
+-- Tests for polymorphic-array form of width_bucket()
+-- this exercises the varwidth and float8 code paths
+SELECT
+ op,
+ width_bucket(op::numeric, ARRAY[1, 3, 5, 10.0]::numeric[]) AS wb_n1,
+ width_bucket(op::numeric, ARRAY[0, 5.5, 9.99]::numeric[]) AS wb_n2,
+ width_bucket(op::numeric, ARRAY[-6, -5, 2.0]::numeric[]) AS wb_n3,
+ width_bucket(op::float8, ARRAY[1, 3, 5, 10.0]::float8[]) AS wb_f1,
+ width_bucket(op::float8, ARRAY[0, 5.5, 9.99]::float8[]) AS wb_f2,
+ width_bucket(op::float8, ARRAY[-6, -5, 2.0]::float8[]) AS wb_f3
+FROM (VALUES
+ (-5.2),
+ (-0.0000000001),
+ (0.000000000001),
+ (1),
+ (1.99999999999999),
+ (2),
+ (2.00000000000001),
+ (3),
+ (4),
+ (4.5),
+ (5),
+ (5.5),
+ (6),
+ (7),
+ (8),
+ (9),
+ (9.99999999999999),
+ (10),
+ (10.0000000000001)
+) v(op);
+ op | wb_n1 | wb_n2 | wb_n3 | wb_f1 | wb_f2 | wb_f3
+------------------+-------+-------+-------+-------+-------+-------
+ -5.2 | 0 | 0 | 1 | 0 | 0 | 1
+ -0.0000000001 | 0 | 0 | 2 | 0 | 0 | 2
+ 0.000000000001 | 0 | 1 | 2 | 0 | 1 | 2
+ 1 | 1 | 1 | 2 | 1 | 1 | 2
+ 1.99999999999999 | 1 | 1 | 2 | 1 | 1 | 2
+ 2 | 1 | 1 | 3 | 1 | 1 | 3
+ 2.00000000000001 | 1 | 1 | 3 | 1 | 1 | 3
+ 3 | 2 | 1 | 3 | 2 | 1 | 3
+ 4 | 2 | 1 | 3 | 2 | 1 | 3
+ 4.5 | 2 | 1 | 3 | 2 | 1 | 3
+ 5 | 3 | 1 | 3 | 3 | 1 | 3
+ 5.5 | 3 | 2 | 3 | 3 | 2 | 3
+ 6 | 3 | 2 | 3 | 3 | 2 | 3
+ 7 | 3 | 2 | 3 | 3 | 2 | 3
+ 8 | 3 | 2 | 3 | 3 | 2 | 3
+ 9 | 3 | 2 | 3 | 3 | 2 | 3
+ 9.99999999999999 | 3 | 3 | 3 | 3 | 3 | 3
+ 10 | 4 | 3 | 3 | 4 | 3 | 3
+ 10.0000000000001 | 4 | 3 | 3 | 4 | 3 | 3
+(19 rows)
+
+-- ensure float8 path handles NaN properly
+SELECT
+ op,
+ width_bucket(op, ARRAY[1, 3, 9, 'NaN', 'NaN']::float8[]) AS wb
+FROM (VALUES
+ (-5.2::float8),
+ (4::float8),
+ (77::float8),
+ ('NaN'::float8)
+) v(op);
+ op | wb
+------+----
+ -5.2 | 0
+ 4 | 2
+ 77 | 3
+ NaN | 5
+(4 rows)
+
+-- these exercise the generic fixed-width code path
+SELECT
+ op,
+ width_bucket(op, ARRAY[1, 3, 5, 10]) AS wb_1
+FROM generate_series(0,11) as op;
+ op | wb_1
+----+------
+ 0 | 0
+ 1 | 1
+ 2 | 1
+ 3 | 2
+ 4 | 2
+ 5 | 3
+ 6 | 3
+ 7 | 3
+ 8 | 3
+ 9 | 3
+ 10 | 4
+ 11 | 4
+(12 rows)
+
+SELECT width_bucket(now(),
+ array['yesterday', 'today', 'tomorrow']::timestamptz[]);
+ width_bucket
+--------------
+ 2
+(1 row)
+
+-- corner cases
+SELECT width_bucket(5, ARRAY[3]);
+ width_bucket
+--------------
+ 1
+(1 row)
+
+SELECT width_bucket(5, '{}');
+ width_bucket
+--------------
+ 0
+(1 row)
+
+-- error cases
+SELECT width_bucket('5'::text, ARRAY[3, 4]::integer[]);
+ERROR: function width_bucket(text, integer[]) does not exist
+LINE 1: SELECT width_bucket('5'::text, ARRAY[3, 4]::integer[]);
+ ^
+HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+SELECT width_bucket(5, ARRAY[3, 4, NULL]);
+ERROR: thresholds array must not contain NULLs
+SELECT width_bucket(5, ARRAY[ARRAY[1, 2], ARRAY[3, 4]]);
+ERROR: thresholds must be one-dimensional array
+-- trim_array
+SELECT arr, trim_array(arr, 2)
+FROM
+(VALUES ('{1,2,3,4,5,6}'::bigint[]),
+ ('{1,2}'),
+ ('[10:16]={1,2,3,4,5,6,7}'),
+ ('[-15:-10]={1,2,3,4,5,6}'),
+ ('{{1,10},{2,20},{3,30},{4,40}}')) v(arr);
+ arr | trim_array
+-------------------------------+-----------------
+ {1,2,3,4,5,6} | {1,2,3,4}
+ {1,2} | {}
+ [10:16]={1,2,3,4,5,6,7} | {1,2,3,4,5}
+ [-15:-10]={1,2,3,4,5,6} | {1,2,3,4}
+ {{1,10},{2,20},{3,30},{4,40}} | {{1,10},{2,20}}
+(5 rows)
+
+SELECT trim_array(ARRAY[1, 2, 3], -1); -- fail
+ERROR: number of elements to trim must be between 0 and 3
+SELECT trim_array(ARRAY[1, 2, 3], 10); -- fail
+ERROR: number of elements to trim must be between 0 and 3
+SELECT trim_array(ARRAY[]::int[], 1); -- fail
+ERROR: number of elements to trim must be between 0 and 0
diff --git a/ydb/library/yql/tests/postgresql/original/cases/arrays.sql b/ydb/library/yql/tests/postgresql/original/cases/arrays.sql
new file mode 100644
index 0000000000..5eedc4c3ce
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/arrays.sql
@@ -0,0 +1,740 @@
+--
+-- ARRAYS
+--
+
+CREATE TABLE arrtest (
+ a int2[],
+ b int4[][][],
+ c name[],
+ d text[][],
+ e float8[],
+ f char(5)[],
+ g varchar(5)[]
+);
+
+--
+-- only the 'e' array is 0-based, the others are 1-based.
+--
+
+INSERT INTO arrtest (a[1:5], b[1:1][1:2][1:2], c, d, f, g)
+ VALUES ('{1,2,3,4,5}', '{{{0,0},{1,2}}}', '{}', '{}', '{}', '{}');
+
+UPDATE arrtest SET e[0] = '1.1';
+
+UPDATE arrtest SET e[1] = '2.2';
+
+INSERT INTO arrtest (f)
+ VALUES ('{"too long"}');
+
+INSERT INTO arrtest (a, b[1:2][1:2], c, d, e, f, g)
+ VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}',
+ '{{"elt1", "elt2"}}', '{"3.4", "6.7"}',
+ '{"abc","abcde"}', '{"abc","abcde"}');
+
+INSERT INTO arrtest (a, b[1:2], c, d[1:2])
+ VALUES ('{}', '{3,4}', '{foo,bar}', '{bar,foo}');
+
+INSERT INTO arrtest (b[2]) VALUES(now()); -- error, type mismatch
+
+INSERT INTO arrtest (b[1:2]) VALUES(now()); -- error, type mismatch
+
+SELECT * FROM arrtest;
+
+SELECT arrtest.a[1],
+ arrtest.b[1][1][1],
+ arrtest.c[1],
+ arrtest.d[1][1],
+ arrtest.e[0]
+ FROM arrtest;
+
+SELECT a[1], b[1][1][1], c[1], d[1][1], e[0]
+ FROM arrtest;
+
+SELECT a[1:3],
+ b[1:1][1:2][1:2],
+ c[1:2],
+ d[1:1][1:2]
+ FROM arrtest;
+
+SELECT array_ndims(a) AS a,array_ndims(b) AS b,array_ndims(c) AS c
+ FROM arrtest;
+
+SELECT array_dims(a) AS a,array_dims(b) AS b,array_dims(c) AS c
+ FROM arrtest;
+
+-- returns nothing
+SELECT *
+ FROM arrtest
+ WHERE a[1] < 5 and
+ c = '{"foobar"}'::_name;
+
+UPDATE arrtest
+ SET a[1:2] = '{16,25}'
+ WHERE NOT a = '{}'::_int2;
+
+UPDATE arrtest
+ SET b[1:1][1:1][1:2] = '{113, 117}',
+ b[1:1][1:2][2:2] = '{142, 147}'
+ WHERE array_dims(b) = '[1:1][1:2][1:2]';
+
+UPDATE arrtest
+ SET c[2:2] = '{"new_word"}'
+ WHERE array_dims(c) is not null;
+
+SELECT a,b,c FROM arrtest;
+
+SELECT a[1:3],
+ b[1:1][1:2][1:2],
+ c[1:2],
+ d[1:1][2:2]
+ FROM arrtest;
+
+SELECT b[1:1][2][2],
+ d[1:1][2]
+ FROM arrtest;
+
+INSERT INTO arrtest(a) VALUES('{1,null,3}');
+SELECT a FROM arrtest;
+UPDATE arrtest SET a[4] = NULL WHERE a[2] IS NULL;
+SELECT a FROM arrtest WHERE a[2] IS NULL;
+DELETE FROM arrtest WHERE a[2] IS NULL AND b IS NULL;
+SELECT a,b,c FROM arrtest;
+
+-- test mixed slice/scalar subscripting
+select '{{1,2,3},{4,5,6},{7,8,9}}'::int[];
+select ('{{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
+select '[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[];
+select ('[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
+
+--
+-- check subscription corner cases
+--
+-- More subscripts than MAXDIM (6)
+SELECT ('{}'::int[])[1][2][3][4][5][6][7];
+-- NULL index yields NULL when selecting
+SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL][1];
+SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL:1][1];
+SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][1:NULL][1];
+-- NULL index in assignment is an error
+UPDATE arrtest
+ SET c[NULL] = '{"can''t assign"}'
+ WHERE array_dims(c) is not null;
+UPDATE arrtest
+ SET c[NULL:1] = '{"can''t assign"}'
+ WHERE array_dims(c) is not null;
+UPDATE arrtest
+ SET c[1:NULL] = '{"can''t assign"}'
+ WHERE array_dims(c) is not null;
+-- Un-subscriptable type
+SELECT (now())[1];
+
+-- test slices with empty lower and/or upper index
+CREATE TEMP TABLE arrtest_s (
+ a int2[],
+ b int2[][]
+);
+INSERT INTO arrtest_s VALUES ('{1,2,3,4,5}', '{{1,2,3}, {4,5,6}, {7,8,9}}');
+INSERT INTO arrtest_s VALUES ('[0:4]={1,2,3,4,5}', '[0:2][0:2]={{1,2,3}, {4,5,6}, {7,8,9}}');
+
+SELECT * FROM arrtest_s;
+SELECT a[:3], b[:2][:2] FROM arrtest_s;
+SELECT a[2:], b[2:][2:] FROM arrtest_s;
+SELECT a[:], b[:] FROM arrtest_s;
+
+-- updates
+UPDATE arrtest_s SET a[:3] = '{11, 12, 13}', b[:2][:2] = '{{11,12}, {14,15}}'
+ WHERE array_lower(a,1) = 1;
+SELECT * FROM arrtest_s;
+UPDATE arrtest_s SET a[3:] = '{23, 24, 25}', b[2:][2:] = '{{25,26}, {28,29}}';
+SELECT * FROM arrtest_s;
+UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}';
+SELECT * FROM arrtest_s;
+UPDATE arrtest_s SET a[:] = '{23, 24, 25}'; -- fail, too small
+INSERT INTO arrtest_s VALUES(NULL, NULL);
+UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}'; -- fail, no good with null
+
+-- check with fixed-length-array type, such as point
+SELECT f1[0:1] FROM POINT_TBL;
+SELECT f1[0:] FROM POINT_TBL;
+SELECT f1[:1] FROM POINT_TBL;
+SELECT f1[:] FROM POINT_TBL;
+
+-- subscript assignments to fixed-width result in NULL if previous value is NULL
+UPDATE point_tbl SET f1[0] = 10 WHERE f1 IS NULL RETURNING *;
+INSERT INTO point_tbl(f1[0]) VALUES(0) RETURNING *;
+-- NULL assignments get ignored
+UPDATE point_tbl SET f1[0] = NULL WHERE f1::text = '(10,10)'::point::text RETURNING *;
+-- but non-NULL subscript assignments work
+UPDATE point_tbl SET f1[0] = -10, f1[1] = -10 WHERE f1::text = '(10,10)'::point::text RETURNING *;
+-- but not to expand the range
+UPDATE point_tbl SET f1[3] = 10 WHERE f1::text = '(-10,-10)'::point::text RETURNING *;
+
+--
+-- test array extension
+--
+CREATE TEMP TABLE arrtest1 (i int[], t text[]);
+insert into arrtest1 values(array[1,2,null,4], array['one','two',null,'four']);
+select * from arrtest1;
+update arrtest1 set i[2] = 22, t[2] = 'twenty-two';
+select * from arrtest1;
+update arrtest1 set i[5] = 5, t[5] = 'five';
+select * from arrtest1;
+update arrtest1 set i[8] = 8, t[8] = 'eight';
+select * from arrtest1;
+update arrtest1 set i[0] = 0, t[0] = 'zero';
+select * from arrtest1;
+update arrtest1 set i[-3] = -3, t[-3] = 'minus-three';
+select * from arrtest1;
+update arrtest1 set i[0:2] = array[10,11,12], t[0:2] = array['ten','eleven','twelve'];
+select * from arrtest1;
+update arrtest1 set i[8:10] = array[18,null,20], t[8:10] = array['p18',null,'p20'];
+select * from arrtest1;
+update arrtest1 set i[11:12] = array[null,22], t[11:12] = array[null,'p22'];
+select * from arrtest1;
+update arrtest1 set i[15:16] = array[null,26], t[15:16] = array[null,'p26'];
+select * from arrtest1;
+update arrtest1 set i[-5:-3] = array[-15,-14,-13], t[-5:-3] = array['m15','m14','m13'];
+select * from arrtest1;
+update arrtest1 set i[-7:-6] = array[-17,null], t[-7:-6] = array['m17',null];
+select * from arrtest1;
+update arrtest1 set i[-12:-10] = array[-22,null,-20], t[-12:-10] = array['m22',null,'m20'];
+select * from arrtest1;
+delete from arrtest1;
+insert into arrtest1 values(array[1,2,null,4], array['one','two',null,'four']);
+select * from arrtest1;
+update arrtest1 set i[0:5] = array[0,1,2,null,4,5], t[0:5] = array['z','p1','p2',null,'p4','p5'];
+select * from arrtest1;
+
+--
+-- array expressions and operators
+--
+
+-- table creation and INSERTs
+CREATE TEMP TABLE arrtest2 (i integer ARRAY[4], f float8[], n numeric[], t text[], d timestamp[]);
+INSERT INTO arrtest2 VALUES(
+ ARRAY[[[113,142],[1,147]]],
+ ARRAY[1.1,1.2,1.3]::float8[],
+ ARRAY[1.1,1.2,1.3],
+ ARRAY[[['aaa','aab'],['aba','abb'],['aca','acb']],[['baa','bab'],['bba','bbb'],['bca','bcb']]],
+ ARRAY['19620326','19931223','19970117']::timestamp[]
+);
+
+-- some more test data
+CREATE TEMP TABLE arrtest_f (f0 int, f1 text, f2 float8);
+insert into arrtest_f values(1,'cat1',1.21);
+insert into arrtest_f values(2,'cat1',1.24);
+insert into arrtest_f values(3,'cat1',1.18);
+insert into arrtest_f values(4,'cat1',1.26);
+insert into arrtest_f values(5,'cat1',1.15);
+insert into arrtest_f values(6,'cat2',1.15);
+insert into arrtest_f values(7,'cat2',1.26);
+insert into arrtest_f values(8,'cat2',1.32);
+insert into arrtest_f values(9,'cat2',1.30);
+
+CREATE TEMP TABLE arrtest_i (f0 int, f1 text, f2 int);
+insert into arrtest_i values(1,'cat1',21);
+insert into arrtest_i values(2,'cat1',24);
+insert into arrtest_i values(3,'cat1',18);
+insert into arrtest_i values(4,'cat1',26);
+insert into arrtest_i values(5,'cat1',15);
+insert into arrtest_i values(6,'cat2',15);
+insert into arrtest_i values(7,'cat2',26);
+insert into arrtest_i values(8,'cat2',32);
+insert into arrtest_i values(9,'cat2',30);
+
+-- expressions
+SELECT t.f[1][3][1] AS "131", t.f[2][2][1] AS "221" FROM (
+ SELECT ARRAY[[[111,112],[121,122],[131,132]],[[211,212],[221,122],[231,232]]] AS f
+) AS t;
+SELECT ARRAY[[[[[['hello'],['world']]]]]];
+SELECT ARRAY[ARRAY['hello'],ARRAY['world']];
+SELECT ARRAY(select f2 from arrtest_f order by f2) AS "ARRAY";
+
+-- with nulls
+SELECT '{1,null,3}'::int[];
+SELECT ARRAY[1,NULL,3];
+
+-- functions
+SELECT array_append(array[42], 6) AS "{42,6}";
+SELECT array_prepend(6, array[42]) AS "{6,42}";
+SELECT array_cat(ARRAY[1,2], ARRAY[3,4]) AS "{1,2,3,4}";
+SELECT array_cat(ARRAY[1,2], ARRAY[[3,4],[5,6]]) AS "{{1,2},{3,4},{5,6}}";
+SELECT array_cat(ARRAY[[3,4],[5,6]], ARRAY[1,2]) AS "{{3,4},{5,6},{1,2}}";
+
+SELECT array_position(ARRAY[1,2,3,4,5], 4);
+SELECT array_position(ARRAY[5,3,4,2,1], 4);
+SELECT array_position(ARRAY[[1,2],[3,4]], 3);
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], 'mon');
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], 'sat');
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], NULL);
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu',NULL,'fri','sat'], NULL);
+SELECT array_position(ARRAY['sun','mon','tue','wed','thu',NULL,'fri','sat'], 'sat');
+
+SELECT array_positions(NULL, 10);
+SELECT array_positions(NULL, NULL::int);
+SELECT array_positions(ARRAY[1,2,3,4,5,6,1,2,3,4,5,6], 4);
+SELECT array_positions(ARRAY[[1,2],[3,4]], 4);
+SELECT array_positions(ARRAY[1,2,3,4,5,6,1,2,3,4,5,6], NULL);
+SELECT array_positions(ARRAY[1,2,3,NULL,5,6,1,2,3,NULL,5,6], NULL);
+SELECT array_length(array_positions(ARRAY(SELECT 'AAAAAAAAAAAAAAAAAAAAAAAAA'::text || i % 10
+ FROM generate_series(1,100) g(i)),
+ 'AAAAAAAAAAAAAAAAAAAAAAAAA5'), 1);
+
+DO $$
+DECLARE
+ o int;
+ a int[] := ARRAY[1,2,3,2,3,1,2];
+BEGIN
+ o := array_position(a, 2);
+ WHILE o IS NOT NULL
+ LOOP
+ RAISE NOTICE '%', o;
+ o := array_position(a, 2, o + 1);
+ END LOOP;
+END
+$$ LANGUAGE plpgsql;
+
+SELECT array_position('[2:4]={1,2,3}'::int[], 1);
+SELECT array_positions('[2:4]={1,2,3}'::int[], 1);
+
+SELECT
+ array_position(ids, (1, 1)),
+ array_positions(ids, (1, 1))
+ FROM
+(VALUES
+ (ARRAY[(0, 0), (1, 1)]),
+ (ARRAY[(1, 1)])
+) AS f (ids);
+
+-- operators
+SELECT a FROM arrtest WHERE b = ARRAY[[[113,142],[1,147]]];
+SELECT NOT ARRAY[1.1,1.2,1.3] = ARRAY[1.1,1.2,1.3] AS "FALSE";
+SELECT ARRAY[1,2] || 3 AS "{1,2,3}";
+SELECT 0 || ARRAY[1,2] AS "{0,1,2}";
+SELECT ARRAY[1,2] || ARRAY[3,4] AS "{1,2,3,4}";
+SELECT ARRAY[[['hello','world']]] || ARRAY[[['happy','birthday']]] AS "ARRAY";
+SELECT ARRAY[[1,2],[3,4]] || ARRAY[5,6] AS "{{1,2},{3,4},{5,6}}";
+SELECT ARRAY[0,0] || ARRAY[1,1] || ARRAY[2,2] AS "{0,0,1,1,2,2}";
+SELECT 0 || ARRAY[1,2] || 3 AS "{0,1,2,3}";
+SELECT ARRAY[1.1] || ARRAY[2,3,4];
+SELECT array_agg(x) || array_agg(x) FROM (VALUES (ROW(1,2)), (ROW(3,4))) v(x);
+SELECT ROW(1,2) || array_agg(x) FROM (VALUES (ROW(3,4)), (ROW(5,6))) v(x);
+
+SELECT * FROM array_op_test WHERE i @> '{32}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i && '{32}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i @> '{17}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i && '{17}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i @> '{32,17}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i && '{32,17}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i <@ '{38,34,32,89}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i = '{}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i @> '{}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i && '{}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i <@ '{}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i = '{NULL}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i @> '{NULL}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i && '{NULL}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
+
+SELECT * FROM array_op_test WHERE t @> '{AAAAAAAA72908}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t && '{AAAAAAAA72908}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t @> '{AAAAAAAAAA646}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t && '{AAAAAAAAAA646}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t @> '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t && '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t <@ '{AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t = '{}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t @> '{}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t && '{}' ORDER BY seqno;
+SELECT * FROM array_op_test WHERE t <@ '{}' ORDER BY seqno;
+
+-- array casts
+SELECT ARRAY[1,2,3]::text[]::int[]::float8[] AS "{1,2,3}";
+SELECT pg_typeof(ARRAY[1,2,3]::text[]::int[]::float8[]) AS "double precision[]";
+SELECT ARRAY[['a','bc'],['def','hijk']]::text[]::varchar[] AS "{{a,bc},{def,hijk}}";
+SELECT pg_typeof(ARRAY[['a','bc'],['def','hijk']]::text[]::varchar[]) AS "character varying[]";
+SELECT CAST(ARRAY[[[[[['a','bb','ccc']]]]]] as text[]) as "{{{{{{a,bb,ccc}}}}}}";
+SELECT NULL::text[]::int[] AS "NULL";
+
+-- scalar op any/all (array)
+select 33 = any ('{1,2,3}');
+select 33 = any ('{1,2,33}');
+select 33 = all ('{1,2,33}');
+select 33 >= all ('{1,2,33}');
+-- boundary cases
+select null::int >= all ('{1,2,33}');
+select null::int >= all ('{}');
+select null::int >= any ('{}');
+-- cross-datatype
+select 33.4 = any (array[1,2,3]);
+select 33.4 > all (array[1,2,3]);
+-- errors
+select 33 * any ('{1,2,3}');
+select 33 * any (44);
+-- nulls
+select 33 = any (null::int[]);
+select null::int = any ('{1,2,3}');
+select 33 = any ('{1,null,3}');
+select 33 = any ('{1,null,33}');
+select 33 = all (null::int[]);
+select null::int = all ('{1,2,3}');
+select 33 = all ('{1,null,3}');
+select 33 = all ('{33,null,33}');
+-- nulls later in the bitmap
+SELECT -1 != ALL(ARRAY(SELECT NULLIF(g.i, 900) FROM generate_series(1,1000) g(i)));
+
+-- test indexes on arrays
+create temp table arr_tbl (f1 int[] unique);
+insert into arr_tbl values ('{1,2,3}');
+insert into arr_tbl values ('{1,2}');
+-- failure expected:
+insert into arr_tbl values ('{1,2,3}');
+insert into arr_tbl values ('{2,3,4}');
+insert into arr_tbl values ('{1,5,3}');
+insert into arr_tbl values ('{1,2,10}');
+
+set enable_seqscan to off;
+set enable_bitmapscan to off;
+select * from arr_tbl where f1 > '{1,2,3}' and f1 <= '{1,5,3}';
+select * from arr_tbl where f1 >= '{1,2,3}' and f1 < '{1,5,3}';
+
+-- test ON CONFLICT DO UPDATE with arrays
+create temp table arr_pk_tbl (pk int4 primary key, f1 int[]);
+insert into arr_pk_tbl values (1, '{1,2,3}');
+insert into arr_pk_tbl values (1, '{3,4,5}') on conflict (pk)
+ do update set f1[1] = excluded.f1[1], f1[3] = excluded.f1[3]
+ returning pk, f1;
+insert into arr_pk_tbl(pk, f1[1:2]) values (1, '{6,7,8}') on conflict (pk)
+ do update set f1[1] = excluded.f1[1],
+ f1[2] = excluded.f1[2],
+ f1[3] = excluded.f1[3]
+ returning pk, f1;
+
+-- note: if above selects don't produce the expected tuple order,
+-- then you didn't get an indexscan plan, and something is busted.
+reset enable_seqscan;
+reset enable_bitmapscan;
+
+-- test [not] (like|ilike) (any|all) (...)
+select 'foo' like any (array['%a', '%o']); -- t
+select 'foo' like any (array['%a', '%b']); -- f
+select 'foo' like all (array['f%', '%o']); -- t
+select 'foo' like all (array['f%', '%b']); -- f
+select 'foo' not like any (array['%a', '%b']); -- t
+select 'foo' not like all (array['%a', '%o']); -- f
+select 'foo' ilike any (array['%A', '%O']); -- t
+select 'foo' ilike all (array['F%', '%O']); -- t
+
+--
+-- General array parser tests
+--
+
+-- none of the following should be accepted
+select '{{1,{2}},{2,3}}'::text[];
+select '{{},{}}'::text[];
+select E'{{1,2},\\{2,3}}'::text[];
+select '{{"1 2" x},{3}}'::text[];
+select '{}}'::text[];
+select '{ }}'::text[];
+select array[];
+-- none of the above should be accepted
+
+-- all of the following should be accepted
+select '{}'::text[];
+select '{{{1,2,3,4},{2,3,4,5}},{{3,4,5,6},{4,5,6,7}}}'::text[];
+select '{0 second ,0 second}'::interval[];
+select '{ { "," } , { 3 } }'::text[];
+select ' { { " 0 second " , 0 second } }'::text[];
+select '{
+ 0 second,
+ @ 1 hour @ 42 minutes @ 20 seconds
+ }'::interval[];
+select array[]::text[];
+select '[0:1]={1.1,2.2}'::float8[];
+-- all of the above should be accepted
+
+-- tests for array aggregates
+CREATE TEMP TABLE arraggtest ( f1 INT[], f2 TEXT[][], f3 FLOAT[]);
+
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{1,2,3,4}','{{grey,red},{blue,blue}}','{1.6, 0.0}');
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{1,2,3}','{{grey,red},{grey,blue}}','{1.6}');
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{3,3,2,4,5,6}','{{white,yellow},{pink,orange}}','{2.1,3.3,1.8,1.7,1.6}');
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{2}','{{black,red},{green,orange}}','{1.6,2.2,2.6,0.4}');
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{4,2,6,7,8,1}','{{red},{black},{purple},{blue},{blue}}',NULL);
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+
+INSERT INTO arraggtest (f1, f2, f3) VALUES
+('{}','{{pink,white,blue,red,grey,orange}}','{2.1,1.87,1.4,2.2}');
+SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest;
+
+-- A few simple tests for arrays of composite types
+
+create type comptype as (f1 int, f2 text);
+
+create table comptable (c1 comptype, c2 comptype[]);
+
+-- XXX would like to not have to specify row() construct types here ...
+insert into comptable
+ values (row(1,'foo'), array[row(2,'bar')::comptype, row(3,'baz')::comptype]);
+
+-- check that implicitly named array type _comptype isn't a problem
+create type _comptype as enum('fooey');
+
+select * from comptable;
+select c2[2].f2 from comptable;
+
+drop type _comptype;
+drop table comptable;
+drop type comptype;
+
+create or replace function unnest1(anyarray)
+returns setof anyelement as $$
+select $1[s] from generate_subscripts($1,1) g(s);
+$$ language sql immutable;
+
+create or replace function unnest2(anyarray)
+returns setof anyelement as $$
+select $1[s1][s2] from generate_subscripts($1,1) g1(s1),
+ generate_subscripts($1,2) g2(s2);
+$$ language sql immutable;
+
+select * from unnest1(array[1,2,3]);
+select * from unnest2(array[[1,2,3],[4,5,6]]);
+
+drop function unnest1(anyarray);
+drop function unnest2(anyarray);
+
+select array_fill(null::integer, array[3,3],array[2,2]);
+select array_fill(null::integer, array[3,3]);
+select array_fill(null::text, array[3,3],array[2,2]);
+select array_fill(null::text, array[3,3]);
+select array_fill(7, array[3,3],array[2,2]);
+select array_fill(7, array[3,3]);
+select array_fill('juhu'::text, array[3,3],array[2,2]);
+select array_fill('juhu'::text, array[3,3]);
+select a, a = '{}' as is_eq, array_dims(a)
+ from (select array_fill(42, array[0]) as a) ss;
+select a, a = '{}' as is_eq, array_dims(a)
+ from (select array_fill(42, '{}') as a) ss;
+select a, a = '{}' as is_eq, array_dims(a)
+ from (select array_fill(42, '{}', '{}') as a) ss;
+-- raise exception
+select array_fill(1, null, array[2,2]);
+select array_fill(1, array[2,2], null);
+select array_fill(1, array[2,2], '{}');
+select array_fill(1, array[3,3], array[1,1,1]);
+select array_fill(1, array[1,2,null]);
+select array_fill(1, array[[1,2],[3,4]]);
+
+select string_to_array('1|2|3', '|');
+select string_to_array('1|2|3|', '|');
+select string_to_array('1||2|3||', '||');
+select string_to_array('1|2|3', '');
+select string_to_array('', '|');
+select string_to_array('1|2|3', NULL);
+select string_to_array(NULL, '|') IS NULL;
+select string_to_array('abc', '');
+select string_to_array('abc', '', 'abc');
+select string_to_array('abc', ',');
+select string_to_array('abc', ',', 'abc');
+select string_to_array('1,2,3,4,,6', ',');
+select string_to_array('1,2,3,4,,6', ',', '');
+select string_to_array('1,2,3,4,*,6', ',', '*');
+
+select v, v is null as "is null" from string_to_table('1|2|3', '|') g(v);
+select v, v is null as "is null" from string_to_table('1|2|3|', '|') g(v);
+select v, v is null as "is null" from string_to_table('1||2|3||', '||') g(v);
+select v, v is null as "is null" from string_to_table('1|2|3', '') g(v);
+select v, v is null as "is null" from string_to_table('', '|') g(v);
+select v, v is null as "is null" from string_to_table('1|2|3', NULL) g(v);
+select v, v is null as "is null" from string_to_table(NULL, '|') g(v);
+select v, v is null as "is null" from string_to_table('abc', '') g(v);
+select v, v is null as "is null" from string_to_table('abc', '', 'abc') g(v);
+select v, v is null as "is null" from string_to_table('abc', ',') g(v);
+select v, v is null as "is null" from string_to_table('abc', ',', 'abc') g(v);
+select v, v is null as "is null" from string_to_table('1,2,3,4,,6', ',') g(v);
+select v, v is null as "is null" from string_to_table('1,2,3,4,,6', ',', '') g(v);
+select v, v is null as "is null" from string_to_table('1,2,3,4,*,6', ',', '*') g(v);
+
+select array_to_string(NULL::int4[], ',') IS NULL;
+select array_to_string('{}'::int4[], ',');
+select array_to_string(array[1,2,3,4,NULL,6], ',');
+select array_to_string(array[1,2,3,4,NULL,6], ',', '*');
+select array_to_string(array[1,2,3,4,NULL,6], NULL);
+select array_to_string(array[1,2,3,4,NULL,6], ',', NULL);
+
+select array_to_string(string_to_array('1|2|3', '|'), '|');
+
+select array_length(array[1,2,3], 1);
+select array_length(array[[1,2,3], [4,5,6]], 0);
+select array_length(array[[1,2,3], [4,5,6]], 1);
+select array_length(array[[1,2,3], [4,5,6]], 2);
+select array_length(array[[1,2,3], [4,5,6]], 3);
+
+select cardinality(NULL::int[]);
+select cardinality('{}'::int[]);
+select cardinality(array[1,2,3]);
+select cardinality('[2:4]={5,6,7}'::int[]);
+select cardinality('{{1,2}}'::int[]);
+select cardinality('{{1,2},{3,4},{5,6}}'::int[]);
+select cardinality('{{{1,9},{5,6}},{{2,3},{3,4}}}'::int[]);
+
+-- array_agg(anynonarray)
+select array_agg(unique1) from (select unique1 from tenk1 where unique1 < 15 order by unique1) ss;
+select array_agg(ten) from (select ten from tenk1 where unique1 < 15 order by unique1) ss;
+select array_agg(nullif(ten, 4)) from (select ten from tenk1 where unique1 < 15 order by unique1) ss;
+select array_agg(unique1) from tenk1 where unique1 < -15;
+
+-- array_agg(anyarray)
+select array_agg(ar)
+ from (values ('{1,2}'::int[]), ('{3,4}'::int[])) v(ar);
+select array_agg(distinct ar order by ar desc)
+ from (select array[i / 2] from generate_series(1,10) a(i)) b(ar);
+select array_agg(ar)
+ from (select array_agg(array[i, i+1, i-1])
+ from generate_series(1,2) a(i)) b(ar);
+select array_agg(array[i+1.2, i+1.3, i+1.4]) from generate_series(1,3) g(i);
+select array_agg(array['Hello', i::text]) from generate_series(9,11) g(i);
+select array_agg(array[i, nullif(i, 3), i+1]) from generate_series(1,4) g(i);
+-- errors
+select array_agg('{}'::int[]) from generate_series(1,2);
+select array_agg(null::int[]) from generate_series(1,2);
+select array_agg(ar)
+ from (values ('{1,2}'::int[]), ('{3}'::int[])) v(ar);
+
+select unnest(array[1,2,3]);
+select * from unnest(array[1,2,3]);
+select unnest(array[1,2,3,4.5]::float8[]);
+select unnest(array[1,2,3,4.5]::numeric[]);
+select unnest(array[1,2,3,null,4,null,null,5,6]);
+select unnest(array[1,2,3,null,4,null,null,5,6]::text[]);
+select abs(unnest(array[1,2,null,-3]));
+select array_remove(array[1,2,2,3], 2);
+select array_remove(array[1,2,2,3], 5);
+select array_remove(array[1,NULL,NULL,3], NULL);
+select array_remove(array['A','CC','D','C','RR'], 'RR');
+select array_remove(array[1.0, 2.1, 3.3], 1);
+select array_remove('{{1,2,2},{1,4,3}}', 2); -- not allowed
+select array_remove(array['X','X','X'], 'X') = '{}';
+select array_replace(array[1,2,5,4],5,3);
+select array_replace(array[1,2,5,4],5,NULL);
+select array_replace(array[1,2,NULL,4,NULL],NULL,5);
+select array_replace(array['A','B','DD','B'],'B','CC');
+select array_replace(array[1,NULL,3],NULL,NULL);
+select array_replace(array['AB',NULL,'CDE'],NULL,'12');
+
+-- array(select array-value ...)
+select array(select array[i,i/2] from generate_series(1,5) i);
+select array(select array['Hello', i::text] from generate_series(9,11) i);
+
+-- Insert/update on a column that is array of composite
+
+create temp table t1 (f1 int8_tbl[]);
+insert into t1 (f1[5].q1) values(42);
+select * from t1;
+update t1 set f1[5].q2 = 43;
+select * from t1;
+
+-- Check that arrays of composites are safely detoasted when needed
+
+create temp table src (f1 text);
+insert into src
+ select string_agg(random()::text,'') from generate_series(1,10000);
+create type textandtext as (c1 text, c2 text);
+create temp table dest (f1 textandtext[]);
+insert into dest select array[row(f1,f1)::textandtext] from src;
+select length(md5((f1[1]).c2)) from dest;
+delete from src;
+select length(md5((f1[1]).c2)) from dest;
+truncate table src;
+drop table src;
+select length(md5((f1[1]).c2)) from dest;
+drop table dest;
+drop type textandtext;
+
+-- Tests for polymorphic-array form of width_bucket()
+
+-- this exercises the varwidth and float8 code paths
+SELECT
+ op,
+ width_bucket(op::numeric, ARRAY[1, 3, 5, 10.0]::numeric[]) AS wb_n1,
+ width_bucket(op::numeric, ARRAY[0, 5.5, 9.99]::numeric[]) AS wb_n2,
+ width_bucket(op::numeric, ARRAY[-6, -5, 2.0]::numeric[]) AS wb_n3,
+ width_bucket(op::float8, ARRAY[1, 3, 5, 10.0]::float8[]) AS wb_f1,
+ width_bucket(op::float8, ARRAY[0, 5.5, 9.99]::float8[]) AS wb_f2,
+ width_bucket(op::float8, ARRAY[-6, -5, 2.0]::float8[]) AS wb_f3
+FROM (VALUES
+ (-5.2),
+ (-0.0000000001),
+ (0.000000000001),
+ (1),
+ (1.99999999999999),
+ (2),
+ (2.00000000000001),
+ (3),
+ (4),
+ (4.5),
+ (5),
+ (5.5),
+ (6),
+ (7),
+ (8),
+ (9),
+ (9.99999999999999),
+ (10),
+ (10.0000000000001)
+) v(op);
+
+-- ensure float8 path handles NaN properly
+SELECT
+ op,
+ width_bucket(op, ARRAY[1, 3, 9, 'NaN', 'NaN']::float8[]) AS wb
+FROM (VALUES
+ (-5.2::float8),
+ (4::float8),
+ (77::float8),
+ ('NaN'::float8)
+) v(op);
+
+-- these exercise the generic fixed-width code path
+SELECT
+ op,
+ width_bucket(op, ARRAY[1, 3, 5, 10]) AS wb_1
+FROM generate_series(0,11) as op;
+
+SELECT width_bucket(now(),
+ array['yesterday', 'today', 'tomorrow']::timestamptz[]);
+
+-- corner cases
+SELECT width_bucket(5, ARRAY[3]);
+SELECT width_bucket(5, '{}');
+
+-- error cases
+SELECT width_bucket('5'::text, ARRAY[3, 4]::integer[]);
+SELECT width_bucket(5, ARRAY[3, 4, NULL]);
+SELECT width_bucket(5, ARRAY[ARRAY[1, 2], ARRAY[3, 4]]);
+
+-- trim_array
+
+SELECT arr, trim_array(arr, 2)
+FROM
+(VALUES ('{1,2,3,4,5,6}'::bigint[]),
+ ('{1,2}'),
+ ('[10:16]={1,2,3,4,5,6,7}'),
+ ('[-15:-10]={1,2,3,4,5,6}'),
+ ('{{1,10},{2,20},{3,30},{4,40}}')) v(arr);
+
+SELECT trim_array(ARRAY[1, 2, 3], -1); -- fail
+SELECT trim_array(ARRAY[1, 2, 3], 10); -- fail
+SELECT trim_array(ARRAY[]::int[], 1); -- fail
diff --git a/ydb/library/yql/tests/postgresql/original/cases/bit.out b/ydb/library/yql/tests/postgresql/original/cases/bit.out
new file mode 100644
index 0000000000..a5aab9c0e3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/bit.out
@@ -0,0 +1,748 @@
+--
+-- BIT types
+--
+--
+-- Build tables for testing
+--
+CREATE TABLE BIT_TABLE(b BIT(11));
+INSERT INTO BIT_TABLE VALUES (B'10'); -- too short
+ERROR: bit string length 2 does not match type bit(11)
+INSERT INTO BIT_TABLE VALUES (B'00000000000');
+INSERT INTO BIT_TABLE VALUES (B'11011000000');
+INSERT INTO BIT_TABLE VALUES (B'01010101010');
+INSERT INTO BIT_TABLE VALUES (B'101011111010'); -- too long
+ERROR: bit string length 12 does not match type bit(11)
+--INSERT INTO BIT_TABLE VALUES ('X554');
+--INSERT INTO BIT_TABLE VALUES ('X555');
+SELECT * FROM BIT_TABLE;
+ b
+-------------
+ 00000000000
+ 11011000000
+ 01010101010
+(3 rows)
+
+CREATE TABLE VARBIT_TABLE(v BIT VARYING(11));
+INSERT INTO VARBIT_TABLE VALUES (B'');
+INSERT INTO VARBIT_TABLE VALUES (B'0');
+INSERT INTO VARBIT_TABLE VALUES (B'010101');
+INSERT INTO VARBIT_TABLE VALUES (B'01010101010');
+INSERT INTO VARBIT_TABLE VALUES (B'101011111010'); -- too long
+ERROR: bit string too long for type bit varying(11)
+--INSERT INTO VARBIT_TABLE VALUES ('X554');
+--INSERT INTO VARBIT_TABLE VALUES ('X555');
+SELECT * FROM VARBIT_TABLE;
+ v
+-------------
+
+ 0
+ 010101
+ 01010101010
+(4 rows)
+
+-- Concatenation
+SELECT v, b, (v || b) AS concat
+ FROM BIT_TABLE, VARBIT_TABLE
+ ORDER BY 3;
+ v | b | concat
+-------------+-------------+------------------------
+ | 00000000000 | 00000000000
+ 0 | 00000000000 | 000000000000
+ 0 | 01010101010 | 001010101010
+ 010101 | 00000000000 | 01010100000000000
+ | 01010101010 | 01010101010
+ 01010101010 | 00000000000 | 0101010101000000000000
+ 01010101010 | 01010101010 | 0101010101001010101010
+ 010101 | 01010101010 | 01010101010101010
+ 01010101010 | 11011000000 | 0101010101011011000000
+ 010101 | 11011000000 | 01010111011000000
+ 0 | 11011000000 | 011011000000
+ | 11011000000 | 11011000000
+(12 rows)
+
+-- Length
+SELECT b, length(b) AS lb
+ FROM BIT_TABLE;
+ b | lb
+-------------+----
+ 00000000000 | 11
+ 11011000000 | 11
+ 01010101010 | 11
+(3 rows)
+
+SELECT v, length(v) AS lv
+ FROM VARBIT_TABLE;
+ v | lv
+-------------+----
+ | 0
+ 0 | 1
+ 010101 | 6
+ 01010101010 | 11
+(4 rows)
+
+-- Substring
+SELECT b,
+ SUBSTRING(b FROM 2 FOR 4) AS sub_2_4,
+ SUBSTRING(b FROM 7 FOR 13) AS sub_7_13,
+ SUBSTRING(b FROM 6) AS sub_6
+ FROM BIT_TABLE;
+ b | sub_2_4 | sub_7_13 | sub_6
+-------------+---------+----------+--------
+ 00000000000 | 0000 | 00000 | 000000
+ 11011000000 | 1011 | 00000 | 000000
+ 01010101010 | 1010 | 01010 | 101010
+(3 rows)
+
+SELECT v,
+ SUBSTRING(v FROM 2 FOR 4) AS sub_2_4,
+ SUBSTRING(v FROM 7 FOR 13) AS sub_7_13,
+ SUBSTRING(v FROM 6) AS sub_6
+ FROM VARBIT_TABLE;
+ v | sub_2_4 | sub_7_13 | sub_6
+-------------+---------+----------+--------
+ | | |
+ 0 | | |
+ 010101 | 1010 | | 1
+ 01010101010 | 1010 | 01010 | 101010
+(4 rows)
+
+-- test overflow cases
+SELECT SUBSTRING('01010101'::bit(8) FROM 2 FOR 2147483646) AS "1010101";
+ 1010101
+---------
+ 1010101
+(1 row)
+
+SELECT SUBSTRING('01010101'::bit(8) FROM -10 FOR 2147483646) AS "01010101";
+ 01010101
+----------
+ 01010101
+(1 row)
+
+SELECT SUBSTRING('01010101'::bit(8) FROM -10 FOR -2147483646) AS "error";
+ERROR: negative substring length not allowed
+SELECT SUBSTRING('01010101'::varbit FROM 2 FOR 2147483646) AS "1010101";
+ 1010101
+---------
+ 1010101
+(1 row)
+
+SELECT SUBSTRING('01010101'::varbit FROM -10 FOR 2147483646) AS "01010101";
+ 01010101
+----------
+ 01010101
+(1 row)
+
+SELECT SUBSTRING('01010101'::varbit FROM -10 FOR -2147483646) AS "error";
+ERROR: negative substring length not allowed
+--- Bit operations
+DROP TABLE varbit_table;
+CREATE TABLE varbit_table (a BIT VARYING(16), b BIT VARYING(16));
+COPY varbit_table FROM stdin;
+SELECT a, b, ~a AS "~ a", a & b AS "a & b",
+ a | b AS "a | b", a # b AS "a # b" FROM varbit_table;
+ a | b | ~ a | a & b | a | b | a # b
+------------------+------------------+------------------+------------------+------------------+------------------
+ 00001111 | 00010000 | 11110000 | 00000000 | 00011111 | 00011111
+ 00011111 | 00010001 | 11100000 | 00010001 | 00011111 | 00001110
+ 00101111 | 00010010 | 11010000 | 00000010 | 00111111 | 00111101
+ 00111111 | 00010011 | 11000000 | 00010011 | 00111111 | 00101100
+ 10001111 | 00000100 | 01110000 | 00000100 | 10001111 | 10001011
+ 0000000000001111 | 0000000000010000 | 1111111111110000 | 0000000000000000 | 0000000000011111 | 0000000000011111
+ 0000000100100011 | 1111111111111111 | 1111111011011100 | 0000000100100011 | 1111111111111111 | 1111111011011100
+ 0010010001101000 | 0010010001101000 | 1101101110010111 | 0010010001101000 | 0010010001101000 | 0000000000000000
+ 1111101001010000 | 0000010110101111 | 0000010110101111 | 0000000000000000 | 1111111111111111 | 1111111111111111
+ 0001001000110100 | 1111111111110101 | 1110110111001011 | 0001001000110100 | 1111111111110101 | 1110110111000001
+(10 rows)
+
+SELECT a,b,a<b AS "a<b",a<=b AS "a<=b",a=b AS "a=b",
+ a>=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM varbit_table;
+ a | b | a<b | a<=b | a=b | a>=b | a>b | a<>b
+------------------+------------------+-----+------+-----+------+-----+------
+ 00001111 | 00010000 | t | t | f | f | f | t
+ 00011111 | 00010001 | f | f | f | t | t | t
+ 00101111 | 00010010 | f | f | f | t | t | t
+ 00111111 | 00010011 | f | f | f | t | t | t
+ 10001111 | 00000100 | f | f | f | t | t | t
+ 0000000000001111 | 0000000000010000 | t | t | f | f | f | t
+ 0000000100100011 | 1111111111111111 | t | t | f | f | f | t
+ 0010010001101000 | 0010010001101000 | f | t | t | t | f | f
+ 1111101001010000 | 0000010110101111 | f | f | f | t | t | t
+ 0001001000110100 | 1111111111110101 | t | t | f | f | f | t
+(10 rows)
+
+SELECT a,a<<4 AS "a<<4",b,b>>2 AS "b>>2" FROM varbit_table;
+ a | a<<4 | b | b>>2
+------------------+------------------+------------------+------------------
+ 00001111 | 11110000 | 00010000 | 00000100
+ 00011111 | 11110000 | 00010001 | 00000100
+ 00101111 | 11110000 | 00010010 | 00000100
+ 00111111 | 11110000 | 00010011 | 00000100
+ 10001111 | 11110000 | 00000100 | 00000001
+ 0000000000001111 | 0000000011110000 | 0000000000010000 | 0000000000000100
+ 0000000100100011 | 0001001000110000 | 1111111111111111 | 0011111111111111
+ 0010010001101000 | 0100011010000000 | 0010010001101000 | 0000100100011010
+ 1111101001010000 | 1010010100000000 | 0000010110101111 | 0000000101101011
+ 0001001000110100 | 0010001101000000 | 1111111111110101 | 0011111111111101
+(10 rows)
+
+DROP TABLE varbit_table;
+--- Bit operations
+DROP TABLE bit_table;
+CREATE TABLE bit_table (a BIT(16), b BIT(16));
+COPY bit_table FROM stdin;
+SELECT a,b,~a AS "~ a",a & b AS "a & b",
+ a|b AS "a | b", a # b AS "a # b" FROM bit_table;
+ a | b | ~ a | a & b | a | b | a # b
+------------------+------------------+------------------+------------------+------------------+------------------
+ 0000111100000000 | 0001000000000000 | 1111000011111111 | 0000000000000000 | 0001111100000000 | 0001111100000000
+ 0001111100000000 | 0001000100000000 | 1110000011111111 | 0001000100000000 | 0001111100000000 | 0000111000000000
+ 0010111100000000 | 0001001000000000 | 1101000011111111 | 0000001000000000 | 0011111100000000 | 0011110100000000
+ 0011111100000000 | 0001001100000000 | 1100000011111111 | 0001001100000000 | 0011111100000000 | 0010110000000000
+ 1000111100000000 | 0000010000000000 | 0111000011111111 | 0000010000000000 | 1000111100000000 | 1000101100000000
+ 0000000000001111 | 0000000000010000 | 1111111111110000 | 0000000000000000 | 0000000000011111 | 0000000000011111
+ 0000000100100011 | 1111111111111111 | 1111111011011100 | 0000000100100011 | 1111111111111111 | 1111111011011100
+ 0010010001101000 | 0010010001101000 | 1101101110010111 | 0010010001101000 | 0010010001101000 | 0000000000000000
+ 1111101001010000 | 0000010110101111 | 0000010110101111 | 0000000000000000 | 1111111111111111 | 1111111111111111
+ 0001001000110100 | 1111111111110101 | 1110110111001011 | 0001001000110100 | 1111111111110101 | 1110110111000001
+(10 rows)
+
+SELECT a,b,a<b AS "a<b",a<=b AS "a<=b",a=b AS "a=b",
+ a>=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM bit_table;
+ a | b | a<b | a<=b | a=b | a>=b | a>b | a<>b
+------------------+------------------+-----+------+-----+------+-----+------
+ 0000111100000000 | 0001000000000000 | t | t | f | f | f | t
+ 0001111100000000 | 0001000100000000 | f | f | f | t | t | t
+ 0010111100000000 | 0001001000000000 | f | f | f | t | t | t
+ 0011111100000000 | 0001001100000000 | f | f | f | t | t | t
+ 1000111100000000 | 0000010000000000 | f | f | f | t | t | t
+ 0000000000001111 | 0000000000010000 | t | t | f | f | f | t
+ 0000000100100011 | 1111111111111111 | t | t | f | f | f | t
+ 0010010001101000 | 0010010001101000 | f | t | t | t | f | f
+ 1111101001010000 | 0000010110101111 | f | f | f | t | t | t
+ 0001001000110100 | 1111111111110101 | t | t | f | f | f | t
+(10 rows)
+
+SELECT a,a<<4 AS "a<<4",b,b>>2 AS "b>>2" FROM bit_table;
+ a | a<<4 | b | b>>2
+------------------+------------------+------------------+------------------
+ 0000111100000000 | 1111000000000000 | 0001000000000000 | 0000010000000000
+ 0001111100000000 | 1111000000000000 | 0001000100000000 | 0000010001000000
+ 0010111100000000 | 1111000000000000 | 0001001000000000 | 0000010010000000
+ 0011111100000000 | 1111000000000000 | 0001001100000000 | 0000010011000000
+ 1000111100000000 | 1111000000000000 | 0000010000000000 | 0000000100000000
+ 0000000000001111 | 0000000011110000 | 0000000000010000 | 0000000000000100
+ 0000000100100011 | 0001001000110000 | 1111111111111111 | 0011111111111111
+ 0010010001101000 | 0100011010000000 | 0010010001101000 | 0000100100011010
+ 1111101001010000 | 1010010100000000 | 0000010110101111 | 0000000101101011
+ 0001001000110100 | 0010001101000000 | 1111111111110101 | 0011111111111101
+(10 rows)
+
+DROP TABLE bit_table;
+-- The following should fail
+select B'001' & B'10';
+ERROR: cannot AND bit strings of different sizes
+select B'0111' | B'011';
+ERROR: cannot OR bit strings of different sizes
+select B'0010' # B'011101';
+ERROR: cannot XOR bit strings of different sizes
+-- More position tests, checking all the boundary cases
+SELECT POSITION(B'1010' IN B'0000101'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'1010' IN B'00001010'); -- 5
+ position
+----------
+ 5
+(1 row)
+
+SELECT POSITION(B'1010' IN B'00000101'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'1010' IN B'000001010'); -- 6
+ position
+----------
+ 6
+(1 row)
+
+SELECT POSITION(B'' IN B'00001010'); -- 1
+ position
+----------
+ 1
+(1 row)
+
+SELECT POSITION(B'0' IN B''); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'' IN B''); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'101101' IN B'001011011011011000'); -- 3
+ position
+----------
+ 3
+(1 row)
+
+SELECT POSITION(B'10110110' IN B'001011011011010'); -- 3
+ position
+----------
+ 3
+(1 row)
+
+SELECT POSITION(B'1011011011011' IN B'001011011011011'); -- 3
+ position
+----------
+ 3
+(1 row)
+
+SELECT POSITION(B'1011011011011' IN B'00001011011011011'); -- 5
+ position
+----------
+ 5
+(1 row)
+
+SELECT POSITION(B'11101011' IN B'11101011'); -- 1
+ position
+----------
+ 1
+(1 row)
+
+SELECT POSITION(B'11101011' IN B'011101011'); -- 2
+ position
+----------
+ 2
+(1 row)
+
+SELECT POSITION(B'11101011' IN B'00011101011'); -- 4
+ position
+----------
+ 4
+(1 row)
+
+SELECT POSITION(B'11101011' IN B'0000011101011'); -- 6
+ position
+----------
+ 6
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'111010110'); -- 1
+ position
+----------
+ 1
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'0111010110'); -- 2
+ position
+----------
+ 2
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'000111010110'); -- 4
+ position
+----------
+ 4
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'00000111010110'); -- 6
+ position
+----------
+ 6
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'11101011'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'011101011'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'00011101011'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'0000011101011'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'111010110'); -- 1
+ position
+----------
+ 1
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'0111010110'); -- 2
+ position
+----------
+ 2
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'000111010110'); -- 4
+ position
+----------
+ 4
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'00000111010110'); -- 6
+ position
+----------
+ 6
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'000001110101111101011'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'0000001110101111101011'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'000000001110101111101011'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'00000000001110101111101011'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'0000011101011111010110'); -- 14
+ position
+----------
+ 14
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'00000011101011111010110'); -- 15
+ position
+----------
+ 15
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'0000000011101011111010110'); -- 17
+ position
+----------
+ 17
+(1 row)
+
+SELECT POSITION(B'111010110' IN B'000000000011101011111010110'); -- 19
+ position
+----------
+ 19
+(1 row)
+
+SELECT POSITION(B'000000000011101011111010110' IN B'000000000011101011111010110'); -- 1
+ position
+----------
+ 1
+(1 row)
+
+SELECT POSITION(B'00000000011101011111010110' IN B'000000000011101011111010110'); -- 2
+ position
+----------
+ 2
+(1 row)
+
+SELECT POSITION(B'0000000000011101011111010110' IN B'000000000011101011111010110'); -- 0
+ position
+----------
+ 0
+(1 row)
+
+-- Shifting
+CREATE TABLE BIT_SHIFT_TABLE(b BIT(16));
+INSERT INTO BIT_SHIFT_TABLE VALUES (B'1101100000000000');
+INSERT INTO BIT_SHIFT_TABLE SELECT b>>1 FROM BIT_SHIFT_TABLE;
+INSERT INTO BIT_SHIFT_TABLE SELECT b>>2 FROM BIT_SHIFT_TABLE;
+INSERT INTO BIT_SHIFT_TABLE SELECT b>>4 FROM BIT_SHIFT_TABLE;
+INSERT INTO BIT_SHIFT_TABLE SELECT b>>8 FROM BIT_SHIFT_TABLE;
+SELECT POSITION(B'1101' IN b),
+ POSITION(B'11011' IN b),
+ b
+ FROM BIT_SHIFT_TABLE ;
+ position | position | b
+----------+----------+------------------
+ 1 | 1 | 1101100000000000
+ 2 | 2 | 0110110000000000
+ 3 | 3 | 0011011000000000
+ 4 | 4 | 0001101100000000
+ 5 | 5 | 0000110110000000
+ 6 | 6 | 0000011011000000
+ 7 | 7 | 0000001101100000
+ 8 | 8 | 0000000110110000
+ 9 | 9 | 0000000011011000
+ 10 | 10 | 0000000001101100
+ 11 | 11 | 0000000000110110
+ 12 | 12 | 0000000000011011
+ 13 | 0 | 0000000000001101
+ 0 | 0 | 0000000000000110
+ 0 | 0 | 0000000000000011
+ 0 | 0 | 0000000000000001
+(16 rows)
+
+SELECT b, b >> 1 AS bsr, b << 1 AS bsl
+ FROM BIT_SHIFT_TABLE ;
+ b | bsr | bsl
+------------------+------------------+------------------
+ 1101100000000000 | 0110110000000000 | 1011000000000000
+ 0110110000000000 | 0011011000000000 | 1101100000000000
+ 0011011000000000 | 0001101100000000 | 0110110000000000
+ 0001101100000000 | 0000110110000000 | 0011011000000000
+ 0000110110000000 | 0000011011000000 | 0001101100000000
+ 0000011011000000 | 0000001101100000 | 0000110110000000
+ 0000001101100000 | 0000000110110000 | 0000011011000000
+ 0000000110110000 | 0000000011011000 | 0000001101100000
+ 0000000011011000 | 0000000001101100 | 0000000110110000
+ 0000000001101100 | 0000000000110110 | 0000000011011000
+ 0000000000110110 | 0000000000011011 | 0000000001101100
+ 0000000000011011 | 0000000000001101 | 0000000000110110
+ 0000000000001101 | 0000000000000110 | 0000000000011010
+ 0000000000000110 | 0000000000000011 | 0000000000001100
+ 0000000000000011 | 0000000000000001 | 0000000000000110
+ 0000000000000001 | 0000000000000000 | 0000000000000010
+(16 rows)
+
+SELECT b, b >> 8 AS bsr8, b << 8 AS bsl8
+ FROM BIT_SHIFT_TABLE ;
+ b | bsr8 | bsl8
+------------------+------------------+------------------
+ 1101100000000000 | 0000000011011000 | 0000000000000000
+ 0110110000000000 | 0000000001101100 | 0000000000000000
+ 0011011000000000 | 0000000000110110 | 0000000000000000
+ 0001101100000000 | 0000000000011011 | 0000000000000000
+ 0000110110000000 | 0000000000001101 | 1000000000000000
+ 0000011011000000 | 0000000000000110 | 1100000000000000
+ 0000001101100000 | 0000000000000011 | 0110000000000000
+ 0000000110110000 | 0000000000000001 | 1011000000000000
+ 0000000011011000 | 0000000000000000 | 1101100000000000
+ 0000000001101100 | 0000000000000000 | 0110110000000000
+ 0000000000110110 | 0000000000000000 | 0011011000000000
+ 0000000000011011 | 0000000000000000 | 0001101100000000
+ 0000000000001101 | 0000000000000000 | 0000110100000000
+ 0000000000000110 | 0000000000000000 | 0000011000000000
+ 0000000000000011 | 0000000000000000 | 0000001100000000
+ 0000000000000001 | 0000000000000000 | 0000000100000000
+(16 rows)
+
+SELECT b::bit(15), b::bit(15) >> 1 AS bsr, b::bit(15) << 1 AS bsl
+ FROM BIT_SHIFT_TABLE ;
+ b | bsr | bsl
+-----------------+-----------------+-----------------
+ 110110000000000 | 011011000000000 | 101100000000000
+ 011011000000000 | 001101100000000 | 110110000000000
+ 001101100000000 | 000110110000000 | 011011000000000
+ 000110110000000 | 000011011000000 | 001101100000000
+ 000011011000000 | 000001101100000 | 000110110000000
+ 000001101100000 | 000000110110000 | 000011011000000
+ 000000110110000 | 000000011011000 | 000001101100000
+ 000000011011000 | 000000001101100 | 000000110110000
+ 000000001101100 | 000000000110110 | 000000011011000
+ 000000000110110 | 000000000011011 | 000000001101100
+ 000000000011011 | 000000000001101 | 000000000110110
+ 000000000001101 | 000000000000110 | 000000000011010
+ 000000000000110 | 000000000000011 | 000000000001100
+ 000000000000011 | 000000000000001 | 000000000000110
+ 000000000000001 | 000000000000000 | 000000000000010
+ 000000000000000 | 000000000000000 | 000000000000000
+(16 rows)
+
+SELECT b::bit(15), b::bit(15) >> 8 AS bsr8, b::bit(15) << 8 AS bsl8
+ FROM BIT_SHIFT_TABLE ;
+ b | bsr8 | bsl8
+-----------------+-----------------+-----------------
+ 110110000000000 | 000000001101100 | 000000000000000
+ 011011000000000 | 000000000110110 | 000000000000000
+ 001101100000000 | 000000000011011 | 000000000000000
+ 000110110000000 | 000000000001101 | 000000000000000
+ 000011011000000 | 000000000000110 | 100000000000000
+ 000001101100000 | 000000000000011 | 110000000000000
+ 000000110110000 | 000000000000001 | 011000000000000
+ 000000011011000 | 000000000000000 | 101100000000000
+ 000000001101100 | 000000000000000 | 110110000000000
+ 000000000110110 | 000000000000000 | 011011000000000
+ 000000000011011 | 000000000000000 | 001101100000000
+ 000000000001101 | 000000000000000 | 000110100000000
+ 000000000000110 | 000000000000000 | 000011000000000
+ 000000000000011 | 000000000000000 | 000001100000000
+ 000000000000001 | 000000000000000 | 000000100000000
+ 000000000000000 | 000000000000000 | 000000000000000
+(16 rows)
+
+CREATE TABLE VARBIT_SHIFT_TABLE(v BIT VARYING(20));
+INSERT INTO VARBIT_SHIFT_TABLE VALUES (B'11011');
+INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'0' AS BIT VARYING(6)) >>1 FROM VARBIT_SHIFT_TABLE;
+INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00' AS BIT VARYING(8)) >>2 FROM VARBIT_SHIFT_TABLE;
+INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'0000' AS BIT VARYING(12)) >>4 FROM VARBIT_SHIFT_TABLE;
+INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00000000' AS BIT VARYING(20)) >>8 FROM VARBIT_SHIFT_TABLE;
+SELECT POSITION(B'1101' IN v),
+ POSITION(B'11011' IN v),
+ v
+ FROM VARBIT_SHIFT_TABLE ;
+ position | position | v
+----------+----------+----------------------
+ 1 | 1 | 11011
+ 2 | 2 | 011011
+ 3 | 3 | 0011011
+ 4 | 4 | 00011011
+ 5 | 5 | 000011011
+ 6 | 6 | 0000011011
+ 7 | 7 | 00000011011
+ 8 | 8 | 000000011011
+ 9 | 9 | 0000000011011
+ 10 | 10 | 00000000011011
+ 11 | 11 | 000000000011011
+ 12 | 12 | 0000000000011011
+ 13 | 13 | 00000000000011011
+ 14 | 14 | 000000000000011011
+ 15 | 15 | 0000000000000011011
+ 16 | 16 | 00000000000000011011
+(16 rows)
+
+SELECT v, v >> 1 AS vsr, v << 1 AS vsl
+ FROM VARBIT_SHIFT_TABLE ;
+ v | vsr | vsl
+----------------------+----------------------+----------------------
+ 11011 | 01101 | 10110
+ 011011 | 001101 | 110110
+ 0011011 | 0001101 | 0110110
+ 00011011 | 00001101 | 00110110
+ 000011011 | 000001101 | 000110110
+ 0000011011 | 0000001101 | 0000110110
+ 00000011011 | 00000001101 | 00000110110
+ 000000011011 | 000000001101 | 000000110110
+ 0000000011011 | 0000000001101 | 0000000110110
+ 00000000011011 | 00000000001101 | 00000000110110
+ 000000000011011 | 000000000001101 | 000000000110110
+ 0000000000011011 | 0000000000001101 | 0000000000110110
+ 00000000000011011 | 00000000000001101 | 00000000000110110
+ 000000000000011011 | 000000000000001101 | 000000000000110110
+ 0000000000000011011 | 0000000000000001101 | 0000000000000110110
+ 00000000000000011011 | 00000000000000001101 | 00000000000000110110
+(16 rows)
+
+SELECT v, v >> 8 AS vsr8, v << 8 AS vsl8
+ FROM VARBIT_SHIFT_TABLE ;
+ v | vsr8 | vsl8
+----------------------+----------------------+----------------------
+ 11011 | 00000 | 00000
+ 011011 | 000000 | 000000
+ 0011011 | 0000000 | 0000000
+ 00011011 | 00000000 | 00000000
+ 000011011 | 000000000 | 100000000
+ 0000011011 | 0000000000 | 1100000000
+ 00000011011 | 00000000000 | 01100000000
+ 000000011011 | 000000000000 | 101100000000
+ 0000000011011 | 0000000000000 | 1101100000000
+ 00000000011011 | 00000000000000 | 01101100000000
+ 000000000011011 | 000000000000000 | 001101100000000
+ 0000000000011011 | 0000000000000000 | 0001101100000000
+ 00000000000011011 | 00000000000000000 | 00001101100000000
+ 000000000000011011 | 000000000000000000 | 000001101100000000
+ 0000000000000011011 | 0000000000000000000 | 0000001101100000000
+ 00000000000000011011 | 00000000000000000000 | 00000001101100000000
+(16 rows)
+
+DROP TABLE BIT_SHIFT_TABLE;
+DROP TABLE VARBIT_SHIFT_TABLE;
+-- Get/Set bit
+SELECT get_bit(B'0101011000100', 10);
+ get_bit
+---------
+ 1
+(1 row)
+
+SELECT set_bit(B'0101011000100100', 15, 1);
+ set_bit
+------------------
+ 0101011000100101
+(1 row)
+
+SELECT set_bit(B'0101011000100100', 16, 1); -- fail
+ERROR: bit index 16 out of valid range (0..15)
+-- Overlay
+SELECT overlay(B'0101011100' placing '001' from 2 for 3);
+ overlay
+------------
+ 0001011100
+(1 row)
+
+SELECT overlay(B'0101011100' placing '101' from 6);
+ overlay
+------------
+ 0101010100
+(1 row)
+
+SELECT overlay(B'0101011100' placing '001' from 11);
+ overlay
+---------------
+ 0101011100001
+(1 row)
+
+SELECT overlay(B'0101011100' placing '001' from 20);
+ overlay
+---------------
+ 0101011100001
+(1 row)
+
+-- bit_count
+SELECT bit_count(B'0101011100'::bit(10));
+ bit_count
+-----------
+ 5
+(1 row)
+
+SELECT bit_count(B'1111111111'::bit(10));
+ bit_count
+-----------
+ 10
+(1 row)
+
+-- This table is intentionally left around to exercise pg_dump/pg_upgrade
+CREATE TABLE bit_defaults(
+ b1 bit(4) DEFAULT '1001',
+ b2 bit(4) DEFAULT B'0101',
+ b3 bit varying(5) DEFAULT '1001',
+ b4 bit varying(5) DEFAULT B'0101'
+);
+\d bit_defaults
+ Table "public.bit_defaults"
+ Column | Type | Collation | Nullable | Default
+--------+----------------+-----------+----------+---------------------
+ b1 | bit(4) | | | '1001'::"bit"
+ b2 | bit(4) | | | '0101'::"bit"
+ b3 | bit varying(5) | | | '1001'::bit varying
+ b4 | bit varying(5) | | | '0101'::"bit"
+
+INSERT INTO bit_defaults DEFAULT VALUES;
+TABLE bit_defaults;
+ b1 | b2 | b3 | b4
+------+------+------+------
+ 1001 | 0101 | 1001 | 0101
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/bit.sql b/ydb/library/yql/tests/postgresql/original/cases/bit.sql
new file mode 100644
index 0000000000..0a424e796b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/bit.sql
@@ -0,0 +1,231 @@
+--
+-- BIT types
+--
+
+--
+-- Build tables for testing
+--
+
+CREATE TABLE BIT_TABLE(b BIT(11));
+
+INSERT INTO BIT_TABLE VALUES (B'10'); -- too short
+INSERT INTO BIT_TABLE VALUES (B'00000000000');
+INSERT INTO BIT_TABLE VALUES (B'11011000000');
+INSERT INTO BIT_TABLE VALUES (B'01010101010');
+INSERT INTO BIT_TABLE VALUES (B'101011111010'); -- too long
+--INSERT INTO BIT_TABLE VALUES ('X554');
+--INSERT INTO BIT_TABLE VALUES ('X555');
+
+SELECT * FROM BIT_TABLE;
+
+CREATE TABLE VARBIT_TABLE(v BIT VARYING(11));
+
+INSERT INTO VARBIT_TABLE VALUES (B'');
+INSERT INTO VARBIT_TABLE VALUES (B'0');
+INSERT INTO VARBIT_TABLE VALUES (B'010101');
+INSERT INTO VARBIT_TABLE VALUES (B'01010101010');
+INSERT INTO VARBIT_TABLE VALUES (B'101011111010'); -- too long
+--INSERT INTO VARBIT_TABLE VALUES ('X554');
+--INSERT INTO VARBIT_TABLE VALUES ('X555');
+SELECT * FROM VARBIT_TABLE;
+
+
+-- Concatenation
+SELECT v, b, (v || b) AS concat
+ FROM BIT_TABLE, VARBIT_TABLE
+ ORDER BY 3;
+
+-- Length
+SELECT b, length(b) AS lb
+ FROM BIT_TABLE;
+SELECT v, length(v) AS lv
+ FROM VARBIT_TABLE;
+
+-- Substring
+SELECT b,
+ SUBSTRING(b FROM 2 FOR 4) AS sub_2_4,
+ SUBSTRING(b FROM 7 FOR 13) AS sub_7_13,
+ SUBSTRING(b FROM 6) AS sub_6
+ FROM BIT_TABLE;
+SELECT v,
+ SUBSTRING(v FROM 2 FOR 4) AS sub_2_4,
+ SUBSTRING(v FROM 7 FOR 13) AS sub_7_13,
+ SUBSTRING(v FROM 6) AS sub_6
+ FROM VARBIT_TABLE;
+
+-- test overflow cases
+SELECT SUBSTRING('01010101'::bit(8) FROM 2 FOR 2147483646) AS "1010101";
+SELECT SUBSTRING('01010101'::bit(8) FROM -10 FOR 2147483646) AS "01010101";
+SELECT SUBSTRING('01010101'::bit(8) FROM -10 FOR -2147483646) AS "error";
+SELECT SUBSTRING('01010101'::varbit FROM 2 FOR 2147483646) AS "1010101";
+SELECT SUBSTRING('01010101'::varbit FROM -10 FOR 2147483646) AS "01010101";
+SELECT SUBSTRING('01010101'::varbit FROM -10 FOR -2147483646) AS "error";
+
+--- Bit operations
+DROP TABLE varbit_table;
+CREATE TABLE varbit_table (a BIT VARYING(16), b BIT VARYING(16));
+COPY varbit_table FROM stdin;
+X0F X10
+X1F X11
+X2F X12
+X3F X13
+X8F X04
+X000F X0010
+X0123 XFFFF
+X2468 X2468
+XFA50 X05AF
+X1234 XFFF5
+\.
+
+SELECT a, b, ~a AS "~ a", a & b AS "a & b",
+ a | b AS "a | b", a # b AS "a # b" FROM varbit_table;
+SELECT a,b,a<b AS "a<b",a<=b AS "a<=b",a=b AS "a=b",
+ a>=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM varbit_table;
+SELECT a,a<<4 AS "a<<4",b,b>>2 AS "b>>2" FROM varbit_table;
+
+DROP TABLE varbit_table;
+
+--- Bit operations
+DROP TABLE bit_table;
+CREATE TABLE bit_table (a BIT(16), b BIT(16));
+COPY bit_table FROM stdin;
+X0F00 X1000
+X1F00 X1100
+X2F00 X1200
+X3F00 X1300
+X8F00 X0400
+X000F X0010
+X0123 XFFFF
+X2468 X2468
+XFA50 X05AF
+X1234 XFFF5
+\.
+
+SELECT a,b,~a AS "~ a",a & b AS "a & b",
+ a|b AS "a | b", a # b AS "a # b" FROM bit_table;
+SELECT a,b,a<b AS "a<b",a<=b AS "a<=b",a=b AS "a=b",
+ a>=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM bit_table;
+SELECT a,a<<4 AS "a<<4",b,b>>2 AS "b>>2" FROM bit_table;
+
+DROP TABLE bit_table;
+
+
+-- The following should fail
+select B'001' & B'10';
+select B'0111' | B'011';
+select B'0010' # B'011101';
+
+-- More position tests, checking all the boundary cases
+SELECT POSITION(B'1010' IN B'0000101'); -- 0
+SELECT POSITION(B'1010' IN B'00001010'); -- 5
+SELECT POSITION(B'1010' IN B'00000101'); -- 0
+SELECT POSITION(B'1010' IN B'000001010'); -- 6
+
+SELECT POSITION(B'' IN B'00001010'); -- 1
+SELECT POSITION(B'0' IN B''); -- 0
+SELECT POSITION(B'' IN B''); -- 0
+SELECT POSITION(B'101101' IN B'001011011011011000'); -- 3
+SELECT POSITION(B'10110110' IN B'001011011011010'); -- 3
+SELECT POSITION(B'1011011011011' IN B'001011011011011'); -- 3
+SELECT POSITION(B'1011011011011' IN B'00001011011011011'); -- 5
+
+SELECT POSITION(B'11101011' IN B'11101011'); -- 1
+SELECT POSITION(B'11101011' IN B'011101011'); -- 2
+SELECT POSITION(B'11101011' IN B'00011101011'); -- 4
+SELECT POSITION(B'11101011' IN B'0000011101011'); -- 6
+
+SELECT POSITION(B'111010110' IN B'111010110'); -- 1
+SELECT POSITION(B'111010110' IN B'0111010110'); -- 2
+SELECT POSITION(B'111010110' IN B'000111010110'); -- 4
+SELECT POSITION(B'111010110' IN B'00000111010110'); -- 6
+
+SELECT POSITION(B'111010110' IN B'11101011'); -- 0
+SELECT POSITION(B'111010110' IN B'011101011'); -- 0
+SELECT POSITION(B'111010110' IN B'00011101011'); -- 0
+SELECT POSITION(B'111010110' IN B'0000011101011'); -- 0
+
+SELECT POSITION(B'111010110' IN B'111010110'); -- 1
+SELECT POSITION(B'111010110' IN B'0111010110'); -- 2
+SELECT POSITION(B'111010110' IN B'000111010110'); -- 4
+SELECT POSITION(B'111010110' IN B'00000111010110'); -- 6
+
+SELECT POSITION(B'111010110' IN B'000001110101111101011'); -- 0
+SELECT POSITION(B'111010110' IN B'0000001110101111101011'); -- 0
+SELECT POSITION(B'111010110' IN B'000000001110101111101011'); -- 0
+SELECT POSITION(B'111010110' IN B'00000000001110101111101011'); -- 0
+
+SELECT POSITION(B'111010110' IN B'0000011101011111010110'); -- 14
+SELECT POSITION(B'111010110' IN B'00000011101011111010110'); -- 15
+SELECT POSITION(B'111010110' IN B'0000000011101011111010110'); -- 17
+SELECT POSITION(B'111010110' IN B'000000000011101011111010110'); -- 19
+
+SELECT POSITION(B'000000000011101011111010110' IN B'000000000011101011111010110'); -- 1
+SELECT POSITION(B'00000000011101011111010110' IN B'000000000011101011111010110'); -- 2
+SELECT POSITION(B'0000000000011101011111010110' IN B'000000000011101011111010110'); -- 0
+
+
+-- Shifting
+
+CREATE TABLE BIT_SHIFT_TABLE(b BIT(16));
+INSERT INTO BIT_SHIFT_TABLE VALUES (B'1101100000000000');
+INSERT INTO BIT_SHIFT_TABLE SELECT b>>1 FROM BIT_SHIFT_TABLE;
+INSERT INTO BIT_SHIFT_TABLE SELECT b>>2 FROM BIT_SHIFT_TABLE;
+INSERT INTO BIT_SHIFT_TABLE SELECT b>>4 FROM BIT_SHIFT_TABLE;
+INSERT INTO BIT_SHIFT_TABLE SELECT b>>8 FROM BIT_SHIFT_TABLE;
+SELECT POSITION(B'1101' IN b),
+ POSITION(B'11011' IN b),
+ b
+ FROM BIT_SHIFT_TABLE ;
+SELECT b, b >> 1 AS bsr, b << 1 AS bsl
+ FROM BIT_SHIFT_TABLE ;
+SELECT b, b >> 8 AS bsr8, b << 8 AS bsl8
+ FROM BIT_SHIFT_TABLE ;
+SELECT b::bit(15), b::bit(15) >> 1 AS bsr, b::bit(15) << 1 AS bsl
+ FROM BIT_SHIFT_TABLE ;
+SELECT b::bit(15), b::bit(15) >> 8 AS bsr8, b::bit(15) << 8 AS bsl8
+ FROM BIT_SHIFT_TABLE ;
+
+
+CREATE TABLE VARBIT_SHIFT_TABLE(v BIT VARYING(20));
+INSERT INTO VARBIT_SHIFT_TABLE VALUES (B'11011');
+INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'0' AS BIT VARYING(6)) >>1 FROM VARBIT_SHIFT_TABLE;
+INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00' AS BIT VARYING(8)) >>2 FROM VARBIT_SHIFT_TABLE;
+INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'0000' AS BIT VARYING(12)) >>4 FROM VARBIT_SHIFT_TABLE;
+INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00000000' AS BIT VARYING(20)) >>8 FROM VARBIT_SHIFT_TABLE;
+SELECT POSITION(B'1101' IN v),
+ POSITION(B'11011' IN v),
+ v
+ FROM VARBIT_SHIFT_TABLE ;
+SELECT v, v >> 1 AS vsr, v << 1 AS vsl
+ FROM VARBIT_SHIFT_TABLE ;
+SELECT v, v >> 8 AS vsr8, v << 8 AS vsl8
+ FROM VARBIT_SHIFT_TABLE ;
+
+DROP TABLE BIT_SHIFT_TABLE;
+DROP TABLE VARBIT_SHIFT_TABLE;
+
+-- Get/Set bit
+SELECT get_bit(B'0101011000100', 10);
+SELECT set_bit(B'0101011000100100', 15, 1);
+SELECT set_bit(B'0101011000100100', 16, 1); -- fail
+
+-- Overlay
+SELECT overlay(B'0101011100' placing '001' from 2 for 3);
+SELECT overlay(B'0101011100' placing '101' from 6);
+SELECT overlay(B'0101011100' placing '001' from 11);
+SELECT overlay(B'0101011100' placing '001' from 20);
+
+-- bit_count
+SELECT bit_count(B'0101011100'::bit(10));
+SELECT bit_count(B'1111111111'::bit(10));
+
+-- This table is intentionally left around to exercise pg_dump/pg_upgrade
+CREATE TABLE bit_defaults(
+ b1 bit(4) DEFAULT '1001',
+ b2 bit(4) DEFAULT B'0101',
+ b3 bit varying(5) DEFAULT '1001',
+ b4 bit varying(5) DEFAULT B'0101'
+);
+\d bit_defaults
+INSERT INTO bit_defaults DEFAULT VALUES;
+TABLE bit_defaults;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/boolean.out b/ydb/library/yql/tests/postgresql/original/cases/boolean.out
new file mode 100644
index 0000000000..4728fe2dfd
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/boolean.out
@@ -0,0 +1,559 @@
+--
+-- BOOLEAN
+--
+--
+-- sanity check - if this fails go insane!
+--
+SELECT 1 AS one;
+ one
+-----
+ 1
+(1 row)
+
+-- ******************testing built-in type bool********************
+-- check bool input syntax
+SELECT true AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT false AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 't' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool ' f ' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'true' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'test' AS error;
+ERROR: invalid input syntax for type boolean: "test"
+LINE 1: SELECT bool 'test' AS error;
+ ^
+SELECT bool 'false' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'foo' AS error;
+ERROR: invalid input syntax for type boolean: "foo"
+LINE 1: SELECT bool 'foo' AS error;
+ ^
+SELECT bool 'y' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'yes' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'yeah' AS error;
+ERROR: invalid input syntax for type boolean: "yeah"
+LINE 1: SELECT bool 'yeah' AS error;
+ ^
+SELECT bool 'n' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'no' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'nay' AS error;
+ERROR: invalid input syntax for type boolean: "nay"
+LINE 1: SELECT bool 'nay' AS error;
+ ^
+SELECT bool 'on' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'off' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'of' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 'o' AS error;
+ERROR: invalid input syntax for type boolean: "o"
+LINE 1: SELECT bool 'o' AS error;
+ ^
+SELECT bool 'on_' AS error;
+ERROR: invalid input syntax for type boolean: "on_"
+LINE 1: SELECT bool 'on_' AS error;
+ ^
+SELECT bool 'off_' AS error;
+ERROR: invalid input syntax for type boolean: "off_"
+LINE 1: SELECT bool 'off_' AS error;
+ ^
+SELECT bool '1' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool '11' AS error;
+ERROR: invalid input syntax for type boolean: "11"
+LINE 1: SELECT bool '11' AS error;
+ ^
+SELECT bool '0' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool '000' AS error;
+ERROR: invalid input syntax for type boolean: "000"
+LINE 1: SELECT bool '000' AS error;
+ ^
+SELECT bool '' AS error;
+ERROR: invalid input syntax for type boolean: ""
+LINE 1: SELECT bool '' AS error;
+ ^
+-- and, or, not in qualifications
+SELECT bool 't' or bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 't' and bool 'f' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT not bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 't' = bool 'f' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT bool 't' <> bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 't' > bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 't' >= bool 'f' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'f' < bool 't' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT bool 'f' <= bool 't' AS true;
+ true
+------
+ t
+(1 row)
+
+-- explicit casts to/from text
+SELECT 'TrUe'::text::boolean AS true, 'fAlse'::text::boolean AS false;
+ true | false
+------+-------
+ t | f
+(1 row)
+
+SELECT ' true '::text::boolean AS true,
+ ' FALSE'::text::boolean AS false;
+ true | false
+------+-------
+ t | f
+(1 row)
+
+SELECT true::boolean::text AS true, false::boolean::text AS false;
+ true | false
+------+-------
+ true | false
+(1 row)
+
+SELECT ' tru e '::text::boolean AS invalid; -- error
+ERROR: invalid input syntax for type boolean: " tru e "
+SELECT ''::text::boolean AS invalid; -- error
+ERROR: invalid input syntax for type boolean: ""
+CREATE TABLE BOOLTBL1 (f1 bool);
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 't');
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'True');
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'true');
+-- BOOLTBL1 should be full of true's at this point
+SELECT BOOLTBL1.* FROM BOOLTBL1;
+ f1
+----
+ t
+ t
+ t
+(3 rows)
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 = bool 'true';
+ f1
+----
+ t
+ t
+ t
+(3 rows)
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 <> bool 'false';
+ f1
+----
+ t
+ t
+ t
+(3 rows)
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE booleq(bool 'false', f1);
+ f1
+----
+(0 rows)
+
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'f');
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 = bool 'false';
+ f1
+----
+ f
+(1 row)
+
+CREATE TABLE BOOLTBL2 (f1 bool);
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'f');
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'false');
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'False');
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'FALSE');
+-- This is now an invalid expression
+-- For pre-v6.3 this evaluated to false - thomas 1997-10-23
+INSERT INTO BOOLTBL2 (f1)
+ VALUES (bool 'XXX');
+ERROR: invalid input syntax for type boolean: "XXX"
+LINE 2: VALUES (bool 'XXX');
+ ^
+-- BOOLTBL2 should be full of false's at this point
+SELECT BOOLTBL2.* FROM BOOLTBL2;
+ f1
+----
+ f
+ f
+ f
+ f
+(4 rows)
+
+SELECT BOOLTBL1.*, BOOLTBL2.*
+ FROM BOOLTBL1, BOOLTBL2
+ WHERE BOOLTBL2.f1 <> BOOLTBL1.f1;
+ f1 | f1
+----+----
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+(12 rows)
+
+SELECT BOOLTBL1.*, BOOLTBL2.*
+ FROM BOOLTBL1, BOOLTBL2
+ WHERE boolne(BOOLTBL2.f1,BOOLTBL1.f1);
+ f1 | f1
+----+----
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+(12 rows)
+
+SELECT BOOLTBL1.*, BOOLTBL2.*
+ FROM BOOLTBL1, BOOLTBL2
+ WHERE BOOLTBL2.f1 = BOOLTBL1.f1 and BOOLTBL1.f1 = bool 'false';
+ f1 | f1
+----+----
+ f | f
+ f | f
+ f | f
+ f | f
+(4 rows)
+
+SELECT BOOLTBL1.*, BOOLTBL2.*
+ FROM BOOLTBL1, BOOLTBL2
+ WHERE BOOLTBL2.f1 = BOOLTBL1.f1 or BOOLTBL1.f1 = bool 'true'
+ ORDER BY BOOLTBL1.f1, BOOLTBL2.f1;
+ f1 | f1
+----+----
+ f | f
+ f | f
+ f | f
+ f | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+ t | f
+(16 rows)
+
+--
+-- SQL syntax
+-- Try all combinations to ensure that we get nothing when we expect nothing
+-- - thomas 2000-01-04
+--
+SELECT f1
+ FROM BOOLTBL1
+ WHERE f1 IS TRUE;
+ f1
+----
+ t
+ t
+ t
+(3 rows)
+
+SELECT f1
+ FROM BOOLTBL1
+ WHERE f1 IS NOT FALSE;
+ f1
+----
+ t
+ t
+ t
+(3 rows)
+
+SELECT f1
+ FROM BOOLTBL1
+ WHERE f1 IS FALSE;
+ f1
+----
+ f
+(1 row)
+
+SELECT f1
+ FROM BOOLTBL1
+ WHERE f1 IS NOT TRUE;
+ f1
+----
+ f
+(1 row)
+
+SELECT f1
+ FROM BOOLTBL2
+ WHERE f1 IS TRUE;
+ f1
+----
+(0 rows)
+
+SELECT f1
+ FROM BOOLTBL2
+ WHERE f1 IS NOT FALSE;
+ f1
+----
+(0 rows)
+
+SELECT f1
+ FROM BOOLTBL2
+ WHERE f1 IS FALSE;
+ f1
+----
+ f
+ f
+ f
+ f
+(4 rows)
+
+SELECT f1
+ FROM BOOLTBL2
+ WHERE f1 IS NOT TRUE;
+ f1
+----
+ f
+ f
+ f
+ f
+(4 rows)
+
+--
+-- Tests for BooleanTest
+--
+CREATE TABLE BOOLTBL3 (d text, b bool, o int);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('true', true, 1);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('false', false, 2);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('null', null, 3);
+SELECT
+ d,
+ b IS TRUE AS istrue,
+ b IS NOT TRUE AS isnottrue,
+ b IS FALSE AS isfalse,
+ b IS NOT FALSE AS isnotfalse,
+ b IS UNKNOWN AS isunknown,
+ b IS NOT UNKNOWN AS isnotunknown
+FROM booltbl3 ORDER BY o;
+ d | istrue | isnottrue | isfalse | isnotfalse | isunknown | isnotunknown
+-------+--------+-----------+---------+------------+-----------+--------------
+ true | t | f | f | t | f | t
+ false | f | t | t | f | f | t
+ null | f | t | f | t | t | f
+(3 rows)
+
+-- Test to make sure short-circuiting and NULL handling is
+-- correct. Use a table as source to prevent constant simplification
+-- to interfer.
+CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool);
+INSERT INTO booltbl4 VALUES (false, true, null);
+\pset null '(null)'
+-- AND expression need to return null if there's any nulls and not all
+-- of the value are true
+SELECT istrue AND isnul AND istrue FROM booltbl4;
+ ?column?
+----------
+ (null)
+(1 row)
+
+SELECT istrue AND istrue AND isnul FROM booltbl4;
+ ?column?
+----------
+ (null)
+(1 row)
+
+SELECT isnul AND istrue AND istrue FROM booltbl4;
+ ?column?
+----------
+ (null)
+(1 row)
+
+SELECT isfalse AND isnul AND istrue FROM booltbl4;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT istrue AND isfalse AND isnul FROM booltbl4;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT isnul AND istrue AND isfalse FROM booltbl4;
+ ?column?
+----------
+ f
+(1 row)
+
+-- OR expression need to return null if there's any nulls and none
+-- of the value is true
+SELECT isfalse OR isnul OR isfalse FROM booltbl4;
+ ?column?
+----------
+ (null)
+(1 row)
+
+SELECT isfalse OR isfalse OR isnul FROM booltbl4;
+ ?column?
+----------
+ (null)
+(1 row)
+
+SELECT isnul OR isfalse OR isfalse FROM booltbl4;
+ ?column?
+----------
+ (null)
+(1 row)
+
+SELECT isfalse OR isnul OR istrue FROM booltbl4;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT istrue OR isfalse OR isnul FROM booltbl4;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT isnul OR istrue OR isfalse FROM booltbl4;
+ ?column?
+----------
+ t
+(1 row)
+
+--
+-- Clean up
+-- Many tables are retained by the regression test, but these do not seem
+-- particularly useful so just get rid of them for now.
+-- - thomas 1997-11-30
+--
+DROP TABLE BOOLTBL1;
+DROP TABLE BOOLTBL2;
+DROP TABLE BOOLTBL3;
+DROP TABLE BOOLTBL4;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/boolean.sql b/ydb/library/yql/tests/postgresql/original/cases/boolean.sql
new file mode 100644
index 0000000000..4dd47aaf9d
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/boolean.sql
@@ -0,0 +1,262 @@
+--
+-- BOOLEAN
+--
+
+--
+-- sanity check - if this fails go insane!
+--
+SELECT 1 AS one;
+
+
+-- ******************testing built-in type bool********************
+
+-- check bool input syntax
+
+SELECT true AS true;
+
+SELECT false AS false;
+
+SELECT bool 't' AS true;
+
+SELECT bool ' f ' AS false;
+
+SELECT bool 'true' AS true;
+
+SELECT bool 'test' AS error;
+
+SELECT bool 'false' AS false;
+
+SELECT bool 'foo' AS error;
+
+SELECT bool 'y' AS true;
+
+SELECT bool 'yes' AS true;
+
+SELECT bool 'yeah' AS error;
+
+SELECT bool 'n' AS false;
+
+SELECT bool 'no' AS false;
+
+SELECT bool 'nay' AS error;
+
+SELECT bool 'on' AS true;
+
+SELECT bool 'off' AS false;
+
+SELECT bool 'of' AS false;
+
+SELECT bool 'o' AS error;
+
+SELECT bool 'on_' AS error;
+
+SELECT bool 'off_' AS error;
+
+SELECT bool '1' AS true;
+
+SELECT bool '11' AS error;
+
+SELECT bool '0' AS false;
+
+SELECT bool '000' AS error;
+
+SELECT bool '' AS error;
+
+-- and, or, not in qualifications
+
+SELECT bool 't' or bool 'f' AS true;
+
+SELECT bool 't' and bool 'f' AS false;
+
+SELECT not bool 'f' AS true;
+
+SELECT bool 't' = bool 'f' AS false;
+
+SELECT bool 't' <> bool 'f' AS true;
+
+SELECT bool 't' > bool 'f' AS true;
+
+SELECT bool 't' >= bool 'f' AS true;
+
+SELECT bool 'f' < bool 't' AS true;
+
+SELECT bool 'f' <= bool 't' AS true;
+
+-- explicit casts to/from text
+SELECT 'TrUe'::text::boolean AS true, 'fAlse'::text::boolean AS false;
+SELECT ' true '::text::boolean AS true,
+ ' FALSE'::text::boolean AS false;
+SELECT true::boolean::text AS true, false::boolean::text AS false;
+
+SELECT ' tru e '::text::boolean AS invalid; -- error
+SELECT ''::text::boolean AS invalid; -- error
+
+CREATE TABLE BOOLTBL1 (f1 bool);
+
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 't');
+
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'True');
+
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'true');
+
+
+-- BOOLTBL1 should be full of true's at this point
+SELECT BOOLTBL1.* FROM BOOLTBL1;
+
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 = bool 'true';
+
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 <> bool 'false';
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE booleq(bool 'false', f1);
+
+INSERT INTO BOOLTBL1 (f1) VALUES (bool 'f');
+
+SELECT BOOLTBL1.*
+ FROM BOOLTBL1
+ WHERE f1 = bool 'false';
+
+
+CREATE TABLE BOOLTBL2 (f1 bool);
+
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'f');
+
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'false');
+
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'False');
+
+INSERT INTO BOOLTBL2 (f1) VALUES (bool 'FALSE');
+
+-- This is now an invalid expression
+-- For pre-v6.3 this evaluated to false - thomas 1997-10-23
+INSERT INTO BOOLTBL2 (f1)
+ VALUES (bool 'XXX');
+
+-- BOOLTBL2 should be full of false's at this point
+SELECT BOOLTBL2.* FROM BOOLTBL2;
+
+
+SELECT BOOLTBL1.*, BOOLTBL2.*
+ FROM BOOLTBL1, BOOLTBL2
+ WHERE BOOLTBL2.f1 <> BOOLTBL1.f1;
+
+
+SELECT BOOLTBL1.*, BOOLTBL2.*
+ FROM BOOLTBL1, BOOLTBL2
+ WHERE boolne(BOOLTBL2.f1,BOOLTBL1.f1);
+
+
+SELECT BOOLTBL1.*, BOOLTBL2.*
+ FROM BOOLTBL1, BOOLTBL2
+ WHERE BOOLTBL2.f1 = BOOLTBL1.f1 and BOOLTBL1.f1 = bool 'false';
+
+
+SELECT BOOLTBL1.*, BOOLTBL2.*
+ FROM BOOLTBL1, BOOLTBL2
+ WHERE BOOLTBL2.f1 = BOOLTBL1.f1 or BOOLTBL1.f1 = bool 'true'
+ ORDER BY BOOLTBL1.f1, BOOLTBL2.f1;
+
+--
+-- SQL syntax
+-- Try all combinations to ensure that we get nothing when we expect nothing
+-- - thomas 2000-01-04
+--
+
+SELECT f1
+ FROM BOOLTBL1
+ WHERE f1 IS TRUE;
+
+SELECT f1
+ FROM BOOLTBL1
+ WHERE f1 IS NOT FALSE;
+
+SELECT f1
+ FROM BOOLTBL1
+ WHERE f1 IS FALSE;
+
+SELECT f1
+ FROM BOOLTBL1
+ WHERE f1 IS NOT TRUE;
+
+SELECT f1
+ FROM BOOLTBL2
+ WHERE f1 IS TRUE;
+
+SELECT f1
+ FROM BOOLTBL2
+ WHERE f1 IS NOT FALSE;
+
+SELECT f1
+ FROM BOOLTBL2
+ WHERE f1 IS FALSE;
+
+SELECT f1
+ FROM BOOLTBL2
+ WHERE f1 IS NOT TRUE;
+
+--
+-- Tests for BooleanTest
+--
+CREATE TABLE BOOLTBL3 (d text, b bool, o int);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('true', true, 1);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('false', false, 2);
+INSERT INTO BOOLTBL3 (d, b, o) VALUES ('null', null, 3);
+
+SELECT
+ d,
+ b IS TRUE AS istrue,
+ b IS NOT TRUE AS isnottrue,
+ b IS FALSE AS isfalse,
+ b IS NOT FALSE AS isnotfalse,
+ b IS UNKNOWN AS isunknown,
+ b IS NOT UNKNOWN AS isnotunknown
+FROM booltbl3 ORDER BY o;
+
+
+-- Test to make sure short-circuiting and NULL handling is
+-- correct. Use a table as source to prevent constant simplification
+-- to interfer.
+CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool);
+INSERT INTO booltbl4 VALUES (false, true, null);
+\pset null '(null)'
+
+-- AND expression need to return null if there's any nulls and not all
+-- of the value are true
+SELECT istrue AND isnul AND istrue FROM booltbl4;
+SELECT istrue AND istrue AND isnul FROM booltbl4;
+SELECT isnul AND istrue AND istrue FROM booltbl4;
+SELECT isfalse AND isnul AND istrue FROM booltbl4;
+SELECT istrue AND isfalse AND isnul FROM booltbl4;
+SELECT isnul AND istrue AND isfalse FROM booltbl4;
+
+-- OR expression need to return null if there's any nulls and none
+-- of the value is true
+SELECT isfalse OR isnul OR isfalse FROM booltbl4;
+SELECT isfalse OR isfalse OR isnul FROM booltbl4;
+SELECT isnul OR isfalse OR isfalse FROM booltbl4;
+SELECT isfalse OR isnul OR istrue FROM booltbl4;
+SELECT istrue OR isfalse OR isnul FROM booltbl4;
+SELECT isnul OR istrue OR isfalse FROM booltbl4;
+
+
+--
+-- Clean up
+-- Many tables are retained by the regression test, but these do not seem
+-- particularly useful so just get rid of them for now.
+-- - thomas 1997-11-30
+--
+
+DROP TABLE BOOLTBL1;
+
+DROP TABLE BOOLTBL2;
+
+DROP TABLE BOOLTBL3;
+
+DROP TABLE BOOLTBL4;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/case.out b/ydb/library/yql/tests/postgresql/original/cases/case.out
new file mode 100644
index 0000000000..f5136c17ab
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/case.out
@@ -0,0 +1,419 @@
+--
+-- CASE
+-- Test the case statement
+--
+CREATE TABLE CASE_TBL (
+ i integer,
+ f double precision
+);
+CREATE TABLE CASE2_TBL (
+ i integer,
+ j integer
+);
+INSERT INTO CASE_TBL VALUES (1, 10.1);
+INSERT INTO CASE_TBL VALUES (2, 20.2);
+INSERT INTO CASE_TBL VALUES (3, -30.3);
+INSERT INTO CASE_TBL VALUES (4, NULL);
+INSERT INTO CASE2_TBL VALUES (1, -1);
+INSERT INTO CASE2_TBL VALUES (2, -2);
+INSERT INTO CASE2_TBL VALUES (3, -3);
+INSERT INTO CASE2_TBL VALUES (2, -4);
+INSERT INTO CASE2_TBL VALUES (1, NULL);
+INSERT INTO CASE2_TBL VALUES (NULL, -6);
+--
+-- Simplest examples without tables
+--
+SELECT '3' AS "One",
+ CASE
+ WHEN 1 < 2 THEN 3
+ END AS "Simple WHEN";
+ One | Simple WHEN
+-----+-------------
+ 3 | 3
+(1 row)
+
+SELECT '<NULL>' AS "One",
+ CASE
+ WHEN 1 > 2 THEN 3
+ END AS "Simple default";
+ One | Simple default
+--------+----------------
+ <NULL> |
+(1 row)
+
+SELECT '3' AS "One",
+ CASE
+ WHEN 1 < 2 THEN 3
+ ELSE 4
+ END AS "Simple ELSE";
+ One | Simple ELSE
+-----+-------------
+ 3 | 3
+(1 row)
+
+SELECT '4' AS "One",
+ CASE
+ WHEN 1 > 2 THEN 3
+ ELSE 4
+ END AS "ELSE default";
+ One | ELSE default
+-----+--------------
+ 4 | 4
+(1 row)
+
+SELECT '6' AS "One",
+ CASE
+ WHEN 1 > 2 THEN 3
+ WHEN 4 < 5 THEN 6
+ ELSE 7
+ END AS "Two WHEN with default";
+ One | Two WHEN with default
+-----+-----------------------
+ 6 | 6
+(1 row)
+
+SELECT '7' AS "None",
+ CASE WHEN random() < 0 THEN 1
+ END AS "NULL on no matches";
+ None | NULL on no matches
+------+--------------------
+ 7 |
+(1 row)
+
+-- Constant-expression folding shouldn't evaluate unreachable subexpressions
+SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END;
+ case
+------
+ 1
+(1 row)
+
+SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END;
+ case
+------
+ 1
+(1 row)
+
+-- However we do not currently suppress folding of potentially
+-- reachable subexpressions
+SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl;
+ERROR: division by zero
+-- Test for cases involving untyped literals in test expression
+SELECT CASE 'a' WHEN 'a' THEN 1 ELSE 2 END;
+ case
+------
+ 1
+(1 row)
+
+--
+-- Examples of targets involving tables
+--
+SELECT
+ CASE
+ WHEN i >= 3 THEN i
+ END AS ">= 3 or Null"
+ FROM CASE_TBL;
+ >= 3 or Null
+--------------
+
+
+ 3
+ 4
+(4 rows)
+
+SELECT
+ CASE WHEN i >= 3 THEN (i + i)
+ ELSE i
+ END AS "Simplest Math"
+ FROM CASE_TBL;
+ Simplest Math
+---------------
+ 1
+ 2
+ 6
+ 8
+(4 rows)
+
+SELECT i AS "Value",
+ CASE WHEN (i < 0) THEN 'small'
+ WHEN (i = 0) THEN 'zero'
+ WHEN (i = 1) THEN 'one'
+ WHEN (i = 2) THEN 'two'
+ ELSE 'big'
+ END AS "Category"
+ FROM CASE_TBL;
+ Value | Category
+-------+----------
+ 1 | one
+ 2 | two
+ 3 | big
+ 4 | big
+(4 rows)
+
+SELECT
+ CASE WHEN ((i < 0) or (i < 0)) THEN 'small'
+ WHEN ((i = 0) or (i = 0)) THEN 'zero'
+ WHEN ((i = 1) or (i = 1)) THEN 'one'
+ WHEN ((i = 2) or (i = 2)) THEN 'two'
+ ELSE 'big'
+ END AS "Category"
+ FROM CASE_TBL;
+ Category
+----------
+ one
+ two
+ big
+ big
+(4 rows)
+
+--
+-- Examples of qualifications involving tables
+--
+--
+-- NULLIF() and COALESCE()
+-- Shorthand forms for typical CASE constructs
+-- defined in the SQL standard.
+--
+SELECT * FROM CASE_TBL WHERE COALESCE(f,i) = 4;
+ i | f
+---+---
+ 4 |
+(1 row)
+
+SELECT * FROM CASE_TBL WHERE NULLIF(f,i) = 2;
+ i | f
+---+---
+(0 rows)
+
+SELECT COALESCE(a.f, b.i, b.j)
+ FROM CASE_TBL a, CASE2_TBL b;
+ coalesce
+----------
+ 10.1
+ 20.2
+ -30.3
+ 1
+ 10.1
+ 20.2
+ -30.3
+ 2
+ 10.1
+ 20.2
+ -30.3
+ 3
+ 10.1
+ 20.2
+ -30.3
+ 2
+ 10.1
+ 20.2
+ -30.3
+ 1
+ 10.1
+ 20.2
+ -30.3
+ -6
+(24 rows)
+
+SELECT *
+ FROM CASE_TBL a, CASE2_TBL b
+ WHERE COALESCE(a.f, b.i, b.j) = 2;
+ i | f | i | j
+---+---+---+----
+ 4 | | 2 | -2
+ 4 | | 2 | -4
+(2 rows)
+
+SELECT NULLIF(a.i,b.i) AS "NULLIF(a.i,b.i)",
+ NULLIF(b.i, 4) AS "NULLIF(b.i,4)"
+ FROM CASE_TBL a, CASE2_TBL b;
+ NULLIF(a.i,b.i) | NULLIF(b.i,4)
+-----------------+---------------
+ | 1
+ 2 | 1
+ 3 | 1
+ 4 | 1
+ 1 | 2
+ | 2
+ 3 | 2
+ 4 | 2
+ 1 | 3
+ 2 | 3
+ | 3
+ 4 | 3
+ 1 | 2
+ | 2
+ 3 | 2
+ 4 | 2
+ | 1
+ 2 | 1
+ 3 | 1
+ 4 | 1
+ 1 |
+ 2 |
+ 3 |
+ 4 |
+(24 rows)
+
+SELECT *
+ FROM CASE_TBL a, CASE2_TBL b
+ WHERE COALESCE(f,b.i) = 2;
+ i | f | i | j
+---+---+---+----
+ 4 | | 2 | -2
+ 4 | | 2 | -4
+(2 rows)
+
+-- Tests for constant subexpression simplification
+explain (costs off)
+SELECT * FROM CASE_TBL WHERE NULLIF(1, 2) = 2;
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+explain (costs off)
+SELECT * FROM CASE_TBL WHERE NULLIF(1, 1) IS NOT NULL;
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+explain (costs off)
+SELECT * FROM CASE_TBL WHERE NULLIF(1, null) = 2;
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+--
+-- Examples of updates involving tables
+--
+UPDATE CASE_TBL
+ SET i = CASE WHEN i >= 3 THEN (- i)
+ ELSE (2 * i) END;
+SELECT * FROM CASE_TBL;
+ i | f
+----+-------
+ 2 | 10.1
+ 4 | 20.2
+ -3 | -30.3
+ -4 |
+(4 rows)
+
+UPDATE CASE_TBL
+ SET i = CASE WHEN i >= 2 THEN (2 * i)
+ ELSE (3 * i) END;
+SELECT * FROM CASE_TBL;
+ i | f
+-----+-------
+ 4 | 10.1
+ 8 | 20.2
+ -9 | -30.3
+ -12 |
+(4 rows)
+
+UPDATE CASE_TBL
+ SET i = CASE WHEN b.i >= 2 THEN (2 * j)
+ ELSE (3 * j) END
+ FROM CASE2_TBL b
+ WHERE j = -CASE_TBL.i;
+SELECT * FROM CASE_TBL;
+ i | f
+-----+-------
+ 8 | 20.2
+ -9 | -30.3
+ -12 |
+ -8 | 10.1
+(4 rows)
+
+--
+-- Nested CASE expressions
+--
+-- This test exercises a bug caused by aliasing econtext->caseValue_isNull
+-- with the isNull argument of the inner CASE's CaseExpr evaluation. After
+-- evaluating the vol(null) expression in the inner CASE's second WHEN-clause,
+-- the isNull flag for the case test value incorrectly became true, causing
+-- the third WHEN-clause not to match. The volatile function calls are needed
+-- to prevent constant-folding in the planner, which would hide the bug.
+-- Wrap this in a single transaction so the transient '=' operator doesn't
+-- cause problems in concurrent sessions
+BEGIN;
+CREATE FUNCTION vol(text) returns text as
+ 'begin return $1; end' language plpgsql volatile;
+SELECT CASE
+ (CASE vol('bar')
+ WHEN 'foo' THEN 'it was foo!'
+ WHEN vol(null) THEN 'null input'
+ WHEN 'bar' THEN 'it was bar!' END
+ )
+ WHEN 'it was foo!' THEN 'foo recognized'
+ WHEN 'it was bar!' THEN 'bar recognized'
+ ELSE 'unrecognized' END;
+ case
+----------------
+ bar recognized
+(1 row)
+
+-- In this case, we can't inline the SQL function without confusing things.
+CREATE DOMAIN foodomain AS text;
+CREATE FUNCTION volfoo(text) returns foodomain as
+ 'begin return $1::foodomain; end' language plpgsql volatile;
+CREATE FUNCTION inline_eq(foodomain, foodomain) returns boolean as
+ 'SELECT CASE $2::text WHEN $1::text THEN true ELSE false END' language sql;
+CREATE OPERATOR = (procedure = inline_eq,
+ leftarg = foodomain, rightarg = foodomain);
+SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' END;
+ case
+------------
+ is not foo
+(1 row)
+
+ROLLBACK;
+-- Test multiple evaluation of a CASE arg that is a read/write object (#14472)
+-- Wrap this in a single transaction so the transient '=' operator doesn't
+-- cause problems in concurrent sessions
+BEGIN;
+CREATE DOMAIN arrdomain AS int[];
+CREATE FUNCTION make_ad(int,int) returns arrdomain as
+ 'declare x arrdomain;
+ begin
+ x := array[$1,$2];
+ return x;
+ end' language plpgsql volatile;
+CREATE FUNCTION ad_eq(arrdomain, arrdomain) returns boolean as
+ 'begin return array_eq($1, $2); end' language plpgsql;
+CREATE OPERATOR = (procedure = ad_eq,
+ leftarg = arrdomain, rightarg = arrdomain);
+SELECT CASE make_ad(1,2)
+ WHEN array[2,4]::arrdomain THEN 'wrong'
+ WHEN array[2,5]::arrdomain THEN 'still wrong'
+ WHEN array[1,2]::arrdomain THEN 'right'
+ END;
+ case
+-------
+ right
+(1 row)
+
+ROLLBACK;
+-- Test interaction of CASE with ArrayCoerceExpr (bug #15471)
+BEGIN;
+CREATE TYPE casetestenum AS ENUM ('e', 'f', 'g');
+SELECT
+ CASE 'foo'::text
+ WHEN 'foo' THEN ARRAY['a', 'b', 'c', 'd'] || enum_range(NULL::casetestenum)::text[]
+ ELSE ARRAY['x', 'y']
+ END;
+ array
+-----------------
+ {a,b,c,d,e,f,g}
+(1 row)
+
+ROLLBACK;
+--
+-- Clean up
+--
+DROP TABLE CASE_TBL;
+DROP TABLE CASE2_TBL;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/case.sql b/ydb/library/yql/tests/postgresql/original/cases/case.sql
new file mode 100644
index 0000000000..83fe43be6b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/case.sql
@@ -0,0 +1,265 @@
+--
+-- CASE
+-- Test the case statement
+--
+
+CREATE TABLE CASE_TBL (
+ i integer,
+ f double precision
+);
+
+CREATE TABLE CASE2_TBL (
+ i integer,
+ j integer
+);
+
+INSERT INTO CASE_TBL VALUES (1, 10.1);
+INSERT INTO CASE_TBL VALUES (2, 20.2);
+INSERT INTO CASE_TBL VALUES (3, -30.3);
+INSERT INTO CASE_TBL VALUES (4, NULL);
+
+INSERT INTO CASE2_TBL VALUES (1, -1);
+INSERT INTO CASE2_TBL VALUES (2, -2);
+INSERT INTO CASE2_TBL VALUES (3, -3);
+INSERT INTO CASE2_TBL VALUES (2, -4);
+INSERT INTO CASE2_TBL VALUES (1, NULL);
+INSERT INTO CASE2_TBL VALUES (NULL, -6);
+
+--
+-- Simplest examples without tables
+--
+
+SELECT '3' AS "One",
+ CASE
+ WHEN 1 < 2 THEN 3
+ END AS "Simple WHEN";
+
+SELECT '<NULL>' AS "One",
+ CASE
+ WHEN 1 > 2 THEN 3
+ END AS "Simple default";
+
+SELECT '3' AS "One",
+ CASE
+ WHEN 1 < 2 THEN 3
+ ELSE 4
+ END AS "Simple ELSE";
+
+SELECT '4' AS "One",
+ CASE
+ WHEN 1 > 2 THEN 3
+ ELSE 4
+ END AS "ELSE default";
+
+SELECT '6' AS "One",
+ CASE
+ WHEN 1 > 2 THEN 3
+ WHEN 4 < 5 THEN 6
+ ELSE 7
+ END AS "Two WHEN with default";
+
+
+SELECT '7' AS "None",
+ CASE WHEN random() < 0 THEN 1
+ END AS "NULL on no matches";
+
+-- Constant-expression folding shouldn't evaluate unreachable subexpressions
+SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END;
+SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END;
+
+-- However we do not currently suppress folding of potentially
+-- reachable subexpressions
+SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl;
+
+-- Test for cases involving untyped literals in test expression
+SELECT CASE 'a' WHEN 'a' THEN 1 ELSE 2 END;
+
+--
+-- Examples of targets involving tables
+--
+
+SELECT
+ CASE
+ WHEN i >= 3 THEN i
+ END AS ">= 3 or Null"
+ FROM CASE_TBL;
+
+SELECT
+ CASE WHEN i >= 3 THEN (i + i)
+ ELSE i
+ END AS "Simplest Math"
+ FROM CASE_TBL;
+
+SELECT i AS "Value",
+ CASE WHEN (i < 0) THEN 'small'
+ WHEN (i = 0) THEN 'zero'
+ WHEN (i = 1) THEN 'one'
+ WHEN (i = 2) THEN 'two'
+ ELSE 'big'
+ END AS "Category"
+ FROM CASE_TBL;
+
+SELECT
+ CASE WHEN ((i < 0) or (i < 0)) THEN 'small'
+ WHEN ((i = 0) or (i = 0)) THEN 'zero'
+ WHEN ((i = 1) or (i = 1)) THEN 'one'
+ WHEN ((i = 2) or (i = 2)) THEN 'two'
+ ELSE 'big'
+ END AS "Category"
+ FROM CASE_TBL;
+
+--
+-- Examples of qualifications involving tables
+--
+
+--
+-- NULLIF() and COALESCE()
+-- Shorthand forms for typical CASE constructs
+-- defined in the SQL standard.
+--
+
+SELECT * FROM CASE_TBL WHERE COALESCE(f,i) = 4;
+
+SELECT * FROM CASE_TBL WHERE NULLIF(f,i) = 2;
+
+SELECT COALESCE(a.f, b.i, b.j)
+ FROM CASE_TBL a, CASE2_TBL b;
+
+SELECT *
+ FROM CASE_TBL a, CASE2_TBL b
+ WHERE COALESCE(a.f, b.i, b.j) = 2;
+
+SELECT NULLIF(a.i,b.i) AS "NULLIF(a.i,b.i)",
+ NULLIF(b.i, 4) AS "NULLIF(b.i,4)"
+ FROM CASE_TBL a, CASE2_TBL b;
+
+SELECT *
+ FROM CASE_TBL a, CASE2_TBL b
+ WHERE COALESCE(f,b.i) = 2;
+
+-- Tests for constant subexpression simplification
+
+explain (costs off)
+SELECT * FROM CASE_TBL WHERE NULLIF(1, 2) = 2;
+
+explain (costs off)
+SELECT * FROM CASE_TBL WHERE NULLIF(1, 1) IS NOT NULL;
+
+explain (costs off)
+SELECT * FROM CASE_TBL WHERE NULLIF(1, null) = 2;
+
+--
+-- Examples of updates involving tables
+--
+
+UPDATE CASE_TBL
+ SET i = CASE WHEN i >= 3 THEN (- i)
+ ELSE (2 * i) END;
+
+SELECT * FROM CASE_TBL;
+
+UPDATE CASE_TBL
+ SET i = CASE WHEN i >= 2 THEN (2 * i)
+ ELSE (3 * i) END;
+
+SELECT * FROM CASE_TBL;
+
+UPDATE CASE_TBL
+ SET i = CASE WHEN b.i >= 2 THEN (2 * j)
+ ELSE (3 * j) END
+ FROM CASE2_TBL b
+ WHERE j = -CASE_TBL.i;
+
+SELECT * FROM CASE_TBL;
+
+--
+-- Nested CASE expressions
+--
+
+-- This test exercises a bug caused by aliasing econtext->caseValue_isNull
+-- with the isNull argument of the inner CASE's CaseExpr evaluation. After
+-- evaluating the vol(null) expression in the inner CASE's second WHEN-clause,
+-- the isNull flag for the case test value incorrectly became true, causing
+-- the third WHEN-clause not to match. The volatile function calls are needed
+-- to prevent constant-folding in the planner, which would hide the bug.
+
+-- Wrap this in a single transaction so the transient '=' operator doesn't
+-- cause problems in concurrent sessions
+BEGIN;
+
+CREATE FUNCTION vol(text) returns text as
+ 'begin return $1; end' language plpgsql volatile;
+
+SELECT CASE
+ (CASE vol('bar')
+ WHEN 'foo' THEN 'it was foo!'
+ WHEN vol(null) THEN 'null input'
+ WHEN 'bar' THEN 'it was bar!' END
+ )
+ WHEN 'it was foo!' THEN 'foo recognized'
+ WHEN 'it was bar!' THEN 'bar recognized'
+ ELSE 'unrecognized' END;
+
+-- In this case, we can't inline the SQL function without confusing things.
+CREATE DOMAIN foodomain AS text;
+
+CREATE FUNCTION volfoo(text) returns foodomain as
+ 'begin return $1::foodomain; end' language plpgsql volatile;
+
+CREATE FUNCTION inline_eq(foodomain, foodomain) returns boolean as
+ 'SELECT CASE $2::text WHEN $1::text THEN true ELSE false END' language sql;
+
+CREATE OPERATOR = (procedure = inline_eq,
+ leftarg = foodomain, rightarg = foodomain);
+
+SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' END;
+
+ROLLBACK;
+
+-- Test multiple evaluation of a CASE arg that is a read/write object (#14472)
+-- Wrap this in a single transaction so the transient '=' operator doesn't
+-- cause problems in concurrent sessions
+BEGIN;
+
+CREATE DOMAIN arrdomain AS int[];
+
+CREATE FUNCTION make_ad(int,int) returns arrdomain as
+ 'declare x arrdomain;
+ begin
+ x := array[$1,$2];
+ return x;
+ end' language plpgsql volatile;
+
+CREATE FUNCTION ad_eq(arrdomain, arrdomain) returns boolean as
+ 'begin return array_eq($1, $2); end' language plpgsql;
+
+CREATE OPERATOR = (procedure = ad_eq,
+ leftarg = arrdomain, rightarg = arrdomain);
+
+SELECT CASE make_ad(1,2)
+ WHEN array[2,4]::arrdomain THEN 'wrong'
+ WHEN array[2,5]::arrdomain THEN 'still wrong'
+ WHEN array[1,2]::arrdomain THEN 'right'
+ END;
+
+ROLLBACK;
+
+-- Test interaction of CASE with ArrayCoerceExpr (bug #15471)
+BEGIN;
+
+CREATE TYPE casetestenum AS ENUM ('e', 'f', 'g');
+
+SELECT
+ CASE 'foo'::text
+ WHEN 'foo' THEN ARRAY['a', 'b', 'c', 'd'] || enum_range(NULL::casetestenum)::text[]
+ ELSE ARRAY['x', 'y']
+ END;
+
+ROLLBACK;
+
+--
+-- Clean up
+--
+
+DROP TABLE CASE_TBL;
+DROP TABLE CASE2_TBL;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/char.out b/ydb/library/yql/tests/postgresql/original/cases/char.out
new file mode 100644
index 0000000000..d515b3ce34
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/char.out
@@ -0,0 +1,122 @@
+--
+-- CHAR
+--
+-- fixed-length by value
+-- internally passed by value if <= 4 bytes in storage
+SELECT char 'c' = char 'c' AS true;
+ true
+------
+ t
+(1 row)
+
+--
+-- Build a table for testing
+--
+CREATE TABLE CHAR_TBL(f1 char);
+INSERT INTO CHAR_TBL (f1) VALUES ('a');
+INSERT INTO CHAR_TBL (f1) VALUES ('A');
+-- any of the following three input formats are acceptable
+INSERT INTO CHAR_TBL (f1) VALUES ('1');
+INSERT INTO CHAR_TBL (f1) VALUES (2);
+INSERT INTO CHAR_TBL (f1) VALUES ('3');
+-- zero-length char
+INSERT INTO CHAR_TBL (f1) VALUES ('');
+-- try char's of greater than 1 length
+INSERT INTO CHAR_TBL (f1) VALUES ('cd');
+ERROR: value too long for type character(1)
+INSERT INTO CHAR_TBL (f1) VALUES ('c ');
+SELECT * FROM CHAR_TBL;
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+
+ c
+(7 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 <> 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+
+ c
+(6 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 = 'a';
+ f1
+----
+ a
+(1 row)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 < 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+
+(5 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 <= 'a';
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+
+(6 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 > 'a';
+ f1
+----
+ c
+(1 row)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 >= 'a';
+ f1
+----
+ a
+ c
+(2 rows)
+
+DROP TABLE CHAR_TBL;
+--
+-- Now test longer arrays of char
+--
+CREATE TABLE CHAR_TBL(f1 char(4));
+INSERT INTO CHAR_TBL (f1) VALUES ('a');
+INSERT INTO CHAR_TBL (f1) VALUES ('ab');
+INSERT INTO CHAR_TBL (f1) VALUES ('abcd');
+INSERT INTO CHAR_TBL (f1) VALUES ('abcde');
+ERROR: value too long for type character(4)
+INSERT INTO CHAR_TBL (f1) VALUES ('abcd ');
+SELECT * FROM CHAR_TBL;
+ f1
+------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/char.sql b/ydb/library/yql/tests/postgresql/original/cases/char.sql
new file mode 100644
index 0000000000..79e6e565ba
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/char.sql
@@ -0,0 +1,75 @@
+--
+-- CHAR
+--
+
+-- fixed-length by value
+-- internally passed by value if <= 4 bytes in storage
+
+SELECT char 'c' = char 'c' AS true;
+
+--
+-- Build a table for testing
+--
+
+CREATE TABLE CHAR_TBL(f1 char);
+
+INSERT INTO CHAR_TBL (f1) VALUES ('a');
+
+INSERT INTO CHAR_TBL (f1) VALUES ('A');
+
+-- any of the following three input formats are acceptable
+INSERT INTO CHAR_TBL (f1) VALUES ('1');
+
+INSERT INTO CHAR_TBL (f1) VALUES (2);
+
+INSERT INTO CHAR_TBL (f1) VALUES ('3');
+
+-- zero-length char
+INSERT INTO CHAR_TBL (f1) VALUES ('');
+
+-- try char's of greater than 1 length
+INSERT INTO CHAR_TBL (f1) VALUES ('cd');
+INSERT INTO CHAR_TBL (f1) VALUES ('c ');
+
+
+SELECT * FROM CHAR_TBL;
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 <> 'a';
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 = 'a';
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 < 'a';
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 <= 'a';
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 > 'a';
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 >= 'a';
+
+DROP TABLE CHAR_TBL;
+
+--
+-- Now test longer arrays of char
+--
+
+CREATE TABLE CHAR_TBL(f1 char(4));
+
+INSERT INTO CHAR_TBL (f1) VALUES ('a');
+INSERT INTO CHAR_TBL (f1) VALUES ('ab');
+INSERT INTO CHAR_TBL (f1) VALUES ('abcd');
+INSERT INTO CHAR_TBL (f1) VALUES ('abcde');
+INSERT INTO CHAR_TBL (f1) VALUES ('abcd ');
+
+SELECT * FROM CHAR_TBL;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/char_1.out b/ydb/library/yql/tests/postgresql/original/cases/char_1.out
new file mode 100644
index 0000000000..6c917c0b68
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/char_1.out
@@ -0,0 +1,122 @@
+--
+-- CHAR
+--
+-- fixed-length by value
+-- internally passed by value if <= 4 bytes in storage
+SELECT char 'c' = char 'c' AS true;
+ true
+------
+ t
+(1 row)
+
+--
+-- Build a table for testing
+--
+CREATE TABLE CHAR_TBL(f1 char);
+INSERT INTO CHAR_TBL (f1) VALUES ('a');
+INSERT INTO CHAR_TBL (f1) VALUES ('A');
+-- any of the following three input formats are acceptable
+INSERT INTO CHAR_TBL (f1) VALUES ('1');
+INSERT INTO CHAR_TBL (f1) VALUES (2);
+INSERT INTO CHAR_TBL (f1) VALUES ('3');
+-- zero-length char
+INSERT INTO CHAR_TBL (f1) VALUES ('');
+-- try char's of greater than 1 length
+INSERT INTO CHAR_TBL (f1) VALUES ('cd');
+ERROR: value too long for type character(1)
+INSERT INTO CHAR_TBL (f1) VALUES ('c ');
+SELECT * FROM CHAR_TBL;
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+
+ c
+(7 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 <> 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+
+ c
+(6 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 = 'a';
+ f1
+----
+ a
+(1 row)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 < 'a';
+ f1
+----
+ 1
+ 2
+ 3
+
+(4 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 <= 'a';
+ f1
+----
+ a
+ 1
+ 2
+ 3
+
+(5 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 > 'a';
+ f1
+----
+ A
+ c
+(2 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 >= 'a';
+ f1
+----
+ a
+ A
+ c
+(3 rows)
+
+DROP TABLE CHAR_TBL;
+--
+-- Now test longer arrays of char
+--
+CREATE TABLE CHAR_TBL(f1 char(4));
+INSERT INTO CHAR_TBL (f1) VALUES ('a');
+INSERT INTO CHAR_TBL (f1) VALUES ('ab');
+INSERT INTO CHAR_TBL (f1) VALUES ('abcd');
+INSERT INTO CHAR_TBL (f1) VALUES ('abcde');
+ERROR: value too long for type character(4)
+INSERT INTO CHAR_TBL (f1) VALUES ('abcd ');
+SELECT * FROM CHAR_TBL;
+ f1
+------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/char_2.out b/ydb/library/yql/tests/postgresql/original/cases/char_2.out
new file mode 100644
index 0000000000..9a54658632
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/char_2.out
@@ -0,0 +1,122 @@
+--
+-- CHAR
+--
+-- fixed-length by value
+-- internally passed by value if <= 4 bytes in storage
+SELECT char 'c' = char 'c' AS true;
+ true
+------
+ t
+(1 row)
+
+--
+-- Build a table for testing
+--
+CREATE TABLE CHAR_TBL(f1 char);
+INSERT INTO CHAR_TBL (f1) VALUES ('a');
+INSERT INTO CHAR_TBL (f1) VALUES ('A');
+-- any of the following three input formats are acceptable
+INSERT INTO CHAR_TBL (f1) VALUES ('1');
+INSERT INTO CHAR_TBL (f1) VALUES (2);
+INSERT INTO CHAR_TBL (f1) VALUES ('3');
+-- zero-length char
+INSERT INTO CHAR_TBL (f1) VALUES ('');
+-- try char's of greater than 1 length
+INSERT INTO CHAR_TBL (f1) VALUES ('cd');
+ERROR: value too long for type character(1)
+INSERT INTO CHAR_TBL (f1) VALUES ('c ');
+SELECT * FROM CHAR_TBL;
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+
+ c
+(7 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 <> 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+
+ c
+(6 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 = 'a';
+ f1
+----
+ a
+(1 row)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 < 'a';
+ f1
+----
+
+(1 row)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 <= 'a';
+ f1
+----
+ a
+
+(2 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 > 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+ c
+(5 rows)
+
+SELECT c.*
+ FROM CHAR_TBL c
+ WHERE c.f1 >= 'a';
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+ c
+(6 rows)
+
+DROP TABLE CHAR_TBL;
+--
+-- Now test longer arrays of char
+--
+CREATE TABLE CHAR_TBL(f1 char(4));
+INSERT INTO CHAR_TBL (f1) VALUES ('a');
+INSERT INTO CHAR_TBL (f1) VALUES ('ab');
+INSERT INTO CHAR_TBL (f1) VALUES ('abcd');
+INSERT INTO CHAR_TBL (f1) VALUES ('abcde');
+ERROR: value too long for type character(4)
+INSERT INTO CHAR_TBL (f1) VALUES ('abcd ');
+SELECT * FROM CHAR_TBL;
+ f1
+------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/comments.out b/ydb/library/yql/tests/postgresql/original/cases/comments.out
new file mode 100644
index 0000000000..33f612e633
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/comments.out
@@ -0,0 +1,65 @@
+--
+-- COMMENTS
+--
+SELECT 'trailing' AS first; -- trailing single line
+ first
+----------
+ trailing
+(1 row)
+
+SELECT /* embedded single line */ 'embedded' AS second;
+ second
+----------
+ embedded
+(1 row)
+
+SELECT /* both embedded and trailing single line */ 'both' AS third; -- trailing single line
+ third
+-------
+ both
+(1 row)
+
+SELECT 'before multi-line' AS fourth;
+ fourth
+-------------------
+ before multi-line
+(1 row)
+
+/* This is an example of SQL which should not execute:
+ * select 'multi-line';
+ */
+SELECT 'after multi-line' AS fifth;
+ fifth
+------------------
+ after multi-line
+(1 row)
+
+--
+-- Nested comments
+--
+/*
+SELECT 'trailing' as x1; -- inside block comment
+*/
+/* This block comment surrounds a query which itself has a block comment...
+SELECT /* embedded single line */ 'embedded' AS x2;
+*/
+SELECT -- continued after the following block comments...
+/* Deeply nested comment.
+ This includes a single apostrophe to make sure we aren't decoding this part as a string.
+SELECT 'deep nest' AS n1;
+/* Second level of nesting...
+SELECT 'deeper nest' as n2;
+/* Third level of nesting...
+SELECT 'deepest nest' as n3;
+*/
+Hoo boy. Still two deep...
+*/
+Now just one deep...
+*/
+'deeply nested example' AS sixth;
+ sixth
+-----------------------
+ deeply nested example
+(1 row)
+
+/* and this is the end of the file */
diff --git a/ydb/library/yql/tests/postgresql/original/cases/comments.sql b/ydb/library/yql/tests/postgresql/original/cases/comments.sql
new file mode 100644
index 0000000000..e47db1ae59
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/comments.sql
@@ -0,0 +1,42 @@
+--
+-- COMMENTS
+--
+
+SELECT 'trailing' AS first; -- trailing single line
+SELECT /* embedded single line */ 'embedded' AS second;
+SELECT /* both embedded and trailing single line */ 'both' AS third; -- trailing single line
+
+SELECT 'before multi-line' AS fourth;
+/* This is an example of SQL which should not execute:
+ * select 'multi-line';
+ */
+SELECT 'after multi-line' AS fifth;
+
+--
+-- Nested comments
+--
+
+/*
+SELECT 'trailing' as x1; -- inside block comment
+*/
+
+/* This block comment surrounds a query which itself has a block comment...
+SELECT /* embedded single line */ 'embedded' AS x2;
+*/
+
+SELECT -- continued after the following block comments...
+/* Deeply nested comment.
+ This includes a single apostrophe to make sure we aren't decoding this part as a string.
+SELECT 'deep nest' AS n1;
+/* Second level of nesting...
+SELECT 'deeper nest' as n2;
+/* Third level of nesting...
+SELECT 'deepest nest' as n3;
+*/
+Hoo boy. Still two deep...
+*/
+Now just one deep...
+*/
+'deeply nested example' AS sixth;
+
+/* and this is the end of the file */
diff --git a/ydb/library/yql/tests/postgresql/original/cases/create_misc.out b/ydb/library/yql/tests/postgresql/original/cases/create_misc.out
new file mode 100644
index 0000000000..41bc4d7750
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/create_misc.out
@@ -0,0 +1,151 @@
+--
+-- CREATE_MISC
+--
+-- CLASS POPULATION
+-- (any resemblance to real life is purely coincidental)
+--
+INSERT INTO tenk2 SELECT * FROM tenk1;
+CREATE TABLE onek2 AS SELECT * FROM onek;
+INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000;
+SELECT *
+ INTO TABLE Bprime
+ FROM tenk1
+ WHERE unique2 < 1000;
+INSERT INTO hobbies_r (name, person)
+ SELECT 'posthacking', p.name
+ FROM person* p
+ WHERE p.name = 'mike' or p.name = 'jeff';
+INSERT INTO hobbies_r (name, person)
+ SELECT 'basketball', p.name
+ FROM person p
+ WHERE p.name = 'joe' or p.name = 'sally';
+INSERT INTO hobbies_r (name) VALUES ('skywalking');
+INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking');
+INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking');
+INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball');
+INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking');
+INSERT INTO city VALUES
+('Podunk', '(1,2),(3,4)', '100,127,1000'),
+('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789');
+TABLE city;
+ name | location | budget
+--------+----------------------+-----------------------
+ Podunk | (3,4),(1,2) | 100,127,1000,0
+ Gotham | (1100,334),(1000,34) | 123456,127,-1000,6789
+(2 rows)
+
+SELECT *
+ INTO TABLE ramp
+ FROM road
+ WHERE name ~ '.*Ramp';
+INSERT INTO ihighway
+ SELECT *
+ FROM road
+ WHERE name ~ 'I- .*';
+INSERT INTO shighway
+ SELECT *
+ FROM road
+ WHERE name ~ 'State Hwy.*';
+UPDATE shighway
+ SET surface = 'asphalt';
+INSERT INTO a_star (class, a) VALUES ('a', 1);
+INSERT INTO a_star (class, a) VALUES ('a', 2);
+INSERT INTO a_star (class) VALUES ('a');
+INSERT INTO b_star (class, a, b) VALUES ('b', 3, 'mumble'::text);
+INSERT INTO b_star (class, a) VALUES ('b', 4);
+INSERT INTO b_star (class, b) VALUES ('b', 'bumble'::text);
+INSERT INTO b_star (class) VALUES ('b');
+INSERT INTO c_star (class, a, c) VALUES ('c', 5, 'hi mom'::name);
+INSERT INTO c_star (class, a) VALUES ('c', 6);
+INSERT INTO c_star (class, c) VALUES ('c', 'hi paul'::name);
+INSERT INTO c_star (class) VALUES ('c');
+INSERT INTO d_star (class, a, b, c, d)
+ VALUES ('d', 7, 'grumble'::text, 'hi sunita'::name, '0.0'::float8);
+INSERT INTO d_star (class, a, b, c)
+ VALUES ('d', 8, 'stumble'::text, 'hi koko'::name);
+INSERT INTO d_star (class, a, b, d)
+ VALUES ('d', 9, 'rumble'::text, '1.1'::float8);
+INSERT INTO d_star (class, a, c, d)
+ VALUES ('d', 10, 'hi kristin'::name, '10.01'::float8);
+INSERT INTO d_star (class, b, c, d)
+ VALUES ('d', 'crumble'::text, 'hi boris'::name, '100.001'::float8);
+INSERT INTO d_star (class, a, b)
+ VALUES ('d', 11, 'fumble'::text);
+INSERT INTO d_star (class, a, c)
+ VALUES ('d', 12, 'hi avi'::name);
+INSERT INTO d_star (class, a, d)
+ VALUES ('d', 13, '1000.0001'::float8);
+INSERT INTO d_star (class, b, c)
+ VALUES ('d', 'tumble'::text, 'hi andrew'::name);
+INSERT INTO d_star (class, b, d)
+ VALUES ('d', 'humble'::text, '10000.00001'::float8);
+INSERT INTO d_star (class, c, d)
+ VALUES ('d', 'hi ginger'::name, '100000.000001'::float8);
+INSERT INTO d_star (class, a) VALUES ('d', 14);
+INSERT INTO d_star (class, b) VALUES ('d', 'jumble'::text);
+INSERT INTO d_star (class, c) VALUES ('d', 'hi jolly'::name);
+INSERT INTO d_star (class, d) VALUES ('d', '1000000.0000001'::float8);
+INSERT INTO d_star (class) VALUES ('d');
+INSERT INTO e_star (class, a, c, e)
+ VALUES ('e', 15, 'hi carol'::name, '-1'::int2);
+INSERT INTO e_star (class, a, c)
+ VALUES ('e', 16, 'hi bob'::name);
+INSERT INTO e_star (class, a, e)
+ VALUES ('e', 17, '-2'::int2);
+INSERT INTO e_star (class, c, e)
+ VALUES ('e', 'hi michelle'::name, '-3'::int2);
+INSERT INTO e_star (class, a)
+ VALUES ('e', 18);
+INSERT INTO e_star (class, c)
+ VALUES ('e', 'hi elisa'::name);
+INSERT INTO e_star (class, e)
+ VALUES ('e', '-4'::int2);
+INSERT INTO f_star (class, a, c, e, f)
+ VALUES ('f', 19, 'hi claire'::name, '-5'::int2, '(1,3),(2,4)'::polygon);
+INSERT INTO f_star (class, a, c, e)
+ VALUES ('f', 20, 'hi mike'::name, '-6'::int2);
+INSERT INTO f_star (class, a, c, f)
+ VALUES ('f', 21, 'hi marcel'::name, '(11,44),(22,55),(33,66)'::polygon);
+INSERT INTO f_star (class, a, e, f)
+ VALUES ('f', 22, '-7'::int2, '(111,555),(222,666),(333,777),(444,888)'::polygon);
+INSERT INTO f_star (class, c, e, f)
+ VALUES ('f', 'hi keith'::name, '-8'::int2,
+ '(1111,3333),(2222,4444)'::polygon);
+INSERT INTO f_star (class, a, c)
+ VALUES ('f', 24, 'hi marc'::name);
+INSERT INTO f_star (class, a, e)
+ VALUES ('f', 25, '-9'::int2);
+INSERT INTO f_star (class, a, f)
+ VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon);
+INSERT INTO f_star (class, c, e)
+ VALUES ('f', 'hi allison'::name, '-10'::int2);
+INSERT INTO f_star (class, c, f)
+ VALUES ('f', 'hi jeff'::name,
+ '(111111,333333),(222222,444444)'::polygon);
+INSERT INTO f_star (class, e, f)
+ VALUES ('f', '-11'::int2, '(1111111,3333333),(2222222,4444444)'::polygon);
+INSERT INTO f_star (class, a) VALUES ('f', 27);
+INSERT INTO f_star (class, c) VALUES ('f', 'hi carl'::name);
+INSERT INTO f_star (class, e) VALUES ('f', '-12'::int2);
+INSERT INTO f_star (class, f)
+ VALUES ('f', '(11111111,33333333),(22222222,44444444)'::polygon);
+INSERT INTO f_star (class) VALUES ('f');
+-- Analyze the X_star tables for better plan stability in later tests
+ANALYZE a_star;
+ANALYZE b_star;
+ANALYZE c_star;
+ANALYZE d_star;
+ANALYZE e_star;
+ANALYZE f_star;
+--
+-- for internal portal (cursor) tests
+--
+CREATE TABLE iportaltest (
+ i int4,
+ d float4,
+ p polygon
+);
+INSERT INTO iportaltest (i, d, p)
+ VALUES (1, 3.567, '(3.0,1.0),(4.0,2.0)'::polygon);
+INSERT INTO iportaltest (i, d, p)
+ VALUES (2, 89.05, '(4.0,2.0),(3.0,1.0)'::polygon);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/create_misc.sql b/ydb/library/yql/tests/postgresql/original/cases/create_misc.sql
new file mode 100644
index 0000000000..c7d0d064c3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/create_misc.sql
@@ -0,0 +1,217 @@
+--
+-- CREATE_MISC
+--
+
+-- CLASS POPULATION
+-- (any resemblance to real life is purely coincidental)
+--
+
+INSERT INTO tenk2 SELECT * FROM tenk1;
+
+CREATE TABLE onek2 AS SELECT * FROM onek;
+
+INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000;
+
+SELECT *
+ INTO TABLE Bprime
+ FROM tenk1
+ WHERE unique2 < 1000;
+
+INSERT INTO hobbies_r (name, person)
+ SELECT 'posthacking', p.name
+ FROM person* p
+ WHERE p.name = 'mike' or p.name = 'jeff';
+
+INSERT INTO hobbies_r (name, person)
+ SELECT 'basketball', p.name
+ FROM person p
+ WHERE p.name = 'joe' or p.name = 'sally';
+
+INSERT INTO hobbies_r (name) VALUES ('skywalking');
+
+INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking');
+
+INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking');
+
+INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball');
+
+INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking');
+
+INSERT INTO city VALUES
+('Podunk', '(1,2),(3,4)', '100,127,1000'),
+('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789');
+TABLE city;
+
+SELECT *
+ INTO TABLE ramp
+ FROM road
+ WHERE name ~ '.*Ramp';
+
+INSERT INTO ihighway
+ SELECT *
+ FROM road
+ WHERE name ~ 'I- .*';
+
+INSERT INTO shighway
+ SELECT *
+ FROM road
+ WHERE name ~ 'State Hwy.*';
+
+UPDATE shighway
+ SET surface = 'asphalt';
+
+INSERT INTO a_star (class, a) VALUES ('a', 1);
+
+INSERT INTO a_star (class, a) VALUES ('a', 2);
+
+INSERT INTO a_star (class) VALUES ('a');
+
+INSERT INTO b_star (class, a, b) VALUES ('b', 3, 'mumble'::text);
+
+INSERT INTO b_star (class, a) VALUES ('b', 4);
+
+INSERT INTO b_star (class, b) VALUES ('b', 'bumble'::text);
+
+INSERT INTO b_star (class) VALUES ('b');
+
+INSERT INTO c_star (class, a, c) VALUES ('c', 5, 'hi mom'::name);
+
+INSERT INTO c_star (class, a) VALUES ('c', 6);
+
+INSERT INTO c_star (class, c) VALUES ('c', 'hi paul'::name);
+
+INSERT INTO c_star (class) VALUES ('c');
+
+INSERT INTO d_star (class, a, b, c, d)
+ VALUES ('d', 7, 'grumble'::text, 'hi sunita'::name, '0.0'::float8);
+
+INSERT INTO d_star (class, a, b, c)
+ VALUES ('d', 8, 'stumble'::text, 'hi koko'::name);
+
+INSERT INTO d_star (class, a, b, d)
+ VALUES ('d', 9, 'rumble'::text, '1.1'::float8);
+
+INSERT INTO d_star (class, a, c, d)
+ VALUES ('d', 10, 'hi kristin'::name, '10.01'::float8);
+
+INSERT INTO d_star (class, b, c, d)
+ VALUES ('d', 'crumble'::text, 'hi boris'::name, '100.001'::float8);
+
+INSERT INTO d_star (class, a, b)
+ VALUES ('d', 11, 'fumble'::text);
+
+INSERT INTO d_star (class, a, c)
+ VALUES ('d', 12, 'hi avi'::name);
+
+INSERT INTO d_star (class, a, d)
+ VALUES ('d', 13, '1000.0001'::float8);
+
+INSERT INTO d_star (class, b, c)
+ VALUES ('d', 'tumble'::text, 'hi andrew'::name);
+
+INSERT INTO d_star (class, b, d)
+ VALUES ('d', 'humble'::text, '10000.00001'::float8);
+
+INSERT INTO d_star (class, c, d)
+ VALUES ('d', 'hi ginger'::name, '100000.000001'::float8);
+
+INSERT INTO d_star (class, a) VALUES ('d', 14);
+
+INSERT INTO d_star (class, b) VALUES ('d', 'jumble'::text);
+
+INSERT INTO d_star (class, c) VALUES ('d', 'hi jolly'::name);
+
+INSERT INTO d_star (class, d) VALUES ('d', '1000000.0000001'::float8);
+
+INSERT INTO d_star (class) VALUES ('d');
+
+INSERT INTO e_star (class, a, c, e)
+ VALUES ('e', 15, 'hi carol'::name, '-1'::int2);
+
+INSERT INTO e_star (class, a, c)
+ VALUES ('e', 16, 'hi bob'::name);
+
+INSERT INTO e_star (class, a, e)
+ VALUES ('e', 17, '-2'::int2);
+
+INSERT INTO e_star (class, c, e)
+ VALUES ('e', 'hi michelle'::name, '-3'::int2);
+
+INSERT INTO e_star (class, a)
+ VALUES ('e', 18);
+
+INSERT INTO e_star (class, c)
+ VALUES ('e', 'hi elisa'::name);
+
+INSERT INTO e_star (class, e)
+ VALUES ('e', '-4'::int2);
+
+INSERT INTO f_star (class, a, c, e, f)
+ VALUES ('f', 19, 'hi claire'::name, '-5'::int2, '(1,3),(2,4)'::polygon);
+
+INSERT INTO f_star (class, a, c, e)
+ VALUES ('f', 20, 'hi mike'::name, '-6'::int2);
+
+INSERT INTO f_star (class, a, c, f)
+ VALUES ('f', 21, 'hi marcel'::name, '(11,44),(22,55),(33,66)'::polygon);
+
+INSERT INTO f_star (class, a, e, f)
+ VALUES ('f', 22, '-7'::int2, '(111,555),(222,666),(333,777),(444,888)'::polygon);
+
+INSERT INTO f_star (class, c, e, f)
+ VALUES ('f', 'hi keith'::name, '-8'::int2,
+ '(1111,3333),(2222,4444)'::polygon);
+
+INSERT INTO f_star (class, a, c)
+ VALUES ('f', 24, 'hi marc'::name);
+
+INSERT INTO f_star (class, a, e)
+ VALUES ('f', 25, '-9'::int2);
+
+INSERT INTO f_star (class, a, f)
+ VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon);
+
+INSERT INTO f_star (class, c, e)
+ VALUES ('f', 'hi allison'::name, '-10'::int2);
+
+INSERT INTO f_star (class, c, f)
+ VALUES ('f', 'hi jeff'::name,
+ '(111111,333333),(222222,444444)'::polygon);
+
+INSERT INTO f_star (class, e, f)
+ VALUES ('f', '-11'::int2, '(1111111,3333333),(2222222,4444444)'::polygon);
+
+INSERT INTO f_star (class, a) VALUES ('f', 27);
+
+INSERT INTO f_star (class, c) VALUES ('f', 'hi carl'::name);
+
+INSERT INTO f_star (class, e) VALUES ('f', '-12'::int2);
+
+INSERT INTO f_star (class, f)
+ VALUES ('f', '(11111111,33333333),(22222222,44444444)'::polygon);
+
+INSERT INTO f_star (class) VALUES ('f');
+
+-- Analyze the X_star tables for better plan stability in later tests
+ANALYZE a_star;
+ANALYZE b_star;
+ANALYZE c_star;
+ANALYZE d_star;
+ANALYZE e_star;
+ANALYZE f_star;
+
+
+--
+-- for internal portal (cursor) tests
+--
+CREATE TABLE iportaltest (
+ i int4,
+ d float4,
+ p polygon
+);
+
+INSERT INTO iportaltest (i, d, p)
+ VALUES (1, 3.567, '(3.0,1.0),(4.0,2.0)'::polygon);
+
+INSERT INTO iportaltest (i, d, p)
+ VALUES (2, 89.05, '(4.0,2.0),(3.0,1.0)'::polygon);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/create_table.out b/ydb/library/yql/tests/postgresql/original/cases/create_table.out
new file mode 100644
index 0000000000..a958b84979
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/create_table.out
@@ -0,0 +1,1321 @@
+--
+-- CREATE_TABLE
+--
+--
+-- CLASS DEFINITIONS
+--
+CREATE TABLE hobbies_r (
+ name text,
+ person text
+);
+CREATE TABLE equipment_r (
+ name text,
+ hobby text
+);
+CREATE TABLE onek (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+CREATE TABLE tenk1 (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+CREATE TABLE tenk2 (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+CREATE TABLE person (
+ name text,
+ age int4,
+ location point
+);
+CREATE TABLE emp (
+ salary int4,
+ manager name
+) INHERITS (person);
+CREATE TABLE student (
+ gpa float8
+) INHERITS (person);
+CREATE TABLE stud_emp (
+ percent int4
+) INHERITS (emp, student);
+NOTICE: merging multiple inherited definitions of column "name"
+NOTICE: merging multiple inherited definitions of column "age"
+NOTICE: merging multiple inherited definitions of column "location"
+CREATE TABLE city (
+ name name,
+ location box,
+ budget city_budget
+);
+CREATE TABLE dept (
+ dname name,
+ mgrname text
+);
+CREATE TABLE slow_emp4000 (
+ home_base box
+);
+CREATE TABLE fast_emp4000 (
+ home_base box
+);
+CREATE TABLE road (
+ name text,
+ thepath path
+);
+CREATE TABLE ihighway () INHERITS (road);
+CREATE TABLE shighway (
+ surface text
+) INHERITS (road);
+CREATE TABLE real_city (
+ pop int4,
+ cname text,
+ outline path
+);
+--
+-- test the "star" operators a bit more thoroughly -- this time,
+-- throw in lots of NULL fields...
+--
+-- a is the type root
+-- b and c inherit from a (one-level single inheritance)
+-- d inherits from b and c (two-level multiple inheritance)
+-- e inherits from c (two-level single inheritance)
+-- f inherits from e (three-level single inheritance)
+--
+CREATE TABLE a_star (
+ class char,
+ a int4
+);
+CREATE TABLE b_star (
+ b text
+) INHERITS (a_star);
+CREATE TABLE c_star (
+ c name
+) INHERITS (a_star);
+CREATE TABLE d_star (
+ d float8
+) INHERITS (b_star, c_star);
+NOTICE: merging multiple inherited definitions of column "class"
+NOTICE: merging multiple inherited definitions of column "a"
+CREATE TABLE e_star (
+ e int2
+) INHERITS (c_star);
+CREATE TABLE f_star (
+ f polygon
+) INHERITS (e_star);
+CREATE TABLE aggtest (
+ a int2,
+ b float4
+);
+CREATE TABLE hash_i4_heap (
+ seqno int4,
+ random int4
+);
+CREATE TABLE hash_name_heap (
+ seqno int4,
+ random name
+);
+CREATE TABLE hash_txt_heap (
+ seqno int4,
+ random text
+);
+CREATE TABLE hash_f8_heap (
+ seqno int4,
+ random float8
+);
+-- don't include the hash_ovfl_heap stuff in the distribution
+-- the data set is too large for what it's worth
+--
+-- CREATE TABLE hash_ovfl_heap (
+-- x int4,
+-- y int4
+-- );
+CREATE TABLE bt_i4_heap (
+ seqno int4,
+ random int4
+);
+CREATE TABLE bt_name_heap (
+ seqno name,
+ random int4
+);
+CREATE TABLE bt_txt_heap (
+ seqno text,
+ random int4
+);
+CREATE TABLE bt_f8_heap (
+ seqno float8,
+ random int4
+);
+CREATE TABLE array_op_test (
+ seqno int4,
+ i int4[],
+ t text[]
+);
+CREATE TABLE array_index_op_test (
+ seqno int4,
+ i int4[],
+ t text[]
+);
+CREATE TABLE testjsonb (
+ j jsonb
+);
+CREATE TABLE unknowntab (
+ u unknown -- fail
+);
+ERROR: column "u" has pseudo-type unknown
+CREATE TYPE unknown_comptype AS (
+ u unknown -- fail
+);
+ERROR: column "u" has pseudo-type unknown
+CREATE TABLE IF NOT EXISTS test_tsvector(
+ t text,
+ a tsvector
+);
+CREATE TABLE IF NOT EXISTS test_tsvector(
+ t text
+);
+NOTICE: relation "test_tsvector" already exists, skipping
+-- invalid: non-lowercase quoted reloptions identifiers
+CREATE TABLE tas_case WITH ("Fillfactor" = 10) AS SELECT 1 a;
+ERROR: unrecognized parameter "Fillfactor"
+CREATE UNLOGGED TABLE unlogged1 (a int primary key); -- OK
+CREATE TEMPORARY TABLE unlogged2 (a int primary key); -- OK
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname;
+ relname | relkind | relpersistence
+----------------+---------+----------------
+ unlogged1 | r | u
+ unlogged1_pkey | i | u
+ unlogged2 | r | t
+ unlogged2_pkey | i | t
+(4 rows)
+
+REINDEX INDEX unlogged1_pkey;
+REINDEX INDEX unlogged2_pkey;
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname;
+ relname | relkind | relpersistence
+----------------+---------+----------------
+ unlogged1 | r | u
+ unlogged1_pkey | i | u
+ unlogged2 | r | t
+ unlogged2_pkey | i | t
+(4 rows)
+
+DROP TABLE unlogged2;
+INSERT INTO unlogged1 VALUES (42);
+CREATE UNLOGGED TABLE public.unlogged2 (a int primary key); -- also OK
+CREATE UNLOGGED TABLE pg_temp.unlogged3 (a int primary key); -- not OK
+ERROR: only temporary relations may be created in temporary schemas
+LINE 1: CREATE UNLOGGED TABLE pg_temp.unlogged3 (a int primary key);
+ ^
+CREATE TABLE pg_temp.implicitly_temp (a int primary key); -- OK
+CREATE TEMP TABLE explicitly_temp (a int primary key); -- also OK
+CREATE TEMP TABLE pg_temp.doubly_temp (a int primary key); -- also OK
+CREATE TEMP TABLE public.temp_to_perm (a int primary key); -- not OK
+ERROR: cannot create temporary relation in non-temporary schema
+LINE 1: CREATE TEMP TABLE public.temp_to_perm (a int primary key);
+ ^
+DROP TABLE unlogged1, public.unlogged2;
+CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+ERROR: relation "as_select1" already exists
+CREATE TABLE IF NOT EXISTS as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+NOTICE: relation "as_select1" already exists, skipping
+DROP TABLE as_select1;
+PREPARE select1 AS SELECT 1 as a;
+CREATE TABLE as_select1 AS EXECUTE select1;
+CREATE TABLE as_select1 AS EXECUTE select1;
+ERROR: relation "as_select1" already exists
+SELECT * FROM as_select1;
+ a
+---
+ 1
+(1 row)
+
+CREATE TABLE IF NOT EXISTS as_select1 AS EXECUTE select1;
+NOTICE: relation "as_select1" already exists, skipping
+DROP TABLE as_select1;
+DEALLOCATE select1;
+-- create an extra wide table to test for issues related to that
+-- (temporarily hide query, to avoid the long CREATE TABLE stmt)
+\set ECHO none
+INSERT INTO extra_wide_table(firstc, lastc) VALUES('first col', 'last col');
+SELECT firstc, lastc FROM extra_wide_table;
+ firstc | lastc
+-----------+----------
+ first col | last col
+(1 row)
+
+-- check that tables with oids cannot be created anymore
+CREATE TABLE withoid() WITH OIDS;
+ERROR: syntax error at or near "OIDS"
+LINE 1: CREATE TABLE withoid() WITH OIDS;
+ ^
+CREATE TABLE withoid() WITH (oids);
+ERROR: tables declared WITH OIDS are not supported
+CREATE TABLE withoid() WITH (oids = true);
+ERROR: tables declared WITH OIDS are not supported
+-- but explicitly not adding oids is still supported
+CREATE TEMP TABLE withoutoid() WITHOUT OIDS; DROP TABLE withoutoid;
+CREATE TEMP TABLE withoutoid() WITH (oids = false); DROP TABLE withoutoid;
+-- check restriction with default expressions
+-- invalid use of column reference in default expressions
+CREATE TABLE default_expr_column (id int DEFAULT (id));
+ERROR: cannot use column reference in DEFAULT expression
+LINE 1: CREATE TABLE default_expr_column (id int DEFAULT (id));
+ ^
+CREATE TABLE default_expr_column (id int DEFAULT (bar.id));
+ERROR: cannot use column reference in DEFAULT expression
+LINE 1: CREATE TABLE default_expr_column (id int DEFAULT (bar.id));
+ ^
+CREATE TABLE default_expr_agg_column (id int DEFAULT (avg(id)));
+ERROR: cannot use column reference in DEFAULT expression
+LINE 1: ...TE TABLE default_expr_agg_column (id int DEFAULT (avg(id)));
+ ^
+-- invalid column definition
+CREATE TABLE default_expr_non_column (a int DEFAULT (avg(non_existent)));
+ERROR: cannot use column reference in DEFAULT expression
+LINE 1: ...TABLE default_expr_non_column (a int DEFAULT (avg(non_existe...
+ ^
+-- invalid use of aggregate
+CREATE TABLE default_expr_agg (a int DEFAULT (avg(1)));
+ERROR: aggregate functions are not allowed in DEFAULT expressions
+LINE 1: CREATE TABLE default_expr_agg (a int DEFAULT (avg(1)));
+ ^
+-- invalid use of subquery
+CREATE TABLE default_expr_agg (a int DEFAULT (select 1));
+ERROR: cannot use subquery in DEFAULT expression
+LINE 1: CREATE TABLE default_expr_agg (a int DEFAULT (select 1));
+ ^
+-- invalid use of set-returning function
+CREATE TABLE default_expr_agg (a int DEFAULT (generate_series(1,3)));
+ERROR: set-returning functions are not allowed in DEFAULT expressions
+LINE 1: CREATE TABLE default_expr_agg (a int DEFAULT (generate_serie...
+ ^
+-- Verify that subtransaction rollback restores rd_createSubid.
+BEGIN;
+CREATE TABLE remember_create_subid (c int);
+SAVEPOINT q; DROP TABLE remember_create_subid; ROLLBACK TO q;
+COMMIT;
+DROP TABLE remember_create_subid;
+-- Verify that subtransaction rollback restores rd_firstRelfilenodeSubid.
+CREATE TABLE remember_node_subid (c int);
+BEGIN;
+ALTER TABLE remember_node_subid ALTER c TYPE bigint;
+SAVEPOINT q; DROP TABLE remember_node_subid; ROLLBACK TO q;
+COMMIT;
+DROP TABLE remember_node_subid;
+--
+-- Partitioned tables
+--
+-- cannot combine INHERITS and PARTITION BY (although grammar allows)
+CREATE TABLE partitioned (
+ a int
+) INHERITS (some_table) PARTITION BY LIST (a);
+ERROR: cannot create partitioned table as inheritance child
+-- cannot use more than 1 column as partition key for list partitioned table
+CREATE TABLE partitioned (
+ a1 int,
+ a2 int
+) PARTITION BY LIST (a1, a2); -- fail
+ERROR: cannot use "list" partition strategy with more than one column
+-- unsupported constraint type for partitioned tables
+CREATE TABLE partitioned (
+ a int,
+ EXCLUDE USING gist (a WITH &&)
+) PARTITION BY RANGE (a);
+ERROR: exclusion constraints are not supported on partitioned tables
+LINE 3: EXCLUDE USING gist (a WITH &&)
+ ^
+-- prevent using prohibited expressions in the key
+CREATE FUNCTION retset (a int) RETURNS SETOF int AS $$ SELECT 1; $$ LANGUAGE SQL IMMUTABLE;
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (retset(a));
+ERROR: set-returning functions are not allowed in partition key expressions
+DROP FUNCTION retset(int);
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE ((avg(a)));
+ERROR: aggregate functions are not allowed in partition key expressions
+CREATE TABLE partitioned (
+ a int,
+ b int
+) PARTITION BY RANGE ((avg(a) OVER (PARTITION BY b)));
+ERROR: window functions are not allowed in partition key expressions
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY LIST ((a LIKE (SELECT 1)));
+ERROR: cannot use subquery in partition key expression
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE ((42));
+ERROR: cannot use constant expression as partition key
+CREATE FUNCTION const_func () RETURNS int AS $$ SELECT 1; $$ LANGUAGE SQL IMMUTABLE;
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (const_func());
+ERROR: cannot use constant expression as partition key
+DROP FUNCTION const_func();
+-- only accept valid partitioning strategy
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY MAGIC (a);
+ERROR: unrecognized partitioning strategy "magic"
+-- specified column must be present in the table
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (b);
+ERROR: column "b" named in partition key does not exist
+LINE 3: ) PARTITION BY RANGE (b);
+ ^
+-- cannot use system columns in partition key
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (xmin);
+ERROR: cannot use system column "xmin" in partition key
+LINE 3: ) PARTITION BY RANGE (xmin);
+ ^
+-- cannot use pseudotypes
+CREATE TABLE partitioned (
+ a int,
+ b int
+) PARTITION BY RANGE (((a, b)));
+ERROR: partition key column 1 has pseudo-type record
+CREATE TABLE partitioned (
+ a int,
+ b int
+) PARTITION BY RANGE (a, ('unknown'));
+ERROR: partition key column 2 has pseudo-type unknown
+-- functions in key must be immutable
+CREATE FUNCTION immut_func (a int) RETURNS int AS $$ SELECT a + random()::int; $$ LANGUAGE SQL;
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (immut_func(a));
+ERROR: functions in partition key expression must be marked IMMUTABLE
+DROP FUNCTION immut_func(int);
+-- prevent using columns of unsupported types in key (type must have a btree operator class)
+CREATE TABLE partitioned (
+ a point
+) PARTITION BY LIST (a);
+ERROR: data type point has no default operator class for access method "btree"
+HINT: You must specify a btree operator class or define a default btree operator class for the data type.
+CREATE TABLE partitioned (
+ a point
+) PARTITION BY LIST (a point_ops);
+ERROR: operator class "point_ops" does not exist for access method "btree"
+CREATE TABLE partitioned (
+ a point
+) PARTITION BY RANGE (a);
+ERROR: data type point has no default operator class for access method "btree"
+HINT: You must specify a btree operator class or define a default btree operator class for the data type.
+CREATE TABLE partitioned (
+ a point
+) PARTITION BY RANGE (a point_ops);
+ERROR: operator class "point_ops" does not exist for access method "btree"
+-- cannot add NO INHERIT constraints to partitioned tables
+CREATE TABLE partitioned (
+ a int,
+ CONSTRAINT check_a CHECK (a > 0) NO INHERIT
+) PARTITION BY RANGE (a);
+ERROR: cannot add NO INHERIT constraint to partitioned table "partitioned"
+-- some checks after successful creation of a partitioned table
+CREATE FUNCTION plusone(a int) RETURNS INT AS $$ SELECT a+1; $$ LANGUAGE SQL;
+CREATE TABLE partitioned (
+ a int,
+ b int,
+ c text,
+ d text
+) PARTITION BY RANGE (a oid_ops, plusone(b), c collate "default", d collate "C");
+-- check relkind
+SELECT relkind FROM pg_class WHERE relname = 'partitioned';
+ relkind
+---------
+ p
+(1 row)
+
+-- prevent a function referenced in partition key from being dropped
+DROP FUNCTION plusone(int);
+ERROR: cannot drop function plusone(integer) because other objects depend on it
+DETAIL: table partitioned depends on function plusone(integer)
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+-- partitioned table cannot participate in regular inheritance
+CREATE TABLE partitioned2 (
+ a int,
+ b text
+) PARTITION BY RANGE ((a+1), substr(b, 1, 5));
+CREATE TABLE fail () INHERITS (partitioned2);
+ERROR: cannot inherit from partitioned table "partitioned2"
+-- Partition key in describe output
+\d partitioned
+ Partitioned table "public.partitioned"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | integer | | |
+ c | text | | |
+ d | text | | |
+Partition key: RANGE (a oid_ops, plusone(b), c, d COLLATE "C")
+Number of partitions: 0
+
+\d+ partitioned2
+ Partitioned table "public.partitioned2"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | integer | | | | plain | |
+ b | text | | | | extended | |
+Partition key: RANGE (((a + 1)), substr(b, 1, 5))
+Number of partitions: 0
+
+INSERT INTO partitioned2 VALUES (1, 'hello');
+ERROR: no partition of relation "partitioned2" found for row
+DETAIL: Partition key of the failing row contains ((a + 1), substr(b, 1, 5)) = (2, hello).
+CREATE TABLE part2_1 PARTITION OF partitioned2 FOR VALUES FROM (-1, 'aaaaa') TO (100, 'ccccc');
+\d+ part2_1
+ Table "public.part2_1"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | integer | | | | plain | |
+ b | text | | | | extended | |
+Partition of: partitioned2 FOR VALUES FROM ('-1', 'aaaaa') TO (100, 'ccccc')
+Partition constraint: (((a + 1) IS NOT NULL) AND (substr(b, 1, 5) IS NOT NULL) AND (((a + 1) > '-1'::integer) OR (((a + 1) = '-1'::integer) AND (substr(b, 1, 5) >= 'aaaaa'::text))) AND (((a + 1) < 100) OR (((a + 1) = 100) AND (substr(b, 1, 5) < 'ccccc'::text))))
+
+DROP TABLE partitioned, partitioned2;
+-- check reference to partitioned table's rowtype in partition descriptor
+create table partitioned (a int, b int)
+ partition by list ((row(a, b)::partitioned));
+create table partitioned1
+ partition of partitioned for values in ('(1,2)'::partitioned);
+create table partitioned2
+ partition of partitioned for values in ('(2,4)'::partitioned);
+explain (costs off)
+select * from partitioned where row(a,b)::partitioned = '(1,2)'::partitioned;
+ QUERY PLAN
+-----------------------------------------------------------
+ Seq Scan on partitioned1 partitioned
+ Filter: (ROW(a, b)::partitioned = '(1,2)'::partitioned)
+(2 rows)
+
+drop table partitioned;
+-- whole-row Var in partition key works too
+create table partitioned (a int, b int)
+ partition by list ((partitioned));
+create table partitioned1
+ partition of partitioned for values in ('(1,2)');
+create table partitioned2
+ partition of partitioned for values in ('(2,4)');
+explain (costs off)
+select * from partitioned where partitioned = '(1,2)'::partitioned;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Seq Scan on partitioned1 partitioned
+ Filter: ((partitioned.*)::partitioned = '(1,2)'::partitioned)
+(2 rows)
+
+\d+ partitioned1
+ Table "public.partitioned1"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | integer | | | | plain | |
+ b | integer | | | | plain | |
+Partition of: partitioned FOR VALUES IN ('(1,2)')
+Partition constraint: (((partitioned1.*)::partitioned IS DISTINCT FROM NULL) AND ((partitioned1.*)::partitioned = '(1,2)'::partitioned))
+
+drop table partitioned;
+-- check that dependencies of partition columns are handled correctly
+create domain intdom1 as int;
+create table partitioned (
+ a intdom1,
+ b text
+) partition by range (a);
+alter table partitioned drop column a; -- fail
+ERROR: cannot drop column "a" because it is part of the partition key of relation "partitioned"
+drop domain intdom1; -- fail, requires cascade
+ERROR: cannot drop type intdom1 because other objects depend on it
+DETAIL: table partitioned depends on type intdom1
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+drop domain intdom1 cascade;
+NOTICE: drop cascades to table partitioned
+table partitioned; -- gone
+ERROR: relation "partitioned" does not exist
+LINE 1: table partitioned;
+ ^
+-- likewise for columns used in partition expressions
+create domain intdom1 as int;
+create table partitioned (
+ a intdom1,
+ b text
+) partition by range (plusone(a));
+alter table partitioned drop column a; -- fail
+ERROR: cannot drop column "a" because it is part of the partition key of relation "partitioned"
+drop domain intdom1; -- fail, requires cascade
+ERROR: cannot drop type intdom1 because other objects depend on it
+DETAIL: table partitioned depends on type intdom1
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+drop domain intdom1 cascade;
+NOTICE: drop cascades to table partitioned
+table partitioned; -- gone
+ERROR: relation "partitioned" does not exist
+LINE 1: table partitioned;
+ ^
+--
+-- Partitions
+--
+-- check partition bound syntax
+CREATE TABLE list_parted (
+ a int
+) PARTITION BY LIST (a);
+CREATE TABLE part_p1 PARTITION OF list_parted FOR VALUES IN ('1');
+CREATE TABLE part_p2 PARTITION OF list_parted FOR VALUES IN (2);
+CREATE TABLE part_p3 PARTITION OF list_parted FOR VALUES IN ((2+1));
+CREATE TABLE part_null PARTITION OF list_parted FOR VALUES IN (null);
+\d+ list_parted
+ Partitioned table "public.list_parted"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | integer | | | | plain | |
+Partition key: LIST (a)
+Partitions: part_null FOR VALUES IN (NULL),
+ part_p1 FOR VALUES IN (1),
+ part_p2 FOR VALUES IN (2),
+ part_p3 FOR VALUES IN (3)
+
+-- forbidden expressions for partition bound with list partitioned table
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (somename);
+ERROR: cannot use column reference in partition bound expression
+LINE 1: ...expr_fail PARTITION OF list_parted FOR VALUES IN (somename);
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (somename.somename);
+ERROR: cannot use column reference in partition bound expression
+LINE 1: ...expr_fail PARTITION OF list_parted FOR VALUES IN (somename.s...
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (a);
+ERROR: cannot use column reference in partition bound expression
+LINE 1: ..._bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (a);
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(a));
+ERROR: cannot use column reference in partition bound expression
+LINE 1: ...s_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(a));
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(somename));
+ERROR: cannot use column reference in partition bound expression
+LINE 1: ..._fail PARTITION OF list_parted FOR VALUES IN (sum(somename))...
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(1));
+ERROR: aggregate functions are not allowed in partition bound
+LINE 1: ...s_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(1));
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((select 1));
+ERROR: cannot use subquery in partition bound
+LINE 1: ...expr_fail PARTITION OF list_parted FOR VALUES IN ((select 1)...
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (generate_series(4, 6));
+ERROR: set-returning functions are not allowed in partition bound
+LINE 1: ...expr_fail PARTITION OF list_parted FOR VALUES IN (generate_s...
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((1+1) collate "POSIX");
+ERROR: collations are not supported by type integer
+LINE 1: ...ail PARTITION OF list_parted FOR VALUES IN ((1+1) collate "P...
+ ^
+-- syntax does not allow empty list of values for list partitions
+CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES IN ();
+ERROR: syntax error at or near ")"
+LINE 1: ...E TABLE fail_part PARTITION OF list_parted FOR VALUES IN ();
+ ^
+-- trying to specify range for list partitioned table
+CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES FROM (1) TO (2);
+ERROR: invalid bound specification for a list partition
+LINE 1: ...BLE fail_part PARTITION OF list_parted FOR VALUES FROM (1) T...
+ ^
+-- trying to specify modulus and remainder for list partitioned table
+CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1);
+ERROR: invalid bound specification for a list partition
+LINE 1: ...BLE fail_part PARTITION OF list_parted FOR VALUES WITH (MODU...
+ ^
+-- check default partition cannot be created more than once
+CREATE TABLE part_default PARTITION OF list_parted DEFAULT;
+CREATE TABLE fail_default_part PARTITION OF list_parted DEFAULT;
+ERROR: partition "fail_default_part" conflicts with existing default partition "part_default"
+LINE 1: ...TE TABLE fail_default_part PARTITION OF list_parted DEFAULT;
+ ^
+-- specified literal can't be cast to the partition column data type
+CREATE TABLE bools (
+ a bool
+) PARTITION BY LIST (a);
+CREATE TABLE bools_true PARTITION OF bools FOR VALUES IN (1);
+ERROR: specified value cannot be cast to type boolean for column "a"
+LINE 1: ...REATE TABLE bools_true PARTITION OF bools FOR VALUES IN (1);
+ ^
+DROP TABLE bools;
+-- specified literal can be cast, and the cast might not be immutable
+CREATE TABLE moneyp (
+ a money
+) PARTITION BY LIST (a);
+CREATE TABLE moneyp_10 PARTITION OF moneyp FOR VALUES IN (10);
+CREATE TABLE moneyp_11 PARTITION OF moneyp FOR VALUES IN ('11');
+CREATE TABLE moneyp_12 PARTITION OF moneyp FOR VALUES IN (to_char(12, '99')::int);
+DROP TABLE moneyp;
+-- cast is immutable
+CREATE TABLE bigintp (
+ a bigint
+) PARTITION BY LIST (a);
+CREATE TABLE bigintp_10 PARTITION OF bigintp FOR VALUES IN (10);
+-- fails due to overlap:
+CREATE TABLE bigintp_10_2 PARTITION OF bigintp FOR VALUES IN ('10');
+ERROR: partition "bigintp_10_2" would overlap partition "bigintp_10"
+LINE 1: ...ABLE bigintp_10_2 PARTITION OF bigintp FOR VALUES IN ('10');
+ ^
+DROP TABLE bigintp;
+CREATE TABLE range_parted (
+ a date
+) PARTITION BY RANGE (a);
+-- forbidden expressions for partition bounds with range partitioned table
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (somename) TO ('2019-01-01');
+ERROR: cannot use column reference in partition bound expression
+LINE 2: FOR VALUES FROM (somename) TO ('2019-01-01');
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (somename.somename) TO ('2019-01-01');
+ERROR: cannot use column reference in partition bound expression
+LINE 2: FOR VALUES FROM (somename.somename) TO ('2019-01-01');
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (a) TO ('2019-01-01');
+ERROR: cannot use column reference in partition bound expression
+LINE 2: FOR VALUES FROM (a) TO ('2019-01-01');
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (max(a)) TO ('2019-01-01');
+ERROR: cannot use column reference in partition bound expression
+LINE 2: FOR VALUES FROM (max(a)) TO ('2019-01-01');
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (max(somename)) TO ('2019-01-01');
+ERROR: cannot use column reference in partition bound expression
+LINE 2: FOR VALUES FROM (max(somename)) TO ('2019-01-01');
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (max('2019-02-01'::date)) TO ('2019-01-01');
+ERROR: aggregate functions are not allowed in partition bound
+LINE 2: FOR VALUES FROM (max('2019-02-01'::date)) TO ('2019-01-01'...
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM ((select 1)) TO ('2019-01-01');
+ERROR: cannot use subquery in partition bound
+LINE 2: FOR VALUES FROM ((select 1)) TO ('2019-01-01');
+ ^
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (generate_series(1, 3)) TO ('2019-01-01');
+ERROR: set-returning functions are not allowed in partition bound
+LINE 2: FOR VALUES FROM (generate_series(1, 3)) TO ('2019-01-01');
+ ^
+-- trying to specify list for range partitioned table
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES IN ('a');
+ERROR: invalid bound specification for a range partition
+LINE 1: ...BLE fail_part PARTITION OF range_parted FOR VALUES IN ('a');
+ ^
+-- trying to specify modulus and remainder for range partitioned table
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1);
+ERROR: invalid bound specification for a range partition
+LINE 1: ...LE fail_part PARTITION OF range_parted FOR VALUES WITH (MODU...
+ ^
+-- each of start and end bounds must have same number of values as the
+-- length of the partition key
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('z');
+ERROR: FROM must specify exactly one value per partitioning column
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM ('a') TO ('z', 1);
+ERROR: TO must specify exactly one value per partitioning column
+-- cannot specify null values in range bounds
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM (null) TO (maxvalue);
+ERROR: cannot specify NULL in range bound
+-- trying to specify modulus and remainder for range partitioned table
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1);
+ERROR: invalid bound specification for a range partition
+LINE 1: ...LE fail_part PARTITION OF range_parted FOR VALUES WITH (MODU...
+ ^
+-- check partition bound syntax for the hash partition
+CREATE TABLE hash_parted (
+ a int
+) PARTITION BY HASH (a);
+CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 10, REMAINDER 0);
+CREATE TABLE hpart_2 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 50, REMAINDER 1);
+CREATE TABLE hpart_3 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 200, REMAINDER 2);
+CREATE TABLE hpart_4 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 10, REMAINDER 3);
+-- modulus 25 is factor of modulus of 50 but 10 is not a factor of 25.
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 25, REMAINDER 3);
+ERROR: every hash partition modulus must be a factor of the next larger modulus
+DETAIL: The new modulus 25 is not divisible by 10, the modulus of existing partition "hpart_4".
+-- previous modulus 50 is factor of 150 but this modulus is not a factor of next modulus 200.
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 150, REMAINDER 3);
+ERROR: every hash partition modulus must be a factor of the next larger modulus
+DETAIL: The new modulus 150 is not a factor of 200, the modulus of existing partition "hpart_3".
+-- overlapping remainders
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 100, REMAINDER 3);
+ERROR: partition "fail_part" would overlap partition "hpart_4"
+LINE 1: ...BLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODU...
+ ^
+-- trying to specify range for the hash partitioned table
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES FROM ('a', 1) TO ('z');
+ERROR: invalid bound specification for a hash partition
+LINE 1: ...BLE fail_part PARTITION OF hash_parted FOR VALUES FROM ('a',...
+ ^
+-- trying to specify list value for the hash partitioned table
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES IN (1000);
+ERROR: invalid bound specification for a hash partition
+LINE 1: ...BLE fail_part PARTITION OF hash_parted FOR VALUES IN (1000);
+ ^
+-- trying to create default partition for the hash partitioned table
+CREATE TABLE fail_default_part PARTITION OF hash_parted DEFAULT;
+ERROR: a hash-partitioned table may not have a default partition
+-- check if compatible with the specified parent
+-- cannot create as partition of a non-partitioned table
+CREATE TABLE unparted (
+ a int
+);
+CREATE TABLE fail_part PARTITION OF unparted FOR VALUES IN ('a');
+ERROR: "unparted" is not partitioned
+CREATE TABLE fail_part PARTITION OF unparted FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+ERROR: "unparted" is not partitioned
+DROP TABLE unparted;
+-- cannot create a permanent rel as partition of a temp rel
+CREATE TEMP TABLE temp_parted (
+ a int
+) PARTITION BY LIST (a);
+CREATE TABLE fail_part PARTITION OF temp_parted FOR VALUES IN ('a');
+ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted"
+DROP TABLE temp_parted;
+-- check for partition bound overlap and other invalid specifications
+CREATE TABLE list_parted2 (
+ a varchar
+) PARTITION BY LIST (a);
+CREATE TABLE part_null_z PARTITION OF list_parted2 FOR VALUES IN (null, 'z');
+CREATE TABLE part_ab PARTITION OF list_parted2 FOR VALUES IN ('a', 'b');
+CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT;
+CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN (null);
+ERROR: partition "fail_part" would overlap partition "part_null_z"
+LINE 1: ...LE fail_part PARTITION OF list_parted2 FOR VALUES IN (null);
+ ^
+CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('b', 'c');
+ERROR: partition "fail_part" would overlap partition "part_ab"
+LINE 1: ...ail_part PARTITION OF list_parted2 FOR VALUES IN ('b', 'c');
+ ^
+-- check default partition overlap
+INSERT INTO list_parted2 VALUES('X');
+CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('W', 'X', 'Y');
+ERROR: updated partition constraint for default partition "list_parted2_def" would be violated by some row
+CREATE TABLE range_parted2 (
+ a int
+) PARTITION BY RANGE (a);
+-- trying to create range partition with empty range
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (0);
+ERROR: empty range bound specified for partition "fail_part"
+LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (0);
+ ^
+DETAIL: Specified lower bound (1) is greater than or equal to upper bound (0).
+-- note that the range '[1, 1)' has no elements
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (1);
+ERROR: empty range bound specified for partition "fail_part"
+LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (1);
+ ^
+DETAIL: Specified lower bound (1) is greater than or equal to upper bound (1).
+CREATE TABLE part0 PARTITION OF range_parted2 FOR VALUES FROM (minvalue) TO (1);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (minvalue) TO (2);
+ERROR: partition "fail_part" would overlap partition "part0"
+LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (minvalue) ...
+ ^
+CREATE TABLE part1 PARTITION OF range_parted2 FOR VALUES FROM (1) TO (10);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (-1) TO (1);
+ERROR: partition "fail_part" would overlap partition "part0"
+LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (-1) TO (1)...
+ ^
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (9) TO (maxvalue);
+ERROR: partition "fail_part" would overlap partition "part1"
+LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (9) TO (max...
+ ^
+CREATE TABLE part2 PARTITION OF range_parted2 FOR VALUES FROM (20) TO (30);
+CREATE TABLE part3 PARTITION OF range_parted2 FOR VALUES FROM (30) TO (40);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (30);
+ERROR: partition "fail_part" would overlap partition "part2"
+LINE 1: ...art PARTITION OF range_parted2 FOR VALUES FROM (10) TO (30);
+ ^
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (50);
+ERROR: partition "fail_part" would overlap partition "part2"
+LINE 1: ...art PARTITION OF range_parted2 FOR VALUES FROM (10) TO (50);
+ ^
+-- Create a default partition for range partitioned table
+CREATE TABLE range2_default PARTITION OF range_parted2 DEFAULT;
+-- More than one default partition is not allowed, so this should give error
+CREATE TABLE fail_default_part PARTITION OF range_parted2 DEFAULT;
+ERROR: partition "fail_default_part" conflicts with existing default partition "range2_default"
+LINE 1: ... TABLE fail_default_part PARTITION OF range_parted2 DEFAULT;
+ ^
+-- Check if the range for default partitions overlap
+INSERT INTO range_parted2 VALUES (85);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (80) TO (90);
+ERROR: updated partition constraint for default partition "range2_default" would be violated by some row
+CREATE TABLE part4 PARTITION OF range_parted2 FOR VALUES FROM (90) TO (100);
+-- now check for multi-column range partition key
+CREATE TABLE range_parted3 (
+ a int,
+ b int
+) PARTITION BY RANGE (a, (b+1));
+CREATE TABLE part00 PARTITION OF range_parted3 FOR VALUES FROM (0, minvalue) TO (0, maxvalue);
+CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (0, minvalue) TO (0, 1);
+ERROR: partition "fail_part" would overlap partition "part00"
+LINE 1: ..._part PARTITION OF range_parted3 FOR VALUES FROM (0, minvalu...
+ ^
+CREATE TABLE part10 PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, 1);
+CREATE TABLE part11 PARTITION OF range_parted3 FOR VALUES FROM (1, 1) TO (1, 10);
+CREATE TABLE part12 PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, maxvalue);
+CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, 20);
+ERROR: partition "fail_part" would overlap partition "part12"
+LINE 1: ...rt PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1,...
+ ^
+CREATE TABLE range3_default PARTITION OF range_parted3 DEFAULT;
+-- cannot create a partition that says column b is allowed to range
+-- from -infinity to +infinity, while there exist partitions that have
+-- more specific ranges
+CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, maxvalue);
+ERROR: partition "fail_part" would overlap partition "part10"
+LINE 1: ..._part PARTITION OF range_parted3 FOR VALUES FROM (1, minvalu...
+ ^
+-- check for partition bound overlap and other invalid specifications for the hash partition
+CREATE TABLE hash_parted2 (
+ a varchar
+) PARTITION BY HASH (a);
+CREATE TABLE h2part_1 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 4, REMAINDER 2);
+CREATE TABLE h2part_2 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 0);
+CREATE TABLE h2part_3 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 4);
+CREATE TABLE h2part_4 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 5);
+-- overlap with part_4
+CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+ERROR: partition "fail_part" would overlap partition "h2part_4"
+LINE 1: ...LE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODU...
+ ^
+-- modulus must be greater than zero
+CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 0, REMAINDER 1);
+ERROR: modulus for hash partition must be an integer value greater than zero
+-- remainder must be greater than or equal to zero and less than modulus
+CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 8);
+ERROR: remainder for hash partition must be less than modulus
+-- check schema propagation from parent
+CREATE TABLE parted (
+ a text,
+ b int NOT NULL DEFAULT 0,
+ CONSTRAINT check_a CHECK (length(a) > 0)
+) PARTITION BY LIST (a);
+CREATE TABLE part_a PARTITION OF parted FOR VALUES IN ('a');
+-- only inherited attributes (never local ones)
+SELECT attname, attislocal, attinhcount FROM pg_attribute
+ WHERE attrelid = 'part_a'::regclass and attnum > 0
+ ORDER BY attnum;
+ attname | attislocal | attinhcount
+---------+------------+-------------
+ a | f | 1
+ b | f | 1
+(2 rows)
+
+-- able to specify column default, column constraint, and table constraint
+-- first check the "column specified more than once" error
+CREATE TABLE part_b PARTITION OF parted (
+ b NOT NULL,
+ b DEFAULT 1,
+ b CHECK (b >= 0),
+ CONSTRAINT check_a CHECK (length(a) > 0)
+) FOR VALUES IN ('b');
+ERROR: column "b" specified more than once
+CREATE TABLE part_b PARTITION OF parted (
+ b NOT NULL DEFAULT 1,
+ CONSTRAINT check_a CHECK (length(a) > 0),
+ CONSTRAINT check_b CHECK (b >= 0)
+) FOR VALUES IN ('b');
+NOTICE: merging constraint "check_a" with inherited definition
+-- conislocal should be false for any merged constraints, true otherwise
+SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::regclass ORDER BY conislocal, coninhcount;
+ conislocal | coninhcount
+------------+-------------
+ f | 1
+ t | 0
+(2 rows)
+
+-- Once check_b is added to the parent, it should be made non-local for part_b
+ALTER TABLE parted ADD CONSTRAINT check_b CHECK (b >= 0);
+NOTICE: merging constraint "check_b" with inherited definition
+SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::regclass;
+ conislocal | coninhcount
+------------+-------------
+ f | 1
+ f | 1
+(2 rows)
+
+-- Neither check_a nor check_b are droppable from part_b
+ALTER TABLE part_b DROP CONSTRAINT check_a;
+ERROR: cannot drop inherited constraint "check_a" of relation "part_b"
+ALTER TABLE part_b DROP CONSTRAINT check_b;
+ERROR: cannot drop inherited constraint "check_b" of relation "part_b"
+-- And dropping it from parted should leave no trace of them on part_b, unlike
+-- traditional inheritance where they will be left behind, because they would
+-- be local constraints.
+ALTER TABLE parted DROP CONSTRAINT check_a, DROP CONSTRAINT check_b;
+SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::regclass;
+ conislocal | coninhcount
+------------+-------------
+(0 rows)
+
+-- specify PARTITION BY for a partition
+CREATE TABLE fail_part_col_not_found PARTITION OF parted FOR VALUES IN ('c') PARTITION BY RANGE (c);
+ERROR: column "c" named in partition key does not exist
+LINE 1: ...TITION OF parted FOR VALUES IN ('c') PARTITION BY RANGE (c);
+ ^
+CREATE TABLE part_c PARTITION OF parted (b WITH OPTIONS NOT NULL DEFAULT 0) FOR VALUES IN ('c') PARTITION BY RANGE ((b));
+-- create a level-2 partition
+CREATE TABLE part_c_1_10 PARTITION OF part_c FOR VALUES FROM (1) TO (10);
+-- check that NOT NULL and default value are inherited correctly
+create table parted_notnull_inh_test (a int default 1, b int not null default 0) partition by list (a);
+create table parted_notnull_inh_test1 partition of parted_notnull_inh_test (a not null, b default 1) for values in (1);
+insert into parted_notnull_inh_test (b) values (null);
+ERROR: null value in column "b" of relation "parted_notnull_inh_test1" violates not-null constraint
+DETAIL: Failing row contains (1, null).
+-- note that while b's default is overriden, a's default is preserved
+\d parted_notnull_inh_test1
+ Table "public.parted_notnull_inh_test1"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | not null | 1
+ b | integer | | not null | 1
+Partition of: parted_notnull_inh_test FOR VALUES IN (1)
+
+drop table parted_notnull_inh_test;
+-- check that collations are assigned in partition bound expressions
+create table parted_boolean_col (a bool, b text) partition by list(a);
+create table parted_boolean_less partition of parted_boolean_col
+ for values in ('foo' < 'bar');
+create table parted_boolean_greater partition of parted_boolean_col
+ for values in ('foo' > 'bar');
+drop table parted_boolean_col;
+-- check for a conflicting COLLATE clause
+create table parted_collate_must_match (a text collate "C", b text collate "C")
+ partition by range (a);
+-- on the partition key
+create table parted_collate_must_match1 partition of parted_collate_must_match
+ (a collate "POSIX") for values from ('a') to ('m');
+-- on another column
+create table parted_collate_must_match2 partition of parted_collate_must_match
+ (b collate "POSIX") for values from ('m') to ('z');
+drop table parted_collate_must_match;
+-- check that non-matching collations for partition bound
+-- expressions are coerced to the right collation
+create table test_part_coll_posix (a text) partition by range (a collate "POSIX");
+-- ok, collation is implicitly coerced
+create table test_part_coll partition of test_part_coll_posix for values from ('a' collate "C") to ('g');
+-- ok
+create table test_part_coll2 partition of test_part_coll_posix for values from ('g') to ('m');
+-- ok, collation is implicitly coerced
+create table test_part_coll_cast partition of test_part_coll_posix for values from (name 'm' collate "C") to ('s');
+-- ok; partition collation silently overrides the default collation of type 'name'
+create table test_part_coll_cast2 partition of test_part_coll_posix for values from (name 's') to ('z');
+drop table test_part_coll_posix;
+-- Partition bound in describe output
+\d+ part_b
+ Table "public.part_b"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | not null | 1 | plain | |
+Partition of: parted FOR VALUES IN ('b')
+Partition constraint: ((a IS NOT NULL) AND (a = 'b'::text))
+
+-- Both partition bound and partition key in describe output
+\d+ part_c
+ Partitioned table "public.part_c"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | not null | 0 | plain | |
+Partition of: parted FOR VALUES IN ('c')
+Partition constraint: ((a IS NOT NULL) AND (a = 'c'::text))
+Partition key: RANGE (b)
+Partitions: part_c_1_10 FOR VALUES FROM (1) TO (10)
+
+-- a level-2 partition's constraint will include the parent's expressions
+\d+ part_c_1_10
+ Table "public.part_c_1_10"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | not null | 0 | plain | |
+Partition of: part_c FOR VALUES FROM (1) TO (10)
+Partition constraint: ((a IS NOT NULL) AND (a = 'c'::text) AND (b IS NOT NULL) AND (b >= 1) AND (b < 10))
+
+-- Show partition count in the parent's describe output
+-- Tempted to include \d+ output listing partitions with bound info but
+-- output could vary depending on the order in which partition oids are
+-- returned.
+\d parted
+ Partitioned table "public.parted"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | text | | |
+ b | integer | | not null | 0
+Partition key: LIST (a)
+Number of partitions: 3 (Use \d+ to list them.)
+
+\d hash_parted
+ Partitioned table "public.hash_parted"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+Partition key: HASH (a)
+Number of partitions: 4 (Use \d+ to list them.)
+
+-- check that we get the expected partition constraints
+CREATE TABLE range_parted4 (a int, b int, c int) PARTITION BY RANGE (abs(a), abs(b), c);
+CREATE TABLE unbounded_range_part PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE);
+\d+ unbounded_range_part
+ Table "public.unbounded_range_part"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | integer | | | | plain | |
+ b | integer | | | | plain | |
+ c | integer | | | | plain | |
+Partition of: range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE)
+Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL))
+
+DROP TABLE unbounded_range_part;
+CREATE TABLE range_parted4_1 PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (1, MAXVALUE, MAXVALUE);
+\d+ range_parted4_1
+ Table "public.range_parted4_1"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | integer | | | | plain | |
+ b | integer | | | | plain | |
+ c | integer | | | | plain | |
+Partition of: range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (1, MAXVALUE, MAXVALUE)
+Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND (abs(a) <= 1))
+
+CREATE TABLE range_parted4_2 PARTITION OF range_parted4 FOR VALUES FROM (3, 4, 5) TO (6, 7, MAXVALUE);
+\d+ range_parted4_2
+ Table "public.range_parted4_2"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | integer | | | | plain | |
+ b | integer | | | | plain | |
+ c | integer | | | | plain | |
+Partition of: range_parted4 FOR VALUES FROM (3, 4, 5) TO (6, 7, MAXVALUE)
+Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND ((abs(a) > 3) OR ((abs(a) = 3) AND (abs(b) > 4)) OR ((abs(a) = 3) AND (abs(b) = 4) AND (c >= 5))) AND ((abs(a) < 6) OR ((abs(a) = 6) AND (abs(b) <= 7))))
+
+CREATE TABLE range_parted4_3 PARTITION OF range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE);
+\d+ range_parted4_3
+ Table "public.range_parted4_3"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | integer | | | | plain | |
+ b | integer | | | | plain | |
+ c | integer | | | | plain | |
+Partition of: range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE)
+Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND ((abs(a) > 6) OR ((abs(a) = 6) AND (abs(b) >= 8))) AND (abs(a) <= 9))
+
+DROP TABLE range_parted4;
+-- user-defined operator class in partition key
+CREATE FUNCTION my_int4_sort(int4,int4) RETURNS int LANGUAGE sql
+ AS $$ SELECT CASE WHEN $1 = $2 THEN 0 WHEN $1 > $2 THEN 1 ELSE -1 END; $$;
+CREATE OPERATOR CLASS test_int4_ops FOR TYPE int4 USING btree AS
+ OPERATOR 1 < (int4,int4), OPERATOR 2 <= (int4,int4),
+ OPERATOR 3 = (int4,int4), OPERATOR 4 >= (int4,int4),
+ OPERATOR 5 > (int4,int4), FUNCTION 1 my_int4_sort(int4,int4);
+CREATE TABLE partkey_t (a int4) PARTITION BY RANGE (a test_int4_ops);
+CREATE TABLE partkey_t_1 PARTITION OF partkey_t FOR VALUES FROM (0) TO (1000);
+INSERT INTO partkey_t VALUES (100);
+INSERT INTO partkey_t VALUES (200);
+-- cleanup
+DROP TABLE parted, list_parted, range_parted, list_parted2, range_parted2, range_parted3;
+DROP TABLE partkey_t, hash_parted, hash_parted2;
+DROP OPERATOR CLASS test_int4_ops USING btree;
+DROP FUNCTION my_int4_sort(int4,int4);
+-- comments on partitioned tables columns
+CREATE TABLE parted_col_comment (a int, b text) PARTITION BY LIST (a);
+COMMENT ON TABLE parted_col_comment IS 'Am partitioned table';
+COMMENT ON COLUMN parted_col_comment.a IS 'Partition key';
+SELECT obj_description('parted_col_comment'::regclass);
+ obj_description
+----------------------
+ Am partitioned table
+(1 row)
+
+\d+ parted_col_comment
+ Partitioned table "public.parted_col_comment"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+---------------
+ a | integer | | | | plain | | Partition key
+ b | text | | | | extended | |
+Partition key: LIST (a)
+Number of partitions: 0
+
+DROP TABLE parted_col_comment;
+-- list partitioning on array type column
+CREATE TABLE arrlp (a int[]) PARTITION BY LIST (a);
+CREATE TABLE arrlp12 PARTITION OF arrlp FOR VALUES IN ('{1}', '{2}');
+\d+ arrlp12
+ Table "public.arrlp12"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+-----------+-----------+----------+---------+----------+--------------+-------------
+ a | integer[] | | | | extended | |
+Partition of: arrlp FOR VALUES IN ('{1}', '{2}')
+Partition constraint: ((a IS NOT NULL) AND ((a = '{1}'::integer[]) OR (a = '{2}'::integer[])))
+
+DROP TABLE arrlp;
+-- partition on boolean column
+create table boolspart (a bool) partition by list (a);
+create table boolspart_t partition of boolspart for values in (true);
+create table boolspart_f partition of boolspart for values in (false);
+\d+ boolspart
+ Partitioned table "public.boolspart"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | boolean | | | | plain | |
+Partition key: LIST (a)
+Partitions: boolspart_f FOR VALUES IN (false),
+ boolspart_t FOR VALUES IN (true)
+
+drop table boolspart;
+-- partitions mixing temporary and permanent relations
+create table perm_parted (a int) partition by list (a);
+create temporary table temp_parted (a int) partition by list (a);
+create table perm_part partition of temp_parted default; -- error
+ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted"
+create temp table temp_part partition of perm_parted default; -- error
+ERROR: cannot create a temporary relation as partition of permanent relation "perm_parted"
+create temp table temp_part partition of temp_parted default; -- ok
+drop table perm_parted cascade;
+drop table temp_parted cascade;
+-- check that adding partitions to a table while it is being used is prevented
+create table tab_part_create (a int) partition by list (a);
+create or replace function func_part_create() returns trigger
+ language plpgsql as $$
+ begin
+ execute 'create table tab_part_create_1 partition of tab_part_create for values in (1)';
+ return null;
+ end $$;
+create trigger trig_part_create before insert on tab_part_create
+ for each statement execute procedure func_part_create();
+insert into tab_part_create values (1);
+ERROR: cannot CREATE TABLE .. PARTITION OF "tab_part_create" because it is being used by active queries in this session
+CONTEXT: SQL statement "create table tab_part_create_1 partition of tab_part_create for values in (1)"
+PL/pgSQL function func_part_create() line 3 at EXECUTE
+drop table tab_part_create;
+drop function func_part_create();
+-- test using a volatile expression as partition bound
+create table volatile_partbound_test (partkey timestamp) partition by range (partkey);
+create table volatile_partbound_test1 partition of volatile_partbound_test for values from (minvalue) to (current_timestamp);
+create table volatile_partbound_test2 partition of volatile_partbound_test for values from (current_timestamp) to (maxvalue);
+-- this should go into the partition volatile_partbound_test2
+insert into volatile_partbound_test values (current_timestamp);
+select tableoid::regclass from volatile_partbound_test;
+ tableoid
+--------------------------
+ volatile_partbound_test2
+(1 row)
+
+drop table volatile_partbound_test;
+-- test the case where a check constraint on default partition allows
+-- to avoid scanning it when adding a new partition
+create table defcheck (a int, b int) partition by list (b);
+create table defcheck_def (a int, c int, b int);
+alter table defcheck_def drop c;
+alter table defcheck attach partition defcheck_def default;
+alter table defcheck_def add check (b <= 0 and b is not null);
+create table defcheck_1 partition of defcheck for values in (1, null);
+-- test that complex default partition constraints are enforced correctly
+insert into defcheck_def values (0, 0);
+create table defcheck_0 partition of defcheck for values in (0);
+ERROR: updated partition constraint for default partition "defcheck_def" would be violated by some row
+drop table defcheck;
+-- tests of column drop with partition tables and indexes using
+-- predicates and expressions.
+create table part_column_drop (
+ useless_1 int,
+ id int,
+ useless_2 int,
+ d int,
+ b int,
+ useless_3 int
+) partition by range (id);
+alter table part_column_drop drop column useless_1;
+alter table part_column_drop drop column useless_2;
+alter table part_column_drop drop column useless_3;
+create index part_column_drop_b_pred on part_column_drop(b) where b = 1;
+create index part_column_drop_b_expr on part_column_drop((b = 1));
+create index part_column_drop_d_pred on part_column_drop(d) where d = 2;
+create index part_column_drop_d_expr on part_column_drop((d = 2));
+create table part_column_drop_1_10 partition of
+ part_column_drop for values from (1) to (10);
+\d part_column_drop
+ Partitioned table "public.part_column_drop"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ id | integer | | |
+ d | integer | | |
+ b | integer | | |
+Partition key: RANGE (id)
+Indexes:
+ "part_column_drop_b_expr" btree ((b = 1))
+ "part_column_drop_b_pred" btree (b) WHERE b = 1
+ "part_column_drop_d_expr" btree ((d = 2))
+ "part_column_drop_d_pred" btree (d) WHERE d = 2
+Number of partitions: 1 (Use \d+ to list them.)
+
+\d part_column_drop_1_10
+ Table "public.part_column_drop_1_10"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ id | integer | | |
+ d | integer | | |
+ b | integer | | |
+Partition of: part_column_drop FOR VALUES FROM (1) TO (10)
+Indexes:
+ "part_column_drop_1_10_b_idx" btree (b) WHERE b = 1
+ "part_column_drop_1_10_d_idx" btree (d) WHERE d = 2
+ "part_column_drop_1_10_expr_idx" btree ((b = 1))
+ "part_column_drop_1_10_expr_idx1" btree ((d = 2))
+
+drop table part_column_drop;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/create_table.sql b/ydb/library/yql/tests/postgresql/original/cases/create_table.sql
new file mode 100644
index 0000000000..cc41f58ba2
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/create_table.sql
@@ -0,0 +1,977 @@
+--
+-- CREATE_TABLE
+--
+
+--
+-- CLASS DEFINITIONS
+--
+CREATE TABLE hobbies_r (
+ name text,
+ person text
+);
+
+CREATE TABLE equipment_r (
+ name text,
+ hobby text
+);
+
+CREATE TABLE onek (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+
+CREATE TABLE tenk1 (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+
+CREATE TABLE tenk2 (
+ unique1 int4,
+ unique2 int4,
+ two int4,
+ four int4,
+ ten int4,
+ twenty int4,
+ hundred int4,
+ thousand int4,
+ twothousand int4,
+ fivethous int4,
+ tenthous int4,
+ odd int4,
+ even int4,
+ stringu1 name,
+ stringu2 name,
+ string4 name
+);
+
+
+CREATE TABLE person (
+ name text,
+ age int4,
+ location point
+);
+
+
+CREATE TABLE emp (
+ salary int4,
+ manager name
+) INHERITS (person);
+
+
+CREATE TABLE student (
+ gpa float8
+) INHERITS (person);
+
+
+CREATE TABLE stud_emp (
+ percent int4
+) INHERITS (emp, student);
+
+
+CREATE TABLE city (
+ name name,
+ location box,
+ budget city_budget
+);
+
+CREATE TABLE dept (
+ dname name,
+ mgrname text
+);
+
+CREATE TABLE slow_emp4000 (
+ home_base box
+);
+
+CREATE TABLE fast_emp4000 (
+ home_base box
+);
+
+CREATE TABLE road (
+ name text,
+ thepath path
+);
+
+CREATE TABLE ihighway () INHERITS (road);
+
+CREATE TABLE shighway (
+ surface text
+) INHERITS (road);
+
+CREATE TABLE real_city (
+ pop int4,
+ cname text,
+ outline path
+);
+
+--
+-- test the "star" operators a bit more thoroughly -- this time,
+-- throw in lots of NULL fields...
+--
+-- a is the type root
+-- b and c inherit from a (one-level single inheritance)
+-- d inherits from b and c (two-level multiple inheritance)
+-- e inherits from c (two-level single inheritance)
+-- f inherits from e (three-level single inheritance)
+--
+CREATE TABLE a_star (
+ class char,
+ a int4
+);
+
+CREATE TABLE b_star (
+ b text
+) INHERITS (a_star);
+
+CREATE TABLE c_star (
+ c name
+) INHERITS (a_star);
+
+CREATE TABLE d_star (
+ d float8
+) INHERITS (b_star, c_star);
+
+CREATE TABLE e_star (
+ e int2
+) INHERITS (c_star);
+
+CREATE TABLE f_star (
+ f polygon
+) INHERITS (e_star);
+
+CREATE TABLE aggtest (
+ a int2,
+ b float4
+);
+
+CREATE TABLE hash_i4_heap (
+ seqno int4,
+ random int4
+);
+
+CREATE TABLE hash_name_heap (
+ seqno int4,
+ random name
+);
+
+CREATE TABLE hash_txt_heap (
+ seqno int4,
+ random text
+);
+
+CREATE TABLE hash_f8_heap (
+ seqno int4,
+ random float8
+);
+
+-- don't include the hash_ovfl_heap stuff in the distribution
+-- the data set is too large for what it's worth
+--
+-- CREATE TABLE hash_ovfl_heap (
+-- x int4,
+-- y int4
+-- );
+
+CREATE TABLE bt_i4_heap (
+ seqno int4,
+ random int4
+);
+
+CREATE TABLE bt_name_heap (
+ seqno name,
+ random int4
+);
+
+CREATE TABLE bt_txt_heap (
+ seqno text,
+ random int4
+);
+
+CREATE TABLE bt_f8_heap (
+ seqno float8,
+ random int4
+);
+
+CREATE TABLE array_op_test (
+ seqno int4,
+ i int4[],
+ t text[]
+);
+
+CREATE TABLE array_index_op_test (
+ seqno int4,
+ i int4[],
+ t text[]
+);
+
+CREATE TABLE testjsonb (
+ j jsonb
+);
+
+CREATE TABLE unknowntab (
+ u unknown -- fail
+);
+
+CREATE TYPE unknown_comptype AS (
+ u unknown -- fail
+);
+
+CREATE TABLE IF NOT EXISTS test_tsvector(
+ t text,
+ a tsvector
+);
+
+CREATE TABLE IF NOT EXISTS test_tsvector(
+ t text
+);
+
+-- invalid: non-lowercase quoted reloptions identifiers
+CREATE TABLE tas_case WITH ("Fillfactor" = 10) AS SELECT 1 a;
+
+CREATE UNLOGGED TABLE unlogged1 (a int primary key); -- OK
+CREATE TEMPORARY TABLE unlogged2 (a int primary key); -- OK
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname;
+REINDEX INDEX unlogged1_pkey;
+REINDEX INDEX unlogged2_pkey;
+SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname;
+DROP TABLE unlogged2;
+INSERT INTO unlogged1 VALUES (42);
+CREATE UNLOGGED TABLE public.unlogged2 (a int primary key); -- also OK
+CREATE UNLOGGED TABLE pg_temp.unlogged3 (a int primary key); -- not OK
+CREATE TABLE pg_temp.implicitly_temp (a int primary key); -- OK
+CREATE TEMP TABLE explicitly_temp (a int primary key); -- also OK
+CREATE TEMP TABLE pg_temp.doubly_temp (a int primary key); -- also OK
+CREATE TEMP TABLE public.temp_to_perm (a int primary key); -- not OK
+DROP TABLE unlogged1, public.unlogged2;
+
+CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+CREATE TABLE IF NOT EXISTS as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+DROP TABLE as_select1;
+
+PREPARE select1 AS SELECT 1 as a;
+CREATE TABLE as_select1 AS EXECUTE select1;
+CREATE TABLE as_select1 AS EXECUTE select1;
+SELECT * FROM as_select1;
+CREATE TABLE IF NOT EXISTS as_select1 AS EXECUTE select1;
+DROP TABLE as_select1;
+DEALLOCATE select1;
+
+-- create an extra wide table to test for issues related to that
+-- (temporarily hide query, to avoid the long CREATE TABLE stmt)
+\set ECHO none
+SELECT 'CREATE TABLE extra_wide_table(firstc text, '|| array_to_string(array_agg('c'||i||' bool'),',')||', lastc text);'
+FROM generate_series(1, 1100) g(i)
+\gexec
+\set ECHO all
+INSERT INTO extra_wide_table(firstc, lastc) VALUES('first col', 'last col');
+SELECT firstc, lastc FROM extra_wide_table;
+
+-- check that tables with oids cannot be created anymore
+CREATE TABLE withoid() WITH OIDS;
+CREATE TABLE withoid() WITH (oids);
+CREATE TABLE withoid() WITH (oids = true);
+
+-- but explicitly not adding oids is still supported
+CREATE TEMP TABLE withoutoid() WITHOUT OIDS; DROP TABLE withoutoid;
+CREATE TEMP TABLE withoutoid() WITH (oids = false); DROP TABLE withoutoid;
+
+-- check restriction with default expressions
+-- invalid use of column reference in default expressions
+CREATE TABLE default_expr_column (id int DEFAULT (id));
+CREATE TABLE default_expr_column (id int DEFAULT (bar.id));
+CREATE TABLE default_expr_agg_column (id int DEFAULT (avg(id)));
+-- invalid column definition
+CREATE TABLE default_expr_non_column (a int DEFAULT (avg(non_existent)));
+-- invalid use of aggregate
+CREATE TABLE default_expr_agg (a int DEFAULT (avg(1)));
+-- invalid use of subquery
+CREATE TABLE default_expr_agg (a int DEFAULT (select 1));
+-- invalid use of set-returning function
+CREATE TABLE default_expr_agg (a int DEFAULT (generate_series(1,3)));
+
+-- Verify that subtransaction rollback restores rd_createSubid.
+BEGIN;
+CREATE TABLE remember_create_subid (c int);
+SAVEPOINT q; DROP TABLE remember_create_subid; ROLLBACK TO q;
+COMMIT;
+DROP TABLE remember_create_subid;
+
+-- Verify that subtransaction rollback restores rd_firstRelfilenodeSubid.
+CREATE TABLE remember_node_subid (c int);
+BEGIN;
+ALTER TABLE remember_node_subid ALTER c TYPE bigint;
+SAVEPOINT q; DROP TABLE remember_node_subid; ROLLBACK TO q;
+COMMIT;
+DROP TABLE remember_node_subid;
+
+--
+-- Partitioned tables
+--
+
+-- cannot combine INHERITS and PARTITION BY (although grammar allows)
+CREATE TABLE partitioned (
+ a int
+) INHERITS (some_table) PARTITION BY LIST (a);
+
+-- cannot use more than 1 column as partition key for list partitioned table
+CREATE TABLE partitioned (
+ a1 int,
+ a2 int
+) PARTITION BY LIST (a1, a2); -- fail
+
+-- unsupported constraint type for partitioned tables
+CREATE TABLE partitioned (
+ a int,
+ EXCLUDE USING gist (a WITH &&)
+) PARTITION BY RANGE (a);
+
+-- prevent using prohibited expressions in the key
+CREATE FUNCTION retset (a int) RETURNS SETOF int AS $$ SELECT 1; $$ LANGUAGE SQL IMMUTABLE;
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (retset(a));
+DROP FUNCTION retset(int);
+
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE ((avg(a)));
+
+CREATE TABLE partitioned (
+ a int,
+ b int
+) PARTITION BY RANGE ((avg(a) OVER (PARTITION BY b)));
+
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY LIST ((a LIKE (SELECT 1)));
+
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE ((42));
+
+CREATE FUNCTION const_func () RETURNS int AS $$ SELECT 1; $$ LANGUAGE SQL IMMUTABLE;
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (const_func());
+DROP FUNCTION const_func();
+
+-- only accept valid partitioning strategy
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY MAGIC (a);
+
+-- specified column must be present in the table
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (b);
+
+-- cannot use system columns in partition key
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (xmin);
+
+-- cannot use pseudotypes
+CREATE TABLE partitioned (
+ a int,
+ b int
+) PARTITION BY RANGE (((a, b)));
+CREATE TABLE partitioned (
+ a int,
+ b int
+) PARTITION BY RANGE (a, ('unknown'));
+
+-- functions in key must be immutable
+CREATE FUNCTION immut_func (a int) RETURNS int AS $$ SELECT a + random()::int; $$ LANGUAGE SQL;
+CREATE TABLE partitioned (
+ a int
+) PARTITION BY RANGE (immut_func(a));
+DROP FUNCTION immut_func(int);
+
+-- prevent using columns of unsupported types in key (type must have a btree operator class)
+CREATE TABLE partitioned (
+ a point
+) PARTITION BY LIST (a);
+CREATE TABLE partitioned (
+ a point
+) PARTITION BY LIST (a point_ops);
+CREATE TABLE partitioned (
+ a point
+) PARTITION BY RANGE (a);
+CREATE TABLE partitioned (
+ a point
+) PARTITION BY RANGE (a point_ops);
+
+-- cannot add NO INHERIT constraints to partitioned tables
+CREATE TABLE partitioned (
+ a int,
+ CONSTRAINT check_a CHECK (a > 0) NO INHERIT
+) PARTITION BY RANGE (a);
+
+-- some checks after successful creation of a partitioned table
+CREATE FUNCTION plusone(a int) RETURNS INT AS $$ SELECT a+1; $$ LANGUAGE SQL;
+
+CREATE TABLE partitioned (
+ a int,
+ b int,
+ c text,
+ d text
+) PARTITION BY RANGE (a oid_ops, plusone(b), c collate "default", d collate "C");
+
+-- check relkind
+SELECT relkind FROM pg_class WHERE relname = 'partitioned';
+
+-- prevent a function referenced in partition key from being dropped
+DROP FUNCTION plusone(int);
+
+-- partitioned table cannot participate in regular inheritance
+CREATE TABLE partitioned2 (
+ a int,
+ b text
+) PARTITION BY RANGE ((a+1), substr(b, 1, 5));
+CREATE TABLE fail () INHERITS (partitioned2);
+
+-- Partition key in describe output
+\d partitioned
+\d+ partitioned2
+
+INSERT INTO partitioned2 VALUES (1, 'hello');
+CREATE TABLE part2_1 PARTITION OF partitioned2 FOR VALUES FROM (-1, 'aaaaa') TO (100, 'ccccc');
+\d+ part2_1
+
+DROP TABLE partitioned, partitioned2;
+
+-- check reference to partitioned table's rowtype in partition descriptor
+create table partitioned (a int, b int)
+ partition by list ((row(a, b)::partitioned));
+create table partitioned1
+ partition of partitioned for values in ('(1,2)'::partitioned);
+create table partitioned2
+ partition of partitioned for values in ('(2,4)'::partitioned);
+explain (costs off)
+select * from partitioned where row(a,b)::partitioned = '(1,2)'::partitioned;
+drop table partitioned;
+
+-- whole-row Var in partition key works too
+create table partitioned (a int, b int)
+ partition by list ((partitioned));
+create table partitioned1
+ partition of partitioned for values in ('(1,2)');
+create table partitioned2
+ partition of partitioned for values in ('(2,4)');
+explain (costs off)
+select * from partitioned where partitioned = '(1,2)'::partitioned;
+\d+ partitioned1
+drop table partitioned;
+
+-- check that dependencies of partition columns are handled correctly
+create domain intdom1 as int;
+
+create table partitioned (
+ a intdom1,
+ b text
+) partition by range (a);
+
+alter table partitioned drop column a; -- fail
+
+drop domain intdom1; -- fail, requires cascade
+
+drop domain intdom1 cascade;
+
+table partitioned; -- gone
+
+-- likewise for columns used in partition expressions
+create domain intdom1 as int;
+
+create table partitioned (
+ a intdom1,
+ b text
+) partition by range (plusone(a));
+
+alter table partitioned drop column a; -- fail
+
+drop domain intdom1; -- fail, requires cascade
+
+drop domain intdom1 cascade;
+
+table partitioned; -- gone
+
+
+--
+-- Partitions
+--
+
+-- check partition bound syntax
+
+CREATE TABLE list_parted (
+ a int
+) PARTITION BY LIST (a);
+CREATE TABLE part_p1 PARTITION OF list_parted FOR VALUES IN ('1');
+CREATE TABLE part_p2 PARTITION OF list_parted FOR VALUES IN (2);
+CREATE TABLE part_p3 PARTITION OF list_parted FOR VALUES IN ((2+1));
+CREATE TABLE part_null PARTITION OF list_parted FOR VALUES IN (null);
+\d+ list_parted
+
+-- forbidden expressions for partition bound with list partitioned table
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (somename);
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (somename.somename);
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (a);
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(a));
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(somename));
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(1));
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((select 1));
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (generate_series(4, 6));
+CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((1+1) collate "POSIX");
+
+-- syntax does not allow empty list of values for list partitions
+CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES IN ();
+-- trying to specify range for list partitioned table
+CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES FROM (1) TO (2);
+-- trying to specify modulus and remainder for list partitioned table
+CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1);
+
+-- check default partition cannot be created more than once
+CREATE TABLE part_default PARTITION OF list_parted DEFAULT;
+CREATE TABLE fail_default_part PARTITION OF list_parted DEFAULT;
+
+-- specified literal can't be cast to the partition column data type
+CREATE TABLE bools (
+ a bool
+) PARTITION BY LIST (a);
+CREATE TABLE bools_true PARTITION OF bools FOR VALUES IN (1);
+DROP TABLE bools;
+
+-- specified literal can be cast, and the cast might not be immutable
+CREATE TABLE moneyp (
+ a money
+) PARTITION BY LIST (a);
+CREATE TABLE moneyp_10 PARTITION OF moneyp FOR VALUES IN (10);
+CREATE TABLE moneyp_11 PARTITION OF moneyp FOR VALUES IN ('11');
+CREATE TABLE moneyp_12 PARTITION OF moneyp FOR VALUES IN (to_char(12, '99')::int);
+DROP TABLE moneyp;
+
+-- cast is immutable
+CREATE TABLE bigintp (
+ a bigint
+) PARTITION BY LIST (a);
+CREATE TABLE bigintp_10 PARTITION OF bigintp FOR VALUES IN (10);
+-- fails due to overlap:
+CREATE TABLE bigintp_10_2 PARTITION OF bigintp FOR VALUES IN ('10');
+DROP TABLE bigintp;
+
+CREATE TABLE range_parted (
+ a date
+) PARTITION BY RANGE (a);
+
+-- forbidden expressions for partition bounds with range partitioned table
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (somename) TO ('2019-01-01');
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (somename.somename) TO ('2019-01-01');
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (a) TO ('2019-01-01');
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (max(a)) TO ('2019-01-01');
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (max(somename)) TO ('2019-01-01');
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (max('2019-02-01'::date)) TO ('2019-01-01');
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM ((select 1)) TO ('2019-01-01');
+CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted
+ FOR VALUES FROM (generate_series(1, 3)) TO ('2019-01-01');
+
+-- trying to specify list for range partitioned table
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES IN ('a');
+-- trying to specify modulus and remainder for range partitioned table
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1);
+-- each of start and end bounds must have same number of values as the
+-- length of the partition key
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('z');
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM ('a') TO ('z', 1);
+
+-- cannot specify null values in range bounds
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM (null) TO (maxvalue);
+
+-- trying to specify modulus and remainder for range partitioned table
+CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1);
+
+-- check partition bound syntax for the hash partition
+CREATE TABLE hash_parted (
+ a int
+) PARTITION BY HASH (a);
+CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 10, REMAINDER 0);
+CREATE TABLE hpart_2 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 50, REMAINDER 1);
+CREATE TABLE hpart_3 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 200, REMAINDER 2);
+CREATE TABLE hpart_4 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 10, REMAINDER 3);
+-- modulus 25 is factor of modulus of 50 but 10 is not a factor of 25.
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 25, REMAINDER 3);
+-- previous modulus 50 is factor of 150 but this modulus is not a factor of next modulus 200.
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 150, REMAINDER 3);
+-- overlapping remainders
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 100, REMAINDER 3);
+-- trying to specify range for the hash partitioned table
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES FROM ('a', 1) TO ('z');
+-- trying to specify list value for the hash partitioned table
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES IN (1000);
+
+-- trying to create default partition for the hash partitioned table
+CREATE TABLE fail_default_part PARTITION OF hash_parted DEFAULT;
+
+-- check if compatible with the specified parent
+
+-- cannot create as partition of a non-partitioned table
+CREATE TABLE unparted (
+ a int
+);
+CREATE TABLE fail_part PARTITION OF unparted FOR VALUES IN ('a');
+CREATE TABLE fail_part PARTITION OF unparted FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+DROP TABLE unparted;
+
+-- cannot create a permanent rel as partition of a temp rel
+CREATE TEMP TABLE temp_parted (
+ a int
+) PARTITION BY LIST (a);
+CREATE TABLE fail_part PARTITION OF temp_parted FOR VALUES IN ('a');
+DROP TABLE temp_parted;
+
+-- check for partition bound overlap and other invalid specifications
+
+CREATE TABLE list_parted2 (
+ a varchar
+) PARTITION BY LIST (a);
+CREATE TABLE part_null_z PARTITION OF list_parted2 FOR VALUES IN (null, 'z');
+CREATE TABLE part_ab PARTITION OF list_parted2 FOR VALUES IN ('a', 'b');
+CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT;
+
+CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN (null);
+CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('b', 'c');
+-- check default partition overlap
+INSERT INTO list_parted2 VALUES('X');
+CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('W', 'X', 'Y');
+
+CREATE TABLE range_parted2 (
+ a int
+) PARTITION BY RANGE (a);
+
+-- trying to create range partition with empty range
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (0);
+-- note that the range '[1, 1)' has no elements
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (1);
+
+CREATE TABLE part0 PARTITION OF range_parted2 FOR VALUES FROM (minvalue) TO (1);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (minvalue) TO (2);
+CREATE TABLE part1 PARTITION OF range_parted2 FOR VALUES FROM (1) TO (10);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (-1) TO (1);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (9) TO (maxvalue);
+CREATE TABLE part2 PARTITION OF range_parted2 FOR VALUES FROM (20) TO (30);
+CREATE TABLE part3 PARTITION OF range_parted2 FOR VALUES FROM (30) TO (40);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (30);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (50);
+
+-- Create a default partition for range partitioned table
+CREATE TABLE range2_default PARTITION OF range_parted2 DEFAULT;
+
+-- More than one default partition is not allowed, so this should give error
+CREATE TABLE fail_default_part PARTITION OF range_parted2 DEFAULT;
+
+-- Check if the range for default partitions overlap
+INSERT INTO range_parted2 VALUES (85);
+CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (80) TO (90);
+CREATE TABLE part4 PARTITION OF range_parted2 FOR VALUES FROM (90) TO (100);
+
+-- now check for multi-column range partition key
+CREATE TABLE range_parted3 (
+ a int,
+ b int
+) PARTITION BY RANGE (a, (b+1));
+
+CREATE TABLE part00 PARTITION OF range_parted3 FOR VALUES FROM (0, minvalue) TO (0, maxvalue);
+CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (0, minvalue) TO (0, 1);
+
+CREATE TABLE part10 PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, 1);
+CREATE TABLE part11 PARTITION OF range_parted3 FOR VALUES FROM (1, 1) TO (1, 10);
+CREATE TABLE part12 PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, maxvalue);
+CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, 20);
+CREATE TABLE range3_default PARTITION OF range_parted3 DEFAULT;
+
+-- cannot create a partition that says column b is allowed to range
+-- from -infinity to +infinity, while there exist partitions that have
+-- more specific ranges
+CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, maxvalue);
+
+-- check for partition bound overlap and other invalid specifications for the hash partition
+CREATE TABLE hash_parted2 (
+ a varchar
+) PARTITION BY HASH (a);
+CREATE TABLE h2part_1 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 4, REMAINDER 2);
+CREATE TABLE h2part_2 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 0);
+CREATE TABLE h2part_3 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 4);
+CREATE TABLE h2part_4 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 5);
+-- overlap with part_4
+CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+-- modulus must be greater than zero
+CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 0, REMAINDER 1);
+-- remainder must be greater than or equal to zero and less than modulus
+CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 8);
+
+-- check schema propagation from parent
+
+CREATE TABLE parted (
+ a text,
+ b int NOT NULL DEFAULT 0,
+ CONSTRAINT check_a CHECK (length(a) > 0)
+) PARTITION BY LIST (a);
+
+CREATE TABLE part_a PARTITION OF parted FOR VALUES IN ('a');
+
+-- only inherited attributes (never local ones)
+SELECT attname, attislocal, attinhcount FROM pg_attribute
+ WHERE attrelid = 'part_a'::regclass and attnum > 0
+ ORDER BY attnum;
+
+-- able to specify column default, column constraint, and table constraint
+
+-- first check the "column specified more than once" error
+CREATE TABLE part_b PARTITION OF parted (
+ b NOT NULL,
+ b DEFAULT 1,
+ b CHECK (b >= 0),
+ CONSTRAINT check_a CHECK (length(a) > 0)
+) FOR VALUES IN ('b');
+
+CREATE TABLE part_b PARTITION OF parted (
+ b NOT NULL DEFAULT 1,
+ CONSTRAINT check_a CHECK (length(a) > 0),
+ CONSTRAINT check_b CHECK (b >= 0)
+) FOR VALUES IN ('b');
+-- conislocal should be false for any merged constraints, true otherwise
+SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::regclass ORDER BY conislocal, coninhcount;
+
+-- Once check_b is added to the parent, it should be made non-local for part_b
+ALTER TABLE parted ADD CONSTRAINT check_b CHECK (b >= 0);
+SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::regclass;
+
+-- Neither check_a nor check_b are droppable from part_b
+ALTER TABLE part_b DROP CONSTRAINT check_a;
+ALTER TABLE part_b DROP CONSTRAINT check_b;
+
+-- And dropping it from parted should leave no trace of them on part_b, unlike
+-- traditional inheritance where they will be left behind, because they would
+-- be local constraints.
+ALTER TABLE parted DROP CONSTRAINT check_a, DROP CONSTRAINT check_b;
+SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::regclass;
+
+-- specify PARTITION BY for a partition
+CREATE TABLE fail_part_col_not_found PARTITION OF parted FOR VALUES IN ('c') PARTITION BY RANGE (c);
+CREATE TABLE part_c PARTITION OF parted (b WITH OPTIONS NOT NULL DEFAULT 0) FOR VALUES IN ('c') PARTITION BY RANGE ((b));
+
+-- create a level-2 partition
+CREATE TABLE part_c_1_10 PARTITION OF part_c FOR VALUES FROM (1) TO (10);
+
+-- check that NOT NULL and default value are inherited correctly
+create table parted_notnull_inh_test (a int default 1, b int not null default 0) partition by list (a);
+create table parted_notnull_inh_test1 partition of parted_notnull_inh_test (a not null, b default 1) for values in (1);
+insert into parted_notnull_inh_test (b) values (null);
+-- note that while b's default is overriden, a's default is preserved
+\d parted_notnull_inh_test1
+drop table parted_notnull_inh_test;
+
+-- check that collations are assigned in partition bound expressions
+create table parted_boolean_col (a bool, b text) partition by list(a);
+create table parted_boolean_less partition of parted_boolean_col
+ for values in ('foo' < 'bar');
+create table parted_boolean_greater partition of parted_boolean_col
+ for values in ('foo' > 'bar');
+drop table parted_boolean_col;
+
+-- check for a conflicting COLLATE clause
+create table parted_collate_must_match (a text collate "C", b text collate "C")
+ partition by range (a);
+-- on the partition key
+create table parted_collate_must_match1 partition of parted_collate_must_match
+ (a collate "POSIX") for values from ('a') to ('m');
+-- on another column
+create table parted_collate_must_match2 partition of parted_collate_must_match
+ (b collate "POSIX") for values from ('m') to ('z');
+drop table parted_collate_must_match;
+
+-- check that non-matching collations for partition bound
+-- expressions are coerced to the right collation
+
+create table test_part_coll_posix (a text) partition by range (a collate "POSIX");
+-- ok, collation is implicitly coerced
+create table test_part_coll partition of test_part_coll_posix for values from ('a' collate "C") to ('g');
+-- ok
+create table test_part_coll2 partition of test_part_coll_posix for values from ('g') to ('m');
+-- ok, collation is implicitly coerced
+create table test_part_coll_cast partition of test_part_coll_posix for values from (name 'm' collate "C") to ('s');
+-- ok; partition collation silently overrides the default collation of type 'name'
+create table test_part_coll_cast2 partition of test_part_coll_posix for values from (name 's') to ('z');
+
+drop table test_part_coll_posix;
+
+-- Partition bound in describe output
+\d+ part_b
+
+-- Both partition bound and partition key in describe output
+\d+ part_c
+
+-- a level-2 partition's constraint will include the parent's expressions
+\d+ part_c_1_10
+
+-- Show partition count in the parent's describe output
+-- Tempted to include \d+ output listing partitions with bound info but
+-- output could vary depending on the order in which partition oids are
+-- returned.
+\d parted
+\d hash_parted
+
+-- check that we get the expected partition constraints
+CREATE TABLE range_parted4 (a int, b int, c int) PARTITION BY RANGE (abs(a), abs(b), c);
+CREATE TABLE unbounded_range_part PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE);
+\d+ unbounded_range_part
+DROP TABLE unbounded_range_part;
+CREATE TABLE range_parted4_1 PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (1, MAXVALUE, MAXVALUE);
+\d+ range_parted4_1
+CREATE TABLE range_parted4_2 PARTITION OF range_parted4 FOR VALUES FROM (3, 4, 5) TO (6, 7, MAXVALUE);
+\d+ range_parted4_2
+CREATE TABLE range_parted4_3 PARTITION OF range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE);
+\d+ range_parted4_3
+DROP TABLE range_parted4;
+
+-- user-defined operator class in partition key
+CREATE FUNCTION my_int4_sort(int4,int4) RETURNS int LANGUAGE sql
+ AS $$ SELECT CASE WHEN $1 = $2 THEN 0 WHEN $1 > $2 THEN 1 ELSE -1 END; $$;
+CREATE OPERATOR CLASS test_int4_ops FOR TYPE int4 USING btree AS
+ OPERATOR 1 < (int4,int4), OPERATOR 2 <= (int4,int4),
+ OPERATOR 3 = (int4,int4), OPERATOR 4 >= (int4,int4),
+ OPERATOR 5 > (int4,int4), FUNCTION 1 my_int4_sort(int4,int4);
+CREATE TABLE partkey_t (a int4) PARTITION BY RANGE (a test_int4_ops);
+CREATE TABLE partkey_t_1 PARTITION OF partkey_t FOR VALUES FROM (0) TO (1000);
+INSERT INTO partkey_t VALUES (100);
+INSERT INTO partkey_t VALUES (200);
+
+-- cleanup
+DROP TABLE parted, list_parted, range_parted, list_parted2, range_parted2, range_parted3;
+DROP TABLE partkey_t, hash_parted, hash_parted2;
+DROP OPERATOR CLASS test_int4_ops USING btree;
+DROP FUNCTION my_int4_sort(int4,int4);
+
+-- comments on partitioned tables columns
+CREATE TABLE parted_col_comment (a int, b text) PARTITION BY LIST (a);
+COMMENT ON TABLE parted_col_comment IS 'Am partitioned table';
+COMMENT ON COLUMN parted_col_comment.a IS 'Partition key';
+SELECT obj_description('parted_col_comment'::regclass);
+\d+ parted_col_comment
+DROP TABLE parted_col_comment;
+
+-- list partitioning on array type column
+CREATE TABLE arrlp (a int[]) PARTITION BY LIST (a);
+CREATE TABLE arrlp12 PARTITION OF arrlp FOR VALUES IN ('{1}', '{2}');
+\d+ arrlp12
+DROP TABLE arrlp;
+
+-- partition on boolean column
+create table boolspart (a bool) partition by list (a);
+create table boolspart_t partition of boolspart for values in (true);
+create table boolspart_f partition of boolspart for values in (false);
+\d+ boolspart
+drop table boolspart;
+
+-- partitions mixing temporary and permanent relations
+create table perm_parted (a int) partition by list (a);
+create temporary table temp_parted (a int) partition by list (a);
+create table perm_part partition of temp_parted default; -- error
+create temp table temp_part partition of perm_parted default; -- error
+create temp table temp_part partition of temp_parted default; -- ok
+drop table perm_parted cascade;
+drop table temp_parted cascade;
+
+-- check that adding partitions to a table while it is being used is prevented
+create table tab_part_create (a int) partition by list (a);
+create or replace function func_part_create() returns trigger
+ language plpgsql as $$
+ begin
+ execute 'create table tab_part_create_1 partition of tab_part_create for values in (1)';
+ return null;
+ end $$;
+create trigger trig_part_create before insert on tab_part_create
+ for each statement execute procedure func_part_create();
+insert into tab_part_create values (1);
+drop table tab_part_create;
+drop function func_part_create();
+
+-- test using a volatile expression as partition bound
+create table volatile_partbound_test (partkey timestamp) partition by range (partkey);
+create table volatile_partbound_test1 partition of volatile_partbound_test for values from (minvalue) to (current_timestamp);
+create table volatile_partbound_test2 partition of volatile_partbound_test for values from (current_timestamp) to (maxvalue);
+-- this should go into the partition volatile_partbound_test2
+insert into volatile_partbound_test values (current_timestamp);
+select tableoid::regclass from volatile_partbound_test;
+drop table volatile_partbound_test;
+
+-- test the case where a check constraint on default partition allows
+-- to avoid scanning it when adding a new partition
+create table defcheck (a int, b int) partition by list (b);
+create table defcheck_def (a int, c int, b int);
+alter table defcheck_def drop c;
+alter table defcheck attach partition defcheck_def default;
+alter table defcheck_def add check (b <= 0 and b is not null);
+create table defcheck_1 partition of defcheck for values in (1, null);
+
+-- test that complex default partition constraints are enforced correctly
+insert into defcheck_def values (0, 0);
+create table defcheck_0 partition of defcheck for values in (0);
+drop table defcheck;
+
+-- tests of column drop with partition tables and indexes using
+-- predicates and expressions.
+create table part_column_drop (
+ useless_1 int,
+ id int,
+ useless_2 int,
+ d int,
+ b int,
+ useless_3 int
+) partition by range (id);
+alter table part_column_drop drop column useless_1;
+alter table part_column_drop drop column useless_2;
+alter table part_column_drop drop column useless_3;
+create index part_column_drop_b_pred on part_column_drop(b) where b = 1;
+create index part_column_drop_b_expr on part_column_drop((b = 1));
+create index part_column_drop_d_pred on part_column_drop(d) where d = 2;
+create index part_column_drop_d_expr on part_column_drop((d = 2));
+create table part_column_drop_1_10 partition of
+ part_column_drop for values from (1) to (10);
+\d part_column_drop
+\d part_column_drop_1_10
+drop table part_column_drop;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/date.out b/ydb/library/yql/tests/postgresql/original/cases/date.out
new file mode 100644
index 0000000000..c8b0566ff4
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/date.out
@@ -0,0 +1,1497 @@
+--
+-- DATE
+--
+CREATE TABLE DATE_TBL (f1 date);
+INSERT INTO DATE_TBL VALUES ('1957-04-09');
+INSERT INTO DATE_TBL VALUES ('1957-06-13');
+INSERT INTO DATE_TBL VALUES ('1996-02-28');
+INSERT INTO DATE_TBL VALUES ('1996-02-29');
+INSERT INTO DATE_TBL VALUES ('1996-03-01');
+INSERT INTO DATE_TBL VALUES ('1996-03-02');
+INSERT INTO DATE_TBL VALUES ('1997-02-28');
+INSERT INTO DATE_TBL VALUES ('1997-02-29');
+ERROR: date/time field value out of range: "1997-02-29"
+LINE 1: INSERT INTO DATE_TBL VALUES ('1997-02-29');
+ ^
+INSERT INTO DATE_TBL VALUES ('1997-03-01');
+INSERT INTO DATE_TBL VALUES ('1997-03-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-01');
+INSERT INTO DATE_TBL VALUES ('2000-04-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-03');
+INSERT INTO DATE_TBL VALUES ('2038-04-08');
+INSERT INTO DATE_TBL VALUES ('2039-04-09');
+INSERT INTO DATE_TBL VALUES ('2040-04-10');
+INSERT INTO DATE_TBL VALUES ('2040-04-10 BC');
+SELECT f1 FROM DATE_TBL;
+ f1
+---------------
+ 04-09-1957
+ 06-13-1957
+ 02-28-1996
+ 02-29-1996
+ 03-01-1996
+ 03-02-1996
+ 02-28-1997
+ 03-01-1997
+ 03-02-1997
+ 04-01-2000
+ 04-02-2000
+ 04-03-2000
+ 04-08-2038
+ 04-09-2039
+ 04-10-2040
+ 04-10-2040 BC
+(16 rows)
+
+SELECT f1 FROM DATE_TBL WHERE f1 < '2000-01-01';
+ f1
+---------------
+ 04-09-1957
+ 06-13-1957
+ 02-28-1996
+ 02-29-1996
+ 03-01-1996
+ 03-02-1996
+ 02-28-1997
+ 03-01-1997
+ 03-02-1997
+ 04-10-2040 BC
+(10 rows)
+
+SELECT f1 FROM DATE_TBL
+ WHERE f1 BETWEEN '2000-01-01' AND '2001-01-01';
+ f1
+------------
+ 04-01-2000
+ 04-02-2000
+ 04-03-2000
+(3 rows)
+
+--
+-- Check all the documented input formats
+--
+SET datestyle TO iso; -- display results in ISO
+SET datestyle TO ymd;
+SELECT date 'January 8, 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-18';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '1/8/1999';
+ERROR: date/time field value out of range: "1/8/1999"
+LINE 1: SELECT date '1/8/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1/18/1999';
+ERROR: date/time field value out of range: "1/18/1999"
+LINE 1: SELECT date '1/18/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '18/1/1999';
+ERROR: date/time field value out of range: "18/1/1999"
+LINE 1: SELECT date '18/1/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01/02/03';
+ date
+------------
+ 2001-02-03
+(1 row)
+
+SELECT date '19990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999.008';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'J2451187';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'January 8, 99 BC';
+ERROR: date/time field value out of range: "January 8, 99 BC"
+LINE 1: SELECT date 'January 8, 99 BC';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '99-Jan-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-Jan-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-99';
+ERROR: date/time field value out of range: "08-Jan-99"
+LINE 1: SELECT date '08-Jan-99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '08-Jan-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-99';
+ERROR: date/time field value out of range: "Jan-08-99"
+LINE 1: SELECT date 'Jan-08-99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date 'Jan-08-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-08-Jan';
+ERROR: invalid input syntax for type date: "99-08-Jan"
+LINE 1: SELECT date '99-08-Jan';
+ ^
+SELECT date '1999-08-Jan';
+ERROR: invalid input syntax for type date: "1999-08-Jan"
+LINE 1: SELECT date '1999-08-Jan';
+ ^
+SELECT date '99 Jan 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999 Jan 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 99';
+ERROR: date/time field value out of range: "08 Jan 99"
+LINE 1: SELECT date '08 Jan 99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '08 Jan 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 99';
+ERROR: date/time field value out of range: "Jan 08 99"
+LINE 1: SELECT date 'Jan 08 99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date 'Jan 08 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99 08 Jan';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999 08 Jan';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-01-99';
+ERROR: date/time field value out of range: "08-01-99"
+LINE 1: SELECT date '08-01-99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '08-01-1999';
+ERROR: date/time field value out of range: "08-01-1999"
+LINE 1: SELECT date '08-01-1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01-08-99';
+ERROR: date/time field value out of range: "01-08-99"
+LINE 1: SELECT date '01-08-99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01-08-1999';
+ERROR: date/time field value out of range: "01-08-1999"
+LINE 1: SELECT date '01-08-1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '99-08-01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '1999-08-01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99 01 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999 01 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 01 99';
+ERROR: date/time field value out of range: "08 01 99"
+LINE 1: SELECT date '08 01 99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '08 01 1999';
+ERROR: date/time field value out of range: "08 01 1999"
+LINE 1: SELECT date '08 01 1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01 08 99';
+ERROR: date/time field value out of range: "01 08 99"
+LINE 1: SELECT date '01 08 99';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01 08 1999';
+ERROR: date/time field value out of range: "01 08 1999"
+LINE 1: SELECT date '01 08 1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '99 08 01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '1999 08 01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SET datestyle TO dmy;
+SELECT date 'January 8, 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-18';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '1/8/1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '1/18/1999';
+ERROR: date/time field value out of range: "1/18/1999"
+LINE 1: SELECT date '1/18/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '18/1/1999';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '01/02/03';
+ date
+------------
+ 2003-02-01
+(1 row)
+
+SELECT date '19990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999.008';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'J2451187';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'January 8, 99 BC';
+ date
+---------------
+ 0099-01-08 BC
+(1 row)
+
+SELECT date '99-Jan-08';
+ERROR: date/time field value out of range: "99-Jan-08"
+LINE 1: SELECT date '99-Jan-08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-Jan-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-08-Jan';
+ERROR: invalid input syntax for type date: "99-08-Jan"
+LINE 1: SELECT date '99-08-Jan';
+ ^
+SELECT date '1999-08-Jan';
+ERROR: invalid input syntax for type date: "1999-08-Jan"
+LINE 1: SELECT date '1999-08-Jan';
+ ^
+SELECT date '99 Jan 08';
+ERROR: date/time field value out of range: "99 Jan 08"
+LINE 1: SELECT date '99 Jan 08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 Jan 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99 08 Jan';
+ERROR: invalid input syntax for type date: "99 08 Jan"
+LINE 1: SELECT date '99 08 Jan';
+ ^
+SELECT date '1999 08 Jan';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-01-08';
+ERROR: date/time field value out of range: "99-01-08"
+LINE 1: SELECT date '99-01-08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-01-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-01-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '01-08-99';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '01-08-1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99-08-01';
+ERROR: date/time field value out of range: "99-08-01"
+LINE 1: SELECT date '99-08-01';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-08-01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99 01 08';
+ERROR: date/time field value out of range: "99 01 08"
+LINE 1: SELECT date '99 01 08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 01 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 01 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 01 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '01 08 99';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '01 08 1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99 08 01';
+ERROR: date/time field value out of range: "99 08 01"
+LINE 1: SELECT date '99 08 01';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 08 01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SET datestyle TO mdy;
+SELECT date 'January 8, 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999-01-18';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '1/8/1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1/18/1999';
+ date
+------------
+ 1999-01-18
+(1 row)
+
+SELECT date '18/1/1999';
+ERROR: date/time field value out of range: "18/1/1999"
+LINE 1: SELECT date '18/1/1999';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '01/02/03';
+ date
+------------
+ 2003-01-02
+(1 row)
+
+SELECT date '19990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '990108';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '1999.008';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'J2451187';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'January 8, 99 BC';
+ date
+---------------
+ 0099-01-08 BC
+(1 row)
+
+SELECT date '99-Jan-08';
+ERROR: date/time field value out of range: "99-Jan-08"
+LINE 1: SELECT date '99-Jan-08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-Jan-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-Jan-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan-08-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-08-Jan';
+ERROR: invalid input syntax for type date: "99-08-Jan"
+LINE 1: SELECT date '99-08-Jan';
+ ^
+SELECT date '1999-08-Jan';
+ERROR: invalid input syntax for type date: "1999-08-Jan"
+LINE 1: SELECT date '1999-08-Jan';
+ ^
+SELECT date '99 Jan 08';
+ERROR: invalid input syntax for type date: "99 Jan 08"
+LINE 1: SELECT date '99 Jan 08';
+ ^
+SELECT date '1999 Jan 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 Jan 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date 'Jan 08 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99 08 Jan';
+ERROR: invalid input syntax for type date: "99 08 Jan"
+LINE 1: SELECT date '99 08 Jan';
+ ^
+SELECT date '1999 08 Jan';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-01-08';
+ERROR: date/time field value out of range: "99-01-08"
+LINE 1: SELECT date '99-01-08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-01-08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08-01-99';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '08-01-1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '01-08-99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '01-08-1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99-08-01';
+ERROR: date/time field value out of range: "99-08-01"
+LINE 1: SELECT date '99-08-01';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999-08-01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '99 01 08';
+ERROR: date/time field value out of range: "99 01 08"
+LINE 1: SELECT date '99 01 08';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 01 08';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '08 01 99';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '08 01 1999';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+SELECT date '01 08 99';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '01 08 1999';
+ date
+------------
+ 1999-01-08
+(1 row)
+
+SELECT date '99 08 01';
+ERROR: date/time field value out of range: "99 08 01"
+LINE 1: SELECT date '99 08 01';
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+SELECT date '1999 08 01';
+ date
+------------
+ 1999-08-01
+(1 row)
+
+-- Check upper and lower limits of date range
+SELECT date '4714-11-24 BC';
+ date
+---------------
+ 4714-11-24 BC
+(1 row)
+
+SELECT date '4714-11-23 BC'; -- out of range
+ERROR: date out of range: "4714-11-23 BC"
+LINE 1: SELECT date '4714-11-23 BC';
+ ^
+SELECT date '5874897-12-31';
+ date
+---------------
+ 5874897-12-31
+(1 row)
+
+SELECT date '5874898-01-01'; -- out of range
+ERROR: date out of range: "5874898-01-01"
+LINE 1: SELECT date '5874898-01-01';
+ ^
+RESET datestyle;
+--
+-- Simple math
+-- Leave most of it for the horology tests
+--
+SELECT f1 - date '2000-01-01' AS "Days From 2K" FROM DATE_TBL;
+ Days From 2K
+--------------
+ -15607
+ -15542
+ -1403
+ -1402
+ -1401
+ -1400
+ -1037
+ -1036
+ -1035
+ 91
+ 92
+ 93
+ 13977
+ 14343
+ 14710
+ -1475115
+(16 rows)
+
+SELECT f1 - date 'epoch' AS "Days From Epoch" FROM DATE_TBL;
+ Days From Epoch
+-----------------
+ -4650
+ -4585
+ 9554
+ 9555
+ 9556
+ 9557
+ 9920
+ 9921
+ 9922
+ 11048
+ 11049
+ 11050
+ 24934
+ 25300
+ 25667
+ -1464158
+(16 rows)
+
+SELECT date 'yesterday' - date 'today' AS "One day";
+ One day
+---------
+ -1
+(1 row)
+
+SELECT date 'today' - date 'tomorrow' AS "One day";
+ One day
+---------
+ -1
+(1 row)
+
+SELECT date 'yesterday' - date 'tomorrow' AS "Two days";
+ Two days
+----------
+ -2
+(1 row)
+
+SELECT date 'tomorrow' - date 'today' AS "One day";
+ One day
+---------
+ 1
+(1 row)
+
+SELECT date 'today' - date 'yesterday' AS "One day";
+ One day
+---------
+ 1
+(1 row)
+
+SELECT date 'tomorrow' - date 'yesterday' AS "Two days";
+ Two days
+----------
+ 2
+(1 row)
+
+--
+-- test extract!
+--
+SELECT f1 as "date",
+ date_part('year', f1) AS year,
+ date_part('month', f1) AS month,
+ date_part('day', f1) AS day,
+ date_part('quarter', f1) AS quarter,
+ date_part('decade', f1) AS decade,
+ date_part('century', f1) AS century,
+ date_part('millennium', f1) AS millennium,
+ date_part('isoyear', f1) AS isoyear,
+ date_part('week', f1) AS week,
+ date_part('dow', f1) AS dow,
+ date_part('isodow', f1) AS isodow,
+ date_part('doy', f1) AS doy,
+ date_part('julian', f1) AS julian,
+ date_part('epoch', f1) AS epoch
+ FROM date_tbl;
+ date | year | month | day | quarter | decade | century | millennium | isoyear | week | dow | isodow | doy | julian | epoch
+---------------+-------+-------+-----+---------+--------+---------+------------+---------+------+-----+--------+-----+---------+---------------
+ 04-09-1957 | 1957 | 4 | 9 | 2 | 195 | 20 | 2 | 1957 | 15 | 2 | 2 | 99 | 2435938 | -401760000
+ 06-13-1957 | 1957 | 6 | 13 | 2 | 195 | 20 | 2 | 1957 | 24 | 4 | 4 | 164 | 2436003 | -396144000
+ 02-28-1996 | 1996 | 2 | 28 | 1 | 199 | 20 | 2 | 1996 | 9 | 3 | 3 | 59 | 2450142 | 825465600
+ 02-29-1996 | 1996 | 2 | 29 | 1 | 199 | 20 | 2 | 1996 | 9 | 4 | 4 | 60 | 2450143 | 825552000
+ 03-01-1996 | 1996 | 3 | 1 | 1 | 199 | 20 | 2 | 1996 | 9 | 5 | 5 | 61 | 2450144 | 825638400
+ 03-02-1996 | 1996 | 3 | 2 | 1 | 199 | 20 | 2 | 1996 | 9 | 6 | 6 | 62 | 2450145 | 825724800
+ 02-28-1997 | 1997 | 2 | 28 | 1 | 199 | 20 | 2 | 1997 | 9 | 5 | 5 | 59 | 2450508 | 857088000
+ 03-01-1997 | 1997 | 3 | 1 | 1 | 199 | 20 | 2 | 1997 | 9 | 6 | 6 | 60 | 2450509 | 857174400
+ 03-02-1997 | 1997 | 3 | 2 | 1 | 199 | 20 | 2 | 1997 | 9 | 0 | 7 | 61 | 2450510 | 857260800
+ 04-01-2000 | 2000 | 4 | 1 | 2 | 200 | 20 | 2 | 2000 | 13 | 6 | 6 | 92 | 2451636 | 954547200
+ 04-02-2000 | 2000 | 4 | 2 | 2 | 200 | 20 | 2 | 2000 | 13 | 0 | 7 | 93 | 2451637 | 954633600
+ 04-03-2000 | 2000 | 4 | 3 | 2 | 200 | 20 | 2 | 2000 | 14 | 1 | 1 | 94 | 2451638 | 954720000
+ 04-08-2038 | 2038 | 4 | 8 | 2 | 203 | 21 | 3 | 2038 | 14 | 4 | 4 | 98 | 2465522 | 2154297600
+ 04-09-2039 | 2039 | 4 | 9 | 2 | 203 | 21 | 3 | 2039 | 14 | 6 | 6 | 99 | 2465888 | 2185920000
+ 04-10-2040 | 2040 | 4 | 10 | 2 | 204 | 21 | 3 | 2040 | 15 | 2 | 2 | 101 | 2466255 | 2217628800
+ 04-10-2040 BC | -2040 | 4 | 10 | 2 | -204 | -21 | -3 | -2040 | 15 | 1 | 1 | 100 | 976430 | -126503251200
+(16 rows)
+
+--
+-- epoch
+--
+SELECT EXTRACT(EPOCH FROM DATE '1970-01-01'); -- 0
+ extract
+---------
+ 0
+(1 row)
+
+--
+-- century
+--
+SELECT EXTRACT(CENTURY FROM DATE '0101-12-31 BC'); -- -2
+ extract
+---------
+ -2
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '0100-12-31 BC'); -- -1
+ extract
+---------
+ -1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); -- -1
+ extract
+---------
+ -1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '0001-01-01'); -- 1
+ extract
+---------
+ 1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '0001-01-01 AD'); -- 1
+ extract
+---------
+ 1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '1900-12-31'); -- 19
+ extract
+---------
+ 19
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '1901-01-01'); -- 20
+ extract
+---------
+ 20
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '2000-12-31'); -- 20
+ extract
+---------
+ 20
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '2001-01-01'); -- 21
+ extract
+---------
+ 21
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM CURRENT_DATE)>=21 AS True; -- true
+ true
+------
+ t
+(1 row)
+
+--
+-- millennium
+--
+SELECT EXTRACT(MILLENNIUM FROM DATE '0001-12-31 BC'); -- -1
+ extract
+---------
+ -1
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '0001-01-01 AD'); -- 1
+ extract
+---------
+ 1
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '1000-12-31'); -- 1
+ extract
+---------
+ 1
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '1001-01-01'); -- 2
+ extract
+---------
+ 2
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '2000-12-31'); -- 2
+ extract
+---------
+ 2
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '2001-01-01'); -- 3
+ extract
+---------
+ 3
+(1 row)
+
+-- next test to be fixed on the turn of the next millennium;-)
+SELECT EXTRACT(MILLENNIUM FROM CURRENT_DATE); -- 3
+ extract
+---------
+ 3
+(1 row)
+
+--
+-- decade
+--
+SELECT EXTRACT(DECADE FROM DATE '1994-12-25'); -- 199
+ extract
+---------
+ 199
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0010-01-01'); -- 1
+ extract
+---------
+ 1
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0009-12-31'); -- 0
+ extract
+---------
+ 0
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0001-01-01 BC'); -- 0
+ extract
+---------
+ 0
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0002-12-31 BC'); -- -1
+ extract
+---------
+ -1
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0011-01-01 BC'); -- -1
+ extract
+---------
+ -1
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '0012-12-31 BC'); -- -2
+ extract
+---------
+ -2
+(1 row)
+
+--
+-- all possible fields
+--
+SELECT EXTRACT(MICROSECONDS FROM DATE '2020-08-11');
+ERROR: date units "microseconds" not supported
+SELECT EXTRACT(MILLISECONDS FROM DATE '2020-08-11');
+ERROR: date units "milliseconds" not supported
+SELECT EXTRACT(SECOND FROM DATE '2020-08-11');
+ERROR: date units "second" not supported
+SELECT EXTRACT(MINUTE FROM DATE '2020-08-11');
+ERROR: date units "minute" not supported
+SELECT EXTRACT(HOUR FROM DATE '2020-08-11');
+ERROR: date units "hour" not supported
+SELECT EXTRACT(DAY FROM DATE '2020-08-11');
+ extract
+---------
+ 11
+(1 row)
+
+SELECT EXTRACT(MONTH FROM DATE '2020-08-11');
+ extract
+---------
+ 8
+(1 row)
+
+SELECT EXTRACT(YEAR FROM DATE '2020-08-11');
+ extract
+---------
+ 2020
+(1 row)
+
+SELECT EXTRACT(YEAR FROM DATE '2020-08-11 BC');
+ extract
+---------
+ -2020
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE '2020-08-11');
+ extract
+---------
+ 202
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE '2020-08-11');
+ extract
+---------
+ 21
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE '2020-08-11');
+ extract
+---------
+ 3
+(1 row)
+
+SELECT EXTRACT(ISOYEAR FROM DATE '2020-08-11');
+ extract
+---------
+ 2020
+(1 row)
+
+SELECT EXTRACT(ISOYEAR FROM DATE '2020-08-11 BC');
+ extract
+---------
+ -2020
+(1 row)
+
+SELECT EXTRACT(QUARTER FROM DATE '2020-08-11');
+ extract
+---------
+ 3
+(1 row)
+
+SELECT EXTRACT(WEEK FROM DATE '2020-08-11');
+ extract
+---------
+ 33
+(1 row)
+
+SELECT EXTRACT(DOW FROM DATE '2020-08-11');
+ extract
+---------
+ 2
+(1 row)
+
+SELECT EXTRACT(DOW FROM DATE '2020-08-16');
+ extract
+---------
+ 0
+(1 row)
+
+SELECT EXTRACT(ISODOW FROM DATE '2020-08-11');
+ extract
+---------
+ 2
+(1 row)
+
+SELECT EXTRACT(ISODOW FROM DATE '2020-08-16');
+ extract
+---------
+ 7
+(1 row)
+
+SELECT EXTRACT(DOY FROM DATE '2020-08-11');
+ extract
+---------
+ 224
+(1 row)
+
+SELECT EXTRACT(TIMEZONE FROM DATE '2020-08-11');
+ERROR: date units "timezone" not supported
+SELECT EXTRACT(TIMEZONE_M FROM DATE '2020-08-11');
+ERROR: date units "timezone_m" not supported
+SELECT EXTRACT(TIMEZONE_H FROM DATE '2020-08-11');
+ERROR: date units "timezone_h" not supported
+SELECT EXTRACT(EPOCH FROM DATE '2020-08-11');
+ extract
+------------
+ 1597104000
+(1 row)
+
+SELECT EXTRACT(JULIAN FROM DATE '2020-08-11');
+ extract
+---------
+ 2459073
+(1 row)
+
+--
+-- test trunc function!
+--
+SELECT DATE_TRUNC('MILLENNIUM', TIMESTAMP '1970-03-20 04:30:00.00000'); -- 1001
+ date_trunc
+--------------------------
+ Thu Jan 01 00:00:00 1001
+(1 row)
+
+SELECT DATE_TRUNC('MILLENNIUM', DATE '1970-03-20'); -- 1001-01-01
+ date_trunc
+------------------------------
+ Thu Jan 01 00:00:00 1001 PST
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', TIMESTAMP '1970-03-20 04:30:00.00000'); -- 1901
+ date_trunc
+--------------------------
+ Tue Jan 01 00:00:00 1901
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', DATE '1970-03-20'); -- 1901
+ date_trunc
+------------------------------
+ Tue Jan 01 00:00:00 1901 PST
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', DATE '2004-08-10'); -- 2001-01-01
+ date_trunc
+------------------------------
+ Mon Jan 01 00:00:00 2001 PST
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', DATE '0002-02-04'); -- 0001-01-01
+ date_trunc
+------------------------------
+ Mon Jan 01 00:00:00 0001 PST
+(1 row)
+
+SELECT DATE_TRUNC('CENTURY', DATE '0055-08-10 BC'); -- 0100-01-01 BC
+ date_trunc
+---------------------------------
+ Tue Jan 01 00:00:00 0100 PST BC
+(1 row)
+
+SELECT DATE_TRUNC('DECADE', DATE '1993-12-25'); -- 1990-01-01
+ date_trunc
+------------------------------
+ Mon Jan 01 00:00:00 1990 PST
+(1 row)
+
+SELECT DATE_TRUNC('DECADE', DATE '0004-12-25'); -- 0001-01-01 BC
+ date_trunc
+---------------------------------
+ Sat Jan 01 00:00:00 0001 PST BC
+(1 row)
+
+SELECT DATE_TRUNC('DECADE', DATE '0002-12-31 BC'); -- 0011-01-01 BC
+ date_trunc
+---------------------------------
+ Mon Jan 01 00:00:00 0011 PST BC
+(1 row)
+
+--
+-- test infinity
+--
+select 'infinity'::date, '-infinity'::date;
+ date | date
+----------+-----------
+ infinity | -infinity
+(1 row)
+
+select 'infinity'::date > 'today'::date as t;
+ t
+---
+ t
+(1 row)
+
+select '-infinity'::date < 'today'::date as t;
+ t
+---
+ t
+(1 row)
+
+select isfinite('infinity'::date), isfinite('-infinity'::date), isfinite('today'::date);
+ isfinite | isfinite | isfinite
+----------+----------+----------
+ f | f | t
+(1 row)
+
+--
+-- oscillating fields from non-finite date:
+--
+SELECT EXTRACT(DAY FROM DATE 'infinity'); -- NULL
+ extract
+---------
+
+(1 row)
+
+SELECT EXTRACT(DAY FROM DATE '-infinity'); -- NULL
+ extract
+---------
+
+(1 row)
+
+-- all supported fields
+SELECT EXTRACT(DAY FROM DATE 'infinity'); -- NULL
+ extract
+---------
+
+(1 row)
+
+SELECT EXTRACT(MONTH FROM DATE 'infinity'); -- NULL
+ extract
+---------
+
+(1 row)
+
+SELECT EXTRACT(QUARTER FROM DATE 'infinity'); -- NULL
+ extract
+---------
+
+(1 row)
+
+SELECT EXTRACT(WEEK FROM DATE 'infinity'); -- NULL
+ extract
+---------
+
+(1 row)
+
+SELECT EXTRACT(DOW FROM DATE 'infinity'); -- NULL
+ extract
+---------
+
+(1 row)
+
+SELECT EXTRACT(ISODOW FROM DATE 'infinity'); -- NULL
+ extract
+---------
+
+(1 row)
+
+SELECT EXTRACT(DOY FROM DATE 'infinity'); -- NULL
+ extract
+---------
+
+(1 row)
+
+--
+-- monotonic fields from non-finite date:
+--
+SELECT EXTRACT(EPOCH FROM DATE 'infinity'); -- Infinity
+ extract
+----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM DATE '-infinity'); -- -Infinity
+ extract
+-----------
+ -Infinity
+(1 row)
+
+-- all supported fields
+SELECT EXTRACT(YEAR FROM DATE 'infinity'); -- Infinity
+ extract
+----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(DECADE FROM DATE 'infinity'); -- Infinity
+ extract
+----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM DATE 'infinity'); -- Infinity
+ extract
+----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(MILLENNIUM FROM DATE 'infinity'); -- Infinity
+ extract
+----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(JULIAN FROM DATE 'infinity'); -- Infinity
+ extract
+----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(ISOYEAR FROM DATE 'infinity'); -- Infinity
+ extract
+----------
+ Infinity
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM DATE 'infinity'); -- Infinity
+ extract
+----------
+ Infinity
+(1 row)
+
+--
+-- wrong fields from non-finite date:
+--
+SELECT EXTRACT(MICROSEC FROM DATE 'infinity'); -- error
+ERROR: date units "microsec" not recognized
+-- test constructors
+select make_date(2013, 7, 15);
+ make_date
+------------
+ 07-15-2013
+(1 row)
+
+select make_date(-44, 3, 15);
+ make_date
+---------------
+ 03-15-0044 BC
+(1 row)
+
+select make_time(8, 20, 0.0);
+ make_time
+-----------
+ 08:20:00
+(1 row)
+
+-- should fail
+select make_date(0, 7, 15);
+ERROR: date field value out of range: 0-07-15
+select make_date(2013, 2, 30);
+ERROR: date field value out of range: 2013-02-30
+select make_date(2013, 13, 1);
+ERROR: date field value out of range: 2013-13-01
+select make_date(2013, 11, -1);
+ERROR: date field value out of range: 2013-11--1
+select make_time(10, 55, 100.1);
+ERROR: time field value out of range: 10:55:100.1
+select make_time(24, 0, 2.1);
+ERROR: time field value out of range: 24:00:2.1
diff --git a/ydb/library/yql/tests/postgresql/original/cases/date.sql b/ydb/library/yql/tests/postgresql/original/cases/date.sql
new file mode 100644
index 0000000000..8f7435b767
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/date.sql
@@ -0,0 +1,366 @@
+--
+-- DATE
+--
+
+CREATE TABLE DATE_TBL (f1 date);
+
+INSERT INTO DATE_TBL VALUES ('1957-04-09');
+INSERT INTO DATE_TBL VALUES ('1957-06-13');
+INSERT INTO DATE_TBL VALUES ('1996-02-28');
+INSERT INTO DATE_TBL VALUES ('1996-02-29');
+INSERT INTO DATE_TBL VALUES ('1996-03-01');
+INSERT INTO DATE_TBL VALUES ('1996-03-02');
+INSERT INTO DATE_TBL VALUES ('1997-02-28');
+INSERT INTO DATE_TBL VALUES ('1997-02-29');
+INSERT INTO DATE_TBL VALUES ('1997-03-01');
+INSERT INTO DATE_TBL VALUES ('1997-03-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-01');
+INSERT INTO DATE_TBL VALUES ('2000-04-02');
+INSERT INTO DATE_TBL VALUES ('2000-04-03');
+INSERT INTO DATE_TBL VALUES ('2038-04-08');
+INSERT INTO DATE_TBL VALUES ('2039-04-09');
+INSERT INTO DATE_TBL VALUES ('2040-04-10');
+INSERT INTO DATE_TBL VALUES ('2040-04-10 BC');
+
+SELECT f1 FROM DATE_TBL;
+
+SELECT f1 FROM DATE_TBL WHERE f1 < '2000-01-01';
+
+SELECT f1 FROM DATE_TBL
+ WHERE f1 BETWEEN '2000-01-01' AND '2001-01-01';
+
+--
+-- Check all the documented input formats
+--
+SET datestyle TO iso; -- display results in ISO
+
+SET datestyle TO ymd;
+
+SELECT date 'January 8, 1999';
+SELECT date '1999-01-08';
+SELECT date '1999-01-18';
+SELECT date '1/8/1999';
+SELECT date '1/18/1999';
+SELECT date '18/1/1999';
+SELECT date '01/02/03';
+SELECT date '19990108';
+SELECT date '990108';
+SELECT date '1999.008';
+SELECT date 'J2451187';
+SELECT date 'January 8, 99 BC';
+
+SELECT date '99-Jan-08';
+SELECT date '1999-Jan-08';
+SELECT date '08-Jan-99';
+SELECT date '08-Jan-1999';
+SELECT date 'Jan-08-99';
+SELECT date 'Jan-08-1999';
+SELECT date '99-08-Jan';
+SELECT date '1999-08-Jan';
+
+SELECT date '99 Jan 08';
+SELECT date '1999 Jan 08';
+SELECT date '08 Jan 99';
+SELECT date '08 Jan 1999';
+SELECT date 'Jan 08 99';
+SELECT date 'Jan 08 1999';
+SELECT date '99 08 Jan';
+SELECT date '1999 08 Jan';
+
+SELECT date '99-01-08';
+SELECT date '1999-01-08';
+SELECT date '08-01-99';
+SELECT date '08-01-1999';
+SELECT date '01-08-99';
+SELECT date '01-08-1999';
+SELECT date '99-08-01';
+SELECT date '1999-08-01';
+
+SELECT date '99 01 08';
+SELECT date '1999 01 08';
+SELECT date '08 01 99';
+SELECT date '08 01 1999';
+SELECT date '01 08 99';
+SELECT date '01 08 1999';
+SELECT date '99 08 01';
+SELECT date '1999 08 01';
+
+SET datestyle TO dmy;
+
+SELECT date 'January 8, 1999';
+SELECT date '1999-01-08';
+SELECT date '1999-01-18';
+SELECT date '1/8/1999';
+SELECT date '1/18/1999';
+SELECT date '18/1/1999';
+SELECT date '01/02/03';
+SELECT date '19990108';
+SELECT date '990108';
+SELECT date '1999.008';
+SELECT date 'J2451187';
+SELECT date 'January 8, 99 BC';
+
+SELECT date '99-Jan-08';
+SELECT date '1999-Jan-08';
+SELECT date '08-Jan-99';
+SELECT date '08-Jan-1999';
+SELECT date 'Jan-08-99';
+SELECT date 'Jan-08-1999';
+SELECT date '99-08-Jan';
+SELECT date '1999-08-Jan';
+
+SELECT date '99 Jan 08';
+SELECT date '1999 Jan 08';
+SELECT date '08 Jan 99';
+SELECT date '08 Jan 1999';
+SELECT date 'Jan 08 99';
+SELECT date 'Jan 08 1999';
+SELECT date '99 08 Jan';
+SELECT date '1999 08 Jan';
+
+SELECT date '99-01-08';
+SELECT date '1999-01-08';
+SELECT date '08-01-99';
+SELECT date '08-01-1999';
+SELECT date '01-08-99';
+SELECT date '01-08-1999';
+SELECT date '99-08-01';
+SELECT date '1999-08-01';
+
+SELECT date '99 01 08';
+SELECT date '1999 01 08';
+SELECT date '08 01 99';
+SELECT date '08 01 1999';
+SELECT date '01 08 99';
+SELECT date '01 08 1999';
+SELECT date '99 08 01';
+SELECT date '1999 08 01';
+
+SET datestyle TO mdy;
+
+SELECT date 'January 8, 1999';
+SELECT date '1999-01-08';
+SELECT date '1999-01-18';
+SELECT date '1/8/1999';
+SELECT date '1/18/1999';
+SELECT date '18/1/1999';
+SELECT date '01/02/03';
+SELECT date '19990108';
+SELECT date '990108';
+SELECT date '1999.008';
+SELECT date 'J2451187';
+SELECT date 'January 8, 99 BC';
+
+SELECT date '99-Jan-08';
+SELECT date '1999-Jan-08';
+SELECT date '08-Jan-99';
+SELECT date '08-Jan-1999';
+SELECT date 'Jan-08-99';
+SELECT date 'Jan-08-1999';
+SELECT date '99-08-Jan';
+SELECT date '1999-08-Jan';
+
+SELECT date '99 Jan 08';
+SELECT date '1999 Jan 08';
+SELECT date '08 Jan 99';
+SELECT date '08 Jan 1999';
+SELECT date 'Jan 08 99';
+SELECT date 'Jan 08 1999';
+SELECT date '99 08 Jan';
+SELECT date '1999 08 Jan';
+
+SELECT date '99-01-08';
+SELECT date '1999-01-08';
+SELECT date '08-01-99';
+SELECT date '08-01-1999';
+SELECT date '01-08-99';
+SELECT date '01-08-1999';
+SELECT date '99-08-01';
+SELECT date '1999-08-01';
+
+SELECT date '99 01 08';
+SELECT date '1999 01 08';
+SELECT date '08 01 99';
+SELECT date '08 01 1999';
+SELECT date '01 08 99';
+SELECT date '01 08 1999';
+SELECT date '99 08 01';
+SELECT date '1999 08 01';
+
+-- Check upper and lower limits of date range
+SELECT date '4714-11-24 BC';
+SELECT date '4714-11-23 BC'; -- out of range
+SELECT date '5874897-12-31';
+SELECT date '5874898-01-01'; -- out of range
+
+RESET datestyle;
+
+--
+-- Simple math
+-- Leave most of it for the horology tests
+--
+
+SELECT f1 - date '2000-01-01' AS "Days From 2K" FROM DATE_TBL;
+
+SELECT f1 - date 'epoch' AS "Days From Epoch" FROM DATE_TBL;
+
+SELECT date 'yesterday' - date 'today' AS "One day";
+
+SELECT date 'today' - date 'tomorrow' AS "One day";
+
+SELECT date 'yesterday' - date 'tomorrow' AS "Two days";
+
+SELECT date 'tomorrow' - date 'today' AS "One day";
+
+SELECT date 'today' - date 'yesterday' AS "One day";
+
+SELECT date 'tomorrow' - date 'yesterday' AS "Two days";
+
+--
+-- test extract!
+--
+SELECT f1 as "date",
+ date_part('year', f1) AS year,
+ date_part('month', f1) AS month,
+ date_part('day', f1) AS day,
+ date_part('quarter', f1) AS quarter,
+ date_part('decade', f1) AS decade,
+ date_part('century', f1) AS century,
+ date_part('millennium', f1) AS millennium,
+ date_part('isoyear', f1) AS isoyear,
+ date_part('week', f1) AS week,
+ date_part('dow', f1) AS dow,
+ date_part('isodow', f1) AS isodow,
+ date_part('doy', f1) AS doy,
+ date_part('julian', f1) AS julian,
+ date_part('epoch', f1) AS epoch
+ FROM date_tbl;
+--
+-- epoch
+--
+SELECT EXTRACT(EPOCH FROM DATE '1970-01-01'); -- 0
+--
+-- century
+--
+SELECT EXTRACT(CENTURY FROM DATE '0101-12-31 BC'); -- -2
+SELECT EXTRACT(CENTURY FROM DATE '0100-12-31 BC'); -- -1
+SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); -- -1
+SELECT EXTRACT(CENTURY FROM DATE '0001-01-01'); -- 1
+SELECT EXTRACT(CENTURY FROM DATE '0001-01-01 AD'); -- 1
+SELECT EXTRACT(CENTURY FROM DATE '1900-12-31'); -- 19
+SELECT EXTRACT(CENTURY FROM DATE '1901-01-01'); -- 20
+SELECT EXTRACT(CENTURY FROM DATE '2000-12-31'); -- 20
+SELECT EXTRACT(CENTURY FROM DATE '2001-01-01'); -- 21
+SELECT EXTRACT(CENTURY FROM CURRENT_DATE)>=21 AS True; -- true
+--
+-- millennium
+--
+SELECT EXTRACT(MILLENNIUM FROM DATE '0001-12-31 BC'); -- -1
+SELECT EXTRACT(MILLENNIUM FROM DATE '0001-01-01 AD'); -- 1
+SELECT EXTRACT(MILLENNIUM FROM DATE '1000-12-31'); -- 1
+SELECT EXTRACT(MILLENNIUM FROM DATE '1001-01-01'); -- 2
+SELECT EXTRACT(MILLENNIUM FROM DATE '2000-12-31'); -- 2
+SELECT EXTRACT(MILLENNIUM FROM DATE '2001-01-01'); -- 3
+-- next test to be fixed on the turn of the next millennium;-)
+SELECT EXTRACT(MILLENNIUM FROM CURRENT_DATE); -- 3
+--
+-- decade
+--
+SELECT EXTRACT(DECADE FROM DATE '1994-12-25'); -- 199
+SELECT EXTRACT(DECADE FROM DATE '0010-01-01'); -- 1
+SELECT EXTRACT(DECADE FROM DATE '0009-12-31'); -- 0
+SELECT EXTRACT(DECADE FROM DATE '0001-01-01 BC'); -- 0
+SELECT EXTRACT(DECADE FROM DATE '0002-12-31 BC'); -- -1
+SELECT EXTRACT(DECADE FROM DATE '0011-01-01 BC'); -- -1
+SELECT EXTRACT(DECADE FROM DATE '0012-12-31 BC'); -- -2
+--
+-- all possible fields
+--
+SELECT EXTRACT(MICROSECONDS FROM DATE '2020-08-11');
+SELECT EXTRACT(MILLISECONDS FROM DATE '2020-08-11');
+SELECT EXTRACT(SECOND FROM DATE '2020-08-11');
+SELECT EXTRACT(MINUTE FROM DATE '2020-08-11');
+SELECT EXTRACT(HOUR FROM DATE '2020-08-11');
+SELECT EXTRACT(DAY FROM DATE '2020-08-11');
+SELECT EXTRACT(MONTH FROM DATE '2020-08-11');
+SELECT EXTRACT(YEAR FROM DATE '2020-08-11');
+SELECT EXTRACT(YEAR FROM DATE '2020-08-11 BC');
+SELECT EXTRACT(DECADE FROM DATE '2020-08-11');
+SELECT EXTRACT(CENTURY FROM DATE '2020-08-11');
+SELECT EXTRACT(MILLENNIUM FROM DATE '2020-08-11');
+SELECT EXTRACT(ISOYEAR FROM DATE '2020-08-11');
+SELECT EXTRACT(ISOYEAR FROM DATE '2020-08-11 BC');
+SELECT EXTRACT(QUARTER FROM DATE '2020-08-11');
+SELECT EXTRACT(WEEK FROM DATE '2020-08-11');
+SELECT EXTRACT(DOW FROM DATE '2020-08-11');
+SELECT EXTRACT(DOW FROM DATE '2020-08-16');
+SELECT EXTRACT(ISODOW FROM DATE '2020-08-11');
+SELECT EXTRACT(ISODOW FROM DATE '2020-08-16');
+SELECT EXTRACT(DOY FROM DATE '2020-08-11');
+SELECT EXTRACT(TIMEZONE FROM DATE '2020-08-11');
+SELECT EXTRACT(TIMEZONE_M FROM DATE '2020-08-11');
+SELECT EXTRACT(TIMEZONE_H FROM DATE '2020-08-11');
+SELECT EXTRACT(EPOCH FROM DATE '2020-08-11');
+SELECT EXTRACT(JULIAN FROM DATE '2020-08-11');
+--
+-- test trunc function!
+--
+SELECT DATE_TRUNC('MILLENNIUM', TIMESTAMP '1970-03-20 04:30:00.00000'); -- 1001
+SELECT DATE_TRUNC('MILLENNIUM', DATE '1970-03-20'); -- 1001-01-01
+SELECT DATE_TRUNC('CENTURY', TIMESTAMP '1970-03-20 04:30:00.00000'); -- 1901
+SELECT DATE_TRUNC('CENTURY', DATE '1970-03-20'); -- 1901
+SELECT DATE_TRUNC('CENTURY', DATE '2004-08-10'); -- 2001-01-01
+SELECT DATE_TRUNC('CENTURY', DATE '0002-02-04'); -- 0001-01-01
+SELECT DATE_TRUNC('CENTURY', DATE '0055-08-10 BC'); -- 0100-01-01 BC
+SELECT DATE_TRUNC('DECADE', DATE '1993-12-25'); -- 1990-01-01
+SELECT DATE_TRUNC('DECADE', DATE '0004-12-25'); -- 0001-01-01 BC
+SELECT DATE_TRUNC('DECADE', DATE '0002-12-31 BC'); -- 0011-01-01 BC
+--
+-- test infinity
+--
+select 'infinity'::date, '-infinity'::date;
+select 'infinity'::date > 'today'::date as t;
+select '-infinity'::date < 'today'::date as t;
+select isfinite('infinity'::date), isfinite('-infinity'::date), isfinite('today'::date);
+--
+-- oscillating fields from non-finite date:
+--
+SELECT EXTRACT(DAY FROM DATE 'infinity'); -- NULL
+SELECT EXTRACT(DAY FROM DATE '-infinity'); -- NULL
+-- all supported fields
+SELECT EXTRACT(DAY FROM DATE 'infinity'); -- NULL
+SELECT EXTRACT(MONTH FROM DATE 'infinity'); -- NULL
+SELECT EXTRACT(QUARTER FROM DATE 'infinity'); -- NULL
+SELECT EXTRACT(WEEK FROM DATE 'infinity'); -- NULL
+SELECT EXTRACT(DOW FROM DATE 'infinity'); -- NULL
+SELECT EXTRACT(ISODOW FROM DATE 'infinity'); -- NULL
+SELECT EXTRACT(DOY FROM DATE 'infinity'); -- NULL
+--
+-- monotonic fields from non-finite date:
+--
+SELECT EXTRACT(EPOCH FROM DATE 'infinity'); -- Infinity
+SELECT EXTRACT(EPOCH FROM DATE '-infinity'); -- -Infinity
+-- all supported fields
+SELECT EXTRACT(YEAR FROM DATE 'infinity'); -- Infinity
+SELECT EXTRACT(DECADE FROM DATE 'infinity'); -- Infinity
+SELECT EXTRACT(CENTURY FROM DATE 'infinity'); -- Infinity
+SELECT EXTRACT(MILLENNIUM FROM DATE 'infinity'); -- Infinity
+SELECT EXTRACT(JULIAN FROM DATE 'infinity'); -- Infinity
+SELECT EXTRACT(ISOYEAR FROM DATE 'infinity'); -- Infinity
+SELECT EXTRACT(EPOCH FROM DATE 'infinity'); -- Infinity
+--
+-- wrong fields from non-finite date:
+--
+SELECT EXTRACT(MICROSEC FROM DATE 'infinity'); -- error
+
+-- test constructors
+select make_date(2013, 7, 15);
+select make_date(-44, 3, 15);
+select make_time(8, 20, 0.0);
+-- should fail
+select make_date(0, 7, 15);
+select make_date(2013, 2, 30);
+select make_date(2013, 13, 1);
+select make_date(2013, 11, -1);
+select make_time(10, 55, 100.1);
+select make_time(24, 0, 2.1);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/dbsize.out b/ydb/library/yql/tests/postgresql/original/cases/dbsize.out
new file mode 100644
index 0000000000..29804aee8b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/dbsize.out
@@ -0,0 +1,188 @@
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10::bigint), (1000::bigint), (1000000::bigint),
+ (1000000000::bigint), (1000000000000::bigint),
+ (1000000000000000::bigint)) x(size);
+ size | pg_size_pretty | pg_size_pretty
+------------------+----------------+----------------
+ 10 | 10 bytes | -10 bytes
+ 1000 | 1000 bytes | -1000 bytes
+ 1000000 | 977 kB | -977 kB
+ 1000000000 | 954 MB | -954 MB
+ 1000000000000 | 931 GB | -931 GB
+ 1000000000000000 | 909 TB | -909 TB
+(6 rows)
+
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10::numeric), (1000::numeric), (1000000::numeric),
+ (1000000000::numeric), (1000000000000::numeric),
+ (1000000000000000::numeric),
+ (10.5::numeric), (1000.5::numeric), (1000000.5::numeric),
+ (1000000000.5::numeric), (1000000000000.5::numeric),
+ (1000000000000000.5::numeric)) x(size);
+ size | pg_size_pretty | pg_size_pretty
+--------------------+----------------+----------------
+ 10 | 10 bytes | -10 bytes
+ 1000 | 1000 bytes | -1000 bytes
+ 1000000 | 977 kB | -977 kB
+ 1000000000 | 954 MB | -954 MB
+ 1000000000000 | 931 GB | -931 GB
+ 1000000000000000 | 909 TB | -909 TB
+ 10.5 | 10.5 bytes | -10.5 bytes
+ 1000.5 | 1000.5 bytes | -1000.5 bytes
+ 1000000.5 | 977 kB | -977 kB
+ 1000000000.5 | 954 MB | -954 MB
+ 1000000000000.5 | 931 GB | -931 GB
+ 1000000000000000.5 | 909 TB | -909 TB
+(12 rows)
+
+-- test where units change up
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10239::bigint), (10240::bigint),
+ (10485247::bigint), (10485248::bigint),
+ (10736893951::bigint), (10736893952::bigint),
+ (10994579406847::bigint), (10994579406848::bigint),
+ (11258449312612351::bigint), (11258449312612352::bigint)) x(size);
+ size | pg_size_pretty | pg_size_pretty
+-------------------+----------------+----------------
+ 10239 | 10239 bytes | -10239 bytes
+ 10240 | 10 kB | -10 kB
+ 10485247 | 10239 kB | -10239 kB
+ 10485248 | 10 MB | -10 MB
+ 10736893951 | 10239 MB | -10239 MB
+ 10736893952 | 10 GB | -10 GB
+ 10994579406847 | 10239 GB | -10239 GB
+ 10994579406848 | 10 TB | -10 TB
+ 11258449312612351 | 10239 TB | -10239 TB
+ 11258449312612352 | 10240 TB | -10240 TB
+(10 rows)
+
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10239::numeric), (10240::numeric),
+ (10485247::numeric), (10485248::numeric),
+ (10736893951::numeric), (10736893952::numeric),
+ (10994579406847::numeric), (10994579406848::numeric),
+ (11258449312612351::numeric), (11258449312612352::numeric)) x(size);
+ size | pg_size_pretty | pg_size_pretty
+-------------------+----------------+----------------
+ 10239 | 10239 bytes | -10239 bytes
+ 10240 | 10 kB | -10 kB
+ 10485247 | 10239 kB | -10239 kB
+ 10485248 | 10 MB | -10 MB
+ 10736893951 | 10239 MB | -10239 MB
+ 10736893952 | 10 GB | -10 GB
+ 10994579406847 | 10239 GB | -10239 GB
+ 10994579406848 | 10 TB | -10 TB
+ 11258449312612351 | 10239 TB | -10239 TB
+ 11258449312612352 | 10240 TB | -10240 TB
+(10 rows)
+
+-- pg_size_bytes() tests
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('1'), ('123bytes'), ('1kB'), ('1MB'), (' 1 GB'), ('1.5 GB '),
+ ('1TB'), ('3000 TB'), ('1e6 MB')) x(size);
+ size | pg_size_bytes
+----------+------------------
+ 1 | 1
+ 123bytes | 123
+ 1kB | 1024
+ 1MB | 1048576
+ 1 GB | 1073741824
+ 1.5 GB | 1610612736
+ 1TB | 1099511627776
+ 3000 TB | 3298534883328000
+ 1e6 MB | 1048576000000
+(9 rows)
+
+-- case-insensitive units are supported
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('1'), ('123bYteS'), ('1kb'), ('1mb'), (' 1 Gb'), ('1.5 gB '),
+ ('1tb'), ('3000 tb'), ('1e6 mb')) x(size);
+ size | pg_size_bytes
+----------+------------------
+ 1 | 1
+ 123bYteS | 123
+ 1kb | 1024
+ 1mb | 1048576
+ 1 Gb | 1073741824
+ 1.5 gB | 1610612736
+ 1tb | 1099511627776
+ 3000 tb | 3298534883328000
+ 1e6 mb | 1048576000000
+(9 rows)
+
+-- negative numbers are supported
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('-1'), ('-123bytes'), ('-1kb'), ('-1mb'), (' -1 Gb'), ('-1.5 gB '),
+ ('-1tb'), ('-3000 TB'), ('-10e-1 MB')) x(size);
+ size | pg_size_bytes
+-----------+-------------------
+ -1 | -1
+ -123bytes | -123
+ -1kb | -1024
+ -1mb | -1048576
+ -1 Gb | -1073741824
+ -1.5 gB | -1610612736
+ -1tb | -1099511627776
+ -3000 TB | -3298534883328000
+ -10e-1 MB | -1048576
+(9 rows)
+
+-- different cases with allowed points
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('-1.'), ('-1.kb'), ('-1. kb'), ('-0. gb'),
+ ('-.1'), ('-.1kb'), ('-.1 kb'), ('-.0 gb')) x(size);
+ size | pg_size_bytes
+--------+---------------
+ -1. | -1
+ -1.kb | -1024
+ -1. kb | -1024
+ -0. gb | 0
+ -.1 | 0
+ -.1kb | -102
+ -.1 kb | -102
+ -.0 gb | 0
+(8 rows)
+
+-- invalid inputs
+SELECT pg_size_bytes('1 AB');
+ERROR: invalid size: "1 AB"
+DETAIL: Invalid size unit: "AB".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('1 AB A');
+ERROR: invalid size: "1 AB A"
+DETAIL: Invalid size unit: "AB A".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('1 AB A ');
+ERROR: invalid size: "1 AB A "
+DETAIL: Invalid size unit: "AB A".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('9223372036854775807.9');
+ERROR: bigint out of range
+SELECT pg_size_bytes('1e100');
+ERROR: bigint out of range
+SELECT pg_size_bytes('1e1000000000000000000');
+ERROR: value overflows numeric format
+SELECT pg_size_bytes('1 byte'); -- the singular "byte" is not supported
+ERROR: invalid size: "1 byte"
+DETAIL: Invalid size unit: "byte".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('');
+ERROR: invalid size: ""
+SELECT pg_size_bytes('kb');
+ERROR: invalid size: "kb"
+SELECT pg_size_bytes('..');
+ERROR: invalid size: ".."
+SELECT pg_size_bytes('-.');
+ERROR: invalid size: "-."
+SELECT pg_size_bytes('-.kb');
+ERROR: invalid size: "-.kb"
+SELECT pg_size_bytes('-. kb');
+ERROR: invalid size: "-. kb"
+SELECT pg_size_bytes('.+912');
+ERROR: invalid size: ".+912"
+SELECT pg_size_bytes('+912+ kB');
+ERROR: invalid size: "+912+ kB"
+DETAIL: Invalid size unit: "+ kB".
+HINT: Valid units are "bytes", "kB", "MB", "GB", and "TB".
+SELECT pg_size_bytes('++123 kB');
+ERROR: invalid size: "++123 kB"
diff --git a/ydb/library/yql/tests/postgresql/original/cases/dbsize.sql b/ydb/library/yql/tests/postgresql/original/cases/dbsize.sql
new file mode 100644
index 0000000000..6a45c5eb1c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/dbsize.sql
@@ -0,0 +1,67 @@
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10::bigint), (1000::bigint), (1000000::bigint),
+ (1000000000::bigint), (1000000000000::bigint),
+ (1000000000000000::bigint)) x(size);
+
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10::numeric), (1000::numeric), (1000000::numeric),
+ (1000000000::numeric), (1000000000000::numeric),
+ (1000000000000000::numeric),
+ (10.5::numeric), (1000.5::numeric), (1000000.5::numeric),
+ (1000000000.5::numeric), (1000000000000.5::numeric),
+ (1000000000000000.5::numeric)) x(size);
+
+-- test where units change up
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10239::bigint), (10240::bigint),
+ (10485247::bigint), (10485248::bigint),
+ (10736893951::bigint), (10736893952::bigint),
+ (10994579406847::bigint), (10994579406848::bigint),
+ (11258449312612351::bigint), (11258449312612352::bigint)) x(size);
+
+SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
+ (VALUES (10239::numeric), (10240::numeric),
+ (10485247::numeric), (10485248::numeric),
+ (10736893951::numeric), (10736893952::numeric),
+ (10994579406847::numeric), (10994579406848::numeric),
+ (11258449312612351::numeric), (11258449312612352::numeric)) x(size);
+
+-- pg_size_bytes() tests
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('1'), ('123bytes'), ('1kB'), ('1MB'), (' 1 GB'), ('1.5 GB '),
+ ('1TB'), ('3000 TB'), ('1e6 MB')) x(size);
+
+-- case-insensitive units are supported
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('1'), ('123bYteS'), ('1kb'), ('1mb'), (' 1 Gb'), ('1.5 gB '),
+ ('1tb'), ('3000 tb'), ('1e6 mb')) x(size);
+
+-- negative numbers are supported
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('-1'), ('-123bytes'), ('-1kb'), ('-1mb'), (' -1 Gb'), ('-1.5 gB '),
+ ('-1tb'), ('-3000 TB'), ('-10e-1 MB')) x(size);
+
+-- different cases with allowed points
+SELECT size, pg_size_bytes(size) FROM
+ (VALUES ('-1.'), ('-1.kb'), ('-1. kb'), ('-0. gb'),
+ ('-.1'), ('-.1kb'), ('-.1 kb'), ('-.0 gb')) x(size);
+
+-- invalid inputs
+SELECT pg_size_bytes('1 AB');
+SELECT pg_size_bytes('1 AB A');
+SELECT pg_size_bytes('1 AB A ');
+SELECT pg_size_bytes('9223372036854775807.9');
+SELECT pg_size_bytes('1e100');
+SELECT pg_size_bytes('1e1000000000000000000');
+SELECT pg_size_bytes('1 byte'); -- the singular "byte" is not supported
+SELECT pg_size_bytes('');
+
+SELECT pg_size_bytes('kb');
+SELECT pg_size_bytes('..');
+SELECT pg_size_bytes('-.');
+SELECT pg_size_bytes('-.kb');
+SELECT pg_size_bytes('-. kb');
+
+SELECT pg_size_bytes('.+912');
+SELECT pg_size_bytes('+912+ kB');
+SELECT pg_size_bytes('++123 kB');
diff --git a/ydb/library/yql/tests/postgresql/original/cases/delete.out b/ydb/library/yql/tests/postgresql/original/cases/delete.out
new file mode 100644
index 0000000000..e7eb3285f8
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/delete.out
@@ -0,0 +1,33 @@
+CREATE TABLE delete_test (
+ id SERIAL PRIMARY KEY,
+ a INT,
+ b text
+);
+INSERT INTO delete_test (a) VALUES (10);
+INSERT INTO delete_test (a, b) VALUES (50, repeat('x', 10000));
+INSERT INTO delete_test (a) VALUES (100);
+-- allow an alias to be specified for DELETE's target table
+DELETE FROM delete_test AS dt WHERE dt.a > 75;
+-- if an alias is specified, don't allow the original table name
+-- to be referenced
+DELETE FROM delete_test dt WHERE delete_test.a > 25;
+ERROR: invalid reference to FROM-clause entry for table "delete_test"
+LINE 1: DELETE FROM delete_test dt WHERE delete_test.a > 25;
+ ^
+HINT: Perhaps you meant to reference the table alias "dt".
+SELECT id, a, char_length(b) FROM delete_test;
+ id | a | char_length
+----+----+-------------
+ 1 | 10 |
+ 2 | 50 | 10000
+(2 rows)
+
+-- delete a row with a TOASTed value
+DELETE FROM delete_test WHERE a > 25;
+SELECT id, a, char_length(b) FROM delete_test;
+ id | a | char_length
+----+----+-------------
+ 1 | 10 |
+(1 row)
+
+DROP TABLE delete_test;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/delete.sql b/ydb/library/yql/tests/postgresql/original/cases/delete.sql
new file mode 100644
index 0000000000..d8cb99e93c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/delete.sql
@@ -0,0 +1,25 @@
+CREATE TABLE delete_test (
+ id SERIAL PRIMARY KEY,
+ a INT,
+ b text
+);
+
+INSERT INTO delete_test (a) VALUES (10);
+INSERT INTO delete_test (a, b) VALUES (50, repeat('x', 10000));
+INSERT INTO delete_test (a) VALUES (100);
+
+-- allow an alias to be specified for DELETE's target table
+DELETE FROM delete_test AS dt WHERE dt.a > 75;
+
+-- if an alias is specified, don't allow the original table name
+-- to be referenced
+DELETE FROM delete_test dt WHERE delete_test.a > 25;
+
+SELECT id, a, char_length(b) FROM delete_test;
+
+-- delete a row with a TOASTed value
+DELETE FROM delete_test WHERE a > 25;
+
+SELECT id, a, char_length(b) FROM delete_test;
+
+DROP TABLE delete_test;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/expressions.out b/ydb/library/yql/tests/postgresql/original/cases/expressions.out
new file mode 100644
index 0000000000..9699d86cc6
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/expressions.out
@@ -0,0 +1,381 @@
+--
+-- expression evaluation tests that don't fit into a more specific file
+--
+--
+-- Tests for SQLVAlueFunction
+--
+-- current_date (always matches because of transactional behaviour)
+SELECT date(now())::text = current_date::text;
+ ?column?
+----------
+ t
+(1 row)
+
+-- current_time / localtime
+SELECT now()::timetz::text = current_time::text;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT now()::timetz(4)::text = current_time(4)::text;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT now()::time::text = localtime::text;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT now()::time(3)::text = localtime(3)::text;
+ ?column?
+----------
+ t
+(1 row)
+
+-- current_timestamp / localtimestamp (always matches because of transactional behaviour)
+SELECT current_timestamp = NOW();
+ ?column?
+----------
+ t
+(1 row)
+
+-- precision
+SELECT length(current_timestamp::text) >= length(current_timestamp(0)::text);
+ ?column?
+----------
+ t
+(1 row)
+
+-- localtimestamp
+SELECT now()::timestamp::text = localtimestamp::text;
+ ?column?
+----------
+ t
+(1 row)
+
+-- current_role/user/user is tested in rolnames.sql
+-- current database / catalog
+SELECT current_catalog = current_database();
+ ?column?
+----------
+ t
+(1 row)
+
+-- current_schema
+SELECT current_schema;
+ current_schema
+----------------
+ public
+(1 row)
+
+SET search_path = 'notme';
+SELECT current_schema;
+ current_schema
+----------------
+
+(1 row)
+
+SET search_path = 'pg_catalog';
+SELECT current_schema;
+ current_schema
+----------------
+ pg_catalog
+(1 row)
+
+RESET search_path;
+--
+-- Tests for BETWEEN
+--
+explain (costs off)
+select count(*) from date_tbl
+ where f1 between '1997-01-01' and '1998-01-01';
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Aggregate
+ -> Seq Scan on date_tbl
+ Filter: ((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date))
+(3 rows)
+
+select count(*) from date_tbl
+ where f1 between '1997-01-01' and '1998-01-01';
+ count
+-------
+ 3
+(1 row)
+
+explain (costs off)
+select count(*) from date_tbl
+ where f1 not between '1997-01-01' and '1998-01-01';
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Aggregate
+ -> Seq Scan on date_tbl
+ Filter: ((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date))
+(3 rows)
+
+select count(*) from date_tbl
+ where f1 not between '1997-01-01' and '1998-01-01';
+ count
+-------
+ 13
+(1 row)
+
+explain (costs off)
+select count(*) from date_tbl
+ where f1 between symmetric '1997-01-01' and '1998-01-01';
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------
+ Aggregate
+ -> Seq Scan on date_tbl
+ Filter: (((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date)) OR ((f1 >= '01-01-1998'::date) AND (f1 <= '01-01-1997'::date)))
+(3 rows)
+
+select count(*) from date_tbl
+ where f1 between symmetric '1997-01-01' and '1998-01-01';
+ count
+-------
+ 3
+(1 row)
+
+explain (costs off)
+select count(*) from date_tbl
+ where f1 not between symmetric '1997-01-01' and '1998-01-01';
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------
+ Aggregate
+ -> Seq Scan on date_tbl
+ Filter: (((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date)) AND ((f1 < '01-01-1998'::date) OR (f1 > '01-01-1997'::date)))
+(3 rows)
+
+select count(*) from date_tbl
+ where f1 not between symmetric '1997-01-01' and '1998-01-01';
+ count
+-------
+ 13
+(1 row)
+
+--
+-- Test parsing of a no-op cast to a type with unspecified typmod
+--
+begin;
+create table numeric_tbl (f1 numeric(18,3), f2 numeric);
+create view numeric_view as
+ select
+ f1, f1::numeric(16,4) as f1164, f1::numeric as f1n,
+ f2, f2::numeric(16,4) as f2164, f2::numeric as f2n
+ from numeric_tbl;
+\d+ numeric_view
+ View "public.numeric_view"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------------+-----------+----------+---------+---------+-------------
+ f1 | numeric(18,3) | | | | main |
+ f1164 | numeric(16,4) | | | | main |
+ f1n | numeric | | | | main |
+ f2 | numeric | | | | main |
+ f2164 | numeric(16,4) | | | | main |
+ f2n | numeric | | | | main |
+View definition:
+ SELECT numeric_tbl.f1,
+ numeric_tbl.f1::numeric(16,4) AS f1164,
+ numeric_tbl.f1::numeric AS f1n,
+ numeric_tbl.f2,
+ numeric_tbl.f2::numeric(16,4) AS f2164,
+ numeric_tbl.f2 AS f2n
+ FROM numeric_tbl;
+
+explain (verbose, costs off) select * from numeric_view;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------
+ Seq Scan on public.numeric_tbl
+ Output: numeric_tbl.f1, (numeric_tbl.f1)::numeric(16,4), (numeric_tbl.f1)::numeric, numeric_tbl.f2, (numeric_tbl.f2)::numeric(16,4), numeric_tbl.f2
+(2 rows)
+
+-- bpchar, lacking planner support for its length coercion function,
+-- could behave differently
+create table bpchar_tbl (f1 character(16) unique, f2 bpchar);
+create view bpchar_view as
+ select
+ f1, f1::character(14) as f114, f1::bpchar as f1n,
+ f2, f2::character(14) as f214, f2::bpchar as f2n
+ from bpchar_tbl;
+\d+ bpchar_view
+ View "public.bpchar_view"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+--------+---------------+-----------+----------+---------+----------+-------------
+ f1 | character(16) | | | | extended |
+ f114 | character(14) | | | | extended |
+ f1n | bpchar | | | | extended |
+ f2 | bpchar | | | | extended |
+ f214 | character(14) | | | | extended |
+ f2n | bpchar | | | | extended |
+View definition:
+ SELECT bpchar_tbl.f1,
+ bpchar_tbl.f1::character(14) AS f114,
+ bpchar_tbl.f1::bpchar AS f1n,
+ bpchar_tbl.f2,
+ bpchar_tbl.f2::character(14) AS f214,
+ bpchar_tbl.f2 AS f2n
+ FROM bpchar_tbl;
+
+explain (verbose, costs off) select * from bpchar_view
+ where f1::bpchar = 'foo';
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------
+ Index Scan using bpchar_tbl_f1_key on public.bpchar_tbl
+ Output: bpchar_tbl.f1, (bpchar_tbl.f1)::character(14), (bpchar_tbl.f1)::bpchar, bpchar_tbl.f2, (bpchar_tbl.f2)::character(14), bpchar_tbl.f2
+ Index Cond: ((bpchar_tbl.f1)::bpchar = 'foo'::bpchar)
+(3 rows)
+
+rollback;
+--
+-- Ordinarily, IN/NOT IN can be converted to a ScalarArrayOpExpr
+-- with a suitably-chosen array type.
+--
+explain (verbose, costs off)
+select random() IN (1, 4, 8.0);
+ QUERY PLAN
+------------------------------------------------------------
+ Result
+ Output: (random() = ANY ('{1,4,8}'::double precision[]))
+(2 rows)
+
+explain (verbose, costs off)
+select random()::int IN (1, 4, 8.0);
+ QUERY PLAN
+---------------------------------------------------------------------------
+ Result
+ Output: (((random())::integer)::numeric = ANY ('{1,4,8.0}'::numeric[]))
+(2 rows)
+
+-- However, if there's not a common supertype for the IN elements,
+-- we should instead try to produce "x = v1 OR x = v2 OR ...".
+-- In most cases that'll fail for lack of all the requisite = operators,
+-- but it can succeed sometimes. So this should complain about lack of
+-- an = operator, not about cast failure.
+select '(0,0)'::point in ('(0,0,0,0)'::box, point(0,0));
+ERROR: operator does not exist: point = box
+LINE 1: select '(0,0)'::point in ('(0,0,0,0)'::box, point(0,0));
+ ^
+HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
+--
+-- Tests for ScalarArrayOpExpr with a hashfn
+--
+-- create a stable function so that the tests below are not
+-- evaluated using the planner's constant folding.
+begin;
+create function return_int_input(int) returns int as $$
+begin
+ return $1;
+end;
+$$ language plpgsql stable;
+create function return_text_input(text) returns text as $$
+begin
+ return $1;
+end;
+$$ language plpgsql stable;
+select return_int_input(1) in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1);
+ ?column?
+----------
+ t
+(1 row)
+
+select return_int_input(1) in (10, 9, 2, 8, 3, 7, 4, 6, 5, null);
+ ?column?
+----------
+
+(1 row)
+
+select return_int_input(1) in (null, null, null, null, null, null, null, null, null, null, null);
+ ?column?
+----------
+
+(1 row)
+
+select return_int_input(1) in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1, null);
+ ?column?
+----------
+ t
+(1 row)
+
+select return_int_input(null::int) in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1);
+ ?column?
+----------
+
+(1 row)
+
+select return_int_input(null::int) in (10, 9, 2, 8, 3, 7, 4, 6, 5, null);
+ ?column?
+----------
+
+(1 row)
+
+select return_text_input('a') in ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j');
+ ?column?
+----------
+ t
+(1 row)
+
+rollback;
+-- Test with non-strict equality function.
+-- We need to create our own type for this.
+begin;
+create type myint;
+create function myintin(cstring) returns myint strict immutable language
+ internal as 'int4in';
+NOTICE: return type myint is only a shell
+create function myintout(myint) returns cstring strict immutable language
+ internal as 'int4out';
+NOTICE: argument type myint is only a shell
+create function myinthash(myint) returns integer strict immutable language
+ internal as 'hashint4';
+NOTICE: argument type myint is only a shell
+create type myint (input = myintin, output = myintout, like = int4);
+create cast (int4 as myint) without function;
+create cast (myint as int4) without function;
+create function myinteq(myint, myint) returns bool as $$
+begin
+ if $1 is null and $2 is null then
+ return true;
+ else
+ return $1::int = $2::int;
+ end if;
+end;
+$$ language plpgsql immutable;
+create operator = (
+ leftarg = myint,
+ rightarg = myint,
+ commutator = =,
+ negator = <>,
+ procedure = myinteq,
+ restrict = eqsel,
+ join = eqjoinsel,
+ merges
+);
+create operator class myint_ops
+default for type myint using hash as
+ operator 1 = (myint, myint),
+ function 1 myinthash(myint);
+create table inttest (a myint);
+insert into inttest values(1::myint),(null);
+-- try an array with enough elements to cause hashing
+select * from inttest where a in (1::myint,2::myint,3::myint,4::myint,5::myint,6::myint,7::myint,8::myint,9::myint, null);
+ a
+---
+ 1
+
+(2 rows)
+
+-- ensure the result matched with the non-hashed version. We simply remove
+-- some array elements so that we don't reach the hashing threshold.
+select * from inttest where a in (1::myint,2::myint,3::myint,4::myint,5::myint, null);
+ a
+---
+ 1
+
+(2 rows)
+
+rollback;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/expressions.sql b/ydb/library/yql/tests/postgresql/original/cases/expressions.sql
new file mode 100644
index 0000000000..f9f9f97efa
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/expressions.sql
@@ -0,0 +1,205 @@
+--
+-- expression evaluation tests that don't fit into a more specific file
+--
+
+--
+-- Tests for SQLVAlueFunction
+--
+
+
+-- current_date (always matches because of transactional behaviour)
+SELECT date(now())::text = current_date::text;
+
+
+-- current_time / localtime
+SELECT now()::timetz::text = current_time::text;
+SELECT now()::timetz(4)::text = current_time(4)::text;
+SELECT now()::time::text = localtime::text;
+SELECT now()::time(3)::text = localtime(3)::text;
+
+-- current_timestamp / localtimestamp (always matches because of transactional behaviour)
+SELECT current_timestamp = NOW();
+-- precision
+SELECT length(current_timestamp::text) >= length(current_timestamp(0)::text);
+-- localtimestamp
+SELECT now()::timestamp::text = localtimestamp::text;
+
+-- current_role/user/user is tested in rolnames.sql
+
+-- current database / catalog
+SELECT current_catalog = current_database();
+
+-- current_schema
+SELECT current_schema;
+SET search_path = 'notme';
+SELECT current_schema;
+SET search_path = 'pg_catalog';
+SELECT current_schema;
+RESET search_path;
+
+
+--
+-- Tests for BETWEEN
+--
+
+explain (costs off)
+select count(*) from date_tbl
+ where f1 between '1997-01-01' and '1998-01-01';
+select count(*) from date_tbl
+ where f1 between '1997-01-01' and '1998-01-01';
+
+explain (costs off)
+select count(*) from date_tbl
+ where f1 not between '1997-01-01' and '1998-01-01';
+select count(*) from date_tbl
+ where f1 not between '1997-01-01' and '1998-01-01';
+
+explain (costs off)
+select count(*) from date_tbl
+ where f1 between symmetric '1997-01-01' and '1998-01-01';
+select count(*) from date_tbl
+ where f1 between symmetric '1997-01-01' and '1998-01-01';
+
+explain (costs off)
+select count(*) from date_tbl
+ where f1 not between symmetric '1997-01-01' and '1998-01-01';
+select count(*) from date_tbl
+ where f1 not between symmetric '1997-01-01' and '1998-01-01';
+
+
+--
+-- Test parsing of a no-op cast to a type with unspecified typmod
+--
+begin;
+
+create table numeric_tbl (f1 numeric(18,3), f2 numeric);
+
+create view numeric_view as
+ select
+ f1, f1::numeric(16,4) as f1164, f1::numeric as f1n,
+ f2, f2::numeric(16,4) as f2164, f2::numeric as f2n
+ from numeric_tbl;
+
+\d+ numeric_view
+
+explain (verbose, costs off) select * from numeric_view;
+
+-- bpchar, lacking planner support for its length coercion function,
+-- could behave differently
+
+create table bpchar_tbl (f1 character(16) unique, f2 bpchar);
+
+create view bpchar_view as
+ select
+ f1, f1::character(14) as f114, f1::bpchar as f1n,
+ f2, f2::character(14) as f214, f2::bpchar as f2n
+ from bpchar_tbl;
+
+\d+ bpchar_view
+
+explain (verbose, costs off) select * from bpchar_view
+ where f1::bpchar = 'foo';
+
+rollback;
+
+
+--
+-- Ordinarily, IN/NOT IN can be converted to a ScalarArrayOpExpr
+-- with a suitably-chosen array type.
+--
+explain (verbose, costs off)
+select random() IN (1, 4, 8.0);
+explain (verbose, costs off)
+select random()::int IN (1, 4, 8.0);
+-- However, if there's not a common supertype for the IN elements,
+-- we should instead try to produce "x = v1 OR x = v2 OR ...".
+-- In most cases that'll fail for lack of all the requisite = operators,
+-- but it can succeed sometimes. So this should complain about lack of
+-- an = operator, not about cast failure.
+select '(0,0)'::point in ('(0,0,0,0)'::box, point(0,0));
+
+
+--
+-- Tests for ScalarArrayOpExpr with a hashfn
+--
+
+-- create a stable function so that the tests below are not
+-- evaluated using the planner's constant folding.
+begin;
+
+create function return_int_input(int) returns int as $$
+begin
+ return $1;
+end;
+$$ language plpgsql stable;
+
+create function return_text_input(text) returns text as $$
+begin
+ return $1;
+end;
+$$ language plpgsql stable;
+
+select return_int_input(1) in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1);
+select return_int_input(1) in (10, 9, 2, 8, 3, 7, 4, 6, 5, null);
+select return_int_input(1) in (null, null, null, null, null, null, null, null, null, null, null);
+select return_int_input(1) in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1, null);
+select return_int_input(null::int) in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1);
+select return_int_input(null::int) in (10, 9, 2, 8, 3, 7, 4, 6, 5, null);
+select return_text_input('a') in ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j');
+
+rollback;
+
+-- Test with non-strict equality function.
+-- We need to create our own type for this.
+
+begin;
+
+create type myint;
+create function myintin(cstring) returns myint strict immutable language
+ internal as 'int4in';
+create function myintout(myint) returns cstring strict immutable language
+ internal as 'int4out';
+create function myinthash(myint) returns integer strict immutable language
+ internal as 'hashint4';
+
+create type myint (input = myintin, output = myintout, like = int4);
+
+create cast (int4 as myint) without function;
+create cast (myint as int4) without function;
+
+create function myinteq(myint, myint) returns bool as $$
+begin
+ if $1 is null and $2 is null then
+ return true;
+ else
+ return $1::int = $2::int;
+ end if;
+end;
+$$ language plpgsql immutable;
+
+create operator = (
+ leftarg = myint,
+ rightarg = myint,
+ commutator = =,
+ negator = <>,
+ procedure = myinteq,
+ restrict = eqsel,
+ join = eqjoinsel,
+ merges
+);
+
+create operator class myint_ops
+default for type myint using hash as
+ operator 1 = (myint, myint),
+ function 1 myinthash(myint);
+
+create table inttest (a myint);
+insert into inttest values(1::myint),(null);
+
+-- try an array with enough elements to cause hashing
+select * from inttest where a in (1::myint,2::myint,3::myint,4::myint,5::myint,6::myint,7::myint,8::myint,9::myint, null);
+-- ensure the result matched with the non-hashed version. We simply remove
+-- some array elements so that we don't reach the hashing threshold.
+select * from inttest where a in (1::myint,2::myint,3::myint,4::myint,5::myint, null);
+
+rollback;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/float4.out b/ydb/library/yql/tests/postgresql/original/cases/float4.out
new file mode 100644
index 0000000000..6ad5d00aa2
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/float4.out
@@ -0,0 +1,961 @@
+--
+-- FLOAT4
+--
+CREATE TABLE FLOAT4_TBL (f1 float4);
+INSERT INTO FLOAT4_TBL(f1) VALUES (' 0.0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 ');
+INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 ');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20');
+-- test for over and under flow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
+ERROR: "10e70" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70');
+ERROR: "-10e70" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70');
+ERROR: "10e-70" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70');
+ERROR: "-10e-70" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'::float8);
+ERROR: value out of range: overflow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'::float8);
+ERROR: value out of range: overflow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'::float8);
+ERROR: value out of range: underflow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'::float8);
+ERROR: value out of range: underflow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400');
+ERROR: "10e400" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e400');
+ERROR: "-10e400" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e400');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400');
+ERROR: "10e-400" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400');
+ERROR: "-10e-400" is out of range for type real
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400');
+ ^
+-- bad input
+INSERT INTO FLOAT4_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type real: ""
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type real: " "
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz');
+ERROR: invalid input syntax for type real: "xyz"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0');
+ERROR: invalid input syntax for type real: "5.0.0"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0');
+ERROR: invalid input syntax for type real: "5 . 0"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0');
+ERROR: invalid input syntax for type real: "5. 0"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0');
+ERROR: invalid input syntax for type real: " - 3.0"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0');
+ ^
+INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5');
+ERROR: invalid input syntax for type real: "123 5"
+LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5');
+ ^
+-- special inputs
+SELECT 'NaN'::float4;
+ float4
+--------
+ NaN
+(1 row)
+
+SELECT 'nan'::float4;
+ float4
+--------
+ NaN
+(1 row)
+
+SELECT ' NAN '::float4;
+ float4
+--------
+ NaN
+(1 row)
+
+SELECT 'infinity'::float4;
+ float4
+----------
+ Infinity
+(1 row)
+
+SELECT ' -INFINiTY '::float4;
+ float4
+-----------
+ -Infinity
+(1 row)
+
+-- bad special inputs
+SELECT 'N A N'::float4;
+ERROR: invalid input syntax for type real: "N A N"
+LINE 1: SELECT 'N A N'::float4;
+ ^
+SELECT 'NaN x'::float4;
+ERROR: invalid input syntax for type real: "NaN x"
+LINE 1: SELECT 'NaN x'::float4;
+ ^
+SELECT ' INFINITY x'::float4;
+ERROR: invalid input syntax for type real: " INFINITY x"
+LINE 1: SELECT ' INFINITY x'::float4;
+ ^
+SELECT 'Infinity'::float4 + 100.0;
+ ?column?
+----------
+ Infinity
+(1 row)
+
+SELECT 'Infinity'::float4 / 'Infinity'::float4;
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT '42'::float4 / 'Infinity'::float4;
+ ?column?
+----------
+ 0
+(1 row)
+
+SELECT 'nan'::float4 / 'nan'::float4;
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT 'nan'::float4 / '0'::float4;
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT 'nan'::numeric::float4;
+ float4
+--------
+ NaN
+(1 row)
+
+SELECT * FROM FLOAT4_TBL;
+ f1
+---------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345679e+20
+ 1.2345679e-20
+(5 rows)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <> '1004.3';
+ f1
+---------------
+ 0
+ -34.84
+ 1.2345679e+20
+ 1.2345679e-20
+(4 rows)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 = '1004.3';
+ f1
+--------
+ 1004.3
+(1 row)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' > f.f1;
+ f1
+---------------
+ 0
+ -34.84
+ 1.2345679e-20
+(3 rows)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 < '1004.3';
+ f1
+---------------
+ 0
+ -34.84
+ 1.2345679e-20
+(3 rows)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' >= f.f1;
+ f1
+---------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345679e-20
+(4 rows)
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <= '1004.3';
+ f1
+---------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345679e-20
+(4 rows)
+
+SELECT f.f1, f.f1 * '-10' AS x FROM FLOAT4_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+---------------+----------------
+ 1004.3 | -10043
+ 1.2345679e+20 | -1.2345678e+21
+ 1.2345679e-20 | -1.2345678e-19
+(3 rows)
+
+SELECT f.f1, f.f1 + '-10' AS x FROM FLOAT4_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+---------------+---------------
+ 1004.3 | 994.3
+ 1.2345679e+20 | 1.2345679e+20
+ 1.2345679e-20 | -10
+(3 rows)
+
+SELECT f.f1, f.f1 / '-10' AS x FROM FLOAT4_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+---------------+----------------
+ 1004.3 | -100.43
+ 1.2345679e+20 | -1.2345679e+19
+ 1.2345679e-20 | -1.2345679e-21
+(3 rows)
+
+SELECT f.f1, f.f1 - '-10' AS x FROM FLOAT4_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+---------------+---------------
+ 1004.3 | 1014.3
+ 1.2345679e+20 | 1.2345679e+20
+ 1.2345679e-20 | 10
+(3 rows)
+
+-- test divide by zero
+SELECT f.f1 / '0.0' from FLOAT4_TBL f;
+ERROR: division by zero
+SELECT * FROM FLOAT4_TBL;
+ f1
+---------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345679e+20
+ 1.2345679e-20
+(5 rows)
+
+-- test the unary float4abs operator
+SELECT f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f;
+ f1 | abs_f1
+---------------+---------------
+ 0 | 0
+ 1004.3 | 1004.3
+ -34.84 | 34.84
+ 1.2345679e+20 | 1.2345679e+20
+ 1.2345679e-20 | 1.2345679e-20
+(5 rows)
+
+UPDATE FLOAT4_TBL
+ SET f1 = FLOAT4_TBL.f1 * '-1'
+ WHERE FLOAT4_TBL.f1 > '0.0';
+SELECT * FROM FLOAT4_TBL;
+ f1
+----------------
+ 0
+ -34.84
+ -1004.3
+ -1.2345679e+20
+ -1.2345679e-20
+(5 rows)
+
+-- test edge-case coercions to integer
+SELECT '32767.4'::float4::int2;
+ int2
+-------
+ 32767
+(1 row)
+
+SELECT '32767.6'::float4::int2;
+ERROR: smallint out of range
+SELECT '-32768.4'::float4::int2;
+ int2
+--------
+ -32768
+(1 row)
+
+SELECT '-32768.6'::float4::int2;
+ERROR: smallint out of range
+SELECT '2147483520'::float4::int4;
+ int4
+------------
+ 2147483520
+(1 row)
+
+SELECT '2147483647'::float4::int4;
+ERROR: integer out of range
+SELECT '-2147483648.5'::float4::int4;
+ int4
+-------------
+ -2147483648
+(1 row)
+
+SELECT '-2147483900'::float4::int4;
+ERROR: integer out of range
+SELECT '9223369837831520256'::float4::int8;
+ int8
+---------------------
+ 9223369837831520256
+(1 row)
+
+SELECT '9223372036854775807'::float4::int8;
+ERROR: bigint out of range
+SELECT '-9223372036854775808.5'::float4::int8;
+ int8
+----------------------
+ -9223372036854775808
+(1 row)
+
+SELECT '-9223380000000000000'::float4::int8;
+ERROR: bigint out of range
+-- Test for correct input rounding in edge cases.
+-- These lists are from Paxson 1991, excluding subnormals and
+-- inputs of over 9 sig. digits.
+SELECT float4send('5e-20'::float4);
+ float4send
+------------
+ \x1f6c1e4a
+(1 row)
+
+SELECT float4send('67e14'::float4);
+ float4send
+------------
+ \x59be6cea
+(1 row)
+
+SELECT float4send('985e15'::float4);
+ float4send
+------------
+ \x5d5ab6c4
+(1 row)
+
+SELECT float4send('55895e-16'::float4);
+ float4send
+------------
+ \x2cc4a9bd
+(1 row)
+
+SELECT float4send('7038531e-32'::float4);
+ float4send
+------------
+ \x15ae43fd
+(1 row)
+
+SELECT float4send('702990899e-20'::float4);
+ float4send
+------------
+ \x2cf757ca
+(1 row)
+
+SELECT float4send('3e-23'::float4);
+ float4send
+------------
+ \x1a111234
+(1 row)
+
+SELECT float4send('57e18'::float4);
+ float4send
+------------
+ \x6045c22c
+(1 row)
+
+SELECT float4send('789e-35'::float4);
+ float4send
+------------
+ \x0a23de70
+(1 row)
+
+SELECT float4send('2539e-18'::float4);
+ float4send
+------------
+ \x2736f449
+(1 row)
+
+SELECT float4send('76173e28'::float4);
+ float4send
+------------
+ \x7616398a
+(1 row)
+
+SELECT float4send('887745e-11'::float4);
+ float4send
+------------
+ \x3714f05c
+(1 row)
+
+SELECT float4send('5382571e-37'::float4);
+ float4send
+------------
+ \x0d2eaca7
+(1 row)
+
+SELECT float4send('82381273e-35'::float4);
+ float4send
+------------
+ \x128289d1
+(1 row)
+
+SELECT float4send('750486563e-38'::float4);
+ float4send
+------------
+ \x0f18377e
+(1 row)
+
+-- Test that the smallest possible normalized input value inputs
+-- correctly, either in 9-significant-digit or shortest-decimal
+-- format.
+--
+-- exact val is 1.1754943508...
+-- shortest val is 1.1754944000
+-- midpoint to next val is 1.1754944208...
+SELECT float4send('1.17549435e-38'::float4);
+ float4send
+------------
+ \x00800000
+(1 row)
+
+SELECT float4send('1.1754944e-38'::float4);
+ float4send
+------------
+ \x00800000
+(1 row)
+
+-- test output (and round-trip safety) of various values.
+-- To ensure we're testing what we think we're testing, start with
+-- float values specified by bit patterns (as a useful side effect,
+-- this means we'll fail on non-IEEE platforms).
+create type xfloat4;
+create function xfloat4in(cstring) returns xfloat4 immutable strict
+ language internal as 'int4in';
+NOTICE: return type xfloat4 is only a shell
+create function xfloat4out(xfloat4) returns cstring immutable strict
+ language internal as 'int4out';
+NOTICE: argument type xfloat4 is only a shell
+create type xfloat4 (input = xfloat4in, output = xfloat4out, like = float4);
+create cast (xfloat4 as float4) without function;
+create cast (float4 as xfloat4) without function;
+create cast (xfloat4 as integer) without function;
+create cast (integer as xfloat4) without function;
+-- float4: seeeeeee emmmmmmm mmmmmmmm mmmmmmmm
+-- we don't care to assume the platform's strtod() handles subnormals
+-- correctly; those are "use at your own risk". However we do test
+-- subnormal outputs, since those are under our control.
+with testdata(bits) as (values
+ -- small subnormals
+ (x'00000001'),
+ (x'00000002'), (x'00000003'),
+ (x'00000010'), (x'00000011'), (x'00000100'), (x'00000101'),
+ (x'00004000'), (x'00004001'), (x'00080000'), (x'00080001'),
+ -- stress values
+ (x'0053c4f4'), -- 7693e-42
+ (x'006c85c4'), -- 996622e-44
+ (x'0041ca76'), -- 60419369e-46
+ (x'004b7678'), -- 6930161142e-48
+ -- taken from upstream testsuite
+ (x'00000007'),
+ (x'00424fe2'),
+ -- borderline between subnormal and normal
+ (x'007ffff0'), (x'007ffff1'), (x'007ffffe'), (x'007fffff'))
+select float4send(flt) as ibits,
+ flt
+ from (select bits::integer::xfloat4::float4 as flt
+ from testdata
+ offset 0) s;
+ ibits | flt
+------------+---------------
+ \x00000001 | 1e-45
+ \x00000002 | 3e-45
+ \x00000003 | 4e-45
+ \x00000010 | 2.2e-44
+ \x00000011 | 2.4e-44
+ \x00000100 | 3.59e-43
+ \x00000101 | 3.6e-43
+ \x00004000 | 2.2959e-41
+ \x00004001 | 2.296e-41
+ \x00080000 | 7.34684e-40
+ \x00080001 | 7.34685e-40
+ \x0053c4f4 | 7.693e-39
+ \x006c85c4 | 9.96622e-39
+ \x0041ca76 | 6.041937e-39
+ \x004b7678 | 6.930161e-39
+ \x00000007 | 1e-44
+ \x00424fe2 | 6.0898e-39
+ \x007ffff0 | 1.1754921e-38
+ \x007ffff1 | 1.1754922e-38
+ \x007ffffe | 1.1754941e-38
+ \x007fffff | 1.1754942e-38
+(21 rows)
+
+with testdata(bits) as (values
+ (x'00000000'),
+ -- smallest normal values
+ (x'00800000'), (x'00800001'), (x'00800004'), (x'00800005'),
+ (x'00800006'),
+ -- small normal values chosen for short vs. long output
+ (x'008002f1'), (x'008002f2'), (x'008002f3'),
+ (x'00800e17'), (x'00800e18'), (x'00800e19'),
+ -- assorted values (random mantissae)
+ (x'01000001'), (x'01102843'), (x'01a52c98'),
+ (x'0219c229'), (x'02e4464d'), (x'037343c1'), (x'03a91b36'),
+ (x'047ada65'), (x'0496fe87'), (x'0550844f'), (x'05999da3'),
+ (x'060ea5e2'), (x'06e63c45'), (x'07f1e548'), (x'0fc5282b'),
+ (x'1f850283'), (x'2874a9d6'),
+ -- values around 5e-08
+ (x'3356bf94'), (x'3356bf95'), (x'3356bf96'),
+ -- around 1e-07
+ (x'33d6bf94'), (x'33d6bf95'), (x'33d6bf96'),
+ -- around 3e-07 .. 1e-04
+ (x'34a10faf'), (x'34a10fb0'), (x'34a10fb1'),
+ (x'350637bc'), (x'350637bd'), (x'350637be'),
+ (x'35719786'), (x'35719787'), (x'35719788'),
+ (x'358637bc'), (x'358637bd'), (x'358637be'),
+ (x'36a7c5ab'), (x'36a7c5ac'), (x'36a7c5ad'),
+ (x'3727c5ab'), (x'3727c5ac'), (x'3727c5ad'),
+ -- format crossover at 1e-04
+ (x'38d1b714'), (x'38d1b715'), (x'38d1b716'),
+ (x'38d1b717'), (x'38d1b718'), (x'38d1b719'),
+ (x'38d1b71a'), (x'38d1b71b'), (x'38d1b71c'),
+ (x'38d1b71d'),
+ --
+ (x'38dffffe'), (x'38dfffff'), (x'38e00000'),
+ (x'38efffff'), (x'38f00000'), (x'38f00001'),
+ (x'3a83126e'), (x'3a83126f'), (x'3a831270'),
+ (x'3c23d709'), (x'3c23d70a'), (x'3c23d70b'),
+ (x'3dcccccc'), (x'3dcccccd'), (x'3dccccce'),
+ -- chosen to need 9 digits for 3dcccd70
+ (x'3dcccd6f'), (x'3dcccd70'), (x'3dcccd71'),
+ --
+ (x'3effffff'), (x'3f000000'), (x'3f000001'),
+ (x'3f333332'), (x'3f333333'), (x'3f333334'),
+ -- approach 1.0 with increasing numbers of 9s
+ (x'3f666665'), (x'3f666666'), (x'3f666667'),
+ (x'3f7d70a3'), (x'3f7d70a4'), (x'3f7d70a5'),
+ (x'3f7fbe76'), (x'3f7fbe77'), (x'3f7fbe78'),
+ (x'3f7ff971'), (x'3f7ff972'), (x'3f7ff973'),
+ (x'3f7fff57'), (x'3f7fff58'), (x'3f7fff59'),
+ (x'3f7fffee'), (x'3f7fffef'),
+ -- values very close to 1
+ (x'3f7ffff0'), (x'3f7ffff1'), (x'3f7ffff2'),
+ (x'3f7ffff3'), (x'3f7ffff4'), (x'3f7ffff5'),
+ (x'3f7ffff6'), (x'3f7ffff7'), (x'3f7ffff8'),
+ (x'3f7ffff9'), (x'3f7ffffa'), (x'3f7ffffb'),
+ (x'3f7ffffc'), (x'3f7ffffd'), (x'3f7ffffe'),
+ (x'3f7fffff'),
+ (x'3f800000'),
+ (x'3f800001'), (x'3f800002'), (x'3f800003'),
+ (x'3f800004'), (x'3f800005'), (x'3f800006'),
+ (x'3f800007'), (x'3f800008'), (x'3f800009'),
+ -- values 1 to 1.1
+ (x'3f80000f'), (x'3f800010'), (x'3f800011'),
+ (x'3f800012'), (x'3f800013'), (x'3f800014'),
+ (x'3f800017'), (x'3f800018'), (x'3f800019'),
+ (x'3f80001a'), (x'3f80001b'), (x'3f80001c'),
+ (x'3f800029'), (x'3f80002a'), (x'3f80002b'),
+ (x'3f800053'), (x'3f800054'), (x'3f800055'),
+ (x'3f800346'), (x'3f800347'), (x'3f800348'),
+ (x'3f8020c4'), (x'3f8020c5'), (x'3f8020c6'),
+ (x'3f8147ad'), (x'3f8147ae'), (x'3f8147af'),
+ (x'3f8ccccc'), (x'3f8ccccd'), (x'3f8cccce'),
+ --
+ (x'3fc90fdb'), -- pi/2
+ (x'402df854'), -- e
+ (x'40490fdb'), -- pi
+ --
+ (x'409fffff'), (x'40a00000'), (x'40a00001'),
+ (x'40afffff'), (x'40b00000'), (x'40b00001'),
+ (x'411fffff'), (x'41200000'), (x'41200001'),
+ (x'42c7ffff'), (x'42c80000'), (x'42c80001'),
+ (x'4479ffff'), (x'447a0000'), (x'447a0001'),
+ (x'461c3fff'), (x'461c4000'), (x'461c4001'),
+ (x'47c34fff'), (x'47c35000'), (x'47c35001'),
+ (x'497423ff'), (x'49742400'), (x'49742401'),
+ (x'4b18967f'), (x'4b189680'), (x'4b189681'),
+ (x'4cbebc1f'), (x'4cbebc20'), (x'4cbebc21'),
+ (x'4e6e6b27'), (x'4e6e6b28'), (x'4e6e6b29'),
+ (x'501502f8'), (x'501502f9'), (x'501502fa'),
+ (x'51ba43b6'), (x'51ba43b7'), (x'51ba43b8'),
+ -- stress values
+ (x'1f6c1e4a'), -- 5e-20
+ (x'59be6cea'), -- 67e14
+ (x'5d5ab6c4'), -- 985e15
+ (x'2cc4a9bd'), -- 55895e-16
+ (x'15ae43fd'), -- 7038531e-32
+ (x'2cf757ca'), -- 702990899e-20
+ (x'665ba998'), -- 25933168707e13
+ (x'743c3324'), -- 596428896559e20
+ -- exercise fixed-point memmoves
+ (x'47f1205a'),
+ (x'4640e6ae'),
+ (x'449a5225'),
+ (x'42f6e9d5'),
+ (x'414587dd'),
+ (x'3f9e064b'),
+ -- these cases come from the upstream's testsuite
+ -- BoundaryRoundEven
+ (x'4c000004'),
+ (x'50061c46'),
+ (x'510006a8'),
+ -- ExactValueRoundEven
+ (x'48951f84'),
+ (x'45fd1840'),
+ -- LotsOfTrailingZeros
+ (x'39800000'),
+ (x'3b200000'),
+ (x'3b900000'),
+ (x'3bd00000'),
+ -- Regression
+ (x'63800000'),
+ (x'4b000000'),
+ (x'4b800000'),
+ (x'4c000001'),
+ (x'4c800b0d'),
+ (x'00d24584'),
+ (x'00d90b88'),
+ (x'45803f34'),
+ (x'4f9f24f7'),
+ (x'3a8722c3'),
+ (x'5c800041'),
+ (x'15ae43fd'),
+ (x'5d4cccfb'),
+ (x'4c800001'),
+ (x'57800ed8'),
+ (x'5f000000'),
+ (x'700000f0'),
+ (x'5f23e9ac'),
+ (x'5e9502f9'),
+ (x'5e8012b1'),
+ (x'3c000028'),
+ (x'60cde861'),
+ (x'03aa2a50'),
+ (x'43480000'),
+ (x'4c000000'),
+ -- LooksLikePow5
+ (x'5D1502F9'),
+ (x'5D9502F9'),
+ (x'5E1502F9'),
+ -- OutputLength
+ (x'3f99999a'),
+ (x'3f9d70a4'),
+ (x'3f9df3b6'),
+ (x'3f9e0419'),
+ (x'3f9e0610'),
+ (x'3f9e064b'),
+ (x'3f9e0651'),
+ (x'03d20cfe')
+)
+select float4send(flt) as ibits,
+ flt,
+ flt::text::float4 as r_flt,
+ float4send(flt::text::float4) as obits,
+ float4send(flt::text::float4) = float4send(flt) as correct
+ from (select bits::integer::xfloat4::float4 as flt
+ from testdata
+ offset 0) s;
+ ibits | flt | r_flt | obits | correct
+------------+----------------+----------------+------------+---------
+ \x00000000 | 0 | 0 | \x00000000 | t
+ \x00800000 | 1.1754944e-38 | 1.1754944e-38 | \x00800000 | t
+ \x00800001 | 1.1754945e-38 | 1.1754945e-38 | \x00800001 | t
+ \x00800004 | 1.1754949e-38 | 1.1754949e-38 | \x00800004 | t
+ \x00800005 | 1.175495e-38 | 1.175495e-38 | \x00800005 | t
+ \x00800006 | 1.1754952e-38 | 1.1754952e-38 | \x00800006 | t
+ \x008002f1 | 1.1755999e-38 | 1.1755999e-38 | \x008002f1 | t
+ \x008002f2 | 1.1756e-38 | 1.1756e-38 | \x008002f2 | t
+ \x008002f3 | 1.1756001e-38 | 1.1756001e-38 | \x008002f3 | t
+ \x00800e17 | 1.1759998e-38 | 1.1759998e-38 | \x00800e17 | t
+ \x00800e18 | 1.176e-38 | 1.176e-38 | \x00800e18 | t
+ \x00800e19 | 1.1760001e-38 | 1.1760001e-38 | \x00800e19 | t
+ \x01000001 | 2.350989e-38 | 2.350989e-38 | \x01000001 | t
+ \x01102843 | 2.647751e-38 | 2.647751e-38 | \x01102843 | t
+ \x01a52c98 | 6.0675416e-38 | 6.0675416e-38 | \x01a52c98 | t
+ \x0219c229 | 1.1296386e-37 | 1.1296386e-37 | \x0219c229 | t
+ \x02e4464d | 3.354194e-37 | 3.354194e-37 | \x02e4464d | t
+ \x037343c1 | 7.148906e-37 | 7.148906e-37 | \x037343c1 | t
+ \x03a91b36 | 9.939175e-37 | 9.939175e-37 | \x03a91b36 | t
+ \x047ada65 | 2.948764e-36 | 2.948764e-36 | \x047ada65 | t
+ \x0496fe87 | 3.5498577e-36 | 3.5498577e-36 | \x0496fe87 | t
+ \x0550844f | 9.804414e-36 | 9.804414e-36 | \x0550844f | t
+ \x05999da3 | 1.4445957e-35 | 1.4445957e-35 | \x05999da3 | t
+ \x060ea5e2 | 2.6829103e-35 | 2.6829103e-35 | \x060ea5e2 | t
+ \x06e63c45 | 8.660494e-35 | 8.660494e-35 | \x06e63c45 | t
+ \x07f1e548 | 3.639641e-34 | 3.639641e-34 | \x07f1e548 | t
+ \x0fc5282b | 1.9441172e-29 | 1.9441172e-29 | \x0fc5282b | t
+ \x1f850283 | 5.6331846e-20 | 5.6331846e-20 | \x1f850283 | t
+ \x2874a9d6 | 1.3581548e-14 | 1.3581548e-14 | \x2874a9d6 | t
+ \x3356bf94 | 4.9999997e-08 | 4.9999997e-08 | \x3356bf94 | t
+ \x3356bf95 | 5e-08 | 5e-08 | \x3356bf95 | t
+ \x3356bf96 | 5.0000004e-08 | 5.0000004e-08 | \x3356bf96 | t
+ \x33d6bf94 | 9.9999994e-08 | 9.9999994e-08 | \x33d6bf94 | t
+ \x33d6bf95 | 1e-07 | 1e-07 | \x33d6bf95 | t
+ \x33d6bf96 | 1.0000001e-07 | 1.0000001e-07 | \x33d6bf96 | t
+ \x34a10faf | 2.9999998e-07 | 2.9999998e-07 | \x34a10faf | t
+ \x34a10fb0 | 3e-07 | 3e-07 | \x34a10fb0 | t
+ \x34a10fb1 | 3.0000004e-07 | 3.0000004e-07 | \x34a10fb1 | t
+ \x350637bc | 4.9999994e-07 | 4.9999994e-07 | \x350637bc | t
+ \x350637bd | 5e-07 | 5e-07 | \x350637bd | t
+ \x350637be | 5.0000006e-07 | 5.0000006e-07 | \x350637be | t
+ \x35719786 | 8.999999e-07 | 8.999999e-07 | \x35719786 | t
+ \x35719787 | 9e-07 | 9e-07 | \x35719787 | t
+ \x35719788 | 9.0000003e-07 | 9.0000003e-07 | \x35719788 | t
+ \x358637bc | 9.999999e-07 | 9.999999e-07 | \x358637bc | t
+ \x358637bd | 1e-06 | 1e-06 | \x358637bd | t
+ \x358637be | 1.0000001e-06 | 1.0000001e-06 | \x358637be | t
+ \x36a7c5ab | 4.9999994e-06 | 4.9999994e-06 | \x36a7c5ab | t
+ \x36a7c5ac | 5e-06 | 5e-06 | \x36a7c5ac | t
+ \x36a7c5ad | 5.0000003e-06 | 5.0000003e-06 | \x36a7c5ad | t
+ \x3727c5ab | 9.999999e-06 | 9.999999e-06 | \x3727c5ab | t
+ \x3727c5ac | 1e-05 | 1e-05 | \x3727c5ac | t
+ \x3727c5ad | 1.0000001e-05 | 1.0000001e-05 | \x3727c5ad | t
+ \x38d1b714 | 9.9999976e-05 | 9.9999976e-05 | \x38d1b714 | t
+ \x38d1b715 | 9.999998e-05 | 9.999998e-05 | \x38d1b715 | t
+ \x38d1b716 | 9.999999e-05 | 9.999999e-05 | \x38d1b716 | t
+ \x38d1b717 | 0.0001 | 0.0001 | \x38d1b717 | t
+ \x38d1b718 | 0.000100000005 | 0.000100000005 | \x38d1b718 | t
+ \x38d1b719 | 0.00010000001 | 0.00010000001 | \x38d1b719 | t
+ \x38d1b71a | 0.00010000002 | 0.00010000002 | \x38d1b71a | t
+ \x38d1b71b | 0.00010000003 | 0.00010000003 | \x38d1b71b | t
+ \x38d1b71c | 0.000100000034 | 0.000100000034 | \x38d1b71c | t
+ \x38d1b71d | 0.00010000004 | 0.00010000004 | \x38d1b71d | t
+ \x38dffffe | 0.00010681151 | 0.00010681151 | \x38dffffe | t
+ \x38dfffff | 0.000106811516 | 0.000106811516 | \x38dfffff | t
+ \x38e00000 | 0.00010681152 | 0.00010681152 | \x38e00000 | t
+ \x38efffff | 0.00011444091 | 0.00011444091 | \x38efffff | t
+ \x38f00000 | 0.00011444092 | 0.00011444092 | \x38f00000 | t
+ \x38f00001 | 0.000114440925 | 0.000114440925 | \x38f00001 | t
+ \x3a83126e | 0.0009999999 | 0.0009999999 | \x3a83126e | t
+ \x3a83126f | 0.001 | 0.001 | \x3a83126f | t
+ \x3a831270 | 0.0010000002 | 0.0010000002 | \x3a831270 | t
+ \x3c23d709 | 0.009999999 | 0.009999999 | \x3c23d709 | t
+ \x3c23d70a | 0.01 | 0.01 | \x3c23d70a | t
+ \x3c23d70b | 0.010000001 | 0.010000001 | \x3c23d70b | t
+ \x3dcccccc | 0.099999994 | 0.099999994 | \x3dcccccc | t
+ \x3dcccccd | 0.1 | 0.1 | \x3dcccccd | t
+ \x3dccccce | 0.10000001 | 0.10000001 | \x3dccccce | t
+ \x3dcccd6f | 0.10000121 | 0.10000121 | \x3dcccd6f | t
+ \x3dcccd70 | 0.100001216 | 0.100001216 | \x3dcccd70 | t
+ \x3dcccd71 | 0.10000122 | 0.10000122 | \x3dcccd71 | t
+ \x3effffff | 0.49999997 | 0.49999997 | \x3effffff | t
+ \x3f000000 | 0.5 | 0.5 | \x3f000000 | t
+ \x3f000001 | 0.50000006 | 0.50000006 | \x3f000001 | t
+ \x3f333332 | 0.6999999 | 0.6999999 | \x3f333332 | t
+ \x3f333333 | 0.7 | 0.7 | \x3f333333 | t
+ \x3f333334 | 0.70000005 | 0.70000005 | \x3f333334 | t
+ \x3f666665 | 0.8999999 | 0.8999999 | \x3f666665 | t
+ \x3f666666 | 0.9 | 0.9 | \x3f666666 | t
+ \x3f666667 | 0.90000004 | 0.90000004 | \x3f666667 | t
+ \x3f7d70a3 | 0.98999995 | 0.98999995 | \x3f7d70a3 | t
+ \x3f7d70a4 | 0.99 | 0.99 | \x3f7d70a4 | t
+ \x3f7d70a5 | 0.99000007 | 0.99000007 | \x3f7d70a5 | t
+ \x3f7fbe76 | 0.99899995 | 0.99899995 | \x3f7fbe76 | t
+ \x3f7fbe77 | 0.999 | 0.999 | \x3f7fbe77 | t
+ \x3f7fbe78 | 0.9990001 | 0.9990001 | \x3f7fbe78 | t
+ \x3f7ff971 | 0.9998999 | 0.9998999 | \x3f7ff971 | t
+ \x3f7ff972 | 0.9999 | 0.9999 | \x3f7ff972 | t
+ \x3f7ff973 | 0.99990004 | 0.99990004 | \x3f7ff973 | t
+ \x3f7fff57 | 0.9999899 | 0.9999899 | \x3f7fff57 | t
+ \x3f7fff58 | 0.99999 | 0.99999 | \x3f7fff58 | t
+ \x3f7fff59 | 0.99999005 | 0.99999005 | \x3f7fff59 | t
+ \x3f7fffee | 0.9999989 | 0.9999989 | \x3f7fffee | t
+ \x3f7fffef | 0.999999 | 0.999999 | \x3f7fffef | t
+ \x3f7ffff0 | 0.99999905 | 0.99999905 | \x3f7ffff0 | t
+ \x3f7ffff1 | 0.9999991 | 0.9999991 | \x3f7ffff1 | t
+ \x3f7ffff2 | 0.99999917 | 0.99999917 | \x3f7ffff2 | t
+ \x3f7ffff3 | 0.9999992 | 0.9999992 | \x3f7ffff3 | t
+ \x3f7ffff4 | 0.9999993 | 0.9999993 | \x3f7ffff4 | t
+ \x3f7ffff5 | 0.99999934 | 0.99999934 | \x3f7ffff5 | t
+ \x3f7ffff6 | 0.9999994 | 0.9999994 | \x3f7ffff6 | t
+ \x3f7ffff7 | 0.99999946 | 0.99999946 | \x3f7ffff7 | t
+ \x3f7ffff8 | 0.9999995 | 0.9999995 | \x3f7ffff8 | t
+ \x3f7ffff9 | 0.9999996 | 0.9999996 | \x3f7ffff9 | t
+ \x3f7ffffa | 0.99999964 | 0.99999964 | \x3f7ffffa | t
+ \x3f7ffffb | 0.9999997 | 0.9999997 | \x3f7ffffb | t
+ \x3f7ffffc | 0.99999976 | 0.99999976 | \x3f7ffffc | t
+ \x3f7ffffd | 0.9999998 | 0.9999998 | \x3f7ffffd | t
+ \x3f7ffffe | 0.9999999 | 0.9999999 | \x3f7ffffe | t
+ \x3f7fffff | 0.99999994 | 0.99999994 | \x3f7fffff | t
+ \x3f800000 | 1 | 1 | \x3f800000 | t
+ \x3f800001 | 1.0000001 | 1.0000001 | \x3f800001 | t
+ \x3f800002 | 1.0000002 | 1.0000002 | \x3f800002 | t
+ \x3f800003 | 1.0000004 | 1.0000004 | \x3f800003 | t
+ \x3f800004 | 1.0000005 | 1.0000005 | \x3f800004 | t
+ \x3f800005 | 1.0000006 | 1.0000006 | \x3f800005 | t
+ \x3f800006 | 1.0000007 | 1.0000007 | \x3f800006 | t
+ \x3f800007 | 1.0000008 | 1.0000008 | \x3f800007 | t
+ \x3f800008 | 1.000001 | 1.000001 | \x3f800008 | t
+ \x3f800009 | 1.0000011 | 1.0000011 | \x3f800009 | t
+ \x3f80000f | 1.0000018 | 1.0000018 | \x3f80000f | t
+ \x3f800010 | 1.0000019 | 1.0000019 | \x3f800010 | t
+ \x3f800011 | 1.000002 | 1.000002 | \x3f800011 | t
+ \x3f800012 | 1.0000021 | 1.0000021 | \x3f800012 | t
+ \x3f800013 | 1.0000023 | 1.0000023 | \x3f800013 | t
+ \x3f800014 | 1.0000024 | 1.0000024 | \x3f800014 | t
+ \x3f800017 | 1.0000027 | 1.0000027 | \x3f800017 | t
+ \x3f800018 | 1.0000029 | 1.0000029 | \x3f800018 | t
+ \x3f800019 | 1.000003 | 1.000003 | \x3f800019 | t
+ \x3f80001a | 1.0000031 | 1.0000031 | \x3f80001a | t
+ \x3f80001b | 1.0000032 | 1.0000032 | \x3f80001b | t
+ \x3f80001c | 1.0000033 | 1.0000033 | \x3f80001c | t
+ \x3f800029 | 1.0000049 | 1.0000049 | \x3f800029 | t
+ \x3f80002a | 1.000005 | 1.000005 | \x3f80002a | t
+ \x3f80002b | 1.0000051 | 1.0000051 | \x3f80002b | t
+ \x3f800053 | 1.0000099 | 1.0000099 | \x3f800053 | t
+ \x3f800054 | 1.00001 | 1.00001 | \x3f800054 | t
+ \x3f800055 | 1.0000101 | 1.0000101 | \x3f800055 | t
+ \x3f800346 | 1.0000999 | 1.0000999 | \x3f800346 | t
+ \x3f800347 | 1.0001 | 1.0001 | \x3f800347 | t
+ \x3f800348 | 1.0001001 | 1.0001001 | \x3f800348 | t
+ \x3f8020c4 | 1.0009999 | 1.0009999 | \x3f8020c4 | t
+ \x3f8020c5 | 1.001 | 1.001 | \x3f8020c5 | t
+ \x3f8020c6 | 1.0010002 | 1.0010002 | \x3f8020c6 | t
+ \x3f8147ad | 1.0099999 | 1.0099999 | \x3f8147ad | t
+ \x3f8147ae | 1.01 | 1.01 | \x3f8147ae | t
+ \x3f8147af | 1.0100001 | 1.0100001 | \x3f8147af | t
+ \x3f8ccccc | 1.0999999 | 1.0999999 | \x3f8ccccc | t
+ \x3f8ccccd | 1.1 | 1.1 | \x3f8ccccd | t
+ \x3f8cccce | 1.1000001 | 1.1000001 | \x3f8cccce | t
+ \x3fc90fdb | 1.5707964 | 1.5707964 | \x3fc90fdb | t
+ \x402df854 | 2.7182817 | 2.7182817 | \x402df854 | t
+ \x40490fdb | 3.1415927 | 3.1415927 | \x40490fdb | t
+ \x409fffff | 4.9999995 | 4.9999995 | \x409fffff | t
+ \x40a00000 | 5 | 5 | \x40a00000 | t
+ \x40a00001 | 5.0000005 | 5.0000005 | \x40a00001 | t
+ \x40afffff | 5.4999995 | 5.4999995 | \x40afffff | t
+ \x40b00000 | 5.5 | 5.5 | \x40b00000 | t
+ \x40b00001 | 5.5000005 | 5.5000005 | \x40b00001 | t
+ \x411fffff | 9.999999 | 9.999999 | \x411fffff | t
+ \x41200000 | 10 | 10 | \x41200000 | t
+ \x41200001 | 10.000001 | 10.000001 | \x41200001 | t
+ \x42c7ffff | 99.99999 | 99.99999 | \x42c7ffff | t
+ \x42c80000 | 100 | 100 | \x42c80000 | t
+ \x42c80001 | 100.00001 | 100.00001 | \x42c80001 | t
+ \x4479ffff | 999.99994 | 999.99994 | \x4479ffff | t
+ \x447a0000 | 1000 | 1000 | \x447a0000 | t
+ \x447a0001 | 1000.00006 | 1000.00006 | \x447a0001 | t
+ \x461c3fff | 9999.999 | 9999.999 | \x461c3fff | t
+ \x461c4000 | 10000 | 10000 | \x461c4000 | t
+ \x461c4001 | 10000.001 | 10000.001 | \x461c4001 | t
+ \x47c34fff | 99999.99 | 99999.99 | \x47c34fff | t
+ \x47c35000 | 100000 | 100000 | \x47c35000 | t
+ \x47c35001 | 100000.01 | 100000.01 | \x47c35001 | t
+ \x497423ff | 999999.94 | 999999.94 | \x497423ff | t
+ \x49742400 | 1e+06 | 1e+06 | \x49742400 | t
+ \x49742401 | 1.00000006e+06 | 1.00000006e+06 | \x49742401 | t
+ \x4b18967f | 9.999999e+06 | 9.999999e+06 | \x4b18967f | t
+ \x4b189680 | 1e+07 | 1e+07 | \x4b189680 | t
+ \x4b189681 | 1.0000001e+07 | 1.0000001e+07 | \x4b189681 | t
+ \x4cbebc1f | 9.999999e+07 | 9.999999e+07 | \x4cbebc1f | t
+ \x4cbebc20 | 1e+08 | 1e+08 | \x4cbebc20 | t
+ \x4cbebc21 | 1.0000001e+08 | 1.0000001e+08 | \x4cbebc21 | t
+ \x4e6e6b27 | 9.9999994e+08 | 9.9999994e+08 | \x4e6e6b27 | t
+ \x4e6e6b28 | 1e+09 | 1e+09 | \x4e6e6b28 | t
+ \x4e6e6b29 | 1.00000006e+09 | 1.00000006e+09 | \x4e6e6b29 | t
+ \x501502f8 | 9.999999e+09 | 9.999999e+09 | \x501502f8 | t
+ \x501502f9 | 1e+10 | 1e+10 | \x501502f9 | t
+ \x501502fa | 1.0000001e+10 | 1.0000001e+10 | \x501502fa | t
+ \x51ba43b6 | 9.999999e+10 | 9.999999e+10 | \x51ba43b6 | t
+ \x51ba43b7 | 1e+11 | 1e+11 | \x51ba43b7 | t
+ \x51ba43b8 | 1.0000001e+11 | 1.0000001e+11 | \x51ba43b8 | t
+ \x1f6c1e4a | 5e-20 | 5e-20 | \x1f6c1e4a | t
+ \x59be6cea | 6.7e+15 | 6.7e+15 | \x59be6cea | t
+ \x5d5ab6c4 | 9.85e+17 | 9.85e+17 | \x5d5ab6c4 | t
+ \x2cc4a9bd | 5.5895e-12 | 5.5895e-12 | \x2cc4a9bd | t
+ \x15ae43fd | 7.038531e-26 | 7.038531e-26 | \x15ae43fd | t
+ \x2cf757ca | 7.0299088e-12 | 7.0299088e-12 | \x2cf757ca | t
+ \x665ba998 | 2.5933168e+23 | 2.5933168e+23 | \x665ba998 | t
+ \x743c3324 | 5.9642887e+31 | 5.9642887e+31 | \x743c3324 | t
+ \x47f1205a | 123456.7 | 123456.7 | \x47f1205a | t
+ \x4640e6ae | 12345.67 | 12345.67 | \x4640e6ae | t
+ \x449a5225 | 1234.567 | 1234.567 | \x449a5225 | t
+ \x42f6e9d5 | 123.4567 | 123.4567 | \x42f6e9d5 | t
+ \x414587dd | 12.34567 | 12.34567 | \x414587dd | t
+ \x3f9e064b | 1.234567 | 1.234567 | \x3f9e064b | t
+ \x4c000004 | 3.3554448e+07 | 3.3554448e+07 | \x4c000004 | t
+ \x50061c46 | 8.999999e+09 | 8.999999e+09 | \x50061c46 | t
+ \x510006a8 | 3.4366718e+10 | 3.4366718e+10 | \x510006a8 | t
+ \x48951f84 | 305404.12 | 305404.12 | \x48951f84 | t
+ \x45fd1840 | 8099.0312 | 8099.0312 | \x45fd1840 | t
+ \x39800000 | 0.00024414062 | 0.00024414062 | \x39800000 | t
+ \x3b200000 | 0.0024414062 | 0.0024414062 | \x3b200000 | t
+ \x3b900000 | 0.0043945312 | 0.0043945312 | \x3b900000 | t
+ \x3bd00000 | 0.0063476562 | 0.0063476562 | \x3bd00000 | t
+ \x63800000 | 4.7223665e+21 | 4.7223665e+21 | \x63800000 | t
+ \x4b000000 | 8.388608e+06 | 8.388608e+06 | \x4b000000 | t
+ \x4b800000 | 1.6777216e+07 | 1.6777216e+07 | \x4b800000 | t
+ \x4c000001 | 3.3554436e+07 | 3.3554436e+07 | \x4c000001 | t
+ \x4c800b0d | 6.7131496e+07 | 6.7131496e+07 | \x4c800b0d | t
+ \x00d24584 | 1.9310392e-38 | 1.9310392e-38 | \x00d24584 | t
+ \x00d90b88 | 1.993244e-38 | 1.993244e-38 | \x00d90b88 | t
+ \x45803f34 | 4103.9004 | 4103.9004 | \x45803f34 | t
+ \x4f9f24f7 | 5.3399997e+09 | 5.3399997e+09 | \x4f9f24f7 | t
+ \x3a8722c3 | 0.0010310042 | 0.0010310042 | \x3a8722c3 | t
+ \x5c800041 | 2.882326e+17 | 2.882326e+17 | \x5c800041 | t
+ \x15ae43fd | 7.038531e-26 | 7.038531e-26 | \x15ae43fd | t
+ \x5d4cccfb | 9.223404e+17 | 9.223404e+17 | \x5d4cccfb | t
+ \x4c800001 | 6.710887e+07 | 6.710887e+07 | \x4c800001 | t
+ \x57800ed8 | 2.816025e+14 | 2.816025e+14 | \x57800ed8 | t
+ \x5f000000 | 9.223372e+18 | 9.223372e+18 | \x5f000000 | t
+ \x700000f0 | 1.5846086e+29 | 1.5846086e+29 | \x700000f0 | t
+ \x5f23e9ac | 1.1811161e+19 | 1.1811161e+19 | \x5f23e9ac | t
+ \x5e9502f9 | 5.368709e+18 | 5.368709e+18 | \x5e9502f9 | t
+ \x5e8012b1 | 4.6143166e+18 | 4.6143166e+18 | \x5e8012b1 | t
+ \x3c000028 | 0.007812537 | 0.007812537 | \x3c000028 | t
+ \x60cde861 | 1.18697725e+20 | 1.18697725e+20 | \x60cde861 | t
+ \x03aa2a50 | 1.00014165e-36 | 1.00014165e-36 | \x03aa2a50 | t
+ \x43480000 | 200 | 200 | \x43480000 | t
+ \x4c000000 | 3.3554432e+07 | 3.3554432e+07 | \x4c000000 | t
+ \x5d1502f9 | 6.7108864e+17 | 6.7108864e+17 | \x5d1502f9 | t
+ \x5d9502f9 | 1.3421773e+18 | 1.3421773e+18 | \x5d9502f9 | t
+ \x5e1502f9 | 2.6843546e+18 | 2.6843546e+18 | \x5e1502f9 | t
+ \x3f99999a | 1.2 | 1.2 | \x3f99999a | t
+ \x3f9d70a4 | 1.23 | 1.23 | \x3f9d70a4 | t
+ \x3f9df3b6 | 1.234 | 1.234 | \x3f9df3b6 | t
+ \x3f9e0419 | 1.2345 | 1.2345 | \x3f9e0419 | t
+ \x3f9e0610 | 1.23456 | 1.23456 | \x3f9e0610 | t
+ \x3f9e064b | 1.234567 | 1.234567 | \x3f9e064b | t
+ \x3f9e0651 | 1.2345678 | 1.2345678 | \x3f9e0651 | t
+ \x03d20cfe | 1.23456735e-36 | 1.23456735e-36 | \x03d20cfe | t
+(261 rows)
+
+-- clean up, lest opr_sanity complain
+drop type xfloat4 cascade;
+NOTICE: drop cascades to 6 other objects
+DETAIL: drop cascades to function xfloat4in(cstring)
+drop cascades to function xfloat4out(xfloat4)
+drop cascades to cast from xfloat4 to real
+drop cascades to cast from real to xfloat4
+drop cascades to cast from xfloat4 to integer
+drop cascades to cast from integer to xfloat4
diff --git a/ydb/library/yql/tests/postgresql/original/cases/float4.sql b/ydb/library/yql/tests/postgresql/original/cases/float4.sql
new file mode 100644
index 0000000000..612486ecbd
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/float4.sql
@@ -0,0 +1,354 @@
+--
+-- FLOAT4
+--
+
+CREATE TABLE FLOAT4_TBL (f1 float4);
+
+INSERT INTO FLOAT4_TBL(f1) VALUES (' 0.0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 ');
+INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 ');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20');
+
+-- test for over and under flow
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70');
+
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'::float8);
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'::float8);
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'::float8);
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'::float8);
+
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e400');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400');
+
+-- bad input
+INSERT INTO FLOAT4_TBL(f1) VALUES ('');
+INSERT INTO FLOAT4_TBL(f1) VALUES (' ');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0');
+INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0');
+INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5');
+
+-- special inputs
+SELECT 'NaN'::float4;
+SELECT 'nan'::float4;
+SELECT ' NAN '::float4;
+SELECT 'infinity'::float4;
+SELECT ' -INFINiTY '::float4;
+-- bad special inputs
+SELECT 'N A N'::float4;
+SELECT 'NaN x'::float4;
+SELECT ' INFINITY x'::float4;
+
+SELECT 'Infinity'::float4 + 100.0;
+SELECT 'Infinity'::float4 / 'Infinity'::float4;
+SELECT '42'::float4 / 'Infinity'::float4;
+SELECT 'nan'::float4 / 'nan'::float4;
+SELECT 'nan'::float4 / '0'::float4;
+SELECT 'nan'::numeric::float4;
+
+SELECT * FROM FLOAT4_TBL;
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <> '1004.3';
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 = '1004.3';
+
+SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' > f.f1;
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 < '1004.3';
+
+SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' >= f.f1;
+
+SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <= '1004.3';
+
+SELECT f.f1, f.f1 * '-10' AS x FROM FLOAT4_TBL f
+ WHERE f.f1 > '0.0';
+
+SELECT f.f1, f.f1 + '-10' AS x FROM FLOAT4_TBL f
+ WHERE f.f1 > '0.0';
+
+SELECT f.f1, f.f1 / '-10' AS x FROM FLOAT4_TBL f
+ WHERE f.f1 > '0.0';
+
+SELECT f.f1, f.f1 - '-10' AS x FROM FLOAT4_TBL f
+ WHERE f.f1 > '0.0';
+
+-- test divide by zero
+SELECT f.f1 / '0.0' from FLOAT4_TBL f;
+
+SELECT * FROM FLOAT4_TBL;
+
+-- test the unary float4abs operator
+SELECT f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f;
+
+UPDATE FLOAT4_TBL
+ SET f1 = FLOAT4_TBL.f1 * '-1'
+ WHERE FLOAT4_TBL.f1 > '0.0';
+
+SELECT * FROM FLOAT4_TBL;
+
+-- test edge-case coercions to integer
+SELECT '32767.4'::float4::int2;
+SELECT '32767.6'::float4::int2;
+SELECT '-32768.4'::float4::int2;
+SELECT '-32768.6'::float4::int2;
+SELECT '2147483520'::float4::int4;
+SELECT '2147483647'::float4::int4;
+SELECT '-2147483648.5'::float4::int4;
+SELECT '-2147483900'::float4::int4;
+SELECT '9223369837831520256'::float4::int8;
+SELECT '9223372036854775807'::float4::int8;
+SELECT '-9223372036854775808.5'::float4::int8;
+SELECT '-9223380000000000000'::float4::int8;
+
+-- Test for correct input rounding in edge cases.
+-- These lists are from Paxson 1991, excluding subnormals and
+-- inputs of over 9 sig. digits.
+
+SELECT float4send('5e-20'::float4);
+SELECT float4send('67e14'::float4);
+SELECT float4send('985e15'::float4);
+SELECT float4send('55895e-16'::float4);
+SELECT float4send('7038531e-32'::float4);
+SELECT float4send('702990899e-20'::float4);
+
+SELECT float4send('3e-23'::float4);
+SELECT float4send('57e18'::float4);
+SELECT float4send('789e-35'::float4);
+SELECT float4send('2539e-18'::float4);
+SELECT float4send('76173e28'::float4);
+SELECT float4send('887745e-11'::float4);
+SELECT float4send('5382571e-37'::float4);
+SELECT float4send('82381273e-35'::float4);
+SELECT float4send('750486563e-38'::float4);
+
+-- Test that the smallest possible normalized input value inputs
+-- correctly, either in 9-significant-digit or shortest-decimal
+-- format.
+--
+-- exact val is 1.1754943508...
+-- shortest val is 1.1754944000
+-- midpoint to next val is 1.1754944208...
+
+SELECT float4send('1.17549435e-38'::float4);
+SELECT float4send('1.1754944e-38'::float4);
+
+-- test output (and round-trip safety) of various values.
+-- To ensure we're testing what we think we're testing, start with
+-- float values specified by bit patterns (as a useful side effect,
+-- this means we'll fail on non-IEEE platforms).
+
+create type xfloat4;
+create function xfloat4in(cstring) returns xfloat4 immutable strict
+ language internal as 'int4in';
+create function xfloat4out(xfloat4) returns cstring immutable strict
+ language internal as 'int4out';
+create type xfloat4 (input = xfloat4in, output = xfloat4out, like = float4);
+create cast (xfloat4 as float4) without function;
+create cast (float4 as xfloat4) without function;
+create cast (xfloat4 as integer) without function;
+create cast (integer as xfloat4) without function;
+
+-- float4: seeeeeee emmmmmmm mmmmmmmm mmmmmmmm
+
+-- we don't care to assume the platform's strtod() handles subnormals
+-- correctly; those are "use at your own risk". However we do test
+-- subnormal outputs, since those are under our control.
+
+with testdata(bits) as (values
+ -- small subnormals
+ (x'00000001'),
+ (x'00000002'), (x'00000003'),
+ (x'00000010'), (x'00000011'), (x'00000100'), (x'00000101'),
+ (x'00004000'), (x'00004001'), (x'00080000'), (x'00080001'),
+ -- stress values
+ (x'0053c4f4'), -- 7693e-42
+ (x'006c85c4'), -- 996622e-44
+ (x'0041ca76'), -- 60419369e-46
+ (x'004b7678'), -- 6930161142e-48
+ -- taken from upstream testsuite
+ (x'00000007'),
+ (x'00424fe2'),
+ -- borderline between subnormal and normal
+ (x'007ffff0'), (x'007ffff1'), (x'007ffffe'), (x'007fffff'))
+select float4send(flt) as ibits,
+ flt
+ from (select bits::integer::xfloat4::float4 as flt
+ from testdata
+ offset 0) s;
+
+with testdata(bits) as (values
+ (x'00000000'),
+ -- smallest normal values
+ (x'00800000'), (x'00800001'), (x'00800004'), (x'00800005'),
+ (x'00800006'),
+ -- small normal values chosen for short vs. long output
+ (x'008002f1'), (x'008002f2'), (x'008002f3'),
+ (x'00800e17'), (x'00800e18'), (x'00800e19'),
+ -- assorted values (random mantissae)
+ (x'01000001'), (x'01102843'), (x'01a52c98'),
+ (x'0219c229'), (x'02e4464d'), (x'037343c1'), (x'03a91b36'),
+ (x'047ada65'), (x'0496fe87'), (x'0550844f'), (x'05999da3'),
+ (x'060ea5e2'), (x'06e63c45'), (x'07f1e548'), (x'0fc5282b'),
+ (x'1f850283'), (x'2874a9d6'),
+ -- values around 5e-08
+ (x'3356bf94'), (x'3356bf95'), (x'3356bf96'),
+ -- around 1e-07
+ (x'33d6bf94'), (x'33d6bf95'), (x'33d6bf96'),
+ -- around 3e-07 .. 1e-04
+ (x'34a10faf'), (x'34a10fb0'), (x'34a10fb1'),
+ (x'350637bc'), (x'350637bd'), (x'350637be'),
+ (x'35719786'), (x'35719787'), (x'35719788'),
+ (x'358637bc'), (x'358637bd'), (x'358637be'),
+ (x'36a7c5ab'), (x'36a7c5ac'), (x'36a7c5ad'),
+ (x'3727c5ab'), (x'3727c5ac'), (x'3727c5ad'),
+ -- format crossover at 1e-04
+ (x'38d1b714'), (x'38d1b715'), (x'38d1b716'),
+ (x'38d1b717'), (x'38d1b718'), (x'38d1b719'),
+ (x'38d1b71a'), (x'38d1b71b'), (x'38d1b71c'),
+ (x'38d1b71d'),
+ --
+ (x'38dffffe'), (x'38dfffff'), (x'38e00000'),
+ (x'38efffff'), (x'38f00000'), (x'38f00001'),
+ (x'3a83126e'), (x'3a83126f'), (x'3a831270'),
+ (x'3c23d709'), (x'3c23d70a'), (x'3c23d70b'),
+ (x'3dcccccc'), (x'3dcccccd'), (x'3dccccce'),
+ -- chosen to need 9 digits for 3dcccd70
+ (x'3dcccd6f'), (x'3dcccd70'), (x'3dcccd71'),
+ --
+ (x'3effffff'), (x'3f000000'), (x'3f000001'),
+ (x'3f333332'), (x'3f333333'), (x'3f333334'),
+ -- approach 1.0 with increasing numbers of 9s
+ (x'3f666665'), (x'3f666666'), (x'3f666667'),
+ (x'3f7d70a3'), (x'3f7d70a4'), (x'3f7d70a5'),
+ (x'3f7fbe76'), (x'3f7fbe77'), (x'3f7fbe78'),
+ (x'3f7ff971'), (x'3f7ff972'), (x'3f7ff973'),
+ (x'3f7fff57'), (x'3f7fff58'), (x'3f7fff59'),
+ (x'3f7fffee'), (x'3f7fffef'),
+ -- values very close to 1
+ (x'3f7ffff0'), (x'3f7ffff1'), (x'3f7ffff2'),
+ (x'3f7ffff3'), (x'3f7ffff4'), (x'3f7ffff5'),
+ (x'3f7ffff6'), (x'3f7ffff7'), (x'3f7ffff8'),
+ (x'3f7ffff9'), (x'3f7ffffa'), (x'3f7ffffb'),
+ (x'3f7ffffc'), (x'3f7ffffd'), (x'3f7ffffe'),
+ (x'3f7fffff'),
+ (x'3f800000'),
+ (x'3f800001'), (x'3f800002'), (x'3f800003'),
+ (x'3f800004'), (x'3f800005'), (x'3f800006'),
+ (x'3f800007'), (x'3f800008'), (x'3f800009'),
+ -- values 1 to 1.1
+ (x'3f80000f'), (x'3f800010'), (x'3f800011'),
+ (x'3f800012'), (x'3f800013'), (x'3f800014'),
+ (x'3f800017'), (x'3f800018'), (x'3f800019'),
+ (x'3f80001a'), (x'3f80001b'), (x'3f80001c'),
+ (x'3f800029'), (x'3f80002a'), (x'3f80002b'),
+ (x'3f800053'), (x'3f800054'), (x'3f800055'),
+ (x'3f800346'), (x'3f800347'), (x'3f800348'),
+ (x'3f8020c4'), (x'3f8020c5'), (x'3f8020c6'),
+ (x'3f8147ad'), (x'3f8147ae'), (x'3f8147af'),
+ (x'3f8ccccc'), (x'3f8ccccd'), (x'3f8cccce'),
+ --
+ (x'3fc90fdb'), -- pi/2
+ (x'402df854'), -- e
+ (x'40490fdb'), -- pi
+ --
+ (x'409fffff'), (x'40a00000'), (x'40a00001'),
+ (x'40afffff'), (x'40b00000'), (x'40b00001'),
+ (x'411fffff'), (x'41200000'), (x'41200001'),
+ (x'42c7ffff'), (x'42c80000'), (x'42c80001'),
+ (x'4479ffff'), (x'447a0000'), (x'447a0001'),
+ (x'461c3fff'), (x'461c4000'), (x'461c4001'),
+ (x'47c34fff'), (x'47c35000'), (x'47c35001'),
+ (x'497423ff'), (x'49742400'), (x'49742401'),
+ (x'4b18967f'), (x'4b189680'), (x'4b189681'),
+ (x'4cbebc1f'), (x'4cbebc20'), (x'4cbebc21'),
+ (x'4e6e6b27'), (x'4e6e6b28'), (x'4e6e6b29'),
+ (x'501502f8'), (x'501502f9'), (x'501502fa'),
+ (x'51ba43b6'), (x'51ba43b7'), (x'51ba43b8'),
+ -- stress values
+ (x'1f6c1e4a'), -- 5e-20
+ (x'59be6cea'), -- 67e14
+ (x'5d5ab6c4'), -- 985e15
+ (x'2cc4a9bd'), -- 55895e-16
+ (x'15ae43fd'), -- 7038531e-32
+ (x'2cf757ca'), -- 702990899e-20
+ (x'665ba998'), -- 25933168707e13
+ (x'743c3324'), -- 596428896559e20
+ -- exercise fixed-point memmoves
+ (x'47f1205a'),
+ (x'4640e6ae'),
+ (x'449a5225'),
+ (x'42f6e9d5'),
+ (x'414587dd'),
+ (x'3f9e064b'),
+ -- these cases come from the upstream's testsuite
+ -- BoundaryRoundEven
+ (x'4c000004'),
+ (x'50061c46'),
+ (x'510006a8'),
+ -- ExactValueRoundEven
+ (x'48951f84'),
+ (x'45fd1840'),
+ -- LotsOfTrailingZeros
+ (x'39800000'),
+ (x'3b200000'),
+ (x'3b900000'),
+ (x'3bd00000'),
+ -- Regression
+ (x'63800000'),
+ (x'4b000000'),
+ (x'4b800000'),
+ (x'4c000001'),
+ (x'4c800b0d'),
+ (x'00d24584'),
+ (x'00d90b88'),
+ (x'45803f34'),
+ (x'4f9f24f7'),
+ (x'3a8722c3'),
+ (x'5c800041'),
+ (x'15ae43fd'),
+ (x'5d4cccfb'),
+ (x'4c800001'),
+ (x'57800ed8'),
+ (x'5f000000'),
+ (x'700000f0'),
+ (x'5f23e9ac'),
+ (x'5e9502f9'),
+ (x'5e8012b1'),
+ (x'3c000028'),
+ (x'60cde861'),
+ (x'03aa2a50'),
+ (x'43480000'),
+ (x'4c000000'),
+ -- LooksLikePow5
+ (x'5D1502F9'),
+ (x'5D9502F9'),
+ (x'5E1502F9'),
+ -- OutputLength
+ (x'3f99999a'),
+ (x'3f9d70a4'),
+ (x'3f9df3b6'),
+ (x'3f9e0419'),
+ (x'3f9e0610'),
+ (x'3f9e064b'),
+ (x'3f9e0651'),
+ (x'03d20cfe')
+)
+select float4send(flt) as ibits,
+ flt,
+ flt::text::float4 as r_flt,
+ float4send(flt::text::float4) as obits,
+ float4send(flt::text::float4) = float4send(flt) as correct
+ from (select bits::integer::xfloat4::float4 as flt
+ from testdata
+ offset 0) s;
+
+-- clean up, lest opr_sanity complain
+drop type xfloat4 cascade;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/float8.out b/ydb/library/yql/tests/postgresql/original/cases/float8.out
new file mode 100644
index 0000000000..fc4212b32d
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/float8.out
@@ -0,0 +1,1382 @@
+--
+-- FLOAT8
+--
+CREATE TABLE FLOAT8_TBL(f1 float8);
+INSERT INTO FLOAT8_TBL(f1) VALUES (' 0.0 ');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30 ');
+INSERT INTO FLOAT8_TBL(f1) VALUES (' -34.84');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e+200');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e-200');
+-- test for underflow and overflow handling
+SELECT '10e400'::float8;
+ERROR: "10e400" is out of range for type double precision
+LINE 1: SELECT '10e400'::float8;
+ ^
+SELECT '-10e400'::float8;
+ERROR: "-10e400" is out of range for type double precision
+LINE 1: SELECT '-10e400'::float8;
+ ^
+SELECT '10e-400'::float8;
+ERROR: "10e-400" is out of range for type double precision
+LINE 1: SELECT '10e-400'::float8;
+ ^
+SELECT '-10e-400'::float8;
+ERROR: "-10e-400" is out of range for type double precision
+LINE 1: SELECT '-10e-400'::float8;
+ ^
+-- test smallest normalized input
+SELECT float8send('2.2250738585072014E-308'::float8);
+ float8send
+--------------------
+ \x0010000000000000
+(1 row)
+
+-- bad input
+INSERT INTO FLOAT8_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type double precision: ""
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type double precision: " "
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz');
+ERROR: invalid input syntax for type double precision: "xyz"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0');
+ERROR: invalid input syntax for type double precision: "5.0.0"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0');
+ERROR: invalid input syntax for type double precision: "5 . 0"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0');
+ERROR: invalid input syntax for type double precision: "5. 0"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3');
+ERROR: invalid input syntax for type double precision: " - 3"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5');
+ERROR: invalid input syntax for type double precision: "123 5"
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5');
+ ^
+-- special inputs
+SELECT 'NaN'::float8;
+ float8
+--------
+ NaN
+(1 row)
+
+SELECT 'nan'::float8;
+ float8
+--------
+ NaN
+(1 row)
+
+SELECT ' NAN '::float8;
+ float8
+--------
+ NaN
+(1 row)
+
+SELECT 'infinity'::float8;
+ float8
+----------
+ Infinity
+(1 row)
+
+SELECT ' -INFINiTY '::float8;
+ float8
+-----------
+ -Infinity
+(1 row)
+
+-- bad special inputs
+SELECT 'N A N'::float8;
+ERROR: invalid input syntax for type double precision: "N A N"
+LINE 1: SELECT 'N A N'::float8;
+ ^
+SELECT 'NaN x'::float8;
+ERROR: invalid input syntax for type double precision: "NaN x"
+LINE 1: SELECT 'NaN x'::float8;
+ ^
+SELECT ' INFINITY x'::float8;
+ERROR: invalid input syntax for type double precision: " INFINITY x"
+LINE 1: SELECT ' INFINITY x'::float8;
+ ^
+SELECT 'Infinity'::float8 + 100.0;
+ ?column?
+----------
+ Infinity
+(1 row)
+
+SELECT 'Infinity'::float8 / 'Infinity'::float8;
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT '42'::float8 / 'Infinity'::float8;
+ ?column?
+----------
+ 0
+(1 row)
+
+SELECT 'nan'::float8 / 'nan'::float8;
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT 'nan'::float8 / '0'::float8;
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT 'nan'::numeric::float8;
+ float8
+--------
+ NaN
+(1 row)
+
+SELECT * FROM FLOAT8_TBL;
+ f1
+----------------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345678901234e+200
+ 1.2345678901234e-200
+(5 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <> '1004.3';
+ f1
+----------------------
+ 0
+ -34.84
+ 1.2345678901234e+200
+ 1.2345678901234e-200
+(4 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 = '1004.3';
+ f1
+--------
+ 1004.3
+(1 row)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' > f.f1;
+ f1
+----------------------
+ 0
+ -34.84
+ 1.2345678901234e-200
+(3 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 < '1004.3';
+ f1
+----------------------
+ 0
+ -34.84
+ 1.2345678901234e-200
+(3 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' >= f.f1;
+ f1
+----------------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345678901234e-200
+(4 rows)
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3';
+ f1
+----------------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345678901234e-200
+(4 rows)
+
+SELECT f.f1, f.f1 * '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+----------------------+-----------------------
+ 1004.3 | -10043
+ 1.2345678901234e+200 | -1.2345678901234e+201
+ 1.2345678901234e-200 | -1.2345678901234e-199
+(3 rows)
+
+SELECT f.f1, f.f1 + '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+----------------------+----------------------
+ 1004.3 | 994.3
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | -10
+(3 rows)
+
+SELECT f.f1, f.f1 / '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+----------------------+-----------------------
+ 1004.3 | -100.42999999999999
+ 1.2345678901234e+200 | -1.2345678901234e+199
+ 1.2345678901234e-200 | -1.2345678901234e-201
+(3 rows)
+
+SELECT f.f1, f.f1 - '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | x
+----------------------+----------------------
+ 1004.3 | 1014.3
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | 10
+(3 rows)
+
+SELECT f.f1 ^ '2.0' AS square_f1
+ FROM FLOAT8_TBL f where f.f1 = '1004.3';
+ square_f1
+--------------------
+ 1008618.4899999999
+(1 row)
+
+-- absolute value
+SELECT f.f1, @f.f1 AS abs_f1
+ FROM FLOAT8_TBL f;
+ f1 | abs_f1
+----------------------+----------------------
+ 0 | 0
+ 1004.3 | 1004.3
+ -34.84 | 34.84
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | 1.2345678901234e-200
+(5 rows)
+
+-- truncate
+SELECT f.f1, trunc(f.f1) AS trunc_f1
+ FROM FLOAT8_TBL f;
+ f1 | trunc_f1
+----------------------+----------------------
+ 0 | 0
+ 1004.3 | 1004
+ -34.84 | -34
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | 0
+(5 rows)
+
+-- round
+SELECT f.f1, round(f.f1) AS round_f1
+ FROM FLOAT8_TBL f;
+ f1 | round_f1
+----------------------+----------------------
+ 0 | 0
+ 1004.3 | 1004
+ -34.84 | -35
+ 1.2345678901234e+200 | 1.2345678901234e+200
+ 1.2345678901234e-200 | 0
+(5 rows)
+
+-- ceil / ceiling
+select ceil(f1) as ceil_f1 from float8_tbl f;
+ ceil_f1
+----------------------
+ 0
+ 1005
+ -34
+ 1.2345678901234e+200
+ 1
+(5 rows)
+
+select ceiling(f1) as ceiling_f1 from float8_tbl f;
+ ceiling_f1
+----------------------
+ 0
+ 1005
+ -34
+ 1.2345678901234e+200
+ 1
+(5 rows)
+
+-- floor
+select floor(f1) as floor_f1 from float8_tbl f;
+ floor_f1
+----------------------
+ 0
+ 1004
+ -35
+ 1.2345678901234e+200
+ 0
+(5 rows)
+
+-- sign
+select sign(f1) as sign_f1 from float8_tbl f;
+ sign_f1
+---------
+ 0
+ 1
+ -1
+ 1
+ 1
+(5 rows)
+
+-- avoid bit-exact output here because operations may not be bit-exact.
+SET extra_float_digits = 0;
+-- square root
+SELECT sqrt(float8 '64') AS eight;
+ eight
+-------
+ 8
+(1 row)
+
+SELECT |/ float8 '64' AS eight;
+ eight
+-------
+ 8
+(1 row)
+
+SELECT f.f1, |/f.f1 AS sqrt_f1
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | sqrt_f1
+----------------------+-----------------------
+ 1004.3 | 31.6906926399535
+ 1.2345678901234e+200 | 1.11111110611109e+100
+ 1.2345678901234e-200 | 1.11111110611109e-100
+(3 rows)
+
+-- power
+SELECT power(float8 '144', float8 '0.5');
+ power
+-------
+ 12
+(1 row)
+
+SELECT power(float8 'NaN', float8 '0.5');
+ power
+-------
+ NaN
+(1 row)
+
+SELECT power(float8 '144', float8 'NaN');
+ power
+-------
+ NaN
+(1 row)
+
+SELECT power(float8 'NaN', float8 'NaN');
+ power
+-------
+ NaN
+(1 row)
+
+SELECT power(float8 '-1', float8 'NaN');
+ power
+-------
+ NaN
+(1 row)
+
+SELECT power(float8 '1', float8 'NaN');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 'NaN', float8 '0');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 'inf', float8 '0');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '-inf', float8 '0');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '0', float8 'inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 '0', float8 '-inf');
+ERROR: zero raised to a negative power is undefined
+SELECT power(float8 '1', float8 'inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '1', float8 '-inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '-1', float8 'inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '-1', float8 '-inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power(float8 '0.1', float8 'inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 '-0.1', float8 'inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 '1.1', float8 'inf');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power(float8 '-1.1', float8 'inf');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power(float8 '0.1', float8 '-inf');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power(float8 '-0.1', float8 '-inf');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power(float8 '1.1', float8 '-inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 '-1.1', float8 '-inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 'inf', float8 '-2');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power(float8 'inf', float8 '2');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power(float8 'inf', float8 'inf');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power(float8 'inf', float8 '-inf');
+ power
+-------
+ 0
+(1 row)
+
+-- Intel's icc misoptimizes the code that controls the sign of this result,
+-- even with -mp1. Pending a fix for that, only test for "is it zero".
+SELECT power(float8 '-inf', float8 '-2') = '0';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT power(float8 '-inf', float8 '-3');
+ power
+-------
+ -0
+(1 row)
+
+SELECT power(float8 '-inf', float8 '2');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power(float8 '-inf', float8 '3');
+ power
+-----------
+ -Infinity
+(1 row)
+
+SELECT power(float8 '-inf', float8 '3.5');
+ERROR: a negative number raised to a non-integer power yields a complex result
+SELECT power(float8 '-inf', float8 'inf');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power(float8 '-inf', float8 '-inf');
+ power
+-------
+ 0
+(1 row)
+
+-- take exp of ln(f.f1)
+SELECT f.f1, exp(ln(f.f1)) AS exp_ln_f1
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+ f1 | exp_ln_f1
+----------------------+-----------------------
+ 1004.3 | 1004.3
+ 1.2345678901234e+200 | 1.23456789012338e+200
+ 1.2345678901234e-200 | 1.23456789012339e-200
+(3 rows)
+
+-- check edge cases for exp
+SELECT exp('inf'::float8), exp('-inf'::float8), exp('nan'::float8);
+ exp | exp | exp
+----------+-----+-----
+ Infinity | 0 | NaN
+(1 row)
+
+-- cube root
+SELECT ||/ float8 '27' AS three;
+ three
+-------
+ 3
+(1 row)
+
+SELECT f.f1, ||/f.f1 AS cbrt_f1 FROM FLOAT8_TBL f;
+ f1 | cbrt_f1
+----------------------+----------------------
+ 0 | 0
+ 1004.3 | 10.014312837827
+ -34.84 | -3.26607421344208
+ 1.2345678901234e+200 | 4.97933859234765e+66
+ 1.2345678901234e-200 | 2.3112042409018e-67
+(5 rows)
+
+SELECT * FROM FLOAT8_TBL;
+ f1
+----------------------
+ 0
+ 1004.3
+ -34.84
+ 1.2345678901234e+200
+ 1.2345678901234e-200
+(5 rows)
+
+UPDATE FLOAT8_TBL
+ SET f1 = FLOAT8_TBL.f1 * '-1'
+ WHERE FLOAT8_TBL.f1 > '0.0';
+SELECT f.f1 * '1e200' from FLOAT8_TBL f;
+ERROR: value out of range: overflow
+SELECT f.f1 ^ '1e200' from FLOAT8_TBL f;
+ERROR: value out of range: overflow
+SELECT 0 ^ 0 + 0 ^ 1 + 0 ^ 0.0 + 0 ^ 0.5;
+ ?column?
+----------
+ 2
+(1 row)
+
+SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 = '0.0' ;
+ERROR: cannot take logarithm of zero
+SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0' ;
+ERROR: cannot take logarithm of a negative number
+SELECT exp(f.f1) from FLOAT8_TBL f;
+ERROR: value out of range: underflow
+SELECT f.f1 / '0.0' from FLOAT8_TBL f;
+ERROR: division by zero
+SELECT * FROM FLOAT8_TBL;
+ f1
+-----------------------
+ 0
+ -34.84
+ -1004.3
+ -1.2345678901234e+200
+ -1.2345678901234e-200
+(5 rows)
+
+-- hyperbolic functions
+-- we run these with extra_float_digits = 0 too, since different platforms
+-- tend to produce results that vary in the last place.
+SELECT sinh(float8 '1');
+ sinh
+-----------------
+ 1.1752011936438
+(1 row)
+
+SELECT cosh(float8 '1');
+ cosh
+------------------
+ 1.54308063481524
+(1 row)
+
+SELECT tanh(float8 '1');
+ tanh
+-------------------
+ 0.761594155955765
+(1 row)
+
+SELECT asinh(float8 '1');
+ asinh
+-------------------
+ 0.881373587019543
+(1 row)
+
+SELECT acosh(float8 '2');
+ acosh
+------------------
+ 1.31695789692482
+(1 row)
+
+SELECT atanh(float8 '0.5');
+ atanh
+-------------------
+ 0.549306144334055
+(1 row)
+
+-- test Inf/NaN cases for hyperbolic functions
+SELECT sinh(float8 'infinity');
+ sinh
+----------
+ Infinity
+(1 row)
+
+SELECT sinh(float8 '-infinity');
+ sinh
+-----------
+ -Infinity
+(1 row)
+
+SELECT sinh(float8 'nan');
+ sinh
+------
+ NaN
+(1 row)
+
+SELECT cosh(float8 'infinity');
+ cosh
+----------
+ Infinity
+(1 row)
+
+SELECT cosh(float8 '-infinity');
+ cosh
+----------
+ Infinity
+(1 row)
+
+SELECT cosh(float8 'nan');
+ cosh
+------
+ NaN
+(1 row)
+
+SELECT tanh(float8 'infinity');
+ tanh
+------
+ 1
+(1 row)
+
+SELECT tanh(float8 '-infinity');
+ tanh
+------
+ -1
+(1 row)
+
+SELECT tanh(float8 'nan');
+ tanh
+------
+ NaN
+(1 row)
+
+SELECT asinh(float8 'infinity');
+ asinh
+----------
+ Infinity
+(1 row)
+
+SELECT asinh(float8 '-infinity');
+ asinh
+-----------
+ -Infinity
+(1 row)
+
+SELECT asinh(float8 'nan');
+ asinh
+-------
+ NaN
+(1 row)
+
+-- acosh(Inf) should be Inf, but some mingw versions produce NaN, so skip test
+-- SELECT acosh(float8 'infinity');
+SELECT acosh(float8 '-infinity');
+ERROR: input is out of range
+SELECT acosh(float8 'nan');
+ acosh
+-------
+ NaN
+(1 row)
+
+SELECT atanh(float8 'infinity');
+ERROR: input is out of range
+SELECT atanh(float8 '-infinity');
+ERROR: input is out of range
+SELECT atanh(float8 'nan');
+ atanh
+-------
+ NaN
+(1 row)
+
+RESET extra_float_digits;
+-- test for over- and underflow
+INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
+ERROR: "10e400" is out of range for type double precision
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400');
+ERROR: "-10e400" is out of range for type double precision
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400');
+ERROR: "10e-400" is out of range for type double precision
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400');
+ ^
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400');
+ERROR: "-10e-400" is out of range for type double precision
+LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400');
+ ^
+-- maintain external table consistency across platforms
+-- delete all values and reinsert well-behaved ones
+DELETE FROM FLOAT8_TBL;
+INSERT INTO FLOAT8_TBL(f1) VALUES ('0.0');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-34.84');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
+SELECT * FROM FLOAT8_TBL;
+ f1
+-----------------------
+ 0
+ -34.84
+ -1004.3
+ -1.2345678901234e+200
+ -1.2345678901234e-200
+(5 rows)
+
+-- test edge-case coercions to integer
+SELECT '32767.4'::float8::int2;
+ int2
+-------
+ 32767
+(1 row)
+
+SELECT '32767.6'::float8::int2;
+ERROR: smallint out of range
+SELECT '-32768.4'::float8::int2;
+ int2
+--------
+ -32768
+(1 row)
+
+SELECT '-32768.6'::float8::int2;
+ERROR: smallint out of range
+SELECT '2147483647.4'::float8::int4;
+ int4
+------------
+ 2147483647
+(1 row)
+
+SELECT '2147483647.6'::float8::int4;
+ERROR: integer out of range
+SELECT '-2147483648.4'::float8::int4;
+ int4
+-------------
+ -2147483648
+(1 row)
+
+SELECT '-2147483648.6'::float8::int4;
+ERROR: integer out of range
+SELECT '9223372036854773760'::float8::int8;
+ int8
+---------------------
+ 9223372036854773760
+(1 row)
+
+SELECT '9223372036854775807'::float8::int8;
+ERROR: bigint out of range
+SELECT '-9223372036854775808.5'::float8::int8;
+ int8
+----------------------
+ -9223372036854775808
+(1 row)
+
+SELECT '-9223372036854780000'::float8::int8;
+ERROR: bigint out of range
+-- test exact cases for trigonometric functions in degrees
+SELECT x,
+ sind(x),
+ sind(x) IN (-1,-0.5,0,0.5,1) AS sind_exact
+FROM (VALUES (0), (30), (90), (150), (180),
+ (210), (270), (330), (360)) AS t(x);
+ x | sind | sind_exact
+-----+------+------------
+ 0 | 0 | t
+ 30 | 0.5 | t
+ 90 | 1 | t
+ 150 | 0.5 | t
+ 180 | 0 | t
+ 210 | -0.5 | t
+ 270 | -1 | t
+ 330 | -0.5 | t
+ 360 | 0 | t
+(9 rows)
+
+SELECT x,
+ cosd(x),
+ cosd(x) IN (-1,-0.5,0,0.5,1) AS cosd_exact
+FROM (VALUES (0), (60), (90), (120), (180),
+ (240), (270), (300), (360)) AS t(x);
+ x | cosd | cosd_exact
+-----+------+------------
+ 0 | 1 | t
+ 60 | 0.5 | t
+ 90 | 0 | t
+ 120 | -0.5 | t
+ 180 | -1 | t
+ 240 | -0.5 | t
+ 270 | 0 | t
+ 300 | 0.5 | t
+ 360 | 1 | t
+(9 rows)
+
+SELECT x,
+ tand(x),
+ tand(x) IN ('-Infinity'::float8,-1,0,
+ 1,'Infinity'::float8) AS tand_exact,
+ cotd(x),
+ cotd(x) IN ('-Infinity'::float8,-1,0,
+ 1,'Infinity'::float8) AS cotd_exact
+FROM (VALUES (0), (45), (90), (135), (180),
+ (225), (270), (315), (360)) AS t(x);
+ x | tand | tand_exact | cotd | cotd_exact
+-----+-----------+------------+-----------+------------
+ 0 | 0 | t | Infinity | t
+ 45 | 1 | t | 1 | t
+ 90 | Infinity | t | 0 | t
+ 135 | -1 | t | -1 | t
+ 180 | 0 | t | -Infinity | t
+ 225 | 1 | t | 1 | t
+ 270 | -Infinity | t | 0 | t
+ 315 | -1 | t | -1 | t
+ 360 | 0 | t | Infinity | t
+(9 rows)
+
+SELECT x,
+ asind(x),
+ asind(x) IN (-90,-30,0,30,90) AS asind_exact,
+ acosd(x),
+ acosd(x) IN (0,60,90,120,180) AS acosd_exact
+FROM (VALUES (-1), (-0.5), (0), (0.5), (1)) AS t(x);
+ x | asind | asind_exact | acosd | acosd_exact
+------+-------+-------------+-------+-------------
+ -1 | -90 | t | 180 | t
+ -0.5 | -30 | t | 120 | t
+ 0 | 0 | t | 90 | t
+ 0.5 | 30 | t | 60 | t
+ 1 | 90 | t | 0 | t
+(5 rows)
+
+SELECT x,
+ atand(x),
+ atand(x) IN (-90,-45,0,45,90) AS atand_exact
+FROM (VALUES ('-Infinity'::float8), (-1), (0), (1),
+ ('Infinity'::float8)) AS t(x);
+ x | atand | atand_exact
+-----------+-------+-------------
+ -Infinity | -90 | t
+ -1 | -45 | t
+ 0 | 0 | t
+ 1 | 45 | t
+ Infinity | 90 | t
+(5 rows)
+
+SELECT x, y,
+ atan2d(y, x),
+ atan2d(y, x) IN (-90,0,90,180) AS atan2d_exact
+FROM (SELECT 10*cosd(a), 10*sind(a)
+ FROM generate_series(0, 360, 90) AS t(a)) AS t(x,y);
+ x | y | atan2d | atan2d_exact
+-----+-----+--------+--------------
+ 10 | 0 | 0 | t
+ 0 | 10 | 90 | t
+ -10 | 0 | 180 | t
+ 0 | -10 | -90 | t
+ 10 | 0 | 0 | t
+(5 rows)
+
+--
+-- test output (and round-trip safety) of various values.
+-- To ensure we're testing what we think we're testing, start with
+-- float values specified by bit patterns (as a useful side effect,
+-- this means we'll fail on non-IEEE platforms).
+create type xfloat8;
+create function xfloat8in(cstring) returns xfloat8 immutable strict
+ language internal as 'int8in';
+NOTICE: return type xfloat8 is only a shell
+create function xfloat8out(xfloat8) returns cstring immutable strict
+ language internal as 'int8out';
+NOTICE: argument type xfloat8 is only a shell
+create type xfloat8 (input = xfloat8in, output = xfloat8out, like = float8);
+create cast (xfloat8 as float8) without function;
+create cast (float8 as xfloat8) without function;
+create cast (xfloat8 as bigint) without function;
+create cast (bigint as xfloat8) without function;
+-- float8: seeeeeee eeeeeeee eeeeeeee mmmmmmmm mmmmmmmm(x4)
+-- we don't care to assume the platform's strtod() handles subnormals
+-- correctly; those are "use at your own risk". However we do test
+-- subnormal outputs, since those are under our control.
+with testdata(bits) as (values
+ -- small subnormals
+ (x'0000000000000001'),
+ (x'0000000000000002'), (x'0000000000000003'),
+ (x'0000000000001000'), (x'0000000100000000'),
+ (x'0000010000000000'), (x'0000010100000000'),
+ (x'0000400000000000'), (x'0000400100000000'),
+ (x'0000800000000000'), (x'0000800000000001'),
+ -- these values taken from upstream testsuite
+ (x'00000000000f4240'),
+ (x'00000000016e3600'),
+ (x'0000008cdcdea440'),
+ -- borderline between subnormal and normal
+ (x'000ffffffffffff0'), (x'000ffffffffffff1'),
+ (x'000ffffffffffffe'), (x'000fffffffffffff'))
+select float8send(flt) as ibits,
+ flt
+ from (select bits::bigint::xfloat8::float8 as flt
+ from testdata
+ offset 0) s;
+ ibits | flt
+--------------------+-------------------------
+ \x0000000000000001 | 5e-324
+ \x0000000000000002 | 1e-323
+ \x0000000000000003 | 1.5e-323
+ \x0000000000001000 | 2.0237e-320
+ \x0000000100000000 | 2.121995791e-314
+ \x0000010000000000 | 5.43230922487e-312
+ \x0000010100000000 | 5.45352918278e-312
+ \x0000400000000000 | 3.4766779039175e-310
+ \x0000400100000000 | 3.4768901034966e-310
+ \x0000800000000000 | 6.953355807835e-310
+ \x0000800000000001 | 6.95335580783505e-310
+ \x00000000000f4240 | 4.940656e-318
+ \x00000000016e3600 | 1.18575755e-316
+ \x0000008cdcdea440 | 2.989102097996e-312
+ \x000ffffffffffff0 | 2.2250738585071935e-308
+ \x000ffffffffffff1 | 2.225073858507194e-308
+ \x000ffffffffffffe | 2.2250738585072004e-308
+ \x000fffffffffffff | 2.225073858507201e-308
+(18 rows)
+
+-- round-trip tests
+with testdata(bits) as (values
+ (x'0000000000000000'),
+ -- smallest normal values
+ (x'0010000000000000'), (x'0010000000000001'),
+ (x'0010000000000002'), (x'0018000000000000'),
+ --
+ (x'3ddb7cdfd9d7bdba'), (x'3ddb7cdfd9d7bdbb'), (x'3ddb7cdfd9d7bdbc'),
+ (x'3e112e0be826d694'), (x'3e112e0be826d695'), (x'3e112e0be826d696'),
+ (x'3e45798ee2308c39'), (x'3e45798ee2308c3a'), (x'3e45798ee2308c3b'),
+ (x'3e7ad7f29abcaf47'), (x'3e7ad7f29abcaf48'), (x'3e7ad7f29abcaf49'),
+ (x'3eb0c6f7a0b5ed8c'), (x'3eb0c6f7a0b5ed8d'), (x'3eb0c6f7a0b5ed8e'),
+ (x'3ee4f8b588e368ef'), (x'3ee4f8b588e368f0'), (x'3ee4f8b588e368f1'),
+ (x'3f1a36e2eb1c432c'), (x'3f1a36e2eb1c432d'), (x'3f1a36e2eb1c432e'),
+ (x'3f50624dd2f1a9fb'), (x'3f50624dd2f1a9fc'), (x'3f50624dd2f1a9fd'),
+ (x'3f847ae147ae147a'), (x'3f847ae147ae147b'), (x'3f847ae147ae147c'),
+ (x'3fb9999999999999'), (x'3fb999999999999a'), (x'3fb999999999999b'),
+ -- values very close to 1
+ (x'3feffffffffffff0'), (x'3feffffffffffff1'), (x'3feffffffffffff2'),
+ (x'3feffffffffffff3'), (x'3feffffffffffff4'), (x'3feffffffffffff5'),
+ (x'3feffffffffffff6'), (x'3feffffffffffff7'), (x'3feffffffffffff8'),
+ (x'3feffffffffffff9'), (x'3feffffffffffffa'), (x'3feffffffffffffb'),
+ (x'3feffffffffffffc'), (x'3feffffffffffffd'), (x'3feffffffffffffe'),
+ (x'3fefffffffffffff'),
+ (x'3ff0000000000000'),
+ (x'3ff0000000000001'), (x'3ff0000000000002'), (x'3ff0000000000003'),
+ (x'3ff0000000000004'), (x'3ff0000000000005'), (x'3ff0000000000006'),
+ (x'3ff0000000000007'), (x'3ff0000000000008'), (x'3ff0000000000009'),
+ --
+ (x'3ff921fb54442d18'),
+ (x'4005bf0a8b14576a'),
+ (x'400921fb54442d18'),
+ --
+ (x'4023ffffffffffff'), (x'4024000000000000'), (x'4024000000000001'),
+ (x'4058ffffffffffff'), (x'4059000000000000'), (x'4059000000000001'),
+ (x'408f3fffffffffff'), (x'408f400000000000'), (x'408f400000000001'),
+ (x'40c387ffffffffff'), (x'40c3880000000000'), (x'40c3880000000001'),
+ (x'40f869ffffffffff'), (x'40f86a0000000000'), (x'40f86a0000000001'),
+ (x'412e847fffffffff'), (x'412e848000000000'), (x'412e848000000001'),
+ (x'416312cfffffffff'), (x'416312d000000000'), (x'416312d000000001'),
+ (x'4197d783ffffffff'), (x'4197d78400000000'), (x'4197d78400000001'),
+ (x'41cdcd64ffffffff'), (x'41cdcd6500000000'), (x'41cdcd6500000001'),
+ (x'4202a05f1fffffff'), (x'4202a05f20000000'), (x'4202a05f20000001'),
+ (x'42374876e7ffffff'), (x'42374876e8000000'), (x'42374876e8000001'),
+ (x'426d1a94a1ffffff'), (x'426d1a94a2000000'), (x'426d1a94a2000001'),
+ (x'42a2309ce53fffff'), (x'42a2309ce5400000'), (x'42a2309ce5400001'),
+ (x'42d6bcc41e8fffff'), (x'42d6bcc41e900000'), (x'42d6bcc41e900001'),
+ (x'430c6bf52633ffff'), (x'430c6bf526340000'), (x'430c6bf526340001'),
+ (x'4341c37937e07fff'), (x'4341c37937e08000'), (x'4341c37937e08001'),
+ (x'4376345785d89fff'), (x'4376345785d8a000'), (x'4376345785d8a001'),
+ (x'43abc16d674ec7ff'), (x'43abc16d674ec800'), (x'43abc16d674ec801'),
+ (x'43e158e460913cff'), (x'43e158e460913d00'), (x'43e158e460913d01'),
+ (x'4415af1d78b58c3f'), (x'4415af1d78b58c40'), (x'4415af1d78b58c41'),
+ (x'444b1ae4d6e2ef4f'), (x'444b1ae4d6e2ef50'), (x'444b1ae4d6e2ef51'),
+ (x'4480f0cf064dd591'), (x'4480f0cf064dd592'), (x'4480f0cf064dd593'),
+ (x'44b52d02c7e14af5'), (x'44b52d02c7e14af6'), (x'44b52d02c7e14af7'),
+ (x'44ea784379d99db3'), (x'44ea784379d99db4'), (x'44ea784379d99db5'),
+ (x'45208b2a2c280290'), (x'45208b2a2c280291'), (x'45208b2a2c280292'),
+ --
+ (x'7feffffffffffffe'), (x'7fefffffffffffff'),
+ -- round to even tests (+ve)
+ (x'4350000000000002'),
+ (x'4350000000002e06'),
+ (x'4352000000000003'),
+ (x'4352000000000004'),
+ (x'4358000000000003'),
+ (x'4358000000000004'),
+ (x'435f000000000020'),
+ -- round to even tests (-ve)
+ (x'c350000000000002'),
+ (x'c350000000002e06'),
+ (x'c352000000000003'),
+ (x'c352000000000004'),
+ (x'c358000000000003'),
+ (x'c358000000000004'),
+ (x'c35f000000000020'),
+ -- exercise fixed-point memmoves
+ (x'42dc12218377de66'),
+ (x'42a674e79c5fe51f'),
+ (x'4271f71fb04cb74c'),
+ (x'423cbe991a145879'),
+ (x'4206fee0e1a9e061'),
+ (x'41d26580b487e6b4'),
+ (x'419d6f34540ca453'),
+ (x'41678c29dcd6e9dc'),
+ (x'4132d687e3df217d'),
+ (x'40fe240c9fcb68c8'),
+ (x'40c81cd6e63c53d3'),
+ (x'40934a4584fd0fdc'),
+ (x'405edd3c07fb4c93'),
+ (x'4028b0fcd32f7076'),
+ (x'3ff3c0ca428c59f8'),
+ -- these cases come from the upstream's testsuite
+ -- LotsOfTrailingZeros)
+ (x'3e60000000000000'),
+ -- Regression
+ (x'c352bd2668e077c4'),
+ (x'434018601510c000'),
+ (x'43d055dc36f24000'),
+ (x'43e052961c6f8000'),
+ (x'3ff3c0ca2a5b1d5d'),
+ -- LooksLikePow5
+ (x'4830f0cf064dd592'),
+ (x'4840f0cf064dd592'),
+ (x'4850f0cf064dd592'),
+ -- OutputLength
+ (x'3ff3333333333333'),
+ (x'3ff3ae147ae147ae'),
+ (x'3ff3be76c8b43958'),
+ (x'3ff3c083126e978d'),
+ (x'3ff3c0c1fc8f3238'),
+ (x'3ff3c0c9539b8887'),
+ (x'3ff3c0ca2a5b1d5d'),
+ (x'3ff3c0ca4283de1b'),
+ (x'3ff3c0ca43db770a'),
+ (x'3ff3c0ca428abd53'),
+ (x'3ff3c0ca428c1d2b'),
+ (x'3ff3c0ca428c51f2'),
+ (x'3ff3c0ca428c58fc'),
+ (x'3ff3c0ca428c59dd'),
+ (x'3ff3c0ca428c59f8'),
+ (x'3ff3c0ca428c59fb'),
+ -- 32-bit chunking
+ (x'40112e0be8047a7d'),
+ (x'40112e0be815a889'),
+ (x'40112e0be826d695'),
+ (x'40112e0be83804a1'),
+ (x'40112e0be84932ad'),
+ -- MinMaxShift
+ (x'0040000000000000'),
+ (x'007fffffffffffff'),
+ (x'0290000000000000'),
+ (x'029fffffffffffff'),
+ (x'4350000000000000'),
+ (x'435fffffffffffff'),
+ (x'1330000000000000'),
+ (x'133fffffffffffff'),
+ (x'3a6fa7161a4d6e0c')
+)
+select float8send(flt) as ibits,
+ flt,
+ flt::text::float8 as r_flt,
+ float8send(flt::text::float8) as obits,
+ float8send(flt::text::float8) = float8send(flt) as correct
+ from (select bits::bigint::xfloat8::float8 as flt
+ from testdata
+ offset 0) s;
+ ibits | flt | r_flt | obits | correct
+--------------------+-------------------------+-------------------------+--------------------+---------
+ \x0000000000000000 | 0 | 0 | \x0000000000000000 | t
+ \x0010000000000000 | 2.2250738585072014e-308 | 2.2250738585072014e-308 | \x0010000000000000 | t
+ \x0010000000000001 | 2.225073858507202e-308 | 2.225073858507202e-308 | \x0010000000000001 | t
+ \x0010000000000002 | 2.2250738585072024e-308 | 2.2250738585072024e-308 | \x0010000000000002 | t
+ \x0018000000000000 | 3.337610787760802e-308 | 3.337610787760802e-308 | \x0018000000000000 | t
+ \x3ddb7cdfd9d7bdba | 9.999999999999999e-11 | 9.999999999999999e-11 | \x3ddb7cdfd9d7bdba | t
+ \x3ddb7cdfd9d7bdbb | 1e-10 | 1e-10 | \x3ddb7cdfd9d7bdbb | t
+ \x3ddb7cdfd9d7bdbc | 1.0000000000000002e-10 | 1.0000000000000002e-10 | \x3ddb7cdfd9d7bdbc | t
+ \x3e112e0be826d694 | 9.999999999999999e-10 | 9.999999999999999e-10 | \x3e112e0be826d694 | t
+ \x3e112e0be826d695 | 1e-09 | 1e-09 | \x3e112e0be826d695 | t
+ \x3e112e0be826d696 | 1.0000000000000003e-09 | 1.0000000000000003e-09 | \x3e112e0be826d696 | t
+ \x3e45798ee2308c39 | 9.999999999999999e-09 | 9.999999999999999e-09 | \x3e45798ee2308c39 | t
+ \x3e45798ee2308c3a | 1e-08 | 1e-08 | \x3e45798ee2308c3a | t
+ \x3e45798ee2308c3b | 1.0000000000000002e-08 | 1.0000000000000002e-08 | \x3e45798ee2308c3b | t
+ \x3e7ad7f29abcaf47 | 9.999999999999998e-08 | 9.999999999999998e-08 | \x3e7ad7f29abcaf47 | t
+ \x3e7ad7f29abcaf48 | 1e-07 | 1e-07 | \x3e7ad7f29abcaf48 | t
+ \x3e7ad7f29abcaf49 | 1.0000000000000001e-07 | 1.0000000000000001e-07 | \x3e7ad7f29abcaf49 | t
+ \x3eb0c6f7a0b5ed8c | 9.999999999999997e-07 | 9.999999999999997e-07 | \x3eb0c6f7a0b5ed8c | t
+ \x3eb0c6f7a0b5ed8d | 1e-06 | 1e-06 | \x3eb0c6f7a0b5ed8d | t
+ \x3eb0c6f7a0b5ed8e | 1.0000000000000002e-06 | 1.0000000000000002e-06 | \x3eb0c6f7a0b5ed8e | t
+ \x3ee4f8b588e368ef | 9.999999999999997e-06 | 9.999999999999997e-06 | \x3ee4f8b588e368ef | t
+ \x3ee4f8b588e368f0 | 9.999999999999999e-06 | 9.999999999999999e-06 | \x3ee4f8b588e368f0 | t
+ \x3ee4f8b588e368f1 | 1e-05 | 1e-05 | \x3ee4f8b588e368f1 | t
+ \x3f1a36e2eb1c432c | 9.999999999999999e-05 | 9.999999999999999e-05 | \x3f1a36e2eb1c432c | t
+ \x3f1a36e2eb1c432d | 0.0001 | 0.0001 | \x3f1a36e2eb1c432d | t
+ \x3f1a36e2eb1c432e | 0.00010000000000000002 | 0.00010000000000000002 | \x3f1a36e2eb1c432e | t
+ \x3f50624dd2f1a9fb | 0.0009999999999999998 | 0.0009999999999999998 | \x3f50624dd2f1a9fb | t
+ \x3f50624dd2f1a9fc | 0.001 | 0.001 | \x3f50624dd2f1a9fc | t
+ \x3f50624dd2f1a9fd | 0.0010000000000000002 | 0.0010000000000000002 | \x3f50624dd2f1a9fd | t
+ \x3f847ae147ae147a | 0.009999999999999998 | 0.009999999999999998 | \x3f847ae147ae147a | t
+ \x3f847ae147ae147b | 0.01 | 0.01 | \x3f847ae147ae147b | t
+ \x3f847ae147ae147c | 0.010000000000000002 | 0.010000000000000002 | \x3f847ae147ae147c | t
+ \x3fb9999999999999 | 0.09999999999999999 | 0.09999999999999999 | \x3fb9999999999999 | t
+ \x3fb999999999999a | 0.1 | 0.1 | \x3fb999999999999a | t
+ \x3fb999999999999b | 0.10000000000000002 | 0.10000000000000002 | \x3fb999999999999b | t
+ \x3feffffffffffff0 | 0.9999999999999982 | 0.9999999999999982 | \x3feffffffffffff0 | t
+ \x3feffffffffffff1 | 0.9999999999999983 | 0.9999999999999983 | \x3feffffffffffff1 | t
+ \x3feffffffffffff2 | 0.9999999999999984 | 0.9999999999999984 | \x3feffffffffffff2 | t
+ \x3feffffffffffff3 | 0.9999999999999986 | 0.9999999999999986 | \x3feffffffffffff3 | t
+ \x3feffffffffffff4 | 0.9999999999999987 | 0.9999999999999987 | \x3feffffffffffff4 | t
+ \x3feffffffffffff5 | 0.9999999999999988 | 0.9999999999999988 | \x3feffffffffffff5 | t
+ \x3feffffffffffff6 | 0.9999999999999989 | 0.9999999999999989 | \x3feffffffffffff6 | t
+ \x3feffffffffffff7 | 0.999999999999999 | 0.999999999999999 | \x3feffffffffffff7 | t
+ \x3feffffffffffff8 | 0.9999999999999991 | 0.9999999999999991 | \x3feffffffffffff8 | t
+ \x3feffffffffffff9 | 0.9999999999999992 | 0.9999999999999992 | \x3feffffffffffff9 | t
+ \x3feffffffffffffa | 0.9999999999999993 | 0.9999999999999993 | \x3feffffffffffffa | t
+ \x3feffffffffffffb | 0.9999999999999994 | 0.9999999999999994 | \x3feffffffffffffb | t
+ \x3feffffffffffffc | 0.9999999999999996 | 0.9999999999999996 | \x3feffffffffffffc | t
+ \x3feffffffffffffd | 0.9999999999999997 | 0.9999999999999997 | \x3feffffffffffffd | t
+ \x3feffffffffffffe | 0.9999999999999998 | 0.9999999999999998 | \x3feffffffffffffe | t
+ \x3fefffffffffffff | 0.9999999999999999 | 0.9999999999999999 | \x3fefffffffffffff | t
+ \x3ff0000000000000 | 1 | 1 | \x3ff0000000000000 | t
+ \x3ff0000000000001 | 1.0000000000000002 | 1.0000000000000002 | \x3ff0000000000001 | t
+ \x3ff0000000000002 | 1.0000000000000004 | 1.0000000000000004 | \x3ff0000000000002 | t
+ \x3ff0000000000003 | 1.0000000000000007 | 1.0000000000000007 | \x3ff0000000000003 | t
+ \x3ff0000000000004 | 1.0000000000000009 | 1.0000000000000009 | \x3ff0000000000004 | t
+ \x3ff0000000000005 | 1.000000000000001 | 1.000000000000001 | \x3ff0000000000005 | t
+ \x3ff0000000000006 | 1.0000000000000013 | 1.0000000000000013 | \x3ff0000000000006 | t
+ \x3ff0000000000007 | 1.0000000000000016 | 1.0000000000000016 | \x3ff0000000000007 | t
+ \x3ff0000000000008 | 1.0000000000000018 | 1.0000000000000018 | \x3ff0000000000008 | t
+ \x3ff0000000000009 | 1.000000000000002 | 1.000000000000002 | \x3ff0000000000009 | t
+ \x3ff921fb54442d18 | 1.5707963267948966 | 1.5707963267948966 | \x3ff921fb54442d18 | t
+ \x4005bf0a8b14576a | 2.7182818284590455 | 2.7182818284590455 | \x4005bf0a8b14576a | t
+ \x400921fb54442d18 | 3.141592653589793 | 3.141592653589793 | \x400921fb54442d18 | t
+ \x4023ffffffffffff | 9.999999999999998 | 9.999999999999998 | \x4023ffffffffffff | t
+ \x4024000000000000 | 10 | 10 | \x4024000000000000 | t
+ \x4024000000000001 | 10.000000000000002 | 10.000000000000002 | \x4024000000000001 | t
+ \x4058ffffffffffff | 99.99999999999999 | 99.99999999999999 | \x4058ffffffffffff | t
+ \x4059000000000000 | 100 | 100 | \x4059000000000000 | t
+ \x4059000000000001 | 100.00000000000001 | 100.00000000000001 | \x4059000000000001 | t
+ \x408f3fffffffffff | 999.9999999999999 | 999.9999999999999 | \x408f3fffffffffff | t
+ \x408f400000000000 | 1000 | 1000 | \x408f400000000000 | t
+ \x408f400000000001 | 1000.0000000000001 | 1000.0000000000001 | \x408f400000000001 | t
+ \x40c387ffffffffff | 9999.999999999998 | 9999.999999999998 | \x40c387ffffffffff | t
+ \x40c3880000000000 | 10000 | 10000 | \x40c3880000000000 | t
+ \x40c3880000000001 | 10000.000000000002 | 10000.000000000002 | \x40c3880000000001 | t
+ \x40f869ffffffffff | 99999.99999999999 | 99999.99999999999 | \x40f869ffffffffff | t
+ \x40f86a0000000000 | 100000 | 100000 | \x40f86a0000000000 | t
+ \x40f86a0000000001 | 100000.00000000001 | 100000.00000000001 | \x40f86a0000000001 | t
+ \x412e847fffffffff | 999999.9999999999 | 999999.9999999999 | \x412e847fffffffff | t
+ \x412e848000000000 | 1000000 | 1000000 | \x412e848000000000 | t
+ \x412e848000000001 | 1000000.0000000001 | 1000000.0000000001 | \x412e848000000001 | t
+ \x416312cfffffffff | 9999999.999999998 | 9999999.999999998 | \x416312cfffffffff | t
+ \x416312d000000000 | 10000000 | 10000000 | \x416312d000000000 | t
+ \x416312d000000001 | 10000000.000000002 | 10000000.000000002 | \x416312d000000001 | t
+ \x4197d783ffffffff | 99999999.99999999 | 99999999.99999999 | \x4197d783ffffffff | t
+ \x4197d78400000000 | 100000000 | 100000000 | \x4197d78400000000 | t
+ \x4197d78400000001 | 100000000.00000001 | 100000000.00000001 | \x4197d78400000001 | t
+ \x41cdcd64ffffffff | 999999999.9999999 | 999999999.9999999 | \x41cdcd64ffffffff | t
+ \x41cdcd6500000000 | 1000000000 | 1000000000 | \x41cdcd6500000000 | t
+ \x41cdcd6500000001 | 1000000000.0000001 | 1000000000.0000001 | \x41cdcd6500000001 | t
+ \x4202a05f1fffffff | 9999999999.999998 | 9999999999.999998 | \x4202a05f1fffffff | t
+ \x4202a05f20000000 | 10000000000 | 10000000000 | \x4202a05f20000000 | t
+ \x4202a05f20000001 | 10000000000.000002 | 10000000000.000002 | \x4202a05f20000001 | t
+ \x42374876e7ffffff | 99999999999.99998 | 99999999999.99998 | \x42374876e7ffffff | t
+ \x42374876e8000000 | 100000000000 | 100000000000 | \x42374876e8000000 | t
+ \x42374876e8000001 | 100000000000.00002 | 100000000000.00002 | \x42374876e8000001 | t
+ \x426d1a94a1ffffff | 999999999999.9999 | 999999999999.9999 | \x426d1a94a1ffffff | t
+ \x426d1a94a2000000 | 1000000000000 | 1000000000000 | \x426d1a94a2000000 | t
+ \x426d1a94a2000001 | 1000000000000.0001 | 1000000000000.0001 | \x426d1a94a2000001 | t
+ \x42a2309ce53fffff | 9999999999999.998 | 9999999999999.998 | \x42a2309ce53fffff | t
+ \x42a2309ce5400000 | 10000000000000 | 10000000000000 | \x42a2309ce5400000 | t
+ \x42a2309ce5400001 | 10000000000000.002 | 10000000000000.002 | \x42a2309ce5400001 | t
+ \x42d6bcc41e8fffff | 99999999999999.98 | 99999999999999.98 | \x42d6bcc41e8fffff | t
+ \x42d6bcc41e900000 | 100000000000000 | 100000000000000 | \x42d6bcc41e900000 | t
+ \x42d6bcc41e900001 | 100000000000000.02 | 100000000000000.02 | \x42d6bcc41e900001 | t
+ \x430c6bf52633ffff | 999999999999999.9 | 999999999999999.9 | \x430c6bf52633ffff | t
+ \x430c6bf526340000 | 1e+15 | 1e+15 | \x430c6bf526340000 | t
+ \x430c6bf526340001 | 1.0000000000000001e+15 | 1.0000000000000001e+15 | \x430c6bf526340001 | t
+ \x4341c37937e07fff | 9.999999999999998e+15 | 9.999999999999998e+15 | \x4341c37937e07fff | t
+ \x4341c37937e08000 | 1e+16 | 1e+16 | \x4341c37937e08000 | t
+ \x4341c37937e08001 | 1.0000000000000002e+16 | 1.0000000000000002e+16 | \x4341c37937e08001 | t
+ \x4376345785d89fff | 9.999999999999998e+16 | 9.999999999999998e+16 | \x4376345785d89fff | t
+ \x4376345785d8a000 | 1e+17 | 1e+17 | \x4376345785d8a000 | t
+ \x4376345785d8a001 | 1.0000000000000002e+17 | 1.0000000000000002e+17 | \x4376345785d8a001 | t
+ \x43abc16d674ec7ff | 9.999999999999999e+17 | 9.999999999999999e+17 | \x43abc16d674ec7ff | t
+ \x43abc16d674ec800 | 1e+18 | 1e+18 | \x43abc16d674ec800 | t
+ \x43abc16d674ec801 | 1.0000000000000001e+18 | 1.0000000000000001e+18 | \x43abc16d674ec801 | t
+ \x43e158e460913cff | 9.999999999999998e+18 | 9.999999999999998e+18 | \x43e158e460913cff | t
+ \x43e158e460913d00 | 1e+19 | 1e+19 | \x43e158e460913d00 | t
+ \x43e158e460913d01 | 1.0000000000000002e+19 | 1.0000000000000002e+19 | \x43e158e460913d01 | t
+ \x4415af1d78b58c3f | 9.999999999999998e+19 | 9.999999999999998e+19 | \x4415af1d78b58c3f | t
+ \x4415af1d78b58c40 | 1e+20 | 1e+20 | \x4415af1d78b58c40 | t
+ \x4415af1d78b58c41 | 1.0000000000000002e+20 | 1.0000000000000002e+20 | \x4415af1d78b58c41 | t
+ \x444b1ae4d6e2ef4f | 9.999999999999999e+20 | 9.999999999999999e+20 | \x444b1ae4d6e2ef4f | t
+ \x444b1ae4d6e2ef50 | 1e+21 | 1e+21 | \x444b1ae4d6e2ef50 | t
+ \x444b1ae4d6e2ef51 | 1.0000000000000001e+21 | 1.0000000000000001e+21 | \x444b1ae4d6e2ef51 | t
+ \x4480f0cf064dd591 | 9.999999999999998e+21 | 9.999999999999998e+21 | \x4480f0cf064dd591 | t
+ \x4480f0cf064dd592 | 1e+22 | 1e+22 | \x4480f0cf064dd592 | t
+ \x4480f0cf064dd593 | 1.0000000000000002e+22 | 1.0000000000000002e+22 | \x4480f0cf064dd593 | t
+ \x44b52d02c7e14af5 | 9.999999999999997e+22 | 9.999999999999997e+22 | \x44b52d02c7e14af5 | t
+ \x44b52d02c7e14af6 | 9.999999999999999e+22 | 9.999999999999999e+22 | \x44b52d02c7e14af6 | t
+ \x44b52d02c7e14af7 | 1.0000000000000001e+23 | 1.0000000000000001e+23 | \x44b52d02c7e14af7 | t
+ \x44ea784379d99db3 | 9.999999999999998e+23 | 9.999999999999998e+23 | \x44ea784379d99db3 | t
+ \x44ea784379d99db4 | 1e+24 | 1e+24 | \x44ea784379d99db4 | t
+ \x44ea784379d99db5 | 1.0000000000000001e+24 | 1.0000000000000001e+24 | \x44ea784379d99db5 | t
+ \x45208b2a2c280290 | 9.999999999999999e+24 | 9.999999999999999e+24 | \x45208b2a2c280290 | t
+ \x45208b2a2c280291 | 1e+25 | 1e+25 | \x45208b2a2c280291 | t
+ \x45208b2a2c280292 | 1.0000000000000003e+25 | 1.0000000000000003e+25 | \x45208b2a2c280292 | t
+ \x7feffffffffffffe | 1.7976931348623155e+308 | 1.7976931348623155e+308 | \x7feffffffffffffe | t
+ \x7fefffffffffffff | 1.7976931348623157e+308 | 1.7976931348623157e+308 | \x7fefffffffffffff | t
+ \x4350000000000002 | 1.8014398509481992e+16 | 1.8014398509481992e+16 | \x4350000000000002 | t
+ \x4350000000002e06 | 1.8014398509529112e+16 | 1.8014398509529112e+16 | \x4350000000002e06 | t
+ \x4352000000000003 | 2.0266198323167244e+16 | 2.0266198323167244e+16 | \x4352000000000003 | t
+ \x4352000000000004 | 2.0266198323167248e+16 | 2.0266198323167248e+16 | \x4352000000000004 | t
+ \x4358000000000003 | 2.7021597764222988e+16 | 2.7021597764222988e+16 | \x4358000000000003 | t
+ \x4358000000000004 | 2.7021597764222992e+16 | 2.7021597764222992e+16 | \x4358000000000004 | t
+ \x435f000000000020 | 3.4902897112121472e+16 | 3.4902897112121472e+16 | \x435f000000000020 | t
+ \xc350000000000002 | -1.8014398509481992e+16 | -1.8014398509481992e+16 | \xc350000000000002 | t
+ \xc350000000002e06 | -1.8014398509529112e+16 | -1.8014398509529112e+16 | \xc350000000002e06 | t
+ \xc352000000000003 | -2.0266198323167244e+16 | -2.0266198323167244e+16 | \xc352000000000003 | t
+ \xc352000000000004 | -2.0266198323167248e+16 | -2.0266198323167248e+16 | \xc352000000000004 | t
+ \xc358000000000003 | -2.7021597764222988e+16 | -2.7021597764222988e+16 | \xc358000000000003 | t
+ \xc358000000000004 | -2.7021597764222992e+16 | -2.7021597764222992e+16 | \xc358000000000004 | t
+ \xc35f000000000020 | -3.4902897112121472e+16 | -3.4902897112121472e+16 | \xc35f000000000020 | t
+ \x42dc12218377de66 | 123456789012345.6 | 123456789012345.6 | \x42dc12218377de66 | t
+ \x42a674e79c5fe51f | 12345678901234.56 | 12345678901234.56 | \x42a674e79c5fe51f | t
+ \x4271f71fb04cb74c | 1234567890123.456 | 1234567890123.456 | \x4271f71fb04cb74c | t
+ \x423cbe991a145879 | 123456789012.3456 | 123456789012.3456 | \x423cbe991a145879 | t
+ \x4206fee0e1a9e061 | 12345678901.23456 | 12345678901.23456 | \x4206fee0e1a9e061 | t
+ \x41d26580b487e6b4 | 1234567890.123456 | 1234567890.123456 | \x41d26580b487e6b4 | t
+ \x419d6f34540ca453 | 123456789.0123456 | 123456789.0123456 | \x419d6f34540ca453 | t
+ \x41678c29dcd6e9dc | 12345678.90123456 | 12345678.90123456 | \x41678c29dcd6e9dc | t
+ \x4132d687e3df217d | 1234567.890123456 | 1234567.890123456 | \x4132d687e3df217d | t
+ \x40fe240c9fcb68c8 | 123456.7890123456 | 123456.7890123456 | \x40fe240c9fcb68c8 | t
+ \x40c81cd6e63c53d3 | 12345.67890123456 | 12345.67890123456 | \x40c81cd6e63c53d3 | t
+ \x40934a4584fd0fdc | 1234.567890123456 | 1234.567890123456 | \x40934a4584fd0fdc | t
+ \x405edd3c07fb4c93 | 123.4567890123456 | 123.4567890123456 | \x405edd3c07fb4c93 | t
+ \x4028b0fcd32f7076 | 12.34567890123456 | 12.34567890123456 | \x4028b0fcd32f7076 | t
+ \x3ff3c0ca428c59f8 | 1.234567890123456 | 1.234567890123456 | \x3ff3c0ca428c59f8 | t
+ \x3e60000000000000 | 2.9802322387695312e-08 | 2.9802322387695312e-08 | \x3e60000000000000 | t
+ \xc352bd2668e077c4 | -2.1098088986959632e+16 | -2.1098088986959632e+16 | \xc352bd2668e077c4 | t
+ \x434018601510c000 | 9.0608011534336e+15 | 9.0608011534336e+15 | \x434018601510c000 | t
+ \x43d055dc36f24000 | 4.708356024711512e+18 | 4.708356024711512e+18 | \x43d055dc36f24000 | t
+ \x43e052961c6f8000 | 9.409340012568248e+18 | 9.409340012568248e+18 | \x43e052961c6f8000 | t
+ \x3ff3c0ca2a5b1d5d | 1.2345678 | 1.2345678 | \x3ff3c0ca2a5b1d5d | t
+ \x4830f0cf064dd592 | 5.764607523034235e+39 | 5.764607523034235e+39 | \x4830f0cf064dd592 | t
+ \x4840f0cf064dd592 | 1.152921504606847e+40 | 1.152921504606847e+40 | \x4840f0cf064dd592 | t
+ \x4850f0cf064dd592 | 2.305843009213694e+40 | 2.305843009213694e+40 | \x4850f0cf064dd592 | t
+ \x3ff3333333333333 | 1.2 | 1.2 | \x3ff3333333333333 | t
+ \x3ff3ae147ae147ae | 1.23 | 1.23 | \x3ff3ae147ae147ae | t
+ \x3ff3be76c8b43958 | 1.234 | 1.234 | \x3ff3be76c8b43958 | t
+ \x3ff3c083126e978d | 1.2345 | 1.2345 | \x3ff3c083126e978d | t
+ \x3ff3c0c1fc8f3238 | 1.23456 | 1.23456 | \x3ff3c0c1fc8f3238 | t
+ \x3ff3c0c9539b8887 | 1.234567 | 1.234567 | \x3ff3c0c9539b8887 | t
+ \x3ff3c0ca2a5b1d5d | 1.2345678 | 1.2345678 | \x3ff3c0ca2a5b1d5d | t
+ \x3ff3c0ca4283de1b | 1.23456789 | 1.23456789 | \x3ff3c0ca4283de1b | t
+ \x3ff3c0ca43db770a | 1.234567895 | 1.234567895 | \x3ff3c0ca43db770a | t
+ \x3ff3c0ca428abd53 | 1.2345678901 | 1.2345678901 | \x3ff3c0ca428abd53 | t
+ \x3ff3c0ca428c1d2b | 1.23456789012 | 1.23456789012 | \x3ff3c0ca428c1d2b | t
+ \x3ff3c0ca428c51f2 | 1.234567890123 | 1.234567890123 | \x3ff3c0ca428c51f2 | t
+ \x3ff3c0ca428c58fc | 1.2345678901234 | 1.2345678901234 | \x3ff3c0ca428c58fc | t
+ \x3ff3c0ca428c59dd | 1.23456789012345 | 1.23456789012345 | \x3ff3c0ca428c59dd | t
+ \x3ff3c0ca428c59f8 | 1.234567890123456 | 1.234567890123456 | \x3ff3c0ca428c59f8 | t
+ \x3ff3c0ca428c59fb | 1.2345678901234567 | 1.2345678901234567 | \x3ff3c0ca428c59fb | t
+ \x40112e0be8047a7d | 4.294967294 | 4.294967294 | \x40112e0be8047a7d | t
+ \x40112e0be815a889 | 4.294967295 | 4.294967295 | \x40112e0be815a889 | t
+ \x40112e0be826d695 | 4.294967296 | 4.294967296 | \x40112e0be826d695 | t
+ \x40112e0be83804a1 | 4.294967297 | 4.294967297 | \x40112e0be83804a1 | t
+ \x40112e0be84932ad | 4.294967298 | 4.294967298 | \x40112e0be84932ad | t
+ \x0040000000000000 | 1.7800590868057611e-307 | 1.7800590868057611e-307 | \x0040000000000000 | t
+ \x007fffffffffffff | 2.8480945388892175e-306 | 2.8480945388892175e-306 | \x007fffffffffffff | t
+ \x0290000000000000 | 2.446494580089078e-296 | 2.446494580089078e-296 | \x0290000000000000 | t
+ \x029fffffffffffff | 4.8929891601781557e-296 | 4.8929891601781557e-296 | \x029fffffffffffff | t
+ \x4350000000000000 | 1.8014398509481984e+16 | 1.8014398509481984e+16 | \x4350000000000000 | t
+ \x435fffffffffffff | 3.6028797018963964e+16 | 3.6028797018963964e+16 | \x435fffffffffffff | t
+ \x1330000000000000 | 2.900835519859558e-216 | 2.900835519859558e-216 | \x1330000000000000 | t
+ \x133fffffffffffff | 5.801671039719115e-216 | 5.801671039719115e-216 | \x133fffffffffffff | t
+ \x3a6fa7161a4d6e0c | 3.196104012172126e-27 | 3.196104012172126e-27 | \x3a6fa7161a4d6e0c | t
+(209 rows)
+
+-- clean up, lest opr_sanity complain
+drop type xfloat8 cascade;
+NOTICE: drop cascades to 6 other objects
+DETAIL: drop cascades to function xfloat8in(cstring)
+drop cascades to function xfloat8out(xfloat8)
+drop cascades to cast from xfloat8 to double precision
+drop cascades to cast from double precision to xfloat8
+drop cascades to cast from xfloat8 to bigint
+drop cascades to cast from bigint to xfloat8
diff --git a/ydb/library/yql/tests/postgresql/original/cases/float8.sql b/ydb/library/yql/tests/postgresql/original/cases/float8.sql
new file mode 100644
index 0000000000..97f0c3bb2f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/float8.sql
@@ -0,0 +1,500 @@
+--
+-- FLOAT8
+--
+
+CREATE TABLE FLOAT8_TBL(f1 float8);
+
+INSERT INTO FLOAT8_TBL(f1) VALUES (' 0.0 ');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30 ');
+INSERT INTO FLOAT8_TBL(f1) VALUES (' -34.84');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e+200');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e-200');
+
+-- test for underflow and overflow handling
+SELECT '10e400'::float8;
+SELECT '-10e400'::float8;
+SELECT '10e-400'::float8;
+SELECT '-10e-400'::float8;
+
+-- test smallest normalized input
+SELECT float8send('2.2250738585072014E-308'::float8);
+
+-- bad input
+INSERT INTO FLOAT8_TBL(f1) VALUES ('');
+INSERT INTO FLOAT8_TBL(f1) VALUES (' ');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0');
+INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3');
+INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5');
+
+-- special inputs
+SELECT 'NaN'::float8;
+SELECT 'nan'::float8;
+SELECT ' NAN '::float8;
+SELECT 'infinity'::float8;
+SELECT ' -INFINiTY '::float8;
+-- bad special inputs
+SELECT 'N A N'::float8;
+SELECT 'NaN x'::float8;
+SELECT ' INFINITY x'::float8;
+
+SELECT 'Infinity'::float8 + 100.0;
+SELECT 'Infinity'::float8 / 'Infinity'::float8;
+SELECT '42'::float8 / 'Infinity'::float8;
+SELECT 'nan'::float8 / 'nan'::float8;
+SELECT 'nan'::float8 / '0'::float8;
+SELECT 'nan'::numeric::float8;
+
+SELECT * FROM FLOAT8_TBL;
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <> '1004.3';
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 = '1004.3';
+
+SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' > f.f1;
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 < '1004.3';
+
+SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' >= f.f1;
+
+SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3';
+
+SELECT f.f1, f.f1 * '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+
+SELECT f.f1, f.f1 + '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+
+SELECT f.f1, f.f1 / '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+
+SELECT f.f1, f.f1 - '-10' AS x
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+
+SELECT f.f1 ^ '2.0' AS square_f1
+ FROM FLOAT8_TBL f where f.f1 = '1004.3';
+
+-- absolute value
+SELECT f.f1, @f.f1 AS abs_f1
+ FROM FLOAT8_TBL f;
+
+-- truncate
+SELECT f.f1, trunc(f.f1) AS trunc_f1
+ FROM FLOAT8_TBL f;
+
+-- round
+SELECT f.f1, round(f.f1) AS round_f1
+ FROM FLOAT8_TBL f;
+
+-- ceil / ceiling
+select ceil(f1) as ceil_f1 from float8_tbl f;
+select ceiling(f1) as ceiling_f1 from float8_tbl f;
+
+-- floor
+select floor(f1) as floor_f1 from float8_tbl f;
+
+-- sign
+select sign(f1) as sign_f1 from float8_tbl f;
+
+-- avoid bit-exact output here because operations may not be bit-exact.
+SET extra_float_digits = 0;
+
+-- square root
+SELECT sqrt(float8 '64') AS eight;
+
+SELECT |/ float8 '64' AS eight;
+
+SELECT f.f1, |/f.f1 AS sqrt_f1
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+
+-- power
+SELECT power(float8 '144', float8 '0.5');
+SELECT power(float8 'NaN', float8 '0.5');
+SELECT power(float8 '144', float8 'NaN');
+SELECT power(float8 'NaN', float8 'NaN');
+SELECT power(float8 '-1', float8 'NaN');
+SELECT power(float8 '1', float8 'NaN');
+SELECT power(float8 'NaN', float8 '0');
+SELECT power(float8 'inf', float8 '0');
+SELECT power(float8 '-inf', float8 '0');
+SELECT power(float8 '0', float8 'inf');
+SELECT power(float8 '0', float8 '-inf');
+SELECT power(float8 '1', float8 'inf');
+SELECT power(float8 '1', float8 '-inf');
+SELECT power(float8 '-1', float8 'inf');
+SELECT power(float8 '-1', float8 '-inf');
+SELECT power(float8 '0.1', float8 'inf');
+SELECT power(float8 '-0.1', float8 'inf');
+SELECT power(float8 '1.1', float8 'inf');
+SELECT power(float8 '-1.1', float8 'inf');
+SELECT power(float8 '0.1', float8 '-inf');
+SELECT power(float8 '-0.1', float8 '-inf');
+SELECT power(float8 '1.1', float8 '-inf');
+SELECT power(float8 '-1.1', float8 '-inf');
+SELECT power(float8 'inf', float8 '-2');
+SELECT power(float8 'inf', float8 '2');
+SELECT power(float8 'inf', float8 'inf');
+SELECT power(float8 'inf', float8 '-inf');
+-- Intel's icc misoptimizes the code that controls the sign of this result,
+-- even with -mp1. Pending a fix for that, only test for "is it zero".
+SELECT power(float8 '-inf', float8 '-2') = '0';
+SELECT power(float8 '-inf', float8 '-3');
+SELECT power(float8 '-inf', float8 '2');
+SELECT power(float8 '-inf', float8 '3');
+SELECT power(float8 '-inf', float8 '3.5');
+SELECT power(float8 '-inf', float8 'inf');
+SELECT power(float8 '-inf', float8 '-inf');
+
+-- take exp of ln(f.f1)
+SELECT f.f1, exp(ln(f.f1)) AS exp_ln_f1
+ FROM FLOAT8_TBL f
+ WHERE f.f1 > '0.0';
+
+-- check edge cases for exp
+SELECT exp('inf'::float8), exp('-inf'::float8), exp('nan'::float8);
+
+-- cube root
+SELECT ||/ float8 '27' AS three;
+
+SELECT f.f1, ||/f.f1 AS cbrt_f1 FROM FLOAT8_TBL f;
+
+
+SELECT * FROM FLOAT8_TBL;
+
+UPDATE FLOAT8_TBL
+ SET f1 = FLOAT8_TBL.f1 * '-1'
+ WHERE FLOAT8_TBL.f1 > '0.0';
+
+SELECT f.f1 * '1e200' from FLOAT8_TBL f;
+
+SELECT f.f1 ^ '1e200' from FLOAT8_TBL f;
+
+SELECT 0 ^ 0 + 0 ^ 1 + 0 ^ 0.0 + 0 ^ 0.5;
+
+SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 = '0.0' ;
+
+SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0' ;
+
+SELECT exp(f.f1) from FLOAT8_TBL f;
+
+SELECT f.f1 / '0.0' from FLOAT8_TBL f;
+
+SELECT * FROM FLOAT8_TBL;
+
+-- hyperbolic functions
+-- we run these with extra_float_digits = 0 too, since different platforms
+-- tend to produce results that vary in the last place.
+SELECT sinh(float8 '1');
+SELECT cosh(float8 '1');
+SELECT tanh(float8 '1');
+SELECT asinh(float8 '1');
+SELECT acosh(float8 '2');
+SELECT atanh(float8 '0.5');
+-- test Inf/NaN cases for hyperbolic functions
+SELECT sinh(float8 'infinity');
+SELECT sinh(float8 '-infinity');
+SELECT sinh(float8 'nan');
+SELECT cosh(float8 'infinity');
+SELECT cosh(float8 '-infinity');
+SELECT cosh(float8 'nan');
+SELECT tanh(float8 'infinity');
+SELECT tanh(float8 '-infinity');
+SELECT tanh(float8 'nan');
+SELECT asinh(float8 'infinity');
+SELECT asinh(float8 '-infinity');
+SELECT asinh(float8 'nan');
+-- acosh(Inf) should be Inf, but some mingw versions produce NaN, so skip test
+-- SELECT acosh(float8 'infinity');
+SELECT acosh(float8 '-infinity');
+SELECT acosh(float8 'nan');
+SELECT atanh(float8 'infinity');
+SELECT atanh(float8 '-infinity');
+SELECT atanh(float8 'nan');
+
+RESET extra_float_digits;
+
+-- test for over- and underflow
+INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400');
+
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400');
+
+INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400');
+
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400');
+
+-- maintain external table consistency across platforms
+-- delete all values and reinsert well-behaved ones
+
+DELETE FROM FLOAT8_TBL;
+
+INSERT INTO FLOAT8_TBL(f1) VALUES ('0.0');
+
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-34.84');
+
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30');
+
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200');
+
+INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
+
+SELECT * FROM FLOAT8_TBL;
+
+-- test edge-case coercions to integer
+SELECT '32767.4'::float8::int2;
+SELECT '32767.6'::float8::int2;
+SELECT '-32768.4'::float8::int2;
+SELECT '-32768.6'::float8::int2;
+SELECT '2147483647.4'::float8::int4;
+SELECT '2147483647.6'::float8::int4;
+SELECT '-2147483648.4'::float8::int4;
+SELECT '-2147483648.6'::float8::int4;
+SELECT '9223372036854773760'::float8::int8;
+SELECT '9223372036854775807'::float8::int8;
+SELECT '-9223372036854775808.5'::float8::int8;
+SELECT '-9223372036854780000'::float8::int8;
+
+-- test exact cases for trigonometric functions in degrees
+
+SELECT x,
+ sind(x),
+ sind(x) IN (-1,-0.5,0,0.5,1) AS sind_exact
+FROM (VALUES (0), (30), (90), (150), (180),
+ (210), (270), (330), (360)) AS t(x);
+
+SELECT x,
+ cosd(x),
+ cosd(x) IN (-1,-0.5,0,0.5,1) AS cosd_exact
+FROM (VALUES (0), (60), (90), (120), (180),
+ (240), (270), (300), (360)) AS t(x);
+
+SELECT x,
+ tand(x),
+ tand(x) IN ('-Infinity'::float8,-1,0,
+ 1,'Infinity'::float8) AS tand_exact,
+ cotd(x),
+ cotd(x) IN ('-Infinity'::float8,-1,0,
+ 1,'Infinity'::float8) AS cotd_exact
+FROM (VALUES (0), (45), (90), (135), (180),
+ (225), (270), (315), (360)) AS t(x);
+
+SELECT x,
+ asind(x),
+ asind(x) IN (-90,-30,0,30,90) AS asind_exact,
+ acosd(x),
+ acosd(x) IN (0,60,90,120,180) AS acosd_exact
+FROM (VALUES (-1), (-0.5), (0), (0.5), (1)) AS t(x);
+
+SELECT x,
+ atand(x),
+ atand(x) IN (-90,-45,0,45,90) AS atand_exact
+FROM (VALUES ('-Infinity'::float8), (-1), (0), (1),
+ ('Infinity'::float8)) AS t(x);
+
+SELECT x, y,
+ atan2d(y, x),
+ atan2d(y, x) IN (-90,0,90,180) AS atan2d_exact
+FROM (SELECT 10*cosd(a), 10*sind(a)
+ FROM generate_series(0, 360, 90) AS t(a)) AS t(x,y);
+
+--
+-- test output (and round-trip safety) of various values.
+-- To ensure we're testing what we think we're testing, start with
+-- float values specified by bit patterns (as a useful side effect,
+-- this means we'll fail on non-IEEE platforms).
+
+create type xfloat8;
+create function xfloat8in(cstring) returns xfloat8 immutable strict
+ language internal as 'int8in';
+create function xfloat8out(xfloat8) returns cstring immutable strict
+ language internal as 'int8out';
+create type xfloat8 (input = xfloat8in, output = xfloat8out, like = float8);
+create cast (xfloat8 as float8) without function;
+create cast (float8 as xfloat8) without function;
+create cast (xfloat8 as bigint) without function;
+create cast (bigint as xfloat8) without function;
+
+-- float8: seeeeeee eeeeeeee eeeeeeee mmmmmmmm mmmmmmmm(x4)
+
+-- we don't care to assume the platform's strtod() handles subnormals
+-- correctly; those are "use at your own risk". However we do test
+-- subnormal outputs, since those are under our control.
+
+with testdata(bits) as (values
+ -- small subnormals
+ (x'0000000000000001'),
+ (x'0000000000000002'), (x'0000000000000003'),
+ (x'0000000000001000'), (x'0000000100000000'),
+ (x'0000010000000000'), (x'0000010100000000'),
+ (x'0000400000000000'), (x'0000400100000000'),
+ (x'0000800000000000'), (x'0000800000000001'),
+ -- these values taken from upstream testsuite
+ (x'00000000000f4240'),
+ (x'00000000016e3600'),
+ (x'0000008cdcdea440'),
+ -- borderline between subnormal and normal
+ (x'000ffffffffffff0'), (x'000ffffffffffff1'),
+ (x'000ffffffffffffe'), (x'000fffffffffffff'))
+select float8send(flt) as ibits,
+ flt
+ from (select bits::bigint::xfloat8::float8 as flt
+ from testdata
+ offset 0) s;
+
+-- round-trip tests
+
+with testdata(bits) as (values
+ (x'0000000000000000'),
+ -- smallest normal values
+ (x'0010000000000000'), (x'0010000000000001'),
+ (x'0010000000000002'), (x'0018000000000000'),
+ --
+ (x'3ddb7cdfd9d7bdba'), (x'3ddb7cdfd9d7bdbb'), (x'3ddb7cdfd9d7bdbc'),
+ (x'3e112e0be826d694'), (x'3e112e0be826d695'), (x'3e112e0be826d696'),
+ (x'3e45798ee2308c39'), (x'3e45798ee2308c3a'), (x'3e45798ee2308c3b'),
+ (x'3e7ad7f29abcaf47'), (x'3e7ad7f29abcaf48'), (x'3e7ad7f29abcaf49'),
+ (x'3eb0c6f7a0b5ed8c'), (x'3eb0c6f7a0b5ed8d'), (x'3eb0c6f7a0b5ed8e'),
+ (x'3ee4f8b588e368ef'), (x'3ee4f8b588e368f0'), (x'3ee4f8b588e368f1'),
+ (x'3f1a36e2eb1c432c'), (x'3f1a36e2eb1c432d'), (x'3f1a36e2eb1c432e'),
+ (x'3f50624dd2f1a9fb'), (x'3f50624dd2f1a9fc'), (x'3f50624dd2f1a9fd'),
+ (x'3f847ae147ae147a'), (x'3f847ae147ae147b'), (x'3f847ae147ae147c'),
+ (x'3fb9999999999999'), (x'3fb999999999999a'), (x'3fb999999999999b'),
+ -- values very close to 1
+ (x'3feffffffffffff0'), (x'3feffffffffffff1'), (x'3feffffffffffff2'),
+ (x'3feffffffffffff3'), (x'3feffffffffffff4'), (x'3feffffffffffff5'),
+ (x'3feffffffffffff6'), (x'3feffffffffffff7'), (x'3feffffffffffff8'),
+ (x'3feffffffffffff9'), (x'3feffffffffffffa'), (x'3feffffffffffffb'),
+ (x'3feffffffffffffc'), (x'3feffffffffffffd'), (x'3feffffffffffffe'),
+ (x'3fefffffffffffff'),
+ (x'3ff0000000000000'),
+ (x'3ff0000000000001'), (x'3ff0000000000002'), (x'3ff0000000000003'),
+ (x'3ff0000000000004'), (x'3ff0000000000005'), (x'3ff0000000000006'),
+ (x'3ff0000000000007'), (x'3ff0000000000008'), (x'3ff0000000000009'),
+ --
+ (x'3ff921fb54442d18'),
+ (x'4005bf0a8b14576a'),
+ (x'400921fb54442d18'),
+ --
+ (x'4023ffffffffffff'), (x'4024000000000000'), (x'4024000000000001'),
+ (x'4058ffffffffffff'), (x'4059000000000000'), (x'4059000000000001'),
+ (x'408f3fffffffffff'), (x'408f400000000000'), (x'408f400000000001'),
+ (x'40c387ffffffffff'), (x'40c3880000000000'), (x'40c3880000000001'),
+ (x'40f869ffffffffff'), (x'40f86a0000000000'), (x'40f86a0000000001'),
+ (x'412e847fffffffff'), (x'412e848000000000'), (x'412e848000000001'),
+ (x'416312cfffffffff'), (x'416312d000000000'), (x'416312d000000001'),
+ (x'4197d783ffffffff'), (x'4197d78400000000'), (x'4197d78400000001'),
+ (x'41cdcd64ffffffff'), (x'41cdcd6500000000'), (x'41cdcd6500000001'),
+ (x'4202a05f1fffffff'), (x'4202a05f20000000'), (x'4202a05f20000001'),
+ (x'42374876e7ffffff'), (x'42374876e8000000'), (x'42374876e8000001'),
+ (x'426d1a94a1ffffff'), (x'426d1a94a2000000'), (x'426d1a94a2000001'),
+ (x'42a2309ce53fffff'), (x'42a2309ce5400000'), (x'42a2309ce5400001'),
+ (x'42d6bcc41e8fffff'), (x'42d6bcc41e900000'), (x'42d6bcc41e900001'),
+ (x'430c6bf52633ffff'), (x'430c6bf526340000'), (x'430c6bf526340001'),
+ (x'4341c37937e07fff'), (x'4341c37937e08000'), (x'4341c37937e08001'),
+ (x'4376345785d89fff'), (x'4376345785d8a000'), (x'4376345785d8a001'),
+ (x'43abc16d674ec7ff'), (x'43abc16d674ec800'), (x'43abc16d674ec801'),
+ (x'43e158e460913cff'), (x'43e158e460913d00'), (x'43e158e460913d01'),
+ (x'4415af1d78b58c3f'), (x'4415af1d78b58c40'), (x'4415af1d78b58c41'),
+ (x'444b1ae4d6e2ef4f'), (x'444b1ae4d6e2ef50'), (x'444b1ae4d6e2ef51'),
+ (x'4480f0cf064dd591'), (x'4480f0cf064dd592'), (x'4480f0cf064dd593'),
+ (x'44b52d02c7e14af5'), (x'44b52d02c7e14af6'), (x'44b52d02c7e14af7'),
+ (x'44ea784379d99db3'), (x'44ea784379d99db4'), (x'44ea784379d99db5'),
+ (x'45208b2a2c280290'), (x'45208b2a2c280291'), (x'45208b2a2c280292'),
+ --
+ (x'7feffffffffffffe'), (x'7fefffffffffffff'),
+ -- round to even tests (+ve)
+ (x'4350000000000002'),
+ (x'4350000000002e06'),
+ (x'4352000000000003'),
+ (x'4352000000000004'),
+ (x'4358000000000003'),
+ (x'4358000000000004'),
+ (x'435f000000000020'),
+ -- round to even tests (-ve)
+ (x'c350000000000002'),
+ (x'c350000000002e06'),
+ (x'c352000000000003'),
+ (x'c352000000000004'),
+ (x'c358000000000003'),
+ (x'c358000000000004'),
+ (x'c35f000000000020'),
+ -- exercise fixed-point memmoves
+ (x'42dc12218377de66'),
+ (x'42a674e79c5fe51f'),
+ (x'4271f71fb04cb74c'),
+ (x'423cbe991a145879'),
+ (x'4206fee0e1a9e061'),
+ (x'41d26580b487e6b4'),
+ (x'419d6f34540ca453'),
+ (x'41678c29dcd6e9dc'),
+ (x'4132d687e3df217d'),
+ (x'40fe240c9fcb68c8'),
+ (x'40c81cd6e63c53d3'),
+ (x'40934a4584fd0fdc'),
+ (x'405edd3c07fb4c93'),
+ (x'4028b0fcd32f7076'),
+ (x'3ff3c0ca428c59f8'),
+ -- these cases come from the upstream's testsuite
+ -- LotsOfTrailingZeros)
+ (x'3e60000000000000'),
+ -- Regression
+ (x'c352bd2668e077c4'),
+ (x'434018601510c000'),
+ (x'43d055dc36f24000'),
+ (x'43e052961c6f8000'),
+ (x'3ff3c0ca2a5b1d5d'),
+ -- LooksLikePow5
+ (x'4830f0cf064dd592'),
+ (x'4840f0cf064dd592'),
+ (x'4850f0cf064dd592'),
+ -- OutputLength
+ (x'3ff3333333333333'),
+ (x'3ff3ae147ae147ae'),
+ (x'3ff3be76c8b43958'),
+ (x'3ff3c083126e978d'),
+ (x'3ff3c0c1fc8f3238'),
+ (x'3ff3c0c9539b8887'),
+ (x'3ff3c0ca2a5b1d5d'),
+ (x'3ff3c0ca4283de1b'),
+ (x'3ff3c0ca43db770a'),
+ (x'3ff3c0ca428abd53'),
+ (x'3ff3c0ca428c1d2b'),
+ (x'3ff3c0ca428c51f2'),
+ (x'3ff3c0ca428c58fc'),
+ (x'3ff3c0ca428c59dd'),
+ (x'3ff3c0ca428c59f8'),
+ (x'3ff3c0ca428c59fb'),
+ -- 32-bit chunking
+ (x'40112e0be8047a7d'),
+ (x'40112e0be815a889'),
+ (x'40112e0be826d695'),
+ (x'40112e0be83804a1'),
+ (x'40112e0be84932ad'),
+ -- MinMaxShift
+ (x'0040000000000000'),
+ (x'007fffffffffffff'),
+ (x'0290000000000000'),
+ (x'029fffffffffffff'),
+ (x'4350000000000000'),
+ (x'435fffffffffffff'),
+ (x'1330000000000000'),
+ (x'133fffffffffffff'),
+ (x'3a6fa7161a4d6e0c')
+)
+select float8send(flt) as ibits,
+ flt,
+ flt::text::float8 as r_flt,
+ float8send(flt::text::float8) as obits,
+ float8send(flt::text::float8) = float8send(flt) as correct
+ from (select bits::bigint::xfloat8::float8 as flt
+ from testdata
+ offset 0) s;
+
+-- clean up, lest opr_sanity complain
+drop type xfloat8 cascade;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/functional_deps.out b/ydb/library/yql/tests/postgresql/original/cases/functional_deps.out
new file mode 100644
index 0000000000..32381b8ae7
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/functional_deps.out
@@ -0,0 +1,232 @@
+-- from http://www.depesz.com/index.php/2010/04/19/getting-unique-elements/
+CREATE TEMP TABLE articles (
+ id int CONSTRAINT articles_pkey PRIMARY KEY,
+ keywords text,
+ title text UNIQUE NOT NULL,
+ body text UNIQUE,
+ created date
+);
+CREATE TEMP TABLE articles_in_category (
+ article_id int,
+ category_id int,
+ changed date,
+ PRIMARY KEY (article_id, category_id)
+);
+-- test functional dependencies based on primary keys/unique constraints
+-- base tables
+-- group by primary key (OK)
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY id;
+ id | keywords | title | body | created
+----+----------+-------+------+---------
+(0 rows)
+
+-- group by unique not null (fail/todo)
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY title;
+ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT id, keywords, title, body, created
+ ^
+-- group by unique nullable (fail)
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY body;
+ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT id, keywords, title, body, created
+ ^
+-- group by something else (fail)
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY keywords;
+ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT id, keywords, title, body, created
+ ^
+-- multiple tables
+-- group by primary key (OK)
+SELECT a.id, a.keywords, a.title, a.body, a.created
+FROM articles AS a, articles_in_category AS aic
+WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138)
+GROUP BY a.id;
+ id | keywords | title | body | created
+----+----------+-------+------+---------
+(0 rows)
+
+-- group by something else (fail)
+SELECT a.id, a.keywords, a.title, a.body, a.created
+FROM articles AS a, articles_in_category AS aic
+WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138)
+GROUP BY aic.article_id, aic.category_id;
+ERROR: column "a.id" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created
+ ^
+-- JOIN syntax
+-- group by left table's primary key (OK)
+SELECT a.id, a.keywords, a.title, a.body, a.created
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY a.id;
+ id | keywords | title | body | created
+----+----------+-------+------+---------
+(0 rows)
+
+-- group by something else (fail)
+SELECT a.id, a.keywords, a.title, a.body, a.created
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY aic.article_id, aic.category_id;
+ERROR: column "a.id" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created
+ ^
+-- group by right table's (composite) primary key (OK)
+SELECT aic.changed
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY aic.category_id, aic.article_id;
+ changed
+---------
+(0 rows)
+
+-- group by right table's partial primary key (fail)
+SELECT aic.changed
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY aic.article_id;
+ERROR: column "aic.changed" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT aic.changed
+ ^
+-- example from documentation
+CREATE TEMP TABLE products (product_id int, name text, price numeric);
+CREATE TEMP TABLE sales (product_id int, units int);
+-- OK
+SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
+ FROM products p LEFT JOIN sales s USING (product_id)
+ GROUP BY product_id, p.name, p.price;
+ product_id | name | sales
+------------+------+-------
+(0 rows)
+
+-- fail
+SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
+ FROM products p LEFT JOIN sales s USING (product_id)
+ GROUP BY product_id;
+ERROR: column "p.name" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
+ ^
+ALTER TABLE products ADD PRIMARY KEY (product_id);
+-- OK now
+SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
+ FROM products p LEFT JOIN sales s USING (product_id)
+ GROUP BY product_id;
+ product_id | name | sales
+------------+------+-------
+(0 rows)
+
+-- Drupal example, http://drupal.org/node/555530
+CREATE TEMP TABLE node (
+ nid SERIAL,
+ vid integer NOT NULL default '0',
+ type varchar(32) NOT NULL default '',
+ title varchar(128) NOT NULL default '',
+ uid integer NOT NULL default '0',
+ status integer NOT NULL default '1',
+ created integer NOT NULL default '0',
+ -- snip
+ PRIMARY KEY (nid, vid)
+);
+CREATE TEMP TABLE users (
+ uid integer NOT NULL default '0',
+ name varchar(60) NOT NULL default '',
+ pass varchar(32) NOT NULL default '',
+ -- snip
+ PRIMARY KEY (uid),
+ UNIQUE (name)
+);
+-- OK
+SELECT u.uid, u.name FROM node n
+INNER JOIN users u ON u.uid = n.uid
+WHERE n.type = 'blog' AND n.status = 1
+GROUP BY u.uid, u.name;
+ uid | name
+-----+------
+(0 rows)
+
+-- OK
+SELECT u.uid, u.name FROM node n
+INNER JOIN users u ON u.uid = n.uid
+WHERE n.type = 'blog' AND n.status = 1
+GROUP BY u.uid;
+ uid | name
+-----+------
+(0 rows)
+
+-- Check views and dependencies
+-- fail
+CREATE TEMP VIEW fdv1 AS
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY body;
+ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 2: SELECT id, keywords, title, body, created
+ ^
+-- OK
+CREATE TEMP VIEW fdv1 AS
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY id;
+-- fail
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT;
+ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it
+DETAIL: view fdv1 depends on constraint articles_pkey on table articles
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+DROP VIEW fdv1;
+-- multiple dependencies
+CREATE TEMP VIEW fdv2 AS
+SELECT a.id, a.keywords, a.title, aic.category_id, aic.changed
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY a.id, aic.category_id, aic.article_id;
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail
+ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it
+DETAIL: view fdv2 depends on constraint articles_pkey on table articles
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+ALTER TABLE articles_in_category DROP CONSTRAINT articles_in_category_pkey RESTRICT; --fail
+ERROR: cannot drop constraint articles_in_category_pkey on table articles_in_category because other objects depend on it
+DETAIL: view fdv2 depends on constraint articles_in_category_pkey on table articles_in_category
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+DROP VIEW fdv2;
+-- nested queries
+CREATE TEMP VIEW fdv3 AS
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY id
+UNION
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY id;
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail
+ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it
+DETAIL: view fdv3 depends on constraint articles_pkey on table articles
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+DROP VIEW fdv3;
+CREATE TEMP VIEW fdv4 AS
+SELECT * FROM articles WHERE title IN (SELECT title FROM articles GROUP BY id);
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail
+ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it
+DETAIL: view fdv4 depends on constraint articles_pkey on table articles
+HINT: Use DROP ... CASCADE to drop the dependent objects too.
+DROP VIEW fdv4;
+-- prepared query plans: this results in failure on reuse
+PREPARE foo AS
+ SELECT id, keywords, title, body, created
+ FROM articles
+ GROUP BY id;
+EXECUTE foo;
+ id | keywords | title | body | created
+----+----------+-------+------+---------
+(0 rows)
+
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT;
+EXECUTE foo; -- fail
+ERROR: column "articles.keywords" must appear in the GROUP BY clause or be used in an aggregate function
diff --git a/ydb/library/yql/tests/postgresql/original/cases/functional_deps.sql b/ydb/library/yql/tests/postgresql/original/cases/functional_deps.sql
new file mode 100644
index 0000000000..406490b995
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/functional_deps.sql
@@ -0,0 +1,210 @@
+-- from http://www.depesz.com/index.php/2010/04/19/getting-unique-elements/
+
+CREATE TEMP TABLE articles (
+ id int CONSTRAINT articles_pkey PRIMARY KEY,
+ keywords text,
+ title text UNIQUE NOT NULL,
+ body text UNIQUE,
+ created date
+);
+
+CREATE TEMP TABLE articles_in_category (
+ article_id int,
+ category_id int,
+ changed date,
+ PRIMARY KEY (article_id, category_id)
+);
+
+-- test functional dependencies based on primary keys/unique constraints
+
+-- base tables
+
+-- group by primary key (OK)
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY id;
+
+-- group by unique not null (fail/todo)
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY title;
+
+-- group by unique nullable (fail)
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY body;
+
+-- group by something else (fail)
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY keywords;
+
+-- multiple tables
+
+-- group by primary key (OK)
+SELECT a.id, a.keywords, a.title, a.body, a.created
+FROM articles AS a, articles_in_category AS aic
+WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138)
+GROUP BY a.id;
+
+-- group by something else (fail)
+SELECT a.id, a.keywords, a.title, a.body, a.created
+FROM articles AS a, articles_in_category AS aic
+WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138)
+GROUP BY aic.article_id, aic.category_id;
+
+-- JOIN syntax
+
+-- group by left table's primary key (OK)
+SELECT a.id, a.keywords, a.title, a.body, a.created
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY a.id;
+
+-- group by something else (fail)
+SELECT a.id, a.keywords, a.title, a.body, a.created
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY aic.article_id, aic.category_id;
+
+-- group by right table's (composite) primary key (OK)
+SELECT aic.changed
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY aic.category_id, aic.article_id;
+
+-- group by right table's partial primary key (fail)
+SELECT aic.changed
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY aic.article_id;
+
+
+-- example from documentation
+
+CREATE TEMP TABLE products (product_id int, name text, price numeric);
+CREATE TEMP TABLE sales (product_id int, units int);
+
+-- OK
+SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
+ FROM products p LEFT JOIN sales s USING (product_id)
+ GROUP BY product_id, p.name, p.price;
+
+-- fail
+SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
+ FROM products p LEFT JOIN sales s USING (product_id)
+ GROUP BY product_id;
+
+ALTER TABLE products ADD PRIMARY KEY (product_id);
+
+-- OK now
+SELECT product_id, p.name, (sum(s.units) * p.price) AS sales
+ FROM products p LEFT JOIN sales s USING (product_id)
+ GROUP BY product_id;
+
+
+-- Drupal example, http://drupal.org/node/555530
+
+CREATE TEMP TABLE node (
+ nid SERIAL,
+ vid integer NOT NULL default '0',
+ type varchar(32) NOT NULL default '',
+ title varchar(128) NOT NULL default '',
+ uid integer NOT NULL default '0',
+ status integer NOT NULL default '1',
+ created integer NOT NULL default '0',
+ -- snip
+ PRIMARY KEY (nid, vid)
+);
+
+CREATE TEMP TABLE users (
+ uid integer NOT NULL default '0',
+ name varchar(60) NOT NULL default '',
+ pass varchar(32) NOT NULL default '',
+ -- snip
+ PRIMARY KEY (uid),
+ UNIQUE (name)
+);
+
+-- OK
+SELECT u.uid, u.name FROM node n
+INNER JOIN users u ON u.uid = n.uid
+WHERE n.type = 'blog' AND n.status = 1
+GROUP BY u.uid, u.name;
+
+-- OK
+SELECT u.uid, u.name FROM node n
+INNER JOIN users u ON u.uid = n.uid
+WHERE n.type = 'blog' AND n.status = 1
+GROUP BY u.uid;
+
+
+-- Check views and dependencies
+
+-- fail
+CREATE TEMP VIEW fdv1 AS
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY body;
+
+-- OK
+CREATE TEMP VIEW fdv1 AS
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY id;
+
+-- fail
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT;
+
+DROP VIEW fdv1;
+
+
+-- multiple dependencies
+CREATE TEMP VIEW fdv2 AS
+SELECT a.id, a.keywords, a.title, aic.category_id, aic.changed
+FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id
+WHERE aic.category_id in (14,62,70,53,138)
+GROUP BY a.id, aic.category_id, aic.article_id;
+
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail
+ALTER TABLE articles_in_category DROP CONSTRAINT articles_in_category_pkey RESTRICT; --fail
+
+DROP VIEW fdv2;
+
+
+-- nested queries
+
+CREATE TEMP VIEW fdv3 AS
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY id
+UNION
+SELECT id, keywords, title, body, created
+FROM articles
+GROUP BY id;
+
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail
+
+DROP VIEW fdv3;
+
+
+CREATE TEMP VIEW fdv4 AS
+SELECT * FROM articles WHERE title IN (SELECT title FROM articles GROUP BY id);
+
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail
+
+DROP VIEW fdv4;
+
+
+-- prepared query plans: this results in failure on reuse
+
+PREPARE foo AS
+ SELECT id, keywords, title, body, created
+ FROM articles
+ GROUP BY id;
+
+EXECUTE foo;
+
+ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT;
+
+EXECUTE foo; -- fail
diff --git a/ydb/library/yql/tests/postgresql/original/cases/horology.out b/ydb/library/yql/tests/postgresql/original/cases/horology.out
new file mode 100644
index 0000000000..4b865e73f3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/horology.out
@@ -0,0 +1,3344 @@
+--
+-- HOROLOGY
+--
+SET DateStyle = 'Postgres, MDY';
+SHOW TimeZone; -- Many of these tests depend on the prevailing setting
+ TimeZone
+----------
+ PST8PDT
+(1 row)
+
+--
+-- Test various input formats
+--
+SELECT timestamp with time zone '20011227 040506+08';
+ timestamptz
+------------------------------
+ Wed Dec 26 12:05:06 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '20011227 040506-08';
+ timestamptz
+------------------------------
+ Thu Dec 27 04:05:06 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '20011227 040506.789+08';
+ timestamptz
+----------------------------------
+ Wed Dec 26 12:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '20011227 040506.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '20011227T040506+08';
+ timestamptz
+------------------------------
+ Wed Dec 26 12:05:06 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '20011227T040506-08';
+ timestamptz
+------------------------------
+ Thu Dec 27 04:05:06 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '20011227T040506.789+08';
+ timestamptz
+----------------------------------
+ Wed Dec 26 12:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '20011227T040506.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '2001-12-27 04:05:06.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '2001.12.27 04:05:06.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '2001/12/27 04:05:06.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '12/27/2001 04:05:06.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+-- should fail in mdy mode:
+SELECT timestamp with time zone '27/12/2001 04:05:06.789-08';
+ERROR: date/time field value out of range: "27/12/2001 04:05:06.789-08"
+LINE 1: SELECT timestamp with time zone '27/12/2001 04:05:06.789-08'...
+ ^
+HINT: Perhaps you need a different "datestyle" setting.
+set datestyle to dmy;
+SELECT timestamp with time zone '27/12/2001 04:05:06.789-08';
+ timestamptz
+----------------------------------
+ Thu 27 Dec 04:05:06.789 2001 PST
+(1 row)
+
+reset datestyle;
+SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789+08';
+ timestamptz
+----------------------------------
+ Wed Dec 26 12:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789+08';
+ timestamptz
+----------------------------------
+ Wed Dec 26 12:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271+08';
+ timestamptz
+------------------------------
+ Wed Dec 26 08:00:00 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271-08';
+ timestamptz
+------------------------------
+ Thu Dec 27 00:00:00 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271.5+08';
+ timestamptz
+------------------------------
+ Wed Dec 26 20:00:00 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271.5-08';
+ timestamptz
+------------------------------
+ Thu Dec 27 12:00:00 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271 04:05:06+08';
+ timestamptz
+------------------------------
+ Wed Dec 26 12:05:06 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271 04:05:06-08';
+ timestamptz
+------------------------------
+ Thu Dec 27 04:05:06 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271T040506+08';
+ timestamptz
+------------------------------
+ Wed Dec 26 12:05:06 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271T040506-08';
+ timestamptz
+------------------------------
+ Thu Dec 27 04:05:06 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271T040506.789+08';
+ timestamptz
+----------------------------------
+ Wed Dec 26 12:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone 'J2452271T040506.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+-- German/European-style dates with periods as delimiters
+SELECT timestamp with time zone '12.27.2001 04:05:06.789+08';
+ timestamptz
+----------------------------------
+ Wed Dec 26 12:05:06.789 2001 PST
+(1 row)
+
+SELECT timestamp with time zone '12.27.2001 04:05:06.789-08';
+ timestamptz
+----------------------------------
+ Thu Dec 27 04:05:06.789 2001 PST
+(1 row)
+
+SET DateStyle = 'German';
+SELECT timestamp with time zone '27.12.2001 04:05:06.789+08';
+ timestamptz
+-----------------------------
+ 26.12.2001 12:05:06.789 PST
+(1 row)
+
+SELECT timestamp with time zone '27.12.2001 04:05:06.789-08';
+ timestamptz
+-----------------------------
+ 27.12.2001 04:05:06.789 PST
+(1 row)
+
+SET DateStyle = 'ISO';
+-- As of 7.4, allow time without time zone having a time zone specified
+SELECT time without time zone '040506.789+08';
+ time
+--------------
+ 04:05:06.789
+(1 row)
+
+SELECT time without time zone '040506.789-08';
+ time
+--------------
+ 04:05:06.789
+(1 row)
+
+SELECT time without time zone 'T040506.789+08';
+ time
+--------------
+ 04:05:06.789
+(1 row)
+
+SELECT time without time zone 'T040506.789-08';
+ time
+--------------
+ 04:05:06.789
+(1 row)
+
+SELECT time with time zone '040506.789+08';
+ timetz
+-----------------
+ 04:05:06.789+08
+(1 row)
+
+SELECT time with time zone '040506.789-08';
+ timetz
+-----------------
+ 04:05:06.789-08
+(1 row)
+
+SELECT time with time zone 'T040506.789+08';
+ timetz
+-----------------
+ 04:05:06.789+08
+(1 row)
+
+SELECT time with time zone 'T040506.789-08';
+ timetz
+-----------------
+ 04:05:06.789-08
+(1 row)
+
+SELECT time with time zone 'T040506.789 +08';
+ timetz
+-----------------
+ 04:05:06.789+08
+(1 row)
+
+SELECT time with time zone 'T040506.789 -08';
+ timetz
+-----------------
+ 04:05:06.789-08
+(1 row)
+
+SET DateStyle = 'Postgres, MDY';
+-- Check Julian dates BC
+SELECT date 'J1520447' AS "Confucius' Birthday";
+ Confucius' Birthday
+---------------------
+ 09-28-0551 BC
+(1 row)
+
+SELECT date 'J0' AS "Julian Epoch";
+ Julian Epoch
+---------------
+ 11-24-4714 BC
+(1 row)
+
+--
+-- date, time arithmetic
+--
+SELECT date '1981-02-03' + time '04:05:06' AS "Date + Time";
+ Date + Time
+--------------------------
+ Tue Feb 03 04:05:06 1981
+(1 row)
+
+SELECT date '1991-02-03' + time with time zone '04:05:06 PST' AS "Date + Time PST";
+ Date + Time PST
+------------------------------
+ Sun Feb 03 04:05:06 1991 PST
+(1 row)
+
+SELECT date '2001-02-03' + time with time zone '04:05:06 UTC' AS "Date + Time UTC";
+ Date + Time UTC
+------------------------------
+ Fri Feb 02 20:05:06 2001 PST
+(1 row)
+
+SELECT date '1991-02-03' + interval '2 years' AS "Add Two Years";
+ Add Two Years
+--------------------------
+ Wed Feb 03 00:00:00 1993
+(1 row)
+
+SELECT date '2001-12-13' - interval '2 years' AS "Subtract Two Years";
+ Subtract Two Years
+--------------------------
+ Mon Dec 13 00:00:00 1999
+(1 row)
+
+-- subtract time from date should not make sense; use interval instead
+SELECT date '1991-02-03' - time '04:05:06' AS "Subtract Time";
+ Subtract Time
+--------------------------
+ Sat Feb 02 19:54:54 1991
+(1 row)
+
+SELECT date '1991-02-03' - time with time zone '04:05:06 UTC' AS "Subtract Time UTC";
+ERROR: operator does not exist: date - time with time zone
+LINE 1: SELECT date '1991-02-03' - time with time zone '04:05:06 UTC...
+ ^
+HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
+--
+-- timestamp, interval arithmetic
+--
+SELECT timestamp without time zone '1996-03-01' - interval '1 second' AS "Feb 29";
+ Feb 29
+--------------------------
+ Thu Feb 29 23:59:59 1996
+(1 row)
+
+SELECT timestamp without time zone '1999-03-01' - interval '1 second' AS "Feb 28";
+ Feb 28
+--------------------------
+ Sun Feb 28 23:59:59 1999
+(1 row)
+
+SELECT timestamp without time zone '2000-03-01' - interval '1 second' AS "Feb 29";
+ Feb 29
+--------------------------
+ Tue Feb 29 23:59:59 2000
+(1 row)
+
+SELECT timestamp without time zone '1999-12-01' + interval '1 month - 1 second' AS "Dec 31";
+ Dec 31
+--------------------------
+ Fri Dec 31 23:59:59 1999
+(1 row)
+
+SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '106000000 days' AS "Feb 23, 285506";
+ Feb 23, 285506
+----------------------------
+ Fri Feb 23 00:00:00 285506
+(1 row)
+
+SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '107000000 days' AS "Jan 20, 288244";
+ Jan 20, 288244
+----------------------------
+ Sat Jan 20 00:00:00 288244
+(1 row)
+
+SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '109203489 days' AS "Dec 31, 294276";
+ Dec 31, 294276
+----------------------------
+ Sun Dec 31 00:00:00 294276
+(1 row)
+
+SELECT timestamp without time zone '12/31/294276' - timestamp without time zone '12/23/1999' AS "106751991 Days";
+ 106751991 Days
+------------------
+ @ 106751991 days
+(1 row)
+
+-- Shorthand values
+-- Not directly usable for regression testing since these are not constants.
+-- So, just try to test parser and hope for the best - thomas 97/04/26
+SELECT (timestamp without time zone 'today' = (timestamp without time zone 'yesterday' + interval '1 day')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'today' = (timestamp without time zone 'tomorrow' - interval '1 day')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'today 10:30' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '10:30 today' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'tomorrow' = (timestamp without time zone 'yesterday' + interval '2 days')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'tomorrow 16:00:00' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '16:00:00 tomorrow' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'yesterday 12:34:56' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '12:34:56 yesterday' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone 'tomorrow' > 'now') as "True";
+ True
+------
+ t
+(1 row)
+
+-- Convert from date and time to timestamp
+-- This test used to be timestamp(date,time) but no longer allowed by grammar
+-- to enable support for SQL99 timestamp type syntax.
+SELECT date '1994-01-01' + time '11:00' AS "Jan_01_1994_11am";
+ Jan_01_1994_11am
+--------------------------
+ Sat Jan 01 11:00:00 1994
+(1 row)
+
+SELECT date '1994-01-01' + time '10:00' AS "Jan_01_1994_10am";
+ Jan_01_1994_10am
+--------------------------
+ Sat Jan 01 10:00:00 1994
+(1 row)
+
+SELECT date '1994-01-01' + timetz '11:00-5' AS "Jan_01_1994_8am";
+ Jan_01_1994_8am
+------------------------------
+ Sat Jan 01 08:00:00 1994 PST
+(1 row)
+
+SELECT timestamptz(date '1994-01-01', time with time zone '11:00-5') AS "Jan_01_1994_8am";
+ Jan_01_1994_8am
+------------------------------
+ Sat Jan 01 08:00:00 1994 PST
+(1 row)
+
+SELECT d1 + interval '1 year' AS one_year FROM TIMESTAMP_TBL;
+ one_year
+-----------------------------
+ -infinity
+ infinity
+ Fri Jan 01 00:00:00 1971
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:02 1998
+ Tue Feb 10 17:32:01.4 1998
+ Tue Feb 10 17:32:01.5 1998
+ Tue Feb 10 17:32:01.6 1998
+ Fri Jan 02 00:00:00 1998
+ Fri Jan 02 03:04:05 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Wed Jun 10 17:32:01 1998
+ Sun Sep 22 18:19:20 2002
+ Thu Mar 15 08:14:01 2001
+ Thu Mar 15 13:14:02 2001
+ Thu Mar 15 12:14:03 2001
+ Thu Mar 15 03:14:04 2001
+ Thu Mar 15 02:14:05 2001
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:00 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Wed Jun 10 18:32:01 1998
+ Tue Feb 10 17:32:01 1998
+ Wed Feb 11 17:32:01 1998
+ Thu Feb 12 17:32:01 1998
+ Fri Feb 13 17:32:01 1998
+ Sat Feb 14 17:32:01 1998
+ Sun Feb 15 17:32:01 1998
+ Mon Feb 16 17:32:01 1998
+ Thu Feb 16 17:32:01 0096 BC
+ Sun Feb 16 17:32:01 0098
+ Fri Feb 16 17:32:01 0598
+ Wed Feb 16 17:32:01 1098
+ Sun Feb 16 17:32:01 1698
+ Fri Feb 16 17:32:01 1798
+ Wed Feb 16 17:32:01 1898
+ Mon Feb 16 17:32:01 1998
+ Sun Feb 16 17:32:01 2098
+ Fri Feb 28 17:32:01 1997
+ Fri Feb 28 17:32:01 1997
+ Sat Mar 01 17:32:01 1997
+ Tue Dec 30 17:32:01 1997
+ Wed Dec 31 17:32:01 1997
+ Thu Jan 01 17:32:01 1998
+ Sat Feb 28 17:32:01 1998
+ Sun Mar 01 17:32:01 1998
+ Wed Dec 30 17:32:01 1998
+ Thu Dec 31 17:32:01 1998
+ Sun Dec 31 17:32:01 2000
+ Mon Jan 01 17:32:01 2001
+ Mon Dec 31 17:32:01 2001
+ Tue Jan 01 17:32:01 2002
+(65 rows)
+
+SELECT d1 - interval '1 year' AS one_year FROM TIMESTAMP_TBL;
+ one_year
+-----------------------------
+ -infinity
+ infinity
+ Wed Jan 01 00:00:00 1969
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:02 1996
+ Sat Feb 10 17:32:01.4 1996
+ Sat Feb 10 17:32:01.5 1996
+ Sat Feb 10 17:32:01.6 1996
+ Tue Jan 02 00:00:00 1996
+ Tue Jan 02 03:04:05 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Mon Jun 10 17:32:01 1996
+ Fri Sep 22 18:19:20 2000
+ Mon Mar 15 08:14:01 1999
+ Mon Mar 15 13:14:02 1999
+ Mon Mar 15 12:14:03 1999
+ Mon Mar 15 03:14:04 1999
+ Mon Mar 15 02:14:05 1999
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:00 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Mon Jun 10 18:32:01 1996
+ Sat Feb 10 17:32:01 1996
+ Sun Feb 11 17:32:01 1996
+ Mon Feb 12 17:32:01 1996
+ Tue Feb 13 17:32:01 1996
+ Wed Feb 14 17:32:01 1996
+ Thu Feb 15 17:32:01 1996
+ Fri Feb 16 17:32:01 1996
+ Mon Feb 16 17:32:01 0098 BC
+ Thu Feb 16 17:32:01 0096
+ Tue Feb 16 17:32:01 0596
+ Sun Feb 16 17:32:01 1096
+ Thu Feb 16 17:32:01 1696
+ Tue Feb 16 17:32:01 1796
+ Sun Feb 16 17:32:01 1896
+ Fri Feb 16 17:32:01 1996
+ Thu Feb 16 17:32:01 2096
+ Tue Feb 28 17:32:01 1995
+ Tue Feb 28 17:32:01 1995
+ Wed Mar 01 17:32:01 1995
+ Sat Dec 30 17:32:01 1995
+ Sun Dec 31 17:32:01 1995
+ Mon Jan 01 17:32:01 1996
+ Wed Feb 28 17:32:01 1996
+ Fri Mar 01 17:32:01 1996
+ Mon Dec 30 17:32:01 1996
+ Tue Dec 31 17:32:01 1996
+ Thu Dec 31 17:32:01 1998
+ Fri Jan 01 17:32:01 1999
+ Fri Dec 31 17:32:01 1999
+ Sat Jan 01 17:32:01 2000
+(65 rows)
+
+SELECT timestamp with time zone '1996-03-01' - interval '1 second' AS "Feb 29";
+ Feb 29
+------------------------------
+ Thu Feb 29 23:59:59 1996 PST
+(1 row)
+
+SELECT timestamp with time zone '1999-03-01' - interval '1 second' AS "Feb 28";
+ Feb 28
+------------------------------
+ Sun Feb 28 23:59:59 1999 PST
+(1 row)
+
+SELECT timestamp with time zone '2000-03-01' - interval '1 second' AS "Feb 29";
+ Feb 29
+------------------------------
+ Tue Feb 29 23:59:59 2000 PST
+(1 row)
+
+SELECT timestamp with time zone '1999-12-01' + interval '1 month - 1 second' AS "Dec 31";
+ Dec 31
+------------------------------
+ Fri Dec 31 23:59:59 1999 PST
+(1 row)
+
+SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone 'today' = (timestamp with time zone 'tomorrow' - interval '1 day')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone 'tomorrow' = (timestamp with time zone 'yesterday' + interval '2 days')) as "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone 'tomorrow' > 'now') as "True";
+ True
+------
+ t
+(1 row)
+
+-- timestamp with time zone, interval arithmetic around DST change
+-- (just for fun, let's use an intentionally nonstandard POSIX zone spec)
+SET TIME ZONE 'CST7CDT,M4.1.0,M10.5.0';
+SELECT timestamp with time zone '2005-04-02 12:00-07' + interval '1 day' as "Apr 3, 12:00";
+ Apr 3, 12:00
+------------------------------
+ Sun Apr 03 12:00:00 2005 CDT
+(1 row)
+
+SELECT timestamp with time zone '2005-04-02 12:00-07' + interval '24 hours' as "Apr 3, 13:00";
+ Apr 3, 13:00
+------------------------------
+ Sun Apr 03 13:00:00 2005 CDT
+(1 row)
+
+SELECT timestamp with time zone '2005-04-03 12:00-06' - interval '1 day' as "Apr 2, 12:00";
+ Apr 2, 12:00
+------------------------------
+ Sat Apr 02 12:00:00 2005 CST
+(1 row)
+
+SELECT timestamp with time zone '2005-04-03 12:00-06' - interval '24 hours' as "Apr 2, 11:00";
+ Apr 2, 11:00
+------------------------------
+ Sat Apr 02 11:00:00 2005 CST
+(1 row)
+
+RESET TIME ZONE;
+SELECT timestamptz(date '1994-01-01', time '11:00') AS "Jan_01_1994_10am";
+ Jan_01_1994_10am
+------------------------------
+ Sat Jan 01 11:00:00 1994 PST
+(1 row)
+
+SELECT timestamptz(date '1994-01-01', time '10:00') AS "Jan_01_1994_9am";
+ Jan_01_1994_9am
+------------------------------
+ Sat Jan 01 10:00:00 1994 PST
+(1 row)
+
+SELECT timestamptz(date '1994-01-01', time with time zone '11:00-8') AS "Jan_01_1994_11am";
+ Jan_01_1994_11am
+------------------------------
+ Sat Jan 01 11:00:00 1994 PST
+(1 row)
+
+SELECT timestamptz(date '1994-01-01', time with time zone '10:00-8') AS "Jan_01_1994_10am";
+ Jan_01_1994_10am
+------------------------------
+ Sat Jan 01 10:00:00 1994 PST
+(1 row)
+
+SELECT timestamptz(date '1994-01-01', time with time zone '11:00-5') AS "Jan_01_1994_8am";
+ Jan_01_1994_8am
+------------------------------
+ Sat Jan 01 08:00:00 1994 PST
+(1 row)
+
+SELECT d1 + interval '1 year' AS one_year FROM TIMESTAMPTZ_TBL;
+ one_year
+---------------------------------
+ -infinity
+ infinity
+ Thu Dec 31 16:00:00 1970 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:02 1998 PST
+ Tue Feb 10 17:32:01.4 1998 PST
+ Tue Feb 10 17:32:01.5 1998 PST
+ Tue Feb 10 17:32:01.6 1998 PST
+ Fri Jan 02 00:00:00 1998 PST
+ Fri Jan 02 03:04:05 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Wed Jun 10 17:32:01 1998 PDT
+ Sun Sep 22 18:19:20 2002 PDT
+ Thu Mar 15 08:14:01 2001 PST
+ Thu Mar 15 04:14:02 2001 PST
+ Thu Mar 15 02:14:03 2001 PST
+ Thu Mar 15 03:14:04 2001 PST
+ Thu Mar 15 01:14:05 2001 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:00 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 17:32:01 1998 PST
+ Tue Feb 10 09:32:01 1998 PST
+ Tue Feb 10 09:32:01 1998 PST
+ Tue Feb 10 09:32:01 1998 PST
+ Tue Feb 10 14:32:01 1998 PST
+ Fri Jul 10 14:32:01 1998 PDT
+ Wed Jun 10 18:32:01 1998 PDT
+ Tue Feb 10 17:32:01 1998 PST
+ Wed Feb 11 17:32:01 1998 PST
+ Thu Feb 12 17:32:01 1998 PST
+ Fri Feb 13 17:32:01 1998 PST
+ Sat Feb 14 17:32:01 1998 PST
+ Sun Feb 15 17:32:01 1998 PST
+ Mon Feb 16 17:32:01 1998 PST
+ Thu Feb 16 17:32:01 0096 PST BC
+ Sun Feb 16 17:32:01 0098 PST
+ Fri Feb 16 17:32:01 0598 PST
+ Wed Feb 16 17:32:01 1098 PST
+ Sun Feb 16 17:32:01 1698 PST
+ Fri Feb 16 17:32:01 1798 PST
+ Wed Feb 16 17:32:01 1898 PST
+ Mon Feb 16 17:32:01 1998 PST
+ Sun Feb 16 17:32:01 2098 PST
+ Fri Feb 28 17:32:01 1997 PST
+ Fri Feb 28 17:32:01 1997 PST
+ Sat Mar 01 17:32:01 1997 PST
+ Tue Dec 30 17:32:01 1997 PST
+ Wed Dec 31 17:32:01 1997 PST
+ Thu Jan 01 17:32:01 1998 PST
+ Sat Feb 28 17:32:01 1998 PST
+ Sun Mar 01 17:32:01 1998 PST
+ Wed Dec 30 17:32:01 1998 PST
+ Thu Dec 31 17:32:01 1998 PST
+ Sun Dec 31 17:32:01 2000 PST
+ Mon Jan 01 17:32:01 2001 PST
+ Mon Dec 31 17:32:01 2001 PST
+ Tue Jan 01 17:32:01 2002 PST
+(66 rows)
+
+SELECT d1 - interval '1 year' AS one_year FROM TIMESTAMPTZ_TBL;
+ one_year
+---------------------------------
+ -infinity
+ infinity
+ Tue Dec 31 16:00:00 1968 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:02 1996 PST
+ Sat Feb 10 17:32:01.4 1996 PST
+ Sat Feb 10 17:32:01.5 1996 PST
+ Sat Feb 10 17:32:01.6 1996 PST
+ Tue Jan 02 00:00:00 1996 PST
+ Tue Jan 02 03:04:05 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Mon Jun 10 17:32:01 1996 PDT
+ Fri Sep 22 18:19:20 2000 PDT
+ Mon Mar 15 08:14:01 1999 PST
+ Mon Mar 15 04:14:02 1999 PST
+ Mon Mar 15 02:14:03 1999 PST
+ Mon Mar 15 03:14:04 1999 PST
+ Mon Mar 15 01:14:05 1999 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:00 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 17:32:01 1996 PST
+ Sat Feb 10 09:32:01 1996 PST
+ Sat Feb 10 09:32:01 1996 PST
+ Sat Feb 10 09:32:01 1996 PST
+ Sat Feb 10 14:32:01 1996 PST
+ Wed Jul 10 14:32:01 1996 PDT
+ Mon Jun 10 18:32:01 1996 PDT
+ Sat Feb 10 17:32:01 1996 PST
+ Sun Feb 11 17:32:01 1996 PST
+ Mon Feb 12 17:32:01 1996 PST
+ Tue Feb 13 17:32:01 1996 PST
+ Wed Feb 14 17:32:01 1996 PST
+ Thu Feb 15 17:32:01 1996 PST
+ Fri Feb 16 17:32:01 1996 PST
+ Mon Feb 16 17:32:01 0098 PST BC
+ Thu Feb 16 17:32:01 0096 PST
+ Tue Feb 16 17:32:01 0596 PST
+ Sun Feb 16 17:32:01 1096 PST
+ Thu Feb 16 17:32:01 1696 PST
+ Tue Feb 16 17:32:01 1796 PST
+ Sun Feb 16 17:32:01 1896 PST
+ Fri Feb 16 17:32:01 1996 PST
+ Thu Feb 16 17:32:01 2096 PST
+ Tue Feb 28 17:32:01 1995 PST
+ Tue Feb 28 17:32:01 1995 PST
+ Wed Mar 01 17:32:01 1995 PST
+ Sat Dec 30 17:32:01 1995 PST
+ Sun Dec 31 17:32:01 1995 PST
+ Mon Jan 01 17:32:01 1996 PST
+ Wed Feb 28 17:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST
+ Thu Dec 31 17:32:01 1998 PST
+ Fri Jan 01 17:32:01 1999 PST
+ Fri Dec 31 17:32:01 1999 PST
+ Sat Jan 01 17:32:01 2000 PST
+(66 rows)
+
+--
+-- time, interval arithmetic
+--
+SELECT CAST(time '01:02' AS interval) AS "+01:02";
+ +01:02
+-----------------
+ @ 1 hour 2 mins
+(1 row)
+
+SELECT CAST(interval '02:03' AS time) AS "02:03:00";
+ 02:03:00
+----------
+ 02:03:00
+(1 row)
+
+SELECT time '01:30' + interval '02:01' AS "03:31:00";
+ 03:31:00
+----------
+ 03:31:00
+(1 row)
+
+SELECT time '01:30' - interval '02:01' AS "23:29:00";
+ 23:29:00
+----------
+ 23:29:00
+(1 row)
+
+SELECT time '02:30' + interval '36:01' AS "14:31:00";
+ 14:31:00
+----------
+ 14:31:00
+(1 row)
+
+SELECT time '03:30' + interval '1 month 04:01' AS "07:31:00";
+ 07:31:00
+----------
+ 07:31:00
+(1 row)
+
+SELECT CAST(time with time zone '01:02-08' AS interval) AS "+00:01";
+ERROR: cannot cast type time with time zone to interval
+LINE 1: SELECT CAST(time with time zone '01:02-08' AS interval) AS "...
+ ^
+SELECT CAST(interval '02:03' AS time with time zone) AS "02:03:00-08";
+ERROR: cannot cast type interval to time with time zone
+LINE 1: SELECT CAST(interval '02:03' AS time with time zone) AS "02:...
+ ^
+SELECT time with time zone '01:30-08' - interval '02:01' AS "23:29:00-08";
+ 23:29:00-08
+-------------
+ 23:29:00-08
+(1 row)
+
+SELECT time with time zone '02:30-08' + interval '36:01' AS "14:31:00-08";
+ 14:31:00-08
+-------------
+ 14:31:00-08
+(1 row)
+
+-- These two tests cannot be used because they default to current timezone,
+-- which may be either -08 or -07 depending on the time of year.
+-- SELECT time with time zone '01:30' + interval '02:01' AS "03:31:00-08";
+-- SELECT time with time zone '03:30' + interval '1 month 04:01' AS "07:31:00-08";
+-- Try the following two tests instead, as a poor substitute
+SELECT CAST(CAST(date 'today' + time with time zone '05:30'
+ + interval '02:01' AS time with time zone) AS time) AS "07:31:00";
+ 07:31:00
+----------
+ 07:31:00
+(1 row)
+
+SELECT CAST(cast(date 'today' + time with time zone '03:30'
+ + interval '1 month 04:01' as timestamp without time zone) AS time) AS "07:31:00";
+ 07:31:00
+----------
+ 07:31:00
+(1 row)
+
+SELECT t.d1 AS t, i.f1 AS i, t.d1 + i.f1 AS "add", t.d1 - i.f1 AS "subtract"
+ FROM TIMESTAMP_TBL t, INTERVAL_TBL i
+ WHERE t.d1 BETWEEN '1990-01-01' AND '2001-01-01'
+ AND i.f1 BETWEEN '00:00' AND '23:00'
+ ORDER BY 1,2;
+ t | i | add | subtract
+----------------------------+-----------+----------------------------+----------------------------
+ Wed Feb 28 17:32:01 1996 | @ 1 min | Wed Feb 28 17:33:01 1996 | Wed Feb 28 17:31:01 1996
+ Wed Feb 28 17:32:01 1996 | @ 5 hours | Wed Feb 28 22:32:01 1996 | Wed Feb 28 12:32:01 1996
+ Thu Feb 29 17:32:01 1996 | @ 1 min | Thu Feb 29 17:33:01 1996 | Thu Feb 29 17:31:01 1996
+ Thu Feb 29 17:32:01 1996 | @ 5 hours | Thu Feb 29 22:32:01 1996 | Thu Feb 29 12:32:01 1996
+ Fri Mar 01 17:32:01 1996 | @ 1 min | Fri Mar 01 17:33:01 1996 | Fri Mar 01 17:31:01 1996
+ Fri Mar 01 17:32:01 1996 | @ 5 hours | Fri Mar 01 22:32:01 1996 | Fri Mar 01 12:32:01 1996
+ Mon Dec 30 17:32:01 1996 | @ 1 min | Mon Dec 30 17:33:01 1996 | Mon Dec 30 17:31:01 1996
+ Mon Dec 30 17:32:01 1996 | @ 5 hours | Mon Dec 30 22:32:01 1996 | Mon Dec 30 12:32:01 1996
+ Tue Dec 31 17:32:01 1996 | @ 1 min | Tue Dec 31 17:33:01 1996 | Tue Dec 31 17:31:01 1996
+ Tue Dec 31 17:32:01 1996 | @ 5 hours | Tue Dec 31 22:32:01 1996 | Tue Dec 31 12:32:01 1996
+ Wed Jan 01 17:32:01 1997 | @ 1 min | Wed Jan 01 17:33:01 1997 | Wed Jan 01 17:31:01 1997
+ Wed Jan 01 17:32:01 1997 | @ 5 hours | Wed Jan 01 22:32:01 1997 | Wed Jan 01 12:32:01 1997
+ Thu Jan 02 00:00:00 1997 | @ 1 min | Thu Jan 02 00:01:00 1997 | Wed Jan 01 23:59:00 1997
+ Thu Jan 02 00:00:00 1997 | @ 5 hours | Thu Jan 02 05:00:00 1997 | Wed Jan 01 19:00:00 1997
+ Thu Jan 02 03:04:05 1997 | @ 1 min | Thu Jan 02 03:05:05 1997 | Thu Jan 02 03:03:05 1997
+ Thu Jan 02 03:04:05 1997 | @ 5 hours | Thu Jan 02 08:04:05 1997 | Wed Jan 01 22:04:05 1997
+ Mon Feb 10 17:32:00 1997 | @ 1 min | Mon Feb 10 17:33:00 1997 | Mon Feb 10 17:31:00 1997
+ Mon Feb 10 17:32:00 1997 | @ 5 hours | Mon Feb 10 22:32:00 1997 | Mon Feb 10 12:32:00 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997
+ Mon Feb 10 17:32:01.4 1997 | @ 1 min | Mon Feb 10 17:33:01.4 1997 | Mon Feb 10 17:31:01.4 1997
+ Mon Feb 10 17:32:01.4 1997 | @ 5 hours | Mon Feb 10 22:32:01.4 1997 | Mon Feb 10 12:32:01.4 1997
+ Mon Feb 10 17:32:01.5 1997 | @ 1 min | Mon Feb 10 17:33:01.5 1997 | Mon Feb 10 17:31:01.5 1997
+ Mon Feb 10 17:32:01.5 1997 | @ 5 hours | Mon Feb 10 22:32:01.5 1997 | Mon Feb 10 12:32:01.5 1997
+ Mon Feb 10 17:32:01.6 1997 | @ 1 min | Mon Feb 10 17:33:01.6 1997 | Mon Feb 10 17:31:01.6 1997
+ Mon Feb 10 17:32:01.6 1997 | @ 5 hours | Mon Feb 10 22:32:01.6 1997 | Mon Feb 10 12:32:01.6 1997
+ Mon Feb 10 17:32:02 1997 | @ 1 min | Mon Feb 10 17:33:02 1997 | Mon Feb 10 17:31:02 1997
+ Mon Feb 10 17:32:02 1997 | @ 5 hours | Mon Feb 10 22:32:02 1997 | Mon Feb 10 12:32:02 1997
+ Tue Feb 11 17:32:01 1997 | @ 1 min | Tue Feb 11 17:33:01 1997 | Tue Feb 11 17:31:01 1997
+ Tue Feb 11 17:32:01 1997 | @ 5 hours | Tue Feb 11 22:32:01 1997 | Tue Feb 11 12:32:01 1997
+ Wed Feb 12 17:32:01 1997 | @ 1 min | Wed Feb 12 17:33:01 1997 | Wed Feb 12 17:31:01 1997
+ Wed Feb 12 17:32:01 1997 | @ 5 hours | Wed Feb 12 22:32:01 1997 | Wed Feb 12 12:32:01 1997
+ Thu Feb 13 17:32:01 1997 | @ 1 min | Thu Feb 13 17:33:01 1997 | Thu Feb 13 17:31:01 1997
+ Thu Feb 13 17:32:01 1997 | @ 5 hours | Thu Feb 13 22:32:01 1997 | Thu Feb 13 12:32:01 1997
+ Fri Feb 14 17:32:01 1997 | @ 1 min | Fri Feb 14 17:33:01 1997 | Fri Feb 14 17:31:01 1997
+ Fri Feb 14 17:32:01 1997 | @ 5 hours | Fri Feb 14 22:32:01 1997 | Fri Feb 14 12:32:01 1997
+ Sat Feb 15 17:32:01 1997 | @ 1 min | Sat Feb 15 17:33:01 1997 | Sat Feb 15 17:31:01 1997
+ Sat Feb 15 17:32:01 1997 | @ 5 hours | Sat Feb 15 22:32:01 1997 | Sat Feb 15 12:32:01 1997
+ Sun Feb 16 17:32:01 1997 | @ 1 min | Sun Feb 16 17:33:01 1997 | Sun Feb 16 17:31:01 1997
+ Sun Feb 16 17:32:01 1997 | @ 1 min | Sun Feb 16 17:33:01 1997 | Sun Feb 16 17:31:01 1997
+ Sun Feb 16 17:32:01 1997 | @ 5 hours | Sun Feb 16 22:32:01 1997 | Sun Feb 16 12:32:01 1997
+ Sun Feb 16 17:32:01 1997 | @ 5 hours | Sun Feb 16 22:32:01 1997 | Sun Feb 16 12:32:01 1997
+ Fri Feb 28 17:32:01 1997 | @ 1 min | Fri Feb 28 17:33:01 1997 | Fri Feb 28 17:31:01 1997
+ Fri Feb 28 17:32:01 1997 | @ 5 hours | Fri Feb 28 22:32:01 1997 | Fri Feb 28 12:32:01 1997
+ Sat Mar 01 17:32:01 1997 | @ 1 min | Sat Mar 01 17:33:01 1997 | Sat Mar 01 17:31:01 1997
+ Sat Mar 01 17:32:01 1997 | @ 5 hours | Sat Mar 01 22:32:01 1997 | Sat Mar 01 12:32:01 1997
+ Tue Jun 10 17:32:01 1997 | @ 1 min | Tue Jun 10 17:33:01 1997 | Tue Jun 10 17:31:01 1997
+ Tue Jun 10 17:32:01 1997 | @ 5 hours | Tue Jun 10 22:32:01 1997 | Tue Jun 10 12:32:01 1997
+ Tue Jun 10 18:32:01 1997 | @ 1 min | Tue Jun 10 18:33:01 1997 | Tue Jun 10 18:31:01 1997
+ Tue Jun 10 18:32:01 1997 | @ 5 hours | Tue Jun 10 23:32:01 1997 | Tue Jun 10 13:32:01 1997
+ Tue Dec 30 17:32:01 1997 | @ 1 min | Tue Dec 30 17:33:01 1997 | Tue Dec 30 17:31:01 1997
+ Tue Dec 30 17:32:01 1997 | @ 5 hours | Tue Dec 30 22:32:01 1997 | Tue Dec 30 12:32:01 1997
+ Wed Dec 31 17:32:01 1997 | @ 1 min | Wed Dec 31 17:33:01 1997 | Wed Dec 31 17:31:01 1997
+ Wed Dec 31 17:32:01 1997 | @ 5 hours | Wed Dec 31 22:32:01 1997 | Wed Dec 31 12:32:01 1997
+ Fri Dec 31 17:32:01 1999 | @ 1 min | Fri Dec 31 17:33:01 1999 | Fri Dec 31 17:31:01 1999
+ Fri Dec 31 17:32:01 1999 | @ 5 hours | Fri Dec 31 22:32:01 1999 | Fri Dec 31 12:32:01 1999
+ Sat Jan 01 17:32:01 2000 | @ 1 min | Sat Jan 01 17:33:01 2000 | Sat Jan 01 17:31:01 2000
+ Sat Jan 01 17:32:01 2000 | @ 5 hours | Sat Jan 01 22:32:01 2000 | Sat Jan 01 12:32:01 2000
+ Wed Mar 15 02:14:05 2000 | @ 1 min | Wed Mar 15 02:15:05 2000 | Wed Mar 15 02:13:05 2000
+ Wed Mar 15 02:14:05 2000 | @ 5 hours | Wed Mar 15 07:14:05 2000 | Tue Mar 14 21:14:05 2000
+ Wed Mar 15 03:14:04 2000 | @ 1 min | Wed Mar 15 03:15:04 2000 | Wed Mar 15 03:13:04 2000
+ Wed Mar 15 03:14:04 2000 | @ 5 hours | Wed Mar 15 08:14:04 2000 | Tue Mar 14 22:14:04 2000
+ Wed Mar 15 08:14:01 2000 | @ 1 min | Wed Mar 15 08:15:01 2000 | Wed Mar 15 08:13:01 2000
+ Wed Mar 15 08:14:01 2000 | @ 5 hours | Wed Mar 15 13:14:01 2000 | Wed Mar 15 03:14:01 2000
+ Wed Mar 15 12:14:03 2000 | @ 1 min | Wed Mar 15 12:15:03 2000 | Wed Mar 15 12:13:03 2000
+ Wed Mar 15 12:14:03 2000 | @ 5 hours | Wed Mar 15 17:14:03 2000 | Wed Mar 15 07:14:03 2000
+ Wed Mar 15 13:14:02 2000 | @ 1 min | Wed Mar 15 13:15:02 2000 | Wed Mar 15 13:13:02 2000
+ Wed Mar 15 13:14:02 2000 | @ 5 hours | Wed Mar 15 18:14:02 2000 | Wed Mar 15 08:14:02 2000
+ Sun Dec 31 17:32:01 2000 | @ 1 min | Sun Dec 31 17:33:01 2000 | Sun Dec 31 17:31:01 2000
+ Sun Dec 31 17:32:01 2000 | @ 5 hours | Sun Dec 31 22:32:01 2000 | Sun Dec 31 12:32:01 2000
+(104 rows)
+
+SELECT t.f1 AS t, i.f1 AS i, t.f1 + i.f1 AS "add", t.f1 - i.f1 AS "subtract"
+ FROM TIME_TBL t, INTERVAL_TBL i
+ ORDER BY 1,2;
+ t | i | add | subtract
+-------------+-------------------------------+-------------+-------------
+ 00:00:00 | @ 14 secs ago | 23:59:46 | 00:00:14
+ 00:00:00 | @ 1 min | 00:01:00 | 23:59:00
+ 00:00:00 | @ 5 hours | 05:00:00 | 19:00:00
+ 00:00:00 | @ 1 day 2 hours 3 mins 4 secs | 02:03:04 | 21:56:56
+ 00:00:00 | @ 10 days | 00:00:00 | 00:00:00
+ 00:00:00 | @ 3 mons | 00:00:00 | 00:00:00
+ 00:00:00 | @ 5 mons | 00:00:00 | 00:00:00
+ 00:00:00 | @ 5 mons 12 hours | 12:00:00 | 12:00:00
+ 00:00:00 | @ 6 years | 00:00:00 | 00:00:00
+ 00:00:00 | @ 34 years | 00:00:00 | 00:00:00
+ 01:00:00 | @ 14 secs ago | 00:59:46 | 01:00:14
+ 01:00:00 | @ 1 min | 01:01:00 | 00:59:00
+ 01:00:00 | @ 5 hours | 06:00:00 | 20:00:00
+ 01:00:00 | @ 1 day 2 hours 3 mins 4 secs | 03:03:04 | 22:56:56
+ 01:00:00 | @ 10 days | 01:00:00 | 01:00:00
+ 01:00:00 | @ 3 mons | 01:00:00 | 01:00:00
+ 01:00:00 | @ 5 mons | 01:00:00 | 01:00:00
+ 01:00:00 | @ 5 mons 12 hours | 13:00:00 | 13:00:00
+ 01:00:00 | @ 6 years | 01:00:00 | 01:00:00
+ 01:00:00 | @ 34 years | 01:00:00 | 01:00:00
+ 02:03:00 | @ 14 secs ago | 02:02:46 | 02:03:14
+ 02:03:00 | @ 1 min | 02:04:00 | 02:02:00
+ 02:03:00 | @ 5 hours | 07:03:00 | 21:03:00
+ 02:03:00 | @ 1 day 2 hours 3 mins 4 secs | 04:06:04 | 23:59:56
+ 02:03:00 | @ 10 days | 02:03:00 | 02:03:00
+ 02:03:00 | @ 3 mons | 02:03:00 | 02:03:00
+ 02:03:00 | @ 5 mons | 02:03:00 | 02:03:00
+ 02:03:00 | @ 5 mons 12 hours | 14:03:00 | 14:03:00
+ 02:03:00 | @ 6 years | 02:03:00 | 02:03:00
+ 02:03:00 | @ 34 years | 02:03:00 | 02:03:00
+ 11:59:00 | @ 14 secs ago | 11:58:46 | 11:59:14
+ 11:59:00 | @ 1 min | 12:00:00 | 11:58:00
+ 11:59:00 | @ 5 hours | 16:59:00 | 06:59:00
+ 11:59:00 | @ 1 day 2 hours 3 mins 4 secs | 14:02:04 | 09:55:56
+ 11:59:00 | @ 10 days | 11:59:00 | 11:59:00
+ 11:59:00 | @ 3 mons | 11:59:00 | 11:59:00
+ 11:59:00 | @ 5 mons | 11:59:00 | 11:59:00
+ 11:59:00 | @ 5 mons 12 hours | 23:59:00 | 23:59:00
+ 11:59:00 | @ 6 years | 11:59:00 | 11:59:00
+ 11:59:00 | @ 34 years | 11:59:00 | 11:59:00
+ 12:00:00 | @ 14 secs ago | 11:59:46 | 12:00:14
+ 12:00:00 | @ 1 min | 12:01:00 | 11:59:00
+ 12:00:00 | @ 5 hours | 17:00:00 | 07:00:00
+ 12:00:00 | @ 1 day 2 hours 3 mins 4 secs | 14:03:04 | 09:56:56
+ 12:00:00 | @ 10 days | 12:00:00 | 12:00:00
+ 12:00:00 | @ 3 mons | 12:00:00 | 12:00:00
+ 12:00:00 | @ 5 mons | 12:00:00 | 12:00:00
+ 12:00:00 | @ 5 mons 12 hours | 00:00:00 | 00:00:00
+ 12:00:00 | @ 6 years | 12:00:00 | 12:00:00
+ 12:00:00 | @ 34 years | 12:00:00 | 12:00:00
+ 12:01:00 | @ 14 secs ago | 12:00:46 | 12:01:14
+ 12:01:00 | @ 1 min | 12:02:00 | 12:00:00
+ 12:01:00 | @ 5 hours | 17:01:00 | 07:01:00
+ 12:01:00 | @ 1 day 2 hours 3 mins 4 secs | 14:04:04 | 09:57:56
+ 12:01:00 | @ 10 days | 12:01:00 | 12:01:00
+ 12:01:00 | @ 3 mons | 12:01:00 | 12:01:00
+ 12:01:00 | @ 5 mons | 12:01:00 | 12:01:00
+ 12:01:00 | @ 5 mons 12 hours | 00:01:00 | 00:01:00
+ 12:01:00 | @ 6 years | 12:01:00 | 12:01:00
+ 12:01:00 | @ 34 years | 12:01:00 | 12:01:00
+ 15:36:39 | @ 14 secs ago | 15:36:25 | 15:36:53
+ 15:36:39 | @ 14 secs ago | 15:36:25 | 15:36:53
+ 15:36:39 | @ 1 min | 15:37:39 | 15:35:39
+ 15:36:39 | @ 1 min | 15:37:39 | 15:35:39
+ 15:36:39 | @ 5 hours | 20:36:39 | 10:36:39
+ 15:36:39 | @ 5 hours | 20:36:39 | 10:36:39
+ 15:36:39 | @ 1 day 2 hours 3 mins 4 secs | 17:39:43 | 13:33:35
+ 15:36:39 | @ 1 day 2 hours 3 mins 4 secs | 17:39:43 | 13:33:35
+ 15:36:39 | @ 10 days | 15:36:39 | 15:36:39
+ 15:36:39 | @ 10 days | 15:36:39 | 15:36:39
+ 15:36:39 | @ 3 mons | 15:36:39 | 15:36:39
+ 15:36:39 | @ 3 mons | 15:36:39 | 15:36:39
+ 15:36:39 | @ 5 mons | 15:36:39 | 15:36:39
+ 15:36:39 | @ 5 mons | 15:36:39 | 15:36:39
+ 15:36:39 | @ 5 mons 12 hours | 03:36:39 | 03:36:39
+ 15:36:39 | @ 5 mons 12 hours | 03:36:39 | 03:36:39
+ 15:36:39 | @ 6 years | 15:36:39 | 15:36:39
+ 15:36:39 | @ 6 years | 15:36:39 | 15:36:39
+ 15:36:39 | @ 34 years | 15:36:39 | 15:36:39
+ 15:36:39 | @ 34 years | 15:36:39 | 15:36:39
+ 23:59:00 | @ 14 secs ago | 23:58:46 | 23:59:14
+ 23:59:00 | @ 1 min | 00:00:00 | 23:58:00
+ 23:59:00 | @ 5 hours | 04:59:00 | 18:59:00
+ 23:59:00 | @ 1 day 2 hours 3 mins 4 secs | 02:02:04 | 21:55:56
+ 23:59:00 | @ 10 days | 23:59:00 | 23:59:00
+ 23:59:00 | @ 3 mons | 23:59:00 | 23:59:00
+ 23:59:00 | @ 5 mons | 23:59:00 | 23:59:00
+ 23:59:00 | @ 5 mons 12 hours | 11:59:00 | 11:59:00
+ 23:59:00 | @ 6 years | 23:59:00 | 23:59:00
+ 23:59:00 | @ 34 years | 23:59:00 | 23:59:00
+ 23:59:59.99 | @ 14 secs ago | 23:59:45.99 | 00:00:13.99
+ 23:59:59.99 | @ 1 min | 00:00:59.99 | 23:58:59.99
+ 23:59:59.99 | @ 5 hours | 04:59:59.99 | 18:59:59.99
+ 23:59:59.99 | @ 1 day 2 hours 3 mins 4 secs | 02:03:03.99 | 21:56:55.99
+ 23:59:59.99 | @ 10 days | 23:59:59.99 | 23:59:59.99
+ 23:59:59.99 | @ 3 mons | 23:59:59.99 | 23:59:59.99
+ 23:59:59.99 | @ 5 mons | 23:59:59.99 | 23:59:59.99
+ 23:59:59.99 | @ 5 mons 12 hours | 11:59:59.99 | 11:59:59.99
+ 23:59:59.99 | @ 6 years | 23:59:59.99 | 23:59:59.99
+ 23:59:59.99 | @ 34 years | 23:59:59.99 | 23:59:59.99
+(100 rows)
+
+SELECT t.f1 AS t, i.f1 AS i, t.f1 + i.f1 AS "add", t.f1 - i.f1 AS "subtract"
+ FROM TIMETZ_TBL t, INTERVAL_TBL i
+ ORDER BY 1,2;
+ t | i | add | subtract
+----------------+-------------------------------+----------------+----------------
+ 00:01:00-07 | @ 14 secs ago | 00:00:46-07 | 00:01:14-07
+ 00:01:00-07 | @ 1 min | 00:02:00-07 | 00:00:00-07
+ 00:01:00-07 | @ 5 hours | 05:01:00-07 | 19:01:00-07
+ 00:01:00-07 | @ 1 day 2 hours 3 mins 4 secs | 02:04:04-07 | 21:57:56-07
+ 00:01:00-07 | @ 10 days | 00:01:00-07 | 00:01:00-07
+ 00:01:00-07 | @ 3 mons | 00:01:00-07 | 00:01:00-07
+ 00:01:00-07 | @ 5 mons | 00:01:00-07 | 00:01:00-07
+ 00:01:00-07 | @ 5 mons 12 hours | 12:01:00-07 | 12:01:00-07
+ 00:01:00-07 | @ 6 years | 00:01:00-07 | 00:01:00-07
+ 00:01:00-07 | @ 34 years | 00:01:00-07 | 00:01:00-07
+ 01:00:00-07 | @ 14 secs ago | 00:59:46-07 | 01:00:14-07
+ 01:00:00-07 | @ 1 min | 01:01:00-07 | 00:59:00-07
+ 01:00:00-07 | @ 5 hours | 06:00:00-07 | 20:00:00-07
+ 01:00:00-07 | @ 1 day 2 hours 3 mins 4 secs | 03:03:04-07 | 22:56:56-07
+ 01:00:00-07 | @ 10 days | 01:00:00-07 | 01:00:00-07
+ 01:00:00-07 | @ 3 mons | 01:00:00-07 | 01:00:00-07
+ 01:00:00-07 | @ 5 mons | 01:00:00-07 | 01:00:00-07
+ 01:00:00-07 | @ 5 mons 12 hours | 13:00:00-07 | 13:00:00-07
+ 01:00:00-07 | @ 6 years | 01:00:00-07 | 01:00:00-07
+ 01:00:00-07 | @ 34 years | 01:00:00-07 | 01:00:00-07
+ 02:03:00-07 | @ 14 secs ago | 02:02:46-07 | 02:03:14-07
+ 02:03:00-07 | @ 1 min | 02:04:00-07 | 02:02:00-07
+ 02:03:00-07 | @ 5 hours | 07:03:00-07 | 21:03:00-07
+ 02:03:00-07 | @ 1 day 2 hours 3 mins 4 secs | 04:06:04-07 | 23:59:56-07
+ 02:03:00-07 | @ 10 days | 02:03:00-07 | 02:03:00-07
+ 02:03:00-07 | @ 3 mons | 02:03:00-07 | 02:03:00-07
+ 02:03:00-07 | @ 5 mons | 02:03:00-07 | 02:03:00-07
+ 02:03:00-07 | @ 5 mons 12 hours | 14:03:00-07 | 14:03:00-07
+ 02:03:00-07 | @ 6 years | 02:03:00-07 | 02:03:00-07
+ 02:03:00-07 | @ 34 years | 02:03:00-07 | 02:03:00-07
+ 08:08:00-04 | @ 14 secs ago | 08:07:46-04 | 08:08:14-04
+ 08:08:00-04 | @ 1 min | 08:09:00-04 | 08:07:00-04
+ 08:08:00-04 | @ 5 hours | 13:08:00-04 | 03:08:00-04
+ 08:08:00-04 | @ 1 day 2 hours 3 mins 4 secs | 10:11:04-04 | 06:04:56-04
+ 08:08:00-04 | @ 10 days | 08:08:00-04 | 08:08:00-04
+ 08:08:00-04 | @ 3 mons | 08:08:00-04 | 08:08:00-04
+ 08:08:00-04 | @ 5 mons | 08:08:00-04 | 08:08:00-04
+ 08:08:00-04 | @ 5 mons 12 hours | 20:08:00-04 | 20:08:00-04
+ 08:08:00-04 | @ 6 years | 08:08:00-04 | 08:08:00-04
+ 08:08:00-04 | @ 34 years | 08:08:00-04 | 08:08:00-04
+ 07:07:00-08 | @ 14 secs ago | 07:06:46-08 | 07:07:14-08
+ 07:07:00-08 | @ 1 min | 07:08:00-08 | 07:06:00-08
+ 07:07:00-08 | @ 5 hours | 12:07:00-08 | 02:07:00-08
+ 07:07:00-08 | @ 1 day 2 hours 3 mins 4 secs | 09:10:04-08 | 05:03:56-08
+ 07:07:00-08 | @ 10 days | 07:07:00-08 | 07:07:00-08
+ 07:07:00-08 | @ 3 mons | 07:07:00-08 | 07:07:00-08
+ 07:07:00-08 | @ 5 mons | 07:07:00-08 | 07:07:00-08
+ 07:07:00-08 | @ 5 mons 12 hours | 19:07:00-08 | 19:07:00-08
+ 07:07:00-08 | @ 6 years | 07:07:00-08 | 07:07:00-08
+ 07:07:00-08 | @ 34 years | 07:07:00-08 | 07:07:00-08
+ 11:59:00-07 | @ 14 secs ago | 11:58:46-07 | 11:59:14-07
+ 11:59:00-07 | @ 1 min | 12:00:00-07 | 11:58:00-07
+ 11:59:00-07 | @ 5 hours | 16:59:00-07 | 06:59:00-07
+ 11:59:00-07 | @ 1 day 2 hours 3 mins 4 secs | 14:02:04-07 | 09:55:56-07
+ 11:59:00-07 | @ 10 days | 11:59:00-07 | 11:59:00-07
+ 11:59:00-07 | @ 3 mons | 11:59:00-07 | 11:59:00-07
+ 11:59:00-07 | @ 5 mons | 11:59:00-07 | 11:59:00-07
+ 11:59:00-07 | @ 5 mons 12 hours | 23:59:00-07 | 23:59:00-07
+ 11:59:00-07 | @ 6 years | 11:59:00-07 | 11:59:00-07
+ 11:59:00-07 | @ 34 years | 11:59:00-07 | 11:59:00-07
+ 12:00:00-07 | @ 14 secs ago | 11:59:46-07 | 12:00:14-07
+ 12:00:00-07 | @ 1 min | 12:01:00-07 | 11:59:00-07
+ 12:00:00-07 | @ 5 hours | 17:00:00-07 | 07:00:00-07
+ 12:00:00-07 | @ 1 day 2 hours 3 mins 4 secs | 14:03:04-07 | 09:56:56-07
+ 12:00:00-07 | @ 10 days | 12:00:00-07 | 12:00:00-07
+ 12:00:00-07 | @ 3 mons | 12:00:00-07 | 12:00:00-07
+ 12:00:00-07 | @ 5 mons | 12:00:00-07 | 12:00:00-07
+ 12:00:00-07 | @ 5 mons 12 hours | 00:00:00-07 | 00:00:00-07
+ 12:00:00-07 | @ 6 years | 12:00:00-07 | 12:00:00-07
+ 12:00:00-07 | @ 34 years | 12:00:00-07 | 12:00:00-07
+ 12:01:00-07 | @ 14 secs ago | 12:00:46-07 | 12:01:14-07
+ 12:01:00-07 | @ 1 min | 12:02:00-07 | 12:00:00-07
+ 12:01:00-07 | @ 5 hours | 17:01:00-07 | 07:01:00-07
+ 12:01:00-07 | @ 1 day 2 hours 3 mins 4 secs | 14:04:04-07 | 09:57:56-07
+ 12:01:00-07 | @ 10 days | 12:01:00-07 | 12:01:00-07
+ 12:01:00-07 | @ 3 mons | 12:01:00-07 | 12:01:00-07
+ 12:01:00-07 | @ 5 mons | 12:01:00-07 | 12:01:00-07
+ 12:01:00-07 | @ 5 mons 12 hours | 00:01:00-07 | 00:01:00-07
+ 12:01:00-07 | @ 6 years | 12:01:00-07 | 12:01:00-07
+ 12:01:00-07 | @ 34 years | 12:01:00-07 | 12:01:00-07
+ 15:36:39-04 | @ 14 secs ago | 15:36:25-04 | 15:36:53-04
+ 15:36:39-04 | @ 1 min | 15:37:39-04 | 15:35:39-04
+ 15:36:39-04 | @ 5 hours | 20:36:39-04 | 10:36:39-04
+ 15:36:39-04 | @ 1 day 2 hours 3 mins 4 secs | 17:39:43-04 | 13:33:35-04
+ 15:36:39-04 | @ 10 days | 15:36:39-04 | 15:36:39-04
+ 15:36:39-04 | @ 3 mons | 15:36:39-04 | 15:36:39-04
+ 15:36:39-04 | @ 5 mons | 15:36:39-04 | 15:36:39-04
+ 15:36:39-04 | @ 5 mons 12 hours | 03:36:39-04 | 03:36:39-04
+ 15:36:39-04 | @ 6 years | 15:36:39-04 | 15:36:39-04
+ 15:36:39-04 | @ 34 years | 15:36:39-04 | 15:36:39-04
+ 15:36:39-05 | @ 14 secs ago | 15:36:25-05 | 15:36:53-05
+ 15:36:39-05 | @ 1 min | 15:37:39-05 | 15:35:39-05
+ 15:36:39-05 | @ 5 hours | 20:36:39-05 | 10:36:39-05
+ 15:36:39-05 | @ 1 day 2 hours 3 mins 4 secs | 17:39:43-05 | 13:33:35-05
+ 15:36:39-05 | @ 10 days | 15:36:39-05 | 15:36:39-05
+ 15:36:39-05 | @ 3 mons | 15:36:39-05 | 15:36:39-05
+ 15:36:39-05 | @ 5 mons | 15:36:39-05 | 15:36:39-05
+ 15:36:39-05 | @ 5 mons 12 hours | 03:36:39-05 | 03:36:39-05
+ 15:36:39-05 | @ 6 years | 15:36:39-05 | 15:36:39-05
+ 15:36:39-05 | @ 34 years | 15:36:39-05 | 15:36:39-05
+ 23:59:00-07 | @ 14 secs ago | 23:58:46-07 | 23:59:14-07
+ 23:59:00-07 | @ 1 min | 00:00:00-07 | 23:58:00-07
+ 23:59:00-07 | @ 5 hours | 04:59:00-07 | 18:59:00-07
+ 23:59:00-07 | @ 1 day 2 hours 3 mins 4 secs | 02:02:04-07 | 21:55:56-07
+ 23:59:00-07 | @ 10 days | 23:59:00-07 | 23:59:00-07
+ 23:59:00-07 | @ 3 mons | 23:59:00-07 | 23:59:00-07
+ 23:59:00-07 | @ 5 mons | 23:59:00-07 | 23:59:00-07
+ 23:59:00-07 | @ 5 mons 12 hours | 11:59:00-07 | 11:59:00-07
+ 23:59:00-07 | @ 6 years | 23:59:00-07 | 23:59:00-07
+ 23:59:00-07 | @ 34 years | 23:59:00-07 | 23:59:00-07
+ 23:59:59.99-07 | @ 14 secs ago | 23:59:45.99-07 | 00:00:13.99-07
+ 23:59:59.99-07 | @ 1 min | 00:00:59.99-07 | 23:58:59.99-07
+ 23:59:59.99-07 | @ 5 hours | 04:59:59.99-07 | 18:59:59.99-07
+ 23:59:59.99-07 | @ 1 day 2 hours 3 mins 4 secs | 02:03:03.99-07 | 21:56:55.99-07
+ 23:59:59.99-07 | @ 10 days | 23:59:59.99-07 | 23:59:59.99-07
+ 23:59:59.99-07 | @ 3 mons | 23:59:59.99-07 | 23:59:59.99-07
+ 23:59:59.99-07 | @ 5 mons | 23:59:59.99-07 | 23:59:59.99-07
+ 23:59:59.99-07 | @ 5 mons 12 hours | 11:59:59.99-07 | 11:59:59.99-07
+ 23:59:59.99-07 | @ 6 years | 23:59:59.99-07 | 23:59:59.99-07
+ 23:59:59.99-07 | @ 34 years | 23:59:59.99-07 | 23:59:59.99-07
+(120 rows)
+
+-- SQL9x OVERLAPS operator
+-- test with time zone
+SELECT (timestamp with time zone '2000-11-27', timestamp with time zone '2000-11-28')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone '2000-11-26', timestamp with time zone '2000-11-27')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "False";
+ False
+-------
+ f
+(1 row)
+
+SELECT (timestamp with time zone '2000-11-27', timestamp with time zone '2000-11-28')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', interval '1 day') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "False";
+ False
+-------
+ f
+(1 row)
+
+SELECT (timestamp with time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp with time zone '2000-11-27', interval '12 hours') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp with time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', interval '12 hours') AS "False";
+ False
+-------
+ f
+(1 row)
+
+-- test without time zone
+SELECT (timestamp without time zone '2000-11-27', timestamp without time zone '2000-11-28')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '2000-11-26', timestamp without time zone '2000-11-27')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "False";
+ False
+-------
+ f
+(1 row)
+
+SELECT (timestamp without time zone '2000-11-27', timestamp without time zone '2000-11-28')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', interval '1 day') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "False";
+ False
+-------
+ f
+(1 row)
+
+SELECT (timestamp without time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp without time zone '2000-11-27', interval '12 hours') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (timestamp without time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', interval '12 hours') AS "False";
+ False
+-------
+ f
+(1 row)
+
+-- test time and interval
+SELECT (time '00:00', time '01:00')
+ OVERLAPS (time '00:30', time '01:30') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (time '00:00', interval '1 hour')
+ OVERLAPS (time '00:30', interval '1 hour') AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT (time '00:00', interval '1 hour')
+ OVERLAPS (time '01:30', interval '1 hour') AS "False";
+ False
+-------
+ f
+(1 row)
+
+-- SQL99 seems to want this to be false (and we conform to the spec).
+-- istm that this *should* return true, on the theory that time
+-- intervals can wrap around the day boundary - thomas 2001-09-25
+SELECT (time '00:00', interval '1 hour')
+ OVERLAPS (time '01:30', interval '1 day') AS "False";
+ False
+-------
+ f
+(1 row)
+
+CREATE TABLE TEMP_TIMESTAMP (f1 timestamp with time zone);
+-- get some candidate input values
+INSERT INTO TEMP_TIMESTAMP (f1)
+ SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 BETWEEN '13-jun-1957' AND '1-jan-1997'
+ OR d1 BETWEEN '1-jan-1999' AND '1-jan-2010';
+SELECT f1 AS "timestamp"
+ FROM TEMP_TIMESTAMP
+ ORDER BY "timestamp";
+ timestamp
+------------------------------
+ Thu Jan 01 00:00:00 1970 PST
+ Wed Feb 28 17:32:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST
+ Fri Dec 31 17:32:01 1999 PST
+ Sat Jan 01 17:32:01 2000 PST
+ Wed Mar 15 02:14:05 2000 PST
+ Wed Mar 15 03:14:04 2000 PST
+ Wed Mar 15 08:14:01 2000 PST
+ Wed Mar 15 12:14:03 2000 PST
+ Wed Mar 15 13:14:02 2000 PST
+ Sun Dec 31 17:32:01 2000 PST
+ Mon Jan 01 17:32:01 2001 PST
+ Sat Sep 22 18:19:20 2001 PDT
+(16 rows)
+
+SELECT d.f1 AS "timestamp", t.f1 AS "interval", d.f1 + t.f1 AS plus
+ FROM TEMP_TIMESTAMP d, INTERVAL_TBL t
+ ORDER BY plus, "timestamp", "interval";
+ timestamp | interval | plus
+------------------------------+-------------------------------+------------------------------
+ Thu Jan 01 00:00:00 1970 PST | @ 14 secs ago | Wed Dec 31 23:59:46 1969 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 1 min | Thu Jan 01 00:01:00 1970 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 5 hours | Thu Jan 01 05:00:00 1970 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 1 day 2 hours 3 mins 4 secs | Fri Jan 02 02:03:04 1970 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 10 days | Sun Jan 11 00:00:00 1970 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 3 mons | Wed Apr 01 00:00:00 1970 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 5 mons | Mon Jun 01 00:00:00 1970 PDT
+ Thu Jan 01 00:00:00 1970 PST | @ 5 mons 12 hours | Mon Jun 01 12:00:00 1970 PDT
+ Thu Jan 01 00:00:00 1970 PST | @ 6 years | Thu Jan 01 00:00:00 1976 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 14 secs ago | Wed Feb 28 17:31:47 1996 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 1 min | Wed Feb 28 17:33:01 1996 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 5 hours | Wed Feb 28 22:32:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 14 secs ago | Thu Feb 29 17:31:47 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 1 min | Thu Feb 29 17:33:01 1996 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Feb 29 19:35:05 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 5 hours | Thu Feb 29 22:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 14 secs ago | Fri Mar 01 17:31:47 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 1 min | Fri Mar 01 17:33:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Fri Mar 01 19:35:05 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 5 hours | Fri Mar 01 22:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Sat Mar 02 19:35:05 1996 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 10 days | Sat Mar 09 17:32:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 10 days | Sun Mar 10 17:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 10 days | Mon Mar 11 17:32:01 1996 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 3 mons | Tue May 28 17:32:01 1996 PDT
+ Thu Feb 29 17:32:01 1996 PST | @ 3 mons | Wed May 29 17:32:01 1996 PDT
+ Fri Mar 01 17:32:01 1996 PST | @ 3 mons | Sat Jun 01 17:32:01 1996 PDT
+ Wed Feb 28 17:32:01 1996 PST | @ 5 mons | Sun Jul 28 17:32:01 1996 PDT
+ Wed Feb 28 17:32:01 1996 PST | @ 5 mons 12 hours | Mon Jul 29 05:32:01 1996 PDT
+ Thu Feb 29 17:32:01 1996 PST | @ 5 mons | Mon Jul 29 17:32:01 1996 PDT
+ Thu Feb 29 17:32:01 1996 PST | @ 5 mons 12 hours | Tue Jul 30 05:32:01 1996 PDT
+ Fri Mar 01 17:32:01 1996 PST | @ 5 mons | Thu Aug 01 17:32:01 1996 PDT
+ Fri Mar 01 17:32:01 1996 PST | @ 5 mons 12 hours | Fri Aug 02 05:32:01 1996 PDT
+ Mon Dec 30 17:32:01 1996 PST | @ 14 secs ago | Mon Dec 30 17:31:47 1996 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 1 min | Mon Dec 30 17:33:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 5 hours | Mon Dec 30 22:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 14 secs ago | Tue Dec 31 17:31:47 1996 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 1 min | Tue Dec 31 17:33:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Dec 31 19:35:05 1996 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 5 hours | Tue Dec 31 22:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Wed Jan 01 19:35:05 1997 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 10 days | Thu Jan 09 17:32:01 1997 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 10 days | Fri Jan 10 17:32:01 1997 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 3 mons | Sun Mar 30 17:32:01 1997 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 3 mons | Mon Mar 31 17:32:01 1997 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 5 mons | Fri May 30 17:32:01 1997 PDT
+ Mon Dec 30 17:32:01 1996 PST | @ 5 mons 12 hours | Sat May 31 05:32:01 1997 PDT
+ Tue Dec 31 17:32:01 1996 PST | @ 5 mons | Sat May 31 17:32:01 1997 PDT
+ Tue Dec 31 17:32:01 1996 PST | @ 5 mons 12 hours | Sun Jun 01 05:32:01 1997 PDT
+ Fri Dec 31 17:32:01 1999 PST | @ 14 secs ago | Fri Dec 31 17:31:47 1999 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 1 min | Fri Dec 31 17:33:01 1999 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 5 hours | Fri Dec 31 22:32:01 1999 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 14 secs ago | Sat Jan 01 17:31:47 2000 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 1 min | Sat Jan 01 17:33:01 2000 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 1 day 2 hours 3 mins 4 secs | Sat Jan 01 19:35:05 2000 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 5 hours | Sat Jan 01 22:32:01 2000 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Sun Jan 02 19:35:05 2000 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 10 days | Mon Jan 10 17:32:01 2000 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 10 days | Tue Jan 11 17:32:01 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 14 secs ago | Wed Mar 15 02:13:51 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 1 min | Wed Mar 15 02:15:05 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 14 secs ago | Wed Mar 15 03:13:50 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 1 min | Wed Mar 15 03:15:04 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 5 hours | Wed Mar 15 07:14:05 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 14 secs ago | Wed Mar 15 08:13:47 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 5 hours | Wed Mar 15 08:14:04 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 1 min | Wed Mar 15 08:15:01 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 14 secs ago | Wed Mar 15 12:13:49 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 1 min | Wed Mar 15 12:15:03 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 14 secs ago | Wed Mar 15 13:13:48 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 5 hours | Wed Mar 15 13:14:01 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 1 min | Wed Mar 15 13:15:02 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 5 hours | Wed Mar 15 17:14:03 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 5 hours | Wed Mar 15 18:14:02 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 04:17:09 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 05:17:08 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 10:17:05 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 14:17:07 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 15:17:06 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 10 days | Sat Mar 25 02:14:05 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 10 days | Sat Mar 25 03:14:04 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 10 days | Sat Mar 25 08:14:01 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 10 days | Sat Mar 25 12:14:03 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 10 days | Sat Mar 25 13:14:02 2000 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 3 mons | Fri Mar 31 17:32:01 2000 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 3 mons | Sat Apr 01 17:32:01 2000 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 5 mons | Wed May 31 17:32:01 2000 PDT
+ Fri Dec 31 17:32:01 1999 PST | @ 5 mons 12 hours | Thu Jun 01 05:32:01 2000 PDT
+ Sat Jan 01 17:32:01 2000 PST | @ 5 mons | Thu Jun 01 17:32:01 2000 PDT
+ Sat Jan 01 17:32:01 2000 PST | @ 5 mons 12 hours | Fri Jun 02 05:32:01 2000 PDT
+ Wed Mar 15 02:14:05 2000 PST | @ 3 mons | Thu Jun 15 02:14:05 2000 PDT
+ Wed Mar 15 03:14:04 2000 PST | @ 3 mons | Thu Jun 15 03:14:04 2000 PDT
+ Wed Mar 15 08:14:01 2000 PST | @ 3 mons | Thu Jun 15 08:14:01 2000 PDT
+ Wed Mar 15 12:14:03 2000 PST | @ 3 mons | Thu Jun 15 12:14:03 2000 PDT
+ Wed Mar 15 13:14:02 2000 PST | @ 3 mons | Thu Jun 15 13:14:02 2000 PDT
+ Wed Mar 15 02:14:05 2000 PST | @ 5 mons | Tue Aug 15 02:14:05 2000 PDT
+ Wed Mar 15 03:14:04 2000 PST | @ 5 mons | Tue Aug 15 03:14:04 2000 PDT
+ Wed Mar 15 08:14:01 2000 PST | @ 5 mons | Tue Aug 15 08:14:01 2000 PDT
+ Wed Mar 15 12:14:03 2000 PST | @ 5 mons | Tue Aug 15 12:14:03 2000 PDT
+ Wed Mar 15 13:14:02 2000 PST | @ 5 mons | Tue Aug 15 13:14:02 2000 PDT
+ Wed Mar 15 02:14:05 2000 PST | @ 5 mons 12 hours | Tue Aug 15 14:14:05 2000 PDT
+ Wed Mar 15 03:14:04 2000 PST | @ 5 mons 12 hours | Tue Aug 15 15:14:04 2000 PDT
+ Wed Mar 15 08:14:01 2000 PST | @ 5 mons 12 hours | Tue Aug 15 20:14:01 2000 PDT
+ Wed Mar 15 12:14:03 2000 PST | @ 5 mons 12 hours | Wed Aug 16 00:14:03 2000 PDT
+ Wed Mar 15 13:14:02 2000 PST | @ 5 mons 12 hours | Wed Aug 16 01:14:02 2000 PDT
+ Sun Dec 31 17:32:01 2000 PST | @ 14 secs ago | Sun Dec 31 17:31:47 2000 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 1 min | Sun Dec 31 17:33:01 2000 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 5 hours | Sun Dec 31 22:32:01 2000 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 14 secs ago | Mon Jan 01 17:31:47 2001 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 1 min | Mon Jan 01 17:33:01 2001 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Mon Jan 01 19:35:05 2001 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 5 hours | Mon Jan 01 22:32:01 2001 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Jan 02 19:35:05 2001 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 10 days | Wed Jan 10 17:32:01 2001 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 10 days | Thu Jan 11 17:32:01 2001 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 3 mons | Sat Mar 31 17:32:01 2001 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 3 mons | Sun Apr 01 17:32:01 2001 PDT
+ Sun Dec 31 17:32:01 2000 PST | @ 5 mons | Thu May 31 17:32:01 2001 PDT
+ Sun Dec 31 17:32:01 2000 PST | @ 5 mons 12 hours | Fri Jun 01 05:32:01 2001 PDT
+ Mon Jan 01 17:32:01 2001 PST | @ 5 mons | Fri Jun 01 17:32:01 2001 PDT
+ Mon Jan 01 17:32:01 2001 PST | @ 5 mons 12 hours | Sat Jun 02 05:32:01 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 14 secs ago | Sat Sep 22 18:19:06 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 1 min | Sat Sep 22 18:20:20 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 5 hours | Sat Sep 22 23:19:20 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 1 day 2 hours 3 mins 4 secs | Sun Sep 23 20:22:24 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 10 days | Tue Oct 02 18:19:20 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 3 mons | Sat Dec 22 18:19:20 2001 PST
+ Sat Sep 22 18:19:20 2001 PDT | @ 5 mons | Fri Feb 22 18:19:20 2002 PST
+ Sat Sep 22 18:19:20 2001 PDT | @ 5 mons 12 hours | Sat Feb 23 06:19:20 2002 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 6 years | Thu Feb 28 17:32:01 2002 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 6 years | Thu Feb 28 17:32:01 2002 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 6 years | Fri Mar 01 17:32:01 2002 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 6 years | Mon Dec 30 17:32:01 2002 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 6 years | Tue Dec 31 17:32:01 2002 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 34 years | Thu Jan 01 00:00:00 2004 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 6 years | Sat Dec 31 17:32:01 2005 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 6 years | Sun Jan 01 17:32:01 2006 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 6 years | Wed Mar 15 02:14:05 2006 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 6 years | Wed Mar 15 03:14:04 2006 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 6 years | Wed Mar 15 08:14:01 2006 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 6 years | Wed Mar 15 12:14:03 2006 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 6 years | Wed Mar 15 13:14:02 2006 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 6 years | Sun Dec 31 17:32:01 2006 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 6 years | Mon Jan 01 17:32:01 2007 PST
+ Sat Sep 22 18:19:20 2001 PDT | @ 6 years | Sat Sep 22 18:19:20 2007 PDT
+ Wed Feb 28 17:32:01 1996 PST | @ 34 years | Thu Feb 28 17:32:01 2030 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 34 years | Thu Feb 28 17:32:01 2030 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 34 years | Fri Mar 01 17:32:01 2030 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 34 years | Mon Dec 30 17:32:01 2030 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 34 years | Tue Dec 31 17:32:01 2030 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 34 years | Sat Dec 31 17:32:01 2033 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 34 years | Sun Jan 01 17:32:01 2034 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 34 years | Wed Mar 15 02:14:05 2034 PDT
+ Wed Mar 15 03:14:04 2000 PST | @ 34 years | Wed Mar 15 03:14:04 2034 PDT
+ Wed Mar 15 08:14:01 2000 PST | @ 34 years | Wed Mar 15 08:14:01 2034 PDT
+ Wed Mar 15 12:14:03 2000 PST | @ 34 years | Wed Mar 15 12:14:03 2034 PDT
+ Wed Mar 15 13:14:02 2000 PST | @ 34 years | Wed Mar 15 13:14:02 2034 PDT
+ Sun Dec 31 17:32:01 2000 PST | @ 34 years | Sun Dec 31 17:32:01 2034 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 34 years | Mon Jan 01 17:32:01 2035 PST
+ Sat Sep 22 18:19:20 2001 PDT | @ 34 years | Sat Sep 22 18:19:20 2035 PDT
+(160 rows)
+
+SELECT d.f1 AS "timestamp", t.f1 AS "interval", d.f1 - t.f1 AS minus
+ FROM TEMP_TIMESTAMP d, INTERVAL_TBL t
+ WHERE isfinite(d.f1)
+ ORDER BY minus, "timestamp", "interval";
+ timestamp | interval | minus
+------------------------------+-------------------------------+------------------------------
+ Thu Jan 01 00:00:00 1970 PST | @ 34 years | Wed Jan 01 00:00:00 1936 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 34 years | Wed Feb 28 17:32:01 1962 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 34 years | Wed Feb 28 17:32:01 1962 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 34 years | Thu Mar 01 17:32:01 1962 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 34 years | Sun Dec 30 17:32:01 1962 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 34 years | Mon Dec 31 17:32:01 1962 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 6 years | Wed Jan 01 00:00:00 1964 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 34 years | Fri Dec 31 17:32:01 1965 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 34 years | Sat Jan 01 17:32:01 1966 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 34 years | Tue Mar 15 02:14:05 1966 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 34 years | Tue Mar 15 03:14:04 1966 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 34 years | Tue Mar 15 08:14:01 1966 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 34 years | Tue Mar 15 12:14:03 1966 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 34 years | Tue Mar 15 13:14:02 1966 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 34 years | Sat Dec 31 17:32:01 1966 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 34 years | Sun Jan 01 17:32:01 1967 PST
+ Sat Sep 22 18:19:20 2001 PDT | @ 34 years | Fri Sep 22 18:19:20 1967 PDT
+ Thu Jan 01 00:00:00 1970 PST | @ 5 mons 12 hours | Thu Jul 31 12:00:00 1969 PDT
+ Thu Jan 01 00:00:00 1970 PST | @ 5 mons | Fri Aug 01 00:00:00 1969 PDT
+ Thu Jan 01 00:00:00 1970 PST | @ 3 mons | Wed Oct 01 00:00:00 1969 PDT
+ Thu Jan 01 00:00:00 1970 PST | @ 10 days | Mon Dec 22 00:00:00 1969 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Dec 30 21:56:56 1969 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 5 hours | Wed Dec 31 19:00:00 1969 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 1 min | Wed Dec 31 23:59:00 1969 PST
+ Thu Jan 01 00:00:00 1970 PST | @ 14 secs ago | Thu Jan 01 00:00:14 1970 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 6 years | Wed Feb 28 17:32:01 1990 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 6 years | Wed Feb 28 17:32:01 1990 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 6 years | Thu Mar 01 17:32:01 1990 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 6 years | Sun Dec 30 17:32:01 1990 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 6 years | Mon Dec 31 17:32:01 1990 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 6 years | Fri Dec 31 17:32:01 1993 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 6 years | Sat Jan 01 17:32:01 1994 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 6 years | Tue Mar 15 02:14:05 1994 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 6 years | Tue Mar 15 03:14:04 1994 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 6 years | Tue Mar 15 08:14:01 1994 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 6 years | Tue Mar 15 12:14:03 1994 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 6 years | Tue Mar 15 13:14:02 1994 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 6 years | Sat Dec 31 17:32:01 1994 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 6 years | Sun Jan 01 17:32:01 1995 PST
+ Sat Sep 22 18:19:20 2001 PDT | @ 6 years | Fri Sep 22 18:19:20 1995 PDT
+ Wed Feb 28 17:32:01 1996 PST | @ 5 mons 12 hours | Thu Sep 28 05:32:01 1995 PDT
+ Wed Feb 28 17:32:01 1996 PST | @ 5 mons | Thu Sep 28 17:32:01 1995 PDT
+ Thu Feb 29 17:32:01 1996 PST | @ 5 mons 12 hours | Fri Sep 29 05:32:01 1995 PDT
+ Thu Feb 29 17:32:01 1996 PST | @ 5 mons | Fri Sep 29 17:32:01 1995 PDT
+ Fri Mar 01 17:32:01 1996 PST | @ 5 mons 12 hours | Sun Oct 01 05:32:01 1995 PDT
+ Fri Mar 01 17:32:01 1996 PST | @ 5 mons | Sun Oct 01 17:32:01 1995 PDT
+ Wed Feb 28 17:32:01 1996 PST | @ 3 mons | Tue Nov 28 17:32:01 1995 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 3 mons | Wed Nov 29 17:32:01 1995 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 3 mons | Fri Dec 01 17:32:01 1995 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 10 days | Sun Feb 18 17:32:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 10 days | Mon Feb 19 17:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 10 days | Tue Feb 20 17:32:01 1996 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Feb 27 15:28:57 1996 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 5 hours | Wed Feb 28 12:32:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Wed Feb 28 15:28:57 1996 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 1 min | Wed Feb 28 17:31:01 1996 PST
+ Wed Feb 28 17:32:01 1996 PST | @ 14 secs ago | Wed Feb 28 17:32:15 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 5 hours | Thu Feb 29 12:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Feb 29 15:28:57 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 1 min | Thu Feb 29 17:31:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST | @ 14 secs ago | Thu Feb 29 17:32:15 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 5 hours | Fri Mar 01 12:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 1 min | Fri Mar 01 17:31:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST | @ 14 secs ago | Fri Mar 01 17:32:15 1996 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 5 mons 12 hours | Tue Jul 30 05:32:01 1996 PDT
+ Mon Dec 30 17:32:01 1996 PST | @ 5 mons | Tue Jul 30 17:32:01 1996 PDT
+ Tue Dec 31 17:32:01 1996 PST | @ 5 mons 12 hours | Wed Jul 31 05:32:01 1996 PDT
+ Tue Dec 31 17:32:01 1996 PST | @ 5 mons | Wed Jul 31 17:32:01 1996 PDT
+ Mon Dec 30 17:32:01 1996 PST | @ 3 mons | Mon Sep 30 17:32:01 1996 PDT
+ Tue Dec 31 17:32:01 1996 PST | @ 3 mons | Mon Sep 30 17:32:01 1996 PDT
+ Mon Dec 30 17:32:01 1996 PST | @ 10 days | Fri Dec 20 17:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 10 days | Sat Dec 21 17:32:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Sun Dec 29 15:28:57 1996 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 5 hours | Mon Dec 30 12:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Mon Dec 30 15:28:57 1996 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 1 min | Mon Dec 30 17:31:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST | @ 14 secs ago | Mon Dec 30 17:32:15 1996 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 5 hours | Tue Dec 31 12:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 1 min | Tue Dec 31 17:31:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST | @ 14 secs ago | Tue Dec 31 17:32:15 1996 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 5 mons 12 hours | Sat Jul 31 05:32:01 1999 PDT
+ Fri Dec 31 17:32:01 1999 PST | @ 5 mons | Sat Jul 31 17:32:01 1999 PDT
+ Sat Jan 01 17:32:01 2000 PST | @ 5 mons 12 hours | Sun Aug 01 05:32:01 1999 PDT
+ Sat Jan 01 17:32:01 2000 PST | @ 5 mons | Sun Aug 01 17:32:01 1999 PDT
+ Fri Dec 31 17:32:01 1999 PST | @ 3 mons | Thu Sep 30 17:32:01 1999 PDT
+ Sat Jan 01 17:32:01 2000 PST | @ 3 mons | Fri Oct 01 17:32:01 1999 PDT
+ Wed Mar 15 02:14:05 2000 PST | @ 5 mons 12 hours | Thu Oct 14 14:14:05 1999 PDT
+ Wed Mar 15 03:14:04 2000 PST | @ 5 mons 12 hours | Thu Oct 14 15:14:04 1999 PDT
+ Wed Mar 15 08:14:01 2000 PST | @ 5 mons 12 hours | Thu Oct 14 20:14:01 1999 PDT
+ Wed Mar 15 12:14:03 2000 PST | @ 5 mons 12 hours | Fri Oct 15 00:14:03 1999 PDT
+ Wed Mar 15 13:14:02 2000 PST | @ 5 mons 12 hours | Fri Oct 15 01:14:02 1999 PDT
+ Wed Mar 15 02:14:05 2000 PST | @ 5 mons | Fri Oct 15 02:14:05 1999 PDT
+ Wed Mar 15 03:14:04 2000 PST | @ 5 mons | Fri Oct 15 03:14:04 1999 PDT
+ Wed Mar 15 08:14:01 2000 PST | @ 5 mons | Fri Oct 15 08:14:01 1999 PDT
+ Wed Mar 15 12:14:03 2000 PST | @ 5 mons | Fri Oct 15 12:14:03 1999 PDT
+ Wed Mar 15 13:14:02 2000 PST | @ 5 mons | Fri Oct 15 13:14:02 1999 PDT
+ Wed Mar 15 02:14:05 2000 PST | @ 3 mons | Wed Dec 15 02:14:05 1999 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 3 mons | Wed Dec 15 03:14:04 1999 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 3 mons | Wed Dec 15 08:14:01 1999 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 3 mons | Wed Dec 15 12:14:03 1999 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 3 mons | Wed Dec 15 13:14:02 1999 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 10 days | Tue Dec 21 17:32:01 1999 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 10 days | Wed Dec 22 17:32:01 1999 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Dec 30 15:28:57 1999 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 5 hours | Fri Dec 31 12:32:01 1999 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Fri Dec 31 15:28:57 1999 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 1 min | Fri Dec 31 17:31:01 1999 PST
+ Fri Dec 31 17:32:01 1999 PST | @ 14 secs ago | Fri Dec 31 17:32:15 1999 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 5 hours | Sat Jan 01 12:32:01 2000 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 1 min | Sat Jan 01 17:31:01 2000 PST
+ Sat Jan 01 17:32:01 2000 PST | @ 14 secs ago | Sat Jan 01 17:32:15 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 10 days | Sun Mar 05 02:14:05 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 10 days | Sun Mar 05 03:14:04 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 10 days | Sun Mar 05 08:14:01 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 10 days | Sun Mar 05 12:14:03 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 10 days | Sun Mar 05 13:14:02 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 00:11:01 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 01:11:00 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 06:10:57 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 10:10:59 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 11:10:58 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 5 hours | Tue Mar 14 21:14:05 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 5 hours | Tue Mar 14 22:14:04 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 1 min | Wed Mar 15 02:13:05 2000 PST
+ Wed Mar 15 02:14:05 2000 PST | @ 14 secs ago | Wed Mar 15 02:14:19 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 1 min | Wed Mar 15 03:13:04 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 5 hours | Wed Mar 15 03:14:01 2000 PST
+ Wed Mar 15 03:14:04 2000 PST | @ 14 secs ago | Wed Mar 15 03:14:18 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 5 hours | Wed Mar 15 07:14:03 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 1 min | Wed Mar 15 08:13:01 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 5 hours | Wed Mar 15 08:14:02 2000 PST
+ Wed Mar 15 08:14:01 2000 PST | @ 14 secs ago | Wed Mar 15 08:14:15 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 1 min | Wed Mar 15 12:13:03 2000 PST
+ Wed Mar 15 12:14:03 2000 PST | @ 14 secs ago | Wed Mar 15 12:14:17 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 1 min | Wed Mar 15 13:13:02 2000 PST
+ Wed Mar 15 13:14:02 2000 PST | @ 14 secs ago | Wed Mar 15 13:14:16 2000 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 5 mons 12 hours | Mon Jul 31 05:32:01 2000 PDT
+ Sun Dec 31 17:32:01 2000 PST | @ 5 mons | Mon Jul 31 17:32:01 2000 PDT
+ Mon Jan 01 17:32:01 2001 PST | @ 5 mons 12 hours | Tue Aug 01 05:32:01 2000 PDT
+ Mon Jan 01 17:32:01 2001 PST | @ 5 mons | Tue Aug 01 17:32:01 2000 PDT
+ Sun Dec 31 17:32:01 2000 PST | @ 3 mons | Sat Sep 30 17:32:01 2000 PDT
+ Mon Jan 01 17:32:01 2001 PST | @ 3 mons | Sun Oct 01 17:32:01 2000 PDT
+ Sun Dec 31 17:32:01 2000 PST | @ 10 days | Thu Dec 21 17:32:01 2000 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 10 days | Fri Dec 22 17:32:01 2000 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Sat Dec 30 15:28:57 2000 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 5 hours | Sun Dec 31 12:32:01 2000 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 1 day 2 hours 3 mins 4 secs | Sun Dec 31 15:28:57 2000 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 1 min | Sun Dec 31 17:31:01 2000 PST
+ Sun Dec 31 17:32:01 2000 PST | @ 14 secs ago | Sun Dec 31 17:32:15 2000 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 5 hours | Mon Jan 01 12:32:01 2001 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 1 min | Mon Jan 01 17:31:01 2001 PST
+ Mon Jan 01 17:32:01 2001 PST | @ 14 secs ago | Mon Jan 01 17:32:15 2001 PST
+ Sat Sep 22 18:19:20 2001 PDT | @ 5 mons 12 hours | Sun Apr 22 06:19:20 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 5 mons | Sun Apr 22 18:19:20 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 3 mons | Fri Jun 22 18:19:20 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 10 days | Wed Sep 12 18:19:20 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 1 day 2 hours 3 mins 4 secs | Fri Sep 21 16:16:16 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 5 hours | Sat Sep 22 13:19:20 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 1 min | Sat Sep 22 18:18:20 2001 PDT
+ Sat Sep 22 18:19:20 2001 PDT | @ 14 secs ago | Sat Sep 22 18:19:34 2001 PDT
+(160 rows)
+
+SELECT d.f1 AS "timestamp",
+ timestamp with time zone '1980-01-06 00:00 GMT' AS gpstime_zero,
+ d.f1 - timestamp with time zone '1980-01-06 00:00 GMT' AS difference
+ FROM TEMP_TIMESTAMP d
+ ORDER BY difference;
+ timestamp | gpstime_zero | difference
+------------------------------+------------------------------+-------------------------------------
+ Thu Jan 01 00:00:00 1970 PST | Sat Jan 05 16:00:00 1980 PST | @ 3656 days 16 hours ago
+ Wed Feb 28 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 5898 days 1 hour 32 mins 1 sec
+ Thu Feb 29 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 5899 days 1 hour 32 mins 1 sec
+ Fri Mar 01 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 5900 days 1 hour 32 mins 1 sec
+ Mon Dec 30 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 6204 days 1 hour 32 mins 1 sec
+ Tue Dec 31 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 6205 days 1 hour 32 mins 1 sec
+ Fri Dec 31 17:32:01 1999 PST | Sat Jan 05 16:00:00 1980 PST | @ 7300 days 1 hour 32 mins 1 sec
+ Sat Jan 01 17:32:01 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7301 days 1 hour 32 mins 1 sec
+ Wed Mar 15 02:14:05 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 10 hours 14 mins 5 secs
+ Wed Mar 15 03:14:04 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 11 hours 14 mins 4 secs
+ Wed Mar 15 08:14:01 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 16 hours 14 mins 1 sec
+ Wed Mar 15 12:14:03 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 20 hours 14 mins 3 secs
+ Wed Mar 15 13:14:02 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 21 hours 14 mins 2 secs
+ Sun Dec 31 17:32:01 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7666 days 1 hour 32 mins 1 sec
+ Mon Jan 01 17:32:01 2001 PST | Sat Jan 05 16:00:00 1980 PST | @ 7667 days 1 hour 32 mins 1 sec
+ Sat Sep 22 18:19:20 2001 PDT | Sat Jan 05 16:00:00 1980 PST | @ 7931 days 1 hour 19 mins 20 secs
+(16 rows)
+
+SELECT d1.f1 AS timestamp1, d2.f1 AS timestamp2, d1.f1 - d2.f1 AS difference
+ FROM TEMP_TIMESTAMP d1, TEMP_TIMESTAMP d2
+ ORDER BY timestamp1, timestamp2, difference;
+ timestamp1 | timestamp2 | difference
+------------------------------+------------------------------+-------------------------------------------
+ Thu Jan 01 00:00:00 1970 PST | Thu Jan 01 00:00:00 1970 PST | @ 0
+ Thu Jan 01 00:00:00 1970 PST | Wed Feb 28 17:32:01 1996 PST | @ 9554 days 17 hours 32 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Thu Feb 29 17:32:01 1996 PST | @ 9555 days 17 hours 32 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Fri Mar 01 17:32:01 1996 PST | @ 9556 days 17 hours 32 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Mon Dec 30 17:32:01 1996 PST | @ 9860 days 17 hours 32 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Tue Dec 31 17:32:01 1996 PST | @ 9861 days 17 hours 32 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Fri Dec 31 17:32:01 1999 PST | @ 10956 days 17 hours 32 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Sat Jan 01 17:32:01 2000 PST | @ 10957 days 17 hours 32 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 02:14:05 2000 PST | @ 11031 days 2 hours 14 mins 5 secs ago
+ Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 03:14:04 2000 PST | @ 11031 days 3 hours 14 mins 4 secs ago
+ Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 08:14:01 2000 PST | @ 11031 days 8 hours 14 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 12:14:03 2000 PST | @ 11031 days 12 hours 14 mins 3 secs ago
+ Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 13:14:02 2000 PST | @ 11031 days 13 hours 14 mins 2 secs ago
+ Thu Jan 01 00:00:00 1970 PST | Sun Dec 31 17:32:01 2000 PST | @ 11322 days 17 hours 32 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Mon Jan 01 17:32:01 2001 PST | @ 11323 days 17 hours 32 mins 1 sec ago
+ Thu Jan 01 00:00:00 1970 PST | Sat Sep 22 18:19:20 2001 PDT | @ 11587 days 17 hours 19 mins 20 secs ago
+ Wed Feb 28 17:32:01 1996 PST | Thu Jan 01 00:00:00 1970 PST | @ 9554 days 17 hours 32 mins 1 sec
+ Wed Feb 28 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 0
+ Wed Feb 28 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 1 day ago
+ Wed Feb 28 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 2 days ago
+ Wed Feb 28 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 306 days ago
+ Wed Feb 28 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 307 days ago
+ Wed Feb 28 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1402 days ago
+ Wed Feb 28 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1403 days ago
+ Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST | @ 1476 days 8 hours 42 mins 4 secs ago
+ Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1476 days 9 hours 42 mins 3 secs ago
+ Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1476 days 14 hours 42 mins ago
+ Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1476 days 18 hours 42 mins 2 secs ago
+ Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1476 days 19 hours 42 mins 1 sec ago
+ Wed Feb 28 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1768 days ago
+ Wed Feb 28 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1769 days ago
+ Wed Feb 28 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 2032 days 23 hours 47 mins 19 secs ago
+ Thu Feb 29 17:32:01 1996 PST | Thu Jan 01 00:00:00 1970 PST | @ 9555 days 17 hours 32 mins 1 sec
+ Thu Feb 29 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 1 day
+ Thu Feb 29 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 0
+ Thu Feb 29 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 1 day ago
+ Thu Feb 29 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 305 days ago
+ Thu Feb 29 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 306 days ago
+ Thu Feb 29 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1401 days ago
+ Thu Feb 29 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1402 days ago
+ Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST | @ 1475 days 8 hours 42 mins 4 secs ago
+ Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1475 days 9 hours 42 mins 3 secs ago
+ Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1475 days 14 hours 42 mins ago
+ Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1475 days 18 hours 42 mins 2 secs ago
+ Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1475 days 19 hours 42 mins 1 sec ago
+ Thu Feb 29 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1767 days ago
+ Thu Feb 29 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1768 days ago
+ Thu Feb 29 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 2031 days 23 hours 47 mins 19 secs ago
+ Fri Mar 01 17:32:01 1996 PST | Thu Jan 01 00:00:00 1970 PST | @ 9556 days 17 hours 32 mins 1 sec
+ Fri Mar 01 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 2 days
+ Fri Mar 01 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 1 day
+ Fri Mar 01 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 0
+ Fri Mar 01 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 304 days ago
+ Fri Mar 01 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 305 days ago
+ Fri Mar 01 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1400 days ago
+ Fri Mar 01 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1401 days ago
+ Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST | @ 1474 days 8 hours 42 mins 4 secs ago
+ Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1474 days 9 hours 42 mins 3 secs ago
+ Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1474 days 14 hours 42 mins ago
+ Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1474 days 18 hours 42 mins 2 secs ago
+ Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1474 days 19 hours 42 mins 1 sec ago
+ Fri Mar 01 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1766 days ago
+ Fri Mar 01 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1767 days ago
+ Fri Mar 01 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 2030 days 23 hours 47 mins 19 secs ago
+ Mon Dec 30 17:32:01 1996 PST | Thu Jan 01 00:00:00 1970 PST | @ 9860 days 17 hours 32 mins 1 sec
+ Mon Dec 30 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 306 days
+ Mon Dec 30 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 305 days
+ Mon Dec 30 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 304 days
+ Mon Dec 30 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 0
+ Mon Dec 30 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 1 day ago
+ Mon Dec 30 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1096 days ago
+ Mon Dec 30 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1097 days ago
+ Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST | @ 1170 days 8 hours 42 mins 4 secs ago
+ Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1170 days 9 hours 42 mins 3 secs ago
+ Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1170 days 14 hours 42 mins ago
+ Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1170 days 18 hours 42 mins 2 secs ago
+ Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1170 days 19 hours 42 mins 1 sec ago
+ Mon Dec 30 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1462 days ago
+ Mon Dec 30 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1463 days ago
+ Mon Dec 30 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 1726 days 23 hours 47 mins 19 secs ago
+ Tue Dec 31 17:32:01 1996 PST | Thu Jan 01 00:00:00 1970 PST | @ 9861 days 17 hours 32 mins 1 sec
+ Tue Dec 31 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 307 days
+ Tue Dec 31 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 306 days
+ Tue Dec 31 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 305 days
+ Tue Dec 31 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 1 day
+ Tue Dec 31 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 0
+ Tue Dec 31 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1095 days ago
+ Tue Dec 31 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1096 days ago
+ Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST | @ 1169 days 8 hours 42 mins 4 secs ago
+ Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1169 days 9 hours 42 mins 3 secs ago
+ Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1169 days 14 hours 42 mins ago
+ Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1169 days 18 hours 42 mins 2 secs ago
+ Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1169 days 19 hours 42 mins 1 sec ago
+ Tue Dec 31 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1461 days ago
+ Tue Dec 31 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1462 days ago
+ Tue Dec 31 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 1725 days 23 hours 47 mins 19 secs ago
+ Fri Dec 31 17:32:01 1999 PST | Thu Jan 01 00:00:00 1970 PST | @ 10956 days 17 hours 32 mins 1 sec
+ Fri Dec 31 17:32:01 1999 PST | Wed Feb 28 17:32:01 1996 PST | @ 1402 days
+ Fri Dec 31 17:32:01 1999 PST | Thu Feb 29 17:32:01 1996 PST | @ 1401 days
+ Fri Dec 31 17:32:01 1999 PST | Fri Mar 01 17:32:01 1996 PST | @ 1400 days
+ Fri Dec 31 17:32:01 1999 PST | Mon Dec 30 17:32:01 1996 PST | @ 1096 days
+ Fri Dec 31 17:32:01 1999 PST | Tue Dec 31 17:32:01 1996 PST | @ 1095 days
+ Fri Dec 31 17:32:01 1999 PST | Fri Dec 31 17:32:01 1999 PST | @ 0
+ Fri Dec 31 17:32:01 1999 PST | Sat Jan 01 17:32:01 2000 PST | @ 1 day ago
+ Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 02:14:05 2000 PST | @ 74 days 8 hours 42 mins 4 secs ago
+ Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 03:14:04 2000 PST | @ 74 days 9 hours 42 mins 3 secs ago
+ Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 08:14:01 2000 PST | @ 74 days 14 hours 42 mins ago
+ Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 12:14:03 2000 PST | @ 74 days 18 hours 42 mins 2 secs ago
+ Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 13:14:02 2000 PST | @ 74 days 19 hours 42 mins 1 sec ago
+ Fri Dec 31 17:32:01 1999 PST | Sun Dec 31 17:32:01 2000 PST | @ 366 days ago
+ Fri Dec 31 17:32:01 1999 PST | Mon Jan 01 17:32:01 2001 PST | @ 367 days ago
+ Fri Dec 31 17:32:01 1999 PST | Sat Sep 22 18:19:20 2001 PDT | @ 630 days 23 hours 47 mins 19 secs ago
+ Sat Jan 01 17:32:01 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 10957 days 17 hours 32 mins 1 sec
+ Sat Jan 01 17:32:01 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1403 days
+ Sat Jan 01 17:32:01 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1402 days
+ Sat Jan 01 17:32:01 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1401 days
+ Sat Jan 01 17:32:01 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1097 days
+ Sat Jan 01 17:32:01 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1096 days
+ Sat Jan 01 17:32:01 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 1 day
+ Sat Jan 01 17:32:01 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 0
+ Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 73 days 8 hours 42 mins 4 secs ago
+ Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 73 days 9 hours 42 mins 3 secs ago
+ Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 73 days 14 hours 42 mins ago
+ Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 73 days 18 hours 42 mins 2 secs ago
+ Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 73 days 19 hours 42 mins 1 sec ago
+ Sat Jan 01 17:32:01 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 365 days ago
+ Sat Jan 01 17:32:01 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 366 days ago
+ Sat Jan 01 17:32:01 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 629 days 23 hours 47 mins 19 secs ago
+ Wed Mar 15 02:14:05 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 2 hours 14 mins 5 secs
+ Wed Mar 15 02:14:05 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 8 hours 42 mins 4 secs
+ Wed Mar 15 02:14:05 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 8 hours 42 mins 4 secs
+ Wed Mar 15 02:14:05 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 8 hours 42 mins 4 secs
+ Wed Mar 15 02:14:05 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 8 hours 42 mins 4 secs
+ Wed Mar 15 02:14:05 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 8 hours 42 mins 4 secs
+ Wed Mar 15 02:14:05 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 8 hours 42 mins 4 secs
+ Wed Mar 15 02:14:05 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 8 hours 42 mins 4 secs
+ Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 0
+ Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 59 mins 59 secs ago
+ Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 5 hours 59 mins 56 secs ago
+ Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 9 hours 59 mins 58 secs ago
+ Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 10 hours 59 mins 57 secs ago
+ Wed Mar 15 02:14:05 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 15 hours 17 mins 56 secs ago
+ Wed Mar 15 02:14:05 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 15 hours 17 mins 56 secs ago
+ Wed Mar 15 02:14:05 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 15 hours 5 mins 15 secs ago
+ Wed Mar 15 03:14:04 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 3 hours 14 mins 4 secs
+ Wed Mar 15 03:14:04 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 9 hours 42 mins 3 secs
+ Wed Mar 15 03:14:04 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 9 hours 42 mins 3 secs
+ Wed Mar 15 03:14:04 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 9 hours 42 mins 3 secs
+ Wed Mar 15 03:14:04 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 9 hours 42 mins 3 secs
+ Wed Mar 15 03:14:04 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 9 hours 42 mins 3 secs
+ Wed Mar 15 03:14:04 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 9 hours 42 mins 3 secs
+ Wed Mar 15 03:14:04 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 9 hours 42 mins 3 secs
+ Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 59 mins 59 secs
+ Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 0
+ Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 4 hours 59 mins 57 secs ago
+ Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 8 hours 59 mins 59 secs ago
+ Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 9 hours 59 mins 58 secs ago
+ Wed Mar 15 03:14:04 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 14 hours 17 mins 57 secs ago
+ Wed Mar 15 03:14:04 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 14 hours 17 mins 57 secs ago
+ Wed Mar 15 03:14:04 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 14 hours 5 mins 16 secs ago
+ Wed Mar 15 08:14:01 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 8 hours 14 mins 1 sec
+ Wed Mar 15 08:14:01 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 14 hours 42 mins
+ Wed Mar 15 08:14:01 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 14 hours 42 mins
+ Wed Mar 15 08:14:01 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 14 hours 42 mins
+ Wed Mar 15 08:14:01 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 14 hours 42 mins
+ Wed Mar 15 08:14:01 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 14 hours 42 mins
+ Wed Mar 15 08:14:01 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 14 hours 42 mins
+ Wed Mar 15 08:14:01 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 14 hours 42 mins
+ Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 5 hours 59 mins 56 secs
+ Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 4 hours 59 mins 57 secs
+ Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 0
+ Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 4 hours 2 secs ago
+ Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 5 hours 1 sec ago
+ Wed Mar 15 08:14:01 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 9 hours 18 mins ago
+ Wed Mar 15 08:14:01 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 9 hours 18 mins ago
+ Wed Mar 15 08:14:01 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 9 hours 5 mins 19 secs ago
+ Wed Mar 15 12:14:03 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 12 hours 14 mins 3 secs
+ Wed Mar 15 12:14:03 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 18 hours 42 mins 2 secs
+ Wed Mar 15 12:14:03 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 18 hours 42 mins 2 secs
+ Wed Mar 15 12:14:03 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 18 hours 42 mins 2 secs
+ Wed Mar 15 12:14:03 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 18 hours 42 mins 2 secs
+ Wed Mar 15 12:14:03 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 18 hours 42 mins 2 secs
+ Wed Mar 15 12:14:03 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 18 hours 42 mins 2 secs
+ Wed Mar 15 12:14:03 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 18 hours 42 mins 2 secs
+ Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 9 hours 59 mins 58 secs
+ Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 8 hours 59 mins 59 secs
+ Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 4 hours 2 secs
+ Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 0
+ Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 59 mins 59 secs ago
+ Wed Mar 15 12:14:03 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 5 hours 17 mins 58 secs ago
+ Wed Mar 15 12:14:03 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 5 hours 17 mins 58 secs ago
+ Wed Mar 15 12:14:03 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 5 hours 5 mins 17 secs ago
+ Wed Mar 15 13:14:02 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 13 hours 14 mins 2 secs
+ Wed Mar 15 13:14:02 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 19 hours 42 mins 1 sec
+ Wed Mar 15 13:14:02 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 19 hours 42 mins 1 sec
+ Wed Mar 15 13:14:02 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 19 hours 42 mins 1 sec
+ Wed Mar 15 13:14:02 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 19 hours 42 mins 1 sec
+ Wed Mar 15 13:14:02 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 19 hours 42 mins 1 sec
+ Wed Mar 15 13:14:02 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 19 hours 42 mins 1 sec
+ Wed Mar 15 13:14:02 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 19 hours 42 mins 1 sec
+ Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 10 hours 59 mins 57 secs
+ Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 9 hours 59 mins 58 secs
+ Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 5 hours 1 sec
+ Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 59 mins 59 secs
+ Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 0
+ Wed Mar 15 13:14:02 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 4 hours 17 mins 59 secs ago
+ Wed Mar 15 13:14:02 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 4 hours 17 mins 59 secs ago
+ Wed Mar 15 13:14:02 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 4 hours 5 mins 18 secs ago
+ Sun Dec 31 17:32:01 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11322 days 17 hours 32 mins 1 sec
+ Sun Dec 31 17:32:01 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1768 days
+ Sun Dec 31 17:32:01 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1767 days
+ Sun Dec 31 17:32:01 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1766 days
+ Sun Dec 31 17:32:01 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1462 days
+ Sun Dec 31 17:32:01 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1461 days
+ Sun Dec 31 17:32:01 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 366 days
+ Sun Dec 31 17:32:01 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 365 days
+ Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 291 days 15 hours 17 mins 56 secs
+ Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 291 days 14 hours 17 mins 57 secs
+ Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 291 days 9 hours 18 mins
+ Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 291 days 5 hours 17 mins 58 secs
+ Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 291 days 4 hours 17 mins 59 secs
+ Sun Dec 31 17:32:01 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 0
+ Sun Dec 31 17:32:01 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 1 day ago
+ Sun Dec 31 17:32:01 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 264 days 23 hours 47 mins 19 secs ago
+ Mon Jan 01 17:32:01 2001 PST | Thu Jan 01 00:00:00 1970 PST | @ 11323 days 17 hours 32 mins 1 sec
+ Mon Jan 01 17:32:01 2001 PST | Wed Feb 28 17:32:01 1996 PST | @ 1769 days
+ Mon Jan 01 17:32:01 2001 PST | Thu Feb 29 17:32:01 1996 PST | @ 1768 days
+ Mon Jan 01 17:32:01 2001 PST | Fri Mar 01 17:32:01 1996 PST | @ 1767 days
+ Mon Jan 01 17:32:01 2001 PST | Mon Dec 30 17:32:01 1996 PST | @ 1463 days
+ Mon Jan 01 17:32:01 2001 PST | Tue Dec 31 17:32:01 1996 PST | @ 1462 days
+ Mon Jan 01 17:32:01 2001 PST | Fri Dec 31 17:32:01 1999 PST | @ 367 days
+ Mon Jan 01 17:32:01 2001 PST | Sat Jan 01 17:32:01 2000 PST | @ 366 days
+ Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 02:14:05 2000 PST | @ 292 days 15 hours 17 mins 56 secs
+ Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 03:14:04 2000 PST | @ 292 days 14 hours 17 mins 57 secs
+ Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 08:14:01 2000 PST | @ 292 days 9 hours 18 mins
+ Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 12:14:03 2000 PST | @ 292 days 5 hours 17 mins 58 secs
+ Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 13:14:02 2000 PST | @ 292 days 4 hours 17 mins 59 secs
+ Mon Jan 01 17:32:01 2001 PST | Sun Dec 31 17:32:01 2000 PST | @ 1 day
+ Mon Jan 01 17:32:01 2001 PST | Mon Jan 01 17:32:01 2001 PST | @ 0
+ Mon Jan 01 17:32:01 2001 PST | Sat Sep 22 18:19:20 2001 PDT | @ 263 days 23 hours 47 mins 19 secs ago
+ Sat Sep 22 18:19:20 2001 PDT | Thu Jan 01 00:00:00 1970 PST | @ 11587 days 17 hours 19 mins 20 secs
+ Sat Sep 22 18:19:20 2001 PDT | Wed Feb 28 17:32:01 1996 PST | @ 2032 days 23 hours 47 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Thu Feb 29 17:32:01 1996 PST | @ 2031 days 23 hours 47 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Fri Mar 01 17:32:01 1996 PST | @ 2030 days 23 hours 47 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Mon Dec 30 17:32:01 1996 PST | @ 1726 days 23 hours 47 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Tue Dec 31 17:32:01 1996 PST | @ 1725 days 23 hours 47 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Fri Dec 31 17:32:01 1999 PST | @ 630 days 23 hours 47 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Sat Jan 01 17:32:01 2000 PST | @ 629 days 23 hours 47 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 02:14:05 2000 PST | @ 556 days 15 hours 5 mins 15 secs
+ Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 03:14:04 2000 PST | @ 556 days 14 hours 5 mins 16 secs
+ Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 08:14:01 2000 PST | @ 556 days 9 hours 5 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 12:14:03 2000 PST | @ 556 days 5 hours 5 mins 17 secs
+ Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 13:14:02 2000 PST | @ 556 days 4 hours 5 mins 18 secs
+ Sat Sep 22 18:19:20 2001 PDT | Sun Dec 31 17:32:01 2000 PST | @ 264 days 23 hours 47 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Mon Jan 01 17:32:01 2001 PST | @ 263 days 23 hours 47 mins 19 secs
+ Sat Sep 22 18:19:20 2001 PDT | Sat Sep 22 18:19:20 2001 PDT | @ 0
+(256 rows)
+
+--
+-- Conversions
+--
+SELECT f1 AS "timestamp", date(f1) AS date
+ FROM TEMP_TIMESTAMP
+ WHERE f1 <> timestamp 'now'
+ ORDER BY date, "timestamp";
+ timestamp | date
+------------------------------+------------
+ Thu Jan 01 00:00:00 1970 PST | 01-01-1970
+ Wed Feb 28 17:32:01 1996 PST | 02-28-1996
+ Thu Feb 29 17:32:01 1996 PST | 02-29-1996
+ Fri Mar 01 17:32:01 1996 PST | 03-01-1996
+ Mon Dec 30 17:32:01 1996 PST | 12-30-1996
+ Tue Dec 31 17:32:01 1996 PST | 12-31-1996
+ Fri Dec 31 17:32:01 1999 PST | 12-31-1999
+ Sat Jan 01 17:32:01 2000 PST | 01-01-2000
+ Wed Mar 15 02:14:05 2000 PST | 03-15-2000
+ Wed Mar 15 03:14:04 2000 PST | 03-15-2000
+ Wed Mar 15 08:14:01 2000 PST | 03-15-2000
+ Wed Mar 15 12:14:03 2000 PST | 03-15-2000
+ Wed Mar 15 13:14:02 2000 PST | 03-15-2000
+ Sun Dec 31 17:32:01 2000 PST | 12-31-2000
+ Mon Jan 01 17:32:01 2001 PST | 01-01-2001
+ Sat Sep 22 18:19:20 2001 PDT | 09-22-2001
+(16 rows)
+
+DROP TABLE TEMP_TIMESTAMP;
+--
+-- Comparisons between datetime types, especially overflow cases
+---
+SELECT '2202020-10-05'::date::timestamp; -- fail
+ERROR: date out of range for timestamp
+SELECT '2202020-10-05'::date > '2020-10-05'::timestamp as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '2020-10-05'::timestamp > '2202020-10-05'::date as f;
+ f
+---
+ f
+(1 row)
+
+SELECT '2202020-10-05'::date::timestamptz; -- fail
+ERROR: date out of range for timestamp
+SELECT '2202020-10-05'::date > '2020-10-05'::timestamptz as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '2020-10-05'::timestamptz > '2202020-10-05'::date as f;
+ f
+---
+ f
+(1 row)
+
+-- This conversion may work depending on timezone
+SELECT '4714-11-24 BC'::date::timestamptz;
+ timestamptz
+---------------------------------
+ Mon Nov 24 00:00:00 4714 PST BC
+(1 row)
+
+SET TimeZone = 'UTC-2';
+SELECT '4714-11-24 BC'::date::timestamptz; -- fail
+ERROR: date out of range for timestamp
+SELECT '4714-11-24 BC'::date < '2020-10-05'::timestamptz as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::date as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '4714-11-24 BC'::timestamp < '2020-10-05'::timestamptz as t;
+ t
+---
+ t
+(1 row)
+
+SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::timestamp as t;
+ t
+---
+ t
+(1 row)
+
+RESET TimeZone;
+--
+-- Formats
+--
+SET DateStyle TO 'US,Postgres';
+SHOW DateStyle;
+ DateStyle
+---------------
+ Postgres, MDY
+(1 row)
+
+SELECT d1 AS us_postgres FROM TIMESTAMP_TBL;
+ us_postgres
+-----------------------------
+ -infinity
+ infinity
+ Thu Jan 01 00:00:00 1970
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:02 1997
+ Mon Feb 10 17:32:01.4 1997
+ Mon Feb 10 17:32:01.5 1997
+ Mon Feb 10 17:32:01.6 1997
+ Thu Jan 02 00:00:00 1997
+ Thu Jan 02 03:04:05 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 17:32:01 1997
+ Sat Sep 22 18:19:20 2001
+ Wed Mar 15 08:14:01 2000
+ Wed Mar 15 13:14:02 2000
+ Wed Mar 15 12:14:03 2000
+ Wed Mar 15 03:14:04 2000
+ Wed Mar 15 02:14:05 2000
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:00 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 18:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Feb 11 17:32:01 1997
+ Wed Feb 12 17:32:01 1997
+ Thu Feb 13 17:32:01 1997
+ Fri Feb 14 17:32:01 1997
+ Sat Feb 15 17:32:01 1997
+ Sun Feb 16 17:32:01 1997
+ Tue Feb 16 17:32:01 0097 BC
+ Sat Feb 16 17:32:01 0097
+ Thu Feb 16 17:32:01 0597
+ Tue Feb 16 17:32:01 1097
+ Sat Feb 16 17:32:01 1697
+ Thu Feb 16 17:32:01 1797
+ Tue Feb 16 17:32:01 1897
+ Sun Feb 16 17:32:01 1997
+ Sat Feb 16 17:32:01 2097
+ Wed Feb 28 17:32:01 1996
+ Thu Feb 29 17:32:01 1996
+ Fri Mar 01 17:32:01 1996
+ Mon Dec 30 17:32:01 1996
+ Tue Dec 31 17:32:01 1996
+ Wed Jan 01 17:32:01 1997
+ Fri Feb 28 17:32:01 1997
+ Sat Mar 01 17:32:01 1997
+ Tue Dec 30 17:32:01 1997
+ Wed Dec 31 17:32:01 1997
+ Fri Dec 31 17:32:01 1999
+ Sat Jan 01 17:32:01 2000
+ Sun Dec 31 17:32:01 2000
+ Mon Jan 01 17:32:01 2001
+(65 rows)
+
+SET DateStyle TO 'US,ISO';
+SELECT d1 AS us_iso FROM TIMESTAMP_TBL;
+ us_iso
+------------------------
+ -infinity
+ infinity
+ 1970-01-01 00:00:00
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:02
+ 1997-02-10 17:32:01.4
+ 1997-02-10 17:32:01.5
+ 1997-02-10 17:32:01.6
+ 1997-01-02 00:00:00
+ 1997-01-02 03:04:05
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-06-10 17:32:01
+ 2001-09-22 18:19:20
+ 2000-03-15 08:14:01
+ 2000-03-15 13:14:02
+ 2000-03-15 12:14:03
+ 2000-03-15 03:14:04
+ 2000-03-15 02:14:05
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:00
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-06-10 18:32:01
+ 1997-02-10 17:32:01
+ 1997-02-11 17:32:01
+ 1997-02-12 17:32:01
+ 1997-02-13 17:32:01
+ 1997-02-14 17:32:01
+ 1997-02-15 17:32:01
+ 1997-02-16 17:32:01
+ 0097-02-16 17:32:01 BC
+ 0097-02-16 17:32:01
+ 0597-02-16 17:32:01
+ 1097-02-16 17:32:01
+ 1697-02-16 17:32:01
+ 1797-02-16 17:32:01
+ 1897-02-16 17:32:01
+ 1997-02-16 17:32:01
+ 2097-02-16 17:32:01
+ 1996-02-28 17:32:01
+ 1996-02-29 17:32:01
+ 1996-03-01 17:32:01
+ 1996-12-30 17:32:01
+ 1996-12-31 17:32:01
+ 1997-01-01 17:32:01
+ 1997-02-28 17:32:01
+ 1997-03-01 17:32:01
+ 1997-12-30 17:32:01
+ 1997-12-31 17:32:01
+ 1999-12-31 17:32:01
+ 2000-01-01 17:32:01
+ 2000-12-31 17:32:01
+ 2001-01-01 17:32:01
+(65 rows)
+
+SET DateStyle TO 'US,SQL';
+SHOW DateStyle;
+ DateStyle
+-----------
+ SQL, MDY
+(1 row)
+
+SELECT d1 AS us_sql FROM TIMESTAMP_TBL;
+ us_sql
+------------------------
+ -infinity
+ infinity
+ 01/01/1970 00:00:00
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:02
+ 02/10/1997 17:32:01.4
+ 02/10/1997 17:32:01.5
+ 02/10/1997 17:32:01.6
+ 01/02/1997 00:00:00
+ 01/02/1997 03:04:05
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 06/10/1997 17:32:01
+ 09/22/2001 18:19:20
+ 03/15/2000 08:14:01
+ 03/15/2000 13:14:02
+ 03/15/2000 12:14:03
+ 03/15/2000 03:14:04
+ 03/15/2000 02:14:05
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:00
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 02/10/1997 17:32:01
+ 06/10/1997 18:32:01
+ 02/10/1997 17:32:01
+ 02/11/1997 17:32:01
+ 02/12/1997 17:32:01
+ 02/13/1997 17:32:01
+ 02/14/1997 17:32:01
+ 02/15/1997 17:32:01
+ 02/16/1997 17:32:01
+ 02/16/0097 17:32:01 BC
+ 02/16/0097 17:32:01
+ 02/16/0597 17:32:01
+ 02/16/1097 17:32:01
+ 02/16/1697 17:32:01
+ 02/16/1797 17:32:01
+ 02/16/1897 17:32:01
+ 02/16/1997 17:32:01
+ 02/16/2097 17:32:01
+ 02/28/1996 17:32:01
+ 02/29/1996 17:32:01
+ 03/01/1996 17:32:01
+ 12/30/1996 17:32:01
+ 12/31/1996 17:32:01
+ 01/01/1997 17:32:01
+ 02/28/1997 17:32:01
+ 03/01/1997 17:32:01
+ 12/30/1997 17:32:01
+ 12/31/1997 17:32:01
+ 12/31/1999 17:32:01
+ 01/01/2000 17:32:01
+ 12/31/2000 17:32:01
+ 01/01/2001 17:32:01
+(65 rows)
+
+SET DateStyle TO 'European,Postgres';
+SHOW DateStyle;
+ DateStyle
+---------------
+ Postgres, DMY
+(1 row)
+
+INSERT INTO TIMESTAMP_TBL VALUES('13/06/1957');
+SELECT count(*) as one FROM TIMESTAMP_TBL WHERE d1 = 'Jun 13 1957';
+ one
+-----
+ 1
+(1 row)
+
+SELECT d1 AS european_postgres FROM TIMESTAMP_TBL;
+ european_postgres
+-----------------------------
+ -infinity
+ infinity
+ Thu 01 Jan 00:00:00 1970
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:02 1997
+ Mon 10 Feb 17:32:01.4 1997
+ Mon 10 Feb 17:32:01.5 1997
+ Mon 10 Feb 17:32:01.6 1997
+ Thu 02 Jan 00:00:00 1997
+ Thu 02 Jan 03:04:05 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Tue 10 Jun 17:32:01 1997
+ Sat 22 Sep 18:19:20 2001
+ Wed 15 Mar 08:14:01 2000
+ Wed 15 Mar 13:14:02 2000
+ Wed 15 Mar 12:14:03 2000
+ Wed 15 Mar 03:14:04 2000
+ Wed 15 Mar 02:14:05 2000
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:00 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Tue 10 Jun 18:32:01 1997
+ Mon 10 Feb 17:32:01 1997
+ Tue 11 Feb 17:32:01 1997
+ Wed 12 Feb 17:32:01 1997
+ Thu 13 Feb 17:32:01 1997
+ Fri 14 Feb 17:32:01 1997
+ Sat 15 Feb 17:32:01 1997
+ Sun 16 Feb 17:32:01 1997
+ Tue 16 Feb 17:32:01 0097 BC
+ Sat 16 Feb 17:32:01 0097
+ Thu 16 Feb 17:32:01 0597
+ Tue 16 Feb 17:32:01 1097
+ Sat 16 Feb 17:32:01 1697
+ Thu 16 Feb 17:32:01 1797
+ Tue 16 Feb 17:32:01 1897
+ Sun 16 Feb 17:32:01 1997
+ Sat 16 Feb 17:32:01 2097
+ Wed 28 Feb 17:32:01 1996
+ Thu 29 Feb 17:32:01 1996
+ Fri 01 Mar 17:32:01 1996
+ Mon 30 Dec 17:32:01 1996
+ Tue 31 Dec 17:32:01 1996
+ Wed 01 Jan 17:32:01 1997
+ Fri 28 Feb 17:32:01 1997
+ Sat 01 Mar 17:32:01 1997
+ Tue 30 Dec 17:32:01 1997
+ Wed 31 Dec 17:32:01 1997
+ Fri 31 Dec 17:32:01 1999
+ Sat 01 Jan 17:32:01 2000
+ Sun 31 Dec 17:32:01 2000
+ Mon 01 Jan 17:32:01 2001
+ Thu 13 Jun 00:00:00 1957
+(66 rows)
+
+SET DateStyle TO 'European,ISO';
+SHOW DateStyle;
+ DateStyle
+-----------
+ ISO, DMY
+(1 row)
+
+SELECT d1 AS european_iso FROM TIMESTAMP_TBL;
+ european_iso
+------------------------
+ -infinity
+ infinity
+ 1970-01-01 00:00:00
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:02
+ 1997-02-10 17:32:01.4
+ 1997-02-10 17:32:01.5
+ 1997-02-10 17:32:01.6
+ 1997-01-02 00:00:00
+ 1997-01-02 03:04:05
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-06-10 17:32:01
+ 2001-09-22 18:19:20
+ 2000-03-15 08:14:01
+ 2000-03-15 13:14:02
+ 2000-03-15 12:14:03
+ 2000-03-15 03:14:04
+ 2000-03-15 02:14:05
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:00
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-02-10 17:32:01
+ 1997-06-10 18:32:01
+ 1997-02-10 17:32:01
+ 1997-02-11 17:32:01
+ 1997-02-12 17:32:01
+ 1997-02-13 17:32:01
+ 1997-02-14 17:32:01
+ 1997-02-15 17:32:01
+ 1997-02-16 17:32:01
+ 0097-02-16 17:32:01 BC
+ 0097-02-16 17:32:01
+ 0597-02-16 17:32:01
+ 1097-02-16 17:32:01
+ 1697-02-16 17:32:01
+ 1797-02-16 17:32:01
+ 1897-02-16 17:32:01
+ 1997-02-16 17:32:01
+ 2097-02-16 17:32:01
+ 1996-02-28 17:32:01
+ 1996-02-29 17:32:01
+ 1996-03-01 17:32:01
+ 1996-12-30 17:32:01
+ 1996-12-31 17:32:01
+ 1997-01-01 17:32:01
+ 1997-02-28 17:32:01
+ 1997-03-01 17:32:01
+ 1997-12-30 17:32:01
+ 1997-12-31 17:32:01
+ 1999-12-31 17:32:01
+ 2000-01-01 17:32:01
+ 2000-12-31 17:32:01
+ 2001-01-01 17:32:01
+ 1957-06-13 00:00:00
+(66 rows)
+
+SET DateStyle TO 'European,SQL';
+SHOW DateStyle;
+ DateStyle
+-----------
+ SQL, DMY
+(1 row)
+
+SELECT d1 AS european_sql FROM TIMESTAMP_TBL;
+ european_sql
+------------------------
+ -infinity
+ infinity
+ 01/01/1970 00:00:00
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:02
+ 10/02/1997 17:32:01.4
+ 10/02/1997 17:32:01.5
+ 10/02/1997 17:32:01.6
+ 02/01/1997 00:00:00
+ 02/01/1997 03:04:05
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/06/1997 17:32:01
+ 22/09/2001 18:19:20
+ 15/03/2000 08:14:01
+ 15/03/2000 13:14:02
+ 15/03/2000 12:14:03
+ 15/03/2000 03:14:04
+ 15/03/2000 02:14:05
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:00
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/02/1997 17:32:01
+ 10/06/1997 18:32:01
+ 10/02/1997 17:32:01
+ 11/02/1997 17:32:01
+ 12/02/1997 17:32:01
+ 13/02/1997 17:32:01
+ 14/02/1997 17:32:01
+ 15/02/1997 17:32:01
+ 16/02/1997 17:32:01
+ 16/02/0097 17:32:01 BC
+ 16/02/0097 17:32:01
+ 16/02/0597 17:32:01
+ 16/02/1097 17:32:01
+ 16/02/1697 17:32:01
+ 16/02/1797 17:32:01
+ 16/02/1897 17:32:01
+ 16/02/1997 17:32:01
+ 16/02/2097 17:32:01
+ 28/02/1996 17:32:01
+ 29/02/1996 17:32:01
+ 01/03/1996 17:32:01
+ 30/12/1996 17:32:01
+ 31/12/1996 17:32:01
+ 01/01/1997 17:32:01
+ 28/02/1997 17:32:01
+ 01/03/1997 17:32:01
+ 30/12/1997 17:32:01
+ 31/12/1997 17:32:01
+ 31/12/1999 17:32:01
+ 01/01/2000 17:32:01
+ 31/12/2000 17:32:01
+ 01/01/2001 17:32:01
+ 13/06/1957 00:00:00
+(66 rows)
+
+RESET DateStyle;
+--
+-- to_timestamp()
+--
+SELECT to_timestamp('0097/Feb/16 --> 08:14:30', 'YYYY/Mon/DD --> HH:MI:SS');
+ to_timestamp
+------------------------------
+ Sat Feb 16 08:14:30 0097 PST
+(1 row)
+
+SELECT to_timestamp('97/2/16 8:14:30', 'FMYYYY/FMMM/FMDD FMHH:FMMI:FMSS');
+ to_timestamp
+------------------------------
+ Sat Feb 16 08:14:30 0097 PST
+(1 row)
+
+SELECT to_timestamp('2011$03!18 23_38_15', 'YYYY-MM-DD HH24:MI:SS');
+ to_timestamp
+------------------------------
+ Fri Mar 18 23:38:15 2011 PDT
+(1 row)
+
+SELECT to_timestamp('1985 January 12', 'YYYY FMMonth DD');
+ to_timestamp
+------------------------------
+ Sat Jan 12 00:00:00 1985 PST
+(1 row)
+
+SELECT to_timestamp('1985 FMMonth 12', 'YYYY "FMMonth" DD');
+ to_timestamp
+------------------------------
+ Sat Jan 12 00:00:00 1985 PST
+(1 row)
+
+SELECT to_timestamp('1985 \ 12', 'YYYY \\ DD');
+ to_timestamp
+------------------------------
+ Sat Jan 12 00:00:00 1985 PST
+(1 row)
+
+SELECT to_timestamp('My birthday-> Year: 1976, Month: May, Day: 16',
+ '"My birthday-> Year:" YYYY, "Month:" FMMonth, "Day:" DD');
+ to_timestamp
+------------------------------
+ Sun May 16 00:00:00 1976 PDT
+(1 row)
+
+SELECT to_timestamp('1,582nd VIII 21', 'Y,YYYth FMRM DD');
+ to_timestamp
+------------------------------
+ Sat Aug 21 00:00:00 1582 PST
+(1 row)
+
+SELECT to_timestamp('15 "text between quote marks" 98 54 45',
+ E'HH24 "\\"text between quote marks\\"" YY MI SS');
+ to_timestamp
+------------------------------
+ Thu Jan 01 15:54:45 1998 PST
+(1 row)
+
+SELECT to_timestamp('05121445482000', 'MMDDHH24MISSYYYY');
+ to_timestamp
+------------------------------
+ Fri May 12 14:45:48 2000 PDT
+(1 row)
+
+SELECT to_timestamp('2000January09Sunday', 'YYYYFMMonthDDFMDay');
+ to_timestamp
+------------------------------
+ Sun Jan 09 00:00:00 2000 PST
+(1 row)
+
+SELECT to_timestamp('97/Feb/16', 'YYMonDD');
+ERROR: invalid value "/Feb/16" for "Mon"
+DETAIL: The given value did not match any of the allowed values for this field.
+SELECT to_timestamp('97/Feb/16', 'YY:Mon:DD');
+ to_timestamp
+------------------------------
+ Sun Feb 16 00:00:00 1997 PST
+(1 row)
+
+SELECT to_timestamp('97/Feb/16', 'FXYY:Mon:DD');
+ to_timestamp
+------------------------------
+ Sun Feb 16 00:00:00 1997 PST
+(1 row)
+
+SELECT to_timestamp('97/Feb/16', 'FXYY/Mon/DD');
+ to_timestamp
+------------------------------
+ Sun Feb 16 00:00:00 1997 PST
+(1 row)
+
+SELECT to_timestamp('19971116', 'YYYYMMDD');
+ to_timestamp
+------------------------------
+ Sun Nov 16 00:00:00 1997 PST
+(1 row)
+
+SELECT to_timestamp('20000-1116', 'YYYY-MMDD');
+ to_timestamp
+-------------------------------
+ Thu Nov 16 00:00:00 20000 PST
+(1 row)
+
+SELECT to_timestamp('1997 AD 11 16', 'YYYY BC MM DD');
+ to_timestamp
+------------------------------
+ Sun Nov 16 00:00:00 1997 PST
+(1 row)
+
+SELECT to_timestamp('1997 BC 11 16', 'YYYY BC MM DD');
+ to_timestamp
+---------------------------------
+ Tue Nov 16 00:00:00 1997 PST BC
+(1 row)
+
+SELECT to_timestamp('1997 A.D. 11 16', 'YYYY B.C. MM DD');
+ to_timestamp
+------------------------------
+ Sun Nov 16 00:00:00 1997 PST
+(1 row)
+
+SELECT to_timestamp('1997 B.C. 11 16', 'YYYY B.C. MM DD');
+ to_timestamp
+---------------------------------
+ Tue Nov 16 00:00:00 1997 PST BC
+(1 row)
+
+SELECT to_timestamp('9-1116', 'Y-MMDD');
+ to_timestamp
+------------------------------
+ Mon Nov 16 00:00:00 2009 PST
+(1 row)
+
+SELECT to_timestamp('95-1116', 'YY-MMDD');
+ to_timestamp
+------------------------------
+ Thu Nov 16 00:00:00 1995 PST
+(1 row)
+
+SELECT to_timestamp('995-1116', 'YYY-MMDD');
+ to_timestamp
+------------------------------
+ Thu Nov 16 00:00:00 1995 PST
+(1 row)
+
+SELECT to_timestamp('2005426', 'YYYYWWD');
+ to_timestamp
+------------------------------
+ Sat Oct 15 00:00:00 2005 PDT
+(1 row)
+
+SELECT to_timestamp('2005300', 'YYYYDDD');
+ to_timestamp
+------------------------------
+ Thu Oct 27 00:00:00 2005 PDT
+(1 row)
+
+SELECT to_timestamp('2005527', 'IYYYIWID');
+ to_timestamp
+------------------------------
+ Sun Jan 01 00:00:00 2006 PST
+(1 row)
+
+SELECT to_timestamp('005527', 'IYYIWID');
+ to_timestamp
+------------------------------
+ Sun Jan 01 00:00:00 2006 PST
+(1 row)
+
+SELECT to_timestamp('05527', 'IYIWID');
+ to_timestamp
+------------------------------
+ Sun Jan 01 00:00:00 2006 PST
+(1 row)
+
+SELECT to_timestamp('5527', 'IIWID');
+ to_timestamp
+------------------------------
+ Sun Jan 01 00:00:00 2006 PST
+(1 row)
+
+SELECT to_timestamp('2005364', 'IYYYIDDD');
+ to_timestamp
+------------------------------
+ Sun Jan 01 00:00:00 2006 PST
+(1 row)
+
+SELECT to_timestamp('20050302', 'YYYYMMDD');
+ to_timestamp
+------------------------------
+ Wed Mar 02 00:00:00 2005 PST
+(1 row)
+
+SELECT to_timestamp('2005 03 02', 'YYYYMMDD');
+ to_timestamp
+------------------------------
+ Wed Mar 02 00:00:00 2005 PST
+(1 row)
+
+SELECT to_timestamp(' 2005 03 02', 'YYYYMMDD');
+ to_timestamp
+------------------------------
+ Wed Mar 02 00:00:00 2005 PST
+(1 row)
+
+SELECT to_timestamp(' 20050302', 'YYYYMMDD');
+ to_timestamp
+------------------------------
+ Wed Mar 02 00:00:00 2005 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 AM', 'YYYY-MM-DD HH12:MI PM');
+ to_timestamp
+------------------------------
+ Sun Dec 18 11:38:00 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 PM', 'YYYY-MM-DD HH12:MI PM');
+ to_timestamp
+------------------------------
+ Sun Dec 18 23:38:00 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 A.M.', 'YYYY-MM-DD HH12:MI P.M.');
+ to_timestamp
+------------------------------
+ Sun Dec 18 11:38:00 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 P.M.', 'YYYY-MM-DD HH12:MI P.M.');
+ to_timestamp
+------------------------------
+ Sun Dec 18 23:38:00 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 +05', 'YYYY-MM-DD HH12:MI TZH');
+ to_timestamp
+------------------------------
+ Sat Dec 17 22:38:00 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 -05', 'YYYY-MM-DD HH12:MI TZH');
+ to_timestamp
+------------------------------
+ Sun Dec 18 08:38:00 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 +05:20', 'YYYY-MM-DD HH12:MI TZH:TZM');
+ to_timestamp
+------------------------------
+ Sat Dec 17 22:18:00 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 -05:20', 'YYYY-MM-DD HH12:MI TZH:TZM');
+ to_timestamp
+------------------------------
+ Sun Dec 18 08:58:00 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 20', 'YYYY-MM-DD HH12:MI TZM');
+ to_timestamp
+------------------------------
+ Sun Dec 18 03:18:00 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 11:38 PST', 'YYYY-MM-DD HH12:MI TZ'); -- NYI
+ERROR: formatting field "TZ" is only supported in to_char
+SELECT to_timestamp('2018-11-02 12:34:56.025', 'YYYY-MM-DD HH24:MI:SS.MS');
+ to_timestamp
+----------------------------------
+ Fri Nov 02 12:34:56.025 2018 PDT
+(1 row)
+
+SELECT i, to_timestamp('2018-11-02 12:34:56', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+ i | to_timestamp
+---+------------------------------
+ 1 | Fri Nov 02 12:34:56 2018 PDT
+ 2 | Fri Nov 02 12:34:56 2018 PDT
+ 3 | Fri Nov 02 12:34:56 2018 PDT
+ 4 | Fri Nov 02 12:34:56 2018 PDT
+ 5 | Fri Nov 02 12:34:56 2018 PDT
+ 6 | Fri Nov 02 12:34:56 2018 PDT
+(6 rows)
+
+SELECT i, to_timestamp('2018-11-02 12:34:56.1', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+ i | to_timestamp
+---+--------------------------------
+ 1 | Fri Nov 02 12:34:56.1 2018 PDT
+ 2 | Fri Nov 02 12:34:56.1 2018 PDT
+ 3 | Fri Nov 02 12:34:56.1 2018 PDT
+ 4 | Fri Nov 02 12:34:56.1 2018 PDT
+ 5 | Fri Nov 02 12:34:56.1 2018 PDT
+ 6 | Fri Nov 02 12:34:56.1 2018 PDT
+(6 rows)
+
+SELECT i, to_timestamp('2018-11-02 12:34:56.12', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+ i | to_timestamp
+---+---------------------------------
+ 1 | Fri Nov 02 12:34:56.1 2018 PDT
+ 2 | Fri Nov 02 12:34:56.12 2018 PDT
+ 3 | Fri Nov 02 12:34:56.12 2018 PDT
+ 4 | Fri Nov 02 12:34:56.12 2018 PDT
+ 5 | Fri Nov 02 12:34:56.12 2018 PDT
+ 6 | Fri Nov 02 12:34:56.12 2018 PDT
+(6 rows)
+
+SELECT i, to_timestamp('2018-11-02 12:34:56.123', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+ i | to_timestamp
+---+----------------------------------
+ 1 | Fri Nov 02 12:34:56.1 2018 PDT
+ 2 | Fri Nov 02 12:34:56.12 2018 PDT
+ 3 | Fri Nov 02 12:34:56.123 2018 PDT
+ 4 | Fri Nov 02 12:34:56.123 2018 PDT
+ 5 | Fri Nov 02 12:34:56.123 2018 PDT
+ 6 | Fri Nov 02 12:34:56.123 2018 PDT
+(6 rows)
+
+SELECT i, to_timestamp('2018-11-02 12:34:56.1234', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+ i | to_timestamp
+---+-----------------------------------
+ 1 | Fri Nov 02 12:34:56.1 2018 PDT
+ 2 | Fri Nov 02 12:34:56.12 2018 PDT
+ 3 | Fri Nov 02 12:34:56.123 2018 PDT
+ 4 | Fri Nov 02 12:34:56.1234 2018 PDT
+ 5 | Fri Nov 02 12:34:56.1234 2018 PDT
+ 6 | Fri Nov 02 12:34:56.1234 2018 PDT
+(6 rows)
+
+SELECT i, to_timestamp('2018-11-02 12:34:56.12345', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+ i | to_timestamp
+---+------------------------------------
+ 1 | Fri Nov 02 12:34:56.1 2018 PDT
+ 2 | Fri Nov 02 12:34:56.12 2018 PDT
+ 3 | Fri Nov 02 12:34:56.123 2018 PDT
+ 4 | Fri Nov 02 12:34:56.1235 2018 PDT
+ 5 | Fri Nov 02 12:34:56.12345 2018 PDT
+ 6 | Fri Nov 02 12:34:56.12345 2018 PDT
+(6 rows)
+
+SELECT i, to_timestamp('2018-11-02 12:34:56.123456', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+ i | to_timestamp
+---+-------------------------------------
+ 1 | Fri Nov 02 12:34:56.1 2018 PDT
+ 2 | Fri Nov 02 12:34:56.12 2018 PDT
+ 3 | Fri Nov 02 12:34:56.123 2018 PDT
+ 4 | Fri Nov 02 12:34:56.1235 2018 PDT
+ 5 | Fri Nov 02 12:34:56.12346 2018 PDT
+ 6 | Fri Nov 02 12:34:56.123456 2018 PDT
+(6 rows)
+
+SELECT i, to_timestamp('2018-11-02 12:34:56.123456789', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+ERROR: date/time field value out of range: "2018-11-02 12:34:56.123456789"
+SELECT to_date('1 4 1902', 'Q MM YYYY'); -- Q is ignored
+ to_date
+------------
+ 04-01-1902
+(1 row)
+
+SELECT to_date('3 4 21 01', 'W MM CC YY');
+ to_date
+------------
+ 04-15-2001
+(1 row)
+
+SELECT to_date('2458872', 'J');
+ to_date
+------------
+ 01-23-2020
+(1 row)
+
+--
+-- Check handling of BC dates
+--
+SELECT to_date('44-02-01 BC','YYYY-MM-DD BC');
+ to_date
+---------------
+ 02-01-0044 BC
+(1 row)
+
+SELECT to_date('-44-02-01','YYYY-MM-DD');
+ to_date
+---------------
+ 02-01-0044 BC
+(1 row)
+
+SELECT to_date('-44-02-01 BC','YYYY-MM-DD BC');
+ to_date
+------------
+ 02-01-0044
+(1 row)
+
+SELECT to_timestamp('44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC');
+ to_timestamp
+---------------------------------
+ Fri Feb 01 11:12:13 0044 PST BC
+(1 row)
+
+SELECT to_timestamp('-44-02-01 11:12:13','YYYY-MM-DD HH24:MI:SS');
+ to_timestamp
+---------------------------------
+ Fri Feb 01 11:12:13 0044 PST BC
+(1 row)
+
+SELECT to_timestamp('-44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC');
+ to_timestamp
+------------------------------
+ Mon Feb 01 11:12:13 0044 PST
+(1 row)
+
+--
+-- Check handling of multiple spaces in format and/or input
+--
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+ to_timestamp
+------------------------------
+ Sun Dec 18 23:38:15 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+ to_timestamp
+------------------------------
+ Sun Dec 18 23:38:15 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+ to_timestamp
+------------------------------
+ Sun Dec 18 23:38:15 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+ to_timestamp
+------------------------------
+ Sun Dec 18 23:38:15 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+ to_timestamp
+------------------------------
+ Sun Dec 18 23:38:15 2011 PST
+(1 row)
+
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+ to_timestamp
+------------------------------
+ Sun Dec 18 23:38:15 2011 PST
+(1 row)
+
+SELECT to_timestamp('2000+ JUN', 'YYYY/MON');
+ to_timestamp
+------------------------------
+ Thu Jun 01 00:00:00 2000 PDT
+(1 row)
+
+SELECT to_timestamp(' 2000 +JUN', 'YYYY/MON');
+ to_timestamp
+------------------------------
+ Thu Jun 01 00:00:00 2000 PDT
+(1 row)
+
+SELECT to_timestamp(' 2000 +JUN', 'YYYY//MON');
+ to_timestamp
+------------------------------
+ Thu Jun 01 00:00:00 2000 PDT
+(1 row)
+
+SELECT to_timestamp('2000 +JUN', 'YYYY//MON');
+ to_timestamp
+------------------------------
+ Thu Jun 01 00:00:00 2000 PDT
+(1 row)
+
+SELECT to_timestamp('2000 + JUN', 'YYYY MON');
+ to_timestamp
+------------------------------
+ Thu Jun 01 00:00:00 2000 PDT
+(1 row)
+
+SELECT to_timestamp('2000 ++ JUN', 'YYYY MON');
+ to_timestamp
+------------------------------
+ Thu Jun 01 00:00:00 2000 PDT
+(1 row)
+
+SELECT to_timestamp('2000 + + JUN', 'YYYY MON');
+ERROR: invalid value "+" for "MON"
+DETAIL: The given value did not match any of the allowed values for this field.
+SELECT to_timestamp('2000 + + JUN', 'YYYY MON');
+ to_timestamp
+------------------------------
+ Thu Jun 01 00:00:00 2000 PDT
+(1 row)
+
+SELECT to_timestamp('2000 -10', 'YYYY TZH');
+ to_timestamp
+------------------------------
+ Sat Jan 01 02:00:00 2000 PST
+(1 row)
+
+SELECT to_timestamp('2000 -10', 'YYYY TZH');
+ to_timestamp
+------------------------------
+ Fri Dec 31 06:00:00 1999 PST
+(1 row)
+
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+ to_date
+------------
+ 12-18-2011
+(1 row)
+
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+ to_date
+------------
+ 12-18-2011
+(1 row)
+
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+ to_date
+------------
+ 12-18-2011
+(1 row)
+
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+ to_date
+------------
+ 12-18-2011
+(1 row)
+
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+ to_date
+------------
+ 12-18-2011
+(1 row)
+
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+ to_date
+------------
+ 12-18-2011
+(1 row)
+
+SELECT to_date('2011 12 18', 'YYYYxMMxDD');
+ to_date
+------------
+ 12-18-2011
+(1 row)
+
+SELECT to_date('2011x 12x 18', 'YYYYxMMxDD');
+ to_date
+------------
+ 12-18-2011
+(1 row)
+
+SELECT to_date('2011 x12 x18', 'YYYYxMMxDD');
+ERROR: invalid value "x1" for "MM"
+DETAIL: Value must be an integer.
+--
+-- Check errors for some incorrect usages of to_timestamp() and to_date()
+--
+-- Mixture of date conventions (ISO week and Gregorian):
+SELECT to_timestamp('2005527', 'YYYYIWID');
+ERROR: invalid combination of date conventions
+HINT: Do not mix Gregorian and ISO week date conventions in a formatting template.
+-- Insufficient characters in the source string:
+SELECT to_timestamp('19971', 'YYYYMMDD');
+ERROR: source string too short for "MM" formatting field
+DETAIL: Field requires 2 characters, but only 1 remain.
+HINT: If your source string is not fixed-width, try using the "FM" modifier.
+-- Insufficient digit characters for a single node:
+SELECT to_timestamp('19971)24', 'YYYYMMDD');
+ERROR: invalid value "1)" for "MM"
+DETAIL: Field requires 2 characters, but only 1 could be parsed.
+HINT: If your source string is not fixed-width, try using the "FM" modifier.
+-- We don't accept full-length day or month names if short form is specified:
+SELECT to_timestamp('Friday 1-January-1999', 'DY DD MON YYYY');
+ERROR: invalid value "da" for "DD"
+DETAIL: Value must be an integer.
+SELECT to_timestamp('Fri 1-January-1999', 'DY DD MON YYYY');
+ERROR: invalid value "uary" for "YYYY"
+DETAIL: Value must be an integer.
+SELECT to_timestamp('Fri 1-Jan-1999', 'DY DD MON YYYY'); -- ok
+ to_timestamp
+------------------------------
+ Fri Jan 01 00:00:00 1999 PST
+(1 row)
+
+-- Value clobbering:
+SELECT to_timestamp('1997-11-Jan-16', 'YYYY-MM-Mon-DD');
+ERROR: conflicting values for "Mon" field in formatting string
+DETAIL: This value contradicts a previous setting for the same field type.
+-- Non-numeric input:
+SELECT to_timestamp('199711xy', 'YYYYMMDD');
+ERROR: invalid value "xy" for "DD"
+DETAIL: Value must be an integer.
+-- Input that doesn't fit in an int:
+SELECT to_timestamp('10000000000', 'FMYYYY');
+ERROR: value for "YYYY" in source string is out of range
+DETAIL: Value must be in the range -2147483648 to 2147483647.
+-- Out-of-range and not-quite-out-of-range fields:
+SELECT to_timestamp('2016-06-13 25:00:00', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-06-13 25:00:00"
+SELECT to_timestamp('2016-06-13 15:60:00', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-06-13 15:60:00"
+SELECT to_timestamp('2016-06-13 15:50:60', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-06-13 15:50:60"
+SELECT to_timestamp('2016-06-13 15:50:55', 'YYYY-MM-DD HH24:MI:SS'); -- ok
+ to_timestamp
+------------------------------
+ Mon Jun 13 15:50:55 2016 PDT
+(1 row)
+
+SELECT to_timestamp('2016-06-13 15:50:55', 'YYYY-MM-DD HH:MI:SS');
+ERROR: hour "15" is invalid for the 12-hour clock
+HINT: Use the 24-hour clock, or give an hour between 1 and 12.
+SELECT to_timestamp('2016-13-01 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-13-01 15:50:55"
+SELECT to_timestamp('2016-02-30 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2016-02-30 15:50:55"
+SELECT to_timestamp('2016-02-29 15:50:55', 'YYYY-MM-DD HH24:MI:SS'); -- ok
+ to_timestamp
+------------------------------
+ Mon Feb 29 15:50:55 2016 PST
+(1 row)
+
+SELECT to_timestamp('2015-02-29 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+ERROR: date/time field value out of range: "2015-02-29 15:50:55"
+SELECT to_timestamp('2015-02-11 86000', 'YYYY-MM-DD SSSS'); -- ok
+ to_timestamp
+------------------------------
+ Wed Feb 11 23:53:20 2015 PST
+(1 row)
+
+SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSS');
+ERROR: date/time field value out of range: "2015-02-11 86400"
+SELECT to_timestamp('2015-02-11 86000', 'YYYY-MM-DD SSSSS'); -- ok
+ to_timestamp
+------------------------------
+ Wed Feb 11 23:53:20 2015 PST
+(1 row)
+
+SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSSS');
+ERROR: date/time field value out of range: "2015-02-11 86400"
+SELECT to_date('2016-13-10', 'YYYY-MM-DD');
+ERROR: date/time field value out of range: "2016-13-10"
+SELECT to_date('2016-02-30', 'YYYY-MM-DD');
+ERROR: date/time field value out of range: "2016-02-30"
+SELECT to_date('2016-02-29', 'YYYY-MM-DD'); -- ok
+ to_date
+------------
+ 02-29-2016
+(1 row)
+
+SELECT to_date('2015-02-29', 'YYYY-MM-DD');
+ERROR: date/time field value out of range: "2015-02-29"
+SELECT to_date('2015 365', 'YYYY DDD'); -- ok
+ to_date
+------------
+ 12-31-2015
+(1 row)
+
+SELECT to_date('2015 366', 'YYYY DDD');
+ERROR: date/time field value out of range: "2015 366"
+SELECT to_date('2016 365', 'YYYY DDD'); -- ok
+ to_date
+------------
+ 12-30-2016
+(1 row)
+
+SELECT to_date('2016 366', 'YYYY DDD'); -- ok
+ to_date
+------------
+ 12-31-2016
+(1 row)
+
+SELECT to_date('2016 367', 'YYYY DDD');
+ERROR: date/time field value out of range: "2016 367"
+SELECT to_date('0000-02-01','YYYY-MM-DD'); -- allowed, though it shouldn't be
+ to_date
+---------------
+ 02-01-0001 BC
+(1 row)
+
+--
+-- Check behavior with SQL-style fixed-GMT-offset time zone (cf bug #8572)
+--
+SET TIME ZONE 'America/New_York';
+SET TIME ZONE '-1.5';
+SHOW TIME ZONE;
+ TimeZone
+----------------
+ <-01:30>+01:30
+(1 row)
+
+SELECT '2012-12-12 12:00'::timestamptz;
+ timestamptz
+---------------------------------
+ Wed Dec 12 12:00:00 2012 -01:30
+(1 row)
+
+SELECT '2012-12-12 12:00 America/New_York'::timestamptz;
+ timestamptz
+---------------------------------
+ Wed Dec 12 15:30:00 2012 -01:30
+(1 row)
+
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD HH:MI:SS TZ');
+ to_char
+----------------------------
+ 2012-12-12 12:00:00 -01:30
+(1 row)
+
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSS');
+ to_char
+------------------
+ 2012-12-12 43200
+(1 row)
+
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSSS');
+ to_char
+------------------
+ 2012-12-12 43200
+(1 row)
+
+RESET TIME ZONE;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/horology.sql b/ydb/library/yql/tests/postgresql/original/cases/horology.sql
new file mode 100644
index 0000000000..78091112ca
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/horology.sql
@@ -0,0 +1,570 @@
+--
+-- HOROLOGY
+--
+SET DateStyle = 'Postgres, MDY';
+
+SHOW TimeZone; -- Many of these tests depend on the prevailing setting
+
+--
+-- Test various input formats
+--
+SELECT timestamp with time zone '20011227 040506+08';
+SELECT timestamp with time zone '20011227 040506-08';
+SELECT timestamp with time zone '20011227 040506.789+08';
+SELECT timestamp with time zone '20011227 040506.789-08';
+SELECT timestamp with time zone '20011227T040506+08';
+SELECT timestamp with time zone '20011227T040506-08';
+SELECT timestamp with time zone '20011227T040506.789+08';
+SELECT timestamp with time zone '20011227T040506.789-08';
+SELECT timestamp with time zone '2001-12-27 04:05:06.789-08';
+SELECT timestamp with time zone '2001.12.27 04:05:06.789-08';
+SELECT timestamp with time zone '2001/12/27 04:05:06.789-08';
+SELECT timestamp with time zone '12/27/2001 04:05:06.789-08';
+-- should fail in mdy mode:
+SELECT timestamp with time zone '27/12/2001 04:05:06.789-08';
+set datestyle to dmy;
+SELECT timestamp with time zone '27/12/2001 04:05:06.789-08';
+reset datestyle;
+SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789+08';
+SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789-08';
+SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789+08';
+SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08';
+SELECT timestamp with time zone 'J2452271+08';
+SELECT timestamp with time zone 'J2452271-08';
+SELECT timestamp with time zone 'J2452271.5+08';
+SELECT timestamp with time zone 'J2452271.5-08';
+SELECT timestamp with time zone 'J2452271 04:05:06+08';
+SELECT timestamp with time zone 'J2452271 04:05:06-08';
+SELECT timestamp with time zone 'J2452271T040506+08';
+SELECT timestamp with time zone 'J2452271T040506-08';
+SELECT timestamp with time zone 'J2452271T040506.789+08';
+SELECT timestamp with time zone 'J2452271T040506.789-08';
+-- German/European-style dates with periods as delimiters
+SELECT timestamp with time zone '12.27.2001 04:05:06.789+08';
+SELECT timestamp with time zone '12.27.2001 04:05:06.789-08';
+SET DateStyle = 'German';
+SELECT timestamp with time zone '27.12.2001 04:05:06.789+08';
+SELECT timestamp with time zone '27.12.2001 04:05:06.789-08';
+SET DateStyle = 'ISO';
+-- As of 7.4, allow time without time zone having a time zone specified
+SELECT time without time zone '040506.789+08';
+SELECT time without time zone '040506.789-08';
+SELECT time without time zone 'T040506.789+08';
+SELECT time without time zone 'T040506.789-08';
+SELECT time with time zone '040506.789+08';
+SELECT time with time zone '040506.789-08';
+SELECT time with time zone 'T040506.789+08';
+SELECT time with time zone 'T040506.789-08';
+SELECT time with time zone 'T040506.789 +08';
+SELECT time with time zone 'T040506.789 -08';
+SET DateStyle = 'Postgres, MDY';
+-- Check Julian dates BC
+SELECT date 'J1520447' AS "Confucius' Birthday";
+SELECT date 'J0' AS "Julian Epoch";
+
+--
+-- date, time arithmetic
+--
+
+SELECT date '1981-02-03' + time '04:05:06' AS "Date + Time";
+SELECT date '1991-02-03' + time with time zone '04:05:06 PST' AS "Date + Time PST";
+SELECT date '2001-02-03' + time with time zone '04:05:06 UTC' AS "Date + Time UTC";
+SELECT date '1991-02-03' + interval '2 years' AS "Add Two Years";
+SELECT date '2001-12-13' - interval '2 years' AS "Subtract Two Years";
+-- subtract time from date should not make sense; use interval instead
+SELECT date '1991-02-03' - time '04:05:06' AS "Subtract Time";
+SELECT date '1991-02-03' - time with time zone '04:05:06 UTC' AS "Subtract Time UTC";
+
+--
+-- timestamp, interval arithmetic
+--
+
+SELECT timestamp without time zone '1996-03-01' - interval '1 second' AS "Feb 29";
+SELECT timestamp without time zone '1999-03-01' - interval '1 second' AS "Feb 28";
+SELECT timestamp without time zone '2000-03-01' - interval '1 second' AS "Feb 29";
+SELECT timestamp without time zone '1999-12-01' + interval '1 month - 1 second' AS "Dec 31";
+SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '106000000 days' AS "Feb 23, 285506";
+SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '107000000 days' AS "Jan 20, 288244";
+SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '109203489 days' AS "Dec 31, 294276";
+SELECT timestamp without time zone '12/31/294276' - timestamp without time zone '12/23/1999' AS "106751991 Days";
+
+-- Shorthand values
+-- Not directly usable for regression testing since these are not constants.
+-- So, just try to test parser and hope for the best - thomas 97/04/26
+SELECT (timestamp without time zone 'today' = (timestamp without time zone 'yesterday' + interval '1 day')) as "True";
+SELECT (timestamp without time zone 'today' = (timestamp without time zone 'tomorrow' - interval '1 day')) as "True";
+SELECT (timestamp without time zone 'today 10:30' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True";
+SELECT (timestamp without time zone '10:30 today' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True";
+SELECT (timestamp without time zone 'tomorrow' = (timestamp without time zone 'yesterday' + interval '2 days')) as "True";
+SELECT (timestamp without time zone 'tomorrow 16:00:00' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True";
+SELECT (timestamp without time zone '16:00:00 tomorrow' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True";
+SELECT (timestamp without time zone 'yesterday 12:34:56' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True";
+SELECT (timestamp without time zone '12:34:56 yesterday' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True";
+SELECT (timestamp without time zone 'tomorrow' > 'now') as "True";
+
+-- Convert from date and time to timestamp
+-- This test used to be timestamp(date,time) but no longer allowed by grammar
+-- to enable support for SQL99 timestamp type syntax.
+SELECT date '1994-01-01' + time '11:00' AS "Jan_01_1994_11am";
+SELECT date '1994-01-01' + time '10:00' AS "Jan_01_1994_10am";
+SELECT date '1994-01-01' + timetz '11:00-5' AS "Jan_01_1994_8am";
+SELECT timestamptz(date '1994-01-01', time with time zone '11:00-5') AS "Jan_01_1994_8am";
+
+SELECT d1 + interval '1 year' AS one_year FROM TIMESTAMP_TBL;
+SELECT d1 - interval '1 year' AS one_year FROM TIMESTAMP_TBL;
+
+SELECT timestamp with time zone '1996-03-01' - interval '1 second' AS "Feb 29";
+SELECT timestamp with time zone '1999-03-01' - interval '1 second' AS "Feb 28";
+SELECT timestamp with time zone '2000-03-01' - interval '1 second' AS "Feb 29";
+SELECT timestamp with time zone '1999-12-01' + interval '1 month - 1 second' AS "Dec 31";
+
+SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True";
+SELECT (timestamp with time zone 'today' = (timestamp with time zone 'tomorrow' - interval '1 day')) as "True";
+SELECT (timestamp with time zone 'tomorrow' = (timestamp with time zone 'yesterday' + interval '2 days')) as "True";
+SELECT (timestamp with time zone 'tomorrow' > 'now') as "True";
+
+-- timestamp with time zone, interval arithmetic around DST change
+-- (just for fun, let's use an intentionally nonstandard POSIX zone spec)
+SET TIME ZONE 'CST7CDT,M4.1.0,M10.5.0';
+SELECT timestamp with time zone '2005-04-02 12:00-07' + interval '1 day' as "Apr 3, 12:00";
+SELECT timestamp with time zone '2005-04-02 12:00-07' + interval '24 hours' as "Apr 3, 13:00";
+SELECT timestamp with time zone '2005-04-03 12:00-06' - interval '1 day' as "Apr 2, 12:00";
+SELECT timestamp with time zone '2005-04-03 12:00-06' - interval '24 hours' as "Apr 2, 11:00";
+RESET TIME ZONE;
+
+
+SELECT timestamptz(date '1994-01-01', time '11:00') AS "Jan_01_1994_10am";
+SELECT timestamptz(date '1994-01-01', time '10:00') AS "Jan_01_1994_9am";
+SELECT timestamptz(date '1994-01-01', time with time zone '11:00-8') AS "Jan_01_1994_11am";
+SELECT timestamptz(date '1994-01-01', time with time zone '10:00-8') AS "Jan_01_1994_10am";
+SELECT timestamptz(date '1994-01-01', time with time zone '11:00-5') AS "Jan_01_1994_8am";
+
+SELECT d1 + interval '1 year' AS one_year FROM TIMESTAMPTZ_TBL;
+SELECT d1 - interval '1 year' AS one_year FROM TIMESTAMPTZ_TBL;
+
+--
+-- time, interval arithmetic
+--
+
+SELECT CAST(time '01:02' AS interval) AS "+01:02";
+SELECT CAST(interval '02:03' AS time) AS "02:03:00";
+SELECT time '01:30' + interval '02:01' AS "03:31:00";
+SELECT time '01:30' - interval '02:01' AS "23:29:00";
+SELECT time '02:30' + interval '36:01' AS "14:31:00";
+SELECT time '03:30' + interval '1 month 04:01' AS "07:31:00";
+SELECT CAST(time with time zone '01:02-08' AS interval) AS "+00:01";
+SELECT CAST(interval '02:03' AS time with time zone) AS "02:03:00-08";
+SELECT time with time zone '01:30-08' - interval '02:01' AS "23:29:00-08";
+SELECT time with time zone '02:30-08' + interval '36:01' AS "14:31:00-08";
+
+-- These two tests cannot be used because they default to current timezone,
+-- which may be either -08 or -07 depending on the time of year.
+-- SELECT time with time zone '01:30' + interval '02:01' AS "03:31:00-08";
+-- SELECT time with time zone '03:30' + interval '1 month 04:01' AS "07:31:00-08";
+-- Try the following two tests instead, as a poor substitute
+
+SELECT CAST(CAST(date 'today' + time with time zone '05:30'
+ + interval '02:01' AS time with time zone) AS time) AS "07:31:00";
+
+SELECT CAST(cast(date 'today' + time with time zone '03:30'
+ + interval '1 month 04:01' as timestamp without time zone) AS time) AS "07:31:00";
+
+SELECT t.d1 AS t, i.f1 AS i, t.d1 + i.f1 AS "add", t.d1 - i.f1 AS "subtract"
+ FROM TIMESTAMP_TBL t, INTERVAL_TBL i
+ WHERE t.d1 BETWEEN '1990-01-01' AND '2001-01-01'
+ AND i.f1 BETWEEN '00:00' AND '23:00'
+ ORDER BY 1,2;
+
+SELECT t.f1 AS t, i.f1 AS i, t.f1 + i.f1 AS "add", t.f1 - i.f1 AS "subtract"
+ FROM TIME_TBL t, INTERVAL_TBL i
+ ORDER BY 1,2;
+
+SELECT t.f1 AS t, i.f1 AS i, t.f1 + i.f1 AS "add", t.f1 - i.f1 AS "subtract"
+ FROM TIMETZ_TBL t, INTERVAL_TBL i
+ ORDER BY 1,2;
+
+-- SQL9x OVERLAPS operator
+-- test with time zone
+SELECT (timestamp with time zone '2000-11-27', timestamp with time zone '2000-11-28')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "True";
+
+SELECT (timestamp with time zone '2000-11-26', timestamp with time zone '2000-11-27')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "False";
+
+SELECT (timestamp with time zone '2000-11-27', timestamp with time zone '2000-11-28')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', interval '1 day') AS "True";
+
+SELECT (timestamp with time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "False";
+
+SELECT (timestamp with time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp with time zone '2000-11-27', interval '12 hours') AS "True";
+
+SELECT (timestamp with time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp with time zone '2000-11-27 12:00', interval '12 hours') AS "False";
+
+-- test without time zone
+SELECT (timestamp without time zone '2000-11-27', timestamp without time zone '2000-11-28')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "True";
+
+SELECT (timestamp without time zone '2000-11-26', timestamp without time zone '2000-11-27')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "False";
+
+SELECT (timestamp without time zone '2000-11-27', timestamp without time zone '2000-11-28')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', interval '1 day') AS "True";
+
+SELECT (timestamp without time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "False";
+
+SELECT (timestamp without time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp without time zone '2000-11-27', interval '12 hours') AS "True";
+
+SELECT (timestamp without time zone '2000-11-27', interval '12 hours')
+ OVERLAPS (timestamp without time zone '2000-11-27 12:00', interval '12 hours') AS "False";
+
+-- test time and interval
+SELECT (time '00:00', time '01:00')
+ OVERLAPS (time '00:30', time '01:30') AS "True";
+
+SELECT (time '00:00', interval '1 hour')
+ OVERLAPS (time '00:30', interval '1 hour') AS "True";
+
+SELECT (time '00:00', interval '1 hour')
+ OVERLAPS (time '01:30', interval '1 hour') AS "False";
+
+-- SQL99 seems to want this to be false (and we conform to the spec).
+-- istm that this *should* return true, on the theory that time
+-- intervals can wrap around the day boundary - thomas 2001-09-25
+SELECT (time '00:00', interval '1 hour')
+ OVERLAPS (time '01:30', interval '1 day') AS "False";
+
+CREATE TABLE TEMP_TIMESTAMP (f1 timestamp with time zone);
+
+-- get some candidate input values
+
+INSERT INTO TEMP_TIMESTAMP (f1)
+ SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 BETWEEN '13-jun-1957' AND '1-jan-1997'
+ OR d1 BETWEEN '1-jan-1999' AND '1-jan-2010';
+
+SELECT f1 AS "timestamp"
+ FROM TEMP_TIMESTAMP
+ ORDER BY "timestamp";
+
+SELECT d.f1 AS "timestamp", t.f1 AS "interval", d.f1 + t.f1 AS plus
+ FROM TEMP_TIMESTAMP d, INTERVAL_TBL t
+ ORDER BY plus, "timestamp", "interval";
+
+SELECT d.f1 AS "timestamp", t.f1 AS "interval", d.f1 - t.f1 AS minus
+ FROM TEMP_TIMESTAMP d, INTERVAL_TBL t
+ WHERE isfinite(d.f1)
+ ORDER BY minus, "timestamp", "interval";
+
+SELECT d.f1 AS "timestamp",
+ timestamp with time zone '1980-01-06 00:00 GMT' AS gpstime_zero,
+ d.f1 - timestamp with time zone '1980-01-06 00:00 GMT' AS difference
+ FROM TEMP_TIMESTAMP d
+ ORDER BY difference;
+
+SELECT d1.f1 AS timestamp1, d2.f1 AS timestamp2, d1.f1 - d2.f1 AS difference
+ FROM TEMP_TIMESTAMP d1, TEMP_TIMESTAMP d2
+ ORDER BY timestamp1, timestamp2, difference;
+
+--
+-- Conversions
+--
+
+SELECT f1 AS "timestamp", date(f1) AS date
+ FROM TEMP_TIMESTAMP
+ WHERE f1 <> timestamp 'now'
+ ORDER BY date, "timestamp";
+
+DROP TABLE TEMP_TIMESTAMP;
+
+--
+-- Comparisons between datetime types, especially overflow cases
+---
+
+SELECT '2202020-10-05'::date::timestamp; -- fail
+SELECT '2202020-10-05'::date > '2020-10-05'::timestamp as t;
+SELECT '2020-10-05'::timestamp > '2202020-10-05'::date as f;
+
+SELECT '2202020-10-05'::date::timestamptz; -- fail
+SELECT '2202020-10-05'::date > '2020-10-05'::timestamptz as t;
+SELECT '2020-10-05'::timestamptz > '2202020-10-05'::date as f;
+
+-- This conversion may work depending on timezone
+SELECT '4714-11-24 BC'::date::timestamptz;
+SET TimeZone = 'UTC-2';
+SELECT '4714-11-24 BC'::date::timestamptz; -- fail
+
+SELECT '4714-11-24 BC'::date < '2020-10-05'::timestamptz as t;
+SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::date as t;
+
+SELECT '4714-11-24 BC'::timestamp < '2020-10-05'::timestamptz as t;
+SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::timestamp as t;
+
+RESET TimeZone;
+
+--
+-- Formats
+--
+
+SET DateStyle TO 'US,Postgres';
+
+SHOW DateStyle;
+
+SELECT d1 AS us_postgres FROM TIMESTAMP_TBL;
+
+SET DateStyle TO 'US,ISO';
+
+SELECT d1 AS us_iso FROM TIMESTAMP_TBL;
+
+SET DateStyle TO 'US,SQL';
+
+SHOW DateStyle;
+
+SELECT d1 AS us_sql FROM TIMESTAMP_TBL;
+
+SET DateStyle TO 'European,Postgres';
+
+SHOW DateStyle;
+
+INSERT INTO TIMESTAMP_TBL VALUES('13/06/1957');
+
+SELECT count(*) as one FROM TIMESTAMP_TBL WHERE d1 = 'Jun 13 1957';
+
+SELECT d1 AS european_postgres FROM TIMESTAMP_TBL;
+
+SET DateStyle TO 'European,ISO';
+
+SHOW DateStyle;
+
+SELECT d1 AS european_iso FROM TIMESTAMP_TBL;
+
+SET DateStyle TO 'European,SQL';
+
+SHOW DateStyle;
+
+SELECT d1 AS european_sql FROM TIMESTAMP_TBL;
+
+RESET DateStyle;
+
+--
+-- to_timestamp()
+--
+
+SELECT to_timestamp('0097/Feb/16 --> 08:14:30', 'YYYY/Mon/DD --> HH:MI:SS');
+
+SELECT to_timestamp('97/2/16 8:14:30', 'FMYYYY/FMMM/FMDD FMHH:FMMI:FMSS');
+
+SELECT to_timestamp('2011$03!18 23_38_15', 'YYYY-MM-DD HH24:MI:SS');
+
+SELECT to_timestamp('1985 January 12', 'YYYY FMMonth DD');
+
+SELECT to_timestamp('1985 FMMonth 12', 'YYYY "FMMonth" DD');
+
+SELECT to_timestamp('1985 \ 12', 'YYYY \\ DD');
+
+SELECT to_timestamp('My birthday-> Year: 1976, Month: May, Day: 16',
+ '"My birthday-> Year:" YYYY, "Month:" FMMonth, "Day:" DD');
+
+SELECT to_timestamp('1,582nd VIII 21', 'Y,YYYth FMRM DD');
+
+SELECT to_timestamp('15 "text between quote marks" 98 54 45',
+ E'HH24 "\\"text between quote marks\\"" YY MI SS');
+
+SELECT to_timestamp('05121445482000', 'MMDDHH24MISSYYYY');
+
+SELECT to_timestamp('2000January09Sunday', 'YYYYFMMonthDDFMDay');
+
+SELECT to_timestamp('97/Feb/16', 'YYMonDD');
+
+SELECT to_timestamp('97/Feb/16', 'YY:Mon:DD');
+
+SELECT to_timestamp('97/Feb/16', 'FXYY:Mon:DD');
+
+SELECT to_timestamp('97/Feb/16', 'FXYY/Mon/DD');
+
+SELECT to_timestamp('19971116', 'YYYYMMDD');
+
+SELECT to_timestamp('20000-1116', 'YYYY-MMDD');
+
+SELECT to_timestamp('1997 AD 11 16', 'YYYY BC MM DD');
+SELECT to_timestamp('1997 BC 11 16', 'YYYY BC MM DD');
+
+SELECT to_timestamp('1997 A.D. 11 16', 'YYYY B.C. MM DD');
+SELECT to_timestamp('1997 B.C. 11 16', 'YYYY B.C. MM DD');
+
+SELECT to_timestamp('9-1116', 'Y-MMDD');
+
+SELECT to_timestamp('95-1116', 'YY-MMDD');
+
+SELECT to_timestamp('995-1116', 'YYY-MMDD');
+
+SELECT to_timestamp('2005426', 'YYYYWWD');
+
+SELECT to_timestamp('2005300', 'YYYYDDD');
+
+SELECT to_timestamp('2005527', 'IYYYIWID');
+
+SELECT to_timestamp('005527', 'IYYIWID');
+
+SELECT to_timestamp('05527', 'IYIWID');
+
+SELECT to_timestamp('5527', 'IIWID');
+
+SELECT to_timestamp('2005364', 'IYYYIDDD');
+
+SELECT to_timestamp('20050302', 'YYYYMMDD');
+
+SELECT to_timestamp('2005 03 02', 'YYYYMMDD');
+
+SELECT to_timestamp(' 2005 03 02', 'YYYYMMDD');
+
+SELECT to_timestamp(' 20050302', 'YYYYMMDD');
+
+SELECT to_timestamp('2011-12-18 11:38 AM', 'YYYY-MM-DD HH12:MI PM');
+SELECT to_timestamp('2011-12-18 11:38 PM', 'YYYY-MM-DD HH12:MI PM');
+
+SELECT to_timestamp('2011-12-18 11:38 A.M.', 'YYYY-MM-DD HH12:MI P.M.');
+SELECT to_timestamp('2011-12-18 11:38 P.M.', 'YYYY-MM-DD HH12:MI P.M.');
+
+SELECT to_timestamp('2011-12-18 11:38 +05', 'YYYY-MM-DD HH12:MI TZH');
+SELECT to_timestamp('2011-12-18 11:38 -05', 'YYYY-MM-DD HH12:MI TZH');
+SELECT to_timestamp('2011-12-18 11:38 +05:20', 'YYYY-MM-DD HH12:MI TZH:TZM');
+SELECT to_timestamp('2011-12-18 11:38 -05:20', 'YYYY-MM-DD HH12:MI TZH:TZM');
+SELECT to_timestamp('2011-12-18 11:38 20', 'YYYY-MM-DD HH12:MI TZM');
+
+SELECT to_timestamp('2011-12-18 11:38 PST', 'YYYY-MM-DD HH12:MI TZ'); -- NYI
+
+SELECT to_timestamp('2018-11-02 12:34:56.025', 'YYYY-MM-DD HH24:MI:SS.MS');
+
+SELECT i, to_timestamp('2018-11-02 12:34:56', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+SELECT i, to_timestamp('2018-11-02 12:34:56.1', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+SELECT i, to_timestamp('2018-11-02 12:34:56.12', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+SELECT i, to_timestamp('2018-11-02 12:34:56.123', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+SELECT i, to_timestamp('2018-11-02 12:34:56.1234', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+SELECT i, to_timestamp('2018-11-02 12:34:56.12345', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+SELECT i, to_timestamp('2018-11-02 12:34:56.123456', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+SELECT i, to_timestamp('2018-11-02 12:34:56.123456789', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i;
+
+SELECT to_date('1 4 1902', 'Q MM YYYY'); -- Q is ignored
+SELECT to_date('3 4 21 01', 'W MM CC YY');
+SELECT to_date('2458872', 'J');
+
+--
+-- Check handling of BC dates
+--
+
+SELECT to_date('44-02-01 BC','YYYY-MM-DD BC');
+SELECT to_date('-44-02-01','YYYY-MM-DD');
+SELECT to_date('-44-02-01 BC','YYYY-MM-DD BC');
+SELECT to_timestamp('44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC');
+SELECT to_timestamp('-44-02-01 11:12:13','YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('-44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC');
+
+--
+-- Check handling of multiple spaces in format and/or input
+--
+
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS');
+
+SELECT to_timestamp('2000+ JUN', 'YYYY/MON');
+SELECT to_timestamp(' 2000 +JUN', 'YYYY/MON');
+SELECT to_timestamp(' 2000 +JUN', 'YYYY//MON');
+SELECT to_timestamp('2000 +JUN', 'YYYY//MON');
+SELECT to_timestamp('2000 + JUN', 'YYYY MON');
+SELECT to_timestamp('2000 ++ JUN', 'YYYY MON');
+SELECT to_timestamp('2000 + + JUN', 'YYYY MON');
+SELECT to_timestamp('2000 + + JUN', 'YYYY MON');
+SELECT to_timestamp('2000 -10', 'YYYY TZH');
+SELECT to_timestamp('2000 -10', 'YYYY TZH');
+
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+SELECT to_date('2011 12 18', 'YYYY MM DD');
+
+SELECT to_date('2011 12 18', 'YYYYxMMxDD');
+SELECT to_date('2011x 12x 18', 'YYYYxMMxDD');
+SELECT to_date('2011 x12 x18', 'YYYYxMMxDD');
+
+--
+-- Check errors for some incorrect usages of to_timestamp() and to_date()
+--
+
+-- Mixture of date conventions (ISO week and Gregorian):
+SELECT to_timestamp('2005527', 'YYYYIWID');
+
+-- Insufficient characters in the source string:
+SELECT to_timestamp('19971', 'YYYYMMDD');
+
+-- Insufficient digit characters for a single node:
+SELECT to_timestamp('19971)24', 'YYYYMMDD');
+
+-- We don't accept full-length day or month names if short form is specified:
+SELECT to_timestamp('Friday 1-January-1999', 'DY DD MON YYYY');
+SELECT to_timestamp('Fri 1-January-1999', 'DY DD MON YYYY');
+SELECT to_timestamp('Fri 1-Jan-1999', 'DY DD MON YYYY'); -- ok
+
+-- Value clobbering:
+SELECT to_timestamp('1997-11-Jan-16', 'YYYY-MM-Mon-DD');
+
+-- Non-numeric input:
+SELECT to_timestamp('199711xy', 'YYYYMMDD');
+
+-- Input that doesn't fit in an int:
+SELECT to_timestamp('10000000000', 'FMYYYY');
+
+-- Out-of-range and not-quite-out-of-range fields:
+SELECT to_timestamp('2016-06-13 25:00:00', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2016-06-13 15:60:00', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2016-06-13 15:50:60', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2016-06-13 15:50:55', 'YYYY-MM-DD HH24:MI:SS'); -- ok
+SELECT to_timestamp('2016-06-13 15:50:55', 'YYYY-MM-DD HH:MI:SS');
+SELECT to_timestamp('2016-13-01 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2016-02-30 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2016-02-29 15:50:55', 'YYYY-MM-DD HH24:MI:SS'); -- ok
+SELECT to_timestamp('2015-02-29 15:50:55', 'YYYY-MM-DD HH24:MI:SS');
+SELECT to_timestamp('2015-02-11 86000', 'YYYY-MM-DD SSSS'); -- ok
+SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSS');
+SELECT to_timestamp('2015-02-11 86000', 'YYYY-MM-DD SSSSS'); -- ok
+SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSSS');
+SELECT to_date('2016-13-10', 'YYYY-MM-DD');
+SELECT to_date('2016-02-30', 'YYYY-MM-DD');
+SELECT to_date('2016-02-29', 'YYYY-MM-DD'); -- ok
+SELECT to_date('2015-02-29', 'YYYY-MM-DD');
+SELECT to_date('2015 365', 'YYYY DDD'); -- ok
+SELECT to_date('2015 366', 'YYYY DDD');
+SELECT to_date('2016 365', 'YYYY DDD'); -- ok
+SELECT to_date('2016 366', 'YYYY DDD'); -- ok
+SELECT to_date('2016 367', 'YYYY DDD');
+SELECT to_date('0000-02-01','YYYY-MM-DD'); -- allowed, though it shouldn't be
+
+--
+-- Check behavior with SQL-style fixed-GMT-offset time zone (cf bug #8572)
+--
+
+SET TIME ZONE 'America/New_York';
+SET TIME ZONE '-1.5';
+
+SHOW TIME ZONE;
+
+SELECT '2012-12-12 12:00'::timestamptz;
+SELECT '2012-12-12 12:00 America/New_York'::timestamptz;
+
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD HH:MI:SS TZ');
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSS');
+SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSSS');
+
+RESET TIME ZONE;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/insert.out b/ydb/library/yql/tests/postgresql/original/cases/insert.out
new file mode 100644
index 0000000000..5063a3dc22
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/insert.out
@@ -0,0 +1,1004 @@
+--
+-- insert with DEFAULT in the target_list
+--
+create table inserttest (col1 int4, col2 int4 NOT NULL, col3 text default 'testing');
+insert into inserttest (col1, col2, col3) values (DEFAULT, DEFAULT, DEFAULT);
+ERROR: null value in column "col2" of relation "inserttest" violates not-null constraint
+DETAIL: Failing row contains (null, null, testing).
+insert into inserttest (col2, col3) values (3, DEFAULT);
+insert into inserttest (col1, col2, col3) values (DEFAULT, 5, DEFAULT);
+insert into inserttest values (DEFAULT, 5, 'test');
+insert into inserttest values (DEFAULT, 7);
+select * from inserttest;
+ col1 | col2 | col3
+------+------+---------
+ | 3 | testing
+ | 5 | testing
+ | 5 | test
+ | 7 | testing
+(4 rows)
+
+--
+-- insert with similar expression / target_list values (all fail)
+--
+insert into inserttest (col1, col2, col3) values (DEFAULT, DEFAULT);
+ERROR: INSERT has more target columns than expressions
+LINE 1: insert into inserttest (col1, col2, col3) values (DEFAULT, D...
+ ^
+insert into inserttest (col1, col2, col3) values (1, 2);
+ERROR: INSERT has more target columns than expressions
+LINE 1: insert into inserttest (col1, col2, col3) values (1, 2);
+ ^
+insert into inserttest (col1) values (1, 2);
+ERROR: INSERT has more expressions than target columns
+LINE 1: insert into inserttest (col1) values (1, 2);
+ ^
+insert into inserttest (col1) values (DEFAULT, DEFAULT);
+ERROR: INSERT has more expressions than target columns
+LINE 1: insert into inserttest (col1) values (DEFAULT, DEFAULT);
+ ^
+select * from inserttest;
+ col1 | col2 | col3
+------+------+---------
+ | 3 | testing
+ | 5 | testing
+ | 5 | test
+ | 7 | testing
+(4 rows)
+
+--
+-- VALUES test
+--
+insert into inserttest values(10, 20, '40'), (-1, 2, DEFAULT),
+ ((select 2), (select i from (values(3)) as foo (i)), 'values are fun!');
+select * from inserttest;
+ col1 | col2 | col3
+------+------+-----------------
+ | 3 | testing
+ | 5 | testing
+ | 5 | test
+ | 7 | testing
+ 10 | 20 | 40
+ -1 | 2 | testing
+ 2 | 3 | values are fun!
+(7 rows)
+
+--
+-- TOASTed value test
+--
+insert into inserttest values(30, 50, repeat('x', 10000));
+select col1, col2, char_length(col3) from inserttest;
+ col1 | col2 | char_length
+------+------+-------------
+ | 3 | 7
+ | 5 | 7
+ | 5 | 4
+ | 7 | 7
+ 10 | 20 | 2
+ -1 | 2 | 7
+ 2 | 3 | 15
+ 30 | 50 | 10000
+(8 rows)
+
+drop table inserttest;
+--
+-- tuple larger than fillfactor
+--
+CREATE TABLE large_tuple_test (a int, b text) WITH (fillfactor = 10);
+ALTER TABLE large_tuple_test ALTER COLUMN b SET STORAGE plain;
+-- create page w/ free space in range [nearlyEmptyFreeSpace, MaxHeapTupleSize)
+INSERT INTO large_tuple_test (select 1, NULL);
+-- should still fit on the page
+INSERT INTO large_tuple_test (select 2, repeat('a', 1000));
+SELECT pg_size_pretty(pg_relation_size('large_tuple_test'::regclass, 'main'));
+ pg_size_pretty
+----------------
+ 8192 bytes
+(1 row)
+
+-- add small record to the second page
+INSERT INTO large_tuple_test (select 3, NULL);
+-- now this tuple won't fit on the second page, but the insert should
+-- still succeed by extending the relation
+INSERT INTO large_tuple_test (select 4, repeat('a', 8126));
+DROP TABLE large_tuple_test;
+--
+-- check indirection (field/array assignment), cf bug #14265
+--
+-- these tests are aware that transformInsertStmt has 3 separate code paths
+--
+create type insert_test_type as (if1 int, if2 text[]);
+create table inserttest (f1 int, f2 int[],
+ f3 insert_test_type, f4 insert_test_type[]);
+insert into inserttest (f2[1], f2[2]) values (1,2);
+insert into inserttest (f2[1], f2[2]) values (3,4), (5,6);
+insert into inserttest (f2[1], f2[2]) select 7,8;
+insert into inserttest (f2[1], f2[2]) values (1,default); -- not supported
+ERROR: cannot set an array element to DEFAULT
+LINE 1: insert into inserttest (f2[1], f2[2]) values (1,default);
+ ^
+insert into inserttest (f3.if1, f3.if2) values (1,array['foo']);
+insert into inserttest (f3.if1, f3.if2) values (1,'{foo}'), (2,'{bar}');
+insert into inserttest (f3.if1, f3.if2) select 3, '{baz,quux}';
+insert into inserttest (f3.if1, f3.if2) values (1,default); -- not supported
+ERROR: cannot set a subfield to DEFAULT
+LINE 1: insert into inserttest (f3.if1, f3.if2) values (1,default);
+ ^
+insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar');
+insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar'), ('baz', 'quux');
+insert into inserttest (f3.if2[1], f3.if2[2]) select 'bear', 'beer';
+insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar');
+insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar'), ('baz', 'quux');
+insert into inserttest (f4[1].if2[1], f4[1].if2[2]) select 'bear', 'beer';
+select * from inserttest;
+ f1 | f2 | f3 | f4
+----+-------+------------------+------------------------
+ | {1,2} | |
+ | {3,4} | |
+ | {5,6} | |
+ | {7,8} | |
+ | | (1,{foo}) |
+ | | (1,{foo}) |
+ | | (2,{bar}) |
+ | | (3,"{baz,quux}") |
+ | | (,"{foo,bar}") |
+ | | (,"{foo,bar}") |
+ | | (,"{baz,quux}") |
+ | | (,"{bear,beer}") |
+ | | | {"(,\"{foo,bar}\")"}
+ | | | {"(,\"{foo,bar}\")"}
+ | | | {"(,\"{baz,quux}\")"}
+ | | | {"(,\"{bear,beer}\")"}
+(16 rows)
+
+-- also check reverse-listing
+create table inserttest2 (f1 bigint, f2 text);
+create rule irule1 as on insert to inserttest2 do also
+ insert into inserttest (f3.if2[1], f3.if2[2])
+ values (new.f1,new.f2);
+create rule irule2 as on insert to inserttest2 do also
+ insert into inserttest (f4[1].if1, f4[1].if2[2])
+ values (1,'fool'),(new.f1,new.f2);
+create rule irule3 as on insert to inserttest2 do also
+ insert into inserttest (f4[1].if1, f4[1].if2[2])
+ select new.f1, new.f2;
+\d+ inserttest2
+ Table "public.inserttest2"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+--------+-----------+----------+---------+----------+--------------+-------------
+ f1 | bigint | | | | plain | |
+ f2 | text | | | | extended | |
+Rules:
+ irule1 AS
+ ON INSERT TO inserttest2 DO INSERT INTO inserttest (f3.if2[1], f3.if2[2])
+ VALUES (new.f1, new.f2)
+ irule2 AS
+ ON INSERT TO inserttest2 DO INSERT INTO inserttest (f4[1].if1, f4[1].if2[2]) VALUES (1,'fool'::text), (new.f1,new.f2)
+ irule3 AS
+ ON INSERT TO inserttest2 DO INSERT INTO inserttest (f4[1].if1, f4[1].if2[2]) SELECT new.f1,
+ new.f2
+
+drop table inserttest2;
+drop table inserttest;
+drop type insert_test_type;
+-- direct partition inserts should check partition bound constraint
+create table range_parted (
+ a text,
+ b int
+) partition by range (a, (b+0));
+-- no partitions, so fail
+insert into range_parted values ('a', 11);
+ERROR: no partition of relation "range_parted" found for row
+DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, 11).
+create table part1 partition of range_parted for values from ('a', 1) to ('a', 10);
+create table part2 partition of range_parted for values from ('a', 10) to ('a', 20);
+create table part3 partition of range_parted for values from ('b', 1) to ('b', 10);
+create table part4 partition of range_parted for values from ('b', 10) to ('b', 20);
+-- fail
+insert into part1 values ('a', 11);
+ERROR: new row for relation "part1" violates partition constraint
+DETAIL: Failing row contains (a, 11).
+insert into part1 values ('b', 1);
+ERROR: new row for relation "part1" violates partition constraint
+DETAIL: Failing row contains (b, 1).
+-- ok
+insert into part1 values ('a', 1);
+-- fail
+insert into part4 values ('b', 21);
+ERROR: new row for relation "part4" violates partition constraint
+DETAIL: Failing row contains (b, 21).
+insert into part4 values ('a', 10);
+ERROR: new row for relation "part4" violates partition constraint
+DETAIL: Failing row contains (a, 10).
+-- ok
+insert into part4 values ('b', 10);
+-- fail (partition key a has a NOT NULL constraint)
+insert into part1 values (null);
+ERROR: new row for relation "part1" violates partition constraint
+DETAIL: Failing row contains (null, null).
+-- fail (expression key (b+0) cannot be null either)
+insert into part1 values (1);
+ERROR: new row for relation "part1" violates partition constraint
+DETAIL: Failing row contains (1, null).
+create table list_parted (
+ a text,
+ b int
+) partition by list (lower(a));
+create table part_aa_bb partition of list_parted FOR VALUES IN ('aa', 'bb');
+create table part_cc_dd partition of list_parted FOR VALUES IN ('cc', 'dd');
+create table part_null partition of list_parted FOR VALUES IN (null);
+-- fail
+insert into part_aa_bb values ('cc', 1);
+ERROR: new row for relation "part_aa_bb" violates partition constraint
+DETAIL: Failing row contains (cc, 1).
+insert into part_aa_bb values ('AAa', 1);
+ERROR: new row for relation "part_aa_bb" violates partition constraint
+DETAIL: Failing row contains (AAa, 1).
+insert into part_aa_bb values (null);
+ERROR: new row for relation "part_aa_bb" violates partition constraint
+DETAIL: Failing row contains (null, null).
+-- ok
+insert into part_cc_dd values ('cC', 1);
+insert into part_null values (null, 0);
+-- check in case of multi-level partitioned table
+create table part_ee_ff partition of list_parted for values in ('ee', 'ff') partition by range (b);
+create table part_ee_ff1 partition of part_ee_ff for values from (1) to (10);
+create table part_ee_ff2 partition of part_ee_ff for values from (10) to (20);
+-- test default partition
+create table part_default partition of list_parted default;
+-- Negative test: a row, which would fit in other partition, does not fit
+-- default partition, even when inserted directly
+insert into part_default values ('aa', 2);
+ERROR: new row for relation "part_default" violates partition constraint
+DETAIL: Failing row contains (aa, 2).
+insert into part_default values (null, 2);
+ERROR: new row for relation "part_default" violates partition constraint
+DETAIL: Failing row contains (null, 2).
+-- ok
+insert into part_default values ('Zz', 2);
+-- test if default partition works as expected for multi-level partitioned
+-- table as well as when default partition itself is further partitioned
+drop table part_default;
+create table part_xx_yy partition of list_parted for values in ('xx', 'yy') partition by list (a);
+create table part_xx_yy_p1 partition of part_xx_yy for values in ('xx');
+create table part_xx_yy_defpart partition of part_xx_yy default;
+create table part_default partition of list_parted default partition by range(b);
+create table part_default_p1 partition of part_default for values from (20) to (30);
+create table part_default_p2 partition of part_default for values from (30) to (40);
+-- fail
+insert into part_ee_ff1 values ('EE', 11);
+ERROR: new row for relation "part_ee_ff1" violates partition constraint
+DETAIL: Failing row contains (EE, 11).
+insert into part_default_p2 values ('gg', 43);
+ERROR: new row for relation "part_default_p2" violates partition constraint
+DETAIL: Failing row contains (gg, 43).
+-- fail (even the parent's, ie, part_ee_ff's partition constraint applies)
+insert into part_ee_ff1 values ('cc', 1);
+ERROR: new row for relation "part_ee_ff1" violates partition constraint
+DETAIL: Failing row contains (cc, 1).
+insert into part_default values ('gg', 43);
+ERROR: no partition of relation "part_default" found for row
+DETAIL: Partition key of the failing row contains (b) = (43).
+-- ok
+insert into part_ee_ff1 values ('ff', 1);
+insert into part_ee_ff2 values ('ff', 11);
+insert into part_default_p1 values ('cd', 25);
+insert into part_default_p2 values ('de', 35);
+insert into list_parted values ('ab', 21);
+insert into list_parted values ('xx', 1);
+insert into list_parted values ('yy', 2);
+select tableoid::regclass, * from list_parted;
+ tableoid | a | b
+--------------------+----+----
+ part_cc_dd | cC | 1
+ part_ee_ff1 | ff | 1
+ part_ee_ff2 | ff | 11
+ part_xx_yy_p1 | xx | 1
+ part_xx_yy_defpart | yy | 2
+ part_null | | 0
+ part_default_p1 | cd | 25
+ part_default_p1 | ab | 21
+ part_default_p2 | de | 35
+(9 rows)
+
+-- Check tuple routing for partitioned tables
+-- fail
+insert into range_parted values ('a', 0);
+ERROR: no partition of relation "range_parted" found for row
+DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, 0).
+-- ok
+insert into range_parted values ('a', 1);
+insert into range_parted values ('a', 10);
+-- fail
+insert into range_parted values ('a', 20);
+ERROR: no partition of relation "range_parted" found for row
+DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, 20).
+-- ok
+insert into range_parted values ('b', 1);
+insert into range_parted values ('b', 10);
+-- fail (partition key (b+0) is null)
+insert into range_parted values ('a');
+ERROR: no partition of relation "range_parted" found for row
+DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, null).
+-- Check default partition
+create table part_def partition of range_parted default;
+-- fail
+insert into part_def values ('b', 10);
+ERROR: new row for relation "part_def" violates partition constraint
+DETAIL: Failing row contains (b, 10).
+-- ok
+insert into part_def values ('c', 10);
+insert into range_parted values (null, null);
+insert into range_parted values ('a', null);
+insert into range_parted values (null, 19);
+insert into range_parted values ('b', 20);
+select tableoid::regclass, * from range_parted;
+ tableoid | a | b
+----------+---+----
+ part1 | a | 1
+ part1 | a | 1
+ part2 | a | 10
+ part3 | b | 1
+ part4 | b | 10
+ part4 | b | 10
+ part_def | c | 10
+ part_def | |
+ part_def | a |
+ part_def | | 19
+ part_def | b | 20
+(11 rows)
+
+-- ok
+insert into list_parted values (null, 1);
+insert into list_parted (a) values ('aA');
+-- fail (partition of part_ee_ff not found in both cases)
+insert into list_parted values ('EE', 0);
+ERROR: no partition of relation "part_ee_ff" found for row
+DETAIL: Partition key of the failing row contains (b) = (0).
+insert into part_ee_ff values ('EE', 0);
+ERROR: no partition of relation "part_ee_ff" found for row
+DETAIL: Partition key of the failing row contains (b) = (0).
+-- ok
+insert into list_parted values ('EE', 1);
+insert into part_ee_ff values ('EE', 10);
+select tableoid::regclass, * from list_parted;
+ tableoid | a | b
+--------------------+----+----
+ part_aa_bb | aA |
+ part_cc_dd | cC | 1
+ part_ee_ff1 | ff | 1
+ part_ee_ff1 | EE | 1
+ part_ee_ff2 | ff | 11
+ part_ee_ff2 | EE | 10
+ part_xx_yy_p1 | xx | 1
+ part_xx_yy_defpart | yy | 2
+ part_null | | 0
+ part_null | | 1
+ part_default_p1 | cd | 25
+ part_default_p1 | ab | 21
+ part_default_p2 | de | 35
+(13 rows)
+
+-- some more tests to exercise tuple-routing with multi-level partitioning
+create table part_gg partition of list_parted for values in ('gg') partition by range (b);
+create table part_gg1 partition of part_gg for values from (minvalue) to (1);
+create table part_gg2 partition of part_gg for values from (1) to (10) partition by range (b);
+create table part_gg2_1 partition of part_gg2 for values from (1) to (5);
+create table part_gg2_2 partition of part_gg2 for values from (5) to (10);
+create table part_ee_ff3 partition of part_ee_ff for values from (20) to (30) partition by range (b);
+create table part_ee_ff3_1 partition of part_ee_ff3 for values from (20) to (25);
+create table part_ee_ff3_2 partition of part_ee_ff3 for values from (25) to (30);
+truncate list_parted;
+insert into list_parted values ('aa'), ('cc');
+insert into list_parted select 'Ff', s.a from generate_series(1, 29) s(a);
+insert into list_parted select 'gg', s.a from generate_series(1, 9) s(a);
+insert into list_parted (b) values (1);
+select tableoid::regclass::text, a, min(b) as min_b, max(b) as max_b from list_parted group by 1, 2 order by 1;
+ tableoid | a | min_b | max_b
+---------------+----+-------+-------
+ part_aa_bb | aa | |
+ part_cc_dd | cc | |
+ part_ee_ff1 | Ff | 1 | 9
+ part_ee_ff2 | Ff | 10 | 19
+ part_ee_ff3_1 | Ff | 20 | 24
+ part_ee_ff3_2 | Ff | 25 | 29
+ part_gg2_1 | gg | 1 | 4
+ part_gg2_2 | gg | 5 | 9
+ part_null | | 1 | 1
+(9 rows)
+
+-- direct partition inserts should check hash partition bound constraint
+-- Use hand-rolled hash functions and operator classes to get predictable
+-- result on different machines. The hash function for int4 simply returns
+-- the sum of the values passed to it and the one for text returns the length
+-- of the non-empty string value passed to it or 0.
+create or replace function part_hashint4_noop(value int4, seed int8)
+returns int8 as $$
+select value + seed;
+$$ language sql immutable;
+create operator class part_test_int4_ops
+for type int4
+using hash as
+operator 1 =,
+function 2 part_hashint4_noop(int4, int8);
+create or replace function part_hashtext_length(value text, seed int8)
+RETURNS int8 AS $$
+select length(coalesce(value, ''))::int8
+$$ language sql immutable;
+create operator class part_test_text_ops
+for type text
+using hash as
+operator 1 =,
+function 2 part_hashtext_length(text, int8);
+create table hash_parted (
+ a int
+) partition by hash (a part_test_int4_ops);
+create table hpart0 partition of hash_parted for values with (modulus 4, remainder 0);
+create table hpart1 partition of hash_parted for values with (modulus 4, remainder 1);
+create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2);
+create table hpart3 partition of hash_parted for values with (modulus 4, remainder 3);
+insert into hash_parted values(generate_series(1,10));
+-- direct insert of values divisible by 4 - ok;
+insert into hpart0 values(12),(16);
+-- fail;
+insert into hpart0 values(11);
+ERROR: new row for relation "hpart0" violates partition constraint
+DETAIL: Failing row contains (11).
+-- 11 % 4 -> 3 remainder i.e. valid data for hpart3 partition
+insert into hpart3 values(11);
+-- view data
+select tableoid::regclass as part, a, a%4 as "remainder = a % 4"
+from hash_parted order by part;
+ part | a | remainder = a % 4
+--------+----+-------------------
+ hpart0 | 4 | 0
+ hpart0 | 8 | 0
+ hpart0 | 12 | 0
+ hpart0 | 16 | 0
+ hpart1 | 1 | 1
+ hpart1 | 5 | 1
+ hpart1 | 9 | 1
+ hpart2 | 2 | 2
+ hpart2 | 6 | 2
+ hpart2 | 10 | 2
+ hpart3 | 3 | 3
+ hpart3 | 7 | 3
+ hpart3 | 11 | 3
+(13 rows)
+
+-- test \d+ output on a table which has both partitioned and unpartitioned
+-- partitions
+\d+ list_parted
+ Partitioned table "public.list_parted"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition key: LIST (lower(a))
+Partitions: part_aa_bb FOR VALUES IN ('aa', 'bb'),
+ part_cc_dd FOR VALUES IN ('cc', 'dd'),
+ part_ee_ff FOR VALUES IN ('ee', 'ff'), PARTITIONED,
+ part_gg FOR VALUES IN ('gg'), PARTITIONED,
+ part_null FOR VALUES IN (NULL),
+ part_xx_yy FOR VALUES IN ('xx', 'yy'), PARTITIONED,
+ part_default DEFAULT, PARTITIONED
+
+-- cleanup
+drop table range_parted, list_parted;
+drop table hash_parted;
+-- test that a default partition added as the first partition accepts any value
+-- including null
+create table list_parted (a int) partition by list (a);
+create table part_default partition of list_parted default;
+\d+ part_default
+ Table "public.part_default"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ a | integer | | | | plain | |
+Partition of: list_parted DEFAULT
+No partition constraint
+
+insert into part_default values (null);
+insert into part_default values (1);
+insert into part_default values (-1);
+select tableoid::regclass, a from list_parted;
+ tableoid | a
+--------------+----
+ part_default |
+ part_default | 1
+ part_default | -1
+(3 rows)
+
+-- cleanup
+drop table list_parted;
+-- more tests for certain multi-level partitioning scenarios
+create table mlparted (a int, b int) partition by range (a, b);
+create table mlparted1 (b int not null, a int not null) partition by range ((b+0));
+create table mlparted11 (like mlparted1);
+alter table mlparted11 drop a;
+alter table mlparted11 add a int;
+alter table mlparted11 drop a;
+alter table mlparted11 add a int not null;
+-- attnum for key attribute 'a' is different in mlparted, mlparted1, and mlparted11
+select attrelid::regclass, attname, attnum
+from pg_attribute
+where attname = 'a'
+ and (attrelid = 'mlparted'::regclass
+ or attrelid = 'mlparted1'::regclass
+ or attrelid = 'mlparted11'::regclass)
+order by attrelid::regclass::text;
+ attrelid | attname | attnum
+------------+---------+--------
+ mlparted | a | 1
+ mlparted1 | a | 2
+ mlparted11 | a | 4
+(3 rows)
+
+alter table mlparted1 attach partition mlparted11 for values from (2) to (5);
+alter table mlparted attach partition mlparted1 for values from (1, 2) to (1, 10);
+-- check that "(1, 2)" is correctly routed to mlparted11.
+insert into mlparted values (1, 2);
+select tableoid::regclass, * from mlparted;
+ tableoid | a | b
+------------+---+---
+ mlparted11 | 1 | 2
+(1 row)
+
+-- check that proper message is shown after failure to route through mlparted1
+insert into mlparted (a, b) values (1, 5);
+ERROR: no partition of relation "mlparted1" found for row
+DETAIL: Partition key of the failing row contains ((b + 0)) = (5).
+truncate mlparted;
+alter table mlparted add constraint check_b check (b = 3);
+-- have a BR trigger modify the row such that the check_b is violated
+create function mlparted11_trig_fn()
+returns trigger AS
+$$
+begin
+ NEW.b := 4;
+ return NEW;
+end;
+$$
+language plpgsql;
+create trigger mlparted11_trig before insert ON mlparted11
+ for each row execute procedure mlparted11_trig_fn();
+-- check that the correct row is shown when constraint check_b fails after
+-- "(1, 2)" is routed to mlparted11 (actually "(1, 4)" would be shown due
+-- to the BR trigger mlparted11_trig_fn)
+insert into mlparted values (1, 2);
+ERROR: new row for relation "mlparted11" violates check constraint "check_b"
+DETAIL: Failing row contains (1, 4).
+drop trigger mlparted11_trig on mlparted11;
+drop function mlparted11_trig_fn();
+-- check that inserting into an internal partition successfully results in
+-- checking its partition constraint before inserting into the leaf partition
+-- selected by tuple-routing
+insert into mlparted1 (a, b) values (2, 3);
+ERROR: new row for relation "mlparted1" violates partition constraint
+DETAIL: Failing row contains (3, 2).
+-- check routing error through a list partitioned table when the key is null
+create table lparted_nonullpart (a int, b char) partition by list (b);
+create table lparted_nonullpart_a partition of lparted_nonullpart for values in ('a');
+insert into lparted_nonullpart values (1);
+ERROR: no partition of relation "lparted_nonullpart" found for row
+DETAIL: Partition key of the failing row contains (b) = (null).
+drop table lparted_nonullpart;
+-- check that RETURNING works correctly with tuple-routing
+alter table mlparted drop constraint check_b;
+create table mlparted12 partition of mlparted1 for values from (5) to (10);
+create table mlparted2 (b int not null, a int not null);
+alter table mlparted attach partition mlparted2 for values from (1, 10) to (1, 20);
+create table mlparted3 partition of mlparted for values from (1, 20) to (1, 30);
+create table mlparted4 (like mlparted);
+alter table mlparted4 drop a;
+alter table mlparted4 add a int not null;
+alter table mlparted attach partition mlparted4 for values from (1, 30) to (1, 40);
+with ins (a, b, c) as
+ (insert into mlparted (b, a) select s.a, 1 from generate_series(2, 39) s(a) returning tableoid::regclass, *)
+ select a, b, min(c), max(c) from ins group by a, b order by 1;
+ a | b | min | max
+------------+---+-----+-----
+ mlparted11 | 1 | 2 | 4
+ mlparted12 | 1 | 5 | 9
+ mlparted2 | 1 | 10 | 19
+ mlparted3 | 1 | 20 | 29
+ mlparted4 | 1 | 30 | 39
+(5 rows)
+
+alter table mlparted add c text;
+create table mlparted5 (c text, a int not null, b int not null) partition by list (c);
+create table mlparted5a (a int not null, c text, b int not null);
+alter table mlparted5 attach partition mlparted5a for values in ('a');
+alter table mlparted attach partition mlparted5 for values from (1, 40) to (1, 50);
+alter table mlparted add constraint check_b check (a = 1 and b < 45);
+insert into mlparted values (1, 45, 'a');
+ERROR: new row for relation "mlparted5a" violates check constraint "check_b"
+DETAIL: Failing row contains (1, 45, a).
+create function mlparted5abrtrig_func() returns trigger as $$ begin new.c = 'b'; return new; end; $$ language plpgsql;
+create trigger mlparted5abrtrig before insert on mlparted5a for each row execute procedure mlparted5abrtrig_func();
+insert into mlparted5 (a, b, c) values (1, 40, 'a');
+ERROR: new row for relation "mlparted5a" violates partition constraint
+DETAIL: Failing row contains (b, 1, 40).
+drop table mlparted5;
+alter table mlparted drop constraint check_b;
+-- Check multi-level default partition
+create table mlparted_def partition of mlparted default partition by range(a);
+create table mlparted_def1 partition of mlparted_def for values from (40) to (50);
+create table mlparted_def2 partition of mlparted_def for values from (50) to (60);
+insert into mlparted values (40, 100);
+insert into mlparted_def1 values (42, 100);
+insert into mlparted_def2 values (54, 50);
+-- fail
+insert into mlparted values (70, 100);
+ERROR: no partition of relation "mlparted_def" found for row
+DETAIL: Partition key of the failing row contains (a) = (70).
+insert into mlparted_def1 values (52, 50);
+ERROR: new row for relation "mlparted_def1" violates partition constraint
+DETAIL: Failing row contains (52, 50, null).
+insert into mlparted_def2 values (34, 50);
+ERROR: new row for relation "mlparted_def2" violates partition constraint
+DETAIL: Failing row contains (34, 50, null).
+-- ok
+create table mlparted_defd partition of mlparted_def default;
+insert into mlparted values (70, 100);
+select tableoid::regclass, * from mlparted_def;
+ tableoid | a | b | c
+---------------+----+-----+---
+ mlparted_def1 | 40 | 100 |
+ mlparted_def1 | 42 | 100 |
+ mlparted_def2 | 54 | 50 |
+ mlparted_defd | 70 | 100 |
+(4 rows)
+
+-- Check multi-level tuple routing with attributes dropped from the
+-- top-most parent. First remove the last attribute.
+alter table mlparted add d int, add e int;
+alter table mlparted drop e;
+create table mlparted5 partition of mlparted
+ for values from (1, 40) to (1, 50) partition by range (c);
+create table mlparted5_ab partition of mlparted5
+ for values from ('a') to ('c') partition by list (c);
+-- This partitioned table should remain with no partitions.
+create table mlparted5_cd partition of mlparted5
+ for values from ('c') to ('e') partition by list (c);
+create table mlparted5_a partition of mlparted5_ab for values in ('a');
+create table mlparted5_b (d int, b int, c text, a int);
+alter table mlparted5_ab attach partition mlparted5_b for values in ('b');
+truncate mlparted;
+insert into mlparted values (1, 2, 'a', 1);
+insert into mlparted values (1, 40, 'a', 1); -- goes to mlparted5_a
+insert into mlparted values (1, 45, 'b', 1); -- goes to mlparted5_b
+insert into mlparted values (1, 45, 'c', 1); -- goes to mlparted5_cd, fails
+ERROR: no partition of relation "mlparted5_cd" found for row
+DETAIL: Partition key of the failing row contains (c) = (c).
+insert into mlparted values (1, 45, 'f', 1); -- goes to mlparted5, fails
+ERROR: no partition of relation "mlparted5" found for row
+DETAIL: Partition key of the failing row contains (c) = (f).
+select tableoid::regclass, * from mlparted order by a, b, c, d;
+ tableoid | a | b | c | d
+-------------+---+----+---+---
+ mlparted11 | 1 | 2 | a | 1
+ mlparted5_a | 1 | 40 | a | 1
+ mlparted5_b | 1 | 45 | b | 1
+(3 rows)
+
+alter table mlparted drop d;
+truncate mlparted;
+-- Remove the before last attribute.
+alter table mlparted add e int, add d int;
+alter table mlparted drop e;
+insert into mlparted values (1, 2, 'a', 1);
+insert into mlparted values (1, 40, 'a', 1); -- goes to mlparted5_a
+insert into mlparted values (1, 45, 'b', 1); -- goes to mlparted5_b
+insert into mlparted values (1, 45, 'c', 1); -- goes to mlparted5_cd, fails
+ERROR: no partition of relation "mlparted5_cd" found for row
+DETAIL: Partition key of the failing row contains (c) = (c).
+insert into mlparted values (1, 45, 'f', 1); -- goes to mlparted5, fails
+ERROR: no partition of relation "mlparted5" found for row
+DETAIL: Partition key of the failing row contains (c) = (f).
+select tableoid::regclass, * from mlparted order by a, b, c, d;
+ tableoid | a | b | c | d
+-------------+---+----+---+---
+ mlparted11 | 1 | 2 | a | 1
+ mlparted5_a | 1 | 40 | a | 1
+ mlparted5_b | 1 | 45 | b | 1
+(3 rows)
+
+alter table mlparted drop d;
+drop table mlparted5;
+-- check that message shown after failure to find a partition shows the
+-- appropriate key description (or none) in various situations
+create table key_desc (a int, b int) partition by list ((a+0));
+create table key_desc_1 partition of key_desc for values in (1) partition by range (b);
+create user regress_insert_other_user;
+grant select (a) on key_desc_1 to regress_insert_other_user;
+grant insert on key_desc to regress_insert_other_user;
+set role regress_insert_other_user;
+-- no key description is shown
+insert into key_desc values (1, 1);
+ERROR: no partition of relation "key_desc_1" found for row
+reset role;
+grant select (b) on key_desc_1 to regress_insert_other_user;
+set role regress_insert_other_user;
+-- key description (b)=(1) is now shown
+insert into key_desc values (1, 1);
+ERROR: no partition of relation "key_desc_1" found for row
+DETAIL: Partition key of the failing row contains (b) = (1).
+-- key description is not shown if key contains expression
+insert into key_desc values (2, 1);
+ERROR: no partition of relation "key_desc" found for row
+reset role;
+revoke all on key_desc from regress_insert_other_user;
+revoke all on key_desc_1 from regress_insert_other_user;
+drop role regress_insert_other_user;
+drop table key_desc, key_desc_1;
+-- test minvalue/maxvalue restrictions
+create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c);
+create table mcrparted0 partition of mcrparted for values from (minvalue, 0, 0) to (1, maxvalue, maxvalue);
+ERROR: every bound following MINVALUE must also be MINVALUE
+LINE 1: ...partition of mcrparted for values from (minvalue, 0, 0) to (...
+ ^
+create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, minvalue);
+ERROR: every bound following MAXVALUE must also be MAXVALUE
+LINE 1: ...r values from (10, 6, minvalue) to (10, maxvalue, minvalue);
+ ^
+create table mcrparted4 partition of mcrparted for values from (21, minvalue, 0) to (30, 20, minvalue);
+ERROR: every bound following MINVALUE must also be MINVALUE
+LINE 1: ...ition of mcrparted for values from (21, minvalue, 0) to (30,...
+ ^
+-- check multi-column range partitioning expression enforces the same
+-- constraint as what tuple-routing would determine it to be
+create table mcrparted0 partition of mcrparted for values from (minvalue, minvalue, minvalue) to (1, maxvalue, maxvalue);
+create table mcrparted1 partition of mcrparted for values from (2, 1, minvalue) to (10, 5, 10);
+create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, maxvalue);
+create table mcrparted3 partition of mcrparted for values from (11, 1, 1) to (20, 10, 10);
+create table mcrparted4 partition of mcrparted for values from (21, minvalue, minvalue) to (30, 20, maxvalue);
+create table mcrparted5 partition of mcrparted for values from (30, 21, 20) to (maxvalue, maxvalue, maxvalue);
+-- null not allowed in range partition
+insert into mcrparted values (null, null, null);
+ERROR: no partition of relation "mcrparted" found for row
+DETAIL: Partition key of the failing row contains (a, abs(b), c) = (null, null, null).
+-- routed to mcrparted0
+insert into mcrparted values (0, 1, 1);
+insert into mcrparted0 values (0, 1, 1);
+-- routed to mcparted1
+insert into mcrparted values (9, 1000, 1);
+insert into mcrparted1 values (9, 1000, 1);
+insert into mcrparted values (10, 5, -1);
+insert into mcrparted1 values (10, 5, -1);
+insert into mcrparted values (2, 1, 0);
+insert into mcrparted1 values (2, 1, 0);
+-- routed to mcparted2
+insert into mcrparted values (10, 6, 1000);
+insert into mcrparted2 values (10, 6, 1000);
+insert into mcrparted values (10, 1000, 1000);
+insert into mcrparted2 values (10, 1000, 1000);
+-- no partition exists, nor does mcrparted3 accept it
+insert into mcrparted values (11, 1, -1);
+ERROR: no partition of relation "mcrparted" found for row
+DETAIL: Partition key of the failing row contains (a, abs(b), c) = (11, 1, -1).
+insert into mcrparted3 values (11, 1, -1);
+ERROR: new row for relation "mcrparted3" violates partition constraint
+DETAIL: Failing row contains (11, 1, -1).
+-- routed to mcrparted5
+insert into mcrparted values (30, 21, 20);
+insert into mcrparted5 values (30, 21, 20);
+insert into mcrparted4 values (30, 21, 20); -- error
+ERROR: new row for relation "mcrparted4" violates partition constraint
+DETAIL: Failing row contains (30, 21, 20).
+-- check rows
+select tableoid::regclass::text, * from mcrparted order by 1;
+ tableoid | a | b | c
+------------+----+------+------
+ mcrparted0 | 0 | 1 | 1
+ mcrparted0 | 0 | 1 | 1
+ mcrparted1 | 9 | 1000 | 1
+ mcrparted1 | 9 | 1000 | 1
+ mcrparted1 | 10 | 5 | -1
+ mcrparted1 | 10 | 5 | -1
+ mcrparted1 | 2 | 1 | 0
+ mcrparted1 | 2 | 1 | 0
+ mcrparted2 | 10 | 6 | 1000
+ mcrparted2 | 10 | 6 | 1000
+ mcrparted2 | 10 | 1000 | 1000
+ mcrparted2 | 10 | 1000 | 1000
+ mcrparted5 | 30 | 21 | 20
+ mcrparted5 | 30 | 21 | 20
+(14 rows)
+
+-- cleanup
+drop table mcrparted;
+-- check that a BR constraint can't make partition contain violating rows
+create table brtrigpartcon (a int, b text) partition by list (a);
+create table brtrigpartcon1 partition of brtrigpartcon for values in (1);
+create or replace function brtrigpartcon1trigf() returns trigger as $$begin new.a := 2; return new; end$$ language plpgsql;
+create trigger brtrigpartcon1trig before insert on brtrigpartcon1 for each row execute procedure brtrigpartcon1trigf();
+insert into brtrigpartcon values (1, 'hi there');
+ERROR: new row for relation "brtrigpartcon1" violates partition constraint
+DETAIL: Failing row contains (2, hi there).
+insert into brtrigpartcon1 values (1, 'hi there');
+ERROR: new row for relation "brtrigpartcon1" violates partition constraint
+DETAIL: Failing row contains (2, hi there).
+-- check that the message shows the appropriate column description in a
+-- situation where the partitioned table is not the primary ModifyTable node
+create table inserttest3 (f1 text default 'foo', f2 text default 'bar', f3 int);
+create role regress_coldesc_role;
+grant insert on inserttest3 to regress_coldesc_role;
+grant insert on brtrigpartcon to regress_coldesc_role;
+revoke select on brtrigpartcon from regress_coldesc_role;
+set role regress_coldesc_role;
+with result as (insert into brtrigpartcon values (1, 'hi there') returning 1)
+ insert into inserttest3 (f3) select * from result;
+ERROR: new row for relation "brtrigpartcon1" violates partition constraint
+DETAIL: Failing row contains (a, b) = (2, hi there).
+reset role;
+-- cleanup
+revoke all on inserttest3 from regress_coldesc_role;
+revoke all on brtrigpartcon from regress_coldesc_role;
+drop role regress_coldesc_role;
+drop table inserttest3;
+drop table brtrigpartcon;
+drop function brtrigpartcon1trigf();
+-- check that "do nothing" BR triggers work with tuple-routing
+create table donothingbrtrig_test (a int, b text) partition by list (a);
+create table donothingbrtrig_test1 (b text, a int);
+create table donothingbrtrig_test2 (c text, b text, a int);
+alter table donothingbrtrig_test2 drop column c;
+create or replace function donothingbrtrig_func() returns trigger as $$begin raise notice 'b: %', new.b; return NULL; end$$ language plpgsql;
+create trigger donothingbrtrig1 before insert on donothingbrtrig_test1 for each row execute procedure donothingbrtrig_func();
+create trigger donothingbrtrig2 before insert on donothingbrtrig_test2 for each row execute procedure donothingbrtrig_func();
+alter table donothingbrtrig_test attach partition donothingbrtrig_test1 for values in (1);
+alter table donothingbrtrig_test attach partition donothingbrtrig_test2 for values in (2);
+insert into donothingbrtrig_test values (1, 'foo'), (2, 'bar');
+NOTICE: b: foo
+NOTICE: b: bar
+copy donothingbrtrig_test from stdout;
+NOTICE: b: baz
+NOTICE: b: qux
+select tableoid::regclass, * from donothingbrtrig_test;
+ tableoid | a | b
+----------+---+---
+(0 rows)
+
+-- cleanup
+drop table donothingbrtrig_test;
+drop function donothingbrtrig_func();
+-- check multi-column range partitioning with minvalue/maxvalue constraints
+create table mcrparted (a text, b int) partition by range(a, b);
+create table mcrparted1_lt_b partition of mcrparted for values from (minvalue, minvalue) to ('b', minvalue);
+create table mcrparted2_b partition of mcrparted for values from ('b', minvalue) to ('c', minvalue);
+create table mcrparted3_c_to_common partition of mcrparted for values from ('c', minvalue) to ('common', minvalue);
+create table mcrparted4_common_lt_0 partition of mcrparted for values from ('common', minvalue) to ('common', 0);
+create table mcrparted5_common_0_to_10 partition of mcrparted for values from ('common', 0) to ('common', 10);
+create table mcrparted6_common_ge_10 partition of mcrparted for values from ('common', 10) to ('common', maxvalue);
+create table mcrparted7_gt_common_lt_d partition of mcrparted for values from ('common', maxvalue) to ('d', minvalue);
+create table mcrparted8_ge_d partition of mcrparted for values from ('d', minvalue) to (maxvalue, maxvalue);
+\d+ mcrparted
+ Partitioned table "public.mcrparted"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition key: RANGE (a, b)
+Partitions: mcrparted1_lt_b FOR VALUES FROM (MINVALUE, MINVALUE) TO ('b', MINVALUE),
+ mcrparted2_b FOR VALUES FROM ('b', MINVALUE) TO ('c', MINVALUE),
+ mcrparted3_c_to_common FOR VALUES FROM ('c', MINVALUE) TO ('common', MINVALUE),
+ mcrparted4_common_lt_0 FOR VALUES FROM ('common', MINVALUE) TO ('common', 0),
+ mcrparted5_common_0_to_10 FOR VALUES FROM ('common', 0) TO ('common', 10),
+ mcrparted6_common_ge_10 FOR VALUES FROM ('common', 10) TO ('common', MAXVALUE),
+ mcrparted7_gt_common_lt_d FOR VALUES FROM ('common', MAXVALUE) TO ('d', MINVALUE),
+ mcrparted8_ge_d FOR VALUES FROM ('d', MINVALUE) TO (MAXVALUE, MAXVALUE)
+
+\d+ mcrparted1_lt_b
+ Table "public.mcrparted1_lt_b"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition of: mcrparted FOR VALUES FROM (MINVALUE, MINVALUE) TO ('b', MINVALUE)
+Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a < 'b'::text))
+
+\d+ mcrparted2_b
+ Table "public.mcrparted2_b"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition of: mcrparted FOR VALUES FROM ('b', MINVALUE) TO ('c', MINVALUE)
+Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a >= 'b'::text) AND (a < 'c'::text))
+
+\d+ mcrparted3_c_to_common
+ Table "public.mcrparted3_c_to_common"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition of: mcrparted FOR VALUES FROM ('c', MINVALUE) TO ('common', MINVALUE)
+Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a >= 'c'::text) AND (a < 'common'::text))
+
+\d+ mcrparted4_common_lt_0
+ Table "public.mcrparted4_common_lt_0"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition of: mcrparted FOR VALUES FROM ('common', MINVALUE) TO ('common', 0)
+Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a = 'common'::text) AND (b < 0))
+
+\d+ mcrparted5_common_0_to_10
+ Table "public.mcrparted5_common_0_to_10"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition of: mcrparted FOR VALUES FROM ('common', 0) TO ('common', 10)
+Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a = 'common'::text) AND (b >= 0) AND (b < 10))
+
+\d+ mcrparted6_common_ge_10
+ Table "public.mcrparted6_common_ge_10"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition of: mcrparted FOR VALUES FROM ('common', 10) TO ('common', MAXVALUE)
+Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a = 'common'::text) AND (b >= 10))
+
+\d+ mcrparted7_gt_common_lt_d
+ Table "public.mcrparted7_gt_common_lt_d"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition of: mcrparted FOR VALUES FROM ('common', MAXVALUE) TO ('d', MINVALUE)
+Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a > 'common'::text) AND (a < 'd'::text))
+
+\d+ mcrparted8_ge_d
+ Table "public.mcrparted8_ge_d"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | integer | | | | plain | |
+Partition of: mcrparted FOR VALUES FROM ('d', MINVALUE) TO (MAXVALUE, MAXVALUE)
+Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a >= 'd'::text))
+
+insert into mcrparted values ('aaa', 0), ('b', 0), ('bz', 10), ('c', -10),
+ ('comm', -10), ('common', -10), ('common', 0), ('common', 10),
+ ('commons', 0), ('d', -10), ('e', 0);
+select tableoid::regclass, * from mcrparted order by a, b;
+ tableoid | a | b
+---------------------------+---------+-----
+ mcrparted1_lt_b | aaa | 0
+ mcrparted2_b | b | 0
+ mcrparted2_b | bz | 10
+ mcrparted3_c_to_common | c | -10
+ mcrparted3_c_to_common | comm | -10
+ mcrparted4_common_lt_0 | common | -10
+ mcrparted5_common_0_to_10 | common | 0
+ mcrparted6_common_ge_10 | common | 10
+ mcrparted7_gt_common_lt_d | commons | 0
+ mcrparted8_ge_d | d | -10
+ mcrparted8_ge_d | e | 0
+(11 rows)
+
+drop table mcrparted;
+-- check that wholerow vars in the RETURNING list work with partitioned tables
+create table returningwrtest (a int) partition by list (a);
+create table returningwrtest1 partition of returningwrtest for values in (1);
+insert into returningwrtest values (1) returning returningwrtest;
+ returningwrtest
+-----------------
+ (1)
+(1 row)
+
+-- check also that the wholerow vars in RETURNING list are converted as needed
+alter table returningwrtest add b text;
+create table returningwrtest2 (b text, c int, a int);
+alter table returningwrtest2 drop c;
+alter table returningwrtest attach partition returningwrtest2 for values in (2);
+insert into returningwrtest values (2, 'foo') returning returningwrtest;
+ returningwrtest
+-----------------
+ (2,foo)
+(1 row)
+
+drop table returningwrtest;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/insert.sql b/ydb/library/yql/tests/postgresql/original/cases/insert.sql
new file mode 100644
index 0000000000..bfaa8a3b27
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/insert.sql
@@ -0,0 +1,626 @@
+--
+-- insert with DEFAULT in the target_list
+--
+create table inserttest (col1 int4, col2 int4 NOT NULL, col3 text default 'testing');
+insert into inserttest (col1, col2, col3) values (DEFAULT, DEFAULT, DEFAULT);
+insert into inserttest (col2, col3) values (3, DEFAULT);
+insert into inserttest (col1, col2, col3) values (DEFAULT, 5, DEFAULT);
+insert into inserttest values (DEFAULT, 5, 'test');
+insert into inserttest values (DEFAULT, 7);
+
+select * from inserttest;
+
+--
+-- insert with similar expression / target_list values (all fail)
+--
+insert into inserttest (col1, col2, col3) values (DEFAULT, DEFAULT);
+insert into inserttest (col1, col2, col3) values (1, 2);
+insert into inserttest (col1) values (1, 2);
+insert into inserttest (col1) values (DEFAULT, DEFAULT);
+
+select * from inserttest;
+
+--
+-- VALUES test
+--
+insert into inserttest values(10, 20, '40'), (-1, 2, DEFAULT),
+ ((select 2), (select i from (values(3)) as foo (i)), 'values are fun!');
+
+select * from inserttest;
+
+--
+-- TOASTed value test
+--
+insert into inserttest values(30, 50, repeat('x', 10000));
+
+select col1, col2, char_length(col3) from inserttest;
+
+drop table inserttest;
+
+--
+-- tuple larger than fillfactor
+--
+CREATE TABLE large_tuple_test (a int, b text) WITH (fillfactor = 10);
+ALTER TABLE large_tuple_test ALTER COLUMN b SET STORAGE plain;
+
+-- create page w/ free space in range [nearlyEmptyFreeSpace, MaxHeapTupleSize)
+INSERT INTO large_tuple_test (select 1, NULL);
+
+-- should still fit on the page
+INSERT INTO large_tuple_test (select 2, repeat('a', 1000));
+SELECT pg_size_pretty(pg_relation_size('large_tuple_test'::regclass, 'main'));
+
+-- add small record to the second page
+INSERT INTO large_tuple_test (select 3, NULL);
+
+-- now this tuple won't fit on the second page, but the insert should
+-- still succeed by extending the relation
+INSERT INTO large_tuple_test (select 4, repeat('a', 8126));
+
+DROP TABLE large_tuple_test;
+
+--
+-- check indirection (field/array assignment), cf bug #14265
+--
+-- these tests are aware that transformInsertStmt has 3 separate code paths
+--
+
+create type insert_test_type as (if1 int, if2 text[]);
+
+create table inserttest (f1 int, f2 int[],
+ f3 insert_test_type, f4 insert_test_type[]);
+
+insert into inserttest (f2[1], f2[2]) values (1,2);
+insert into inserttest (f2[1], f2[2]) values (3,4), (5,6);
+insert into inserttest (f2[1], f2[2]) select 7,8;
+insert into inserttest (f2[1], f2[2]) values (1,default); -- not supported
+
+insert into inserttest (f3.if1, f3.if2) values (1,array['foo']);
+insert into inserttest (f3.if1, f3.if2) values (1,'{foo}'), (2,'{bar}');
+insert into inserttest (f3.if1, f3.if2) select 3, '{baz,quux}';
+insert into inserttest (f3.if1, f3.if2) values (1,default); -- not supported
+
+insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar');
+insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar'), ('baz', 'quux');
+insert into inserttest (f3.if2[1], f3.if2[2]) select 'bear', 'beer';
+
+insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar');
+insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar'), ('baz', 'quux');
+insert into inserttest (f4[1].if2[1], f4[1].if2[2]) select 'bear', 'beer';
+
+select * from inserttest;
+
+-- also check reverse-listing
+create table inserttest2 (f1 bigint, f2 text);
+create rule irule1 as on insert to inserttest2 do also
+ insert into inserttest (f3.if2[1], f3.if2[2])
+ values (new.f1,new.f2);
+create rule irule2 as on insert to inserttest2 do also
+ insert into inserttest (f4[1].if1, f4[1].if2[2])
+ values (1,'fool'),(new.f1,new.f2);
+create rule irule3 as on insert to inserttest2 do also
+ insert into inserttest (f4[1].if1, f4[1].if2[2])
+ select new.f1, new.f2;
+\d+ inserttest2
+
+drop table inserttest2;
+drop table inserttest;
+drop type insert_test_type;
+
+-- direct partition inserts should check partition bound constraint
+create table range_parted (
+ a text,
+ b int
+) partition by range (a, (b+0));
+
+-- no partitions, so fail
+insert into range_parted values ('a', 11);
+
+create table part1 partition of range_parted for values from ('a', 1) to ('a', 10);
+create table part2 partition of range_parted for values from ('a', 10) to ('a', 20);
+create table part3 partition of range_parted for values from ('b', 1) to ('b', 10);
+create table part4 partition of range_parted for values from ('b', 10) to ('b', 20);
+
+-- fail
+insert into part1 values ('a', 11);
+insert into part1 values ('b', 1);
+-- ok
+insert into part1 values ('a', 1);
+-- fail
+insert into part4 values ('b', 21);
+insert into part4 values ('a', 10);
+-- ok
+insert into part4 values ('b', 10);
+
+-- fail (partition key a has a NOT NULL constraint)
+insert into part1 values (null);
+-- fail (expression key (b+0) cannot be null either)
+insert into part1 values (1);
+
+create table list_parted (
+ a text,
+ b int
+) partition by list (lower(a));
+create table part_aa_bb partition of list_parted FOR VALUES IN ('aa', 'bb');
+create table part_cc_dd partition of list_parted FOR VALUES IN ('cc', 'dd');
+create table part_null partition of list_parted FOR VALUES IN (null);
+
+-- fail
+insert into part_aa_bb values ('cc', 1);
+insert into part_aa_bb values ('AAa', 1);
+insert into part_aa_bb values (null);
+-- ok
+insert into part_cc_dd values ('cC', 1);
+insert into part_null values (null, 0);
+
+-- check in case of multi-level partitioned table
+create table part_ee_ff partition of list_parted for values in ('ee', 'ff') partition by range (b);
+create table part_ee_ff1 partition of part_ee_ff for values from (1) to (10);
+create table part_ee_ff2 partition of part_ee_ff for values from (10) to (20);
+
+-- test default partition
+create table part_default partition of list_parted default;
+-- Negative test: a row, which would fit in other partition, does not fit
+-- default partition, even when inserted directly
+insert into part_default values ('aa', 2);
+insert into part_default values (null, 2);
+-- ok
+insert into part_default values ('Zz', 2);
+-- test if default partition works as expected for multi-level partitioned
+-- table as well as when default partition itself is further partitioned
+drop table part_default;
+create table part_xx_yy partition of list_parted for values in ('xx', 'yy') partition by list (a);
+create table part_xx_yy_p1 partition of part_xx_yy for values in ('xx');
+create table part_xx_yy_defpart partition of part_xx_yy default;
+create table part_default partition of list_parted default partition by range(b);
+create table part_default_p1 partition of part_default for values from (20) to (30);
+create table part_default_p2 partition of part_default for values from (30) to (40);
+
+-- fail
+insert into part_ee_ff1 values ('EE', 11);
+insert into part_default_p2 values ('gg', 43);
+-- fail (even the parent's, ie, part_ee_ff's partition constraint applies)
+insert into part_ee_ff1 values ('cc', 1);
+insert into part_default values ('gg', 43);
+-- ok
+insert into part_ee_ff1 values ('ff', 1);
+insert into part_ee_ff2 values ('ff', 11);
+insert into part_default_p1 values ('cd', 25);
+insert into part_default_p2 values ('de', 35);
+insert into list_parted values ('ab', 21);
+insert into list_parted values ('xx', 1);
+insert into list_parted values ('yy', 2);
+select tableoid::regclass, * from list_parted;
+
+-- Check tuple routing for partitioned tables
+
+-- fail
+insert into range_parted values ('a', 0);
+-- ok
+insert into range_parted values ('a', 1);
+insert into range_parted values ('a', 10);
+-- fail
+insert into range_parted values ('a', 20);
+-- ok
+insert into range_parted values ('b', 1);
+insert into range_parted values ('b', 10);
+-- fail (partition key (b+0) is null)
+insert into range_parted values ('a');
+
+-- Check default partition
+create table part_def partition of range_parted default;
+-- fail
+insert into part_def values ('b', 10);
+-- ok
+insert into part_def values ('c', 10);
+insert into range_parted values (null, null);
+insert into range_parted values ('a', null);
+insert into range_parted values (null, 19);
+insert into range_parted values ('b', 20);
+
+select tableoid::regclass, * from range_parted;
+-- ok
+insert into list_parted values (null, 1);
+insert into list_parted (a) values ('aA');
+-- fail (partition of part_ee_ff not found in both cases)
+insert into list_parted values ('EE', 0);
+insert into part_ee_ff values ('EE', 0);
+-- ok
+insert into list_parted values ('EE', 1);
+insert into part_ee_ff values ('EE', 10);
+select tableoid::regclass, * from list_parted;
+
+-- some more tests to exercise tuple-routing with multi-level partitioning
+create table part_gg partition of list_parted for values in ('gg') partition by range (b);
+create table part_gg1 partition of part_gg for values from (minvalue) to (1);
+create table part_gg2 partition of part_gg for values from (1) to (10) partition by range (b);
+create table part_gg2_1 partition of part_gg2 for values from (1) to (5);
+create table part_gg2_2 partition of part_gg2 for values from (5) to (10);
+
+create table part_ee_ff3 partition of part_ee_ff for values from (20) to (30) partition by range (b);
+create table part_ee_ff3_1 partition of part_ee_ff3 for values from (20) to (25);
+create table part_ee_ff3_2 partition of part_ee_ff3 for values from (25) to (30);
+
+truncate list_parted;
+insert into list_parted values ('aa'), ('cc');
+insert into list_parted select 'Ff', s.a from generate_series(1, 29) s(a);
+insert into list_parted select 'gg', s.a from generate_series(1, 9) s(a);
+insert into list_parted (b) values (1);
+select tableoid::regclass::text, a, min(b) as min_b, max(b) as max_b from list_parted group by 1, 2 order by 1;
+
+-- direct partition inserts should check hash partition bound constraint
+
+-- Use hand-rolled hash functions and operator classes to get predictable
+-- result on different machines. The hash function for int4 simply returns
+-- the sum of the values passed to it and the one for text returns the length
+-- of the non-empty string value passed to it or 0.
+
+create or replace function part_hashint4_noop(value int4, seed int8)
+returns int8 as $$
+select value + seed;
+$$ language sql immutable;
+
+create operator class part_test_int4_ops
+for type int4
+using hash as
+operator 1 =,
+function 2 part_hashint4_noop(int4, int8);
+
+create or replace function part_hashtext_length(value text, seed int8)
+RETURNS int8 AS $$
+select length(coalesce(value, ''))::int8
+$$ language sql immutable;
+
+create operator class part_test_text_ops
+for type text
+using hash as
+operator 1 =,
+function 2 part_hashtext_length(text, int8);
+
+create table hash_parted (
+ a int
+) partition by hash (a part_test_int4_ops);
+create table hpart0 partition of hash_parted for values with (modulus 4, remainder 0);
+create table hpart1 partition of hash_parted for values with (modulus 4, remainder 1);
+create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2);
+create table hpart3 partition of hash_parted for values with (modulus 4, remainder 3);
+
+insert into hash_parted values(generate_series(1,10));
+
+-- direct insert of values divisible by 4 - ok;
+insert into hpart0 values(12),(16);
+-- fail;
+insert into hpart0 values(11);
+-- 11 % 4 -> 3 remainder i.e. valid data for hpart3 partition
+insert into hpart3 values(11);
+
+-- view data
+select tableoid::regclass as part, a, a%4 as "remainder = a % 4"
+from hash_parted order by part;
+
+-- test \d+ output on a table which has both partitioned and unpartitioned
+-- partitions
+\d+ list_parted
+
+-- cleanup
+drop table range_parted, list_parted;
+drop table hash_parted;
+
+-- test that a default partition added as the first partition accepts any value
+-- including null
+create table list_parted (a int) partition by list (a);
+create table part_default partition of list_parted default;
+\d+ part_default
+insert into part_default values (null);
+insert into part_default values (1);
+insert into part_default values (-1);
+select tableoid::regclass, a from list_parted;
+-- cleanup
+drop table list_parted;
+
+-- more tests for certain multi-level partitioning scenarios
+create table mlparted (a int, b int) partition by range (a, b);
+create table mlparted1 (b int not null, a int not null) partition by range ((b+0));
+create table mlparted11 (like mlparted1);
+alter table mlparted11 drop a;
+alter table mlparted11 add a int;
+alter table mlparted11 drop a;
+alter table mlparted11 add a int not null;
+-- attnum for key attribute 'a' is different in mlparted, mlparted1, and mlparted11
+select attrelid::regclass, attname, attnum
+from pg_attribute
+where attname = 'a'
+ and (attrelid = 'mlparted'::regclass
+ or attrelid = 'mlparted1'::regclass
+ or attrelid = 'mlparted11'::regclass)
+order by attrelid::regclass::text;
+
+alter table mlparted1 attach partition mlparted11 for values from (2) to (5);
+alter table mlparted attach partition mlparted1 for values from (1, 2) to (1, 10);
+
+-- check that "(1, 2)" is correctly routed to mlparted11.
+insert into mlparted values (1, 2);
+select tableoid::regclass, * from mlparted;
+
+-- check that proper message is shown after failure to route through mlparted1
+insert into mlparted (a, b) values (1, 5);
+
+truncate mlparted;
+alter table mlparted add constraint check_b check (b = 3);
+
+-- have a BR trigger modify the row such that the check_b is violated
+create function mlparted11_trig_fn()
+returns trigger AS
+$$
+begin
+ NEW.b := 4;
+ return NEW;
+end;
+$$
+language plpgsql;
+create trigger mlparted11_trig before insert ON mlparted11
+ for each row execute procedure mlparted11_trig_fn();
+
+-- check that the correct row is shown when constraint check_b fails after
+-- "(1, 2)" is routed to mlparted11 (actually "(1, 4)" would be shown due
+-- to the BR trigger mlparted11_trig_fn)
+insert into mlparted values (1, 2);
+drop trigger mlparted11_trig on mlparted11;
+drop function mlparted11_trig_fn();
+
+-- check that inserting into an internal partition successfully results in
+-- checking its partition constraint before inserting into the leaf partition
+-- selected by tuple-routing
+insert into mlparted1 (a, b) values (2, 3);
+
+-- check routing error through a list partitioned table when the key is null
+create table lparted_nonullpart (a int, b char) partition by list (b);
+create table lparted_nonullpart_a partition of lparted_nonullpart for values in ('a');
+insert into lparted_nonullpart values (1);
+drop table lparted_nonullpart;
+
+-- check that RETURNING works correctly with tuple-routing
+alter table mlparted drop constraint check_b;
+create table mlparted12 partition of mlparted1 for values from (5) to (10);
+create table mlparted2 (b int not null, a int not null);
+alter table mlparted attach partition mlparted2 for values from (1, 10) to (1, 20);
+create table mlparted3 partition of mlparted for values from (1, 20) to (1, 30);
+create table mlparted4 (like mlparted);
+alter table mlparted4 drop a;
+alter table mlparted4 add a int not null;
+alter table mlparted attach partition mlparted4 for values from (1, 30) to (1, 40);
+with ins (a, b, c) as
+ (insert into mlparted (b, a) select s.a, 1 from generate_series(2, 39) s(a) returning tableoid::regclass, *)
+ select a, b, min(c), max(c) from ins group by a, b order by 1;
+
+alter table mlparted add c text;
+create table mlparted5 (c text, a int not null, b int not null) partition by list (c);
+create table mlparted5a (a int not null, c text, b int not null);
+alter table mlparted5 attach partition mlparted5a for values in ('a');
+alter table mlparted attach partition mlparted5 for values from (1, 40) to (1, 50);
+alter table mlparted add constraint check_b check (a = 1 and b < 45);
+insert into mlparted values (1, 45, 'a');
+create function mlparted5abrtrig_func() returns trigger as $$ begin new.c = 'b'; return new; end; $$ language plpgsql;
+create trigger mlparted5abrtrig before insert on mlparted5a for each row execute procedure mlparted5abrtrig_func();
+insert into mlparted5 (a, b, c) values (1, 40, 'a');
+drop table mlparted5;
+alter table mlparted drop constraint check_b;
+
+-- Check multi-level default partition
+create table mlparted_def partition of mlparted default partition by range(a);
+create table mlparted_def1 partition of mlparted_def for values from (40) to (50);
+create table mlparted_def2 partition of mlparted_def for values from (50) to (60);
+insert into mlparted values (40, 100);
+insert into mlparted_def1 values (42, 100);
+insert into mlparted_def2 values (54, 50);
+-- fail
+insert into mlparted values (70, 100);
+insert into mlparted_def1 values (52, 50);
+insert into mlparted_def2 values (34, 50);
+-- ok
+create table mlparted_defd partition of mlparted_def default;
+insert into mlparted values (70, 100);
+
+select tableoid::regclass, * from mlparted_def;
+
+-- Check multi-level tuple routing with attributes dropped from the
+-- top-most parent. First remove the last attribute.
+alter table mlparted add d int, add e int;
+alter table mlparted drop e;
+create table mlparted5 partition of mlparted
+ for values from (1, 40) to (1, 50) partition by range (c);
+create table mlparted5_ab partition of mlparted5
+ for values from ('a') to ('c') partition by list (c);
+-- This partitioned table should remain with no partitions.
+create table mlparted5_cd partition of mlparted5
+ for values from ('c') to ('e') partition by list (c);
+create table mlparted5_a partition of mlparted5_ab for values in ('a');
+create table mlparted5_b (d int, b int, c text, a int);
+alter table mlparted5_ab attach partition mlparted5_b for values in ('b');
+truncate mlparted;
+insert into mlparted values (1, 2, 'a', 1);
+insert into mlparted values (1, 40, 'a', 1); -- goes to mlparted5_a
+insert into mlparted values (1, 45, 'b', 1); -- goes to mlparted5_b
+insert into mlparted values (1, 45, 'c', 1); -- goes to mlparted5_cd, fails
+insert into mlparted values (1, 45, 'f', 1); -- goes to mlparted5, fails
+select tableoid::regclass, * from mlparted order by a, b, c, d;
+alter table mlparted drop d;
+truncate mlparted;
+-- Remove the before last attribute.
+alter table mlparted add e int, add d int;
+alter table mlparted drop e;
+insert into mlparted values (1, 2, 'a', 1);
+insert into mlparted values (1, 40, 'a', 1); -- goes to mlparted5_a
+insert into mlparted values (1, 45, 'b', 1); -- goes to mlparted5_b
+insert into mlparted values (1, 45, 'c', 1); -- goes to mlparted5_cd, fails
+insert into mlparted values (1, 45, 'f', 1); -- goes to mlparted5, fails
+select tableoid::regclass, * from mlparted order by a, b, c, d;
+alter table mlparted drop d;
+drop table mlparted5;
+
+-- check that message shown after failure to find a partition shows the
+-- appropriate key description (or none) in various situations
+create table key_desc (a int, b int) partition by list ((a+0));
+create table key_desc_1 partition of key_desc for values in (1) partition by range (b);
+
+create user regress_insert_other_user;
+grant select (a) on key_desc_1 to regress_insert_other_user;
+grant insert on key_desc to regress_insert_other_user;
+
+set role regress_insert_other_user;
+-- no key description is shown
+insert into key_desc values (1, 1);
+
+reset role;
+grant select (b) on key_desc_1 to regress_insert_other_user;
+set role regress_insert_other_user;
+-- key description (b)=(1) is now shown
+insert into key_desc values (1, 1);
+
+-- key description is not shown if key contains expression
+insert into key_desc values (2, 1);
+reset role;
+revoke all on key_desc from regress_insert_other_user;
+revoke all on key_desc_1 from regress_insert_other_user;
+drop role regress_insert_other_user;
+drop table key_desc, key_desc_1;
+
+-- test minvalue/maxvalue restrictions
+create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c);
+create table mcrparted0 partition of mcrparted for values from (minvalue, 0, 0) to (1, maxvalue, maxvalue);
+create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, minvalue);
+create table mcrparted4 partition of mcrparted for values from (21, minvalue, 0) to (30, 20, minvalue);
+
+-- check multi-column range partitioning expression enforces the same
+-- constraint as what tuple-routing would determine it to be
+create table mcrparted0 partition of mcrparted for values from (minvalue, minvalue, minvalue) to (1, maxvalue, maxvalue);
+create table mcrparted1 partition of mcrparted for values from (2, 1, minvalue) to (10, 5, 10);
+create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, maxvalue);
+create table mcrparted3 partition of mcrparted for values from (11, 1, 1) to (20, 10, 10);
+create table mcrparted4 partition of mcrparted for values from (21, minvalue, minvalue) to (30, 20, maxvalue);
+create table mcrparted5 partition of mcrparted for values from (30, 21, 20) to (maxvalue, maxvalue, maxvalue);
+
+-- null not allowed in range partition
+insert into mcrparted values (null, null, null);
+
+-- routed to mcrparted0
+insert into mcrparted values (0, 1, 1);
+insert into mcrparted0 values (0, 1, 1);
+
+-- routed to mcparted1
+insert into mcrparted values (9, 1000, 1);
+insert into mcrparted1 values (9, 1000, 1);
+insert into mcrparted values (10, 5, -1);
+insert into mcrparted1 values (10, 5, -1);
+insert into mcrparted values (2, 1, 0);
+insert into mcrparted1 values (2, 1, 0);
+
+-- routed to mcparted2
+insert into mcrparted values (10, 6, 1000);
+insert into mcrparted2 values (10, 6, 1000);
+insert into mcrparted values (10, 1000, 1000);
+insert into mcrparted2 values (10, 1000, 1000);
+
+-- no partition exists, nor does mcrparted3 accept it
+insert into mcrparted values (11, 1, -1);
+insert into mcrparted3 values (11, 1, -1);
+
+-- routed to mcrparted5
+insert into mcrparted values (30, 21, 20);
+insert into mcrparted5 values (30, 21, 20);
+insert into mcrparted4 values (30, 21, 20); -- error
+
+-- check rows
+select tableoid::regclass::text, * from mcrparted order by 1;
+
+-- cleanup
+drop table mcrparted;
+
+-- check that a BR constraint can't make partition contain violating rows
+create table brtrigpartcon (a int, b text) partition by list (a);
+create table brtrigpartcon1 partition of brtrigpartcon for values in (1);
+create or replace function brtrigpartcon1trigf() returns trigger as $$begin new.a := 2; return new; end$$ language plpgsql;
+create trigger brtrigpartcon1trig before insert on brtrigpartcon1 for each row execute procedure brtrigpartcon1trigf();
+insert into brtrigpartcon values (1, 'hi there');
+insert into brtrigpartcon1 values (1, 'hi there');
+
+-- check that the message shows the appropriate column description in a
+-- situation where the partitioned table is not the primary ModifyTable node
+create table inserttest3 (f1 text default 'foo', f2 text default 'bar', f3 int);
+create role regress_coldesc_role;
+grant insert on inserttest3 to regress_coldesc_role;
+grant insert on brtrigpartcon to regress_coldesc_role;
+revoke select on brtrigpartcon from regress_coldesc_role;
+set role regress_coldesc_role;
+with result as (insert into brtrigpartcon values (1, 'hi there') returning 1)
+ insert into inserttest3 (f3) select * from result;
+reset role;
+
+-- cleanup
+revoke all on inserttest3 from regress_coldesc_role;
+revoke all on brtrigpartcon from regress_coldesc_role;
+drop role regress_coldesc_role;
+drop table inserttest3;
+drop table brtrigpartcon;
+drop function brtrigpartcon1trigf();
+
+-- check that "do nothing" BR triggers work with tuple-routing
+create table donothingbrtrig_test (a int, b text) partition by list (a);
+create table donothingbrtrig_test1 (b text, a int);
+create table donothingbrtrig_test2 (c text, b text, a int);
+alter table donothingbrtrig_test2 drop column c;
+create or replace function donothingbrtrig_func() returns trigger as $$begin raise notice 'b: %', new.b; return NULL; end$$ language plpgsql;
+create trigger donothingbrtrig1 before insert on donothingbrtrig_test1 for each row execute procedure donothingbrtrig_func();
+create trigger donothingbrtrig2 before insert on donothingbrtrig_test2 for each row execute procedure donothingbrtrig_func();
+alter table donothingbrtrig_test attach partition donothingbrtrig_test1 for values in (1);
+alter table donothingbrtrig_test attach partition donothingbrtrig_test2 for values in (2);
+insert into donothingbrtrig_test values (1, 'foo'), (2, 'bar');
+copy donothingbrtrig_test from stdout;
+1 baz
+2 qux
+\.
+select tableoid::regclass, * from donothingbrtrig_test;
+
+-- cleanup
+drop table donothingbrtrig_test;
+drop function donothingbrtrig_func();
+
+-- check multi-column range partitioning with minvalue/maxvalue constraints
+create table mcrparted (a text, b int) partition by range(a, b);
+create table mcrparted1_lt_b partition of mcrparted for values from (minvalue, minvalue) to ('b', minvalue);
+create table mcrparted2_b partition of mcrparted for values from ('b', minvalue) to ('c', minvalue);
+create table mcrparted3_c_to_common partition of mcrparted for values from ('c', minvalue) to ('common', minvalue);
+create table mcrparted4_common_lt_0 partition of mcrparted for values from ('common', minvalue) to ('common', 0);
+create table mcrparted5_common_0_to_10 partition of mcrparted for values from ('common', 0) to ('common', 10);
+create table mcrparted6_common_ge_10 partition of mcrparted for values from ('common', 10) to ('common', maxvalue);
+create table mcrparted7_gt_common_lt_d partition of mcrparted for values from ('common', maxvalue) to ('d', minvalue);
+create table mcrparted8_ge_d partition of mcrparted for values from ('d', minvalue) to (maxvalue, maxvalue);
+
+\d+ mcrparted
+\d+ mcrparted1_lt_b
+\d+ mcrparted2_b
+\d+ mcrparted3_c_to_common
+\d+ mcrparted4_common_lt_0
+\d+ mcrparted5_common_0_to_10
+\d+ mcrparted6_common_ge_10
+\d+ mcrparted7_gt_common_lt_d
+\d+ mcrparted8_ge_d
+
+insert into mcrparted values ('aaa', 0), ('b', 0), ('bz', 10), ('c', -10),
+ ('comm', -10), ('common', -10), ('common', 0), ('common', 10),
+ ('commons', 0), ('d', -10), ('e', 0);
+select tableoid::regclass, * from mcrparted order by a, b;
+drop table mcrparted;
+
+-- check that wholerow vars in the RETURNING list work with partitioned tables
+create table returningwrtest (a int) partition by list (a);
+create table returningwrtest1 partition of returningwrtest for values in (1);
+insert into returningwrtest values (1) returning returningwrtest;
+
+-- check also that the wholerow vars in RETURNING list are converted as needed
+alter table returningwrtest add b text;
+create table returningwrtest2 (b text, c int, a int);
+alter table returningwrtest2 drop c;
+alter table returningwrtest attach partition returningwrtest2 for values in (2);
+insert into returningwrtest values (2, 'foo') returning returningwrtest;
+drop table returningwrtest;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/int2.out b/ydb/library/yql/tests/postgresql/original/cases/int2.out
new file mode 100644
index 0000000000..8a64df725c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/int2.out
@@ -0,0 +1,312 @@
+--
+-- INT2
+--
+CREATE TABLE INT2_TBL(f1 int2);
+INSERT INTO INT2_TBL(f1) VALUES ('0 ');
+INSERT INTO INT2_TBL(f1) VALUES (' 1234 ');
+INSERT INTO INT2_TBL(f1) VALUES (' -1234');
+INSERT INTO INT2_TBL(f1) VALUES ('34.5');
+ERROR: invalid input syntax for type smallint: "34.5"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('34.5');
+ ^
+-- largest and smallest values
+INSERT INTO INT2_TBL(f1) VALUES ('32767');
+INSERT INTO INT2_TBL(f1) VALUES ('-32767');
+-- bad input values -- should give errors
+INSERT INTO INT2_TBL(f1) VALUES ('100000');
+ERROR: value "100000" is out of range for type smallint
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('100000');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('asdf');
+ERROR: invalid input syntax for type smallint: "asdf"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('asdf');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type smallint: " "
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('- 1234');
+ERROR: invalid input syntax for type smallint: "- 1234"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('- 1234');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('4 444');
+ERROR: invalid input syntax for type smallint: "4 444"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('4 444');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('123 dt');
+ERROR: invalid input syntax for type smallint: "123 dt"
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('123 dt');
+ ^
+INSERT INTO INT2_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type smallint: ""
+LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('');
+ ^
+SELECT * FROM INT2_TBL;
+ f1
+--------
+ 0
+ 1234
+ -1234
+ 32767
+ -32767
+(5 rows)
+
+SELECT * FROM INT2_TBL AS f(a, b);
+ERROR: table "f" has 1 columns available but 2 columns specified
+SELECT * FROM (TABLE int2_tbl) AS s (a, b);
+ERROR: table "s" has 1 columns available but 2 columns specified
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int2 '0';
+ f1
+--------
+ 1234
+ -1234
+ 32767
+ -32767
+(4 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int4 '0';
+ f1
+--------
+ 1234
+ -1234
+ 32767
+ -32767
+(4 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 = int2 '0';
+ f1
+----
+ 0
+(1 row)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 = int4 '0';
+ f1
+----
+ 0
+(1 row)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 < int2 '0';
+ f1
+--------
+ -1234
+ -32767
+(2 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 < int4 '0';
+ f1
+--------
+ -1234
+ -32767
+(2 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int2 '0';
+ f1
+--------
+ 0
+ -1234
+ -32767
+(3 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int4 '0';
+ f1
+--------
+ 0
+ -1234
+ -32767
+(3 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 > int2 '0';
+ f1
+-------
+ 1234
+ 32767
+(2 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 > int4 '0';
+ f1
+-------
+ 1234
+ 32767
+(2 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int2 '0';
+ f1
+-------
+ 0
+ 1234
+ 32767
+(3 rows)
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int4 '0';
+ f1
+-------
+ 0
+ 1234
+ 32767
+(3 rows)
+
+-- positive odds
+SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
+ f1
+-------
+ 32767
+(1 row)
+
+-- any evens
+SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
+ f1
+-------
+ 0
+ 1234
+ -1234
+(3 rows)
+
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i;
+ERROR: smallint out of range
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i
+WHERE abs(f1) < 16384;
+ f1 | x
+-------+-------
+ 0 | 0
+ 1234 | 2468
+ -1234 | -2468
+(3 rows)
+
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | 0
+ 1234 | 2468
+ -1234 | -2468
+ 32767 | 65534
+ -32767 | -65534
+(5 rows)
+
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i;
+ERROR: smallint out of range
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i
+WHERE f1 < 32766;
+ f1 | x
+--------+--------
+ 0 | 2
+ 1234 | 1236
+ -1234 | -1232
+ -32767 | -32765
+(4 rows)
+
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | 2
+ 1234 | 1236
+ -1234 | -1232
+ 32767 | 32769
+ -32767 | -32765
+(5 rows)
+
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i;
+ERROR: smallint out of range
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i
+WHERE f1 > -32767;
+ f1 | x
+-------+-------
+ 0 | -2
+ 1234 | 1232
+ -1234 | -1236
+ 32767 | 32765
+(4 rows)
+
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | -2
+ 1234 | 1232
+ -1234 | -1236
+ 32767 | 32765
+ -32767 | -32769
+(5 rows)
+
+SELECT i.f1, i.f1 / int2 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | 0
+ 1234 | 617
+ -1234 | -617
+ 32767 | 16383
+ -32767 | -16383
+(5 rows)
+
+SELECT i.f1, i.f1 / int4 '2' AS x FROM INT2_TBL i;
+ f1 | x
+--------+--------
+ 0 | 0
+ 1234 | 617
+ -1234 | -617
+ 32767 | 16383
+ -32767 | -16383
+(5 rows)
+
+-- corner cases
+SELECT (-1::int2<<15)::text;
+ text
+--------
+ -32768
+(1 row)
+
+SELECT ((-1::int2<<15)+1::int2)::text;
+ text
+--------
+ -32767
+(1 row)
+
+-- check sane handling of INT16_MIN overflow cases
+SELECT (-32768)::int2 * (-1)::int2;
+ERROR: smallint out of range
+SELECT (-32768)::int2 / (-1)::int2;
+ERROR: smallint out of range
+SELECT (-32768)::int2 % (-1)::int2;
+ ?column?
+----------
+ 0
+(1 row)
+
+-- check rounding when casting from float
+SELECT x, x::int2 AS int2_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+ x | int2_value
+------+------------
+ -2.5 | -2
+ -1.5 | -2
+ -0.5 | 0
+ 0 | 0
+ 0.5 | 0
+ 1.5 | 2
+ 2.5 | 2
+(7 rows)
+
+-- check rounding when casting from numeric
+SELECT x, x::int2 AS int2_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
+ x | int2_value
+------+------------
+ -2.5 | -3
+ -1.5 | -2
+ -0.5 | -1
+ 0.0 | 0
+ 0.5 | 1
+ 1.5 | 2
+ 2.5 | 3
+(7 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/int2.sql b/ydb/library/yql/tests/postgresql/original/cases/int2.sql
new file mode 100644
index 0000000000..2f5ea64e3b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/int2.sql
@@ -0,0 +1,118 @@
+--
+-- INT2
+--
+
+CREATE TABLE INT2_TBL(f1 int2);
+
+INSERT INTO INT2_TBL(f1) VALUES ('0 ');
+
+INSERT INTO INT2_TBL(f1) VALUES (' 1234 ');
+
+INSERT INTO INT2_TBL(f1) VALUES (' -1234');
+
+INSERT INTO INT2_TBL(f1) VALUES ('34.5');
+
+-- largest and smallest values
+INSERT INTO INT2_TBL(f1) VALUES ('32767');
+
+INSERT INTO INT2_TBL(f1) VALUES ('-32767');
+
+-- bad input values -- should give errors
+INSERT INTO INT2_TBL(f1) VALUES ('100000');
+INSERT INTO INT2_TBL(f1) VALUES ('asdf');
+INSERT INTO INT2_TBL(f1) VALUES (' ');
+INSERT INTO INT2_TBL(f1) VALUES ('- 1234');
+INSERT INTO INT2_TBL(f1) VALUES ('4 444');
+INSERT INTO INT2_TBL(f1) VALUES ('123 dt');
+INSERT INTO INT2_TBL(f1) VALUES ('');
+
+
+SELECT * FROM INT2_TBL;
+
+SELECT * FROM INT2_TBL AS f(a, b);
+
+SELECT * FROM (TABLE int2_tbl) AS s (a, b);
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int2 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int4 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 = int2 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 = int4 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 < int2 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 < int4 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int2 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int4 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 > int2 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 > int4 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int2 '0';
+
+SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int4 '0';
+
+-- positive odds
+SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
+
+-- any evens
+SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
+
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i;
+
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i
+WHERE abs(f1) < 16384;
+
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT2_TBL i;
+
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i;
+
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i
+WHERE f1 < 32766;
+
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT2_TBL i;
+
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i;
+
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i
+WHERE f1 > -32767;
+
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT2_TBL i;
+
+SELECT i.f1, i.f1 / int2 '2' AS x FROM INT2_TBL i;
+
+SELECT i.f1, i.f1 / int4 '2' AS x FROM INT2_TBL i;
+
+-- corner cases
+SELECT (-1::int2<<15)::text;
+SELECT ((-1::int2<<15)+1::int2)::text;
+
+-- check sane handling of INT16_MIN overflow cases
+SELECT (-32768)::int2 * (-1)::int2;
+SELECT (-32768)::int2 / (-1)::int2;
+SELECT (-32768)::int2 % (-1)::int2;
+
+-- check rounding when casting from float
+SELECT x, x::int2 AS int2_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+
+-- check rounding when casting from numeric
+SELECT x, x::int2 AS int2_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/int4.out b/ydb/library/yql/tests/postgresql/original/cases/int4.out
new file mode 100644
index 0000000000..9d20b3380f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/int4.out
@@ -0,0 +1,439 @@
+--
+-- INT4
+--
+CREATE TABLE INT4_TBL(f1 int4);
+INSERT INTO INT4_TBL(f1) VALUES (' 0 ');
+INSERT INTO INT4_TBL(f1) VALUES ('123456 ');
+INSERT INTO INT4_TBL(f1) VALUES (' -123456');
+INSERT INTO INT4_TBL(f1) VALUES ('34.5');
+ERROR: invalid input syntax for type integer: "34.5"
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('34.5');
+ ^
+-- largest and smallest values
+INSERT INTO INT4_TBL(f1) VALUES ('2147483647');
+INSERT INTO INT4_TBL(f1) VALUES ('-2147483647');
+-- bad input values -- should give errors
+INSERT INTO INT4_TBL(f1) VALUES ('1000000000000');
+ERROR: value "1000000000000" is out of range for type integer
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('1000000000000');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES ('asdf');
+ERROR: invalid input syntax for type integer: "asdf"
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('asdf');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type integer: " "
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES (' asdf ');
+ERROR: invalid input syntax for type integer: " asdf "
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' asdf ');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES ('- 1234');
+ERROR: invalid input syntax for type integer: "- 1234"
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('- 1234');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES ('123 5');
+ERROR: invalid input syntax for type integer: "123 5"
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('123 5');
+ ^
+INSERT INTO INT4_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type integer: ""
+LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('');
+ ^
+SELECT * FROM INT4_TBL;
+ f1
+-------------
+ 0
+ 123456
+ -123456
+ 2147483647
+ -2147483647
+(5 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int2 '0';
+ f1
+-------------
+ 123456
+ -123456
+ 2147483647
+ -2147483647
+(4 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int4 '0';
+ f1
+-------------
+ 123456
+ -123456
+ 2147483647
+ -2147483647
+(4 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 = int2 '0';
+ f1
+----
+ 0
+(1 row)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 = int4 '0';
+ f1
+----
+ 0
+(1 row)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 < int2 '0';
+ f1
+-------------
+ -123456
+ -2147483647
+(2 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 < int4 '0';
+ f1
+-------------
+ -123456
+ -2147483647
+(2 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int2 '0';
+ f1
+-------------
+ 0
+ -123456
+ -2147483647
+(3 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int4 '0';
+ f1
+-------------
+ 0
+ -123456
+ -2147483647
+(3 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 > int2 '0';
+ f1
+------------
+ 123456
+ 2147483647
+(2 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 > int4 '0';
+ f1
+------------
+ 123456
+ 2147483647
+(2 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 >= int2 '0';
+ f1
+------------
+ 0
+ 123456
+ 2147483647
+(3 rows)
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 >= int4 '0';
+ f1
+------------
+ 0
+ 123456
+ 2147483647
+(3 rows)
+
+-- positive odds
+SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
+ f1
+------------
+ 2147483647
+(1 row)
+
+-- any evens
+SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
+ f1
+---------
+ 0
+ 123456
+ -123456
+(3 rows)
+
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i
+WHERE abs(f1) < 1073741824;
+ f1 | x
+---------+---------
+ 0 | 0
+ 123456 | 246912
+ -123456 | -246912
+(3 rows)
+
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i
+WHERE abs(f1) < 1073741824;
+ f1 | x
+---------+---------
+ 0 | 0
+ 123456 | 246912
+ -123456 | -246912
+(3 rows)
+
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i
+WHERE f1 < 2147483646;
+ f1 | x
+-------------+-------------
+ 0 | 2
+ 123456 | 123458
+ -123456 | -123454
+ -2147483647 | -2147483645
+(4 rows)
+
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i
+WHERE f1 < 2147483646;
+ f1 | x
+-------------+-------------
+ 0 | 2
+ 123456 | 123458
+ -123456 | -123454
+ -2147483647 | -2147483645
+(4 rows)
+
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i
+WHERE f1 > -2147483647;
+ f1 | x
+------------+------------
+ 0 | -2
+ 123456 | 123454
+ -123456 | -123458
+ 2147483647 | 2147483645
+(4 rows)
+
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i;
+ERROR: integer out of range
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i
+WHERE f1 > -2147483647;
+ f1 | x
+------------+------------
+ 0 | -2
+ 123456 | 123454
+ -123456 | -123458
+ 2147483647 | 2147483645
+(4 rows)
+
+SELECT i.f1, i.f1 / int2 '2' AS x FROM INT4_TBL i;
+ f1 | x
+-------------+-------------
+ 0 | 0
+ 123456 | 61728
+ -123456 | -61728
+ 2147483647 | 1073741823
+ -2147483647 | -1073741823
+(5 rows)
+
+SELECT i.f1, i.f1 / int4 '2' AS x FROM INT4_TBL i;
+ f1 | x
+-------------+-------------
+ 0 | 0
+ 123456 | 61728
+ -123456 | -61728
+ 2147483647 | 1073741823
+ -2147483647 | -1073741823
+(5 rows)
+
+--
+-- more complex expressions
+--
+-- variations on unary minus parsing
+SELECT -2+3 AS one;
+ one
+-----
+ 1
+(1 row)
+
+SELECT 4-2 AS two;
+ two
+-----
+ 2
+(1 row)
+
+SELECT 2- -1 AS three;
+ three
+-------
+ 3
+(1 row)
+
+SELECT 2 - -2 AS four;
+ four
+------
+ 4
+(1 row)
+
+SELECT int2 '2' * int2 '2' = int2 '16' / int2 '4' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT int4 '2' * int2 '2' = int2 '16' / int4 '4' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT int2 '2' * int4 '2' = int4 '16' / int2 '4' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT int4 '1000' < int4 '999' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 AS ten;
+ ten
+-----
+ 10
+(1 row)
+
+SELECT 2 + 2 / 2 AS three;
+ three
+-------
+ 3
+(1 row)
+
+SELECT (2 + 2) / 2 AS two;
+ two
+-----
+ 2
+(1 row)
+
+-- corner case
+SELECT (-1::int4<<31)::text;
+ text
+-------------
+ -2147483648
+(1 row)
+
+SELECT ((-1::int4<<31)+1)::text;
+ text
+-------------
+ -2147483647
+(1 row)
+
+-- check sane handling of INT_MIN overflow cases
+SELECT (-2147483648)::int4 * (-1)::int4;
+ERROR: integer out of range
+SELECT (-2147483648)::int4 / (-1)::int4;
+ERROR: integer out of range
+SELECT (-2147483648)::int4 % (-1)::int4;
+ ?column?
+----------
+ 0
+(1 row)
+
+SELECT (-2147483648)::int4 * (-1)::int2;
+ERROR: integer out of range
+SELECT (-2147483648)::int4 / (-1)::int2;
+ERROR: integer out of range
+SELECT (-2147483648)::int4 % (-1)::int2;
+ ?column?
+----------
+ 0
+(1 row)
+
+-- check rounding when casting from float
+SELECT x, x::int4 AS int4_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+ x | int4_value
+------+------------
+ -2.5 | -2
+ -1.5 | -2
+ -0.5 | 0
+ 0 | 0
+ 0.5 | 0
+ 1.5 | 2
+ 2.5 | 2
+(7 rows)
+
+-- check rounding when casting from numeric
+SELECT x, x::int4 AS int4_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
+ x | int4_value
+------+------------
+ -2.5 | -3
+ -1.5 | -2
+ -0.5 | -1
+ 0.0 | 0
+ 0.5 | 1
+ 1.5 | 2
+ 2.5 | 3
+(7 rows)
+
+-- test gcd()
+SELECT a, b, gcd(a, b), gcd(a, -b), gcd(b, a), gcd(-b, a)
+FROM (VALUES (0::int4, 0::int4),
+ (0::int4, 6410818::int4),
+ (61866666::int4, 6410818::int4),
+ (-61866666::int4, 6410818::int4),
+ ((-2147483648)::int4, 1::int4),
+ ((-2147483648)::int4, 2147483647::int4),
+ ((-2147483648)::int4, 1073741824::int4)) AS v(a, b);
+ a | b | gcd | gcd | gcd | gcd
+-------------+------------+------------+------------+------------+------------
+ 0 | 0 | 0 | 0 | 0 | 0
+ 0 | 6410818 | 6410818 | 6410818 | 6410818 | 6410818
+ 61866666 | 6410818 | 1466 | 1466 | 1466 | 1466
+ -61866666 | 6410818 | 1466 | 1466 | 1466 | 1466
+ -2147483648 | 1 | 1 | 1 | 1 | 1
+ -2147483648 | 2147483647 | 1 | 1 | 1 | 1
+ -2147483648 | 1073741824 | 1073741824 | 1073741824 | 1073741824 | 1073741824
+(7 rows)
+
+SELECT gcd((-2147483648)::int4, 0::int4); -- overflow
+ERROR: integer out of range
+SELECT gcd((-2147483648)::int4, (-2147483648)::int4); -- overflow
+ERROR: integer out of range
+-- test lcm()
+SELECT a, b, lcm(a, b), lcm(a, -b), lcm(b, a), lcm(-b, a)
+FROM (VALUES (0::int4, 0::int4),
+ (0::int4, 42::int4),
+ (42::int4, 42::int4),
+ (330::int4, 462::int4),
+ (-330::int4, 462::int4),
+ ((-2147483648)::int4, 0::int4)) AS v(a, b);
+ a | b | lcm | lcm | lcm | lcm
+-------------+-----+------+------+------+------
+ 0 | 0 | 0 | 0 | 0 | 0
+ 0 | 42 | 0 | 0 | 0 | 0
+ 42 | 42 | 42 | 42 | 42 | 42
+ 330 | 462 | 2310 | 2310 | 2310 | 2310
+ -330 | 462 | 2310 | 2310 | 2310 | 2310
+ -2147483648 | 0 | 0 | 0 | 0 | 0
+(6 rows)
+
+SELECT lcm((-2147483648)::int4, 1::int4); -- overflow
+ERROR: integer out of range
+SELECT lcm(2147483647::int4, 2147483646::int4); -- overflow
+ERROR: integer out of range
diff --git a/ydb/library/yql/tests/postgresql/original/cases/int4.sql b/ydb/library/yql/tests/postgresql/original/cases/int4.sql
new file mode 100644
index 0000000000..55ec07a147
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/int4.sql
@@ -0,0 +1,178 @@
+--
+-- INT4
+--
+
+CREATE TABLE INT4_TBL(f1 int4);
+
+INSERT INTO INT4_TBL(f1) VALUES (' 0 ');
+
+INSERT INTO INT4_TBL(f1) VALUES ('123456 ');
+
+INSERT INTO INT4_TBL(f1) VALUES (' -123456');
+
+INSERT INTO INT4_TBL(f1) VALUES ('34.5');
+
+-- largest and smallest values
+INSERT INTO INT4_TBL(f1) VALUES ('2147483647');
+
+INSERT INTO INT4_TBL(f1) VALUES ('-2147483647');
+
+-- bad input values -- should give errors
+INSERT INTO INT4_TBL(f1) VALUES ('1000000000000');
+INSERT INTO INT4_TBL(f1) VALUES ('asdf');
+INSERT INTO INT4_TBL(f1) VALUES (' ');
+INSERT INTO INT4_TBL(f1) VALUES (' asdf ');
+INSERT INTO INT4_TBL(f1) VALUES ('- 1234');
+INSERT INTO INT4_TBL(f1) VALUES ('123 5');
+INSERT INTO INT4_TBL(f1) VALUES ('');
+
+
+SELECT * FROM INT4_TBL;
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int2 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int4 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 = int2 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 = int4 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 < int2 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 < int4 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int2 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int4 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 > int2 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 > int4 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 >= int2 '0';
+
+SELECT i.* FROM INT4_TBL i WHERE i.f1 >= int4 '0';
+
+-- positive odds
+SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int2 '2') = int2 '1';
+
+-- any evens
+SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int4 '2') = int2 '0';
+
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i;
+
+SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i
+WHERE abs(f1) < 1073741824;
+
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i;
+
+SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i
+WHERE abs(f1) < 1073741824;
+
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i;
+
+SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i
+WHERE f1 < 2147483646;
+
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i;
+
+SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i
+WHERE f1 < 2147483646;
+
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i;
+
+SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i
+WHERE f1 > -2147483647;
+
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i;
+
+SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i
+WHERE f1 > -2147483647;
+
+SELECT i.f1, i.f1 / int2 '2' AS x FROM INT4_TBL i;
+
+SELECT i.f1, i.f1 / int4 '2' AS x FROM INT4_TBL i;
+
+--
+-- more complex expressions
+--
+
+-- variations on unary minus parsing
+SELECT -2+3 AS one;
+
+SELECT 4-2 AS two;
+
+SELECT 2- -1 AS three;
+
+SELECT 2 - -2 AS four;
+
+SELECT int2 '2' * int2 '2' = int2 '16' / int2 '4' AS true;
+
+SELECT int4 '2' * int2 '2' = int2 '16' / int4 '4' AS true;
+
+SELECT int2 '2' * int4 '2' = int4 '16' / int2 '4' AS true;
+
+SELECT int4 '1000' < int4 '999' AS false;
+
+SELECT 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 AS ten;
+
+SELECT 2 + 2 / 2 AS three;
+
+SELECT (2 + 2) / 2 AS two;
+
+-- corner case
+SELECT (-1::int4<<31)::text;
+SELECT ((-1::int4<<31)+1)::text;
+
+-- check sane handling of INT_MIN overflow cases
+SELECT (-2147483648)::int4 * (-1)::int4;
+SELECT (-2147483648)::int4 / (-1)::int4;
+SELECT (-2147483648)::int4 % (-1)::int4;
+SELECT (-2147483648)::int4 * (-1)::int2;
+SELECT (-2147483648)::int4 / (-1)::int2;
+SELECT (-2147483648)::int4 % (-1)::int2;
+
+-- check rounding when casting from float
+SELECT x, x::int4 AS int4_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+
+-- check rounding when casting from numeric
+SELECT x, x::int4 AS int4_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
+
+-- test gcd()
+SELECT a, b, gcd(a, b), gcd(a, -b), gcd(b, a), gcd(-b, a)
+FROM (VALUES (0::int4, 0::int4),
+ (0::int4, 6410818::int4),
+ (61866666::int4, 6410818::int4),
+ (-61866666::int4, 6410818::int4),
+ ((-2147483648)::int4, 1::int4),
+ ((-2147483648)::int4, 2147483647::int4),
+ ((-2147483648)::int4, 1073741824::int4)) AS v(a, b);
+
+SELECT gcd((-2147483648)::int4, 0::int4); -- overflow
+SELECT gcd((-2147483648)::int4, (-2147483648)::int4); -- overflow
+
+-- test lcm()
+SELECT a, b, lcm(a, b), lcm(a, -b), lcm(b, a), lcm(-b, a)
+FROM (VALUES (0::int4, 0::int4),
+ (0::int4, 42::int4),
+ (42::int4, 42::int4),
+ (330::int4, 462::int4),
+ (-330::int4, 462::int4),
+ ((-2147483648)::int4, 0::int4)) AS v(a, b);
+
+SELECT lcm((-2147483648)::int4, 1::int4); -- overflow
+SELECT lcm(2147483647::int4, 2147483646::int4); -- overflow
diff --git a/ydb/library/yql/tests/postgresql/original/cases/int8.out b/ydb/library/yql/tests/postgresql/original/cases/int8.out
new file mode 100644
index 0000000000..36540ec456
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/int8.out
@@ -0,0 +1,934 @@
+--
+-- INT8
+-- Test int8 64-bit integers.
+--
+CREATE TABLE INT8_TBL(q1 int8, q2 int8);
+INSERT INTO INT8_TBL VALUES(' 123 ',' 456');
+INSERT INTO INT8_TBL VALUES('123 ','4567890123456789');
+INSERT INTO INT8_TBL VALUES('4567890123456789','123');
+INSERT INTO INT8_TBL VALUES(+4567890123456789,'4567890123456789');
+INSERT INTO INT8_TBL VALUES('+4567890123456789','-4567890123456789');
+-- bad inputs
+INSERT INTO INT8_TBL(q1) VALUES (' ');
+ERROR: invalid input syntax for type bigint: " "
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' ');
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('xxx');
+ERROR: invalid input syntax for type bigint: "xxx"
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('xxx');
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('3908203590239580293850293850329485');
+ERROR: value "3908203590239580293850293850329485" is out of range for type bigint
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('39082035902395802938502938...
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340329840934');
+ERROR: value "-1204982019841029840928340329840934" is out of range for type bigint
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340...
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('- 123');
+ERROR: invalid input syntax for type bigint: "- 123"
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('- 123');
+ ^
+INSERT INTO INT8_TBL(q1) VALUES (' 345 5');
+ERROR: invalid input syntax for type bigint: " 345 5"
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' 345 5');
+ ^
+INSERT INTO INT8_TBL(q1) VALUES ('');
+ERROR: invalid input syntax for type bigint: ""
+LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('');
+ ^
+SELECT * FROM INT8_TBL;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+-- int8/int8 cmp
+SELECT * FROM INT8_TBL WHERE q2 = 4567890123456789;
+ q1 | q2
+------------------+------------------
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+(2 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 <> 4567890123456789;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 4567890123456789 | 123
+ 4567890123456789 | -4567890123456789
+(3 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 < 4567890123456789;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 4567890123456789 | 123
+ 4567890123456789 | -4567890123456789
+(3 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 > 4567890123456789;
+ q1 | q2
+----+----
+(0 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 <= 4567890123456789;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 >= 4567890123456789;
+ q1 | q2
+------------------+------------------
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+(2 rows)
+
+-- int8/int4 cmp
+SELECT * FROM INT8_TBL WHERE q2 = 456;
+ q1 | q2
+-----+-----
+ 123 | 456
+(1 row)
+
+SELECT * FROM INT8_TBL WHERE q2 <> 456;
+ q1 | q2
+------------------+-------------------
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(4 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 < 456;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | 123
+ 4567890123456789 | -4567890123456789
+(2 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 > 456;
+ q1 | q2
+------------------+------------------
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+(2 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 <= 456;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 4567890123456789 | 123
+ 4567890123456789 | -4567890123456789
+(3 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 >= 456;
+ q1 | q2
+------------------+------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+(3 rows)
+
+-- int4/int8 cmp
+SELECT * FROM INT8_TBL WHERE 123 = q1;
+ q1 | q2
+-----+------------------
+ 123 | 456
+ 123 | 4567890123456789
+(2 rows)
+
+SELECT * FROM INT8_TBL WHERE 123 <> q1;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(3 rows)
+
+SELECT * FROM INT8_TBL WHERE 123 < q1;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(3 rows)
+
+SELECT * FROM INT8_TBL WHERE 123 > q1;
+ q1 | q2
+----+----
+(0 rows)
+
+SELECT * FROM INT8_TBL WHERE 123 <= q1;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+SELECT * FROM INT8_TBL WHERE 123 >= q1;
+ q1 | q2
+-----+------------------
+ 123 | 456
+ 123 | 4567890123456789
+(2 rows)
+
+-- int8/int2 cmp
+SELECT * FROM INT8_TBL WHERE q2 = '456'::int2;
+ q1 | q2
+-----+-----
+ 123 | 456
+(1 row)
+
+SELECT * FROM INT8_TBL WHERE q2 <> '456'::int2;
+ q1 | q2
+------------------+-------------------
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(4 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 < '456'::int2;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | 123
+ 4567890123456789 | -4567890123456789
+(2 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 > '456'::int2;
+ q1 | q2
+------------------+------------------
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+(2 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 <= '456'::int2;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 4567890123456789 | 123
+ 4567890123456789 | -4567890123456789
+(3 rows)
+
+SELECT * FROM INT8_TBL WHERE q2 >= '456'::int2;
+ q1 | q2
+------------------+------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+(3 rows)
+
+-- int2/int8 cmp
+SELECT * FROM INT8_TBL WHERE '123'::int2 = q1;
+ q1 | q2
+-----+------------------
+ 123 | 456
+ 123 | 4567890123456789
+(2 rows)
+
+SELECT * FROM INT8_TBL WHERE '123'::int2 <> q1;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(3 rows)
+
+SELECT * FROM INT8_TBL WHERE '123'::int2 < q1;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(3 rows)
+
+SELECT * FROM INT8_TBL WHERE '123'::int2 > q1;
+ q1 | q2
+----+----
+(0 rows)
+
+SELECT * FROM INT8_TBL WHERE '123'::int2 <= q1;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+SELECT * FROM INT8_TBL WHERE '123'::int2 >= q1;
+ q1 | q2
+-----+------------------
+ 123 | 456
+ 123 | 4567890123456789
+(2 rows)
+
+SELECT q1 AS plus, -q1 AS minus FROM INT8_TBL;
+ plus | minus
+------------------+-------------------
+ 123 | -123
+ 123 | -123
+ 4567890123456789 | -4567890123456789
+ 4567890123456789 | -4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+SELECT q1, q2, q1 + q2 AS plus FROM INT8_TBL;
+ q1 | q2 | plus
+------------------+-------------------+------------------
+ 123 | 456 | 579
+ 123 | 4567890123456789 | 4567890123456912
+ 4567890123456789 | 123 | 4567890123456912
+ 4567890123456789 | 4567890123456789 | 9135780246913578
+ 4567890123456789 | -4567890123456789 | 0
+(5 rows)
+
+SELECT q1, q2, q1 - q2 AS minus FROM INT8_TBL;
+ q1 | q2 | minus
+------------------+-------------------+-------------------
+ 123 | 456 | -333
+ 123 | 4567890123456789 | -4567890123456666
+ 4567890123456789 | 123 | 4567890123456666
+ 4567890123456789 | 4567890123456789 | 0
+ 4567890123456789 | -4567890123456789 | 9135780246913578
+(5 rows)
+
+SELECT q1, q2, q1 * q2 AS multiply FROM INT8_TBL;
+ERROR: bigint out of range
+SELECT q1, q2, q1 * q2 AS multiply FROM INT8_TBL
+ WHERE q1 < 1000 or (q2 > 0 and q2 < 1000);
+ q1 | q2 | multiply
+------------------+------------------+--------------------
+ 123 | 456 | 56088
+ 123 | 4567890123456789 | 561850485185185047
+ 4567890123456789 | 123 | 561850485185185047
+(3 rows)
+
+SELECT q1, q2, q1 / q2 AS divide, q1 % q2 AS mod FROM INT8_TBL;
+ q1 | q2 | divide | mod
+------------------+-------------------+----------------+-----
+ 123 | 456 | 0 | 123
+ 123 | 4567890123456789 | 0 | 123
+ 4567890123456789 | 123 | 37137318076884 | 57
+ 4567890123456789 | 4567890123456789 | 1 | 0
+ 4567890123456789 | -4567890123456789 | -1 | 0
+(5 rows)
+
+SELECT q1, float8(q1) FROM INT8_TBL;
+ q1 | float8
+------------------+-----------------------
+ 123 | 123
+ 123 | 123
+ 4567890123456789 | 4.567890123456789e+15
+ 4567890123456789 | 4.567890123456789e+15
+ 4567890123456789 | 4.567890123456789e+15
+(5 rows)
+
+SELECT q2, float8(q2) FROM INT8_TBL;
+ q2 | float8
+-------------------+------------------------
+ 456 | 456
+ 4567890123456789 | 4.567890123456789e+15
+ 123 | 123
+ 4567890123456789 | 4.567890123456789e+15
+ -4567890123456789 | -4.567890123456789e+15
+(5 rows)
+
+SELECT 37 + q1 AS plus4 FROM INT8_TBL;
+ plus4
+------------------
+ 160
+ 160
+ 4567890123456826
+ 4567890123456826
+ 4567890123456826
+(5 rows)
+
+SELECT 37 - q1 AS minus4 FROM INT8_TBL;
+ minus4
+-------------------
+ -86
+ -86
+ -4567890123456752
+ -4567890123456752
+ -4567890123456752
+(5 rows)
+
+SELECT 2 * q1 AS "twice int4" FROM INT8_TBL;
+ twice int4
+------------------
+ 246
+ 246
+ 9135780246913578
+ 9135780246913578
+ 9135780246913578
+(5 rows)
+
+SELECT q1 * 2 AS "twice int4" FROM INT8_TBL;
+ twice int4
+------------------
+ 246
+ 246
+ 9135780246913578
+ 9135780246913578
+ 9135780246913578
+(5 rows)
+
+-- int8 op int4
+SELECT q1 + 42::int4 AS "8plus4", q1 - 42::int4 AS "8minus4", q1 * 42::int4 AS "8mul4", q1 / 42::int4 AS "8div4" FROM INT8_TBL;
+ 8plus4 | 8minus4 | 8mul4 | 8div4
+------------------+------------------+--------------------+-----------------
+ 165 | 81 | 5166 | 2
+ 165 | 81 | 5166 | 2
+ 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733
+ 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733
+ 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733
+(5 rows)
+
+-- int4 op int8
+SELECT 246::int4 + q1 AS "4plus8", 246::int4 - q1 AS "4minus8", 246::int4 * q1 AS "4mul8", 246::int4 / q1 AS "4div8" FROM INT8_TBL;
+ 4plus8 | 4minus8 | 4mul8 | 4div8
+------------------+-------------------+---------------------+-------
+ 369 | 123 | 30258 | 2
+ 369 | 123 | 30258 | 2
+ 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0
+ 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0
+ 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0
+(5 rows)
+
+-- int8 op int2
+SELECT q1 + 42::int2 AS "8plus2", q1 - 42::int2 AS "8minus2", q1 * 42::int2 AS "8mul2", q1 / 42::int2 AS "8div2" FROM INT8_TBL;
+ 8plus2 | 8minus2 | 8mul2 | 8div2
+------------------+------------------+--------------------+-----------------
+ 165 | 81 | 5166 | 2
+ 165 | 81 | 5166 | 2
+ 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733
+ 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733
+ 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733
+(5 rows)
+
+-- int2 op int8
+SELECT 246::int2 + q1 AS "2plus8", 246::int2 - q1 AS "2minus8", 246::int2 * q1 AS "2mul8", 246::int2 / q1 AS "2div8" FROM INT8_TBL;
+ 2plus8 | 2minus8 | 2mul8 | 2div8
+------------------+-------------------+---------------------+-------
+ 369 | 123 | 30258 | 2
+ 369 | 123 | 30258 | 2
+ 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0
+ 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0
+ 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0
+(5 rows)
+
+SELECT q2, abs(q2) FROM INT8_TBL;
+ q2 | abs
+-------------------+------------------
+ 456 | 456
+ 4567890123456789 | 4567890123456789
+ 123 | 123
+ 4567890123456789 | 4567890123456789
+ -4567890123456789 | 4567890123456789
+(5 rows)
+
+SELECT min(q1), min(q2) FROM INT8_TBL;
+ min | min
+-----+-------------------
+ 123 | -4567890123456789
+(1 row)
+
+SELECT max(q1), max(q2) FROM INT8_TBL;
+ max | max
+------------------+------------------
+ 4567890123456789 | 4567890123456789
+(1 row)
+
+-- TO_CHAR()
+--
+SELECT to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999')
+ FROM INT8_TBL;
+ to_char | to_char
+------------------------+------------------------
+ 123 | 456
+ 123 | 4,567,890,123,456,789
+ 4,567,890,123,456,789 | 123
+ 4,567,890,123,456,789 | 4,567,890,123,456,789
+ 4,567,890,123,456,789 | -4,567,890,123,456,789
+(5 rows)
+
+SELECT to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999')
+ FROM INT8_TBL;
+ to_char | to_char
+--------------------------------+--------------------------------
+ 123.000,000 | 456.000,000
+ 123.000,000 | 4,567,890,123,456,789.000,000
+ 4,567,890,123,456,789.000,000 | 123.000,000
+ 4,567,890,123,456,789.000,000 | 4,567,890,123,456,789.000,000
+ 4,567,890,123,456,789.000,000 | -4,567,890,123,456,789.000,000
+(5 rows)
+
+SELECT to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR')
+ FROM INT8_TBL;
+ to_char | to_char
+--------------------+------------------------
+ <123> | <456.000>
+ <123> | <4567890123456789.000>
+ <4567890123456789> | <123.000>
+ <4567890123456789> | <4567890123456789.000>
+ <4567890123456789> | 4567890123456789.000
+(5 rows)
+
+SELECT to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999')
+ FROM INT8_TBL;
+ to_char | to_char
+-------------------+-------------------
+ 123- | -456
+ 123- | -4567890123456789
+ 4567890123456789- | -123
+ 4567890123456789- | -4567890123456789
+ 4567890123456789- | +4567890123456789
+(5 rows)
+
+SELECT to_char(q2, 'MI9999999999999999') FROM INT8_TBL;
+ to_char
+-------------------
+ 456
+ 4567890123456789
+ 123
+ 4567890123456789
+ -4567890123456789
+(5 rows)
+
+SELECT to_char(q2, 'FMS9999999999999999') FROM INT8_TBL;
+ to_char
+-------------------
+ +456
+ +4567890123456789
+ +123
+ +4567890123456789
+ -4567890123456789
+(5 rows)
+
+SELECT to_char(q2, 'FM9999999999999999THPR') FROM INT8_TBL;
+ to_char
+--------------------
+ 456TH
+ 4567890123456789TH
+ 123RD
+ 4567890123456789TH
+ <4567890123456789>
+(5 rows)
+
+SELECT to_char(q2, 'SG9999999999999999th') FROM INT8_TBL;
+ to_char
+---------------------
+ + 456th
+ +4567890123456789th
+ + 123rd
+ +4567890123456789th
+ -4567890123456789
+(5 rows)
+
+SELECT to_char(q2, '0999999999999999') FROM INT8_TBL;
+ to_char
+-------------------
+ 0000000000000456
+ 4567890123456789
+ 0000000000000123
+ 4567890123456789
+ -4567890123456789
+(5 rows)
+
+SELECT to_char(q2, 'S0999999999999999') FROM INT8_TBL;
+ to_char
+-------------------
+ +0000000000000456
+ +4567890123456789
+ +0000000000000123
+ +4567890123456789
+ -4567890123456789
+(5 rows)
+
+SELECT to_char(q2, 'FM0999999999999999') FROM INT8_TBL;
+ to_char
+-------------------
+ 0000000000000456
+ 4567890123456789
+ 0000000000000123
+ 4567890123456789
+ -4567890123456789
+(5 rows)
+
+SELECT to_char(q2, 'FM9999999999999999.000') FROM INT8_TBL;
+ to_char
+-----------------------
+ 456.000
+ 4567890123456789.000
+ 123.000
+ 4567890123456789.000
+ -4567890123456789.000
+(5 rows)
+
+SELECT to_char(q2, 'L9999999999999999.000') FROM INT8_TBL;
+ to_char
+------------------------
+ 456.000
+ 4567890123456789.000
+ 123.000
+ 4567890123456789.000
+ -4567890123456789.000
+(5 rows)
+
+SELECT to_char(q2, 'FM9999999999999999.999') FROM INT8_TBL;
+ to_char
+--------------------
+ 456.
+ 4567890123456789.
+ 123.
+ 4567890123456789.
+ -4567890123456789.
+(5 rows)
+
+SELECT to_char(q2, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9') FROM INT8_TBL;
+ to_char
+-------------------------------------------
+ +4 5 6 . 0 0 0
+ +4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 . 0 0 0
+ +1 2 3 . 0 0 0
+ +4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 . 0 0 0
+ -4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 . 0 0 0
+(5 rows)
+
+SELECT to_char(q2, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM INT8_TBL;
+ to_char
+-----------------------------------------------------------
+ text 9999 "text between quote marks" 456
+ 45678 text 9012 9999 345 "text between quote marks" 6789
+ text 9999 "text between quote marks" 123
+ 45678 text 9012 9999 345 "text between quote marks" 6789
+ -45678 text 9012 9999 345 "text between quote marks" 6789
+(5 rows)
+
+SELECT to_char(q2, '999999SG9999999999') FROM INT8_TBL;
+ to_char
+-------------------
+ + 456
+ 456789+0123456789
+ + 123
+ 456789+0123456789
+ 456789-0123456789
+(5 rows)
+
+-- check min/max values and overflow behavior
+select '-9223372036854775808'::int8;
+ int8
+----------------------
+ -9223372036854775808
+(1 row)
+
+select '-9223372036854775809'::int8;
+ERROR: value "-9223372036854775809" is out of range for type bigint
+LINE 1: select '-9223372036854775809'::int8;
+ ^
+select '9223372036854775807'::int8;
+ int8
+---------------------
+ 9223372036854775807
+(1 row)
+
+select '9223372036854775808'::int8;
+ERROR: value "9223372036854775808" is out of range for type bigint
+LINE 1: select '9223372036854775808'::int8;
+ ^
+select -('-9223372036854775807'::int8);
+ ?column?
+---------------------
+ 9223372036854775807
+(1 row)
+
+select -('-9223372036854775808'::int8);
+ERROR: bigint out of range
+select '9223372036854775800'::int8 + '9223372036854775800'::int8;
+ERROR: bigint out of range
+select '-9223372036854775800'::int8 + '-9223372036854775800'::int8;
+ERROR: bigint out of range
+select '9223372036854775800'::int8 - '-9223372036854775800'::int8;
+ERROR: bigint out of range
+select '-9223372036854775800'::int8 - '9223372036854775800'::int8;
+ERROR: bigint out of range
+select '9223372036854775800'::int8 * '9223372036854775800'::int8;
+ERROR: bigint out of range
+select '9223372036854775800'::int8 / '0'::int8;
+ERROR: division by zero
+select '9223372036854775800'::int8 % '0'::int8;
+ERROR: division by zero
+select abs('-9223372036854775808'::int8);
+ERROR: bigint out of range
+select '9223372036854775800'::int8 + '100'::int4;
+ERROR: bigint out of range
+select '-9223372036854775800'::int8 - '100'::int4;
+ERROR: bigint out of range
+select '9223372036854775800'::int8 * '100'::int4;
+ERROR: bigint out of range
+select '100'::int4 + '9223372036854775800'::int8;
+ERROR: bigint out of range
+select '-100'::int4 - '9223372036854775800'::int8;
+ERROR: bigint out of range
+select '100'::int4 * '9223372036854775800'::int8;
+ERROR: bigint out of range
+select '9223372036854775800'::int8 + '100'::int2;
+ERROR: bigint out of range
+select '-9223372036854775800'::int8 - '100'::int2;
+ERROR: bigint out of range
+select '9223372036854775800'::int8 * '100'::int2;
+ERROR: bigint out of range
+select '-9223372036854775808'::int8 / '0'::int2;
+ERROR: division by zero
+select '100'::int2 + '9223372036854775800'::int8;
+ERROR: bigint out of range
+select '-100'::int2 - '9223372036854775800'::int8;
+ERROR: bigint out of range
+select '100'::int2 * '9223372036854775800'::int8;
+ERROR: bigint out of range
+select '100'::int2 / '0'::int8;
+ERROR: division by zero
+SELECT CAST(q1 AS int4) FROM int8_tbl WHERE q2 = 456;
+ q1
+-----
+ 123
+(1 row)
+
+SELECT CAST(q1 AS int4) FROM int8_tbl WHERE q2 <> 456;
+ERROR: integer out of range
+SELECT CAST(q1 AS int2) FROM int8_tbl WHERE q2 = 456;
+ q1
+-----
+ 123
+(1 row)
+
+SELECT CAST(q1 AS int2) FROM int8_tbl WHERE q2 <> 456;
+ERROR: smallint out of range
+SELECT CAST('42'::int2 AS int8), CAST('-37'::int2 AS int8);
+ int8 | int8
+------+------
+ 42 | -37
+(1 row)
+
+SELECT CAST(q1 AS float4), CAST(q2 AS float8) FROM INT8_TBL;
+ q1 | q2
+-------------+------------------------
+ 123 | 456
+ 123 | 4.567890123456789e+15
+ 4.56789e+15 | 123
+ 4.56789e+15 | 4.567890123456789e+15
+ 4.56789e+15 | -4.567890123456789e+15
+(5 rows)
+
+SELECT CAST('36854775807.0'::float4 AS int8);
+ int8
+-------------
+ 36854775808
+(1 row)
+
+SELECT CAST('922337203685477580700.0'::float8 AS int8);
+ERROR: bigint out of range
+SELECT CAST(q1 AS oid) FROM INT8_TBL;
+ERROR: OID out of range
+SELECT oid::int8 FROM pg_class WHERE relname = 'pg_class';
+ oid
+------
+ 1259
+(1 row)
+
+-- bit operations
+SELECT q1, q2, q1 & q2 AS "and", q1 | q2 AS "or", q1 # q2 AS "xor", ~q1 AS "not" FROM INT8_TBL;
+ q1 | q2 | and | or | xor | not
+------------------+-------------------+------------------+------------------+------------------+-------------------
+ 123 | 456 | 72 | 507 | 435 | -124
+ 123 | 4567890123456789 | 17 | 4567890123456895 | 4567890123456878 | -124
+ 4567890123456789 | 123 | 17 | 4567890123456895 | 4567890123456878 | -4567890123456790
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 0 | -4567890123456790
+ 4567890123456789 | -4567890123456789 | 1 | -1 | -2 | -4567890123456790
+(5 rows)
+
+SELECT q1, q1 << 2 AS "shl", q1 >> 3 AS "shr" FROM INT8_TBL;
+ q1 | shl | shr
+------------------+-------------------+-----------------
+ 123 | 492 | 15
+ 123 | 492 | 15
+ 4567890123456789 | 18271560493827156 | 570986265432098
+ 4567890123456789 | 18271560493827156 | 570986265432098
+ 4567890123456789 | 18271560493827156 | 570986265432098
+(5 rows)
+
+-- generate_series
+SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8);
+ generate_series
+------------------
+ 4567890123456789
+ 4567890123456790
+ 4567890123456791
+ 4567890123456792
+ 4567890123456793
+ 4567890123456794
+ 4567890123456795
+ 4567890123456796
+ 4567890123456797
+ 4567890123456798
+ 4567890123456799
+(11 rows)
+
+SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8, 0);
+ERROR: step size cannot equal zero
+SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8, 2);
+ generate_series
+------------------
+ 4567890123456789
+ 4567890123456791
+ 4567890123456793
+ 4567890123456795
+ 4567890123456797
+ 4567890123456799
+(6 rows)
+
+-- corner case
+SELECT (-1::int8<<63)::text;
+ text
+----------------------
+ -9223372036854775808
+(1 row)
+
+SELECT ((-1::int8<<63)+1)::text;
+ text
+----------------------
+ -9223372036854775807
+(1 row)
+
+-- check sane handling of INT64_MIN overflow cases
+SELECT (-9223372036854775808)::int8 * (-1)::int8;
+ERROR: bigint out of range
+SELECT (-9223372036854775808)::int8 / (-1)::int8;
+ERROR: bigint out of range
+SELECT (-9223372036854775808)::int8 % (-1)::int8;
+ ?column?
+----------
+ 0
+(1 row)
+
+SELECT (-9223372036854775808)::int8 * (-1)::int4;
+ERROR: bigint out of range
+SELECT (-9223372036854775808)::int8 / (-1)::int4;
+ERROR: bigint out of range
+SELECT (-9223372036854775808)::int8 % (-1)::int4;
+ ?column?
+----------
+ 0
+(1 row)
+
+SELECT (-9223372036854775808)::int8 * (-1)::int2;
+ERROR: bigint out of range
+SELECT (-9223372036854775808)::int8 / (-1)::int2;
+ERROR: bigint out of range
+SELECT (-9223372036854775808)::int8 % (-1)::int2;
+ ?column?
+----------
+ 0
+(1 row)
+
+-- check rounding when casting from float
+SELECT x, x::int8 AS int8_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+ x | int8_value
+------+------------
+ -2.5 | -2
+ -1.5 | -2
+ -0.5 | 0
+ 0 | 0
+ 0.5 | 0
+ 1.5 | 2
+ 2.5 | 2
+(7 rows)
+
+-- check rounding when casting from numeric
+SELECT x, x::int8 AS int8_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
+ x | int8_value
+------+------------
+ -2.5 | -3
+ -1.5 | -2
+ -0.5 | -1
+ 0.0 | 0
+ 0.5 | 1
+ 1.5 | 2
+ 2.5 | 3
+(7 rows)
+
+-- test gcd()
+SELECT a, b, gcd(a, b), gcd(a, -b), gcd(b, a), gcd(-b, a)
+FROM (VALUES (0::int8, 0::int8),
+ (0::int8, 29893644334::int8),
+ (288484263558::int8, 29893644334::int8),
+ (-288484263558::int8, 29893644334::int8),
+ ((-9223372036854775808)::int8, 1::int8),
+ ((-9223372036854775808)::int8, 9223372036854775807::int8),
+ ((-9223372036854775808)::int8, 4611686018427387904::int8)) AS v(a, b);
+ a | b | gcd | gcd | gcd | gcd
+----------------------+---------------------+---------------------+---------------------+---------------------+---------------------
+ 0 | 0 | 0 | 0 | 0 | 0
+ 0 | 29893644334 | 29893644334 | 29893644334 | 29893644334 | 29893644334
+ 288484263558 | 29893644334 | 6835958 | 6835958 | 6835958 | 6835958
+ -288484263558 | 29893644334 | 6835958 | 6835958 | 6835958 | 6835958
+ -9223372036854775808 | 1 | 1 | 1 | 1 | 1
+ -9223372036854775808 | 9223372036854775807 | 1 | 1 | 1 | 1
+ -9223372036854775808 | 4611686018427387904 | 4611686018427387904 | 4611686018427387904 | 4611686018427387904 | 4611686018427387904
+(7 rows)
+
+SELECT gcd((-9223372036854775808)::int8, 0::int8); -- overflow
+ERROR: bigint out of range
+SELECT gcd((-9223372036854775808)::int8, (-9223372036854775808)::int8); -- overflow
+ERROR: bigint out of range
+-- test lcm()
+SELECT a, b, lcm(a, b), lcm(a, -b), lcm(b, a), lcm(-b, a)
+FROM (VALUES (0::int8, 0::int8),
+ (0::int8, 29893644334::int8),
+ (29893644334::int8, 29893644334::int8),
+ (288484263558::int8, 29893644334::int8),
+ (-288484263558::int8, 29893644334::int8),
+ ((-9223372036854775808)::int8, 0::int8)) AS v(a, b);
+ a | b | lcm | lcm | lcm | lcm
+----------------------+-------------+------------------+------------------+------------------+------------------
+ 0 | 0 | 0 | 0 | 0 | 0
+ 0 | 29893644334 | 0 | 0 | 0 | 0
+ 29893644334 | 29893644334 | 29893644334 | 29893644334 | 29893644334 | 29893644334
+ 288484263558 | 29893644334 | 1261541684539134 | 1261541684539134 | 1261541684539134 | 1261541684539134
+ -288484263558 | 29893644334 | 1261541684539134 | 1261541684539134 | 1261541684539134 | 1261541684539134
+ -9223372036854775808 | 0 | 0 | 0 | 0 | 0
+(6 rows)
+
+SELECT lcm((-9223372036854775808)::int8, 1::int8); -- overflow
+ERROR: bigint out of range
+SELECT lcm(9223372036854775807::int8, 9223372036854775806::int8); -- overflow
+ERROR: bigint out of range
diff --git a/ydb/library/yql/tests/postgresql/original/cases/int8.sql b/ydb/library/yql/tests/postgresql/original/cases/int8.sql
new file mode 100644
index 0000000000..32940b4daa
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/int8.sql
@@ -0,0 +1,252 @@
+--
+-- INT8
+-- Test int8 64-bit integers.
+--
+CREATE TABLE INT8_TBL(q1 int8, q2 int8);
+
+INSERT INTO INT8_TBL VALUES(' 123 ',' 456');
+INSERT INTO INT8_TBL VALUES('123 ','4567890123456789');
+INSERT INTO INT8_TBL VALUES('4567890123456789','123');
+INSERT INTO INT8_TBL VALUES(+4567890123456789,'4567890123456789');
+INSERT INTO INT8_TBL VALUES('+4567890123456789','-4567890123456789');
+
+-- bad inputs
+INSERT INTO INT8_TBL(q1) VALUES (' ');
+INSERT INTO INT8_TBL(q1) VALUES ('xxx');
+INSERT INTO INT8_TBL(q1) VALUES ('3908203590239580293850293850329485');
+INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340329840934');
+INSERT INTO INT8_TBL(q1) VALUES ('- 123');
+INSERT INTO INT8_TBL(q1) VALUES (' 345 5');
+INSERT INTO INT8_TBL(q1) VALUES ('');
+
+SELECT * FROM INT8_TBL;
+
+-- int8/int8 cmp
+SELECT * FROM INT8_TBL WHERE q2 = 4567890123456789;
+SELECT * FROM INT8_TBL WHERE q2 <> 4567890123456789;
+SELECT * FROM INT8_TBL WHERE q2 < 4567890123456789;
+SELECT * FROM INT8_TBL WHERE q2 > 4567890123456789;
+SELECT * FROM INT8_TBL WHERE q2 <= 4567890123456789;
+SELECT * FROM INT8_TBL WHERE q2 >= 4567890123456789;
+
+-- int8/int4 cmp
+SELECT * FROM INT8_TBL WHERE q2 = 456;
+SELECT * FROM INT8_TBL WHERE q2 <> 456;
+SELECT * FROM INT8_TBL WHERE q2 < 456;
+SELECT * FROM INT8_TBL WHERE q2 > 456;
+SELECT * FROM INT8_TBL WHERE q2 <= 456;
+SELECT * FROM INT8_TBL WHERE q2 >= 456;
+
+-- int4/int8 cmp
+SELECT * FROM INT8_TBL WHERE 123 = q1;
+SELECT * FROM INT8_TBL WHERE 123 <> q1;
+SELECT * FROM INT8_TBL WHERE 123 < q1;
+SELECT * FROM INT8_TBL WHERE 123 > q1;
+SELECT * FROM INT8_TBL WHERE 123 <= q1;
+SELECT * FROM INT8_TBL WHERE 123 >= q1;
+
+-- int8/int2 cmp
+SELECT * FROM INT8_TBL WHERE q2 = '456'::int2;
+SELECT * FROM INT8_TBL WHERE q2 <> '456'::int2;
+SELECT * FROM INT8_TBL WHERE q2 < '456'::int2;
+SELECT * FROM INT8_TBL WHERE q2 > '456'::int2;
+SELECT * FROM INT8_TBL WHERE q2 <= '456'::int2;
+SELECT * FROM INT8_TBL WHERE q2 >= '456'::int2;
+
+-- int2/int8 cmp
+SELECT * FROM INT8_TBL WHERE '123'::int2 = q1;
+SELECT * FROM INT8_TBL WHERE '123'::int2 <> q1;
+SELECT * FROM INT8_TBL WHERE '123'::int2 < q1;
+SELECT * FROM INT8_TBL WHERE '123'::int2 > q1;
+SELECT * FROM INT8_TBL WHERE '123'::int2 <= q1;
+SELECT * FROM INT8_TBL WHERE '123'::int2 >= q1;
+
+
+SELECT q1 AS plus, -q1 AS minus FROM INT8_TBL;
+
+SELECT q1, q2, q1 + q2 AS plus FROM INT8_TBL;
+SELECT q1, q2, q1 - q2 AS minus FROM INT8_TBL;
+SELECT q1, q2, q1 * q2 AS multiply FROM INT8_TBL;
+SELECT q1, q2, q1 * q2 AS multiply FROM INT8_TBL
+ WHERE q1 < 1000 or (q2 > 0 and q2 < 1000);
+SELECT q1, q2, q1 / q2 AS divide, q1 % q2 AS mod FROM INT8_TBL;
+
+SELECT q1, float8(q1) FROM INT8_TBL;
+SELECT q2, float8(q2) FROM INT8_TBL;
+
+SELECT 37 + q1 AS plus4 FROM INT8_TBL;
+SELECT 37 - q1 AS minus4 FROM INT8_TBL;
+SELECT 2 * q1 AS "twice int4" FROM INT8_TBL;
+SELECT q1 * 2 AS "twice int4" FROM INT8_TBL;
+
+-- int8 op int4
+SELECT q1 + 42::int4 AS "8plus4", q1 - 42::int4 AS "8minus4", q1 * 42::int4 AS "8mul4", q1 / 42::int4 AS "8div4" FROM INT8_TBL;
+-- int4 op int8
+SELECT 246::int4 + q1 AS "4plus8", 246::int4 - q1 AS "4minus8", 246::int4 * q1 AS "4mul8", 246::int4 / q1 AS "4div8" FROM INT8_TBL;
+
+-- int8 op int2
+SELECT q1 + 42::int2 AS "8plus2", q1 - 42::int2 AS "8minus2", q1 * 42::int2 AS "8mul2", q1 / 42::int2 AS "8div2" FROM INT8_TBL;
+-- int2 op int8
+SELECT 246::int2 + q1 AS "2plus8", 246::int2 - q1 AS "2minus8", 246::int2 * q1 AS "2mul8", 246::int2 / q1 AS "2div8" FROM INT8_TBL;
+
+SELECT q2, abs(q2) FROM INT8_TBL;
+SELECT min(q1), min(q2) FROM INT8_TBL;
+SELECT max(q1), max(q2) FROM INT8_TBL;
+
+
+-- TO_CHAR()
+--
+SELECT to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999')
+ FROM INT8_TBL;
+
+SELECT to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999')
+ FROM INT8_TBL;
+
+SELECT to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR')
+ FROM INT8_TBL;
+
+SELECT to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999')
+ FROM INT8_TBL;
+
+SELECT to_char(q2, 'MI9999999999999999') FROM INT8_TBL;
+SELECT to_char(q2, 'FMS9999999999999999') FROM INT8_TBL;
+SELECT to_char(q2, 'FM9999999999999999THPR') FROM INT8_TBL;
+SELECT to_char(q2, 'SG9999999999999999th') FROM INT8_TBL;
+SELECT to_char(q2, '0999999999999999') FROM INT8_TBL;
+SELECT to_char(q2, 'S0999999999999999') FROM INT8_TBL;
+SELECT to_char(q2, 'FM0999999999999999') FROM INT8_TBL;
+SELECT to_char(q2, 'FM9999999999999999.000') FROM INT8_TBL;
+SELECT to_char(q2, 'L9999999999999999.000') FROM INT8_TBL;
+SELECT to_char(q2, 'FM9999999999999999.999') FROM INT8_TBL;
+SELECT to_char(q2, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9') FROM INT8_TBL;
+SELECT to_char(q2, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM INT8_TBL;
+SELECT to_char(q2, '999999SG9999999999') FROM INT8_TBL;
+
+-- check min/max values and overflow behavior
+
+select '-9223372036854775808'::int8;
+select '-9223372036854775809'::int8;
+select '9223372036854775807'::int8;
+select '9223372036854775808'::int8;
+
+select -('-9223372036854775807'::int8);
+select -('-9223372036854775808'::int8);
+
+select '9223372036854775800'::int8 + '9223372036854775800'::int8;
+select '-9223372036854775800'::int8 + '-9223372036854775800'::int8;
+
+select '9223372036854775800'::int8 - '-9223372036854775800'::int8;
+select '-9223372036854775800'::int8 - '9223372036854775800'::int8;
+
+select '9223372036854775800'::int8 * '9223372036854775800'::int8;
+
+select '9223372036854775800'::int8 / '0'::int8;
+select '9223372036854775800'::int8 % '0'::int8;
+
+select abs('-9223372036854775808'::int8);
+
+select '9223372036854775800'::int8 + '100'::int4;
+select '-9223372036854775800'::int8 - '100'::int4;
+select '9223372036854775800'::int8 * '100'::int4;
+
+select '100'::int4 + '9223372036854775800'::int8;
+select '-100'::int4 - '9223372036854775800'::int8;
+select '100'::int4 * '9223372036854775800'::int8;
+
+select '9223372036854775800'::int8 + '100'::int2;
+select '-9223372036854775800'::int8 - '100'::int2;
+select '9223372036854775800'::int8 * '100'::int2;
+select '-9223372036854775808'::int8 / '0'::int2;
+
+select '100'::int2 + '9223372036854775800'::int8;
+select '-100'::int2 - '9223372036854775800'::int8;
+select '100'::int2 * '9223372036854775800'::int8;
+select '100'::int2 / '0'::int8;
+
+SELECT CAST(q1 AS int4) FROM int8_tbl WHERE q2 = 456;
+SELECT CAST(q1 AS int4) FROM int8_tbl WHERE q2 <> 456;
+
+SELECT CAST(q1 AS int2) FROM int8_tbl WHERE q2 = 456;
+SELECT CAST(q1 AS int2) FROM int8_tbl WHERE q2 <> 456;
+
+SELECT CAST('42'::int2 AS int8), CAST('-37'::int2 AS int8);
+
+SELECT CAST(q1 AS float4), CAST(q2 AS float8) FROM INT8_TBL;
+SELECT CAST('36854775807.0'::float4 AS int8);
+SELECT CAST('922337203685477580700.0'::float8 AS int8);
+
+SELECT CAST(q1 AS oid) FROM INT8_TBL;
+SELECT oid::int8 FROM pg_class WHERE relname = 'pg_class';
+
+
+-- bit operations
+
+SELECT q1, q2, q1 & q2 AS "and", q1 | q2 AS "or", q1 # q2 AS "xor", ~q1 AS "not" FROM INT8_TBL;
+SELECT q1, q1 << 2 AS "shl", q1 >> 3 AS "shr" FROM INT8_TBL;
+
+
+-- generate_series
+
+SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8);
+SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8, 0);
+SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8, 2);
+
+-- corner case
+SELECT (-1::int8<<63)::text;
+SELECT ((-1::int8<<63)+1)::text;
+
+-- check sane handling of INT64_MIN overflow cases
+SELECT (-9223372036854775808)::int8 * (-1)::int8;
+SELECT (-9223372036854775808)::int8 / (-1)::int8;
+SELECT (-9223372036854775808)::int8 % (-1)::int8;
+SELECT (-9223372036854775808)::int8 * (-1)::int4;
+SELECT (-9223372036854775808)::int8 / (-1)::int4;
+SELECT (-9223372036854775808)::int8 % (-1)::int4;
+SELECT (-9223372036854775808)::int8 * (-1)::int2;
+SELECT (-9223372036854775808)::int8 / (-1)::int2;
+SELECT (-9223372036854775808)::int8 % (-1)::int2;
+
+-- check rounding when casting from float
+SELECT x, x::int8 AS int8_value
+FROM (VALUES (-2.5::float8),
+ (-1.5::float8),
+ (-0.5::float8),
+ (0.0::float8),
+ (0.5::float8),
+ (1.5::float8),
+ (2.5::float8)) t(x);
+
+-- check rounding when casting from numeric
+SELECT x, x::int8 AS int8_value
+FROM (VALUES (-2.5::numeric),
+ (-1.5::numeric),
+ (-0.5::numeric),
+ (0.0::numeric),
+ (0.5::numeric),
+ (1.5::numeric),
+ (2.5::numeric)) t(x);
+
+-- test gcd()
+SELECT a, b, gcd(a, b), gcd(a, -b), gcd(b, a), gcd(-b, a)
+FROM (VALUES (0::int8, 0::int8),
+ (0::int8, 29893644334::int8),
+ (288484263558::int8, 29893644334::int8),
+ (-288484263558::int8, 29893644334::int8),
+ ((-9223372036854775808)::int8, 1::int8),
+ ((-9223372036854775808)::int8, 9223372036854775807::int8),
+ ((-9223372036854775808)::int8, 4611686018427387904::int8)) AS v(a, b);
+
+SELECT gcd((-9223372036854775808)::int8, 0::int8); -- overflow
+SELECT gcd((-9223372036854775808)::int8, (-9223372036854775808)::int8); -- overflow
+
+-- test lcm()
+SELECT a, b, lcm(a, b), lcm(a, -b), lcm(b, a), lcm(-b, a)
+FROM (VALUES (0::int8, 0::int8),
+ (0::int8, 29893644334::int8),
+ (29893644334::int8, 29893644334::int8),
+ (288484263558::int8, 29893644334::int8),
+ (-288484263558::int8, 29893644334::int8),
+ ((-9223372036854775808)::int8, 0::int8)) AS v(a, b);
+
+SELECT lcm((-9223372036854775808)::int8, 1::int8); -- overflow
+SELECT lcm(9223372036854775807::int8, 9223372036854775806::int8); -- overflow
diff --git a/ydb/library/yql/tests/postgresql/original/cases/interval.out b/ydb/library/yql/tests/postgresql/original/cases/interval.out
new file mode 100644
index 0000000000..8e2d535543
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/interval.out
@@ -0,0 +1,1045 @@
+--
+-- INTERVAL
+--
+SET DATESTYLE = 'ISO';
+SET IntervalStyle to postgres;
+-- check acceptance of "time zone style"
+SELECT INTERVAL '01:00' AS "One hour";
+ One hour
+----------
+ 01:00:00
+(1 row)
+
+SELECT INTERVAL '+02:00' AS "Two hours";
+ Two hours
+-----------
+ 02:00:00
+(1 row)
+
+SELECT INTERVAL '-08:00' AS "Eight hours";
+ Eight hours
+-------------
+ -08:00:00
+(1 row)
+
+SELECT INTERVAL '-1 +02:03' AS "22 hours ago...";
+ 22 hours ago...
+-------------------
+ -1 days +02:03:00
+(1 row)
+
+SELECT INTERVAL '-1 days +02:03' AS "22 hours ago...";
+ 22 hours ago...
+-------------------
+ -1 days +02:03:00
+(1 row)
+
+SELECT INTERVAL '1.5 weeks' AS "Ten days twelve hours";
+ Ten days twelve hours
+-----------------------
+ 10 days 12:00:00
+(1 row)
+
+SELECT INTERVAL '1.5 months' AS "One month 15 days";
+ One month 15 days
+-------------------
+ 1 mon 15 days
+(1 row)
+
+SELECT INTERVAL '10 years -11 month -12 days +13:14' AS "9 years...";
+ 9 years...
+----------------------------------
+ 9 years 1 mon -12 days +13:14:00
+(1 row)
+
+CREATE TABLE INTERVAL_TBL (f1 interval);
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 1 minute');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 5 hour');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 10 day');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 34 year');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 3 months');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 14 seconds ago');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('1 day 2 hours 3 minutes 4 seconds');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('6 years');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months 12 hours');
+-- badly formatted interval
+INSERT INTO INTERVAL_TBL (f1) VALUES ('badly formatted interval');
+ERROR: invalid input syntax for type interval: "badly formatted interval"
+LINE 1: INSERT INTO INTERVAL_TBL (f1) VALUES ('badly formatted inter...
+ ^
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago');
+ERROR: invalid input syntax for type interval: "@ 30 eons ago"
+LINE 1: INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago');
+ ^
+-- test interval operators
+SELECT * FROM INTERVAL_TBL;
+ f1
+-----------------
+ 00:01:00
+ 05:00:00
+ 10 days
+ 34 years
+ 3 mons
+ -00:00:14
+ 1 day 02:03:04
+ 6 years
+ 5 mons
+ 5 mons 12:00:00
+(10 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 <> interval '@ 10 days';
+ f1
+-----------------
+ 00:01:00
+ 05:00:00
+ 34 years
+ 3 mons
+ -00:00:14
+ 1 day 02:03:04
+ 6 years
+ 5 mons
+ 5 mons 12:00:00
+(9 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 <= interval '@ 5 hours';
+ f1
+-----------
+ 00:01:00
+ 05:00:00
+ -00:00:14
+(3 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 < interval '@ 1 day';
+ f1
+-----------
+ 00:01:00
+ 05:00:00
+ -00:00:14
+(3 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 = interval '@ 34 years';
+ f1
+----------
+ 34 years
+(1 row)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 >= interval '@ 1 month';
+ f1
+-----------------
+ 34 years
+ 3 mons
+ 6 years
+ 5 mons
+ 5 mons 12:00:00
+(5 rows)
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 > interval '@ 3 seconds ago';
+ f1
+-----------------
+ 00:01:00
+ 05:00:00
+ 10 days
+ 34 years
+ 3 mons
+ 1 day 02:03:04
+ 6 years
+ 5 mons
+ 5 mons 12:00:00
+(9 rows)
+
+SELECT r1.*, r2.*
+ FROM INTERVAL_TBL r1, INTERVAL_TBL r2
+ WHERE r1.f1 > r2.f1
+ ORDER BY r1.f1, r2.f1;
+ f1 | f1
+-----------------+-----------------
+ 00:01:00 | -00:00:14
+ 05:00:00 | -00:00:14
+ 05:00:00 | 00:01:00
+ 1 day 02:03:04 | -00:00:14
+ 1 day 02:03:04 | 00:01:00
+ 1 day 02:03:04 | 05:00:00
+ 10 days | -00:00:14
+ 10 days | 00:01:00
+ 10 days | 05:00:00
+ 10 days | 1 day 02:03:04
+ 3 mons | -00:00:14
+ 3 mons | 00:01:00
+ 3 mons | 05:00:00
+ 3 mons | 1 day 02:03:04
+ 3 mons | 10 days
+ 5 mons | -00:00:14
+ 5 mons | 00:01:00
+ 5 mons | 05:00:00
+ 5 mons | 1 day 02:03:04
+ 5 mons | 10 days
+ 5 mons | 3 mons
+ 5 mons 12:00:00 | -00:00:14
+ 5 mons 12:00:00 | 00:01:00
+ 5 mons 12:00:00 | 05:00:00
+ 5 mons 12:00:00 | 1 day 02:03:04
+ 5 mons 12:00:00 | 10 days
+ 5 mons 12:00:00 | 3 mons
+ 5 mons 12:00:00 | 5 mons
+ 6 years | -00:00:14
+ 6 years | 00:01:00
+ 6 years | 05:00:00
+ 6 years | 1 day 02:03:04
+ 6 years | 10 days
+ 6 years | 3 mons
+ 6 years | 5 mons
+ 6 years | 5 mons 12:00:00
+ 34 years | -00:00:14
+ 34 years | 00:01:00
+ 34 years | 05:00:00
+ 34 years | 1 day 02:03:04
+ 34 years | 10 days
+ 34 years | 3 mons
+ 34 years | 5 mons
+ 34 years | 5 mons 12:00:00
+ 34 years | 6 years
+(45 rows)
+
+-- Test intervals that are large enough to overflow 64 bits in comparisons
+CREATE TEMP TABLE INTERVAL_TBL_OF (f1 interval);
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES
+ ('2147483647 days 2147483647 months'),
+ ('2147483647 days -2147483648 months'),
+ ('1 year'),
+ ('-2147483648 days 2147483647 months'),
+ ('-2147483648 days -2147483648 months');
+-- these should fail as out-of-range
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days');
+ERROR: interval field value out of range: "2147483648 days"
+LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days');
+ ^
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days');
+ERROR: interval field value out of range: "-2147483649 days"
+LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days')...
+ ^
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years');
+ERROR: interval out of range
+LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years')...
+ ^
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years');
+ERROR: interval out of range
+LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years'...
+ ^
+-- Test edge-case overflow detection in interval multiplication
+select extract(epoch from '256 microseconds'::interval * (2^55)::float8);
+ERROR: interval out of range
+SELECT r1.*, r2.*
+ FROM INTERVAL_TBL_OF r1, INTERVAL_TBL_OF r2
+ WHERE r1.f1 > r2.f1
+ ORDER BY r1.f1, r2.f1;
+ f1 | f1
+-------------------------------------------+-------------------------------------------
+ -178956970 years -8 mons +2147483647 days | -178956970 years -8 mons -2147483648 days
+ 1 year | -178956970 years -8 mons -2147483648 days
+ 1 year | -178956970 years -8 mons +2147483647 days
+ 178956970 years 7 mons -2147483648 days | -178956970 years -8 mons -2147483648 days
+ 178956970 years 7 mons -2147483648 days | -178956970 years -8 mons +2147483647 days
+ 178956970 years 7 mons -2147483648 days | 1 year
+ 178956970 years 7 mons 2147483647 days | -178956970 years -8 mons -2147483648 days
+ 178956970 years 7 mons 2147483647 days | -178956970 years -8 mons +2147483647 days
+ 178956970 years 7 mons 2147483647 days | 1 year
+ 178956970 years 7 mons 2147483647 days | 178956970 years 7 mons -2147483648 days
+(10 rows)
+
+CREATE INDEX ON INTERVAL_TBL_OF USING btree (f1);
+SET enable_seqscan TO false;
+EXPLAIN (COSTS OFF)
+SELECT f1 FROM INTERVAL_TBL_OF r1 ORDER BY f1;
+ QUERY PLAN
+--------------------------------------------------------------------
+ Index Only Scan using interval_tbl_of_f1_idx on interval_tbl_of r1
+(1 row)
+
+SELECT f1 FROM INTERVAL_TBL_OF r1 ORDER BY f1;
+ f1
+-------------------------------------------
+ -178956970 years -8 mons -2147483648 days
+ -178956970 years -8 mons +2147483647 days
+ 1 year
+ 178956970 years 7 mons -2147483648 days
+ 178956970 years 7 mons 2147483647 days
+(5 rows)
+
+RESET enable_seqscan;
+DROP TABLE INTERVAL_TBL_OF;
+-- Test multiplication and division with intervals.
+-- Floating point arithmetic rounding errors can lead to unexpected results,
+-- though the code attempts to do the right thing and round up to days and
+-- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'.
+-- Note that it is expected for some day components to be greater than 29 and
+-- some time components be greater than 23:59:59 due to how intervals are
+-- stored internally.
+CREATE TABLE INTERVAL_MULDIV_TBL (span interval);
+COPY INTERVAL_MULDIV_TBL FROM STDIN;
+SELECT span * 0.3 AS product
+FROM INTERVAL_MULDIV_TBL;
+ product
+------------------------------------
+ 1 year 12 days 122:24:00
+ -1 years -12 days +93:36:00
+ -3 days -14:24:00
+ 2 mons 13 days 01:22:28.8
+ -10 mons +120 days 37:28:21.6567
+ 1 mon 6 days
+ 4 mons 6 days
+ 24 years 11 mons 320 days 16:48:00
+(8 rows)
+
+SELECT span * 8.2 AS product
+FROM INTERVAL_MULDIV_TBL;
+ product
+---------------------------------------------
+ 28 years 104 days 2961:36:00
+ -28 years -104 days +2942:24:00
+ -98 days -09:36:00
+ 6 years 1 mon -197 days +93:34:27.2
+ -24 years -7 mons +3946 days 640:15:11.9498
+ 2 years 8 mons 24 days
+ 9 years 6 mons 24 days
+ 682 years 7 mons 8215 days 19:12:00
+(8 rows)
+
+SELECT span / 10 AS quotient
+FROM INTERVAL_MULDIV_TBL;
+ quotient
+----------------------------------
+ 4 mons 4 days 40:48:00
+ -4 mons -4 days +31:12:00
+ -1 days -04:48:00
+ 25 days -15:32:30.4
+ -3 mons +30 days 12:29:27.2189
+ 12 days
+ 1 mon 12 days
+ 8 years 3 mons 126 days 21:36:00
+(8 rows)
+
+SELECT span / 100 AS quotient
+FROM INTERVAL_MULDIV_TBL;
+ quotient
+-------------------------
+ 12 days 13:40:48
+ -12 days -06:28:48
+ -02:52:48
+ 2 days 10:26:44.96
+ -6 days +01:14:56.72189
+ 1 day 04:48:00
+ 4 days 04:48:00
+ 9 mons 39 days 16:33:36
+(8 rows)
+
+DROP TABLE INTERVAL_MULDIV_TBL;
+SET DATESTYLE = 'postgres';
+SET IntervalStyle to postgres_verbose;
+SELECT * FROM INTERVAL_TBL;
+ f1
+-------------------------------
+ @ 1 min
+ @ 5 hours
+ @ 10 days
+ @ 34 years
+ @ 3 mons
+ @ 14 secs ago
+ @ 1 day 2 hours 3 mins 4 secs
+ @ 6 years
+ @ 5 mons
+ @ 5 mons 12 hours
+(10 rows)
+
+-- test avg(interval), which is somewhat fragile since people have been
+-- known to change the allowed input syntax for type interval without
+-- updating pg_aggregate.agginitval
+select avg(f1) from interval_tbl;
+ avg
+-------------------------------------------------
+ @ 4 years 1 mon 10 days 4 hours 18 mins 23 secs
+(1 row)
+
+-- test long interval input
+select '4 millenniums 5 centuries 4 decades 1 year 4 months 4 days 17 minutes 31 seconds'::interval;
+ interval
+--------------------------------------------
+ @ 4541 years 4 mons 4 days 17 mins 31 secs
+(1 row)
+
+-- test long interval output
+-- Note: the actual maximum length of the interval output is longer,
+-- but we need the test to work for both integer and floating-point
+-- timestamps.
+select '100000000y 10mon -1000000000d -100000h -10min -10.000001s ago'::interval;
+ interval
+---------------------------------------------------------------------------------------
+ @ 100000000 years 10 mons -1000000000 days -100000 hours -10 mins -10.000001 secs ago
+(1 row)
+
+-- test justify_hours() and justify_days()
+SELECT justify_hours(interval '6 months 3 days 52 hours 3 minutes 2 seconds') as "6 mons 5 days 4 hours 3 mins 2 seconds";
+ 6 mons 5 days 4 hours 3 mins 2 seconds
+----------------------------------------
+ @ 6 mons 5 days 4 hours 3 mins 2 secs
+(1 row)
+
+SELECT justify_days(interval '6 months 36 days 5 hours 4 minutes 3 seconds') as "7 mons 6 days 5 hours 4 mins 3 seconds";
+ 7 mons 6 days 5 hours 4 mins 3 seconds
+----------------------------------------
+ @ 7 mons 6 days 5 hours 4 mins 3 secs
+(1 row)
+
+-- test justify_interval()
+SELECT justify_interval(interval '1 month -1 hour') as "1 month -1 hour";
+ 1 month -1 hour
+--------------------
+ @ 29 days 23 hours
+(1 row)
+
+-- test fractional second input, and detection of duplicate units
+SET DATESTYLE = 'ISO';
+SET IntervalStyle TO postgres;
+SELECT '1 millisecond'::interval, '1 microsecond'::interval,
+ '500 seconds 99 milliseconds 51 microseconds'::interval;
+ interval | interval | interval
+--------------+-----------------+-----------------
+ 00:00:00.001 | 00:00:00.000001 | 00:08:20.099051
+(1 row)
+
+SELECT '3 days 5 milliseconds'::interval;
+ interval
+---------------------
+ 3 days 00:00:00.005
+(1 row)
+
+SELECT '1 second 2 seconds'::interval; -- error
+ERROR: invalid input syntax for type interval: "1 second 2 seconds"
+LINE 1: SELECT '1 second 2 seconds'::interval;
+ ^
+SELECT '10 milliseconds 20 milliseconds'::interval; -- error
+ERROR: invalid input syntax for type interval: "10 milliseconds 20 milliseconds"
+LINE 1: SELECT '10 milliseconds 20 milliseconds'::interval;
+ ^
+SELECT '5.5 seconds 3 milliseconds'::interval; -- error
+ERROR: invalid input syntax for type interval: "5.5 seconds 3 milliseconds"
+LINE 1: SELECT '5.5 seconds 3 milliseconds'::interval;
+ ^
+SELECT '1:20:05 5 microseconds'::interval; -- error
+ERROR: invalid input syntax for type interval: "1:20:05 5 microseconds"
+LINE 1: SELECT '1:20:05 5 microseconds'::interval;
+ ^
+SELECT '1 day 1 day'::interval; -- error
+ERROR: invalid input syntax for type interval: "1 day 1 day"
+LINE 1: SELECT '1 day 1 day'::interval;
+ ^
+SELECT interval '1-2'; -- SQL year-month literal
+ interval
+---------------
+ 1 year 2 mons
+(1 row)
+
+SELECT interval '999' second; -- oversize leading field is ok
+ interval
+----------
+ 00:16:39
+(1 row)
+
+SELECT interval '999' minute;
+ interval
+----------
+ 16:39:00
+(1 row)
+
+SELECT interval '999' hour;
+ interval
+-----------
+ 999:00:00
+(1 row)
+
+SELECT interval '999' day;
+ interval
+----------
+ 999 days
+(1 row)
+
+SELECT interval '999' month;
+ interval
+-----------------
+ 83 years 3 mons
+(1 row)
+
+-- test SQL-spec syntaxes for restricted field sets
+SELECT interval '1' year;
+ interval
+----------
+ 1 year
+(1 row)
+
+SELECT interval '2' month;
+ interval
+----------
+ 2 mons
+(1 row)
+
+SELECT interval '3' day;
+ interval
+----------
+ 3 days
+(1 row)
+
+SELECT interval '4' hour;
+ interval
+----------
+ 04:00:00
+(1 row)
+
+SELECT interval '5' minute;
+ interval
+----------
+ 00:05:00
+(1 row)
+
+SELECT interval '6' second;
+ interval
+----------
+ 00:00:06
+(1 row)
+
+SELECT interval '1' year to month;
+ interval
+----------
+ 1 mon
+(1 row)
+
+SELECT interval '1-2' year to month;
+ interval
+---------------
+ 1 year 2 mons
+(1 row)
+
+SELECT interval '1 2' day to hour;
+ interval
+----------------
+ 1 day 02:00:00
+(1 row)
+
+SELECT interval '1 2:03' day to hour;
+ interval
+----------------
+ 1 day 02:00:00
+(1 row)
+
+SELECT interval '1 2:03:04' day to hour;
+ interval
+----------------
+ 1 day 02:00:00
+(1 row)
+
+SELECT interval '1 2' day to minute;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' day to minute;
+ ^
+SELECT interval '1 2:03' day to minute;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03:04' day to minute;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2' day to second;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' day to second;
+ ^
+SELECT interval '1 2:03' day to second;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03:04' day to second;
+ interval
+----------------
+ 1 day 02:03:04
+(1 row)
+
+SELECT interval '1 2' hour to minute;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' hour to minute;
+ ^
+SELECT interval '1 2:03' hour to minute;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03:04' hour to minute;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2' hour to second;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' hour to second;
+ ^
+SELECT interval '1 2:03' hour to second;
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03:04' hour to second;
+ interval
+----------------
+ 1 day 02:03:04
+(1 row)
+
+SELECT interval '1 2' minute to second;
+ERROR: invalid input syntax for type interval: "1 2"
+LINE 1: SELECT interval '1 2' minute to second;
+ ^
+SELECT interval '1 2:03' minute to second;
+ interval
+----------------
+ 1 day 00:02:03
+(1 row)
+
+SELECT interval '1 2:03:04' minute to second;
+ interval
+----------------
+ 1 day 02:03:04
+(1 row)
+
+SELECT interval '1 +2:03' minute to second;
+ interval
+----------------
+ 1 day 00:02:03
+(1 row)
+
+SELECT interval '1 +2:03:04' minute to second;
+ interval
+----------------
+ 1 day 02:03:04
+(1 row)
+
+SELECT interval '1 -2:03' minute to second;
+ interval
+-----------------
+ 1 day -00:02:03
+(1 row)
+
+SELECT interval '1 -2:03:04' minute to second;
+ interval
+-----------------
+ 1 day -02:03:04
+(1 row)
+
+SELECT interval '123 11' day to hour; -- ok
+ interval
+-------------------
+ 123 days 11:00:00
+(1 row)
+
+SELECT interval '123 11' day; -- not ok
+ERROR: invalid input syntax for type interval: "123 11"
+LINE 1: SELECT interval '123 11' day;
+ ^
+SELECT interval '123 11'; -- not ok, too ambiguous
+ERROR: invalid input syntax for type interval: "123 11"
+LINE 1: SELECT interval '123 11';
+ ^
+SELECT interval '123 2:03 -2:04'; -- not ok, redundant hh:mm fields
+ERROR: invalid input syntax for type interval: "123 2:03 -2:04"
+LINE 1: SELECT interval '123 2:03 -2:04';
+ ^
+-- test syntaxes for restricted precision
+SELECT interval(0) '1 day 01:23:45.6789';
+ interval
+----------------
+ 1 day 01:23:46
+(1 row)
+
+SELECT interval(2) '1 day 01:23:45.6789';
+ interval
+-------------------
+ 1 day 01:23:45.68
+(1 row)
+
+SELECT interval '12:34.5678' minute to second(2); -- per SQL spec
+ interval
+-------------
+ 00:12:34.57
+(1 row)
+
+SELECT interval '1.234' second;
+ interval
+--------------
+ 00:00:01.234
+(1 row)
+
+SELECT interval '1.234' second(2);
+ interval
+-------------
+ 00:00:01.23
+(1 row)
+
+SELECT interval '1 2.345' day to second(2);
+ERROR: invalid input syntax for type interval: "1 2.345"
+LINE 1: SELECT interval '1 2.345' day to second(2);
+ ^
+SELECT interval '1 2:03' day to second(2);
+ interval
+----------------
+ 1 day 02:03:00
+(1 row)
+
+SELECT interval '1 2:03.4567' day to second(2);
+ interval
+-------------------
+ 1 day 00:02:03.46
+(1 row)
+
+SELECT interval '1 2:03:04.5678' day to second(2);
+ interval
+-------------------
+ 1 day 02:03:04.57
+(1 row)
+
+SELECT interval '1 2.345' hour to second(2);
+ERROR: invalid input syntax for type interval: "1 2.345"
+LINE 1: SELECT interval '1 2.345' hour to second(2);
+ ^
+SELECT interval '1 2:03.45678' hour to second(2);
+ interval
+-------------------
+ 1 day 00:02:03.46
+(1 row)
+
+SELECT interval '1 2:03:04.5678' hour to second(2);
+ interval
+-------------------
+ 1 day 02:03:04.57
+(1 row)
+
+SELECT interval '1 2.3456' minute to second(2);
+ERROR: invalid input syntax for type interval: "1 2.3456"
+LINE 1: SELECT interval '1 2.3456' minute to second(2);
+ ^
+SELECT interval '1 2:03.5678' minute to second(2);
+ interval
+-------------------
+ 1 day 00:02:03.57
+(1 row)
+
+SELECT interval '1 2:03:04.5678' minute to second(2);
+ interval
+-------------------
+ 1 day 02:03:04.57
+(1 row)
+
+-- test casting to restricted precision (bug #14479)
+SELECT f1, f1::INTERVAL DAY TO MINUTE AS "minutes",
+ (f1 + INTERVAL '1 month')::INTERVAL MONTH::INTERVAL YEAR AS "years"
+ FROM interval_tbl;
+ f1 | minutes | years
+-----------------+-----------------+----------
+ 00:01:00 | 00:01:00 | 00:00:00
+ 05:00:00 | 05:00:00 | 00:00:00
+ 10 days | 10 days | 00:00:00
+ 34 years | 34 years | 34 years
+ 3 mons | 3 mons | 00:00:00
+ -00:00:14 | 00:00:00 | 00:00:00
+ 1 day 02:03:04 | 1 day 02:03:00 | 00:00:00
+ 6 years | 6 years | 6 years
+ 5 mons | 5 mons | 00:00:00
+ 5 mons 12:00:00 | 5 mons 12:00:00 | 00:00:00
+(10 rows)
+
+-- test inputting and outputting SQL standard interval literals
+SET IntervalStyle TO sql_standard;
+SELECT interval '0' AS "zero",
+ interval '1-2' year to month AS "year-month",
+ interval '1 2:03:04' day to second AS "day-time",
+ - interval '1-2' AS "negative year-month",
+ - interval '1 2:03:04' AS "negative day-time";
+ zero | year-month | day-time | negative year-month | negative day-time
+------+------------+-----------+---------------------+-------------------
+ 0 | 1-2 | 1 2:03:04 | -1-2 | -1 2:03:04
+(1 row)
+
+-- test input of some not-quite-standard interval values in the sql style
+SET IntervalStyle TO postgres;
+SELECT interval '+1 -1:00:00',
+ interval '-1 +1:00:00',
+ interval '+1-2 -3 +4:05:06.789',
+ interval '-1-2 +3 -4:05:06.789';
+ interval | interval | interval | interval
+-----------------+-------------------+-------------------------------------+----------------------------------------
+ 1 day -01:00:00 | -1 days +01:00:00 | 1 year 2 mons -3 days +04:05:06.789 | -1 years -2 mons +3 days -04:05:06.789
+(1 row)
+
+-- test output of couple non-standard interval values in the sql style
+SET IntervalStyle TO sql_standard;
+SELECT interval '1 day -1 hours',
+ interval '-1 days +1 hours',
+ interval '1 years 2 months -3 days 4 hours 5 minutes 6.789 seconds',
+ - interval '1 years 2 months -3 days 4 hours 5 minutes 6.789 seconds';
+ interval | interval | interval | ?column?
+------------------+------------------+----------------------+----------------------
+ +0-0 +1 -1:00:00 | +0-0 -1 +1:00:00 | +1-2 -3 +4:05:06.789 | -1-2 +3 -4:05:06.789
+(1 row)
+
+-- test outputting iso8601 intervals
+SET IntervalStyle to iso_8601;
+select interval '0' AS "zero",
+ interval '1-2' AS "a year 2 months",
+ interval '1 2:03:04' AS "a bit over a day",
+ interval '2:03:04.45679' AS "a bit over 2 hours",
+ (interval '1-2' + interval '3 4:05:06.7') AS "all fields",
+ (interval '1-2' - interval '3 4:05:06.7') AS "mixed sign",
+ (- interval '1-2' + interval '3 4:05:06.7') AS "negative";
+ zero | a year 2 months | a bit over a day | a bit over 2 hours | all fields | mixed sign | negative
+------+-----------------+------------------+--------------------+------------------+----------------------+--------------------
+ PT0S | P1Y2M | P1DT2H3M4S | PT2H3M4.45679S | P1Y2M3DT4H5M6.7S | P1Y2M-3DT-4H-5M-6.7S | P-1Y-2M3DT4H5M6.7S
+(1 row)
+
+-- test inputting ISO 8601 4.4.2.1 "Format With Time Unit Designators"
+SET IntervalStyle to sql_standard;
+select interval 'P0Y' AS "zero",
+ interval 'P1Y2M' AS "a year 2 months",
+ interval 'P1W' AS "a week",
+ interval 'P1DT2H3M4S' AS "a bit over a day",
+ interval 'P1Y2M3DT4H5M6.7S' AS "all fields",
+ interval 'P-1Y-2M-3DT-4H-5M-6.7S' AS "negative",
+ interval 'PT-0.1S' AS "fractional second";
+ zero | a year 2 months | a week | a bit over a day | all fields | negative | fractional second
+------+-----------------+-----------+------------------+--------------------+--------------------+-------------------
+ 0 | 1-2 | 7 0:00:00 | 1 2:03:04 | +1-2 +3 +4:05:06.7 | -1-2 -3 -4:05:06.7 | -0:00:00.1
+(1 row)
+
+-- test inputting ISO 8601 4.4.2.2 "Alternative Format"
+SET IntervalStyle to postgres;
+select interval 'P00021015T103020' AS "ISO8601 Basic Format",
+ interval 'P0002-10-15T10:30:20' AS "ISO8601 Extended Format";
+ ISO8601 Basic Format | ISO8601 Extended Format
+----------------------------------+----------------------------------
+ 2 years 10 mons 15 days 10:30:20 | 2 years 10 mons 15 days 10:30:20
+(1 row)
+
+-- Make sure optional ISO8601 alternative format fields are optional.
+select interval 'P0002' AS "year only",
+ interval 'P0002-10' AS "year month",
+ interval 'P0002-10-15' AS "year month day",
+ interval 'P0002T1S' AS "year only plus time",
+ interval 'P0002-10T1S' AS "year month plus time",
+ interval 'P0002-10-15T1S' AS "year month day plus time",
+ interval 'PT10' AS "hour only",
+ interval 'PT10:30' AS "hour minute";
+ year only | year month | year month day | year only plus time | year month plus time | year month day plus time | hour only | hour minute
+-----------+-----------------+-------------------------+---------------------+--------------------------+----------------------------------+-----------+-------------
+ 2 years | 2 years 10 mons | 2 years 10 mons 15 days | 2 years 00:00:01 | 2 years 10 mons 00:00:01 | 2 years 10 mons 15 days 00:00:01 | 10:00:00 | 10:30:00
+(1 row)
+
+-- test a couple rounding cases that changed since 8.3 w/ HAVE_INT64_TIMESTAMP.
+SET IntervalStyle to postgres_verbose;
+select interval '-10 mons -3 days +03:55:06.70';
+ interval
+--------------------------------------------------
+ @ 10 mons 3 days -3 hours -55 mins -6.7 secs ago
+(1 row)
+
+select interval '1 year 2 mons 3 days 04:05:06.699999';
+ interval
+-----------------------------------------------------
+ @ 1 year 2 mons 3 days 4 hours 5 mins 6.699999 secs
+(1 row)
+
+select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds';
+ interval | interval | interval
+------------+------------+------------
+ @ 0.7 secs | @ 0.7 secs | @ 0.7 secs
+(1 row)
+
+-- check that '30 days' equals '1 month' according to the hash function
+select '30 days'::interval = '1 month'::interval as t;
+ t
+---
+ t
+(1 row)
+
+select interval_hash('30 days'::interval) = interval_hash('1 month'::interval) as t;
+ t
+---
+ t
+(1 row)
+
+-- numeric constructor
+select make_interval(years := 2);
+ make_interval
+---------------
+ @ 2 years
+(1 row)
+
+select make_interval(years := 1, months := 6);
+ make_interval
+-----------------
+ @ 1 year 6 mons
+(1 row)
+
+select make_interval(years := 1, months := -1, weeks := 5, days := -7, hours := 25, mins := -180);
+ make_interval
+----------------------------
+ @ 11 mons 28 days 22 hours
+(1 row)
+
+select make_interval() = make_interval(years := 0, months := 0, weeks := 0, days := 0, mins := 0, secs := 0.0);
+ ?column?
+----------
+ t
+(1 row)
+
+select make_interval(hours := -2, mins := -10, secs := -25.3);
+ make_interval
+---------------------------------
+ @ 2 hours 10 mins 25.3 secs ago
+(1 row)
+
+select make_interval(years := 'inf'::float::int);
+ERROR: integer out of range
+select make_interval(months := 'NaN'::float::int);
+ERROR: integer out of range
+select make_interval(secs := 'inf');
+ERROR: interval out of range
+select make_interval(secs := 'NaN');
+ERROR: interval out of range
+select make_interval(secs := 7e12);
+ make_interval
+------------------------------------
+ @ 1944444444 hours 26 mins 40 secs
+(1 row)
+
+--
+-- test EXTRACT
+--
+SELECT f1,
+ EXTRACT(MICROSECOND FROM f1) AS MICROSECOND,
+ EXTRACT(MILLISECOND FROM f1) AS MILLISECOND,
+ EXTRACT(SECOND FROM f1) AS SECOND,
+ EXTRACT(MINUTE FROM f1) AS MINUTE,
+ EXTRACT(HOUR FROM f1) AS HOUR,
+ EXTRACT(DAY FROM f1) AS DAY,
+ EXTRACT(MONTH FROM f1) AS MONTH,
+ EXTRACT(QUARTER FROM f1) AS QUARTER,
+ EXTRACT(YEAR FROM f1) AS YEAR,
+ EXTRACT(DECADE FROM f1) AS DECADE,
+ EXTRACT(CENTURY FROM f1) AS CENTURY,
+ EXTRACT(MILLENNIUM FROM f1) AS MILLENNIUM,
+ EXTRACT(EPOCH FROM f1) AS EPOCH
+ FROM INTERVAL_TBL;
+ f1 | microsecond | millisecond | second | minute | hour | day | month | quarter | year | decade | century | millennium | epoch
+-------------------------------+-------------+-------------+------------+--------+------+-----+-------+---------+------+--------+---------+------------+-------------------
+ @ 1 min | 0 | 0.000 | 0.000000 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 60.000000
+ @ 5 hours | 0 | 0.000 | 0.000000 | 0 | 5 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 18000.000000
+ @ 10 days | 0 | 0.000 | 0.000000 | 0 | 0 | 10 | 0 | 1 | 0 | 0 | 0 | 0 | 864000.000000
+ @ 34 years | 0 | 0.000 | 0.000000 | 0 | 0 | 0 | 0 | 1 | 34 | 3 | 0 | 0 | 1072958400.000000
+ @ 3 mons | 0 | 0.000 | 0.000000 | 0 | 0 | 0 | 3 | 2 | 0 | 0 | 0 | 0 | 7776000.000000
+ @ 14 secs ago | -14000000 | -14000.000 | -14.000000 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | -14.000000
+ @ 1 day 2 hours 3 mins 4 secs | 4000000 | 4000.000 | 4.000000 | 3 | 2 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 93784.000000
+ @ 6 years | 0 | 0.000 | 0.000000 | 0 | 0 | 0 | 0 | 1 | 6 | 0 | 0 | 0 | 189345600.000000
+ @ 5 mons | 0 | 0.000 | 0.000000 | 0 | 0 | 0 | 5 | 2 | 0 | 0 | 0 | 0 | 12960000.000000
+ @ 5 mons 12 hours | 0 | 0.000 | 0.000000 | 0 | 12 | 0 | 5 | 2 | 0 | 0 | 0 | 0 | 13003200.000000
+(10 rows)
+
+SELECT EXTRACT(FORTNIGHT FROM INTERVAL '2 days'); -- error
+ERROR: interval units "fortnight" not recognized
+SELECT EXTRACT(TIMEZONE FROM INTERVAL '2 days'); -- error
+ERROR: interval units "timezone" not supported
+SELECT EXTRACT(DECADE FROM INTERVAL '100 y');
+ extract
+---------
+ 10
+(1 row)
+
+SELECT EXTRACT(DECADE FROM INTERVAL '99 y');
+ extract
+---------
+ 9
+(1 row)
+
+SELECT EXTRACT(DECADE FROM INTERVAL '-99 y');
+ extract
+---------
+ -9
+(1 row)
+
+SELECT EXTRACT(DECADE FROM INTERVAL '-100 y');
+ extract
+---------
+ -10
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '100 y');
+ extract
+---------
+ 1
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '99 y');
+ extract
+---------
+ 0
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '-99 y');
+ extract
+---------
+ 0
+(1 row)
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '-100 y');
+ extract
+---------
+ -1
+(1 row)
+
+-- date_part implementation is mostly the same as extract, so only
+-- test a few cases for additional coverage.
+SELECT f1,
+ date_part('microsecond', f1) AS microsecond,
+ date_part('millisecond', f1) AS millisecond,
+ date_part('second', f1) AS second,
+ date_part('epoch', f1) AS epoch
+ FROM INTERVAL_TBL;
+ f1 | microsecond | millisecond | second | epoch
+-------------------------------+-------------+-------------+--------+------------
+ @ 1 min | 0 | 0 | 0 | 60
+ @ 5 hours | 0 | 0 | 0 | 18000
+ @ 10 days | 0 | 0 | 0 | 864000
+ @ 34 years | 0 | 0 | 0 | 1072958400
+ @ 3 mons | 0 | 0 | 0 | 7776000
+ @ 14 secs ago | -14000000 | -14000 | -14 | -14
+ @ 1 day 2 hours 3 mins 4 secs | 4000000 | 4000 | 4 | 93784
+ @ 6 years | 0 | 0 | 0 | 189345600
+ @ 5 mons | 0 | 0 | 0 | 12960000
+ @ 5 mons 12 hours | 0 | 0 | 0 | 13003200
+(10 rows)
+
+-- internal overflow test case
+SELECT extract(epoch from interval '1000000000 days');
+ extract
+-----------------------
+ 86400000000000.000000
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/interval.sql b/ydb/library/yql/tests/postgresql/original/cases/interval.sql
new file mode 100644
index 0000000000..6d532398bd
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/interval.sql
@@ -0,0 +1,357 @@
+--
+-- INTERVAL
+--
+
+SET DATESTYLE = 'ISO';
+SET IntervalStyle to postgres;
+
+-- check acceptance of "time zone style"
+SELECT INTERVAL '01:00' AS "One hour";
+SELECT INTERVAL '+02:00' AS "Two hours";
+SELECT INTERVAL '-08:00' AS "Eight hours";
+SELECT INTERVAL '-1 +02:03' AS "22 hours ago...";
+SELECT INTERVAL '-1 days +02:03' AS "22 hours ago...";
+SELECT INTERVAL '1.5 weeks' AS "Ten days twelve hours";
+SELECT INTERVAL '1.5 months' AS "One month 15 days";
+SELECT INTERVAL '10 years -11 month -12 days +13:14' AS "9 years...";
+
+CREATE TABLE INTERVAL_TBL (f1 interval);
+
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 1 minute');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 5 hour');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 10 day');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 34 year');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 3 months');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 14 seconds ago');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('1 day 2 hours 3 minutes 4 seconds');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('6 years');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months 12 hours');
+
+-- badly formatted interval
+INSERT INTO INTERVAL_TBL (f1) VALUES ('badly formatted interval');
+INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago');
+
+-- test interval operators
+
+SELECT * FROM INTERVAL_TBL;
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 <> interval '@ 10 days';
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 <= interval '@ 5 hours';
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 < interval '@ 1 day';
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 = interval '@ 34 years';
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 >= interval '@ 1 month';
+
+SELECT * FROM INTERVAL_TBL
+ WHERE INTERVAL_TBL.f1 > interval '@ 3 seconds ago';
+
+SELECT r1.*, r2.*
+ FROM INTERVAL_TBL r1, INTERVAL_TBL r2
+ WHERE r1.f1 > r2.f1
+ ORDER BY r1.f1, r2.f1;
+
+-- Test intervals that are large enough to overflow 64 bits in comparisons
+CREATE TEMP TABLE INTERVAL_TBL_OF (f1 interval);
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES
+ ('2147483647 days 2147483647 months'),
+ ('2147483647 days -2147483648 months'),
+ ('1 year'),
+ ('-2147483648 days 2147483647 months'),
+ ('-2147483648 days -2147483648 months');
+-- these should fail as out-of-range
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days');
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days');
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years');
+INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years');
+
+-- Test edge-case overflow detection in interval multiplication
+select extract(epoch from '256 microseconds'::interval * (2^55)::float8);
+
+SELECT r1.*, r2.*
+ FROM INTERVAL_TBL_OF r1, INTERVAL_TBL_OF r2
+ WHERE r1.f1 > r2.f1
+ ORDER BY r1.f1, r2.f1;
+
+CREATE INDEX ON INTERVAL_TBL_OF USING btree (f1);
+SET enable_seqscan TO false;
+EXPLAIN (COSTS OFF)
+SELECT f1 FROM INTERVAL_TBL_OF r1 ORDER BY f1;
+SELECT f1 FROM INTERVAL_TBL_OF r1 ORDER BY f1;
+RESET enable_seqscan;
+
+DROP TABLE INTERVAL_TBL_OF;
+
+-- Test multiplication and division with intervals.
+-- Floating point arithmetic rounding errors can lead to unexpected results,
+-- though the code attempts to do the right thing and round up to days and
+-- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'.
+-- Note that it is expected for some day components to be greater than 29 and
+-- some time components be greater than 23:59:59 due to how intervals are
+-- stored internally.
+
+CREATE TABLE INTERVAL_MULDIV_TBL (span interval);
+COPY INTERVAL_MULDIV_TBL FROM STDIN;
+41 mon 12 days 360:00
+-41 mon -12 days +360:00
+-12 days
+9 mon -27 days 12:34:56
+-3 years 482 days 76:54:32.189
+4 mon
+14 mon
+999 mon 999 days
+\.
+
+SELECT span * 0.3 AS product
+FROM INTERVAL_MULDIV_TBL;
+
+SELECT span * 8.2 AS product
+FROM INTERVAL_MULDIV_TBL;
+
+SELECT span / 10 AS quotient
+FROM INTERVAL_MULDIV_TBL;
+
+SELECT span / 100 AS quotient
+FROM INTERVAL_MULDIV_TBL;
+
+DROP TABLE INTERVAL_MULDIV_TBL;
+
+SET DATESTYLE = 'postgres';
+SET IntervalStyle to postgres_verbose;
+
+SELECT * FROM INTERVAL_TBL;
+
+-- test avg(interval), which is somewhat fragile since people have been
+-- known to change the allowed input syntax for type interval without
+-- updating pg_aggregate.agginitval
+
+select avg(f1) from interval_tbl;
+
+-- test long interval input
+select '4 millenniums 5 centuries 4 decades 1 year 4 months 4 days 17 minutes 31 seconds'::interval;
+
+-- test long interval output
+-- Note: the actual maximum length of the interval output is longer,
+-- but we need the test to work for both integer and floating-point
+-- timestamps.
+select '100000000y 10mon -1000000000d -100000h -10min -10.000001s ago'::interval;
+
+-- test justify_hours() and justify_days()
+
+SELECT justify_hours(interval '6 months 3 days 52 hours 3 minutes 2 seconds') as "6 mons 5 days 4 hours 3 mins 2 seconds";
+SELECT justify_days(interval '6 months 36 days 5 hours 4 minutes 3 seconds') as "7 mons 6 days 5 hours 4 mins 3 seconds";
+
+-- test justify_interval()
+
+SELECT justify_interval(interval '1 month -1 hour') as "1 month -1 hour";
+
+-- test fractional second input, and detection of duplicate units
+SET DATESTYLE = 'ISO';
+SET IntervalStyle TO postgres;
+
+SELECT '1 millisecond'::interval, '1 microsecond'::interval,
+ '500 seconds 99 milliseconds 51 microseconds'::interval;
+SELECT '3 days 5 milliseconds'::interval;
+
+SELECT '1 second 2 seconds'::interval; -- error
+SELECT '10 milliseconds 20 milliseconds'::interval; -- error
+SELECT '5.5 seconds 3 milliseconds'::interval; -- error
+SELECT '1:20:05 5 microseconds'::interval; -- error
+SELECT '1 day 1 day'::interval; -- error
+SELECT interval '1-2'; -- SQL year-month literal
+SELECT interval '999' second; -- oversize leading field is ok
+SELECT interval '999' minute;
+SELECT interval '999' hour;
+SELECT interval '999' day;
+SELECT interval '999' month;
+
+-- test SQL-spec syntaxes for restricted field sets
+SELECT interval '1' year;
+SELECT interval '2' month;
+SELECT interval '3' day;
+SELECT interval '4' hour;
+SELECT interval '5' minute;
+SELECT interval '6' second;
+SELECT interval '1' year to month;
+SELECT interval '1-2' year to month;
+SELECT interval '1 2' day to hour;
+SELECT interval '1 2:03' day to hour;
+SELECT interval '1 2:03:04' day to hour;
+SELECT interval '1 2' day to minute;
+SELECT interval '1 2:03' day to minute;
+SELECT interval '1 2:03:04' day to minute;
+SELECT interval '1 2' day to second;
+SELECT interval '1 2:03' day to second;
+SELECT interval '1 2:03:04' day to second;
+SELECT interval '1 2' hour to minute;
+SELECT interval '1 2:03' hour to minute;
+SELECT interval '1 2:03:04' hour to minute;
+SELECT interval '1 2' hour to second;
+SELECT interval '1 2:03' hour to second;
+SELECT interval '1 2:03:04' hour to second;
+SELECT interval '1 2' minute to second;
+SELECT interval '1 2:03' minute to second;
+SELECT interval '1 2:03:04' minute to second;
+SELECT interval '1 +2:03' minute to second;
+SELECT interval '1 +2:03:04' minute to second;
+SELECT interval '1 -2:03' minute to second;
+SELECT interval '1 -2:03:04' minute to second;
+SELECT interval '123 11' day to hour; -- ok
+SELECT interval '123 11' day; -- not ok
+SELECT interval '123 11'; -- not ok, too ambiguous
+SELECT interval '123 2:03 -2:04'; -- not ok, redundant hh:mm fields
+
+-- test syntaxes for restricted precision
+SELECT interval(0) '1 day 01:23:45.6789';
+SELECT interval(2) '1 day 01:23:45.6789';
+SELECT interval '12:34.5678' minute to second(2); -- per SQL spec
+SELECT interval '1.234' second;
+SELECT interval '1.234' second(2);
+SELECT interval '1 2.345' day to second(2);
+SELECT interval '1 2:03' day to second(2);
+SELECT interval '1 2:03.4567' day to second(2);
+SELECT interval '1 2:03:04.5678' day to second(2);
+SELECT interval '1 2.345' hour to second(2);
+SELECT interval '1 2:03.45678' hour to second(2);
+SELECT interval '1 2:03:04.5678' hour to second(2);
+SELECT interval '1 2.3456' minute to second(2);
+SELECT interval '1 2:03.5678' minute to second(2);
+SELECT interval '1 2:03:04.5678' minute to second(2);
+
+-- test casting to restricted precision (bug #14479)
+SELECT f1, f1::INTERVAL DAY TO MINUTE AS "minutes",
+ (f1 + INTERVAL '1 month')::INTERVAL MONTH::INTERVAL YEAR AS "years"
+ FROM interval_tbl;
+
+-- test inputting and outputting SQL standard interval literals
+SET IntervalStyle TO sql_standard;
+SELECT interval '0' AS "zero",
+ interval '1-2' year to month AS "year-month",
+ interval '1 2:03:04' day to second AS "day-time",
+ - interval '1-2' AS "negative year-month",
+ - interval '1 2:03:04' AS "negative day-time";
+
+-- test input of some not-quite-standard interval values in the sql style
+SET IntervalStyle TO postgres;
+SELECT interval '+1 -1:00:00',
+ interval '-1 +1:00:00',
+ interval '+1-2 -3 +4:05:06.789',
+ interval '-1-2 +3 -4:05:06.789';
+
+-- test output of couple non-standard interval values in the sql style
+SET IntervalStyle TO sql_standard;
+SELECT interval '1 day -1 hours',
+ interval '-1 days +1 hours',
+ interval '1 years 2 months -3 days 4 hours 5 minutes 6.789 seconds',
+ - interval '1 years 2 months -3 days 4 hours 5 minutes 6.789 seconds';
+
+-- test outputting iso8601 intervals
+SET IntervalStyle to iso_8601;
+select interval '0' AS "zero",
+ interval '1-2' AS "a year 2 months",
+ interval '1 2:03:04' AS "a bit over a day",
+ interval '2:03:04.45679' AS "a bit over 2 hours",
+ (interval '1-2' + interval '3 4:05:06.7') AS "all fields",
+ (interval '1-2' - interval '3 4:05:06.7') AS "mixed sign",
+ (- interval '1-2' + interval '3 4:05:06.7') AS "negative";
+
+-- test inputting ISO 8601 4.4.2.1 "Format With Time Unit Designators"
+SET IntervalStyle to sql_standard;
+select interval 'P0Y' AS "zero",
+ interval 'P1Y2M' AS "a year 2 months",
+ interval 'P1W' AS "a week",
+ interval 'P1DT2H3M4S' AS "a bit over a day",
+ interval 'P1Y2M3DT4H5M6.7S' AS "all fields",
+ interval 'P-1Y-2M-3DT-4H-5M-6.7S' AS "negative",
+ interval 'PT-0.1S' AS "fractional second";
+
+-- test inputting ISO 8601 4.4.2.2 "Alternative Format"
+SET IntervalStyle to postgres;
+select interval 'P00021015T103020' AS "ISO8601 Basic Format",
+ interval 'P0002-10-15T10:30:20' AS "ISO8601 Extended Format";
+
+-- Make sure optional ISO8601 alternative format fields are optional.
+select interval 'P0002' AS "year only",
+ interval 'P0002-10' AS "year month",
+ interval 'P0002-10-15' AS "year month day",
+ interval 'P0002T1S' AS "year only plus time",
+ interval 'P0002-10T1S' AS "year month plus time",
+ interval 'P0002-10-15T1S' AS "year month day plus time",
+ interval 'PT10' AS "hour only",
+ interval 'PT10:30' AS "hour minute";
+
+-- test a couple rounding cases that changed since 8.3 w/ HAVE_INT64_TIMESTAMP.
+SET IntervalStyle to postgres_verbose;
+select interval '-10 mons -3 days +03:55:06.70';
+select interval '1 year 2 mons 3 days 04:05:06.699999';
+select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds';
+
+-- check that '30 days' equals '1 month' according to the hash function
+select '30 days'::interval = '1 month'::interval as t;
+select interval_hash('30 days'::interval) = interval_hash('1 month'::interval) as t;
+
+-- numeric constructor
+select make_interval(years := 2);
+select make_interval(years := 1, months := 6);
+select make_interval(years := 1, months := -1, weeks := 5, days := -7, hours := 25, mins := -180);
+
+select make_interval() = make_interval(years := 0, months := 0, weeks := 0, days := 0, mins := 0, secs := 0.0);
+select make_interval(hours := -2, mins := -10, secs := -25.3);
+
+select make_interval(years := 'inf'::float::int);
+select make_interval(months := 'NaN'::float::int);
+select make_interval(secs := 'inf');
+select make_interval(secs := 'NaN');
+select make_interval(secs := 7e12);
+
+--
+-- test EXTRACT
+--
+SELECT f1,
+ EXTRACT(MICROSECOND FROM f1) AS MICROSECOND,
+ EXTRACT(MILLISECOND FROM f1) AS MILLISECOND,
+ EXTRACT(SECOND FROM f1) AS SECOND,
+ EXTRACT(MINUTE FROM f1) AS MINUTE,
+ EXTRACT(HOUR FROM f1) AS HOUR,
+ EXTRACT(DAY FROM f1) AS DAY,
+ EXTRACT(MONTH FROM f1) AS MONTH,
+ EXTRACT(QUARTER FROM f1) AS QUARTER,
+ EXTRACT(YEAR FROM f1) AS YEAR,
+ EXTRACT(DECADE FROM f1) AS DECADE,
+ EXTRACT(CENTURY FROM f1) AS CENTURY,
+ EXTRACT(MILLENNIUM FROM f1) AS MILLENNIUM,
+ EXTRACT(EPOCH FROM f1) AS EPOCH
+ FROM INTERVAL_TBL;
+
+SELECT EXTRACT(FORTNIGHT FROM INTERVAL '2 days'); -- error
+SELECT EXTRACT(TIMEZONE FROM INTERVAL '2 days'); -- error
+
+SELECT EXTRACT(DECADE FROM INTERVAL '100 y');
+SELECT EXTRACT(DECADE FROM INTERVAL '99 y');
+SELECT EXTRACT(DECADE FROM INTERVAL '-99 y');
+SELECT EXTRACT(DECADE FROM INTERVAL '-100 y');
+
+SELECT EXTRACT(CENTURY FROM INTERVAL '100 y');
+SELECT EXTRACT(CENTURY FROM INTERVAL '99 y');
+SELECT EXTRACT(CENTURY FROM INTERVAL '-99 y');
+SELECT EXTRACT(CENTURY FROM INTERVAL '-100 y');
+
+-- date_part implementation is mostly the same as extract, so only
+-- test a few cases for additional coverage.
+SELECT f1,
+ date_part('microsecond', f1) AS microsecond,
+ date_part('millisecond', f1) AS millisecond,
+ date_part('second', f1) AS second,
+ date_part('epoch', f1) AS epoch
+ FROM INTERVAL_TBL;
+
+-- internal overflow test case
+SELECT extract(epoch from interval '1000000000 days');
diff --git a/ydb/library/yql/tests/postgresql/original/cases/join.out b/ydb/library/yql/tests/postgresql/original/cases/join.out
new file mode 100644
index 0000000000..5c56304504
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/join.out
@@ -0,0 +1,6628 @@
+--
+-- JOIN
+-- Test JOIN clauses
+--
+CREATE TABLE J1_TBL (
+ i integer,
+ j integer,
+ t text
+);
+CREATE TABLE J2_TBL (
+ i integer,
+ k integer
+);
+INSERT INTO J1_TBL VALUES (1, 4, 'one');
+INSERT INTO J1_TBL VALUES (2, 3, 'two');
+INSERT INTO J1_TBL VALUES (3, 2, 'three');
+INSERT INTO J1_TBL VALUES (4, 1, 'four');
+INSERT INTO J1_TBL VALUES (5, 0, 'five');
+INSERT INTO J1_TBL VALUES (6, 6, 'six');
+INSERT INTO J1_TBL VALUES (7, 7, 'seven');
+INSERT INTO J1_TBL VALUES (8, 8, 'eight');
+INSERT INTO J1_TBL VALUES (0, NULL, 'zero');
+INSERT INTO J1_TBL VALUES (NULL, NULL, 'null');
+INSERT INTO J1_TBL VALUES (NULL, 0, 'zero');
+INSERT INTO J2_TBL VALUES (1, -1);
+INSERT INTO J2_TBL VALUES (2, 2);
+INSERT INTO J2_TBL VALUES (3, -3);
+INSERT INTO J2_TBL VALUES (2, 4);
+INSERT INTO J2_TBL VALUES (5, -5);
+INSERT INTO J2_TBL VALUES (5, -5);
+INSERT INTO J2_TBL VALUES (0, NULL);
+INSERT INTO J2_TBL VALUES (NULL, NULL);
+INSERT INTO J2_TBL VALUES (NULL, 0);
+-- useful in some tests below
+create temp table onerow();
+insert into onerow default values;
+analyze onerow;
+--
+-- CORRELATION NAMES
+-- Make sure that table/column aliases are supported
+-- before diving into more complex join syntax.
+--
+SELECT *
+ FROM J1_TBL AS tx;
+ i | j | t
+---+---+-------
+ 1 | 4 | one
+ 2 | 3 | two
+ 3 | 2 | three
+ 4 | 1 | four
+ 5 | 0 | five
+ 6 | 6 | six
+ 7 | 7 | seven
+ 8 | 8 | eight
+ 0 | | zero
+ | | null
+ | 0 | zero
+(11 rows)
+
+SELECT *
+ FROM J1_TBL tx;
+ i | j | t
+---+---+-------
+ 1 | 4 | one
+ 2 | 3 | two
+ 3 | 2 | three
+ 4 | 1 | four
+ 5 | 0 | five
+ 6 | 6 | six
+ 7 | 7 | seven
+ 8 | 8 | eight
+ 0 | | zero
+ | | null
+ | 0 | zero
+(11 rows)
+
+SELECT *
+ FROM J1_TBL AS t1 (a, b, c);
+ a | b | c
+---+---+-------
+ 1 | 4 | one
+ 2 | 3 | two
+ 3 | 2 | three
+ 4 | 1 | four
+ 5 | 0 | five
+ 6 | 6 | six
+ 7 | 7 | seven
+ 8 | 8 | eight
+ 0 | | zero
+ | | null
+ | 0 | zero
+(11 rows)
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c);
+ a | b | c
+---+---+-------
+ 1 | 4 | one
+ 2 | 3 | two
+ 3 | 2 | three
+ 4 | 1 | four
+ 5 | 0 | five
+ 6 | 6 | six
+ 7 | 7 | seven
+ 8 | 8 | eight
+ 0 | | zero
+ | | null
+ | 0 | zero
+(11 rows)
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c), J2_TBL t2 (d, e);
+ a | b | c | d | e
+---+---+-------+---+----
+ 1 | 4 | one | 1 | -1
+ 2 | 3 | two | 1 | -1
+ 3 | 2 | three | 1 | -1
+ 4 | 1 | four | 1 | -1
+ 5 | 0 | five | 1 | -1
+ 6 | 6 | six | 1 | -1
+ 7 | 7 | seven | 1 | -1
+ 8 | 8 | eight | 1 | -1
+ 0 | | zero | 1 | -1
+ | | null | 1 | -1
+ | 0 | zero | 1 | -1
+ 1 | 4 | one | 2 | 2
+ 2 | 3 | two | 2 | 2
+ 3 | 2 | three | 2 | 2
+ 4 | 1 | four | 2 | 2
+ 5 | 0 | five | 2 | 2
+ 6 | 6 | six | 2 | 2
+ 7 | 7 | seven | 2 | 2
+ 8 | 8 | eight | 2 | 2
+ 0 | | zero | 2 | 2
+ | | null | 2 | 2
+ | 0 | zero | 2 | 2
+ 1 | 4 | one | 3 | -3
+ 2 | 3 | two | 3 | -3
+ 3 | 2 | three | 3 | -3
+ 4 | 1 | four | 3 | -3
+ 5 | 0 | five | 3 | -3
+ 6 | 6 | six | 3 | -3
+ 7 | 7 | seven | 3 | -3
+ 8 | 8 | eight | 3 | -3
+ 0 | | zero | 3 | -3
+ | | null | 3 | -3
+ | 0 | zero | 3 | -3
+ 1 | 4 | one | 2 | 4
+ 2 | 3 | two | 2 | 4
+ 3 | 2 | three | 2 | 4
+ 4 | 1 | four | 2 | 4
+ 5 | 0 | five | 2 | 4
+ 6 | 6 | six | 2 | 4
+ 7 | 7 | seven | 2 | 4
+ 8 | 8 | eight | 2 | 4
+ 0 | | zero | 2 | 4
+ | | null | 2 | 4
+ | 0 | zero | 2 | 4
+ 1 | 4 | one | 5 | -5
+ 2 | 3 | two | 5 | -5
+ 3 | 2 | three | 5 | -5
+ 4 | 1 | four | 5 | -5
+ 5 | 0 | five | 5 | -5
+ 6 | 6 | six | 5 | -5
+ 7 | 7 | seven | 5 | -5
+ 8 | 8 | eight | 5 | -5
+ 0 | | zero | 5 | -5
+ | | null | 5 | -5
+ | 0 | zero | 5 | -5
+ 1 | 4 | one | 5 | -5
+ 2 | 3 | two | 5 | -5
+ 3 | 2 | three | 5 | -5
+ 4 | 1 | four | 5 | -5
+ 5 | 0 | five | 5 | -5
+ 6 | 6 | six | 5 | -5
+ 7 | 7 | seven | 5 | -5
+ 8 | 8 | eight | 5 | -5
+ 0 | | zero | 5 | -5
+ | | null | 5 | -5
+ | 0 | zero | 5 | -5
+ 1 | 4 | one | 0 |
+ 2 | 3 | two | 0 |
+ 3 | 2 | three | 0 |
+ 4 | 1 | four | 0 |
+ 5 | 0 | five | 0 |
+ 6 | 6 | six | 0 |
+ 7 | 7 | seven | 0 |
+ 8 | 8 | eight | 0 |
+ 0 | | zero | 0 |
+ | | null | 0 |
+ | 0 | zero | 0 |
+ 1 | 4 | one | |
+ 2 | 3 | two | |
+ 3 | 2 | three | |
+ 4 | 1 | four | |
+ 5 | 0 | five | |
+ 6 | 6 | six | |
+ 7 | 7 | seven | |
+ 8 | 8 | eight | |
+ 0 | | zero | |
+ | | null | |
+ | 0 | zero | |
+ 1 | 4 | one | | 0
+ 2 | 3 | two | | 0
+ 3 | 2 | three | | 0
+ 4 | 1 | four | | 0
+ 5 | 0 | five | | 0
+ 6 | 6 | six | | 0
+ 7 | 7 | seven | | 0
+ 8 | 8 | eight | | 0
+ 0 | | zero | | 0
+ | | null | | 0
+ | 0 | zero | | 0
+(99 rows)
+
+SELECT t1.a, t2.e
+ FROM J1_TBL t1 (a, b, c), J2_TBL t2 (d, e)
+ WHERE t1.a = t2.d;
+ a | e
+---+----
+ 0 |
+ 1 | -1
+ 2 | 2
+ 2 | 4
+ 3 | -3
+ 5 | -5
+ 5 | -5
+(7 rows)
+
+--
+-- CROSS JOIN
+-- Qualifications are not allowed on cross joins,
+-- which degenerate into a standard unqualified inner join.
+--
+SELECT *
+ FROM J1_TBL CROSS JOIN J2_TBL;
+ i | j | t | i | k
+---+---+-------+---+----
+ 1 | 4 | one | 1 | -1
+ 2 | 3 | two | 1 | -1
+ 3 | 2 | three | 1 | -1
+ 4 | 1 | four | 1 | -1
+ 5 | 0 | five | 1 | -1
+ 6 | 6 | six | 1 | -1
+ 7 | 7 | seven | 1 | -1
+ 8 | 8 | eight | 1 | -1
+ 0 | | zero | 1 | -1
+ | | null | 1 | -1
+ | 0 | zero | 1 | -1
+ 1 | 4 | one | 2 | 2
+ 2 | 3 | two | 2 | 2
+ 3 | 2 | three | 2 | 2
+ 4 | 1 | four | 2 | 2
+ 5 | 0 | five | 2 | 2
+ 6 | 6 | six | 2 | 2
+ 7 | 7 | seven | 2 | 2
+ 8 | 8 | eight | 2 | 2
+ 0 | | zero | 2 | 2
+ | | null | 2 | 2
+ | 0 | zero | 2 | 2
+ 1 | 4 | one | 3 | -3
+ 2 | 3 | two | 3 | -3
+ 3 | 2 | three | 3 | -3
+ 4 | 1 | four | 3 | -3
+ 5 | 0 | five | 3 | -3
+ 6 | 6 | six | 3 | -3
+ 7 | 7 | seven | 3 | -3
+ 8 | 8 | eight | 3 | -3
+ 0 | | zero | 3 | -3
+ | | null | 3 | -3
+ | 0 | zero | 3 | -3
+ 1 | 4 | one | 2 | 4
+ 2 | 3 | two | 2 | 4
+ 3 | 2 | three | 2 | 4
+ 4 | 1 | four | 2 | 4
+ 5 | 0 | five | 2 | 4
+ 6 | 6 | six | 2 | 4
+ 7 | 7 | seven | 2 | 4
+ 8 | 8 | eight | 2 | 4
+ 0 | | zero | 2 | 4
+ | | null | 2 | 4
+ | 0 | zero | 2 | 4
+ 1 | 4 | one | 5 | -5
+ 2 | 3 | two | 5 | -5
+ 3 | 2 | three | 5 | -5
+ 4 | 1 | four | 5 | -5
+ 5 | 0 | five | 5 | -5
+ 6 | 6 | six | 5 | -5
+ 7 | 7 | seven | 5 | -5
+ 8 | 8 | eight | 5 | -5
+ 0 | | zero | 5 | -5
+ | | null | 5 | -5
+ | 0 | zero | 5 | -5
+ 1 | 4 | one | 5 | -5
+ 2 | 3 | two | 5 | -5
+ 3 | 2 | three | 5 | -5
+ 4 | 1 | four | 5 | -5
+ 5 | 0 | five | 5 | -5
+ 6 | 6 | six | 5 | -5
+ 7 | 7 | seven | 5 | -5
+ 8 | 8 | eight | 5 | -5
+ 0 | | zero | 5 | -5
+ | | null | 5 | -5
+ | 0 | zero | 5 | -5
+ 1 | 4 | one | 0 |
+ 2 | 3 | two | 0 |
+ 3 | 2 | three | 0 |
+ 4 | 1 | four | 0 |
+ 5 | 0 | five | 0 |
+ 6 | 6 | six | 0 |
+ 7 | 7 | seven | 0 |
+ 8 | 8 | eight | 0 |
+ 0 | | zero | 0 |
+ | | null | 0 |
+ | 0 | zero | 0 |
+ 1 | 4 | one | |
+ 2 | 3 | two | |
+ 3 | 2 | three | |
+ 4 | 1 | four | |
+ 5 | 0 | five | |
+ 6 | 6 | six | |
+ 7 | 7 | seven | |
+ 8 | 8 | eight | |
+ 0 | | zero | |
+ | | null | |
+ | 0 | zero | |
+ 1 | 4 | one | | 0
+ 2 | 3 | two | | 0
+ 3 | 2 | three | | 0
+ 4 | 1 | four | | 0
+ 5 | 0 | five | | 0
+ 6 | 6 | six | | 0
+ 7 | 7 | seven | | 0
+ 8 | 8 | eight | | 0
+ 0 | | zero | | 0
+ | | null | | 0
+ | 0 | zero | | 0
+(99 rows)
+
+-- ambiguous column
+SELECT i, k, t
+ FROM J1_TBL CROSS JOIN J2_TBL;
+ERROR: column reference "i" is ambiguous
+LINE 1: SELECT i, k, t
+ ^
+-- resolve previous ambiguity by specifying the table name
+SELECT t1.i, k, t
+ FROM J1_TBL t1 CROSS JOIN J2_TBL t2;
+ i | k | t
+---+----+-------
+ 1 | -1 | one
+ 2 | -1 | two
+ 3 | -1 | three
+ 4 | -1 | four
+ 5 | -1 | five
+ 6 | -1 | six
+ 7 | -1 | seven
+ 8 | -1 | eight
+ 0 | -1 | zero
+ | -1 | null
+ | -1 | zero
+ 1 | 2 | one
+ 2 | 2 | two
+ 3 | 2 | three
+ 4 | 2 | four
+ 5 | 2 | five
+ 6 | 2 | six
+ 7 | 2 | seven
+ 8 | 2 | eight
+ 0 | 2 | zero
+ | 2 | null
+ | 2 | zero
+ 1 | -3 | one
+ 2 | -3 | two
+ 3 | -3 | three
+ 4 | -3 | four
+ 5 | -3 | five
+ 6 | -3 | six
+ 7 | -3 | seven
+ 8 | -3 | eight
+ 0 | -3 | zero
+ | -3 | null
+ | -3 | zero
+ 1 | 4 | one
+ 2 | 4 | two
+ 3 | 4 | three
+ 4 | 4 | four
+ 5 | 4 | five
+ 6 | 4 | six
+ 7 | 4 | seven
+ 8 | 4 | eight
+ 0 | 4 | zero
+ | 4 | null
+ | 4 | zero
+ 1 | -5 | one
+ 2 | -5 | two
+ 3 | -5 | three
+ 4 | -5 | four
+ 5 | -5 | five
+ 6 | -5 | six
+ 7 | -5 | seven
+ 8 | -5 | eight
+ 0 | -5 | zero
+ | -5 | null
+ | -5 | zero
+ 1 | -5 | one
+ 2 | -5 | two
+ 3 | -5 | three
+ 4 | -5 | four
+ 5 | -5 | five
+ 6 | -5 | six
+ 7 | -5 | seven
+ 8 | -5 | eight
+ 0 | -5 | zero
+ | -5 | null
+ | -5 | zero
+ 1 | | one
+ 2 | | two
+ 3 | | three
+ 4 | | four
+ 5 | | five
+ 6 | | six
+ 7 | | seven
+ 8 | | eight
+ 0 | | zero
+ | | null
+ | | zero
+ 1 | | one
+ 2 | | two
+ 3 | | three
+ 4 | | four
+ 5 | | five
+ 6 | | six
+ 7 | | seven
+ 8 | | eight
+ 0 | | zero
+ | | null
+ | | zero
+ 1 | 0 | one
+ 2 | 0 | two
+ 3 | 0 | three
+ 4 | 0 | four
+ 5 | 0 | five
+ 6 | 0 | six
+ 7 | 0 | seven
+ 8 | 0 | eight
+ 0 | 0 | zero
+ | 0 | null
+ | 0 | zero
+(99 rows)
+
+SELECT ii, tt, kk
+ FROM (J1_TBL CROSS JOIN J2_TBL)
+ AS tx (ii, jj, tt, ii2, kk);
+ ii | tt | kk
+----+-------+----
+ 1 | one | -1
+ 2 | two | -1
+ 3 | three | -1
+ 4 | four | -1
+ 5 | five | -1
+ 6 | six | -1
+ 7 | seven | -1
+ 8 | eight | -1
+ 0 | zero | -1
+ | null | -1
+ | zero | -1
+ 1 | one | 2
+ 2 | two | 2
+ 3 | three | 2
+ 4 | four | 2
+ 5 | five | 2
+ 6 | six | 2
+ 7 | seven | 2
+ 8 | eight | 2
+ 0 | zero | 2
+ | null | 2
+ | zero | 2
+ 1 | one | -3
+ 2 | two | -3
+ 3 | three | -3
+ 4 | four | -3
+ 5 | five | -3
+ 6 | six | -3
+ 7 | seven | -3
+ 8 | eight | -3
+ 0 | zero | -3
+ | null | -3
+ | zero | -3
+ 1 | one | 4
+ 2 | two | 4
+ 3 | three | 4
+ 4 | four | 4
+ 5 | five | 4
+ 6 | six | 4
+ 7 | seven | 4
+ 8 | eight | 4
+ 0 | zero | 4
+ | null | 4
+ | zero | 4
+ 1 | one | -5
+ 2 | two | -5
+ 3 | three | -5
+ 4 | four | -5
+ 5 | five | -5
+ 6 | six | -5
+ 7 | seven | -5
+ 8 | eight | -5
+ 0 | zero | -5
+ | null | -5
+ | zero | -5
+ 1 | one | -5
+ 2 | two | -5
+ 3 | three | -5
+ 4 | four | -5
+ 5 | five | -5
+ 6 | six | -5
+ 7 | seven | -5
+ 8 | eight | -5
+ 0 | zero | -5
+ | null | -5
+ | zero | -5
+ 1 | one |
+ 2 | two |
+ 3 | three |
+ 4 | four |
+ 5 | five |
+ 6 | six |
+ 7 | seven |
+ 8 | eight |
+ 0 | zero |
+ | null |
+ | zero |
+ 1 | one |
+ 2 | two |
+ 3 | three |
+ 4 | four |
+ 5 | five |
+ 6 | six |
+ 7 | seven |
+ 8 | eight |
+ 0 | zero |
+ | null |
+ | zero |
+ 1 | one | 0
+ 2 | two | 0
+ 3 | three | 0
+ 4 | four | 0
+ 5 | five | 0
+ 6 | six | 0
+ 7 | seven | 0
+ 8 | eight | 0
+ 0 | zero | 0
+ | null | 0
+ | zero | 0
+(99 rows)
+
+SELECT tx.ii, tx.jj, tx.kk
+ FROM (J1_TBL t1 (a, b, c) CROSS JOIN J2_TBL t2 (d, e))
+ AS tx (ii, jj, tt, ii2, kk);
+ ii | jj | kk
+----+----+----
+ 1 | 4 | -1
+ 2 | 3 | -1
+ 3 | 2 | -1
+ 4 | 1 | -1
+ 5 | 0 | -1
+ 6 | 6 | -1
+ 7 | 7 | -1
+ 8 | 8 | -1
+ 0 | | -1
+ | | -1
+ | 0 | -1
+ 1 | 4 | 2
+ 2 | 3 | 2
+ 3 | 2 | 2
+ 4 | 1 | 2
+ 5 | 0 | 2
+ 6 | 6 | 2
+ 7 | 7 | 2
+ 8 | 8 | 2
+ 0 | | 2
+ | | 2
+ | 0 | 2
+ 1 | 4 | -3
+ 2 | 3 | -3
+ 3 | 2 | -3
+ 4 | 1 | -3
+ 5 | 0 | -3
+ 6 | 6 | -3
+ 7 | 7 | -3
+ 8 | 8 | -3
+ 0 | | -3
+ | | -3
+ | 0 | -3
+ 1 | 4 | 4
+ 2 | 3 | 4
+ 3 | 2 | 4
+ 4 | 1 | 4
+ 5 | 0 | 4
+ 6 | 6 | 4
+ 7 | 7 | 4
+ 8 | 8 | 4
+ 0 | | 4
+ | | 4
+ | 0 | 4
+ 1 | 4 | -5
+ 2 | 3 | -5
+ 3 | 2 | -5
+ 4 | 1 | -5
+ 5 | 0 | -5
+ 6 | 6 | -5
+ 7 | 7 | -5
+ 8 | 8 | -5
+ 0 | | -5
+ | | -5
+ | 0 | -5
+ 1 | 4 | -5
+ 2 | 3 | -5
+ 3 | 2 | -5
+ 4 | 1 | -5
+ 5 | 0 | -5
+ 6 | 6 | -5
+ 7 | 7 | -5
+ 8 | 8 | -5
+ 0 | | -5
+ | | -5
+ | 0 | -5
+ 1 | 4 |
+ 2 | 3 |
+ 3 | 2 |
+ 4 | 1 |
+ 5 | 0 |
+ 6 | 6 |
+ 7 | 7 |
+ 8 | 8 |
+ 0 | |
+ | |
+ | 0 |
+ 1 | 4 |
+ 2 | 3 |
+ 3 | 2 |
+ 4 | 1 |
+ 5 | 0 |
+ 6 | 6 |
+ 7 | 7 |
+ 8 | 8 |
+ 0 | |
+ | |
+ | 0 |
+ 1 | 4 | 0
+ 2 | 3 | 0
+ 3 | 2 | 0
+ 4 | 1 | 0
+ 5 | 0 | 0
+ 6 | 6 | 0
+ 7 | 7 | 0
+ 8 | 8 | 0
+ 0 | | 0
+ | | 0
+ | 0 | 0
+(99 rows)
+
+SELECT *
+ FROM J1_TBL CROSS JOIN J2_TBL a CROSS JOIN J2_TBL b;
+ i | j | t | i | k | i | k
+---+---+-------+---+----+---+----
+ 1 | 4 | one | 1 | -1 | 1 | -1
+ 1 | 4 | one | 1 | -1 | 2 | 2
+ 1 | 4 | one | 1 | -1 | 3 | -3
+ 1 | 4 | one | 1 | -1 | 2 | 4
+ 1 | 4 | one | 1 | -1 | 5 | -5
+ 1 | 4 | one | 1 | -1 | 5 | -5
+ 1 | 4 | one | 1 | -1 | 0 |
+ 1 | 4 | one | 1 | -1 | |
+ 1 | 4 | one | 1 | -1 | | 0
+ 2 | 3 | two | 1 | -1 | 1 | -1
+ 2 | 3 | two | 1 | -1 | 2 | 2
+ 2 | 3 | two | 1 | -1 | 3 | -3
+ 2 | 3 | two | 1 | -1 | 2 | 4
+ 2 | 3 | two | 1 | -1 | 5 | -5
+ 2 | 3 | two | 1 | -1 | 5 | -5
+ 2 | 3 | two | 1 | -1 | 0 |
+ 2 | 3 | two | 1 | -1 | |
+ 2 | 3 | two | 1 | -1 | | 0
+ 3 | 2 | three | 1 | -1 | 1 | -1
+ 3 | 2 | three | 1 | -1 | 2 | 2
+ 3 | 2 | three | 1 | -1 | 3 | -3
+ 3 | 2 | three | 1 | -1 | 2 | 4
+ 3 | 2 | three | 1 | -1 | 5 | -5
+ 3 | 2 | three | 1 | -1 | 5 | -5
+ 3 | 2 | three | 1 | -1 | 0 |
+ 3 | 2 | three | 1 | -1 | |
+ 3 | 2 | three | 1 | -1 | | 0
+ 4 | 1 | four | 1 | -1 | 1 | -1
+ 4 | 1 | four | 1 | -1 | 2 | 2
+ 4 | 1 | four | 1 | -1 | 3 | -3
+ 4 | 1 | four | 1 | -1 | 2 | 4
+ 4 | 1 | four | 1 | -1 | 5 | -5
+ 4 | 1 | four | 1 | -1 | 5 | -5
+ 4 | 1 | four | 1 | -1 | 0 |
+ 4 | 1 | four | 1 | -1 | |
+ 4 | 1 | four | 1 | -1 | | 0
+ 5 | 0 | five | 1 | -1 | 1 | -1
+ 5 | 0 | five | 1 | -1 | 2 | 2
+ 5 | 0 | five | 1 | -1 | 3 | -3
+ 5 | 0 | five | 1 | -1 | 2 | 4
+ 5 | 0 | five | 1 | -1 | 5 | -5
+ 5 | 0 | five | 1 | -1 | 5 | -5
+ 5 | 0 | five | 1 | -1 | 0 |
+ 5 | 0 | five | 1 | -1 | |
+ 5 | 0 | five | 1 | -1 | | 0
+ 6 | 6 | six | 1 | -1 | 1 | -1
+ 6 | 6 | six | 1 | -1 | 2 | 2
+ 6 | 6 | six | 1 | -1 | 3 | -3
+ 6 | 6 | six | 1 | -1 | 2 | 4
+ 6 | 6 | six | 1 | -1 | 5 | -5
+ 6 | 6 | six | 1 | -1 | 5 | -5
+ 6 | 6 | six | 1 | -1 | 0 |
+ 6 | 6 | six | 1 | -1 | |
+ 6 | 6 | six | 1 | -1 | | 0
+ 7 | 7 | seven | 1 | -1 | 1 | -1
+ 7 | 7 | seven | 1 | -1 | 2 | 2
+ 7 | 7 | seven | 1 | -1 | 3 | -3
+ 7 | 7 | seven | 1 | -1 | 2 | 4
+ 7 | 7 | seven | 1 | -1 | 5 | -5
+ 7 | 7 | seven | 1 | -1 | 5 | -5
+ 7 | 7 | seven | 1 | -1 | 0 |
+ 7 | 7 | seven | 1 | -1 | |
+ 7 | 7 | seven | 1 | -1 | | 0
+ 8 | 8 | eight | 1 | -1 | 1 | -1
+ 8 | 8 | eight | 1 | -1 | 2 | 2
+ 8 | 8 | eight | 1 | -1 | 3 | -3
+ 8 | 8 | eight | 1 | -1 | 2 | 4
+ 8 | 8 | eight | 1 | -1 | 5 | -5
+ 8 | 8 | eight | 1 | -1 | 5 | -5
+ 8 | 8 | eight | 1 | -1 | 0 |
+ 8 | 8 | eight | 1 | -1 | |
+ 8 | 8 | eight | 1 | -1 | | 0
+ 0 | | zero | 1 | -1 | 1 | -1
+ 0 | | zero | 1 | -1 | 2 | 2
+ 0 | | zero | 1 | -1 | 3 | -3
+ 0 | | zero | 1 | -1 | 2 | 4
+ 0 | | zero | 1 | -1 | 5 | -5
+ 0 | | zero | 1 | -1 | 5 | -5
+ 0 | | zero | 1 | -1 | 0 |
+ 0 | | zero | 1 | -1 | |
+ 0 | | zero | 1 | -1 | | 0
+ | | null | 1 | -1 | 1 | -1
+ | | null | 1 | -1 | 2 | 2
+ | | null | 1 | -1 | 3 | -3
+ | | null | 1 | -1 | 2 | 4
+ | | null | 1 | -1 | 5 | -5
+ | | null | 1 | -1 | 5 | -5
+ | | null | 1 | -1 | 0 |
+ | | null | 1 | -1 | |
+ | | null | 1 | -1 | | 0
+ | 0 | zero | 1 | -1 | 1 | -1
+ | 0 | zero | 1 | -1 | 2 | 2
+ | 0 | zero | 1 | -1 | 3 | -3
+ | 0 | zero | 1 | -1 | 2 | 4
+ | 0 | zero | 1 | -1 | 5 | -5
+ | 0 | zero | 1 | -1 | 5 | -5
+ | 0 | zero | 1 | -1 | 0 |
+ | 0 | zero | 1 | -1 | |
+ | 0 | zero | 1 | -1 | | 0
+ 1 | 4 | one | 2 | 2 | 1 | -1
+ 1 | 4 | one | 2 | 2 | 2 | 2
+ 1 | 4 | one | 2 | 2 | 3 | -3
+ 1 | 4 | one | 2 | 2 | 2 | 4
+ 1 | 4 | one | 2 | 2 | 5 | -5
+ 1 | 4 | one | 2 | 2 | 5 | -5
+ 1 | 4 | one | 2 | 2 | 0 |
+ 1 | 4 | one | 2 | 2 | |
+ 1 | 4 | one | 2 | 2 | | 0
+ 2 | 3 | two | 2 | 2 | 1 | -1
+ 2 | 3 | two | 2 | 2 | 2 | 2
+ 2 | 3 | two | 2 | 2 | 3 | -3
+ 2 | 3 | two | 2 | 2 | 2 | 4
+ 2 | 3 | two | 2 | 2 | 5 | -5
+ 2 | 3 | two | 2 | 2 | 5 | -5
+ 2 | 3 | two | 2 | 2 | 0 |
+ 2 | 3 | two | 2 | 2 | |
+ 2 | 3 | two | 2 | 2 | | 0
+ 3 | 2 | three | 2 | 2 | 1 | -1
+ 3 | 2 | three | 2 | 2 | 2 | 2
+ 3 | 2 | three | 2 | 2 | 3 | -3
+ 3 | 2 | three | 2 | 2 | 2 | 4
+ 3 | 2 | three | 2 | 2 | 5 | -5
+ 3 | 2 | three | 2 | 2 | 5 | -5
+ 3 | 2 | three | 2 | 2 | 0 |
+ 3 | 2 | three | 2 | 2 | |
+ 3 | 2 | three | 2 | 2 | | 0
+ 4 | 1 | four | 2 | 2 | 1 | -1
+ 4 | 1 | four | 2 | 2 | 2 | 2
+ 4 | 1 | four | 2 | 2 | 3 | -3
+ 4 | 1 | four | 2 | 2 | 2 | 4
+ 4 | 1 | four | 2 | 2 | 5 | -5
+ 4 | 1 | four | 2 | 2 | 5 | -5
+ 4 | 1 | four | 2 | 2 | 0 |
+ 4 | 1 | four | 2 | 2 | |
+ 4 | 1 | four | 2 | 2 | | 0
+ 5 | 0 | five | 2 | 2 | 1 | -1
+ 5 | 0 | five | 2 | 2 | 2 | 2
+ 5 | 0 | five | 2 | 2 | 3 | -3
+ 5 | 0 | five | 2 | 2 | 2 | 4
+ 5 | 0 | five | 2 | 2 | 5 | -5
+ 5 | 0 | five | 2 | 2 | 5 | -5
+ 5 | 0 | five | 2 | 2 | 0 |
+ 5 | 0 | five | 2 | 2 | |
+ 5 | 0 | five | 2 | 2 | | 0
+ 6 | 6 | six | 2 | 2 | 1 | -1
+ 6 | 6 | six | 2 | 2 | 2 | 2
+ 6 | 6 | six | 2 | 2 | 3 | -3
+ 6 | 6 | six | 2 | 2 | 2 | 4
+ 6 | 6 | six | 2 | 2 | 5 | -5
+ 6 | 6 | six | 2 | 2 | 5 | -5
+ 6 | 6 | six | 2 | 2 | 0 |
+ 6 | 6 | six | 2 | 2 | |
+ 6 | 6 | six | 2 | 2 | | 0
+ 7 | 7 | seven | 2 | 2 | 1 | -1
+ 7 | 7 | seven | 2 | 2 | 2 | 2
+ 7 | 7 | seven | 2 | 2 | 3 | -3
+ 7 | 7 | seven | 2 | 2 | 2 | 4
+ 7 | 7 | seven | 2 | 2 | 5 | -5
+ 7 | 7 | seven | 2 | 2 | 5 | -5
+ 7 | 7 | seven | 2 | 2 | 0 |
+ 7 | 7 | seven | 2 | 2 | |
+ 7 | 7 | seven | 2 | 2 | | 0
+ 8 | 8 | eight | 2 | 2 | 1 | -1
+ 8 | 8 | eight | 2 | 2 | 2 | 2
+ 8 | 8 | eight | 2 | 2 | 3 | -3
+ 8 | 8 | eight | 2 | 2 | 2 | 4
+ 8 | 8 | eight | 2 | 2 | 5 | -5
+ 8 | 8 | eight | 2 | 2 | 5 | -5
+ 8 | 8 | eight | 2 | 2 | 0 |
+ 8 | 8 | eight | 2 | 2 | |
+ 8 | 8 | eight | 2 | 2 | | 0
+ 0 | | zero | 2 | 2 | 1 | -1
+ 0 | | zero | 2 | 2 | 2 | 2
+ 0 | | zero | 2 | 2 | 3 | -3
+ 0 | | zero | 2 | 2 | 2 | 4
+ 0 | | zero | 2 | 2 | 5 | -5
+ 0 | | zero | 2 | 2 | 5 | -5
+ 0 | | zero | 2 | 2 | 0 |
+ 0 | | zero | 2 | 2 | |
+ 0 | | zero | 2 | 2 | | 0
+ | | null | 2 | 2 | 1 | -1
+ | | null | 2 | 2 | 2 | 2
+ | | null | 2 | 2 | 3 | -3
+ | | null | 2 | 2 | 2 | 4
+ | | null | 2 | 2 | 5 | -5
+ | | null | 2 | 2 | 5 | -5
+ | | null | 2 | 2 | 0 |
+ | | null | 2 | 2 | |
+ | | null | 2 | 2 | | 0
+ | 0 | zero | 2 | 2 | 1 | -1
+ | 0 | zero | 2 | 2 | 2 | 2
+ | 0 | zero | 2 | 2 | 3 | -3
+ | 0 | zero | 2 | 2 | 2 | 4
+ | 0 | zero | 2 | 2 | 5 | -5
+ | 0 | zero | 2 | 2 | 5 | -5
+ | 0 | zero | 2 | 2 | 0 |
+ | 0 | zero | 2 | 2 | |
+ | 0 | zero | 2 | 2 | | 0
+ 1 | 4 | one | 3 | -3 | 1 | -1
+ 1 | 4 | one | 3 | -3 | 2 | 2
+ 1 | 4 | one | 3 | -3 | 3 | -3
+ 1 | 4 | one | 3 | -3 | 2 | 4
+ 1 | 4 | one | 3 | -3 | 5 | -5
+ 1 | 4 | one | 3 | -3 | 5 | -5
+ 1 | 4 | one | 3 | -3 | 0 |
+ 1 | 4 | one | 3 | -3 | |
+ 1 | 4 | one | 3 | -3 | | 0
+ 2 | 3 | two | 3 | -3 | 1 | -1
+ 2 | 3 | two | 3 | -3 | 2 | 2
+ 2 | 3 | two | 3 | -3 | 3 | -3
+ 2 | 3 | two | 3 | -3 | 2 | 4
+ 2 | 3 | two | 3 | -3 | 5 | -5
+ 2 | 3 | two | 3 | -3 | 5 | -5
+ 2 | 3 | two | 3 | -3 | 0 |
+ 2 | 3 | two | 3 | -3 | |
+ 2 | 3 | two | 3 | -3 | | 0
+ 3 | 2 | three | 3 | -3 | 1 | -1
+ 3 | 2 | three | 3 | -3 | 2 | 2
+ 3 | 2 | three | 3 | -3 | 3 | -3
+ 3 | 2 | three | 3 | -3 | 2 | 4
+ 3 | 2 | three | 3 | -3 | 5 | -5
+ 3 | 2 | three | 3 | -3 | 5 | -5
+ 3 | 2 | three | 3 | -3 | 0 |
+ 3 | 2 | three | 3 | -3 | |
+ 3 | 2 | three | 3 | -3 | | 0
+ 4 | 1 | four | 3 | -3 | 1 | -1
+ 4 | 1 | four | 3 | -3 | 2 | 2
+ 4 | 1 | four | 3 | -3 | 3 | -3
+ 4 | 1 | four | 3 | -3 | 2 | 4
+ 4 | 1 | four | 3 | -3 | 5 | -5
+ 4 | 1 | four | 3 | -3 | 5 | -5
+ 4 | 1 | four | 3 | -3 | 0 |
+ 4 | 1 | four | 3 | -3 | |
+ 4 | 1 | four | 3 | -3 | | 0
+ 5 | 0 | five | 3 | -3 | 1 | -1
+ 5 | 0 | five | 3 | -3 | 2 | 2
+ 5 | 0 | five | 3 | -3 | 3 | -3
+ 5 | 0 | five | 3 | -3 | 2 | 4
+ 5 | 0 | five | 3 | -3 | 5 | -5
+ 5 | 0 | five | 3 | -3 | 5 | -5
+ 5 | 0 | five | 3 | -3 | 0 |
+ 5 | 0 | five | 3 | -3 | |
+ 5 | 0 | five | 3 | -3 | | 0
+ 6 | 6 | six | 3 | -3 | 1 | -1
+ 6 | 6 | six | 3 | -3 | 2 | 2
+ 6 | 6 | six | 3 | -3 | 3 | -3
+ 6 | 6 | six | 3 | -3 | 2 | 4
+ 6 | 6 | six | 3 | -3 | 5 | -5
+ 6 | 6 | six | 3 | -3 | 5 | -5
+ 6 | 6 | six | 3 | -3 | 0 |
+ 6 | 6 | six | 3 | -3 | |
+ 6 | 6 | six | 3 | -3 | | 0
+ 7 | 7 | seven | 3 | -3 | 1 | -1
+ 7 | 7 | seven | 3 | -3 | 2 | 2
+ 7 | 7 | seven | 3 | -3 | 3 | -3
+ 7 | 7 | seven | 3 | -3 | 2 | 4
+ 7 | 7 | seven | 3 | -3 | 5 | -5
+ 7 | 7 | seven | 3 | -3 | 5 | -5
+ 7 | 7 | seven | 3 | -3 | 0 |
+ 7 | 7 | seven | 3 | -3 | |
+ 7 | 7 | seven | 3 | -3 | | 0
+ 8 | 8 | eight | 3 | -3 | 1 | -1
+ 8 | 8 | eight | 3 | -3 | 2 | 2
+ 8 | 8 | eight | 3 | -3 | 3 | -3
+ 8 | 8 | eight | 3 | -3 | 2 | 4
+ 8 | 8 | eight | 3 | -3 | 5 | -5
+ 8 | 8 | eight | 3 | -3 | 5 | -5
+ 8 | 8 | eight | 3 | -3 | 0 |
+ 8 | 8 | eight | 3 | -3 | |
+ 8 | 8 | eight | 3 | -3 | | 0
+ 0 | | zero | 3 | -3 | 1 | -1
+ 0 | | zero | 3 | -3 | 2 | 2
+ 0 | | zero | 3 | -3 | 3 | -3
+ 0 | | zero | 3 | -3 | 2 | 4
+ 0 | | zero | 3 | -3 | 5 | -5
+ 0 | | zero | 3 | -3 | 5 | -5
+ 0 | | zero | 3 | -3 | 0 |
+ 0 | | zero | 3 | -3 | |
+ 0 | | zero | 3 | -3 | | 0
+ | | null | 3 | -3 | 1 | -1
+ | | null | 3 | -3 | 2 | 2
+ | | null | 3 | -3 | 3 | -3
+ | | null | 3 | -3 | 2 | 4
+ | | null | 3 | -3 | 5 | -5
+ | | null | 3 | -3 | 5 | -5
+ | | null | 3 | -3 | 0 |
+ | | null | 3 | -3 | |
+ | | null | 3 | -3 | | 0
+ | 0 | zero | 3 | -3 | 1 | -1
+ | 0 | zero | 3 | -3 | 2 | 2
+ | 0 | zero | 3 | -3 | 3 | -3
+ | 0 | zero | 3 | -3 | 2 | 4
+ | 0 | zero | 3 | -3 | 5 | -5
+ | 0 | zero | 3 | -3 | 5 | -5
+ | 0 | zero | 3 | -3 | 0 |
+ | 0 | zero | 3 | -3 | |
+ | 0 | zero | 3 | -3 | | 0
+ 1 | 4 | one | 2 | 4 | 1 | -1
+ 1 | 4 | one | 2 | 4 | 2 | 2
+ 1 | 4 | one | 2 | 4 | 3 | -3
+ 1 | 4 | one | 2 | 4 | 2 | 4
+ 1 | 4 | one | 2 | 4 | 5 | -5
+ 1 | 4 | one | 2 | 4 | 5 | -5
+ 1 | 4 | one | 2 | 4 | 0 |
+ 1 | 4 | one | 2 | 4 | |
+ 1 | 4 | one | 2 | 4 | | 0
+ 2 | 3 | two | 2 | 4 | 1 | -1
+ 2 | 3 | two | 2 | 4 | 2 | 2
+ 2 | 3 | two | 2 | 4 | 3 | -3
+ 2 | 3 | two | 2 | 4 | 2 | 4
+ 2 | 3 | two | 2 | 4 | 5 | -5
+ 2 | 3 | two | 2 | 4 | 5 | -5
+ 2 | 3 | two | 2 | 4 | 0 |
+ 2 | 3 | two | 2 | 4 | |
+ 2 | 3 | two | 2 | 4 | | 0
+ 3 | 2 | three | 2 | 4 | 1 | -1
+ 3 | 2 | three | 2 | 4 | 2 | 2
+ 3 | 2 | three | 2 | 4 | 3 | -3
+ 3 | 2 | three | 2 | 4 | 2 | 4
+ 3 | 2 | three | 2 | 4 | 5 | -5
+ 3 | 2 | three | 2 | 4 | 5 | -5
+ 3 | 2 | three | 2 | 4 | 0 |
+ 3 | 2 | three | 2 | 4 | |
+ 3 | 2 | three | 2 | 4 | | 0
+ 4 | 1 | four | 2 | 4 | 1 | -1
+ 4 | 1 | four | 2 | 4 | 2 | 2
+ 4 | 1 | four | 2 | 4 | 3 | -3
+ 4 | 1 | four | 2 | 4 | 2 | 4
+ 4 | 1 | four | 2 | 4 | 5 | -5
+ 4 | 1 | four | 2 | 4 | 5 | -5
+ 4 | 1 | four | 2 | 4 | 0 |
+ 4 | 1 | four | 2 | 4 | |
+ 4 | 1 | four | 2 | 4 | | 0
+ 5 | 0 | five | 2 | 4 | 1 | -1
+ 5 | 0 | five | 2 | 4 | 2 | 2
+ 5 | 0 | five | 2 | 4 | 3 | -3
+ 5 | 0 | five | 2 | 4 | 2 | 4
+ 5 | 0 | five | 2 | 4 | 5 | -5
+ 5 | 0 | five | 2 | 4 | 5 | -5
+ 5 | 0 | five | 2 | 4 | 0 |
+ 5 | 0 | five | 2 | 4 | |
+ 5 | 0 | five | 2 | 4 | | 0
+ 6 | 6 | six | 2 | 4 | 1 | -1
+ 6 | 6 | six | 2 | 4 | 2 | 2
+ 6 | 6 | six | 2 | 4 | 3 | -3
+ 6 | 6 | six | 2 | 4 | 2 | 4
+ 6 | 6 | six | 2 | 4 | 5 | -5
+ 6 | 6 | six | 2 | 4 | 5 | -5
+ 6 | 6 | six | 2 | 4 | 0 |
+ 6 | 6 | six | 2 | 4 | |
+ 6 | 6 | six | 2 | 4 | | 0
+ 7 | 7 | seven | 2 | 4 | 1 | -1
+ 7 | 7 | seven | 2 | 4 | 2 | 2
+ 7 | 7 | seven | 2 | 4 | 3 | -3
+ 7 | 7 | seven | 2 | 4 | 2 | 4
+ 7 | 7 | seven | 2 | 4 | 5 | -5
+ 7 | 7 | seven | 2 | 4 | 5 | -5
+ 7 | 7 | seven | 2 | 4 | 0 |
+ 7 | 7 | seven | 2 | 4 | |
+ 7 | 7 | seven | 2 | 4 | | 0
+ 8 | 8 | eight | 2 | 4 | 1 | -1
+ 8 | 8 | eight | 2 | 4 | 2 | 2
+ 8 | 8 | eight | 2 | 4 | 3 | -3
+ 8 | 8 | eight | 2 | 4 | 2 | 4
+ 8 | 8 | eight | 2 | 4 | 5 | -5
+ 8 | 8 | eight | 2 | 4 | 5 | -5
+ 8 | 8 | eight | 2 | 4 | 0 |
+ 8 | 8 | eight | 2 | 4 | |
+ 8 | 8 | eight | 2 | 4 | | 0
+ 0 | | zero | 2 | 4 | 1 | -1
+ 0 | | zero | 2 | 4 | 2 | 2
+ 0 | | zero | 2 | 4 | 3 | -3
+ 0 | | zero | 2 | 4 | 2 | 4
+ 0 | | zero | 2 | 4 | 5 | -5
+ 0 | | zero | 2 | 4 | 5 | -5
+ 0 | | zero | 2 | 4 | 0 |
+ 0 | | zero | 2 | 4 | |
+ 0 | | zero | 2 | 4 | | 0
+ | | null | 2 | 4 | 1 | -1
+ | | null | 2 | 4 | 2 | 2
+ | | null | 2 | 4 | 3 | -3
+ | | null | 2 | 4 | 2 | 4
+ | | null | 2 | 4 | 5 | -5
+ | | null | 2 | 4 | 5 | -5
+ | | null | 2 | 4 | 0 |
+ | | null | 2 | 4 | |
+ | | null | 2 | 4 | | 0
+ | 0 | zero | 2 | 4 | 1 | -1
+ | 0 | zero | 2 | 4 | 2 | 2
+ | 0 | zero | 2 | 4 | 3 | -3
+ | 0 | zero | 2 | 4 | 2 | 4
+ | 0 | zero | 2 | 4 | 5 | -5
+ | 0 | zero | 2 | 4 | 5 | -5
+ | 0 | zero | 2 | 4 | 0 |
+ | 0 | zero | 2 | 4 | |
+ | 0 | zero | 2 | 4 | | 0
+ 1 | 4 | one | 5 | -5 | 1 | -1
+ 1 | 4 | one | 5 | -5 | 2 | 2
+ 1 | 4 | one | 5 | -5 | 3 | -3
+ 1 | 4 | one | 5 | -5 | 2 | 4
+ 1 | 4 | one | 5 | -5 | 5 | -5
+ 1 | 4 | one | 5 | -5 | 5 | -5
+ 1 | 4 | one | 5 | -5 | 0 |
+ 1 | 4 | one | 5 | -5 | |
+ 1 | 4 | one | 5 | -5 | | 0
+ 2 | 3 | two | 5 | -5 | 1 | -1
+ 2 | 3 | two | 5 | -5 | 2 | 2
+ 2 | 3 | two | 5 | -5 | 3 | -3
+ 2 | 3 | two | 5 | -5 | 2 | 4
+ 2 | 3 | two | 5 | -5 | 5 | -5
+ 2 | 3 | two | 5 | -5 | 5 | -5
+ 2 | 3 | two | 5 | -5 | 0 |
+ 2 | 3 | two | 5 | -5 | |
+ 2 | 3 | two | 5 | -5 | | 0
+ 3 | 2 | three | 5 | -5 | 1 | -1
+ 3 | 2 | three | 5 | -5 | 2 | 2
+ 3 | 2 | three | 5 | -5 | 3 | -3
+ 3 | 2 | three | 5 | -5 | 2 | 4
+ 3 | 2 | three | 5 | -5 | 5 | -5
+ 3 | 2 | three | 5 | -5 | 5 | -5
+ 3 | 2 | three | 5 | -5 | 0 |
+ 3 | 2 | three | 5 | -5 | |
+ 3 | 2 | three | 5 | -5 | | 0
+ 4 | 1 | four | 5 | -5 | 1 | -1
+ 4 | 1 | four | 5 | -5 | 2 | 2
+ 4 | 1 | four | 5 | -5 | 3 | -3
+ 4 | 1 | four | 5 | -5 | 2 | 4
+ 4 | 1 | four | 5 | -5 | 5 | -5
+ 4 | 1 | four | 5 | -5 | 5 | -5
+ 4 | 1 | four | 5 | -5 | 0 |
+ 4 | 1 | four | 5 | -5 | |
+ 4 | 1 | four | 5 | -5 | | 0
+ 5 | 0 | five | 5 | -5 | 1 | -1
+ 5 | 0 | five | 5 | -5 | 2 | 2
+ 5 | 0 | five | 5 | -5 | 3 | -3
+ 5 | 0 | five | 5 | -5 | 2 | 4
+ 5 | 0 | five | 5 | -5 | 5 | -5
+ 5 | 0 | five | 5 | -5 | 5 | -5
+ 5 | 0 | five | 5 | -5 | 0 |
+ 5 | 0 | five | 5 | -5 | |
+ 5 | 0 | five | 5 | -5 | | 0
+ 6 | 6 | six | 5 | -5 | 1 | -1
+ 6 | 6 | six | 5 | -5 | 2 | 2
+ 6 | 6 | six | 5 | -5 | 3 | -3
+ 6 | 6 | six | 5 | -5 | 2 | 4
+ 6 | 6 | six | 5 | -5 | 5 | -5
+ 6 | 6 | six | 5 | -5 | 5 | -5
+ 6 | 6 | six | 5 | -5 | 0 |
+ 6 | 6 | six | 5 | -5 | |
+ 6 | 6 | six | 5 | -5 | | 0
+ 7 | 7 | seven | 5 | -5 | 1 | -1
+ 7 | 7 | seven | 5 | -5 | 2 | 2
+ 7 | 7 | seven | 5 | -5 | 3 | -3
+ 7 | 7 | seven | 5 | -5 | 2 | 4
+ 7 | 7 | seven | 5 | -5 | 5 | -5
+ 7 | 7 | seven | 5 | -5 | 5 | -5
+ 7 | 7 | seven | 5 | -5 | 0 |
+ 7 | 7 | seven | 5 | -5 | |
+ 7 | 7 | seven | 5 | -5 | | 0
+ 8 | 8 | eight | 5 | -5 | 1 | -1
+ 8 | 8 | eight | 5 | -5 | 2 | 2
+ 8 | 8 | eight | 5 | -5 | 3 | -3
+ 8 | 8 | eight | 5 | -5 | 2 | 4
+ 8 | 8 | eight | 5 | -5 | 5 | -5
+ 8 | 8 | eight | 5 | -5 | 5 | -5
+ 8 | 8 | eight | 5 | -5 | 0 |
+ 8 | 8 | eight | 5 | -5 | |
+ 8 | 8 | eight | 5 | -5 | | 0
+ 0 | | zero | 5 | -5 | 1 | -1
+ 0 | | zero | 5 | -5 | 2 | 2
+ 0 | | zero | 5 | -5 | 3 | -3
+ 0 | | zero | 5 | -5 | 2 | 4
+ 0 | | zero | 5 | -5 | 5 | -5
+ 0 | | zero | 5 | -5 | 5 | -5
+ 0 | | zero | 5 | -5 | 0 |
+ 0 | | zero | 5 | -5 | |
+ 0 | | zero | 5 | -5 | | 0
+ | | null | 5 | -5 | 1 | -1
+ | | null | 5 | -5 | 2 | 2
+ | | null | 5 | -5 | 3 | -3
+ | | null | 5 | -5 | 2 | 4
+ | | null | 5 | -5 | 5 | -5
+ | | null | 5 | -5 | 5 | -5
+ | | null | 5 | -5 | 0 |
+ | | null | 5 | -5 | |
+ | | null | 5 | -5 | | 0
+ | 0 | zero | 5 | -5 | 1 | -1
+ | 0 | zero | 5 | -5 | 2 | 2
+ | 0 | zero | 5 | -5 | 3 | -3
+ | 0 | zero | 5 | -5 | 2 | 4
+ | 0 | zero | 5 | -5 | 5 | -5
+ | 0 | zero | 5 | -5 | 5 | -5
+ | 0 | zero | 5 | -5 | 0 |
+ | 0 | zero | 5 | -5 | |
+ | 0 | zero | 5 | -5 | | 0
+ 1 | 4 | one | 5 | -5 | 1 | -1
+ 1 | 4 | one | 5 | -5 | 2 | 2
+ 1 | 4 | one | 5 | -5 | 3 | -3
+ 1 | 4 | one | 5 | -5 | 2 | 4
+ 1 | 4 | one | 5 | -5 | 5 | -5
+ 1 | 4 | one | 5 | -5 | 5 | -5
+ 1 | 4 | one | 5 | -5 | 0 |
+ 1 | 4 | one | 5 | -5 | |
+ 1 | 4 | one | 5 | -5 | | 0
+ 2 | 3 | two | 5 | -5 | 1 | -1
+ 2 | 3 | two | 5 | -5 | 2 | 2
+ 2 | 3 | two | 5 | -5 | 3 | -3
+ 2 | 3 | two | 5 | -5 | 2 | 4
+ 2 | 3 | two | 5 | -5 | 5 | -5
+ 2 | 3 | two | 5 | -5 | 5 | -5
+ 2 | 3 | two | 5 | -5 | 0 |
+ 2 | 3 | two | 5 | -5 | |
+ 2 | 3 | two | 5 | -5 | | 0
+ 3 | 2 | three | 5 | -5 | 1 | -1
+ 3 | 2 | three | 5 | -5 | 2 | 2
+ 3 | 2 | three | 5 | -5 | 3 | -3
+ 3 | 2 | three | 5 | -5 | 2 | 4
+ 3 | 2 | three | 5 | -5 | 5 | -5
+ 3 | 2 | three | 5 | -5 | 5 | -5
+ 3 | 2 | three | 5 | -5 | 0 |
+ 3 | 2 | three | 5 | -5 | |
+ 3 | 2 | three | 5 | -5 | | 0
+ 4 | 1 | four | 5 | -5 | 1 | -1
+ 4 | 1 | four | 5 | -5 | 2 | 2
+ 4 | 1 | four | 5 | -5 | 3 | -3
+ 4 | 1 | four | 5 | -5 | 2 | 4
+ 4 | 1 | four | 5 | -5 | 5 | -5
+ 4 | 1 | four | 5 | -5 | 5 | -5
+ 4 | 1 | four | 5 | -5 | 0 |
+ 4 | 1 | four | 5 | -5 | |
+ 4 | 1 | four | 5 | -5 | | 0
+ 5 | 0 | five | 5 | -5 | 1 | -1
+ 5 | 0 | five | 5 | -5 | 2 | 2
+ 5 | 0 | five | 5 | -5 | 3 | -3
+ 5 | 0 | five | 5 | -5 | 2 | 4
+ 5 | 0 | five | 5 | -5 | 5 | -5
+ 5 | 0 | five | 5 | -5 | 5 | -5
+ 5 | 0 | five | 5 | -5 | 0 |
+ 5 | 0 | five | 5 | -5 | |
+ 5 | 0 | five | 5 | -5 | | 0
+ 6 | 6 | six | 5 | -5 | 1 | -1
+ 6 | 6 | six | 5 | -5 | 2 | 2
+ 6 | 6 | six | 5 | -5 | 3 | -3
+ 6 | 6 | six | 5 | -5 | 2 | 4
+ 6 | 6 | six | 5 | -5 | 5 | -5
+ 6 | 6 | six | 5 | -5 | 5 | -5
+ 6 | 6 | six | 5 | -5 | 0 |
+ 6 | 6 | six | 5 | -5 | |
+ 6 | 6 | six | 5 | -5 | | 0
+ 7 | 7 | seven | 5 | -5 | 1 | -1
+ 7 | 7 | seven | 5 | -5 | 2 | 2
+ 7 | 7 | seven | 5 | -5 | 3 | -3
+ 7 | 7 | seven | 5 | -5 | 2 | 4
+ 7 | 7 | seven | 5 | -5 | 5 | -5
+ 7 | 7 | seven | 5 | -5 | 5 | -5
+ 7 | 7 | seven | 5 | -5 | 0 |
+ 7 | 7 | seven | 5 | -5 | |
+ 7 | 7 | seven | 5 | -5 | | 0
+ 8 | 8 | eight | 5 | -5 | 1 | -1
+ 8 | 8 | eight | 5 | -5 | 2 | 2
+ 8 | 8 | eight | 5 | -5 | 3 | -3
+ 8 | 8 | eight | 5 | -5 | 2 | 4
+ 8 | 8 | eight | 5 | -5 | 5 | -5
+ 8 | 8 | eight | 5 | -5 | 5 | -5
+ 8 | 8 | eight | 5 | -5 | 0 |
+ 8 | 8 | eight | 5 | -5 | |
+ 8 | 8 | eight | 5 | -5 | | 0
+ 0 | | zero | 5 | -5 | 1 | -1
+ 0 | | zero | 5 | -5 | 2 | 2
+ 0 | | zero | 5 | -5 | 3 | -3
+ 0 | | zero | 5 | -5 | 2 | 4
+ 0 | | zero | 5 | -5 | 5 | -5
+ 0 | | zero | 5 | -5 | 5 | -5
+ 0 | | zero | 5 | -5 | 0 |
+ 0 | | zero | 5 | -5 | |
+ 0 | | zero | 5 | -5 | | 0
+ | | null | 5 | -5 | 1 | -1
+ | | null | 5 | -5 | 2 | 2
+ | | null | 5 | -5 | 3 | -3
+ | | null | 5 | -5 | 2 | 4
+ | | null | 5 | -5 | 5 | -5
+ | | null | 5 | -5 | 5 | -5
+ | | null | 5 | -5 | 0 |
+ | | null | 5 | -5 | |
+ | | null | 5 | -5 | | 0
+ | 0 | zero | 5 | -5 | 1 | -1
+ | 0 | zero | 5 | -5 | 2 | 2
+ | 0 | zero | 5 | -5 | 3 | -3
+ | 0 | zero | 5 | -5 | 2 | 4
+ | 0 | zero | 5 | -5 | 5 | -5
+ | 0 | zero | 5 | -5 | 5 | -5
+ | 0 | zero | 5 | -5 | 0 |
+ | 0 | zero | 5 | -5 | |
+ | 0 | zero | 5 | -5 | | 0
+ 1 | 4 | one | 0 | | 1 | -1
+ 1 | 4 | one | 0 | | 2 | 2
+ 1 | 4 | one | 0 | | 3 | -3
+ 1 | 4 | one | 0 | | 2 | 4
+ 1 | 4 | one | 0 | | 5 | -5
+ 1 | 4 | one | 0 | | 5 | -5
+ 1 | 4 | one | 0 | | 0 |
+ 1 | 4 | one | 0 | | |
+ 1 | 4 | one | 0 | | | 0
+ 2 | 3 | two | 0 | | 1 | -1
+ 2 | 3 | two | 0 | | 2 | 2
+ 2 | 3 | two | 0 | | 3 | -3
+ 2 | 3 | two | 0 | | 2 | 4
+ 2 | 3 | two | 0 | | 5 | -5
+ 2 | 3 | two | 0 | | 5 | -5
+ 2 | 3 | two | 0 | | 0 |
+ 2 | 3 | two | 0 | | |
+ 2 | 3 | two | 0 | | | 0
+ 3 | 2 | three | 0 | | 1 | -1
+ 3 | 2 | three | 0 | | 2 | 2
+ 3 | 2 | three | 0 | | 3 | -3
+ 3 | 2 | three | 0 | | 2 | 4
+ 3 | 2 | three | 0 | | 5 | -5
+ 3 | 2 | three | 0 | | 5 | -5
+ 3 | 2 | three | 0 | | 0 |
+ 3 | 2 | three | 0 | | |
+ 3 | 2 | three | 0 | | | 0
+ 4 | 1 | four | 0 | | 1 | -1
+ 4 | 1 | four | 0 | | 2 | 2
+ 4 | 1 | four | 0 | | 3 | -3
+ 4 | 1 | four | 0 | | 2 | 4
+ 4 | 1 | four | 0 | | 5 | -5
+ 4 | 1 | four | 0 | | 5 | -5
+ 4 | 1 | four | 0 | | 0 |
+ 4 | 1 | four | 0 | | |
+ 4 | 1 | four | 0 | | | 0
+ 5 | 0 | five | 0 | | 1 | -1
+ 5 | 0 | five | 0 | | 2 | 2
+ 5 | 0 | five | 0 | | 3 | -3
+ 5 | 0 | five | 0 | | 2 | 4
+ 5 | 0 | five | 0 | | 5 | -5
+ 5 | 0 | five | 0 | | 5 | -5
+ 5 | 0 | five | 0 | | 0 |
+ 5 | 0 | five | 0 | | |
+ 5 | 0 | five | 0 | | | 0
+ 6 | 6 | six | 0 | | 1 | -1
+ 6 | 6 | six | 0 | | 2 | 2
+ 6 | 6 | six | 0 | | 3 | -3
+ 6 | 6 | six | 0 | | 2 | 4
+ 6 | 6 | six | 0 | | 5 | -5
+ 6 | 6 | six | 0 | | 5 | -5
+ 6 | 6 | six | 0 | | 0 |
+ 6 | 6 | six | 0 | | |
+ 6 | 6 | six | 0 | | | 0
+ 7 | 7 | seven | 0 | | 1 | -1
+ 7 | 7 | seven | 0 | | 2 | 2
+ 7 | 7 | seven | 0 | | 3 | -3
+ 7 | 7 | seven | 0 | | 2 | 4
+ 7 | 7 | seven | 0 | | 5 | -5
+ 7 | 7 | seven | 0 | | 5 | -5
+ 7 | 7 | seven | 0 | | 0 |
+ 7 | 7 | seven | 0 | | |
+ 7 | 7 | seven | 0 | | | 0
+ 8 | 8 | eight | 0 | | 1 | -1
+ 8 | 8 | eight | 0 | | 2 | 2
+ 8 | 8 | eight | 0 | | 3 | -3
+ 8 | 8 | eight | 0 | | 2 | 4
+ 8 | 8 | eight | 0 | | 5 | -5
+ 8 | 8 | eight | 0 | | 5 | -5
+ 8 | 8 | eight | 0 | | 0 |
+ 8 | 8 | eight | 0 | | |
+ 8 | 8 | eight | 0 | | | 0
+ 0 | | zero | 0 | | 1 | -1
+ 0 | | zero | 0 | | 2 | 2
+ 0 | | zero | 0 | | 3 | -3
+ 0 | | zero | 0 | | 2 | 4
+ 0 | | zero | 0 | | 5 | -5
+ 0 | | zero | 0 | | 5 | -5
+ 0 | | zero | 0 | | 0 |
+ 0 | | zero | 0 | | |
+ 0 | | zero | 0 | | | 0
+ | | null | 0 | | 1 | -1
+ | | null | 0 | | 2 | 2
+ | | null | 0 | | 3 | -3
+ | | null | 0 | | 2 | 4
+ | | null | 0 | | 5 | -5
+ | | null | 0 | | 5 | -5
+ | | null | 0 | | 0 |
+ | | null | 0 | | |
+ | | null | 0 | | | 0
+ | 0 | zero | 0 | | 1 | -1
+ | 0 | zero | 0 | | 2 | 2
+ | 0 | zero | 0 | | 3 | -3
+ | 0 | zero | 0 | | 2 | 4
+ | 0 | zero | 0 | | 5 | -5
+ | 0 | zero | 0 | | 5 | -5
+ | 0 | zero | 0 | | 0 |
+ | 0 | zero | 0 | | |
+ | 0 | zero | 0 | | | 0
+ 1 | 4 | one | | | 1 | -1
+ 1 | 4 | one | | | 2 | 2
+ 1 | 4 | one | | | 3 | -3
+ 1 | 4 | one | | | 2 | 4
+ 1 | 4 | one | | | 5 | -5
+ 1 | 4 | one | | | 5 | -5
+ 1 | 4 | one | | | 0 |
+ 1 | 4 | one | | | |
+ 1 | 4 | one | | | | 0
+ 2 | 3 | two | | | 1 | -1
+ 2 | 3 | two | | | 2 | 2
+ 2 | 3 | two | | | 3 | -3
+ 2 | 3 | two | | | 2 | 4
+ 2 | 3 | two | | | 5 | -5
+ 2 | 3 | two | | | 5 | -5
+ 2 | 3 | two | | | 0 |
+ 2 | 3 | two | | | |
+ 2 | 3 | two | | | | 0
+ 3 | 2 | three | | | 1 | -1
+ 3 | 2 | three | | | 2 | 2
+ 3 | 2 | three | | | 3 | -3
+ 3 | 2 | three | | | 2 | 4
+ 3 | 2 | three | | | 5 | -5
+ 3 | 2 | three | | | 5 | -5
+ 3 | 2 | three | | | 0 |
+ 3 | 2 | three | | | |
+ 3 | 2 | three | | | | 0
+ 4 | 1 | four | | | 1 | -1
+ 4 | 1 | four | | | 2 | 2
+ 4 | 1 | four | | | 3 | -3
+ 4 | 1 | four | | | 2 | 4
+ 4 | 1 | four | | | 5 | -5
+ 4 | 1 | four | | | 5 | -5
+ 4 | 1 | four | | | 0 |
+ 4 | 1 | four | | | |
+ 4 | 1 | four | | | | 0
+ 5 | 0 | five | | | 1 | -1
+ 5 | 0 | five | | | 2 | 2
+ 5 | 0 | five | | | 3 | -3
+ 5 | 0 | five | | | 2 | 4
+ 5 | 0 | five | | | 5 | -5
+ 5 | 0 | five | | | 5 | -5
+ 5 | 0 | five | | | 0 |
+ 5 | 0 | five | | | |
+ 5 | 0 | five | | | | 0
+ 6 | 6 | six | | | 1 | -1
+ 6 | 6 | six | | | 2 | 2
+ 6 | 6 | six | | | 3 | -3
+ 6 | 6 | six | | | 2 | 4
+ 6 | 6 | six | | | 5 | -5
+ 6 | 6 | six | | | 5 | -5
+ 6 | 6 | six | | | 0 |
+ 6 | 6 | six | | | |
+ 6 | 6 | six | | | | 0
+ 7 | 7 | seven | | | 1 | -1
+ 7 | 7 | seven | | | 2 | 2
+ 7 | 7 | seven | | | 3 | -3
+ 7 | 7 | seven | | | 2 | 4
+ 7 | 7 | seven | | | 5 | -5
+ 7 | 7 | seven | | | 5 | -5
+ 7 | 7 | seven | | | 0 |
+ 7 | 7 | seven | | | |
+ 7 | 7 | seven | | | | 0
+ 8 | 8 | eight | | | 1 | -1
+ 8 | 8 | eight | | | 2 | 2
+ 8 | 8 | eight | | | 3 | -3
+ 8 | 8 | eight | | | 2 | 4
+ 8 | 8 | eight | | | 5 | -5
+ 8 | 8 | eight | | | 5 | -5
+ 8 | 8 | eight | | | 0 |
+ 8 | 8 | eight | | | |
+ 8 | 8 | eight | | | | 0
+ 0 | | zero | | | 1 | -1
+ 0 | | zero | | | 2 | 2
+ 0 | | zero | | | 3 | -3
+ 0 | | zero | | | 2 | 4
+ 0 | | zero | | | 5 | -5
+ 0 | | zero | | | 5 | -5
+ 0 | | zero | | | 0 |
+ 0 | | zero | | | |
+ 0 | | zero | | | | 0
+ | | null | | | 1 | -1
+ | | null | | | 2 | 2
+ | | null | | | 3 | -3
+ | | null | | | 2 | 4
+ | | null | | | 5 | -5
+ | | null | | | 5 | -5
+ | | null | | | 0 |
+ | | null | | | |
+ | | null | | | | 0
+ | 0 | zero | | | 1 | -1
+ | 0 | zero | | | 2 | 2
+ | 0 | zero | | | 3 | -3
+ | 0 | zero | | | 2 | 4
+ | 0 | zero | | | 5 | -5
+ | 0 | zero | | | 5 | -5
+ | 0 | zero | | | 0 |
+ | 0 | zero | | | |
+ | 0 | zero | | | | 0
+ 1 | 4 | one | | 0 | 1 | -1
+ 1 | 4 | one | | 0 | 2 | 2
+ 1 | 4 | one | | 0 | 3 | -3
+ 1 | 4 | one | | 0 | 2 | 4
+ 1 | 4 | one | | 0 | 5 | -5
+ 1 | 4 | one | | 0 | 5 | -5
+ 1 | 4 | one | | 0 | 0 |
+ 1 | 4 | one | | 0 | |
+ 1 | 4 | one | | 0 | | 0
+ 2 | 3 | two | | 0 | 1 | -1
+ 2 | 3 | two | | 0 | 2 | 2
+ 2 | 3 | two | | 0 | 3 | -3
+ 2 | 3 | two | | 0 | 2 | 4
+ 2 | 3 | two | | 0 | 5 | -5
+ 2 | 3 | two | | 0 | 5 | -5
+ 2 | 3 | two | | 0 | 0 |
+ 2 | 3 | two | | 0 | |
+ 2 | 3 | two | | 0 | | 0
+ 3 | 2 | three | | 0 | 1 | -1
+ 3 | 2 | three | | 0 | 2 | 2
+ 3 | 2 | three | | 0 | 3 | -3
+ 3 | 2 | three | | 0 | 2 | 4
+ 3 | 2 | three | | 0 | 5 | -5
+ 3 | 2 | three | | 0 | 5 | -5
+ 3 | 2 | three | | 0 | 0 |
+ 3 | 2 | three | | 0 | |
+ 3 | 2 | three | | 0 | | 0
+ 4 | 1 | four | | 0 | 1 | -1
+ 4 | 1 | four | | 0 | 2 | 2
+ 4 | 1 | four | | 0 | 3 | -3
+ 4 | 1 | four | | 0 | 2 | 4
+ 4 | 1 | four | | 0 | 5 | -5
+ 4 | 1 | four | | 0 | 5 | -5
+ 4 | 1 | four | | 0 | 0 |
+ 4 | 1 | four | | 0 | |
+ 4 | 1 | four | | 0 | | 0
+ 5 | 0 | five | | 0 | 1 | -1
+ 5 | 0 | five | | 0 | 2 | 2
+ 5 | 0 | five | | 0 | 3 | -3
+ 5 | 0 | five | | 0 | 2 | 4
+ 5 | 0 | five | | 0 | 5 | -5
+ 5 | 0 | five | | 0 | 5 | -5
+ 5 | 0 | five | | 0 | 0 |
+ 5 | 0 | five | | 0 | |
+ 5 | 0 | five | | 0 | | 0
+ 6 | 6 | six | | 0 | 1 | -1
+ 6 | 6 | six | | 0 | 2 | 2
+ 6 | 6 | six | | 0 | 3 | -3
+ 6 | 6 | six | | 0 | 2 | 4
+ 6 | 6 | six | | 0 | 5 | -5
+ 6 | 6 | six | | 0 | 5 | -5
+ 6 | 6 | six | | 0 | 0 |
+ 6 | 6 | six | | 0 | |
+ 6 | 6 | six | | 0 | | 0
+ 7 | 7 | seven | | 0 | 1 | -1
+ 7 | 7 | seven | | 0 | 2 | 2
+ 7 | 7 | seven | | 0 | 3 | -3
+ 7 | 7 | seven | | 0 | 2 | 4
+ 7 | 7 | seven | | 0 | 5 | -5
+ 7 | 7 | seven | | 0 | 5 | -5
+ 7 | 7 | seven | | 0 | 0 |
+ 7 | 7 | seven | | 0 | |
+ 7 | 7 | seven | | 0 | | 0
+ 8 | 8 | eight | | 0 | 1 | -1
+ 8 | 8 | eight | | 0 | 2 | 2
+ 8 | 8 | eight | | 0 | 3 | -3
+ 8 | 8 | eight | | 0 | 2 | 4
+ 8 | 8 | eight | | 0 | 5 | -5
+ 8 | 8 | eight | | 0 | 5 | -5
+ 8 | 8 | eight | | 0 | 0 |
+ 8 | 8 | eight | | 0 | |
+ 8 | 8 | eight | | 0 | | 0
+ 0 | | zero | | 0 | 1 | -1
+ 0 | | zero | | 0 | 2 | 2
+ 0 | | zero | | 0 | 3 | -3
+ 0 | | zero | | 0 | 2 | 4
+ 0 | | zero | | 0 | 5 | -5
+ 0 | | zero | | 0 | 5 | -5
+ 0 | | zero | | 0 | 0 |
+ 0 | | zero | | 0 | |
+ 0 | | zero | | 0 | | 0
+ | | null | | 0 | 1 | -1
+ | | null | | 0 | 2 | 2
+ | | null | | 0 | 3 | -3
+ | | null | | 0 | 2 | 4
+ | | null | | 0 | 5 | -5
+ | | null | | 0 | 5 | -5
+ | | null | | 0 | 0 |
+ | | null | | 0 | |
+ | | null | | 0 | | 0
+ | 0 | zero | | 0 | 1 | -1
+ | 0 | zero | | 0 | 2 | 2
+ | 0 | zero | | 0 | 3 | -3
+ | 0 | zero | | 0 | 2 | 4
+ | 0 | zero | | 0 | 5 | -5
+ | 0 | zero | | 0 | 5 | -5
+ | 0 | zero | | 0 | 0 |
+ | 0 | zero | | 0 | |
+ | 0 | zero | | 0 | | 0
+(891 rows)
+
+--
+--
+-- Inner joins (equi-joins)
+--
+--
+--
+-- Inner joins (equi-joins) with USING clause
+-- The USING syntax changes the shape of the resulting table
+-- by including a column in the USING clause only once in the result.
+--
+-- Inner equi-join on specified column
+SELECT *
+ FROM J1_TBL INNER JOIN J2_TBL USING (i);
+ i | j | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+(7 rows)
+
+-- Same as above, slightly different syntax
+SELECT *
+ FROM J1_TBL JOIN J2_TBL USING (i);
+ i | j | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+(7 rows)
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c) JOIN J2_TBL t2 (a, d) USING (a)
+ ORDER BY a, d;
+ a | b | c | d
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+(7 rows)
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c) JOIN J2_TBL t2 (a, b) USING (b)
+ ORDER BY b, t1.a;
+ b | a | c | a
+---+---+-------+---
+ 0 | 5 | five |
+ 0 | | zero |
+ 2 | 3 | three | 2
+ 4 | 1 | one | 2
+(4 rows)
+
+-- test join using aliases
+SELECT * FROM J1_TBL JOIN J2_TBL USING (i) WHERE J1_TBL.t = 'one'; -- ok
+ i | j | t | k
+---+---+-----+----
+ 1 | 4 | one | -1
+(1 row)
+
+SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one'; -- ok
+ i | j | t | k
+---+---+-----+----
+ 1 | 4 | one | -1
+(1 row)
+
+SELECT * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t = 'one'; -- error
+ERROR: invalid reference to FROM-clause entry for table "j1_tbl"
+LINE 1: ... * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t =...
+ ^
+HINT: There is an entry for table "j1_tbl", but it cannot be referenced from this part of the query.
+SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.i = 1; -- ok
+ i | j | t | k
+---+---+-----+----
+ 1 | 4 | one | -1
+(1 row)
+
+SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.t = 'one'; -- error
+ERROR: column x.t does not exist
+LINE 1: ...CT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.t = 'one...
+ ^
+SELECT * FROM (J1_TBL JOIN J2_TBL USING (i) AS x) AS xx WHERE x.i = 1; -- error (XXX could use better hint)
+ERROR: missing FROM-clause entry for table "x"
+LINE 1: ...ROM (J1_TBL JOIN J2_TBL USING (i) AS x) AS xx WHERE x.i = 1;
+ ^
+SELECT * FROM J1_TBL a1 JOIN J2_TBL a2 USING (i) AS a1; -- error
+ERROR: table name "a1" specified more than once
+SELECT x.* FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one';
+ i
+---
+ 1
+(1 row)
+
+SELECT ROW(x.*) FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one';
+ row
+-----
+ (1)
+(1 row)
+
+SELECT row_to_json(x.*) FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one';
+ row_to_json
+-------------
+ {"i":1}
+(1 row)
+
+--
+-- NATURAL JOIN
+-- Inner equi-join on all columns with the same name
+--
+SELECT *
+ FROM J1_TBL NATURAL JOIN J2_TBL;
+ i | j | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+(7 rows)
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c) NATURAL JOIN J2_TBL t2 (a, d);
+ a | b | c | d
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+(7 rows)
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c) NATURAL JOIN J2_TBL t2 (d, a);
+ a | b | c | d
+---+---+------+---
+ 0 | | zero |
+ 2 | 3 | two | 2
+ 4 | 1 | four | 2
+(3 rows)
+
+-- mismatch number of columns
+-- currently, Postgres will fill in with underlying names
+SELECT *
+ FROM J1_TBL t1 (a, b) NATURAL JOIN J2_TBL t2 (a);
+ a | b | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+(7 rows)
+
+--
+-- Inner joins (equi-joins)
+--
+SELECT *
+ FROM J1_TBL JOIN J2_TBL ON (J1_TBL.i = J2_TBL.i);
+ i | j | t | i | k
+---+---+-------+---+----
+ 0 | | zero | 0 |
+ 1 | 4 | one | 1 | -1
+ 2 | 3 | two | 2 | 2
+ 2 | 3 | two | 2 | 4
+ 3 | 2 | three | 3 | -3
+ 5 | 0 | five | 5 | -5
+ 5 | 0 | five | 5 | -5
+(7 rows)
+
+SELECT *
+ FROM J1_TBL JOIN J2_TBL ON (J1_TBL.i = J2_TBL.k);
+ i | j | t | i | k
+---+---+------+---+---
+ 0 | | zero | | 0
+ 2 | 3 | two | 2 | 2
+ 4 | 1 | four | 2 | 4
+(3 rows)
+
+--
+-- Non-equi-joins
+--
+SELECT *
+ FROM J1_TBL JOIN J2_TBL ON (J1_TBL.i <= J2_TBL.k);
+ i | j | t | i | k
+---+---+-------+---+---
+ 1 | 4 | one | 2 | 2
+ 2 | 3 | two | 2 | 2
+ 0 | | zero | 2 | 2
+ 1 | 4 | one | 2 | 4
+ 2 | 3 | two | 2 | 4
+ 3 | 2 | three | 2 | 4
+ 4 | 1 | four | 2 | 4
+ 0 | | zero | 2 | 4
+ 0 | | zero | | 0
+(9 rows)
+
+--
+-- Outer joins
+-- Note that OUTER is a noise word
+--
+SELECT *
+ FROM J1_TBL LEFT OUTER JOIN J2_TBL USING (i)
+ ORDER BY i, k, t;
+ i | j | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 4 | 1 | four |
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+ 6 | 6 | six |
+ 7 | 7 | seven |
+ 8 | 8 | eight |
+ | | null |
+ | 0 | zero |
+(13 rows)
+
+SELECT *
+ FROM J1_TBL LEFT JOIN J2_TBL USING (i)
+ ORDER BY i, k, t;
+ i | j | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 4 | 1 | four |
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+ 6 | 6 | six |
+ 7 | 7 | seven |
+ 8 | 8 | eight |
+ | | null |
+ | 0 | zero |
+(13 rows)
+
+SELECT *
+ FROM J1_TBL RIGHT OUTER JOIN J2_TBL USING (i);
+ i | j | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+ | | |
+ | | | 0
+(9 rows)
+
+SELECT *
+ FROM J1_TBL RIGHT JOIN J2_TBL USING (i);
+ i | j | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+ | | |
+ | | | 0
+(9 rows)
+
+SELECT *
+ FROM J1_TBL FULL OUTER JOIN J2_TBL USING (i)
+ ORDER BY i, k, t;
+ i | j | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 4 | 1 | four |
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+ 6 | 6 | six |
+ 7 | 7 | seven |
+ 8 | 8 | eight |
+ | | | 0
+ | | null |
+ | 0 | zero |
+ | | |
+(15 rows)
+
+SELECT *
+ FROM J1_TBL FULL JOIN J2_TBL USING (i)
+ ORDER BY i, k, t;
+ i | j | t | k
+---+---+-------+----
+ 0 | | zero |
+ 1 | 4 | one | -1
+ 2 | 3 | two | 2
+ 2 | 3 | two | 4
+ 3 | 2 | three | -3
+ 4 | 1 | four |
+ 5 | 0 | five | -5
+ 5 | 0 | five | -5
+ 6 | 6 | six |
+ 7 | 7 | seven |
+ 8 | 8 | eight |
+ | | | 0
+ | | null |
+ | 0 | zero |
+ | | |
+(15 rows)
+
+SELECT *
+ FROM J1_TBL LEFT JOIN J2_TBL USING (i) WHERE (k = 1);
+ i | j | t | k
+---+---+---+---
+(0 rows)
+
+SELECT *
+ FROM J1_TBL LEFT JOIN J2_TBL USING (i) WHERE (i = 1);
+ i | j | t | k
+---+---+-----+----
+ 1 | 4 | one | -1
+(1 row)
+
+--
+-- semijoin selectivity for <>
+--
+explain (costs off)
+select * from int4_tbl i4, tenk1 a
+where exists(select * from tenk1 b
+ where a.twothousand = b.twothousand and a.fivethous <> b.fivethous)
+ and i4.f1 = a.tenthous;
+ QUERY PLAN
+----------------------------------------------
+ Hash Semi Join
+ Hash Cond: (a.twothousand = b.twothousand)
+ Join Filter: (a.fivethous <> b.fivethous)
+ -> Hash Join
+ Hash Cond: (a.tenthous = i4.f1)
+ -> Seq Scan on tenk1 a
+ -> Hash
+ -> Seq Scan on int4_tbl i4
+ -> Hash
+ -> Seq Scan on tenk1 b
+(10 rows)
+
+--
+-- More complicated constructs
+--
+--
+-- Multiway full join
+--
+CREATE TABLE t1 (name TEXT, n INTEGER);
+CREATE TABLE t2 (name TEXT, n INTEGER);
+CREATE TABLE t3 (name TEXT, n INTEGER);
+INSERT INTO t1 VALUES ( 'bb', 11 );
+INSERT INTO t2 VALUES ( 'bb', 12 );
+INSERT INTO t2 VALUES ( 'cc', 22 );
+INSERT INTO t2 VALUES ( 'ee', 42 );
+INSERT INTO t3 VALUES ( 'bb', 13 );
+INSERT INTO t3 VALUES ( 'cc', 23 );
+INSERT INTO t3 VALUES ( 'dd', 33 );
+SELECT * FROM t1 FULL JOIN t2 USING (name) FULL JOIN t3 USING (name);
+ name | n | n | n
+------+----+----+----
+ bb | 11 | 12 | 13
+ cc | | 22 | 23
+ dd | | | 33
+ ee | | 42 |
+(4 rows)
+
+--
+-- Test interactions of join syntax and subqueries
+--
+-- Basic cases (we expect planner to pull up the subquery here)
+SELECT * FROM
+(SELECT * FROM t2) as s2
+INNER JOIN
+(SELECT * FROM t3) s3
+USING (name);
+ name | n | n
+------+----+----
+ bb | 12 | 13
+ cc | 22 | 23
+(2 rows)
+
+SELECT * FROM
+(SELECT * FROM t2) as s2
+LEFT JOIN
+(SELECT * FROM t3) s3
+USING (name);
+ name | n | n
+------+----+----
+ bb | 12 | 13
+ cc | 22 | 23
+ ee | 42 |
+(3 rows)
+
+SELECT * FROM
+(SELECT * FROM t2) as s2
+FULL JOIN
+(SELECT * FROM t3) s3
+USING (name);
+ name | n | n
+------+----+----
+ bb | 12 | 13
+ cc | 22 | 23
+ dd | | 33
+ ee | 42 |
+(4 rows)
+
+-- Cases with non-nullable expressions in subquery results;
+-- make sure these go to null as expected
+SELECT * FROM
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL INNER JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+ name | s2_n | s2_2 | s3_n | s3_2
+------+------+------+------+------
+ bb | 12 | 2 | 13 | 3
+ cc | 22 | 2 | 23 | 3
+(2 rows)
+
+SELECT * FROM
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL LEFT JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+ name | s2_n | s2_2 | s3_n | s3_2
+------+------+------+------+------
+ bb | 12 | 2 | 13 | 3
+ cc | 22 | 2 | 23 | 3
+ ee | 42 | 2 | |
+(3 rows)
+
+SELECT * FROM
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL FULL JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+ name | s2_n | s2_2 | s3_n | s3_2
+------+------+------+------+------
+ bb | 12 | 2 | 13 | 3
+ cc | 22 | 2 | 23 | 3
+ dd | | | 33 | 3
+ ee | 42 | 2 | |
+(4 rows)
+
+SELECT * FROM
+(SELECT name, n as s1_n, 1 as s1_1 FROM t1) as s1
+NATURAL INNER JOIN
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL INNER JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+ name | s1_n | s1_1 | s2_n | s2_2 | s3_n | s3_2
+------+------+------+------+------+------+------
+ bb | 11 | 1 | 12 | 2 | 13 | 3
+(1 row)
+
+SELECT * FROM
+(SELECT name, n as s1_n, 1 as s1_1 FROM t1) as s1
+NATURAL FULL JOIN
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL FULL JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+ name | s1_n | s1_1 | s2_n | s2_2 | s3_n | s3_2
+------+------+------+------+------+------+------
+ bb | 11 | 1 | 12 | 2 | 13 | 3
+ cc | | | 22 | 2 | 23 | 3
+ dd | | | | | 33 | 3
+ ee | | | 42 | 2 | |
+(4 rows)
+
+SELECT * FROM
+(SELECT name, n as s1_n FROM t1) as s1
+NATURAL FULL JOIN
+ (SELECT * FROM
+ (SELECT name, n as s2_n FROM t2) as s2
+ NATURAL FULL JOIN
+ (SELECT name, n as s3_n FROM t3) as s3
+ ) ss2;
+ name | s1_n | s2_n | s3_n
+------+------+------+------
+ bb | 11 | 12 | 13
+ cc | | 22 | 23
+ dd | | | 33
+ ee | | 42 |
+(4 rows)
+
+SELECT * FROM
+(SELECT name, n as s1_n FROM t1) as s1
+NATURAL FULL JOIN
+ (SELECT * FROM
+ (SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+ NATURAL FULL JOIN
+ (SELECT name, n as s3_n FROM t3) as s3
+ ) ss2;
+ name | s1_n | s2_n | s2_2 | s3_n
+------+------+------+------+------
+ bb | 11 | 12 | 2 | 13
+ cc | | 22 | 2 | 23
+ dd | | | | 33
+ ee | | 42 | 2 |
+(4 rows)
+
+-- Constants as join keys can also be problematic
+SELECT * FROM
+ (SELECT name, n as s1_n FROM t1) as s1
+FULL JOIN
+ (SELECT name, 2 as s2_n FROM t2) as s2
+ON (s1_n = s2_n);
+ name | s1_n | name | s2_n
+------+------+------+------
+ | | bb | 2
+ | | cc | 2
+ | | ee | 2
+ bb | 11 | |
+(4 rows)
+
+-- Test for propagation of nullability constraints into sub-joins
+create temp table x (x1 int, x2 int);
+insert into x values (1,11);
+insert into x values (2,22);
+insert into x values (3,null);
+insert into x values (4,44);
+insert into x values (5,null);
+create temp table y (y1 int, y2 int);
+insert into y values (1,111);
+insert into y values (2,222);
+insert into y values (3,333);
+insert into y values (4,null);
+select * from x;
+ x1 | x2
+----+----
+ 1 | 11
+ 2 | 22
+ 3 |
+ 4 | 44
+ 5 |
+(5 rows)
+
+select * from y;
+ y1 | y2
+----+-----
+ 1 | 111
+ 2 | 222
+ 3 | 333
+ 4 |
+(4 rows)
+
+select * from x left join y on (x1 = y1 and x2 is not null);
+ x1 | x2 | y1 | y2
+----+----+----+-----
+ 1 | 11 | 1 | 111
+ 2 | 22 | 2 | 222
+ 3 | | |
+ 4 | 44 | 4 |
+ 5 | | |
+(5 rows)
+
+select * from x left join y on (x1 = y1 and y2 is not null);
+ x1 | x2 | y1 | y2
+----+----+----+-----
+ 1 | 11 | 1 | 111
+ 2 | 22 | 2 | 222
+ 3 | | 3 | 333
+ 4 | 44 | |
+ 5 | | |
+(5 rows)
+
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1);
+ x1 | x2 | y1 | y2 | xx1 | xx2
+----+----+----+-----+-----+-----
+ 1 | 11 | 1 | 111 | 1 | 11
+ 2 | 22 | 2 | 222 | 2 | 22
+ 3 | | 3 | 333 | 3 |
+ 4 | 44 | 4 | | 4 | 44
+ 5 | | | | 5 |
+(5 rows)
+
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1 and x2 is not null);
+ x1 | x2 | y1 | y2 | xx1 | xx2
+----+----+----+-----+-----+-----
+ 1 | 11 | 1 | 111 | 1 | 11
+ 2 | 22 | 2 | 222 | 2 | 22
+ 3 | | 3 | 333 | |
+ 4 | 44 | 4 | | 4 | 44
+ 5 | | | | |
+(5 rows)
+
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1 and y2 is not null);
+ x1 | x2 | y1 | y2 | xx1 | xx2
+----+----+----+-----+-----+-----
+ 1 | 11 | 1 | 111 | 1 | 11
+ 2 | 22 | 2 | 222 | 2 | 22
+ 3 | | 3 | 333 | 3 |
+ 4 | 44 | 4 | | |
+ 5 | | | | |
+(5 rows)
+
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1 and xx2 is not null);
+ x1 | x2 | y1 | y2 | xx1 | xx2
+----+----+----+-----+-----+-----
+ 1 | 11 | 1 | 111 | 1 | 11
+ 2 | 22 | 2 | 222 | 2 | 22
+ 3 | | 3 | 333 | |
+ 4 | 44 | 4 | | 4 | 44
+ 5 | | | | |
+(5 rows)
+
+-- these should NOT give the same answers as above
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1) where (x2 is not null);
+ x1 | x2 | y1 | y2 | xx1 | xx2
+----+----+----+-----+-----+-----
+ 1 | 11 | 1 | 111 | 1 | 11
+ 2 | 22 | 2 | 222 | 2 | 22
+ 4 | 44 | 4 | | 4 | 44
+(3 rows)
+
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1) where (y2 is not null);
+ x1 | x2 | y1 | y2 | xx1 | xx2
+----+----+----+-----+-----+-----
+ 1 | 11 | 1 | 111 | 1 | 11
+ 2 | 22 | 2 | 222 | 2 | 22
+ 3 | | 3 | 333 | 3 |
+(3 rows)
+
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1) where (xx2 is not null);
+ x1 | x2 | y1 | y2 | xx1 | xx2
+----+----+----+-----+-----+-----
+ 1 | 11 | 1 | 111 | 1 | 11
+ 2 | 22 | 2 | 222 | 2 | 22
+ 4 | 44 | 4 | | 4 | 44
+(3 rows)
+
+--
+-- regression test: check for bug with propagation of implied equality
+-- to outside an IN
+--
+select count(*) from tenk1 a where unique1 in
+ (select unique1 from tenk1 b join tenk1 c using (unique1)
+ where b.unique2 = 42);
+ count
+-------
+ 1
+(1 row)
+
+--
+-- regression test: check for failure to generate a plan with multiple
+-- degenerate IN clauses
+--
+select count(*) from tenk1 x where
+ x.unique1 in (select a.f1 from int4_tbl a,float8_tbl b where a.f1=b.f1) and
+ x.unique1 = 0 and
+ x.unique1 in (select aa.f1 from int4_tbl aa,float8_tbl bb where aa.f1=bb.f1);
+ count
+-------
+ 1
+(1 row)
+
+-- try that with GEQO too
+begin;
+set geqo = on;
+set geqo_threshold = 2;
+select count(*) from tenk1 x where
+ x.unique1 in (select a.f1 from int4_tbl a,float8_tbl b where a.f1=b.f1) and
+ x.unique1 = 0 and
+ x.unique1 in (select aa.f1 from int4_tbl aa,float8_tbl bb where aa.f1=bb.f1);
+ count
+-------
+ 1
+(1 row)
+
+rollback;
+--
+-- regression test: be sure we cope with proven-dummy append rels
+--
+explain (costs off)
+select aa, bb, unique1, unique1
+ from tenk1 right join b on aa = unique1
+ where bb < bb and bb is null;
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+select aa, bb, unique1, unique1
+ from tenk1 right join b on aa = unique1
+ where bb < bb and bb is null;
+ aa | bb | unique1 | unique1
+----+----+---------+---------
+(0 rows)
+
+--
+-- regression test: check handling of empty-FROM subquery underneath outer join
+--
+explain (costs off)
+select * from int8_tbl i1 left join (int8_tbl i2 join
+ (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2
+order by 1, 2;
+ QUERY PLAN
+-------------------------------------------
+ Sort
+ Sort Key: i1.q1, i1.q2
+ -> Hash Left Join
+ Hash Cond: (i1.q2 = i2.q2)
+ -> Seq Scan on int8_tbl i1
+ -> Hash
+ -> Seq Scan on int8_tbl i2
+ Filter: (q1 = 123)
+(8 rows)
+
+select * from int8_tbl i1 left join (int8_tbl i2 join
+ (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2
+order by 1, 2;
+ q1 | q2 | q1 | q2 | x
+------------------+-------------------+-----+------------------+-----
+ 123 | 456 | 123 | 456 | 123
+ 123 | 4567890123456789 | 123 | 4567890123456789 | 123
+ 4567890123456789 | -4567890123456789 | | |
+ 4567890123456789 | 123 | | |
+ 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 123
+(5 rows)
+
+--
+-- regression test: check a case where join_clause_is_movable_into() gives
+-- an imprecise result, causing an assertion failure
+--
+select count(*)
+from
+ (select t3.tenthous as x1, coalesce(t1.stringu1, t2.stringu1) as x2
+ from tenk1 t1
+ left join tenk1 t2 on t1.unique1 = t2.unique1
+ join tenk1 t3 on t1.unique2 = t3.unique2) ss,
+ tenk1 t4,
+ tenk1 t5
+where t4.thousand = t5.unique1 and ss.x1 = t4.tenthous and ss.x2 = t5.stringu1;
+ count
+-------
+ 1000
+(1 row)
+
+--
+-- regression test: check a case where we formerly missed including an EC
+-- enforcement clause because it was expected to be handled at scan level
+--
+explain (costs off)
+select a.f1, b.f1, t.thousand, t.tenthous from
+ tenk1 t,
+ (select sum(f1)+1 as f1 from int4_tbl i4a) a,
+ (select sum(f1) as f1 from int4_tbl i4b) b
+where b.f1 = t.thousand and a.f1 = b.f1 and (a.f1+b.f1+999) = t.tenthous;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ -> Aggregate
+ -> Seq Scan on int4_tbl i4b
+ -> Nested Loop
+ Join Filter: ((sum(i4b.f1)) = ((sum(i4a.f1) + 1)))
+ -> Aggregate
+ -> Seq Scan on int4_tbl i4a
+ -> Index Only Scan using tenk1_thous_tenthous on tenk1 t
+ Index Cond: ((thousand = (sum(i4b.f1))) AND (tenthous = ((((sum(i4a.f1) + 1)) + (sum(i4b.f1))) + 999)))
+(9 rows)
+
+select a.f1, b.f1, t.thousand, t.tenthous from
+ tenk1 t,
+ (select sum(f1)+1 as f1 from int4_tbl i4a) a,
+ (select sum(f1) as f1 from int4_tbl i4b) b
+where b.f1 = t.thousand and a.f1 = b.f1 and (a.f1+b.f1+999) = t.tenthous;
+ f1 | f1 | thousand | tenthous
+----+----+----------+----------
+(0 rows)
+
+--
+-- check a case where we formerly got confused by conflicting sort orders
+-- in redundant merge join path keys
+--
+explain (costs off)
+select * from
+ j1_tbl full join
+ (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl
+ on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Merge Full Join
+ Merge Cond: ((j2_tbl.i = j1_tbl.i) AND (j2_tbl.k = j1_tbl.i))
+ -> Sort
+ Sort Key: j2_tbl.i DESC, j2_tbl.k
+ -> Seq Scan on j2_tbl
+ -> Sort
+ Sort Key: j1_tbl.i DESC
+ -> Seq Scan on j1_tbl
+(8 rows)
+
+select * from
+ j1_tbl full join
+ (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl
+ on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k;
+ i | j | t | i | k
+---+---+-------+---+----
+ | | | | 0
+ | | | |
+ | 0 | zero | |
+ | | null | |
+ 8 | 8 | eight | |
+ 7 | 7 | seven | |
+ 6 | 6 | six | |
+ | | | 5 | -5
+ | | | 5 | -5
+ 5 | 0 | five | |
+ 4 | 1 | four | |
+ | | | 3 | -3
+ 3 | 2 | three | |
+ 2 | 3 | two | 2 | 2
+ | | | 2 | 4
+ | | | 1 | -1
+ | | | 0 |
+ 1 | 4 | one | |
+ 0 | | zero | |
+(19 rows)
+
+--
+-- a different check for handling of redundant sort keys in merge joins
+--
+explain (costs off)
+select count(*) from
+ (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x
+ left join
+ (select * from tenk1 y order by y.unique2) y
+ on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2;
+ QUERY PLAN
+----------------------------------------------------------------------------------
+ Aggregate
+ -> Merge Left Join
+ Merge Cond: (x.thousand = y.unique2)
+ Join Filter: ((x.twothousand = y.hundred) AND (x.fivethous = y.unique2))
+ -> Sort
+ Sort Key: x.thousand, x.twothousand, x.fivethous
+ -> Seq Scan on tenk1 x
+ -> Materialize
+ -> Index Scan using tenk1_unique2 on tenk1 y
+(9 rows)
+
+select count(*) from
+ (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x
+ left join
+ (select * from tenk1 y order by y.unique2) y
+ on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2;
+ count
+-------
+ 10000
+(1 row)
+
+--
+-- Clean up
+--
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE J1_TBL;
+DROP TABLE J2_TBL;
+-- Both DELETE and UPDATE allow the specification of additional tables
+-- to "join" against to determine which rows should be modified.
+CREATE TEMP TABLE t1 (a int, b int);
+CREATE TEMP TABLE t2 (a int, b int);
+CREATE TEMP TABLE t3 (x int, y int);
+INSERT INTO t1 VALUES (5, 10);
+INSERT INTO t1 VALUES (15, 20);
+INSERT INTO t1 VALUES (100, 100);
+INSERT INTO t1 VALUES (200, 1000);
+INSERT INTO t2 VALUES (200, 2000);
+INSERT INTO t3 VALUES (5, 20);
+INSERT INTO t3 VALUES (6, 7);
+INSERT INTO t3 VALUES (7, 8);
+INSERT INTO t3 VALUES (500, 100);
+DELETE FROM t3 USING t1 table1 WHERE t3.x = table1.a;
+SELECT * FROM t3;
+ x | y
+-----+-----
+ 6 | 7
+ 7 | 8
+ 500 | 100
+(3 rows)
+
+DELETE FROM t3 USING t1 JOIN t2 USING (a) WHERE t3.x > t1.a;
+SELECT * FROM t3;
+ x | y
+---+---
+ 6 | 7
+ 7 | 8
+(2 rows)
+
+DELETE FROM t3 USING t3 t3_other WHERE t3.x = t3_other.x AND t3.y = t3_other.y;
+SELECT * FROM t3;
+ x | y
+---+---
+(0 rows)
+
+-- Test join against inheritance tree
+create temp table t2a () inherits (t2);
+insert into t2a values (200, 2001);
+select * from t1 left join t2 on (t1.a = t2.a);
+ a | b | a | b
+-----+------+-----+------
+ 5 | 10 | |
+ 15 | 20 | |
+ 100 | 100 | |
+ 200 | 1000 | 200 | 2000
+ 200 | 1000 | 200 | 2001
+(5 rows)
+
+-- Test matching of column name with wrong alias
+select t1.x from t1 join t3 on (t1.a = t3.x);
+ERROR: column t1.x does not exist
+LINE 1: select t1.x from t1 join t3 on (t1.a = t3.x);
+ ^
+HINT: Perhaps you meant to reference the column "t3.x".
+-- Test matching of locking clause with wrong alias
+select t1.*, t2.*, unnamed_join.* from
+ t1 join t2 on (t1.a = t2.a), t3 as unnamed_join
+ for update of unnamed_join;
+ a | b | a | b | x | y
+---+---+---+---+---+---
+(0 rows)
+
+select foo.*, unnamed_join.* from
+ t1 join t2 using (a) as foo, t3 as unnamed_join
+ for update of unnamed_join;
+ a | x | y
+---+---+---
+(0 rows)
+
+select foo.*, unnamed_join.* from
+ t1 join t2 using (a) as foo, t3 as unnamed_join
+ for update of foo;
+ERROR: FOR UPDATE cannot be applied to a join
+LINE 3: for update of foo;
+ ^
+select bar.*, unnamed_join.* from
+ (t1 join t2 using (a) as foo) as bar, t3 as unnamed_join
+ for update of foo;
+ERROR: relation "foo" in FOR UPDATE clause not found in FROM clause
+LINE 3: for update of foo;
+ ^
+select bar.*, unnamed_join.* from
+ (t1 join t2 using (a) as foo) as bar, t3 as unnamed_join
+ for update of bar;
+ERROR: FOR UPDATE cannot be applied to a join
+LINE 3: for update of bar;
+ ^
+--
+-- regression test for 8.1 merge right join bug
+--
+CREATE TEMP TABLE tt1 ( tt1_id int4, joincol int4 );
+INSERT INTO tt1 VALUES (1, 11);
+INSERT INTO tt1 VALUES (2, NULL);
+CREATE TEMP TABLE tt2 ( tt2_id int4, joincol int4 );
+INSERT INTO tt2 VALUES (21, 11);
+INSERT INTO tt2 VALUES (22, 11);
+set enable_hashjoin to off;
+set enable_nestloop to off;
+-- these should give the same results
+select tt1.*, tt2.* from tt1 left join tt2 on tt1.joincol = tt2.joincol;
+ tt1_id | joincol | tt2_id | joincol
+--------+---------+--------+---------
+ 1 | 11 | 21 | 11
+ 1 | 11 | 22 | 11
+ 2 | | |
+(3 rows)
+
+select tt1.*, tt2.* from tt2 right join tt1 on tt1.joincol = tt2.joincol;
+ tt1_id | joincol | tt2_id | joincol
+--------+---------+--------+---------
+ 1 | 11 | 21 | 11
+ 1 | 11 | 22 | 11
+ 2 | | |
+(3 rows)
+
+reset enable_hashjoin;
+reset enable_nestloop;
+--
+-- regression test for bug #13908 (hash join with skew tuples & nbatch increase)
+--
+set work_mem to '64kB';
+set enable_mergejoin to off;
+set enable_memoize to off;
+explain (costs off)
+select count(*) from tenk1 a, tenk1 b
+ where a.hundred = b.thousand and (b.fivethous % 10) < 10;
+ QUERY PLAN
+------------------------------------------------------------
+ Aggregate
+ -> Hash Join
+ Hash Cond: (a.hundred = b.thousand)
+ -> Index Only Scan using tenk1_hundred on tenk1 a
+ -> Hash
+ -> Seq Scan on tenk1 b
+ Filter: ((fivethous % 10) < 10)
+(7 rows)
+
+select count(*) from tenk1 a, tenk1 b
+ where a.hundred = b.thousand and (b.fivethous % 10) < 10;
+ count
+--------
+ 100000
+(1 row)
+
+reset work_mem;
+reset enable_mergejoin;
+reset enable_memoize;
+--
+-- regression test for 8.2 bug with improper re-ordering of left joins
+--
+create temp table tt3(f1 int, f2 text);
+insert into tt3 select x, repeat('xyzzy', 100) from generate_series(1,10000) x;
+create index tt3i on tt3(f1);
+analyze tt3;
+create temp table tt4(f1 int);
+insert into tt4 values (0),(1),(9999);
+analyze tt4;
+SELECT a.f1
+FROM tt4 a
+LEFT JOIN (
+ SELECT b.f1
+ FROM tt3 b LEFT JOIN tt3 c ON (b.f1 = c.f1)
+ WHERE c.f1 IS NULL
+) AS d ON (a.f1 = d.f1)
+WHERE d.f1 IS NULL;
+ f1
+------
+ 0
+ 1
+ 9999
+(3 rows)
+
+--
+-- regression test for proper handling of outer joins within antijoins
+--
+create temp table tt4x(c1 int, c2 int, c3 int);
+explain (costs off)
+select * from tt4x t1
+where not exists (
+ select 1 from tt4x t2
+ left join tt4x t3 on t2.c3 = t3.c1
+ left join ( select t5.c1 as c1
+ from tt4x t4 left join tt4x t5 on t4.c2 = t5.c1
+ ) a1 on t3.c2 = a1.c1
+ where t1.c1 = t2.c2
+);
+ QUERY PLAN
+---------------------------------------------------------
+ Hash Anti Join
+ Hash Cond: (t1.c1 = t2.c2)
+ -> Seq Scan on tt4x t1
+ -> Hash
+ -> Merge Right Join
+ Merge Cond: (t5.c1 = t3.c2)
+ -> Merge Join
+ Merge Cond: (t4.c2 = t5.c1)
+ -> Sort
+ Sort Key: t4.c2
+ -> Seq Scan on tt4x t4
+ -> Sort
+ Sort Key: t5.c1
+ -> Seq Scan on tt4x t5
+ -> Sort
+ Sort Key: t3.c2
+ -> Merge Left Join
+ Merge Cond: (t2.c3 = t3.c1)
+ -> Sort
+ Sort Key: t2.c3
+ -> Seq Scan on tt4x t2
+ -> Sort
+ Sort Key: t3.c1
+ -> Seq Scan on tt4x t3
+(24 rows)
+
+--
+-- regression test for problems of the sort depicted in bug #3494
+--
+create temp table tt5(f1 int, f2 int);
+create temp table tt6(f1 int, f2 int);
+insert into tt5 values(1, 10);
+insert into tt5 values(1, 11);
+insert into tt6 values(1, 9);
+insert into tt6 values(1, 2);
+insert into tt6 values(2, 9);
+select * from tt5,tt6 where tt5.f1 = tt6.f1 and tt5.f1 = tt5.f2 - tt6.f2;
+ f1 | f2 | f1 | f2
+----+----+----+----
+ 1 | 10 | 1 | 9
+(1 row)
+
+--
+-- regression test for problems of the sort depicted in bug #3588
+--
+create temp table xx (pkxx int);
+create temp table yy (pkyy int, pkxx int);
+insert into xx values (1);
+insert into xx values (2);
+insert into xx values (3);
+insert into yy values (101, 1);
+insert into yy values (201, 2);
+insert into yy values (301, NULL);
+select yy.pkyy as yy_pkyy, yy.pkxx as yy_pkxx, yya.pkyy as yya_pkyy,
+ xxa.pkxx as xxa_pkxx, xxb.pkxx as xxb_pkxx
+from yy
+ left join (SELECT * FROM yy where pkyy = 101) as yya ON yy.pkyy = yya.pkyy
+ left join xx xxa on yya.pkxx = xxa.pkxx
+ left join xx xxb on coalesce (xxa.pkxx, 1) = xxb.pkxx;
+ yy_pkyy | yy_pkxx | yya_pkyy | xxa_pkxx | xxb_pkxx
+---------+---------+----------+----------+----------
+ 101 | 1 | 101 | 1 | 1
+ 201 | 2 | | | 1
+ 301 | | | | 1
+(3 rows)
+
+--
+-- regression test for improper pushing of constants across outer-join clauses
+-- (as seen in early 8.2.x releases)
+--
+create temp table zt1 (f1 int primary key);
+create temp table zt2 (f2 int primary key);
+create temp table zt3 (f3 int primary key);
+insert into zt1 values(53);
+insert into zt2 values(53);
+select * from
+ zt2 left join zt3 on (f2 = f3)
+ left join zt1 on (f3 = f1)
+where f2 = 53;
+ f2 | f3 | f1
+----+----+----
+ 53 | |
+(1 row)
+
+create temp view zv1 as select *,'dummy'::text AS junk from zt1;
+select * from
+ zt2 left join zt3 on (f2 = f3)
+ left join zv1 on (f3 = f1)
+where f2 = 53;
+ f2 | f3 | f1 | junk
+----+----+----+------
+ 53 | | |
+(1 row)
+
+--
+-- regression test for improper extraction of OR indexqual conditions
+-- (as seen in early 8.3.x releases)
+--
+select a.unique2, a.ten, b.tenthous, b.unique2, b.hundred
+from tenk1 a left join tenk1 b on a.unique2 = b.tenthous
+where a.unique1 = 42 and
+ ((b.unique2 is null and a.ten = 2) or b.hundred = 3);
+ unique2 | ten | tenthous | unique2 | hundred
+---------+-----+----------+---------+---------
+(0 rows)
+
+--
+-- test proper positioning of one-time quals in EXISTS (8.4devel bug)
+--
+prepare foo(bool) as
+ select count(*) from tenk1 a left join tenk1 b
+ on (a.unique2 = b.unique1 and exists
+ (select 1 from tenk1 c where c.thousand = b.unique2 and $1));
+execute foo(true);
+ count
+-------
+ 10000
+(1 row)
+
+execute foo(false);
+ count
+-------
+ 10000
+(1 row)
+
+--
+-- test for sane behavior with noncanonical merge clauses, per bug #4926
+--
+begin;
+set enable_mergejoin = 1;
+set enable_hashjoin = 0;
+set enable_nestloop = 0;
+create temp table a (i integer);
+create temp table b (x integer, y integer);
+select * from a left join b on i = x and i = y and x = i;
+ i | x | y
+---+---+---
+(0 rows)
+
+rollback;
+--
+-- test handling of merge clauses using record_ops
+--
+begin;
+create type mycomptype as (id int, v bigint);
+create temp table tidv (idv mycomptype);
+create index on tidv (idv);
+explain (costs off)
+select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv;
+ QUERY PLAN
+----------------------------------------------------------
+ Merge Join
+ Merge Cond: (a.idv = b.idv)
+ -> Index Only Scan using tidv_idv_idx on tidv a
+ -> Materialize
+ -> Index Only Scan using tidv_idv_idx on tidv b
+(5 rows)
+
+set enable_mergejoin = 0;
+set enable_hashjoin = 0;
+explain (costs off)
+select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv;
+ QUERY PLAN
+----------------------------------------------------
+ Nested Loop
+ -> Seq Scan on tidv a
+ -> Index Only Scan using tidv_idv_idx on tidv b
+ Index Cond: (idv = a.idv)
+(4 rows)
+
+rollback;
+--
+-- test NULL behavior of whole-row Vars, per bug #5025
+--
+select t1.q2, count(t2.*)
+from int8_tbl t1 left join int8_tbl t2 on (t1.q2 = t2.q1)
+group by t1.q2 order by 1;
+ q2 | count
+-------------------+-------
+ -4567890123456789 | 0
+ 123 | 2
+ 456 | 0
+ 4567890123456789 | 6
+(4 rows)
+
+select t1.q2, count(t2.*)
+from int8_tbl t1 left join (select * from int8_tbl) t2 on (t1.q2 = t2.q1)
+group by t1.q2 order by 1;
+ q2 | count
+-------------------+-------
+ -4567890123456789 | 0
+ 123 | 2
+ 456 | 0
+ 4567890123456789 | 6
+(4 rows)
+
+select t1.q2, count(t2.*)
+from int8_tbl t1 left join (select * from int8_tbl offset 0) t2 on (t1.q2 = t2.q1)
+group by t1.q2 order by 1;
+ q2 | count
+-------------------+-------
+ -4567890123456789 | 0
+ 123 | 2
+ 456 | 0
+ 4567890123456789 | 6
+(4 rows)
+
+select t1.q2, count(t2.*)
+from int8_tbl t1 left join
+ (select q1, case when q2=1 then 1 else q2 end as q2 from int8_tbl) t2
+ on (t1.q2 = t2.q1)
+group by t1.q2 order by 1;
+ q2 | count
+-------------------+-------
+ -4567890123456789 | 0
+ 123 | 2
+ 456 | 0
+ 4567890123456789 | 6
+(4 rows)
+
+--
+-- test incorrect failure to NULL pulled-up subexpressions
+--
+begin;
+create temp table a (
+ code char not null,
+ constraint a_pk primary key (code)
+);
+create temp table b (
+ a char not null,
+ num integer not null,
+ constraint b_pk primary key (a, num)
+);
+create temp table c (
+ name char not null,
+ a char,
+ constraint c_pk primary key (name)
+);
+insert into a (code) values ('p');
+insert into a (code) values ('q');
+insert into b (a, num) values ('p', 1);
+insert into b (a, num) values ('p', 2);
+insert into c (name, a) values ('A', 'p');
+insert into c (name, a) values ('B', 'q');
+insert into c (name, a) values ('C', null);
+select c.name, ss.code, ss.b_cnt, ss.const
+from c left join
+ (select a.code, coalesce(b_grp.cnt, 0) as b_cnt, -1 as const
+ from a left join
+ (select count(1) as cnt, b.a from b group by b.a) as b_grp
+ on a.code = b_grp.a
+ ) as ss
+ on (c.a = ss.code)
+order by c.name;
+ name | code | b_cnt | const
+------+------+-------+-------
+ A | p | 2 | -1
+ B | q | 0 | -1
+ C | | |
+(3 rows)
+
+rollback;
+--
+-- test incorrect handling of placeholders that only appear in targetlists,
+-- per bug #6154
+--
+SELECT * FROM
+( SELECT 1 as key1 ) sub1
+LEFT JOIN
+( SELECT sub3.key3, sub4.value2, COALESCE(sub4.value2, 66) as value3 FROM
+ ( SELECT 1 as key3 ) sub3
+ LEFT JOIN
+ ( SELECT sub5.key5, COALESCE(sub6.value1, 1) as value2 FROM
+ ( SELECT 1 as key5 ) sub5
+ LEFT JOIN
+ ( SELECT 2 as key6, 42 as value1 ) sub6
+ ON sub5.key5 = sub6.key6
+ ) sub4
+ ON sub4.key5 = sub3.key3
+) sub2
+ON sub1.key1 = sub2.key3;
+ key1 | key3 | value2 | value3
+------+------+--------+--------
+ 1 | 1 | 1 | 1
+(1 row)
+
+-- test the path using join aliases, too
+SELECT * FROM
+( SELECT 1 as key1 ) sub1
+LEFT JOIN
+( SELECT sub3.key3, value2, COALESCE(value2, 66) as value3 FROM
+ ( SELECT 1 as key3 ) sub3
+ LEFT JOIN
+ ( SELECT sub5.key5, COALESCE(sub6.value1, 1) as value2 FROM
+ ( SELECT 1 as key5 ) sub5
+ LEFT JOIN
+ ( SELECT 2 as key6, 42 as value1 ) sub6
+ ON sub5.key5 = sub6.key6
+ ) sub4
+ ON sub4.key5 = sub3.key3
+) sub2
+ON sub1.key1 = sub2.key3;
+ key1 | key3 | value2 | value3
+------+------+--------+--------
+ 1 | 1 | 1 | 1
+(1 row)
+
+--
+-- test case where a PlaceHolderVar is used as a nestloop parameter
+--
+EXPLAIN (COSTS OFF)
+SELECT qq, unique1
+ FROM
+ ( SELECT COALESCE(q1, 0) AS qq FROM int8_tbl a ) AS ss1
+ FULL OUTER JOIN
+ ( SELECT COALESCE(q2, -1) AS qq FROM int8_tbl b ) AS ss2
+ USING (qq)
+ INNER JOIN tenk1 c ON qq = unique2;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------
+ Nested Loop
+ -> Hash Full Join
+ Hash Cond: ((COALESCE(a.q1, '0'::bigint)) = (COALESCE(b.q2, '-1'::bigint)))
+ -> Seq Scan on int8_tbl a
+ -> Hash
+ -> Seq Scan on int8_tbl b
+ -> Index Scan using tenk1_unique2 on tenk1 c
+ Index Cond: (unique2 = COALESCE((COALESCE(a.q1, '0'::bigint)), (COALESCE(b.q2, '-1'::bigint))))
+(8 rows)
+
+SELECT qq, unique1
+ FROM
+ ( SELECT COALESCE(q1, 0) AS qq FROM int8_tbl a ) AS ss1
+ FULL OUTER JOIN
+ ( SELECT COALESCE(q2, -1) AS qq FROM int8_tbl b ) AS ss2
+ USING (qq)
+ INNER JOIN tenk1 c ON qq = unique2;
+ qq | unique1
+-----+---------
+ 123 | 4596
+ 123 | 4596
+ 456 | 7318
+(3 rows)
+
+--
+-- nested nestloops can require nested PlaceHolderVars
+--
+create temp table nt1 (
+ id int primary key,
+ a1 boolean,
+ a2 boolean
+);
+create temp table nt2 (
+ id int primary key,
+ nt1_id int,
+ b1 boolean,
+ b2 boolean,
+ foreign key (nt1_id) references nt1(id)
+);
+create temp table nt3 (
+ id int primary key,
+ nt2_id int,
+ c1 boolean,
+ foreign key (nt2_id) references nt2(id)
+);
+insert into nt1 values (1,true,true);
+insert into nt1 values (2,true,false);
+insert into nt1 values (3,false,false);
+insert into nt2 values (1,1,true,true);
+insert into nt2 values (2,2,true,false);
+insert into nt2 values (3,3,false,false);
+insert into nt3 values (1,1,true);
+insert into nt3 values (2,2,false);
+insert into nt3 values (3,3,true);
+explain (costs off)
+select nt3.id
+from nt3 as nt3
+ left join
+ (select nt2.*, (nt2.b1 and ss1.a3) AS b3
+ from nt2 as nt2
+ left join
+ (select nt1.*, (nt1.id is not null) as a3 from nt1) as ss1
+ on ss1.id = nt2.nt1_id
+ ) as ss2
+ on ss2.id = nt3.nt2_id
+where nt3.id = 1 and ss2.b3;
+ QUERY PLAN
+-----------------------------------------------
+ Nested Loop
+ -> Nested Loop
+ -> Index Scan using nt3_pkey on nt3
+ Index Cond: (id = 1)
+ -> Index Scan using nt2_pkey on nt2
+ Index Cond: (id = nt3.nt2_id)
+ -> Index Only Scan using nt1_pkey on nt1
+ Index Cond: (id = nt2.nt1_id)
+ Filter: (nt2.b1 AND (id IS NOT NULL))
+(9 rows)
+
+select nt3.id
+from nt3 as nt3
+ left join
+ (select nt2.*, (nt2.b1 and ss1.a3) AS b3
+ from nt2 as nt2
+ left join
+ (select nt1.*, (nt1.id is not null) as a3 from nt1) as ss1
+ on ss1.id = nt2.nt1_id
+ ) as ss2
+ on ss2.id = nt3.nt2_id
+where nt3.id = 1 and ss2.b3;
+ id
+----
+ 1
+(1 row)
+
+--
+-- test case where a PlaceHolderVar is propagated into a subquery
+--
+explain (costs off)
+select * from
+ int8_tbl t1 left join
+ (select q1 as x, 42 as y from int8_tbl t2) ss
+ on t1.q2 = ss.x
+where
+ 1 = (select 1 from int8_tbl t3 where ss.y is not null limit 1)
+order by 1,2;
+ QUERY PLAN
+-----------------------------------------------------------
+ Sort
+ Sort Key: t1.q1, t1.q2
+ -> Hash Left Join
+ Hash Cond: (t1.q2 = t2.q1)
+ Filter: (1 = (SubPlan 1))
+ -> Seq Scan on int8_tbl t1
+ -> Hash
+ -> Seq Scan on int8_tbl t2
+ SubPlan 1
+ -> Limit
+ -> Result
+ One-Time Filter: ((42) IS NOT NULL)
+ -> Seq Scan on int8_tbl t3
+(13 rows)
+
+select * from
+ int8_tbl t1 left join
+ (select q1 as x, 42 as y from int8_tbl t2) ss
+ on t1.q2 = ss.x
+where
+ 1 = (select 1 from int8_tbl t3 where ss.y is not null limit 1)
+order by 1,2;
+ q1 | q2 | x | y
+------------------+------------------+------------------+----
+ 123 | 4567890123456789 | 4567890123456789 | 42
+ 123 | 4567890123456789 | 4567890123456789 | 42
+ 123 | 4567890123456789 | 4567890123456789 | 42
+ 4567890123456789 | 123 | 123 | 42
+ 4567890123456789 | 123 | 123 | 42
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 42
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 42
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 42
+(8 rows)
+
+--
+-- variant where a PlaceHolderVar is needed at a join, but not above the join
+--
+explain (costs off)
+select * from
+ int4_tbl as i41,
+ lateral
+ (select 1 as x from
+ (select i41.f1 as lat,
+ i42.f1 as loc from
+ int8_tbl as i81, int4_tbl as i42) as ss1
+ right join int4_tbl as i43 on (i43.f1 > 1)
+ where ss1.loc = ss1.lat) as ss2
+where i41.f1 > 0;
+ QUERY PLAN
+--------------------------------------------------
+ Nested Loop
+ -> Nested Loop
+ -> Seq Scan on int4_tbl i41
+ Filter: (f1 > 0)
+ -> Nested Loop
+ Join Filter: (i41.f1 = i42.f1)
+ -> Seq Scan on int8_tbl i81
+ -> Materialize
+ -> Seq Scan on int4_tbl i42
+ -> Materialize
+ -> Seq Scan on int4_tbl i43
+ Filter: (f1 > 1)
+(12 rows)
+
+select * from
+ int4_tbl as i41,
+ lateral
+ (select 1 as x from
+ (select i41.f1 as lat,
+ i42.f1 as loc from
+ int8_tbl as i81, int4_tbl as i42) as ss1
+ right join int4_tbl as i43 on (i43.f1 > 1)
+ where ss1.loc = ss1.lat) as ss2
+where i41.f1 > 0;
+ f1 | x
+------------+---
+ 123456 | 1
+ 123456 | 1
+ 123456 | 1
+ 123456 | 1
+ 123456 | 1
+ 123456 | 1
+ 123456 | 1
+ 123456 | 1
+ 123456 | 1
+ 123456 | 1
+ 2147483647 | 1
+ 2147483647 | 1
+ 2147483647 | 1
+ 2147483647 | 1
+ 2147483647 | 1
+ 2147483647 | 1
+ 2147483647 | 1
+ 2147483647 | 1
+ 2147483647 | 1
+ 2147483647 | 1
+(20 rows)
+
+--
+-- test the corner cases FULL JOIN ON TRUE and FULL JOIN ON FALSE
+--
+select * from int4_tbl a full join int4_tbl b on true;
+ f1 | f1
+-------------+-------------
+ 0 | 0
+ 0 | 123456
+ 0 | -123456
+ 0 | 2147483647
+ 0 | -2147483647
+ 123456 | 0
+ 123456 | 123456
+ 123456 | -123456
+ 123456 | 2147483647
+ 123456 | -2147483647
+ -123456 | 0
+ -123456 | 123456
+ -123456 | -123456
+ -123456 | 2147483647
+ -123456 | -2147483647
+ 2147483647 | 0
+ 2147483647 | 123456
+ 2147483647 | -123456
+ 2147483647 | 2147483647
+ 2147483647 | -2147483647
+ -2147483647 | 0
+ -2147483647 | 123456
+ -2147483647 | -123456
+ -2147483647 | 2147483647
+ -2147483647 | -2147483647
+(25 rows)
+
+select * from int4_tbl a full join int4_tbl b on false;
+ f1 | f1
+-------------+-------------
+ | 0
+ | 123456
+ | -123456
+ | 2147483647
+ | -2147483647
+ 0 |
+ 123456 |
+ -123456 |
+ 2147483647 |
+ -2147483647 |
+(10 rows)
+
+--
+-- test for ability to use a cartesian join when necessary
+--
+create temp table q1 as select 1 as q1;
+create temp table q2 as select 0 as q2;
+analyze q1;
+analyze q2;
+explain (costs off)
+select * from
+ tenk1 join int4_tbl on f1 = twothousand,
+ q1, q2
+where q1 = thousand or q2 = thousand;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Hash Join
+ Hash Cond: (tenk1.twothousand = int4_tbl.f1)
+ -> Nested Loop
+ -> Nested Loop
+ -> Seq Scan on q1
+ -> Seq Scan on q2
+ -> Bitmap Heap Scan on tenk1
+ Recheck Cond: ((q1.q1 = thousand) OR (q2.q2 = thousand))
+ -> BitmapOr
+ -> Bitmap Index Scan on tenk1_thous_tenthous
+ Index Cond: (thousand = q1.q1)
+ -> Bitmap Index Scan on tenk1_thous_tenthous
+ Index Cond: (thousand = q2.q2)
+ -> Hash
+ -> Seq Scan on int4_tbl
+(15 rows)
+
+explain (costs off)
+select * from
+ tenk1 join int4_tbl on f1 = twothousand,
+ q1, q2
+where thousand = (q1 + q2);
+ QUERY PLAN
+--------------------------------------------------------------
+ Hash Join
+ Hash Cond: (tenk1.twothousand = int4_tbl.f1)
+ -> Nested Loop
+ -> Nested Loop
+ -> Seq Scan on q1
+ -> Seq Scan on q2
+ -> Bitmap Heap Scan on tenk1
+ Recheck Cond: (thousand = (q1.q1 + q2.q2))
+ -> Bitmap Index Scan on tenk1_thous_tenthous
+ Index Cond: (thousand = (q1.q1 + q2.q2))
+ -> Hash
+ -> Seq Scan on int4_tbl
+(12 rows)
+
+--
+-- test ability to generate a suitable plan for a star-schema query
+--
+explain (costs off)
+select * from
+ tenk1, int8_tbl a, int8_tbl b
+where thousand = a.q1 and tenthous = b.q1 and a.q2 = 1 and b.q2 = 2;
+ QUERY PLAN
+---------------------------------------------------------------------
+ Nested Loop
+ -> Seq Scan on int8_tbl b
+ Filter: (q2 = 2)
+ -> Nested Loop
+ -> Seq Scan on int8_tbl a
+ Filter: (q2 = 1)
+ -> Index Scan using tenk1_thous_tenthous on tenk1
+ Index Cond: ((thousand = a.q1) AND (tenthous = b.q1))
+(8 rows)
+
+--
+-- test a corner case in which we shouldn't apply the star-schema optimization
+--
+explain (costs off)
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (select 1,0 from onerow) v1(x1,x2)
+ left join (select 3,1 from onerow) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+ QUERY PLAN
+-----------------------------------------------------------------------
+ Nested Loop
+ -> Nested Loop
+ Join Filter: (t1.stringu1 > t2.stringu2)
+ -> Nested Loop
+ -> Nested Loop
+ -> Seq Scan on onerow
+ -> Seq Scan on onerow onerow_1
+ -> Index Scan using tenk1_unique2 on tenk1 t1
+ Index Cond: ((unique2 = (11)) AND (unique2 < 42))
+ -> Index Scan using tenk1_unique1 on tenk1 t2
+ Index Cond: (unique1 = (3))
+ -> Seq Scan on int4_tbl i1
+ Filter: (f1 = 0)
+(13 rows)
+
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (select 1,0 from onerow) v1(x1,x2)
+ left join (select 3,1 from onerow) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+ unique2 | stringu1 | unique1 | stringu2
+---------+----------+---------+----------
+ 11 | WFAAAA | 3 | LKIAAA
+(1 row)
+
+-- variant that isn't quite a star-schema case
+select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+where t1.unique1 < i4.f1;
+ d1
+----
+(0 rows)
+
+-- this variant is foldable by the remove-useless-RESULT-RTEs code
+explain (costs off)
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (values(1,0)) v1(x1,x2)
+ left join (values(3,1)) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Nested Loop
+ Join Filter: (t1.stringu1 > t2.stringu2)
+ -> Nested Loop
+ -> Seq Scan on int4_tbl i1
+ Filter: (f1 = 0)
+ -> Index Scan using tenk1_unique2 on tenk1 t1
+ Index Cond: ((unique2 = (11)) AND (unique2 < 42))
+ -> Index Scan using tenk1_unique1 on tenk1 t2
+ Index Cond: (unique1 = (3))
+(9 rows)
+
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (values(1,0)) v1(x1,x2)
+ left join (values(3,1)) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+ unique2 | stringu1 | unique1 | stringu2
+---------+----------+---------+----------
+ 11 | WFAAAA | 3 | LKIAAA
+(1 row)
+
+-- Here's a variant that we can't fold too aggressively, though,
+-- or we end up with noplace to evaluate the lateral PHV
+explain (verbose, costs off)
+select * from
+ (select 1 as x) ss1 left join (select 2 as y) ss2 on (true),
+ lateral (select ss2.y as z limit 1) ss3;
+ QUERY PLAN
+---------------------------
+ Nested Loop
+ Output: 1, (2), ((2))
+ -> Result
+ Output: 2
+ -> Limit
+ Output: ((2))
+ -> Result
+ Output: (2)
+(8 rows)
+
+select * from
+ (select 1 as x) ss1 left join (select 2 as y) ss2 on (true),
+ lateral (select ss2.y as z limit 1) ss3;
+ x | y | z
+---+---+---
+ 1 | 2 | 2
+(1 row)
+
+-- Test proper handling of appendrel PHVs during useless-RTE removal
+explain (costs off)
+select * from
+ (select 0 as z) as t1
+ left join
+ (select true as a) as t2
+ on true,
+ lateral (select true as b
+ union all
+ select a as b) as t3
+where b;
+ QUERY PLAN
+---------------------------------------
+ Nested Loop
+ -> Result
+ -> Append
+ -> Result
+ -> Result
+ One-Time Filter: (true)
+(6 rows)
+
+select * from
+ (select 0 as z) as t1
+ left join
+ (select true as a) as t2
+ on true,
+ lateral (select true as b
+ union all
+ select a as b) as t3
+where b;
+ z | a | b
+---+---+---
+ 0 | t | t
+ 0 | t | t
+(2 rows)
+
+--
+-- test inlining of immutable functions
+--
+create function f_immutable_int4(i integer) returns integer as
+$$ begin return i; end; $$ language plpgsql immutable;
+-- check optimization of function scan with join
+explain (costs off)
+select unique1 from tenk1, (select * from f_immutable_int4(1) x) x
+where x = unique1;
+ QUERY PLAN
+----------------------------------------------
+ Index Only Scan using tenk1_unique1 on tenk1
+ Index Cond: (unique1 = 1)
+(2 rows)
+
+explain (verbose, costs off)
+select unique1, x.*
+from tenk1, (select *, random() from f_immutable_int4(1) x) x
+where x = unique1;
+ QUERY PLAN
+-----------------------------------------------------------
+ Nested Loop
+ Output: tenk1.unique1, (1), (random())
+ -> Result
+ Output: 1, random()
+ -> Index Only Scan using tenk1_unique1 on public.tenk1
+ Output: tenk1.unique1
+ Index Cond: (tenk1.unique1 = (1))
+(7 rows)
+
+explain (costs off)
+select unique1 from tenk1, f_immutable_int4(1) x where x = unique1;
+ QUERY PLAN
+----------------------------------------------
+ Index Only Scan using tenk1_unique1 on tenk1
+ Index Cond: (unique1 = 1)
+(2 rows)
+
+explain (costs off)
+select unique1 from tenk1, lateral f_immutable_int4(1) x where x = unique1;
+ QUERY PLAN
+----------------------------------------------
+ Index Only Scan using tenk1_unique1 on tenk1
+ Index Cond: (unique1 = 1)
+(2 rows)
+
+explain (costs off)
+select unique1 from tenk1, lateral f_immutable_int4(1) x where x in (select 17);
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+explain (costs off)
+select unique1, x from tenk1 join f_immutable_int4(1) x on unique1 = x;
+ QUERY PLAN
+----------------------------------------------
+ Index Only Scan using tenk1_unique1 on tenk1
+ Index Cond: (unique1 = 1)
+(2 rows)
+
+explain (costs off)
+select unique1, x from tenk1 left join f_immutable_int4(1) x on unique1 = x;
+ QUERY PLAN
+----------------------------------------------------
+ Nested Loop Left Join
+ Join Filter: (tenk1.unique1 = 1)
+ -> Index Only Scan using tenk1_unique1 on tenk1
+ -> Materialize
+ -> Result
+(5 rows)
+
+explain (costs off)
+select unique1, x from tenk1 right join f_immutable_int4(1) x on unique1 = x;
+ QUERY PLAN
+----------------------------------------------------
+ Nested Loop Left Join
+ -> Result
+ -> Index Only Scan using tenk1_unique1 on tenk1
+ Index Cond: (unique1 = 1)
+(4 rows)
+
+explain (costs off)
+select unique1, x from tenk1 full join f_immutable_int4(1) x on unique1 = x;
+ QUERY PLAN
+----------------------------------------------------
+ Merge Full Join
+ Merge Cond: (tenk1.unique1 = (1))
+ -> Index Only Scan using tenk1_unique1 on tenk1
+ -> Sort
+ Sort Key: (1)
+ -> Result
+(6 rows)
+
+-- check that pullup of a const function allows further const-folding
+explain (costs off)
+select unique1 from tenk1, f_immutable_int4(1) x where x = 42;
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+-- test inlining of immutable functions with PlaceHolderVars
+explain (costs off)
+select nt3.id
+from nt3 as nt3
+ left join
+ (select nt2.*, (nt2.b1 or i4 = 42) AS b3
+ from nt2 as nt2
+ left join
+ f_immutable_int4(0) i4
+ on i4 = nt2.nt1_id
+ ) as ss2
+ on ss2.id = nt3.nt2_id
+where nt3.id = 1 and ss2.b3;
+ QUERY PLAN
+----------------------------------------------
+ Nested Loop Left Join
+ Filter: ((nt2.b1 OR ((0) = 42)))
+ -> Index Scan using nt3_pkey on nt3
+ Index Cond: (id = 1)
+ -> Nested Loop Left Join
+ Join Filter: (0 = nt2.nt1_id)
+ -> Index Scan using nt2_pkey on nt2
+ Index Cond: (id = nt3.nt2_id)
+ -> Result
+(9 rows)
+
+drop function f_immutable_int4(int);
+-- test inlining when function returns composite
+create function mki8(bigint, bigint) returns int8_tbl as
+$$select row($1,$2)::int8_tbl$$ language sql;
+create function mki4(int) returns int4_tbl as
+$$select row($1)::int4_tbl$$ language sql;
+explain (verbose, costs off)
+select * from mki8(1,2);
+ QUERY PLAN
+------------------------------------
+ Function Scan on mki8
+ Output: q1, q2
+ Function Call: '(1,2)'::int8_tbl
+(3 rows)
+
+select * from mki8(1,2);
+ q1 | q2
+----+----
+ 1 | 2
+(1 row)
+
+explain (verbose, costs off)
+select * from mki4(42);
+ QUERY PLAN
+-----------------------------------
+ Function Scan on mki4
+ Output: f1
+ Function Call: '(42)'::int4_tbl
+(3 rows)
+
+select * from mki4(42);
+ f1
+----
+ 42
+(1 row)
+
+drop function mki8(bigint, bigint);
+drop function mki4(int);
+--
+-- test extraction of restriction OR clauses from join OR clause
+-- (we used to only do this for indexable clauses)
+--
+explain (costs off)
+select * from tenk1 a join tenk1 b on
+ (a.unique1 = 1 and b.unique1 = 2) or (a.unique2 = 3 and b.hundred = 4);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Nested Loop
+ Join Filter: (((a.unique1 = 1) AND (b.unique1 = 2)) OR ((a.unique2 = 3) AND (b.hundred = 4)))
+ -> Bitmap Heap Scan on tenk1 b
+ Recheck Cond: ((unique1 = 2) OR (hundred = 4))
+ -> BitmapOr
+ -> Bitmap Index Scan on tenk1_unique1
+ Index Cond: (unique1 = 2)
+ -> Bitmap Index Scan on tenk1_hundred
+ Index Cond: (hundred = 4)
+ -> Materialize
+ -> Bitmap Heap Scan on tenk1 a
+ Recheck Cond: ((unique1 = 1) OR (unique2 = 3))
+ -> BitmapOr
+ -> Bitmap Index Scan on tenk1_unique1
+ Index Cond: (unique1 = 1)
+ -> Bitmap Index Scan on tenk1_unique2
+ Index Cond: (unique2 = 3)
+(17 rows)
+
+explain (costs off)
+select * from tenk1 a join tenk1 b on
+ (a.unique1 = 1 and b.unique1 = 2) or (a.unique2 = 3 and b.ten = 4);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Nested Loop
+ Join Filter: (((a.unique1 = 1) AND (b.unique1 = 2)) OR ((a.unique2 = 3) AND (b.ten = 4)))
+ -> Seq Scan on tenk1 b
+ Filter: ((unique1 = 2) OR (ten = 4))
+ -> Materialize
+ -> Bitmap Heap Scan on tenk1 a
+ Recheck Cond: ((unique1 = 1) OR (unique2 = 3))
+ -> BitmapOr
+ -> Bitmap Index Scan on tenk1_unique1
+ Index Cond: (unique1 = 1)
+ -> Bitmap Index Scan on tenk1_unique2
+ Index Cond: (unique2 = 3)
+(12 rows)
+
+explain (costs off)
+select * from tenk1 a join tenk1 b on
+ (a.unique1 = 1 and b.unique1 = 2) or
+ ((a.unique2 = 3 or a.unique2 = 7) and b.hundred = 4);
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Join Filter: (((a.unique1 = 1) AND (b.unique1 = 2)) OR (((a.unique2 = 3) OR (a.unique2 = 7)) AND (b.hundred = 4)))
+ -> Bitmap Heap Scan on tenk1 b
+ Recheck Cond: ((unique1 = 2) OR (hundred = 4))
+ -> BitmapOr
+ -> Bitmap Index Scan on tenk1_unique1
+ Index Cond: (unique1 = 2)
+ -> Bitmap Index Scan on tenk1_hundred
+ Index Cond: (hundred = 4)
+ -> Materialize
+ -> Bitmap Heap Scan on tenk1 a
+ Recheck Cond: ((unique1 = 1) OR (unique2 = 3) OR (unique2 = 7))
+ -> BitmapOr
+ -> Bitmap Index Scan on tenk1_unique1
+ Index Cond: (unique1 = 1)
+ -> Bitmap Index Scan on tenk1_unique2
+ Index Cond: (unique2 = 3)
+ -> Bitmap Index Scan on tenk1_unique2
+ Index Cond: (unique2 = 7)
+(19 rows)
+
+--
+-- test placement of movable quals in a parameterized join tree
+--
+explain (costs off)
+select * from tenk1 t1 left join
+ (tenk1 t2 join tenk1 t3 on t2.thousand = t3.unique2)
+ on t1.hundred = t2.hundred and t1.ten = t3.ten
+where t1.unique1 = 1;
+ QUERY PLAN
+--------------------------------------------------------
+ Nested Loop Left Join
+ -> Index Scan using tenk1_unique1 on tenk1 t1
+ Index Cond: (unique1 = 1)
+ -> Nested Loop
+ Join Filter: (t1.ten = t3.ten)
+ -> Bitmap Heap Scan on tenk1 t2
+ Recheck Cond: (t1.hundred = hundred)
+ -> Bitmap Index Scan on tenk1_hundred
+ Index Cond: (hundred = t1.hundred)
+ -> Index Scan using tenk1_unique2 on tenk1 t3
+ Index Cond: (unique2 = t2.thousand)
+(11 rows)
+
+explain (costs off)
+select * from tenk1 t1 left join
+ (tenk1 t2 join tenk1 t3 on t2.thousand = t3.unique2)
+ on t1.hundred = t2.hundred and t1.ten + t2.ten = t3.ten
+where t1.unique1 = 1;
+ QUERY PLAN
+--------------------------------------------------------
+ Nested Loop Left Join
+ -> Index Scan using tenk1_unique1 on tenk1 t1
+ Index Cond: (unique1 = 1)
+ -> Nested Loop
+ Join Filter: ((t1.ten + t2.ten) = t3.ten)
+ -> Bitmap Heap Scan on tenk1 t2
+ Recheck Cond: (t1.hundred = hundred)
+ -> Bitmap Index Scan on tenk1_hundred
+ Index Cond: (hundred = t1.hundred)
+ -> Index Scan using tenk1_unique2 on tenk1 t3
+ Index Cond: (unique2 = t2.thousand)
+(11 rows)
+
+explain (costs off)
+select count(*) from
+ tenk1 a join tenk1 b on a.unique1 = b.unique2
+ left join tenk1 c on a.unique2 = b.unique1 and c.thousand = a.thousand
+ join int4_tbl on b.thousand = f1;
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Aggregate
+ -> Nested Loop Left Join
+ Join Filter: (a.unique2 = b.unique1)
+ -> Nested Loop
+ -> Nested Loop
+ -> Seq Scan on int4_tbl
+ -> Bitmap Heap Scan on tenk1 b
+ Recheck Cond: (thousand = int4_tbl.f1)
+ -> Bitmap Index Scan on tenk1_thous_tenthous
+ Index Cond: (thousand = int4_tbl.f1)
+ -> Index Scan using tenk1_unique1 on tenk1 a
+ Index Cond: (unique1 = b.unique2)
+ -> Index Only Scan using tenk1_thous_tenthous on tenk1 c
+ Index Cond: (thousand = a.thousand)
+(14 rows)
+
+select count(*) from
+ tenk1 a join tenk1 b on a.unique1 = b.unique2
+ left join tenk1 c on a.unique2 = b.unique1 and c.thousand = a.thousand
+ join int4_tbl on b.thousand = f1;
+ count
+-------
+ 10
+(1 row)
+
+explain (costs off)
+select b.unique1 from
+ tenk1 a join tenk1 b on a.unique1 = b.unique2
+ left join tenk1 c on b.unique1 = 42 and c.thousand = a.thousand
+ join int4_tbl i1 on b.thousand = f1
+ right join int4_tbl i2 on i2.f1 = b.tenthous
+ order by 1;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Sort
+ Sort Key: b.unique1
+ -> Nested Loop Left Join
+ -> Seq Scan on int4_tbl i2
+ -> Nested Loop Left Join
+ Join Filter: (b.unique1 = 42)
+ -> Nested Loop
+ -> Nested Loop
+ -> Seq Scan on int4_tbl i1
+ -> Index Scan using tenk1_thous_tenthous on tenk1 b
+ Index Cond: ((thousand = i1.f1) AND (tenthous = i2.f1))
+ -> Index Scan using tenk1_unique1 on tenk1 a
+ Index Cond: (unique1 = b.unique2)
+ -> Index Only Scan using tenk1_thous_tenthous on tenk1 c
+ Index Cond: (thousand = a.thousand)
+(15 rows)
+
+select b.unique1 from
+ tenk1 a join tenk1 b on a.unique1 = b.unique2
+ left join tenk1 c on b.unique1 = 42 and c.thousand = a.thousand
+ join int4_tbl i1 on b.thousand = f1
+ right join int4_tbl i2 on i2.f1 = b.tenthous
+ order by 1;
+ unique1
+---------
+ 0
+
+
+
+
+(5 rows)
+
+explain (costs off)
+select * from
+(
+ select unique1, q1, coalesce(unique1, -1) + q1 as fault
+ from int8_tbl left join tenk1 on (q2 = unique2)
+) ss
+where fault = 122
+order by fault;
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Nested Loop Left Join
+ Filter: ((COALESCE(tenk1.unique1, '-1'::integer) + int8_tbl.q1) = 122)
+ -> Seq Scan on int8_tbl
+ -> Index Scan using tenk1_unique2 on tenk1
+ Index Cond: (unique2 = int8_tbl.q2)
+(5 rows)
+
+select * from
+(
+ select unique1, q1, coalesce(unique1, -1) + q1 as fault
+ from int8_tbl left join tenk1 on (q2 = unique2)
+) ss
+where fault = 122
+order by fault;
+ unique1 | q1 | fault
+---------+-----+-------
+ | 123 | 122
+(1 row)
+
+explain (costs off)
+select * from
+(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys)
+left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x
+left join unnest(v1ys) as u1(u1y) on u1y = v2y;
+ QUERY PLAN
+-------------------------------------------------------------
+ Nested Loop Left Join
+ -> Values Scan on "*VALUES*"
+ -> Hash Right Join
+ Hash Cond: (u1.u1y = "*VALUES*_1".column2)
+ Filter: ("*VALUES*_1".column1 = "*VALUES*".column1)
+ -> Function Scan on unnest u1
+ -> Hash
+ -> Values Scan on "*VALUES*_1"
+(8 rows)
+
+select * from
+(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys)
+left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x
+left join unnest(v1ys) as u1(u1y) on u1y = v2y;
+ v1x | v1ys | v2x | v2y | u1y
+-----+---------+-----+-----+-----
+ 1 | {10,20} | 1 | 10 | 10
+ 2 | {20,30} | 2 | 20 | 20
+(2 rows)
+
+--
+-- test handling of potential equivalence clauses above outer joins
+--
+explain (costs off)
+select q1, unique2, thousand, hundred
+ from int8_tbl a left join tenk1 b on q1 = unique2
+ where coalesce(thousand,123) = q1 and q1 = coalesce(hundred,123);
+ QUERY PLAN
+--------------------------------------------------------------------------------------
+ Nested Loop Left Join
+ Filter: ((COALESCE(b.thousand, 123) = a.q1) AND (a.q1 = COALESCE(b.hundred, 123)))
+ -> Seq Scan on int8_tbl a
+ -> Index Scan using tenk1_unique2 on tenk1 b
+ Index Cond: (unique2 = a.q1)
+(5 rows)
+
+select q1, unique2, thousand, hundred
+ from int8_tbl a left join tenk1 b on q1 = unique2
+ where coalesce(thousand,123) = q1 and q1 = coalesce(hundred,123);
+ q1 | unique2 | thousand | hundred
+----+---------+----------+---------
+(0 rows)
+
+explain (costs off)
+select f1, unique2, case when unique2 is null then f1 else 0 end
+ from int4_tbl a left join tenk1 b on f1 = unique2
+ where (case when unique2 is null then f1 else 0 end) = 0;
+ QUERY PLAN
+--------------------------------------------------------------------
+ Nested Loop Left Join
+ Filter: (CASE WHEN (b.unique2 IS NULL) THEN a.f1 ELSE 0 END = 0)
+ -> Seq Scan on int4_tbl a
+ -> Index Only Scan using tenk1_unique2 on tenk1 b
+ Index Cond: (unique2 = a.f1)
+(5 rows)
+
+select f1, unique2, case when unique2 is null then f1 else 0 end
+ from int4_tbl a left join tenk1 b on f1 = unique2
+ where (case when unique2 is null then f1 else 0 end) = 0;
+ f1 | unique2 | case
+----+---------+------
+ 0 | 0 | 0
+(1 row)
+
+--
+-- another case with equivalence clauses above outer joins (bug #8591)
+--
+explain (costs off)
+select a.unique1, b.unique1, c.unique1, coalesce(b.twothousand, a.twothousand)
+ from tenk1 a left join tenk1 b on b.thousand = a.unique1 left join tenk1 c on c.unique2 = coalesce(b.twothousand, a.twothousand)
+ where a.unique2 < 10 and coalesce(b.twothousand, a.twothousand) = 44;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Nested Loop Left Join
+ -> Nested Loop Left Join
+ Filter: (COALESCE(b.twothousand, a.twothousand) = 44)
+ -> Index Scan using tenk1_unique2 on tenk1 a
+ Index Cond: (unique2 < 10)
+ -> Bitmap Heap Scan on tenk1 b
+ Recheck Cond: (thousand = a.unique1)
+ -> Bitmap Index Scan on tenk1_thous_tenthous
+ Index Cond: (thousand = a.unique1)
+ -> Index Scan using tenk1_unique2 on tenk1 c
+ Index Cond: ((unique2 = COALESCE(b.twothousand, a.twothousand)) AND (unique2 = 44))
+(11 rows)
+
+select a.unique1, b.unique1, c.unique1, coalesce(b.twothousand, a.twothousand)
+ from tenk1 a left join tenk1 b on b.thousand = a.unique1 left join tenk1 c on c.unique2 = coalesce(b.twothousand, a.twothousand)
+ where a.unique2 < 10 and coalesce(b.twothousand, a.twothousand) = 44;
+ unique1 | unique1 | unique1 | coalesce
+---------+---------+---------+----------
+(0 rows)
+
+--
+-- check handling of join aliases when flattening multiple levels of subquery
+--
+explain (verbose, costs off)
+select foo1.join_key as foo1_id, foo3.join_key AS foo3_id, bug_field from
+ (values (0),(1)) foo1(join_key)
+left join
+ (select join_key, bug_field from
+ (select ss1.join_key, ss1.bug_field from
+ (select f1 as join_key, 666 as bug_field from int4_tbl i1) ss1
+ ) foo2
+ left join
+ (select unique2 as join_key from tenk1 i2) ss2
+ using (join_key)
+ ) foo3
+using (join_key);
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Nested Loop Left Join
+ Output: "*VALUES*".column1, i1.f1, (666)
+ Join Filter: ("*VALUES*".column1 = i1.f1)
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1
+ -> Materialize
+ Output: i1.f1, (666)
+ -> Nested Loop Left Join
+ Output: i1.f1, 666
+ -> Seq Scan on public.int4_tbl i1
+ Output: i1.f1
+ -> Index Only Scan using tenk1_unique2 on public.tenk1 i2
+ Output: i2.unique2
+ Index Cond: (i2.unique2 = i1.f1)
+(14 rows)
+
+select foo1.join_key as foo1_id, foo3.join_key AS foo3_id, bug_field from
+ (values (0),(1)) foo1(join_key)
+left join
+ (select join_key, bug_field from
+ (select ss1.join_key, ss1.bug_field from
+ (select f1 as join_key, 666 as bug_field from int4_tbl i1) ss1
+ ) foo2
+ left join
+ (select unique2 as join_key from tenk1 i2) ss2
+ using (join_key)
+ ) foo3
+using (join_key);
+ foo1_id | foo3_id | bug_field
+---------+---------+-----------
+ 0 | 0 | 666
+ 1 | |
+(2 rows)
+
+--
+-- test successful handling of nested outer joins with degenerate join quals
+--
+explain (verbose, costs off)
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+ QUERY PLAN
+----------------------------------------------------------------------
+ Hash Left Join
+ Output: t1.f1
+ Hash Cond: (i8.q2 = i4.f1)
+ -> Nested Loop Left Join
+ Output: t1.f1, i8.q2
+ Join Filter: (t1.f1 = '***'::text)
+ -> Seq Scan on public.text_tbl t1
+ Output: t1.f1
+ -> Materialize
+ Output: i8.q2
+ -> Hash Right Join
+ Output: i8.q2
+ Hash Cond: ((NULL::integer) = i8b1.q2)
+ -> Hash Join
+ Output: i8.q2, (NULL::integer)
+ Hash Cond: (i8.q1 = i8b2.q1)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ -> Hash
+ Output: i8b2.q1, (NULL::integer)
+ -> Seq Scan on public.int8_tbl i8b2
+ Output: i8b2.q1, NULL::integer
+ -> Hash
+ Output: i8b1.q2
+ -> Seq Scan on public.int8_tbl i8b1
+ Output: i8b1.q2
+ -> Hash
+ Output: i4.f1
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+(30 rows)
+
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+ f1
+-------------------
+ doh!
+ hi de ho neighbor
+(2 rows)
+
+explain (verbose, costs off)
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+ QUERY PLAN
+----------------------------------------------------------------------------
+ Hash Left Join
+ Output: t1.f1
+ Hash Cond: (i8.q2 = i4.f1)
+ -> Nested Loop Left Join
+ Output: t1.f1, i8.q2
+ Join Filter: (t1.f1 = '***'::text)
+ -> Seq Scan on public.text_tbl t1
+ Output: t1.f1
+ -> Materialize
+ Output: i8.q2
+ -> Hash Right Join
+ Output: i8.q2
+ Hash Cond: ((NULL::integer) = i8b1.q2)
+ -> Hash Right Join
+ Output: i8.q2, (NULL::integer)
+ Hash Cond: (i8b2.q1 = i8.q1)
+ -> Nested Loop
+ Output: i8b2.q1, NULL::integer
+ -> Seq Scan on public.int8_tbl i8b2
+ Output: i8b2.q1, i8b2.q2
+ -> Materialize
+ -> Seq Scan on public.int4_tbl i4b2
+ -> Hash
+ Output: i8.q1, i8.q2
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ -> Hash
+ Output: i8b1.q2
+ -> Seq Scan on public.int8_tbl i8b1
+ Output: i8b1.q2
+ -> Hash
+ Output: i4.f1
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+(34 rows)
+
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+ f1
+-------------------
+ doh!
+ hi de ho neighbor
+(2 rows)
+
+explain (verbose, costs off)
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2
+ where q1 = f1) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+ QUERY PLAN
+----------------------------------------------------------------------------
+ Hash Left Join
+ Output: t1.f1
+ Hash Cond: (i8.q2 = i4.f1)
+ -> Nested Loop Left Join
+ Output: t1.f1, i8.q2
+ Join Filter: (t1.f1 = '***'::text)
+ -> Seq Scan on public.text_tbl t1
+ Output: t1.f1
+ -> Materialize
+ Output: i8.q2
+ -> Hash Right Join
+ Output: i8.q2
+ Hash Cond: ((NULL::integer) = i8b1.q2)
+ -> Hash Right Join
+ Output: i8.q2, (NULL::integer)
+ Hash Cond: (i8b2.q1 = i8.q1)
+ -> Hash Join
+ Output: i8b2.q1, NULL::integer
+ Hash Cond: (i8b2.q1 = i4b2.f1)
+ -> Seq Scan on public.int8_tbl i8b2
+ Output: i8b2.q1, i8b2.q2
+ -> Hash
+ Output: i4b2.f1
+ -> Seq Scan on public.int4_tbl i4b2
+ Output: i4b2.f1
+ -> Hash
+ Output: i8.q1, i8.q2
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ -> Hash
+ Output: i8b1.q2
+ -> Seq Scan on public.int8_tbl i8b1
+ Output: i8b1.q2
+ -> Hash
+ Output: i4.f1
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+(37 rows)
+
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2
+ where q1 = f1) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+ f1
+-------------------
+ doh!
+ hi de ho neighbor
+(2 rows)
+
+explain (verbose, costs off)
+select * from
+ text_tbl t1
+ inner join int8_tbl i8
+ on i8.q2 = 456
+ right join text_tbl t2
+ on t1.f1 = 'doh!'
+ left join int4_tbl i4
+ on i8.q1 = i4.f1;
+ QUERY PLAN
+--------------------------------------------------------
+ Nested Loop Left Join
+ Output: t1.f1, i8.q1, i8.q2, t2.f1, i4.f1
+ -> Seq Scan on public.text_tbl t2
+ Output: t2.f1
+ -> Materialize
+ Output: i8.q1, i8.q2, i4.f1, t1.f1
+ -> Nested Loop
+ Output: i8.q1, i8.q2, i4.f1, t1.f1
+ -> Nested Loop Left Join
+ Output: i8.q1, i8.q2, i4.f1
+ Join Filter: (i8.q1 = i4.f1)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 456)
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+ -> Seq Scan on public.text_tbl t1
+ Output: t1.f1
+ Filter: (t1.f1 = 'doh!'::text)
+(19 rows)
+
+select * from
+ text_tbl t1
+ inner join int8_tbl i8
+ on i8.q2 = 456
+ right join text_tbl t2
+ on t1.f1 = 'doh!'
+ left join int4_tbl i4
+ on i8.q1 = i4.f1;
+ f1 | q1 | q2 | f1 | f1
+------+-----+-----+-------------------+----
+ doh! | 123 | 456 | doh! |
+ doh! | 123 | 456 | hi de ho neighbor |
+(2 rows)
+
+--
+-- test for appropriate join order in the presence of lateral references
+--
+explain (verbose, costs off)
+select * from
+ text_tbl t1
+ left join int8_tbl i8
+ on i8.q2 = 123,
+ lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss
+where t1.f1 = ss.f1;
+ QUERY PLAN
+--------------------------------------------------
+ Nested Loop
+ Output: t1.f1, i8.q1, i8.q2, (i8.q1), t2.f1
+ Join Filter: (t1.f1 = t2.f1)
+ -> Nested Loop Left Join
+ Output: t1.f1, i8.q1, i8.q2
+ -> Seq Scan on public.text_tbl t1
+ Output: t1.f1
+ -> Materialize
+ Output: i8.q1, i8.q2
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 123)
+ -> Memoize
+ Output: (i8.q1), t2.f1
+ Cache Key: i8.q1
+ Cache Mode: binary
+ -> Limit
+ Output: (i8.q1), t2.f1
+ -> Seq Scan on public.text_tbl t2
+ Output: i8.q1, t2.f1
+(20 rows)
+
+select * from
+ text_tbl t1
+ left join int8_tbl i8
+ on i8.q2 = 123,
+ lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss
+where t1.f1 = ss.f1;
+ f1 | q1 | q2 | q1 | f1
+------+------------------+-----+------------------+------
+ doh! | 4567890123456789 | 123 | 4567890123456789 | doh!
+(1 row)
+
+explain (verbose, costs off)
+select * from
+ text_tbl t1
+ left join int8_tbl i8
+ on i8.q2 = 123,
+ lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss1,
+ lateral (select ss1.* from text_tbl t3 limit 1) as ss2
+where t1.f1 = ss2.f1;
+ QUERY PLAN
+-------------------------------------------------------------------
+ Nested Loop
+ Output: t1.f1, i8.q1, i8.q2, (i8.q1), t2.f1, ((i8.q1)), (t2.f1)
+ Join Filter: (t1.f1 = (t2.f1))
+ -> Nested Loop
+ Output: t1.f1, i8.q1, i8.q2, (i8.q1), t2.f1
+ -> Nested Loop Left Join
+ Output: t1.f1, i8.q1, i8.q2
+ -> Seq Scan on public.text_tbl t1
+ Output: t1.f1
+ -> Materialize
+ Output: i8.q1, i8.q2
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 123)
+ -> Memoize
+ Output: (i8.q1), t2.f1
+ Cache Key: i8.q1
+ Cache Mode: binary
+ -> Limit
+ Output: (i8.q1), t2.f1
+ -> Seq Scan on public.text_tbl t2
+ Output: i8.q1, t2.f1
+ -> Memoize
+ Output: ((i8.q1)), (t2.f1)
+ Cache Key: (i8.q1), t2.f1
+ Cache Mode: binary
+ -> Limit
+ Output: ((i8.q1)), (t2.f1)
+ -> Seq Scan on public.text_tbl t3
+ Output: (i8.q1), t2.f1
+(30 rows)
+
+select * from
+ text_tbl t1
+ left join int8_tbl i8
+ on i8.q2 = 123,
+ lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss1,
+ lateral (select ss1.* from text_tbl t3 limit 1) as ss2
+where t1.f1 = ss2.f1;
+ f1 | q1 | q2 | q1 | f1 | q1 | f1
+------+------------------+-----+------------------+------+------------------+------
+ doh! | 4567890123456789 | 123 | 4567890123456789 | doh! | 4567890123456789 | doh!
+(1 row)
+
+explain (verbose, costs off)
+select 1 from
+ text_tbl as tt1
+ inner join text_tbl as tt2 on (tt1.f1 = 'foo')
+ left join text_tbl as tt3 on (tt3.f1 = 'foo')
+ left join text_tbl as tt4 on (tt3.f1 = tt4.f1),
+ lateral (select tt4.f1 as c0 from text_tbl as tt5 limit 1) as ss1
+where tt1.f1 = ss1.c0;
+ QUERY PLAN
+----------------------------------------------------------
+ Nested Loop
+ Output: 1
+ -> Nested Loop Left Join
+ Output: tt1.f1, tt4.f1
+ -> Nested Loop
+ Output: tt1.f1
+ -> Seq Scan on public.text_tbl tt1
+ Output: tt1.f1
+ Filter: (tt1.f1 = 'foo'::text)
+ -> Seq Scan on public.text_tbl tt2
+ Output: tt2.f1
+ -> Materialize
+ Output: tt4.f1
+ -> Nested Loop Left Join
+ Output: tt4.f1
+ Join Filter: (tt3.f1 = tt4.f1)
+ -> Seq Scan on public.text_tbl tt3
+ Output: tt3.f1
+ Filter: (tt3.f1 = 'foo'::text)
+ -> Seq Scan on public.text_tbl tt4
+ Output: tt4.f1
+ Filter: (tt4.f1 = 'foo'::text)
+ -> Memoize
+ Output: ss1.c0
+ Cache Key: tt4.f1
+ Cache Mode: binary
+ -> Subquery Scan on ss1
+ Output: ss1.c0
+ Filter: (ss1.c0 = 'foo'::text)
+ -> Limit
+ Output: (tt4.f1)
+ -> Seq Scan on public.text_tbl tt5
+ Output: tt4.f1
+(33 rows)
+
+select 1 from
+ text_tbl as tt1
+ inner join text_tbl as tt2 on (tt1.f1 = 'foo')
+ left join text_tbl as tt3 on (tt3.f1 = 'foo')
+ left join text_tbl as tt4 on (tt3.f1 = tt4.f1),
+ lateral (select tt4.f1 as c0 from text_tbl as tt5 limit 1) as ss1
+where tt1.f1 = ss1.c0;
+ ?column?
+----------
+(0 rows)
+
+--
+-- check a case in which a PlaceHolderVar forces join order
+--
+explain (verbose, costs off)
+select ss2.* from
+ int4_tbl i41
+ left join int8_tbl i8
+ join (select i42.f1 as c1, i43.f1 as c2, 42 as c3
+ from int4_tbl i42, int4_tbl i43) ss1
+ on i8.q1 = ss1.c2
+ on i41.f1 = ss1.c1,
+ lateral (select i41.*, i8.*, ss1.* from text_tbl limit 1) ss2
+where ss1.c2 = 0;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Nested Loop
+ Output: (i41.f1), (i8.q1), (i8.q2), (i42.f1), (i43.f1), ((42))
+ -> Hash Join
+ Output: i41.f1, i42.f1, i8.q1, i8.q2, i43.f1, 42
+ Hash Cond: (i41.f1 = i42.f1)
+ -> Nested Loop
+ Output: i8.q1, i8.q2, i43.f1, i41.f1
+ -> Nested Loop
+ Output: i8.q1, i8.q2, i43.f1
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q1 = 0)
+ -> Seq Scan on public.int4_tbl i43
+ Output: i43.f1
+ Filter: (i43.f1 = 0)
+ -> Seq Scan on public.int4_tbl i41
+ Output: i41.f1
+ -> Hash
+ Output: i42.f1
+ -> Seq Scan on public.int4_tbl i42
+ Output: i42.f1
+ -> Limit
+ Output: (i41.f1), (i8.q1), (i8.q2), (i42.f1), (i43.f1), ((42))
+ -> Seq Scan on public.text_tbl
+ Output: i41.f1, i8.q1, i8.q2, i42.f1, i43.f1, (42)
+(25 rows)
+
+select ss2.* from
+ int4_tbl i41
+ left join int8_tbl i8
+ join (select i42.f1 as c1, i43.f1 as c2, 42 as c3
+ from int4_tbl i42, int4_tbl i43) ss1
+ on i8.q1 = ss1.c2
+ on i41.f1 = ss1.c1,
+ lateral (select i41.*, i8.*, ss1.* from text_tbl limit 1) ss2
+where ss1.c2 = 0;
+ f1 | q1 | q2 | c1 | c2 | c3
+----+----+----+----+----+----
+(0 rows)
+
+--
+-- test successful handling of full join underneath left join (bug #14105)
+--
+explain (costs off)
+select * from
+ (select 1 as id) as xx
+ left join
+ (tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id))
+ on (xx.id = coalesce(yy.id));
+ QUERY PLAN
+---------------------------------------
+ Nested Loop Left Join
+ -> Result
+ -> Hash Full Join
+ Hash Cond: (a1.unique1 = (1))
+ Filter: (1 = COALESCE((1)))
+ -> Seq Scan on tenk1 a1
+ -> Hash
+ -> Result
+(8 rows)
+
+select * from
+ (select 1 as id) as xx
+ left join
+ (tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id))
+ on (xx.id = coalesce(yy.id));
+ id | unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 | id
+----+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------+----
+ 1 | 1 | 2838 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 2 | 3 | BAAAAA | EFEAAA | OOOOxx | 1
+(1 row)
+
+--
+-- test ability to push constants through outer join clauses
+--
+explain (costs off)
+ select * from int4_tbl a left join tenk1 b on f1 = unique2 where f1 = 0;
+ QUERY PLAN
+-------------------------------------------------
+ Nested Loop Left Join
+ Join Filter: (a.f1 = b.unique2)
+ -> Seq Scan on int4_tbl a
+ Filter: (f1 = 0)
+ -> Index Scan using tenk1_unique2 on tenk1 b
+ Index Cond: (unique2 = 0)
+(6 rows)
+
+explain (costs off)
+ select * from tenk1 a full join tenk1 b using(unique2) where unique2 = 42;
+ QUERY PLAN
+-------------------------------------------------
+ Merge Full Join
+ Merge Cond: (a.unique2 = b.unique2)
+ -> Index Scan using tenk1_unique2 on tenk1 a
+ Index Cond: (unique2 = 42)
+ -> Index Scan using tenk1_unique2 on tenk1 b
+ Index Cond: (unique2 = 42)
+(6 rows)
+
+--
+-- test that quals attached to an outer join have correct semantics,
+-- specifically that they don't re-use expressions computed below the join;
+-- we force a mergejoin so that coalesce(b.q1, 1) appears as a join input
+--
+set enable_hashjoin to off;
+set enable_nestloop to off;
+explain (verbose, costs off)
+ select a.q2, b.q1
+ from int8_tbl a left join int8_tbl b on a.q2 = coalesce(b.q1, 1)
+ where coalesce(b.q1, 1) > 0;
+ QUERY PLAN
+---------------------------------------------------------
+ Merge Left Join
+ Output: a.q2, b.q1
+ Merge Cond: (a.q2 = (COALESCE(b.q1, '1'::bigint)))
+ Filter: (COALESCE(b.q1, '1'::bigint) > 0)
+ -> Sort
+ Output: a.q2
+ Sort Key: a.q2
+ -> Seq Scan on public.int8_tbl a
+ Output: a.q2
+ -> Sort
+ Output: b.q1, (COALESCE(b.q1, '1'::bigint))
+ Sort Key: (COALESCE(b.q1, '1'::bigint))
+ -> Seq Scan on public.int8_tbl b
+ Output: b.q1, COALESCE(b.q1, '1'::bigint)
+(14 rows)
+
+select a.q2, b.q1
+ from int8_tbl a left join int8_tbl b on a.q2 = coalesce(b.q1, 1)
+ where coalesce(b.q1, 1) > 0;
+ q2 | q1
+-------------------+------------------
+ -4567890123456789 |
+ 123 | 123
+ 123 | 123
+ 456 |
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+(10 rows)
+
+reset enable_hashjoin;
+reset enable_nestloop;
+--
+-- test join removal
+--
+begin;
+CREATE TEMP TABLE a (id int PRIMARY KEY, b_id int);
+CREATE TEMP TABLE b (id int PRIMARY KEY, c_id int);
+CREATE TEMP TABLE c (id int PRIMARY KEY);
+CREATE TEMP TABLE d (a int, b int);
+INSERT INTO a VALUES (0, 0), (1, NULL);
+INSERT INTO b VALUES (0, 0), (1, NULL);
+INSERT INTO c VALUES (0), (1);
+INSERT INTO d VALUES (1,3), (2,2), (3,1);
+-- all three cases should be optimizable into a simple seqscan
+explain (costs off) SELECT a.* FROM a LEFT JOIN b ON a.b_id = b.id;
+ QUERY PLAN
+---------------
+ Seq Scan on a
+(1 row)
+
+explain (costs off) SELECT b.* FROM b LEFT JOIN c ON b.c_id = c.id;
+ QUERY PLAN
+---------------
+ Seq Scan on b
+(1 row)
+
+explain (costs off)
+ SELECT a.* FROM a LEFT JOIN (b left join c on b.c_id = c.id)
+ ON (a.b_id = b.id);
+ QUERY PLAN
+---------------
+ Seq Scan on a
+(1 row)
+
+-- check optimization of outer join within another special join
+explain (costs off)
+select id from a where id in (
+ select b.id from b left join c on b.id = c.id
+);
+ QUERY PLAN
+----------------------------
+ Hash Join
+ Hash Cond: (a.id = b.id)
+ -> Seq Scan on a
+ -> Hash
+ -> Seq Scan on b
+(5 rows)
+
+-- check that join removal works for a left join when joining a subquery
+-- that is guaranteed to be unique by its GROUP BY clause
+explain (costs off)
+select d.* from d left join (select * from b group by b.id, b.c_id) s
+ on d.a = s.id and d.b = s.c_id;
+ QUERY PLAN
+---------------
+ Seq Scan on d
+(1 row)
+
+-- similarly, but keying off a DISTINCT clause
+explain (costs off)
+select d.* from d left join (select distinct * from b) s
+ on d.a = s.id and d.b = s.c_id;
+ QUERY PLAN
+---------------
+ Seq Scan on d
+(1 row)
+
+-- join removal is not possible when the GROUP BY contains a column that is
+-- not in the join condition. (Note: as of 9.6, we notice that b.id is a
+-- primary key and so drop b.c_id from the GROUP BY of the resulting plan;
+-- but this happens too late for join removal in the outer plan level.)
+explain (costs off)
+select d.* from d left join (select * from b group by b.id, b.c_id) s
+ on d.a = s.id;
+ QUERY PLAN
+------------------------------------------
+ Merge Right Join
+ Merge Cond: (b.id = d.a)
+ -> Group
+ Group Key: b.id
+ -> Index Scan using b_pkey on b
+ -> Sort
+ Sort Key: d.a
+ -> Seq Scan on d
+(8 rows)
+
+-- similarly, but keying off a DISTINCT clause
+explain (costs off)
+select d.* from d left join (select distinct * from b) s
+ on d.a = s.id;
+ QUERY PLAN
+--------------------------------------
+ Merge Right Join
+ Merge Cond: (b.id = d.a)
+ -> Unique
+ -> Sort
+ Sort Key: b.id, b.c_id
+ -> Seq Scan on b
+ -> Sort
+ Sort Key: d.a
+ -> Seq Scan on d
+(9 rows)
+
+-- check join removal works when uniqueness of the join condition is enforced
+-- by a UNION
+explain (costs off)
+select d.* from d left join (select id from a union select id from b) s
+ on d.a = s.id;
+ QUERY PLAN
+---------------
+ Seq Scan on d
+(1 row)
+
+-- check join removal with a cross-type comparison operator
+explain (costs off)
+select i8.* from int8_tbl i8 left join (select f1 from int4_tbl group by f1) i4
+ on i8.q1 = i4.f1;
+ QUERY PLAN
+-------------------------
+ Seq Scan on int8_tbl i8
+(1 row)
+
+-- check join removal with lateral references
+explain (costs off)
+select 1 from (select a.id FROM a left join b on a.b_id = b.id) q,
+ lateral generate_series(1, q.id) gs(i) where q.id = gs.i;
+ QUERY PLAN
+-------------------------------------------
+ Nested Loop
+ -> Seq Scan on a
+ -> Function Scan on generate_series gs
+ Filter: (a.id = i)
+(4 rows)
+
+rollback;
+create temp table parent (k int primary key, pd int);
+create temp table child (k int unique, cd int);
+insert into parent values (1, 10), (2, 20), (3, 30);
+insert into child values (1, 100), (4, 400);
+-- this case is optimizable
+select p.* from parent p left join child c on (p.k = c.k);
+ k | pd
+---+----
+ 1 | 10
+ 2 | 20
+ 3 | 30
+(3 rows)
+
+explain (costs off)
+ select p.* from parent p left join child c on (p.k = c.k);
+ QUERY PLAN
+----------------------
+ Seq Scan on parent p
+(1 row)
+
+-- this case is not
+select p.*, linked from parent p
+ left join (select c.*, true as linked from child c) as ss
+ on (p.k = ss.k);
+ k | pd | linked
+---+----+--------
+ 1 | 10 | t
+ 2 | 20 |
+ 3 | 30 |
+(3 rows)
+
+explain (costs off)
+ select p.*, linked from parent p
+ left join (select c.*, true as linked from child c) as ss
+ on (p.k = ss.k);
+ QUERY PLAN
+---------------------------------
+ Hash Left Join
+ Hash Cond: (p.k = c.k)
+ -> Seq Scan on parent p
+ -> Hash
+ -> Seq Scan on child c
+(5 rows)
+
+-- check for a 9.0rc1 bug: join removal breaks pseudoconstant qual handling
+select p.* from
+ parent p left join child c on (p.k = c.k)
+ where p.k = 1 and p.k = 2;
+ k | pd
+---+----
+(0 rows)
+
+explain (costs off)
+select p.* from
+ parent p left join child c on (p.k = c.k)
+ where p.k = 1 and p.k = 2;
+ QUERY PLAN
+------------------------------------------------
+ Result
+ One-Time Filter: false
+ -> Index Scan using parent_pkey on parent p
+ Index Cond: (k = 1)
+(4 rows)
+
+select p.* from
+ (parent p left join child c on (p.k = c.k)) join parent x on p.k = x.k
+ where p.k = 1 and p.k = 2;
+ k | pd
+---+----
+(0 rows)
+
+explain (costs off)
+select p.* from
+ (parent p left join child c on (p.k = c.k)) join parent x on p.k = x.k
+ where p.k = 1 and p.k = 2;
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+-- bug 5255: this is not optimizable by join removal
+begin;
+CREATE TEMP TABLE a (id int PRIMARY KEY);
+CREATE TEMP TABLE b (id int PRIMARY KEY, a_id int);
+INSERT INTO a VALUES (0), (1);
+INSERT INTO b VALUES (0, 0), (1, NULL);
+SELECT * FROM b LEFT JOIN a ON (b.a_id = a.id) WHERE (a.id IS NULL OR a.id > 0);
+ id | a_id | id
+----+------+----
+ 1 | |
+(1 row)
+
+SELECT b.* FROM b LEFT JOIN a ON (b.a_id = a.id) WHERE (a.id IS NULL OR a.id > 0);
+ id | a_id
+----+------
+ 1 |
+(1 row)
+
+rollback;
+-- another join removal bug: this is not optimizable, either
+begin;
+create temp table innertab (id int8 primary key, dat1 int8);
+insert into innertab values(123, 42);
+SELECT * FROM
+ (SELECT 1 AS x) ss1
+ LEFT JOIN
+ (SELECT q1, q2, COALESCE(dat1, q1) AS y
+ FROM int8_tbl LEFT JOIN innertab ON q2 = id) ss2
+ ON true;
+ x | q1 | q2 | y
+---+------------------+-------------------+------------------
+ 1 | 123 | 456 | 123
+ 1 | 123 | 4567890123456789 | 123
+ 1 | 4567890123456789 | 123 | 42
+ 1 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 1 | 4567890123456789 | -4567890123456789 | 4567890123456789
+(5 rows)
+
+rollback;
+-- another join removal bug: we must clean up correctly when removing a PHV
+begin;
+create temp table uniquetbl (f1 text unique);
+explain (costs off)
+select t1.* from
+ uniquetbl as t1
+ left join (select *, '***'::text as d1 from uniquetbl) t2
+ on t1.f1 = t2.f1
+ left join uniquetbl t3
+ on t2.d1 = t3.f1;
+ QUERY PLAN
+--------------------------
+ Seq Scan on uniquetbl t1
+(1 row)
+
+explain (costs off)
+select t0.*
+from
+ text_tbl t0
+ left join
+ (select case t1.ten when 0 then 'doh!'::text else null::text end as case1,
+ t1.stringu2
+ from tenk1 t1
+ join int4_tbl i4 ON i4.f1 = t1.unique2
+ left join uniquetbl u1 ON u1.f1 = t1.string4) ss
+ on t0.f1 = ss.case1
+where ss.stringu2 !~* ss.case1;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------
+ Nested Loop
+ Join Filter: (CASE t1.ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END = t0.f1)
+ -> Nested Loop
+ -> Seq Scan on int4_tbl i4
+ -> Index Scan using tenk1_unique2 on tenk1 t1
+ Index Cond: (unique2 = i4.f1)
+ Filter: (stringu2 !~* CASE ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END)
+ -> Materialize
+ -> Seq Scan on text_tbl t0
+(9 rows)
+
+select t0.*
+from
+ text_tbl t0
+ left join
+ (select case t1.ten when 0 then 'doh!'::text else null::text end as case1,
+ t1.stringu2
+ from tenk1 t1
+ join int4_tbl i4 ON i4.f1 = t1.unique2
+ left join uniquetbl u1 ON u1.f1 = t1.string4) ss
+ on t0.f1 = ss.case1
+where ss.stringu2 !~* ss.case1;
+ f1
+------
+ doh!
+(1 row)
+
+rollback;
+-- test case to expose miscomputation of required relid set for a PHV
+explain (verbose, costs off)
+select i8.*, ss.v, t.unique2
+ from int8_tbl i8
+ left join int4_tbl i4 on i4.f1 = 1
+ left join lateral (select i4.f1 + 1 as v) as ss on true
+ left join tenk1 t on t.unique2 = ss.v
+where q2 = 456;
+ QUERY PLAN
+-------------------------------------------------------------
+ Nested Loop Left Join
+ Output: i8.q1, i8.q2, ((i4.f1 + 1)), t.unique2
+ -> Nested Loop Left Join
+ Output: i8.q1, i8.q2, (i4.f1 + 1)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 456)
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+ Filter: (i4.f1 = 1)
+ -> Index Only Scan using tenk1_unique2 on public.tenk1 t
+ Output: t.unique2
+ Index Cond: (t.unique2 = ((i4.f1 + 1)))
+(13 rows)
+
+select i8.*, ss.v, t.unique2
+ from int8_tbl i8
+ left join int4_tbl i4 on i4.f1 = 1
+ left join lateral (select i4.f1 + 1 as v) as ss on true
+ left join tenk1 t on t.unique2 = ss.v
+where q2 = 456;
+ q1 | q2 | v | unique2
+-----+-----+---+---------
+ 123 | 456 | |
+(1 row)
+
+-- and check a related issue where we miscompute required relids for
+-- a PHV that's been translated to a child rel
+create temp table parttbl (a integer primary key) partition by range (a);
+create temp table parttbl1 partition of parttbl for values from (1) to (100);
+insert into parttbl values (11), (12);
+explain (costs off)
+select * from
+ (select *, 12 as phv from parttbl) as ss
+ right join int4_tbl on true
+where ss.a = ss.phv and f1 = 0;
+ QUERY PLAN
+------------------------------------
+ Nested Loop
+ -> Seq Scan on int4_tbl
+ Filter: (f1 = 0)
+ -> Seq Scan on parttbl1 parttbl
+ Filter: (a = 12)
+(5 rows)
+
+select * from
+ (select *, 12 as phv from parttbl) as ss
+ right join int4_tbl on true
+where ss.a = ss.phv and f1 = 0;
+ a | phv | f1
+----+-----+----
+ 12 | 12 | 0
+(1 row)
+
+-- bug #8444: we've historically allowed duplicate aliases within aliased JOINs
+select * from
+ int8_tbl x join (int4_tbl x cross join int4_tbl y) j on q1 = f1; -- error
+ERROR: column reference "f1" is ambiguous
+LINE 2: ..._tbl x join (int4_tbl x cross join int4_tbl y) j on q1 = f1;
+ ^
+select * from
+ int8_tbl x join (int4_tbl x cross join int4_tbl y) j on q1 = y.f1; -- error
+ERROR: invalid reference to FROM-clause entry for table "y"
+LINE 2: ...bl x join (int4_tbl x cross join int4_tbl y) j on q1 = y.f1;
+ ^
+HINT: There is an entry for table "y", but it cannot be referenced from this part of the query.
+select * from
+ int8_tbl x join (int4_tbl x cross join int4_tbl y(ff)) j on q1 = f1; -- ok
+ q1 | q2 | f1 | ff
+----+----+----+----
+(0 rows)
+
+--
+-- Test hints given on incorrect column references are useful
+--
+select t1.uunique1 from
+ tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion
+ERROR: column t1.uunique1 does not exist
+LINE 1: select t1.uunique1 from
+ ^
+HINT: Perhaps you meant to reference the column "t1.unique1".
+select t2.uunique1 from
+ tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t2" suggestion
+ERROR: column t2.uunique1 does not exist
+LINE 1: select t2.uunique1 from
+ ^
+HINT: Perhaps you meant to reference the column "t2.unique1".
+select uunique1 from
+ tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, suggest both at once
+ERROR: column "uunique1" does not exist
+LINE 1: select uunique1 from
+ ^
+HINT: Perhaps you meant to reference the column "t1.unique1" or the column "t2.unique1".
+--
+-- Take care to reference the correct RTE
+--
+select atts.relid::regclass, s.* from pg_stats s join
+ pg_attribute a on s.attname = a.attname and s.tablename =
+ a.attrelid::regclass::text join (select unnest(indkey) attnum,
+ indexrelid from pg_index i) atts on atts.attnum = a.attnum where
+ schemaname != 'pg_catalog';
+ERROR: column atts.relid does not exist
+LINE 1: select atts.relid::regclass, s.* from pg_stats s join
+ ^
+--
+-- Test LATERAL
+--
+select unique2, x.*
+from tenk1 a, lateral (select * from int4_tbl b where f1 = a.unique1) x;
+ unique2 | f1
+---------+----
+ 9998 | 0
+(1 row)
+
+explain (costs off)
+ select unique2, x.*
+ from tenk1 a, lateral (select * from int4_tbl b where f1 = a.unique1) x;
+ QUERY PLAN
+-------------------------------------------------
+ Nested Loop
+ -> Seq Scan on int4_tbl b
+ -> Index Scan using tenk1_unique1 on tenk1 a
+ Index Cond: (unique1 = b.f1)
+(4 rows)
+
+select unique2, x.*
+from int4_tbl x, lateral (select unique2 from tenk1 where f1 = unique1) ss;
+ unique2 | f1
+---------+----
+ 9998 | 0
+(1 row)
+
+explain (costs off)
+ select unique2, x.*
+ from int4_tbl x, lateral (select unique2 from tenk1 where f1 = unique1) ss;
+ QUERY PLAN
+-----------------------------------------------
+ Nested Loop
+ -> Seq Scan on int4_tbl x
+ -> Index Scan using tenk1_unique1 on tenk1
+ Index Cond: (unique1 = x.f1)
+(4 rows)
+
+explain (costs off)
+ select unique2, x.*
+ from int4_tbl x cross join lateral (select unique2 from tenk1 where f1 = unique1) ss;
+ QUERY PLAN
+-----------------------------------------------
+ Nested Loop
+ -> Seq Scan on int4_tbl x
+ -> Index Scan using tenk1_unique1 on tenk1
+ Index Cond: (unique1 = x.f1)
+(4 rows)
+
+select unique2, x.*
+from int4_tbl x left join lateral (select unique1, unique2 from tenk1 where f1 = unique1) ss on true;
+ unique2 | f1
+---------+-------------
+ 9998 | 0
+ | 123456
+ | -123456
+ | 2147483647
+ | -2147483647
+(5 rows)
+
+explain (costs off)
+ select unique2, x.*
+ from int4_tbl x left join lateral (select unique1, unique2 from tenk1 where f1 = unique1) ss on true;
+ QUERY PLAN
+-----------------------------------------------
+ Nested Loop Left Join
+ -> Seq Scan on int4_tbl x
+ -> Index Scan using tenk1_unique1 on tenk1
+ Index Cond: (unique1 = x.f1)
+(4 rows)
+
+-- check scoping of lateral versus parent references
+-- the first of these should return int8_tbl.q2, the second int8_tbl.q1
+select *, (select r from (select q1 as q2) x, (select q2 as r) y) from int8_tbl;
+ q1 | q2 | r
+------------------+-------------------+-------------------
+ 123 | 456 | 456
+ 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | -4567890123456789
+(5 rows)
+
+select *, (select r from (select q1 as q2) x, lateral (select q2 as r) y) from int8_tbl;
+ q1 | q2 | r
+------------------+-------------------+------------------
+ 123 | 456 | 123
+ 123 | 4567890123456789 | 123
+ 4567890123456789 | 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 4567890123456789
+(5 rows)
+
+-- lateral with function in FROM
+select count(*) from tenk1 a, lateral generate_series(1,two) g;
+ count
+-------
+ 5000
+(1 row)
+
+explain (costs off)
+ select count(*) from tenk1 a, lateral generate_series(1,two) g;
+ QUERY PLAN
+------------------------------------------------------
+ Aggregate
+ -> Nested Loop
+ -> Seq Scan on tenk1 a
+ -> Memoize
+ Cache Key: a.two
+ Cache Mode: binary
+ -> Function Scan on generate_series g
+(7 rows)
+
+explain (costs off)
+ select count(*) from tenk1 a cross join lateral generate_series(1,two) g;
+ QUERY PLAN
+------------------------------------------------------
+ Aggregate
+ -> Nested Loop
+ -> Seq Scan on tenk1 a
+ -> Memoize
+ Cache Key: a.two
+ Cache Mode: binary
+ -> Function Scan on generate_series g
+(7 rows)
+
+-- don't need the explicit LATERAL keyword for functions
+explain (costs off)
+ select count(*) from tenk1 a, generate_series(1,two) g;
+ QUERY PLAN
+------------------------------------------------------
+ Aggregate
+ -> Nested Loop
+ -> Seq Scan on tenk1 a
+ -> Memoize
+ Cache Key: a.two
+ Cache Mode: binary
+ -> Function Scan on generate_series g
+(7 rows)
+
+-- lateral with UNION ALL subselect
+explain (costs off)
+ select * from generate_series(100,200) g,
+ lateral (select * from int8_tbl a where g = q1 union all
+ select * from int8_tbl b where g = q2) ss;
+ QUERY PLAN
+------------------------------------------
+ Nested Loop
+ -> Function Scan on generate_series g
+ -> Append
+ -> Seq Scan on int8_tbl a
+ Filter: (g.g = q1)
+ -> Seq Scan on int8_tbl b
+ Filter: (g.g = q2)
+(7 rows)
+
+select * from generate_series(100,200) g,
+ lateral (select * from int8_tbl a where g = q1 union all
+ select * from int8_tbl b where g = q2) ss;
+ g | q1 | q2
+-----+------------------+------------------
+ 123 | 123 | 456
+ 123 | 123 | 4567890123456789
+ 123 | 4567890123456789 | 123
+(3 rows)
+
+-- lateral with VALUES
+explain (costs off)
+ select count(*) from tenk1 a,
+ tenk1 b join lateral (values(a.unique1)) ss(x) on b.unique2 = ss.x;
+ QUERY PLAN
+------------------------------------------------------------
+ Aggregate
+ -> Merge Join
+ Merge Cond: (a.unique1 = b.unique2)
+ -> Index Only Scan using tenk1_unique1 on tenk1 a
+ -> Index Only Scan using tenk1_unique2 on tenk1 b
+(5 rows)
+
+select count(*) from tenk1 a,
+ tenk1 b join lateral (values(a.unique1)) ss(x) on b.unique2 = ss.x;
+ count
+-------
+ 10000
+(1 row)
+
+-- lateral with VALUES, no flattening possible
+explain (costs off)
+ select count(*) from tenk1 a,
+ tenk1 b join lateral (values(a.unique1),(-1)) ss(x) on b.unique2 = ss.x;
+ QUERY PLAN
+------------------------------------------------------------------
+ Aggregate
+ -> Nested Loop
+ -> Nested Loop
+ -> Index Only Scan using tenk1_unique1 on tenk1 a
+ -> Values Scan on "*VALUES*"
+ -> Memoize
+ Cache Key: "*VALUES*".column1
+ Cache Mode: logical
+ -> Index Only Scan using tenk1_unique2 on tenk1 b
+ Index Cond: (unique2 = "*VALUES*".column1)
+(10 rows)
+
+select count(*) from tenk1 a,
+ tenk1 b join lateral (values(a.unique1),(-1)) ss(x) on b.unique2 = ss.x;
+ count
+-------
+ 10000
+(1 row)
+
+-- lateral injecting a strange outer join condition
+explain (costs off)
+ select * from int8_tbl a,
+ int8_tbl x left join lateral (select a.q1 from int4_tbl y) ss(z)
+ on x.q2 = ss.z
+ order by a.q1, a.q2, x.q1, x.q2, ss.z;
+ QUERY PLAN
+------------------------------------------------
+ Sort
+ Sort Key: a.q1, a.q2, x.q1, x.q2, (a.q1)
+ -> Nested Loop
+ -> Seq Scan on int8_tbl a
+ -> Hash Right Join
+ Hash Cond: ((a.q1) = x.q2)
+ -> Seq Scan on int4_tbl y
+ -> Hash
+ -> Seq Scan on int8_tbl x
+(9 rows)
+
+select * from int8_tbl a,
+ int8_tbl x left join lateral (select a.q1 from int4_tbl y) ss(z)
+ on x.q2 = ss.z
+ order by a.q1, a.q2, x.q1, x.q2, ss.z;
+ q1 | q2 | q1 | q2 | z
+------------------+-------------------+------------------+-------------------+------------------
+ 123 | 456 | 123 | 456 |
+ 123 | 456 | 123 | 4567890123456789 |
+ 123 | 456 | 4567890123456789 | -4567890123456789 |
+ 123 | 456 | 4567890123456789 | 123 | 123
+ 123 | 456 | 4567890123456789 | 123 | 123
+ 123 | 456 | 4567890123456789 | 123 | 123
+ 123 | 456 | 4567890123456789 | 123 | 123
+ 123 | 456 | 4567890123456789 | 123 | 123
+ 123 | 456 | 4567890123456789 | 4567890123456789 |
+ 123 | 4567890123456789 | 123 | 456 |
+ 123 | 4567890123456789 | 123 | 4567890123456789 |
+ 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 |
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 |
+ 4567890123456789 | -4567890123456789 | 123 | 456 |
+ 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 4567890123456789 | -4567890123456789 |
+ 4567890123456789 | -4567890123456789 | 4567890123456789 | 123 |
+ 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 123 | 456 |
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 4567890123456789 | -4567890123456789 |
+ 4567890123456789 | 123 | 4567890123456789 | 123 |
+ 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 123 | 456 |
+ 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 |
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 |
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+(57 rows)
+
+-- lateral reference to a join alias variable
+select * from (select f1/2 as x from int4_tbl) ss1 join int4_tbl i4 on x = f1,
+ lateral (select x) ss2(y);
+ x | f1 | y
+---+----+---
+ 0 | 0 | 0
+(1 row)
+
+select * from (select f1 as x from int4_tbl) ss1 join int4_tbl i4 on x = f1,
+ lateral (values(x)) ss2(y);
+ x | f1 | y
+-------------+-------------+-------------
+ 0 | 0 | 0
+ 123456 | 123456 | 123456
+ -123456 | -123456 | -123456
+ 2147483647 | 2147483647 | 2147483647
+ -2147483647 | -2147483647 | -2147483647
+(5 rows)
+
+select * from ((select f1/2 as x from int4_tbl) ss1 join int4_tbl i4 on x = f1) j,
+ lateral (select x) ss2(y);
+ x | f1 | y
+---+----+---
+ 0 | 0 | 0
+(1 row)
+
+-- lateral references requiring pullup
+select * from (values(1)) x(lb),
+ lateral generate_series(lb,4) x4;
+ lb | x4
+----+----
+ 1 | 1
+ 1 | 2
+ 1 | 3
+ 1 | 4
+(4 rows)
+
+select * from (select f1/1000000000 from int4_tbl) x(lb),
+ lateral generate_series(lb,4) x4;
+ lb | x4
+----+----
+ 0 | 0
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 | 4
+ 0 | 0
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 | 4
+ 0 | 0
+ 0 | 1
+ 0 | 2
+ 0 | 3
+ 0 | 4
+ 2 | 2
+ 2 | 3
+ 2 | 4
+ -2 | -2
+ -2 | -1
+ -2 | 0
+ -2 | 1
+ -2 | 2
+ -2 | 3
+ -2 | 4
+(25 rows)
+
+select * from (values(1)) x(lb),
+ lateral (values(lb)) y(lbcopy);
+ lb | lbcopy
+----+--------
+ 1 | 1
+(1 row)
+
+select * from (values(1)) x(lb),
+ lateral (select lb from int4_tbl) y(lbcopy);
+ lb | lbcopy
+----+--------
+ 1 | 1
+ 1 | 1
+ 1 | 1
+ 1 | 1
+ 1 | 1
+(5 rows)
+
+select * from
+ int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1,
+ lateral (values(x.q1,y.q1,y.q2)) v(xq1,yq1,yq2);
+ q1 | q2 | q1 | q2 | xq1 | yq1 | yq2
+------------------+-------------------+------------------+-------------------+------------------+------------------+-------------------
+ 123 | 456 | | | 123 | |
+ 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | -4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789
+ 4567890123456789 | 123 | 123 | 456 | 4567890123456789 | 123 | 456
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 123
+ 4567890123456789 | -4567890123456789 | | | 4567890123456789 | |
+(10 rows)
+
+select * from
+ int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1,
+ lateral (select x.q1,y.q1,y.q2) v(xq1,yq1,yq2);
+ q1 | q2 | q1 | q2 | xq1 | yq1 | yq2
+------------------+-------------------+------------------+-------------------+------------------+------------------+-------------------
+ 123 | 456 | | | 123 | |
+ 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | -4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789
+ 4567890123456789 | 123 | 123 | 456 | 4567890123456789 | 123 | 456
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 123
+ 4567890123456789 | -4567890123456789 | | | 4567890123456789 | |
+(10 rows)
+
+select x.* from
+ int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1,
+ lateral (select x.q1,y.q1,y.q2) v(xq1,yq1,yq2);
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 123 | 4567890123456789
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(10 rows)
+
+select v.* from
+ (int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1)
+ left join int4_tbl z on z.f1 = x.q2,
+ lateral (select x.q1,y.q1 union all select x.q2,y.q2) v(vx,vy);
+ vx | vy
+-------------------+-------------------
+ 123 |
+ 456 |
+ 123 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 123
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 123 | 456
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 |
+ -4567890123456789 |
+(20 rows)
+
+select v.* from
+ (int8_tbl x left join (select q1,(select coalesce(q2,0)) q2 from int8_tbl) y on x.q2 = y.q1)
+ left join int4_tbl z on z.f1 = x.q2,
+ lateral (select x.q1,y.q1 union all select x.q2,y.q2) v(vx,vy);
+ vx | vy
+-------------------+-------------------
+ 4567890123456789 | 123
+ 123 | 456
+ 4567890123456789 | 123
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+ 123 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+ 123 |
+ 456 |
+ 4567890123456789 |
+ -4567890123456789 |
+(20 rows)
+
+select v.* from
+ (int8_tbl x left join (select q1,(select coalesce(q2,0)) q2 from int8_tbl) y on x.q2 = y.q1)
+ left join int4_tbl z on z.f1 = x.q2,
+ lateral (select x.q1,y.q1 from onerow union all select x.q2,y.q2 from onerow) v(vx,vy);
+ vx | vy
+-------------------+-------------------
+ 4567890123456789 | 123
+ 123 | 456
+ 4567890123456789 | 123
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+ 123 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+ 123 |
+ 456 |
+ 4567890123456789 |
+ -4567890123456789 |
+(20 rows)
+
+explain (verbose, costs off)
+select * from
+ int8_tbl a left join
+ lateral (select *, a.q2 as x from int8_tbl b) ss on a.q2 = ss.q1;
+ QUERY PLAN
+------------------------------------------
+ Nested Loop Left Join
+ Output: a.q1, a.q2, b.q1, b.q2, (a.q2)
+ -> Seq Scan on public.int8_tbl a
+ Output: a.q1, a.q2
+ -> Seq Scan on public.int8_tbl b
+ Output: b.q1, b.q2, a.q2
+ Filter: (a.q2 = b.q1)
+(7 rows)
+
+select * from
+ int8_tbl a left join
+ lateral (select *, a.q2 as x from int8_tbl b) ss on a.q2 = ss.q1;
+ q1 | q2 | q1 | q2 | x
+------------------+-------------------+------------------+-------------------+------------------
+ 123 | 456 | | |
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 123 | 456 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | | |
+(10 rows)
+
+explain (verbose, costs off)
+select * from
+ int8_tbl a left join
+ lateral (select *, coalesce(a.q2, 42) as x from int8_tbl b) ss on a.q2 = ss.q1;
+ QUERY PLAN
+------------------------------------------------------------------
+ Nested Loop Left Join
+ Output: a.q1, a.q2, b.q1, b.q2, (COALESCE(a.q2, '42'::bigint))
+ -> Seq Scan on public.int8_tbl a
+ Output: a.q1, a.q2
+ -> Seq Scan on public.int8_tbl b
+ Output: b.q1, b.q2, COALESCE(a.q2, '42'::bigint)
+ Filter: (a.q2 = b.q1)
+(7 rows)
+
+select * from
+ int8_tbl a left join
+ lateral (select *, coalesce(a.q2, 42) as x from int8_tbl b) ss on a.q2 = ss.q1;
+ q1 | q2 | q1 | q2 | x
+------------------+-------------------+------------------+-------------------+------------------
+ 123 | 456 | | |
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789
+ 4567890123456789 | 123 | 123 | 456 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | | |
+(10 rows)
+
+-- lateral can result in join conditions appearing below their
+-- real semantic level
+explain (verbose, costs off)
+select * from int4_tbl i left join
+ lateral (select * from int2_tbl j where i.f1 = j.f1) k on true;
+ QUERY PLAN
+-------------------------------------------
+ Hash Left Join
+ Output: i.f1, j.f1
+ Hash Cond: (i.f1 = j.f1)
+ -> Seq Scan on public.int4_tbl i
+ Output: i.f1
+ -> Hash
+ Output: j.f1
+ -> Seq Scan on public.int2_tbl j
+ Output: j.f1
+(9 rows)
+
+select * from int4_tbl i left join
+ lateral (select * from int2_tbl j where i.f1 = j.f1) k on true;
+ f1 | f1
+-------------+----
+ 0 | 0
+ 123456 |
+ -123456 |
+ 2147483647 |
+ -2147483647 |
+(5 rows)
+
+explain (verbose, costs off)
+select * from int4_tbl i left join
+ lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true;
+ QUERY PLAN
+-------------------------------------
+ Nested Loop Left Join
+ Output: i.f1, (COALESCE(i.*))
+ -> Seq Scan on public.int4_tbl i
+ Output: i.f1, i.*
+ -> Seq Scan on public.int2_tbl j
+ Output: j.f1, COALESCE(i.*)
+ Filter: (i.f1 = j.f1)
+(7 rows)
+
+select * from int4_tbl i left join
+ lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true;
+ f1 | coalesce
+-------------+----------
+ 0 | (0)
+ 123456 |
+ -123456 |
+ 2147483647 |
+ -2147483647 |
+(5 rows)
+
+explain (verbose, costs off)
+select * from int4_tbl a,
+ lateral (
+ select * from int4_tbl b left join int8_tbl c on (b.f1 = q1 and a.f1 = q2)
+ ) ss;
+ QUERY PLAN
+-------------------------------------------------
+ Nested Loop
+ Output: a.f1, b.f1, c.q1, c.q2
+ -> Seq Scan on public.int4_tbl a
+ Output: a.f1
+ -> Hash Left Join
+ Output: b.f1, c.q1, c.q2
+ Hash Cond: (b.f1 = c.q1)
+ -> Seq Scan on public.int4_tbl b
+ Output: b.f1
+ -> Hash
+ Output: c.q1, c.q2
+ -> Seq Scan on public.int8_tbl c
+ Output: c.q1, c.q2
+ Filter: (a.f1 = c.q2)
+(14 rows)
+
+select * from int4_tbl a,
+ lateral (
+ select * from int4_tbl b left join int8_tbl c on (b.f1 = q1 and a.f1 = q2)
+ ) ss;
+ f1 | f1 | q1 | q2
+-------------+-------------+----+----
+ 0 | 0 | |
+ 0 | 123456 | |
+ 0 | -123456 | |
+ 0 | 2147483647 | |
+ 0 | -2147483647 | |
+ 123456 | 0 | |
+ 123456 | 123456 | |
+ 123456 | -123456 | |
+ 123456 | 2147483647 | |
+ 123456 | -2147483647 | |
+ -123456 | 0 | |
+ -123456 | 123456 | |
+ -123456 | -123456 | |
+ -123456 | 2147483647 | |
+ -123456 | -2147483647 | |
+ 2147483647 | 0 | |
+ 2147483647 | 123456 | |
+ 2147483647 | -123456 | |
+ 2147483647 | 2147483647 | |
+ 2147483647 | -2147483647 | |
+ -2147483647 | 0 | |
+ -2147483647 | 123456 | |
+ -2147483647 | -123456 | |
+ -2147483647 | 2147483647 | |
+ -2147483647 | -2147483647 | |
+(25 rows)
+
+-- lateral reference in a PlaceHolderVar evaluated at join level
+explain (verbose, costs off)
+select * from
+ int8_tbl a left join lateral
+ (select b.q1 as bq1, c.q1 as cq1, least(a.q1,b.q1,c.q1) from
+ int8_tbl b cross join int8_tbl c) ss
+ on a.q2 = ss.bq1;
+ QUERY PLAN
+-------------------------------------------------------------
+ Nested Loop Left Join
+ Output: a.q1, a.q2, b.q1, c.q1, (LEAST(a.q1, b.q1, c.q1))
+ -> Seq Scan on public.int8_tbl a
+ Output: a.q1, a.q2
+ -> Nested Loop
+ Output: b.q1, c.q1, LEAST(a.q1, b.q1, c.q1)
+ -> Seq Scan on public.int8_tbl b
+ Output: b.q1, b.q2
+ Filter: (a.q2 = b.q1)
+ -> Seq Scan on public.int8_tbl c
+ Output: c.q1, c.q2
+(11 rows)
+
+select * from
+ int8_tbl a left join lateral
+ (select b.q1 as bq1, c.q1 as cq1, least(a.q1,b.q1,c.q1) from
+ int8_tbl b cross join int8_tbl c) ss
+ on a.q2 = ss.bq1;
+ q1 | q2 | bq1 | cq1 | least
+------------------+-------------------+------------------+------------------+------------------
+ 123 | 456 | | |
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123
+ 4567890123456789 | 123 | 123 | 123 | 123
+ 4567890123456789 | 123 | 123 | 123 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 123 | 123 | 123 | 123
+ 4567890123456789 | 123 | 123 | 123 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 123 | 123 | 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789 | | |
+(42 rows)
+
+-- case requiring nested PlaceHolderVars
+explain (verbose, costs off)
+select * from
+ int8_tbl c left join (
+ int8_tbl a left join (select q1, coalesce(q2,42) as x from int8_tbl b) ss1
+ on a.q2 = ss1.q1
+ cross join
+ lateral (select q1, coalesce(ss1.x,q2) as y from int8_tbl d) ss2
+ ) on c.q2 = ss2.q1,
+ lateral (select ss2.y offset 0) ss3;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: c.q1, c.q2, a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint)), d.q1, (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)), ((COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)))
+ -> Hash Right Join
+ Output: c.q1, c.q2, a.q1, a.q2, b.q1, d.q1, (COALESCE(b.q2, '42'::bigint)), (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))
+ Hash Cond: (d.q1 = c.q2)
+ -> Nested Loop
+ Output: a.q1, a.q2, b.q1, d.q1, (COALESCE(b.q2, '42'::bigint)), (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))
+ -> Hash Left Join
+ Output: a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint))
+ Hash Cond: (a.q2 = b.q1)
+ -> Seq Scan on public.int8_tbl a
+ Output: a.q1, a.q2
+ -> Hash
+ Output: b.q1, (COALESCE(b.q2, '42'::bigint))
+ -> Seq Scan on public.int8_tbl b
+ Output: b.q1, COALESCE(b.q2, '42'::bigint)
+ -> Seq Scan on public.int8_tbl d
+ Output: d.q1, COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)
+ -> Hash
+ Output: c.q1, c.q2
+ -> Seq Scan on public.int8_tbl c
+ Output: c.q1, c.q2
+ -> Result
+ Output: (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))
+(24 rows)
+
+-- case that breaks the old ph_may_need optimization
+explain (verbose, costs off)
+select c.*,a.*,ss1.q1,ss2.q1,ss3.* from
+ int8_tbl c left join (
+ int8_tbl a left join
+ (select q1, coalesce(q2,f1) as x from int8_tbl b, int4_tbl b2
+ where q1 < f1) ss1
+ on a.q2 = ss1.q1
+ cross join
+ lateral (select q1, coalesce(ss1.x,q2) as y from int8_tbl d) ss2
+ ) on c.q2 = ss2.q1,
+ lateral (select * from int4_tbl i where ss2.y > f1) ss3;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: c.q1, c.q2, a.q1, a.q2, b.q1, d.q1, i.f1
+ Join Filter: ((COALESCE((COALESCE(b.q2, (b2.f1)::bigint)), d.q2)) > i.f1)
+ -> Hash Right Join
+ Output: c.q1, c.q2, a.q1, a.q2, b.q1, d.q1, (COALESCE((COALESCE(b.q2, (b2.f1)::bigint)), d.q2))
+ Hash Cond: (d.q1 = c.q2)
+ -> Nested Loop
+ Output: a.q1, a.q2, b.q1, d.q1, (COALESCE((COALESCE(b.q2, (b2.f1)::bigint)), d.q2))
+ -> Hash Right Join
+ Output: a.q1, a.q2, b.q1, (COALESCE(b.q2, (b2.f1)::bigint))
+ Hash Cond: (b.q1 = a.q2)
+ -> Nested Loop
+ Output: b.q1, COALESCE(b.q2, (b2.f1)::bigint)
+ Join Filter: (b.q1 < b2.f1)
+ -> Seq Scan on public.int8_tbl b
+ Output: b.q1, b.q2
+ -> Materialize
+ Output: b2.f1
+ -> Seq Scan on public.int4_tbl b2
+ Output: b2.f1
+ -> Hash
+ Output: a.q1, a.q2
+ -> Seq Scan on public.int8_tbl a
+ Output: a.q1, a.q2
+ -> Seq Scan on public.int8_tbl d
+ Output: d.q1, COALESCE((COALESCE(b.q2, (b2.f1)::bigint)), d.q2)
+ -> Hash
+ Output: c.q1, c.q2
+ -> Seq Scan on public.int8_tbl c
+ Output: c.q1, c.q2
+ -> Materialize
+ Output: i.f1
+ -> Seq Scan on public.int4_tbl i
+ Output: i.f1
+(34 rows)
+
+-- check processing of postponed quals (bug #9041)
+explain (verbose, costs off)
+select * from
+ (select 1 as x offset 0) x cross join (select 2 as y offset 0) y
+ left join lateral (
+ select * from (select 3 as z offset 0) z where z.z = x.x
+ ) zz on zz.z = y.y;
+ QUERY PLAN
+----------------------------------------------
+ Nested Loop Left Join
+ Output: (1), (2), (3)
+ Join Filter: (((3) = (1)) AND ((3) = (2)))
+ -> Nested Loop
+ Output: (1), (2)
+ -> Result
+ Output: 1
+ -> Result
+ Output: 2
+ -> Result
+ Output: 3
+(11 rows)
+
+-- check dummy rels with lateral references (bug #15694)
+explain (verbose, costs off)
+select * from int8_tbl i8 left join lateral
+ (select *, i8.q2 from int4_tbl where false) ss on true;
+ QUERY PLAN
+--------------------------------------
+ Nested Loop Left Join
+ Output: i8.q1, i8.q2, f1, (i8.q2)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ -> Result
+ Output: f1, i8.q2
+ One-Time Filter: false
+(7 rows)
+
+explain (verbose, costs off)
+select * from int8_tbl i8 left join lateral
+ (select *, i8.q2 from int4_tbl i1, int4_tbl i2 where false) ss on true;
+ QUERY PLAN
+-----------------------------------------
+ Nested Loop Left Join
+ Output: i8.q1, i8.q2, f1, f1, (i8.q2)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ -> Result
+ Output: f1, f1, i8.q2
+ One-Time Filter: false
+(7 rows)
+
+-- check handling of nested appendrels inside LATERAL
+select * from
+ ((select 2 as v) union all (select 3 as v)) as q1
+ cross join lateral
+ ((select * from
+ ((select 4 as v) union all (select 5 as v)) as q3)
+ union all
+ (select q1.v)
+ ) as q2;
+ v | v
+---+---
+ 2 | 4
+ 2 | 5
+ 2 | 2
+ 3 | 4
+ 3 | 5
+ 3 | 3
+(6 rows)
+
+-- check the number of columns specified
+SELECT * FROM (int8_tbl i cross join int4_tbl j) ss(a,b,c,d);
+ERROR: join expression "ss" has 3 columns available but 4 columns specified
+-- check we don't try to do a unique-ified semijoin with LATERAL
+explain (verbose, costs off)
+select * from
+ (values (0,9998), (1,1000)) v(id,x),
+ lateral (select f1 from int4_tbl
+ where f1 = any (select unique1 from tenk1
+ where unique2 = v.x offset 0)) ss;
+ QUERY PLAN
+----------------------------------------------------------------------
+ Nested Loop
+ Output: "*VALUES*".column1, "*VALUES*".column2, int4_tbl.f1
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1, "*VALUES*".column2
+ -> Nested Loop Semi Join
+ Output: int4_tbl.f1
+ Join Filter: (int4_tbl.f1 = tenk1.unique1)
+ -> Seq Scan on public.int4_tbl
+ Output: int4_tbl.f1
+ -> Materialize
+ Output: tenk1.unique1
+ -> Index Scan using tenk1_unique2 on public.tenk1
+ Output: tenk1.unique1
+ Index Cond: (tenk1.unique2 = "*VALUES*".column2)
+(14 rows)
+
+select * from
+ (values (0,9998), (1,1000)) v(id,x),
+ lateral (select f1 from int4_tbl
+ where f1 = any (select unique1 from tenk1
+ where unique2 = v.x offset 0)) ss;
+ id | x | f1
+----+------+----
+ 0 | 9998 | 0
+(1 row)
+
+-- check proper extParam/allParam handling (this isn't exactly a LATERAL issue,
+-- but we can make the test case much more compact with LATERAL)
+explain (verbose, costs off)
+select * from (values (0), (1)) v(id),
+lateral (select * from int8_tbl t1,
+ lateral (select * from
+ (select * from int8_tbl t2
+ where q1 = any (select q2 from int8_tbl t3
+ where q2 = (select greatest(t1.q1,t2.q2))
+ and (select v.id=0)) offset 0) ss2) ss
+ where t1.q1 = ss.q2) ss0;
+ QUERY PLAN
+-----------------------------------------------------------------
+ Nested Loop
+ Output: "*VALUES*".column1, t1.q1, t1.q2, ss2.q1, ss2.q2
+ -> Seq Scan on public.int8_tbl t1
+ Output: t1.q1, t1.q2
+ -> Nested Loop
+ Output: "*VALUES*".column1, ss2.q1, ss2.q2
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1
+ -> Subquery Scan on ss2
+ Output: ss2.q1, ss2.q2
+ Filter: (t1.q1 = ss2.q2)
+ -> Seq Scan on public.int8_tbl t2
+ Output: t2.q1, t2.q2
+ Filter: (SubPlan 3)
+ SubPlan 3
+ -> Result
+ Output: t3.q2
+ One-Time Filter: $4
+ InitPlan 1 (returns $2)
+ -> Result
+ Output: GREATEST($0, t2.q2)
+ InitPlan 2 (returns $4)
+ -> Result
+ Output: ($3 = 0)
+ -> Seq Scan on public.int8_tbl t3
+ Output: t3.q1, t3.q2
+ Filter: (t3.q2 = $2)
+(27 rows)
+
+select * from (values (0), (1)) v(id),
+lateral (select * from int8_tbl t1,
+ lateral (select * from
+ (select * from int8_tbl t2
+ where q1 = any (select q2 from int8_tbl t3
+ where q2 = (select greatest(t1.q1,t2.q2))
+ and (select v.id=0)) offset 0) ss2) ss
+ where t1.q1 = ss.q2) ss0;
+ id | q1 | q2 | q1 | q2
+----+------------------+-------------------+------------------+------------------
+ 0 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789
+ 0 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789
+ 0 | 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789
+(3 rows)
+
+-- test some error cases where LATERAL should have been used but wasn't
+select f1,g from int4_tbl a, (select f1 as g) ss;
+ERROR: column "f1" does not exist
+LINE 1: select f1,g from int4_tbl a, (select f1 as g) ss;
+ ^
+HINT: There is a column named "f1" in table "a", but it cannot be referenced from this part of the query.
+select f1,g from int4_tbl a, (select a.f1 as g) ss;
+ERROR: invalid reference to FROM-clause entry for table "a"
+LINE 1: select f1,g from int4_tbl a, (select a.f1 as g) ss;
+ ^
+HINT: There is an entry for table "a", but it cannot be referenced from this part of the query.
+select f1,g from int4_tbl a cross join (select f1 as g) ss;
+ERROR: column "f1" does not exist
+LINE 1: select f1,g from int4_tbl a cross join (select f1 as g) ss;
+ ^
+HINT: There is a column named "f1" in table "a", but it cannot be referenced from this part of the query.
+select f1,g from int4_tbl a cross join (select a.f1 as g) ss;
+ERROR: invalid reference to FROM-clause entry for table "a"
+LINE 1: select f1,g from int4_tbl a cross join (select a.f1 as g) ss...
+ ^
+HINT: There is an entry for table "a", but it cannot be referenced from this part of the query.
+-- SQL:2008 says the left table is in scope but illegal to access here
+select f1,g from int4_tbl a right join lateral generate_series(0, a.f1) g on true;
+ERROR: invalid reference to FROM-clause entry for table "a"
+LINE 1: ... int4_tbl a right join lateral generate_series(0, a.f1) g on...
+ ^
+DETAIL: The combining JOIN type must be INNER or LEFT for a LATERAL reference.
+select f1,g from int4_tbl a full join lateral generate_series(0, a.f1) g on true;
+ERROR: invalid reference to FROM-clause entry for table "a"
+LINE 1: ...m int4_tbl a full join lateral generate_series(0, a.f1) g on...
+ ^
+DETAIL: The combining JOIN type must be INNER or LEFT for a LATERAL reference.
+-- check we complain about ambiguous table references
+select * from
+ int8_tbl x cross join (int4_tbl x cross join lateral (select x.f1) ss);
+ERROR: table reference "x" is ambiguous
+LINE 2: ...cross join (int4_tbl x cross join lateral (select x.f1) ss);
+ ^
+-- LATERAL can be used to put an aggregate into the FROM clause of its query
+select 1 from tenk1 a, lateral (select max(a.unique1) from int4_tbl b) ss;
+ERROR: aggregate functions are not allowed in FROM clause of their own query level
+LINE 1: select 1 from tenk1 a, lateral (select max(a.unique1) from i...
+ ^
+-- check behavior of LATERAL in UPDATE/DELETE
+create temp table xx1 as select f1 as x1, -f1 as x2 from int4_tbl;
+-- error, can't do this:
+update xx1 set x2 = f1 from (select * from int4_tbl where f1 = x1) ss;
+ERROR: column "x1" does not exist
+LINE 1: ... set x2 = f1 from (select * from int4_tbl where f1 = x1) ss;
+ ^
+HINT: There is a column named "x1" in table "xx1", but it cannot be referenced from this part of the query.
+update xx1 set x2 = f1 from (select * from int4_tbl where f1 = xx1.x1) ss;
+ERROR: invalid reference to FROM-clause entry for table "xx1"
+LINE 1: ...t x2 = f1 from (select * from int4_tbl where f1 = xx1.x1) ss...
+ ^
+HINT: There is an entry for table "xx1", but it cannot be referenced from this part of the query.
+-- can't do it even with LATERAL:
+update xx1 set x2 = f1 from lateral (select * from int4_tbl where f1 = x1) ss;
+ERROR: invalid reference to FROM-clause entry for table "xx1"
+LINE 1: ...= f1 from lateral (select * from int4_tbl where f1 = x1) ss;
+ ^
+HINT: There is an entry for table "xx1", but it cannot be referenced from this part of the query.
+-- we might in future allow something like this, but for now it's an error:
+update xx1 set x2 = f1 from xx1, lateral (select * from int4_tbl where f1 = x1) ss;
+ERROR: table name "xx1" specified more than once
+-- also errors:
+delete from xx1 using (select * from int4_tbl where f1 = x1) ss;
+ERROR: column "x1" does not exist
+LINE 1: ...te from xx1 using (select * from int4_tbl where f1 = x1) ss;
+ ^
+HINT: There is a column named "x1" in table "xx1", but it cannot be referenced from this part of the query.
+delete from xx1 using (select * from int4_tbl where f1 = xx1.x1) ss;
+ERROR: invalid reference to FROM-clause entry for table "xx1"
+LINE 1: ...from xx1 using (select * from int4_tbl where f1 = xx1.x1) ss...
+ ^
+HINT: There is an entry for table "xx1", but it cannot be referenced from this part of the query.
+delete from xx1 using lateral (select * from int4_tbl where f1 = x1) ss;
+ERROR: invalid reference to FROM-clause entry for table "xx1"
+LINE 1: ...xx1 using lateral (select * from int4_tbl where f1 = x1) ss;
+ ^
+HINT: There is an entry for table "xx1", but it cannot be referenced from this part of the query.
+--
+-- test LATERAL reference propagation down a multi-level inheritance hierarchy
+-- produced for a multi-level partitioned table hierarchy.
+--
+create table join_pt1 (a int, b int, c varchar) partition by range(a);
+create table join_pt1p1 partition of join_pt1 for values from (0) to (100) partition by range(b);
+create table join_pt1p2 partition of join_pt1 for values from (100) to (200);
+create table join_pt1p1p1 partition of join_pt1p1 for values from (0) to (100);
+insert into join_pt1 values (1, 1, 'x'), (101, 101, 'y');
+create table join_ut1 (a int, b int, c varchar);
+insert into join_ut1 values (101, 101, 'y'), (2, 2, 'z');
+explain (verbose, costs off)
+select t1.b, ss.phv from join_ut1 t1 left join lateral
+ (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv
+ from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss
+ on t1.a = ss.t2a order by t1.a;
+ QUERY PLAN
+--------------------------------------------------------------------
+ Sort
+ Output: t1.b, (LEAST(t1.a, t2.a, t3.a)), t1.a
+ Sort Key: t1.a
+ -> Nested Loop Left Join
+ Output: t1.b, (LEAST(t1.a, t2.a, t3.a)), t1.a
+ -> Seq Scan on public.join_ut1 t1
+ Output: t1.a, t1.b, t1.c
+ -> Hash Join
+ Output: t2.a, LEAST(t1.a, t2.a, t3.a)
+ Hash Cond: (t3.b = t2.a)
+ -> Seq Scan on public.join_ut1 t3
+ Output: t3.a, t3.b, t3.c
+ -> Hash
+ Output: t2.a
+ -> Append
+ -> Seq Scan on public.join_pt1p1p1 t2_1
+ Output: t2_1.a
+ Filter: (t1.a = t2_1.a)
+ -> Seq Scan on public.join_pt1p2 t2_2
+ Output: t2_2.a
+ Filter: (t1.a = t2_2.a)
+(21 rows)
+
+select t1.b, ss.phv from join_ut1 t1 left join lateral
+ (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv
+ from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss
+ on t1.a = ss.t2a order by t1.a;
+ b | phv
+-----+-----
+ 2 |
+ 101 | 101
+(2 rows)
+
+drop table join_pt1;
+drop table join_ut1;
+--
+-- test estimation behavior with multi-column foreign key and constant qual
+--
+begin;
+create table fkest (x integer, x10 integer, x10b integer, x100 integer);
+insert into fkest select x, x/10, x/10, x/100 from generate_series(1,1000) x;
+create unique index on fkest(x, x10, x100);
+analyze fkest;
+explain (costs off)
+select * from fkest f1
+ join fkest f2 on (f1.x = f2.x and f1.x10 = f2.x10b and f1.x100 = f2.x100)
+ join fkest f3 on f1.x = f3.x
+ where f1.x100 = 2;
+ QUERY PLAN
+-----------------------------------------------------------
+ Nested Loop
+ -> Hash Join
+ Hash Cond: ((f2.x = f1.x) AND (f2.x10b = f1.x10))
+ -> Seq Scan on fkest f2
+ Filter: (x100 = 2)
+ -> Hash
+ -> Seq Scan on fkest f1
+ Filter: (x100 = 2)
+ -> Index Scan using fkest_x_x10_x100_idx on fkest f3
+ Index Cond: (x = f1.x)
+(10 rows)
+
+alter table fkest add constraint fk
+ foreign key (x, x10b, x100) references fkest (x, x10, x100);
+explain (costs off)
+select * from fkest f1
+ join fkest f2 on (f1.x = f2.x and f1.x10 = f2.x10b and f1.x100 = f2.x100)
+ join fkest f3 on f1.x = f3.x
+ where f1.x100 = 2;
+ QUERY PLAN
+-----------------------------------------------------
+ Hash Join
+ Hash Cond: ((f2.x = f1.x) AND (f2.x10b = f1.x10))
+ -> Hash Join
+ Hash Cond: (f3.x = f2.x)
+ -> Seq Scan on fkest f3
+ -> Hash
+ -> Seq Scan on fkest f2
+ Filter: (x100 = 2)
+ -> Hash
+ -> Seq Scan on fkest f1
+ Filter: (x100 = 2)
+(11 rows)
+
+rollback;
+--
+-- test that foreign key join estimation performs sanely for outer joins
+--
+begin;
+create table fkest (a int, b int, c int unique, primary key(a,b));
+create table fkest1 (a int, b int, primary key(a,b));
+insert into fkest select x/10, x%10, x from generate_series(1,1000) x;
+insert into fkest1 select x/10, x%10 from generate_series(1,1000) x;
+alter table fkest1
+ add constraint fkest1_a_b_fkey foreign key (a,b) references fkest;
+analyze fkest;
+analyze fkest1;
+explain (costs off)
+select *
+from fkest f
+ left join fkest1 f1 on f.a = f1.a and f.b = f1.b
+ left join fkest1 f2 on f.a = f2.a and f.b = f2.b
+ left join fkest1 f3 on f.a = f3.a and f.b = f3.b
+where f.c = 1;
+ QUERY PLAN
+------------------------------------------------------------------
+ Nested Loop Left Join
+ -> Nested Loop Left Join
+ -> Nested Loop Left Join
+ -> Index Scan using fkest_c_key on fkest f
+ Index Cond: (c = 1)
+ -> Index Only Scan using fkest1_pkey on fkest1 f1
+ Index Cond: ((a = f.a) AND (b = f.b))
+ -> Index Only Scan using fkest1_pkey on fkest1 f2
+ Index Cond: ((a = f.a) AND (b = f.b))
+ -> Index Only Scan using fkest1_pkey on fkest1 f3
+ Index Cond: ((a = f.a) AND (b = f.b))
+(11 rows)
+
+rollback;
+--
+-- test planner's ability to mark joins as unique
+--
+create table j1 (id int primary key);
+create table j2 (id int primary key);
+create table j3 (id int);
+insert into j1 values(1),(2),(3);
+insert into j2 values(1),(2),(3);
+insert into j3 values(1),(1);
+analyze j1;
+analyze j2;
+analyze j3;
+-- ensure join is properly marked as unique
+explain (verbose, costs off)
+select * from j1 inner join j2 on j1.id = j2.id;
+ QUERY PLAN
+-----------------------------------
+ Hash Join
+ Output: j1.id, j2.id
+ Inner Unique: true
+ Hash Cond: (j1.id = j2.id)
+ -> Seq Scan on public.j1
+ Output: j1.id
+ -> Hash
+ Output: j2.id
+ -> Seq Scan on public.j2
+ Output: j2.id
+(10 rows)
+
+-- ensure join is not unique when not an equi-join
+explain (verbose, costs off)
+select * from j1 inner join j2 on j1.id > j2.id;
+ QUERY PLAN
+-----------------------------------
+ Nested Loop
+ Output: j1.id, j2.id
+ Join Filter: (j1.id > j2.id)
+ -> Seq Scan on public.j1
+ Output: j1.id
+ -> Materialize
+ Output: j2.id
+ -> Seq Scan on public.j2
+ Output: j2.id
+(9 rows)
+
+-- ensure non-unique rel is not chosen as inner
+explain (verbose, costs off)
+select * from j1 inner join j3 on j1.id = j3.id;
+ QUERY PLAN
+-----------------------------------
+ Hash Join
+ Output: j1.id, j3.id
+ Inner Unique: true
+ Hash Cond: (j3.id = j1.id)
+ -> Seq Scan on public.j3
+ Output: j3.id
+ -> Hash
+ Output: j1.id
+ -> Seq Scan on public.j1
+ Output: j1.id
+(10 rows)
+
+-- ensure left join is marked as unique
+explain (verbose, costs off)
+select * from j1 left join j2 on j1.id = j2.id;
+ QUERY PLAN
+-----------------------------------
+ Hash Left Join
+ Output: j1.id, j2.id
+ Inner Unique: true
+ Hash Cond: (j1.id = j2.id)
+ -> Seq Scan on public.j1
+ Output: j1.id
+ -> Hash
+ Output: j2.id
+ -> Seq Scan on public.j2
+ Output: j2.id
+(10 rows)
+
+-- ensure right join is marked as unique
+explain (verbose, costs off)
+select * from j1 right join j2 on j1.id = j2.id;
+ QUERY PLAN
+-----------------------------------
+ Hash Left Join
+ Output: j1.id, j2.id
+ Inner Unique: true
+ Hash Cond: (j2.id = j1.id)
+ -> Seq Scan on public.j2
+ Output: j2.id
+ -> Hash
+ Output: j1.id
+ -> Seq Scan on public.j1
+ Output: j1.id
+(10 rows)
+
+-- ensure full join is marked as unique
+explain (verbose, costs off)
+select * from j1 full join j2 on j1.id = j2.id;
+ QUERY PLAN
+-----------------------------------
+ Hash Full Join
+ Output: j1.id, j2.id
+ Inner Unique: true
+ Hash Cond: (j1.id = j2.id)
+ -> Seq Scan on public.j1
+ Output: j1.id
+ -> Hash
+ Output: j2.id
+ -> Seq Scan on public.j2
+ Output: j2.id
+(10 rows)
+
+-- a clauseless (cross) join can't be unique
+explain (verbose, costs off)
+select * from j1 cross join j2;
+ QUERY PLAN
+-----------------------------------
+ Nested Loop
+ Output: j1.id, j2.id
+ -> Seq Scan on public.j1
+ Output: j1.id
+ -> Materialize
+ Output: j2.id
+ -> Seq Scan on public.j2
+ Output: j2.id
+(8 rows)
+
+-- ensure a natural join is marked as unique
+explain (verbose, costs off)
+select * from j1 natural join j2;
+ QUERY PLAN
+-----------------------------------
+ Hash Join
+ Output: j1.id
+ Inner Unique: true
+ Hash Cond: (j1.id = j2.id)
+ -> Seq Scan on public.j1
+ Output: j1.id
+ -> Hash
+ Output: j2.id
+ -> Seq Scan on public.j2
+ Output: j2.id
+(10 rows)
+
+-- ensure a distinct clause allows the inner to become unique
+explain (verbose, costs off)
+select * from j1
+inner join (select distinct id from j3) j3 on j1.id = j3.id;
+ QUERY PLAN
+-----------------------------------------
+ Nested Loop
+ Output: j1.id, j3.id
+ Inner Unique: true
+ Join Filter: (j1.id = j3.id)
+ -> Unique
+ Output: j3.id
+ -> Sort
+ Output: j3.id
+ Sort Key: j3.id
+ -> Seq Scan on public.j3
+ Output: j3.id
+ -> Seq Scan on public.j1
+ Output: j1.id
+(13 rows)
+
+-- ensure group by clause allows the inner to become unique
+explain (verbose, costs off)
+select * from j1
+inner join (select id from j3 group by id) j3 on j1.id = j3.id;
+ QUERY PLAN
+-----------------------------------------
+ Nested Loop
+ Output: j1.id, j3.id
+ Inner Unique: true
+ Join Filter: (j1.id = j3.id)
+ -> Group
+ Output: j3.id
+ Group Key: j3.id
+ -> Sort
+ Output: j3.id
+ Sort Key: j3.id
+ -> Seq Scan on public.j3
+ Output: j3.id
+ -> Seq Scan on public.j1
+ Output: j1.id
+(14 rows)
+
+drop table j1;
+drop table j2;
+drop table j3;
+-- test more complex permutations of unique joins
+create table j1 (id1 int, id2 int, primary key(id1,id2));
+create table j2 (id1 int, id2 int, primary key(id1,id2));
+create table j3 (id1 int, id2 int, primary key(id1,id2));
+insert into j1 values(1,1),(1,2);
+insert into j2 values(1,1);
+insert into j3 values(1,1);
+analyze j1;
+analyze j2;
+analyze j3;
+-- ensure there's no unique join when not all columns which are part of the
+-- unique index are seen in the join clause
+explain (verbose, costs off)
+select * from j1
+inner join j2 on j1.id1 = j2.id1;
+ QUERY PLAN
+------------------------------------------
+ Nested Loop
+ Output: j1.id1, j1.id2, j2.id1, j2.id2
+ Join Filter: (j1.id1 = j2.id1)
+ -> Seq Scan on public.j2
+ Output: j2.id1, j2.id2
+ -> Seq Scan on public.j1
+ Output: j1.id1, j1.id2
+(7 rows)
+
+-- ensure proper unique detection with multiple join quals
+explain (verbose, costs off)
+select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2;
+ QUERY PLAN
+----------------------------------------------------------
+ Nested Loop
+ Output: j1.id1, j1.id2, j2.id1, j2.id2
+ Inner Unique: true
+ Join Filter: ((j1.id1 = j2.id1) AND (j1.id2 = j2.id2))
+ -> Seq Scan on public.j2
+ Output: j2.id1, j2.id2
+ -> Seq Scan on public.j1
+ Output: j1.id1, j1.id2
+(8 rows)
+
+-- ensure we don't detect the join to be unique when quals are not part of the
+-- join condition
+explain (verbose, costs off)
+select * from j1
+inner join j2 on j1.id1 = j2.id1 where j1.id2 = 1;
+ QUERY PLAN
+------------------------------------------
+ Nested Loop
+ Output: j1.id1, j1.id2, j2.id1, j2.id2
+ Join Filter: (j1.id1 = j2.id1)
+ -> Seq Scan on public.j1
+ Output: j1.id1, j1.id2
+ Filter: (j1.id2 = 1)
+ -> Seq Scan on public.j2
+ Output: j2.id1, j2.id2
+(8 rows)
+
+-- as above, but for left joins.
+explain (verbose, costs off)
+select * from j1
+left join j2 on j1.id1 = j2.id1 where j1.id2 = 1;
+ QUERY PLAN
+------------------------------------------
+ Nested Loop Left Join
+ Output: j1.id1, j1.id2, j2.id1, j2.id2
+ Join Filter: (j1.id1 = j2.id1)
+ -> Seq Scan on public.j1
+ Output: j1.id1, j1.id2
+ Filter: (j1.id2 = 1)
+ -> Seq Scan on public.j2
+ Output: j2.id1, j2.id2
+(8 rows)
+
+-- validate logic in merge joins which skips mark and restore.
+-- it should only do this if all quals which were used to detect the unique
+-- are present as join quals, and not plain quals.
+set enable_nestloop to 0;
+set enable_hashjoin to 0;
+set enable_sort to 0;
+-- create indexes that will be preferred over the PKs to perform the join
+create index j1_id1_idx on j1 (id1) where id1 % 1000 = 1;
+create index j2_id1_idx on j2 (id1) where id1 % 1000 = 1;
+-- need an additional row in j2, if we want j2_id1_idx to be preferred
+insert into j2 values(1,2);
+analyze j2;
+explain (costs off) select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1;
+ QUERY PLAN
+-----------------------------------------
+ Merge Join
+ Merge Cond: (j1.id1 = j2.id1)
+ Join Filter: (j1.id2 = j2.id2)
+ -> Index Scan using j1_id1_idx on j1
+ -> Index Scan using j2_id1_idx on j2
+(5 rows)
+
+select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1;
+ id1 | id2 | id1 | id2
+-----+-----+-----+-----
+ 1 | 1 | 1 | 1
+ 1 | 2 | 1 | 2
+(2 rows)
+
+-- Exercise array keys mark/restore B-Tree code
+explain (costs off) select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 = any (array[1]);
+ QUERY PLAN
+----------------------------------------------------
+ Merge Join
+ Merge Cond: (j1.id1 = j2.id1)
+ Join Filter: (j1.id2 = j2.id2)
+ -> Index Scan using j1_id1_idx on j1
+ -> Index Scan using j2_id1_idx on j2
+ Index Cond: (id1 = ANY ('{1}'::integer[]))
+(6 rows)
+
+select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 = any (array[1]);
+ id1 | id2 | id1 | id2
+-----+-----+-----+-----
+ 1 | 1 | 1 | 1
+ 1 | 2 | 1 | 2
+(2 rows)
+
+-- Exercise array keys "find extreme element" B-Tree code
+explain (costs off) select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 >= any (array[1,5]);
+ QUERY PLAN
+-------------------------------------------------------
+ Merge Join
+ Merge Cond: (j1.id1 = j2.id1)
+ Join Filter: (j1.id2 = j2.id2)
+ -> Index Scan using j1_id1_idx on j1
+ -> Index Only Scan using j2_pkey on j2
+ Index Cond: (id1 >= ANY ('{1,5}'::integer[]))
+ Filter: ((id1 % 1000) = 1)
+(7 rows)
+
+select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 >= any (array[1,5]);
+ id1 | id2 | id1 | id2
+-----+-----+-----+-----
+ 1 | 1 | 1 | 1
+ 1 | 2 | 1 | 2
+(2 rows)
+
+reset enable_nestloop;
+reset enable_hashjoin;
+reset enable_sort;
+drop table j1;
+drop table j2;
+drop table j3;
+-- check that semijoin inner is not seen as unique for a portion of the outerrel
+explain (verbose, costs off)
+select t1.unique1, t2.hundred
+from onek t1, tenk1 t2
+where exists (select 1 from tenk1 t3
+ where t3.thousand = t1.unique1 and t3.tenthous = t2.hundred)
+ and t1.unique1 < 1;
+ QUERY PLAN
+---------------------------------------------------------------------------------
+ Nested Loop
+ Output: t1.unique1, t2.hundred
+ -> Hash Join
+ Output: t1.unique1, t3.tenthous
+ Hash Cond: (t3.thousand = t1.unique1)
+ -> HashAggregate
+ Output: t3.thousand, t3.tenthous
+ Group Key: t3.thousand, t3.tenthous
+ -> Index Only Scan using tenk1_thous_tenthous on public.tenk1 t3
+ Output: t3.thousand, t3.tenthous
+ -> Hash
+ Output: t1.unique1
+ -> Index Only Scan using onek_unique1 on public.onek t1
+ Output: t1.unique1
+ Index Cond: (t1.unique1 < 1)
+ -> Index Only Scan using tenk1_hundred on public.tenk1 t2
+ Output: t2.hundred
+ Index Cond: (t2.hundred = t3.tenthous)
+(18 rows)
+
+-- ... unless it actually is unique
+create table j3 as select unique1, tenthous from onek;
+vacuum analyze j3;
+create unique index on j3(unique1, tenthous);
+explain (verbose, costs off)
+select t1.unique1, t2.hundred
+from onek t1, tenk1 t2
+where exists (select 1 from j3
+ where j3.unique1 = t1.unique1 and j3.tenthous = t2.hundred)
+ and t1.unique1 < 1;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Nested Loop
+ Output: t1.unique1, t2.hundred
+ -> Nested Loop
+ Output: t1.unique1, j3.tenthous
+ -> Index Only Scan using onek_unique1 on public.onek t1
+ Output: t1.unique1
+ Index Cond: (t1.unique1 < 1)
+ -> Index Only Scan using j3_unique1_tenthous_idx on public.j3
+ Output: j3.unique1, j3.tenthous
+ Index Cond: (j3.unique1 = t1.unique1)
+ -> Index Only Scan using tenk1_hundred on public.tenk1 t2
+ Output: t2.hundred
+ Index Cond: (t2.hundred = j3.tenthous)
+(13 rows)
+
+drop table j3;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/join.sql b/ydb/library/yql/tests/postgresql/original/cases/join.sql
new file mode 100644
index 0000000000..eb24a727e7
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/join.sql
@@ -0,0 +1,2325 @@
+--
+-- JOIN
+-- Test JOIN clauses
+--
+
+CREATE TABLE J1_TBL (
+ i integer,
+ j integer,
+ t text
+);
+
+CREATE TABLE J2_TBL (
+ i integer,
+ k integer
+);
+
+
+INSERT INTO J1_TBL VALUES (1, 4, 'one');
+INSERT INTO J1_TBL VALUES (2, 3, 'two');
+INSERT INTO J1_TBL VALUES (3, 2, 'three');
+INSERT INTO J1_TBL VALUES (4, 1, 'four');
+INSERT INTO J1_TBL VALUES (5, 0, 'five');
+INSERT INTO J1_TBL VALUES (6, 6, 'six');
+INSERT INTO J1_TBL VALUES (7, 7, 'seven');
+INSERT INTO J1_TBL VALUES (8, 8, 'eight');
+INSERT INTO J1_TBL VALUES (0, NULL, 'zero');
+INSERT INTO J1_TBL VALUES (NULL, NULL, 'null');
+INSERT INTO J1_TBL VALUES (NULL, 0, 'zero');
+
+INSERT INTO J2_TBL VALUES (1, -1);
+INSERT INTO J2_TBL VALUES (2, 2);
+INSERT INTO J2_TBL VALUES (3, -3);
+INSERT INTO J2_TBL VALUES (2, 4);
+INSERT INTO J2_TBL VALUES (5, -5);
+INSERT INTO J2_TBL VALUES (5, -5);
+INSERT INTO J2_TBL VALUES (0, NULL);
+INSERT INTO J2_TBL VALUES (NULL, NULL);
+INSERT INTO J2_TBL VALUES (NULL, 0);
+
+-- useful in some tests below
+create temp table onerow();
+insert into onerow default values;
+analyze onerow;
+
+
+--
+-- CORRELATION NAMES
+-- Make sure that table/column aliases are supported
+-- before diving into more complex join syntax.
+--
+
+SELECT *
+ FROM J1_TBL AS tx;
+
+SELECT *
+ FROM J1_TBL tx;
+
+SELECT *
+ FROM J1_TBL AS t1 (a, b, c);
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c);
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c), J2_TBL t2 (d, e);
+
+SELECT t1.a, t2.e
+ FROM J1_TBL t1 (a, b, c), J2_TBL t2 (d, e)
+ WHERE t1.a = t2.d;
+
+
+--
+-- CROSS JOIN
+-- Qualifications are not allowed on cross joins,
+-- which degenerate into a standard unqualified inner join.
+--
+
+SELECT *
+ FROM J1_TBL CROSS JOIN J2_TBL;
+
+-- ambiguous column
+SELECT i, k, t
+ FROM J1_TBL CROSS JOIN J2_TBL;
+
+-- resolve previous ambiguity by specifying the table name
+SELECT t1.i, k, t
+ FROM J1_TBL t1 CROSS JOIN J2_TBL t2;
+
+SELECT ii, tt, kk
+ FROM (J1_TBL CROSS JOIN J2_TBL)
+ AS tx (ii, jj, tt, ii2, kk);
+
+SELECT tx.ii, tx.jj, tx.kk
+ FROM (J1_TBL t1 (a, b, c) CROSS JOIN J2_TBL t2 (d, e))
+ AS tx (ii, jj, tt, ii2, kk);
+
+SELECT *
+ FROM J1_TBL CROSS JOIN J2_TBL a CROSS JOIN J2_TBL b;
+
+
+--
+--
+-- Inner joins (equi-joins)
+--
+--
+
+--
+-- Inner joins (equi-joins) with USING clause
+-- The USING syntax changes the shape of the resulting table
+-- by including a column in the USING clause only once in the result.
+--
+
+-- Inner equi-join on specified column
+SELECT *
+ FROM J1_TBL INNER JOIN J2_TBL USING (i);
+
+-- Same as above, slightly different syntax
+SELECT *
+ FROM J1_TBL JOIN J2_TBL USING (i);
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c) JOIN J2_TBL t2 (a, d) USING (a)
+ ORDER BY a, d;
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c) JOIN J2_TBL t2 (a, b) USING (b)
+ ORDER BY b, t1.a;
+
+-- test join using aliases
+SELECT * FROM J1_TBL JOIN J2_TBL USING (i) WHERE J1_TBL.t = 'one'; -- ok
+SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one'; -- ok
+SELECT * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t = 'one'; -- error
+SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.i = 1; -- ok
+SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.t = 'one'; -- error
+SELECT * FROM (J1_TBL JOIN J2_TBL USING (i) AS x) AS xx WHERE x.i = 1; -- error (XXX could use better hint)
+SELECT * FROM J1_TBL a1 JOIN J2_TBL a2 USING (i) AS a1; -- error
+SELECT x.* FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one';
+SELECT ROW(x.*) FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one';
+SELECT row_to_json(x.*) FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one';
+
+--
+-- NATURAL JOIN
+-- Inner equi-join on all columns with the same name
+--
+
+SELECT *
+ FROM J1_TBL NATURAL JOIN J2_TBL;
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c) NATURAL JOIN J2_TBL t2 (a, d);
+
+SELECT *
+ FROM J1_TBL t1 (a, b, c) NATURAL JOIN J2_TBL t2 (d, a);
+
+-- mismatch number of columns
+-- currently, Postgres will fill in with underlying names
+SELECT *
+ FROM J1_TBL t1 (a, b) NATURAL JOIN J2_TBL t2 (a);
+
+
+--
+-- Inner joins (equi-joins)
+--
+
+SELECT *
+ FROM J1_TBL JOIN J2_TBL ON (J1_TBL.i = J2_TBL.i);
+
+SELECT *
+ FROM J1_TBL JOIN J2_TBL ON (J1_TBL.i = J2_TBL.k);
+
+
+--
+-- Non-equi-joins
+--
+
+SELECT *
+ FROM J1_TBL JOIN J2_TBL ON (J1_TBL.i <= J2_TBL.k);
+
+
+--
+-- Outer joins
+-- Note that OUTER is a noise word
+--
+
+SELECT *
+ FROM J1_TBL LEFT OUTER JOIN J2_TBL USING (i)
+ ORDER BY i, k, t;
+
+SELECT *
+ FROM J1_TBL LEFT JOIN J2_TBL USING (i)
+ ORDER BY i, k, t;
+
+SELECT *
+ FROM J1_TBL RIGHT OUTER JOIN J2_TBL USING (i);
+
+SELECT *
+ FROM J1_TBL RIGHT JOIN J2_TBL USING (i);
+
+SELECT *
+ FROM J1_TBL FULL OUTER JOIN J2_TBL USING (i)
+ ORDER BY i, k, t;
+
+SELECT *
+ FROM J1_TBL FULL JOIN J2_TBL USING (i)
+ ORDER BY i, k, t;
+
+SELECT *
+ FROM J1_TBL LEFT JOIN J2_TBL USING (i) WHERE (k = 1);
+
+SELECT *
+ FROM J1_TBL LEFT JOIN J2_TBL USING (i) WHERE (i = 1);
+
+--
+-- semijoin selectivity for <>
+--
+explain (costs off)
+select * from int4_tbl i4, tenk1 a
+where exists(select * from tenk1 b
+ where a.twothousand = b.twothousand and a.fivethous <> b.fivethous)
+ and i4.f1 = a.tenthous;
+
+
+--
+-- More complicated constructs
+--
+
+--
+-- Multiway full join
+--
+
+CREATE TABLE t1 (name TEXT, n INTEGER);
+CREATE TABLE t2 (name TEXT, n INTEGER);
+CREATE TABLE t3 (name TEXT, n INTEGER);
+
+INSERT INTO t1 VALUES ( 'bb', 11 );
+INSERT INTO t2 VALUES ( 'bb', 12 );
+INSERT INTO t2 VALUES ( 'cc', 22 );
+INSERT INTO t2 VALUES ( 'ee', 42 );
+INSERT INTO t3 VALUES ( 'bb', 13 );
+INSERT INTO t3 VALUES ( 'cc', 23 );
+INSERT INTO t3 VALUES ( 'dd', 33 );
+
+SELECT * FROM t1 FULL JOIN t2 USING (name) FULL JOIN t3 USING (name);
+
+--
+-- Test interactions of join syntax and subqueries
+--
+
+-- Basic cases (we expect planner to pull up the subquery here)
+SELECT * FROM
+(SELECT * FROM t2) as s2
+INNER JOIN
+(SELECT * FROM t3) s3
+USING (name);
+
+SELECT * FROM
+(SELECT * FROM t2) as s2
+LEFT JOIN
+(SELECT * FROM t3) s3
+USING (name);
+
+SELECT * FROM
+(SELECT * FROM t2) as s2
+FULL JOIN
+(SELECT * FROM t3) s3
+USING (name);
+
+-- Cases with non-nullable expressions in subquery results;
+-- make sure these go to null as expected
+SELECT * FROM
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL INNER JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+
+SELECT * FROM
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL LEFT JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+
+SELECT * FROM
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL FULL JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+
+SELECT * FROM
+(SELECT name, n as s1_n, 1 as s1_1 FROM t1) as s1
+NATURAL INNER JOIN
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL INNER JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+
+SELECT * FROM
+(SELECT name, n as s1_n, 1 as s1_1 FROM t1) as s1
+NATURAL FULL JOIN
+(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+NATURAL FULL JOIN
+(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3;
+
+SELECT * FROM
+(SELECT name, n as s1_n FROM t1) as s1
+NATURAL FULL JOIN
+ (SELECT * FROM
+ (SELECT name, n as s2_n FROM t2) as s2
+ NATURAL FULL JOIN
+ (SELECT name, n as s3_n FROM t3) as s3
+ ) ss2;
+
+SELECT * FROM
+(SELECT name, n as s1_n FROM t1) as s1
+NATURAL FULL JOIN
+ (SELECT * FROM
+ (SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2
+ NATURAL FULL JOIN
+ (SELECT name, n as s3_n FROM t3) as s3
+ ) ss2;
+
+-- Constants as join keys can also be problematic
+SELECT * FROM
+ (SELECT name, n as s1_n FROM t1) as s1
+FULL JOIN
+ (SELECT name, 2 as s2_n FROM t2) as s2
+ON (s1_n = s2_n);
+
+
+-- Test for propagation of nullability constraints into sub-joins
+
+create temp table x (x1 int, x2 int);
+insert into x values (1,11);
+insert into x values (2,22);
+insert into x values (3,null);
+insert into x values (4,44);
+insert into x values (5,null);
+
+create temp table y (y1 int, y2 int);
+insert into y values (1,111);
+insert into y values (2,222);
+insert into y values (3,333);
+insert into y values (4,null);
+
+select * from x;
+select * from y;
+
+select * from x left join y on (x1 = y1 and x2 is not null);
+select * from x left join y on (x1 = y1 and y2 is not null);
+
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1);
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1 and x2 is not null);
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1 and y2 is not null);
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1 and xx2 is not null);
+-- these should NOT give the same answers as above
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1) where (x2 is not null);
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1) where (y2 is not null);
+select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2)
+on (x1 = xx1) where (xx2 is not null);
+
+--
+-- regression test: check for bug with propagation of implied equality
+-- to outside an IN
+--
+select count(*) from tenk1 a where unique1 in
+ (select unique1 from tenk1 b join tenk1 c using (unique1)
+ where b.unique2 = 42);
+
+--
+-- regression test: check for failure to generate a plan with multiple
+-- degenerate IN clauses
+--
+select count(*) from tenk1 x where
+ x.unique1 in (select a.f1 from int4_tbl a,float8_tbl b where a.f1=b.f1) and
+ x.unique1 = 0 and
+ x.unique1 in (select aa.f1 from int4_tbl aa,float8_tbl bb where aa.f1=bb.f1);
+
+-- try that with GEQO too
+begin;
+set geqo = on;
+set geqo_threshold = 2;
+select count(*) from tenk1 x where
+ x.unique1 in (select a.f1 from int4_tbl a,float8_tbl b where a.f1=b.f1) and
+ x.unique1 = 0 and
+ x.unique1 in (select aa.f1 from int4_tbl aa,float8_tbl bb where aa.f1=bb.f1);
+rollback;
+
+--
+-- regression test: be sure we cope with proven-dummy append rels
+--
+explain (costs off)
+select aa, bb, unique1, unique1
+ from tenk1 right join b on aa = unique1
+ where bb < bb and bb is null;
+
+select aa, bb, unique1, unique1
+ from tenk1 right join b on aa = unique1
+ where bb < bb and bb is null;
+
+--
+-- regression test: check handling of empty-FROM subquery underneath outer join
+--
+explain (costs off)
+select * from int8_tbl i1 left join (int8_tbl i2 join
+ (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2
+order by 1, 2;
+
+select * from int8_tbl i1 left join (int8_tbl i2 join
+ (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2
+order by 1, 2;
+
+--
+-- regression test: check a case where join_clause_is_movable_into() gives
+-- an imprecise result, causing an assertion failure
+--
+select count(*)
+from
+ (select t3.tenthous as x1, coalesce(t1.stringu1, t2.stringu1) as x2
+ from tenk1 t1
+ left join tenk1 t2 on t1.unique1 = t2.unique1
+ join tenk1 t3 on t1.unique2 = t3.unique2) ss,
+ tenk1 t4,
+ tenk1 t5
+where t4.thousand = t5.unique1 and ss.x1 = t4.tenthous and ss.x2 = t5.stringu1;
+
+--
+-- regression test: check a case where we formerly missed including an EC
+-- enforcement clause because it was expected to be handled at scan level
+--
+explain (costs off)
+select a.f1, b.f1, t.thousand, t.tenthous from
+ tenk1 t,
+ (select sum(f1)+1 as f1 from int4_tbl i4a) a,
+ (select sum(f1) as f1 from int4_tbl i4b) b
+where b.f1 = t.thousand and a.f1 = b.f1 and (a.f1+b.f1+999) = t.tenthous;
+
+select a.f1, b.f1, t.thousand, t.tenthous from
+ tenk1 t,
+ (select sum(f1)+1 as f1 from int4_tbl i4a) a,
+ (select sum(f1) as f1 from int4_tbl i4b) b
+where b.f1 = t.thousand and a.f1 = b.f1 and (a.f1+b.f1+999) = t.tenthous;
+
+--
+-- check a case where we formerly got confused by conflicting sort orders
+-- in redundant merge join path keys
+--
+explain (costs off)
+select * from
+ j1_tbl full join
+ (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl
+ on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k;
+
+select * from
+ j1_tbl full join
+ (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl
+ on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k;
+
+--
+-- a different check for handling of redundant sort keys in merge joins
+--
+explain (costs off)
+select count(*) from
+ (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x
+ left join
+ (select * from tenk1 y order by y.unique2) y
+ on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2;
+
+select count(*) from
+ (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x
+ left join
+ (select * from tenk1 y order by y.unique2) y
+ on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2;
+
+
+--
+-- Clean up
+--
+
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+
+DROP TABLE J1_TBL;
+DROP TABLE J2_TBL;
+
+-- Both DELETE and UPDATE allow the specification of additional tables
+-- to "join" against to determine which rows should be modified.
+
+CREATE TEMP TABLE t1 (a int, b int);
+CREATE TEMP TABLE t2 (a int, b int);
+CREATE TEMP TABLE t3 (x int, y int);
+
+INSERT INTO t1 VALUES (5, 10);
+INSERT INTO t1 VALUES (15, 20);
+INSERT INTO t1 VALUES (100, 100);
+INSERT INTO t1 VALUES (200, 1000);
+INSERT INTO t2 VALUES (200, 2000);
+INSERT INTO t3 VALUES (5, 20);
+INSERT INTO t3 VALUES (6, 7);
+INSERT INTO t3 VALUES (7, 8);
+INSERT INTO t3 VALUES (500, 100);
+
+DELETE FROM t3 USING t1 table1 WHERE t3.x = table1.a;
+SELECT * FROM t3;
+DELETE FROM t3 USING t1 JOIN t2 USING (a) WHERE t3.x > t1.a;
+SELECT * FROM t3;
+DELETE FROM t3 USING t3 t3_other WHERE t3.x = t3_other.x AND t3.y = t3_other.y;
+SELECT * FROM t3;
+
+-- Test join against inheritance tree
+
+create temp table t2a () inherits (t2);
+
+insert into t2a values (200, 2001);
+
+select * from t1 left join t2 on (t1.a = t2.a);
+
+-- Test matching of column name with wrong alias
+
+select t1.x from t1 join t3 on (t1.a = t3.x);
+
+-- Test matching of locking clause with wrong alias
+
+select t1.*, t2.*, unnamed_join.* from
+ t1 join t2 on (t1.a = t2.a), t3 as unnamed_join
+ for update of unnamed_join;
+
+select foo.*, unnamed_join.* from
+ t1 join t2 using (a) as foo, t3 as unnamed_join
+ for update of unnamed_join;
+
+select foo.*, unnamed_join.* from
+ t1 join t2 using (a) as foo, t3 as unnamed_join
+ for update of foo;
+
+select bar.*, unnamed_join.* from
+ (t1 join t2 using (a) as foo) as bar, t3 as unnamed_join
+ for update of foo;
+
+select bar.*, unnamed_join.* from
+ (t1 join t2 using (a) as foo) as bar, t3 as unnamed_join
+ for update of bar;
+
+--
+-- regression test for 8.1 merge right join bug
+--
+
+CREATE TEMP TABLE tt1 ( tt1_id int4, joincol int4 );
+INSERT INTO tt1 VALUES (1, 11);
+INSERT INTO tt1 VALUES (2, NULL);
+
+CREATE TEMP TABLE tt2 ( tt2_id int4, joincol int4 );
+INSERT INTO tt2 VALUES (21, 11);
+INSERT INTO tt2 VALUES (22, 11);
+
+set enable_hashjoin to off;
+set enable_nestloop to off;
+
+-- these should give the same results
+
+select tt1.*, tt2.* from tt1 left join tt2 on tt1.joincol = tt2.joincol;
+
+select tt1.*, tt2.* from tt2 right join tt1 on tt1.joincol = tt2.joincol;
+
+reset enable_hashjoin;
+reset enable_nestloop;
+
+--
+-- regression test for bug #13908 (hash join with skew tuples & nbatch increase)
+--
+
+set work_mem to '64kB';
+set enable_mergejoin to off;
+set enable_memoize to off;
+
+explain (costs off)
+select count(*) from tenk1 a, tenk1 b
+ where a.hundred = b.thousand and (b.fivethous % 10) < 10;
+select count(*) from tenk1 a, tenk1 b
+ where a.hundred = b.thousand and (b.fivethous % 10) < 10;
+
+reset work_mem;
+reset enable_mergejoin;
+reset enable_memoize;
+
+--
+-- regression test for 8.2 bug with improper re-ordering of left joins
+--
+
+create temp table tt3(f1 int, f2 text);
+insert into tt3 select x, repeat('xyzzy', 100) from generate_series(1,10000) x;
+create index tt3i on tt3(f1);
+analyze tt3;
+
+create temp table tt4(f1 int);
+insert into tt4 values (0),(1),(9999);
+analyze tt4;
+
+SELECT a.f1
+FROM tt4 a
+LEFT JOIN (
+ SELECT b.f1
+ FROM tt3 b LEFT JOIN tt3 c ON (b.f1 = c.f1)
+ WHERE c.f1 IS NULL
+) AS d ON (a.f1 = d.f1)
+WHERE d.f1 IS NULL;
+
+--
+-- regression test for proper handling of outer joins within antijoins
+--
+
+create temp table tt4x(c1 int, c2 int, c3 int);
+
+explain (costs off)
+select * from tt4x t1
+where not exists (
+ select 1 from tt4x t2
+ left join tt4x t3 on t2.c3 = t3.c1
+ left join ( select t5.c1 as c1
+ from tt4x t4 left join tt4x t5 on t4.c2 = t5.c1
+ ) a1 on t3.c2 = a1.c1
+ where t1.c1 = t2.c2
+);
+
+--
+-- regression test for problems of the sort depicted in bug #3494
+--
+
+create temp table tt5(f1 int, f2 int);
+create temp table tt6(f1 int, f2 int);
+
+insert into tt5 values(1, 10);
+insert into tt5 values(1, 11);
+
+insert into tt6 values(1, 9);
+insert into tt6 values(1, 2);
+insert into tt6 values(2, 9);
+
+select * from tt5,tt6 where tt5.f1 = tt6.f1 and tt5.f1 = tt5.f2 - tt6.f2;
+
+--
+-- regression test for problems of the sort depicted in bug #3588
+--
+
+create temp table xx (pkxx int);
+create temp table yy (pkyy int, pkxx int);
+
+insert into xx values (1);
+insert into xx values (2);
+insert into xx values (3);
+
+insert into yy values (101, 1);
+insert into yy values (201, 2);
+insert into yy values (301, NULL);
+
+select yy.pkyy as yy_pkyy, yy.pkxx as yy_pkxx, yya.pkyy as yya_pkyy,
+ xxa.pkxx as xxa_pkxx, xxb.pkxx as xxb_pkxx
+from yy
+ left join (SELECT * FROM yy where pkyy = 101) as yya ON yy.pkyy = yya.pkyy
+ left join xx xxa on yya.pkxx = xxa.pkxx
+ left join xx xxb on coalesce (xxa.pkxx, 1) = xxb.pkxx;
+
+--
+-- regression test for improper pushing of constants across outer-join clauses
+-- (as seen in early 8.2.x releases)
+--
+
+create temp table zt1 (f1 int primary key);
+create temp table zt2 (f2 int primary key);
+create temp table zt3 (f3 int primary key);
+insert into zt1 values(53);
+insert into zt2 values(53);
+
+select * from
+ zt2 left join zt3 on (f2 = f3)
+ left join zt1 on (f3 = f1)
+where f2 = 53;
+
+create temp view zv1 as select *,'dummy'::text AS junk from zt1;
+
+select * from
+ zt2 left join zt3 on (f2 = f3)
+ left join zv1 on (f3 = f1)
+where f2 = 53;
+
+--
+-- regression test for improper extraction of OR indexqual conditions
+-- (as seen in early 8.3.x releases)
+--
+
+select a.unique2, a.ten, b.tenthous, b.unique2, b.hundred
+from tenk1 a left join tenk1 b on a.unique2 = b.tenthous
+where a.unique1 = 42 and
+ ((b.unique2 is null and a.ten = 2) or b.hundred = 3);
+
+--
+-- test proper positioning of one-time quals in EXISTS (8.4devel bug)
+--
+prepare foo(bool) as
+ select count(*) from tenk1 a left join tenk1 b
+ on (a.unique2 = b.unique1 and exists
+ (select 1 from tenk1 c where c.thousand = b.unique2 and $1));
+execute foo(true);
+execute foo(false);
+
+--
+-- test for sane behavior with noncanonical merge clauses, per bug #4926
+--
+
+begin;
+
+set enable_mergejoin = 1;
+set enable_hashjoin = 0;
+set enable_nestloop = 0;
+
+create temp table a (i integer);
+create temp table b (x integer, y integer);
+
+select * from a left join b on i = x and i = y and x = i;
+
+rollback;
+
+--
+-- test handling of merge clauses using record_ops
+--
+begin;
+
+create type mycomptype as (id int, v bigint);
+
+create temp table tidv (idv mycomptype);
+create index on tidv (idv);
+
+explain (costs off)
+select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv;
+
+set enable_mergejoin = 0;
+set enable_hashjoin = 0;
+
+explain (costs off)
+select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv;
+
+rollback;
+
+--
+-- test NULL behavior of whole-row Vars, per bug #5025
+--
+select t1.q2, count(t2.*)
+from int8_tbl t1 left join int8_tbl t2 on (t1.q2 = t2.q1)
+group by t1.q2 order by 1;
+
+select t1.q2, count(t2.*)
+from int8_tbl t1 left join (select * from int8_tbl) t2 on (t1.q2 = t2.q1)
+group by t1.q2 order by 1;
+
+select t1.q2, count(t2.*)
+from int8_tbl t1 left join (select * from int8_tbl offset 0) t2 on (t1.q2 = t2.q1)
+group by t1.q2 order by 1;
+
+select t1.q2, count(t2.*)
+from int8_tbl t1 left join
+ (select q1, case when q2=1 then 1 else q2 end as q2 from int8_tbl) t2
+ on (t1.q2 = t2.q1)
+group by t1.q2 order by 1;
+
+--
+-- test incorrect failure to NULL pulled-up subexpressions
+--
+begin;
+
+create temp table a (
+ code char not null,
+ constraint a_pk primary key (code)
+);
+create temp table b (
+ a char not null,
+ num integer not null,
+ constraint b_pk primary key (a, num)
+);
+create temp table c (
+ name char not null,
+ a char,
+ constraint c_pk primary key (name)
+);
+
+insert into a (code) values ('p');
+insert into a (code) values ('q');
+insert into b (a, num) values ('p', 1);
+insert into b (a, num) values ('p', 2);
+insert into c (name, a) values ('A', 'p');
+insert into c (name, a) values ('B', 'q');
+insert into c (name, a) values ('C', null);
+
+select c.name, ss.code, ss.b_cnt, ss.const
+from c left join
+ (select a.code, coalesce(b_grp.cnt, 0) as b_cnt, -1 as const
+ from a left join
+ (select count(1) as cnt, b.a from b group by b.a) as b_grp
+ on a.code = b_grp.a
+ ) as ss
+ on (c.a = ss.code)
+order by c.name;
+
+rollback;
+
+--
+-- test incorrect handling of placeholders that only appear in targetlists,
+-- per bug #6154
+--
+SELECT * FROM
+( SELECT 1 as key1 ) sub1
+LEFT JOIN
+( SELECT sub3.key3, sub4.value2, COALESCE(sub4.value2, 66) as value3 FROM
+ ( SELECT 1 as key3 ) sub3
+ LEFT JOIN
+ ( SELECT sub5.key5, COALESCE(sub6.value1, 1) as value2 FROM
+ ( SELECT 1 as key5 ) sub5
+ LEFT JOIN
+ ( SELECT 2 as key6, 42 as value1 ) sub6
+ ON sub5.key5 = sub6.key6
+ ) sub4
+ ON sub4.key5 = sub3.key3
+) sub2
+ON sub1.key1 = sub2.key3;
+
+-- test the path using join aliases, too
+SELECT * FROM
+( SELECT 1 as key1 ) sub1
+LEFT JOIN
+( SELECT sub3.key3, value2, COALESCE(value2, 66) as value3 FROM
+ ( SELECT 1 as key3 ) sub3
+ LEFT JOIN
+ ( SELECT sub5.key5, COALESCE(sub6.value1, 1) as value2 FROM
+ ( SELECT 1 as key5 ) sub5
+ LEFT JOIN
+ ( SELECT 2 as key6, 42 as value1 ) sub6
+ ON sub5.key5 = sub6.key6
+ ) sub4
+ ON sub4.key5 = sub3.key3
+) sub2
+ON sub1.key1 = sub2.key3;
+
+--
+-- test case where a PlaceHolderVar is used as a nestloop parameter
+--
+
+EXPLAIN (COSTS OFF)
+SELECT qq, unique1
+ FROM
+ ( SELECT COALESCE(q1, 0) AS qq FROM int8_tbl a ) AS ss1
+ FULL OUTER JOIN
+ ( SELECT COALESCE(q2, -1) AS qq FROM int8_tbl b ) AS ss2
+ USING (qq)
+ INNER JOIN tenk1 c ON qq = unique2;
+
+SELECT qq, unique1
+ FROM
+ ( SELECT COALESCE(q1, 0) AS qq FROM int8_tbl a ) AS ss1
+ FULL OUTER JOIN
+ ( SELECT COALESCE(q2, -1) AS qq FROM int8_tbl b ) AS ss2
+ USING (qq)
+ INNER JOIN tenk1 c ON qq = unique2;
+
+--
+-- nested nestloops can require nested PlaceHolderVars
+--
+
+create temp table nt1 (
+ id int primary key,
+ a1 boolean,
+ a2 boolean
+);
+create temp table nt2 (
+ id int primary key,
+ nt1_id int,
+ b1 boolean,
+ b2 boolean,
+ foreign key (nt1_id) references nt1(id)
+);
+create temp table nt3 (
+ id int primary key,
+ nt2_id int,
+ c1 boolean,
+ foreign key (nt2_id) references nt2(id)
+);
+
+insert into nt1 values (1,true,true);
+insert into nt1 values (2,true,false);
+insert into nt1 values (3,false,false);
+insert into nt2 values (1,1,true,true);
+insert into nt2 values (2,2,true,false);
+insert into nt2 values (3,3,false,false);
+insert into nt3 values (1,1,true);
+insert into nt3 values (2,2,false);
+insert into nt3 values (3,3,true);
+
+explain (costs off)
+select nt3.id
+from nt3 as nt3
+ left join
+ (select nt2.*, (nt2.b1 and ss1.a3) AS b3
+ from nt2 as nt2
+ left join
+ (select nt1.*, (nt1.id is not null) as a3 from nt1) as ss1
+ on ss1.id = nt2.nt1_id
+ ) as ss2
+ on ss2.id = nt3.nt2_id
+where nt3.id = 1 and ss2.b3;
+
+select nt3.id
+from nt3 as nt3
+ left join
+ (select nt2.*, (nt2.b1 and ss1.a3) AS b3
+ from nt2 as nt2
+ left join
+ (select nt1.*, (nt1.id is not null) as a3 from nt1) as ss1
+ on ss1.id = nt2.nt1_id
+ ) as ss2
+ on ss2.id = nt3.nt2_id
+where nt3.id = 1 and ss2.b3;
+
+--
+-- test case where a PlaceHolderVar is propagated into a subquery
+--
+
+explain (costs off)
+select * from
+ int8_tbl t1 left join
+ (select q1 as x, 42 as y from int8_tbl t2) ss
+ on t1.q2 = ss.x
+where
+ 1 = (select 1 from int8_tbl t3 where ss.y is not null limit 1)
+order by 1,2;
+
+select * from
+ int8_tbl t1 left join
+ (select q1 as x, 42 as y from int8_tbl t2) ss
+ on t1.q2 = ss.x
+where
+ 1 = (select 1 from int8_tbl t3 where ss.y is not null limit 1)
+order by 1,2;
+
+--
+-- variant where a PlaceHolderVar is needed at a join, but not above the join
+--
+
+explain (costs off)
+select * from
+ int4_tbl as i41,
+ lateral
+ (select 1 as x from
+ (select i41.f1 as lat,
+ i42.f1 as loc from
+ int8_tbl as i81, int4_tbl as i42) as ss1
+ right join int4_tbl as i43 on (i43.f1 > 1)
+ where ss1.loc = ss1.lat) as ss2
+where i41.f1 > 0;
+
+select * from
+ int4_tbl as i41,
+ lateral
+ (select 1 as x from
+ (select i41.f1 as lat,
+ i42.f1 as loc from
+ int8_tbl as i81, int4_tbl as i42) as ss1
+ right join int4_tbl as i43 on (i43.f1 > 1)
+ where ss1.loc = ss1.lat) as ss2
+where i41.f1 > 0;
+
+--
+-- test the corner cases FULL JOIN ON TRUE and FULL JOIN ON FALSE
+--
+select * from int4_tbl a full join int4_tbl b on true;
+select * from int4_tbl a full join int4_tbl b on false;
+
+--
+-- test for ability to use a cartesian join when necessary
+--
+
+create temp table q1 as select 1 as q1;
+create temp table q2 as select 0 as q2;
+analyze q1;
+analyze q2;
+
+explain (costs off)
+select * from
+ tenk1 join int4_tbl on f1 = twothousand,
+ q1, q2
+where q1 = thousand or q2 = thousand;
+
+explain (costs off)
+select * from
+ tenk1 join int4_tbl on f1 = twothousand,
+ q1, q2
+where thousand = (q1 + q2);
+
+--
+-- test ability to generate a suitable plan for a star-schema query
+--
+
+explain (costs off)
+select * from
+ tenk1, int8_tbl a, int8_tbl b
+where thousand = a.q1 and tenthous = b.q1 and a.q2 = 1 and b.q2 = 2;
+
+--
+-- test a corner case in which we shouldn't apply the star-schema optimization
+--
+
+explain (costs off)
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (select 1,0 from onerow) v1(x1,x2)
+ left join (select 3,1 from onerow) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (select 1,0 from onerow) v1(x1,x2)
+ left join (select 3,1 from onerow) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+
+-- variant that isn't quite a star-schema case
+
+select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+where t1.unique1 < i4.f1;
+
+-- this variant is foldable by the remove-useless-RESULT-RTEs code
+
+explain (costs off)
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (values(1,0)) v1(x1,x2)
+ left join (values(3,1)) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from
+ tenk1 t1
+ inner join int4_tbl i1
+ left join (select v1.x2, v2.y1, 11 AS d1
+ from (values(1,0)) v1(x1,x2)
+ left join (values(3,1)) v2(y1,y2)
+ on v1.x1 = v2.y2) subq1
+ on (i1.f1 = subq1.x2)
+ on (t1.unique2 = subq1.d1)
+ left join tenk1 t2
+ on (subq1.y1 = t2.unique1)
+where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
+
+-- Here's a variant that we can't fold too aggressively, though,
+-- or we end up with noplace to evaluate the lateral PHV
+explain (verbose, costs off)
+select * from
+ (select 1 as x) ss1 left join (select 2 as y) ss2 on (true),
+ lateral (select ss2.y as z limit 1) ss3;
+select * from
+ (select 1 as x) ss1 left join (select 2 as y) ss2 on (true),
+ lateral (select ss2.y as z limit 1) ss3;
+
+-- Test proper handling of appendrel PHVs during useless-RTE removal
+explain (costs off)
+select * from
+ (select 0 as z) as t1
+ left join
+ (select true as a) as t2
+ on true,
+ lateral (select true as b
+ union all
+ select a as b) as t3
+where b;
+
+select * from
+ (select 0 as z) as t1
+ left join
+ (select true as a) as t2
+ on true,
+ lateral (select true as b
+ union all
+ select a as b) as t3
+where b;
+
+--
+-- test inlining of immutable functions
+--
+create function f_immutable_int4(i integer) returns integer as
+$$ begin return i; end; $$ language plpgsql immutable;
+
+-- check optimization of function scan with join
+explain (costs off)
+select unique1 from tenk1, (select * from f_immutable_int4(1) x) x
+where x = unique1;
+
+explain (verbose, costs off)
+select unique1, x.*
+from tenk1, (select *, random() from f_immutable_int4(1) x) x
+where x = unique1;
+
+explain (costs off)
+select unique1 from tenk1, f_immutable_int4(1) x where x = unique1;
+
+explain (costs off)
+select unique1 from tenk1, lateral f_immutable_int4(1) x where x = unique1;
+
+explain (costs off)
+select unique1 from tenk1, lateral f_immutable_int4(1) x where x in (select 17);
+
+explain (costs off)
+select unique1, x from tenk1 join f_immutable_int4(1) x on unique1 = x;
+
+explain (costs off)
+select unique1, x from tenk1 left join f_immutable_int4(1) x on unique1 = x;
+
+explain (costs off)
+select unique1, x from tenk1 right join f_immutable_int4(1) x on unique1 = x;
+
+explain (costs off)
+select unique1, x from tenk1 full join f_immutable_int4(1) x on unique1 = x;
+
+-- check that pullup of a const function allows further const-folding
+explain (costs off)
+select unique1 from tenk1, f_immutable_int4(1) x where x = 42;
+
+-- test inlining of immutable functions with PlaceHolderVars
+explain (costs off)
+select nt3.id
+from nt3 as nt3
+ left join
+ (select nt2.*, (nt2.b1 or i4 = 42) AS b3
+ from nt2 as nt2
+ left join
+ f_immutable_int4(0) i4
+ on i4 = nt2.nt1_id
+ ) as ss2
+ on ss2.id = nt3.nt2_id
+where nt3.id = 1 and ss2.b3;
+
+drop function f_immutable_int4(int);
+
+-- test inlining when function returns composite
+
+create function mki8(bigint, bigint) returns int8_tbl as
+$$select row($1,$2)::int8_tbl$$ language sql;
+
+create function mki4(int) returns int4_tbl as
+$$select row($1)::int4_tbl$$ language sql;
+
+explain (verbose, costs off)
+select * from mki8(1,2);
+select * from mki8(1,2);
+
+explain (verbose, costs off)
+select * from mki4(42);
+select * from mki4(42);
+
+drop function mki8(bigint, bigint);
+drop function mki4(int);
+
+--
+-- test extraction of restriction OR clauses from join OR clause
+-- (we used to only do this for indexable clauses)
+--
+
+explain (costs off)
+select * from tenk1 a join tenk1 b on
+ (a.unique1 = 1 and b.unique1 = 2) or (a.unique2 = 3 and b.hundred = 4);
+explain (costs off)
+select * from tenk1 a join tenk1 b on
+ (a.unique1 = 1 and b.unique1 = 2) or (a.unique2 = 3 and b.ten = 4);
+explain (costs off)
+select * from tenk1 a join tenk1 b on
+ (a.unique1 = 1 and b.unique1 = 2) or
+ ((a.unique2 = 3 or a.unique2 = 7) and b.hundred = 4);
+
+--
+-- test placement of movable quals in a parameterized join tree
+--
+
+explain (costs off)
+select * from tenk1 t1 left join
+ (tenk1 t2 join tenk1 t3 on t2.thousand = t3.unique2)
+ on t1.hundred = t2.hundred and t1.ten = t3.ten
+where t1.unique1 = 1;
+
+explain (costs off)
+select * from tenk1 t1 left join
+ (tenk1 t2 join tenk1 t3 on t2.thousand = t3.unique2)
+ on t1.hundred = t2.hundred and t1.ten + t2.ten = t3.ten
+where t1.unique1 = 1;
+
+explain (costs off)
+select count(*) from
+ tenk1 a join tenk1 b on a.unique1 = b.unique2
+ left join tenk1 c on a.unique2 = b.unique1 and c.thousand = a.thousand
+ join int4_tbl on b.thousand = f1;
+
+select count(*) from
+ tenk1 a join tenk1 b on a.unique1 = b.unique2
+ left join tenk1 c on a.unique2 = b.unique1 and c.thousand = a.thousand
+ join int4_tbl on b.thousand = f1;
+
+explain (costs off)
+select b.unique1 from
+ tenk1 a join tenk1 b on a.unique1 = b.unique2
+ left join tenk1 c on b.unique1 = 42 and c.thousand = a.thousand
+ join int4_tbl i1 on b.thousand = f1
+ right join int4_tbl i2 on i2.f1 = b.tenthous
+ order by 1;
+
+select b.unique1 from
+ tenk1 a join tenk1 b on a.unique1 = b.unique2
+ left join tenk1 c on b.unique1 = 42 and c.thousand = a.thousand
+ join int4_tbl i1 on b.thousand = f1
+ right join int4_tbl i2 on i2.f1 = b.tenthous
+ order by 1;
+
+explain (costs off)
+select * from
+(
+ select unique1, q1, coalesce(unique1, -1) + q1 as fault
+ from int8_tbl left join tenk1 on (q2 = unique2)
+) ss
+where fault = 122
+order by fault;
+
+select * from
+(
+ select unique1, q1, coalesce(unique1, -1) + q1 as fault
+ from int8_tbl left join tenk1 on (q2 = unique2)
+) ss
+where fault = 122
+order by fault;
+
+explain (costs off)
+select * from
+(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys)
+left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x
+left join unnest(v1ys) as u1(u1y) on u1y = v2y;
+
+select * from
+(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys)
+left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x
+left join unnest(v1ys) as u1(u1y) on u1y = v2y;
+
+--
+-- test handling of potential equivalence clauses above outer joins
+--
+
+explain (costs off)
+select q1, unique2, thousand, hundred
+ from int8_tbl a left join tenk1 b on q1 = unique2
+ where coalesce(thousand,123) = q1 and q1 = coalesce(hundred,123);
+
+select q1, unique2, thousand, hundred
+ from int8_tbl a left join tenk1 b on q1 = unique2
+ where coalesce(thousand,123) = q1 and q1 = coalesce(hundred,123);
+
+explain (costs off)
+select f1, unique2, case when unique2 is null then f1 else 0 end
+ from int4_tbl a left join tenk1 b on f1 = unique2
+ where (case when unique2 is null then f1 else 0 end) = 0;
+
+select f1, unique2, case when unique2 is null then f1 else 0 end
+ from int4_tbl a left join tenk1 b on f1 = unique2
+ where (case when unique2 is null then f1 else 0 end) = 0;
+
+--
+-- another case with equivalence clauses above outer joins (bug #8591)
+--
+
+explain (costs off)
+select a.unique1, b.unique1, c.unique1, coalesce(b.twothousand, a.twothousand)
+ from tenk1 a left join tenk1 b on b.thousand = a.unique1 left join tenk1 c on c.unique2 = coalesce(b.twothousand, a.twothousand)
+ where a.unique2 < 10 and coalesce(b.twothousand, a.twothousand) = 44;
+
+select a.unique1, b.unique1, c.unique1, coalesce(b.twothousand, a.twothousand)
+ from tenk1 a left join tenk1 b on b.thousand = a.unique1 left join tenk1 c on c.unique2 = coalesce(b.twothousand, a.twothousand)
+ where a.unique2 < 10 and coalesce(b.twothousand, a.twothousand) = 44;
+
+--
+-- check handling of join aliases when flattening multiple levels of subquery
+--
+
+explain (verbose, costs off)
+select foo1.join_key as foo1_id, foo3.join_key AS foo3_id, bug_field from
+ (values (0),(1)) foo1(join_key)
+left join
+ (select join_key, bug_field from
+ (select ss1.join_key, ss1.bug_field from
+ (select f1 as join_key, 666 as bug_field from int4_tbl i1) ss1
+ ) foo2
+ left join
+ (select unique2 as join_key from tenk1 i2) ss2
+ using (join_key)
+ ) foo3
+using (join_key);
+
+select foo1.join_key as foo1_id, foo3.join_key AS foo3_id, bug_field from
+ (values (0),(1)) foo1(join_key)
+left join
+ (select join_key, bug_field from
+ (select ss1.join_key, ss1.bug_field from
+ (select f1 as join_key, 666 as bug_field from int4_tbl i1) ss1
+ ) foo2
+ left join
+ (select unique2 as join_key from tenk1 i2) ss2
+ using (join_key)
+ ) foo3
+using (join_key);
+
+--
+-- test successful handling of nested outer joins with degenerate join quals
+--
+
+explain (verbose, costs off)
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+
+explain (verbose, costs off)
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+
+explain (verbose, costs off)
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2
+ where q1 = f1) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+
+select t1.* from
+ text_tbl t1
+ left join (select *, '***'::text as d1 from int8_tbl i8b1) b1
+ left join int8_tbl i8
+ left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2
+ where q1 = f1) b2
+ on (i8.q1 = b2.q1)
+ on (b2.d2 = b1.q2)
+ on (t1.f1 = b1.d1)
+ left join int4_tbl i4
+ on (i8.q2 = i4.f1);
+
+explain (verbose, costs off)
+select * from
+ text_tbl t1
+ inner join int8_tbl i8
+ on i8.q2 = 456
+ right join text_tbl t2
+ on t1.f1 = 'doh!'
+ left join int4_tbl i4
+ on i8.q1 = i4.f1;
+
+select * from
+ text_tbl t1
+ inner join int8_tbl i8
+ on i8.q2 = 456
+ right join text_tbl t2
+ on t1.f1 = 'doh!'
+ left join int4_tbl i4
+ on i8.q1 = i4.f1;
+
+--
+-- test for appropriate join order in the presence of lateral references
+--
+
+explain (verbose, costs off)
+select * from
+ text_tbl t1
+ left join int8_tbl i8
+ on i8.q2 = 123,
+ lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss
+where t1.f1 = ss.f1;
+
+select * from
+ text_tbl t1
+ left join int8_tbl i8
+ on i8.q2 = 123,
+ lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss
+where t1.f1 = ss.f1;
+
+explain (verbose, costs off)
+select * from
+ text_tbl t1
+ left join int8_tbl i8
+ on i8.q2 = 123,
+ lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss1,
+ lateral (select ss1.* from text_tbl t3 limit 1) as ss2
+where t1.f1 = ss2.f1;
+
+select * from
+ text_tbl t1
+ left join int8_tbl i8
+ on i8.q2 = 123,
+ lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss1,
+ lateral (select ss1.* from text_tbl t3 limit 1) as ss2
+where t1.f1 = ss2.f1;
+
+explain (verbose, costs off)
+select 1 from
+ text_tbl as tt1
+ inner join text_tbl as tt2 on (tt1.f1 = 'foo')
+ left join text_tbl as tt3 on (tt3.f1 = 'foo')
+ left join text_tbl as tt4 on (tt3.f1 = tt4.f1),
+ lateral (select tt4.f1 as c0 from text_tbl as tt5 limit 1) as ss1
+where tt1.f1 = ss1.c0;
+
+select 1 from
+ text_tbl as tt1
+ inner join text_tbl as tt2 on (tt1.f1 = 'foo')
+ left join text_tbl as tt3 on (tt3.f1 = 'foo')
+ left join text_tbl as tt4 on (tt3.f1 = tt4.f1),
+ lateral (select tt4.f1 as c0 from text_tbl as tt5 limit 1) as ss1
+where tt1.f1 = ss1.c0;
+
+--
+-- check a case in which a PlaceHolderVar forces join order
+--
+
+explain (verbose, costs off)
+select ss2.* from
+ int4_tbl i41
+ left join int8_tbl i8
+ join (select i42.f1 as c1, i43.f1 as c2, 42 as c3
+ from int4_tbl i42, int4_tbl i43) ss1
+ on i8.q1 = ss1.c2
+ on i41.f1 = ss1.c1,
+ lateral (select i41.*, i8.*, ss1.* from text_tbl limit 1) ss2
+where ss1.c2 = 0;
+
+select ss2.* from
+ int4_tbl i41
+ left join int8_tbl i8
+ join (select i42.f1 as c1, i43.f1 as c2, 42 as c3
+ from int4_tbl i42, int4_tbl i43) ss1
+ on i8.q1 = ss1.c2
+ on i41.f1 = ss1.c1,
+ lateral (select i41.*, i8.*, ss1.* from text_tbl limit 1) ss2
+where ss1.c2 = 0;
+
+--
+-- test successful handling of full join underneath left join (bug #14105)
+--
+
+explain (costs off)
+select * from
+ (select 1 as id) as xx
+ left join
+ (tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id))
+ on (xx.id = coalesce(yy.id));
+
+select * from
+ (select 1 as id) as xx
+ left join
+ (tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id))
+ on (xx.id = coalesce(yy.id));
+
+--
+-- test ability to push constants through outer join clauses
+--
+
+explain (costs off)
+ select * from int4_tbl a left join tenk1 b on f1 = unique2 where f1 = 0;
+
+explain (costs off)
+ select * from tenk1 a full join tenk1 b using(unique2) where unique2 = 42;
+
+--
+-- test that quals attached to an outer join have correct semantics,
+-- specifically that they don't re-use expressions computed below the join;
+-- we force a mergejoin so that coalesce(b.q1, 1) appears as a join input
+--
+
+set enable_hashjoin to off;
+set enable_nestloop to off;
+
+explain (verbose, costs off)
+ select a.q2, b.q1
+ from int8_tbl a left join int8_tbl b on a.q2 = coalesce(b.q1, 1)
+ where coalesce(b.q1, 1) > 0;
+select a.q2, b.q1
+ from int8_tbl a left join int8_tbl b on a.q2 = coalesce(b.q1, 1)
+ where coalesce(b.q1, 1) > 0;
+
+reset enable_hashjoin;
+reset enable_nestloop;
+
+--
+-- test join removal
+--
+
+begin;
+
+CREATE TEMP TABLE a (id int PRIMARY KEY, b_id int);
+CREATE TEMP TABLE b (id int PRIMARY KEY, c_id int);
+CREATE TEMP TABLE c (id int PRIMARY KEY);
+CREATE TEMP TABLE d (a int, b int);
+INSERT INTO a VALUES (0, 0), (1, NULL);
+INSERT INTO b VALUES (0, 0), (1, NULL);
+INSERT INTO c VALUES (0), (1);
+INSERT INTO d VALUES (1,3), (2,2), (3,1);
+
+-- all three cases should be optimizable into a simple seqscan
+explain (costs off) SELECT a.* FROM a LEFT JOIN b ON a.b_id = b.id;
+explain (costs off) SELECT b.* FROM b LEFT JOIN c ON b.c_id = c.id;
+explain (costs off)
+ SELECT a.* FROM a LEFT JOIN (b left join c on b.c_id = c.id)
+ ON (a.b_id = b.id);
+
+-- check optimization of outer join within another special join
+explain (costs off)
+select id from a where id in (
+ select b.id from b left join c on b.id = c.id
+);
+
+-- check that join removal works for a left join when joining a subquery
+-- that is guaranteed to be unique by its GROUP BY clause
+explain (costs off)
+select d.* from d left join (select * from b group by b.id, b.c_id) s
+ on d.a = s.id and d.b = s.c_id;
+
+-- similarly, but keying off a DISTINCT clause
+explain (costs off)
+select d.* from d left join (select distinct * from b) s
+ on d.a = s.id and d.b = s.c_id;
+
+-- join removal is not possible when the GROUP BY contains a column that is
+-- not in the join condition. (Note: as of 9.6, we notice that b.id is a
+-- primary key and so drop b.c_id from the GROUP BY of the resulting plan;
+-- but this happens too late for join removal in the outer plan level.)
+explain (costs off)
+select d.* from d left join (select * from b group by b.id, b.c_id) s
+ on d.a = s.id;
+
+-- similarly, but keying off a DISTINCT clause
+explain (costs off)
+select d.* from d left join (select distinct * from b) s
+ on d.a = s.id;
+
+-- check join removal works when uniqueness of the join condition is enforced
+-- by a UNION
+explain (costs off)
+select d.* from d left join (select id from a union select id from b) s
+ on d.a = s.id;
+
+-- check join removal with a cross-type comparison operator
+explain (costs off)
+select i8.* from int8_tbl i8 left join (select f1 from int4_tbl group by f1) i4
+ on i8.q1 = i4.f1;
+
+-- check join removal with lateral references
+explain (costs off)
+select 1 from (select a.id FROM a left join b on a.b_id = b.id) q,
+ lateral generate_series(1, q.id) gs(i) where q.id = gs.i;
+
+rollback;
+
+create temp table parent (k int primary key, pd int);
+create temp table child (k int unique, cd int);
+insert into parent values (1, 10), (2, 20), (3, 30);
+insert into child values (1, 100), (4, 400);
+
+-- this case is optimizable
+select p.* from parent p left join child c on (p.k = c.k);
+explain (costs off)
+ select p.* from parent p left join child c on (p.k = c.k);
+
+-- this case is not
+select p.*, linked from parent p
+ left join (select c.*, true as linked from child c) as ss
+ on (p.k = ss.k);
+explain (costs off)
+ select p.*, linked from parent p
+ left join (select c.*, true as linked from child c) as ss
+ on (p.k = ss.k);
+
+-- check for a 9.0rc1 bug: join removal breaks pseudoconstant qual handling
+select p.* from
+ parent p left join child c on (p.k = c.k)
+ where p.k = 1 and p.k = 2;
+explain (costs off)
+select p.* from
+ parent p left join child c on (p.k = c.k)
+ where p.k = 1 and p.k = 2;
+
+select p.* from
+ (parent p left join child c on (p.k = c.k)) join parent x on p.k = x.k
+ where p.k = 1 and p.k = 2;
+explain (costs off)
+select p.* from
+ (parent p left join child c on (p.k = c.k)) join parent x on p.k = x.k
+ where p.k = 1 and p.k = 2;
+
+-- bug 5255: this is not optimizable by join removal
+begin;
+
+CREATE TEMP TABLE a (id int PRIMARY KEY);
+CREATE TEMP TABLE b (id int PRIMARY KEY, a_id int);
+INSERT INTO a VALUES (0), (1);
+INSERT INTO b VALUES (0, 0), (1, NULL);
+
+SELECT * FROM b LEFT JOIN a ON (b.a_id = a.id) WHERE (a.id IS NULL OR a.id > 0);
+SELECT b.* FROM b LEFT JOIN a ON (b.a_id = a.id) WHERE (a.id IS NULL OR a.id > 0);
+
+rollback;
+
+-- another join removal bug: this is not optimizable, either
+begin;
+
+create temp table innertab (id int8 primary key, dat1 int8);
+insert into innertab values(123, 42);
+
+SELECT * FROM
+ (SELECT 1 AS x) ss1
+ LEFT JOIN
+ (SELECT q1, q2, COALESCE(dat1, q1) AS y
+ FROM int8_tbl LEFT JOIN innertab ON q2 = id) ss2
+ ON true;
+
+rollback;
+
+-- another join removal bug: we must clean up correctly when removing a PHV
+begin;
+
+create temp table uniquetbl (f1 text unique);
+
+explain (costs off)
+select t1.* from
+ uniquetbl as t1
+ left join (select *, '***'::text as d1 from uniquetbl) t2
+ on t1.f1 = t2.f1
+ left join uniquetbl t3
+ on t2.d1 = t3.f1;
+
+explain (costs off)
+select t0.*
+from
+ text_tbl t0
+ left join
+ (select case t1.ten when 0 then 'doh!'::text else null::text end as case1,
+ t1.stringu2
+ from tenk1 t1
+ join int4_tbl i4 ON i4.f1 = t1.unique2
+ left join uniquetbl u1 ON u1.f1 = t1.string4) ss
+ on t0.f1 = ss.case1
+where ss.stringu2 !~* ss.case1;
+
+select t0.*
+from
+ text_tbl t0
+ left join
+ (select case t1.ten when 0 then 'doh!'::text else null::text end as case1,
+ t1.stringu2
+ from tenk1 t1
+ join int4_tbl i4 ON i4.f1 = t1.unique2
+ left join uniquetbl u1 ON u1.f1 = t1.string4) ss
+ on t0.f1 = ss.case1
+where ss.stringu2 !~* ss.case1;
+
+rollback;
+
+-- test case to expose miscomputation of required relid set for a PHV
+explain (verbose, costs off)
+select i8.*, ss.v, t.unique2
+ from int8_tbl i8
+ left join int4_tbl i4 on i4.f1 = 1
+ left join lateral (select i4.f1 + 1 as v) as ss on true
+ left join tenk1 t on t.unique2 = ss.v
+where q2 = 456;
+
+select i8.*, ss.v, t.unique2
+ from int8_tbl i8
+ left join int4_tbl i4 on i4.f1 = 1
+ left join lateral (select i4.f1 + 1 as v) as ss on true
+ left join tenk1 t on t.unique2 = ss.v
+where q2 = 456;
+
+-- and check a related issue where we miscompute required relids for
+-- a PHV that's been translated to a child rel
+create temp table parttbl (a integer primary key) partition by range (a);
+create temp table parttbl1 partition of parttbl for values from (1) to (100);
+insert into parttbl values (11), (12);
+explain (costs off)
+select * from
+ (select *, 12 as phv from parttbl) as ss
+ right join int4_tbl on true
+where ss.a = ss.phv and f1 = 0;
+
+select * from
+ (select *, 12 as phv from parttbl) as ss
+ right join int4_tbl on true
+where ss.a = ss.phv and f1 = 0;
+
+-- bug #8444: we've historically allowed duplicate aliases within aliased JOINs
+
+select * from
+ int8_tbl x join (int4_tbl x cross join int4_tbl y) j on q1 = f1; -- error
+select * from
+ int8_tbl x join (int4_tbl x cross join int4_tbl y) j on q1 = y.f1; -- error
+select * from
+ int8_tbl x join (int4_tbl x cross join int4_tbl y(ff)) j on q1 = f1; -- ok
+
+--
+-- Test hints given on incorrect column references are useful
+--
+
+select t1.uunique1 from
+ tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion
+select t2.uunique1 from
+ tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t2" suggestion
+select uunique1 from
+ tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, suggest both at once
+
+--
+-- Take care to reference the correct RTE
+--
+
+select atts.relid::regclass, s.* from pg_stats s join
+ pg_attribute a on s.attname = a.attname and s.tablename =
+ a.attrelid::regclass::text join (select unnest(indkey) attnum,
+ indexrelid from pg_index i) atts on atts.attnum = a.attnum where
+ schemaname != 'pg_catalog';
+
+--
+-- Test LATERAL
+--
+
+select unique2, x.*
+from tenk1 a, lateral (select * from int4_tbl b where f1 = a.unique1) x;
+explain (costs off)
+ select unique2, x.*
+ from tenk1 a, lateral (select * from int4_tbl b where f1 = a.unique1) x;
+select unique2, x.*
+from int4_tbl x, lateral (select unique2 from tenk1 where f1 = unique1) ss;
+explain (costs off)
+ select unique2, x.*
+ from int4_tbl x, lateral (select unique2 from tenk1 where f1 = unique1) ss;
+explain (costs off)
+ select unique2, x.*
+ from int4_tbl x cross join lateral (select unique2 from tenk1 where f1 = unique1) ss;
+select unique2, x.*
+from int4_tbl x left join lateral (select unique1, unique2 from tenk1 where f1 = unique1) ss on true;
+explain (costs off)
+ select unique2, x.*
+ from int4_tbl x left join lateral (select unique1, unique2 from tenk1 where f1 = unique1) ss on true;
+
+-- check scoping of lateral versus parent references
+-- the first of these should return int8_tbl.q2, the second int8_tbl.q1
+select *, (select r from (select q1 as q2) x, (select q2 as r) y) from int8_tbl;
+select *, (select r from (select q1 as q2) x, lateral (select q2 as r) y) from int8_tbl;
+
+-- lateral with function in FROM
+select count(*) from tenk1 a, lateral generate_series(1,two) g;
+explain (costs off)
+ select count(*) from tenk1 a, lateral generate_series(1,two) g;
+explain (costs off)
+ select count(*) from tenk1 a cross join lateral generate_series(1,two) g;
+-- don't need the explicit LATERAL keyword for functions
+explain (costs off)
+ select count(*) from tenk1 a, generate_series(1,two) g;
+
+-- lateral with UNION ALL subselect
+explain (costs off)
+ select * from generate_series(100,200) g,
+ lateral (select * from int8_tbl a where g = q1 union all
+ select * from int8_tbl b where g = q2) ss;
+select * from generate_series(100,200) g,
+ lateral (select * from int8_tbl a where g = q1 union all
+ select * from int8_tbl b where g = q2) ss;
+
+-- lateral with VALUES
+explain (costs off)
+ select count(*) from tenk1 a,
+ tenk1 b join lateral (values(a.unique1)) ss(x) on b.unique2 = ss.x;
+select count(*) from tenk1 a,
+ tenk1 b join lateral (values(a.unique1)) ss(x) on b.unique2 = ss.x;
+
+-- lateral with VALUES, no flattening possible
+explain (costs off)
+ select count(*) from tenk1 a,
+ tenk1 b join lateral (values(a.unique1),(-1)) ss(x) on b.unique2 = ss.x;
+select count(*) from tenk1 a,
+ tenk1 b join lateral (values(a.unique1),(-1)) ss(x) on b.unique2 = ss.x;
+
+-- lateral injecting a strange outer join condition
+explain (costs off)
+ select * from int8_tbl a,
+ int8_tbl x left join lateral (select a.q1 from int4_tbl y) ss(z)
+ on x.q2 = ss.z
+ order by a.q1, a.q2, x.q1, x.q2, ss.z;
+select * from int8_tbl a,
+ int8_tbl x left join lateral (select a.q1 from int4_tbl y) ss(z)
+ on x.q2 = ss.z
+ order by a.q1, a.q2, x.q1, x.q2, ss.z;
+
+-- lateral reference to a join alias variable
+select * from (select f1/2 as x from int4_tbl) ss1 join int4_tbl i4 on x = f1,
+ lateral (select x) ss2(y);
+select * from (select f1 as x from int4_tbl) ss1 join int4_tbl i4 on x = f1,
+ lateral (values(x)) ss2(y);
+select * from ((select f1/2 as x from int4_tbl) ss1 join int4_tbl i4 on x = f1) j,
+ lateral (select x) ss2(y);
+
+-- lateral references requiring pullup
+select * from (values(1)) x(lb),
+ lateral generate_series(lb,4) x4;
+select * from (select f1/1000000000 from int4_tbl) x(lb),
+ lateral generate_series(lb,4) x4;
+select * from (values(1)) x(lb),
+ lateral (values(lb)) y(lbcopy);
+select * from (values(1)) x(lb),
+ lateral (select lb from int4_tbl) y(lbcopy);
+select * from
+ int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1,
+ lateral (values(x.q1,y.q1,y.q2)) v(xq1,yq1,yq2);
+select * from
+ int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1,
+ lateral (select x.q1,y.q1,y.q2) v(xq1,yq1,yq2);
+select x.* from
+ int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1,
+ lateral (select x.q1,y.q1,y.q2) v(xq1,yq1,yq2);
+select v.* from
+ (int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1)
+ left join int4_tbl z on z.f1 = x.q2,
+ lateral (select x.q1,y.q1 union all select x.q2,y.q2) v(vx,vy);
+select v.* from
+ (int8_tbl x left join (select q1,(select coalesce(q2,0)) q2 from int8_tbl) y on x.q2 = y.q1)
+ left join int4_tbl z on z.f1 = x.q2,
+ lateral (select x.q1,y.q1 union all select x.q2,y.q2) v(vx,vy);
+select v.* from
+ (int8_tbl x left join (select q1,(select coalesce(q2,0)) q2 from int8_tbl) y on x.q2 = y.q1)
+ left join int4_tbl z on z.f1 = x.q2,
+ lateral (select x.q1,y.q1 from onerow union all select x.q2,y.q2 from onerow) v(vx,vy);
+
+explain (verbose, costs off)
+select * from
+ int8_tbl a left join
+ lateral (select *, a.q2 as x from int8_tbl b) ss on a.q2 = ss.q1;
+select * from
+ int8_tbl a left join
+ lateral (select *, a.q2 as x from int8_tbl b) ss on a.q2 = ss.q1;
+explain (verbose, costs off)
+select * from
+ int8_tbl a left join
+ lateral (select *, coalesce(a.q2, 42) as x from int8_tbl b) ss on a.q2 = ss.q1;
+select * from
+ int8_tbl a left join
+ lateral (select *, coalesce(a.q2, 42) as x from int8_tbl b) ss on a.q2 = ss.q1;
+
+-- lateral can result in join conditions appearing below their
+-- real semantic level
+explain (verbose, costs off)
+select * from int4_tbl i left join
+ lateral (select * from int2_tbl j where i.f1 = j.f1) k on true;
+select * from int4_tbl i left join
+ lateral (select * from int2_tbl j where i.f1 = j.f1) k on true;
+explain (verbose, costs off)
+select * from int4_tbl i left join
+ lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true;
+select * from int4_tbl i left join
+ lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true;
+explain (verbose, costs off)
+select * from int4_tbl a,
+ lateral (
+ select * from int4_tbl b left join int8_tbl c on (b.f1 = q1 and a.f1 = q2)
+ ) ss;
+select * from int4_tbl a,
+ lateral (
+ select * from int4_tbl b left join int8_tbl c on (b.f1 = q1 and a.f1 = q2)
+ ) ss;
+
+-- lateral reference in a PlaceHolderVar evaluated at join level
+explain (verbose, costs off)
+select * from
+ int8_tbl a left join lateral
+ (select b.q1 as bq1, c.q1 as cq1, least(a.q1,b.q1,c.q1) from
+ int8_tbl b cross join int8_tbl c) ss
+ on a.q2 = ss.bq1;
+select * from
+ int8_tbl a left join lateral
+ (select b.q1 as bq1, c.q1 as cq1, least(a.q1,b.q1,c.q1) from
+ int8_tbl b cross join int8_tbl c) ss
+ on a.q2 = ss.bq1;
+
+-- case requiring nested PlaceHolderVars
+explain (verbose, costs off)
+select * from
+ int8_tbl c left join (
+ int8_tbl a left join (select q1, coalesce(q2,42) as x from int8_tbl b) ss1
+ on a.q2 = ss1.q1
+ cross join
+ lateral (select q1, coalesce(ss1.x,q2) as y from int8_tbl d) ss2
+ ) on c.q2 = ss2.q1,
+ lateral (select ss2.y offset 0) ss3;
+
+-- case that breaks the old ph_may_need optimization
+explain (verbose, costs off)
+select c.*,a.*,ss1.q1,ss2.q1,ss3.* from
+ int8_tbl c left join (
+ int8_tbl a left join
+ (select q1, coalesce(q2,f1) as x from int8_tbl b, int4_tbl b2
+ where q1 < f1) ss1
+ on a.q2 = ss1.q1
+ cross join
+ lateral (select q1, coalesce(ss1.x,q2) as y from int8_tbl d) ss2
+ ) on c.q2 = ss2.q1,
+ lateral (select * from int4_tbl i where ss2.y > f1) ss3;
+
+-- check processing of postponed quals (bug #9041)
+explain (verbose, costs off)
+select * from
+ (select 1 as x offset 0) x cross join (select 2 as y offset 0) y
+ left join lateral (
+ select * from (select 3 as z offset 0) z where z.z = x.x
+ ) zz on zz.z = y.y;
+
+-- check dummy rels with lateral references (bug #15694)
+explain (verbose, costs off)
+select * from int8_tbl i8 left join lateral
+ (select *, i8.q2 from int4_tbl where false) ss on true;
+explain (verbose, costs off)
+select * from int8_tbl i8 left join lateral
+ (select *, i8.q2 from int4_tbl i1, int4_tbl i2 where false) ss on true;
+
+-- check handling of nested appendrels inside LATERAL
+select * from
+ ((select 2 as v) union all (select 3 as v)) as q1
+ cross join lateral
+ ((select * from
+ ((select 4 as v) union all (select 5 as v)) as q3)
+ union all
+ (select q1.v)
+ ) as q2;
+
+-- check the number of columns specified
+SELECT * FROM (int8_tbl i cross join int4_tbl j) ss(a,b,c,d);
+
+-- check we don't try to do a unique-ified semijoin with LATERAL
+explain (verbose, costs off)
+select * from
+ (values (0,9998), (1,1000)) v(id,x),
+ lateral (select f1 from int4_tbl
+ where f1 = any (select unique1 from tenk1
+ where unique2 = v.x offset 0)) ss;
+select * from
+ (values (0,9998), (1,1000)) v(id,x),
+ lateral (select f1 from int4_tbl
+ where f1 = any (select unique1 from tenk1
+ where unique2 = v.x offset 0)) ss;
+
+-- check proper extParam/allParam handling (this isn't exactly a LATERAL issue,
+-- but we can make the test case much more compact with LATERAL)
+explain (verbose, costs off)
+select * from (values (0), (1)) v(id),
+lateral (select * from int8_tbl t1,
+ lateral (select * from
+ (select * from int8_tbl t2
+ where q1 = any (select q2 from int8_tbl t3
+ where q2 = (select greatest(t1.q1,t2.q2))
+ and (select v.id=0)) offset 0) ss2) ss
+ where t1.q1 = ss.q2) ss0;
+
+select * from (values (0), (1)) v(id),
+lateral (select * from int8_tbl t1,
+ lateral (select * from
+ (select * from int8_tbl t2
+ where q1 = any (select q2 from int8_tbl t3
+ where q2 = (select greatest(t1.q1,t2.q2))
+ and (select v.id=0)) offset 0) ss2) ss
+ where t1.q1 = ss.q2) ss0;
+
+-- test some error cases where LATERAL should have been used but wasn't
+select f1,g from int4_tbl a, (select f1 as g) ss;
+select f1,g from int4_tbl a, (select a.f1 as g) ss;
+select f1,g from int4_tbl a cross join (select f1 as g) ss;
+select f1,g from int4_tbl a cross join (select a.f1 as g) ss;
+-- SQL:2008 says the left table is in scope but illegal to access here
+select f1,g from int4_tbl a right join lateral generate_series(0, a.f1) g on true;
+select f1,g from int4_tbl a full join lateral generate_series(0, a.f1) g on true;
+-- check we complain about ambiguous table references
+select * from
+ int8_tbl x cross join (int4_tbl x cross join lateral (select x.f1) ss);
+-- LATERAL can be used to put an aggregate into the FROM clause of its query
+select 1 from tenk1 a, lateral (select max(a.unique1) from int4_tbl b) ss;
+
+-- check behavior of LATERAL in UPDATE/DELETE
+
+create temp table xx1 as select f1 as x1, -f1 as x2 from int4_tbl;
+
+-- error, can't do this:
+update xx1 set x2 = f1 from (select * from int4_tbl where f1 = x1) ss;
+update xx1 set x2 = f1 from (select * from int4_tbl where f1 = xx1.x1) ss;
+-- can't do it even with LATERAL:
+update xx1 set x2 = f1 from lateral (select * from int4_tbl where f1 = x1) ss;
+-- we might in future allow something like this, but for now it's an error:
+update xx1 set x2 = f1 from xx1, lateral (select * from int4_tbl where f1 = x1) ss;
+
+-- also errors:
+delete from xx1 using (select * from int4_tbl where f1 = x1) ss;
+delete from xx1 using (select * from int4_tbl where f1 = xx1.x1) ss;
+delete from xx1 using lateral (select * from int4_tbl where f1 = x1) ss;
+
+--
+-- test LATERAL reference propagation down a multi-level inheritance hierarchy
+-- produced for a multi-level partitioned table hierarchy.
+--
+create table join_pt1 (a int, b int, c varchar) partition by range(a);
+create table join_pt1p1 partition of join_pt1 for values from (0) to (100) partition by range(b);
+create table join_pt1p2 partition of join_pt1 for values from (100) to (200);
+create table join_pt1p1p1 partition of join_pt1p1 for values from (0) to (100);
+insert into join_pt1 values (1, 1, 'x'), (101, 101, 'y');
+create table join_ut1 (a int, b int, c varchar);
+insert into join_ut1 values (101, 101, 'y'), (2, 2, 'z');
+explain (verbose, costs off)
+select t1.b, ss.phv from join_ut1 t1 left join lateral
+ (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv
+ from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss
+ on t1.a = ss.t2a order by t1.a;
+select t1.b, ss.phv from join_ut1 t1 left join lateral
+ (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv
+ from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss
+ on t1.a = ss.t2a order by t1.a;
+
+drop table join_pt1;
+drop table join_ut1;
+
+--
+-- test estimation behavior with multi-column foreign key and constant qual
+--
+
+begin;
+
+create table fkest (x integer, x10 integer, x10b integer, x100 integer);
+insert into fkest select x, x/10, x/10, x/100 from generate_series(1,1000) x;
+create unique index on fkest(x, x10, x100);
+analyze fkest;
+
+explain (costs off)
+select * from fkest f1
+ join fkest f2 on (f1.x = f2.x and f1.x10 = f2.x10b and f1.x100 = f2.x100)
+ join fkest f3 on f1.x = f3.x
+ where f1.x100 = 2;
+
+alter table fkest add constraint fk
+ foreign key (x, x10b, x100) references fkest (x, x10, x100);
+
+explain (costs off)
+select * from fkest f1
+ join fkest f2 on (f1.x = f2.x and f1.x10 = f2.x10b and f1.x100 = f2.x100)
+ join fkest f3 on f1.x = f3.x
+ where f1.x100 = 2;
+
+rollback;
+
+--
+-- test that foreign key join estimation performs sanely for outer joins
+--
+
+begin;
+
+create table fkest (a int, b int, c int unique, primary key(a,b));
+create table fkest1 (a int, b int, primary key(a,b));
+
+insert into fkest select x/10, x%10, x from generate_series(1,1000) x;
+insert into fkest1 select x/10, x%10 from generate_series(1,1000) x;
+
+alter table fkest1
+ add constraint fkest1_a_b_fkey foreign key (a,b) references fkest;
+
+analyze fkest;
+analyze fkest1;
+
+explain (costs off)
+select *
+from fkest f
+ left join fkest1 f1 on f.a = f1.a and f.b = f1.b
+ left join fkest1 f2 on f.a = f2.a and f.b = f2.b
+ left join fkest1 f3 on f.a = f3.a and f.b = f3.b
+where f.c = 1;
+
+rollback;
+
+--
+-- test planner's ability to mark joins as unique
+--
+
+create table j1 (id int primary key);
+create table j2 (id int primary key);
+create table j3 (id int);
+
+insert into j1 values(1),(2),(3);
+insert into j2 values(1),(2),(3);
+insert into j3 values(1),(1);
+
+analyze j1;
+analyze j2;
+analyze j3;
+
+-- ensure join is properly marked as unique
+explain (verbose, costs off)
+select * from j1 inner join j2 on j1.id = j2.id;
+
+-- ensure join is not unique when not an equi-join
+explain (verbose, costs off)
+select * from j1 inner join j2 on j1.id > j2.id;
+
+-- ensure non-unique rel is not chosen as inner
+explain (verbose, costs off)
+select * from j1 inner join j3 on j1.id = j3.id;
+
+-- ensure left join is marked as unique
+explain (verbose, costs off)
+select * from j1 left join j2 on j1.id = j2.id;
+
+-- ensure right join is marked as unique
+explain (verbose, costs off)
+select * from j1 right join j2 on j1.id = j2.id;
+
+-- ensure full join is marked as unique
+explain (verbose, costs off)
+select * from j1 full join j2 on j1.id = j2.id;
+
+-- a clauseless (cross) join can't be unique
+explain (verbose, costs off)
+select * from j1 cross join j2;
+
+-- ensure a natural join is marked as unique
+explain (verbose, costs off)
+select * from j1 natural join j2;
+
+-- ensure a distinct clause allows the inner to become unique
+explain (verbose, costs off)
+select * from j1
+inner join (select distinct id from j3) j3 on j1.id = j3.id;
+
+-- ensure group by clause allows the inner to become unique
+explain (verbose, costs off)
+select * from j1
+inner join (select id from j3 group by id) j3 on j1.id = j3.id;
+
+drop table j1;
+drop table j2;
+drop table j3;
+
+-- test more complex permutations of unique joins
+
+create table j1 (id1 int, id2 int, primary key(id1,id2));
+create table j2 (id1 int, id2 int, primary key(id1,id2));
+create table j3 (id1 int, id2 int, primary key(id1,id2));
+
+insert into j1 values(1,1),(1,2);
+insert into j2 values(1,1);
+insert into j3 values(1,1);
+
+analyze j1;
+analyze j2;
+analyze j3;
+
+-- ensure there's no unique join when not all columns which are part of the
+-- unique index are seen in the join clause
+explain (verbose, costs off)
+select * from j1
+inner join j2 on j1.id1 = j2.id1;
+
+-- ensure proper unique detection with multiple join quals
+explain (verbose, costs off)
+select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2;
+
+-- ensure we don't detect the join to be unique when quals are not part of the
+-- join condition
+explain (verbose, costs off)
+select * from j1
+inner join j2 on j1.id1 = j2.id1 where j1.id2 = 1;
+
+-- as above, but for left joins.
+explain (verbose, costs off)
+select * from j1
+left join j2 on j1.id1 = j2.id1 where j1.id2 = 1;
+
+-- validate logic in merge joins which skips mark and restore.
+-- it should only do this if all quals which were used to detect the unique
+-- are present as join quals, and not plain quals.
+set enable_nestloop to 0;
+set enable_hashjoin to 0;
+set enable_sort to 0;
+
+-- create indexes that will be preferred over the PKs to perform the join
+create index j1_id1_idx on j1 (id1) where id1 % 1000 = 1;
+create index j2_id1_idx on j2 (id1) where id1 % 1000 = 1;
+
+-- need an additional row in j2, if we want j2_id1_idx to be preferred
+insert into j2 values(1,2);
+analyze j2;
+
+explain (costs off) select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1;
+
+select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1;
+
+-- Exercise array keys mark/restore B-Tree code
+explain (costs off) select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 = any (array[1]);
+
+select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 = any (array[1]);
+
+-- Exercise array keys "find extreme element" B-Tree code
+explain (costs off) select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 >= any (array[1,5]);
+
+select * from j1
+inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2
+where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 >= any (array[1,5]);
+
+reset enable_nestloop;
+reset enable_hashjoin;
+reset enable_sort;
+
+drop table j1;
+drop table j2;
+drop table j3;
+
+-- check that semijoin inner is not seen as unique for a portion of the outerrel
+explain (verbose, costs off)
+select t1.unique1, t2.hundred
+from onek t1, tenk1 t2
+where exists (select 1 from tenk1 t3
+ where t3.thousand = t1.unique1 and t3.tenthous = t2.hundred)
+ and t1.unique1 < 1;
+
+-- ... unless it actually is unique
+create table j3 as select unique1, tenthous from onek;
+vacuum analyze j3;
+create unique index on j3(unique1, tenthous);
+
+explain (verbose, costs off)
+select t1.unique1, t2.hundred
+from onek t1, tenk1 t2
+where exists (select 1 from j3
+ where j3.unique1 = t1.unique1 and j3.tenthous = t2.hundred)
+ and t1.unique1 < 1;
+
+drop table j3;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/json.out b/ydb/library/yql/tests/postgresql/original/cases/json.out
new file mode 100644
index 0000000000..e9d6e9faf2
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/json.out
@@ -0,0 +1,2638 @@
+-- Strings.
+SELECT '""'::json; -- OK.
+ json
+------
+ ""
+(1 row)
+
+SELECT $$''$$::json; -- ERROR, single quotes are not allowed
+ERROR: invalid input syntax for type json
+LINE 1: SELECT $$''$$::json;
+ ^
+DETAIL: Token "'" is invalid.
+CONTEXT: JSON data, line 1: '...
+SELECT '"abc"'::json; -- OK
+ json
+-------
+ "abc"
+(1 row)
+
+SELECT '"abc'::json; -- ERROR, quotes not closed
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"abc'::json;
+ ^
+DETAIL: Token ""abc" is invalid.
+CONTEXT: JSON data, line 1: "abc
+SELECT '"abc
+def"'::json; -- ERROR, unescaped newline in string constant
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"abc
+ ^
+DETAIL: Character with value 0x0a must be escaped.
+CONTEXT: JSON data, line 1: "abc
+SELECT '"\n\"\\"'::json; -- OK, legal escapes
+ json
+----------
+ "\n\"\\"
+(1 row)
+
+SELECT '"\v"'::json; -- ERROR, not a valid JSON escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\v"'::json;
+ ^
+DETAIL: Escape sequence "\v" is invalid.
+CONTEXT: JSON data, line 1: "\v...
+-- see json_encoding test for input with unicode escapes
+-- Numbers.
+SELECT '1'::json; -- OK
+ json
+------
+ 1
+(1 row)
+
+SELECT '0'::json; -- OK
+ json
+------
+ 0
+(1 row)
+
+SELECT '01'::json; -- ERROR, not valid according to JSON spec
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '01'::json;
+ ^
+DETAIL: Token "01" is invalid.
+CONTEXT: JSON data, line 1: 01
+SELECT '0.1'::json; -- OK
+ json
+------
+ 0.1
+(1 row)
+
+SELECT '9223372036854775808'::json; -- OK, even though it's too large for int8
+ json
+---------------------
+ 9223372036854775808
+(1 row)
+
+SELECT '1e100'::json; -- OK
+ json
+-------
+ 1e100
+(1 row)
+
+SELECT '1.3e100'::json; -- OK
+ json
+---------
+ 1.3e100
+(1 row)
+
+SELECT '1f2'::json; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '1f2'::json;
+ ^
+DETAIL: Token "1f2" is invalid.
+CONTEXT: JSON data, line 1: 1f2
+SELECT '0.x1'::json; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '0.x1'::json;
+ ^
+DETAIL: Token "0.x1" is invalid.
+CONTEXT: JSON data, line 1: 0.x1
+SELECT '1.3ex100'::json; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '1.3ex100'::json;
+ ^
+DETAIL: Token "1.3ex100" is invalid.
+CONTEXT: JSON data, line 1: 1.3ex100
+-- Arrays.
+SELECT '[]'::json; -- OK
+ json
+------
+ []
+(1 row)
+
+SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::json; -- OK
+ json
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
+(1 row)
+
+SELECT '[1,2]'::json; -- OK
+ json
+-------
+ [1,2]
+(1 row)
+
+SELECT '[1,2,]'::json; -- ERROR, trailing comma
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,2,]'::json;
+ ^
+DETAIL: Expected JSON value, but found "]".
+CONTEXT: JSON data, line 1: [1,2,]
+SELECT '[1,2'::json; -- ERROR, no closing bracket
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,2'::json;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1: [1,2
+SELECT '[1,[2]'::json; -- ERROR, no closing bracket
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,[2]'::json;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1: [1,[2]
+-- Objects.
+SELECT '{}'::json; -- OK
+ json
+------
+ {}
+(1 row)
+
+SELECT '{"abc"}'::json; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"}'::json;
+ ^
+DETAIL: Expected ":", but found "}".
+CONTEXT: JSON data, line 1: {"abc"}
+SELECT '{"abc":1}'::json; -- OK
+ json
+-----------
+ {"abc":1}
+(1 row)
+
+SELECT '{1:"abc"}'::json; -- ERROR, keys must be strings
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{1:"abc"}'::json;
+ ^
+DETAIL: Expected string or "}", but found "1".
+CONTEXT: JSON data, line 1: {1...
+SELECT '{"abc",1}'::json; -- ERROR, wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc",1}'::json;
+ ^
+DETAIL: Expected ":", but found ",".
+CONTEXT: JSON data, line 1: {"abc",...
+SELECT '{"abc"=1}'::json; -- ERROR, totally wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"=1}'::json;
+ ^
+DETAIL: Token "=" is invalid.
+CONTEXT: JSON data, line 1: {"abc"=...
+SELECT '{"abc"::1}'::json; -- ERROR, another wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"::1}'::json;
+ ^
+DETAIL: Expected JSON value, but found ":".
+CONTEXT: JSON data, line 1: {"abc"::...
+SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::json; -- OK
+ json
+---------------------------------------------------------
+ {"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}
+(1 row)
+
+SELECT '{"abc":1:2}'::json; -- ERROR, colon in wrong spot
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc":1:2}'::json;
+ ^
+DETAIL: Expected "," or "}", but found ":".
+CONTEXT: JSON data, line 1: {"abc":1:...
+SELECT '{"abc":1,3}'::json; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc":1,3}'::json;
+ ^
+DETAIL: Expected string, but found "3".
+CONTEXT: JSON data, line 1: {"abc":1,3...
+-- Recursion.
+SET max_stack_depth = '100kB';
+SELECT repeat('[', 10000)::json;
+ERROR: stack depth limit exceeded
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+SELECT repeat('{"a":', 10000)::json;
+ERROR: stack depth limit exceeded
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+RESET max_stack_depth;
+-- Miscellaneous stuff.
+SELECT 'true'::json; -- OK
+ json
+------
+ true
+(1 row)
+
+SELECT 'false'::json; -- OK
+ json
+-------
+ false
+(1 row)
+
+SELECT 'null'::json; -- OK
+ json
+------
+ null
+(1 row)
+
+SELECT ' true '::json; -- OK, even with extra whitespace
+ json
+--------
+ true
+(1 row)
+
+SELECT 'true false'::json; -- ERROR, too many values
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'true false'::json;
+ ^
+DETAIL: Expected end of input, but found "false".
+CONTEXT: JSON data, line 1: true false
+SELECT 'true, false'::json; -- ERROR, too many values
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'true, false'::json;
+ ^
+DETAIL: Expected end of input, but found ",".
+CONTEXT: JSON data, line 1: true,...
+SELECT 'truf'::json; -- ERROR, not a keyword
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'truf'::json;
+ ^
+DETAIL: Token "truf" is invalid.
+CONTEXT: JSON data, line 1: truf
+SELECT 'trues'::json; -- ERROR, not a keyword
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'trues'::json;
+ ^
+DETAIL: Token "trues" is invalid.
+CONTEXT: JSON data, line 1: trues
+SELECT ''::json; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT ''::json;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1:
+SELECT ' '::json; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT ' '::json;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1:
+-- Multi-line JSON input to check ERROR reporting
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "three":
+ true}'::json; -- OK
+ json
+------------------------------
+ { +
+ "one": 1, +
+ "two":"two",+
+ "three": +
+ true}
+(1 row)
+
+SELECT '{
+ "one": 1,
+ "two":,"two", -- ERROR extraneous comma before field "two"
+ "three":
+ true}'::json;
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{
+ ^
+DETAIL: Expected JSON value, but found ",".
+CONTEXT: JSON data, line 3: "two":,...
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::json;
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{
+ ^
+DETAIL: Expected JSON value, but found "}".
+CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":}
+-- ERROR missing value for last field
+--constructors
+-- array_to_json
+SELECT array_to_json(array(select 1 as a));
+ array_to_json
+---------------
+ [1]
+(1 row)
+
+SELECT array_to_json(array_agg(q),false) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
+ array_to_json
+---------------------------------------------
+ [{"b":1,"c":2},{"b":2,"c":4},{"b":3,"c":6}]
+(1 row)
+
+SELECT array_to_json(array_agg(q),true) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
+ array_to_json
+-----------------
+ [{"b":1,"c":2},+
+ {"b":2,"c":4},+
+ {"b":3,"c":6}]
+(1 row)
+
+SELECT array_to_json(array_agg(q),false)
+ FROM ( SELECT $$a$$ || x AS b, y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+ array_to_json
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]},{"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}]
+(1 row)
+
+SELECT array_to_json(array_agg(x),false) from generate_series(5,10) x;
+ array_to_json
+----------------
+ [5,6,7,8,9,10]
+(1 row)
+
+SELECT array_to_json('{{1,5},{99,100}}'::int[]);
+ array_to_json
+------------------
+ [[1,5],[99,100]]
+(1 row)
+
+-- row_to_json
+SELECT row_to_json(row(1,'foo'));
+ row_to_json
+---------------------
+ {"f1":1,"f2":"foo"}
+(1 row)
+
+SELECT row_to_json(q)
+FROM (SELECT $$a$$ || x AS b,
+ y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+ row_to_json
+--------------------------------------------------------------------
+ {"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
+ {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
+ {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
+ {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
+(4 rows)
+
+SELECT row_to_json(q,true)
+FROM (SELECT $$a$$ || x AS b,
+ y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+ row_to_json
+-----------------------------------------------------
+ {"b":"a1", +
+ "c":4, +
+ "z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
+ {"b":"a1", +
+ "c":5, +
+ "z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
+ {"b":"a2", +
+ "c":4, +
+ "z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}
+ {"b":"a2", +
+ "c":5, +
+ "z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}
+(4 rows)
+
+CREATE TEMP TABLE rows AS
+SELECT x, 'txt' || x as y
+FROM generate_series(1,3) AS x;
+SELECT row_to_json(q,true)
+FROM rows q;
+ row_to_json
+--------------
+ {"x":1, +
+ "y":"txt1"}
+ {"x":2, +
+ "y":"txt2"}
+ {"x":3, +
+ "y":"txt3"}
+(3 rows)
+
+SELECT row_to_json(row((select array_agg(x) as d from generate_series(5,10) x)),false);
+ row_to_json
+-----------------------
+ {"f1":[5,6,7,8,9,10]}
+(1 row)
+
+-- anyarray column
+analyze rows;
+select attname, to_json(histogram_bounds) histogram_bounds
+from pg_stats
+where tablename = 'rows' and
+ schemaname = pg_my_temp_schema()::regnamespace::text
+order by 1;
+ attname | histogram_bounds
+---------+------------------------
+ x | [1,2,3]
+ y | ["txt1","txt2","txt3"]
+(2 rows)
+
+-- to_json, timestamps
+select to_json(timestamp '2014-05-28 12:22:35.614298');
+ to_json
+------------------------------
+ "2014-05-28T12:22:35.614298"
+(1 row)
+
+BEGIN;
+SET LOCAL TIME ZONE 10.5;
+select to_json(timestamptz '2014-05-28 12:22:35.614298-04');
+ to_json
+------------------------------------
+ "2014-05-29T02:52:35.614298+10:30"
+(1 row)
+
+SET LOCAL TIME ZONE -8;
+select to_json(timestamptz '2014-05-28 12:22:35.614298-04');
+ to_json
+------------------------------------
+ "2014-05-28T08:22:35.614298-08:00"
+(1 row)
+
+COMMIT;
+select to_json(date '2014-05-28');
+ to_json
+--------------
+ "2014-05-28"
+(1 row)
+
+select to_json(date 'Infinity');
+ to_json
+------------
+ "infinity"
+(1 row)
+
+select to_json(date '-Infinity');
+ to_json
+-------------
+ "-infinity"
+(1 row)
+
+select to_json(timestamp 'Infinity');
+ to_json
+------------
+ "infinity"
+(1 row)
+
+select to_json(timestamp '-Infinity');
+ to_json
+-------------
+ "-infinity"
+(1 row)
+
+select to_json(timestamptz 'Infinity');
+ to_json
+------------
+ "infinity"
+(1 row)
+
+select to_json(timestamptz '-Infinity');
+ to_json
+-------------
+ "-infinity"
+(1 row)
+
+--json_agg
+SELECT json_agg(q)
+ FROM ( SELECT $$a$$ || x AS b, y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+ json_agg
+-----------------------------------------------------------------------
+ [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, +
+ {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}, +
+ {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, +
+ {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}]
+(1 row)
+
+SELECT json_agg(q ORDER BY x, y)
+ FROM rows q;
+ json_agg
+-----------------------
+ [{"x":1,"y":"txt1"}, +
+ {"x":2,"y":"txt2"}, +
+ {"x":3,"y":"txt3"}]
+(1 row)
+
+UPDATE rows SET x = NULL WHERE x = 1;
+SELECT json_agg(q ORDER BY x NULLS FIRST, y)
+ FROM rows q;
+ json_agg
+--------------------------
+ [{"x":null,"y":"txt1"}, +
+ {"x":2,"y":"txt2"}, +
+ {"x":3,"y":"txt3"}]
+(1 row)
+
+-- non-numeric output
+SELECT row_to_json(q)
+FROM (SELECT 'NaN'::float8 AS "float8field") q;
+ row_to_json
+-----------------------
+ {"float8field":"NaN"}
+(1 row)
+
+SELECT row_to_json(q)
+FROM (SELECT 'Infinity'::float8 AS "float8field") q;
+ row_to_json
+----------------------------
+ {"float8field":"Infinity"}
+(1 row)
+
+SELECT row_to_json(q)
+FROM (SELECT '-Infinity'::float8 AS "float8field") q;
+ row_to_json
+-----------------------------
+ {"float8field":"-Infinity"}
+(1 row)
+
+-- json input
+SELECT row_to_json(q)
+FROM (SELECT '{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}'::json AS "jsonfield") q;
+ row_to_json
+------------------------------------------------------------------
+ {"jsonfield":{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}}
+(1 row)
+
+-- json extraction functions
+CREATE TEMP TABLE test_json (
+ json_type text,
+ test_json json
+);
+INSERT INTO test_json VALUES
+('scalar','"a scalar"'),
+('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
+('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
+SELECT test_json -> 'x'
+FROM test_json
+WHERE json_type = 'scalar';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json -> 'x'
+FROM test_json
+WHERE json_type = 'array';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json -> 'x'
+FROM test_json
+WHERE json_type = 'object';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json->'field2'
+FROM test_json
+WHERE json_type = 'object';
+ ?column?
+----------
+ "val2"
+(1 row)
+
+SELECT test_json->>'field2'
+FROM test_json
+WHERE json_type = 'object';
+ ?column?
+----------
+ val2
+(1 row)
+
+SELECT test_json -> 2
+FROM test_json
+WHERE json_type = 'scalar';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json -> 2
+FROM test_json
+WHERE json_type = 'array';
+ ?column?
+----------
+ "two"
+(1 row)
+
+SELECT test_json -> -1
+FROM test_json
+WHERE json_type = 'array';
+ ?column?
+----------
+ {"f1":9}
+(1 row)
+
+SELECT test_json -> 2
+FROM test_json
+WHERE json_type = 'object';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json->>2
+FROM test_json
+WHERE json_type = 'array';
+ ?column?
+----------
+ two
+(1 row)
+
+SELECT test_json ->> 6 FROM test_json WHERE json_type = 'array';
+ ?column?
+----------
+ [1,2,3]
+(1 row)
+
+SELECT test_json ->> 7 FROM test_json WHERE json_type = 'array';
+ ?column?
+----------
+ {"f1":9}
+(1 row)
+
+SELECT test_json ->> 'field4' FROM test_json WHERE json_type = 'object';
+ ?column?
+----------
+ 4
+(1 row)
+
+SELECT test_json ->> 'field5' FROM test_json WHERE json_type = 'object';
+ ?column?
+----------
+ [1,2,3]
+(1 row)
+
+SELECT test_json ->> 'field6' FROM test_json WHERE json_type = 'object';
+ ?column?
+----------
+ {"f1":9}
+(1 row)
+
+SELECT json_object_keys(test_json)
+FROM test_json
+WHERE json_type = 'scalar';
+ERROR: cannot call json_object_keys on a scalar
+SELECT json_object_keys(test_json)
+FROM test_json
+WHERE json_type = 'array';
+ERROR: cannot call json_object_keys on an array
+SELECT json_object_keys(test_json)
+FROM test_json
+WHERE json_type = 'object';
+ json_object_keys
+------------------
+ field1
+ field2
+ field3
+ field4
+ field5
+ field6
+(6 rows)
+
+-- test extending object_keys resultset - initial resultset size is 256
+select count(*) from
+ (select json_object_keys(json_object(array_agg(g)))
+ from (select unnest(array['f'||n,n::text])as g
+ from generate_series(1,300) as n) x ) y;
+ count
+-------
+ 300
+(1 row)
+
+-- nulls
+select (test_json->'field3') is null as expect_false
+from test_json
+where json_type = 'object';
+ expect_false
+--------------
+ f
+(1 row)
+
+select (test_json->>'field3') is null as expect_true
+from test_json
+where json_type = 'object';
+ expect_true
+-------------
+ t
+(1 row)
+
+select (test_json->3) is null as expect_false
+from test_json
+where json_type = 'array';
+ expect_false
+--------------
+ f
+(1 row)
+
+select (test_json->>3) is null as expect_true
+from test_json
+where json_type = 'array';
+ expect_true
+-------------
+ t
+(1 row)
+
+-- corner cases
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::text;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::int;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 1;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> -1;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> '';
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json -> 1;
+ ?column?
+-------------
+ {"b": "cc"}
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json -> 3;
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json -> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": "c", "b": null}'::json -> 'b';
+ ?column?
+----------
+ null
+(1 row)
+
+select '"foo"'::json -> 1;
+ ?column?
+----------
+
+(1 row)
+
+select '"foo"'::json -> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::text;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::int;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 1;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> '';
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json ->> 1;
+ ?column?
+-------------
+ {"b": "cc"}
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json ->> 3;
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json ->> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": "c", "b": null}'::json ->> 'b';
+ ?column?
+----------
+
+(1 row)
+
+select '"foo"'::json ->> 1;
+ ?column?
+----------
+
+(1 row)
+
+select '"foo"'::json ->> 'z';
+ ?column?
+----------
+
+(1 row)
+
+-- array length
+SELECT json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]');
+ json_array_length
+-------------------
+ 5
+(1 row)
+
+SELECT json_array_length('[]');
+ json_array_length
+-------------------
+ 0
+(1 row)
+
+SELECT json_array_length('{"f1":1,"f2":[5,6]}');
+ERROR: cannot get array length of a non-array
+SELECT json_array_length('4');
+ERROR: cannot get array length of a scalar
+-- each
+select json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}');
+ json_each
+-------------------
+ (f1,"[1,2,3]")
+ (f2,"{""f3"":1}")
+ (f4,null)
+(3 rows)
+
+select * from json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
+ key | value
+-----+-----------
+ f1 | [1,2,3]
+ f2 | {"f3":1}
+ f4 | null
+ f5 | 99
+ f6 | "stringy"
+(5 rows)
+
+select json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}');
+ json_each_text
+-------------------
+ (f1,"[1,2,3]")
+ (f2,"{""f3"":1}")
+ (f4,)
+ (f5,null)
+(4 rows)
+
+select * from json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
+ key | value
+-----+----------
+ f1 | [1,2,3]
+ f2 | {"f3":1}
+ f4 |
+ f5 | 99
+ f6 | stringy
+(5 rows)
+
+-- extract_path, extract_path_as_text
+select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
+ json_extract_path
+-------------------
+ "stringy"
+(1 row)
+
+select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
+ json_extract_path
+-------------------
+ {"f3":1}
+(1 row)
+
+select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
+ json_extract_path
+-------------------
+ "f3"
+(1 row)
+
+select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
+ json_extract_path
+-------------------
+ 1
+(1 row)
+
+select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
+ json_extract_path_text
+------------------------
+ stringy
+(1 row)
+
+select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
+ json_extract_path_text
+------------------------
+ {"f3":1}
+(1 row)
+
+select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
+ json_extract_path_text
+------------------------
+ f3
+(1 row)
+
+select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
+ json_extract_path_text
+------------------------
+ 1
+(1 row)
+
+-- extract_path nulls
+select json_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_false;
+ expect_false
+--------------
+ f
+(1 row)
+
+select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_true;
+ expect_true
+-------------
+ t
+(1 row)
+
+select json_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_false;
+ expect_false
+--------------
+ f
+(1 row)
+
+select json_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_true;
+ expect_true
+-------------
+ t
+(1 row)
+
+-- extract_path operators
+select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f4','f6'];
+ ?column?
+-----------
+ "stringy"
+(1 row)
+
+select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2'];
+ ?column?
+----------
+ {"f3":1}
+(1 row)
+
+select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','0'];
+ ?column?
+----------
+ "f3"
+(1 row)
+
+select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','1'];
+ ?column?
+----------
+ 1
+(1 row)
+
+select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f4','f6'];
+ ?column?
+----------
+ stringy
+(1 row)
+
+select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2'];
+ ?column?
+----------
+ {"f3":1}
+(1 row)
+
+select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','0'];
+ ?column?
+----------
+ f3
+(1 row)
+
+select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','1'];
+ ?column?
+----------
+ 1
+(1 row)
+
+-- corner cases for same
+select '{"a": {"b":{"c": "foo"}}}'::json #> '{}';
+ ?column?
+---------------------------
+ {"a": {"b":{"c": "foo"}}}
+(1 row)
+
+select '[1,2,3]'::json #> '{}';
+ ?column?
+----------
+ [1,2,3]
+(1 row)
+
+select '"foo"'::json #> '{}';
+ ?column?
+----------
+ "foo"
+(1 row)
+
+select '42'::json #> '{}';
+ ?column?
+----------
+ 42
+(1 row)
+
+select 'null'::json #> '{}';
+ ?column?
+----------
+ null
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a'];
+ ?column?
+--------------------
+ {"b":{"c": "foo"}}
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', null];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', ''];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b'];
+ ?column?
+--------------
+ {"c": "foo"}
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c'];
+ ?column?
+----------
+ "foo"
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c','d'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','z','c'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','1','b'];
+ ?column?
+----------
+ "cc"
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','z','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json #> array['1','b'];
+ ?column?
+----------
+ "cc"
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json #> array['z','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": null}]'::json #> array['1','b'];
+ ?column?
+----------
+ null
+(1 row)
+
+select '"foo"'::json #> array['z'];
+ ?column?
+----------
+
+(1 row)
+
+select '42'::json #> array['f2'];
+ ?column?
+----------
+
+(1 row)
+
+select '42'::json #> array['0'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #>> '{}';
+ ?column?
+---------------------------
+ {"a": {"b":{"c": "foo"}}}
+(1 row)
+
+select '[1,2,3]'::json #>> '{}';
+ ?column?
+----------
+ [1,2,3]
+(1 row)
+
+select '"foo"'::json #>> '{}';
+ ?column?
+----------
+ foo
+(1 row)
+
+select '42'::json #>> '{}';
+ ?column?
+----------
+ 42
+(1 row)
+
+select 'null'::json #>> '{}';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a'];
+ ?column?
+--------------------
+ {"b":{"c": "foo"}}
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', null];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', ''];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b'];
+ ?column?
+--------------
+ {"c": "foo"}
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c'];
+ ?column?
+----------
+ foo
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c','d'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','z','c'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','1','b'];
+ ?column?
+----------
+ cc
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','z','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['1','b'];
+ ?column?
+----------
+ cc
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['z','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": null}]'::json #>> array['1','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '"foo"'::json #>> array['z'];
+ ?column?
+----------
+
+(1 row)
+
+select '42'::json #>> array['f2'];
+ ?column?
+----------
+
+(1 row)
+
+select '42'::json #>> array['0'];
+ ?column?
+----------
+
+(1 row)
+
+-- array_elements
+select json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
+ json_array_elements
+-----------------------
+ 1
+ true
+ [1,[2,3]]
+ null
+ {"f1":1,"f2":[7,8,9]}
+ false
+ "stringy"
+(7 rows)
+
+select * from json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
+ value
+-----------------------
+ 1
+ true
+ [1,[2,3]]
+ null
+ {"f1":1,"f2":[7,8,9]}
+ false
+ "stringy"
+(7 rows)
+
+select json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
+ json_array_elements_text
+--------------------------
+ 1
+ true
+ [1,[2,3]]
+
+ {"f1":1,"f2":[7,8,9]}
+ false
+ stringy
+(7 rows)
+
+select * from json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
+ value
+-----------------------
+ 1
+ true
+ [1,[2,3]]
+
+ {"f1":1,"f2":[7,8,9]}
+ false
+ stringy
+(7 rows)
+
+-- populate_record
+create type jpop as (a text, b int, c timestamp);
+CREATE DOMAIN js_int_not_null AS int NOT NULL;
+CREATE DOMAIN js_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3);
+CREATE DOMAIN js_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3);
+create type j_unordered_pair as (x int, y int);
+create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y);
+CREATE TYPE jsrec AS (
+ i int,
+ ia _int4,
+ ia1 int[],
+ ia2 int[][],
+ ia3 int[][][],
+ ia1d js_int_array_1d,
+ ia2d js_int_array_2d,
+ t text,
+ ta text[],
+ c char(10),
+ ca char(10)[],
+ ts timestamp,
+ js json,
+ jsb jsonb,
+ jsa json[],
+ rec jpop,
+ reca jpop[]
+);
+CREATE TYPE jsrec_i_not_null AS (
+ i js_int_not_null
+);
+select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q;
+ a | b | c
+--------+---+---
+ blurfl | |
+(1 row)
+
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | 3 | Mon Dec 31 15:30:56 2012
+(1 row)
+
+select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q;
+ a | b | c
+--------+---+---
+ blurfl | |
+(1 row)
+
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | 3 | Mon Dec 31 15:30:56 2012
+(1 row)
+
+select * from json_populate_record(null::jpop,'{"a":[100,200,false],"x":43.2}') q;
+ a | b | c
+-----------------+---+---
+ [100,200,false] | |
+(1 row)
+
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":[100,200,false],"x":43.2}') q;
+ a | b | c
+-----------------+---+--------------------------
+ [100,200,false] | 3 | Mon Dec 31 15:30:56 2012
+(1 row)
+
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"c":[100,200,false],"x":43.2}') q;
+ERROR: invalid input syntax for type timestamp: "[100,200,false]"
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{}') q;
+ a | b | c
+---+---+--------------------------
+ x | 3 | Mon Dec 31 15:30:56 2012
+(1 row)
+
+SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"x": 43.2}') q;
+ERROR: domain js_int_not_null does not allow null values
+SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": null}') q;
+ERROR: domain js_int_not_null does not allow null values
+SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": 12345}') q;
+ i
+-------
+ 12345
+(1 row)
+
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": null}') q;
+ ia
+----
+
+(1 row)
+
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ia".
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [1, "2", null, 4]}') q;
+ ia
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1, 2], [3, 4]]}') q;
+ ia
+---------------
+ {{1,2},{3,4}}
+(1 row)
+
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], 2]}') q;
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ia".
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], [2, 3]]}') q;
+ERROR: malformed JSON array
+DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": "{1,2,3}"}') q;
+ ia
+---------
+ {1,2,3}
+(1 row)
+
+SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": null}') q;
+ ia1
+-----
+
+(1 row)
+
+SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ia1".
+SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [1, "2", null, 4]}') q;
+ ia1
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [[1, 2, 3]]}') q;
+ ia1
+-----------
+ {{1,2,3}}
+(1 row)
+
+SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": null}') q;
+ ia1d
+------
+
+(1 row)
+
+SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ia1d".
+SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null, 4]}') q;
+ERROR: value for domain js_int_array_1d violates check constraint "js_int_array_1d_check"
+SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null]}') q;
+ ia1d
+------------
+ {1,2,NULL}
+(1 row)
+
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [1, "2", null, 4]}') q;
+ ia2
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [null, 4]]}') q;
+ ia2
+------------------
+ {{1,2},{NULL,4}}
+(1 row)
+
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[], []]}') q;
+ ia2
+-----
+ {}
+(1 row)
+
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [3]]}') q;
+ERROR: malformed JSON array
+DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], 3, 4]}') q;
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ia2".
+SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2"], [null, 4]]}') q;
+ERROR: value for domain js_int_array_2d violates check constraint "js_int_array_2d_check"
+SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q;
+ ia2d
+----------------------
+ {{1,2,3},{NULL,5,6}}
+(1 row)
+
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [1, "2", null, 4]}') q;
+ ia3
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [[1, 2], [null, 4]]}') q;
+ ia3
+------------------
+ {{1,2},{NULL,4}}
+(1 row)
+
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q;
+ ia3
+-----
+ {}
+(1 row)
+
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q;
+ ia3
+-------------------
+ {{{1,2}},{{3,4}}}
+(1 row)
+
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q;
+ ia3
+-------------------------------
+ {{{1,2},{3,4}},{{5,6},{7,8}}}
+(1 row)
+
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q;
+ERROR: malformed JSON array
+DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
+SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": null}') q;
+ ta
+----
+
+(1 row)
+
+SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ta".
+SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [1, "2", null, 4]}') q;
+ ta
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q;
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ta".
+SELECT c FROM json_populate_record(NULL::jsrec, '{"c": null}') q;
+ c
+---
+
+(1 row)
+
+SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaa"}') q;
+ c
+------------
+ aaa
+(1 row)
+
+SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaa"}') q;
+ c
+------------
+ aaaaaaaaaa
+(1 row)
+
+SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaaaaa"}') q;
+ERROR: value too long for type character(10)
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": null}') q;
+ ca
+----
+
+(1 row)
+
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ca".
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [1, "2", null, 4]}') q;
+ ca
+-----------------------------------------------
+ {"1 ","2 ",NULL,"4 "}
+(1 row)
+
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q;
+ERROR: value too long for type character(10)
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q;
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ca".
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": null}') q;
+ js
+----
+
+(1 row)
+
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": true}') q;
+ js
+------
+ true
+(1 row)
+
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": 123.45}') q;
+ js
+--------
+ 123.45
+(1 row)
+
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "123.45"}') q;
+ js
+----------
+ "123.45"
+(1 row)
+
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "abc"}') q;
+ js
+-------
+ "abc"
+(1 row)
+
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": [123, "123", null, {"key": "value"}]}') q;
+ js
+--------------------------------------
+ [123, "123", null, {"key": "value"}]
+(1 row)
+
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q;
+ js
+--------------------------------------
+ {"a": "bbb", "b": null, "c": 123.45}
+(1 row)
+
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": null}') q;
+ jsb
+-----
+
+(1 row)
+
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": true}') q;
+ jsb
+------
+ true
+(1 row)
+
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": 123.45}') q;
+ jsb
+--------
+ 123.45
+(1 row)
+
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "123.45"}') q;
+ jsb
+----------
+ "123.45"
+(1 row)
+
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "abc"}') q;
+ jsb
+-------
+ "abc"
+(1 row)
+
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q;
+ jsb
+--------------------------------------
+ [123, "123", null, {"key": "value"}]
+(1 row)
+
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q;
+ jsb
+--------------------------------------
+ {"a": "bbb", "b": null, "c": 123.45}
+(1 row)
+
+SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": null}') q;
+ jsa
+-----
+
+(1 row)
+
+SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "jsa".
+SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": [1, "2", null, 4]}') q;
+ jsa
+--------------------
+ {1,"\"2\"",NULL,4}
+(1 row)
+
+SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q;
+ jsa
+----------------------------------------------------------
+ {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{ \"k\" : \"v\" }"}
+(1 row)
+
+SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": 123}') q;
+ERROR: cannot call populate_composite on a scalar
+SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": [1, 2]}') q;
+ERROR: cannot call populate_composite on an array
+SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q;
+ rec
+-----------------------------------
+ (abc,,"Thu Jan 02 00:00:00 2003")
+(1 row)
+
+SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": "(abc,42,01.02.2003)"}') q;
+ rec
+-------------------------------------
+ (abc,42,"Thu Jan 02 00:00:00 2003")
+(1 row)
+
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "reca".
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [1, 2]}') q;
+ERROR: cannot call populate_composite on a scalar
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q;
+ reca
+--------------------------------------------------------
+ {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
+(1 row)
+
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": ["(abc,42,01.02.2003)"]}') q;
+ reca
+-------------------------------------------
+ {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
+(1 row)
+
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q;
+ reca
+-------------------------------------------
+ {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
+(1 row)
+
+SELECT rec FROM json_populate_record(
+ row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
+ row('x',3,'2012-12-31 15:30:56')::jpop,NULL)::jsrec,
+ '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}'
+) q;
+ rec
+------------------------------------
+ (abc,3,"Thu Jan 02 00:00:00 2003")
+(1 row)
+
+-- anonymous record type
+SELECT json_populate_record(null::record, '{"x": 0, "y": 1}');
+ERROR: could not determine row type for result of json_populate_record
+HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
+SELECT json_populate_record(row(1,2), '{"f1": 0, "f2": 1}');
+ json_populate_record
+----------------------
+ (0,1)
+(1 row)
+
+SELECT * FROM
+ json_populate_record(null::record, '{"x": 776}') AS (x int, y int);
+ x | y
+-----+---
+ 776 |
+(1 row)
+
+-- composite domain
+SELECT json_populate_record(null::j_ordered_pair, '{"x": 0, "y": 1}');
+ json_populate_record
+----------------------
+ (0,1)
+(1 row)
+
+SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 0}');
+ json_populate_record
+----------------------
+ (0,2)
+(1 row)
+
+SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 1, "y": 0}');
+ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check"
+-- populate_recordset
+select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | |
+ | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+----+--------------------------
+ blurfl | 99 |
+ def | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | |
+ | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+----+--------------------------
+ blurfl | 99 |
+ def | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+---------------+----+--------------------------
+ [100,200,300] | 99 |
+ {"z":true} | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ERROR: invalid input syntax for type timestamp: "[100,200,300]"
+create type jpop2 as (a int, b json, c int, d int);
+select * from json_populate_recordset(null::jpop2, '[{"a":2,"c":3,"b":{"z":4},"d":6}]') q;
+ a | b | c | d
+---+---------+---+---
+ 2 | {"z":4} | 3 | 6
+(1 row)
+
+select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | |
+ | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+----+--------------------------
+ blurfl | 99 |
+ def | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+---------------+----+--------------------------
+ [100,200,300] | 99 |
+ {"z":true} | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+-- anonymous record type
+SELECT json_populate_recordset(null::record, '[{"x": 0, "y": 1}]');
+ERROR: could not determine row type for result of json_populate_recordset
+HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
+SELECT json_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]');
+ json_populate_recordset
+-------------------------
+ (0,1)
+(1 row)
+
+SELECT i, json_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]')
+FROM (VALUES (1),(2)) v(i);
+ i | json_populate_recordset
+---+-------------------------
+ 1 | (42,50)
+ 1 | (1,43)
+ 2 | (42,50)
+ 2 | (2,43)
+(4 rows)
+
+SELECT * FROM
+ json_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int);
+ x | y
+-----+---
+ 776 |
+(1 row)
+
+-- empty array is a corner case
+SELECT json_populate_recordset(null::record, '[]');
+ERROR: could not determine row type for result of json_populate_recordset
+HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
+SELECT json_populate_recordset(row(1,2), '[]');
+ json_populate_recordset
+-------------------------
+(0 rows)
+
+SELECT * FROM json_populate_recordset(NULL::jpop,'[]') q;
+ a | b | c
+---+---+---
+(0 rows)
+
+SELECT * FROM
+ json_populate_recordset(null::record, '[]') AS (x int, y int);
+ x | y
+---+---
+(0 rows)
+
+-- composite domain
+SELECT json_populate_recordset(null::j_ordered_pair, '[{"x": 0, "y": 1}]');
+ json_populate_recordset
+-------------------------
+ (0,1)
+(1 row)
+
+SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 0}, {"y": 3}]');
+ json_populate_recordset
+-------------------------
+ (0,2)
+ (1,3)
+(2 rows)
+
+SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 1, "y": 0}]');
+ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check"
+-- negative cases where the wrong record type is supplied
+select * from json_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+ERROR: function return row and query-specified return row do not match
+DETAIL: Returned row contains 1 attribute, but query expects 2.
+select * from json_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+ERROR: function return row and query-specified return row do not match
+DETAIL: Returned type integer at ordinal position 1, but query expects text.
+select * from json_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+ERROR: function return row and query-specified return row do not match
+DETAIL: Returned row contains 3 attributes, but query expects 2.
+select * from json_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text);
+ERROR: function return row and query-specified return row do not match
+DETAIL: Returned type integer at ordinal position 1, but query expects text.
+-- test type info caching in json_populate_record()
+CREATE TEMP TABLE jspoptest (js json);
+INSERT INTO jspoptest
+SELECT '{
+ "jsa": [1, "2", null, 4],
+ "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2},
+ "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]
+}'::json
+FROM generate_series(1, 3);
+SELECT (json_populate_record(NULL::jsrec, js)).* FROM jspoptest;
+ i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca
+---+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+--------------------------------------------------------
+ | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
+ | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
+ | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
+(3 rows)
+
+DROP TYPE jsrec;
+DROP TYPE jsrec_i_not_null;
+DROP DOMAIN js_int_not_null;
+DROP DOMAIN js_int_array_1d;
+DROP DOMAIN js_int_array_2d;
+DROP DOMAIN j_ordered_pair;
+DROP TYPE j_unordered_pair;
+--json_typeof() function
+select value, json_typeof(value)
+ from (values (json '123.4'),
+ (json '-1'),
+ (json '"foo"'),
+ (json 'true'),
+ (json 'false'),
+ (json 'null'),
+ (json '[1, 2, 3]'),
+ (json '[]'),
+ (json '{"x":"foo", "y":123}'),
+ (json '{}'),
+ (NULL::json))
+ as data(value);
+ value | json_typeof
+----------------------+-------------
+ 123.4 | number
+ -1 | number
+ "foo" | string
+ true | boolean
+ false | boolean
+ null | null
+ [1, 2, 3] | array
+ [] | array
+ {"x":"foo", "y":123} | object
+ {} | object
+ |
+(11 rows)
+
+-- json_build_array, json_build_object, json_object_agg
+SELECT json_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
+ json_build_array
+-----------------------------------------------------------------------
+ ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1,2,3]}]
+(1 row)
+
+SELECT json_build_array('a', NULL); -- ok
+ json_build_array
+------------------
+ ["a", null]
+(1 row)
+
+SELECT json_build_array(VARIADIC NULL::text[]); -- ok
+ json_build_array
+------------------
+
+(1 row)
+
+SELECT json_build_array(VARIADIC '{}'::text[]); -- ok
+ json_build_array
+------------------
+ []
+(1 row)
+
+SELECT json_build_array(VARIADIC '{a,b,c}'::text[]); -- ok
+ json_build_array
+------------------
+ ["a", "b", "c"]
+(1 row)
+
+SELECT json_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok
+ json_build_array
+------------------
+ ["a", null]
+(1 row)
+
+SELECT json_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok
+ json_build_array
+----------------------
+ ["1", "2", "3", "4"]
+(1 row)
+
+SELECT json_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok
+ json_build_array
+------------------
+ [1, 2, 3, 4]
+(1 row)
+
+SELECT json_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
+ json_build_array
+--------------------
+ [1, 4, 2, 5, 3, 6]
+(1 row)
+
+SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
+ json_build_object
+----------------------------------------------------------------------------
+ {"a" : 1, "b" : 1.2, "c" : true, "d" : null, "e" : {"x": 3, "y": [1,2,3]}}
+(1 row)
+
+SELECT json_build_object(
+ 'a', json_build_object('b',false,'c',99),
+ 'd', json_build_object('e',array[9,8,7]::int[],
+ 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r)));
+ json_build_object
+-------------------------------------------------------------------------------------------------
+ {"a" : {"b" : false, "c" : 99}, "d" : {"e" : [9,8,7], "f" : {"relkind":"r","name":"pg_class"}}}
+(1 row)
+
+SELECT json_build_object('{a,b,c}'::text[]); -- error
+ERROR: argument list must have even number of elements
+HINT: The arguments of json_build_object() must consist of alternating keys and values.
+SELECT json_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array
+ERROR: key value must be scalar, not array, composite, or json
+SELECT json_build_object('a', 'b', 'c'); -- error
+ERROR: argument list must have even number of elements
+HINT: The arguments of json_build_object() must consist of alternating keys and values.
+SELECT json_build_object(NULL, 'a'); -- error, key cannot be NULL
+ERROR: argument 1 cannot be null
+HINT: Object keys should be text.
+SELECT json_build_object('a', NULL); -- ok
+ json_build_object
+-------------------
+ {"a" : null}
+(1 row)
+
+SELECT json_build_object(VARIADIC NULL::text[]); -- ok
+ json_build_object
+-------------------
+
+(1 row)
+
+SELECT json_build_object(VARIADIC '{}'::text[]); -- ok
+ json_build_object
+-------------------
+ {}
+(1 row)
+
+SELECT json_build_object(VARIADIC '{a,b,c}'::text[]); -- error
+ERROR: argument list must have even number of elements
+HINT: The arguments of json_build_object() must consist of alternating keys and values.
+SELECT json_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok
+ json_build_object
+-------------------
+ {"a" : null}
+(1 row)
+
+SELECT json_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL
+ERROR: argument 1 cannot be null
+HINT: Object keys should be text.
+SELECT json_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok
+ json_build_object
+------------------------
+ {"1" : "2", "3" : "4"}
+(1 row)
+
+SELECT json_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok
+ json_build_object
+--------------------
+ {"1" : 2, "3" : 4}
+(1 row)
+
+SELECT json_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
+ json_build_object
+-----------------------------
+ {"1" : 4, "2" : 5, "3" : 6}
+(1 row)
+
+-- empty objects/arrays
+SELECT json_build_array();
+ json_build_array
+------------------
+ []
+(1 row)
+
+SELECT json_build_object();
+ json_build_object
+-------------------
+ {}
+(1 row)
+
+-- make sure keys are quoted
+SELECT json_build_object(1,2);
+ json_build_object
+-------------------
+ {"1" : 2}
+(1 row)
+
+-- keys must be scalar and not null
+SELECT json_build_object(null,2);
+ERROR: argument 1 cannot be null
+HINT: Object keys should be text.
+SELECT json_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r;
+ERROR: key value must be scalar, not array, composite, or json
+SELECT json_build_object(json '{"a":1,"b":2}', 3);
+ERROR: key value must be scalar, not array, composite, or json
+SELECT json_build_object('{1,2,3}'::int[], 3);
+ERROR: key value must be scalar, not array, composite, or json
+CREATE TEMP TABLE foo (serial_num int, name text, type text);
+INSERT INTO foo VALUES (847001,'t15','GE1043');
+INSERT INTO foo VALUES (847002,'t16','GE1043');
+INSERT INTO foo VALUES (847003,'sub-alpha','GESS90');
+SELECT json_build_object('turbines',json_object_agg(serial_num,json_build_object('name',name,'type',type)))
+FROM foo;
+ json_build_object
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"turbines" : { "847001" : {"name" : "t15", "type" : "GE1043"}, "847002" : {"name" : "t16", "type" : "GE1043"}, "847003" : {"name" : "sub-alpha", "type" : "GESS90"} }}
+(1 row)
+
+SELECT json_object_agg(name, type) FROM foo;
+ json_object_agg
+----------------------------------------------------------------
+ { "t15" : "GE1043", "t16" : "GE1043", "sub-alpha" : "GESS90" }
+(1 row)
+
+INSERT INTO foo VALUES (999999, NULL, 'bar');
+SELECT json_object_agg(name, type) FROM foo;
+ERROR: field name must not be null
+-- json_object
+-- empty object, one dimension
+SELECT json_object('{}');
+ json_object
+-------------
+ {}
+(1 row)
+
+-- empty object, two dimensions
+SELECT json_object('{}', '{}');
+ json_object
+-------------
+ {}
+(1 row)
+
+-- one dimension
+SELECT json_object('{a,1,b,2,3,NULL,"d e f","a b c"}');
+ json_object
+-------------------------------------------------------
+ {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"}
+(1 row)
+
+-- same but with two dimensions
+SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
+ json_object
+-------------------------------------------------------
+ {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"}
+(1 row)
+
+-- odd number error
+SELECT json_object('{a,b,c}');
+ERROR: array must have even number of elements
+-- one column error
+SELECT json_object('{{a},{b}}');
+ERROR: array must have two columns
+-- too many columns error
+SELECT json_object('{{a,b,c},{b,c,d}}');
+ERROR: array must have two columns
+-- too many dimensions error
+SELECT json_object('{{{a,b},{c,d}},{{b,c},{d,e}}}');
+ERROR: wrong number of array subscripts
+--two argument form of json_object
+select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}');
+ json_object
+------------------------------------------------------
+ {"a" : "1", "b" : "2", "c" : "3", "d e f" : "a b c"}
+(1 row)
+
+-- too many dimensions
+SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
+ERROR: wrong number of array subscripts
+-- mismatched dimensions
+select json_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}');
+ERROR: mismatched array dimensions
+select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}');
+ERROR: mismatched array dimensions
+-- null key error
+select json_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}');
+ERROR: null value not allowed for object key
+-- empty key is allowed
+select json_object('{a,b,"","d e f"}','{1,2,3,"a b c"}');
+ json_object
+-----------------------------------------------------
+ {"a" : "1", "b" : "2", "" : "3", "d e f" : "a b c"}
+(1 row)
+
+-- json_to_record and json_to_recordset
+select * from json_to_record('{"a":1,"b":"foo","c":"bar"}')
+ as x(a int, b text, d text);
+ a | b | d
+---+-----+---
+ 1 | foo |
+(1 row)
+
+select * from json_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]')
+ as x(a int, b text, c boolean);
+ a | b | c
+---+-----+---
+ 1 | foo |
+ 2 | bar | t
+(2 rows)
+
+select * from json_to_recordset('[{"a":1,"b":{"d":"foo"},"c":true},{"a":2,"c":false,"b":{"d":"bar"}}]')
+ as x(a int, b json, c boolean);
+ a | b | c
+---+-------------+---
+ 1 | {"d":"foo"} | t
+ 2 | {"d":"bar"} | f
+(2 rows)
+
+select *, c is null as c_is_null
+from json_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::json)
+ as t(a int, b json, c text, x int, ca char(5)[], ia int[][], r jpop);
+ a | b | c | x | ca | ia | r | c_is_null
+---+-----------------+---+---+-------------------+---------------+------------+-----------
+ 1 | {"c":16, "d":2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t
+(1 row)
+
+select *, c is null as c_is_null
+from json_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::json)
+ as t(a int, b json, c text, x int);
+ a | b | c | x | c_is_null
+---+-----------------+---+---+-----------
+ 1 | {"c":16, "d":2} | | 8 | t
+(1 row)
+
+select * from json_to_record('{"ia": null}') as x(ia _int4);
+ ia
+----
+
+(1 row)
+
+select * from json_to_record('{"ia": 123}') as x(ia _int4);
+ERROR: expected JSON array
+HINT: See the value of key "ia".
+select * from json_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4);
+ ia
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+select * from json_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4);
+ ia
+---------------
+ {{1,2},{3,4}}
+(1 row)
+
+select * from json_to_record('{"ia": [[1], 2]}') as x(ia _int4);
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ia".
+select * from json_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4);
+ERROR: malformed JSON array
+DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
+select * from json_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]);
+ ia2
+---------
+ {1,2,3}
+(1 row)
+
+select * from json_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]);
+ ia2
+---------------
+ {{1,2},{3,4}}
+(1 row)
+
+select * from json_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]);
+ ia2
+-----------------
+ {{{1},{2},{3}}}
+(1 row)
+
+select * from json_to_record('{"out": {"key": 1}}') as x(out json);
+ out
+------------
+ {"key": 1}
+(1 row)
+
+select * from json_to_record('{"out": [{"key": 1}]}') as x(out json);
+ out
+--------------
+ [{"key": 1}]
+(1 row)
+
+select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out json);
+ out
+----------------
+ "{\"key\": 1}"
+(1 row)
+
+select * from json_to_record('{"out": {"key": 1}}') as x(out jsonb);
+ out
+------------
+ {"key": 1}
+(1 row)
+
+select * from json_to_record('{"out": [{"key": 1}]}') as x(out jsonb);
+ out
+--------------
+ [{"key": 1}]
+(1 row)
+
+select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb);
+ out
+----------------
+ "{\"key\": 1}"
+(1 row)
+
+-- json_strip_nulls
+select json_strip_nulls(null);
+ json_strip_nulls
+------------------
+
+(1 row)
+
+select json_strip_nulls('1');
+ json_strip_nulls
+------------------
+ 1
+(1 row)
+
+select json_strip_nulls('"a string"');
+ json_strip_nulls
+------------------
+ "a string"
+(1 row)
+
+select json_strip_nulls('null');
+ json_strip_nulls
+------------------
+ null
+(1 row)
+
+select json_strip_nulls('[1,2,null,3,4]');
+ json_strip_nulls
+------------------
+ [1,2,null,3,4]
+(1 row)
+
+select json_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}');
+ json_strip_nulls
+------------------------------------
+ {"a":1,"c":[2,null,3],"d":{"e":4}}
+(1 row)
+
+select json_strip_nulls('[1,{"a":1,"b":null,"c":2},3]');
+ json_strip_nulls
+---------------------
+ [1,{"a":1,"c":2},3]
+(1 row)
+
+-- an empty object is not null and should not be stripped
+select json_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }');
+ json_strip_nulls
+------------------
+ {"a":{},"d":{}}
+(1 row)
+
+-- json to tsvector
+select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json);
+ to_tsvector
+---------------------------------------------------------------------------
+ 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11
+(1 row)
+
+-- json to tsvector with config
+select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json);
+ to_tsvector
+---------------------------------------------------------------------------
+ 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11
+(1 row)
+
+-- json to tsvector with stop words
+select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. iii"}}'::json);
+ to_tsvector
+----------------------------------------------------------------------------
+ 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13
+(1 row)
+
+-- json to tsvector with numeric values
+select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::json);
+ to_tsvector
+---------------------------------
+ 'aaa':1 'bbb':3 'ccc':5 'ddd':4
+(1 row)
+
+-- json_to_tsvector
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"');
+ json_to_tsvector
+----------------------------------------------------------------------------------------
+ '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"');
+ json_to_tsvector
+--------------------------------
+ 'b':2 'c':4 'd':6 'f':8 'g':10
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"');
+ json_to_tsvector
+------------------
+ 'aaa':1 'bbb':3
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"');
+ json_to_tsvector
+------------------
+ '123':1 '456':3
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"');
+ json_to_tsvector
+-------------------
+ 'fals':3 'true':1
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]');
+ json_to_tsvector
+---------------------------------
+ '123':5 '456':7 'aaa':1 'bbb':3
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"');
+ json_to_tsvector
+----------------------------------------------------------------------------------------
+ '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"');
+ json_to_tsvector
+--------------------------------
+ 'b':2 'c':4 'd':6 'f':8 'g':10
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"');
+ json_to_tsvector
+------------------
+ 'aaa':1 'bbb':3
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"');
+ json_to_tsvector
+------------------
+ '123':1 '456':3
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"');
+ json_to_tsvector
+-------------------
+ 'fals':3 'true':1
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]');
+ json_to_tsvector
+---------------------------------
+ '123':5 '456':7 'aaa':1 'bbb':3
+(1 row)
+
+-- to_tsvector corner cases
+select to_tsvector('""'::json);
+ to_tsvector
+-------------
+
+(1 row)
+
+select to_tsvector('{}'::json);
+ to_tsvector
+-------------
+
+(1 row)
+
+select to_tsvector('[]'::json);
+ to_tsvector
+-------------
+
+(1 row)
+
+select to_tsvector('null'::json);
+ to_tsvector
+-------------
+
+(1 row)
+
+-- json_to_tsvector corner cases
+select json_to_tsvector('""'::json, '"all"');
+ json_to_tsvector
+------------------
+
+(1 row)
+
+select json_to_tsvector('{}'::json, '"all"');
+ json_to_tsvector
+------------------
+
+(1 row)
+
+select json_to_tsvector('[]'::json, '"all"');
+ json_to_tsvector
+------------------
+
+(1 row)
+
+select json_to_tsvector('null'::json, '"all"');
+ json_to_tsvector
+------------------
+
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '""');
+ERROR: wrong flag in flag array: ""
+HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '{}');
+ERROR: wrong flag type, only arrays and scalars are allowed
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '[]');
+ json_to_tsvector
+------------------
+
+(1 row)
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, 'null');
+ERROR: flag array element is not a string
+HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["all", null]');
+ERROR: flag array element is not a string
+HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
+-- ts_headline for json
+select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'));
+ ts_headline
+---------------------------------------------------------------------------------------------------------
+ {"a":"aaa <b>bbb</b>","b":{"c":"ccc <b>ddd</b> fff","c1":"ccc1 ddd1"},"d":["ggg <b>hhh</b>","iii jjj"]}
+(1 row)
+
+select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'));
+ ts_headline
+----------------------------------------------------------------------------------------
+ {"a":"aaa <b>bbb</b>","b":{"c":"ccc <b>ddd</b> fff"},"d":["ggg <b>hhh</b>","iii jjj"]}
+(1 row)
+
+select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
+ ts_headline
+------------------------------------------------------------------------------------------
+ {"a":"aaa <bbb>","b":{"c":"ccc <ddd> fff","c1":"ccc1 ddd1"},"d":["ggg <hhh>","iii jjj"]}
+(1 row)
+
+select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
+ ts_headline
+------------------------------------------------------------------------------------------
+ {"a":"aaa <bbb>","b":{"c":"ccc <ddd> fff","c1":"ccc1 ddd1"},"d":["ggg <hhh>","iii jjj"]}
+(1 row)
+
+-- corner cases for ts_headline with json
+select ts_headline('null'::json, tsquery('aaa & bbb'));
+ ts_headline
+-------------
+ null
+(1 row)
+
+select ts_headline('{}'::json, tsquery('aaa & bbb'));
+ ts_headline
+-------------
+ {}
+(1 row)
+
+select ts_headline('[]'::json, tsquery('aaa & bbb'));
+ ts_headline
+-------------
+ []
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/json.sql b/ydb/library/yql/tests/postgresql/original/cases/json.sql
new file mode 100644
index 0000000000..e366c6f51b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/json.sql
@@ -0,0 +1,852 @@
+-- Strings.
+SELECT '""'::json; -- OK.
+SELECT $$''$$::json; -- ERROR, single quotes are not allowed
+SELECT '"abc"'::json; -- OK
+SELECT '"abc'::json; -- ERROR, quotes not closed
+SELECT '"abc
+def"'::json; -- ERROR, unescaped newline in string constant
+SELECT '"\n\"\\"'::json; -- OK, legal escapes
+SELECT '"\v"'::json; -- ERROR, not a valid JSON escape
+-- see json_encoding test for input with unicode escapes
+
+-- Numbers.
+SELECT '1'::json; -- OK
+SELECT '0'::json; -- OK
+SELECT '01'::json; -- ERROR, not valid according to JSON spec
+SELECT '0.1'::json; -- OK
+SELECT '9223372036854775808'::json; -- OK, even though it's too large for int8
+SELECT '1e100'::json; -- OK
+SELECT '1.3e100'::json; -- OK
+SELECT '1f2'::json; -- ERROR
+SELECT '0.x1'::json; -- ERROR
+SELECT '1.3ex100'::json; -- ERROR
+
+-- Arrays.
+SELECT '[]'::json; -- OK
+SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::json; -- OK
+SELECT '[1,2]'::json; -- OK
+SELECT '[1,2,]'::json; -- ERROR, trailing comma
+SELECT '[1,2'::json; -- ERROR, no closing bracket
+SELECT '[1,[2]'::json; -- ERROR, no closing bracket
+
+-- Objects.
+SELECT '{}'::json; -- OK
+SELECT '{"abc"}'::json; -- ERROR, no value
+SELECT '{"abc":1}'::json; -- OK
+SELECT '{1:"abc"}'::json; -- ERROR, keys must be strings
+SELECT '{"abc",1}'::json; -- ERROR, wrong separator
+SELECT '{"abc"=1}'::json; -- ERROR, totally wrong separator
+SELECT '{"abc"::1}'::json; -- ERROR, another wrong separator
+SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::json; -- OK
+SELECT '{"abc":1:2}'::json; -- ERROR, colon in wrong spot
+SELECT '{"abc":1,3}'::json; -- ERROR, no value
+
+-- Recursion.
+SET max_stack_depth = '100kB';
+SELECT repeat('[', 10000)::json;
+SELECT repeat('{"a":', 10000)::json;
+RESET max_stack_depth;
+
+-- Miscellaneous stuff.
+SELECT 'true'::json; -- OK
+SELECT 'false'::json; -- OK
+SELECT 'null'::json; -- OK
+SELECT ' true '::json; -- OK, even with extra whitespace
+SELECT 'true false'::json; -- ERROR, too many values
+SELECT 'true, false'::json; -- ERROR, too many values
+SELECT 'truf'::json; -- ERROR, not a keyword
+SELECT 'trues'::json; -- ERROR, not a keyword
+SELECT ''::json; -- ERROR, no value
+SELECT ' '::json; -- ERROR, no value
+
+-- Multi-line JSON input to check ERROR reporting
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "three":
+ true}'::json; -- OK
+SELECT '{
+ "one": 1,
+ "two":,"two", -- ERROR extraneous comma before field "two"
+ "three":
+ true}'::json;
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::json;
+-- ERROR missing value for last field
+
+--constructors
+-- array_to_json
+
+SELECT array_to_json(array(select 1 as a));
+SELECT array_to_json(array_agg(q),false) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
+SELECT array_to_json(array_agg(q),true) from (select x as b, x * 2 as c from generate_series(1,3) x) q;
+SELECT array_to_json(array_agg(q),false)
+ FROM ( SELECT $$a$$ || x AS b, y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+SELECT array_to_json(array_agg(x),false) from generate_series(5,10) x;
+SELECT array_to_json('{{1,5},{99,100}}'::int[]);
+
+-- row_to_json
+SELECT row_to_json(row(1,'foo'));
+
+SELECT row_to_json(q)
+FROM (SELECT $$a$$ || x AS b,
+ y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+
+SELECT row_to_json(q,true)
+FROM (SELECT $$a$$ || x AS b,
+ y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+
+CREATE TEMP TABLE rows AS
+SELECT x, 'txt' || x as y
+FROM generate_series(1,3) AS x;
+
+SELECT row_to_json(q,true)
+FROM rows q;
+
+SELECT row_to_json(row((select array_agg(x) as d from generate_series(5,10) x)),false);
+
+-- anyarray column
+
+analyze rows;
+
+select attname, to_json(histogram_bounds) histogram_bounds
+from pg_stats
+where tablename = 'rows' and
+ schemaname = pg_my_temp_schema()::regnamespace::text
+order by 1;
+
+-- to_json, timestamps
+
+select to_json(timestamp '2014-05-28 12:22:35.614298');
+
+BEGIN;
+SET LOCAL TIME ZONE 10.5;
+select to_json(timestamptz '2014-05-28 12:22:35.614298-04');
+SET LOCAL TIME ZONE -8;
+select to_json(timestamptz '2014-05-28 12:22:35.614298-04');
+COMMIT;
+
+select to_json(date '2014-05-28');
+
+select to_json(date 'Infinity');
+select to_json(date '-Infinity');
+select to_json(timestamp 'Infinity');
+select to_json(timestamp '-Infinity');
+select to_json(timestamptz 'Infinity');
+select to_json(timestamptz '-Infinity');
+
+--json_agg
+
+SELECT json_agg(q)
+ FROM ( SELECT $$a$$ || x AS b, y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+
+SELECT json_agg(q ORDER BY x, y)
+ FROM rows q;
+
+UPDATE rows SET x = NULL WHERE x = 1;
+
+SELECT json_agg(q ORDER BY x NULLS FIRST, y)
+ FROM rows q;
+
+-- non-numeric output
+SELECT row_to_json(q)
+FROM (SELECT 'NaN'::float8 AS "float8field") q;
+
+SELECT row_to_json(q)
+FROM (SELECT 'Infinity'::float8 AS "float8field") q;
+
+SELECT row_to_json(q)
+FROM (SELECT '-Infinity'::float8 AS "float8field") q;
+
+-- json input
+SELECT row_to_json(q)
+FROM (SELECT '{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}'::json AS "jsonfield") q;
+
+
+-- json extraction functions
+
+CREATE TEMP TABLE test_json (
+ json_type text,
+ test_json json
+);
+
+INSERT INTO test_json VALUES
+('scalar','"a scalar"'),
+('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
+('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
+
+SELECT test_json -> 'x'
+FROM test_json
+WHERE json_type = 'scalar';
+
+SELECT test_json -> 'x'
+FROM test_json
+WHERE json_type = 'array';
+
+SELECT test_json -> 'x'
+FROM test_json
+WHERE json_type = 'object';
+
+SELECT test_json->'field2'
+FROM test_json
+WHERE json_type = 'object';
+
+SELECT test_json->>'field2'
+FROM test_json
+WHERE json_type = 'object';
+
+SELECT test_json -> 2
+FROM test_json
+WHERE json_type = 'scalar';
+
+SELECT test_json -> 2
+FROM test_json
+WHERE json_type = 'array';
+
+SELECT test_json -> -1
+FROM test_json
+WHERE json_type = 'array';
+
+SELECT test_json -> 2
+FROM test_json
+WHERE json_type = 'object';
+
+SELECT test_json->>2
+FROM test_json
+WHERE json_type = 'array';
+
+SELECT test_json ->> 6 FROM test_json WHERE json_type = 'array';
+SELECT test_json ->> 7 FROM test_json WHERE json_type = 'array';
+
+SELECT test_json ->> 'field4' FROM test_json WHERE json_type = 'object';
+SELECT test_json ->> 'field5' FROM test_json WHERE json_type = 'object';
+SELECT test_json ->> 'field6' FROM test_json WHERE json_type = 'object';
+
+SELECT json_object_keys(test_json)
+FROM test_json
+WHERE json_type = 'scalar';
+
+SELECT json_object_keys(test_json)
+FROM test_json
+WHERE json_type = 'array';
+
+SELECT json_object_keys(test_json)
+FROM test_json
+WHERE json_type = 'object';
+
+-- test extending object_keys resultset - initial resultset size is 256
+
+select count(*) from
+ (select json_object_keys(json_object(array_agg(g)))
+ from (select unnest(array['f'||n,n::text])as g
+ from generate_series(1,300) as n) x ) y;
+
+-- nulls
+
+select (test_json->'field3') is null as expect_false
+from test_json
+where json_type = 'object';
+
+select (test_json->>'field3') is null as expect_true
+from test_json
+where json_type = 'object';
+
+select (test_json->3) is null as expect_false
+from test_json
+where json_type = 'array';
+
+select (test_json->>3) is null as expect_true
+from test_json
+where json_type = 'array';
+
+-- corner cases
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::text;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::int;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 1;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> -1;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 'z';
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> '';
+select '[{"b": "c"}, {"b": "cc"}]'::json -> 1;
+select '[{"b": "c"}, {"b": "cc"}]'::json -> 3;
+select '[{"b": "c"}, {"b": "cc"}]'::json -> 'z';
+select '{"a": "c", "b": null}'::json -> 'b';
+select '"foo"'::json -> 1;
+select '"foo"'::json -> 'z';
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::text;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::int;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 1;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 'z';
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> '';
+select '[{"b": "c"}, {"b": "cc"}]'::json ->> 1;
+select '[{"b": "c"}, {"b": "cc"}]'::json ->> 3;
+select '[{"b": "c"}, {"b": "cc"}]'::json ->> 'z';
+select '{"a": "c", "b": null}'::json ->> 'b';
+select '"foo"'::json ->> 1;
+select '"foo"'::json ->> 'z';
+
+-- array length
+
+SELECT json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]');
+
+SELECT json_array_length('[]');
+
+SELECT json_array_length('{"f1":1,"f2":[5,6]}');
+
+SELECT json_array_length('4');
+
+-- each
+
+select json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}');
+select * from json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
+
+select json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}');
+select * from json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
+
+-- extract_path, extract_path_as_text
+
+select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
+select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
+select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
+select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
+select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
+select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
+select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
+select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
+
+-- extract_path nulls
+
+select json_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_false;
+select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_true;
+select json_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_false;
+select json_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_true;
+
+-- extract_path operators
+
+select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f4','f6'];
+select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2'];
+select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','0'];
+select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','1'];
+
+select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f4','f6'];
+select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2'];
+select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','0'];
+select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','1'];
+
+-- corner cases for same
+select '{"a": {"b":{"c": "foo"}}}'::json #> '{}';
+select '[1,2,3]'::json #> '{}';
+select '"foo"'::json #> '{}';
+select '42'::json #> '{}';
+select 'null'::json #> '{}';
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a'];
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', null];
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', ''];
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b'];
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c'];
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c','d'];
+select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','z','c'];
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','1','b'];
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','z','b'];
+select '[{"b": "c"}, {"b": "cc"}]'::json #> array['1','b'];
+select '[{"b": "c"}, {"b": "cc"}]'::json #> array['z','b'];
+select '[{"b": "c"}, {"b": null}]'::json #> array['1','b'];
+select '"foo"'::json #> array['z'];
+select '42'::json #> array['f2'];
+select '42'::json #> array['0'];
+
+select '{"a": {"b":{"c": "foo"}}}'::json #>> '{}';
+select '[1,2,3]'::json #>> '{}';
+select '"foo"'::json #>> '{}';
+select '42'::json #>> '{}';
+select 'null'::json #>> '{}';
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a'];
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', null];
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', ''];
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b'];
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c'];
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c','d'];
+select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','z','c'];
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','1','b'];
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','z','b'];
+select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['1','b'];
+select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['z','b'];
+select '[{"b": "c"}, {"b": null}]'::json #>> array['1','b'];
+select '"foo"'::json #>> array['z'];
+select '42'::json #>> array['f2'];
+select '42'::json #>> array['0'];
+
+-- array_elements
+
+select json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
+select * from json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
+select json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
+select * from json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
+
+-- populate_record
+create type jpop as (a text, b int, c timestamp);
+
+CREATE DOMAIN js_int_not_null AS int NOT NULL;
+CREATE DOMAIN js_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3);
+CREATE DOMAIN js_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3);
+
+create type j_unordered_pair as (x int, y int);
+create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y);
+
+CREATE TYPE jsrec AS (
+ i int,
+ ia _int4,
+ ia1 int[],
+ ia2 int[][],
+ ia3 int[][][],
+ ia1d js_int_array_1d,
+ ia2d js_int_array_2d,
+ t text,
+ ta text[],
+ c char(10),
+ ca char(10)[],
+ ts timestamp,
+ js json,
+ jsb jsonb,
+ jsa json[],
+ rec jpop,
+ reca jpop[]
+);
+
+CREATE TYPE jsrec_i_not_null AS (
+ i js_int_not_null
+);
+
+select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q;
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q;
+
+select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q;
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q;
+
+select * from json_populate_record(null::jpop,'{"a":[100,200,false],"x":43.2}') q;
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":[100,200,false],"x":43.2}') q;
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"c":[100,200,false],"x":43.2}') q;
+
+select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{}') q;
+
+SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"x": 43.2}') q;
+SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": null}') q;
+SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": 12345}') q;
+
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": null}') q;
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": 123}') q;
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [1, "2", null, 4]}') q;
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1, 2], [3, 4]]}') q;
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], 2]}') q;
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], [2, 3]]}') q;
+SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": "{1,2,3}"}') q;
+
+SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": null}') q;
+SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": 123}') q;
+SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [1, "2", null, 4]}') q;
+SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [[1, 2, 3]]}') q;
+
+SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": null}') q;
+SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": 123}') q;
+SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null, 4]}') q;
+SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null]}') q;
+
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [1, "2", null, 4]}') q;
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [null, 4]]}') q;
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[], []]}') q;
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [3]]}') q;
+SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], 3, 4]}') q;
+
+SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2"], [null, 4]]}') q;
+SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q;
+
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [1, "2", null, 4]}') q;
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [[1, 2], [null, 4]]}') q;
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q;
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q;
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q;
+SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q;
+
+SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": null}') q;
+SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": 123}') q;
+SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [1, "2", null, 4]}') q;
+SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q;
+
+SELECT c FROM json_populate_record(NULL::jsrec, '{"c": null}') q;
+SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaa"}') q;
+SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaa"}') q;
+SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaaaaa"}') q;
+
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": null}') q;
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": 123}') q;
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [1, "2", null, 4]}') q;
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q;
+SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q;
+
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": null}') q;
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": true}') q;
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": 123.45}') q;
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "123.45"}') q;
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "abc"}') q;
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": [123, "123", null, {"key": "value"}]}') q;
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q;
+
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": null}') q;
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": true}') q;
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": 123.45}') q;
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "123.45"}') q;
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "abc"}') q;
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q;
+SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q;
+
+SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": null}') q;
+SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": 123}') q;
+SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": [1, "2", null, 4]}') q;
+SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q;
+
+SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": 123}') q;
+SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": [1, 2]}') q;
+SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q;
+SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": "(abc,42,01.02.2003)"}') q;
+
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": 123}') q;
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [1, 2]}') q;
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q;
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": ["(abc,42,01.02.2003)"]}') q;
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q;
+
+SELECT rec FROM json_populate_record(
+ row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
+ row('x',3,'2012-12-31 15:30:56')::jpop,NULL)::jsrec,
+ '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}'
+) q;
+
+-- anonymous record type
+SELECT json_populate_record(null::record, '{"x": 0, "y": 1}');
+SELECT json_populate_record(row(1,2), '{"f1": 0, "f2": 1}');
+SELECT * FROM
+ json_populate_record(null::record, '{"x": 776}') AS (x int, y int);
+
+-- composite domain
+SELECT json_populate_record(null::j_ordered_pair, '{"x": 0, "y": 1}');
+SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 0}');
+SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 1, "y": 0}');
+
+-- populate_recordset
+
+select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+
+create type jpop2 as (a int, b json, c int, d int);
+select * from json_populate_recordset(null::jpop2, '[{"a":2,"c":3,"b":{"z":4},"d":6}]') q;
+
+select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+
+-- anonymous record type
+SELECT json_populate_recordset(null::record, '[{"x": 0, "y": 1}]');
+SELECT json_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]');
+SELECT i, json_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]')
+FROM (VALUES (1),(2)) v(i);
+SELECT * FROM
+ json_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int);
+
+-- empty array is a corner case
+SELECT json_populate_recordset(null::record, '[]');
+SELECT json_populate_recordset(row(1,2), '[]');
+SELECT * FROM json_populate_recordset(NULL::jpop,'[]') q;
+SELECT * FROM
+ json_populate_recordset(null::record, '[]') AS (x int, y int);
+
+-- composite domain
+SELECT json_populate_recordset(null::j_ordered_pair, '[{"x": 0, "y": 1}]');
+SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 0}, {"y": 3}]');
+SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 1, "y": 0}]');
+
+-- negative cases where the wrong record type is supplied
+select * from json_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+select * from json_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+select * from json_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+select * from json_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text);
+
+-- test type info caching in json_populate_record()
+CREATE TEMP TABLE jspoptest (js json);
+
+INSERT INTO jspoptest
+SELECT '{
+ "jsa": [1, "2", null, 4],
+ "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2},
+ "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]
+}'::json
+FROM generate_series(1, 3);
+
+SELECT (json_populate_record(NULL::jsrec, js)).* FROM jspoptest;
+
+DROP TYPE jsrec;
+DROP TYPE jsrec_i_not_null;
+DROP DOMAIN js_int_not_null;
+DROP DOMAIN js_int_array_1d;
+DROP DOMAIN js_int_array_2d;
+DROP DOMAIN j_ordered_pair;
+DROP TYPE j_unordered_pair;
+
+--json_typeof() function
+select value, json_typeof(value)
+ from (values (json '123.4'),
+ (json '-1'),
+ (json '"foo"'),
+ (json 'true'),
+ (json 'false'),
+ (json 'null'),
+ (json '[1, 2, 3]'),
+ (json '[]'),
+ (json '{"x":"foo", "y":123}'),
+ (json '{}'),
+ (NULL::json))
+ as data(value);
+
+-- json_build_array, json_build_object, json_object_agg
+
+SELECT json_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
+SELECT json_build_array('a', NULL); -- ok
+SELECT json_build_array(VARIADIC NULL::text[]); -- ok
+SELECT json_build_array(VARIADIC '{}'::text[]); -- ok
+SELECT json_build_array(VARIADIC '{a,b,c}'::text[]); -- ok
+SELECT json_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok
+SELECT json_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok
+SELECT json_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok
+SELECT json_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
+
+SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
+
+SELECT json_build_object(
+ 'a', json_build_object('b',false,'c',99),
+ 'd', json_build_object('e',array[9,8,7]::int[],
+ 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r)));
+SELECT json_build_object('{a,b,c}'::text[]); -- error
+SELECT json_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array
+SELECT json_build_object('a', 'b', 'c'); -- error
+SELECT json_build_object(NULL, 'a'); -- error, key cannot be NULL
+SELECT json_build_object('a', NULL); -- ok
+SELECT json_build_object(VARIADIC NULL::text[]); -- ok
+SELECT json_build_object(VARIADIC '{}'::text[]); -- ok
+SELECT json_build_object(VARIADIC '{a,b,c}'::text[]); -- error
+SELECT json_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok
+SELECT json_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL
+SELECT json_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok
+SELECT json_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok
+SELECT json_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
+
+-- empty objects/arrays
+SELECT json_build_array();
+
+SELECT json_build_object();
+
+-- make sure keys are quoted
+SELECT json_build_object(1,2);
+
+-- keys must be scalar and not null
+SELECT json_build_object(null,2);
+
+SELECT json_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r;
+
+SELECT json_build_object(json '{"a":1,"b":2}', 3);
+
+SELECT json_build_object('{1,2,3}'::int[], 3);
+
+CREATE TEMP TABLE foo (serial_num int, name text, type text);
+INSERT INTO foo VALUES (847001,'t15','GE1043');
+INSERT INTO foo VALUES (847002,'t16','GE1043');
+INSERT INTO foo VALUES (847003,'sub-alpha','GESS90');
+
+SELECT json_build_object('turbines',json_object_agg(serial_num,json_build_object('name',name,'type',type)))
+FROM foo;
+
+SELECT json_object_agg(name, type) FROM foo;
+
+INSERT INTO foo VALUES (999999, NULL, 'bar');
+SELECT json_object_agg(name, type) FROM foo;
+
+-- json_object
+
+-- empty object, one dimension
+SELECT json_object('{}');
+
+-- empty object, two dimensions
+SELECT json_object('{}', '{}');
+
+-- one dimension
+SELECT json_object('{a,1,b,2,3,NULL,"d e f","a b c"}');
+
+-- same but with two dimensions
+SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
+
+-- odd number error
+SELECT json_object('{a,b,c}');
+
+-- one column error
+SELECT json_object('{{a},{b}}');
+
+-- too many columns error
+SELECT json_object('{{a,b,c},{b,c,d}}');
+
+-- too many dimensions error
+SELECT json_object('{{{a,b},{c,d}},{{b,c},{d,e}}}');
+
+--two argument form of json_object
+
+select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}');
+
+-- too many dimensions
+SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
+
+-- mismatched dimensions
+
+select json_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}');
+
+select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}');
+
+-- null key error
+
+select json_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}');
+
+-- empty key is allowed
+
+select json_object('{a,b,"","d e f"}','{1,2,3,"a b c"}');
+
+
+-- json_to_record and json_to_recordset
+
+select * from json_to_record('{"a":1,"b":"foo","c":"bar"}')
+ as x(a int, b text, d text);
+
+select * from json_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]')
+ as x(a int, b text, c boolean);
+
+select * from json_to_recordset('[{"a":1,"b":{"d":"foo"},"c":true},{"a":2,"c":false,"b":{"d":"bar"}}]')
+ as x(a int, b json, c boolean);
+
+select *, c is null as c_is_null
+from json_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::json)
+ as t(a int, b json, c text, x int, ca char(5)[], ia int[][], r jpop);
+
+select *, c is null as c_is_null
+from json_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::json)
+ as t(a int, b json, c text, x int);
+
+select * from json_to_record('{"ia": null}') as x(ia _int4);
+select * from json_to_record('{"ia": 123}') as x(ia _int4);
+select * from json_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4);
+select * from json_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4);
+select * from json_to_record('{"ia": [[1], 2]}') as x(ia _int4);
+select * from json_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4);
+
+select * from json_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]);
+select * from json_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]);
+select * from json_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]);
+
+select * from json_to_record('{"out": {"key": 1}}') as x(out json);
+select * from json_to_record('{"out": [{"key": 1}]}') as x(out json);
+select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out json);
+select * from json_to_record('{"out": {"key": 1}}') as x(out jsonb);
+select * from json_to_record('{"out": [{"key": 1}]}') as x(out jsonb);
+select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb);
+
+-- json_strip_nulls
+
+select json_strip_nulls(null);
+
+select json_strip_nulls('1');
+
+select json_strip_nulls('"a string"');
+
+select json_strip_nulls('null');
+
+select json_strip_nulls('[1,2,null,3,4]');
+
+select json_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}');
+
+select json_strip_nulls('[1,{"a":1,"b":null,"c":2},3]');
+
+-- an empty object is not null and should not be stripped
+select json_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }');
+
+-- json to tsvector
+select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json);
+
+-- json to tsvector with config
+select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json);
+
+-- json to tsvector with stop words
+select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. iii"}}'::json);
+
+-- json to tsvector with numeric values
+select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::json);
+
+-- json_to_tsvector
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]');
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]');
+
+-- to_tsvector corner cases
+select to_tsvector('""'::json);
+select to_tsvector('{}'::json);
+select to_tsvector('[]'::json);
+select to_tsvector('null'::json);
+
+-- json_to_tsvector corner cases
+select json_to_tsvector('""'::json, '"all"');
+select json_to_tsvector('{}'::json, '"all"');
+select json_to_tsvector('[]'::json, '"all"');
+select json_to_tsvector('null'::json, '"all"');
+
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '""');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '{}');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '[]');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, 'null');
+select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["all", null]');
+
+-- ts_headline for json
+select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'));
+select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'));
+select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
+select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
+
+-- corner cases for ts_headline with json
+select ts_headline('null'::json, tsquery('aaa & bbb'));
+select ts_headline('{}'::json, tsquery('aaa & bbb'));
+select ts_headline('[]'::json, tsquery('aaa & bbb'));
diff --git a/ydb/library/yql/tests/postgresql/original/cases/json_encoding.out b/ydb/library/yql/tests/postgresql/original/cases/json_encoding.out
new file mode 100644
index 0000000000..f343f74fe1
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/json_encoding.out
@@ -0,0 +1,262 @@
+--
+-- encoding-sensitive tests for json and jsonb
+--
+-- We provide expected-results files for UTF8 (json_encoding.out)
+-- and for SQL_ASCII (json_encoding_1.out). Skip otherwise.
+SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
+ AS skip_test \gset
+\if :skip_test
+\quit
+\endif
+SELECT getdatabaseencoding(); -- just to label the results files
+ getdatabaseencoding
+---------------------
+ UTF8
+(1 row)
+
+-- first json
+-- basic unicode input
+SELECT '"\u"'::json; -- ERROR, incomplete escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u"'::json;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u"
+SELECT '"\u00"'::json; -- ERROR, incomplete escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u00"'::json;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u00"
+SELECT '"\u000g"'::json; -- ERROR, g is not a hex digit
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u000g"'::json;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u000g...
+SELECT '"\u0000"'::json; -- OK, legal escape
+ json
+----------
+ "\u0000"
+(1 row)
+
+SELECT '"\uaBcD"'::json; -- OK, uppercase and lower case both OK
+ json
+----------
+ "\uaBcD"
+(1 row)
+
+-- handling of unicode surrogate pairs
+select json '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a' as correct_in_utf8;
+ correct_in_utf8
+----------------------------
+ "\ud83d\ude04\ud83d\udc36"
+(1 row)
+
+select json '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
+ERROR: invalid input syntax for type json
+DETAIL: Unicode high surrogate must not follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+select json '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
+ERROR: invalid input syntax for type json
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+select json '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
+ERROR: invalid input syntax for type json
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+select json '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
+ERROR: invalid input syntax for type json
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+--handling of simple unicode escapes
+select json '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
+ correct_in_utf8
+---------------------------------------
+ { "a": "the Copyright \u00a9 sign" }
+(1 row)
+
+select json '{ "a": "dollar \u0024 character" }' as correct_everywhere;
+ correct_everywhere
+-------------------------------------
+ { "a": "dollar \u0024 character" }
+(1 row)
+
+select json '{ "a": "dollar \\u0024 character" }' as not_an_escape;
+ not_an_escape
+--------------------------------------
+ { "a": "dollar \\u0024 character" }
+(1 row)
+
+select json '{ "a": "null \u0000 escape" }' as not_unescaped;
+ not_unescaped
+--------------------------------
+ { "a": "null \u0000 escape" }
+(1 row)
+
+select json '{ "a": "null \\u0000 escape" }' as not_an_escape;
+ not_an_escape
+---------------------------------
+ { "a": "null \\u0000 escape" }
+(1 row)
+
+select json '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8;
+ correct_in_utf8
+----------------------
+ the Copyright © sign
+(1 row)
+
+select json '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere;
+ correct_everywhere
+--------------------
+ dollar $ character
+(1 row)
+
+select json '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
+ not_an_escape
+-------------------------
+ dollar \u0024 character
+(1 row)
+
+select json '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
+ERROR: unsupported Unicode escape sequence
+DETAIL: \u0000 cannot be converted to text.
+CONTEXT: JSON data, line 1: { "a":...
+select json '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
+ not_an_escape
+--------------------
+ null \u0000 escape
+(1 row)
+
+-- then jsonb
+-- basic unicode input
+SELECT '"\u"'::jsonb; -- ERROR, incomplete escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u"'::jsonb;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u"
+SELECT '"\u00"'::jsonb; -- ERROR, incomplete escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u00"'::jsonb;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u00"
+SELECT '"\u000g"'::jsonb; -- ERROR, g is not a hex digit
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\u000g"'::jsonb;
+ ^
+DETAIL: "\u" must be followed by four hexadecimal digits.
+CONTEXT: JSON data, line 1: "\u000g...
+SELECT '"\u0045"'::jsonb; -- OK, legal escape
+ jsonb
+-------
+ "E"
+(1 row)
+
+SELECT '"\u0000"'::jsonb; -- ERROR, we don't support U+0000
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT '"\u0000"'::jsonb;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+CONTEXT: JSON data, line 1: ...
+-- use octet_length here so we don't get an odd unicode char in the
+-- output
+SELECT octet_length('"\uaBcD"'::jsonb::text); -- OK, uppercase and lower case both OK
+ octet_length
+--------------
+ 5
+(1 row)
+
+-- handling of unicode surrogate pairs
+SELECT octet_length((jsonb '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a')::text) AS correct_in_utf8;
+ correct_in_utf8
+-----------------
+ 10
+(1 row)
+
+SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
+ERROR: invalid input syntax for type json
+LINE 1: SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a';
+ ^
+DETAIL: Unicode high surrogate must not follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
+ERROR: invalid input syntax for type json
+LINE 1: SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a';
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
+ERROR: invalid input syntax for type json
+LINE 1: SELECT jsonb '{ "a": "\ud83dX" }' -> 'a';
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
+ERROR: invalid input syntax for type json
+LINE 1: SELECT jsonb '{ "a": "\ude04X" }' -> 'a';
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+CONTEXT: JSON data, line 1: { "a":...
+-- handling of simple unicode escapes
+SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
+ correct_in_utf8
+-------------------------------
+ {"a": "the Copyright © sign"}
+(1 row)
+
+SELECT jsonb '{ "a": "dollar \u0024 character" }' as correct_everywhere;
+ correct_everywhere
+-----------------------------
+ {"a": "dollar $ character"}
+(1 row)
+
+SELECT jsonb '{ "a": "dollar \\u0024 character" }' as not_an_escape;
+ not_an_escape
+-----------------------------------
+ {"a": "dollar \\u0024 character"}
+(1 row)
+
+SELECT jsonb '{ "a": "null \u0000 escape" }' as fails;
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' as fails;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "null \\u0000 escape" }' as not_an_escape;
+ not_an_escape
+------------------------------
+ {"a": "null \\u0000 escape"}
+(1 row)
+
+SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8;
+ correct_in_utf8
+----------------------
+ the Copyright © sign
+(1 row)
+
+SELECT jsonb '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere;
+ correct_everywhere
+--------------------
+ dollar $ character
+(1 row)
+
+SELECT jsonb '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
+ not_an_escape
+-------------------------
+ dollar \u0024 character
+(1 row)
+
+SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fai...
+ ^
+DETAIL: \u0000 cannot be converted to text.
+CONTEXT: JSON data, line 1: { "a":...
+SELECT jsonb '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
+ not_an_escape
+--------------------
+ null \u0000 escape
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/json_encoding.sql b/ydb/library/yql/tests/postgresql/original/cases/json_encoding.sql
new file mode 100644
index 0000000000..d7fac69733
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/json_encoding.sql
@@ -0,0 +1,78 @@
+--
+-- encoding-sensitive tests for json and jsonb
+--
+
+-- We provide expected-results files for UTF8 (json_encoding.out)
+-- and for SQL_ASCII (json_encoding_1.out). Skip otherwise.
+SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
+ AS skip_test \gset
+\if :skip_test
+\quit
+\endif
+
+SELECT getdatabaseencoding(); -- just to label the results files
+
+-- first json
+
+-- basic unicode input
+SELECT '"\u"'::json; -- ERROR, incomplete escape
+SELECT '"\u00"'::json; -- ERROR, incomplete escape
+SELECT '"\u000g"'::json; -- ERROR, g is not a hex digit
+SELECT '"\u0000"'::json; -- OK, legal escape
+SELECT '"\uaBcD"'::json; -- OK, uppercase and lower case both OK
+
+-- handling of unicode surrogate pairs
+
+select json '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a' as correct_in_utf8;
+select json '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
+select json '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
+select json '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
+select json '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
+
+--handling of simple unicode escapes
+
+select json '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
+select json '{ "a": "dollar \u0024 character" }' as correct_everywhere;
+select json '{ "a": "dollar \\u0024 character" }' as not_an_escape;
+select json '{ "a": "null \u0000 escape" }' as not_unescaped;
+select json '{ "a": "null \\u0000 escape" }' as not_an_escape;
+
+select json '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8;
+select json '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere;
+select json '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
+select json '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
+select json '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
+
+-- then jsonb
+
+-- basic unicode input
+SELECT '"\u"'::jsonb; -- ERROR, incomplete escape
+SELECT '"\u00"'::jsonb; -- ERROR, incomplete escape
+SELECT '"\u000g"'::jsonb; -- ERROR, g is not a hex digit
+SELECT '"\u0045"'::jsonb; -- OK, legal escape
+SELECT '"\u0000"'::jsonb; -- ERROR, we don't support U+0000
+-- use octet_length here so we don't get an odd unicode char in the
+-- output
+SELECT octet_length('"\uaBcD"'::jsonb::text); -- OK, uppercase and lower case both OK
+
+-- handling of unicode surrogate pairs
+
+SELECT octet_length((jsonb '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a')::text) AS correct_in_utf8;
+SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row
+SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order
+SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate
+SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate
+
+-- handling of simple unicode escapes
+
+SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8;
+SELECT jsonb '{ "a": "dollar \u0024 character" }' as correct_everywhere;
+SELECT jsonb '{ "a": "dollar \\u0024 character" }' as not_an_escape;
+SELECT jsonb '{ "a": "null \u0000 escape" }' as fails;
+SELECT jsonb '{ "a": "null \\u0000 escape" }' as not_an_escape;
+
+SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8;
+SELECT jsonb '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere;
+SELECT jsonb '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape;
+SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fails;
+SELECT jsonb '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/jsonb.out b/ydb/library/yql/tests/postgresql/original/cases/jsonb.out
new file mode 100644
index 0000000000..a9cd145aec
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/jsonb.out
@@ -0,0 +1,5511 @@
+-- Strings.
+SELECT '""'::jsonb; -- OK.
+ jsonb
+-------
+ ""
+(1 row)
+
+SELECT $$''$$::jsonb; -- ERROR, single quotes are not allowed
+ERROR: invalid input syntax for type json
+LINE 1: SELECT $$''$$::jsonb;
+ ^
+DETAIL: Token "'" is invalid.
+CONTEXT: JSON data, line 1: '...
+SELECT '"abc"'::jsonb; -- OK
+ jsonb
+-------
+ "abc"
+(1 row)
+
+SELECT '"abc'::jsonb; -- ERROR, quotes not closed
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"abc'::jsonb;
+ ^
+DETAIL: Token ""abc" is invalid.
+CONTEXT: JSON data, line 1: "abc
+SELECT '"abc
+def"'::jsonb; -- ERROR, unescaped newline in string constant
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"abc
+ ^
+DETAIL: Character with value 0x0a must be escaped.
+CONTEXT: JSON data, line 1: "abc
+SELECT '"\n\"\\"'::jsonb; -- OK, legal escapes
+ jsonb
+----------
+ "\n\"\\"
+(1 row)
+
+SELECT '"\v"'::jsonb; -- ERROR, not a valid JSON escape
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '"\v"'::jsonb;
+ ^
+DETAIL: Escape sequence "\v" is invalid.
+CONTEXT: JSON data, line 1: "\v...
+-- see json_encoding test for input with unicode escapes
+-- Numbers.
+SELECT '1'::jsonb; -- OK
+ jsonb
+-------
+ 1
+(1 row)
+
+SELECT '0'::jsonb; -- OK
+ jsonb
+-------
+ 0
+(1 row)
+
+SELECT '01'::jsonb; -- ERROR, not valid according to JSON spec
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '01'::jsonb;
+ ^
+DETAIL: Token "01" is invalid.
+CONTEXT: JSON data, line 1: 01
+SELECT '0.1'::jsonb; -- OK
+ jsonb
+-------
+ 0.1
+(1 row)
+
+SELECT '9223372036854775808'::jsonb; -- OK, even though it's too large for int8
+ jsonb
+---------------------
+ 9223372036854775808
+(1 row)
+
+SELECT '1e100'::jsonb; -- OK
+ jsonb
+-------------------------------------------------------------------------------------------------------
+ 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+(1 row)
+
+SELECT '1.3e100'::jsonb; -- OK
+ jsonb
+-------------------------------------------------------------------------------------------------------
+ 13000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+(1 row)
+
+SELECT '1f2'::jsonb; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '1f2'::jsonb;
+ ^
+DETAIL: Token "1f2" is invalid.
+CONTEXT: JSON data, line 1: 1f2
+SELECT '0.x1'::jsonb; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '0.x1'::jsonb;
+ ^
+DETAIL: Token "0.x1" is invalid.
+CONTEXT: JSON data, line 1: 0.x1
+SELECT '1.3ex100'::jsonb; -- ERROR
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '1.3ex100'::jsonb;
+ ^
+DETAIL: Token "1.3ex100" is invalid.
+CONTEXT: JSON data, line 1: 1.3ex100
+-- Arrays.
+SELECT '[]'::jsonb; -- OK
+ jsonb
+-------
+ []
+(1 row)
+
+SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::jsonb; -- OK
+ jsonb
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
+(1 row)
+
+SELECT '[1,2]'::jsonb; -- OK
+ jsonb
+--------
+ [1, 2]
+(1 row)
+
+SELECT '[1,2,]'::jsonb; -- ERROR, trailing comma
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,2,]'::jsonb;
+ ^
+DETAIL: Expected JSON value, but found "]".
+CONTEXT: JSON data, line 1: [1,2,]
+SELECT '[1,2'::jsonb; -- ERROR, no closing bracket
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,2'::jsonb;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1: [1,2
+SELECT '[1,[2]'::jsonb; -- ERROR, no closing bracket
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '[1,[2]'::jsonb;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1: [1,[2]
+-- Objects.
+SELECT '{}'::jsonb; -- OK
+ jsonb
+-------
+ {}
+(1 row)
+
+SELECT '{"abc"}'::jsonb; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"}'::jsonb;
+ ^
+DETAIL: Expected ":", but found "}".
+CONTEXT: JSON data, line 1: {"abc"}
+SELECT '{"abc":1}'::jsonb; -- OK
+ jsonb
+------------
+ {"abc": 1}
+(1 row)
+
+SELECT '{1:"abc"}'::jsonb; -- ERROR, keys must be strings
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{1:"abc"}'::jsonb;
+ ^
+DETAIL: Expected string or "}", but found "1".
+CONTEXT: JSON data, line 1: {1...
+SELECT '{"abc",1}'::jsonb; -- ERROR, wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc",1}'::jsonb;
+ ^
+DETAIL: Expected ":", but found ",".
+CONTEXT: JSON data, line 1: {"abc",...
+SELECT '{"abc"=1}'::jsonb; -- ERROR, totally wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"=1}'::jsonb;
+ ^
+DETAIL: Token "=" is invalid.
+CONTEXT: JSON data, line 1: {"abc"=...
+SELECT '{"abc"::1}'::jsonb; -- ERROR, another wrong separator
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc"::1}'::jsonb;
+ ^
+DETAIL: Expected JSON value, but found ":".
+CONTEXT: JSON data, line 1: {"abc"::...
+SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::jsonb; -- OK
+ jsonb
+--------------------------------------------------------------------
+ {"abc": 1, "def": 2, "ghi": [3, 4], "hij": {"klm": 5, "nop": [6]}}
+(1 row)
+
+SELECT '{"abc":1:2}'::jsonb; -- ERROR, colon in wrong spot
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc":1:2}'::jsonb;
+ ^
+DETAIL: Expected "," or "}", but found ":".
+CONTEXT: JSON data, line 1: {"abc":1:...
+SELECT '{"abc":1,3}'::jsonb; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{"abc":1,3}'::jsonb;
+ ^
+DETAIL: Expected string, but found "3".
+CONTEXT: JSON data, line 1: {"abc":1,3...
+-- Recursion.
+SET max_stack_depth = '100kB';
+SELECT repeat('[', 10000)::jsonb;
+ERROR: stack depth limit exceeded
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+SELECT repeat('{"a":', 10000)::jsonb;
+ERROR: stack depth limit exceeded
+HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate.
+RESET max_stack_depth;
+-- Miscellaneous stuff.
+SELECT 'true'::jsonb; -- OK
+ jsonb
+-------
+ true
+(1 row)
+
+SELECT 'false'::jsonb; -- OK
+ jsonb
+-------
+ false
+(1 row)
+
+SELECT 'null'::jsonb; -- OK
+ jsonb
+-------
+ null
+(1 row)
+
+SELECT ' true '::jsonb; -- OK, even with extra whitespace
+ jsonb
+-------
+ true
+(1 row)
+
+SELECT 'true false'::jsonb; -- ERROR, too many values
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'true false'::jsonb;
+ ^
+DETAIL: Expected end of input, but found "false".
+CONTEXT: JSON data, line 1: true false
+SELECT 'true, false'::jsonb; -- ERROR, too many values
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'true, false'::jsonb;
+ ^
+DETAIL: Expected end of input, but found ",".
+CONTEXT: JSON data, line 1: true,...
+SELECT 'truf'::jsonb; -- ERROR, not a keyword
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'truf'::jsonb;
+ ^
+DETAIL: Token "truf" is invalid.
+CONTEXT: JSON data, line 1: truf
+SELECT 'trues'::jsonb; -- ERROR, not a keyword
+ERROR: invalid input syntax for type json
+LINE 1: SELECT 'trues'::jsonb;
+ ^
+DETAIL: Token "trues" is invalid.
+CONTEXT: JSON data, line 1: trues
+SELECT ''::jsonb; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT ''::jsonb;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1:
+SELECT ' '::jsonb; -- ERROR, no value
+ERROR: invalid input syntax for type json
+LINE 1: SELECT ' '::jsonb;
+ ^
+DETAIL: The input string ended unexpectedly.
+CONTEXT: JSON data, line 1:
+-- Multi-line JSON input to check ERROR reporting
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "three":
+ true}'::jsonb; -- OK
+ jsonb
+-----------------------------------------
+ {"one": 1, "two": "two", "three": true}
+(1 row)
+
+SELECT '{
+ "one": 1,
+ "two":,"two", -- ERROR extraneous comma before field "two"
+ "three":
+ true}'::jsonb;
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{
+ ^
+DETAIL: Expected JSON value, but found ",".
+CONTEXT: JSON data, line 3: "two":,...
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::jsonb;
+ERROR: invalid input syntax for type json
+LINE 1: SELECT '{
+ ^
+DETAIL: Expected JSON value, but found "}".
+CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":}
+-- ERROR missing value for last field
+-- make sure jsonb is passed through json generators without being escaped
+SELECT array_to_json(ARRAY [jsonb '{"a":1}', jsonb '{"b":[2,3]}']);
+ array_to_json
+--------------------------
+ [{"a": 1},{"b": [2, 3]}]
+(1 row)
+
+-- anyarray column
+CREATE TEMP TABLE rows AS
+SELECT x, 'txt' || x as y
+FROM generate_series(1,3) AS x;
+analyze rows;
+select attname, to_jsonb(histogram_bounds) histogram_bounds
+from pg_stats
+where tablename = 'rows' and
+ schemaname = pg_my_temp_schema()::regnamespace::text
+order by 1;
+ attname | histogram_bounds
+---------+--------------------------
+ x | [1, 2, 3]
+ y | ["txt1", "txt2", "txt3"]
+(2 rows)
+
+-- to_jsonb, timestamps
+select to_jsonb(timestamp '2014-05-28 12:22:35.614298');
+ to_jsonb
+------------------------------
+ "2014-05-28T12:22:35.614298"
+(1 row)
+
+BEGIN;
+SET LOCAL TIME ZONE 10.5;
+select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04');
+ to_jsonb
+------------------------------------
+ "2014-05-29T02:52:35.614298+10:30"
+(1 row)
+
+SET LOCAL TIME ZONE -8;
+select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04');
+ to_jsonb
+------------------------------------
+ "2014-05-28T08:22:35.614298-08:00"
+(1 row)
+
+COMMIT;
+select to_jsonb(date '2014-05-28');
+ to_jsonb
+--------------
+ "2014-05-28"
+(1 row)
+
+select to_jsonb(date 'Infinity');
+ to_jsonb
+------------
+ "infinity"
+(1 row)
+
+select to_jsonb(date '-Infinity');
+ to_jsonb
+-------------
+ "-infinity"
+(1 row)
+
+select to_jsonb(timestamp 'Infinity');
+ to_jsonb
+------------
+ "infinity"
+(1 row)
+
+select to_jsonb(timestamp '-Infinity');
+ to_jsonb
+-------------
+ "-infinity"
+(1 row)
+
+select to_jsonb(timestamptz 'Infinity');
+ to_jsonb
+------------
+ "infinity"
+(1 row)
+
+select to_jsonb(timestamptz '-Infinity');
+ to_jsonb
+-------------
+ "-infinity"
+(1 row)
+
+--jsonb_agg
+SELECT jsonb_agg(q)
+ FROM ( SELECT $$a$$ || x AS b, y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+ jsonb_agg
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ [{"b": "a1", "c": 4, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a1", "c": 5, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 4, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 5, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}]
+(1 row)
+
+SELECT jsonb_agg(q ORDER BY x, y)
+ FROM rows q;
+ jsonb_agg
+-----------------------------------------------------------------------
+ [{"x": 1, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}]
+(1 row)
+
+UPDATE rows SET x = NULL WHERE x = 1;
+SELECT jsonb_agg(q ORDER BY x NULLS FIRST, y)
+ FROM rows q;
+ jsonb_agg
+--------------------------------------------------------------------------
+ [{"x": null, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}]
+(1 row)
+
+-- jsonb extraction functions
+CREATE TEMP TABLE test_jsonb (
+ json_type text,
+ test_json jsonb
+);
+INSERT INTO test_jsonb VALUES
+('scalar','"a scalar"'),
+('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
+('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
+SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'scalar';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'array';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'object';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json -> 'field2' FROM test_jsonb WHERE json_type = 'object';
+ ?column?
+----------
+ "val2"
+(1 row)
+
+SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'scalar';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'array';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'object';
+ ?column?
+----------
+ val2
+(1 row)
+
+SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'scalar';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'array';
+ ?column?
+----------
+ "two"
+(1 row)
+
+SELECT test_json -> 9 FROM test_jsonb WHERE json_type = 'array';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'object';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json ->> 6 FROM test_jsonb WHERE json_type = 'array';
+ ?column?
+-----------
+ [1, 2, 3]
+(1 row)
+
+SELECT test_json ->> 7 FROM test_jsonb WHERE json_type = 'array';
+ ?column?
+-----------
+ {"f1": 9}
+(1 row)
+
+SELECT test_json ->> 'field4' FROM test_jsonb WHERE json_type = 'object';
+ ?column?
+----------
+ 4
+(1 row)
+
+SELECT test_json ->> 'field5' FROM test_jsonb WHERE json_type = 'object';
+ ?column?
+-----------
+ [1, 2, 3]
+(1 row)
+
+SELECT test_json ->> 'field6' FROM test_jsonb WHERE json_type = 'object';
+ ?column?
+-----------
+ {"f1": 9}
+(1 row)
+
+SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'scalar';
+ ?column?
+----------
+
+(1 row)
+
+SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'array';
+ ?column?
+----------
+ two
+(1 row)
+
+SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'object';
+ ?column?
+----------
+
+(1 row)
+
+SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'scalar';
+ERROR: cannot call jsonb_object_keys on a scalar
+SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'array';
+ERROR: cannot call jsonb_object_keys on an array
+SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'object';
+ jsonb_object_keys
+-------------------
+ field1
+ field2
+ field3
+ field4
+ field5
+ field6
+(6 rows)
+
+-- nulls
+SELECT (test_json->'field3') IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'object';
+ expect_false
+--------------
+ f
+(1 row)
+
+SELECT (test_json->>'field3') IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'object';
+ expect_true
+-------------
+ t
+(1 row)
+
+SELECT (test_json->3) IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'array';
+ expect_false
+--------------
+ f
+(1 row)
+
+SELECT (test_json->>3) IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'array';
+ expect_true
+-------------
+ t
+(1 row)
+
+-- corner cases
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::text;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::int;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 1;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> '';
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 1;
+ ?column?
+-------------
+ {"b": "cc"}
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 3;
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": "c", "b": null}'::jsonb -> 'b';
+ ?column?
+----------
+ null
+(1 row)
+
+select '"foo"'::jsonb -> 1;
+ ?column?
+----------
+
+(1 row)
+
+select '"foo"'::jsonb -> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::text;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::int;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 1;
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> '';
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 1;
+ ?column?
+-------------
+ {"b": "cc"}
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 3;
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 'z';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": "c", "b": null}'::jsonb ->> 'b';
+ ?column?
+----------
+
+(1 row)
+
+select '"foo"'::jsonb ->> 1;
+ ?column?
+----------
+
+(1 row)
+
+select '"foo"'::jsonb ->> 'z';
+ ?column?
+----------
+
+(1 row)
+
+-- equality and inequality
+SELECT '{"x":"y"}'::jsonb = '{"x":"y"}'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"x":"y"}'::jsonb = '{"x":"z"}'::jsonb;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"x":"y"}'::jsonb <> '{"x":"y"}'::jsonb;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"x":"y"}'::jsonb <> '{"x":"z"}'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+-- containment
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}');
+ jsonb_contains
+----------------
+ t
+(1 row)
+
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":null}');
+ jsonb_contains
+----------------
+ t
+(1 row)
+
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "g":null}');
+ jsonb_contains
+----------------
+ f
+(1 row)
+
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"g":null}');
+ jsonb_contains
+----------------
+ f
+(1 row)
+
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"c"}');
+ jsonb_contains
+----------------
+ f
+(1 row)
+
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}');
+ jsonb_contains
+----------------
+ t
+(1 row)
+
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":"q"}');
+ jsonb_contains
+----------------
+ f
+(1 row)
+
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":null}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "g":null}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"g":null}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"c"}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":"q"}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '[1,2]'::jsonb @> '[1,2,2]'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '[1,1,2]'::jsonb @> '[1,2,2]'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '[[1,2]]'::jsonb @> '[[1,2,2]]'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '[1,2,2]'::jsonb <@ '[1,2]'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '[1,2,2]'::jsonb <@ '[1,1,2]'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '[[1,2,2]]'::jsonb <@ '[[1,2]]'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}');
+ jsonb_contained
+-----------------
+ t
+(1 row)
+
+SELECT jsonb_contained('{"a":"b", "c":null}', '{"a":"b", "b":1, "c":null}');
+ jsonb_contained
+-----------------
+ t
+(1 row)
+
+SELECT jsonb_contained('{"a":"b", "g":null}', '{"a":"b", "b":1, "c":null}');
+ jsonb_contained
+-----------------
+ f
+(1 row)
+
+SELECT jsonb_contained('{"g":null}', '{"a":"b", "b":1, "c":null}');
+ jsonb_contained
+-----------------
+ f
+(1 row)
+
+SELECT jsonb_contained('{"a":"c"}', '{"a":"b", "b":1, "c":null}');
+ jsonb_contained
+-----------------
+ f
+(1 row)
+
+SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}');
+ jsonb_contained
+-----------------
+ t
+(1 row)
+
+SELECT jsonb_contained('{"a":"b", "c":"q"}', '{"a":"b", "b":1, "c":null}');
+ jsonb_contained
+-----------------
+ f
+(1 row)
+
+SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":"b", "c":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":"b", "g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"a":"c"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":"b", "c":"q"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+ ?column?
+----------
+ f
+(1 row)
+
+-- Raw scalar may contain another raw scalar, array may contain a raw scalar
+SELECT '[5]'::jsonb @> '[5]';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '5'::jsonb @> '5';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '[5]'::jsonb @> '5';
+ ?column?
+----------
+ t
+(1 row)
+
+-- But a raw scalar cannot contain an array
+SELECT '5'::jsonb @> '[5]';
+ ?column?
+----------
+ f
+(1 row)
+
+-- In general, one thing should always contain itself. Test array containment:
+SELECT '["9", ["7", "3"], 1]'::jsonb @> '["9", ["7", "3"], 1]'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '["9", ["7", "3"], ["1"]]'::jsonb @> '["9", ["7", "3"], ["1"]]'::jsonb;
+ ?column?
+----------
+ t
+(1 row)
+
+-- array containment string matching confusion bug
+SELECT '{ "name": "Bob", "tags": [ "enim", "qui"]}'::jsonb @> '{"tags":["qu"]}';
+ ?column?
+----------
+ f
+(1 row)
+
+-- array length
+SELECT jsonb_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]');
+ jsonb_array_length
+--------------------
+ 5
+(1 row)
+
+SELECT jsonb_array_length('[]');
+ jsonb_array_length
+--------------------
+ 0
+(1 row)
+
+SELECT jsonb_array_length('{"f1":1,"f2":[5,6]}');
+ERROR: cannot get array length of a non-array
+SELECT jsonb_array_length('4');
+ERROR: cannot get array length of a scalar
+-- each
+SELECT jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}');
+ jsonb_each
+--------------------
+ (f1,"[1, 2, 3]")
+ (f2,"{""f3"": 1}")
+ (f4,null)
+(3 rows)
+
+SELECT jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
+ q
+------------------------------------------------------
+ (1,"""first""")
+ (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}")
+ (b,"[1, 2]")
+ (c,"""cc""")
+ (n,null)
+(5 rows)
+
+SELECT * FROM jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
+ key | value
+-----+-----------
+ f1 | [1, 2, 3]
+ f2 | {"f3": 1}
+ f4 | null
+ f5 | 99
+ f6 | "stringy"
+(5 rows)
+
+SELECT * FROM jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
+ key | value
+-----+------------------------------------
+ 1 | "first"
+ a | {"1": "first", "b": "c", "c": "b"}
+ b | [1, 2]
+ c | "cc"
+ n | null
+(5 rows)
+
+SELECT jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}');
+ jsonb_each_text
+--------------------
+ (f1,"[1, 2, 3]")
+ (f2,"{""f3"": 1}")
+ (f4,)
+ (f5,null)
+(4 rows)
+
+SELECT jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
+ q
+------------------------------------------------------
+ (1,first)
+ (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}")
+ (b,"[1, 2]")
+ (c,cc)
+ (n,)
+(5 rows)
+
+SELECT * FROM jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
+ key | value
+-----+-----------
+ f1 | [1, 2, 3]
+ f2 | {"f3": 1}
+ f4 |
+ f5 | 99
+ f6 | stringy
+(5 rows)
+
+SELECT * FROM jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
+ key | value
+-----+------------------------------------
+ 1 | first
+ a | {"1": "first", "b": "c", "c": "b"}
+ b | [1, 2]
+ c | cc
+ n |
+(5 rows)
+
+-- exists
+SELECT jsonb_exists('{"a":null, "b":"qq"}', 'a');
+ jsonb_exists
+--------------
+ t
+(1 row)
+
+SELECT jsonb_exists('{"a":null, "b":"qq"}', 'b');
+ jsonb_exists
+--------------
+ t
+(1 row)
+
+SELECT jsonb_exists('{"a":null, "b":"qq"}', 'c');
+ jsonb_exists
+--------------
+ f
+(1 row)
+
+SELECT jsonb_exists('{"a":"null", "b":"qq"}', 'a');
+ jsonb_exists
+--------------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ? 'a';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ? 'b';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ? 'c';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT jsonb '{"a":"null", "b":"qq"}' ? 'a';
+ ?column?
+----------
+ t
+(1 row)
+
+-- array exists - array elements should behave as keys
+SELECT count(*) from testjsonb WHERE j->'array' ? 'bar';
+ count
+-------
+ 3
+(1 row)
+
+-- type sensitive array exists - should return no rows (since "exists" only
+-- matches strings that are either object keys or array elements)
+SELECT count(*) from testjsonb WHERE j->'array' ? '5'::text;
+ count
+-------
+ 0
+(1 row)
+
+-- However, a raw scalar is *contained* within the array
+SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb;
+ count
+-------
+ 1
+(1 row)
+
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['a','b']);
+ jsonb_exists_any
+------------------
+ t
+(1 row)
+
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['b','a']);
+ jsonb_exists_any
+------------------
+ t
+(1 row)
+
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','a']);
+ jsonb_exists_any
+------------------
+ t
+(1 row)
+
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','d']);
+ jsonb_exists_any
+------------------
+ f
+(1 row)
+
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', '{}'::text[]);
+ jsonb_exists_any
+------------------
+ f
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['a','b'];
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['b','a'];
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','a'];
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','d'];
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?| '{}'::text[];
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['a','b']);
+ jsonb_exists_all
+------------------
+ t
+(1 row)
+
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['b','a']);
+ jsonb_exists_all
+------------------
+ t
+(1 row)
+
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','a']);
+ jsonb_exists_all
+------------------
+ f
+(1 row)
+
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','d']);
+ jsonb_exists_all
+------------------
+ f
+(1 row)
+
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', '{}'::text[]);
+ jsonb_exists_all
+------------------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','b'];
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['b','a'];
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','a'];
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','d'];
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','a', 'b', 'b', 'b'];
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '{"a":null, "b":"qq"}' ?& '{}'::text[];
+ ?column?
+----------
+ t
+(1 row)
+
+-- typeof
+SELECT jsonb_typeof('{}') AS object;
+ object
+--------
+ object
+(1 row)
+
+SELECT jsonb_typeof('{"c":3,"p":"o"}') AS object;
+ object
+--------
+ object
+(1 row)
+
+SELECT jsonb_typeof('[]') AS array;
+ array
+-------
+ array
+(1 row)
+
+SELECT jsonb_typeof('["a", 1]') AS array;
+ array
+-------
+ array
+(1 row)
+
+SELECT jsonb_typeof('null') AS "null";
+ null
+------
+ null
+(1 row)
+
+SELECT jsonb_typeof('1') AS number;
+ number
+--------
+ number
+(1 row)
+
+SELECT jsonb_typeof('-1') AS number;
+ number
+--------
+ number
+(1 row)
+
+SELECT jsonb_typeof('1.0') AS number;
+ number
+--------
+ number
+(1 row)
+
+SELECT jsonb_typeof('1e2') AS number;
+ number
+--------
+ number
+(1 row)
+
+SELECT jsonb_typeof('-1.0') AS number;
+ number
+--------
+ number
+(1 row)
+
+SELECT jsonb_typeof('true') AS boolean;
+ boolean
+---------
+ boolean
+(1 row)
+
+SELECT jsonb_typeof('false') AS boolean;
+ boolean
+---------
+ boolean
+(1 row)
+
+SELECT jsonb_typeof('"hello"') AS string;
+ string
+--------
+ string
+(1 row)
+
+SELECT jsonb_typeof('"true"') AS string;
+ string
+--------
+ string
+(1 row)
+
+SELECT jsonb_typeof('"1.0"') AS string;
+ string
+--------
+ string
+(1 row)
+
+-- jsonb_build_array, jsonb_build_object, jsonb_object_agg
+SELECT jsonb_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
+ jsonb_build_array
+-------------------------------------------------------------------------
+ ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1, 2, 3]}]
+(1 row)
+
+SELECT jsonb_build_array('a', NULL); -- ok
+ jsonb_build_array
+-------------------
+ ["a", null]
+(1 row)
+
+SELECT jsonb_build_array(VARIADIC NULL::text[]); -- ok
+ jsonb_build_array
+-------------------
+
+(1 row)
+
+SELECT jsonb_build_array(VARIADIC '{}'::text[]); -- ok
+ jsonb_build_array
+-------------------
+ []
+(1 row)
+
+SELECT jsonb_build_array(VARIADIC '{a,b,c}'::text[]); -- ok
+ jsonb_build_array
+-------------------
+ ["a", "b", "c"]
+(1 row)
+
+SELECT jsonb_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok
+ jsonb_build_array
+-------------------
+ ["a", null]
+(1 row)
+
+SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok
+ jsonb_build_array
+----------------------
+ ["1", "2", "3", "4"]
+(1 row)
+
+SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok
+ jsonb_build_array
+-------------------
+ [1, 2, 3, 4]
+(1 row)
+
+SELECT jsonb_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
+ jsonb_build_array
+--------------------
+ [1, 4, 2, 5, 3, 6]
+(1 row)
+
+SELECT jsonb_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
+ jsonb_build_object
+-------------------------------------------------------------------------
+ {"a": 1, "b": 1.2, "c": true, "d": null, "e": {"x": 3, "y": [1, 2, 3]}}
+(1 row)
+
+SELECT jsonb_build_object(
+ 'a', jsonb_build_object('b',false,'c',99),
+ 'd', jsonb_build_object('e',array[9,8,7]::int[],
+ 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r)));
+ jsonb_build_object
+------------------------------------------------------------------------------------------------
+ {"a": {"b": false, "c": 99}, "d": {"e": [9, 8, 7], "f": {"name": "pg_class", "relkind": "r"}}}
+(1 row)
+
+SELECT jsonb_build_object('{a,b,c}'::text[]); -- error
+ERROR: argument list must have even number of elements
+HINT: The arguments of jsonb_build_object() must consist of alternating keys and values.
+SELECT jsonb_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array
+ERROR: key value must be scalar, not array, composite, or json
+SELECT jsonb_build_object('a', 'b', 'c'); -- error
+ERROR: argument list must have even number of elements
+HINT: The arguments of jsonb_build_object() must consist of alternating keys and values.
+SELECT jsonb_build_object(NULL, 'a'); -- error, key cannot be NULL
+ERROR: argument 1: key must not be null
+SELECT jsonb_build_object('a', NULL); -- ok
+ jsonb_build_object
+--------------------
+ {"a": null}
+(1 row)
+
+SELECT jsonb_build_object(VARIADIC NULL::text[]); -- ok
+ jsonb_build_object
+--------------------
+
+(1 row)
+
+SELECT jsonb_build_object(VARIADIC '{}'::text[]); -- ok
+ jsonb_build_object
+--------------------
+ {}
+(1 row)
+
+SELECT jsonb_build_object(VARIADIC '{a,b,c}'::text[]); -- error
+ERROR: argument list must have even number of elements
+HINT: The arguments of jsonb_build_object() must consist of alternating keys and values.
+SELECT jsonb_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok
+ jsonb_build_object
+--------------------
+ {"a": null}
+(1 row)
+
+SELECT jsonb_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL
+ERROR: argument 1: key must not be null
+SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok
+ jsonb_build_object
+----------------------
+ {"1": "2", "3": "4"}
+(1 row)
+
+SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok
+ jsonb_build_object
+--------------------
+ {"1": 2, "3": 4}
+(1 row)
+
+SELECT jsonb_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
+ jsonb_build_object
+--------------------------
+ {"1": 4, "2": 5, "3": 6}
+(1 row)
+
+-- empty objects/arrays
+SELECT jsonb_build_array();
+ jsonb_build_array
+-------------------
+ []
+(1 row)
+
+SELECT jsonb_build_object();
+ jsonb_build_object
+--------------------
+ {}
+(1 row)
+
+-- make sure keys are quoted
+SELECT jsonb_build_object(1,2);
+ jsonb_build_object
+--------------------
+ {"1": 2}
+(1 row)
+
+-- keys must be scalar and not null
+SELECT jsonb_build_object(null,2);
+ERROR: argument 1: key must not be null
+SELECT jsonb_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r;
+ERROR: key value must be scalar, not array, composite, or json
+SELECT jsonb_build_object(json '{"a":1,"b":2}', 3);
+ERROR: key value must be scalar, not array, composite, or json
+SELECT jsonb_build_object('{1,2,3}'::int[], 3);
+ERROR: key value must be scalar, not array, composite, or json
+-- handling of NULL values
+SELECT jsonb_object_agg(1, NULL::jsonb);
+ jsonb_object_agg
+------------------
+ {"1": null}
+(1 row)
+
+SELECT jsonb_object_agg(NULL, '{"a":1}');
+ERROR: field name must not be null
+CREATE TEMP TABLE foo (serial_num int, name text, type text);
+INSERT INTO foo VALUES (847001,'t15','GE1043');
+INSERT INTO foo VALUES (847002,'t16','GE1043');
+INSERT INTO foo VALUES (847003,'sub-alpha','GESS90');
+SELECT jsonb_build_object('turbines',jsonb_object_agg(serial_num,jsonb_build_object('name',name,'type',type)))
+FROM foo;
+ jsonb_build_object
+-------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"turbines": {"847001": {"name": "t15", "type": "GE1043"}, "847002": {"name": "t16", "type": "GE1043"}, "847003": {"name": "sub-alpha", "type": "GESS90"}}}
+(1 row)
+
+SELECT jsonb_object_agg(name, type) FROM foo;
+ jsonb_object_agg
+-----------------------------------------------------------
+ {"t15": "GE1043", "t16": "GE1043", "sub-alpha": "GESS90"}
+(1 row)
+
+INSERT INTO foo VALUES (999999, NULL, 'bar');
+SELECT jsonb_object_agg(name, type) FROM foo;
+ERROR: field name must not be null
+-- jsonb_object
+-- empty object, one dimension
+SELECT jsonb_object('{}');
+ jsonb_object
+--------------
+ {}
+(1 row)
+
+-- empty object, two dimensions
+SELECT jsonb_object('{}', '{}');
+ jsonb_object
+--------------
+ {}
+(1 row)
+
+-- one dimension
+SELECT jsonb_object('{a,1,b,2,3,NULL,"d e f","a b c"}');
+ jsonb_object
+---------------------------------------------------
+ {"3": null, "a": "1", "b": "2", "d e f": "a b c"}
+(1 row)
+
+-- same but with two dimensions
+SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
+ jsonb_object
+---------------------------------------------------
+ {"3": null, "a": "1", "b": "2", "d e f": "a b c"}
+(1 row)
+
+-- odd number error
+SELECT jsonb_object('{a,b,c}');
+ERROR: array must have even number of elements
+-- one column error
+SELECT jsonb_object('{{a},{b}}');
+ERROR: array must have two columns
+-- too many columns error
+SELECT jsonb_object('{{a,b,c},{b,c,d}}');
+ERROR: array must have two columns
+-- too many dimensions error
+SELECT jsonb_object('{{{a,b},{c,d}},{{b,c},{d,e}}}');
+ERROR: wrong number of array subscripts
+--two argument form of jsonb_object
+select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}');
+ jsonb_object
+--------------------------------------------------
+ {"a": "1", "b": "2", "c": "3", "d e f": "a b c"}
+(1 row)
+
+-- too many dimensions
+SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
+ERROR: wrong number of array subscripts
+-- mismatched dimensions
+select jsonb_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}');
+ERROR: mismatched array dimensions
+select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}');
+ERROR: mismatched array dimensions
+-- null key error
+select jsonb_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}');
+ERROR: null value not allowed for object key
+-- empty key is allowed
+select jsonb_object('{a,b,"","d e f"}','{1,2,3,"a b c"}');
+ jsonb_object
+-------------------------------------------------
+ {"": "3", "a": "1", "b": "2", "d e f": "a b c"}
+(1 row)
+
+-- extract_path, extract_path_as_text
+SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
+ jsonb_extract_path
+--------------------
+ "stringy"
+(1 row)
+
+SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
+ jsonb_extract_path
+--------------------
+ {"f3": 1}
+(1 row)
+
+SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
+ jsonb_extract_path
+--------------------
+ "f3"
+(1 row)
+
+SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
+ jsonb_extract_path
+--------------------
+ 1
+(1 row)
+
+SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
+ jsonb_extract_path_text
+-------------------------
+ stringy
+(1 row)
+
+SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
+ jsonb_extract_path_text
+-------------------------
+ {"f3": 1}
+(1 row)
+
+SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
+ jsonb_extract_path_text
+-------------------------
+ f3
+(1 row)
+
+SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
+ jsonb_extract_path_text
+-------------------------
+ 1
+(1 row)
+
+-- extract_path nulls
+SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_false;
+ expect_false
+--------------
+ f
+(1 row)
+
+SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_true;
+ expect_true
+-------------
+ t
+(1 row)
+
+SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_false;
+ expect_false
+--------------
+ f
+(1 row)
+
+SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_true;
+ expect_true
+-------------
+ t
+(1 row)
+
+-- extract_path operators
+SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f4','f6'];
+ ?column?
+-----------
+ "stringy"
+(1 row)
+
+SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2'];
+ ?column?
+-----------
+ {"f3": 1}
+(1 row)
+
+SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','0'];
+ ?column?
+----------
+ "f3"
+(1 row)
+
+SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','1'];
+ ?column?
+----------
+ 1
+(1 row)
+
+SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f4','f6'];
+ ?column?
+----------
+ stringy
+(1 row)
+
+SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2'];
+ ?column?
+-----------
+ {"f3": 1}
+(1 row)
+
+SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','0'];
+ ?column?
+----------
+ f3
+(1 row)
+
+SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','1'];
+ ?column?
+----------
+ 1
+(1 row)
+
+-- corner cases for same
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> '{}';
+ ?column?
+----------------------------
+ {"a": {"b": {"c": "foo"}}}
+(1 row)
+
+select '[1,2,3]'::jsonb #> '{}';
+ ?column?
+-----------
+ [1, 2, 3]
+(1 row)
+
+select '"foo"'::jsonb #> '{}';
+ ?column?
+----------
+ "foo"
+(1 row)
+
+select '42'::jsonb #> '{}';
+ ?column?
+----------
+ 42
+(1 row)
+
+select 'null'::jsonb #> '{}';
+ ?column?
+----------
+ null
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a'];
+ ?column?
+---------------------
+ {"b": {"c": "foo"}}
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', null];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', ''];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b'];
+ ?column?
+--------------
+ {"c": "foo"}
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c'];
+ ?column?
+----------
+ "foo"
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c','d'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','z','c'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','1','b'];
+ ?column?
+----------
+ "cc"
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','z','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['1','b'];
+ ?column?
+----------
+ "cc"
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['z','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": null}]'::jsonb #> array['1','b'];
+ ?column?
+----------
+ null
+(1 row)
+
+select '"foo"'::jsonb #> array['z'];
+ ?column?
+----------
+
+(1 row)
+
+select '42'::jsonb #> array['f2'];
+ ?column?
+----------
+
+(1 row)
+
+select '42'::jsonb #> array['0'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> '{}';
+ ?column?
+----------------------------
+ {"a": {"b": {"c": "foo"}}}
+(1 row)
+
+select '[1,2,3]'::jsonb #>> '{}';
+ ?column?
+-----------
+ [1, 2, 3]
+(1 row)
+
+select '"foo"'::jsonb #>> '{}';
+ ?column?
+----------
+ foo
+(1 row)
+
+select '42'::jsonb #>> '{}';
+ ?column?
+----------
+ 42
+(1 row)
+
+select 'null'::jsonb #>> '{}';
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a'];
+ ?column?
+---------------------
+ {"b": {"c": "foo"}}
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', null];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', ''];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b'];
+ ?column?
+--------------
+ {"c": "foo"}
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c'];
+ ?column?
+----------
+ foo
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c','d'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','z','c'];
+ ?column?
+----------
+
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','1','b'];
+ ?column?
+----------
+ cc
+(1 row)
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','z','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['1','b'];
+ ?column?
+----------
+ cc
+(1 row)
+
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['z','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '[{"b": "c"}, {"b": null}]'::jsonb #>> array['1','b'];
+ ?column?
+----------
+
+(1 row)
+
+select '"foo"'::jsonb #>> array['z'];
+ ?column?
+----------
+
+(1 row)
+
+select '42'::jsonb #>> array['f2'];
+ ?column?
+----------
+
+(1 row)
+
+select '42'::jsonb #>> array['0'];
+ ?column?
+----------
+
+(1 row)
+
+-- array_elements
+SELECT jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]');
+ jsonb_array_elements
+----------------------------
+ 1
+ true
+ [1, [2, 3]]
+ null
+ {"f1": 1, "f2": [7, 8, 9]}
+ false
+(6 rows)
+
+SELECT * FROM jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]') q;
+ value
+----------------------------
+ 1
+ true
+ [1, [2, 3]]
+ null
+ {"f1": 1, "f2": [7, 8, 9]}
+ false
+(6 rows)
+
+SELECT jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
+ jsonb_array_elements_text
+----------------------------
+ 1
+ true
+ [1, [2, 3]]
+
+ {"f1": 1, "f2": [7, 8, 9]}
+ false
+ stringy
+(7 rows)
+
+SELECT * FROM jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
+ value
+----------------------------
+ 1
+ true
+ [1, [2, 3]]
+
+ {"f1": 1, "f2": [7, 8, 9]}
+ false
+ stringy
+(7 rows)
+
+-- populate_record
+CREATE TYPE jbpop AS (a text, b int, c timestamp);
+CREATE DOMAIN jsb_int_not_null AS int NOT NULL;
+CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3);
+CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3);
+create type jb_unordered_pair as (x int, y int);
+create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y);
+CREATE TYPE jsbrec AS (
+ i int,
+ ia _int4,
+ ia1 int[],
+ ia2 int[][],
+ ia3 int[][][],
+ ia1d jsb_int_array_1d,
+ ia2d jsb_int_array_2d,
+ t text,
+ ta text[],
+ c char(10),
+ ca char(10)[],
+ ts timestamp,
+ js json,
+ jsb jsonb,
+ jsa json[],
+ rec jbpop,
+ reca jbpop[]
+);
+CREATE TYPE jsbrec_i_not_null AS (
+ i jsb_int_not_null
+);
+SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q;
+ a | b | c
+--------+---+---
+ blurfl | |
+(1 row)
+
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | 3 | Mon Dec 31 15:30:56 2012
+(1 row)
+
+SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q;
+ a | b | c
+--------+---+---
+ blurfl | |
+(1 row)
+
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | 3 | Mon Dec 31 15:30:56 2012
+(1 row)
+
+SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":[100,200,false],"x":43.2}') q;
+ a | b | c
+-------------------+---+---
+ [100, 200, false] | |
+(1 row)
+
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":[100,200,false],"x":43.2}') q;
+ a | b | c
+-------------------+---+--------------------------
+ [100, 200, false] | 3 | Mon Dec 31 15:30:56 2012
+(1 row)
+
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"c":[100,200,false],"x":43.2}') q;
+ERROR: invalid input syntax for type timestamp: "[100, 200, false]"
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop, '{}') q;
+ a | b | c
+---+---+--------------------------
+ x | 3 | Mon Dec 31 15:30:56 2012
+(1 row)
+
+SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"x": 43.2}') q;
+ERROR: domain jsb_int_not_null does not allow null values
+SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": null}') q;
+ERROR: domain jsb_int_not_null does not allow null values
+SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": 12345}') q;
+ i
+-------
+ 12345
+(1 row)
+
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": null}') q;
+ ia
+----
+
+(1 row)
+
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ia".
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [1, "2", null, 4]}') q;
+ ia
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1, 2], [3, 4]]}') q;
+ ia
+---------------
+ {{1,2},{3,4}}
+(1 row)
+
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], 2]}') q;
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ia".
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], [2, 3]]}') q;
+ERROR: malformed JSON array
+DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": "{1,2,3}"}') q;
+ ia
+---------
+ {1,2,3}
+(1 row)
+
+SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": null}') q;
+ ia1
+-----
+
+(1 row)
+
+SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ia1".
+SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [1, "2", null, 4]}') q;
+ ia1
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [[1, 2, 3]]}') q;
+ ia1
+-----------
+ {{1,2,3}}
+(1 row)
+
+SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": null}') q;
+ ia1d
+------
+
+(1 row)
+
+SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ia1d".
+SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null, 4]}') q;
+ERROR: value for domain jsb_int_array_1d violates check constraint "jsb_int_array_1d_check"
+SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null]}') q;
+ ia1d
+------------
+ {1,2,NULL}
+(1 row)
+
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [1, "2", null, 4]}') q;
+ ia2
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [null, 4]]}') q;
+ ia2
+------------------
+ {{1,2},{NULL,4}}
+(1 row)
+
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[], []]}') q;
+ ia2
+-----
+ {}
+(1 row)
+
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [3]]}') q;
+ERROR: malformed JSON array
+DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], 3, 4]}') q;
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ia2".
+SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2"], [null, 4]]}') q;
+ERROR: value for domain jsb_int_array_2d violates check constraint "jsb_int_array_2d_check"
+SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q;
+ ia2d
+----------------------
+ {{1,2,3},{NULL,5,6}}
+(1 row)
+
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [1, "2", null, 4]}') q;
+ ia3
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [[1, 2], [null, 4]]}') q;
+ ia3
+------------------
+ {{1,2},{NULL,4}}
+(1 row)
+
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q;
+ ia3
+-----
+ {}
+(1 row)
+
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q;
+ ia3
+-------------------
+ {{{1,2}},{{3,4}}}
+(1 row)
+
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q;
+ ia3
+-------------------------------
+ {{{1,2},{3,4}},{{5,6},{7,8}}}
+(1 row)
+
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q;
+ERROR: malformed JSON array
+DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
+SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": null}') q;
+ ta
+----
+
+(1 row)
+
+SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ta".
+SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [1, "2", null, 4]}') q;
+ ta
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q;
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ta".
+SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": null}') q;
+ c
+---
+
+(1 row)
+
+SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaa"}') q;
+ c
+------------
+ aaa
+(1 row)
+
+SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaa"}') q;
+ c
+------------
+ aaaaaaaaaa
+(1 row)
+
+SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaaaaa"}') q;
+ERROR: value too long for type character(10)
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": null}') q;
+ ca
+----
+
+(1 row)
+
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "ca".
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [1, "2", null, 4]}') q;
+ ca
+-----------------------------------------------
+ {"1 ","2 ",NULL,"4 "}
+(1 row)
+
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q;
+ERROR: value too long for type character(10)
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q;
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ca".
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": null}') q;
+ js
+----
+
+(1 row)
+
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": true}') q;
+ js
+------
+ true
+(1 row)
+
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": 123.45}') q;
+ js
+--------
+ 123.45
+(1 row)
+
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "123.45"}') q;
+ js
+----------
+ "123.45"
+(1 row)
+
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "abc"}') q;
+ js
+-------
+ "abc"
+(1 row)
+
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": [123, "123", null, {"key": "value"}]}') q;
+ js
+--------------------------------------
+ [123, "123", null, {"key": "value"}]
+(1 row)
+
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q;
+ js
+--------------------------------------
+ {"a": "bbb", "b": null, "c": 123.45}
+(1 row)
+
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": null}') q;
+ jsb
+-----
+
+(1 row)
+
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": true}') q;
+ jsb
+------
+ true
+(1 row)
+
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": 123.45}') q;
+ jsb
+--------
+ 123.45
+(1 row)
+
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "123.45"}') q;
+ jsb
+----------
+ "123.45"
+(1 row)
+
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "abc"}') q;
+ jsb
+-------
+ "abc"
+(1 row)
+
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q;
+ jsb
+--------------------------------------
+ [123, "123", null, {"key": "value"}]
+(1 row)
+
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q;
+ jsb
+--------------------------------------
+ {"a": "bbb", "b": null, "c": 123.45}
+(1 row)
+
+SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": null}') q;
+ jsa
+-----
+
+(1 row)
+
+SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "jsa".
+SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": [1, "2", null, 4]}') q;
+ jsa
+--------------------
+ {1,"\"2\"",NULL,4}
+(1 row)
+
+SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q;
+ jsa
+-------------------------------------------------------
+ {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{\"k\": \"v\"}"}
+(1 row)
+
+SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": 123}') q;
+ERROR: cannot call populate_composite on a scalar
+SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": [1, 2]}') q;
+ERROR: cannot call populate_composite on an array
+SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q;
+ rec
+-----------------------------------
+ (abc,,"Thu Jan 02 00:00:00 2003")
+(1 row)
+
+SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": "(abc,42,01.02.2003)"}') q;
+ rec
+-------------------------------------
+ (abc,42,"Thu Jan 02 00:00:00 2003")
+(1 row)
+
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": 123}') q;
+ERROR: expected JSON array
+HINT: See the value of key "reca".
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [1, 2]}') q;
+ERROR: cannot call populate_composite on a scalar
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q;
+ reca
+--------------------------------------------------------
+ {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
+(1 row)
+
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": ["(abc,42,01.02.2003)"]}') q;
+ reca
+-------------------------------------------
+ {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
+(1 row)
+
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q;
+ reca
+-------------------------------------------
+ {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"}
+(1 row)
+
+SELECT rec FROM jsonb_populate_record(
+ row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
+ row('x',3,'2012-12-31 15:30:56')::jbpop,NULL)::jsbrec,
+ '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}'
+) q;
+ rec
+------------------------------------
+ (abc,3,"Thu Jan 02 00:00:00 2003")
+(1 row)
+
+-- anonymous record type
+SELECT jsonb_populate_record(null::record, '{"x": 0, "y": 1}');
+ERROR: could not determine row type for result of jsonb_populate_record
+HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
+SELECT jsonb_populate_record(row(1,2), '{"f1": 0, "f2": 1}');
+ jsonb_populate_record
+-----------------------
+ (0,1)
+(1 row)
+
+SELECT * FROM
+ jsonb_populate_record(null::record, '{"x": 776}') AS (x int, y int);
+ x | y
+-----+---
+ 776 |
+(1 row)
+
+-- composite domain
+SELECT jsonb_populate_record(null::jb_ordered_pair, '{"x": 0, "y": 1}');
+ jsonb_populate_record
+-----------------------
+ (0,1)
+(1 row)
+
+SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 0}');
+ jsonb_populate_record
+-----------------------
+ (0,2)
+(1 row)
+
+SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 1, "y": 0}');
+ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check"
+-- populate_recordset
+SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | |
+ | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+----+--------------------------
+ blurfl | 99 |
+ def | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | |
+ | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+----+--------------------------
+ blurfl | 99 |
+ def | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+-----------------+----+--------------------------
+ [100, 200, 300] | 99 |
+ {"z": true} | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ERROR: invalid input syntax for type timestamp: "[100, 200, 300]"
+SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+---+--------------------------
+ blurfl | |
+ | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+--------+----+--------------------------
+ blurfl | 99 |
+ def | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+ a | b | c
+-----------------+----+--------------------------
+ [100, 200, 300] | 99 |
+ {"z": true} | 3 | Fri Jan 20 10:42:53 2012
+(2 rows)
+
+-- anonymous record type
+SELECT jsonb_populate_recordset(null::record, '[{"x": 0, "y": 1}]');
+ERROR: could not determine row type for result of jsonb_populate_recordset
+HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
+SELECT jsonb_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]');
+ jsonb_populate_recordset
+--------------------------
+ (0,1)
+(1 row)
+
+SELECT i, jsonb_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]')
+FROM (VALUES (1),(2)) v(i);
+ i | jsonb_populate_recordset
+---+--------------------------
+ 1 | (42,50)
+ 1 | (1,43)
+ 2 | (42,50)
+ 2 | (2,43)
+(4 rows)
+
+SELECT * FROM
+ jsonb_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int);
+ x | y
+-----+---
+ 776 |
+(1 row)
+
+-- empty array is a corner case
+SELECT jsonb_populate_recordset(null::record, '[]');
+ERROR: could not determine row type for result of jsonb_populate_recordset
+HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list.
+SELECT jsonb_populate_recordset(row(1,2), '[]');
+ jsonb_populate_recordset
+--------------------------
+(0 rows)
+
+SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[]') q;
+ a | b | c
+---+---+---
+(0 rows)
+
+SELECT * FROM
+ jsonb_populate_recordset(null::record, '[]') AS (x int, y int);
+ x | y
+---+---
+(0 rows)
+
+-- composite domain
+SELECT jsonb_populate_recordset(null::jb_ordered_pair, '[{"x": 0, "y": 1}]');
+ jsonb_populate_recordset
+--------------------------
+ (0,1)
+(1 row)
+
+SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 0}, {"y": 3}]');
+ jsonb_populate_recordset
+--------------------------
+ (0,2)
+ (1,3)
+(2 rows)
+
+SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 1, "y": 0}]');
+ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check"
+-- negative cases where the wrong record type is supplied
+select * from jsonb_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+ERROR: function return row and query-specified return row do not match
+DETAIL: Returned row contains 1 attribute, but query expects 2.
+select * from jsonb_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+ERROR: function return row and query-specified return row do not match
+DETAIL: Returned type integer at ordinal position 1, but query expects text.
+select * from jsonb_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+ERROR: function return row and query-specified return row do not match
+DETAIL: Returned row contains 3 attributes, but query expects 2.
+select * from jsonb_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text);
+ERROR: function return row and query-specified return row do not match
+DETAIL: Returned type integer at ordinal position 1, but query expects text.
+-- jsonb_to_record and jsonb_to_recordset
+select * from jsonb_to_record('{"a":1,"b":"foo","c":"bar"}')
+ as x(a int, b text, d text);
+ a | b | d
+---+-----+---
+ 1 | foo |
+(1 row)
+
+select * from jsonb_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]')
+ as x(a int, b text, c boolean);
+ a | b | c
+---+-----+---
+ 1 | foo |
+ 2 | bar | t
+(2 rows)
+
+select *, c is null as c_is_null
+from jsonb_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::jsonb)
+ as t(a int, b jsonb, c text, x int, ca char(5)[], ia int[][], r jbpop);
+ a | b | c | x | ca | ia | r | c_is_null
+---+-------------------+---+---+-------------------+---------------+------------+-----------
+ 1 | {"c": 16, "d": 2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t
+(1 row)
+
+select *, c is null as c_is_null
+from jsonb_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::jsonb)
+ as t(a int, b jsonb, c text, x int);
+ a | b | c | x | c_is_null
+---+-------------------+---+---+-----------
+ 1 | {"c": 16, "d": 2} | | 8 | t
+(1 row)
+
+select * from jsonb_to_record('{"ia": null}') as x(ia _int4);
+ ia
+----
+
+(1 row)
+
+select * from jsonb_to_record('{"ia": 123}') as x(ia _int4);
+ERROR: expected JSON array
+HINT: See the value of key "ia".
+select * from jsonb_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4);
+ ia
+--------------
+ {1,2,NULL,4}
+(1 row)
+
+select * from jsonb_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4);
+ ia
+---------------
+ {{1,2},{3,4}}
+(1 row)
+
+select * from jsonb_to_record('{"ia": [[1], 2]}') as x(ia _int4);
+ERROR: expected JSON array
+HINT: See the array element [1] of key "ia".
+select * from jsonb_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4);
+ERROR: malformed JSON array
+DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions.
+select * from jsonb_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]);
+ ia2
+---------
+ {1,2,3}
+(1 row)
+
+select * from jsonb_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]);
+ ia2
+---------------
+ {{1,2},{3,4}}
+(1 row)
+
+select * from jsonb_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]);
+ ia2
+-----------------
+ {{{1},{2},{3}}}
+(1 row)
+
+select * from jsonb_to_record('{"out": {"key": 1}}') as x(out json);
+ out
+------------
+ {"key": 1}
+(1 row)
+
+select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out json);
+ out
+--------------
+ [{"key": 1}]
+(1 row)
+
+select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out json);
+ out
+----------------
+ "{\"key\": 1}"
+(1 row)
+
+select * from jsonb_to_record('{"out": {"key": 1}}') as x(out jsonb);
+ out
+------------
+ {"key": 1}
+(1 row)
+
+select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out jsonb);
+ out
+--------------
+ [{"key": 1}]
+(1 row)
+
+select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb);
+ out
+----------------
+ "{\"key\": 1}"
+(1 row)
+
+-- test type info caching in jsonb_populate_record()
+CREATE TEMP TABLE jsbpoptest (js jsonb);
+INSERT INTO jsbpoptest
+SELECT '{
+ "jsa": [1, "2", null, 4],
+ "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2},
+ "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]
+}'::jsonb
+FROM generate_series(1, 3);
+SELECT (jsonb_populate_record(NULL::jsbrec, js)).* FROM jsbpoptest;
+ i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca
+---+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+--------------------------------------------------------
+ | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
+ | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
+ | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"}
+(3 rows)
+
+DROP TYPE jsbrec;
+DROP TYPE jsbrec_i_not_null;
+DROP DOMAIN jsb_int_not_null;
+DROP DOMAIN jsb_int_array_1d;
+DROP DOMAIN jsb_int_array_2d;
+DROP DOMAIN jb_ordered_pair;
+DROP TYPE jb_unordered_pair;
+-- indexing
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
+ count
+-------
+ 15
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j ? 'public';
+ count
+-------
+ 194
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j ? 'bar';
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled'];
+ count
+-------
+ 337
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled'];
+ count
+-------
+ 42
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait';
+ count
+-------
+ 15
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
+ count
+-------
+ 1012
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)';
+ count
+-------
+ 194
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)';
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)';
+ count
+-------
+ 337
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)';
+ count
+-------
+ 42
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)';
+ count
+-------
+ 15
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$';
+ count
+-------
+ 1012
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.public';
+ count
+-------
+ 194
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
+ count
+-------
+ 0
+(1 row)
+
+CREATE INDEX jidx ON testjsonb USING gin (j);
+SET enable_seqscan = off;
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
+ count
+-------
+ 15
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"array":["foo"]}';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}';
+ count
+-------
+ 3
+(1 row)
+
+-- exercise GIN_SEARCH_MODE_ALL
+SELECT count(*) FROM testjsonb WHERE j @> '{}';
+ count
+-------
+ 1012
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j ? 'public';
+ count
+-------
+ 194
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j ? 'bar';
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled'];
+ count
+-------
+ 337
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled'];
+ count
+-------
+ 42
+(1 row)
+
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
+ QUERY PLAN
+-----------------------------------------------------------------
+ Aggregate
+ -> Bitmap Heap Scan on testjsonb
+ Recheck Cond: (j @@ '($."wait" == null)'::jsonpath)
+ -> Bitmap Index Scan on jidx
+ Index Cond: (j @@ '($."wait" == null)'::jsonpath)
+(5 rows)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? (@ == null))';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait';
+ count
+-------
+ 15
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
+ count
+-------
+ 1012
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)';
+ count
+-------
+ 194
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)';
+ count
+-------
+ 0
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)';
+ count
+-------
+ 337
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)';
+ count
+-------
+ 42
+(1 row)
+
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+ QUERY PLAN
+-------------------------------------------------------------------
+ Aggregate
+ -> Bitmap Heap Scan on testjsonb
+ Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
+ -> Bitmap Index Scan on jidx
+ Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
+(5 rows)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)';
+ count
+-------
+ 15
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? (@ == "bar")';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$';
+ count
+-------
+ 1012
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.public';
+ count
+-------
+ 194
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
+ count
+-------
+ 0
+(1 row)
+
+-- array exists - array elements should behave as keys (for GIN index scans too)
+CREATE INDEX jidx_array ON testjsonb USING gin((j->'array'));
+SELECT count(*) from testjsonb WHERE j->'array' ? 'bar';
+ count
+-------
+ 3
+(1 row)
+
+-- type sensitive array exists - should return no rows (since "exists" only
+-- matches strings that are either object keys or array elements)
+SELECT count(*) from testjsonb WHERE j->'array' ? '5'::text;
+ count
+-------
+ 0
+(1 row)
+
+-- However, a raw scalar is *contained* within the array
+SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb;
+ count
+-------
+ 1
+(1 row)
+
+RESET enable_seqscan;
+SELECT count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow;
+ count
+-------
+ 4791
+(1 row)
+
+SELECT key, count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow GROUP BY key ORDER BY count DESC, key;
+ key | count
+-----------+-------
+ line | 884
+ query | 207
+ pos | 203
+ node | 202
+ space | 197
+ status | 195
+ public | 194
+ title | 190
+ wait | 190
+ org | 189
+ user | 189
+ coauthors | 188
+ disabled | 185
+ indexed | 184
+ cleaned | 180
+ bad | 179
+ date | 179
+ world | 176
+ state | 172
+ subtitle | 169
+ auth | 168
+ abstract | 161
+ array | 5
+ age | 2
+ foo | 2
+ fool | 1
+(26 rows)
+
+-- sort/hash
+SELECT count(distinct j) FROM testjsonb;
+ count
+-------
+ 894
+(1 row)
+
+SET enable_hashagg = off;
+SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2;
+ count
+-------
+ 894
+(1 row)
+
+SET enable_hashagg = on;
+SET enable_sort = off;
+SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2;
+ count
+-------
+ 894
+(1 row)
+
+SELECT distinct * FROM (values (jsonb '{}' || ''::text),('{}')) v(j);
+ j
+----
+ {}
+(1 row)
+
+SET enable_sort = on;
+RESET enable_hashagg;
+RESET enable_sort;
+DROP INDEX jidx;
+DROP INDEX jidx_array;
+-- btree
+CREATE INDEX jidx ON testjsonb USING btree (j);
+SET enable_seqscan = off;
+SELECT count(*) FROM testjsonb WHERE j > '{"p":1}';
+ count
+-------
+ 884
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j = '{"pos":98, "line":371, "node":"CBA", "indexed":true}';
+ count
+-------
+ 1
+(1 row)
+
+--gin path opclass
+DROP INDEX jidx;
+CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops);
+SET enable_seqscan = off;
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
+ count
+-------
+ 15
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
+ count
+-------
+ 2
+(1 row)
+
+-- exercise GIN_SEARCH_MODE_ALL
+SELECT count(*) FROM testjsonb WHERE j @> '{}';
+ count
+-------
+ 1012
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? (@ == null))';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait';
+ count
+-------
+ 15
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
+ count
+-------
+ 1012
+(1 row)
+
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+ QUERY PLAN
+-------------------------------------------------------------------
+ Aggregate
+ -> Bitmap Heap Scan on testjsonb
+ Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
+ -> Bitmap Index Scan on jidx
+ Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath)
+(5 rows)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+ count
+-------
+ 1
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)';
+ count
+-------
+ 15
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)';
+ count
+-------
+ 2
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? (@ == "bar")';
+ count
+-------
+ 3
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$';
+ count
+-------
+ 1012
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.public';
+ count
+-------
+ 194
+(1 row)
+
+SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
+ count
+-------
+ 0
+(1 row)
+
+RESET enable_seqscan;
+DROP INDEX jidx;
+-- nested tests
+SELECT '{"ff":{"a":12,"b":16}}'::jsonb;
+ jsonb
+----------------------------
+ {"ff": {"a": 12, "b": 16}}
+(1 row)
+
+SELECT '{"ff":{"a":12,"b":16},"qq":123}'::jsonb;
+ jsonb
+---------------------------------------
+ {"ff": {"a": 12, "b": 16}, "qq": 123}
+(1 row)
+
+SELECT '{"aa":["a","aaa"],"qq":{"a":12,"b":16,"c":["c1","c2"],"d":{"d1":"d1","d2":"d2","d1":"d3"}}}'::jsonb;
+ jsonb
+--------------------------------------------------------------------------------------------------
+ {"aa": ["a", "aaa"], "qq": {"a": 12, "b": 16, "c": ["c1", "c2"], "d": {"d1": "d3", "d2": "d2"}}}
+(1 row)
+
+SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2"],"d":{"d1":"d1","d2":"d2"}}}'::jsonb;
+ jsonb
+------------------------------------------------------------------------------------------------------
+ {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2"], "d": {"d1": "d1", "d2": "d2"}}}
+(1 row)
+
+SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2",["c3"],{"c4":4}],"d":{"d1":"d1","d2":"d2"}}}'::jsonb;
+ jsonb
+-------------------------------------------------------------------------------------------------------------------------
+ {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2", ["c3"], {"c4": 4}], "d": {"d1": "d1", "d2": "d2"}}}
+(1 row)
+
+SELECT '{"ff":["a","aaa"]}'::jsonb;
+ jsonb
+----------------------
+ {"ff": ["a", "aaa"]}
+(1 row)
+
+SELECT
+ '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'ff',
+ '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'qq',
+ ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'Y') IS NULL AS f,
+ ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb ->> 'Y') IS NULL AS t,
+ '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'x';
+ ?column? | ?column? | f | t | ?column?
+--------------------+----------+---+---+----------
+ {"a": 12, "b": 16} | 123 | f | t | [1, 2]
+(1 row)
+
+-- nested containment
+SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1,2]}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":[2,1],"c":"b"}'::jsonb @> '{"a":[1,2]}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":[1,2]}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":[1,2]}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":{"1":2}}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":{"1":2}}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '["a","b"]'::jsonb @> '["a","b","c","b"]';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '["a","b","c","b"]'::jsonb @> '["a","b"]';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '["a","b","c",[1,2]]'::jsonb @> '["a",[1,2]]';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '["a","b","c",[1,2]]'::jsonb @> '["b",[1,2]]';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1]}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[2]}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[3]}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"c":3}]}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4}]}';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},3]}';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},1]}';
+ ?column?
+----------
+ t
+(1 row)
+
+-- check some corner cases for indexed nested containment (bug #13756)
+create temp table nestjsonb (j jsonb);
+insert into nestjsonb (j) values ('{"a":[["b",{"x":1}],["b",{"x":2}]],"c":3}');
+insert into nestjsonb (j) values ('[[14,2,3]]');
+insert into nestjsonb (j) values ('[1,[14,2,3]]');
+create index on nestjsonb using gin(j jsonb_path_ops);
+set enable_seqscan = on;
+set enable_bitmapscan = off;
+select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb;
+ j
+---------------------------------------------------
+ {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3}
+(1 row)
+
+select * from nestjsonb where j @> '{"c":3}';
+ j
+---------------------------------------------------
+ {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3}
+(1 row)
+
+select * from nestjsonb where j @> '[[14]]';
+ j
+-----------------
+ [[14, 2, 3]]
+ [1, [14, 2, 3]]
+(2 rows)
+
+set enable_seqscan = off;
+set enable_bitmapscan = on;
+select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb;
+ j
+---------------------------------------------------
+ {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3}
+(1 row)
+
+select * from nestjsonb where j @> '{"c":3}';
+ j
+---------------------------------------------------
+ {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3}
+(1 row)
+
+select * from nestjsonb where j @> '[[14]]';
+ j
+-----------------
+ [[14, 2, 3]]
+ [1, [14, 2, 3]]
+(2 rows)
+
+reset enable_seqscan;
+reset enable_bitmapscan;
+-- nested object field / array index lookup
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'n';
+ ?column?
+----------
+ null
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'a';
+ ?column?
+----------
+ 1
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'b';
+ ?column?
+----------
+ [1, 2]
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'c';
+ ?column?
+----------
+ {"1": 2}
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd';
+ ?column?
+---------------
+ {"1": [2, 3]}
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd' -> '1';
+ ?column?
+----------
+ [2, 3]
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'e';
+ ?column?
+----------
+
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 0; --expecting error
+ ?column?
+----------
+
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 0;
+ ?column?
+----------
+ "a"
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 1;
+ ?column?
+----------
+ "b"
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 2;
+ ?column?
+----------
+ "c"
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 3;
+ ?column?
+----------
+ [1, 2]
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 3 -> 1;
+ ?column?
+----------
+ 2
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 4;
+ ?column?
+----------
+ null
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 5;
+ ?column?
+----------
+
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> -1;
+ ?column?
+----------
+ null
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> -5;
+ ?column?
+----------
+ "a"
+(1 row)
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> -6;
+ ?column?
+----------
+
+(1 row)
+
+--nested path extraction
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{0}';
+ ?column?
+----------
+
+(1 row)
+
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{a}';
+ ?column?
+----------
+ "b"
+(1 row)
+
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c}';
+ ?column?
+-----------
+ [1, 2, 3]
+(1 row)
+
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,0}';
+ ?column?
+----------
+ 1
+(1 row)
+
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,1}';
+ ?column?
+----------
+ 2
+(1 row)
+
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,2}';
+ ?column?
+----------
+ 3
+(1 row)
+
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,3}';
+ ?column?
+----------
+
+(1 row)
+
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-1}';
+ ?column?
+----------
+ 3
+(1 row)
+
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-3}';
+ ?column?
+----------
+ 1
+(1 row)
+
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-4}';
+ ?column?
+----------
+
+(1 row)
+
+SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{0}';
+ ?column?
+----------
+ 0
+(1 row)
+
+SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{3}';
+ ?column?
+----------
+ [3, 4]
+(1 row)
+
+SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4}';
+ ?column?
+---------------
+ {"5": "five"}
+(1 row)
+
+SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4,5}';
+ ?column?
+----------
+ "five"
+(1 row)
+
+--nested exists
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'n';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'a';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'b';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'c';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'd';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'e';
+ ?column?
+----------
+ f
+(1 row)
+
+-- jsonb_strip_nulls
+select jsonb_strip_nulls(null);
+ jsonb_strip_nulls
+-------------------
+
+(1 row)
+
+select jsonb_strip_nulls('1');
+ jsonb_strip_nulls
+-------------------
+ 1
+(1 row)
+
+select jsonb_strip_nulls('"a string"');
+ jsonb_strip_nulls
+-------------------
+ "a string"
+(1 row)
+
+select jsonb_strip_nulls('null');
+ jsonb_strip_nulls
+-------------------
+ null
+(1 row)
+
+select jsonb_strip_nulls('[1,2,null,3,4]');
+ jsonb_strip_nulls
+--------------------
+ [1, 2, null, 3, 4]
+(1 row)
+
+select jsonb_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}');
+ jsonb_strip_nulls
+--------------------------------------------
+ {"a": 1, "c": [2, null, 3], "d": {"e": 4}}
+(1 row)
+
+select jsonb_strip_nulls('[1,{"a":1,"b":null,"c":2},3]');
+ jsonb_strip_nulls
+--------------------------
+ [1, {"a": 1, "c": 2}, 3]
+(1 row)
+
+-- an empty object is not null and should not be stripped
+select jsonb_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }');
+ jsonb_strip_nulls
+--------------------
+ {"a": {}, "d": {}}
+(1 row)
+
+select jsonb_pretty('{"a": "test", "b": [1, 2, 3], "c": "test3", "d":{"dd": "test4", "dd2":{"ddd": "test5"}}}');
+ jsonb_pretty
+----------------------------
+ { +
+ "a": "test", +
+ "b": [ +
+ 1, +
+ 2, +
+ 3 +
+ ], +
+ "c": "test3", +
+ "d": { +
+ "dd": "test4", +
+ "dd2": { +
+ "ddd": "test5"+
+ } +
+ } +
+ }
+(1 row)
+
+select jsonb_pretty('[{"f1":1,"f2":null},2,null,[[{"x":true},6,7],8],3]');
+ jsonb_pretty
+---------------------------
+ [ +
+ { +
+ "f1": 1, +
+ "f2": null +
+ }, +
+ 2, +
+ null, +
+ [ +
+ [ +
+ { +
+ "x": true+
+ }, +
+ 6, +
+ 7 +
+ ], +
+ 8 +
+ ], +
+ 3 +
+ ]
+(1 row)
+
+select jsonb_pretty('{"a":["b", "c"], "d": {"e":"f"}}');
+ jsonb_pretty
+------------------
+ { +
+ "a": [ +
+ "b", +
+ "c" +
+ ], +
+ "d": { +
+ "e": "f"+
+ } +
+ }
+(1 row)
+
+select jsonb_concat('{"d": "test", "a": [1, 2]}', '{"g": "test2", "c": {"c1":1, "c2":2}}');
+ jsonb_concat
+-------------------------------------------------------------------
+ {"a": [1, 2], "c": {"c1": 1, "c2": 2}, "d": "test", "g": "test2"}
+(1 row)
+
+select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"cq":"l", "b":"g", "fg":false}';
+ ?column?
+---------------------------------------------
+ {"b": "g", "aa": 1, "cq": "l", "fg": false}
+(1 row)
+
+select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aq":"l"}';
+ ?column?
+---------------------------------------
+ {"b": 2, "aa": 1, "aq": "l", "cq": 3}
+(1 row)
+
+select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aa":"l"}';
+ ?column?
+------------------------------
+ {"b": 2, "aa": "l", "cq": 3}
+(1 row)
+
+select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{}';
+ ?column?
+----------------------------
+ {"b": 2, "aa": 1, "cq": 3}
+(1 row)
+
+select '["a", "b"]'::jsonb || '["c"]';
+ ?column?
+-----------------
+ ["a", "b", "c"]
+(1 row)
+
+select '["a", "b"]'::jsonb || '["c", "d"]';
+ ?column?
+----------------------
+ ["a", "b", "c", "d"]
+(1 row)
+
+select '["c"]' || '["a", "b"]'::jsonb;
+ ?column?
+-----------------
+ ["c", "a", "b"]
+(1 row)
+
+select '["a", "b"]'::jsonb || '"c"';
+ ?column?
+-----------------
+ ["a", "b", "c"]
+(1 row)
+
+select '"c"' || '["a", "b"]'::jsonb;
+ ?column?
+-----------------
+ ["c", "a", "b"]
+(1 row)
+
+select '[]'::jsonb || '["a"]'::jsonb;
+ ?column?
+----------
+ ["a"]
+(1 row)
+
+select '[]'::jsonb || '"a"'::jsonb;
+ ?column?
+----------
+ ["a"]
+(1 row)
+
+select '"b"'::jsonb || '"a"'::jsonb;
+ ?column?
+------------
+ ["b", "a"]
+(1 row)
+
+select '{}'::jsonb || '{"a":"b"}'::jsonb;
+ ?column?
+------------
+ {"a": "b"}
+(1 row)
+
+select '[]'::jsonb || '{"a":"b"}'::jsonb;
+ ?column?
+--------------
+ [{"a": "b"}]
+(1 row)
+
+select '{"a":"b"}'::jsonb || '[]'::jsonb;
+ ?column?
+--------------
+ [{"a": "b"}]
+(1 row)
+
+select '"a"'::jsonb || '{"a":1}';
+ ?column?
+-----------------
+ ["a", {"a": 1}]
+(1 row)
+
+select '{"a":1}' || '"a"'::jsonb;
+ ?column?
+-----------------
+ [{"a": 1}, "a"]
+(1 row)
+
+select '[3]'::jsonb || '{}'::jsonb;
+ ?column?
+----------
+ [3, {}]
+(1 row)
+
+select '3'::jsonb || '[]'::jsonb;
+ ?column?
+----------
+ [3]
+(1 row)
+
+select '3'::jsonb || '4'::jsonb;
+ ?column?
+----------
+ [3, 4]
+(1 row)
+
+select '3'::jsonb || '{}'::jsonb;
+ ?column?
+----------
+ [3, {}]
+(1 row)
+
+select '["a", "b"]'::jsonb || '{"c":1}';
+ ?column?
+----------------------
+ ["a", "b", {"c": 1}]
+(1 row)
+
+select '{"c": 1}'::jsonb || '["a", "b"]';
+ ?column?
+----------------------
+ [{"c": 1}, "a", "b"]
+(1 row)
+
+select '{}'::jsonb || '{"cq":"l", "b":"g", "fg":false}';
+ ?column?
+------------------------------------
+ {"b": "g", "cq": "l", "fg": false}
+(1 row)
+
+select pg_column_size('{}'::jsonb || '{}'::jsonb) = pg_column_size('{}'::jsonb);
+ ?column?
+----------
+ t
+(1 row)
+
+select pg_column_size('{"aa":1}'::jsonb || '{"b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb);
+ ?column?
+----------
+ t
+(1 row)
+
+select pg_column_size('{"aa":1, "b":2}'::jsonb || '{}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb);
+ ?column?
+----------
+ t
+(1 row)
+
+select pg_column_size('{}'::jsonb || '{"aa":1, "b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb);
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'a');
+ jsonb_delete
+------------------
+ {"b": 2, "c": 3}
+(1 row)
+
+select jsonb_delete('{"a":null , "b":2, "c":3}'::jsonb, 'a');
+ jsonb_delete
+------------------
+ {"b": 2, "c": 3}
+(1 row)
+
+select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'b');
+ jsonb_delete
+------------------
+ {"a": 1, "c": 3}
+(1 row)
+
+select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'c');
+ jsonb_delete
+------------------
+ {"a": 1, "b": 2}
+(1 row)
+
+select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'd');
+ jsonb_delete
+--------------------------
+ {"a": 1, "b": 2, "c": 3}
+(1 row)
+
+select '{"a":1 , "b":2, "c":3}'::jsonb - 'a';
+ ?column?
+------------------
+ {"b": 2, "c": 3}
+(1 row)
+
+select '{"a":null , "b":2, "c":3}'::jsonb - 'a';
+ ?column?
+------------------
+ {"b": 2, "c": 3}
+(1 row)
+
+select '{"a":1 , "b":2, "c":3}'::jsonb - 'b';
+ ?column?
+------------------
+ {"a": 1, "c": 3}
+(1 row)
+
+select '{"a":1 , "b":2, "c":3}'::jsonb - 'c';
+ ?column?
+------------------
+ {"a": 1, "b": 2}
+(1 row)
+
+select '{"a":1 , "b":2, "c":3}'::jsonb - 'd';
+ ?column?
+--------------------------
+ {"a": 1, "b": 2, "c": 3}
+(1 row)
+
+select pg_column_size('{"a":1 , "b":2, "c":3}'::jsonb - 'b') = pg_column_size('{"a":1, "b":2}'::jsonb);
+ ?column?
+----------
+ t
+(1 row)
+
+select '["a","b","c"]'::jsonb - 3;
+ ?column?
+-----------------
+ ["a", "b", "c"]
+(1 row)
+
+select '["a","b","c"]'::jsonb - 2;
+ ?column?
+------------
+ ["a", "b"]
+(1 row)
+
+select '["a","b","c"]'::jsonb - 1;
+ ?column?
+------------
+ ["a", "c"]
+(1 row)
+
+select '["a","b","c"]'::jsonb - 0;
+ ?column?
+------------
+ ["b", "c"]
+(1 row)
+
+select '["a","b","c"]'::jsonb - -1;
+ ?column?
+------------
+ ["a", "b"]
+(1 row)
+
+select '["a","b","c"]'::jsonb - -2;
+ ?column?
+------------
+ ["a", "c"]
+(1 row)
+
+select '["a","b","c"]'::jsonb - -3;
+ ?column?
+------------
+ ["b", "c"]
+(1 row)
+
+select '["a","b","c"]'::jsonb - -4;
+ ?column?
+-----------------
+ ["a", "b", "c"]
+(1 row)
+
+select '{"a":1 , "b":2, "c":3}'::jsonb - '{b}'::text[];
+ ?column?
+------------------
+ {"a": 1, "c": 3}
+(1 row)
+
+select '{"a":1 , "b":2, "c":3}'::jsonb - '{c,b}'::text[];
+ ?column?
+----------
+ {"a": 1}
+(1 row)
+
+select '{"a":1 , "b":2, "c":3}'::jsonb - '{}'::text[];
+ ?column?
+--------------------------
+ {"a": 1, "b": 2, "c": 3}
+(1 row)
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '[1,2,3]');
+ jsonb_set
+--------------------------------------------------------------------------
+ {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": [1, 2, 3]}
+(1 row)
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '[1,2,3]');
+ jsonb_set
+-----------------------------------------------------------------------------
+ {"a": 1, "b": [1, [1, 2, 3]], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
+(1 row)
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '[1,2,3]');
+ jsonb_set
+-----------------------------------------------------------------------------
+ {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [[1, 2, 3], 3]}, "n": null}
+(1 row)
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '[1,2,3]');
+ERROR: path element at position 2 is null
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '{"1": 2}');
+ jsonb_set
+-------------------------------------------------------------------------
+ {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": {"1": 2}}
+(1 row)
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"1": 2}');
+ jsonb_set
+----------------------------------------------------------------------------
+ {"a": 1, "b": [1, {"1": 2}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
+(1 row)
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '{"1": 2}');
+ jsonb_set
+----------------------------------------------------------------------------
+ {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [{"1": 2}, 3]}, "n": null}
+(1 row)
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '{"1": 2}');
+ERROR: path element at position 2 is null
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '"test"');
+ jsonb_set
+--------------------------------------------------------------------------
+ {"a": 1, "b": [1, "test"], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
+(1 row)
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"f": "test"}');
+ jsonb_set
+---------------------------------------------------------------------------------
+ {"a": 1, "b": [1, {"f": "test"}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
+(1 row)
+
+select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{n}');
+ jsonb_delete_path
+----------------------------------------------------------
+ {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}}
+(1 row)
+
+select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{b,-1}');
+ jsonb_delete_path
+------------------------------------------------------------------
+ {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
+(1 row)
+
+select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{d,1,0}');
+ jsonb_delete_path
+------------------------------------------------------------------
+ {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null}
+(1 row)
+
+select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{n}';
+ ?column?
+----------------------------------------------------------
+ {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}}
+(1 row)
+
+select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1}';
+ ?column?
+------------------------------------------------------------------
+ {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null}
+(1 row)
+
+select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1e}'; -- invalid array subscript
+ERROR: path element at position 2 is not an integer: "-1e"
+select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{d,1,0}';
+ ?column?
+------------------------------------------------------------------
+ {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null}
+(1 row)
+
+-- empty structure and error conditions for delete and replace
+select '"a"'::jsonb - 'a'; -- error
+ERROR: cannot delete from scalar
+select '{}'::jsonb - 'a';
+ ?column?
+----------
+ {}
+(1 row)
+
+select '[]'::jsonb - 'a';
+ ?column?
+----------
+ []
+(1 row)
+
+select '"a"'::jsonb - 1; -- error
+ERROR: cannot delete from scalar
+select '{}'::jsonb - 1; -- error
+ERROR: cannot delete from object using integer index
+select '[]'::jsonb - 1;
+ ?column?
+----------
+ []
+(1 row)
+
+select '"a"'::jsonb #- '{a}'; -- error
+ERROR: cannot delete path in scalar
+select '{}'::jsonb #- '{a}';
+ ?column?
+----------
+ {}
+(1 row)
+
+select '[]'::jsonb #- '{a}';
+ ?column?
+----------
+ []
+(1 row)
+
+select jsonb_set('"a"','{a}','"b"'); --error
+ERROR: cannot set path in scalar
+select jsonb_set('{}','{a}','"b"', false);
+ jsonb_set
+-----------
+ {}
+(1 row)
+
+select jsonb_set('[]','{1}','"b"', false);
+ jsonb_set
+-----------
+ []
+(1 row)
+
+select jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0}','[2,3,4]', false);
+ jsonb_set
+-------------------------
+ [[2, 3, 4], 2, null, 3]
+(1 row)
+
+-- jsonb_set adding instead of replacing
+-- prepend to array
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,-33}','{"foo":123}');
+ jsonb_set
+-------------------------------------------------------
+ {"a": 1, "b": [{"foo": 123}, 0, 1, 2], "c": {"d": 4}}
+(1 row)
+
+-- append to array
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,33}','{"foo":123}');
+ jsonb_set
+-------------------------------------------------------
+ {"a": 1, "b": [0, 1, 2, {"foo": 123}], "c": {"d": 4}}
+(1 row)
+
+-- check nesting levels addition
+select jsonb_set('{"a":1,"b":[4,5,[0,1,2],6,7],"c":{"d":4}}','{b,2,33}','{"foo":123}');
+ jsonb_set
+---------------------------------------------------------------------
+ {"a": 1, "b": [4, 5, [0, 1, 2, {"foo": 123}], 6, 7], "c": {"d": 4}}
+(1 row)
+
+-- add new key
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{c,e}','{"foo":123}');
+ jsonb_set
+------------------------------------------------------------
+ {"a": 1, "b": [0, 1, 2], "c": {"d": 4, "e": {"foo": 123}}}
+(1 row)
+
+-- adding doesn't do anything if elements before last aren't present
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,-33}','{"foo":123}');
+ jsonb_set
+-----------------------------------------
+ {"a": 1, "b": [0, 1, 2], "c": {"d": 4}}
+(1 row)
+
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,y}','{"foo":123}');
+ jsonb_set
+-----------------------------------------
+ {"a": 1, "b": [0, 1, 2], "c": {"d": 4}}
+(1 row)
+
+-- add to empty object
+select jsonb_set('{}','{x}','{"foo":123}');
+ jsonb_set
+---------------------
+ {"x": {"foo": 123}}
+(1 row)
+
+--add to empty array
+select jsonb_set('[]','{0}','{"foo":123}');
+ jsonb_set
+----------------
+ [{"foo": 123}]
+(1 row)
+
+select jsonb_set('[]','{99}','{"foo":123}');
+ jsonb_set
+----------------
+ [{"foo": 123}]
+(1 row)
+
+select jsonb_set('[]','{-99}','{"foo":123}');
+ jsonb_set
+----------------
+ [{"foo": 123}]
+(1 row)
+
+select jsonb_set('{"a": [1, 2, 3]}', '{a, non_integer}', '"new_value"');
+ERROR: path element at position 2 is not an integer: "non_integer"
+select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, non_integer}', '"new_value"');
+ERROR: path element at position 3 is not an integer: "non_integer"
+select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, NULL}', '"new_value"');
+ERROR: path element at position 3 is null
+-- jsonb_set_lax
+\pset null NULL
+-- pass though non nulls to jsonb_set
+select jsonb_set_lax('{"a":1,"b":2}','{b}','5') ;
+ jsonb_set_lax
+------------------
+ {"a": 1, "b": 5}
+(1 row)
+
+select jsonb_set_lax('{"a":1,"b":2}','{d}','6', true) ;
+ jsonb_set_lax
+--------------------------
+ {"a": 1, "b": 2, "d": 6}
+(1 row)
+
+-- using the default treatment
+select jsonb_set_lax('{"a":1,"b":2}','{b}',null);
+ jsonb_set_lax
+---------------------
+ {"a": 1, "b": null}
+(1 row)
+
+select jsonb_set_lax('{"a":1,"b":2}','{d}',null,true);
+ jsonb_set_lax
+-----------------------------
+ {"a": 1, "b": 2, "d": null}
+(1 row)
+
+-- errors
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, null);
+ERROR: null_value_treatment must be "delete_key", "return_target", "use_json_null", or "raise_exception"
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, 'no_such_treatment');
+ERROR: null_value_treatment must be "delete_key", "return_target", "use_json_null", or "raise_exception"
+-- explicit treatments
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'raise_exception') as raise_exception;
+ERROR: JSON value must not be null
+DETAIL: Exception was raised because null_value_treatment is "raise_exception".
+HINT: To avoid, either change the null_value_treatment argument or ensure that an SQL NULL is not passed.
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'return_target') as return_target;
+ return_target
+------------------
+ {"a": 1, "b": 2}
+(1 row)
+
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'delete_key') as delete_key;
+ delete_key
+------------
+ {"a": 1}
+(1 row)
+
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'use_json_null') as use_json_null;
+ use_json_null
+---------------------
+ {"a": 1, "b": null}
+(1 row)
+
+\pset null ''
+-- jsonb_insert
+select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"');
+ jsonb_insert
+-------------------------------
+ {"a": [0, "new_value", 1, 2]}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"', true);
+ jsonb_insert
+-------------------------------
+ {"a": [0, 1, "new_value", 2]}
+(1 row)
+
+select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"');
+ jsonb_insert
+------------------------------------------------------------
+ {"a": {"b": {"c": [0, 1, "new_value", "test1", "test2"]}}}
+(1 row)
+
+select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"', true);
+ jsonb_insert
+------------------------------------------------------------
+ {"a": {"b": {"c": [0, 1, "test1", "new_value", "test2"]}}}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '{"b": "value"}');
+ jsonb_insert
+----------------------------------
+ {"a": [0, {"b": "value"}, 1, 2]}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '["value1", "value2"]');
+ jsonb_insert
+----------------------------------------
+ {"a": [0, ["value1", "value2"], 1, 2]}
+(1 row)
+
+-- edge cases
+select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"');
+ jsonb_insert
+-------------------------------
+ {"a": ["new_value", 0, 1, 2]}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"', true);
+ jsonb_insert
+-------------------------------
+ {"a": [0, "new_value", 1, 2]}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"');
+ jsonb_insert
+-------------------------------
+ {"a": [0, 1, "new_value", 2]}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"', true);
+ jsonb_insert
+-------------------------------
+ {"a": [0, 1, 2, "new_value"]}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"');
+ jsonb_insert
+-------------------------------
+ {"a": [0, 1, "new_value", 2]}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"', true);
+ jsonb_insert
+-------------------------------
+ {"a": [0, 1, 2, "new_value"]}
+(1 row)
+
+select jsonb_insert('[]', '{1}', '"new_value"');
+ jsonb_insert
+---------------
+ ["new_value"]
+(1 row)
+
+select jsonb_insert('[]', '{1}', '"new_value"', true);
+ jsonb_insert
+---------------
+ ["new_value"]
+(1 row)
+
+select jsonb_insert('{"a": []}', '{a, 1}', '"new_value"');
+ jsonb_insert
+----------------------
+ {"a": ["new_value"]}
+(1 row)
+
+select jsonb_insert('{"a": []}', '{a, 1}', '"new_value"', true);
+ jsonb_insert
+----------------------
+ {"a": ["new_value"]}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, 10}', '"new_value"');
+ jsonb_insert
+-------------------------------
+ {"a": [0, 1, 2, "new_value"]}
+(1 row)
+
+select jsonb_insert('{"a": [0,1,2]}', '{a, -10}', '"new_value"');
+ jsonb_insert
+-------------------------------
+ {"a": ["new_value", 0, 1, 2]}
+(1 row)
+
+-- jsonb_insert should be able to insert new value for objects, but not to replace
+select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"');
+ jsonb_insert
+-----------------------------------------
+ {"a": {"b": "value", "c": "new_value"}}
+(1 row)
+
+select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"', true);
+ jsonb_insert
+-----------------------------------------
+ {"a": {"b": "value", "c": "new_value"}}
+(1 row)
+
+select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"');
+ERROR: cannot replace existing key
+HINT: Try using the function jsonb_set to replace key value.
+select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"', true);
+ERROR: cannot replace existing key
+HINT: Try using the function jsonb_set to replace key value.
+-- jsonb subscript
+select ('123'::jsonb)['a'];
+ jsonb
+-------
+
+(1 row)
+
+select ('123'::jsonb)[0];
+ jsonb
+-------
+
+(1 row)
+
+select ('123'::jsonb)[NULL];
+ jsonb
+-------
+
+(1 row)
+
+select ('{"a": 1}'::jsonb)['a'];
+ jsonb
+-------
+ 1
+(1 row)
+
+select ('{"a": 1}'::jsonb)[0];
+ jsonb
+-------
+
+(1 row)
+
+select ('{"a": 1}'::jsonb)['not_exist'];
+ jsonb
+-------
+
+(1 row)
+
+select ('{"a": 1}'::jsonb)[NULL];
+ jsonb
+-------
+
+(1 row)
+
+select ('[1, "2", null]'::jsonb)['a'];
+ jsonb
+-------
+
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[0];
+ jsonb
+-------
+ 1
+(1 row)
+
+select ('[1, "2", null]'::jsonb)['1'];
+ jsonb
+-------
+ "2"
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[1.0];
+ERROR: subscript type numeric is not supported
+LINE 1: select ('[1, "2", null]'::jsonb)[1.0];
+ ^
+HINT: jsonb subscript must be coercible to either integer or text.
+select ('[1, "2", null]'::jsonb)[2];
+ jsonb
+-------
+ null
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[3];
+ jsonb
+-------
+
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[-2];
+ jsonb
+-------
+ "2"
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[1]['a'];
+ jsonb
+-------
+
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[1][0];
+ jsonb
+-------
+
+(1 row)
+
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['b'];
+ jsonb
+-------
+ "c"
+(1 row)
+
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'];
+ jsonb
+-----------
+ [1, 2, 3]
+(1 row)
+
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'][1];
+ jsonb
+-------
+ 2
+(1 row)
+
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']['a'];
+ jsonb
+-------
+
+(1 row)
+
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1'];
+ jsonb
+---------------
+ {"a2": "aaa"}
+(1 row)
+
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2'];
+ jsonb
+-------
+ "aaa"
+(1 row)
+
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']['a3'];
+ jsonb
+-------
+
+(1 row)
+
+select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'];
+ jsonb
+-----------------------
+ ["aaa", "bbb", "ccc"]
+(1 row)
+
+select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'][2];
+ jsonb
+-------
+ "ccc"
+(1 row)
+
+-- slices are not supported
+select ('{"a": 1}'::jsonb)['a':'b'];
+ERROR: jsonb subscript does not support slices
+LINE 1: select ('{"a": 1}'::jsonb)['a':'b'];
+ ^
+select ('[1, "2", null]'::jsonb)[1:2];
+ERROR: jsonb subscript does not support slices
+LINE 1: select ('[1, "2", null]'::jsonb)[1:2];
+ ^
+select ('[1, "2", null]'::jsonb)[:2];
+ERROR: jsonb subscript does not support slices
+LINE 1: select ('[1, "2", null]'::jsonb)[:2];
+ ^
+select ('[1, "2", null]'::jsonb)[1:];
+ERROR: jsonb subscript does not support slices
+LINE 1: select ('[1, "2", null]'::jsonb)[1:];
+ ^
+select ('[1, "2", null]'::jsonb)[:];
+ERROR: jsonb subscript does not support slices
+create TEMP TABLE test_jsonb_subscript (
+ id int,
+ test_json jsonb
+);
+insert into test_jsonb_subscript values
+(1, '{}'), -- empty jsonb
+(2, '{"key": "value"}'); -- jsonb with data
+-- update empty jsonb
+update test_jsonb_subscript set test_json['a'] = '1' where id = 1;
+select * from test_jsonb_subscript;
+ id | test_json
+----+------------------
+ 2 | {"key": "value"}
+ 1 | {"a": 1}
+(2 rows)
+
+-- update jsonb with some data
+update test_jsonb_subscript set test_json['a'] = '1' where id = 2;
+select * from test_jsonb_subscript;
+ id | test_json
+----+--------------------------
+ 1 | {"a": 1}
+ 2 | {"a": 1, "key": "value"}
+(2 rows)
+
+-- replace jsonb
+update test_jsonb_subscript set test_json['a'] = '"test"';
+select * from test_jsonb_subscript;
+ id | test_json
+----+-------------------------------
+ 1 | {"a": "test"}
+ 2 | {"a": "test", "key": "value"}
+(2 rows)
+
+-- replace by object
+update test_jsonb_subscript set test_json['a'] = '{"b": 1}'::jsonb;
+select * from test_jsonb_subscript;
+ id | test_json
+----+---------------------------------
+ 1 | {"a": {"b": 1}}
+ 2 | {"a": {"b": 1}, "key": "value"}
+(2 rows)
+
+-- replace by array
+update test_jsonb_subscript set test_json['a'] = '[1, 2, 3]'::jsonb;
+select * from test_jsonb_subscript;
+ id | test_json
+----+----------------------------------
+ 1 | {"a": [1, 2, 3]}
+ 2 | {"a": [1, 2, 3], "key": "value"}
+(2 rows)
+
+-- use jsonb subscription in where clause
+select * from test_jsonb_subscript where test_json['key'] = '"value"';
+ id | test_json
+----+----------------------------------
+ 2 | {"a": [1, 2, 3], "key": "value"}
+(1 row)
+
+select * from test_jsonb_subscript where test_json['key_doesnt_exists'] = '"value"';
+ id | test_json
+----+-----------
+(0 rows)
+
+select * from test_jsonb_subscript where test_json['key'] = '"wrong_value"';
+ id | test_json
+----+-----------
+(0 rows)
+
+-- NULL
+update test_jsonb_subscript set test_json[NULL] = '1';
+ERROR: jsonb subscript in assignment must not be null
+update test_jsonb_subscript set test_json['another_key'] = NULL;
+select * from test_jsonb_subscript;
+ id | test_json
+----+-------------------------------------------------------
+ 1 | {"a": [1, 2, 3], "another_key": null}
+ 2 | {"a": [1, 2, 3], "key": "value", "another_key": null}
+(2 rows)
+
+-- NULL as jsonb source
+insert into test_jsonb_subscript values (3, NULL);
+update test_jsonb_subscript set test_json['a'] = '1' where id = 3;
+select * from test_jsonb_subscript;
+ id | test_json
+----+-------------------------------------------------------
+ 1 | {"a": [1, 2, 3], "another_key": null}
+ 2 | {"a": [1, 2, 3], "key": "value", "another_key": null}
+ 3 | {"a": 1}
+(3 rows)
+
+update test_jsonb_subscript set test_json = NULL where id = 3;
+update test_jsonb_subscript set test_json[0] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+---------------------------------------------------------------
+ 1 | {"0": 1, "a": [1, 2, 3], "another_key": null}
+ 2 | {"0": 1, "a": [1, 2, 3], "key": "value", "another_key": null}
+ 3 | [1]
+(3 rows)
+
+-- Fill the gaps logic
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '[0]');
+update test_jsonb_subscript set test_json[5] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+--------------------------------
+ 1 | [0, null, null, null, null, 1]
+(1 row)
+
+update test_jsonb_subscript set test_json[-4] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+-----------------------------
+ 1 | [0, null, 1, null, null, 1]
+(1 row)
+
+update test_jsonb_subscript set test_json[-8] = '1';
+ERROR: path element at position 1 is out of range: -8
+select * from test_jsonb_subscript;
+ id | test_json
+----+-----------------------------
+ 1 | [0, null, 1, null, null, 1]
+(1 row)
+
+-- keep consistent values position
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '[]');
+update test_jsonb_subscript set test_json[5] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+-----------------------------------
+ 1 | [null, null, null, null, null, 1]
+(1 row)
+
+-- create the whole path
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json['a'][0]['b'][0]['c'] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+----------------------------
+ 1 | {"a": [{"b": [{"c": 1}]}]}
+(1 row)
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json['a'][2]['b'][2]['c'][2] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+------------------------------------------------------------------
+ 1 | {"a": [null, null, {"b": [null, null, {"c": [null, null, 1]}]}]}
+(1 row)
+
+-- create the whole path with already existing keys
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{"b": 1}');
+update test_jsonb_subscript set test_json['a'][0] = '2';
+select * from test_jsonb_subscript;
+ id | test_json
+----+--------------------
+ 1 | {"a": [2], "b": 1}
+(1 row)
+
+-- the start jsonb is an object, first subscript is treated as a key
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json[0]['a'] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+-----------------
+ 1 | {"0": {"a": 1}}
+(1 row)
+
+-- the start jsonb is an array
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '[]');
+update test_jsonb_subscript set test_json[0]['a'] = '1';
+update test_jsonb_subscript set test_json[2]['b'] = '2';
+select * from test_jsonb_subscript;
+ id | test_json
+----+----------------------------
+ 1 | [{"a": 1}, null, {"b": 2}]
+(1 row)
+
+-- overwriting an existing path
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json['a']['b'][1] = '1';
+update test_jsonb_subscript set test_json['a']['b'][10] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+----------------------------------------------------------------------------
+ 1 | {"a": {"b": [null, 1, null, null, null, null, null, null, null, null, 1]}}
+(1 row)
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '[]');
+update test_jsonb_subscript set test_json[0][0][0] = '1';
+update test_jsonb_subscript set test_json[0][0][1] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+------------
+ 1 | [[[1, 1]]]
+(1 row)
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json['a']['b'][10] = '1';
+update test_jsonb_subscript set test_json['a'][10][10] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1 | {"a": {"b": [null, null, null, null, null, null, null, null, null, null, 1], "10": [null, null, null, null, null, null, null, null, null, null, 1]}}
+(1 row)
+
+-- an empty sub element
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{"a": {}}');
+update test_jsonb_subscript set test_json['a']['b']['c'][2] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+--------------------------------------
+ 1 | {"a": {"b": {"c": [null, null, 1]}}}
+(1 row)
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{"a": []}');
+update test_jsonb_subscript set test_json['a'][1]['c'][2] = '1';
+select * from test_jsonb_subscript;
+ id | test_json
+----+---------------------------------------
+ 1 | {"a": [null, {"c": [null, null, 1]}]}
+(1 row)
+
+-- trying replace assuming a composite object, but it's an element or a value
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{"a": 1}');
+update test_jsonb_subscript set test_json['a']['b'] = '1';
+ERROR: cannot replace existing key
+DETAIL: The path assumes key is a composite object, but it is a scalar value.
+update test_jsonb_subscript set test_json['a']['b']['c'] = '1';
+ERROR: cannot replace existing key
+DETAIL: The path assumes key is a composite object, but it is a scalar value.
+update test_jsonb_subscript set test_json['a'][0] = '1';
+ERROR: cannot replace existing key
+DETAIL: The path assumes key is a composite object, but it is a scalar value.
+update test_jsonb_subscript set test_json['a'][0]['c'] = '1';
+ERROR: cannot replace existing key
+DETAIL: The path assumes key is a composite object, but it is a scalar value.
+update test_jsonb_subscript set test_json['a'][0][0] = '1';
+ERROR: cannot replace existing key
+DETAIL: The path assumes key is a composite object, but it is a scalar value.
+-- trying replace assuming a composite object, but it's a raw scalar
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, 'null');
+update test_jsonb_subscript set test_json[0] = '1';
+ERROR: cannot replace existing key
+DETAIL: The path assumes key is a composite object, but it is a scalar value.
+update test_jsonb_subscript set test_json[0][0] = '1';
+ERROR: cannot replace existing key
+DETAIL: The path assumes key is a composite object, but it is a scalar value.
+-- jsonb to tsvector
+select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb);
+ to_tsvector
+---------------------------------------------------------------------------
+ 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11
+(1 row)
+
+-- jsonb to tsvector with config
+select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb);
+ to_tsvector
+---------------------------------------------------------------------------
+ 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11
+(1 row)
+
+-- jsonb to tsvector with stop words
+select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. iii"}}'::jsonb);
+ to_tsvector
+----------------------------------------------------------------------------
+ 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13
+(1 row)
+
+-- jsonb to tsvector with numeric values
+select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::jsonb);
+ to_tsvector
+---------------------------------
+ 'aaa':1 'bbb':3 'ccc':5 'ddd':4
+(1 row)
+
+-- jsonb_to_tsvector
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"');
+ jsonb_to_tsvector
+----------------------------------------------------------------------------------------
+ '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"');
+ jsonb_to_tsvector
+--------------------------------
+ 'b':2 'c':4 'd':6 'f':8 'g':10
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"');
+ jsonb_to_tsvector
+-------------------
+ 'aaa':1 'bbb':3
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"');
+ jsonb_to_tsvector
+-------------------
+ '123':1 '456':3
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"');
+ jsonb_to_tsvector
+-------------------
+ 'fals':3 'true':1
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]');
+ jsonb_to_tsvector
+---------------------------------
+ '123':5 '456':7 'aaa':1 'bbb':3
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"');
+ jsonb_to_tsvector
+----------------------------------------------------------------------------------------
+ '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"');
+ jsonb_to_tsvector
+--------------------------------
+ 'b':2 'c':4 'd':6 'f':8 'g':10
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"');
+ jsonb_to_tsvector
+-------------------
+ 'aaa':1 'bbb':3
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"');
+ jsonb_to_tsvector
+-------------------
+ '123':1 '456':3
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"');
+ jsonb_to_tsvector
+-------------------
+ 'fals':3 'true':1
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]');
+ jsonb_to_tsvector
+---------------------------------
+ '123':5 '456':7 'aaa':1 'bbb':3
+(1 row)
+
+-- to_tsvector corner cases
+select to_tsvector('""'::jsonb);
+ to_tsvector
+-------------
+
+(1 row)
+
+select to_tsvector('{}'::jsonb);
+ to_tsvector
+-------------
+
+(1 row)
+
+select to_tsvector('[]'::jsonb);
+ to_tsvector
+-------------
+
+(1 row)
+
+select to_tsvector('null'::jsonb);
+ to_tsvector
+-------------
+
+(1 row)
+
+-- jsonb_to_tsvector corner cases
+select jsonb_to_tsvector('""'::jsonb, '"all"');
+ jsonb_to_tsvector
+-------------------
+
+(1 row)
+
+select jsonb_to_tsvector('{}'::jsonb, '"all"');
+ jsonb_to_tsvector
+-------------------
+
+(1 row)
+
+select jsonb_to_tsvector('[]'::jsonb, '"all"');
+ jsonb_to_tsvector
+-------------------
+
+(1 row)
+
+select jsonb_to_tsvector('null'::jsonb, '"all"');
+ jsonb_to_tsvector
+-------------------
+
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '""');
+ERROR: wrong flag in flag array: ""
+HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '{}');
+ERROR: wrong flag type, only arrays and scalars are allowed
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '[]');
+ jsonb_to_tsvector
+-------------------
+
+(1 row)
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, 'null');
+ERROR: flag array element is not a string
+HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["all", null]');
+ERROR: flag array element is not a string
+HINT: Possible values are: "string", "numeric", "boolean", "key", and "all".
+-- ts_headline for jsonb
+select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'));
+ ts_headline
+------------------------------------------------------------------------------------------------------------------
+ {"a": "aaa <b>bbb</b>", "b": {"c": "ccc <b>ddd</b> fff", "c1": "ccc1 ddd1"}, "d": ["ggg <b>hhh</b>", "iii jjj"]}
+(1 row)
+
+select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'));
+ ts_headline
+-----------------------------------------------------------------------------------------------
+ {"a": "aaa <b>bbb</b>", "b": {"c": "ccc <b>ddd</b> fff"}, "d": ["ggg <b>hhh</b>", "iii jjj"]}
+(1 row)
+
+select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
+ ts_headline
+---------------------------------------------------------------------------------------------------
+ {"a": "aaa <bbb>", "b": {"c": "ccc <ddd> fff", "c1": "ccc1 ddd1"}, "d": ["ggg <hhh>", "iii jjj"]}
+(1 row)
+
+select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
+ ts_headline
+---------------------------------------------------------------------------------------------------
+ {"a": "aaa <bbb>", "b": {"c": "ccc <ddd> fff", "c1": "ccc1 ddd1"}, "d": ["ggg <hhh>", "iii jjj"]}
+(1 row)
+
+-- corner cases for ts_headline with jsonb
+select ts_headline('null'::jsonb, tsquery('aaa & bbb'));
+ ts_headline
+-------------
+ null
+(1 row)
+
+select ts_headline('{}'::jsonb, tsquery('aaa & bbb'));
+ ts_headline
+-------------
+ {}
+(1 row)
+
+select ts_headline('[]'::jsonb, tsquery('aaa & bbb'));
+ ts_headline
+-------------
+ []
+(1 row)
+
+-- casts
+select 'true'::jsonb::bool;
+ bool
+------
+ t
+(1 row)
+
+select '[]'::jsonb::bool;
+ERROR: cannot cast jsonb array to type boolean
+select '1.0'::jsonb::float;
+ float8
+--------
+ 1
+(1 row)
+
+select '[1.0]'::jsonb::float;
+ERROR: cannot cast jsonb array to type double precision
+select '12345'::jsonb::int4;
+ int4
+-------
+ 12345
+(1 row)
+
+select '"hello"'::jsonb::int4;
+ERROR: cannot cast jsonb string to type integer
+select '12345'::jsonb::numeric;
+ numeric
+---------
+ 12345
+(1 row)
+
+select '{}'::jsonb::numeric;
+ERROR: cannot cast jsonb object to type numeric
+select '12345.05'::jsonb::numeric;
+ numeric
+----------
+ 12345.05
+(1 row)
+
+select '12345.05'::jsonb::float4;
+ float4
+----------
+ 12345.05
+(1 row)
+
+select '12345.05'::jsonb::float8;
+ float8
+----------
+ 12345.05
+(1 row)
+
+select '12345.05'::jsonb::int2;
+ int2
+-------
+ 12345
+(1 row)
+
+select '12345.05'::jsonb::int4;
+ int4
+-------
+ 12345
+(1 row)
+
+select '12345.05'::jsonb::int8;
+ int8
+-------
+ 12345
+(1 row)
+
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::numeric;
+ numeric
+------------------------------------------------------
+ 12345.0000000000000000000000000000000000000000000005
+(1 row)
+
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::float4;
+ float4
+--------
+ 12345
+(1 row)
+
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::float8;
+ float8
+--------
+ 12345
+(1 row)
+
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::int2;
+ int2
+-------
+ 12345
+(1 row)
+
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::int4;
+ int4
+-------
+ 12345
+(1 row)
+
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::int8;
+ int8
+-------
+ 12345
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/jsonb.sql b/ydb/library/yql/tests/postgresql/original/cases/jsonb.sql
new file mode 100644
index 0000000000..5016f29c15
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/jsonb.sql
@@ -0,0 +1,1484 @@
+-- Strings.
+SELECT '""'::jsonb; -- OK.
+SELECT $$''$$::jsonb; -- ERROR, single quotes are not allowed
+SELECT '"abc"'::jsonb; -- OK
+SELECT '"abc'::jsonb; -- ERROR, quotes not closed
+SELECT '"abc
+def"'::jsonb; -- ERROR, unescaped newline in string constant
+SELECT '"\n\"\\"'::jsonb; -- OK, legal escapes
+SELECT '"\v"'::jsonb; -- ERROR, not a valid JSON escape
+-- see json_encoding test for input with unicode escapes
+
+-- Numbers.
+SELECT '1'::jsonb; -- OK
+SELECT '0'::jsonb; -- OK
+SELECT '01'::jsonb; -- ERROR, not valid according to JSON spec
+SELECT '0.1'::jsonb; -- OK
+SELECT '9223372036854775808'::jsonb; -- OK, even though it's too large for int8
+SELECT '1e100'::jsonb; -- OK
+SELECT '1.3e100'::jsonb; -- OK
+SELECT '1f2'::jsonb; -- ERROR
+SELECT '0.x1'::jsonb; -- ERROR
+SELECT '1.3ex100'::jsonb; -- ERROR
+
+-- Arrays.
+SELECT '[]'::jsonb; -- OK
+SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::jsonb; -- OK
+SELECT '[1,2]'::jsonb; -- OK
+SELECT '[1,2,]'::jsonb; -- ERROR, trailing comma
+SELECT '[1,2'::jsonb; -- ERROR, no closing bracket
+SELECT '[1,[2]'::jsonb; -- ERROR, no closing bracket
+
+-- Objects.
+SELECT '{}'::jsonb; -- OK
+SELECT '{"abc"}'::jsonb; -- ERROR, no value
+SELECT '{"abc":1}'::jsonb; -- OK
+SELECT '{1:"abc"}'::jsonb; -- ERROR, keys must be strings
+SELECT '{"abc",1}'::jsonb; -- ERROR, wrong separator
+SELECT '{"abc"=1}'::jsonb; -- ERROR, totally wrong separator
+SELECT '{"abc"::1}'::jsonb; -- ERROR, another wrong separator
+SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::jsonb; -- OK
+SELECT '{"abc":1:2}'::jsonb; -- ERROR, colon in wrong spot
+SELECT '{"abc":1,3}'::jsonb; -- ERROR, no value
+
+-- Recursion.
+SET max_stack_depth = '100kB';
+SELECT repeat('[', 10000)::jsonb;
+SELECT repeat('{"a":', 10000)::jsonb;
+RESET max_stack_depth;
+
+-- Miscellaneous stuff.
+SELECT 'true'::jsonb; -- OK
+SELECT 'false'::jsonb; -- OK
+SELECT 'null'::jsonb; -- OK
+SELECT ' true '::jsonb; -- OK, even with extra whitespace
+SELECT 'true false'::jsonb; -- ERROR, too many values
+SELECT 'true, false'::jsonb; -- ERROR, too many values
+SELECT 'truf'::jsonb; -- ERROR, not a keyword
+SELECT 'trues'::jsonb; -- ERROR, not a keyword
+SELECT ''::jsonb; -- ERROR, no value
+SELECT ' '::jsonb; -- ERROR, no value
+
+-- Multi-line JSON input to check ERROR reporting
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "three":
+ true}'::jsonb; -- OK
+SELECT '{
+ "one": 1,
+ "two":,"two", -- ERROR extraneous comma before field "two"
+ "three":
+ true}'::jsonb;
+SELECT '{
+ "one": 1,
+ "two":"two",
+ "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::jsonb;
+-- ERROR missing value for last field
+
+-- make sure jsonb is passed through json generators without being escaped
+SELECT array_to_json(ARRAY [jsonb '{"a":1}', jsonb '{"b":[2,3]}']);
+
+-- anyarray column
+
+CREATE TEMP TABLE rows AS
+SELECT x, 'txt' || x as y
+FROM generate_series(1,3) AS x;
+
+analyze rows;
+
+select attname, to_jsonb(histogram_bounds) histogram_bounds
+from pg_stats
+where tablename = 'rows' and
+ schemaname = pg_my_temp_schema()::regnamespace::text
+order by 1;
+
+-- to_jsonb, timestamps
+
+select to_jsonb(timestamp '2014-05-28 12:22:35.614298');
+
+BEGIN;
+SET LOCAL TIME ZONE 10.5;
+select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04');
+SET LOCAL TIME ZONE -8;
+select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04');
+COMMIT;
+
+select to_jsonb(date '2014-05-28');
+
+select to_jsonb(date 'Infinity');
+select to_jsonb(date '-Infinity');
+select to_jsonb(timestamp 'Infinity');
+select to_jsonb(timestamp '-Infinity');
+select to_jsonb(timestamptz 'Infinity');
+select to_jsonb(timestamptz '-Infinity');
+
+--jsonb_agg
+
+SELECT jsonb_agg(q)
+ FROM ( SELECT $$a$$ || x AS b, y AS c,
+ ARRAY[ROW(x.*,ARRAY[1,2,3]),
+ ROW(y.*,ARRAY[4,5,6])] AS z
+ FROM generate_series(1,2) x,
+ generate_series(4,5) y) q;
+
+SELECT jsonb_agg(q ORDER BY x, y)
+ FROM rows q;
+
+UPDATE rows SET x = NULL WHERE x = 1;
+
+SELECT jsonb_agg(q ORDER BY x NULLS FIRST, y)
+ FROM rows q;
+
+-- jsonb extraction functions
+CREATE TEMP TABLE test_jsonb (
+ json_type text,
+ test_json jsonb
+);
+
+INSERT INTO test_jsonb VALUES
+('scalar','"a scalar"'),
+('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'),
+('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}');
+
+SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'scalar';
+SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'array';
+SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'object';
+SELECT test_json -> 'field2' FROM test_jsonb WHERE json_type = 'object';
+
+SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'scalar';
+SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'array';
+SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'object';
+
+SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'scalar';
+SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'array';
+SELECT test_json -> 9 FROM test_jsonb WHERE json_type = 'array';
+SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'object';
+
+SELECT test_json ->> 6 FROM test_jsonb WHERE json_type = 'array';
+SELECT test_json ->> 7 FROM test_jsonb WHERE json_type = 'array';
+
+SELECT test_json ->> 'field4' FROM test_jsonb WHERE json_type = 'object';
+SELECT test_json ->> 'field5' FROM test_jsonb WHERE json_type = 'object';
+SELECT test_json ->> 'field6' FROM test_jsonb WHERE json_type = 'object';
+
+SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'scalar';
+SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'array';
+SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'object';
+
+SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'scalar';
+SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'array';
+SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'object';
+
+-- nulls
+SELECT (test_json->'field3') IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'object';
+SELECT (test_json->>'field3') IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'object';
+SELECT (test_json->3) IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'array';
+SELECT (test_json->>3) IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'array';
+
+-- corner cases
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::text;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::int;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 1;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 'z';
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> '';
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 1;
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 3;
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 'z';
+select '{"a": "c", "b": null}'::jsonb -> 'b';
+select '"foo"'::jsonb -> 1;
+select '"foo"'::jsonb -> 'z';
+
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::text;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::int;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 1;
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 'z';
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> '';
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 1;
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 3;
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 'z';
+select '{"a": "c", "b": null}'::jsonb ->> 'b';
+select '"foo"'::jsonb ->> 1;
+select '"foo"'::jsonb ->> 'z';
+
+-- equality and inequality
+SELECT '{"x":"y"}'::jsonb = '{"x":"y"}'::jsonb;
+SELECT '{"x":"y"}'::jsonb = '{"x":"z"}'::jsonb;
+
+SELECT '{"x":"y"}'::jsonb <> '{"x":"y"}'::jsonb;
+SELECT '{"x":"y"}'::jsonb <> '{"x":"z"}'::jsonb;
+
+-- containment
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}');
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":null}');
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "g":null}');
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"g":null}');
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"c"}');
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}');
+SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":"q"}');
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}';
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":null}';
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "g":null}';
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"g":null}';
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"c"}';
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}';
+SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":"q"}';
+
+SELECT '[1,2]'::jsonb @> '[1,2,2]'::jsonb;
+SELECT '[1,1,2]'::jsonb @> '[1,2,2]'::jsonb;
+SELECT '[[1,2]]'::jsonb @> '[[1,2,2]]'::jsonb;
+SELECT '[1,2,2]'::jsonb <@ '[1,2]'::jsonb;
+SELECT '[1,2,2]'::jsonb <@ '[1,1,2]'::jsonb;
+SELECT '[[1,2,2]]'::jsonb <@ '[[1,2]]'::jsonb;
+
+SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}');
+SELECT jsonb_contained('{"a":"b", "c":null}', '{"a":"b", "b":1, "c":null}');
+SELECT jsonb_contained('{"a":"b", "g":null}', '{"a":"b", "b":1, "c":null}');
+SELECT jsonb_contained('{"g":null}', '{"a":"b", "b":1, "c":null}');
+SELECT jsonb_contained('{"a":"c"}', '{"a":"b", "b":1, "c":null}');
+SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}');
+SELECT jsonb_contained('{"a":"b", "c":"q"}', '{"a":"b", "b":1, "c":null}');
+SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+SELECT '{"a":"b", "c":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+SELECT '{"a":"b", "g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+SELECT '{"g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+SELECT '{"a":"c"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+SELECT '{"a":"b", "c":"q"}'::jsonb <@ '{"a":"b", "b":1, "c":null}';
+-- Raw scalar may contain another raw scalar, array may contain a raw scalar
+SELECT '[5]'::jsonb @> '[5]';
+SELECT '5'::jsonb @> '5';
+SELECT '[5]'::jsonb @> '5';
+-- But a raw scalar cannot contain an array
+SELECT '5'::jsonb @> '[5]';
+-- In general, one thing should always contain itself. Test array containment:
+SELECT '["9", ["7", "3"], 1]'::jsonb @> '["9", ["7", "3"], 1]'::jsonb;
+SELECT '["9", ["7", "3"], ["1"]]'::jsonb @> '["9", ["7", "3"], ["1"]]'::jsonb;
+-- array containment string matching confusion bug
+SELECT '{ "name": "Bob", "tags": [ "enim", "qui"]}'::jsonb @> '{"tags":["qu"]}';
+
+-- array length
+SELECT jsonb_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]');
+SELECT jsonb_array_length('[]');
+SELECT jsonb_array_length('{"f1":1,"f2":[5,6]}');
+SELECT jsonb_array_length('4');
+
+-- each
+SELECT jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}');
+SELECT jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
+SELECT * FROM jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
+SELECT * FROM jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
+
+SELECT jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}');
+SELECT jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
+SELECT * FROM jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q;
+SELECT * FROM jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q;
+
+-- exists
+SELECT jsonb_exists('{"a":null, "b":"qq"}', 'a');
+SELECT jsonb_exists('{"a":null, "b":"qq"}', 'b');
+SELECT jsonb_exists('{"a":null, "b":"qq"}', 'c');
+SELECT jsonb_exists('{"a":"null", "b":"qq"}', 'a');
+SELECT jsonb '{"a":null, "b":"qq"}' ? 'a';
+SELECT jsonb '{"a":null, "b":"qq"}' ? 'b';
+SELECT jsonb '{"a":null, "b":"qq"}' ? 'c';
+SELECT jsonb '{"a":"null", "b":"qq"}' ? 'a';
+-- array exists - array elements should behave as keys
+SELECT count(*) from testjsonb WHERE j->'array' ? 'bar';
+-- type sensitive array exists - should return no rows (since "exists" only
+-- matches strings that are either object keys or array elements)
+SELECT count(*) from testjsonb WHERE j->'array' ? '5'::text;
+-- However, a raw scalar is *contained* within the array
+SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb;
+
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['a','b']);
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['b','a']);
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','a']);
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','d']);
+SELECT jsonb_exists_any('{"a":null, "b":"qq"}', '{}'::text[]);
+SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['a','b'];
+SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['b','a'];
+SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','a'];
+SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','d'];
+SELECT jsonb '{"a":null, "b":"qq"}' ?| '{}'::text[];
+
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['a','b']);
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['b','a']);
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','a']);
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','d']);
+SELECT jsonb_exists_all('{"a":null, "b":"qq"}', '{}'::text[]);
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','b'];
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['b','a'];
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','a'];
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','d'];
+SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','a', 'b', 'b', 'b'];
+SELECT jsonb '{"a":null, "b":"qq"}' ?& '{}'::text[];
+
+-- typeof
+SELECT jsonb_typeof('{}') AS object;
+SELECT jsonb_typeof('{"c":3,"p":"o"}') AS object;
+SELECT jsonb_typeof('[]') AS array;
+SELECT jsonb_typeof('["a", 1]') AS array;
+SELECT jsonb_typeof('null') AS "null";
+SELECT jsonb_typeof('1') AS number;
+SELECT jsonb_typeof('-1') AS number;
+SELECT jsonb_typeof('1.0') AS number;
+SELECT jsonb_typeof('1e2') AS number;
+SELECT jsonb_typeof('-1.0') AS number;
+SELECT jsonb_typeof('true') AS boolean;
+SELECT jsonb_typeof('false') AS boolean;
+SELECT jsonb_typeof('"hello"') AS string;
+SELECT jsonb_typeof('"true"') AS string;
+SELECT jsonb_typeof('"1.0"') AS string;
+
+-- jsonb_build_array, jsonb_build_object, jsonb_object_agg
+
+SELECT jsonb_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
+SELECT jsonb_build_array('a', NULL); -- ok
+SELECT jsonb_build_array(VARIADIC NULL::text[]); -- ok
+SELECT jsonb_build_array(VARIADIC '{}'::text[]); -- ok
+SELECT jsonb_build_array(VARIADIC '{a,b,c}'::text[]); -- ok
+SELECT jsonb_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok
+SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok
+SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok
+SELECT jsonb_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
+
+SELECT jsonb_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}');
+
+SELECT jsonb_build_object(
+ 'a', jsonb_build_object('b',false,'c',99),
+ 'd', jsonb_build_object('e',array[9,8,7]::int[],
+ 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r)));
+SELECT jsonb_build_object('{a,b,c}'::text[]); -- error
+SELECT jsonb_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array
+SELECT jsonb_build_object('a', 'b', 'c'); -- error
+SELECT jsonb_build_object(NULL, 'a'); -- error, key cannot be NULL
+SELECT jsonb_build_object('a', NULL); -- ok
+SELECT jsonb_build_object(VARIADIC NULL::text[]); -- ok
+SELECT jsonb_build_object(VARIADIC '{}'::text[]); -- ok
+SELECT jsonb_build_object(VARIADIC '{a,b,c}'::text[]); -- error
+SELECT jsonb_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok
+SELECT jsonb_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL
+SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok
+SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok
+SELECT jsonb_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok
+
+-- empty objects/arrays
+SELECT jsonb_build_array();
+
+SELECT jsonb_build_object();
+
+-- make sure keys are quoted
+SELECT jsonb_build_object(1,2);
+
+-- keys must be scalar and not null
+SELECT jsonb_build_object(null,2);
+
+SELECT jsonb_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r;
+
+SELECT jsonb_build_object(json '{"a":1,"b":2}', 3);
+
+SELECT jsonb_build_object('{1,2,3}'::int[], 3);
+
+-- handling of NULL values
+SELECT jsonb_object_agg(1, NULL::jsonb);
+SELECT jsonb_object_agg(NULL, '{"a":1}');
+
+CREATE TEMP TABLE foo (serial_num int, name text, type text);
+INSERT INTO foo VALUES (847001,'t15','GE1043');
+INSERT INTO foo VALUES (847002,'t16','GE1043');
+INSERT INTO foo VALUES (847003,'sub-alpha','GESS90');
+
+SELECT jsonb_build_object('turbines',jsonb_object_agg(serial_num,jsonb_build_object('name',name,'type',type)))
+FROM foo;
+
+SELECT jsonb_object_agg(name, type) FROM foo;
+
+INSERT INTO foo VALUES (999999, NULL, 'bar');
+SELECT jsonb_object_agg(name, type) FROM foo;
+
+-- jsonb_object
+
+-- empty object, one dimension
+SELECT jsonb_object('{}');
+
+-- empty object, two dimensions
+SELECT jsonb_object('{}', '{}');
+
+-- one dimension
+SELECT jsonb_object('{a,1,b,2,3,NULL,"d e f","a b c"}');
+
+-- same but with two dimensions
+SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
+
+-- odd number error
+SELECT jsonb_object('{a,b,c}');
+
+-- one column error
+SELECT jsonb_object('{{a},{b}}');
+
+-- too many columns error
+SELECT jsonb_object('{{a,b,c},{b,c,d}}');
+
+-- too many dimensions error
+SELECT jsonb_object('{{{a,b},{c,d}},{{b,c},{d,e}}}');
+
+--two argument form of jsonb_object
+
+select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}');
+
+-- too many dimensions
+SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}');
+
+-- mismatched dimensions
+
+select jsonb_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}');
+
+select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}');
+
+-- null key error
+
+select jsonb_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}');
+
+-- empty key is allowed
+
+select jsonb_object('{a,b,"","d e f"}','{1,2,3,"a b c"}');
+
+
+
+-- extract_path, extract_path_as_text
+SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
+SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
+SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
+SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
+SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6');
+SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2');
+SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text);
+SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text);
+
+-- extract_path nulls
+SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_false;
+SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_true;
+SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_false;
+SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_true;
+
+-- extract_path operators
+SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f4','f6'];
+SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2'];
+SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','0'];
+SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','1'];
+
+SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f4','f6'];
+SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2'];
+SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','0'];
+SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','1'];
+
+-- corner cases for same
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> '{}';
+select '[1,2,3]'::jsonb #> '{}';
+select '"foo"'::jsonb #> '{}';
+select '42'::jsonb #> '{}';
+select 'null'::jsonb #> '{}';
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a'];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', null];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', ''];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b'];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c'];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c','d'];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','z','c'];
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','1','b'];
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','z','b'];
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['1','b'];
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['z','b'];
+select '[{"b": "c"}, {"b": null}]'::jsonb #> array['1','b'];
+select '"foo"'::jsonb #> array['z'];
+select '42'::jsonb #> array['f2'];
+select '42'::jsonb #> array['0'];
+
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> '{}';
+select '[1,2,3]'::jsonb #>> '{}';
+select '"foo"'::jsonb #>> '{}';
+select '42'::jsonb #>> '{}';
+select 'null'::jsonb #>> '{}';
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a'];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', null];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', ''];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b'];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c'];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c','d'];
+select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','z','c'];
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','1','b'];
+select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','z','b'];
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['1','b'];
+select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['z','b'];
+select '[{"b": "c"}, {"b": null}]'::jsonb #>> array['1','b'];
+select '"foo"'::jsonb #>> array['z'];
+select '42'::jsonb #>> array['f2'];
+select '42'::jsonb #>> array['0'];
+
+-- array_elements
+SELECT jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]');
+SELECT * FROM jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]') q;
+SELECT jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]');
+SELECT * FROM jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q;
+
+-- populate_record
+CREATE TYPE jbpop AS (a text, b int, c timestamp);
+
+CREATE DOMAIN jsb_int_not_null AS int NOT NULL;
+CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3);
+CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3);
+
+create type jb_unordered_pair as (x int, y int);
+create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y);
+
+CREATE TYPE jsbrec AS (
+ i int,
+ ia _int4,
+ ia1 int[],
+ ia2 int[][],
+ ia3 int[][][],
+ ia1d jsb_int_array_1d,
+ ia2d jsb_int_array_2d,
+ t text,
+ ta text[],
+ c char(10),
+ ca char(10)[],
+ ts timestamp,
+ js json,
+ jsb jsonb,
+ jsa json[],
+ rec jbpop,
+ reca jbpop[]
+);
+
+CREATE TYPE jsbrec_i_not_null AS (
+ i jsb_int_not_null
+);
+
+SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q;
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q;
+
+SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q;
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q;
+
+SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":[100,200,false],"x":43.2}') q;
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":[100,200,false],"x":43.2}') q;
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"c":[100,200,false],"x":43.2}') q;
+
+SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop, '{}') q;
+
+SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"x": 43.2}') q;
+SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": null}') q;
+SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": 12345}') q;
+
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": null}') q;
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": 123}') q;
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [1, "2", null, 4]}') q;
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1, 2], [3, 4]]}') q;
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], 2]}') q;
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], [2, 3]]}') q;
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": "{1,2,3}"}') q;
+
+SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": null}') q;
+SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": 123}') q;
+SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [1, "2", null, 4]}') q;
+SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [[1, 2, 3]]}') q;
+
+SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": null}') q;
+SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": 123}') q;
+SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null, 4]}') q;
+SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null]}') q;
+
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [1, "2", null, 4]}') q;
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [null, 4]]}') q;
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[], []]}') q;
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [3]]}') q;
+SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], 3, 4]}') q;
+
+SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2"], [null, 4]]}') q;
+SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q;
+
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [1, "2", null, 4]}') q;
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [[1, 2], [null, 4]]}') q;
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q;
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q;
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q;
+SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q;
+
+SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": null}') q;
+SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": 123}') q;
+SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [1, "2", null, 4]}') q;
+SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q;
+
+SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": null}') q;
+SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaa"}') q;
+SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaa"}') q;
+SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaaaaa"}') q;
+
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": null}') q;
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": 123}') q;
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [1, "2", null, 4]}') q;
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q;
+SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q;
+
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": null}') q;
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": true}') q;
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": 123.45}') q;
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "123.45"}') q;
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "abc"}') q;
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": [123, "123", null, {"key": "value"}]}') q;
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q;
+
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": null}') q;
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": true}') q;
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": 123.45}') q;
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "123.45"}') q;
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "abc"}') q;
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q;
+SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q;
+
+SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": null}') q;
+SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": 123}') q;
+SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": [1, "2", null, 4]}') q;
+SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q;
+
+SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": 123}') q;
+SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": [1, 2]}') q;
+SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q;
+SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": "(abc,42,01.02.2003)"}') q;
+
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": 123}') q;
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [1, 2]}') q;
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q;
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": ["(abc,42,01.02.2003)"]}') q;
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q;
+
+SELECT rec FROM jsonb_populate_record(
+ row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
+ row('x',3,'2012-12-31 15:30:56')::jbpop,NULL)::jsbrec,
+ '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}'
+) q;
+
+-- anonymous record type
+SELECT jsonb_populate_record(null::record, '{"x": 0, "y": 1}');
+SELECT jsonb_populate_record(row(1,2), '{"f1": 0, "f2": 1}');
+SELECT * FROM
+ jsonb_populate_record(null::record, '{"x": 776}') AS (x int, y int);
+
+-- composite domain
+SELECT jsonb_populate_record(null::jb_ordered_pair, '{"x": 0, "y": 1}');
+SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 0}');
+SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 1, "y": 0}');
+
+-- populate_recordset
+SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+
+SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q;
+SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q;
+
+-- anonymous record type
+SELECT jsonb_populate_recordset(null::record, '[{"x": 0, "y": 1}]');
+SELECT jsonb_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]');
+SELECT i, jsonb_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]')
+FROM (VALUES (1),(2)) v(i);
+SELECT * FROM
+ jsonb_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int);
+
+-- empty array is a corner case
+SELECT jsonb_populate_recordset(null::record, '[]');
+SELECT jsonb_populate_recordset(row(1,2), '[]');
+SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[]') q;
+SELECT * FROM
+ jsonb_populate_recordset(null::record, '[]') AS (x int, y int);
+
+-- composite domain
+SELECT jsonb_populate_recordset(null::jb_ordered_pair, '[{"x": 0, "y": 1}]');
+SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 0}, {"y": 3}]');
+SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 1, "y": 0}]');
+
+-- negative cases where the wrong record type is supplied
+select * from jsonb_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+select * from jsonb_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+select * from jsonb_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text);
+select * from jsonb_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text);
+
+-- jsonb_to_record and jsonb_to_recordset
+
+select * from jsonb_to_record('{"a":1,"b":"foo","c":"bar"}')
+ as x(a int, b text, d text);
+
+select * from jsonb_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]')
+ as x(a int, b text, c boolean);
+
+select *, c is null as c_is_null
+from jsonb_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::jsonb)
+ as t(a int, b jsonb, c text, x int, ca char(5)[], ia int[][], r jbpop);
+
+select *, c is null as c_is_null
+from jsonb_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::jsonb)
+ as t(a int, b jsonb, c text, x int);
+
+select * from jsonb_to_record('{"ia": null}') as x(ia _int4);
+select * from jsonb_to_record('{"ia": 123}') as x(ia _int4);
+select * from jsonb_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4);
+select * from jsonb_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4);
+select * from jsonb_to_record('{"ia": [[1], 2]}') as x(ia _int4);
+select * from jsonb_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4);
+
+select * from jsonb_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]);
+select * from jsonb_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]);
+select * from jsonb_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]);
+
+select * from jsonb_to_record('{"out": {"key": 1}}') as x(out json);
+select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out json);
+select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out json);
+select * from jsonb_to_record('{"out": {"key": 1}}') as x(out jsonb);
+select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out jsonb);
+select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb);
+
+-- test type info caching in jsonb_populate_record()
+CREATE TEMP TABLE jsbpoptest (js jsonb);
+
+INSERT INTO jsbpoptest
+SELECT '{
+ "jsa": [1, "2", null, 4],
+ "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2},
+ "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]
+}'::jsonb
+FROM generate_series(1, 3);
+
+SELECT (jsonb_populate_record(NULL::jsbrec, js)).* FROM jsbpoptest;
+
+DROP TYPE jsbrec;
+DROP TYPE jsbrec_i_not_null;
+DROP DOMAIN jsb_int_not_null;
+DROP DOMAIN jsb_int_array_1d;
+DROP DOMAIN jsb_int_array_2d;
+DROP DOMAIN jb_ordered_pair;
+DROP TYPE jb_unordered_pair;
+
+-- indexing
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
+SELECT count(*) FROM testjsonb WHERE j ? 'public';
+SELECT count(*) FROM testjsonb WHERE j ? 'bar';
+SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled'];
+SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled'];
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
+SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)';
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)';
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)';
+SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)';
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)';
+SELECT count(*) FROM testjsonb WHERE j @? '$';
+SELECT count(*) FROM testjsonb WHERE j @? '$.public';
+SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
+
+CREATE INDEX jidx ON testjsonb USING gin (j);
+SET enable_seqscan = off;
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"array":["foo"]}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}';
+-- exercise GIN_SEARCH_MODE_ALL
+SELECT count(*) FROM testjsonb WHERE j @> '{}';
+SELECT count(*) FROM testjsonb WHERE j ? 'public';
+SELECT count(*) FROM testjsonb WHERE j ? 'bar';
+SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled'];
+SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled'];
+
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? (@ == null))';
+SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)';
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)';
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)';
+SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)';
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)';
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")';
+SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")';
+SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? (@ == "bar")';
+SELECT count(*) FROM testjsonb WHERE j @? '$';
+SELECT count(*) FROM testjsonb WHERE j @? '$.public';
+SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
+
+-- array exists - array elements should behave as keys (for GIN index scans too)
+CREATE INDEX jidx_array ON testjsonb USING gin((j->'array'));
+SELECT count(*) from testjsonb WHERE j->'array' ? 'bar';
+-- type sensitive array exists - should return no rows (since "exists" only
+-- matches strings that are either object keys or array elements)
+SELECT count(*) from testjsonb WHERE j->'array' ? '5'::text;
+-- However, a raw scalar is *contained* within the array
+SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb;
+
+RESET enable_seqscan;
+
+SELECT count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow;
+SELECT key, count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow GROUP BY key ORDER BY count DESC, key;
+
+-- sort/hash
+SELECT count(distinct j) FROM testjsonb;
+SET enable_hashagg = off;
+SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2;
+SET enable_hashagg = on;
+SET enable_sort = off;
+SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2;
+SELECT distinct * FROM (values (jsonb '{}' || ''::text),('{}')) v(j);
+SET enable_sort = on;
+
+RESET enable_hashagg;
+RESET enable_sort;
+
+DROP INDEX jidx;
+DROP INDEX jidx_array;
+-- btree
+CREATE INDEX jidx ON testjsonb USING btree (j);
+SET enable_seqscan = off;
+
+SELECT count(*) FROM testjsonb WHERE j > '{"p":1}';
+SELECT count(*) FROM testjsonb WHERE j = '{"pos":98, "line":371, "node":"CBA", "indexed":true}';
+
+--gin path opclass
+DROP INDEX jidx;
+CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops);
+SET enable_seqscan = off;
+
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}';
+SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}';
+-- exercise GIN_SEARCH_MODE_ALL
+SELECT count(*) FROM testjsonb WHERE j @> '{}';
+
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? (@ == null))';
+SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"';
+SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))';
+SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)';
+
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)';
+SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)';
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)';
+SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)';
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)';
+SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")';
+SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")';
+SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? (@ == "bar")';
+SELECT count(*) FROM testjsonb WHERE j @? '$';
+SELECT count(*) FROM testjsonb WHERE j @? '$.public';
+SELECT count(*) FROM testjsonb WHERE j @? '$.bar';
+
+RESET enable_seqscan;
+DROP INDEX jidx;
+
+-- nested tests
+SELECT '{"ff":{"a":12,"b":16}}'::jsonb;
+SELECT '{"ff":{"a":12,"b":16},"qq":123}'::jsonb;
+SELECT '{"aa":["a","aaa"],"qq":{"a":12,"b":16,"c":["c1","c2"],"d":{"d1":"d1","d2":"d2","d1":"d3"}}}'::jsonb;
+SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2"],"d":{"d1":"d1","d2":"d2"}}}'::jsonb;
+SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2",["c3"],{"c4":4}],"d":{"d1":"d1","d2":"d2"}}}'::jsonb;
+SELECT '{"ff":["a","aaa"]}'::jsonb;
+
+SELECT
+ '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'ff',
+ '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'qq',
+ ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'Y') IS NULL AS f,
+ ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb ->> 'Y') IS NULL AS t,
+ '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'x';
+
+-- nested containment
+SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1,2]}';
+SELECT '{"a":[2,1],"c":"b"}'::jsonb @> '{"a":[1,2]}';
+SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":[1,2]}';
+SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":[1,2]}';
+SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":{"1":2}}';
+SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":{"1":2}}';
+SELECT '["a","b"]'::jsonb @> '["a","b","c","b"]';
+SELECT '["a","b","c","b"]'::jsonb @> '["a","b"]';
+SELECT '["a","b","c",[1,2]]'::jsonb @> '["a",[1,2]]';
+SELECT '["a","b","c",[1,2]]'::jsonb @> '["b",[1,2]]';
+
+SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1]}';
+SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[2]}';
+SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[3]}';
+
+SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"c":3}]}';
+SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4}]}';
+SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},3]}';
+SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},1]}';
+
+-- check some corner cases for indexed nested containment (bug #13756)
+create temp table nestjsonb (j jsonb);
+insert into nestjsonb (j) values ('{"a":[["b",{"x":1}],["b",{"x":2}]],"c":3}');
+insert into nestjsonb (j) values ('[[14,2,3]]');
+insert into nestjsonb (j) values ('[1,[14,2,3]]');
+create index on nestjsonb using gin(j jsonb_path_ops);
+
+set enable_seqscan = on;
+set enable_bitmapscan = off;
+select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb;
+select * from nestjsonb where j @> '{"c":3}';
+select * from nestjsonb where j @> '[[14]]';
+set enable_seqscan = off;
+set enable_bitmapscan = on;
+select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb;
+select * from nestjsonb where j @> '{"c":3}';
+select * from nestjsonb where j @> '[[14]]';
+reset enable_seqscan;
+reset enable_bitmapscan;
+
+-- nested object field / array index lookup
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'n';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'a';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'b';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'c';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd' -> '1';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'e';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 0; --expecting error
+
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 0;
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 1;
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 2;
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 3;
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 3 -> 1;
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 4;
+SELECT '["a","b","c",[1,2],null]'::jsonb -> 5;
+SELECT '["a","b","c",[1,2],null]'::jsonb -> -1;
+SELECT '["a","b","c",[1,2],null]'::jsonb -> -5;
+SELECT '["a","b","c",[1,2],null]'::jsonb -> -6;
+
+--nested path extraction
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{0}';
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{a}';
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c}';
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,0}';
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,1}';
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,2}';
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,3}';
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-1}';
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-3}';
+SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-4}';
+
+SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{0}';
+SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{3}';
+SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4}';
+SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4,5}';
+
+--nested exists
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'n';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'a';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'b';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'c';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'd';
+SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'e';
+
+-- jsonb_strip_nulls
+
+select jsonb_strip_nulls(null);
+
+select jsonb_strip_nulls('1');
+
+select jsonb_strip_nulls('"a string"');
+
+select jsonb_strip_nulls('null');
+
+select jsonb_strip_nulls('[1,2,null,3,4]');
+
+select jsonb_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}');
+
+select jsonb_strip_nulls('[1,{"a":1,"b":null,"c":2},3]');
+
+-- an empty object is not null and should not be stripped
+select jsonb_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }');
+
+
+select jsonb_pretty('{"a": "test", "b": [1, 2, 3], "c": "test3", "d":{"dd": "test4", "dd2":{"ddd": "test5"}}}');
+select jsonb_pretty('[{"f1":1,"f2":null},2,null,[[{"x":true},6,7],8],3]');
+select jsonb_pretty('{"a":["b", "c"], "d": {"e":"f"}}');
+
+select jsonb_concat('{"d": "test", "a": [1, 2]}', '{"g": "test2", "c": {"c1":1, "c2":2}}');
+
+select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"cq":"l", "b":"g", "fg":false}';
+select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aq":"l"}';
+select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aa":"l"}';
+select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{}';
+
+select '["a", "b"]'::jsonb || '["c"]';
+select '["a", "b"]'::jsonb || '["c", "d"]';
+select '["c"]' || '["a", "b"]'::jsonb;
+
+select '["a", "b"]'::jsonb || '"c"';
+select '"c"' || '["a", "b"]'::jsonb;
+
+select '[]'::jsonb || '["a"]'::jsonb;
+select '[]'::jsonb || '"a"'::jsonb;
+select '"b"'::jsonb || '"a"'::jsonb;
+select '{}'::jsonb || '{"a":"b"}'::jsonb;
+select '[]'::jsonb || '{"a":"b"}'::jsonb;
+select '{"a":"b"}'::jsonb || '[]'::jsonb;
+
+select '"a"'::jsonb || '{"a":1}';
+select '{"a":1}' || '"a"'::jsonb;
+
+select '[3]'::jsonb || '{}'::jsonb;
+select '3'::jsonb || '[]'::jsonb;
+select '3'::jsonb || '4'::jsonb;
+select '3'::jsonb || '{}'::jsonb;
+
+select '["a", "b"]'::jsonb || '{"c":1}';
+select '{"c": 1}'::jsonb || '["a", "b"]';
+
+select '{}'::jsonb || '{"cq":"l", "b":"g", "fg":false}';
+
+select pg_column_size('{}'::jsonb || '{}'::jsonb) = pg_column_size('{}'::jsonb);
+select pg_column_size('{"aa":1}'::jsonb || '{"b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb);
+select pg_column_size('{"aa":1, "b":2}'::jsonb || '{}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb);
+select pg_column_size('{}'::jsonb || '{"aa":1, "b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb);
+
+select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'a');
+select jsonb_delete('{"a":null , "b":2, "c":3}'::jsonb, 'a');
+select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'b');
+select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'c');
+select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'd');
+select '{"a":1 , "b":2, "c":3}'::jsonb - 'a';
+select '{"a":null , "b":2, "c":3}'::jsonb - 'a';
+select '{"a":1 , "b":2, "c":3}'::jsonb - 'b';
+select '{"a":1 , "b":2, "c":3}'::jsonb - 'c';
+select '{"a":1 , "b":2, "c":3}'::jsonb - 'd';
+select pg_column_size('{"a":1 , "b":2, "c":3}'::jsonb - 'b') = pg_column_size('{"a":1, "b":2}'::jsonb);
+
+select '["a","b","c"]'::jsonb - 3;
+select '["a","b","c"]'::jsonb - 2;
+select '["a","b","c"]'::jsonb - 1;
+select '["a","b","c"]'::jsonb - 0;
+select '["a","b","c"]'::jsonb - -1;
+select '["a","b","c"]'::jsonb - -2;
+select '["a","b","c"]'::jsonb - -3;
+select '["a","b","c"]'::jsonb - -4;
+
+select '{"a":1 , "b":2, "c":3}'::jsonb - '{b}'::text[];
+select '{"a":1 , "b":2, "c":3}'::jsonb - '{c,b}'::text[];
+select '{"a":1 , "b":2, "c":3}'::jsonb - '{}'::text[];
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '[1,2,3]');
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '[1,2,3]');
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '[1,2,3]');
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '[1,2,3]');
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '{"1": 2}');
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"1": 2}');
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '{"1": 2}');
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '{"1": 2}');
+
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '"test"');
+select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"f": "test"}');
+
+select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{n}');
+select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{b,-1}');
+select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{d,1,0}');
+
+select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{n}';
+select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1}';
+select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1e}'; -- invalid array subscript
+select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{d,1,0}';
+
+
+-- empty structure and error conditions for delete and replace
+
+select '"a"'::jsonb - 'a'; -- error
+select '{}'::jsonb - 'a';
+select '[]'::jsonb - 'a';
+select '"a"'::jsonb - 1; -- error
+select '{}'::jsonb - 1; -- error
+select '[]'::jsonb - 1;
+select '"a"'::jsonb #- '{a}'; -- error
+select '{}'::jsonb #- '{a}';
+select '[]'::jsonb #- '{a}';
+select jsonb_set('"a"','{a}','"b"'); --error
+select jsonb_set('{}','{a}','"b"', false);
+select jsonb_set('[]','{1}','"b"', false);
+select jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0}','[2,3,4]', false);
+
+-- jsonb_set adding instead of replacing
+
+-- prepend to array
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,-33}','{"foo":123}');
+-- append to array
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,33}','{"foo":123}');
+-- check nesting levels addition
+select jsonb_set('{"a":1,"b":[4,5,[0,1,2],6,7],"c":{"d":4}}','{b,2,33}','{"foo":123}');
+-- add new key
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{c,e}','{"foo":123}');
+-- adding doesn't do anything if elements before last aren't present
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,-33}','{"foo":123}');
+select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,y}','{"foo":123}');
+-- add to empty object
+select jsonb_set('{}','{x}','{"foo":123}');
+--add to empty array
+select jsonb_set('[]','{0}','{"foo":123}');
+select jsonb_set('[]','{99}','{"foo":123}');
+select jsonb_set('[]','{-99}','{"foo":123}');
+select jsonb_set('{"a": [1, 2, 3]}', '{a, non_integer}', '"new_value"');
+select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, non_integer}', '"new_value"');
+select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, NULL}', '"new_value"');
+
+-- jsonb_set_lax
+
+\pset null NULL
+
+-- pass though non nulls to jsonb_set
+select jsonb_set_lax('{"a":1,"b":2}','{b}','5') ;
+select jsonb_set_lax('{"a":1,"b":2}','{d}','6', true) ;
+-- using the default treatment
+select jsonb_set_lax('{"a":1,"b":2}','{b}',null);
+select jsonb_set_lax('{"a":1,"b":2}','{d}',null,true);
+-- errors
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, null);
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, 'no_such_treatment');
+-- explicit treatments
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'raise_exception') as raise_exception;
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'return_target') as return_target;
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'delete_key') as delete_key;
+select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'use_json_null') as use_json_null;
+
+\pset null ''
+
+-- jsonb_insert
+select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"');
+select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"', true);
+select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"');
+select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"', true);
+select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '{"b": "value"}');
+select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '["value1", "value2"]');
+
+-- edge cases
+select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"');
+select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"', true);
+select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"');
+select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"', true);
+select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"');
+select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"', true);
+select jsonb_insert('[]', '{1}', '"new_value"');
+select jsonb_insert('[]', '{1}', '"new_value"', true);
+select jsonb_insert('{"a": []}', '{a, 1}', '"new_value"');
+select jsonb_insert('{"a": []}', '{a, 1}', '"new_value"', true);
+select jsonb_insert('{"a": [0,1,2]}', '{a, 10}', '"new_value"');
+select jsonb_insert('{"a": [0,1,2]}', '{a, -10}', '"new_value"');
+
+-- jsonb_insert should be able to insert new value for objects, but not to replace
+select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"');
+select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"', true);
+
+select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"');
+select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"', true);
+
+-- jsonb subscript
+select ('123'::jsonb)['a'];
+select ('123'::jsonb)[0];
+select ('123'::jsonb)[NULL];
+select ('{"a": 1}'::jsonb)['a'];
+select ('{"a": 1}'::jsonb)[0];
+select ('{"a": 1}'::jsonb)['not_exist'];
+select ('{"a": 1}'::jsonb)[NULL];
+select ('[1, "2", null]'::jsonb)['a'];
+select ('[1, "2", null]'::jsonb)[0];
+select ('[1, "2", null]'::jsonb)['1'];
+select ('[1, "2", null]'::jsonb)[1.0];
+select ('[1, "2", null]'::jsonb)[2];
+select ('[1, "2", null]'::jsonb)[3];
+select ('[1, "2", null]'::jsonb)[-2];
+select ('[1, "2", null]'::jsonb)[1]['a'];
+select ('[1, "2", null]'::jsonb)[1][0];
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['b'];
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'];
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'][1];
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']['a'];
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1'];
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2'];
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']['a3'];
+select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'];
+select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'][2];
+
+-- slices are not supported
+select ('{"a": 1}'::jsonb)['a':'b'];
+select ('[1, "2", null]'::jsonb)[1:2];
+select ('[1, "2", null]'::jsonb)[:2];
+select ('[1, "2", null]'::jsonb)[1:];
+select ('[1, "2", null]'::jsonb)[:];
+
+create TEMP TABLE test_jsonb_subscript (
+ id int,
+ test_json jsonb
+);
+
+insert into test_jsonb_subscript values
+(1, '{}'), -- empty jsonb
+(2, '{"key": "value"}'); -- jsonb with data
+
+-- update empty jsonb
+update test_jsonb_subscript set test_json['a'] = '1' where id = 1;
+select * from test_jsonb_subscript;
+
+-- update jsonb with some data
+update test_jsonb_subscript set test_json['a'] = '1' where id = 2;
+select * from test_jsonb_subscript;
+
+-- replace jsonb
+update test_jsonb_subscript set test_json['a'] = '"test"';
+select * from test_jsonb_subscript;
+
+-- replace by object
+update test_jsonb_subscript set test_json['a'] = '{"b": 1}'::jsonb;
+select * from test_jsonb_subscript;
+
+-- replace by array
+update test_jsonb_subscript set test_json['a'] = '[1, 2, 3]'::jsonb;
+select * from test_jsonb_subscript;
+
+-- use jsonb subscription in where clause
+select * from test_jsonb_subscript where test_json['key'] = '"value"';
+select * from test_jsonb_subscript where test_json['key_doesnt_exists'] = '"value"';
+select * from test_jsonb_subscript where test_json['key'] = '"wrong_value"';
+
+-- NULL
+update test_jsonb_subscript set test_json[NULL] = '1';
+update test_jsonb_subscript set test_json['another_key'] = NULL;
+select * from test_jsonb_subscript;
+
+-- NULL as jsonb source
+insert into test_jsonb_subscript values (3, NULL);
+update test_jsonb_subscript set test_json['a'] = '1' where id = 3;
+select * from test_jsonb_subscript;
+
+update test_jsonb_subscript set test_json = NULL where id = 3;
+update test_jsonb_subscript set test_json[0] = '1';
+select * from test_jsonb_subscript;
+
+-- Fill the gaps logic
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '[0]');
+
+update test_jsonb_subscript set test_json[5] = '1';
+select * from test_jsonb_subscript;
+
+update test_jsonb_subscript set test_json[-4] = '1';
+select * from test_jsonb_subscript;
+
+update test_jsonb_subscript set test_json[-8] = '1';
+select * from test_jsonb_subscript;
+
+-- keep consistent values position
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '[]');
+
+update test_jsonb_subscript set test_json[5] = '1';
+select * from test_jsonb_subscript;
+
+-- create the whole path
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json['a'][0]['b'][0]['c'] = '1';
+select * from test_jsonb_subscript;
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json['a'][2]['b'][2]['c'][2] = '1';
+select * from test_jsonb_subscript;
+
+-- create the whole path with already existing keys
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{"b": 1}');
+update test_jsonb_subscript set test_json['a'][0] = '2';
+select * from test_jsonb_subscript;
+
+-- the start jsonb is an object, first subscript is treated as a key
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json[0]['a'] = '1';
+select * from test_jsonb_subscript;
+
+-- the start jsonb is an array
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '[]');
+update test_jsonb_subscript set test_json[0]['a'] = '1';
+update test_jsonb_subscript set test_json[2]['b'] = '2';
+select * from test_jsonb_subscript;
+
+-- overwriting an existing path
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json['a']['b'][1] = '1';
+update test_jsonb_subscript set test_json['a']['b'][10] = '1';
+select * from test_jsonb_subscript;
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '[]');
+update test_jsonb_subscript set test_json[0][0][0] = '1';
+update test_jsonb_subscript set test_json[0][0][1] = '1';
+select * from test_jsonb_subscript;
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{}');
+update test_jsonb_subscript set test_json['a']['b'][10] = '1';
+update test_jsonb_subscript set test_json['a'][10][10] = '1';
+select * from test_jsonb_subscript;
+
+-- an empty sub element
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{"a": {}}');
+update test_jsonb_subscript set test_json['a']['b']['c'][2] = '1';
+select * from test_jsonb_subscript;
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{"a": []}');
+update test_jsonb_subscript set test_json['a'][1]['c'][2] = '1';
+select * from test_jsonb_subscript;
+
+-- trying replace assuming a composite object, but it's an element or a value
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, '{"a": 1}');
+update test_jsonb_subscript set test_json['a']['b'] = '1';
+update test_jsonb_subscript set test_json['a']['b']['c'] = '1';
+update test_jsonb_subscript set test_json['a'][0] = '1';
+update test_jsonb_subscript set test_json['a'][0]['c'] = '1';
+update test_jsonb_subscript set test_json['a'][0][0] = '1';
+
+-- trying replace assuming a composite object, but it's a raw scalar
+
+delete from test_jsonb_subscript;
+insert into test_jsonb_subscript values (1, 'null');
+update test_jsonb_subscript set test_json[0] = '1';
+update test_jsonb_subscript set test_json[0][0] = '1';
+
+-- jsonb to tsvector
+select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb);
+
+-- jsonb to tsvector with config
+select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb);
+
+-- jsonb to tsvector with stop words
+select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. iii"}}'::jsonb);
+
+-- jsonb to tsvector with numeric values
+select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::jsonb);
+
+-- jsonb_to_tsvector
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]');
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]');
+
+-- to_tsvector corner cases
+select to_tsvector('""'::jsonb);
+select to_tsvector('{}'::jsonb);
+select to_tsvector('[]'::jsonb);
+select to_tsvector('null'::jsonb);
+
+-- jsonb_to_tsvector corner cases
+select jsonb_to_tsvector('""'::jsonb, '"all"');
+select jsonb_to_tsvector('{}'::jsonb, '"all"');
+select jsonb_to_tsvector('[]'::jsonb, '"all"');
+select jsonb_to_tsvector('null'::jsonb, '"all"');
+
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '""');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '{}');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '[]');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, 'null');
+select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["all", null]');
+
+-- ts_headline for jsonb
+select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'));
+select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'));
+select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
+select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >');
+
+-- corner cases for ts_headline with jsonb
+select ts_headline('null'::jsonb, tsquery('aaa & bbb'));
+select ts_headline('{}'::jsonb, tsquery('aaa & bbb'));
+select ts_headline('[]'::jsonb, tsquery('aaa & bbb'));
+
+-- casts
+select 'true'::jsonb::bool;
+select '[]'::jsonb::bool;
+select '1.0'::jsonb::float;
+select '[1.0]'::jsonb::float;
+select '12345'::jsonb::int4;
+select '"hello"'::jsonb::int4;
+select '12345'::jsonb::numeric;
+select '{}'::jsonb::numeric;
+select '12345.05'::jsonb::numeric;
+select '12345.05'::jsonb::float4;
+select '12345.05'::jsonb::float8;
+select '12345.05'::jsonb::int2;
+select '12345.05'::jsonb::int4;
+select '12345.05'::jsonb::int8;
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::numeric;
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::float4;
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::float8;
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::int2;
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::int4;
+select '12345.0000000000000000000000000000000000000000000005'::jsonb::int8;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/jsonb_jsonpath.out b/ydb/library/yql/tests/postgresql/original/cases/jsonb_jsonpath.out
new file mode 100644
index 0000000000..508ddd797e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/jsonb_jsonpath.out
@@ -0,0 +1,2539 @@
+select jsonb '{"a": 12}' @? '$';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": 12}' @? '1';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": 12}' @? '$.a.b';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": 12}' @? '$.b';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": 12}' @? '$.a + 2';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": 12}' @? '$.b + 2';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '{"a": {"a": 12}}' @? '$.a.a';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"a": 12}}' @? '$.*.a';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"b": {"a": 12}}' @? '$.*.a';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"b": {"a": 12}}' @? '$.*.b';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"b": {"a": 12}}' @? 'strict $.*.b';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '{}' @? '$.*';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": 1}' @? '$.*';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"b": 1}}' @? 'lax $.**{1}';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"b": 1}}' @? 'lax $.**{2}';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"b": 1}}' @? 'lax $.**{3}';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '[]' @? '$[*]';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '[1]' @? '$[*]';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '[1]' @? '$[1]';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '[1]' @? 'strict $[1]';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb_path_query('[1]', 'strict $[1]');
+ERROR: jsonpath array subscript is out of bounds
+select jsonb_path_query('[1]', 'strict $[1]', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb '[1]' @? 'lax $[10000000000000000]';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '[1]' @? 'strict $[10000000000000000]';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb_path_query('[1]', 'lax $[10000000000000000]');
+ERROR: jsonpath array subscript is out of integer range
+select jsonb_path_query('[1]', 'strict $[10000000000000000]');
+ERROR: jsonpath array subscript is out of integer range
+select jsonb '[1]' @? '$[0]';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '[1]' @? '$[0.3]';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '[1]' @? '$[0.5]';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '[1]' @? '$[0.9]';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '[1]' @? '$[1.2]';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '[1]' @? 'strict $[1.2]';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] > @.b[*])';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] >= @.b[*])';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? '$ ? (@.a[*] >= @.b[*])';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? 'strict $ ? (@.a[*] >= @.b[*])';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": [1,2,3], "b": [3,4,null]}' @? '$ ? (@.a[*] >= @.b[*])';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '1' @? '$ ? ((@ == "1") is unknown)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '1' @? '$ ? ((@ == 1) is unknown)';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '[{"a": 1}, {"a": 2}]' @? '$[0 to 1] ? (@.a > 1)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => false);
+ jsonb_path_exists
+-------------------
+ t
+(1 row)
+
+select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => true);
+ jsonb_path_exists
+-------------------
+ t
+(1 row)
+
+select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => false);
+ERROR: jsonpath member accessor can only be applied to an object
+select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => true);
+ jsonb_path_exists
+-------------------
+
+(1 row)
+
+select jsonb_path_query('1', 'lax $.a');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('1', 'strict $.a');
+ERROR: jsonpath member accessor can only be applied to an object
+select jsonb_path_query('1', 'strict $.*');
+ERROR: jsonpath wildcard member accessor can only be applied to an object
+select jsonb_path_query('1', 'strict $.a', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('1', 'strict $.*', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', 'lax $.a');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', 'strict $.a');
+ERROR: jsonpath member accessor can only be applied to an object
+select jsonb_path_query('[]', 'strict $.a', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{}', 'lax $.a');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{}', 'strict $.a');
+ERROR: JSON object does not contain key "a"
+select jsonb_path_query('{}', 'strict $.a', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('1', 'strict $[1]');
+ERROR: jsonpath array accessor can only be applied to an array
+select jsonb_path_query('1', 'strict $[*]');
+ERROR: jsonpath wildcard array accessor can only be applied to an array
+select jsonb_path_query('[]', 'strict $[1]');
+ERROR: jsonpath array subscript is out of bounds
+select jsonb_path_query('[]', 'strict $["a"]');
+ERROR: jsonpath array subscript is not a single numeric value
+select jsonb_path_query('1', 'strict $[1]', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('1', 'strict $[*]', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', 'strict $[1]', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', 'strict $["a"]', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.a');
+ jsonb_path_query
+------------------
+ 12
+(1 row)
+
+select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.b');
+ jsonb_path_query
+------------------
+ {"a": 13}
+(1 row)
+
+select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.*');
+ jsonb_path_query
+------------------
+ 12
+ {"a": 13}
+(2 rows)
+
+select jsonb_path_query('{"a": 12, "b": {"a": 13}}', 'lax $.*.a');
+ jsonb_path_query
+------------------
+ 13
+(1 row)
+
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].a');
+ jsonb_path_query
+------------------
+ 13
+(1 row)
+
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].*');
+ jsonb_path_query
+------------------
+ 13
+ 14
+(2 rows)
+
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0].a');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[1].a');
+ jsonb_path_query
+------------------
+ 13
+(1 row)
+
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[2].a');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0,1].a');
+ jsonb_path_query
+------------------
+ 13
+(1 row)
+
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10].a');
+ jsonb_path_query
+------------------
+ 13
+(1 row)
+
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10 / 0].a');
+ERROR: division by zero
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}, "ccc", true]', '$[2.5 - 1 to $.size() - 2]');
+ jsonb_path_query
+------------------
+ {"a": 13}
+ {"b": 14}
+ "ccc"
+(3 rows)
+
+select jsonb_path_query('1', 'lax $[0]');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('1', 'lax $[*]');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('[1]', 'lax $[0]');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('[1]', 'lax $[*]');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('[1,2,3]', 'lax $[*]');
+ jsonb_path_query
+------------------
+ 1
+ 2
+ 3
+(3 rows)
+
+select jsonb_path_query('[1,2,3]', 'strict $[*].a');
+ERROR: jsonpath member accessor can only be applied to an object
+select jsonb_path_query('[1,2,3]', 'strict $[*].a', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', '$[last]');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', '$[last ? (exists(last))]');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', 'strict $[last]');
+ERROR: jsonpath array subscript is out of bounds
+select jsonb_path_query('[]', 'strict $[last]', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[1]', '$[last]');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('[1,2,3]', '$[last]');
+ jsonb_path_query
+------------------
+ 3
+(1 row)
+
+select jsonb_path_query('[1,2,3]', '$[last - 1]');
+ jsonb_path_query
+------------------
+ 2
+(1 row)
+
+select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "number")]');
+ jsonb_path_query
+------------------
+ 3
+(1 row)
+
+select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]');
+ERROR: jsonpath array subscript is not a single numeric value
+select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select * from jsonb_path_query('{"a": 10}', '$');
+ jsonb_path_query
+------------------
+ {"a": 10}
+(1 row)
+
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)');
+ERROR: could not find jsonpath variable "value"
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '1');
+ERROR: "vars" argument is not an object
+DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object.
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '[{"value" : 13}]');
+ERROR: "vars" argument is not an object
+DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object.
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 13}');
+ jsonb_path_query
+------------------
+ {"a": 10}
+(1 row)
+
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 8}');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select * from jsonb_path_query('{"a": 10}', '$.a ? (@ < $value)', '{"value" : 13}');
+ jsonb_path_query
+------------------
+ 10
+(1 row)
+
+select * from jsonb_path_query('[10,11,12,13,14,15]', '$[*] ? (@ < $value)', '{"value" : 13}');
+ jsonb_path_query
+------------------
+ 10
+ 11
+ 12
+(3 rows)
+
+select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0,1] ? (@ < $x.value)', '{"x": {"value" : 13}}');
+ jsonb_path_query
+------------------
+ 10
+ 11
+(2 rows)
+
+select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0 to 2] ? (@ < $value)', '{"value" : 15}');
+ jsonb_path_query
+------------------
+ 10
+ 11
+ 12
+(3 rows)
+
+select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == "1")');
+ jsonb_path_query
+------------------
+ "1"
+(1 row)
+
+select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : "1"}');
+ jsonb_path_query
+------------------
+ "1"
+(1 row)
+
+select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : null}');
+ jsonb_path_query
+------------------
+ null
+(1 row)
+
+select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ != null)');
+ jsonb_path_query
+------------------
+ 1
+ "2"
+(2 rows)
+
+select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ == null)');
+ jsonb_path_query
+------------------
+ null
+(1 row)
+
+select * from jsonb_path_query('{}', '$ ? (@ == @)');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select * from jsonb_path_query('[]', 'strict $ ? (@ == @)');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**');
+ jsonb_path_query
+------------------
+ {"a": {"b": 1}}
+ {"b": 1}
+ 1
+(3 rows)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}');
+ jsonb_path_query
+------------------
+ {"a": {"b": 1}}
+(1 row)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}');
+ jsonb_path_query
+------------------
+ {"a": {"b": 1}}
+ {"b": 1}
+ 1
+(3 rows)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}');
+ jsonb_path_query
+------------------
+ {"b": 1}
+(1 row)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}');
+ jsonb_path_query
+------------------
+ {"b": 1}
+ 1
+(2 rows)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2}');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2 to last}');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{3 to last}');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{last}');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to 2}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0 to last}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to last}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to 2}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{2 to 3}.b ? (@ > 0)');
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb '{"a": {"b": 1}}' @? '$.**.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"b": 1}}' @? '$.**{0}.b ? ( @ > 0)';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": {"b": 1}}' @? '$.**{1}.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"b": 1}}' @? '$.**{0 to last}.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"b": 1}}' @? '$.**{1 to last}.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"b": 1}}' @? '$.**{1 to 2}.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0}.b ? ( @ > 0)';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1}.b ? ( @ > 0)';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0 to last}.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to last}.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to 2}.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{2 to 3}.b ? ( @ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x))');
+ jsonb_path_query
+------------------
+ {"x": 2}
+(1 row)
+
+select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.y))');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x ? (@ >= 2) ))');
+ jsonb_path_query
+------------------
+ {"x": 2}
+(1 row)
+
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x))');
+ jsonb_path_query
+------------------
+ {"x": 2}
+(1 row)
+
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x + "3"))');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? ((exists (@.x + "3")) is unknown)');
+ jsonb_path_query
+------------------
+ {"x": 2}
+ {"y": 3}
+(2 rows)
+
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? (exists (@.x))');
+ jsonb_path_query
+------------------
+ {"x": 2}
+(1 row)
+
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? ((exists (@.x)) is unknown)');
+ jsonb_path_query
+------------------
+ {"y": 3}
+(1 row)
+
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? (exists (@[*].x))');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? ((exists (@[*].x)) is unknown)');
+ jsonb_path_query
+----------------------
+ [{"x": 2}, {"y": 3}]
+(1 row)
+
+--test ternary logic
+select
+ x, y,
+ jsonb_path_query(
+ '[true, false, null]',
+ '$[*] ? (@ == true && ($x == true && $y == true) ||
+ @ == false && !($x == true && $y == true) ||
+ @ == null && ($x == true && $y == true) is unknown)',
+ jsonb_build_object('x', x, 'y', y)
+ ) as "x && y"
+from
+ (values (jsonb 'true'), ('false'), ('"null"')) x(x),
+ (values (jsonb 'true'), ('false'), ('"null"')) y(y);
+ x | y | x && y
+--------+--------+--------
+ true | true | true
+ true | false | false
+ true | "null" | null
+ false | true | false
+ false | false | false
+ false | "null" | false
+ "null" | true | null
+ "null" | false | false
+ "null" | "null" | null
+(9 rows)
+
+select
+ x, y,
+ jsonb_path_query(
+ '[true, false, null]',
+ '$[*] ? (@ == true && ($x == true || $y == true) ||
+ @ == false && !($x == true || $y == true) ||
+ @ == null && ($x == true || $y == true) is unknown)',
+ jsonb_build_object('x', x, 'y', y)
+ ) as "x || y"
+from
+ (values (jsonb 'true'), ('false'), ('"null"')) x(x),
+ (values (jsonb 'true'), ('false'), ('"null"')) y(y);
+ x | y | x || y
+--------+--------+--------
+ true | true | true
+ true | false | true
+ true | "null" | true
+ false | true | true
+ false | false | false
+ false | "null" | null
+ "null" | true | true
+ "null" | false | null
+ "null" | "null" | null
+(9 rows)
+
+select jsonb '{"a": 1, "b":1}' @? '$ ? (@.a == @.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$ ? (@.a == @.b)';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? (@.a == @.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? ($.c.a == @.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$.* ? (@.a == @.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": 1, "b":1}' @? '$.** ? (@.a == @.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$.** ? (@.a == @.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == 1 + 1)');
+ jsonb_path_query
+------------------
+ {"a": 2, "b": 1}
+(1 row)
+
+select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (1 + 1))');
+ jsonb_path_query
+------------------
+ {"a": 2, "b": 1}
+(1 row)
+
+select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == @.b + 1)');
+ jsonb_path_query
+------------------
+ {"a": 2, "b": 1}
+(1 row)
+
+select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (@.b + 1))');
+ jsonb_path_query
+------------------
+ {"a": 2, "b": 1}
+(1 row)
+
+select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - 1)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -1)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -@.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - @.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - @.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": 2, "b":1}}' @? '$.** ? (@.a == 1 - - @.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - +@.b)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '[1,2,3]' @? '$ ? (+@[*] > +2)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '[1,2,3]' @? '$ ? (+@[*] > +3)';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '[1,2,3]' @? '$ ? (-@[*] < -2)';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '[1,2,3]' @? '$ ? (-@[*] < -3)';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '1' @? '$ ? ($ > 0)';
+ ?column?
+----------
+ t
+(1 row)
+
+-- arithmetic errors
+select jsonb_path_query('[1,2,0,3]', '$[*] ? (2 / @ > 0)');
+ jsonb_path_query
+------------------
+ 1
+ 2
+ 3
+(3 rows)
+
+select jsonb_path_query('[1,2,0,3]', '$[*] ? ((2 / @ > 0) is unknown)');
+ jsonb_path_query
+------------------
+ 0
+(1 row)
+
+select jsonb_path_query('0', '1 / $');
+ERROR: division by zero
+select jsonb_path_query('0', '1 / $ + 2');
+ERROR: division by zero
+select jsonb_path_query('0', '-(3 + 1 % $)');
+ERROR: division by zero
+select jsonb_path_query('1', '$ + "2"');
+ERROR: right operand of jsonpath operator + is not a single numeric value
+select jsonb_path_query('[1, 2]', '3 * $');
+ERROR: right operand of jsonpath operator * is not a single numeric value
+select jsonb_path_query('"a"', '-$');
+ERROR: operand of unary jsonpath operator - is not a numeric value
+select jsonb_path_query('[1,"2",3]', '+$');
+ERROR: operand of unary jsonpath operator + is not a numeric value
+select jsonb_path_query('1', '$ + "2"', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[1, 2]', '3 * $', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('"a"', '-$', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[1,"2",3]', '+$', silent => true);
+ jsonb_path_query
+------------------
+ 1
+(1 row)
+
+select jsonb '["1",2,0,3]' @? '-$[*]';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '[1,"2",0,3]' @? '-$[*]';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '["1",2,0,3]' @? 'strict -$[*]';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '[1,"2",0,3]' @? 'strict -$[*]';
+ ?column?
+----------
+
+(1 row)
+
+-- unwrapping of operator arguments in lax mode
+select jsonb_path_query('{"a": [2]}', 'lax $.a * 3');
+ jsonb_path_query
+------------------
+ 6
+(1 row)
+
+select jsonb_path_query('{"a": [2]}', 'lax $.a + 3');
+ jsonb_path_query
+------------------
+ 5
+(1 row)
+
+select jsonb_path_query('{"a": [2, 3, 4]}', 'lax -$.a');
+ jsonb_path_query
+------------------
+ -2
+ -3
+ -4
+(3 rows)
+
+-- should fail
+select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3');
+ERROR: left operand of jsonpath operator * is not a single numeric value
+select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+-- extension: boolean expressions
+select jsonb_path_query('2', '$ > 1');
+ jsonb_path_query
+------------------
+ true
+(1 row)
+
+select jsonb_path_query('2', '$ <= 1');
+ jsonb_path_query
+------------------
+ false
+(1 row)
+
+select jsonb_path_query('2', '$ == "2"');
+ jsonb_path_query
+------------------
+ null
+(1 row)
+
+select jsonb '2' @? '$ == "2"';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '2' @@ '$ > 1';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '2' @@ '$ <= 1';
+ ?column?
+----------
+ f
+(1 row)
+
+select jsonb '2' @@ '$ == "2"';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '2' @@ '1';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '{}' @@ '$';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '[]' @@ '$';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '[1,2,3]' @@ '$[*]';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb '[]' @@ '$[*]';
+ ?column?
+----------
+
+(1 row)
+
+select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] > $x) [1]', '{"x": 1}');
+ jsonb_path_match
+------------------
+ f
+(1 row)
+
+select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] < $x) [1]', '{"x": 2}');
+ jsonb_path_match
+------------------
+ t
+(1 row)
+
+select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => false);
+ jsonb_path_match
+------------------
+ t
+(1 row)
+
+select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => true);
+ jsonb_path_match
+------------------
+ t
+(1 row)
+
+select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => false);
+ jsonb_path_match
+------------------
+
+(1 row)
+
+select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => true);
+ jsonb_path_match
+------------------
+
+(1 row)
+
+select jsonb_path_query('[null,1,true,"a",[],{}]', '$.type()');
+ jsonb_path_query
+------------------
+ "array"
+(1 row)
+
+select jsonb_path_query('[null,1,true,"a",[],{}]', 'lax $.type()');
+ jsonb_path_query
+------------------
+ "array"
+(1 row)
+
+select jsonb_path_query('[null,1,true,"a",[],{}]', '$[*].type()');
+ jsonb_path_query
+------------------
+ "null"
+ "number"
+ "boolean"
+ "string"
+ "array"
+ "object"
+(6 rows)
+
+select jsonb_path_query('null', 'null.type()');
+ jsonb_path_query
+------------------
+ "null"
+(1 row)
+
+select jsonb_path_query('null', 'true.type()');
+ jsonb_path_query
+------------------
+ "boolean"
+(1 row)
+
+select jsonb_path_query('null', '(123).type()');
+ jsonb_path_query
+------------------
+ "number"
+(1 row)
+
+select jsonb_path_query('null', '"123".type()');
+ jsonb_path_query
+------------------
+ "string"
+(1 row)
+
+select jsonb_path_query('{"a": 2}', '($.a - 5).abs() + 10');
+ jsonb_path_query
+------------------
+ 13
+(1 row)
+
+select jsonb_path_query('{"a": 2.5}', '-($.a * $.a).floor() % 4.3');
+ jsonb_path_query
+------------------
+ -1.7
+(1 row)
+
+select jsonb_path_query('[1, 2, 3]', '($[*] > 2) ? (@ == true)');
+ jsonb_path_query
+------------------
+ true
+(1 row)
+
+select jsonb_path_query('[1, 2, 3]', '($[*] > 3).type()');
+ jsonb_path_query
+------------------
+ "boolean"
+(1 row)
+
+select jsonb_path_query('[1, 2, 3]', '($[*].a > 3).type()');
+ jsonb_path_query
+------------------
+ "boolean"
+(1 row)
+
+select jsonb_path_query('[1, 2, 3]', 'strict ($[*].a > 3).type()');
+ jsonb_path_query
+------------------
+ "null"
+(1 row)
+
+select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()');
+ERROR: jsonpath item method .size() can only be applied to an array
+select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'lax $[*].size()');
+ jsonb_path_query
+------------------
+ 1
+ 1
+ 1
+ 1
+ 0
+ 1
+ 3
+ 1
+ 1
+(9 rows)
+
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].abs()');
+ jsonb_path_query
+------------------
+ 0
+ 1
+ 2
+ 3.4
+ 5.6
+(5 rows)
+
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].floor()');
+ jsonb_path_query
+------------------
+ 0
+ 1
+ -2
+ -4
+ 5
+(5 rows)
+
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling()');
+ jsonb_path_query
+------------------
+ 0
+ 1
+ -2
+ -3
+ 6
+(5 rows)
+
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs()');
+ jsonb_path_query
+------------------
+ 0
+ 1
+ 2
+ 3
+ 6
+(5 rows)
+
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs().type()');
+ jsonb_path_query
+------------------
+ "number"
+ "number"
+ "number"
+ "number"
+ "number"
+(5 rows)
+
+select jsonb_path_query('[{},1]', '$[*].keyvalue()');
+ERROR: jsonpath item method .keyvalue() can only be applied to an object
+select jsonb_path_query('[{},1]', '$[*].keyvalue()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{}', '$.keyvalue()');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{"a": 1, "b": [1, 2], "c": {"a": "bbb"}}', '$.keyvalue()');
+ jsonb_path_query
+----------------------------------------------
+ {"id": 0, "key": "a", "value": 1}
+ {"id": 0, "key": "b", "value": [1, 2]}
+ {"id": 0, "key": "c", "value": {"a": "bbb"}}
+(3 rows)
+
+select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', '$[*].keyvalue()');
+ jsonb_path_query
+-----------------------------------------------
+ {"id": 12, "key": "a", "value": 1}
+ {"id": 12, "key": "b", "value": [1, 2]}
+ {"id": 72, "key": "c", "value": {"a": "bbb"}}
+(3 rows)
+
+select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue()');
+ERROR: jsonpath item method .keyvalue() can only be applied to an object
+select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'lax $.keyvalue()');
+ jsonb_path_query
+-----------------------------------------------
+ {"id": 12, "key": "a", "value": 1}
+ {"id": 12, "key": "b", "value": [1, 2]}
+ {"id": 72, "key": "c", "value": {"a": "bbb"}}
+(3 rows)
+
+select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue().a');
+ERROR: jsonpath item method .keyvalue() can only be applied to an object
+select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue()';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue().key';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb_path_query('null', '$.double()');
+ERROR: jsonpath item method .double() can only be applied to a string or numeric value
+select jsonb_path_query('true', '$.double()');
+ERROR: jsonpath item method .double() can only be applied to a string or numeric value
+select jsonb_path_query('null', '$.double()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('true', '$.double()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', '$.double()');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', 'strict $.double()');
+ERROR: jsonpath item method .double() can only be applied to a string or numeric value
+select jsonb_path_query('{}', '$.double()');
+ERROR: jsonpath item method .double() can only be applied to a string or numeric value
+select jsonb_path_query('[]', 'strict $.double()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{}', '$.double()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('1.23', '$.double()');
+ jsonb_path_query
+------------------
+ 1.23
+(1 row)
+
+select jsonb_path_query('"1.23"', '$.double()');
+ jsonb_path_query
+------------------
+ 1.23
+(1 row)
+
+select jsonb_path_query('"1.23aaa"', '$.double()');
+ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number
+select jsonb_path_query('1e1000', '$.double()');
+ERROR: numeric argument of jsonpath item method .double() is out of range for type double precision
+select jsonb_path_query('"nan"', '$.double()');
+ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number
+select jsonb_path_query('"NaN"', '$.double()');
+ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number
+select jsonb_path_query('"inf"', '$.double()');
+ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number
+select jsonb_path_query('"-inf"', '$.double()');
+ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number
+select jsonb_path_query('"inf"', '$.double()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('"-inf"', '$.double()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('{}', '$.abs()');
+ERROR: jsonpath item method .abs() can only be applied to a numeric value
+select jsonb_path_query('true', '$.floor()');
+ERROR: jsonpath item method .floor() can only be applied to a numeric value
+select jsonb_path_query('"1.2"', '$.ceiling()');
+ERROR: jsonpath item method .ceiling() can only be applied to a numeric value
+select jsonb_path_query('{}', '$.abs()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('true', '$.floor()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('"1.2"', '$.ceiling()', silent => true);
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('["", "a", "abc", "abcabc"]', '$[*] ? (@ starts with "abc")');
+ jsonb_path_query
+------------------
+ "abc"
+ "abcabc"
+(2 rows)
+
+select jsonb_path_query('["", "a", "abc", "abcabc"]', 'strict $ ? (@[*] starts with "abc")');
+ jsonb_path_query
+----------------------------
+ ["", "a", "abc", "abcabc"]
+(1 row)
+
+select jsonb_path_query('["", "a", "abd", "abdabc"]', 'strict $ ? (@[*] starts with "abc")');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? (@[*] starts with "abc")');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? ((@[*] starts with "abc") is unknown)');
+ jsonb_path_query
+----------------------------
+ ["abc", "abcabc", null, 1]
+(1 row)
+
+select jsonb_path_query('[[null, 1, "abc", "abcabc"]]', 'lax $ ? (@[*] starts with "abc")');
+ jsonb_path_query
+----------------------------
+ [null, 1, "abc", "abcabc"]
+(1 row)
+
+select jsonb_path_query('[[null, 1, "abd", "abdabc"]]', 'lax $ ? ((@[*] starts with "abc") is unknown)');
+ jsonb_path_query
+----------------------------
+ [null, 1, "abd", "abdabc"]
+(1 row)
+
+select jsonb_path_query('[null, 1, "abd", "abdabc"]', 'lax $[*] ? ((@ starts with "abc") is unknown)');
+ jsonb_path_query
+------------------
+ null
+ 1
+(2 rows)
+
+select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c")');
+ jsonb_path_query
+------------------
+ "abc"
+ "abdacb"
+(2 rows)
+
+select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "i")');
+ jsonb_path_query
+------------------
+ "abc"
+ "aBdC"
+ "abdacb"
+(3 rows)
+
+select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "m")');
+ jsonb_path_query
+------------------
+ "abc"
+ "abdacb"
+ "adc\nabc"
+(3 rows)
+
+select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "s")');
+ jsonb_path_query
+------------------
+ "abc"
+ "abdacb"
+ "ab\nadc"
+(3 rows)
+
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "q")');
+ jsonb_path_query
+------------------
+ "a\\b"
+ "^a\\b$"
+(2 rows)
+
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "")');
+ jsonb_path_query
+------------------
+ "a\b"
+(1 row)
+
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "q")');
+ jsonb_path_query
+------------------
+ "^a\\b$"
+(1 row)
+
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "q")');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "iq")');
+ jsonb_path_query
+------------------
+ "^a\\b$"
+(1 row)
+
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "")');
+ jsonb_path_query
+------------------
+ "a\b"
+(1 row)
+
+select jsonb_path_query('null', '$.datetime()');
+ERROR: jsonpath item method .datetime() can only be applied to a string
+select jsonb_path_query('true', '$.datetime()');
+ERROR: jsonpath item method .datetime() can only be applied to a string
+select jsonb_path_query('1', '$.datetime()');
+ERROR: jsonpath item method .datetime() can only be applied to a string
+select jsonb_path_query('[]', '$.datetime()');
+ jsonb_path_query
+------------------
+(0 rows)
+
+select jsonb_path_query('[]', 'strict $.datetime()');
+ERROR: jsonpath item method .datetime() can only be applied to a string
+select jsonb_path_query('{}', '$.datetime()');
+ERROR: jsonpath item method .datetime() can only be applied to a string
+select jsonb_path_query('"bogus"', '$.datetime()');
+ERROR: datetime format is not recognized: "bogus"
+HINT: Use a datetime template argument to specify the input data format.
+select jsonb_path_query('"12:34"', '$.datetime("aaa")');
+ERROR: invalid datetime format separator: "a"
+select jsonb_path_query('"aaaa"', '$.datetime("HH24")');
+ERROR: invalid value "aa" for "HH24"
+DETAIL: Value must be an integer.
+select jsonb '"10-03-2017"' @? '$.datetime("dd-mm-yyyy")';
+ ?column?
+----------
+ t
+(1 row)
+
+select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy")');
+ jsonb_path_query
+------------------
+ "2017-03-10"
+(1 row)
+
+select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy").type()');
+ jsonb_path_query
+------------------
+ "date"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy")');
+ERROR: trailing characters remain in input string after datetime format
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy").type()');
+ERROR: trailing characters remain in input string after datetime format
+select jsonb_path_query('"10-03-2017 12:34"', ' $.datetime("dd-mm-yyyy HH24:MI").type()');
+ jsonb_path_query
+-------------------------------
+ "timestamp without time zone"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM").type()');
+ jsonb_path_query
+----------------------------
+ "timestamp with time zone"
+(1 row)
+
+select jsonb_path_query('"12:34:56"', '$.datetime("HH24:MI:SS").type()');
+ jsonb_path_query
+--------------------------
+ "time without time zone"
+(1 row)
+
+select jsonb_path_query('"12:34:56 +05:20"', '$.datetime("HH24:MI:SS TZH:TZM").type()');
+ jsonb_path_query
+-----------------------
+ "time with time zone"
+(1 row)
+
+select jsonb_path_query('"10-03-2017T12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
+ jsonb_path_query
+-----------------------
+ "2017-03-10T12:34:56"
+(1 row)
+
+select jsonb_path_query('"10-03-2017t12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
+ERROR: unmatched format character "T"
+select jsonb_path_query('"10-03-2017 12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
+ERROR: unmatched format character "T"
+set time zone '+00';
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")');
+ jsonb_path_query
+-----------------------
+ "2017-03-10T12:34:00"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+ERROR: input string is too short for datetime format
+select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:00+05:00"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:00-05:00"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:00+05:20"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:00-05:20"
+(1 row)
+
+select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")');
+ jsonb_path_query
+------------------
+ "12:34:00"
+(1 row)
+
+select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")');
+ERROR: input string is too short for datetime format
+select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")');
+ jsonb_path_query
+------------------
+ "12:34:00+05:00"
+(1 row)
+
+select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")');
+ jsonb_path_query
+------------------
+ "12:34:00-05:00"
+(1 row)
+
+select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")');
+ jsonb_path_query
+------------------
+ "12:34:00+05:20"
+(1 row)
+
+select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")');
+ jsonb_path_query
+------------------
+ "12:34:00-05:20"
+(1 row)
+
+set time zone '+10';
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")');
+ jsonb_path_query
+-----------------------
+ "2017-03-10T12:34:00"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+ERROR: input string is too short for datetime format
+select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:00+05:00"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:00-05:00"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:00+05:20"
+(1 row)
+
+select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:00-05:20"
+(1 row)
+
+select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")');
+ jsonb_path_query
+------------------
+ "12:34:00"
+(1 row)
+
+select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")');
+ERROR: input string is too short for datetime format
+select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")');
+ jsonb_path_query
+------------------
+ "12:34:00+05:00"
+(1 row)
+
+select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")');
+ jsonb_path_query
+------------------
+ "12:34:00-05:00"
+(1 row)
+
+select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")');
+ jsonb_path_query
+------------------
+ "12:34:00+05:20"
+(1 row)
+
+select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")');
+ jsonb_path_query
+------------------
+ "12:34:00-05:20"
+(1 row)
+
+set time zone default;
+select jsonb_path_query('"2017-03-10"', '$.datetime().type()');
+ jsonb_path_query
+------------------
+ "date"
+(1 row)
+
+select jsonb_path_query('"2017-03-10"', '$.datetime()');
+ jsonb_path_query
+------------------
+ "2017-03-10"
+(1 row)
+
+select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime().type()');
+ jsonb_path_query
+-------------------------------
+ "timestamp without time zone"
+(1 row)
+
+select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime()');
+ jsonb_path_query
+-----------------------
+ "2017-03-10T12:34:56"
+(1 row)
+
+select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime().type()');
+ jsonb_path_query
+----------------------------
+ "timestamp with time zone"
+(1 row)
+
+select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime()');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:56+03:00"
+(1 row)
+
+select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime().type()');
+ jsonb_path_query
+----------------------------
+ "timestamp with time zone"
+(1 row)
+
+select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime()');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:56+03:10"
+(1 row)
+
+select jsonb_path_query('"2017-03-10T12:34:56+3:10"', '$.datetime()');
+ jsonb_path_query
+-----------------------------
+ "2017-03-10T12:34:56+03:10"
+(1 row)
+
+select jsonb_path_query('"2017-03-10t12:34:56+3:10"', '$.datetime()');
+ERROR: datetime format is not recognized: "2017-03-10t12:34:56+3:10"
+HINT: Use a datetime template argument to specify the input data format.
+select jsonb_path_query('"12:34:56"', '$.datetime().type()');
+ jsonb_path_query
+--------------------------
+ "time without time zone"
+(1 row)
+
+select jsonb_path_query('"12:34:56"', '$.datetime()');
+ jsonb_path_query
+------------------
+ "12:34:56"
+(1 row)
+
+select jsonb_path_query('"12:34:56+3"', '$.datetime().type()');
+ jsonb_path_query
+-----------------------
+ "time with time zone"
+(1 row)
+
+select jsonb_path_query('"12:34:56+3"', '$.datetime()');
+ jsonb_path_query
+------------------
+ "12:34:56+03:00"
+(1 row)
+
+select jsonb_path_query('"12:34:56+3:10"', '$.datetime().type()');
+ jsonb_path_query
+-----------------------
+ "time with time zone"
+(1 row)
+
+select jsonb_path_query('"12:34:56+3:10"', '$.datetime()');
+ jsonb_path_query
+------------------
+ "12:34:56+03:10"
+(1 row)
+
+set time zone '+00';
+-- date comparison
+select jsonb_path_query(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))');
+ERROR: cannot convert value from date to timestamptz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))');
+ERROR: cannot convert value from date to timestamptz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))');
+ERROR: cannot convert value from date to timestamptz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query_tz(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))');
+ jsonb_path_query_tz
+-----------------------------
+ "2017-03-10"
+ "2017-03-10T00:00:00"
+ "2017-03-10T03:00:00+03:00"
+(3 rows)
+
+select jsonb_path_query_tz(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))');
+ jsonb_path_query_tz
+-----------------------------
+ "2017-03-10"
+ "2017-03-11"
+ "2017-03-10T00:00:00"
+ "2017-03-10T12:34:56"
+ "2017-03-10T03:00:00+03:00"
+(5 rows)
+
+select jsonb_path_query_tz(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))');
+ jsonb_path_query_tz
+-----------------------------
+ "2017-03-09"
+ "2017-03-10T01:02:03+04:00"
+(2 rows)
+
+-- time comparison
+select jsonb_path_query(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))');
+ERROR: cannot convert value from time to timetz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))');
+ERROR: cannot convert value from time to timetz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))');
+ERROR: cannot convert value from time to timetz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query_tz(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))');
+ jsonb_path_query_tz
+---------------------
+ "12:35:00"
+ "12:35:00+00:00"
+(2 rows)
+
+select jsonb_path_query_tz(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))');
+ jsonb_path_query_tz
+---------------------
+ "12:35:00"
+ "12:36:00"
+ "12:35:00+00:00"
+(3 rows)
+
+select jsonb_path_query_tz(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))');
+ jsonb_path_query_tz
+---------------------
+ "12:34:00"
+ "12:35:00+01:00"
+ "13:35:00+01:00"
+(3 rows)
+
+-- timetz comparison
+select jsonb_path_query(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))');
+ERROR: cannot convert value from time to timetz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))');
+ERROR: cannot convert value from time to timetz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))');
+ERROR: cannot convert value from time to timetz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query_tz(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))');
+ jsonb_path_query_tz
+---------------------
+ "12:35:00+01:00"
+(1 row)
+
+select jsonb_path_query_tz(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))');
+ jsonb_path_query_tz
+---------------------
+ "12:35:00+01:00"
+ "12:36:00+01:00"
+ "12:35:00-02:00"
+ "11:35:00"
+ "12:35:00"
+(5 rows)
+
+select jsonb_path_query_tz(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))');
+ jsonb_path_query_tz
+---------------------
+ "12:34:00+01:00"
+ "12:35:00+02:00"
+ "10:35:00"
+(3 rows)
+
+-- timestamp comparison
+select jsonb_path_query(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+ERROR: cannot convert value from timestamp to timestamptz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+ERROR: cannot convert value from timestamp to timestamptz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+ERROR: cannot convert value from timestamp to timestamptz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+ jsonb_path_query_tz
+-----------------------------
+ "2017-03-10T12:35:00"
+ "2017-03-10T13:35:00+01:00"
+(2 rows)
+
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+ jsonb_path_query_tz
+-----------------------------
+ "2017-03-10T12:35:00"
+ "2017-03-10T12:36:00"
+ "2017-03-10T13:35:00+01:00"
+ "2017-03-10T12:35:00-01:00"
+ "2017-03-11"
+(5 rows)
+
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+ jsonb_path_query_tz
+-----------------------------
+ "2017-03-10T12:34:00"
+ "2017-03-10T12:35:00+01:00"
+ "2017-03-10"
+(3 rows)
+
+-- timestamptz comparison
+select jsonb_path_query(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+ERROR: cannot convert value from timestamp to timestamptz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+ERROR: cannot convert value from timestamp to timestamptz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+ERROR: cannot convert value from timestamp to timestamptz without time zone usage
+HINT: Use *_tz() function for time zone support.
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+ jsonb_path_query_tz
+-----------------------------
+ "2017-03-10T12:35:00+01:00"
+ "2017-03-10T11:35:00"
+(2 rows)
+
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+ jsonb_path_query_tz
+-----------------------------
+ "2017-03-10T12:35:00+01:00"
+ "2017-03-10T12:36:00+01:00"
+ "2017-03-10T12:35:00-02:00"
+ "2017-03-10T11:35:00"
+ "2017-03-10T12:35:00"
+ "2017-03-11"
+(6 rows)
+
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+ jsonb_path_query_tz
+-----------------------------
+ "2017-03-10T12:34:00+01:00"
+ "2017-03-10T12:35:00+02:00"
+ "2017-03-10T10:35:00"
+ "2017-03-10"
+(4 rows)
+
+-- overflow during comparison
+select jsonb_path_query('"1000000-01-01"', '$.datetime() > "2020-01-01 12:00:00".datetime()'::jsonpath);
+ jsonb_path_query
+------------------
+ true
+(1 row)
+
+set time zone default;
+-- jsonpath operators
+SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*]');
+ jsonb_path_query
+------------------
+ {"a": 1}
+ {"a": 2}
+(2 rows)
+
+SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*] ? (@.a > 10)');
+ jsonb_path_query
+------------------
+(0 rows)
+
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a');
+ERROR: JSON object does not contain key "a"
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a');
+ jsonb_path_query_array
+------------------------
+ [1, 2]
+(1 row)
+
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)');
+ jsonb_path_query_array
+------------------------
+ [1]
+(1 row)
+
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)');
+ jsonb_path_query_array
+------------------------
+ []
+(1 row)
+
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}');
+ jsonb_path_query_array
+------------------------
+ [2, 3]
+(1 row)
+
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}');
+ jsonb_path_query_array
+------------------------
+ []
+(1 row)
+
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a');
+ERROR: JSON object does not contain key "a"
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a', silent => true);
+ jsonb_path_query_first
+------------------------
+ 1
+(1 row)
+
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a');
+ jsonb_path_query_first
+------------------------
+ 1
+(1 row)
+
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)');
+ jsonb_path_query_first
+------------------------
+ 1
+(1 row)
+
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)');
+ jsonb_path_query_first
+------------------------
+
+(1 row)
+
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}');
+ jsonb_path_query_first
+------------------------
+ 2
+(1 row)
+
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}');
+ jsonb_path_query_first
+------------------------
+
+(1 row)
+
+SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*].a ? (@ > 1)';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*] ? (@.a > 2)';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 1)');
+ jsonb_path_exists
+-------------------
+ t
+(1 row)
+
+SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 1, "max": 4}');
+ jsonb_path_exists
+-------------------
+ t
+(1 row)
+
+SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 3, "max": 4}');
+ jsonb_path_exists
+-------------------
+ f
+(1 row)
+
+SELECT jsonb_path_match('true', '$', silent => false);
+ jsonb_path_match
+------------------
+ t
+(1 row)
+
+SELECT jsonb_path_match('false', '$', silent => false);
+ jsonb_path_match
+------------------
+ f
+(1 row)
+
+SELECT jsonb_path_match('null', '$', silent => false);
+ jsonb_path_match
+------------------
+
+(1 row)
+
+SELECT jsonb_path_match('1', '$', silent => true);
+ jsonb_path_match
+------------------
+
+(1 row)
+
+SELECT jsonb_path_match('1', '$', silent => false);
+ERROR: single boolean result is expected
+SELECT jsonb_path_match('"a"', '$', silent => false);
+ERROR: single boolean result is expected
+SELECT jsonb_path_match('{}', '$', silent => false);
+ERROR: single boolean result is expected
+SELECT jsonb_path_match('[true]', '$', silent => false);
+ERROR: single boolean result is expected
+SELECT jsonb_path_match('{}', 'lax $.a', silent => false);
+ERROR: single boolean result is expected
+SELECT jsonb_path_match('{}', 'strict $.a', silent => false);
+ERROR: JSON object does not contain key "a"
+SELECT jsonb_path_match('{}', 'strict $.a', silent => true);
+ jsonb_path_match
+------------------
+
+(1 row)
+
+SELECT jsonb_path_match('[true, true]', '$[*]', silent => false);
+ERROR: single boolean result is expected
+SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 1';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 2';
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT jsonb_path_match('[{"a": 1}, {"a": 2}]', '$[*].a > 1');
+ jsonb_path_match
+------------------
+ t
+(1 row)
+
+-- test string comparison (Unicode codepoint collation)
+WITH str(j, num) AS
+(
+ SELECT jsonb_build_object('s', s), num
+ FROM unnest('{"", "a", "ab", "abc", "abcd", "b", "A", "AB", "ABC", "ABc", "ABcD", "B"}'::text[]) WITH ORDINALITY AS a(s, num)
+)
+SELECT
+ s1.j, s2.j,
+ jsonb_path_query_first(s1.j, '$.s < $s', vars => s2.j) lt,
+ jsonb_path_query_first(s1.j, '$.s <= $s', vars => s2.j) le,
+ jsonb_path_query_first(s1.j, '$.s == $s', vars => s2.j) eq,
+ jsonb_path_query_first(s1.j, '$.s >= $s', vars => s2.j) ge,
+ jsonb_path_query_first(s1.j, '$.s > $s', vars => s2.j) gt
+FROM str s1, str s2
+ORDER BY s1.num, s2.num;
+ j | j | lt | le | eq | ge | gt
+---------------+---------------+-------+-------+-------+-------+-------
+ {"s": ""} | {"s": ""} | false | true | true | true | false
+ {"s": ""} | {"s": "a"} | true | true | false | false | false
+ {"s": ""} | {"s": "ab"} | true | true | false | false | false
+ {"s": ""} | {"s": "abc"} | true | true | false | false | false
+ {"s": ""} | {"s": "abcd"} | true | true | false | false | false
+ {"s": ""} | {"s": "b"} | true | true | false | false | false
+ {"s": ""} | {"s": "A"} | true | true | false | false | false
+ {"s": ""} | {"s": "AB"} | true | true | false | false | false
+ {"s": ""} | {"s": "ABC"} | true | true | false | false | false
+ {"s": ""} | {"s": "ABc"} | true | true | false | false | false
+ {"s": ""} | {"s": "ABcD"} | true | true | false | false | false
+ {"s": ""} | {"s": "B"} | true | true | false | false | false
+ {"s": "a"} | {"s": ""} | false | false | false | true | true
+ {"s": "a"} | {"s": "a"} | false | true | true | true | false
+ {"s": "a"} | {"s": "ab"} | true | true | false | false | false
+ {"s": "a"} | {"s": "abc"} | true | true | false | false | false
+ {"s": "a"} | {"s": "abcd"} | true | true | false | false | false
+ {"s": "a"} | {"s": "b"} | true | true | false | false | false
+ {"s": "a"} | {"s": "A"} | false | false | false | true | true
+ {"s": "a"} | {"s": "AB"} | false | false | false | true | true
+ {"s": "a"} | {"s": "ABC"} | false | false | false | true | true
+ {"s": "a"} | {"s": "ABc"} | false | false | false | true | true
+ {"s": "a"} | {"s": "ABcD"} | false | false | false | true | true
+ {"s": "a"} | {"s": "B"} | false | false | false | true | true
+ {"s": "ab"} | {"s": ""} | false | false | false | true | true
+ {"s": "ab"} | {"s": "a"} | false | false | false | true | true
+ {"s": "ab"} | {"s": "ab"} | false | true | true | true | false
+ {"s": "ab"} | {"s": "abc"} | true | true | false | false | false
+ {"s": "ab"} | {"s": "abcd"} | true | true | false | false | false
+ {"s": "ab"} | {"s": "b"} | true | true | false | false | false
+ {"s": "ab"} | {"s": "A"} | false | false | false | true | true
+ {"s": "ab"} | {"s": "AB"} | false | false | false | true | true
+ {"s": "ab"} | {"s": "ABC"} | false | false | false | true | true
+ {"s": "ab"} | {"s": "ABc"} | false | false | false | true | true
+ {"s": "ab"} | {"s": "ABcD"} | false | false | false | true | true
+ {"s": "ab"} | {"s": "B"} | false | false | false | true | true
+ {"s": "abc"} | {"s": ""} | false | false | false | true | true
+ {"s": "abc"} | {"s": "a"} | false | false | false | true | true
+ {"s": "abc"} | {"s": "ab"} | false | false | false | true | true
+ {"s": "abc"} | {"s": "abc"} | false | true | true | true | false
+ {"s": "abc"} | {"s": "abcd"} | true | true | false | false | false
+ {"s": "abc"} | {"s": "b"} | true | true | false | false | false
+ {"s": "abc"} | {"s": "A"} | false | false | false | true | true
+ {"s": "abc"} | {"s": "AB"} | false | false | false | true | true
+ {"s": "abc"} | {"s": "ABC"} | false | false | false | true | true
+ {"s": "abc"} | {"s": "ABc"} | false | false | false | true | true
+ {"s": "abc"} | {"s": "ABcD"} | false | false | false | true | true
+ {"s": "abc"} | {"s": "B"} | false | false | false | true | true
+ {"s": "abcd"} | {"s": ""} | false | false | false | true | true
+ {"s": "abcd"} | {"s": "a"} | false | false | false | true | true
+ {"s": "abcd"} | {"s": "ab"} | false | false | false | true | true
+ {"s": "abcd"} | {"s": "abc"} | false | false | false | true | true
+ {"s": "abcd"} | {"s": "abcd"} | false | true | true | true | false
+ {"s": "abcd"} | {"s": "b"} | true | true | false | false | false
+ {"s": "abcd"} | {"s": "A"} | false | false | false | true | true
+ {"s": "abcd"} | {"s": "AB"} | false | false | false | true | true
+ {"s": "abcd"} | {"s": "ABC"} | false | false | false | true | true
+ {"s": "abcd"} | {"s": "ABc"} | false | false | false | true | true
+ {"s": "abcd"} | {"s": "ABcD"} | false | false | false | true | true
+ {"s": "abcd"} | {"s": "B"} | false | false | false | true | true
+ {"s": "b"} | {"s": ""} | false | false | false | true | true
+ {"s": "b"} | {"s": "a"} | false | false | false | true | true
+ {"s": "b"} | {"s": "ab"} | false | false | false | true | true
+ {"s": "b"} | {"s": "abc"} | false | false | false | true | true
+ {"s": "b"} | {"s": "abcd"} | false | false | false | true | true
+ {"s": "b"} | {"s": "b"} | false | true | true | true | false
+ {"s": "b"} | {"s": "A"} | false | false | false | true | true
+ {"s": "b"} | {"s": "AB"} | false | false | false | true | true
+ {"s": "b"} | {"s": "ABC"} | false | false | false | true | true
+ {"s": "b"} | {"s": "ABc"} | false | false | false | true | true
+ {"s": "b"} | {"s": "ABcD"} | false | false | false | true | true
+ {"s": "b"} | {"s": "B"} | false | false | false | true | true
+ {"s": "A"} | {"s": ""} | false | false | false | true | true
+ {"s": "A"} | {"s": "a"} | true | true | false | false | false
+ {"s": "A"} | {"s": "ab"} | true | true | false | false | false
+ {"s": "A"} | {"s": "abc"} | true | true | false | false | false
+ {"s": "A"} | {"s": "abcd"} | true | true | false | false | false
+ {"s": "A"} | {"s": "b"} | true | true | false | false | false
+ {"s": "A"} | {"s": "A"} | false | true | true | true | false
+ {"s": "A"} | {"s": "AB"} | true | true | false | false | false
+ {"s": "A"} | {"s": "ABC"} | true | true | false | false | false
+ {"s": "A"} | {"s": "ABc"} | true | true | false | false | false
+ {"s": "A"} | {"s": "ABcD"} | true | true | false | false | false
+ {"s": "A"} | {"s": "B"} | true | true | false | false | false
+ {"s": "AB"} | {"s": ""} | false | false | false | true | true
+ {"s": "AB"} | {"s": "a"} | true | true | false | false | false
+ {"s": "AB"} | {"s": "ab"} | true | true | false | false | false
+ {"s": "AB"} | {"s": "abc"} | true | true | false | false | false
+ {"s": "AB"} | {"s": "abcd"} | true | true | false | false | false
+ {"s": "AB"} | {"s": "b"} | true | true | false | false | false
+ {"s": "AB"} | {"s": "A"} | false | false | false | true | true
+ {"s": "AB"} | {"s": "AB"} | false | true | true | true | false
+ {"s": "AB"} | {"s": "ABC"} | true | true | false | false | false
+ {"s": "AB"} | {"s": "ABc"} | true | true | false | false | false
+ {"s": "AB"} | {"s": "ABcD"} | true | true | false | false | false
+ {"s": "AB"} | {"s": "B"} | true | true | false | false | false
+ {"s": "ABC"} | {"s": ""} | false | false | false | true | true
+ {"s": "ABC"} | {"s": "a"} | true | true | false | false | false
+ {"s": "ABC"} | {"s": "ab"} | true | true | false | false | false
+ {"s": "ABC"} | {"s": "abc"} | true | true | false | false | false
+ {"s": "ABC"} | {"s": "abcd"} | true | true | false | false | false
+ {"s": "ABC"} | {"s": "b"} | true | true | false | false | false
+ {"s": "ABC"} | {"s": "A"} | false | false | false | true | true
+ {"s": "ABC"} | {"s": "AB"} | false | false | false | true | true
+ {"s": "ABC"} | {"s": "ABC"} | false | true | true | true | false
+ {"s": "ABC"} | {"s": "ABc"} | true | true | false | false | false
+ {"s": "ABC"} | {"s": "ABcD"} | true | true | false | false | false
+ {"s": "ABC"} | {"s": "B"} | true | true | false | false | false
+ {"s": "ABc"} | {"s": ""} | false | false | false | true | true
+ {"s": "ABc"} | {"s": "a"} | true | true | false | false | false
+ {"s": "ABc"} | {"s": "ab"} | true | true | false | false | false
+ {"s": "ABc"} | {"s": "abc"} | true | true | false | false | false
+ {"s": "ABc"} | {"s": "abcd"} | true | true | false | false | false
+ {"s": "ABc"} | {"s": "b"} | true | true | false | false | false
+ {"s": "ABc"} | {"s": "A"} | false | false | false | true | true
+ {"s": "ABc"} | {"s": "AB"} | false | false | false | true | true
+ {"s": "ABc"} | {"s": "ABC"} | false | false | false | true | true
+ {"s": "ABc"} | {"s": "ABc"} | false | true | true | true | false
+ {"s": "ABc"} | {"s": "ABcD"} | true | true | false | false | false
+ {"s": "ABc"} | {"s": "B"} | true | true | false | false | false
+ {"s": "ABcD"} | {"s": ""} | false | false | false | true | true
+ {"s": "ABcD"} | {"s": "a"} | true | true | false | false | false
+ {"s": "ABcD"} | {"s": "ab"} | true | true | false | false | false
+ {"s": "ABcD"} | {"s": "abc"} | true | true | false | false | false
+ {"s": "ABcD"} | {"s": "abcd"} | true | true | false | false | false
+ {"s": "ABcD"} | {"s": "b"} | true | true | false | false | false
+ {"s": "ABcD"} | {"s": "A"} | false | false | false | true | true
+ {"s": "ABcD"} | {"s": "AB"} | false | false | false | true | true
+ {"s": "ABcD"} | {"s": "ABC"} | false | false | false | true | true
+ {"s": "ABcD"} | {"s": "ABc"} | false | false | false | true | true
+ {"s": "ABcD"} | {"s": "ABcD"} | false | true | true | true | false
+ {"s": "ABcD"} | {"s": "B"} | true | true | false | false | false
+ {"s": "B"} | {"s": ""} | false | false | false | true | true
+ {"s": "B"} | {"s": "a"} | true | true | false | false | false
+ {"s": "B"} | {"s": "ab"} | true | true | false | false | false
+ {"s": "B"} | {"s": "abc"} | true | true | false | false | false
+ {"s": "B"} | {"s": "abcd"} | true | true | false | false | false
+ {"s": "B"} | {"s": "b"} | true | true | false | false | false
+ {"s": "B"} | {"s": "A"} | false | false | false | true | true
+ {"s": "B"} | {"s": "AB"} | false | false | false | true | true
+ {"s": "B"} | {"s": "ABC"} | false | false | false | true | true
+ {"s": "B"} | {"s": "ABc"} | false | false | false | true | true
+ {"s": "B"} | {"s": "ABcD"} | false | false | false | true | true
+ {"s": "B"} | {"s": "B"} | false | true | true | true | false
+(144 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/jsonb_jsonpath.sql b/ydb/library/yql/tests/postgresql/original/cases/jsonb_jsonpath.sql
new file mode 100644
index 0000000000..60f73cb059
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/jsonb_jsonpath.sql
@@ -0,0 +1,587 @@
+select jsonb '{"a": 12}' @? '$';
+select jsonb '{"a": 12}' @? '1';
+select jsonb '{"a": 12}' @? '$.a.b';
+select jsonb '{"a": 12}' @? '$.b';
+select jsonb '{"a": 12}' @? '$.a + 2';
+select jsonb '{"a": 12}' @? '$.b + 2';
+select jsonb '{"a": {"a": 12}}' @? '$.a.a';
+select jsonb '{"a": {"a": 12}}' @? '$.*.a';
+select jsonb '{"b": {"a": 12}}' @? '$.*.a';
+select jsonb '{"b": {"a": 12}}' @? '$.*.b';
+select jsonb '{"b": {"a": 12}}' @? 'strict $.*.b';
+select jsonb '{}' @? '$.*';
+select jsonb '{"a": 1}' @? '$.*';
+select jsonb '{"a": {"b": 1}}' @? 'lax $.**{1}';
+select jsonb '{"a": {"b": 1}}' @? 'lax $.**{2}';
+select jsonb '{"a": {"b": 1}}' @? 'lax $.**{3}';
+select jsonb '[]' @? '$[*]';
+select jsonb '[1]' @? '$[*]';
+select jsonb '[1]' @? '$[1]';
+select jsonb '[1]' @? 'strict $[1]';
+select jsonb_path_query('[1]', 'strict $[1]');
+select jsonb_path_query('[1]', 'strict $[1]', silent => true);
+select jsonb '[1]' @? 'lax $[10000000000000000]';
+select jsonb '[1]' @? 'strict $[10000000000000000]';
+select jsonb_path_query('[1]', 'lax $[10000000000000000]');
+select jsonb_path_query('[1]', 'strict $[10000000000000000]');
+select jsonb '[1]' @? '$[0]';
+select jsonb '[1]' @? '$[0.3]';
+select jsonb '[1]' @? '$[0.5]';
+select jsonb '[1]' @? '$[0.9]';
+select jsonb '[1]' @? '$[1.2]';
+select jsonb '[1]' @? 'strict $[1.2]';
+select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] > @.b[*])';
+select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] >= @.b[*])';
+select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? '$ ? (@.a[*] >= @.b[*])';
+select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? 'strict $ ? (@.a[*] >= @.b[*])';
+select jsonb '{"a": [1,2,3], "b": [3,4,null]}' @? '$ ? (@.a[*] >= @.b[*])';
+select jsonb '1' @? '$ ? ((@ == "1") is unknown)';
+select jsonb '1' @? '$ ? ((@ == 1) is unknown)';
+select jsonb '[{"a": 1}, {"a": 2}]' @? '$[0 to 1] ? (@.a > 1)';
+
+select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => false);
+select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => true);
+select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => false);
+select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => true);
+
+select jsonb_path_query('1', 'lax $.a');
+select jsonb_path_query('1', 'strict $.a');
+select jsonb_path_query('1', 'strict $.*');
+select jsonb_path_query('1', 'strict $.a', silent => true);
+select jsonb_path_query('1', 'strict $.*', silent => true);
+select jsonb_path_query('[]', 'lax $.a');
+select jsonb_path_query('[]', 'strict $.a');
+select jsonb_path_query('[]', 'strict $.a', silent => true);
+select jsonb_path_query('{}', 'lax $.a');
+select jsonb_path_query('{}', 'strict $.a');
+select jsonb_path_query('{}', 'strict $.a', silent => true);
+
+select jsonb_path_query('1', 'strict $[1]');
+select jsonb_path_query('1', 'strict $[*]');
+select jsonb_path_query('[]', 'strict $[1]');
+select jsonb_path_query('[]', 'strict $["a"]');
+select jsonb_path_query('1', 'strict $[1]', silent => true);
+select jsonb_path_query('1', 'strict $[*]', silent => true);
+select jsonb_path_query('[]', 'strict $[1]', silent => true);
+select jsonb_path_query('[]', 'strict $["a"]', silent => true);
+
+select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.a');
+select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.b');
+select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.*');
+select jsonb_path_query('{"a": 12, "b": {"a": 13}}', 'lax $.*.a');
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].a');
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].*');
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0].a');
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[1].a');
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[2].a');
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0,1].a');
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10].a');
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10 / 0].a');
+select jsonb_path_query('[12, {"a": 13}, {"b": 14}, "ccc", true]', '$[2.5 - 1 to $.size() - 2]');
+select jsonb_path_query('1', 'lax $[0]');
+select jsonb_path_query('1', 'lax $[*]');
+select jsonb_path_query('[1]', 'lax $[0]');
+select jsonb_path_query('[1]', 'lax $[*]');
+select jsonb_path_query('[1,2,3]', 'lax $[*]');
+select jsonb_path_query('[1,2,3]', 'strict $[*].a');
+select jsonb_path_query('[1,2,3]', 'strict $[*].a', silent => true);
+select jsonb_path_query('[]', '$[last]');
+select jsonb_path_query('[]', '$[last ? (exists(last))]');
+select jsonb_path_query('[]', 'strict $[last]');
+select jsonb_path_query('[]', 'strict $[last]', silent => true);
+select jsonb_path_query('[1]', '$[last]');
+select jsonb_path_query('[1,2,3]', '$[last]');
+select jsonb_path_query('[1,2,3]', '$[last - 1]');
+select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "number")]');
+select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]');
+select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]', silent => true);
+
+select * from jsonb_path_query('{"a": 10}', '$');
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)');
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '1');
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '[{"value" : 13}]');
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 13}');
+select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 8}');
+select * from jsonb_path_query('{"a": 10}', '$.a ? (@ < $value)', '{"value" : 13}');
+select * from jsonb_path_query('[10,11,12,13,14,15]', '$[*] ? (@ < $value)', '{"value" : 13}');
+select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0,1] ? (@ < $x.value)', '{"x": {"value" : 13}}');
+select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0 to 2] ? (@ < $value)', '{"value" : 15}');
+select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == "1")');
+select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : "1"}');
+select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : null}');
+select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ != null)');
+select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ == null)');
+select * from jsonb_path_query('{}', '$ ? (@ == @)');
+select * from jsonb_path_query('[]', 'strict $ ? (@ == @)');
+
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2}');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2 to last}');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{3 to last}');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{last}');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to 2}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0 to last}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to last}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to 2}.b ? (@ > 0)');
+select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{2 to 3}.b ? (@ > 0)');
+
+select jsonb '{"a": {"b": 1}}' @? '$.**.b ? ( @ > 0)';
+select jsonb '{"a": {"b": 1}}' @? '$.**{0}.b ? ( @ > 0)';
+select jsonb '{"a": {"b": 1}}' @? '$.**{1}.b ? ( @ > 0)';
+select jsonb '{"a": {"b": 1}}' @? '$.**{0 to last}.b ? ( @ > 0)';
+select jsonb '{"a": {"b": 1}}' @? '$.**{1 to last}.b ? ( @ > 0)';
+select jsonb '{"a": {"b": 1}}' @? '$.**{1 to 2}.b ? ( @ > 0)';
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**.b ? ( @ > 0)';
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0}.b ? ( @ > 0)';
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1}.b ? ( @ > 0)';
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0 to last}.b ? ( @ > 0)';
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to last}.b ? ( @ > 0)';
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to 2}.b ? ( @ > 0)';
+select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{2 to 3}.b ? ( @ > 0)';
+
+select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x))');
+select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.y))');
+select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x ? (@ >= 2) ))');
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x))');
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x + "3"))');
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? ((exists (@.x + "3")) is unknown)');
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? (exists (@.x))');
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? ((exists (@.x)) is unknown)');
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? (exists (@[*].x))');
+select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? ((exists (@[*].x)) is unknown)');
+
+--test ternary logic
+select
+ x, y,
+ jsonb_path_query(
+ '[true, false, null]',
+ '$[*] ? (@ == true && ($x == true && $y == true) ||
+ @ == false && !($x == true && $y == true) ||
+ @ == null && ($x == true && $y == true) is unknown)',
+ jsonb_build_object('x', x, 'y', y)
+ ) as "x && y"
+from
+ (values (jsonb 'true'), ('false'), ('"null"')) x(x),
+ (values (jsonb 'true'), ('false'), ('"null"')) y(y);
+
+select
+ x, y,
+ jsonb_path_query(
+ '[true, false, null]',
+ '$[*] ? (@ == true && ($x == true || $y == true) ||
+ @ == false && !($x == true || $y == true) ||
+ @ == null && ($x == true || $y == true) is unknown)',
+ jsonb_build_object('x', x, 'y', y)
+ ) as "x || y"
+from
+ (values (jsonb 'true'), ('false'), ('"null"')) x(x),
+ (values (jsonb 'true'), ('false'), ('"null"')) y(y);
+
+select jsonb '{"a": 1, "b":1}' @? '$ ? (@.a == @.b)';
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$ ? (@.a == @.b)';
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? (@.a == @.b)';
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? ($.c.a == @.b)';
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$.* ? (@.a == @.b)';
+select jsonb '{"a": 1, "b":1}' @? '$.** ? (@.a == @.b)';
+select jsonb '{"c": {"a": 1, "b":1}}' @? '$.** ? (@.a == @.b)';
+
+select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == 1 + 1)');
+select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (1 + 1))');
+select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == @.b + 1)');
+select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (@.b + 1))');
+select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - 1)';
+select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -1)';
+select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -@.b)';
+select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - @.b)';
+select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - @.b)';
+select jsonb '{"c": {"a": 2, "b":1}}' @? '$.** ? (@.a == 1 - - @.b)';
+select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - +@.b)';
+select jsonb '[1,2,3]' @? '$ ? (+@[*] > +2)';
+select jsonb '[1,2,3]' @? '$ ? (+@[*] > +3)';
+select jsonb '[1,2,3]' @? '$ ? (-@[*] < -2)';
+select jsonb '[1,2,3]' @? '$ ? (-@[*] < -3)';
+select jsonb '1' @? '$ ? ($ > 0)';
+
+-- arithmetic errors
+select jsonb_path_query('[1,2,0,3]', '$[*] ? (2 / @ > 0)');
+select jsonb_path_query('[1,2,0,3]', '$[*] ? ((2 / @ > 0) is unknown)');
+select jsonb_path_query('0', '1 / $');
+select jsonb_path_query('0', '1 / $ + 2');
+select jsonb_path_query('0', '-(3 + 1 % $)');
+select jsonb_path_query('1', '$ + "2"');
+select jsonb_path_query('[1, 2]', '3 * $');
+select jsonb_path_query('"a"', '-$');
+select jsonb_path_query('[1,"2",3]', '+$');
+select jsonb_path_query('1', '$ + "2"', silent => true);
+select jsonb_path_query('[1, 2]', '3 * $', silent => true);
+select jsonb_path_query('"a"', '-$', silent => true);
+select jsonb_path_query('[1,"2",3]', '+$', silent => true);
+select jsonb '["1",2,0,3]' @? '-$[*]';
+select jsonb '[1,"2",0,3]' @? '-$[*]';
+select jsonb '["1",2,0,3]' @? 'strict -$[*]';
+select jsonb '[1,"2",0,3]' @? 'strict -$[*]';
+
+-- unwrapping of operator arguments in lax mode
+select jsonb_path_query('{"a": [2]}', 'lax $.a * 3');
+select jsonb_path_query('{"a": [2]}', 'lax $.a + 3');
+select jsonb_path_query('{"a": [2, 3, 4]}', 'lax -$.a');
+-- should fail
+select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3');
+select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3', silent => true);
+
+-- extension: boolean expressions
+select jsonb_path_query('2', '$ > 1');
+select jsonb_path_query('2', '$ <= 1');
+select jsonb_path_query('2', '$ == "2"');
+select jsonb '2' @? '$ == "2"';
+
+select jsonb '2' @@ '$ > 1';
+select jsonb '2' @@ '$ <= 1';
+select jsonb '2' @@ '$ == "2"';
+select jsonb '2' @@ '1';
+select jsonb '{}' @@ '$';
+select jsonb '[]' @@ '$';
+select jsonb '[1,2,3]' @@ '$[*]';
+select jsonb '[]' @@ '$[*]';
+select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] > $x) [1]', '{"x": 1}');
+select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] < $x) [1]', '{"x": 2}');
+
+select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => false);
+select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => true);
+select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => false);
+select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => true);
+
+
+select jsonb_path_query('[null,1,true,"a",[],{}]', '$.type()');
+select jsonb_path_query('[null,1,true,"a",[],{}]', 'lax $.type()');
+select jsonb_path_query('[null,1,true,"a",[],{}]', '$[*].type()');
+select jsonb_path_query('null', 'null.type()');
+select jsonb_path_query('null', 'true.type()');
+select jsonb_path_query('null', '(123).type()');
+select jsonb_path_query('null', '"123".type()');
+
+select jsonb_path_query('{"a": 2}', '($.a - 5).abs() + 10');
+select jsonb_path_query('{"a": 2.5}', '-($.a * $.a).floor() % 4.3');
+select jsonb_path_query('[1, 2, 3]', '($[*] > 2) ? (@ == true)');
+select jsonb_path_query('[1, 2, 3]', '($[*] > 3).type()');
+select jsonb_path_query('[1, 2, 3]', '($[*].a > 3).type()');
+select jsonb_path_query('[1, 2, 3]', 'strict ($[*].a > 3).type()');
+
+select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()');
+select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()', silent => true);
+select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'lax $[*].size()');
+
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].abs()');
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].floor()');
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling()');
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs()');
+select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs().type()');
+
+select jsonb_path_query('[{},1]', '$[*].keyvalue()');
+select jsonb_path_query('[{},1]', '$[*].keyvalue()', silent => true);
+select jsonb_path_query('{}', '$.keyvalue()');
+select jsonb_path_query('{"a": 1, "b": [1, 2], "c": {"a": "bbb"}}', '$.keyvalue()');
+select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', '$[*].keyvalue()');
+select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue()');
+select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'lax $.keyvalue()');
+select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue().a');
+select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue()';
+select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue().key';
+
+select jsonb_path_query('null', '$.double()');
+select jsonb_path_query('true', '$.double()');
+select jsonb_path_query('null', '$.double()', silent => true);
+select jsonb_path_query('true', '$.double()', silent => true);
+select jsonb_path_query('[]', '$.double()');
+select jsonb_path_query('[]', 'strict $.double()');
+select jsonb_path_query('{}', '$.double()');
+select jsonb_path_query('[]', 'strict $.double()', silent => true);
+select jsonb_path_query('{}', '$.double()', silent => true);
+select jsonb_path_query('1.23', '$.double()');
+select jsonb_path_query('"1.23"', '$.double()');
+select jsonb_path_query('"1.23aaa"', '$.double()');
+select jsonb_path_query('1e1000', '$.double()');
+select jsonb_path_query('"nan"', '$.double()');
+select jsonb_path_query('"NaN"', '$.double()');
+select jsonb_path_query('"inf"', '$.double()');
+select jsonb_path_query('"-inf"', '$.double()');
+select jsonb_path_query('"inf"', '$.double()', silent => true);
+select jsonb_path_query('"-inf"', '$.double()', silent => true);
+
+select jsonb_path_query('{}', '$.abs()');
+select jsonb_path_query('true', '$.floor()');
+select jsonb_path_query('"1.2"', '$.ceiling()');
+select jsonb_path_query('{}', '$.abs()', silent => true);
+select jsonb_path_query('true', '$.floor()', silent => true);
+select jsonb_path_query('"1.2"', '$.ceiling()', silent => true);
+
+select jsonb_path_query('["", "a", "abc", "abcabc"]', '$[*] ? (@ starts with "abc")');
+select jsonb_path_query('["", "a", "abc", "abcabc"]', 'strict $ ? (@[*] starts with "abc")');
+select jsonb_path_query('["", "a", "abd", "abdabc"]', 'strict $ ? (@[*] starts with "abc")');
+select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? (@[*] starts with "abc")');
+select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? ((@[*] starts with "abc") is unknown)');
+select jsonb_path_query('[[null, 1, "abc", "abcabc"]]', 'lax $ ? (@[*] starts with "abc")');
+select jsonb_path_query('[[null, 1, "abd", "abdabc"]]', 'lax $ ? ((@[*] starts with "abc") is unknown)');
+select jsonb_path_query('[null, 1, "abd", "abdabc"]', 'lax $[*] ? ((@ starts with "abc") is unknown)');
+
+select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c")');
+select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "i")');
+select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "m")');
+select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "s")');
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "q")');
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "")');
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "q")');
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "q")');
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "iq")');
+select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "")');
+
+select jsonb_path_query('null', '$.datetime()');
+select jsonb_path_query('true', '$.datetime()');
+select jsonb_path_query('1', '$.datetime()');
+select jsonb_path_query('[]', '$.datetime()');
+select jsonb_path_query('[]', 'strict $.datetime()');
+select jsonb_path_query('{}', '$.datetime()');
+select jsonb_path_query('"bogus"', '$.datetime()');
+select jsonb_path_query('"12:34"', '$.datetime("aaa")');
+select jsonb_path_query('"aaaa"', '$.datetime("HH24")');
+
+select jsonb '"10-03-2017"' @? '$.datetime("dd-mm-yyyy")';
+select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy")');
+select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy").type()');
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy")');
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy").type()');
+
+select jsonb_path_query('"10-03-2017 12:34"', ' $.datetime("dd-mm-yyyy HH24:MI").type()');
+select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM").type()');
+select jsonb_path_query('"12:34:56"', '$.datetime("HH24:MI:SS").type()');
+select jsonb_path_query('"12:34:56 +05:20"', '$.datetime("HH24:MI:SS TZH:TZM").type()');
+
+select jsonb_path_query('"10-03-2017T12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
+select jsonb_path_query('"10-03-2017t12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
+select jsonb_path_query('"10-03-2017 12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")');
+
+set time zone '+00';
+
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")');
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
+select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
+select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")');
+select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")');
+select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")');
+select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")');
+select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")');
+select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")');
+
+set time zone '+10';
+
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")');
+select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")');
+select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
+select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")');
+select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")');
+select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")');
+select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")');
+select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")');
+select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")');
+select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")');
+
+set time zone default;
+
+select jsonb_path_query('"2017-03-10"', '$.datetime().type()');
+select jsonb_path_query('"2017-03-10"', '$.datetime()');
+select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime().type()');
+select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime()');
+select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime().type()');
+select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime()');
+select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime().type()');
+select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime()');
+select jsonb_path_query('"2017-03-10T12:34:56+3:10"', '$.datetime()');
+select jsonb_path_query('"2017-03-10t12:34:56+3:10"', '$.datetime()');
+select jsonb_path_query('"12:34:56"', '$.datetime().type()');
+select jsonb_path_query('"12:34:56"', '$.datetime()');
+select jsonb_path_query('"12:34:56+3"', '$.datetime().type()');
+select jsonb_path_query('"12:34:56+3"', '$.datetime()');
+select jsonb_path_query('"12:34:56+3:10"', '$.datetime().type()');
+select jsonb_path_query('"12:34:56+3:10"', '$.datetime()');
+
+set time zone '+00';
+
+-- date comparison
+select jsonb_path_query(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))');
+select jsonb_path_query(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))');
+select jsonb_path_query(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))');
+select jsonb_path_query_tz(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))');
+select jsonb_path_query_tz(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))');
+select jsonb_path_query_tz(
+ '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]',
+ '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))');
+
+-- time comparison
+select jsonb_path_query(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))');
+select jsonb_path_query(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))');
+select jsonb_path_query(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))');
+select jsonb_path_query_tz(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))');
+select jsonb_path_query_tz(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))');
+select jsonb_path_query_tz(
+ '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]',
+ '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))');
+
+-- timetz comparison
+select jsonb_path_query(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))');
+select jsonb_path_query(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))');
+select jsonb_path_query(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))');
+select jsonb_path_query_tz(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))');
+select jsonb_path_query_tz(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))');
+select jsonb_path_query_tz(
+ '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]',
+ '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))');
+
+-- timestamp comparison
+select jsonb_path_query(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+select jsonb_path_query(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+select jsonb_path_query(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))');
+
+-- timestamptz comparison
+select jsonb_path_query(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+select jsonb_path_query(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+select jsonb_path_query(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+select jsonb_path_query_tz(
+ '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]',
+ '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))');
+
+-- overflow during comparison
+select jsonb_path_query('"1000000-01-01"', '$.datetime() > "2020-01-01 12:00:00".datetime()'::jsonpath);
+
+set time zone default;
+
+-- jsonpath operators
+
+SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*]');
+SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*] ? (@.a > 10)');
+
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a');
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a');
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)');
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)');
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}');
+SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}');
+
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a');
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a', silent => true);
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a');
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)');
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)');
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}');
+SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}');
+
+SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*].a ? (@ > 1)';
+SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*] ? (@.a > 2)';
+SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 1)');
+SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 1, "max": 4}');
+SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 3, "max": 4}');
+
+SELECT jsonb_path_match('true', '$', silent => false);
+SELECT jsonb_path_match('false', '$', silent => false);
+SELECT jsonb_path_match('null', '$', silent => false);
+SELECT jsonb_path_match('1', '$', silent => true);
+SELECT jsonb_path_match('1', '$', silent => false);
+SELECT jsonb_path_match('"a"', '$', silent => false);
+SELECT jsonb_path_match('{}', '$', silent => false);
+SELECT jsonb_path_match('[true]', '$', silent => false);
+SELECT jsonb_path_match('{}', 'lax $.a', silent => false);
+SELECT jsonb_path_match('{}', 'strict $.a', silent => false);
+SELECT jsonb_path_match('{}', 'strict $.a', silent => true);
+SELECT jsonb_path_match('[true, true]', '$[*]', silent => false);
+SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 1';
+SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 2';
+SELECT jsonb_path_match('[{"a": 1}, {"a": 2}]', '$[*].a > 1');
+
+-- test string comparison (Unicode codepoint collation)
+WITH str(j, num) AS
+(
+ SELECT jsonb_build_object('s', s), num
+ FROM unnest('{"", "a", "ab", "abc", "abcd", "b", "A", "AB", "ABC", "ABc", "ABcD", "B"}'::text[]) WITH ORDINALITY AS a(s, num)
+)
+SELECT
+ s1.j, s2.j,
+ jsonb_path_query_first(s1.j, '$.s < $s', vars => s2.j) lt,
+ jsonb_path_query_first(s1.j, '$.s <= $s', vars => s2.j) le,
+ jsonb_path_query_first(s1.j, '$.s == $s', vars => s2.j) eq,
+ jsonb_path_query_first(s1.j, '$.s >= $s', vars => s2.j) ge,
+ jsonb_path_query_first(s1.j, '$.s > $s', vars => s2.j) gt
+FROM str s1, str s2
+ORDER BY s1.num, s2.num;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/jsonpath.out b/ydb/library/yql/tests/postgresql/original/cases/jsonpath.out
new file mode 100644
index 0000000000..6dab98d03a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/jsonpath.out
@@ -0,0 +1,964 @@
+--jsonpath io
+select ''::jsonpath;
+ERROR: invalid input syntax for type jsonpath: ""
+LINE 1: select ''::jsonpath;
+ ^
+select '$'::jsonpath;
+ jsonpath
+----------
+ $
+(1 row)
+
+select 'strict $'::jsonpath;
+ jsonpath
+----------
+ strict $
+(1 row)
+
+select 'lax $'::jsonpath;
+ jsonpath
+----------
+ $
+(1 row)
+
+select '$.a'::jsonpath;
+ jsonpath
+----------
+ $."a"
+(1 row)
+
+select '$.a.v'::jsonpath;
+ jsonpath
+-----------
+ $."a"."v"
+(1 row)
+
+select '$.a.*'::jsonpath;
+ jsonpath
+----------
+ $."a".*
+(1 row)
+
+select '$.*[*]'::jsonpath;
+ jsonpath
+----------
+ $.*[*]
+(1 row)
+
+select '$.a[*]'::jsonpath;
+ jsonpath
+----------
+ $."a"[*]
+(1 row)
+
+select '$.a[*][*]'::jsonpath;
+ jsonpath
+-------------
+ $."a"[*][*]
+(1 row)
+
+select '$[*]'::jsonpath;
+ jsonpath
+----------
+ $[*]
+(1 row)
+
+select '$[0]'::jsonpath;
+ jsonpath
+----------
+ $[0]
+(1 row)
+
+select '$[*][0]'::jsonpath;
+ jsonpath
+----------
+ $[*][0]
+(1 row)
+
+select '$[*].a'::jsonpath;
+ jsonpath
+----------
+ $[*]."a"
+(1 row)
+
+select '$[*][0].a.b'::jsonpath;
+ jsonpath
+-----------------
+ $[*][0]."a"."b"
+(1 row)
+
+select '$.a.**.b'::jsonpath;
+ jsonpath
+--------------
+ $."a".**."b"
+(1 row)
+
+select '$.a.**{2}.b'::jsonpath;
+ jsonpath
+-----------------
+ $."a".**{2}."b"
+(1 row)
+
+select '$.a.**{2 to 2}.b'::jsonpath;
+ jsonpath
+-----------------
+ $."a".**{2}."b"
+(1 row)
+
+select '$.a.**{2 to 5}.b'::jsonpath;
+ jsonpath
+----------------------
+ $."a".**{2 to 5}."b"
+(1 row)
+
+select '$.a.**{0 to 5}.b'::jsonpath;
+ jsonpath
+----------------------
+ $."a".**{0 to 5}."b"
+(1 row)
+
+select '$.a.**{5 to last}.b'::jsonpath;
+ jsonpath
+-------------------------
+ $."a".**{5 to last}."b"
+(1 row)
+
+select '$.a.**{last}.b'::jsonpath;
+ jsonpath
+--------------------
+ $."a".**{last}."b"
+(1 row)
+
+select '$.a.**{last to 5}.b'::jsonpath;
+ jsonpath
+-------------------------
+ $."a".**{last to 5}."b"
+(1 row)
+
+select '$+1'::jsonpath;
+ jsonpath
+----------
+ ($ + 1)
+(1 row)
+
+select '$-1'::jsonpath;
+ jsonpath
+----------
+ ($ - 1)
+(1 row)
+
+select '$--+1'::jsonpath;
+ jsonpath
+----------
+ ($ - -1)
+(1 row)
+
+select '$.a/+-1'::jsonpath;
+ jsonpath
+--------------
+ ($."a" / -1)
+(1 row)
+
+select '1 * 2 + 4 % -3 != false'::jsonpath;
+ jsonpath
+---------------------------
+ (1 * 2 + 4 % -3 != false)
+(1 row)
+
+select '"\b\f\r\n\t\v\"\''\\"'::jsonpath;
+ jsonpath
+-------------------------
+ "\b\f\r\n\t\u000b\"'\\"
+(1 row)
+
+select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath;
+ jsonpath
+----------
+ "PgSQL"
+(1 row)
+
+select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath;
+ jsonpath
+---------------------
+ $."fooPgSQL\t\"bar"
+(1 row)
+
+select '"\z"'::jsonpath; -- unrecognized escape is just the literal char
+ jsonpath
+----------
+ "z"
+(1 row)
+
+select '$.g ? ($.a == 1)'::jsonpath;
+ jsonpath
+--------------------
+ $."g"?($."a" == 1)
+(1 row)
+
+select '$.g ? (@ == 1)'::jsonpath;
+ jsonpath
+----------------
+ $."g"?(@ == 1)
+(1 row)
+
+select '$.g ? (@.a == 1)'::jsonpath;
+ jsonpath
+--------------------
+ $."g"?(@."a" == 1)
+(1 row)
+
+select '$.g ? (@.a == 1 || @.a == 4)'::jsonpath;
+ jsonpath
+----------------------------------
+ $."g"?(@."a" == 1 || @."a" == 4)
+(1 row)
+
+select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath;
+ jsonpath
+----------------------------------
+ $."g"?(@."a" == 1 && @."a" == 4)
+(1 row)
+
+select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath;
+ jsonpath
+------------------------------------------------
+ $."g"?(@."a" == 1 || @."a" == 4 && @."b" == 7)
+(1 row)
+
+select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath;
+ jsonpath
+---------------------------------------------------
+ $."g"?(@."a" == 1 || !(@."a" == 4) && @."b" == 7)
+(1 row)
+
+select '$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath;
+ jsonpath
+-------------------------------------------------------------------
+ $."g"?(@."a" == 1 || !(@."x" >= 123 || @."a" == 4) && @."b" == 7)
+(1 row)
+
+select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath;
+ jsonpath
+---------------------------------------
+ $."g"?(@."x" >= @[*]?(@."a" > "abc"))
+(1 row)
+
+select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath;
+ jsonpath
+-------------------------------------------------
+ $."g"?((@."x" >= 123 || @."a" == 4) is unknown)
+(1 row)
+
+select '$.g ? (exists (@.x))'::jsonpath;
+ jsonpath
+------------------------
+ $."g"?(exists (@."x"))
+(1 row)
+
+select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath;
+ jsonpath
+----------------------------------
+ $."g"?(exists (@."x"?(@ == 14)))
+(1 row)
+
+select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath;
+ jsonpath
+------------------------------------------------------------------
+ $."g"?((@."x" >= 123 || @."a" == 4) && exists (@."x"?(@ == 14)))
+(1 row)
+
+select '$.g ? (+@.x >= +-(+@.a + 2))'::jsonpath;
+ jsonpath
+------------------------------------
+ $."g"?(+@."x" >= +(-(+@."a" + 2)))
+(1 row)
+
+select '$a'::jsonpath;
+ jsonpath
+----------
+ $"a"
+(1 row)
+
+select '$a.b'::jsonpath;
+ jsonpath
+----------
+ $"a"."b"
+(1 row)
+
+select '$a[*]'::jsonpath;
+ jsonpath
+----------
+ $"a"[*]
+(1 row)
+
+select '$.g ? (@.zip == $zip)'::jsonpath;
+ jsonpath
+---------------------------
+ $."g"?(@."zip" == $"zip")
+(1 row)
+
+select '$.a[1,2, 3 to 16]'::jsonpath;
+ jsonpath
+--------------------
+ $."a"[1,2,3 to 16]
+(1 row)
+
+select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath;
+ jsonpath
+----------------------------------------
+ $."a"[$"a" + 1,$"b"[*] to -($[0] * 2)]
+(1 row)
+
+select '$.a[$.a.size() - 3]'::jsonpath;
+ jsonpath
+-------------------------
+ $."a"[$."a".size() - 3]
+(1 row)
+
+select 'last'::jsonpath;
+ERROR: LAST is allowed only in array subscripts
+LINE 1: select 'last'::jsonpath;
+ ^
+select '"last"'::jsonpath;
+ jsonpath
+----------
+ "last"
+(1 row)
+
+select '$.last'::jsonpath;
+ jsonpath
+----------
+ $."last"
+(1 row)
+
+select '$ ? (last > 0)'::jsonpath;
+ERROR: LAST is allowed only in array subscripts
+LINE 1: select '$ ? (last > 0)'::jsonpath;
+ ^
+select '$[last]'::jsonpath;
+ jsonpath
+----------
+ $[last]
+(1 row)
+
+select '$[$[0] ? (last > 0)]'::jsonpath;
+ jsonpath
+--------------------
+ $[$[0]?(last > 0)]
+(1 row)
+
+select 'null.type()'::jsonpath;
+ jsonpath
+-------------
+ null.type()
+(1 row)
+
+select '1.type()'::jsonpath;
+ jsonpath
+----------
+ 1.type()
+(1 row)
+
+select '(1).type()'::jsonpath;
+ jsonpath
+----------
+ 1.type()
+(1 row)
+
+select '1.2.type()'::jsonpath;
+ jsonpath
+------------
+ 1.2.type()
+(1 row)
+
+select '"aaa".type()'::jsonpath;
+ jsonpath
+--------------
+ "aaa".type()
+(1 row)
+
+select 'true.type()'::jsonpath;
+ jsonpath
+-------------
+ true.type()
+(1 row)
+
+select '$.double().floor().ceiling().abs()'::jsonpath;
+ jsonpath
+------------------------------------
+ $.double().floor().ceiling().abs()
+(1 row)
+
+select '$.keyvalue().key'::jsonpath;
+ jsonpath
+--------------------
+ $.keyvalue()."key"
+(1 row)
+
+select '$.datetime()'::jsonpath;
+ jsonpath
+--------------
+ $.datetime()
+(1 row)
+
+select '$.datetime("datetime template")'::jsonpath;
+ jsonpath
+---------------------------------
+ $.datetime("datetime template")
+(1 row)
+
+select '$ ? (@ starts with "abc")'::jsonpath;
+ jsonpath
+-------------------------
+ $?(@ starts with "abc")
+(1 row)
+
+select '$ ? (@ starts with $var)'::jsonpath;
+ jsonpath
+--------------------------
+ $?(@ starts with $"var")
+(1 row)
+
+select '$ ? (@ like_regex "(invalid pattern")'::jsonpath;
+ERROR: invalid regular expression: parentheses () not balanced
+LINE 1: select '$ ? (@ like_regex "(invalid pattern")'::jsonpath;
+ ^
+select '$ ? (@ like_regex "pattern")'::jsonpath;
+ jsonpath
+----------------------------
+ $?(@ like_regex "pattern")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "")'::jsonpath;
+ jsonpath
+----------------------------
+ $?(@ like_regex "pattern")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath;
+ jsonpath
+-------------------------------------
+ $?(@ like_regex "pattern" flag "i")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath;
+ jsonpath
+--------------------------------------
+ $?(@ like_regex "pattern" flag "is")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "isim")'::jsonpath;
+ jsonpath
+---------------------------------------
+ $?(@ like_regex "pattern" flag "ism")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath;
+ERROR: XQuery "x" flag (expanded regular expressions) is not implemented
+LINE 1: select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath;
+ ^
+select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath;
+ jsonpath
+-------------------------------------
+ $?(@ like_regex "pattern" flag "q")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath;
+ jsonpath
+--------------------------------------
+ $?(@ like_regex "pattern" flag "iq")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath;
+ jsonpath
+-----------------------------------------
+ $?(@ like_regex "pattern" flag "ismxq")
+(1 row)
+
+select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath;
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath;
+ ^
+DETAIL: unrecognized flag character "a" in LIKE_REGEX predicate
+select '$ < 1'::jsonpath;
+ jsonpath
+----------
+ ($ < 1)
+(1 row)
+
+select '($ < 1) || $.a.b <= $x'::jsonpath;
+ jsonpath
+------------------------------
+ ($ < 1 || $."a"."b" <= $"x")
+(1 row)
+
+select '@ + 1'::jsonpath;
+ERROR: @ is not allowed in root expressions
+LINE 1: select '@ + 1'::jsonpath;
+ ^
+select '($).a.b'::jsonpath;
+ jsonpath
+-----------
+ $."a"."b"
+(1 row)
+
+select '($.a.b).c.d'::jsonpath;
+ jsonpath
+-------------------
+ $."a"."b"."c"."d"
+(1 row)
+
+select '($.a.b + -$.x.y).c.d'::jsonpath;
+ jsonpath
+----------------------------------
+ ($."a"."b" + -$."x"."y")."c"."d"
+(1 row)
+
+select '(-+$.a.b).c.d'::jsonpath;
+ jsonpath
+-------------------------
+ (-(+$."a"."b"))."c"."d"
+(1 row)
+
+select '1 + ($.a.b + 2).c.d'::jsonpath;
+ jsonpath
+-------------------------------
+ (1 + ($."a"."b" + 2)."c"."d")
+(1 row)
+
+select '1 + ($.a.b > 2).c.d'::jsonpath;
+ jsonpath
+-------------------------------
+ (1 + ($."a"."b" > 2)."c"."d")
+(1 row)
+
+select '($)'::jsonpath;
+ jsonpath
+----------
+ $
+(1 row)
+
+select '(($))'::jsonpath;
+ jsonpath
+----------
+ $
+(1 row)
+
+select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath;
+ jsonpath
+-------------------------------------------------
+ (($ + 1)."a" + 2."b"?(@ > 1 || exists (@."c")))
+(1 row)
+
+select '$ ? (@.a < 1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < -1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < -1)
+(1 row)
+
+select '$ ? (@.a < +1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < .1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < .1)'::jsonpath;
+ ^
+select '$ ? (@.a < -.1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < -.1)'::jsonpath;
+ ^
+select '$ ? (@.a < +.1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < +.1)'::jsonpath;
+ ^
+select '$ ? (@.a < 0.1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 0.1)
+(1 row)
+
+select '$ ? (@.a < -0.1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < -0.1)
+(1 row)
+
+select '$ ? (@.a < +0.1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 0.1)
+(1 row)
+
+select '$ ? (@.a < 10.1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 10.1)
+(1 row)
+
+select '$ ? (@.a < -10.1)'::jsonpath;
+ jsonpath
+-------------------
+ $?(@."a" < -10.1)
+(1 row)
+
+select '$ ? (@.a < +10.1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 10.1)
+(1 row)
+
+select '$ ? (@.a < 1e1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < 10)
+(1 row)
+
+select '$ ? (@.a < -1e1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < -10)
+(1 row)
+
+select '$ ? (@.a < +1e1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < 10)
+(1 row)
+
+select '$ ? (@.a < .1e1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < .1e1)'::jsonpath;
+ ^
+select '$ ? (@.a < -.1e1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < -.1e1)'::jsonpath;
+ ^
+select '$ ? (@.a < +.1e1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < +.1e1)'::jsonpath;
+ ^
+select '$ ? (@.a < 0.1e1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < -0.1e1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < -1)
+(1 row)
+
+select '$ ? (@.a < +0.1e1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < 10.1e1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 101)
+(1 row)
+
+select '$ ? (@.a < -10.1e1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < -101)
+(1 row)
+
+select '$ ? (@.a < +10.1e1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 101)
+(1 row)
+
+select '$ ? (@.a < 1e-1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 0.1)
+(1 row)
+
+select '$ ? (@.a < -1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < -0.1)
+(1 row)
+
+select '$ ? (@.a < +1e-1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 0.1)
+(1 row)
+
+select '$ ? (@.a < .1e-1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < .1e-1)'::jsonpath;
+ ^
+select '$ ? (@.a < -.1e-1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < -.1e-1)'::jsonpath;
+ ^
+select '$ ? (@.a < +.1e-1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < +.1e-1)'::jsonpath;
+ ^
+select '$ ? (@.a < 0.1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 0.01)
+(1 row)
+
+select '$ ? (@.a < -0.1e-1)'::jsonpath;
+ jsonpath
+-------------------
+ $?(@."a" < -0.01)
+(1 row)
+
+select '$ ? (@.a < +0.1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 0.01)
+(1 row)
+
+select '$ ? (@.a < 10.1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 1.01)
+(1 row)
+
+select '$ ? (@.a < -10.1e-1)'::jsonpath;
+ jsonpath
+-------------------
+ $?(@."a" < -1.01)
+(1 row)
+
+select '$ ? (@.a < +10.1e-1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < 1.01)
+(1 row)
+
+select '$ ? (@.a < 1e+1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < 10)
+(1 row)
+
+select '$ ? (@.a < -1e+1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < -10)
+(1 row)
+
+select '$ ? (@.a < +1e+1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < 10)
+(1 row)
+
+select '$ ? (@.a < .1e+1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < .1e+1)'::jsonpath;
+ ^
+select '$ ? (@.a < -.1e+1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < -.1e+1)'::jsonpath;
+ ^
+select '$ ? (@.a < +.1e+1)'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '$ ? (@.a < +.1e+1)'::jsonpath;
+ ^
+select '$ ? (@.a < 0.1e+1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < -0.1e+1)'::jsonpath;
+ jsonpath
+----------------
+ $?(@."a" < -1)
+(1 row)
+
+select '$ ? (@.a < +0.1e+1)'::jsonpath;
+ jsonpath
+---------------
+ $?(@."a" < 1)
+(1 row)
+
+select '$ ? (@.a < 10.1e+1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 101)
+(1 row)
+
+select '$ ? (@.a < -10.1e+1)'::jsonpath;
+ jsonpath
+------------------
+ $?(@."a" < -101)
+(1 row)
+
+select '$ ? (@.a < +10.1e+1)'::jsonpath;
+ jsonpath
+-----------------
+ $?(@."a" < 101)
+(1 row)
+
+select '0'::jsonpath;
+ jsonpath
+----------
+ 0
+(1 row)
+
+select '00'::jsonpath;
+ERROR: syntax error at end of jsonpath input
+LINE 1: select '00'::jsonpath;
+ ^
+select '0.0'::jsonpath;
+ jsonpath
+----------
+ 0.0
+(1 row)
+
+select '0.000'::jsonpath;
+ jsonpath
+----------
+ 0.000
+(1 row)
+
+select '0.000e1'::jsonpath;
+ jsonpath
+----------
+ 0.00
+(1 row)
+
+select '0.000e2'::jsonpath;
+ jsonpath
+----------
+ 0.0
+(1 row)
+
+select '0.000e3'::jsonpath;
+ jsonpath
+----------
+ 0
+(1 row)
+
+select '0.0010'::jsonpath;
+ jsonpath
+----------
+ 0.0010
+(1 row)
+
+select '0.0010e-1'::jsonpath;
+ jsonpath
+----------
+ 0.00010
+(1 row)
+
+select '0.0010e+1'::jsonpath;
+ jsonpath
+----------
+ 0.010
+(1 row)
+
+select '0.0010e+2'::jsonpath;
+ jsonpath
+----------
+ 0.10
+(1 row)
+
+select '1e'::jsonpath;
+ERROR: invalid floating point number at or near "1e" of jsonpath input
+LINE 1: select '1e'::jsonpath;
+ ^
+select '1.e'::jsonpath;
+ jsonpath
+----------
+ 1."e"
+(1 row)
+
+select '1.2e'::jsonpath;
+ERROR: invalid floating point number at or near "1.2e" of jsonpath input
+LINE 1: select '1.2e'::jsonpath;
+ ^
+select '1.2.e'::jsonpath;
+ jsonpath
+----------
+ 1.2."e"
+(1 row)
+
+select '(1.2).e'::jsonpath;
+ jsonpath
+----------
+ 1.2."e"
+(1 row)
+
+select '1e3'::jsonpath;
+ jsonpath
+----------
+ 1000
+(1 row)
+
+select '1.e3'::jsonpath;
+ jsonpath
+----------
+ 1."e3"
+(1 row)
+
+select '1.e3.e'::jsonpath;
+ jsonpath
+------------
+ 1."e3"."e"
+(1 row)
+
+select '1.e3.e4'::jsonpath;
+ jsonpath
+-------------
+ 1."e3"."e4"
+(1 row)
+
+select '1.2e3'::jsonpath;
+ jsonpath
+----------
+ 1200
+(1 row)
+
+select '1.2.e3'::jsonpath;
+ jsonpath
+----------
+ 1.2."e3"
+(1 row)
+
+select '(1.2).e3'::jsonpath;
+ jsonpath
+----------
+ 1.2."e3"
+(1 row)
+
+select '1..e'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '1..e'::jsonpath;
+ ^
+select '1..e3'::jsonpath;
+ERROR: syntax error at or near "." of jsonpath input
+LINE 1: select '1..e3'::jsonpath;
+ ^
+select '(1.).e'::jsonpath;
+ERROR: syntax error at or near ")" of jsonpath input
+LINE 1: select '(1.).e'::jsonpath;
+ ^
+select '(1.).e3'::jsonpath;
+ERROR: syntax error at or near ")" of jsonpath input
+LINE 1: select '(1.).e3'::jsonpath;
+ ^
diff --git a/ydb/library/yql/tests/postgresql/original/cases/jsonpath.sql b/ydb/library/yql/tests/postgresql/original/cases/jsonpath.sql
new file mode 100644
index 0000000000..17ab775783
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/jsonpath.sql
@@ -0,0 +1,181 @@
+--jsonpath io
+
+select ''::jsonpath;
+select '$'::jsonpath;
+select 'strict $'::jsonpath;
+select 'lax $'::jsonpath;
+select '$.a'::jsonpath;
+select '$.a.v'::jsonpath;
+select '$.a.*'::jsonpath;
+select '$.*[*]'::jsonpath;
+select '$.a[*]'::jsonpath;
+select '$.a[*][*]'::jsonpath;
+select '$[*]'::jsonpath;
+select '$[0]'::jsonpath;
+select '$[*][0]'::jsonpath;
+select '$[*].a'::jsonpath;
+select '$[*][0].a.b'::jsonpath;
+select '$.a.**.b'::jsonpath;
+select '$.a.**{2}.b'::jsonpath;
+select '$.a.**{2 to 2}.b'::jsonpath;
+select '$.a.**{2 to 5}.b'::jsonpath;
+select '$.a.**{0 to 5}.b'::jsonpath;
+select '$.a.**{5 to last}.b'::jsonpath;
+select '$.a.**{last}.b'::jsonpath;
+select '$.a.**{last to 5}.b'::jsonpath;
+select '$+1'::jsonpath;
+select '$-1'::jsonpath;
+select '$--+1'::jsonpath;
+select '$.a/+-1'::jsonpath;
+select '1 * 2 + 4 % -3 != false'::jsonpath;
+
+select '"\b\f\r\n\t\v\"\''\\"'::jsonpath;
+select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath;
+select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath;
+select '"\z"'::jsonpath; -- unrecognized escape is just the literal char
+
+select '$.g ? ($.a == 1)'::jsonpath;
+select '$.g ? (@ == 1)'::jsonpath;
+select '$.g ? (@.a == 1)'::jsonpath;
+select '$.g ? (@.a == 1 || @.a == 4)'::jsonpath;
+select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath;
+select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath;
+select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath;
+select '$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath;
+select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath;
+select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath;
+select '$.g ? (exists (@.x))'::jsonpath;
+select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath;
+select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath;
+select '$.g ? (+@.x >= +-(+@.a + 2))'::jsonpath;
+
+select '$a'::jsonpath;
+select '$a.b'::jsonpath;
+select '$a[*]'::jsonpath;
+select '$.g ? (@.zip == $zip)'::jsonpath;
+select '$.a[1,2, 3 to 16]'::jsonpath;
+select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath;
+select '$.a[$.a.size() - 3]'::jsonpath;
+select 'last'::jsonpath;
+select '"last"'::jsonpath;
+select '$.last'::jsonpath;
+select '$ ? (last > 0)'::jsonpath;
+select '$[last]'::jsonpath;
+select '$[$[0] ? (last > 0)]'::jsonpath;
+
+select 'null.type()'::jsonpath;
+select '1.type()'::jsonpath;
+select '(1).type()'::jsonpath;
+select '1.2.type()'::jsonpath;
+select '"aaa".type()'::jsonpath;
+select 'true.type()'::jsonpath;
+select '$.double().floor().ceiling().abs()'::jsonpath;
+select '$.keyvalue().key'::jsonpath;
+select '$.datetime()'::jsonpath;
+select '$.datetime("datetime template")'::jsonpath;
+
+select '$ ? (@ starts with "abc")'::jsonpath;
+select '$ ? (@ starts with $var)'::jsonpath;
+
+select '$ ? (@ like_regex "(invalid pattern")'::jsonpath;
+select '$ ? (@ like_regex "pattern")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "isim")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath;
+select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath;
+
+select '$ < 1'::jsonpath;
+select '($ < 1) || $.a.b <= $x'::jsonpath;
+select '@ + 1'::jsonpath;
+
+select '($).a.b'::jsonpath;
+select '($.a.b).c.d'::jsonpath;
+select '($.a.b + -$.x.y).c.d'::jsonpath;
+select '(-+$.a.b).c.d'::jsonpath;
+select '1 + ($.a.b + 2).c.d'::jsonpath;
+select '1 + ($.a.b > 2).c.d'::jsonpath;
+select '($)'::jsonpath;
+select '(($))'::jsonpath;
+select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath;
+
+select '$ ? (@.a < 1)'::jsonpath;
+select '$ ? (@.a < -1)'::jsonpath;
+select '$ ? (@.a < +1)'::jsonpath;
+select '$ ? (@.a < .1)'::jsonpath;
+select '$ ? (@.a < -.1)'::jsonpath;
+select '$ ? (@.a < +.1)'::jsonpath;
+select '$ ? (@.a < 0.1)'::jsonpath;
+select '$ ? (@.a < -0.1)'::jsonpath;
+select '$ ? (@.a < +0.1)'::jsonpath;
+select '$ ? (@.a < 10.1)'::jsonpath;
+select '$ ? (@.a < -10.1)'::jsonpath;
+select '$ ? (@.a < +10.1)'::jsonpath;
+select '$ ? (@.a < 1e1)'::jsonpath;
+select '$ ? (@.a < -1e1)'::jsonpath;
+select '$ ? (@.a < +1e1)'::jsonpath;
+select '$ ? (@.a < .1e1)'::jsonpath;
+select '$ ? (@.a < -.1e1)'::jsonpath;
+select '$ ? (@.a < +.1e1)'::jsonpath;
+select '$ ? (@.a < 0.1e1)'::jsonpath;
+select '$ ? (@.a < -0.1e1)'::jsonpath;
+select '$ ? (@.a < +0.1e1)'::jsonpath;
+select '$ ? (@.a < 10.1e1)'::jsonpath;
+select '$ ? (@.a < -10.1e1)'::jsonpath;
+select '$ ? (@.a < +10.1e1)'::jsonpath;
+select '$ ? (@.a < 1e-1)'::jsonpath;
+select '$ ? (@.a < -1e-1)'::jsonpath;
+select '$ ? (@.a < +1e-1)'::jsonpath;
+select '$ ? (@.a < .1e-1)'::jsonpath;
+select '$ ? (@.a < -.1e-1)'::jsonpath;
+select '$ ? (@.a < +.1e-1)'::jsonpath;
+select '$ ? (@.a < 0.1e-1)'::jsonpath;
+select '$ ? (@.a < -0.1e-1)'::jsonpath;
+select '$ ? (@.a < +0.1e-1)'::jsonpath;
+select '$ ? (@.a < 10.1e-1)'::jsonpath;
+select '$ ? (@.a < -10.1e-1)'::jsonpath;
+select '$ ? (@.a < +10.1e-1)'::jsonpath;
+select '$ ? (@.a < 1e+1)'::jsonpath;
+select '$ ? (@.a < -1e+1)'::jsonpath;
+select '$ ? (@.a < +1e+1)'::jsonpath;
+select '$ ? (@.a < .1e+1)'::jsonpath;
+select '$ ? (@.a < -.1e+1)'::jsonpath;
+select '$ ? (@.a < +.1e+1)'::jsonpath;
+select '$ ? (@.a < 0.1e+1)'::jsonpath;
+select '$ ? (@.a < -0.1e+1)'::jsonpath;
+select '$ ? (@.a < +0.1e+1)'::jsonpath;
+select '$ ? (@.a < 10.1e+1)'::jsonpath;
+select '$ ? (@.a < -10.1e+1)'::jsonpath;
+select '$ ? (@.a < +10.1e+1)'::jsonpath;
+
+select '0'::jsonpath;
+select '00'::jsonpath;
+select '0.0'::jsonpath;
+select '0.000'::jsonpath;
+select '0.000e1'::jsonpath;
+select '0.000e2'::jsonpath;
+select '0.000e3'::jsonpath;
+select '0.0010'::jsonpath;
+select '0.0010e-1'::jsonpath;
+select '0.0010e+1'::jsonpath;
+select '0.0010e+2'::jsonpath;
+select '1e'::jsonpath;
+select '1.e'::jsonpath;
+select '1.2e'::jsonpath;
+select '1.2.e'::jsonpath;
+select '(1.2).e'::jsonpath;
+select '1e3'::jsonpath;
+select '1.e3'::jsonpath;
+select '1.e3.e'::jsonpath;
+select '1.e3.e4'::jsonpath;
+select '1.2e3'::jsonpath;
+select '1.2.e3'::jsonpath;
+select '(1.2).e3'::jsonpath;
+select '1..e'::jsonpath;
+select '1..e3'::jsonpath;
+select '(1.).e'::jsonpath;
+select '(1.).e3'::jsonpath;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/jsonpath_encoding.out b/ydb/library/yql/tests/postgresql/original/cases/jsonpath_encoding.out
new file mode 100644
index 0000000000..7cbfb6abcf
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/jsonpath_encoding.out
@@ -0,0 +1,180 @@
+--
+-- encoding-sensitive tests for jsonpath
+--
+-- We provide expected-results files for UTF8 (jsonpath_encoding.out)
+-- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise.
+SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
+ AS skip_test \gset
+\if :skip_test
+\quit
+\endif
+SELECT getdatabaseencoding(); -- just to label the results files
+ getdatabaseencoding
+---------------------
+ UTF8
+(1 row)
+
+-- checks for double-quoted values
+-- basic unicode input
+SELECT '"\u"'::jsonpath; -- ERROR, incomplete escape
+ERROR: invalid unicode sequence at or near "\u" of jsonpath input
+LINE 1: SELECT '"\u"'::jsonpath;
+ ^
+SELECT '"\u00"'::jsonpath; -- ERROR, incomplete escape
+ERROR: invalid unicode sequence at or near "\u00" of jsonpath input
+LINE 1: SELECT '"\u00"'::jsonpath;
+ ^
+SELECT '"\u000g"'::jsonpath; -- ERROR, g is not a hex digit
+ERROR: invalid unicode sequence at or near "\u000" of jsonpath input
+LINE 1: SELECT '"\u000g"'::jsonpath;
+ ^
+SELECT '"\u0000"'::jsonpath; -- OK, legal escape
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT '"\u0000"'::jsonpath;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+SELECT '"\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK
+ jsonpath
+----------
+ "ꯍ"
+(1 row)
+
+-- handling of unicode surrogate pairs
+select '"\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8;
+ correct_in_utf8
+-----------------
+ "😄🐶"
+(1 row)
+
+select '"\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '"\ud83d\ud83d"'::jsonpath;
+ ^
+DETAIL: Unicode high surrogate must not follow a high surrogate.
+select '"\ude04\ud83d"'::jsonpath; -- surrogates in wrong order
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '"\ude04\ud83d"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+select '"\ud83dX"'::jsonpath; -- orphan high surrogate
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '"\ud83dX"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+select '"\ude04X"'::jsonpath; -- orphan low surrogate
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '"\ude04X"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+--handling of simple unicode escapes
+select '"the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8;
+ correct_in_utf8
+------------------------
+ "the Copyright © sign"
+(1 row)
+
+select '"dollar \u0024 character"'::jsonpath as correct_everywhere;
+ correct_everywhere
+----------------------
+ "dollar $ character"
+(1 row)
+
+select '"dollar \\u0024 character"'::jsonpath as not_an_escape;
+ not_an_escape
+----------------------------
+ "dollar \\u0024 character"
+(1 row)
+
+select '"null \u0000 escape"'::jsonpath as not_unescaped;
+ERROR: unsupported Unicode escape sequence
+LINE 1: select '"null \u0000 escape"'::jsonpath as not_unescaped;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+select '"null \\u0000 escape"'::jsonpath as not_an_escape;
+ not_an_escape
+-----------------------
+ "null \\u0000 escape"
+(1 row)
+
+-- checks for quoted key names
+-- basic unicode input
+SELECT '$."\u"'::jsonpath; -- ERROR, incomplete escape
+ERROR: invalid unicode sequence at or near "\u" of jsonpath input
+LINE 1: SELECT '$."\u"'::jsonpath;
+ ^
+SELECT '$."\u00"'::jsonpath; -- ERROR, incomplete escape
+ERROR: invalid unicode sequence at or near "\u00" of jsonpath input
+LINE 1: SELECT '$."\u00"'::jsonpath;
+ ^
+SELECT '$."\u000g"'::jsonpath; -- ERROR, g is not a hex digit
+ERROR: invalid unicode sequence at or near "\u000" of jsonpath input
+LINE 1: SELECT '$."\u000g"'::jsonpath;
+ ^
+SELECT '$."\u0000"'::jsonpath; -- OK, legal escape
+ERROR: unsupported Unicode escape sequence
+LINE 1: SELECT '$."\u0000"'::jsonpath;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+SELECT '$."\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK
+ jsonpath
+----------
+ $."ꯍ"
+(1 row)
+
+-- handling of unicode surrogate pairs
+select '$."\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8;
+ correct_in_utf8
+-----------------
+ $."😄🐶"
+(1 row)
+
+select '$."\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$."\ud83d\ud83d"'::jsonpath;
+ ^
+DETAIL: Unicode high surrogate must not follow a high surrogate.
+select '$."\ude04\ud83d"'::jsonpath; -- surrogates in wrong order
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$."\ude04\ud83d"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+select '$."\ud83dX"'::jsonpath; -- orphan high surrogate
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$."\ud83dX"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+select '$."\ude04X"'::jsonpath; -- orphan low surrogate
+ERROR: invalid input syntax for type jsonpath
+LINE 1: select '$."\ude04X"'::jsonpath;
+ ^
+DETAIL: Unicode low surrogate must follow a high surrogate.
+--handling of simple unicode escapes
+select '$."the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8;
+ correct_in_utf8
+--------------------------
+ $."the Copyright © sign"
+(1 row)
+
+select '$."dollar \u0024 character"'::jsonpath as correct_everywhere;
+ correct_everywhere
+------------------------
+ $."dollar $ character"
+(1 row)
+
+select '$."dollar \\u0024 character"'::jsonpath as not_an_escape;
+ not_an_escape
+------------------------------
+ $."dollar \\u0024 character"
+(1 row)
+
+select '$."null \u0000 escape"'::jsonpath as not_unescaped;
+ERROR: unsupported Unicode escape sequence
+LINE 1: select '$."null \u0000 escape"'::jsonpath as not_unescaped;
+ ^
+DETAIL: \u0000 cannot be converted to text.
+select '$."null \\u0000 escape"'::jsonpath as not_an_escape;
+ not_an_escape
+-------------------------
+ $."null \\u0000 escape"
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/jsonpath_encoding.sql b/ydb/library/yql/tests/postgresql/original/cases/jsonpath_encoding.sql
new file mode 100644
index 0000000000..55d9e30b95
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/jsonpath_encoding.sql
@@ -0,0 +1,59 @@
+--
+-- encoding-sensitive tests for jsonpath
+--
+
+-- We provide expected-results files for UTF8 (jsonpath_encoding.out)
+-- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise.
+SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
+ AS skip_test \gset
+\if :skip_test
+\quit
+\endif
+
+SELECT getdatabaseencoding(); -- just to label the results files
+
+-- checks for double-quoted values
+
+-- basic unicode input
+SELECT '"\u"'::jsonpath; -- ERROR, incomplete escape
+SELECT '"\u00"'::jsonpath; -- ERROR, incomplete escape
+SELECT '"\u000g"'::jsonpath; -- ERROR, g is not a hex digit
+SELECT '"\u0000"'::jsonpath; -- OK, legal escape
+SELECT '"\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK
+
+-- handling of unicode surrogate pairs
+select '"\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8;
+select '"\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row
+select '"\ude04\ud83d"'::jsonpath; -- surrogates in wrong order
+select '"\ud83dX"'::jsonpath; -- orphan high surrogate
+select '"\ude04X"'::jsonpath; -- orphan low surrogate
+
+--handling of simple unicode escapes
+select '"the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8;
+select '"dollar \u0024 character"'::jsonpath as correct_everywhere;
+select '"dollar \\u0024 character"'::jsonpath as not_an_escape;
+select '"null \u0000 escape"'::jsonpath as not_unescaped;
+select '"null \\u0000 escape"'::jsonpath as not_an_escape;
+
+-- checks for quoted key names
+
+-- basic unicode input
+SELECT '$."\u"'::jsonpath; -- ERROR, incomplete escape
+SELECT '$."\u00"'::jsonpath; -- ERROR, incomplete escape
+SELECT '$."\u000g"'::jsonpath; -- ERROR, g is not a hex digit
+SELECT '$."\u0000"'::jsonpath; -- OK, legal escape
+SELECT '$."\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK
+
+-- handling of unicode surrogate pairs
+select '$."\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8;
+select '$."\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row
+select '$."\ude04\ud83d"'::jsonpath; -- surrogates in wrong order
+select '$."\ud83dX"'::jsonpath; -- orphan high surrogate
+select '$."\ude04X"'::jsonpath; -- orphan low surrogate
+
+--handling of simple unicode escapes
+select '$."the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8;
+select '$."dollar \u0024 character"'::jsonpath as correct_everywhere;
+select '$."dollar \\u0024 character"'::jsonpath as not_an_escape;
+select '$."null \u0000 escape"'::jsonpath as not_unescaped;
+select '$."null \\u0000 escape"'::jsonpath as not_an_escape;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/limit.out b/ydb/library/yql/tests/postgresql/original/cases/limit.out
new file mode 100644
index 0000000000..8a98bbea8e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/limit.out
@@ -0,0 +1,694 @@
+--
+-- LIMIT
+-- Check the LIMIT/OFFSET feature of SELECT
+--
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 50
+ ORDER BY unique1 LIMIT 2;
+ two | unique1 | unique2 | stringu1
+-----+---------+---------+----------
+ | 51 | 76 | ZBAAAA
+ | 52 | 985 | ACAAAA
+(2 rows)
+
+SELECT ''::text AS five, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 60
+ ORDER BY unique1 LIMIT 5;
+ five | unique1 | unique2 | stringu1
+------+---------+---------+----------
+ | 61 | 560 | JCAAAA
+ | 62 | 633 | KCAAAA
+ | 63 | 296 | LCAAAA
+ | 64 | 479 | MCAAAA
+ | 65 | 64 | NCAAAA
+(5 rows)
+
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 60 AND unique1 < 63
+ ORDER BY unique1 LIMIT 5;
+ two | unique1 | unique2 | stringu1
+-----+---------+---------+----------
+ | 61 | 560 | JCAAAA
+ | 62 | 633 | KCAAAA
+(2 rows)
+
+SELECT ''::text AS three, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 100
+ ORDER BY unique1 LIMIT 3 OFFSET 20;
+ three | unique1 | unique2 | stringu1
+-------+---------+---------+----------
+ | 121 | 700 | REAAAA
+ | 122 | 519 | SEAAAA
+ | 123 | 777 | TEAAAA
+(3 rows)
+
+SELECT ''::text AS zero, unique1, unique2, stringu1
+ FROM onek WHERE unique1 < 50
+ ORDER BY unique1 DESC LIMIT 8 OFFSET 99;
+ zero | unique1 | unique2 | stringu1
+------+---------+---------+----------
+(0 rows)
+
+SELECT ''::text AS eleven, unique1, unique2, stringu1
+ FROM onek WHERE unique1 < 50
+ ORDER BY unique1 DESC LIMIT 20 OFFSET 39;
+ eleven | unique1 | unique2 | stringu1
+--------+---------+---------+----------
+ | 10 | 520 | KAAAAA
+ | 9 | 49 | JAAAAA
+ | 8 | 653 | IAAAAA
+ | 7 | 647 | HAAAAA
+ | 6 | 978 | GAAAAA
+ | 5 | 541 | FAAAAA
+ | 4 | 833 | EAAAAA
+ | 3 | 431 | DAAAAA
+ | 2 | 326 | CAAAAA
+ | 1 | 214 | BAAAAA
+ | 0 | 998 | AAAAAA
+(11 rows)
+
+SELECT ''::text AS ten, unique1, unique2, stringu1
+ FROM onek
+ ORDER BY unique1 OFFSET 990;
+ ten | unique1 | unique2 | stringu1
+-----+---------+---------+----------
+ | 990 | 369 | CMAAAA
+ | 991 | 426 | DMAAAA
+ | 992 | 363 | EMAAAA
+ | 993 | 661 | FMAAAA
+ | 994 | 695 | GMAAAA
+ | 995 | 144 | HMAAAA
+ | 996 | 258 | IMAAAA
+ | 997 | 21 | JMAAAA
+ | 998 | 549 | KMAAAA
+ | 999 | 152 | LMAAAA
+(10 rows)
+
+SELECT ''::text AS five, unique1, unique2, stringu1
+ FROM onek
+ ORDER BY unique1 OFFSET 990 LIMIT 5;
+ five | unique1 | unique2 | stringu1
+------+---------+---------+----------
+ | 990 | 369 | CMAAAA
+ | 991 | 426 | DMAAAA
+ | 992 | 363 | EMAAAA
+ | 993 | 661 | FMAAAA
+ | 994 | 695 | GMAAAA
+(5 rows)
+
+SELECT ''::text AS five, unique1, unique2, stringu1
+ FROM onek
+ ORDER BY unique1 LIMIT 5 OFFSET 900;
+ five | unique1 | unique2 | stringu1
+------+---------+---------+----------
+ | 900 | 913 | QIAAAA
+ | 901 | 931 | RIAAAA
+ | 902 | 702 | SIAAAA
+ | 903 | 641 | TIAAAA
+ | 904 | 793 | UIAAAA
+(5 rows)
+
+-- Test null limit and offset. The planner would discard a simple null
+-- constant, so to ensure executor is exercised, do this:
+select * from int8_tbl limit (case when random() < 0.5 then null::bigint end);
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+select * from int8_tbl offset (case when random() < 0.5 then null::bigint end);
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+-- Test assorted cases involving backwards fetch from a LIMIT plan node
+begin;
+declare c1 cursor for select * from int8_tbl limit 10;
+fetch all in c1;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+fetch 1 in c1;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch backward 1 in c1;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | -4567890123456789
+(1 row)
+
+fetch backward all in c1;
+ q1 | q2
+------------------+------------------
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | 123
+ 123 | 4567890123456789
+ 123 | 456
+(4 rows)
+
+fetch backward 1 in c1;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch all in c1;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+declare c2 cursor for select * from int8_tbl limit 3;
+fetch all in c2;
+ q1 | q2
+------------------+------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+(3 rows)
+
+fetch 1 in c2;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch backward 1 in c2;
+ q1 | q2
+------------------+-----
+ 4567890123456789 | 123
+(1 row)
+
+fetch backward all in c2;
+ q1 | q2
+-----+------------------
+ 123 | 4567890123456789
+ 123 | 456
+(2 rows)
+
+fetch backward 1 in c2;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch all in c2;
+ q1 | q2
+------------------+------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+(3 rows)
+
+declare c3 cursor for select * from int8_tbl offset 3;
+fetch all in c3;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(2 rows)
+
+fetch 1 in c3;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch backward 1 in c3;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | -4567890123456789
+(1 row)
+
+fetch backward all in c3;
+ q1 | q2
+------------------+------------------
+ 4567890123456789 | 4567890123456789
+(1 row)
+
+fetch backward 1 in c3;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch all in c3;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(2 rows)
+
+declare c4 cursor for select * from int8_tbl offset 10;
+fetch all in c4;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch 1 in c4;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch backward 1 in c4;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch backward all in c4;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch backward 1 in c4;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch all in c4;
+ q1 | q2
+----+----
+(0 rows)
+
+declare c5 cursor for select * from int8_tbl order by q1 fetch first 2 rows with ties;
+fetch all in c5;
+ q1 | q2
+-----+------------------
+ 123 | 456
+ 123 | 4567890123456789
+(2 rows)
+
+fetch 1 in c5;
+ q1 | q2
+----+----
+(0 rows)
+
+fetch backward 1 in c5;
+ q1 | q2
+-----+------------------
+ 123 | 4567890123456789
+(1 row)
+
+fetch backward 1 in c5;
+ q1 | q2
+-----+-----
+ 123 | 456
+(1 row)
+
+fetch all in c5;
+ q1 | q2
+-----+------------------
+ 123 | 4567890123456789
+(1 row)
+
+fetch backward all in c5;
+ q1 | q2
+-----+------------------
+ 123 | 4567890123456789
+ 123 | 456
+(2 rows)
+
+fetch all in c5;
+ q1 | q2
+-----+------------------
+ 123 | 456
+ 123 | 4567890123456789
+(2 rows)
+
+fetch backward all in c5;
+ q1 | q2
+-----+------------------
+ 123 | 4567890123456789
+ 123 | 456
+(2 rows)
+
+rollback;
+-- Stress test for variable LIMIT in conjunction with bounded-heap sorting
+SELECT
+ (SELECT n
+ FROM (VALUES (1)) AS x,
+ (SELECT n FROM generate_series(1,10) AS n
+ ORDER BY n LIMIT 1 OFFSET s-1) AS y) AS z
+ FROM generate_series(1,10) AS s;
+ z
+----
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10
+(10 rows)
+
+--
+-- Test behavior of volatile and set-returning functions in conjunction
+-- with ORDER BY and LIMIT.
+--
+create temp sequence testseq;
+explain (verbose, costs off)
+select unique1, unique2, nextval('testseq')
+ from tenk1 order by unique2 limit 10;
+ QUERY PLAN
+----------------------------------------------------------------
+ Limit
+ Output: unique1, unique2, (nextval('testseq'::regclass))
+ -> Index Scan using tenk1_unique2 on public.tenk1
+ Output: unique1, unique2, nextval('testseq'::regclass)
+(4 rows)
+
+select unique1, unique2, nextval('testseq')
+ from tenk1 order by unique2 limit 10;
+ unique1 | unique2 | nextval
+---------+---------+---------
+ 8800 | 0 | 1
+ 1891 | 1 | 2
+ 3420 | 2 | 3
+ 9850 | 3 | 4
+ 7164 | 4 | 5
+ 8009 | 5 | 6
+ 5057 | 6 | 7
+ 6701 | 7 | 8
+ 4321 | 8 | 9
+ 3043 | 9 | 10
+(10 rows)
+
+select currval('testseq');
+ currval
+---------
+ 10
+(1 row)
+
+explain (verbose, costs off)
+select unique1, unique2, nextval('testseq')
+ from tenk1 order by tenthous limit 10;
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Limit
+ Output: unique1, unique2, (nextval('testseq'::regclass)), tenthous
+ -> Result
+ Output: unique1, unique2, nextval('testseq'::regclass), tenthous
+ -> Sort
+ Output: unique1, unique2, tenthous
+ Sort Key: tenk1.tenthous
+ -> Seq Scan on public.tenk1
+ Output: unique1, unique2, tenthous
+(9 rows)
+
+select unique1, unique2, nextval('testseq')
+ from tenk1 order by tenthous limit 10;
+ unique1 | unique2 | nextval
+---------+---------+---------
+ 0 | 9998 | 11
+ 1 | 2838 | 12
+ 2 | 2716 | 13
+ 3 | 5679 | 14
+ 4 | 1621 | 15
+ 5 | 5557 | 16
+ 6 | 2855 | 17
+ 7 | 8518 | 18
+ 8 | 5435 | 19
+ 9 | 4463 | 20
+(10 rows)
+
+select currval('testseq');
+ currval
+---------
+ 20
+(1 row)
+
+explain (verbose, costs off)
+select unique1, unique2, generate_series(1,10)
+ from tenk1 order by unique2 limit 7;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: unique1, unique2, (generate_series(1, 10))
+ -> ProjectSet
+ Output: unique1, unique2, generate_series(1, 10)
+ -> Index Scan using tenk1_unique2 on public.tenk1
+ Output: unique1, unique2, two, four, ten, twenty, hundred, thousand, twothousand, fivethous, tenthous, odd, even, stringu1, stringu2, string4
+(6 rows)
+
+select unique1, unique2, generate_series(1,10)
+ from tenk1 order by unique2 limit 7;
+ unique1 | unique2 | generate_series
+---------+---------+-----------------
+ 8800 | 0 | 1
+ 8800 | 0 | 2
+ 8800 | 0 | 3
+ 8800 | 0 | 4
+ 8800 | 0 | 5
+ 8800 | 0 | 6
+ 8800 | 0 | 7
+(7 rows)
+
+explain (verbose, costs off)
+select unique1, unique2, generate_series(1,10)
+ from tenk1 order by tenthous limit 7;
+ QUERY PLAN
+--------------------------------------------------------------------
+ Limit
+ Output: unique1, unique2, (generate_series(1, 10)), tenthous
+ -> ProjectSet
+ Output: unique1, unique2, generate_series(1, 10), tenthous
+ -> Sort
+ Output: unique1, unique2, tenthous
+ Sort Key: tenk1.tenthous
+ -> Seq Scan on public.tenk1
+ Output: unique1, unique2, tenthous
+(9 rows)
+
+select unique1, unique2, generate_series(1,10)
+ from tenk1 order by tenthous limit 7;
+ unique1 | unique2 | generate_series
+---------+---------+-----------------
+ 0 | 9998 | 1
+ 0 | 9998 | 2
+ 0 | 9998 | 3
+ 0 | 9998 | 4
+ 0 | 9998 | 5
+ 0 | 9998 | 6
+ 0 | 9998 | 7
+(7 rows)
+
+-- use of random() is to keep planner from folding the expressions together
+explain (verbose, costs off)
+select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ ProjectSet
+ Output: generate_series(0, 2), generate_series(((random() * '0.1'::double precision))::integer, 2)
+ -> Result
+(3 rows)
+
+select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2;
+ s1 | s2
+----+----
+ 0 | 0
+ 1 | 1
+ 2 | 2
+(3 rows)
+
+explain (verbose, costs off)
+select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2
+order by s2 desc;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------
+ Sort
+ Output: (generate_series(0, 2)), (generate_series(((random() * '0.1'::double precision))::integer, 2))
+ Sort Key: (generate_series(((random() * '0.1'::double precision))::integer, 2)) DESC
+ -> ProjectSet
+ Output: generate_series(0, 2), generate_series(((random() * '0.1'::double precision))::integer, 2)
+ -> Result
+(6 rows)
+
+select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2
+order by s2 desc;
+ s1 | s2
+----+----
+ 2 | 2
+ 1 | 1
+ 0 | 0
+(3 rows)
+
+-- test for failure to set all aggregates' aggtranstype
+explain (verbose, costs off)
+select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
+ from tenk1 group by thousand order by thousand limit 3;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------
+ Limit
+ Output: (sum(tenthous)), (((sum(tenthous))::double precision + (random() * '0'::double precision))), thousand
+ -> GroupAggregate
+ Output: sum(tenthous), ((sum(tenthous))::double precision + (random() * '0'::double precision)), thousand
+ Group Key: tenk1.thousand
+ -> Index Only Scan using tenk1_thous_tenthous on public.tenk1
+ Output: thousand, tenthous
+(7 rows)
+
+select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
+ from tenk1 group by thousand order by thousand limit 3;
+ s1 | s2
+-------+-------
+ 45000 | 45000
+ 45010 | 45010
+ 45020 | 45020
+(3 rows)
+
+--
+-- FETCH FIRST
+-- Check the WITH TIES clause
+--
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 2 ROW WITH TIES;
+ thousand
+----------
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+(10 rows)
+
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST ROWS WITH TIES;
+ thousand
+----------
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+(10 rows)
+
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 1 ROW WITH TIES;
+ thousand
+----------
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+(10 rows)
+
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 2 ROW ONLY;
+ thousand
+----------
+ 0
+ 0
+(2 rows)
+
+-- SKIP LOCKED and WITH TIES are incompatible
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 1 ROW WITH TIES FOR UPDATE SKIP LOCKED;
+ERROR: SKIP LOCKED and WITH TIES options cannot be used together
+-- should fail
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 50
+ FETCH FIRST 2 ROW WITH TIES;
+ERROR: WITH TIES cannot be specified without ORDER BY clause
+-- test ruleutils
+CREATE VIEW limit_thousand_v_1 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST 5 ROWS WITH TIES OFFSET 10;
+\d+ limit_thousand_v_1
+ View "public.limit_thousand_v_1"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+----------+---------+-----------+----------+---------+---------+-------------
+ thousand | integer | | | | plain |
+View definition:
+ SELECT onek.thousand
+ FROM onek
+ WHERE onek.thousand < 995
+ ORDER BY onek.thousand
+ OFFSET 10
+ FETCH FIRST 5 ROWS WITH TIES;
+
+CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand OFFSET 10 FETCH FIRST 5 ROWS ONLY;
+\d+ limit_thousand_v_2
+ View "public.limit_thousand_v_2"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+----------+---------+-----------+----------+---------+---------+-------------
+ thousand | integer | | | | plain |
+View definition:
+ SELECT onek.thousand
+ FROM onek
+ WHERE onek.thousand < 995
+ ORDER BY onek.thousand
+ OFFSET 10
+ LIMIT 5;
+
+CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS WITH TIES; -- fails
+ERROR: row count cannot be null in FETCH FIRST ... WITH TIES clause
+CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES;
+\d+ limit_thousand_v_3
+ View "public.limit_thousand_v_3"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+----------+---------+-----------+----------+---------+---------+-------------
+ thousand | integer | | | | plain |
+View definition:
+ SELECT onek.thousand
+ FROM onek
+ WHERE onek.thousand < 995
+ ORDER BY onek.thousand
+ FETCH FIRST (NULL::integer + 1) ROWS WITH TIES;
+
+CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+\d+ limit_thousand_v_4
+ View "public.limit_thousand_v_4"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+----------+---------+-----------+----------+---------+---------+-------------
+ thousand | integer | | | | plain |
+View definition:
+ SELECT onek.thousand
+ FROM onek
+ WHERE onek.thousand < 995
+ ORDER BY onek.thousand
+ LIMIT ALL;
+
+-- leave these views
diff --git a/ydb/library/yql/tests/postgresql/original/cases/limit.sql b/ydb/library/yql/tests/postgresql/original/cases/limit.sql
new file mode 100644
index 0000000000..6f0cda9870
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/limit.sql
@@ -0,0 +1,201 @@
+--
+-- LIMIT
+-- Check the LIMIT/OFFSET feature of SELECT
+--
+
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 50
+ ORDER BY unique1 LIMIT 2;
+SELECT ''::text AS five, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 60
+ ORDER BY unique1 LIMIT 5;
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 60 AND unique1 < 63
+ ORDER BY unique1 LIMIT 5;
+SELECT ''::text AS three, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 100
+ ORDER BY unique1 LIMIT 3 OFFSET 20;
+SELECT ''::text AS zero, unique1, unique2, stringu1
+ FROM onek WHERE unique1 < 50
+ ORDER BY unique1 DESC LIMIT 8 OFFSET 99;
+SELECT ''::text AS eleven, unique1, unique2, stringu1
+ FROM onek WHERE unique1 < 50
+ ORDER BY unique1 DESC LIMIT 20 OFFSET 39;
+SELECT ''::text AS ten, unique1, unique2, stringu1
+ FROM onek
+ ORDER BY unique1 OFFSET 990;
+SELECT ''::text AS five, unique1, unique2, stringu1
+ FROM onek
+ ORDER BY unique1 OFFSET 990 LIMIT 5;
+SELECT ''::text AS five, unique1, unique2, stringu1
+ FROM onek
+ ORDER BY unique1 LIMIT 5 OFFSET 900;
+
+-- Test null limit and offset. The planner would discard a simple null
+-- constant, so to ensure executor is exercised, do this:
+select * from int8_tbl limit (case when random() < 0.5 then null::bigint end);
+select * from int8_tbl offset (case when random() < 0.5 then null::bigint end);
+
+-- Test assorted cases involving backwards fetch from a LIMIT plan node
+begin;
+
+declare c1 cursor for select * from int8_tbl limit 10;
+fetch all in c1;
+fetch 1 in c1;
+fetch backward 1 in c1;
+fetch backward all in c1;
+fetch backward 1 in c1;
+fetch all in c1;
+
+declare c2 cursor for select * from int8_tbl limit 3;
+fetch all in c2;
+fetch 1 in c2;
+fetch backward 1 in c2;
+fetch backward all in c2;
+fetch backward 1 in c2;
+fetch all in c2;
+
+declare c3 cursor for select * from int8_tbl offset 3;
+fetch all in c3;
+fetch 1 in c3;
+fetch backward 1 in c3;
+fetch backward all in c3;
+fetch backward 1 in c3;
+fetch all in c3;
+
+declare c4 cursor for select * from int8_tbl offset 10;
+fetch all in c4;
+fetch 1 in c4;
+fetch backward 1 in c4;
+fetch backward all in c4;
+fetch backward 1 in c4;
+fetch all in c4;
+
+declare c5 cursor for select * from int8_tbl order by q1 fetch first 2 rows with ties;
+fetch all in c5;
+fetch 1 in c5;
+fetch backward 1 in c5;
+fetch backward 1 in c5;
+fetch all in c5;
+fetch backward all in c5;
+fetch all in c5;
+fetch backward all in c5;
+
+rollback;
+
+-- Stress test for variable LIMIT in conjunction with bounded-heap sorting
+
+SELECT
+ (SELECT n
+ FROM (VALUES (1)) AS x,
+ (SELECT n FROM generate_series(1,10) AS n
+ ORDER BY n LIMIT 1 OFFSET s-1) AS y) AS z
+ FROM generate_series(1,10) AS s;
+
+--
+-- Test behavior of volatile and set-returning functions in conjunction
+-- with ORDER BY and LIMIT.
+--
+
+create temp sequence testseq;
+
+explain (verbose, costs off)
+select unique1, unique2, nextval('testseq')
+ from tenk1 order by unique2 limit 10;
+
+select unique1, unique2, nextval('testseq')
+ from tenk1 order by unique2 limit 10;
+
+select currval('testseq');
+
+explain (verbose, costs off)
+select unique1, unique2, nextval('testseq')
+ from tenk1 order by tenthous limit 10;
+
+select unique1, unique2, nextval('testseq')
+ from tenk1 order by tenthous limit 10;
+
+select currval('testseq');
+
+explain (verbose, costs off)
+select unique1, unique2, generate_series(1,10)
+ from tenk1 order by unique2 limit 7;
+
+select unique1, unique2, generate_series(1,10)
+ from tenk1 order by unique2 limit 7;
+
+explain (verbose, costs off)
+select unique1, unique2, generate_series(1,10)
+ from tenk1 order by tenthous limit 7;
+
+select unique1, unique2, generate_series(1,10)
+ from tenk1 order by tenthous limit 7;
+
+-- use of random() is to keep planner from folding the expressions together
+explain (verbose, costs off)
+select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2;
+
+select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2;
+
+explain (verbose, costs off)
+select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2
+order by s2 desc;
+
+select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2
+order by s2 desc;
+
+-- test for failure to set all aggregates' aggtranstype
+explain (verbose, costs off)
+select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
+ from tenk1 group by thousand order by thousand limit 3;
+
+select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2
+ from tenk1 group by thousand order by thousand limit 3;
+
+--
+-- FETCH FIRST
+-- Check the WITH TIES clause
+--
+
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 2 ROW WITH TIES;
+
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST ROWS WITH TIES;
+
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 1 ROW WITH TIES;
+
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 2 ROW ONLY;
+
+-- SKIP LOCKED and WITH TIES are incompatible
+SELECT thousand
+ FROM onek WHERE thousand < 5
+ ORDER BY thousand FETCH FIRST 1 ROW WITH TIES FOR UPDATE SKIP LOCKED;
+
+-- should fail
+SELECT ''::text AS two, unique1, unique2, stringu1
+ FROM onek WHERE unique1 > 50
+ FETCH FIRST 2 ROW WITH TIES;
+
+-- test ruleutils
+CREATE VIEW limit_thousand_v_1 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST 5 ROWS WITH TIES OFFSET 10;
+\d+ limit_thousand_v_1
+CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand OFFSET 10 FETCH FIRST 5 ROWS ONLY;
+\d+ limit_thousand_v_2
+CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS WITH TIES; -- fails
+CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES;
+\d+ limit_thousand_v_3
+CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+\d+ limit_thousand_v_4
+-- leave these views
diff --git a/ydb/library/yql/tests/postgresql/original/cases/name.out b/ydb/library/yql/tests/postgresql/original/cases/name.out
new file mode 100644
index 0000000000..d58df2ba41
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/name.out
@@ -0,0 +1,197 @@
+--
+-- NAME
+-- all inputs are silently truncated at NAMEDATALEN-1 (63) characters
+--
+-- fixed-length by reference
+SELECT name 'name string' = name 'name string' AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT name 'name string' = name 'name string ' AS "False";
+ False
+-------
+ f
+(1 row)
+
+--
+--
+--
+CREATE TABLE NAME_TBL(f1 name);
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR');
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqr');
+INSERT INTO NAME_TBL(f1) VALUES ('asdfghjkl;');
+INSERT INTO NAME_TBL(f1) VALUES ('343f%2a');
+INSERT INTO NAME_TBL(f1) VALUES ('d34aaasdf');
+INSERT INTO NAME_TBL(f1) VALUES ('');
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ');
+SELECT * FROM NAME_TBL;
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(7 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <> '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+
+(5 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(2 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 < '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+----
+
+(1 row)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(3 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 > '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+(4 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 >= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(6 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ asdfghjkl;
+ 343f%2a
+ d34aaasdf
+
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(7 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 !~ '.*';
+ f1
+----
+(0 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '[0-9]';
+ f1
+-----------------------------------------------------------------
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+ 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq
+ 343f%2a
+ d34aaasdf
+ 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ
+(5 rows)
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*asdf.*';
+ f1
+------------
+ asdfghjkl;
+ d34aaasdf
+(2 rows)
+
+DROP TABLE NAME_TBL;
+DO $$
+DECLARE r text[];
+BEGIN
+ r := parse_ident('Schemax.Tabley');
+ RAISE NOTICE '%', format('%I.%I', r[1], r[2]);
+ r := parse_ident('"SchemaX"."TableY"');
+ RAISE NOTICE '%', format('%I.%I', r[1], r[2]);
+END;
+$$;
+NOTICE: schemax.tabley
+NOTICE: "SchemaX"."TableY"
+SELECT parse_ident('foo.boo');
+ parse_ident
+-------------
+ {foo,boo}
+(1 row)
+
+SELECT parse_ident('foo.boo[]'); -- should fail
+ERROR: string is not a valid identifier: "foo.boo[]"
+SELECT parse_ident('foo.boo[]', strict => false); -- ok
+ parse_ident
+-------------
+ {foo,boo}
+(1 row)
+
+-- should fail
+SELECT parse_ident(' ');
+ERROR: string is not a valid identifier: " "
+SELECT parse_ident(' .aaa');
+ERROR: string is not a valid identifier: " .aaa"
+DETAIL: No valid identifier before ".".
+SELECT parse_ident(' aaa . ');
+ERROR: string is not a valid identifier: " aaa . "
+DETAIL: No valid identifier after ".".
+SELECT parse_ident('aaa.a%b');
+ERROR: string is not a valid identifier: "aaa.a%b"
+SELECT parse_ident(E'X\rXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX');
+ERROR: string is not a valid identifier: "X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+SELECT length(a[1]), length(a[2]) from parse_ident('"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx".yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy') as a ;
+ length | length
+--------+--------
+ 414 | 289
+(1 row)
+
+SELECT parse_ident(' first . " second " ." third ". " ' || repeat('x',66) || '"');
+ parse_ident
+-----------------------------------------------------------------------------------------------------------
+ {first," second "," third "," xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}
+(1 row)
+
+SELECT parse_ident(' first . " second " ." third ". " ' || repeat('x',66) || '"')::name[];
+ parse_ident
+------------------------------------------------------------------------------------------------------
+ {first," second "," third "," xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}
+(1 row)
+
+SELECT parse_ident(E'"c".X XXXX\002XXXXXX');
+ERROR: string is not a valid identifier: ""c".X XXXXXXXXXX"
+SELECT parse_ident('1020');
+ERROR: string is not a valid identifier: "1020"
+SELECT parse_ident('10.20');
+ERROR: string is not a valid identifier: "10.20"
+SELECT parse_ident('.');
+ERROR: string is not a valid identifier: "."
+DETAIL: No valid identifier before ".".
+SELECT parse_ident('.1020');
+ERROR: string is not a valid identifier: ".1020"
+DETAIL: No valid identifier before ".".
+SELECT parse_ident('xxx.1020');
+ERROR: string is not a valid identifier: "xxx.1020"
+DETAIL: No valid identifier after ".".
diff --git a/ydb/library/yql/tests/postgresql/original/cases/name.sql b/ydb/library/yql/tests/postgresql/original/cases/name.sql
new file mode 100644
index 0000000000..29a5d97e5b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/name.sql
@@ -0,0 +1,87 @@
+--
+-- NAME
+-- all inputs are silently truncated at NAMEDATALEN-1 (63) characters
+--
+
+-- fixed-length by reference
+SELECT name 'name string' = name 'name string' AS "True";
+
+SELECT name 'name string' = name 'name string ' AS "False";
+
+--
+--
+--
+
+CREATE TABLE NAME_TBL(f1 name);
+
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR');
+
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqr');
+
+INSERT INTO NAME_TBL(f1) VALUES ('asdfghjkl;');
+
+INSERT INTO NAME_TBL(f1) VALUES ('343f%2a');
+
+INSERT INTO NAME_TBL(f1) VALUES ('d34aaasdf');
+
+INSERT INTO NAME_TBL(f1) VALUES ('');
+
+INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ');
+
+
+SELECT * FROM NAME_TBL;
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <> '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 < '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 > '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 >= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR';
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*';
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 !~ '.*';
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '[0-9]';
+
+SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*asdf.*';
+
+DROP TABLE NAME_TBL;
+
+DO $$
+DECLARE r text[];
+BEGIN
+ r := parse_ident('Schemax.Tabley');
+ RAISE NOTICE '%', format('%I.%I', r[1], r[2]);
+ r := parse_ident('"SchemaX"."TableY"');
+ RAISE NOTICE '%', format('%I.%I', r[1], r[2]);
+END;
+$$;
+
+SELECT parse_ident('foo.boo');
+SELECT parse_ident('foo.boo[]'); -- should fail
+SELECT parse_ident('foo.boo[]', strict => false); -- ok
+
+-- should fail
+SELECT parse_ident(' ');
+SELECT parse_ident(' .aaa');
+SELECT parse_ident(' aaa . ');
+SELECT parse_ident('aaa.a%b');
+SELECT parse_ident(E'X\rXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX');
+
+SELECT length(a[1]), length(a[2]) from parse_ident('"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx".yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy') as a ;
+
+SELECT parse_ident(' first . " second " ." third ". " ' || repeat('x',66) || '"');
+SELECT parse_ident(' first . " second " ." third ". " ' || repeat('x',66) || '"')::name[];
+
+SELECT parse_ident(E'"c".X XXXX\002XXXXXX');
+SELECT parse_ident('1020');
+SELECT parse_ident('10.20');
+SELECT parse_ident('.');
+SELECT parse_ident('.1020');
+SELECT parse_ident('xxx.1020');
diff --git a/ydb/library/yql/tests/postgresql/original/cases/numeric.out b/ydb/library/yql/tests/postgresql/original/cases/numeric.out
new file mode 100644
index 0000000000..bea33181be
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/numeric.out
@@ -0,0 +1,3226 @@
+--
+-- NUMERIC
+--
+CREATE TABLE num_data (id int4, val numeric(210,10));
+CREATE TABLE num_exp_add (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_sub (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_div (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_mul (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_sqrt (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_ln (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_log10 (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_power_10_ln (id int4, expected numeric(210,10));
+CREATE TABLE num_result (id1 int4, id2 int4, result numeric(210,10));
+-- ******************************
+-- * The following EXPECTED results are computed by bc(1)
+-- * with a scale of 200
+-- ******************************
+BEGIN TRANSACTION;
+INSERT INTO num_exp_add VALUES (0,0,'0');
+INSERT INTO num_exp_sub VALUES (0,0,'0');
+INSERT INTO num_exp_mul VALUES (0,0,'0');
+INSERT INTO num_exp_div VALUES (0,0,'NaN');
+INSERT INTO num_exp_add VALUES (0,1,'0');
+INSERT INTO num_exp_sub VALUES (0,1,'0');
+INSERT INTO num_exp_mul VALUES (0,1,'0');
+INSERT INTO num_exp_div VALUES (0,1,'NaN');
+INSERT INTO num_exp_add VALUES (0,2,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (0,2,'34338492.215397047');
+INSERT INTO num_exp_mul VALUES (0,2,'0');
+INSERT INTO num_exp_div VALUES (0,2,'0');
+INSERT INTO num_exp_add VALUES (0,3,'4.31');
+INSERT INTO num_exp_sub VALUES (0,3,'-4.31');
+INSERT INTO num_exp_mul VALUES (0,3,'0');
+INSERT INTO num_exp_div VALUES (0,3,'0');
+INSERT INTO num_exp_add VALUES (0,4,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (0,4,'-7799461.4119');
+INSERT INTO num_exp_mul VALUES (0,4,'0');
+INSERT INTO num_exp_div VALUES (0,4,'0');
+INSERT INTO num_exp_add VALUES (0,5,'16397.038491');
+INSERT INTO num_exp_sub VALUES (0,5,'-16397.038491');
+INSERT INTO num_exp_mul VALUES (0,5,'0');
+INSERT INTO num_exp_div VALUES (0,5,'0');
+INSERT INTO num_exp_add VALUES (0,6,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (0,6,'-93901.57763026');
+INSERT INTO num_exp_mul VALUES (0,6,'0');
+INSERT INTO num_exp_div VALUES (0,6,'0');
+INSERT INTO num_exp_add VALUES (0,7,'-83028485');
+INSERT INTO num_exp_sub VALUES (0,7,'83028485');
+INSERT INTO num_exp_mul VALUES (0,7,'0');
+INSERT INTO num_exp_div VALUES (0,7,'0');
+INSERT INTO num_exp_add VALUES (0,8,'74881');
+INSERT INTO num_exp_sub VALUES (0,8,'-74881');
+INSERT INTO num_exp_mul VALUES (0,8,'0');
+INSERT INTO num_exp_div VALUES (0,8,'0');
+INSERT INTO num_exp_add VALUES (0,9,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (0,9,'24926804.045047420');
+INSERT INTO num_exp_mul VALUES (0,9,'0');
+INSERT INTO num_exp_div VALUES (0,9,'0');
+INSERT INTO num_exp_add VALUES (1,0,'0');
+INSERT INTO num_exp_sub VALUES (1,0,'0');
+INSERT INTO num_exp_mul VALUES (1,0,'0');
+INSERT INTO num_exp_div VALUES (1,0,'NaN');
+INSERT INTO num_exp_add VALUES (1,1,'0');
+INSERT INTO num_exp_sub VALUES (1,1,'0');
+INSERT INTO num_exp_mul VALUES (1,1,'0');
+INSERT INTO num_exp_div VALUES (1,1,'NaN');
+INSERT INTO num_exp_add VALUES (1,2,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (1,2,'34338492.215397047');
+INSERT INTO num_exp_mul VALUES (1,2,'0');
+INSERT INTO num_exp_div VALUES (1,2,'0');
+INSERT INTO num_exp_add VALUES (1,3,'4.31');
+INSERT INTO num_exp_sub VALUES (1,3,'-4.31');
+INSERT INTO num_exp_mul VALUES (1,3,'0');
+INSERT INTO num_exp_div VALUES (1,3,'0');
+INSERT INTO num_exp_add VALUES (1,4,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (1,4,'-7799461.4119');
+INSERT INTO num_exp_mul VALUES (1,4,'0');
+INSERT INTO num_exp_div VALUES (1,4,'0');
+INSERT INTO num_exp_add VALUES (1,5,'16397.038491');
+INSERT INTO num_exp_sub VALUES (1,5,'-16397.038491');
+INSERT INTO num_exp_mul VALUES (1,5,'0');
+INSERT INTO num_exp_div VALUES (1,5,'0');
+INSERT INTO num_exp_add VALUES (1,6,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (1,6,'-93901.57763026');
+INSERT INTO num_exp_mul VALUES (1,6,'0');
+INSERT INTO num_exp_div VALUES (1,6,'0');
+INSERT INTO num_exp_add VALUES (1,7,'-83028485');
+INSERT INTO num_exp_sub VALUES (1,7,'83028485');
+INSERT INTO num_exp_mul VALUES (1,7,'0');
+INSERT INTO num_exp_div VALUES (1,7,'0');
+INSERT INTO num_exp_add VALUES (1,8,'74881');
+INSERT INTO num_exp_sub VALUES (1,8,'-74881');
+INSERT INTO num_exp_mul VALUES (1,8,'0');
+INSERT INTO num_exp_div VALUES (1,8,'0');
+INSERT INTO num_exp_add VALUES (1,9,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (1,9,'24926804.045047420');
+INSERT INTO num_exp_mul VALUES (1,9,'0');
+INSERT INTO num_exp_div VALUES (1,9,'0');
+INSERT INTO num_exp_add VALUES (2,0,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (2,0,'-34338492.215397047');
+INSERT INTO num_exp_mul VALUES (2,0,'0');
+INSERT INTO num_exp_div VALUES (2,0,'NaN');
+INSERT INTO num_exp_add VALUES (2,1,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (2,1,'-34338492.215397047');
+INSERT INTO num_exp_mul VALUES (2,1,'0');
+INSERT INTO num_exp_div VALUES (2,1,'NaN');
+INSERT INTO num_exp_add VALUES (2,2,'-68676984.430794094');
+INSERT INTO num_exp_sub VALUES (2,2,'0');
+INSERT INTO num_exp_mul VALUES (2,2,'1179132047626883.596862135856320209');
+INSERT INTO num_exp_div VALUES (2,2,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (2,3,'-34338487.905397047');
+INSERT INTO num_exp_sub VALUES (2,3,'-34338496.525397047');
+INSERT INTO num_exp_mul VALUES (2,3,'-147998901.44836127257');
+INSERT INTO num_exp_div VALUES (2,3,'-7967167.56737750510440835266');
+INSERT INTO num_exp_add VALUES (2,4,'-26539030.803497047');
+INSERT INTO num_exp_sub VALUES (2,4,'-42137953.627297047');
+INSERT INTO num_exp_mul VALUES (2,4,'-267821744976817.8111137106593');
+INSERT INTO num_exp_div VALUES (2,4,'-4.40267480046830116685');
+INSERT INTO num_exp_add VALUES (2,5,'-34322095.176906047');
+INSERT INTO num_exp_sub VALUES (2,5,'-34354889.253888047');
+INSERT INTO num_exp_mul VALUES (2,5,'-563049578578.769242506736077');
+INSERT INTO num_exp_div VALUES (2,5,'-2094.18866914563535496429');
+INSERT INTO num_exp_add VALUES (2,6,'-34244590.637766787');
+INSERT INTO num_exp_sub VALUES (2,6,'-34432393.793027307');
+INSERT INTO num_exp_mul VALUES (2,6,'-3224438592470.18449811926184222');
+INSERT INTO num_exp_div VALUES (2,6,'-365.68599891479766440940');
+INSERT INTO num_exp_add VALUES (2,7,'-117366977.215397047');
+INSERT INTO num_exp_sub VALUES (2,7,'48689992.784602953');
+INSERT INTO num_exp_mul VALUES (2,7,'2851072985828710.485883795');
+INSERT INTO num_exp_div VALUES (2,7,'.41357483778485235518');
+INSERT INTO num_exp_add VALUES (2,8,'-34263611.215397047');
+INSERT INTO num_exp_sub VALUES (2,8,'-34413373.215397047');
+INSERT INTO num_exp_mul VALUES (2,8,'-2571300635581.146276407');
+INSERT INTO num_exp_div VALUES (2,8,'-458.57416721727870888476');
+INSERT INTO num_exp_add VALUES (2,9,'-59265296.260444467');
+INSERT INTO num_exp_sub VALUES (2,9,'-9411688.170349627');
+INSERT INTO num_exp_mul VALUES (2,9,'855948866655588.453741509242968740');
+INSERT INTO num_exp_div VALUES (2,9,'1.37757299946438931811');
+INSERT INTO num_exp_add VALUES (3,0,'4.31');
+INSERT INTO num_exp_sub VALUES (3,0,'4.31');
+INSERT INTO num_exp_mul VALUES (3,0,'0');
+INSERT INTO num_exp_div VALUES (3,0,'NaN');
+INSERT INTO num_exp_add VALUES (3,1,'4.31');
+INSERT INTO num_exp_sub VALUES (3,1,'4.31');
+INSERT INTO num_exp_mul VALUES (3,1,'0');
+INSERT INTO num_exp_div VALUES (3,1,'NaN');
+INSERT INTO num_exp_add VALUES (3,2,'-34338487.905397047');
+INSERT INTO num_exp_sub VALUES (3,2,'34338496.525397047');
+INSERT INTO num_exp_mul VALUES (3,2,'-147998901.44836127257');
+INSERT INTO num_exp_div VALUES (3,2,'-.00000012551512084352');
+INSERT INTO num_exp_add VALUES (3,3,'8.62');
+INSERT INTO num_exp_sub VALUES (3,3,'0');
+INSERT INTO num_exp_mul VALUES (3,3,'18.5761');
+INSERT INTO num_exp_div VALUES (3,3,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (3,4,'7799465.7219');
+INSERT INTO num_exp_sub VALUES (3,4,'-7799457.1019');
+INSERT INTO num_exp_mul VALUES (3,4,'33615678.685289');
+INSERT INTO num_exp_div VALUES (3,4,'.00000055260225961552');
+INSERT INTO num_exp_add VALUES (3,5,'16401.348491');
+INSERT INTO num_exp_sub VALUES (3,5,'-16392.728491');
+INSERT INTO num_exp_mul VALUES (3,5,'70671.23589621');
+INSERT INTO num_exp_div VALUES (3,5,'.00026285234387695504');
+INSERT INTO num_exp_add VALUES (3,6,'93905.88763026');
+INSERT INTO num_exp_sub VALUES (3,6,'-93897.26763026');
+INSERT INTO num_exp_mul VALUES (3,6,'404715.7995864206');
+INSERT INTO num_exp_div VALUES (3,6,'.00004589912234457595');
+INSERT INTO num_exp_add VALUES (3,7,'-83028480.69');
+INSERT INTO num_exp_sub VALUES (3,7,'83028489.31');
+INSERT INTO num_exp_mul VALUES (3,7,'-357852770.35');
+INSERT INTO num_exp_div VALUES (3,7,'-.00000005190989574240');
+INSERT INTO num_exp_add VALUES (3,8,'74885.31');
+INSERT INTO num_exp_sub VALUES (3,8,'-74876.69');
+INSERT INTO num_exp_mul VALUES (3,8,'322737.11');
+INSERT INTO num_exp_div VALUES (3,8,'.00005755799201399553');
+INSERT INTO num_exp_add VALUES (3,9,'-24926799.735047420');
+INSERT INTO num_exp_sub VALUES (3,9,'24926808.355047420');
+INSERT INTO num_exp_mul VALUES (3,9,'-107434525.43415438020');
+INSERT INTO num_exp_div VALUES (3,9,'-.00000017290624149854');
+INSERT INTO num_exp_add VALUES (4,0,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (4,0,'7799461.4119');
+INSERT INTO num_exp_mul VALUES (4,0,'0');
+INSERT INTO num_exp_div VALUES (4,0,'NaN');
+INSERT INTO num_exp_add VALUES (4,1,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (4,1,'7799461.4119');
+INSERT INTO num_exp_mul VALUES (4,1,'0');
+INSERT INTO num_exp_div VALUES (4,1,'NaN');
+INSERT INTO num_exp_add VALUES (4,2,'-26539030.803497047');
+INSERT INTO num_exp_sub VALUES (4,2,'42137953.627297047');
+INSERT INTO num_exp_mul VALUES (4,2,'-267821744976817.8111137106593');
+INSERT INTO num_exp_div VALUES (4,2,'-.22713465002993920385');
+INSERT INTO num_exp_add VALUES (4,3,'7799465.7219');
+INSERT INTO num_exp_sub VALUES (4,3,'7799457.1019');
+INSERT INTO num_exp_mul VALUES (4,3,'33615678.685289');
+INSERT INTO num_exp_div VALUES (4,3,'1809619.81714617169373549883');
+INSERT INTO num_exp_add VALUES (4,4,'15598922.8238');
+INSERT INTO num_exp_sub VALUES (4,4,'0');
+INSERT INTO num_exp_mul VALUES (4,4,'60831598315717.14146161');
+INSERT INTO num_exp_div VALUES (4,4,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (4,5,'7815858.450391');
+INSERT INTO num_exp_sub VALUES (4,5,'7783064.373409');
+INSERT INTO num_exp_mul VALUES (4,5,'127888068979.9935054429');
+INSERT INTO num_exp_div VALUES (4,5,'475.66281046305802686061');
+INSERT INTO num_exp_add VALUES (4,6,'7893362.98953026');
+INSERT INTO num_exp_sub VALUES (4,6,'7705559.83426974');
+INSERT INTO num_exp_mul VALUES (4,6,'732381731243.745115764094');
+INSERT INTO num_exp_div VALUES (4,6,'83.05996138436129499606');
+INSERT INTO num_exp_add VALUES (4,7,'-75229023.5881');
+INSERT INTO num_exp_sub VALUES (4,7,'90827946.4119');
+INSERT INTO num_exp_mul VALUES (4,7,'-647577464846017.9715');
+INSERT INTO num_exp_div VALUES (4,7,'-.09393717604145131637');
+INSERT INTO num_exp_add VALUES (4,8,'7874342.4119');
+INSERT INTO num_exp_sub VALUES (4,8,'7724580.4119');
+INSERT INTO num_exp_mul VALUES (4,8,'584031469984.4839');
+INSERT INTO num_exp_div VALUES (4,8,'104.15808298366741897143');
+INSERT INTO num_exp_add VALUES (4,9,'-17127342.633147420');
+INSERT INTO num_exp_sub VALUES (4,9,'32726265.456947420');
+INSERT INTO num_exp_mul VALUES (4,9,'-194415646271340.1815956522980');
+INSERT INTO num_exp_div VALUES (4,9,'-.31289456112403769409');
+INSERT INTO num_exp_add VALUES (5,0,'16397.038491');
+INSERT INTO num_exp_sub VALUES (5,0,'16397.038491');
+INSERT INTO num_exp_mul VALUES (5,0,'0');
+INSERT INTO num_exp_div VALUES (5,0,'NaN');
+INSERT INTO num_exp_add VALUES (5,1,'16397.038491');
+INSERT INTO num_exp_sub VALUES (5,1,'16397.038491');
+INSERT INTO num_exp_mul VALUES (5,1,'0');
+INSERT INTO num_exp_div VALUES (5,1,'NaN');
+INSERT INTO num_exp_add VALUES (5,2,'-34322095.176906047');
+INSERT INTO num_exp_sub VALUES (5,2,'34354889.253888047');
+INSERT INTO num_exp_mul VALUES (5,2,'-563049578578.769242506736077');
+INSERT INTO num_exp_div VALUES (5,2,'-.00047751189505192446');
+INSERT INTO num_exp_add VALUES (5,3,'16401.348491');
+INSERT INTO num_exp_sub VALUES (5,3,'16392.728491');
+INSERT INTO num_exp_mul VALUES (5,3,'70671.23589621');
+INSERT INTO num_exp_div VALUES (5,3,'3804.41728329466357308584');
+INSERT INTO num_exp_add VALUES (5,4,'7815858.450391');
+INSERT INTO num_exp_sub VALUES (5,4,'-7783064.373409');
+INSERT INTO num_exp_mul VALUES (5,4,'127888068979.9935054429');
+INSERT INTO num_exp_div VALUES (5,4,'.00210232958726897192');
+INSERT INTO num_exp_add VALUES (5,5,'32794.076982');
+INSERT INTO num_exp_sub VALUES (5,5,'0');
+INSERT INTO num_exp_mul VALUES (5,5,'268862871.275335557081');
+INSERT INTO num_exp_div VALUES (5,5,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (5,6,'110298.61612126');
+INSERT INTO num_exp_sub VALUES (5,6,'-77504.53913926');
+INSERT INTO num_exp_mul VALUES (5,6,'1539707782.76899778633766');
+INSERT INTO num_exp_div VALUES (5,6,'.17461941433576102689');
+INSERT INTO num_exp_add VALUES (5,7,'-83012087.961509');
+INSERT INTO num_exp_sub VALUES (5,7,'83044882.038491');
+INSERT INTO num_exp_mul VALUES (5,7,'-1361421264394.416135');
+INSERT INTO num_exp_div VALUES (5,7,'-.00019748690453643710');
+INSERT INTO num_exp_add VALUES (5,8,'91278.038491');
+INSERT INTO num_exp_sub VALUES (5,8,'-58483.961509');
+INSERT INTO num_exp_mul VALUES (5,8,'1227826639.244571');
+INSERT INTO num_exp_div VALUES (5,8,'.21897461960978085228');
+INSERT INTO num_exp_add VALUES (5,9,'-24910407.006556420');
+INSERT INTO num_exp_sub VALUES (5,9,'24943201.083538420');
+INSERT INTO num_exp_mul VALUES (5,9,'-408725765384.257043660243220');
+INSERT INTO num_exp_div VALUES (5,9,'-.00065780749354660427');
+INSERT INTO num_exp_add VALUES (6,0,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (6,0,'93901.57763026');
+INSERT INTO num_exp_mul VALUES (6,0,'0');
+INSERT INTO num_exp_div VALUES (6,0,'NaN');
+INSERT INTO num_exp_add VALUES (6,1,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (6,1,'93901.57763026');
+INSERT INTO num_exp_mul VALUES (6,1,'0');
+INSERT INTO num_exp_div VALUES (6,1,'NaN');
+INSERT INTO num_exp_add VALUES (6,2,'-34244590.637766787');
+INSERT INTO num_exp_sub VALUES (6,2,'34432393.793027307');
+INSERT INTO num_exp_mul VALUES (6,2,'-3224438592470.18449811926184222');
+INSERT INTO num_exp_div VALUES (6,2,'-.00273458651128995823');
+INSERT INTO num_exp_add VALUES (6,3,'93905.88763026');
+INSERT INTO num_exp_sub VALUES (6,3,'93897.26763026');
+INSERT INTO num_exp_mul VALUES (6,3,'404715.7995864206');
+INSERT INTO num_exp_div VALUES (6,3,'21786.90896293735498839907');
+INSERT INTO num_exp_add VALUES (6,4,'7893362.98953026');
+INSERT INTO num_exp_sub VALUES (6,4,'-7705559.83426974');
+INSERT INTO num_exp_mul VALUES (6,4,'732381731243.745115764094');
+INSERT INTO num_exp_div VALUES (6,4,'.01203949512295682469');
+INSERT INTO num_exp_add VALUES (6,5,'110298.61612126');
+INSERT INTO num_exp_sub VALUES (6,5,'77504.53913926');
+INSERT INTO num_exp_mul VALUES (6,5,'1539707782.76899778633766');
+INSERT INTO num_exp_div VALUES (6,5,'5.72674008674192359679');
+INSERT INTO num_exp_add VALUES (6,6,'187803.15526052');
+INSERT INTO num_exp_sub VALUES (6,6,'0');
+INSERT INTO num_exp_mul VALUES (6,6,'8817506281.4517452372676676');
+INSERT INTO num_exp_div VALUES (6,6,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (6,7,'-82934583.42236974');
+INSERT INTO num_exp_sub VALUES (6,7,'83122386.57763026');
+INSERT INTO num_exp_mul VALUES (6,7,'-7796505729750.37795610');
+INSERT INTO num_exp_div VALUES (6,7,'-.00113095617281538980');
+INSERT INTO num_exp_add VALUES (6,8,'168782.57763026');
+INSERT INTO num_exp_sub VALUES (6,8,'19020.57763026');
+INSERT INTO num_exp_mul VALUES (6,8,'7031444034.53149906');
+INSERT INTO num_exp_div VALUES (6,8,'1.25401073209839612184');
+INSERT INTO num_exp_add VALUES (6,9,'-24832902.467417160');
+INSERT INTO num_exp_sub VALUES (6,9,'25020705.622677680');
+INSERT INTO num_exp_mul VALUES (6,9,'-2340666225110.29929521292692920');
+INSERT INTO num_exp_div VALUES (6,9,'-.00376709254265256789');
+INSERT INTO num_exp_add VALUES (7,0,'-83028485');
+INSERT INTO num_exp_sub VALUES (7,0,'-83028485');
+INSERT INTO num_exp_mul VALUES (7,0,'0');
+INSERT INTO num_exp_div VALUES (7,0,'NaN');
+INSERT INTO num_exp_add VALUES (7,1,'-83028485');
+INSERT INTO num_exp_sub VALUES (7,1,'-83028485');
+INSERT INTO num_exp_mul VALUES (7,1,'0');
+INSERT INTO num_exp_div VALUES (7,1,'NaN');
+INSERT INTO num_exp_add VALUES (7,2,'-117366977.215397047');
+INSERT INTO num_exp_sub VALUES (7,2,'-48689992.784602953');
+INSERT INTO num_exp_mul VALUES (7,2,'2851072985828710.485883795');
+INSERT INTO num_exp_div VALUES (7,2,'2.41794207151503385700');
+INSERT INTO num_exp_add VALUES (7,3,'-83028480.69');
+INSERT INTO num_exp_sub VALUES (7,3,'-83028489.31');
+INSERT INTO num_exp_mul VALUES (7,3,'-357852770.35');
+INSERT INTO num_exp_div VALUES (7,3,'-19264149.65197215777262180974');
+INSERT INTO num_exp_add VALUES (7,4,'-75229023.5881');
+INSERT INTO num_exp_sub VALUES (7,4,'-90827946.4119');
+INSERT INTO num_exp_mul VALUES (7,4,'-647577464846017.9715');
+INSERT INTO num_exp_div VALUES (7,4,'-10.64541262725136247686');
+INSERT INTO num_exp_add VALUES (7,5,'-83012087.961509');
+INSERT INTO num_exp_sub VALUES (7,5,'-83044882.038491');
+INSERT INTO num_exp_mul VALUES (7,5,'-1361421264394.416135');
+INSERT INTO num_exp_div VALUES (7,5,'-5063.62688881730941836574');
+INSERT INTO num_exp_add VALUES (7,6,'-82934583.42236974');
+INSERT INTO num_exp_sub VALUES (7,6,'-83122386.57763026');
+INSERT INTO num_exp_mul VALUES (7,6,'-7796505729750.37795610');
+INSERT INTO num_exp_div VALUES (7,6,'-884.20756174009028770294');
+INSERT INTO num_exp_add VALUES (7,7,'-166056970');
+INSERT INTO num_exp_sub VALUES (7,7,'0');
+INSERT INTO num_exp_mul VALUES (7,7,'6893729321395225');
+INSERT INTO num_exp_div VALUES (7,7,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (7,8,'-82953604');
+INSERT INTO num_exp_sub VALUES (7,8,'-83103366');
+INSERT INTO num_exp_mul VALUES (7,8,'-6217255985285');
+INSERT INTO num_exp_div VALUES (7,8,'-1108.80577182462841041118');
+INSERT INTO num_exp_add VALUES (7,9,'-107955289.045047420');
+INSERT INTO num_exp_sub VALUES (7,9,'-58101680.954952580');
+INSERT INTO num_exp_mul VALUES (7,9,'2069634775752159.035758700');
+INSERT INTO num_exp_div VALUES (7,9,'3.33089171198810413382');
+INSERT INTO num_exp_add VALUES (8,0,'74881');
+INSERT INTO num_exp_sub VALUES (8,0,'74881');
+INSERT INTO num_exp_mul VALUES (8,0,'0');
+INSERT INTO num_exp_div VALUES (8,0,'NaN');
+INSERT INTO num_exp_add VALUES (8,1,'74881');
+INSERT INTO num_exp_sub VALUES (8,1,'74881');
+INSERT INTO num_exp_mul VALUES (8,1,'0');
+INSERT INTO num_exp_div VALUES (8,1,'NaN');
+INSERT INTO num_exp_add VALUES (8,2,'-34263611.215397047');
+INSERT INTO num_exp_sub VALUES (8,2,'34413373.215397047');
+INSERT INTO num_exp_mul VALUES (8,2,'-2571300635581.146276407');
+INSERT INTO num_exp_div VALUES (8,2,'-.00218067233500788615');
+INSERT INTO num_exp_add VALUES (8,3,'74885.31');
+INSERT INTO num_exp_sub VALUES (8,3,'74876.69');
+INSERT INTO num_exp_mul VALUES (8,3,'322737.11');
+INSERT INTO num_exp_div VALUES (8,3,'17373.78190255220417633410');
+INSERT INTO num_exp_add VALUES (8,4,'7874342.4119');
+INSERT INTO num_exp_sub VALUES (8,4,'-7724580.4119');
+INSERT INTO num_exp_mul VALUES (8,4,'584031469984.4839');
+INSERT INTO num_exp_div VALUES (8,4,'.00960079113741758956');
+INSERT INTO num_exp_add VALUES (8,5,'91278.038491');
+INSERT INTO num_exp_sub VALUES (8,5,'58483.961509');
+INSERT INTO num_exp_mul VALUES (8,5,'1227826639.244571');
+INSERT INTO num_exp_div VALUES (8,5,'4.56673929509287019456');
+INSERT INTO num_exp_add VALUES (8,6,'168782.57763026');
+INSERT INTO num_exp_sub VALUES (8,6,'-19020.57763026');
+INSERT INTO num_exp_mul VALUES (8,6,'7031444034.53149906');
+INSERT INTO num_exp_div VALUES (8,6,'.79744134113322314424');
+INSERT INTO num_exp_add VALUES (8,7,'-82953604');
+INSERT INTO num_exp_sub VALUES (8,7,'83103366');
+INSERT INTO num_exp_mul VALUES (8,7,'-6217255985285');
+INSERT INTO num_exp_div VALUES (8,7,'-.00090187120721280172');
+INSERT INTO num_exp_add VALUES (8,8,'149762');
+INSERT INTO num_exp_sub VALUES (8,8,'0');
+INSERT INTO num_exp_mul VALUES (8,8,'5607164161');
+INSERT INTO num_exp_div VALUES (8,8,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (8,9,'-24851923.045047420');
+INSERT INTO num_exp_sub VALUES (8,9,'25001685.045047420');
+INSERT INTO num_exp_mul VALUES (8,9,'-1866544013697.195857020');
+INSERT INTO num_exp_div VALUES (8,9,'-.00300403532938582735');
+INSERT INTO num_exp_add VALUES (9,0,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (9,0,'-24926804.045047420');
+INSERT INTO num_exp_mul VALUES (9,0,'0');
+INSERT INTO num_exp_div VALUES (9,0,'NaN');
+INSERT INTO num_exp_add VALUES (9,1,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (9,1,'-24926804.045047420');
+INSERT INTO num_exp_mul VALUES (9,1,'0');
+INSERT INTO num_exp_div VALUES (9,1,'NaN');
+INSERT INTO num_exp_add VALUES (9,2,'-59265296.260444467');
+INSERT INTO num_exp_sub VALUES (9,2,'9411688.170349627');
+INSERT INTO num_exp_mul VALUES (9,2,'855948866655588.453741509242968740');
+INSERT INTO num_exp_div VALUES (9,2,'.72591434384152961526');
+INSERT INTO num_exp_add VALUES (9,3,'-24926799.735047420');
+INSERT INTO num_exp_sub VALUES (9,3,'-24926808.355047420');
+INSERT INTO num_exp_mul VALUES (9,3,'-107434525.43415438020');
+INSERT INTO num_exp_div VALUES (9,3,'-5783481.21694835730858468677');
+INSERT INTO num_exp_add VALUES (9,4,'-17127342.633147420');
+INSERT INTO num_exp_sub VALUES (9,4,'-32726265.456947420');
+INSERT INTO num_exp_mul VALUES (9,4,'-194415646271340.1815956522980');
+INSERT INTO num_exp_div VALUES (9,4,'-3.19596478892958416484');
+INSERT INTO num_exp_add VALUES (9,5,'-24910407.006556420');
+INSERT INTO num_exp_sub VALUES (9,5,'-24943201.083538420');
+INSERT INTO num_exp_mul VALUES (9,5,'-408725765384.257043660243220');
+INSERT INTO num_exp_div VALUES (9,5,'-1520.20159364322004505807');
+INSERT INTO num_exp_add VALUES (9,6,'-24832902.467417160');
+INSERT INTO num_exp_sub VALUES (9,6,'-25020705.622677680');
+INSERT INTO num_exp_mul VALUES (9,6,'-2340666225110.29929521292692920');
+INSERT INTO num_exp_div VALUES (9,6,'-265.45671195426965751280');
+INSERT INTO num_exp_add VALUES (9,7,'-107955289.045047420');
+INSERT INTO num_exp_sub VALUES (9,7,'58101680.954952580');
+INSERT INTO num_exp_mul VALUES (9,7,'2069634775752159.035758700');
+INSERT INTO num_exp_div VALUES (9,7,'.30021990699995814689');
+INSERT INTO num_exp_add VALUES (9,8,'-24851923.045047420');
+INSERT INTO num_exp_sub VALUES (9,8,'-25001685.045047420');
+INSERT INTO num_exp_mul VALUES (9,8,'-1866544013697.195857020');
+INSERT INTO num_exp_div VALUES (9,8,'-332.88556569820675471748');
+INSERT INTO num_exp_add VALUES (9,9,'-49853608.090094840');
+INSERT INTO num_exp_sub VALUES (9,9,'0');
+INSERT INTO num_exp_mul VALUES (9,9,'621345559900192.420120630048656400');
+INSERT INTO num_exp_div VALUES (9,9,'1.00000000000000000000');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_sqrt VALUES (0,'0');
+INSERT INTO num_exp_sqrt VALUES (1,'0');
+INSERT INTO num_exp_sqrt VALUES (2,'5859.90547836712524903505');
+INSERT INTO num_exp_sqrt VALUES (3,'2.07605394920266944396');
+INSERT INTO num_exp_sqrt VALUES (4,'2792.75158435189147418923');
+INSERT INTO num_exp_sqrt VALUES (5,'128.05092147657509145473');
+INSERT INTO num_exp_sqrt VALUES (6,'306.43364311096782703406');
+INSERT INTO num_exp_sqrt VALUES (7,'9111.99676251039939975230');
+INSERT INTO num_exp_sqrt VALUES (8,'273.64392922189960397542');
+INSERT INTO num_exp_sqrt VALUES (9,'4992.67503899937593364766');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_ln VALUES (0,'NaN');
+INSERT INTO num_exp_ln VALUES (1,'NaN');
+INSERT INTO num_exp_ln VALUES (2,'17.35177750493897715514');
+INSERT INTO num_exp_ln VALUES (3,'1.46093790411565641971');
+INSERT INTO num_exp_ln VALUES (4,'15.86956523951936572464');
+INSERT INTO num_exp_ln VALUES (5,'9.70485601768871834038');
+INSERT INTO num_exp_ln VALUES (6,'11.45000246622944403127');
+INSERT INTO num_exp_ln VALUES (7,'18.23469429965478772991');
+INSERT INTO num_exp_ln VALUES (8,'11.22365546576315513668');
+INSERT INTO num_exp_ln VALUES (9,'17.03145425013166006962');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_log10 VALUES (0,'NaN');
+INSERT INTO num_exp_log10 VALUES (1,'NaN');
+INSERT INTO num_exp_log10 VALUES (2,'7.53578122160797276459');
+INSERT INTO num_exp_log10 VALUES (3,'.63447727016073160075');
+INSERT INTO num_exp_log10 VALUES (4,'6.89206461372691743345');
+INSERT INTO num_exp_log10 VALUES (5,'4.21476541614777768626');
+INSERT INTO num_exp_log10 VALUES (6,'4.97267288886207207671');
+INSERT INTO num_exp_log10 VALUES (7,'7.91922711353275546914');
+INSERT INTO num_exp_log10 VALUES (8,'4.87437163556421004138');
+INSERT INTO num_exp_log10 VALUES (9,'7.39666659961986567059');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_power_10_ln VALUES (0,'NaN');
+INSERT INTO num_exp_power_10_ln VALUES (1,'NaN');
+INSERT INTO num_exp_power_10_ln VALUES (2,'224790267919917955.13261618583642653184');
+INSERT INTO num_exp_power_10_ln VALUES (3,'28.90266599445155957393');
+INSERT INTO num_exp_power_10_ln VALUES (4,'7405685069594999.07733999469386277636');
+INSERT INTO num_exp_power_10_ln VALUES (5,'5068226527.32127265408584640098');
+INSERT INTO num_exp_power_10_ln VALUES (6,'281839893606.99372343357047819067');
+INSERT INTO num_exp_power_10_ln VALUES (7,'1716699575118597095.42330819910640247627');
+INSERT INTO num_exp_power_10_ln VALUES (8,'167361463828.07491320069016125952');
+INSERT INTO num_exp_power_10_ln VALUES (9,'107511333880052007.04141124673540337457');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_data VALUES (0, '0');
+INSERT INTO num_data VALUES (1, '0');
+INSERT INTO num_data VALUES (2, '-34338492.215397047');
+INSERT INTO num_data VALUES (3, '4.31');
+INSERT INTO num_data VALUES (4, '7799461.4119');
+INSERT INTO num_data VALUES (5, '16397.038491');
+INSERT INTO num_data VALUES (6, '93901.57763026');
+INSERT INTO num_data VALUES (7, '-83028485');
+INSERT INTO num_data VALUES (8, '74881');
+INSERT INTO num_data VALUES (9, '-24926804.045047420');
+COMMIT TRANSACTION;
+-- ******************************
+-- * Create indices for faster checks
+-- ******************************
+CREATE UNIQUE INDEX num_exp_add_idx ON num_exp_add (id1, id2);
+CREATE UNIQUE INDEX num_exp_sub_idx ON num_exp_sub (id1, id2);
+CREATE UNIQUE INDEX num_exp_div_idx ON num_exp_div (id1, id2);
+CREATE UNIQUE INDEX num_exp_mul_idx ON num_exp_mul (id1, id2);
+CREATE UNIQUE INDEX num_exp_sqrt_idx ON num_exp_sqrt (id);
+CREATE UNIQUE INDEX num_exp_ln_idx ON num_exp_ln (id);
+CREATE UNIQUE INDEX num_exp_log10_idx ON num_exp_log10 (id);
+CREATE UNIQUE INDEX num_exp_power_10_ln_idx ON num_exp_power_10_ln (id);
+VACUUM ANALYZE num_exp_add;
+VACUUM ANALYZE num_exp_sub;
+VACUUM ANALYZE num_exp_div;
+VACUUM ANALYZE num_exp_mul;
+VACUUM ANALYZE num_exp_sqrt;
+VACUUM ANALYZE num_exp_ln;
+VACUUM ANALYZE num_exp_log10;
+VACUUM ANALYZE num_exp_power_10_ln;
+-- ******************************
+-- * Now check the behaviour of the NUMERIC type
+-- ******************************
+-- ******************************
+-- * Addition check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, t1.val + t2.val
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_add t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val + t2.val, 10)
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 10) as expected
+ FROM num_result t1, num_exp_add t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 10);
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+-- ******************************
+-- * Subtraction check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, t1.val - t2.val
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_sub t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val - t2.val, 40)
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 40)
+ FROM num_result t1, num_exp_sub t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 40);
+ id1 | id2 | result | round
+-----+-----+--------+-------
+(0 rows)
+
+-- ******************************
+-- * Multiply check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, t1.val * t2.val
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_mul t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val * t2.val, 30)
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 30) as expected
+ FROM num_result t1, num_exp_mul t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 30);
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+-- ******************************
+-- * Division check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, t1.val / t2.val
+ FROM num_data t1, num_data t2
+ WHERE t2.val != '0.0';
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_div t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val / t2.val, 80)
+ FROM num_data t1, num_data t2
+ WHERE t2.val != '0.0';
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 80) as expected
+ FROM num_result t1, num_exp_div t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 80);
+ id1 | id2 | result | expected
+-----+-----+--------+----------
+(0 rows)
+
+-- ******************************
+-- * Square root check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT id, 0, SQRT(ABS(val))
+ FROM num_data;
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_sqrt t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+ id1 | result | expected
+-----+--------+----------
+(0 rows)
+
+-- ******************************
+-- * Natural logarithm check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT id, 0, LN(ABS(val))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_ln t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+ id1 | result | expected
+-----+--------+----------
+(0 rows)
+
+-- ******************************
+-- * Logarithm base 10 check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT id, 0, LOG(numeric '10', ABS(val))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_log10 t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+ id1 | result | expected
+-----+--------+----------
+(0 rows)
+
+-- ******************************
+-- * POWER(10, LN(value)) check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT id, 0, POWER(numeric '10', LN(ABS(round(val,200))))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_power_10_ln t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+ id1 | result | expected
+-----+--------+----------
+(0 rows)
+
+-- ******************************
+-- * Check behavior with Inf and NaN inputs. It's easiest to handle these
+-- * separately from the num_data framework used above, because some input
+-- * combinations will throw errors.
+-- ******************************
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('inf'),('-inf'),('nan'))
+SELECT x1, x2,
+ x1 + x2 AS sum,
+ x1 - x2 AS diff,
+ x1 * x2 AS prod
+FROM v AS v1(x1), v AS v2(x2);
+ x1 | x2 | sum | diff | prod
+-----------+-----------+-----------+-----------+-----------
+ 0 | 0 | 0 | 0 | 0
+ 0 | 1 | 1 | -1 | 0
+ 0 | -1 | -1 | 1 | 0
+ 0 | 4.2 | 4.2 | -4.2 | 0.0
+ 0 | Infinity | Infinity | -Infinity | NaN
+ 0 | -Infinity | -Infinity | Infinity | NaN
+ 0 | NaN | NaN | NaN | NaN
+ 1 | 0 | 1 | 1 | 0
+ 1 | 1 | 2 | 0 | 1
+ 1 | -1 | 0 | 2 | -1
+ 1 | 4.2 | 5.2 | -3.2 | 4.2
+ 1 | Infinity | Infinity | -Infinity | Infinity
+ 1 | -Infinity | -Infinity | Infinity | -Infinity
+ 1 | NaN | NaN | NaN | NaN
+ -1 | 0 | -1 | -1 | 0
+ -1 | 1 | 0 | -2 | -1
+ -1 | -1 | -2 | 0 | 1
+ -1 | 4.2 | 3.2 | -5.2 | -4.2
+ -1 | Infinity | Infinity | -Infinity | -Infinity
+ -1 | -Infinity | -Infinity | Infinity | Infinity
+ -1 | NaN | NaN | NaN | NaN
+ 4.2 | 0 | 4.2 | 4.2 | 0.0
+ 4.2 | 1 | 5.2 | 3.2 | 4.2
+ 4.2 | -1 | 3.2 | 5.2 | -4.2
+ 4.2 | 4.2 | 8.4 | 0.0 | 17.64
+ 4.2 | Infinity | Infinity | -Infinity | Infinity
+ 4.2 | -Infinity | -Infinity | Infinity | -Infinity
+ 4.2 | NaN | NaN | NaN | NaN
+ Infinity | 0 | Infinity | Infinity | NaN
+ Infinity | 1 | Infinity | Infinity | Infinity
+ Infinity | -1 | Infinity | Infinity | -Infinity
+ Infinity | 4.2 | Infinity | Infinity | Infinity
+ Infinity | Infinity | Infinity | NaN | Infinity
+ Infinity | -Infinity | NaN | Infinity | -Infinity
+ Infinity | NaN | NaN | NaN | NaN
+ -Infinity | 0 | -Infinity | -Infinity | NaN
+ -Infinity | 1 | -Infinity | -Infinity | -Infinity
+ -Infinity | -1 | -Infinity | -Infinity | Infinity
+ -Infinity | 4.2 | -Infinity | -Infinity | -Infinity
+ -Infinity | Infinity | NaN | -Infinity | -Infinity
+ -Infinity | -Infinity | -Infinity | NaN | Infinity
+ -Infinity | NaN | NaN | NaN | NaN
+ NaN | 0 | NaN | NaN | NaN
+ NaN | 1 | NaN | NaN | NaN
+ NaN | -1 | NaN | NaN | NaN
+ NaN | 4.2 | NaN | NaN | NaN
+ NaN | Infinity | NaN | NaN | NaN
+ NaN | -Infinity | NaN | NaN | NaN
+ NaN | NaN | NaN | NaN | NaN
+(49 rows)
+
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('inf'),('-inf'),('nan'))
+SELECT x1, x2,
+ x1 / x2 AS quot,
+ x1 % x2 AS mod,
+ div(x1, x2) AS div
+FROM v AS v1(x1), v AS v2(x2) WHERE x2 != 0;
+ x1 | x2 | quot | mod | div
+-----------+-----------+-------------------------+------+-----------
+ 0 | 1 | 0.00000000000000000000 | 0 | 0
+ 1 | 1 | 1.00000000000000000000 | 0 | 1
+ -1 | 1 | -1.00000000000000000000 | 0 | -1
+ 4.2 | 1 | 4.2000000000000000 | 0.2 | 4
+ Infinity | 1 | Infinity | NaN | Infinity
+ -Infinity | 1 | -Infinity | NaN | -Infinity
+ NaN | 1 | NaN | NaN | NaN
+ 0 | -1 | 0.00000000000000000000 | 0 | 0
+ 1 | -1 | -1.00000000000000000000 | 0 | -1
+ -1 | -1 | 1.00000000000000000000 | 0 | 1
+ 4.2 | -1 | -4.2000000000000000 | 0.2 | -4
+ Infinity | -1 | -Infinity | NaN | -Infinity
+ -Infinity | -1 | Infinity | NaN | Infinity
+ NaN | -1 | NaN | NaN | NaN
+ 0 | 4.2 | 0.00000000000000000000 | 0.0 | 0
+ 1 | 4.2 | 0.23809523809523809524 | 1.0 | 0
+ -1 | 4.2 | -0.23809523809523809524 | -1.0 | 0
+ 4.2 | 4.2 | 1.00000000000000000000 | 0.0 | 1
+ Infinity | 4.2 | Infinity | NaN | Infinity
+ -Infinity | 4.2 | -Infinity | NaN | -Infinity
+ NaN | 4.2 | NaN | NaN | NaN
+ 0 | Infinity | 0 | 0 | 0
+ 1 | Infinity | 0 | 1 | 0
+ -1 | Infinity | 0 | -1 | 0
+ 4.2 | Infinity | 0 | 4.2 | 0
+ Infinity | Infinity | NaN | NaN | NaN
+ -Infinity | Infinity | NaN | NaN | NaN
+ NaN | Infinity | NaN | NaN | NaN
+ 0 | -Infinity | 0 | 0 | 0
+ 1 | -Infinity | 0 | 1 | 0
+ -1 | -Infinity | 0 | -1 | 0
+ 4.2 | -Infinity | 0 | 4.2 | 0
+ Infinity | -Infinity | NaN | NaN | NaN
+ -Infinity | -Infinity | NaN | NaN | NaN
+ NaN | -Infinity | NaN | NaN | NaN
+ 0 | NaN | NaN | NaN | NaN
+ 1 | NaN | NaN | NaN | NaN
+ -1 | NaN | NaN | NaN | NaN
+ 4.2 | NaN | NaN | NaN | NaN
+ Infinity | NaN | NaN | NaN | NaN
+ -Infinity | NaN | NaN | NaN | NaN
+ NaN | NaN | NaN | NaN | NaN
+(42 rows)
+
+SELECT 'inf'::numeric / '0';
+ERROR: division by zero
+SELECT '-inf'::numeric / '0';
+ERROR: division by zero
+SELECT 'nan'::numeric / '0';
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT '0'::numeric / '0';
+ERROR: division by zero
+SELECT 'inf'::numeric % '0';
+ERROR: division by zero
+SELECT '-inf'::numeric % '0';
+ERROR: division by zero
+SELECT 'nan'::numeric % '0';
+ ?column?
+----------
+ NaN
+(1 row)
+
+SELECT '0'::numeric % '0';
+ERROR: division by zero
+SELECT div('inf'::numeric, '0');
+ERROR: division by zero
+SELECT div('-inf'::numeric, '0');
+ERROR: division by zero
+SELECT div('nan'::numeric, '0');
+ div
+-----
+ NaN
+(1 row)
+
+SELECT div('0'::numeric, '0');
+ERROR: division by zero
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('inf'),('-inf'),('nan'))
+SELECT x, -x as minusx, abs(x), floor(x), ceil(x), sign(x), numeric_inc(x) as inc
+FROM v;
+ x | minusx | abs | floor | ceil | sign | inc
+-----------+-----------+----------+-----------+-----------+------+-----------
+ 0 | 0 | 0 | 0 | 0 | 0 | 1
+ 1 | -1 | 1 | 1 | 1 | 1 | 2
+ -1 | 1 | 1 | -1 | -1 | -1 | 0
+ 4.2 | -4.2 | 4.2 | 4 | 5 | 1 | 5.2
+ -7.777 | 7.777 | 7.777 | -8 | -7 | -1 | -6.777
+ Infinity | -Infinity | Infinity | Infinity | Infinity | 1 | Infinity
+ -Infinity | Infinity | Infinity | -Infinity | -Infinity | -1 | -Infinity
+ NaN | NaN | NaN | NaN | NaN | NaN | NaN
+(8 rows)
+
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('inf'),('-inf'),('nan'))
+SELECT x, round(x), round(x,1) as round1, trunc(x), trunc(x,1) as trunc1
+FROM v;
+ x | round | round1 | trunc | trunc1
+-----------+-----------+-----------+-----------+-----------
+ 0 | 0 | 0.0 | 0 | 0.0
+ 1 | 1 | 1.0 | 1 | 1.0
+ -1 | -1 | -1.0 | -1 | -1.0
+ 4.2 | 4 | 4.2 | 4 | 4.2
+ -7.777 | -8 | -7.8 | -7 | -7.7
+ Infinity | Infinity | Infinity | Infinity | Infinity
+ -Infinity | -Infinity | -Infinity | -Infinity | -Infinity
+ NaN | NaN | NaN | NaN | NaN
+(8 rows)
+
+-- the large values fall into the numeric abbreviation code's maximal classes
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('1e340'),('-1e340'),
+ ('inf'),('-inf'),('nan'),
+ ('inf'),('-inf'),('nan'))
+SELECT substring(x::text, 1, 32)
+FROM v ORDER BY x;
+ substring
+----------------------------------
+ -Infinity
+ -Infinity
+ -1000000000000000000000000000000
+ -7.777
+ -1
+ 0
+ 1
+ 4.2
+ 10000000000000000000000000000000
+ Infinity
+ Infinity
+ NaN
+ NaN
+(13 rows)
+
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('4.2'),('inf'),('nan'))
+SELECT x, sqrt(x)
+FROM v;
+ x | sqrt
+----------+-------------------
+ 0 | 0.000000000000000
+ 1 | 1.000000000000000
+ 4.2 | 2.049390153191920
+ Infinity | Infinity
+ NaN | NaN
+(5 rows)
+
+SELECT sqrt('-1'::numeric);
+ERROR: cannot take square root of a negative number
+SELECT sqrt('-inf'::numeric);
+ERROR: cannot take square root of a negative number
+WITH v(x) AS
+ (VALUES('1'::numeric),('4.2'),('inf'),('nan'))
+SELECT x,
+ log(x),
+ log10(x),
+ ln(x)
+FROM v;
+ x | log | log10 | ln
+----------+--------------------+--------------------+--------------------
+ 1 | 0.0000000000000000 | 0.0000000000000000 | 0.0000000000000000
+ 4.2 | 0.6232492903979005 | 0.6232492903979005 | 1.4350845252893226
+ Infinity | Infinity | Infinity | Infinity
+ NaN | NaN | NaN | NaN
+(4 rows)
+
+SELECT ln('0'::numeric);
+ERROR: cannot take logarithm of zero
+SELECT ln('-1'::numeric);
+ERROR: cannot take logarithm of a negative number
+SELECT ln('-inf'::numeric);
+ERROR: cannot take logarithm of a negative number
+WITH v(x) AS
+ (VALUES('2'::numeric),('4.2'),('inf'),('nan'))
+SELECT x1, x2,
+ log(x1, x2)
+FROM v AS v1(x1), v AS v2(x2);
+ x1 | x2 | log
+----------+----------+--------------------
+ 2 | 2 | 1.0000000000000000
+ 2 | 4.2 | 2.0703893278913979
+ 2 | Infinity | Infinity
+ 2 | NaN | NaN
+ 4.2 | 2 | 0.4830009440873890
+ 4.2 | 4.2 | 1.0000000000000000
+ 4.2 | Infinity | Infinity
+ 4.2 | NaN | NaN
+ Infinity | 2 | 0
+ Infinity | 4.2 | 0
+ Infinity | Infinity | NaN
+ Infinity | NaN | NaN
+ NaN | 2 | NaN
+ NaN | 4.2 | NaN
+ NaN | Infinity | NaN
+ NaN | NaN | NaN
+(16 rows)
+
+SELECT log('0'::numeric, '10');
+ERROR: cannot take logarithm of zero
+SELECT log('10'::numeric, '0');
+ERROR: cannot take logarithm of zero
+SELECT log('-inf'::numeric, '10');
+ERROR: cannot take logarithm of a negative number
+SELECT log('10'::numeric, '-inf');
+ERROR: cannot take logarithm of a negative number
+SELECT log('inf'::numeric, '0');
+ERROR: cannot take logarithm of zero
+SELECT log('inf'::numeric, '-inf');
+ERROR: cannot take logarithm of a negative number
+SELECT log('-inf'::numeric, 'inf');
+ERROR: cannot take logarithm of a negative number
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('2'),('4.2'),('inf'),('nan'))
+SELECT x1, x2,
+ power(x1, x2)
+FROM v AS v1(x1), v AS v2(x2) WHERE x1 != 0 OR x2 >= 0;
+ x1 | x2 | power
+----------+----------+---------------------
+ 0 | 0 | 1.0000000000000000
+ 0 | 1 | 0.0000000000000000
+ 0 | 2 | 0.0000000000000000
+ 0 | 4.2 | 0.0000000000000000
+ 0 | Infinity | 0
+ 0 | NaN | NaN
+ 1 | 0 | 1.0000000000000000
+ 1 | 1 | 1.0000000000000000
+ 1 | 2 | 1.0000000000000000
+ 1 | 4.2 | 1.0000000000000000
+ 1 | Infinity | 1
+ 1 | NaN | 1
+ 2 | 0 | 1.0000000000000000
+ 2 | 1 | 2.0000000000000000
+ 2 | 2 | 4.0000000000000000
+ 2 | 4.2 | 18.379173679952560
+ 2 | Infinity | Infinity
+ 2 | NaN | NaN
+ 4.2 | 0 | 1.0000000000000000
+ 4.2 | 1 | 4.2000000000000000
+ 4.2 | 2 | 17.6400000000000000
+ 4.2 | 4.2 | 414.61691860129675
+ 4.2 | Infinity | Infinity
+ 4.2 | NaN | NaN
+ Infinity | 0 | 1
+ Infinity | 1 | Infinity
+ Infinity | 2 | Infinity
+ Infinity | 4.2 | Infinity
+ Infinity | Infinity | Infinity
+ Infinity | NaN | NaN
+ NaN | 0 | 1
+ NaN | 1 | NaN
+ NaN | 2 | NaN
+ NaN | 4.2 | NaN
+ NaN | Infinity | NaN
+ NaN | NaN | NaN
+(36 rows)
+
+SELECT power('0'::numeric, '-1');
+ERROR: zero raised to a negative power is undefined
+SELECT power('0'::numeric, '-inf');
+ERROR: zero raised to a negative power is undefined
+SELECT power('-1'::numeric, 'inf');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power('-2'::numeric, '3');
+ power
+---------------------
+ -8.0000000000000000
+(1 row)
+
+SELECT power('-2'::numeric, '3.3');
+ERROR: a negative number raised to a non-integer power yields a complex result
+SELECT power('-2'::numeric, '-1');
+ power
+---------------------
+ -0.5000000000000000
+(1 row)
+
+SELECT power('-2'::numeric, '-1.5');
+ERROR: a negative number raised to a non-integer power yields a complex result
+SELECT power('-2'::numeric, 'inf');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power('-2'::numeric, '-inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power('inf'::numeric, '-2');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power('inf'::numeric, '-inf');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power('-inf'::numeric, '2');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power('-inf'::numeric, '3');
+ power
+-----------
+ -Infinity
+(1 row)
+
+SELECT power('-inf'::numeric, '4.5');
+ERROR: a negative number raised to a non-integer power yields a complex result
+SELECT power('-inf'::numeric, '-2');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power('-inf'::numeric, '-3');
+ power
+-------
+ 0
+(1 row)
+
+SELECT power('-inf'::numeric, '0');
+ power
+-------
+ 1
+(1 row)
+
+SELECT power('-inf'::numeric, 'inf');
+ power
+----------
+ Infinity
+(1 row)
+
+SELECT power('-inf'::numeric, '-inf');
+ power
+-------
+ 0
+(1 row)
+
+-- ******************************
+-- * miscellaneous checks for things that have been broken in the past...
+-- ******************************
+-- numeric AVG used to fail on some platforms
+SELECT AVG(val) FROM num_data;
+ avg
+------------------------
+ -13430913.592242320700
+(1 row)
+
+SELECT MAX(val) FROM num_data;
+ max
+--------------------
+ 7799461.4119000000
+(1 row)
+
+SELECT MIN(val) FROM num_data;
+ min
+----------------------
+ -83028485.0000000000
+(1 row)
+
+SELECT STDDEV(val) FROM num_data;
+ stddev
+-------------------------------
+ 27791203.28758835329805617386
+(1 row)
+
+SELECT VARIANCE(val) FROM num_data;
+ variance
+--------------------------------------
+ 772350980172061.69659105821915863601
+(1 row)
+
+-- Check for appropriate rounding and overflow
+CREATE TABLE fract_only (id int, val numeric(4,4));
+INSERT INTO fract_only VALUES (1, '0.0');
+INSERT INTO fract_only VALUES (2, '0.1');
+INSERT INTO fract_only VALUES (3, '1.0'); -- should fail
+ERROR: numeric field overflow
+DETAIL: A field with precision 4, scale 4 must round to an absolute value less than 1.
+INSERT INTO fract_only VALUES (4, '-0.9999');
+INSERT INTO fract_only VALUES (5, '0.99994');
+INSERT INTO fract_only VALUES (6, '0.99995'); -- should fail
+ERROR: numeric field overflow
+DETAIL: A field with precision 4, scale 4 must round to an absolute value less than 1.
+INSERT INTO fract_only VALUES (7, '0.00001');
+INSERT INTO fract_only VALUES (8, '0.00017');
+INSERT INTO fract_only VALUES (9, 'NaN');
+INSERT INTO fract_only VALUES (10, 'Inf'); -- should fail
+ERROR: numeric field overflow
+DETAIL: A field with precision 4, scale 4 cannot hold an infinite value.
+INSERT INTO fract_only VALUES (11, '-Inf'); -- should fail
+ERROR: numeric field overflow
+DETAIL: A field with precision 4, scale 4 cannot hold an infinite value.
+SELECT * FROM fract_only;
+ id | val
+----+---------
+ 1 | 0.0000
+ 2 | 0.1000
+ 4 | -0.9999
+ 5 | 0.9999
+ 7 | 0.0000
+ 8 | 0.0002
+ 9 | NaN
+(7 rows)
+
+DROP TABLE fract_only;
+-- Check conversion to integers
+SELECT (-9223372036854775808.5)::int8; -- should fail
+ERROR: bigint out of range
+SELECT (-9223372036854775808.4)::int8; -- ok
+ int8
+----------------------
+ -9223372036854775808
+(1 row)
+
+SELECT 9223372036854775807.4::int8; -- ok
+ int8
+---------------------
+ 9223372036854775807
+(1 row)
+
+SELECT 9223372036854775807.5::int8; -- should fail
+ERROR: bigint out of range
+SELECT (-2147483648.5)::int4; -- should fail
+ERROR: integer out of range
+SELECT (-2147483648.4)::int4; -- ok
+ int4
+-------------
+ -2147483648
+(1 row)
+
+SELECT 2147483647.4::int4; -- ok
+ int4
+------------
+ 2147483647
+(1 row)
+
+SELECT 2147483647.5::int4; -- should fail
+ERROR: integer out of range
+SELECT (-32768.5)::int2; -- should fail
+ERROR: smallint out of range
+SELECT (-32768.4)::int2; -- ok
+ int2
+--------
+ -32768
+(1 row)
+
+SELECT 32767.4::int2; -- ok
+ int2
+-------
+ 32767
+(1 row)
+
+SELECT 32767.5::int2; -- should fail
+ERROR: smallint out of range
+-- Check inf/nan conversion behavior
+SELECT 'NaN'::float8::numeric;
+ numeric
+---------
+ NaN
+(1 row)
+
+SELECT 'Infinity'::float8::numeric;
+ numeric
+----------
+ Infinity
+(1 row)
+
+SELECT '-Infinity'::float8::numeric;
+ numeric
+-----------
+ -Infinity
+(1 row)
+
+SELECT 'NaN'::numeric::float8;
+ float8
+--------
+ NaN
+(1 row)
+
+SELECT 'Infinity'::numeric::float8;
+ float8
+----------
+ Infinity
+(1 row)
+
+SELECT '-Infinity'::numeric::float8;
+ float8
+-----------
+ -Infinity
+(1 row)
+
+SELECT 'NaN'::float4::numeric;
+ numeric
+---------
+ NaN
+(1 row)
+
+SELECT 'Infinity'::float4::numeric;
+ numeric
+----------
+ Infinity
+(1 row)
+
+SELECT '-Infinity'::float4::numeric;
+ numeric
+-----------
+ -Infinity
+(1 row)
+
+SELECT 'NaN'::numeric::float4;
+ float4
+--------
+ NaN
+(1 row)
+
+SELECT 'Infinity'::numeric::float4;
+ float4
+----------
+ Infinity
+(1 row)
+
+SELECT '-Infinity'::numeric::float4;
+ float4
+-----------
+ -Infinity
+(1 row)
+
+SELECT '42'::int2::numeric;
+ numeric
+---------
+ 42
+(1 row)
+
+SELECT 'NaN'::numeric::int2;
+ERROR: cannot convert NaN to smallint
+SELECT 'Infinity'::numeric::int2;
+ERROR: cannot convert infinity to smallint
+SELECT '-Infinity'::numeric::int2;
+ERROR: cannot convert infinity to smallint
+SELECT 'NaN'::numeric::int4;
+ERROR: cannot convert NaN to integer
+SELECT 'Infinity'::numeric::int4;
+ERROR: cannot convert infinity to integer
+SELECT '-Infinity'::numeric::int4;
+ERROR: cannot convert infinity to integer
+SELECT 'NaN'::numeric::int8;
+ERROR: cannot convert NaN to bigint
+SELECT 'Infinity'::numeric::int8;
+ERROR: cannot convert infinity to bigint
+SELECT '-Infinity'::numeric::int8;
+ERROR: cannot convert infinity to bigint
+-- Simple check that ceil(), floor(), and round() work correctly
+CREATE TABLE ceil_floor_round (a numeric);
+INSERT INTO ceil_floor_round VALUES ('-5.5');
+INSERT INTO ceil_floor_round VALUES ('-5.499999');
+INSERT INTO ceil_floor_round VALUES ('9.5');
+INSERT INTO ceil_floor_round VALUES ('9.4999999');
+INSERT INTO ceil_floor_round VALUES ('0.0');
+INSERT INTO ceil_floor_round VALUES ('0.0000001');
+INSERT INTO ceil_floor_round VALUES ('-0.000001');
+SELECT a, ceil(a), ceiling(a), floor(a), round(a) FROM ceil_floor_round;
+ a | ceil | ceiling | floor | round
+-----------+------+---------+-------+-------
+ -5.5 | -5 | -5 | -6 | -6
+ -5.499999 | -5 | -5 | -6 | -5
+ 9.5 | 10 | 10 | 9 | 10
+ 9.4999999 | 10 | 10 | 9 | 9
+ 0.0 | 0 | 0 | 0 | 0
+ 0.0000001 | 1 | 1 | 0 | 0
+ -0.000001 | 0 | 0 | -1 | 0
+(7 rows)
+
+DROP TABLE ceil_floor_round;
+-- Check rounding, it should round ties away from zero.
+SELECT i as pow,
+ round((-2.5 * 10 ^ i)::numeric, -i),
+ round((-1.5 * 10 ^ i)::numeric, -i),
+ round((-0.5 * 10 ^ i)::numeric, -i),
+ round((0.5 * 10 ^ i)::numeric, -i),
+ round((1.5 * 10 ^ i)::numeric, -i),
+ round((2.5 * 10 ^ i)::numeric, -i)
+FROM generate_series(-5,5) AS t(i);
+ pow | round | round | round | round | round | round
+-----+----------+----------+----------+---------+---------+---------
+ -5 | -0.00003 | -0.00002 | -0.00001 | 0.00001 | 0.00002 | 0.00003
+ -4 | -0.0003 | -0.0002 | -0.0001 | 0.0001 | 0.0002 | 0.0003
+ -3 | -0.003 | -0.002 | -0.001 | 0.001 | 0.002 | 0.003
+ -2 | -0.03 | -0.02 | -0.01 | 0.01 | 0.02 | 0.03
+ -1 | -0.3 | -0.2 | -0.1 | 0.1 | 0.2 | 0.3
+ 0 | -3 | -2 | -1 | 1 | 2 | 3
+ 1 | -30 | -20 | -10 | 10 | 20 | 30
+ 2 | -300 | -200 | -100 | 100 | 200 | 300
+ 3 | -3000 | -2000 | -1000 | 1000 | 2000 | 3000
+ 4 | -30000 | -20000 | -10000 | 10000 | 20000 | 30000
+ 5 | -300000 | -200000 | -100000 | 100000 | 200000 | 300000
+(11 rows)
+
+-- Testing for width_bucket(). For convenience, we test both the
+-- numeric and float8 versions of the function in this file.
+-- errors
+SELECT width_bucket(5.0, 3.0, 4.0, 0);
+ERROR: count must be greater than zero
+SELECT width_bucket(5.0, 3.0, 4.0, -5);
+ERROR: count must be greater than zero
+SELECT width_bucket(3.5, 3.0, 3.0, 888);
+ERROR: lower bound cannot equal upper bound
+SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, 0);
+ERROR: count must be greater than zero
+SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5);
+ERROR: count must be greater than zero
+SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888);
+ERROR: lower bound cannot equal upper bound
+SELECT width_bucket('NaN', 3.0, 4.0, 888);
+ERROR: operand, lower bound, and upper bound cannot be NaN
+SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888);
+ERROR: operand, lower bound, and upper bound cannot be NaN
+SELECT width_bucket(2.0, 3.0, '-inf', 888);
+ERROR: lower and upper bounds must be finite
+SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888);
+ERROR: lower and upper bounds must be finite
+-- normal operation
+CREATE TABLE width_bucket_test (operand_num numeric, operand_f8 float8);
+COPY width_bucket_test (operand_num) FROM stdin;
+UPDATE width_bucket_test SET operand_f8 = operand_num::float8;
+SELECT
+ operand_num,
+ width_bucket(operand_num, 0, 10, 5) AS wb_1,
+ width_bucket(operand_f8, 0, 10, 5) AS wb_1f,
+ width_bucket(operand_num, 10, 0, 5) AS wb_2,
+ width_bucket(operand_f8, 10, 0, 5) AS wb_2f,
+ width_bucket(operand_num, 2, 8, 4) AS wb_3,
+ width_bucket(operand_f8, 2, 8, 4) AS wb_3f,
+ width_bucket(operand_num, 5.0, 5.5, 20) AS wb_4,
+ width_bucket(operand_f8, 5.0, 5.5, 20) AS wb_4f,
+ width_bucket(operand_num, -25, 25, 10) AS wb_5,
+ width_bucket(operand_f8, -25, 25, 10) AS wb_5f
+ FROM width_bucket_test;
+ operand_num | wb_1 | wb_1f | wb_2 | wb_2f | wb_3 | wb_3f | wb_4 | wb_4f | wb_5 | wb_5f
+------------------+------+-------+------+-------+------+-------+------+-------+------+-------
+ -5.2 | 0 | 0 | 6 | 6 | 0 | 0 | 0 | 0 | 4 | 4
+ -0.0000000001 | 0 | 0 | 6 | 6 | 0 | 0 | 0 | 0 | 5 | 5
+ 0.000000000001 | 1 | 1 | 5 | 5 | 0 | 0 | 0 | 0 | 6 | 6
+ 1 | 1 | 1 | 5 | 5 | 0 | 0 | 0 | 0 | 6 | 6
+ 1.99999999999999 | 1 | 1 | 5 | 5 | 0 | 0 | 0 | 0 | 6 | 6
+ 2 | 2 | 2 | 5 | 5 | 1 | 1 | 0 | 0 | 6 | 6
+ 2.00000000000001 | 2 | 2 | 4 | 4 | 1 | 1 | 0 | 0 | 6 | 6
+ 3 | 2 | 2 | 4 | 4 | 1 | 1 | 0 | 0 | 6 | 6
+ 4 | 3 | 3 | 4 | 4 | 2 | 2 | 0 | 0 | 6 | 6
+ 4.5 | 3 | 3 | 3 | 3 | 2 | 2 | 0 | 0 | 6 | 6
+ 5 | 3 | 3 | 3 | 3 | 3 | 3 | 1 | 1 | 7 | 7
+ 5.5 | 3 | 3 | 3 | 3 | 3 | 3 | 21 | 21 | 7 | 7
+ 6 | 4 | 4 | 3 | 3 | 3 | 3 | 21 | 21 | 7 | 7
+ 7 | 4 | 4 | 2 | 2 | 4 | 4 | 21 | 21 | 7 | 7
+ 8 | 5 | 5 | 2 | 2 | 5 | 5 | 21 | 21 | 7 | 7
+ 9 | 5 | 5 | 1 | 1 | 5 | 5 | 21 | 21 | 7 | 7
+ 9.99999999999999 | 5 | 5 | 1 | 1 | 5 | 5 | 21 | 21 | 7 | 7
+ 10 | 6 | 6 | 1 | 1 | 5 | 5 | 21 | 21 | 8 | 8
+ 10.0000000000001 | 6 | 6 | 0 | 0 | 5 | 5 | 21 | 21 | 8 | 8
+(19 rows)
+
+-- Check positive and negative infinity: we require
+-- finite bucket bounds, but allow an infinite operand
+SELECT width_bucket(0.0::numeric, 'Infinity'::numeric, 5, 10); -- error
+ERROR: lower and upper bounds must be finite
+SELECT width_bucket(0.0::numeric, 5, '-Infinity'::numeric, 20); -- error
+ERROR: lower and upper bounds must be finite
+SELECT width_bucket('Infinity'::numeric, 1, 10, 10),
+ width_bucket('-Infinity'::numeric, 1, 10, 10);
+ width_bucket | width_bucket
+--------------+--------------
+ 11 | 0
+(1 row)
+
+SELECT width_bucket(0.0::float8, 'Infinity'::float8, 5, 10); -- error
+ERROR: lower and upper bounds must be finite
+SELECT width_bucket(0.0::float8, 5, '-Infinity'::float8, 20); -- error
+ERROR: lower and upper bounds must be finite
+SELECT width_bucket('Infinity'::float8, 1, 10, 10),
+ width_bucket('-Infinity'::float8, 1, 10, 10);
+ width_bucket | width_bucket
+--------------+--------------
+ 11 | 0
+(1 row)
+
+DROP TABLE width_bucket_test;
+-- Simple test for roundoff error when results should be exact
+SELECT x, width_bucket(x::float8, 10, 100, 9) as flt,
+ width_bucket(x::numeric, 10, 100, 9) as num
+FROM generate_series(0, 110, 10) x;
+ x | flt | num
+-----+-----+-----
+ 0 | 0 | 0
+ 10 | 1 | 1
+ 20 | 2 | 2
+ 30 | 3 | 3
+ 40 | 4 | 4
+ 50 | 5 | 5
+ 60 | 6 | 6
+ 70 | 7 | 7
+ 80 | 8 | 8
+ 90 | 9 | 9
+ 100 | 10 | 10
+ 110 | 10 | 10
+(12 rows)
+
+SELECT x, width_bucket(x::float8, 100, 10, 9) as flt,
+ width_bucket(x::numeric, 100, 10, 9) as num
+FROM generate_series(0, 110, 10) x;
+ x | flt | num
+-----+-----+-----
+ 0 | 10 | 10
+ 10 | 10 | 10
+ 20 | 9 | 9
+ 30 | 8 | 8
+ 40 | 7 | 7
+ 50 | 6 | 6
+ 60 | 5 | 5
+ 70 | 4 | 4
+ 80 | 3 | 3
+ 90 | 2 | 2
+ 100 | 1 | 1
+ 110 | 0 | 0
+(12 rows)
+
+--
+-- TO_CHAR()
+--
+SELECT to_char(val, '9G999G999G999G999G999')
+ FROM num_data;
+ to_char
+------------------------
+ 0
+ 0
+ -34,338,492
+ 4
+ 7,799,461
+ 16,397
+ 93,902
+ -83,028,485
+ 74,881
+ -24,926,804
+(10 rows)
+
+SELECT to_char(val, '9G999G999G999G999G999D999G999G999G999G999')
+ FROM num_data;
+ to_char
+--------------------------------------------
+ .000,000,000,000,000
+ .000,000,000,000,000
+ -34,338,492.215,397,047,000,000
+ 4.310,000,000,000,000
+ 7,799,461.411,900,000,000,000
+ 16,397.038,491,000,000,000
+ 93,901.577,630,260,000,000
+ -83,028,485.000,000,000,000,000
+ 74,881.000,000,000,000,000
+ -24,926,804.045,047,420,000,000
+(10 rows)
+
+SELECT to_char(val, '9999999999999999.999999999999999PR')
+ FROM num_data;
+ to_char
+------------------------------------
+ .000000000000000
+ .000000000000000
+ <34338492.215397047000000>
+ 4.310000000000000
+ 7799461.411900000000000
+ 16397.038491000000000
+ 93901.577630260000000
+ <83028485.000000000000000>
+ 74881.000000000000000
+ <24926804.045047420000000>
+(10 rows)
+
+SELECT to_char(val, '9999999999999999.999999999999999S')
+ FROM num_data;
+ to_char
+-----------------------------------
+ .000000000000000+
+ .000000000000000+
+ 34338492.215397047000000-
+ 4.310000000000000+
+ 7799461.411900000000000+
+ 16397.038491000000000+
+ 93901.577630260000000+
+ 83028485.000000000000000-
+ 74881.000000000000000+
+ 24926804.045047420000000-
+(10 rows)
+
+SELECT to_char(val, 'MI9999999999999999.999999999999999') FROM num_data;
+ to_char
+-----------------------------------
+ .000000000000000
+ .000000000000000
+ - 34338492.215397047000000
+ 4.310000000000000
+ 7799461.411900000000000
+ 16397.038491000000000
+ 93901.577630260000000
+ - 83028485.000000000000000
+ 74881.000000000000000
+ - 24926804.045047420000000
+(10 rows)
+
+SELECT to_char(val, 'FMS9999999999999999.999999999999999') FROM num_data;
+ to_char
+---------------------
+ +0.
+ +0.
+ -34338492.215397047
+ +4.31
+ +7799461.4119
+ +16397.038491
+ +93901.57763026
+ -83028485.
+ +74881.
+ -24926804.04504742
+(10 rows)
+
+SELECT to_char(val, 'FM9999999999999999.999999999999999THPR') FROM num_data;
+ to_char
+----------------------
+ 0.
+ 0.
+ <34338492.215397047>
+ 4.31
+ 7799461.4119
+ 16397.038491
+ 93901.57763026
+ <83028485.>
+ 74881.
+ <24926804.04504742>
+(10 rows)
+
+SELECT to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data;
+ to_char
+-----------------------------------
+ + .000000000000000
+ + .000000000000000
+ - 34338492.215397047000000
+ + 4.310000000000000
+ + 7799461.411900000000000
+ + 16397.038491000000000
+ + 93901.577630260000000
+ - 83028485.000000000000000
+ + 74881.000000000000000
+ - 24926804.045047420000000
+(10 rows)
+
+SELECT to_char(val, '0999999999999999.999999999999999') FROM num_data;
+ to_char
+-----------------------------------
+ 0000000000000000.000000000000000
+ 0000000000000000.000000000000000
+ -0000000034338492.215397047000000
+ 0000000000000004.310000000000000
+ 0000000007799461.411900000000000
+ 0000000000016397.038491000000000
+ 0000000000093901.577630260000000
+ -0000000083028485.000000000000000
+ 0000000000074881.000000000000000
+ -0000000024926804.045047420000000
+(10 rows)
+
+SELECT to_char(val, 'S0999999999999999.999999999999999') FROM num_data;
+ to_char
+-----------------------------------
+ +0000000000000000.000000000000000
+ +0000000000000000.000000000000000
+ -0000000034338492.215397047000000
+ +0000000000000004.310000000000000
+ +0000000007799461.411900000000000
+ +0000000000016397.038491000000000
+ +0000000000093901.577630260000000
+ -0000000083028485.000000000000000
+ +0000000000074881.000000000000000
+ -0000000024926804.045047420000000
+(10 rows)
+
+SELECT to_char(val, 'FM0999999999999999.999999999999999') FROM num_data;
+ to_char
+-----------------------------
+ 0000000000000000.
+ 0000000000000000.
+ -0000000034338492.215397047
+ 0000000000000004.31
+ 0000000007799461.4119
+ 0000000000016397.038491
+ 0000000000093901.57763026
+ -0000000083028485.
+ 0000000000074881.
+ -0000000024926804.04504742
+(10 rows)
+
+SELECT to_char(val, 'FM9999999999999999.099999999999999') FROM num_data;
+ to_char
+---------------------
+ .0
+ .0
+ -34338492.215397047
+ 4.31
+ 7799461.4119
+ 16397.038491
+ 93901.57763026
+ -83028485.0
+ 74881.0
+ -24926804.04504742
+(10 rows)
+
+SELECT to_char(val, 'FM9999999999990999.990999999999999') FROM num_data;
+ to_char
+---------------------
+ 0000.000
+ 0000.000
+ -34338492.215397047
+ 0004.310
+ 7799461.4119
+ 16397.038491
+ 93901.57763026
+ -83028485.000
+ 74881.000
+ -24926804.04504742
+(10 rows)
+
+SELECT to_char(val, 'FM0999999999999999.999909999999999') FROM num_data;
+ to_char
+-----------------------------
+ 0000000000000000.00000
+ 0000000000000000.00000
+ -0000000034338492.215397047
+ 0000000000000004.31000
+ 0000000007799461.41190
+ 0000000000016397.038491
+ 0000000000093901.57763026
+ -0000000083028485.00000
+ 0000000000074881.00000
+ -0000000024926804.04504742
+(10 rows)
+
+SELECT to_char(val, 'FM9999999990999999.099999999999999') FROM num_data;
+ to_char
+---------------------
+ 0000000.0
+ 0000000.0
+ -34338492.215397047
+ 0000004.31
+ 7799461.4119
+ 0016397.038491
+ 0093901.57763026
+ -83028485.0
+ 0074881.0
+ -24926804.04504742
+(10 rows)
+
+SELECT to_char(val, 'L9999999999999999.099999999999999') FROM num_data;
+ to_char
+------------------------------------
+ .000000000000000
+ .000000000000000
+ -34338492.215397047000000
+ 4.310000000000000
+ 7799461.411900000000000
+ 16397.038491000000000
+ 93901.577630260000000
+ -83028485.000000000000000
+ 74881.000000000000000
+ -24926804.045047420000000
+(10 rows)
+
+SELECT to_char(val, 'FM9999999999999999.99999999999999') FROM num_data;
+ to_char
+---------------------
+ 0.
+ 0.
+ -34338492.215397047
+ 4.31
+ 7799461.4119
+ 16397.038491
+ 93901.57763026
+ -83028485.
+ 74881.
+ -24926804.04504742
+(10 rows)
+
+SELECT to_char(val, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data;
+ to_char
+-----------------------------------------------------------------------
+ +. 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ +. 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ -3 4 3 3 8 4 9 2 . 2 1 5 3 9 7 0 4 7 0 0 0 0 0 0 0 0
+ +4 . 3 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ +7 7 9 9 4 6 1 . 4 1 1 9 0 0 0 0 0 0 0 0 0 0 0 0 0
+ +1 6 3 9 7 . 0 3 8 4 9 1 0 0 0 0 0 0 0 0 0 0 0
+ +9 3 9 0 1 . 5 7 7 6 3 0 2 6 0 0 0 0 0 0 0 0 0
+ -8 3 0 2 8 4 8 5 . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ +7 4 8 8 1 . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ -2 4 9 2 6 8 0 4 . 0 4 5 0 4 7 4 2 0 0 0 0 0 0 0 0 0
+(10 rows)
+
+SELECT to_char(val, 'FMS 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data;
+ to_char
+-------------------------------------------------------
+ +0 .
+ +0 .
+ -3 4 3 3 8 4 9 2 . 2 1 5 3 9 7 0 4 7
+ +4 . 3 1
+ +7 7 9 9 4 6 1 . 4 1 1 9
+ +1 6 3 9 7 . 0 3 8 4 9 1
+ +9 3 9 0 1 . 5 7 7 6 3 0 2 6
+ -8 3 0 2 8 4 8 5 .
+ +7 4 8 8 1 .
+ -2 4 9 2 6 8 0 4 . 0 4 5 0 4 7 4 2
+(10 rows)
+
+SELECT to_char(val, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM num_data;
+ to_char
+-----------------------------------------------------------
+ text 9999 "text between quote marks" 0
+ text 9999 "text between quote marks" 0
+ text -3 9999 433 "text between quote marks" 8492
+ text 9999 "text between quote marks" 4
+ text 9999 779 "text between quote marks" 9461
+ text 9999 1 "text between quote marks" 6397
+ text 9999 9 "text between quote marks" 3902
+ text -8 9999 302 "text between quote marks" 8485
+ text 9999 7 "text between quote marks" 4881
+ text -2 9999 492 "text between quote marks" 6804
+(10 rows)
+
+SELECT to_char(val, '999999SG9999999999') FROM num_data;
+ to_char
+-------------------
+ + 0
+ + 0
+ - 34338492
+ + 4
+ + 7799461
+ + 16397
+ + 93902
+ - 83028485
+ + 74881
+ - 24926804
+(10 rows)
+
+SELECT to_char(val, 'FM9999999999999999.999999999999999') FROM num_data;
+ to_char
+---------------------
+ 0.
+ 0.
+ -34338492.215397047
+ 4.31
+ 7799461.4119
+ 16397.038491
+ 93901.57763026
+ -83028485.
+ 74881.
+ -24926804.04504742
+(10 rows)
+
+SELECT to_char(val, '9.999EEEE') FROM num_data;
+ to_char
+------------
+ 0.000e+00
+ 0.000e+00
+ -3.434e+07
+ 4.310e+00
+ 7.799e+06
+ 1.640e+04
+ 9.390e+04
+ -8.303e+07
+ 7.488e+04
+ -2.493e+07
+(10 rows)
+
+WITH v(val) AS
+ (VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan'))
+SELECT val,
+ to_char(val, '9.999EEEE') as numeric,
+ to_char(val::float8, '9.999EEEE') as float8,
+ to_char(val::float4, '9.999EEEE') as float4
+FROM v;
+ val | numeric | float8 | float4
+------------+------------+------------+------------
+ 0 | 0.000e+00 | 0.000e+00 | 0.000e+00
+ -4.2 | -4.200e+00 | -4.200e+00 | -4.200e+00
+ 4200000000 | 4.200e+09 | 4.200e+09 | 4.200e+09
+ 0.000012 | 1.200e-05 | 1.200e-05 | 1.200e-05
+ Infinity | #.####### | #.####### | #.#######
+ -Infinity | #.####### | #.####### | #.#######
+ NaN | #.####### | #.####### | #.#######
+(7 rows)
+
+WITH v(exp) AS
+ (VALUES(-16379),(-16378),(-1234),(-789),(-45),(-5),(-4),(-3),(-2),(-1),(0),
+ (1),(2),(3),(4),(5),(38),(275),(2345),(45678),(131070),(131071))
+SELECT exp,
+ to_char(('1.2345e'||exp)::numeric, '9.999EEEE') as numeric
+FROM v;
+ exp | numeric
+--------+----------------
+ -16379 | 1.235e-16379
+ -16378 | 1.235e-16378
+ -1234 | 1.235e-1234
+ -789 | 1.235e-789
+ -45 | 1.235e-45
+ -5 | 1.235e-05
+ -4 | 1.235e-04
+ -3 | 1.235e-03
+ -2 | 1.235e-02
+ -1 | 1.235e-01
+ 0 | 1.235e+00
+ 1 | 1.235e+01
+ 2 | 1.235e+02
+ 3 | 1.235e+03
+ 4 | 1.235e+04
+ 5 | 1.235e+05
+ 38 | 1.235e+38
+ 275 | 1.235e+275
+ 2345 | 1.235e+2345
+ 45678 | 1.235e+45678
+ 131070 | 1.235e+131070
+ 131071 | 1.235e+131071
+(22 rows)
+
+WITH v(val) AS
+ (VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan'))
+SELECT val,
+ to_char(val, 'MI9999999999.99') as numeric,
+ to_char(val::float8, 'MI9999999999.99') as float8,
+ to_char(val::float4, 'MI9999999999.99') as float4
+FROM v;
+ val | numeric | float8 | float4
+------------+----------------+----------------+----------------
+ 0 | .00 | .00 | .00
+ -4.2 | - 4.20 | - 4.20 | - 4.20
+ 4200000000 | 4200000000.00 | 4200000000.00 | 4200000000
+ 0.000012 | .00 | .00 | .00
+ Infinity | Infinity | Infinity | Infinity
+ -Infinity | - Infinity | - Infinity | - Infinity
+ NaN | NaN | NaN | NaN
+(7 rows)
+
+WITH v(val) AS
+ (VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan'))
+SELECT val,
+ to_char(val, 'MI99.99') as numeric,
+ to_char(val::float8, 'MI99.99') as float8,
+ to_char(val::float4, 'MI99.99') as float4
+FROM v;
+ val | numeric | float8 | float4
+------------+---------+--------+--------
+ 0 | .00 | .00 | .00
+ -4.2 | - 4.20 | - 4.20 | - 4.20
+ 4200000000 | ##.## | ##.## | ##.
+ 0.000012 | .00 | .00 | .00
+ Infinity | ##.## | ##.## | ##.
+ -Infinity | -##.## | -##.## | -##.
+ NaN | ##.## | ##.## | ##.##
+(7 rows)
+
+SELECT to_char('100'::numeric, 'FM999.9');
+ to_char
+---------
+ 100.
+(1 row)
+
+SELECT to_char('100'::numeric, 'FM999.');
+ to_char
+---------
+ 100
+(1 row)
+
+SELECT to_char('100'::numeric, 'FM999');
+ to_char
+---------
+ 100
+(1 row)
+
+-- Check parsing of literal text in a format string
+SELECT to_char('100'::numeric, 'foo999');
+ to_char
+---------
+ foo 100
+(1 row)
+
+SELECT to_char('100'::numeric, 'f\oo999');
+ to_char
+----------
+ f\oo 100
+(1 row)
+
+SELECT to_char('100'::numeric, 'f\\oo999');
+ to_char
+-----------
+ f\\oo 100
+(1 row)
+
+SELECT to_char('100'::numeric, 'f\"oo999');
+ to_char
+----------
+ f"oo 100
+(1 row)
+
+SELECT to_char('100'::numeric, 'f\\"oo999');
+ to_char
+-----------
+ f\"oo 100
+(1 row)
+
+SELECT to_char('100'::numeric, 'f"ool"999');
+ to_char
+----------
+ fool 100
+(1 row)
+
+SELECT to_char('100'::numeric, 'f"\ool"999');
+ to_char
+----------
+ fool 100
+(1 row)
+
+SELECT to_char('100'::numeric, 'f"\\ool"999');
+ to_char
+-----------
+ f\ool 100
+(1 row)
+
+SELECT to_char('100'::numeric, 'f"ool\"999');
+ to_char
+----------
+ fool"999
+(1 row)
+
+SELECT to_char('100'::numeric, 'f"ool\\"999');
+ to_char
+-----------
+ fool\ 100
+(1 row)
+
+-- TO_NUMBER()
+--
+SET lc_numeric = 'C';
+SELECT to_number('-34,338,492', '99G999G999');
+ to_number
+-----------
+ -34338492
+(1 row)
+
+SELECT to_number('-34,338,492.654,878', '99G999G999D999G999');
+ to_number
+------------------
+ -34338492.654878
+(1 row)
+
+SELECT to_number('<564646.654564>', '999999.999999PR');
+ to_number
+----------------
+ -564646.654564
+(1 row)
+
+SELECT to_number('0.00001-', '9.999999S');
+ to_number
+-----------
+ -0.00001
+(1 row)
+
+SELECT to_number('5.01-', 'FM9.999999S');
+ to_number
+-----------
+ -5.01
+(1 row)
+
+SELECT to_number('5.01-', 'FM9.999999MI');
+ to_number
+-----------
+ -5.01
+(1 row)
+
+SELECT to_number('5 4 4 4 4 8 . 7 8', '9 9 9 9 9 9 . 9 9');
+ to_number
+-----------
+ 544448.78
+(1 row)
+
+SELECT to_number('.01', 'FM9.99');
+ to_number
+-----------
+ 0.01
+(1 row)
+
+SELECT to_number('.0', '99999999.99999999');
+ to_number
+-----------
+ 0.0
+(1 row)
+
+SELECT to_number('0', '99.99');
+ to_number
+-----------
+ 0
+(1 row)
+
+SELECT to_number('.-01', 'S99.99');
+ to_number
+-----------
+ -0.01
+(1 row)
+
+SELECT to_number('.01-', '99.99S');
+ to_number
+-----------
+ -0.01
+(1 row)
+
+SELECT to_number(' . 0 1-', ' 9 9 . 9 9 S');
+ to_number
+-----------
+ -0.01
+(1 row)
+
+SELECT to_number('34,50','999,99');
+ to_number
+-----------
+ 3450
+(1 row)
+
+SELECT to_number('123,000','999G');
+ to_number
+-----------
+ 123
+(1 row)
+
+SELECT to_number('123456','999G999');
+ to_number
+-----------
+ 123456
+(1 row)
+
+SELECT to_number('$1234.56','L9,999.99');
+ to_number
+-----------
+ 1234.56
+(1 row)
+
+SELECT to_number('$1234.56','L99,999.99');
+ to_number
+-----------
+ 1234.56
+(1 row)
+
+SELECT to_number('$1,234.56','L99,999.99');
+ to_number
+-----------
+ 1234.56
+(1 row)
+
+SELECT to_number('1234.56','L99,999.99');
+ to_number
+-----------
+ 1234.56
+(1 row)
+
+SELECT to_number('1,234.56','L99,999.99');
+ to_number
+-----------
+ 1234.56
+(1 row)
+
+SELECT to_number('42nd', '99th');
+ to_number
+-----------
+ 42
+(1 row)
+
+RESET lc_numeric;
+--
+-- Input syntax
+--
+CREATE TABLE num_input_test (n1 numeric);
+-- good inputs
+INSERT INTO num_input_test(n1) VALUES (' 123');
+INSERT INTO num_input_test(n1) VALUES (' 3245874 ');
+INSERT INTO num_input_test(n1) VALUES (' -93853');
+INSERT INTO num_input_test(n1) VALUES ('555.50');
+INSERT INTO num_input_test(n1) VALUES ('-555.50');
+INSERT INTO num_input_test(n1) VALUES ('NaN ');
+INSERT INTO num_input_test(n1) VALUES (' nan');
+INSERT INTO num_input_test(n1) VALUES (' inf ');
+INSERT INTO num_input_test(n1) VALUES (' +inf ');
+INSERT INTO num_input_test(n1) VALUES (' -inf ');
+INSERT INTO num_input_test(n1) VALUES (' Infinity ');
+INSERT INTO num_input_test(n1) VALUES (' +inFinity ');
+INSERT INTO num_input_test(n1) VALUES (' -INFINITY ');
+-- bad inputs
+INSERT INTO num_input_test(n1) VALUES (' ');
+ERROR: invalid input syntax for type numeric: " "
+LINE 1: INSERT INTO num_input_test(n1) VALUES (' ');
+ ^
+INSERT INTO num_input_test(n1) VALUES (' 1234 %');
+ERROR: invalid input syntax for type numeric: " 1234 %"
+LINE 1: INSERT INTO num_input_test(n1) VALUES (' 1234 %');
+ ^
+INSERT INTO num_input_test(n1) VALUES ('xyz');
+ERROR: invalid input syntax for type numeric: "xyz"
+LINE 1: INSERT INTO num_input_test(n1) VALUES ('xyz');
+ ^
+INSERT INTO num_input_test(n1) VALUES ('- 1234');
+ERROR: invalid input syntax for type numeric: "- 1234"
+LINE 1: INSERT INTO num_input_test(n1) VALUES ('- 1234');
+ ^
+INSERT INTO num_input_test(n1) VALUES ('5 . 0');
+ERROR: invalid input syntax for type numeric: "5 . 0"
+LINE 1: INSERT INTO num_input_test(n1) VALUES ('5 . 0');
+ ^
+INSERT INTO num_input_test(n1) VALUES ('5. 0 ');
+ERROR: invalid input syntax for type numeric: "5. 0 "
+LINE 1: INSERT INTO num_input_test(n1) VALUES ('5. 0 ');
+ ^
+INSERT INTO num_input_test(n1) VALUES ('');
+ERROR: invalid input syntax for type numeric: ""
+LINE 1: INSERT INTO num_input_test(n1) VALUES ('');
+ ^
+INSERT INTO num_input_test(n1) VALUES (' N aN ');
+ERROR: invalid input syntax for type numeric: " N aN "
+LINE 1: INSERT INTO num_input_test(n1) VALUES (' N aN ');
+ ^
+INSERT INTO num_input_test(n1) VALUES ('+ infinity');
+ERROR: invalid input syntax for type numeric: "+ infinity"
+LINE 1: INSERT INTO num_input_test(n1) VALUES ('+ infinity');
+ ^
+SELECT * FROM num_input_test;
+ n1
+-----------
+ 123
+ 3245874
+ -93853
+ 555.50
+ -555.50
+ NaN
+ NaN
+ Infinity
+ Infinity
+ -Infinity
+ Infinity
+ Infinity
+ -Infinity
+(13 rows)
+
+--
+-- Test some corner cases for multiplication
+--
+select 4790999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+ ?column?
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 47909999999999999999999999999999999999999999999999999999999999999999999999999999999999985209000000000000000000000000000000000000000000000000000000000000000000000000000000000001
+(1 row)
+
+select 4789999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+ ?column?
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 47899999999999999999999999999999999999999999999999999999999999999999999999999999999999985210000000000000000000000000000000000000000000000000000000000000000000000000000000000001
+(1 row)
+
+select 4770999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+ ?column?
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 47709999999999999999999999999999999999999999999999999999999999999999999999999999999999985229000000000000000000000000000000000000000000000000000000000000000000000000000000000001
+(1 row)
+
+select 4769999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+ ?column?
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 47699999999999999999999999999999999999999999999999999999999999999999999999999999999999985230000000000000000000000000000000000000000000000000000000000000000000000000000000000001
+(1 row)
+
+select trim_scale((0.1 - 2e-16383) * (0.1 - 3e-16383));
+ trim_scale
+------------
+ 0.01
+(1 row)
+
+--
+-- Test some corner cases for division
+--
+select 999999999999999999999::numeric/1000000000000000000000;
+ ?column?
+------------------------
+ 1.00000000000000000000
+(1 row)
+
+select div(999999999999999999999::numeric,1000000000000000000000);
+ div
+-----
+ 0
+(1 row)
+
+select mod(999999999999999999999::numeric,1000000000000000000000);
+ mod
+-----------------------
+ 999999999999999999999
+(1 row)
+
+select div(-9999999999999999999999::numeric,1000000000000000000000);
+ div
+-----
+ -9
+(1 row)
+
+select mod(-9999999999999999999999::numeric,1000000000000000000000);
+ mod
+------------------------
+ -999999999999999999999
+(1 row)
+
+select div(-9999999999999999999999::numeric,1000000000000000000000)*1000000000000000000000 + mod(-9999999999999999999999::numeric,1000000000000000000000);
+ ?column?
+-------------------------
+ -9999999999999999999999
+(1 row)
+
+select mod (70.0,70) ;
+ mod
+-----
+ 0.0
+(1 row)
+
+select div (70.0,70) ;
+ div
+-----
+ 1
+(1 row)
+
+select 70.0 / 70 ;
+ ?column?
+------------------------
+ 1.00000000000000000000
+(1 row)
+
+select 12345678901234567890 % 123;
+ ?column?
+----------
+ 78
+(1 row)
+
+select 12345678901234567890 / 123;
+ ?column?
+--------------------
+ 100371373180768845
+(1 row)
+
+select div(12345678901234567890, 123);
+ div
+--------------------
+ 100371373180768844
+(1 row)
+
+select div(12345678901234567890, 123) * 123 + 12345678901234567890 % 123;
+ ?column?
+----------------------
+ 12345678901234567890
+(1 row)
+
+--
+-- Test some corner cases for square root
+--
+select sqrt(1.000000000000003::numeric);
+ sqrt
+-------------------
+ 1.000000000000001
+(1 row)
+
+select sqrt(1.000000000000004::numeric);
+ sqrt
+-------------------
+ 1.000000000000002
+(1 row)
+
+select sqrt(96627521408608.56340355805::numeric);
+ sqrt
+---------------------
+ 9829929.87811248648
+(1 row)
+
+select sqrt(96627521408608.56340355806::numeric);
+ sqrt
+---------------------
+ 9829929.87811248649
+(1 row)
+
+select sqrt(515549506212297735.073688290367::numeric);
+ sqrt
+------------------------
+ 718017761.766585921184
+(1 row)
+
+select sqrt(515549506212297735.073688290368::numeric);
+ sqrt
+------------------------
+ 718017761.766585921185
+(1 row)
+
+select sqrt(8015491789940783531003294973900306::numeric);
+ sqrt
+-------------------
+ 89529278953540017
+(1 row)
+
+select sqrt(8015491789940783531003294973900307::numeric);
+ sqrt
+-------------------
+ 89529278953540018
+(1 row)
+
+--
+-- Test code path for raising to integer powers
+--
+select 10.0 ^ -2147483648 as rounds_to_zero;
+ rounds_to_zero
+--------------------
+ 0.0000000000000000
+(1 row)
+
+select 10.0 ^ -2147483647 as rounds_to_zero;
+ rounds_to_zero
+--------------------
+ 0.0000000000000000
+(1 row)
+
+select 10.0 ^ 2147483647 as overflows;
+ERROR: value overflows numeric format
+select 117743296169.0 ^ 1000000000 as overflows;
+ERROR: value overflows numeric format
+-- cases that used to return inaccurate results
+select 3.789 ^ 21;
+ ?column?
+--------------------------------
+ 1409343026052.8716016316022141
+(1 row)
+
+select 3.789 ^ 35;
+ ?column?
+----------------------------------------
+ 177158169650516670809.3820586142670135
+(1 row)
+
+select 1.2 ^ 345;
+ ?column?
+-----------------------------------------------
+ 2077446682327378559843444695.5827049735727869
+(1 row)
+
+select 0.12 ^ (-20);
+ ?column?
+--------------------------------------
+ 2608405330458882702.5529619561355838
+(1 row)
+
+select 1.000000000123 ^ (-2147483648);
+ ?column?
+--------------------
+ 0.7678656556403084
+(1 row)
+
+select coalesce(nullif(0.9999999999 ^ 23300000000000, 0), 0) as rounds_to_zero;
+ rounds_to_zero
+----------------
+ 0
+(1 row)
+
+select round(((1 - 1.500012345678e-1000) ^ 1.45e1003) * 1e1000);
+ round
+----------------------------------------------------------
+ 25218976308958387188077465658068501556514992509509282366
+(1 row)
+
+-- cases that used to error out
+select 0.12 ^ (-25);
+ ?column?
+-------------------------------------------
+ 104825960103961013959336.4983657883169110
+(1 row)
+
+select 0.5678 ^ (-85);
+ ?column?
+----------------------------------------
+ 782333637740774446257.7719390061997396
+(1 row)
+
+select coalesce(nullif(0.9999999999 ^ 70000000000000, 0), 0) as underflows;
+ underflows
+------------
+ 0
+(1 row)
+
+-- negative base to integer powers
+select (-1.0) ^ 2147483646;
+ ?column?
+--------------------
+ 1.0000000000000000
+(1 row)
+
+select (-1.0) ^ 2147483647;
+ ?column?
+---------------------
+ -1.0000000000000000
+(1 row)
+
+select (-1.0) ^ 2147483648;
+ ?column?
+--------------------
+ 1.0000000000000000
+(1 row)
+
+select (-1.0) ^ 1000000000000000;
+ ?column?
+--------------------
+ 1.0000000000000000
+(1 row)
+
+select (-1.0) ^ 1000000000000001;
+ ?column?
+---------------------
+ -1.0000000000000000
+(1 row)
+
+--
+-- Tests for raising to non-integer powers
+--
+-- special cases
+select 0.0 ^ 0.0;
+ ?column?
+--------------------
+ 1.0000000000000000
+(1 row)
+
+select (-12.34) ^ 0.0;
+ ?column?
+--------------------
+ 1.0000000000000000
+(1 row)
+
+select 12.34 ^ 0.0;
+ ?column?
+--------------------
+ 1.0000000000000000
+(1 row)
+
+select 0.0 ^ 12.34;
+ ?column?
+--------------------
+ 0.0000000000000000
+(1 row)
+
+-- NaNs
+select 'NaN'::numeric ^ 'NaN'::numeric;
+ ?column?
+----------
+ NaN
+(1 row)
+
+select 'NaN'::numeric ^ 0;
+ ?column?
+----------
+ 1
+(1 row)
+
+select 'NaN'::numeric ^ 1;
+ ?column?
+----------
+ NaN
+(1 row)
+
+select 0 ^ 'NaN'::numeric;
+ ?column?
+----------
+ NaN
+(1 row)
+
+select 1 ^ 'NaN'::numeric;
+ ?column?
+----------
+ 1
+(1 row)
+
+-- invalid inputs
+select 0.0 ^ (-12.34);
+ERROR: zero raised to a negative power is undefined
+select (-12.34) ^ 1.2;
+ERROR: a negative number raised to a non-integer power yields a complex result
+-- cases that used to generate inaccurate results
+select 32.1 ^ 9.8;
+ ?column?
+--------------------
+ 580429286790711.10
+(1 row)
+
+select 32.1 ^ (-9.8);
+ ?column?
+----------------------------------
+ 0.000000000000001722862754788209
+(1 row)
+
+select 12.3 ^ 45.6;
+ ?column?
+------------------------------------------------------
+ 50081010321492803393171165777624533697036806969694.9
+(1 row)
+
+select 12.3 ^ (-45.6);
+ ?column?
+---------------------------------------------------------------------
+ 0.00000000000000000000000000000000000000000000000001996764828785491
+(1 row)
+
+-- big test
+select 1.234 ^ 5678;
+ ?column?
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 307239295662090741644584872593956173493568238595074141254349565406661439636598896798876823220904084953233015553994854875890890858118656468658643918169805277399402542281777901029346337707622181574346585989613344285010764501017625366742865066948856161360224801370482171458030533346309750557140549621313515752078638620714732831815297168231790779296290266207315344008883935010274044001522606235576584215999260117523114297033944018699691024106823438431754073086813382242140602291215149759520833200152654884259619588924545324.5973362312547382
+(1 row)
+
+--
+-- Tests for EXP()
+--
+-- special cases
+select exp(0.0);
+ exp
+--------------------
+ 1.0000000000000000
+(1 row)
+
+select exp(1.0);
+ exp
+--------------------
+ 2.7182818284590452
+(1 row)
+
+select exp(1.0::numeric(71,70));
+ exp
+--------------------------------------------------------------------------
+ 2.7182818284590452353602874713526624977572470936999595749669676277240766
+(1 row)
+
+select exp('nan'::numeric);
+ exp
+-----
+ NaN
+(1 row)
+
+select exp('inf'::numeric);
+ exp
+----------
+ Infinity
+(1 row)
+
+select exp('-inf'::numeric);
+ exp
+-----
+ 0
+(1 row)
+
+select coalesce(nullif(exp(-5000::numeric), 0), 0) as rounds_to_zero;
+ rounds_to_zero
+----------------
+ 0
+(1 row)
+
+select coalesce(nullif(exp(-10000::numeric), 0), 0) as underflows;
+ underflows
+------------
+ 0
+(1 row)
+
+-- cases that used to generate inaccurate results
+select exp(32.999);
+ exp
+---------------------
+ 214429043492155.053
+(1 row)
+
+select exp(-32.999);
+ exp
+----------------------------------
+ 0.000000000000004663547361468248
+(1 row)
+
+select exp(123.456);
+ exp
+------------------------------------------------------------
+ 413294435277809344957685441227343146614594393746575438.725
+(1 row)
+
+select exp(-123.456);
+ exp
+-------------------------------------------------------------------------
+ 0.000000000000000000000000000000000000000000000000000002419582541264601
+(1 row)
+
+-- big test
+select exp(1234.5678);
+ exp
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 146549072930959479983482138503979804217622199675223653966270157446954995433819741094410764947112047906012815540251009949604426069672532417736057033099274204598385314594846509975629046864798765888104789074984927709616261452461385220475510438783429612447831614003668421849727379202555580791042606170523016207262965336641214601082882495255771621327088265411334088968112458492660609809762865582162764292604697957813514621259353683899630997077707406305730694385703091201347848855199354307506425820147289848677003277208302716466011827836279231.9667
+(1 row)
+
+--
+-- Tests for generate_series
+--
+select * from generate_series(0.0::numeric, 4.0::numeric);
+ generate_series
+-----------------
+ 0.0
+ 1.0
+ 2.0
+ 3.0
+ 4.0
+(5 rows)
+
+select * from generate_series(0.1::numeric, 4.0::numeric, 1.3::numeric);
+ generate_series
+-----------------
+ 0.1
+ 1.4
+ 2.7
+ 4.0
+(4 rows)
+
+select * from generate_series(4.0::numeric, -1.5::numeric, -2.2::numeric);
+ generate_series
+-----------------
+ 4.0
+ 1.8
+ -0.4
+(3 rows)
+
+-- Trigger errors
+select * from generate_series(-100::numeric, 100::numeric, 0::numeric);
+ERROR: step size cannot equal zero
+select * from generate_series(-100::numeric, 100::numeric, 'nan'::numeric);
+ERROR: step size cannot be NaN
+select * from generate_series('nan'::numeric, 100::numeric, 10::numeric);
+ERROR: start value cannot be NaN
+select * from generate_series(0::numeric, 'nan'::numeric, 10::numeric);
+ERROR: stop value cannot be NaN
+select * from generate_series('inf'::numeric, 'inf'::numeric, 10::numeric);
+ERROR: start value cannot be infinity
+select * from generate_series(0::numeric, 'inf'::numeric, 10::numeric);
+ERROR: stop value cannot be infinity
+select * from generate_series(0::numeric, '42'::numeric, '-inf'::numeric);
+ERROR: step size cannot be infinity
+-- Checks maximum, output is truncated
+select (i / (10::numeric ^ 131071))::numeric(1,0)
+ from generate_series(6 * (10::numeric ^ 131071),
+ 9 * (10::numeric ^ 131071),
+ 10::numeric ^ 131071) as a(i);
+ numeric
+---------
+ 6
+ 7
+ 8
+ 9
+(4 rows)
+
+-- Check usage with variables
+select * from generate_series(1::numeric, 3::numeric) i, generate_series(i,3) j;
+ i | j
+---+---
+ 1 | 1
+ 1 | 2
+ 1 | 3
+ 2 | 2
+ 2 | 3
+ 3 | 3
+(6 rows)
+
+select * from generate_series(1::numeric, 3::numeric) i, generate_series(1,i) j;
+ i | j
+---+---
+ 1 | 1
+ 2 | 1
+ 2 | 2
+ 3 | 1
+ 3 | 2
+ 3 | 3
+(6 rows)
+
+select * from generate_series(1::numeric, 3::numeric) i, generate_series(1,5,i) j;
+ i | j
+---+---
+ 1 | 1
+ 1 | 2
+ 1 | 3
+ 1 | 4
+ 1 | 5
+ 2 | 1
+ 2 | 3
+ 2 | 5
+ 3 | 1
+ 3 | 4
+(10 rows)
+
+--
+-- Tests for LN()
+--
+-- Invalid inputs
+select ln(-12.34);
+ERROR: cannot take logarithm of a negative number
+select ln(0.0);
+ERROR: cannot take logarithm of zero
+-- Some random tests
+select ln(1.2345678e-28);
+ ln
+-----------------------------------------
+ -64.26166165451762991204894255882820859
+(1 row)
+
+select ln(0.0456789);
+ ln
+---------------------
+ -3.0861187944847439
+(1 row)
+
+select ln(0.349873948359354029493948309745709580730482050975);
+ ln
+-----------------------------------------------------
+ -1.050182336912082775693991697979750253056317885460
+(1 row)
+
+select ln(0.99949452);
+ ln
+-------------------------
+ -0.00050560779808326467
+(1 row)
+
+select ln(1.00049687395);
+ ln
+------------------------
+ 0.00049675054901370394
+(1 row)
+
+select ln(1234.567890123456789);
+ ln
+--------------------
+ 7.1184763012977896
+(1 row)
+
+select ln(5.80397490724e5);
+ ln
+--------------------
+ 13.271468476626518
+(1 row)
+
+select ln(9.342536355e34);
+ ln
+--------------------
+ 80.522470935524187
+(1 row)
+
+--
+-- Tests for LOG() (base 10)
+--
+-- invalid inputs
+select log(-12.34);
+ERROR: cannot take logarithm of a negative number
+CONTEXT: SQL function "log" statement 1
+select log(0.0);
+ERROR: cannot take logarithm of zero
+CONTEXT: SQL function "log" statement 1
+-- some random tests
+select log(1.234567e-89);
+ log
+-----------------------------------------------------------------------------------------------------
+ -88.90848533591373725637496492944925187293052336306443143312825869985819779294142441287021741054275
+(1 row)
+
+select log(3.4634998359873254962349856073435545);
+ log
+--------------------------------------
+ 0.5395151714070134409152404011959981
+(1 row)
+
+select log(9.999999999999999999);
+ log
+----------------------
+ 1.000000000000000000
+(1 row)
+
+select log(10.00000000000000000);
+ log
+---------------------
+ 1.00000000000000000
+(1 row)
+
+select log(10.00000000000000001);
+ log
+---------------------
+ 1.00000000000000000
+(1 row)
+
+select log(590489.45235237);
+ log
+-------------------
+ 5.771212144411727
+(1 row)
+
+--
+-- Tests for LOG() (arbitrary base)
+--
+-- invalid inputs
+select log(-12.34, 56.78);
+ERROR: cannot take logarithm of a negative number
+select log(-12.34, -56.78);
+ERROR: cannot take logarithm of a negative number
+select log(12.34, -56.78);
+ERROR: cannot take logarithm of a negative number
+select log(0.0, 12.34);
+ERROR: cannot take logarithm of zero
+select log(12.34, 0.0);
+ERROR: cannot take logarithm of zero
+select log(1.0, 12.34);
+ERROR: division by zero
+-- some random tests
+select log(1.23e-89, 6.4689e45);
+ log
+------------------------------------------------------------------------------------------------
+ -0.5152489207781856983977054971756484879653568168479201885425588841094788842469115325262329756
+(1 row)
+
+select log(0.99923, 4.58934e34);
+ log
+---------------------
+ -103611.55579544132
+(1 row)
+
+select log(1.000016, 8.452010e18);
+ log
+--------------------
+ 2723830.2877097365
+(1 row)
+
+select log(3.1954752e47, 9.4792021e-73);
+ log
+-------------------------------------------------------------------------------------
+ -1.51613372350688302142917386143459361608600157692779164475351842333265418126982165
+(1 row)
+
+--
+-- Tests for scale()
+--
+select scale(numeric 'NaN');
+ scale
+-------
+
+(1 row)
+
+select scale(numeric 'inf');
+ scale
+-------
+
+(1 row)
+
+select scale(NULL::numeric);
+ scale
+-------
+
+(1 row)
+
+select scale(1.12);
+ scale
+-------
+ 2
+(1 row)
+
+select scale(0);
+ scale
+-------
+ 0
+(1 row)
+
+select scale(0.00);
+ scale
+-------
+ 2
+(1 row)
+
+select scale(1.12345);
+ scale
+-------
+ 5
+(1 row)
+
+select scale(110123.12475871856128);
+ scale
+-------
+ 14
+(1 row)
+
+select scale(-1123.12471856128);
+ scale
+-------
+ 11
+(1 row)
+
+select scale(-13.000000000000000);
+ scale
+-------
+ 15
+(1 row)
+
+--
+-- Tests for min_scale()
+--
+select min_scale(numeric 'NaN') is NULL; -- should be true
+ ?column?
+----------
+ t
+(1 row)
+
+select min_scale(numeric 'inf') is NULL; -- should be true
+ ?column?
+----------
+ t
+(1 row)
+
+select min_scale(0); -- no digits
+ min_scale
+-----------
+ 0
+(1 row)
+
+select min_scale(0.00); -- no digits again
+ min_scale
+-----------
+ 0
+(1 row)
+
+select min_scale(1.0); -- no scale
+ min_scale
+-----------
+ 0
+(1 row)
+
+select min_scale(1.1); -- scale 1
+ min_scale
+-----------
+ 1
+(1 row)
+
+select min_scale(1.12); -- scale 2
+ min_scale
+-----------
+ 2
+(1 row)
+
+select min_scale(1.123); -- scale 3
+ min_scale
+-----------
+ 3
+(1 row)
+
+select min_scale(1.1234); -- scale 4, filled digit
+ min_scale
+-----------
+ 4
+(1 row)
+
+select min_scale(1.12345); -- scale 5, 2 NDIGITS
+ min_scale
+-----------
+ 5
+(1 row)
+
+select min_scale(1.1000); -- 1 pos in NDIGITS
+ min_scale
+-----------
+ 1
+(1 row)
+
+select min_scale(1e100); -- very big number
+ min_scale
+-----------
+ 0
+(1 row)
+
+--
+-- Tests for trim_scale()
+--
+select trim_scale(numeric 'NaN');
+ trim_scale
+------------
+ NaN
+(1 row)
+
+select trim_scale(numeric 'inf');
+ trim_scale
+------------
+ Infinity
+(1 row)
+
+select trim_scale(1.120);
+ trim_scale
+------------
+ 1.12
+(1 row)
+
+select trim_scale(0);
+ trim_scale
+------------
+ 0
+(1 row)
+
+select trim_scale(0.00);
+ trim_scale
+------------
+ 0
+(1 row)
+
+select trim_scale(1.1234500);
+ trim_scale
+------------
+ 1.12345
+(1 row)
+
+select trim_scale(110123.12475871856128000);
+ trim_scale
+-----------------------
+ 110123.12475871856128
+(1 row)
+
+select trim_scale(-1123.124718561280000000);
+ trim_scale
+-------------------
+ -1123.12471856128
+(1 row)
+
+select trim_scale(-13.00000000000000000000);
+ trim_scale
+------------
+ -13
+(1 row)
+
+select trim_scale(1e100);
+ trim_scale
+-------------------------------------------------------------------------------------------------------
+ 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
+(1 row)
+
+--
+-- Tests for SUM()
+--
+-- cases that need carry propagation
+SELECT SUM(9999::numeric) FROM generate_series(1, 100000);
+ sum
+-----------
+ 999900000
+(1 row)
+
+SELECT SUM((-9999)::numeric) FROM generate_series(1, 100000);
+ sum
+------------
+ -999900000
+(1 row)
+
+--
+-- Tests for GCD()
+--
+SELECT a, b, gcd(a, b), gcd(a, -b), gcd(-b, a), gcd(-b, -a)
+FROM (VALUES (0::numeric, 0::numeric),
+ (0::numeric, numeric 'NaN'),
+ (0::numeric, 46375::numeric),
+ (433125::numeric, 46375::numeric),
+ (43312.5::numeric, 4637.5::numeric),
+ (4331.250::numeric, 463.75000::numeric),
+ ('inf', '0'),
+ ('inf', '42'),
+ ('inf', 'inf')
+ ) AS v(a, b);
+ a | b | gcd | gcd | gcd | gcd
+----------+-----------+---------+---------+---------+---------
+ 0 | 0 | 0 | 0 | 0 | 0
+ 0 | NaN | NaN | NaN | NaN | NaN
+ 0 | 46375 | 46375 | 46375 | 46375 | 46375
+ 433125 | 46375 | 875 | 875 | 875 | 875
+ 43312.5 | 4637.5 | 87.5 | 87.5 | 87.5 | 87.5
+ 4331.250 | 463.75000 | 8.75000 | 8.75000 | 8.75000 | 8.75000
+ Infinity | 0 | NaN | NaN | NaN | NaN
+ Infinity | 42 | NaN | NaN | NaN | NaN
+ Infinity | Infinity | NaN | NaN | NaN | NaN
+(9 rows)
+
+--
+-- Tests for LCM()
+--
+SELECT a,b, lcm(a, b), lcm(a, -b), lcm(-b, a), lcm(-b, -a)
+FROM (VALUES (0::numeric, 0::numeric),
+ (0::numeric, numeric 'NaN'),
+ (0::numeric, 13272::numeric),
+ (13272::numeric, 13272::numeric),
+ (423282::numeric, 13272::numeric),
+ (42328.2::numeric, 1327.2::numeric),
+ (4232.820::numeric, 132.72000::numeric),
+ ('inf', '0'),
+ ('inf', '42'),
+ ('inf', 'inf')
+ ) AS v(a, b);
+ a | b | lcm | lcm | lcm | lcm
+----------+-----------+--------------+--------------+--------------+--------------
+ 0 | 0 | 0 | 0 | 0 | 0
+ 0 | NaN | NaN | NaN | NaN | NaN
+ 0 | 13272 | 0 | 0 | 0 | 0
+ 13272 | 13272 | 13272 | 13272 | 13272 | 13272
+ 423282 | 13272 | 11851896 | 11851896 | 11851896 | 11851896
+ 42328.2 | 1327.2 | 1185189.6 | 1185189.6 | 1185189.6 | 1185189.6
+ 4232.820 | 132.72000 | 118518.96000 | 118518.96000 | 118518.96000 | 118518.96000
+ Infinity | 0 | NaN | NaN | NaN | NaN
+ Infinity | 42 | NaN | NaN | NaN | NaN
+ Infinity | Infinity | NaN | NaN | NaN | NaN
+(10 rows)
+
+SELECT lcm(9999 * (10::numeric)^131068 + (10::numeric^131068 - 1), 2); -- overflow
+ERROR: value overflows numeric format
+--
+-- Tests for factorial
+--
+SELECT factorial(4);
+ factorial
+-----------
+ 24
+(1 row)
+
+SELECT factorial(15);
+ factorial
+---------------
+ 1307674368000
+(1 row)
+
+SELECT factorial(100000);
+ERROR: value overflows numeric format
+SELECT factorial(0);
+ factorial
+-----------
+ 1
+(1 row)
+
+SELECT factorial(-4);
+ERROR: factorial of a negative number is undefined
+--
+-- Tests for pg_lsn()
+--
+SELECT pg_lsn(23783416::numeric);
+ pg_lsn
+-----------
+ 0/16AE7F8
+(1 row)
+
+SELECT pg_lsn(0::numeric);
+ pg_lsn
+--------
+ 0/0
+(1 row)
+
+SELECT pg_lsn(18446744073709551615::numeric);
+ pg_lsn
+-------------------
+ FFFFFFFF/FFFFFFFF
+(1 row)
+
+SELECT pg_lsn(-1::numeric);
+ERROR: pg_lsn out of range
+SELECT pg_lsn(18446744073709551616::numeric);
+ERROR: pg_lsn out of range
+SELECT pg_lsn('NaN'::numeric);
+ERROR: cannot convert NaN to pg_lsn
diff --git a/ydb/library/yql/tests/postgresql/original/cases/numeric.sql b/ydb/library/yql/tests/postgresql/original/cases/numeric.sql
new file mode 100644
index 0000000000..9233c666d4
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/numeric.sql
@@ -0,0 +1,1365 @@
+--
+-- NUMERIC
+--
+
+CREATE TABLE num_data (id int4, val numeric(210,10));
+CREATE TABLE num_exp_add (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_sub (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_div (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_mul (id1 int4, id2 int4, expected numeric(210,10));
+CREATE TABLE num_exp_sqrt (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_ln (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_log10 (id int4, expected numeric(210,10));
+CREATE TABLE num_exp_power_10_ln (id int4, expected numeric(210,10));
+
+CREATE TABLE num_result (id1 int4, id2 int4, result numeric(210,10));
+
+
+-- ******************************
+-- * The following EXPECTED results are computed by bc(1)
+-- * with a scale of 200
+-- ******************************
+
+BEGIN TRANSACTION;
+INSERT INTO num_exp_add VALUES (0,0,'0');
+INSERT INTO num_exp_sub VALUES (0,0,'0');
+INSERT INTO num_exp_mul VALUES (0,0,'0');
+INSERT INTO num_exp_div VALUES (0,0,'NaN');
+INSERT INTO num_exp_add VALUES (0,1,'0');
+INSERT INTO num_exp_sub VALUES (0,1,'0');
+INSERT INTO num_exp_mul VALUES (0,1,'0');
+INSERT INTO num_exp_div VALUES (0,1,'NaN');
+INSERT INTO num_exp_add VALUES (0,2,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (0,2,'34338492.215397047');
+INSERT INTO num_exp_mul VALUES (0,2,'0');
+INSERT INTO num_exp_div VALUES (0,2,'0');
+INSERT INTO num_exp_add VALUES (0,3,'4.31');
+INSERT INTO num_exp_sub VALUES (0,3,'-4.31');
+INSERT INTO num_exp_mul VALUES (0,3,'0');
+INSERT INTO num_exp_div VALUES (0,3,'0');
+INSERT INTO num_exp_add VALUES (0,4,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (0,4,'-7799461.4119');
+INSERT INTO num_exp_mul VALUES (0,4,'0');
+INSERT INTO num_exp_div VALUES (0,4,'0');
+INSERT INTO num_exp_add VALUES (0,5,'16397.038491');
+INSERT INTO num_exp_sub VALUES (0,5,'-16397.038491');
+INSERT INTO num_exp_mul VALUES (0,5,'0');
+INSERT INTO num_exp_div VALUES (0,5,'0');
+INSERT INTO num_exp_add VALUES (0,6,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (0,6,'-93901.57763026');
+INSERT INTO num_exp_mul VALUES (0,6,'0');
+INSERT INTO num_exp_div VALUES (0,6,'0');
+INSERT INTO num_exp_add VALUES (0,7,'-83028485');
+INSERT INTO num_exp_sub VALUES (0,7,'83028485');
+INSERT INTO num_exp_mul VALUES (0,7,'0');
+INSERT INTO num_exp_div VALUES (0,7,'0');
+INSERT INTO num_exp_add VALUES (0,8,'74881');
+INSERT INTO num_exp_sub VALUES (0,8,'-74881');
+INSERT INTO num_exp_mul VALUES (0,8,'0');
+INSERT INTO num_exp_div VALUES (0,8,'0');
+INSERT INTO num_exp_add VALUES (0,9,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (0,9,'24926804.045047420');
+INSERT INTO num_exp_mul VALUES (0,9,'0');
+INSERT INTO num_exp_div VALUES (0,9,'0');
+INSERT INTO num_exp_add VALUES (1,0,'0');
+INSERT INTO num_exp_sub VALUES (1,0,'0');
+INSERT INTO num_exp_mul VALUES (1,0,'0');
+INSERT INTO num_exp_div VALUES (1,0,'NaN');
+INSERT INTO num_exp_add VALUES (1,1,'0');
+INSERT INTO num_exp_sub VALUES (1,1,'0');
+INSERT INTO num_exp_mul VALUES (1,1,'0');
+INSERT INTO num_exp_div VALUES (1,1,'NaN');
+INSERT INTO num_exp_add VALUES (1,2,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (1,2,'34338492.215397047');
+INSERT INTO num_exp_mul VALUES (1,2,'0');
+INSERT INTO num_exp_div VALUES (1,2,'0');
+INSERT INTO num_exp_add VALUES (1,3,'4.31');
+INSERT INTO num_exp_sub VALUES (1,3,'-4.31');
+INSERT INTO num_exp_mul VALUES (1,3,'0');
+INSERT INTO num_exp_div VALUES (1,3,'0');
+INSERT INTO num_exp_add VALUES (1,4,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (1,4,'-7799461.4119');
+INSERT INTO num_exp_mul VALUES (1,4,'0');
+INSERT INTO num_exp_div VALUES (1,4,'0');
+INSERT INTO num_exp_add VALUES (1,5,'16397.038491');
+INSERT INTO num_exp_sub VALUES (1,5,'-16397.038491');
+INSERT INTO num_exp_mul VALUES (1,5,'0');
+INSERT INTO num_exp_div VALUES (1,5,'0');
+INSERT INTO num_exp_add VALUES (1,6,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (1,6,'-93901.57763026');
+INSERT INTO num_exp_mul VALUES (1,6,'0');
+INSERT INTO num_exp_div VALUES (1,6,'0');
+INSERT INTO num_exp_add VALUES (1,7,'-83028485');
+INSERT INTO num_exp_sub VALUES (1,7,'83028485');
+INSERT INTO num_exp_mul VALUES (1,7,'0');
+INSERT INTO num_exp_div VALUES (1,7,'0');
+INSERT INTO num_exp_add VALUES (1,8,'74881');
+INSERT INTO num_exp_sub VALUES (1,8,'-74881');
+INSERT INTO num_exp_mul VALUES (1,8,'0');
+INSERT INTO num_exp_div VALUES (1,8,'0');
+INSERT INTO num_exp_add VALUES (1,9,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (1,9,'24926804.045047420');
+INSERT INTO num_exp_mul VALUES (1,9,'0');
+INSERT INTO num_exp_div VALUES (1,9,'0');
+INSERT INTO num_exp_add VALUES (2,0,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (2,0,'-34338492.215397047');
+INSERT INTO num_exp_mul VALUES (2,0,'0');
+INSERT INTO num_exp_div VALUES (2,0,'NaN');
+INSERT INTO num_exp_add VALUES (2,1,'-34338492.215397047');
+INSERT INTO num_exp_sub VALUES (2,1,'-34338492.215397047');
+INSERT INTO num_exp_mul VALUES (2,1,'0');
+INSERT INTO num_exp_div VALUES (2,1,'NaN');
+INSERT INTO num_exp_add VALUES (2,2,'-68676984.430794094');
+INSERT INTO num_exp_sub VALUES (2,2,'0');
+INSERT INTO num_exp_mul VALUES (2,2,'1179132047626883.596862135856320209');
+INSERT INTO num_exp_div VALUES (2,2,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (2,3,'-34338487.905397047');
+INSERT INTO num_exp_sub VALUES (2,3,'-34338496.525397047');
+INSERT INTO num_exp_mul VALUES (2,3,'-147998901.44836127257');
+INSERT INTO num_exp_div VALUES (2,3,'-7967167.56737750510440835266');
+INSERT INTO num_exp_add VALUES (2,4,'-26539030.803497047');
+INSERT INTO num_exp_sub VALUES (2,4,'-42137953.627297047');
+INSERT INTO num_exp_mul VALUES (2,4,'-267821744976817.8111137106593');
+INSERT INTO num_exp_div VALUES (2,4,'-4.40267480046830116685');
+INSERT INTO num_exp_add VALUES (2,5,'-34322095.176906047');
+INSERT INTO num_exp_sub VALUES (2,5,'-34354889.253888047');
+INSERT INTO num_exp_mul VALUES (2,5,'-563049578578.769242506736077');
+INSERT INTO num_exp_div VALUES (2,5,'-2094.18866914563535496429');
+INSERT INTO num_exp_add VALUES (2,6,'-34244590.637766787');
+INSERT INTO num_exp_sub VALUES (2,6,'-34432393.793027307');
+INSERT INTO num_exp_mul VALUES (2,6,'-3224438592470.18449811926184222');
+INSERT INTO num_exp_div VALUES (2,6,'-365.68599891479766440940');
+INSERT INTO num_exp_add VALUES (2,7,'-117366977.215397047');
+INSERT INTO num_exp_sub VALUES (2,7,'48689992.784602953');
+INSERT INTO num_exp_mul VALUES (2,7,'2851072985828710.485883795');
+INSERT INTO num_exp_div VALUES (2,7,'.41357483778485235518');
+INSERT INTO num_exp_add VALUES (2,8,'-34263611.215397047');
+INSERT INTO num_exp_sub VALUES (2,8,'-34413373.215397047');
+INSERT INTO num_exp_mul VALUES (2,8,'-2571300635581.146276407');
+INSERT INTO num_exp_div VALUES (2,8,'-458.57416721727870888476');
+INSERT INTO num_exp_add VALUES (2,9,'-59265296.260444467');
+INSERT INTO num_exp_sub VALUES (2,9,'-9411688.170349627');
+INSERT INTO num_exp_mul VALUES (2,9,'855948866655588.453741509242968740');
+INSERT INTO num_exp_div VALUES (2,9,'1.37757299946438931811');
+INSERT INTO num_exp_add VALUES (3,0,'4.31');
+INSERT INTO num_exp_sub VALUES (3,0,'4.31');
+INSERT INTO num_exp_mul VALUES (3,0,'0');
+INSERT INTO num_exp_div VALUES (3,0,'NaN');
+INSERT INTO num_exp_add VALUES (3,1,'4.31');
+INSERT INTO num_exp_sub VALUES (3,1,'4.31');
+INSERT INTO num_exp_mul VALUES (3,1,'0');
+INSERT INTO num_exp_div VALUES (3,1,'NaN');
+INSERT INTO num_exp_add VALUES (3,2,'-34338487.905397047');
+INSERT INTO num_exp_sub VALUES (3,2,'34338496.525397047');
+INSERT INTO num_exp_mul VALUES (3,2,'-147998901.44836127257');
+INSERT INTO num_exp_div VALUES (3,2,'-.00000012551512084352');
+INSERT INTO num_exp_add VALUES (3,3,'8.62');
+INSERT INTO num_exp_sub VALUES (3,3,'0');
+INSERT INTO num_exp_mul VALUES (3,3,'18.5761');
+INSERT INTO num_exp_div VALUES (3,3,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (3,4,'7799465.7219');
+INSERT INTO num_exp_sub VALUES (3,4,'-7799457.1019');
+INSERT INTO num_exp_mul VALUES (3,4,'33615678.685289');
+INSERT INTO num_exp_div VALUES (3,4,'.00000055260225961552');
+INSERT INTO num_exp_add VALUES (3,5,'16401.348491');
+INSERT INTO num_exp_sub VALUES (3,5,'-16392.728491');
+INSERT INTO num_exp_mul VALUES (3,5,'70671.23589621');
+INSERT INTO num_exp_div VALUES (3,5,'.00026285234387695504');
+INSERT INTO num_exp_add VALUES (3,6,'93905.88763026');
+INSERT INTO num_exp_sub VALUES (3,6,'-93897.26763026');
+INSERT INTO num_exp_mul VALUES (3,6,'404715.7995864206');
+INSERT INTO num_exp_div VALUES (3,6,'.00004589912234457595');
+INSERT INTO num_exp_add VALUES (3,7,'-83028480.69');
+INSERT INTO num_exp_sub VALUES (3,7,'83028489.31');
+INSERT INTO num_exp_mul VALUES (3,7,'-357852770.35');
+INSERT INTO num_exp_div VALUES (3,7,'-.00000005190989574240');
+INSERT INTO num_exp_add VALUES (3,8,'74885.31');
+INSERT INTO num_exp_sub VALUES (3,8,'-74876.69');
+INSERT INTO num_exp_mul VALUES (3,8,'322737.11');
+INSERT INTO num_exp_div VALUES (3,8,'.00005755799201399553');
+INSERT INTO num_exp_add VALUES (3,9,'-24926799.735047420');
+INSERT INTO num_exp_sub VALUES (3,9,'24926808.355047420');
+INSERT INTO num_exp_mul VALUES (3,9,'-107434525.43415438020');
+INSERT INTO num_exp_div VALUES (3,9,'-.00000017290624149854');
+INSERT INTO num_exp_add VALUES (4,0,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (4,0,'7799461.4119');
+INSERT INTO num_exp_mul VALUES (4,0,'0');
+INSERT INTO num_exp_div VALUES (4,0,'NaN');
+INSERT INTO num_exp_add VALUES (4,1,'7799461.4119');
+INSERT INTO num_exp_sub VALUES (4,1,'7799461.4119');
+INSERT INTO num_exp_mul VALUES (4,1,'0');
+INSERT INTO num_exp_div VALUES (4,1,'NaN');
+INSERT INTO num_exp_add VALUES (4,2,'-26539030.803497047');
+INSERT INTO num_exp_sub VALUES (4,2,'42137953.627297047');
+INSERT INTO num_exp_mul VALUES (4,2,'-267821744976817.8111137106593');
+INSERT INTO num_exp_div VALUES (4,2,'-.22713465002993920385');
+INSERT INTO num_exp_add VALUES (4,3,'7799465.7219');
+INSERT INTO num_exp_sub VALUES (4,3,'7799457.1019');
+INSERT INTO num_exp_mul VALUES (4,3,'33615678.685289');
+INSERT INTO num_exp_div VALUES (4,3,'1809619.81714617169373549883');
+INSERT INTO num_exp_add VALUES (4,4,'15598922.8238');
+INSERT INTO num_exp_sub VALUES (4,4,'0');
+INSERT INTO num_exp_mul VALUES (4,4,'60831598315717.14146161');
+INSERT INTO num_exp_div VALUES (4,4,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (4,5,'7815858.450391');
+INSERT INTO num_exp_sub VALUES (4,5,'7783064.373409');
+INSERT INTO num_exp_mul VALUES (4,5,'127888068979.9935054429');
+INSERT INTO num_exp_div VALUES (4,5,'475.66281046305802686061');
+INSERT INTO num_exp_add VALUES (4,6,'7893362.98953026');
+INSERT INTO num_exp_sub VALUES (4,6,'7705559.83426974');
+INSERT INTO num_exp_mul VALUES (4,6,'732381731243.745115764094');
+INSERT INTO num_exp_div VALUES (4,6,'83.05996138436129499606');
+INSERT INTO num_exp_add VALUES (4,7,'-75229023.5881');
+INSERT INTO num_exp_sub VALUES (4,7,'90827946.4119');
+INSERT INTO num_exp_mul VALUES (4,7,'-647577464846017.9715');
+INSERT INTO num_exp_div VALUES (4,7,'-.09393717604145131637');
+INSERT INTO num_exp_add VALUES (4,8,'7874342.4119');
+INSERT INTO num_exp_sub VALUES (4,8,'7724580.4119');
+INSERT INTO num_exp_mul VALUES (4,8,'584031469984.4839');
+INSERT INTO num_exp_div VALUES (4,8,'104.15808298366741897143');
+INSERT INTO num_exp_add VALUES (4,9,'-17127342.633147420');
+INSERT INTO num_exp_sub VALUES (4,9,'32726265.456947420');
+INSERT INTO num_exp_mul VALUES (4,9,'-194415646271340.1815956522980');
+INSERT INTO num_exp_div VALUES (4,9,'-.31289456112403769409');
+INSERT INTO num_exp_add VALUES (5,0,'16397.038491');
+INSERT INTO num_exp_sub VALUES (5,0,'16397.038491');
+INSERT INTO num_exp_mul VALUES (5,0,'0');
+INSERT INTO num_exp_div VALUES (5,0,'NaN');
+INSERT INTO num_exp_add VALUES (5,1,'16397.038491');
+INSERT INTO num_exp_sub VALUES (5,1,'16397.038491');
+INSERT INTO num_exp_mul VALUES (5,1,'0');
+INSERT INTO num_exp_div VALUES (5,1,'NaN');
+INSERT INTO num_exp_add VALUES (5,2,'-34322095.176906047');
+INSERT INTO num_exp_sub VALUES (5,2,'34354889.253888047');
+INSERT INTO num_exp_mul VALUES (5,2,'-563049578578.769242506736077');
+INSERT INTO num_exp_div VALUES (5,2,'-.00047751189505192446');
+INSERT INTO num_exp_add VALUES (5,3,'16401.348491');
+INSERT INTO num_exp_sub VALUES (5,3,'16392.728491');
+INSERT INTO num_exp_mul VALUES (5,3,'70671.23589621');
+INSERT INTO num_exp_div VALUES (5,3,'3804.41728329466357308584');
+INSERT INTO num_exp_add VALUES (5,4,'7815858.450391');
+INSERT INTO num_exp_sub VALUES (5,4,'-7783064.373409');
+INSERT INTO num_exp_mul VALUES (5,4,'127888068979.9935054429');
+INSERT INTO num_exp_div VALUES (5,4,'.00210232958726897192');
+INSERT INTO num_exp_add VALUES (5,5,'32794.076982');
+INSERT INTO num_exp_sub VALUES (5,5,'0');
+INSERT INTO num_exp_mul VALUES (5,5,'268862871.275335557081');
+INSERT INTO num_exp_div VALUES (5,5,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (5,6,'110298.61612126');
+INSERT INTO num_exp_sub VALUES (5,6,'-77504.53913926');
+INSERT INTO num_exp_mul VALUES (5,6,'1539707782.76899778633766');
+INSERT INTO num_exp_div VALUES (5,6,'.17461941433576102689');
+INSERT INTO num_exp_add VALUES (5,7,'-83012087.961509');
+INSERT INTO num_exp_sub VALUES (5,7,'83044882.038491');
+INSERT INTO num_exp_mul VALUES (5,7,'-1361421264394.416135');
+INSERT INTO num_exp_div VALUES (5,7,'-.00019748690453643710');
+INSERT INTO num_exp_add VALUES (5,8,'91278.038491');
+INSERT INTO num_exp_sub VALUES (5,8,'-58483.961509');
+INSERT INTO num_exp_mul VALUES (5,8,'1227826639.244571');
+INSERT INTO num_exp_div VALUES (5,8,'.21897461960978085228');
+INSERT INTO num_exp_add VALUES (5,9,'-24910407.006556420');
+INSERT INTO num_exp_sub VALUES (5,9,'24943201.083538420');
+INSERT INTO num_exp_mul VALUES (5,9,'-408725765384.257043660243220');
+INSERT INTO num_exp_div VALUES (5,9,'-.00065780749354660427');
+INSERT INTO num_exp_add VALUES (6,0,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (6,0,'93901.57763026');
+INSERT INTO num_exp_mul VALUES (6,0,'0');
+INSERT INTO num_exp_div VALUES (6,0,'NaN');
+INSERT INTO num_exp_add VALUES (6,1,'93901.57763026');
+INSERT INTO num_exp_sub VALUES (6,1,'93901.57763026');
+INSERT INTO num_exp_mul VALUES (6,1,'0');
+INSERT INTO num_exp_div VALUES (6,1,'NaN');
+INSERT INTO num_exp_add VALUES (6,2,'-34244590.637766787');
+INSERT INTO num_exp_sub VALUES (6,2,'34432393.793027307');
+INSERT INTO num_exp_mul VALUES (6,2,'-3224438592470.18449811926184222');
+INSERT INTO num_exp_div VALUES (6,2,'-.00273458651128995823');
+INSERT INTO num_exp_add VALUES (6,3,'93905.88763026');
+INSERT INTO num_exp_sub VALUES (6,3,'93897.26763026');
+INSERT INTO num_exp_mul VALUES (6,3,'404715.7995864206');
+INSERT INTO num_exp_div VALUES (6,3,'21786.90896293735498839907');
+INSERT INTO num_exp_add VALUES (6,4,'7893362.98953026');
+INSERT INTO num_exp_sub VALUES (6,4,'-7705559.83426974');
+INSERT INTO num_exp_mul VALUES (6,4,'732381731243.745115764094');
+INSERT INTO num_exp_div VALUES (6,4,'.01203949512295682469');
+INSERT INTO num_exp_add VALUES (6,5,'110298.61612126');
+INSERT INTO num_exp_sub VALUES (6,5,'77504.53913926');
+INSERT INTO num_exp_mul VALUES (6,5,'1539707782.76899778633766');
+INSERT INTO num_exp_div VALUES (6,5,'5.72674008674192359679');
+INSERT INTO num_exp_add VALUES (6,6,'187803.15526052');
+INSERT INTO num_exp_sub VALUES (6,6,'0');
+INSERT INTO num_exp_mul VALUES (6,6,'8817506281.4517452372676676');
+INSERT INTO num_exp_div VALUES (6,6,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (6,7,'-82934583.42236974');
+INSERT INTO num_exp_sub VALUES (6,7,'83122386.57763026');
+INSERT INTO num_exp_mul VALUES (6,7,'-7796505729750.37795610');
+INSERT INTO num_exp_div VALUES (6,7,'-.00113095617281538980');
+INSERT INTO num_exp_add VALUES (6,8,'168782.57763026');
+INSERT INTO num_exp_sub VALUES (6,8,'19020.57763026');
+INSERT INTO num_exp_mul VALUES (6,8,'7031444034.53149906');
+INSERT INTO num_exp_div VALUES (6,8,'1.25401073209839612184');
+INSERT INTO num_exp_add VALUES (6,9,'-24832902.467417160');
+INSERT INTO num_exp_sub VALUES (6,9,'25020705.622677680');
+INSERT INTO num_exp_mul VALUES (6,9,'-2340666225110.29929521292692920');
+INSERT INTO num_exp_div VALUES (6,9,'-.00376709254265256789');
+INSERT INTO num_exp_add VALUES (7,0,'-83028485');
+INSERT INTO num_exp_sub VALUES (7,0,'-83028485');
+INSERT INTO num_exp_mul VALUES (7,0,'0');
+INSERT INTO num_exp_div VALUES (7,0,'NaN');
+INSERT INTO num_exp_add VALUES (7,1,'-83028485');
+INSERT INTO num_exp_sub VALUES (7,1,'-83028485');
+INSERT INTO num_exp_mul VALUES (7,1,'0');
+INSERT INTO num_exp_div VALUES (7,1,'NaN');
+INSERT INTO num_exp_add VALUES (7,2,'-117366977.215397047');
+INSERT INTO num_exp_sub VALUES (7,2,'-48689992.784602953');
+INSERT INTO num_exp_mul VALUES (7,2,'2851072985828710.485883795');
+INSERT INTO num_exp_div VALUES (7,2,'2.41794207151503385700');
+INSERT INTO num_exp_add VALUES (7,3,'-83028480.69');
+INSERT INTO num_exp_sub VALUES (7,3,'-83028489.31');
+INSERT INTO num_exp_mul VALUES (7,3,'-357852770.35');
+INSERT INTO num_exp_div VALUES (7,3,'-19264149.65197215777262180974');
+INSERT INTO num_exp_add VALUES (7,4,'-75229023.5881');
+INSERT INTO num_exp_sub VALUES (7,4,'-90827946.4119');
+INSERT INTO num_exp_mul VALUES (7,4,'-647577464846017.9715');
+INSERT INTO num_exp_div VALUES (7,4,'-10.64541262725136247686');
+INSERT INTO num_exp_add VALUES (7,5,'-83012087.961509');
+INSERT INTO num_exp_sub VALUES (7,5,'-83044882.038491');
+INSERT INTO num_exp_mul VALUES (7,5,'-1361421264394.416135');
+INSERT INTO num_exp_div VALUES (7,5,'-5063.62688881730941836574');
+INSERT INTO num_exp_add VALUES (7,6,'-82934583.42236974');
+INSERT INTO num_exp_sub VALUES (7,6,'-83122386.57763026');
+INSERT INTO num_exp_mul VALUES (7,6,'-7796505729750.37795610');
+INSERT INTO num_exp_div VALUES (7,6,'-884.20756174009028770294');
+INSERT INTO num_exp_add VALUES (7,7,'-166056970');
+INSERT INTO num_exp_sub VALUES (7,7,'0');
+INSERT INTO num_exp_mul VALUES (7,7,'6893729321395225');
+INSERT INTO num_exp_div VALUES (7,7,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (7,8,'-82953604');
+INSERT INTO num_exp_sub VALUES (7,8,'-83103366');
+INSERT INTO num_exp_mul VALUES (7,8,'-6217255985285');
+INSERT INTO num_exp_div VALUES (7,8,'-1108.80577182462841041118');
+INSERT INTO num_exp_add VALUES (7,9,'-107955289.045047420');
+INSERT INTO num_exp_sub VALUES (7,9,'-58101680.954952580');
+INSERT INTO num_exp_mul VALUES (7,9,'2069634775752159.035758700');
+INSERT INTO num_exp_div VALUES (7,9,'3.33089171198810413382');
+INSERT INTO num_exp_add VALUES (8,0,'74881');
+INSERT INTO num_exp_sub VALUES (8,0,'74881');
+INSERT INTO num_exp_mul VALUES (8,0,'0');
+INSERT INTO num_exp_div VALUES (8,0,'NaN');
+INSERT INTO num_exp_add VALUES (8,1,'74881');
+INSERT INTO num_exp_sub VALUES (8,1,'74881');
+INSERT INTO num_exp_mul VALUES (8,1,'0');
+INSERT INTO num_exp_div VALUES (8,1,'NaN');
+INSERT INTO num_exp_add VALUES (8,2,'-34263611.215397047');
+INSERT INTO num_exp_sub VALUES (8,2,'34413373.215397047');
+INSERT INTO num_exp_mul VALUES (8,2,'-2571300635581.146276407');
+INSERT INTO num_exp_div VALUES (8,2,'-.00218067233500788615');
+INSERT INTO num_exp_add VALUES (8,3,'74885.31');
+INSERT INTO num_exp_sub VALUES (8,3,'74876.69');
+INSERT INTO num_exp_mul VALUES (8,3,'322737.11');
+INSERT INTO num_exp_div VALUES (8,3,'17373.78190255220417633410');
+INSERT INTO num_exp_add VALUES (8,4,'7874342.4119');
+INSERT INTO num_exp_sub VALUES (8,4,'-7724580.4119');
+INSERT INTO num_exp_mul VALUES (8,4,'584031469984.4839');
+INSERT INTO num_exp_div VALUES (8,4,'.00960079113741758956');
+INSERT INTO num_exp_add VALUES (8,5,'91278.038491');
+INSERT INTO num_exp_sub VALUES (8,5,'58483.961509');
+INSERT INTO num_exp_mul VALUES (8,5,'1227826639.244571');
+INSERT INTO num_exp_div VALUES (8,5,'4.56673929509287019456');
+INSERT INTO num_exp_add VALUES (8,6,'168782.57763026');
+INSERT INTO num_exp_sub VALUES (8,6,'-19020.57763026');
+INSERT INTO num_exp_mul VALUES (8,6,'7031444034.53149906');
+INSERT INTO num_exp_div VALUES (8,6,'.79744134113322314424');
+INSERT INTO num_exp_add VALUES (8,7,'-82953604');
+INSERT INTO num_exp_sub VALUES (8,7,'83103366');
+INSERT INTO num_exp_mul VALUES (8,7,'-6217255985285');
+INSERT INTO num_exp_div VALUES (8,7,'-.00090187120721280172');
+INSERT INTO num_exp_add VALUES (8,8,'149762');
+INSERT INTO num_exp_sub VALUES (8,8,'0');
+INSERT INTO num_exp_mul VALUES (8,8,'5607164161');
+INSERT INTO num_exp_div VALUES (8,8,'1.00000000000000000000');
+INSERT INTO num_exp_add VALUES (8,9,'-24851923.045047420');
+INSERT INTO num_exp_sub VALUES (8,9,'25001685.045047420');
+INSERT INTO num_exp_mul VALUES (8,9,'-1866544013697.195857020');
+INSERT INTO num_exp_div VALUES (8,9,'-.00300403532938582735');
+INSERT INTO num_exp_add VALUES (9,0,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (9,0,'-24926804.045047420');
+INSERT INTO num_exp_mul VALUES (9,0,'0');
+INSERT INTO num_exp_div VALUES (9,0,'NaN');
+INSERT INTO num_exp_add VALUES (9,1,'-24926804.045047420');
+INSERT INTO num_exp_sub VALUES (9,1,'-24926804.045047420');
+INSERT INTO num_exp_mul VALUES (9,1,'0');
+INSERT INTO num_exp_div VALUES (9,1,'NaN');
+INSERT INTO num_exp_add VALUES (9,2,'-59265296.260444467');
+INSERT INTO num_exp_sub VALUES (9,2,'9411688.170349627');
+INSERT INTO num_exp_mul VALUES (9,2,'855948866655588.453741509242968740');
+INSERT INTO num_exp_div VALUES (9,2,'.72591434384152961526');
+INSERT INTO num_exp_add VALUES (9,3,'-24926799.735047420');
+INSERT INTO num_exp_sub VALUES (9,3,'-24926808.355047420');
+INSERT INTO num_exp_mul VALUES (9,3,'-107434525.43415438020');
+INSERT INTO num_exp_div VALUES (9,3,'-5783481.21694835730858468677');
+INSERT INTO num_exp_add VALUES (9,4,'-17127342.633147420');
+INSERT INTO num_exp_sub VALUES (9,4,'-32726265.456947420');
+INSERT INTO num_exp_mul VALUES (9,4,'-194415646271340.1815956522980');
+INSERT INTO num_exp_div VALUES (9,4,'-3.19596478892958416484');
+INSERT INTO num_exp_add VALUES (9,5,'-24910407.006556420');
+INSERT INTO num_exp_sub VALUES (9,5,'-24943201.083538420');
+INSERT INTO num_exp_mul VALUES (9,5,'-408725765384.257043660243220');
+INSERT INTO num_exp_div VALUES (9,5,'-1520.20159364322004505807');
+INSERT INTO num_exp_add VALUES (9,6,'-24832902.467417160');
+INSERT INTO num_exp_sub VALUES (9,6,'-25020705.622677680');
+INSERT INTO num_exp_mul VALUES (9,6,'-2340666225110.29929521292692920');
+INSERT INTO num_exp_div VALUES (9,6,'-265.45671195426965751280');
+INSERT INTO num_exp_add VALUES (9,7,'-107955289.045047420');
+INSERT INTO num_exp_sub VALUES (9,7,'58101680.954952580');
+INSERT INTO num_exp_mul VALUES (9,7,'2069634775752159.035758700');
+INSERT INTO num_exp_div VALUES (9,7,'.30021990699995814689');
+INSERT INTO num_exp_add VALUES (9,8,'-24851923.045047420');
+INSERT INTO num_exp_sub VALUES (9,8,'-25001685.045047420');
+INSERT INTO num_exp_mul VALUES (9,8,'-1866544013697.195857020');
+INSERT INTO num_exp_div VALUES (9,8,'-332.88556569820675471748');
+INSERT INTO num_exp_add VALUES (9,9,'-49853608.090094840');
+INSERT INTO num_exp_sub VALUES (9,9,'0');
+INSERT INTO num_exp_mul VALUES (9,9,'621345559900192.420120630048656400');
+INSERT INTO num_exp_div VALUES (9,9,'1.00000000000000000000');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_sqrt VALUES (0,'0');
+INSERT INTO num_exp_sqrt VALUES (1,'0');
+INSERT INTO num_exp_sqrt VALUES (2,'5859.90547836712524903505');
+INSERT INTO num_exp_sqrt VALUES (3,'2.07605394920266944396');
+INSERT INTO num_exp_sqrt VALUES (4,'2792.75158435189147418923');
+INSERT INTO num_exp_sqrt VALUES (5,'128.05092147657509145473');
+INSERT INTO num_exp_sqrt VALUES (6,'306.43364311096782703406');
+INSERT INTO num_exp_sqrt VALUES (7,'9111.99676251039939975230');
+INSERT INTO num_exp_sqrt VALUES (8,'273.64392922189960397542');
+INSERT INTO num_exp_sqrt VALUES (9,'4992.67503899937593364766');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_ln VALUES (0,'NaN');
+INSERT INTO num_exp_ln VALUES (1,'NaN');
+INSERT INTO num_exp_ln VALUES (2,'17.35177750493897715514');
+INSERT INTO num_exp_ln VALUES (3,'1.46093790411565641971');
+INSERT INTO num_exp_ln VALUES (4,'15.86956523951936572464');
+INSERT INTO num_exp_ln VALUES (5,'9.70485601768871834038');
+INSERT INTO num_exp_ln VALUES (6,'11.45000246622944403127');
+INSERT INTO num_exp_ln VALUES (7,'18.23469429965478772991');
+INSERT INTO num_exp_ln VALUES (8,'11.22365546576315513668');
+INSERT INTO num_exp_ln VALUES (9,'17.03145425013166006962');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_log10 VALUES (0,'NaN');
+INSERT INTO num_exp_log10 VALUES (1,'NaN');
+INSERT INTO num_exp_log10 VALUES (2,'7.53578122160797276459');
+INSERT INTO num_exp_log10 VALUES (3,'.63447727016073160075');
+INSERT INTO num_exp_log10 VALUES (4,'6.89206461372691743345');
+INSERT INTO num_exp_log10 VALUES (5,'4.21476541614777768626');
+INSERT INTO num_exp_log10 VALUES (6,'4.97267288886207207671');
+INSERT INTO num_exp_log10 VALUES (7,'7.91922711353275546914');
+INSERT INTO num_exp_log10 VALUES (8,'4.87437163556421004138');
+INSERT INTO num_exp_log10 VALUES (9,'7.39666659961986567059');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_exp_power_10_ln VALUES (0,'NaN');
+INSERT INTO num_exp_power_10_ln VALUES (1,'NaN');
+INSERT INTO num_exp_power_10_ln VALUES (2,'224790267919917955.13261618583642653184');
+INSERT INTO num_exp_power_10_ln VALUES (3,'28.90266599445155957393');
+INSERT INTO num_exp_power_10_ln VALUES (4,'7405685069594999.07733999469386277636');
+INSERT INTO num_exp_power_10_ln VALUES (5,'5068226527.32127265408584640098');
+INSERT INTO num_exp_power_10_ln VALUES (6,'281839893606.99372343357047819067');
+INSERT INTO num_exp_power_10_ln VALUES (7,'1716699575118597095.42330819910640247627');
+INSERT INTO num_exp_power_10_ln VALUES (8,'167361463828.07491320069016125952');
+INSERT INTO num_exp_power_10_ln VALUES (9,'107511333880052007.04141124673540337457');
+COMMIT TRANSACTION;
+BEGIN TRANSACTION;
+INSERT INTO num_data VALUES (0, '0');
+INSERT INTO num_data VALUES (1, '0');
+INSERT INTO num_data VALUES (2, '-34338492.215397047');
+INSERT INTO num_data VALUES (3, '4.31');
+INSERT INTO num_data VALUES (4, '7799461.4119');
+INSERT INTO num_data VALUES (5, '16397.038491');
+INSERT INTO num_data VALUES (6, '93901.57763026');
+INSERT INTO num_data VALUES (7, '-83028485');
+INSERT INTO num_data VALUES (8, '74881');
+INSERT INTO num_data VALUES (9, '-24926804.045047420');
+COMMIT TRANSACTION;
+
+-- ******************************
+-- * Create indices for faster checks
+-- ******************************
+
+CREATE UNIQUE INDEX num_exp_add_idx ON num_exp_add (id1, id2);
+CREATE UNIQUE INDEX num_exp_sub_idx ON num_exp_sub (id1, id2);
+CREATE UNIQUE INDEX num_exp_div_idx ON num_exp_div (id1, id2);
+CREATE UNIQUE INDEX num_exp_mul_idx ON num_exp_mul (id1, id2);
+CREATE UNIQUE INDEX num_exp_sqrt_idx ON num_exp_sqrt (id);
+CREATE UNIQUE INDEX num_exp_ln_idx ON num_exp_ln (id);
+CREATE UNIQUE INDEX num_exp_log10_idx ON num_exp_log10 (id);
+CREATE UNIQUE INDEX num_exp_power_10_ln_idx ON num_exp_power_10_ln (id);
+
+VACUUM ANALYZE num_exp_add;
+VACUUM ANALYZE num_exp_sub;
+VACUUM ANALYZE num_exp_div;
+VACUUM ANALYZE num_exp_mul;
+VACUUM ANALYZE num_exp_sqrt;
+VACUUM ANALYZE num_exp_ln;
+VACUUM ANALYZE num_exp_log10;
+VACUUM ANALYZE num_exp_power_10_ln;
+
+-- ******************************
+-- * Now check the behaviour of the NUMERIC type
+-- ******************************
+
+-- ******************************
+-- * Addition check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, t1.val + t2.val
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_add t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val + t2.val, 10)
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 10) as expected
+ FROM num_result t1, num_exp_add t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 10);
+
+-- ******************************
+-- * Subtraction check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, t1.val - t2.val
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_sub t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val - t2.val, 40)
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 40)
+ FROM num_result t1, num_exp_sub t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 40);
+
+-- ******************************
+-- * Multiply check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, t1.val * t2.val
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_mul t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val * t2.val, 30)
+ FROM num_data t1, num_data t2;
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 30) as expected
+ FROM num_result t1, num_exp_mul t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 30);
+
+-- ******************************
+-- * Division check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, t1.val / t2.val
+ FROM num_data t1, num_data t2
+ WHERE t2.val != '0.0';
+SELECT t1.id1, t1.id2, t1.result, t2.expected
+ FROM num_result t1, num_exp_div t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != t2.expected;
+
+DELETE FROM num_result;
+INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val / t2.val, 80)
+ FROM num_data t1, num_data t2
+ WHERE t2.val != '0.0';
+SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 80) as expected
+ FROM num_result t1, num_exp_div t2
+ WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2
+ AND t1.result != round(t2.expected, 80);
+
+-- ******************************
+-- * Square root check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT id, 0, SQRT(ABS(val))
+ FROM num_data;
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_sqrt t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+
+-- ******************************
+-- * Natural logarithm check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT id, 0, LN(ABS(val))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_ln t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+
+-- ******************************
+-- * Logarithm base 10 check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT id, 0, LOG(numeric '10', ABS(val))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_log10 t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+
+-- ******************************
+-- * POWER(10, LN(value)) check
+-- ******************************
+DELETE FROM num_result;
+INSERT INTO num_result SELECT id, 0, POWER(numeric '10', LN(ABS(round(val,200))))
+ FROM num_data
+ WHERE val != '0.0';
+SELECT t1.id1, t1.result, t2.expected
+ FROM num_result t1, num_exp_power_10_ln t2
+ WHERE t1.id1 = t2.id
+ AND t1.result != t2.expected;
+
+-- ******************************
+-- * Check behavior with Inf and NaN inputs. It's easiest to handle these
+-- * separately from the num_data framework used above, because some input
+-- * combinations will throw errors.
+-- ******************************
+
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('inf'),('-inf'),('nan'))
+SELECT x1, x2,
+ x1 + x2 AS sum,
+ x1 - x2 AS diff,
+ x1 * x2 AS prod
+FROM v AS v1(x1), v AS v2(x2);
+
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('inf'),('-inf'),('nan'))
+SELECT x1, x2,
+ x1 / x2 AS quot,
+ x1 % x2 AS mod,
+ div(x1, x2) AS div
+FROM v AS v1(x1), v AS v2(x2) WHERE x2 != 0;
+
+SELECT 'inf'::numeric / '0';
+SELECT '-inf'::numeric / '0';
+SELECT 'nan'::numeric / '0';
+SELECT '0'::numeric / '0';
+SELECT 'inf'::numeric % '0';
+SELECT '-inf'::numeric % '0';
+SELECT 'nan'::numeric % '0';
+SELECT '0'::numeric % '0';
+SELECT div('inf'::numeric, '0');
+SELECT div('-inf'::numeric, '0');
+SELECT div('nan'::numeric, '0');
+SELECT div('0'::numeric, '0');
+
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('inf'),('-inf'),('nan'))
+SELECT x, -x as minusx, abs(x), floor(x), ceil(x), sign(x), numeric_inc(x) as inc
+FROM v;
+
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('inf'),('-inf'),('nan'))
+SELECT x, round(x), round(x,1) as round1, trunc(x), trunc(x,1) as trunc1
+FROM v;
+
+-- the large values fall into the numeric abbreviation code's maximal classes
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('1e340'),('-1e340'),
+ ('inf'),('-inf'),('nan'),
+ ('inf'),('-inf'),('nan'))
+SELECT substring(x::text, 1, 32)
+FROM v ORDER BY x;
+
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('4.2'),('inf'),('nan'))
+SELECT x, sqrt(x)
+FROM v;
+
+SELECT sqrt('-1'::numeric);
+SELECT sqrt('-inf'::numeric);
+
+WITH v(x) AS
+ (VALUES('1'::numeric),('4.2'),('inf'),('nan'))
+SELECT x,
+ log(x),
+ log10(x),
+ ln(x)
+FROM v;
+
+SELECT ln('0'::numeric);
+SELECT ln('-1'::numeric);
+SELECT ln('-inf'::numeric);
+
+WITH v(x) AS
+ (VALUES('2'::numeric),('4.2'),('inf'),('nan'))
+SELECT x1, x2,
+ log(x1, x2)
+FROM v AS v1(x1), v AS v2(x2);
+
+SELECT log('0'::numeric, '10');
+SELECT log('10'::numeric, '0');
+SELECT log('-inf'::numeric, '10');
+SELECT log('10'::numeric, '-inf');
+SELECT log('inf'::numeric, '0');
+SELECT log('inf'::numeric, '-inf');
+SELECT log('-inf'::numeric, 'inf');
+
+WITH v(x) AS
+ (VALUES('0'::numeric),('1'),('2'),('4.2'),('inf'),('nan'))
+SELECT x1, x2,
+ power(x1, x2)
+FROM v AS v1(x1), v AS v2(x2) WHERE x1 != 0 OR x2 >= 0;
+
+SELECT power('0'::numeric, '-1');
+SELECT power('0'::numeric, '-inf');
+SELECT power('-1'::numeric, 'inf');
+SELECT power('-2'::numeric, '3');
+SELECT power('-2'::numeric, '3.3');
+SELECT power('-2'::numeric, '-1');
+SELECT power('-2'::numeric, '-1.5');
+SELECT power('-2'::numeric, 'inf');
+SELECT power('-2'::numeric, '-inf');
+SELECT power('inf'::numeric, '-2');
+SELECT power('inf'::numeric, '-inf');
+SELECT power('-inf'::numeric, '2');
+SELECT power('-inf'::numeric, '3');
+SELECT power('-inf'::numeric, '4.5');
+SELECT power('-inf'::numeric, '-2');
+SELECT power('-inf'::numeric, '-3');
+SELECT power('-inf'::numeric, '0');
+SELECT power('-inf'::numeric, 'inf');
+SELECT power('-inf'::numeric, '-inf');
+
+-- ******************************
+-- * miscellaneous checks for things that have been broken in the past...
+-- ******************************
+-- numeric AVG used to fail on some platforms
+SELECT AVG(val) FROM num_data;
+SELECT MAX(val) FROM num_data;
+SELECT MIN(val) FROM num_data;
+SELECT STDDEV(val) FROM num_data;
+SELECT VARIANCE(val) FROM num_data;
+
+-- Check for appropriate rounding and overflow
+CREATE TABLE fract_only (id int, val numeric(4,4));
+INSERT INTO fract_only VALUES (1, '0.0');
+INSERT INTO fract_only VALUES (2, '0.1');
+INSERT INTO fract_only VALUES (3, '1.0'); -- should fail
+INSERT INTO fract_only VALUES (4, '-0.9999');
+INSERT INTO fract_only VALUES (5, '0.99994');
+INSERT INTO fract_only VALUES (6, '0.99995'); -- should fail
+INSERT INTO fract_only VALUES (7, '0.00001');
+INSERT INTO fract_only VALUES (8, '0.00017');
+INSERT INTO fract_only VALUES (9, 'NaN');
+INSERT INTO fract_only VALUES (10, 'Inf'); -- should fail
+INSERT INTO fract_only VALUES (11, '-Inf'); -- should fail
+SELECT * FROM fract_only;
+DROP TABLE fract_only;
+
+-- Check conversion to integers
+SELECT (-9223372036854775808.5)::int8; -- should fail
+SELECT (-9223372036854775808.4)::int8; -- ok
+SELECT 9223372036854775807.4::int8; -- ok
+SELECT 9223372036854775807.5::int8; -- should fail
+SELECT (-2147483648.5)::int4; -- should fail
+SELECT (-2147483648.4)::int4; -- ok
+SELECT 2147483647.4::int4; -- ok
+SELECT 2147483647.5::int4; -- should fail
+SELECT (-32768.5)::int2; -- should fail
+SELECT (-32768.4)::int2; -- ok
+SELECT 32767.4::int2; -- ok
+SELECT 32767.5::int2; -- should fail
+
+-- Check inf/nan conversion behavior
+SELECT 'NaN'::float8::numeric;
+SELECT 'Infinity'::float8::numeric;
+SELECT '-Infinity'::float8::numeric;
+SELECT 'NaN'::numeric::float8;
+SELECT 'Infinity'::numeric::float8;
+SELECT '-Infinity'::numeric::float8;
+SELECT 'NaN'::float4::numeric;
+SELECT 'Infinity'::float4::numeric;
+SELECT '-Infinity'::float4::numeric;
+SELECT 'NaN'::numeric::float4;
+SELECT 'Infinity'::numeric::float4;
+SELECT '-Infinity'::numeric::float4;
+SELECT '42'::int2::numeric;
+SELECT 'NaN'::numeric::int2;
+SELECT 'Infinity'::numeric::int2;
+SELECT '-Infinity'::numeric::int2;
+SELECT 'NaN'::numeric::int4;
+SELECT 'Infinity'::numeric::int4;
+SELECT '-Infinity'::numeric::int4;
+SELECT 'NaN'::numeric::int8;
+SELECT 'Infinity'::numeric::int8;
+SELECT '-Infinity'::numeric::int8;
+
+-- Simple check that ceil(), floor(), and round() work correctly
+CREATE TABLE ceil_floor_round (a numeric);
+INSERT INTO ceil_floor_round VALUES ('-5.5');
+INSERT INTO ceil_floor_round VALUES ('-5.499999');
+INSERT INTO ceil_floor_round VALUES ('9.5');
+INSERT INTO ceil_floor_round VALUES ('9.4999999');
+INSERT INTO ceil_floor_round VALUES ('0.0');
+INSERT INTO ceil_floor_round VALUES ('0.0000001');
+INSERT INTO ceil_floor_round VALUES ('-0.000001');
+SELECT a, ceil(a), ceiling(a), floor(a), round(a) FROM ceil_floor_round;
+DROP TABLE ceil_floor_round;
+
+-- Check rounding, it should round ties away from zero.
+SELECT i as pow,
+ round((-2.5 * 10 ^ i)::numeric, -i),
+ round((-1.5 * 10 ^ i)::numeric, -i),
+ round((-0.5 * 10 ^ i)::numeric, -i),
+ round((0.5 * 10 ^ i)::numeric, -i),
+ round((1.5 * 10 ^ i)::numeric, -i),
+ round((2.5 * 10 ^ i)::numeric, -i)
+FROM generate_series(-5,5) AS t(i);
+
+-- Testing for width_bucket(). For convenience, we test both the
+-- numeric and float8 versions of the function in this file.
+
+-- errors
+SELECT width_bucket(5.0, 3.0, 4.0, 0);
+SELECT width_bucket(5.0, 3.0, 4.0, -5);
+SELECT width_bucket(3.5, 3.0, 3.0, 888);
+SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, 0);
+SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5);
+SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888);
+SELECT width_bucket('NaN', 3.0, 4.0, 888);
+SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888);
+SELECT width_bucket(2.0, 3.0, '-inf', 888);
+SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888);
+
+-- normal operation
+CREATE TABLE width_bucket_test (operand_num numeric, operand_f8 float8);
+
+COPY width_bucket_test (operand_num) FROM stdin;
+-5.2
+-0.0000000001
+0.000000000001
+1
+1.99999999999999
+2
+2.00000000000001
+3
+4
+4.5
+5
+5.5
+6
+7
+8
+9
+9.99999999999999
+10
+10.0000000000001
+\.
+
+UPDATE width_bucket_test SET operand_f8 = operand_num::float8;
+
+SELECT
+ operand_num,
+ width_bucket(operand_num, 0, 10, 5) AS wb_1,
+ width_bucket(operand_f8, 0, 10, 5) AS wb_1f,
+ width_bucket(operand_num, 10, 0, 5) AS wb_2,
+ width_bucket(operand_f8, 10, 0, 5) AS wb_2f,
+ width_bucket(operand_num, 2, 8, 4) AS wb_3,
+ width_bucket(operand_f8, 2, 8, 4) AS wb_3f,
+ width_bucket(operand_num, 5.0, 5.5, 20) AS wb_4,
+ width_bucket(operand_f8, 5.0, 5.5, 20) AS wb_4f,
+ width_bucket(operand_num, -25, 25, 10) AS wb_5,
+ width_bucket(operand_f8, -25, 25, 10) AS wb_5f
+ FROM width_bucket_test;
+
+-- Check positive and negative infinity: we require
+-- finite bucket bounds, but allow an infinite operand
+SELECT width_bucket(0.0::numeric, 'Infinity'::numeric, 5, 10); -- error
+SELECT width_bucket(0.0::numeric, 5, '-Infinity'::numeric, 20); -- error
+SELECT width_bucket('Infinity'::numeric, 1, 10, 10),
+ width_bucket('-Infinity'::numeric, 1, 10, 10);
+SELECT width_bucket(0.0::float8, 'Infinity'::float8, 5, 10); -- error
+SELECT width_bucket(0.0::float8, 5, '-Infinity'::float8, 20); -- error
+SELECT width_bucket('Infinity'::float8, 1, 10, 10),
+ width_bucket('-Infinity'::float8, 1, 10, 10);
+
+DROP TABLE width_bucket_test;
+
+-- Simple test for roundoff error when results should be exact
+SELECT x, width_bucket(x::float8, 10, 100, 9) as flt,
+ width_bucket(x::numeric, 10, 100, 9) as num
+FROM generate_series(0, 110, 10) x;
+SELECT x, width_bucket(x::float8, 100, 10, 9) as flt,
+ width_bucket(x::numeric, 100, 10, 9) as num
+FROM generate_series(0, 110, 10) x;
+
+--
+-- TO_CHAR()
+--
+SELECT to_char(val, '9G999G999G999G999G999')
+ FROM num_data;
+
+SELECT to_char(val, '9G999G999G999G999G999D999G999G999G999G999')
+ FROM num_data;
+
+SELECT to_char(val, '9999999999999999.999999999999999PR')
+ FROM num_data;
+
+SELECT to_char(val, '9999999999999999.999999999999999S')
+ FROM num_data;
+
+SELECT to_char(val, 'MI9999999999999999.999999999999999') FROM num_data;
+SELECT to_char(val, 'FMS9999999999999999.999999999999999') FROM num_data;
+SELECT to_char(val, 'FM9999999999999999.999999999999999THPR') FROM num_data;
+SELECT to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data;
+SELECT to_char(val, '0999999999999999.999999999999999') FROM num_data;
+SELECT to_char(val, 'S0999999999999999.999999999999999') FROM num_data;
+SELECT to_char(val, 'FM0999999999999999.999999999999999') FROM num_data;
+SELECT to_char(val, 'FM9999999999999999.099999999999999') FROM num_data;
+SELECT to_char(val, 'FM9999999999990999.990999999999999') FROM num_data;
+SELECT to_char(val, 'FM0999999999999999.999909999999999') FROM num_data;
+SELECT to_char(val, 'FM9999999990999999.099999999999999') FROM num_data;
+SELECT to_char(val, 'L9999999999999999.099999999999999') FROM num_data;
+SELECT to_char(val, 'FM9999999999999999.99999999999999') FROM num_data;
+SELECT to_char(val, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data;
+SELECT to_char(val, 'FMS 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data;
+SELECT to_char(val, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM num_data;
+SELECT to_char(val, '999999SG9999999999') FROM num_data;
+SELECT to_char(val, 'FM9999999999999999.999999999999999') FROM num_data;
+SELECT to_char(val, '9.999EEEE') FROM num_data;
+
+WITH v(val) AS
+ (VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan'))
+SELECT val,
+ to_char(val, '9.999EEEE') as numeric,
+ to_char(val::float8, '9.999EEEE') as float8,
+ to_char(val::float4, '9.999EEEE') as float4
+FROM v;
+
+WITH v(exp) AS
+ (VALUES(-16379),(-16378),(-1234),(-789),(-45),(-5),(-4),(-3),(-2),(-1),(0),
+ (1),(2),(3),(4),(5),(38),(275),(2345),(45678),(131070),(131071))
+SELECT exp,
+ to_char(('1.2345e'||exp)::numeric, '9.999EEEE') as numeric
+FROM v;
+
+WITH v(val) AS
+ (VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan'))
+SELECT val,
+ to_char(val, 'MI9999999999.99') as numeric,
+ to_char(val::float8, 'MI9999999999.99') as float8,
+ to_char(val::float4, 'MI9999999999.99') as float4
+FROM v;
+
+WITH v(val) AS
+ (VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan'))
+SELECT val,
+ to_char(val, 'MI99.99') as numeric,
+ to_char(val::float8, 'MI99.99') as float8,
+ to_char(val::float4, 'MI99.99') as float4
+FROM v;
+
+SELECT to_char('100'::numeric, 'FM999.9');
+SELECT to_char('100'::numeric, 'FM999.');
+SELECT to_char('100'::numeric, 'FM999');
+
+-- Check parsing of literal text in a format string
+SELECT to_char('100'::numeric, 'foo999');
+SELECT to_char('100'::numeric, 'f\oo999');
+SELECT to_char('100'::numeric, 'f\\oo999');
+SELECT to_char('100'::numeric, 'f\"oo999');
+SELECT to_char('100'::numeric, 'f\\"oo999');
+SELECT to_char('100'::numeric, 'f"ool"999');
+SELECT to_char('100'::numeric, 'f"\ool"999');
+SELECT to_char('100'::numeric, 'f"\\ool"999');
+SELECT to_char('100'::numeric, 'f"ool\"999');
+SELECT to_char('100'::numeric, 'f"ool\\"999');
+
+-- TO_NUMBER()
+--
+SET lc_numeric = 'C';
+SELECT to_number('-34,338,492', '99G999G999');
+SELECT to_number('-34,338,492.654,878', '99G999G999D999G999');
+SELECT to_number('<564646.654564>', '999999.999999PR');
+SELECT to_number('0.00001-', '9.999999S');
+SELECT to_number('5.01-', 'FM9.999999S');
+SELECT to_number('5.01-', 'FM9.999999MI');
+SELECT to_number('5 4 4 4 4 8 . 7 8', '9 9 9 9 9 9 . 9 9');
+SELECT to_number('.01', 'FM9.99');
+SELECT to_number('.0', '99999999.99999999');
+SELECT to_number('0', '99.99');
+SELECT to_number('.-01', 'S99.99');
+SELECT to_number('.01-', '99.99S');
+SELECT to_number(' . 0 1-', ' 9 9 . 9 9 S');
+SELECT to_number('34,50','999,99');
+SELECT to_number('123,000','999G');
+SELECT to_number('123456','999G999');
+SELECT to_number('$1234.56','L9,999.99');
+SELECT to_number('$1234.56','L99,999.99');
+SELECT to_number('$1,234.56','L99,999.99');
+SELECT to_number('1234.56','L99,999.99');
+SELECT to_number('1,234.56','L99,999.99');
+SELECT to_number('42nd', '99th');
+RESET lc_numeric;
+
+--
+-- Input syntax
+--
+
+CREATE TABLE num_input_test (n1 numeric);
+
+-- good inputs
+INSERT INTO num_input_test(n1) VALUES (' 123');
+INSERT INTO num_input_test(n1) VALUES (' 3245874 ');
+INSERT INTO num_input_test(n1) VALUES (' -93853');
+INSERT INTO num_input_test(n1) VALUES ('555.50');
+INSERT INTO num_input_test(n1) VALUES ('-555.50');
+INSERT INTO num_input_test(n1) VALUES ('NaN ');
+INSERT INTO num_input_test(n1) VALUES (' nan');
+INSERT INTO num_input_test(n1) VALUES (' inf ');
+INSERT INTO num_input_test(n1) VALUES (' +inf ');
+INSERT INTO num_input_test(n1) VALUES (' -inf ');
+INSERT INTO num_input_test(n1) VALUES (' Infinity ');
+INSERT INTO num_input_test(n1) VALUES (' +inFinity ');
+INSERT INTO num_input_test(n1) VALUES (' -INFINITY ');
+
+-- bad inputs
+INSERT INTO num_input_test(n1) VALUES (' ');
+INSERT INTO num_input_test(n1) VALUES (' 1234 %');
+INSERT INTO num_input_test(n1) VALUES ('xyz');
+INSERT INTO num_input_test(n1) VALUES ('- 1234');
+INSERT INTO num_input_test(n1) VALUES ('5 . 0');
+INSERT INTO num_input_test(n1) VALUES ('5. 0 ');
+INSERT INTO num_input_test(n1) VALUES ('');
+INSERT INTO num_input_test(n1) VALUES (' N aN ');
+INSERT INTO num_input_test(n1) VALUES ('+ infinity');
+
+SELECT * FROM num_input_test;
+
+--
+-- Test some corner cases for multiplication
+--
+
+select 4790999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+
+select 4789999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+
+select 4770999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+
+select 4769999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999;
+
+select trim_scale((0.1 - 2e-16383) * (0.1 - 3e-16383));
+
+--
+-- Test some corner cases for division
+--
+
+select 999999999999999999999::numeric/1000000000000000000000;
+select div(999999999999999999999::numeric,1000000000000000000000);
+select mod(999999999999999999999::numeric,1000000000000000000000);
+select div(-9999999999999999999999::numeric,1000000000000000000000);
+select mod(-9999999999999999999999::numeric,1000000000000000000000);
+select div(-9999999999999999999999::numeric,1000000000000000000000)*1000000000000000000000 + mod(-9999999999999999999999::numeric,1000000000000000000000);
+select mod (70.0,70) ;
+select div (70.0,70) ;
+select 70.0 / 70 ;
+select 12345678901234567890 % 123;
+select 12345678901234567890 / 123;
+select div(12345678901234567890, 123);
+select div(12345678901234567890, 123) * 123 + 12345678901234567890 % 123;
+
+--
+-- Test some corner cases for square root
+--
+
+select sqrt(1.000000000000003::numeric);
+select sqrt(1.000000000000004::numeric);
+select sqrt(96627521408608.56340355805::numeric);
+select sqrt(96627521408608.56340355806::numeric);
+select sqrt(515549506212297735.073688290367::numeric);
+select sqrt(515549506212297735.073688290368::numeric);
+select sqrt(8015491789940783531003294973900306::numeric);
+select sqrt(8015491789940783531003294973900307::numeric);
+
+--
+-- Test code path for raising to integer powers
+--
+
+select 10.0 ^ -2147483648 as rounds_to_zero;
+select 10.0 ^ -2147483647 as rounds_to_zero;
+select 10.0 ^ 2147483647 as overflows;
+select 117743296169.0 ^ 1000000000 as overflows;
+
+-- cases that used to return inaccurate results
+select 3.789 ^ 21;
+select 3.789 ^ 35;
+select 1.2 ^ 345;
+select 0.12 ^ (-20);
+select 1.000000000123 ^ (-2147483648);
+select coalesce(nullif(0.9999999999 ^ 23300000000000, 0), 0) as rounds_to_zero;
+select round(((1 - 1.500012345678e-1000) ^ 1.45e1003) * 1e1000);
+
+-- cases that used to error out
+select 0.12 ^ (-25);
+select 0.5678 ^ (-85);
+select coalesce(nullif(0.9999999999 ^ 70000000000000, 0), 0) as underflows;
+
+-- negative base to integer powers
+select (-1.0) ^ 2147483646;
+select (-1.0) ^ 2147483647;
+select (-1.0) ^ 2147483648;
+select (-1.0) ^ 1000000000000000;
+select (-1.0) ^ 1000000000000001;
+
+--
+-- Tests for raising to non-integer powers
+--
+
+-- special cases
+select 0.0 ^ 0.0;
+select (-12.34) ^ 0.0;
+select 12.34 ^ 0.0;
+select 0.0 ^ 12.34;
+
+-- NaNs
+select 'NaN'::numeric ^ 'NaN'::numeric;
+select 'NaN'::numeric ^ 0;
+select 'NaN'::numeric ^ 1;
+select 0 ^ 'NaN'::numeric;
+select 1 ^ 'NaN'::numeric;
+
+-- invalid inputs
+select 0.0 ^ (-12.34);
+select (-12.34) ^ 1.2;
+
+-- cases that used to generate inaccurate results
+select 32.1 ^ 9.8;
+select 32.1 ^ (-9.8);
+select 12.3 ^ 45.6;
+select 12.3 ^ (-45.6);
+
+-- big test
+select 1.234 ^ 5678;
+
+--
+-- Tests for EXP()
+--
+
+-- special cases
+select exp(0.0);
+select exp(1.0);
+select exp(1.0::numeric(71,70));
+select exp('nan'::numeric);
+select exp('inf'::numeric);
+select exp('-inf'::numeric);
+select coalesce(nullif(exp(-5000::numeric), 0), 0) as rounds_to_zero;
+select coalesce(nullif(exp(-10000::numeric), 0), 0) as underflows;
+
+-- cases that used to generate inaccurate results
+select exp(32.999);
+select exp(-32.999);
+select exp(123.456);
+select exp(-123.456);
+
+-- big test
+select exp(1234.5678);
+
+--
+-- Tests for generate_series
+--
+select * from generate_series(0.0::numeric, 4.0::numeric);
+select * from generate_series(0.1::numeric, 4.0::numeric, 1.3::numeric);
+select * from generate_series(4.0::numeric, -1.5::numeric, -2.2::numeric);
+-- Trigger errors
+select * from generate_series(-100::numeric, 100::numeric, 0::numeric);
+select * from generate_series(-100::numeric, 100::numeric, 'nan'::numeric);
+select * from generate_series('nan'::numeric, 100::numeric, 10::numeric);
+select * from generate_series(0::numeric, 'nan'::numeric, 10::numeric);
+select * from generate_series('inf'::numeric, 'inf'::numeric, 10::numeric);
+select * from generate_series(0::numeric, 'inf'::numeric, 10::numeric);
+select * from generate_series(0::numeric, '42'::numeric, '-inf'::numeric);
+-- Checks maximum, output is truncated
+select (i / (10::numeric ^ 131071))::numeric(1,0)
+ from generate_series(6 * (10::numeric ^ 131071),
+ 9 * (10::numeric ^ 131071),
+ 10::numeric ^ 131071) as a(i);
+-- Check usage with variables
+select * from generate_series(1::numeric, 3::numeric) i, generate_series(i,3) j;
+select * from generate_series(1::numeric, 3::numeric) i, generate_series(1,i) j;
+select * from generate_series(1::numeric, 3::numeric) i, generate_series(1,5,i) j;
+
+--
+-- Tests for LN()
+--
+
+-- Invalid inputs
+select ln(-12.34);
+select ln(0.0);
+
+-- Some random tests
+select ln(1.2345678e-28);
+select ln(0.0456789);
+select ln(0.349873948359354029493948309745709580730482050975);
+select ln(0.99949452);
+select ln(1.00049687395);
+select ln(1234.567890123456789);
+select ln(5.80397490724e5);
+select ln(9.342536355e34);
+
+--
+-- Tests for LOG() (base 10)
+--
+
+-- invalid inputs
+select log(-12.34);
+select log(0.0);
+
+-- some random tests
+select log(1.234567e-89);
+select log(3.4634998359873254962349856073435545);
+select log(9.999999999999999999);
+select log(10.00000000000000000);
+select log(10.00000000000000001);
+select log(590489.45235237);
+
+--
+-- Tests for LOG() (arbitrary base)
+--
+
+-- invalid inputs
+select log(-12.34, 56.78);
+select log(-12.34, -56.78);
+select log(12.34, -56.78);
+select log(0.0, 12.34);
+select log(12.34, 0.0);
+select log(1.0, 12.34);
+
+-- some random tests
+select log(1.23e-89, 6.4689e45);
+select log(0.99923, 4.58934e34);
+select log(1.000016, 8.452010e18);
+select log(3.1954752e47, 9.4792021e-73);
+
+--
+-- Tests for scale()
+--
+
+select scale(numeric 'NaN');
+select scale(numeric 'inf');
+select scale(NULL::numeric);
+select scale(1.12);
+select scale(0);
+select scale(0.00);
+select scale(1.12345);
+select scale(110123.12475871856128);
+select scale(-1123.12471856128);
+select scale(-13.000000000000000);
+
+--
+-- Tests for min_scale()
+--
+
+select min_scale(numeric 'NaN') is NULL; -- should be true
+select min_scale(numeric 'inf') is NULL; -- should be true
+select min_scale(0); -- no digits
+select min_scale(0.00); -- no digits again
+select min_scale(1.0); -- no scale
+select min_scale(1.1); -- scale 1
+select min_scale(1.12); -- scale 2
+select min_scale(1.123); -- scale 3
+select min_scale(1.1234); -- scale 4, filled digit
+select min_scale(1.12345); -- scale 5, 2 NDIGITS
+select min_scale(1.1000); -- 1 pos in NDIGITS
+select min_scale(1e100); -- very big number
+
+--
+-- Tests for trim_scale()
+--
+
+select trim_scale(numeric 'NaN');
+select trim_scale(numeric 'inf');
+select trim_scale(1.120);
+select trim_scale(0);
+select trim_scale(0.00);
+select trim_scale(1.1234500);
+select trim_scale(110123.12475871856128000);
+select trim_scale(-1123.124718561280000000);
+select trim_scale(-13.00000000000000000000);
+select trim_scale(1e100);
+
+--
+-- Tests for SUM()
+--
+
+-- cases that need carry propagation
+SELECT SUM(9999::numeric) FROM generate_series(1, 100000);
+SELECT SUM((-9999)::numeric) FROM generate_series(1, 100000);
+
+--
+-- Tests for GCD()
+--
+SELECT a, b, gcd(a, b), gcd(a, -b), gcd(-b, a), gcd(-b, -a)
+FROM (VALUES (0::numeric, 0::numeric),
+ (0::numeric, numeric 'NaN'),
+ (0::numeric, 46375::numeric),
+ (433125::numeric, 46375::numeric),
+ (43312.5::numeric, 4637.5::numeric),
+ (4331.250::numeric, 463.75000::numeric),
+ ('inf', '0'),
+ ('inf', '42'),
+ ('inf', 'inf')
+ ) AS v(a, b);
+
+--
+-- Tests for LCM()
+--
+SELECT a,b, lcm(a, b), lcm(a, -b), lcm(-b, a), lcm(-b, -a)
+FROM (VALUES (0::numeric, 0::numeric),
+ (0::numeric, numeric 'NaN'),
+ (0::numeric, 13272::numeric),
+ (13272::numeric, 13272::numeric),
+ (423282::numeric, 13272::numeric),
+ (42328.2::numeric, 1327.2::numeric),
+ (4232.820::numeric, 132.72000::numeric),
+ ('inf', '0'),
+ ('inf', '42'),
+ ('inf', 'inf')
+ ) AS v(a, b);
+
+SELECT lcm(9999 * (10::numeric)^131068 + (10::numeric^131068 - 1), 2); -- overflow
+
+--
+-- Tests for factorial
+--
+SELECT factorial(4);
+SELECT factorial(15);
+SELECT factorial(100000);
+SELECT factorial(0);
+SELECT factorial(-4);
+
+--
+-- Tests for pg_lsn()
+--
+SELECT pg_lsn(23783416::numeric);
+SELECT pg_lsn(0::numeric);
+SELECT pg_lsn(18446744073709551615::numeric);
+SELECT pg_lsn(-1::numeric);
+SELECT pg_lsn(18446744073709551616::numeric);
+SELECT pg_lsn('NaN'::numeric);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/numerology.out b/ydb/library/yql/tests/postgresql/original/cases/numerology.out
new file mode 100644
index 0000000000..44d6c435de
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/numerology.out
@@ -0,0 +1,136 @@
+--
+-- NUMEROLOGY
+-- Test various combinations of numeric types and functions.
+--
+--
+-- Test implicit type conversions
+-- This fails for Postgres v6.1 (and earlier?)
+-- so let's try explicit conversions for now - tgl 97/05/07
+--
+CREATE TABLE TEMP_FLOAT (f1 FLOAT8);
+INSERT INTO TEMP_FLOAT (f1)
+ SELECT float8(f1) FROM INT4_TBL;
+INSERT INTO TEMP_FLOAT (f1)
+ SELECT float8(f1) FROM INT2_TBL;
+SELECT f1 FROM TEMP_FLOAT
+ ORDER BY f1;
+ f1
+-------------
+ -2147483647
+ -123456
+ -32767
+ -1234
+ 0
+ 0
+ 1234
+ 32767
+ 123456
+ 2147483647
+(10 rows)
+
+-- int4
+CREATE TABLE TEMP_INT4 (f1 INT4);
+INSERT INTO TEMP_INT4 (f1)
+ SELECT int4(f1) FROM FLOAT8_TBL
+ WHERE (f1 > -2147483647) AND (f1 < 2147483647);
+INSERT INTO TEMP_INT4 (f1)
+ SELECT int4(f1) FROM INT2_TBL;
+SELECT f1 FROM TEMP_INT4
+ ORDER BY f1;
+ f1
+--------
+ -32767
+ -1234
+ -1004
+ -35
+ 0
+ 0
+ 0
+ 1234
+ 32767
+(9 rows)
+
+-- int2
+CREATE TABLE TEMP_INT2 (f1 INT2);
+INSERT INTO TEMP_INT2 (f1)
+ SELECT int2(f1) FROM FLOAT8_TBL
+ WHERE (f1 >= -32767) AND (f1 <= 32767);
+INSERT INTO TEMP_INT2 (f1)
+ SELECT int2(f1) FROM INT4_TBL
+ WHERE (f1 >= -32767) AND (f1 <= 32767);
+SELECT f1 FROM TEMP_INT2
+ ORDER BY f1;
+ f1
+-------
+ -1004
+ -35
+ 0
+ 0
+ 0
+(5 rows)
+
+--
+-- Group-by combinations
+--
+CREATE TABLE TEMP_GROUP (f1 INT4, f2 INT4, f3 FLOAT8);
+INSERT INTO TEMP_GROUP
+ SELECT 1, (- i.f1), (- f.f1)
+ FROM INT4_TBL i, FLOAT8_TBL f;
+INSERT INTO TEMP_GROUP
+ SELECT 2, i.f1, f.f1
+ FROM INT4_TBL i, FLOAT8_TBL f;
+SELECT DISTINCT f1 AS two FROM TEMP_GROUP ORDER BY 1;
+ two
+-----
+ 1
+ 2
+(2 rows)
+
+SELECT f1 AS two, max(f3) AS max_float, min(f3) as min_float
+ FROM TEMP_GROUP
+ GROUP BY f1
+ ORDER BY two, max_float, min_float;
+ two | max_float | min_float
+-----+----------------------+-----------------------
+ 1 | 1.2345678901234e+200 | -0
+ 2 | 0 | -1.2345678901234e+200
+(2 rows)
+
+-- GROUP BY a result column name is not legal per SQL92, but we accept it
+-- anyway (if the name is not the name of any column exposed by FROM).
+SELECT f1 AS two, max(f3) AS max_float, min(f3) AS min_float
+ FROM TEMP_GROUP
+ GROUP BY two
+ ORDER BY two, max_float, min_float;
+ two | max_float | min_float
+-----+----------------------+-----------------------
+ 1 | 1.2345678901234e+200 | -0
+ 2 | 0 | -1.2345678901234e+200
+(2 rows)
+
+SELECT f1 AS two, (max(f3) + 1) AS max_plus_1, (min(f3) - 1) AS min_minus_1
+ FROM TEMP_GROUP
+ GROUP BY f1
+ ORDER BY two, min_minus_1;
+ two | max_plus_1 | min_minus_1
+-----+----------------------+-----------------------
+ 1 | 1.2345678901234e+200 | -1
+ 2 | 1 | -1.2345678901234e+200
+(2 rows)
+
+SELECT f1 AS two,
+ max(f2) + min(f2) AS max_plus_min,
+ min(f3) - 1 AS min_minus_1
+ FROM TEMP_GROUP
+ GROUP BY f1
+ ORDER BY two, min_minus_1;
+ two | max_plus_min | min_minus_1
+-----+--------------+-----------------------
+ 1 | 0 | -1
+ 2 | 0 | -1.2345678901234e+200
+(2 rows)
+
+DROP TABLE TEMP_INT2;
+DROP TABLE TEMP_INT4;
+DROP TABLE TEMP_FLOAT;
+DROP TABLE TEMP_GROUP;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/numerology.sql b/ydb/library/yql/tests/postgresql/original/cases/numerology.sql
new file mode 100644
index 0000000000..fddb58f8fd
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/numerology.sql
@@ -0,0 +1,98 @@
+--
+-- NUMEROLOGY
+-- Test various combinations of numeric types and functions.
+--
+
+--
+-- Test implicit type conversions
+-- This fails for Postgres v6.1 (and earlier?)
+-- so let's try explicit conversions for now - tgl 97/05/07
+--
+
+CREATE TABLE TEMP_FLOAT (f1 FLOAT8);
+
+INSERT INTO TEMP_FLOAT (f1)
+ SELECT float8(f1) FROM INT4_TBL;
+
+INSERT INTO TEMP_FLOAT (f1)
+ SELECT float8(f1) FROM INT2_TBL;
+
+SELECT f1 FROM TEMP_FLOAT
+ ORDER BY f1;
+
+-- int4
+
+CREATE TABLE TEMP_INT4 (f1 INT4);
+
+INSERT INTO TEMP_INT4 (f1)
+ SELECT int4(f1) FROM FLOAT8_TBL
+ WHERE (f1 > -2147483647) AND (f1 < 2147483647);
+
+INSERT INTO TEMP_INT4 (f1)
+ SELECT int4(f1) FROM INT2_TBL;
+
+SELECT f1 FROM TEMP_INT4
+ ORDER BY f1;
+
+-- int2
+
+CREATE TABLE TEMP_INT2 (f1 INT2);
+
+INSERT INTO TEMP_INT2 (f1)
+ SELECT int2(f1) FROM FLOAT8_TBL
+ WHERE (f1 >= -32767) AND (f1 <= 32767);
+
+INSERT INTO TEMP_INT2 (f1)
+ SELECT int2(f1) FROM INT4_TBL
+ WHERE (f1 >= -32767) AND (f1 <= 32767);
+
+SELECT f1 FROM TEMP_INT2
+ ORDER BY f1;
+
+--
+-- Group-by combinations
+--
+
+CREATE TABLE TEMP_GROUP (f1 INT4, f2 INT4, f3 FLOAT8);
+
+INSERT INTO TEMP_GROUP
+ SELECT 1, (- i.f1), (- f.f1)
+ FROM INT4_TBL i, FLOAT8_TBL f;
+
+INSERT INTO TEMP_GROUP
+ SELECT 2, i.f1, f.f1
+ FROM INT4_TBL i, FLOAT8_TBL f;
+
+SELECT DISTINCT f1 AS two FROM TEMP_GROUP ORDER BY 1;
+
+SELECT f1 AS two, max(f3) AS max_float, min(f3) as min_float
+ FROM TEMP_GROUP
+ GROUP BY f1
+ ORDER BY two, max_float, min_float;
+
+-- GROUP BY a result column name is not legal per SQL92, but we accept it
+-- anyway (if the name is not the name of any column exposed by FROM).
+SELECT f1 AS two, max(f3) AS max_float, min(f3) AS min_float
+ FROM TEMP_GROUP
+ GROUP BY two
+ ORDER BY two, max_float, min_float;
+
+SELECT f1 AS two, (max(f3) + 1) AS max_plus_1, (min(f3) - 1) AS min_minus_1
+ FROM TEMP_GROUP
+ GROUP BY f1
+ ORDER BY two, min_minus_1;
+
+SELECT f1 AS two,
+ max(f2) + min(f2) AS max_plus_min,
+ min(f3) - 1 AS min_minus_1
+ FROM TEMP_GROUP
+ GROUP BY f1
+ ORDER BY two, min_minus_1;
+
+DROP TABLE TEMP_INT2;
+
+DROP TABLE TEMP_INT4;
+
+DROP TABLE TEMP_FLOAT;
+
+DROP TABLE TEMP_GROUP;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/oid.out b/ydb/library/yql/tests/postgresql/original/cases/oid.out
new file mode 100644
index 0000000000..8909373453
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/oid.out
@@ -0,0 +1,122 @@
+--
+-- OID
+--
+CREATE TABLE OID_TBL(f1 oid);
+INSERT INTO OID_TBL(f1) VALUES ('1234');
+INSERT INTO OID_TBL(f1) VALUES ('1235');
+INSERT INTO OID_TBL(f1) VALUES ('987');
+INSERT INTO OID_TBL(f1) VALUES ('-1040');
+INSERT INTO OID_TBL(f1) VALUES ('99999999');
+INSERT INTO OID_TBL(f1) VALUES ('5 ');
+INSERT INTO OID_TBL(f1) VALUES (' 10 ');
+-- leading/trailing hard tab is also allowed
+INSERT INTO OID_TBL(f1) VALUES (' 15 ');
+-- bad inputs
+INSERT INTO OID_TBL(f1) VALUES ('');
+ERROR: invalid input syntax for type oid: ""
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('');
+ ^
+INSERT INTO OID_TBL(f1) VALUES (' ');
+ERROR: invalid input syntax for type oid: " "
+LINE 1: INSERT INTO OID_TBL(f1) VALUES (' ');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('asdfasd');
+ERROR: invalid input syntax for type oid: "asdfasd"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('asdfasd');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('99asdfasd');
+ERROR: invalid input syntax for type oid: "99asdfasd"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('99asdfasd');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('5 d');
+ERROR: invalid input syntax for type oid: "5 d"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('5 d');
+ ^
+INSERT INTO OID_TBL(f1) VALUES (' 5d');
+ERROR: invalid input syntax for type oid: " 5d"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES (' 5d');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('5 5');
+ERROR: invalid input syntax for type oid: "5 5"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('5 5');
+ ^
+INSERT INTO OID_TBL(f1) VALUES (' - 500');
+ERROR: invalid input syntax for type oid: " - 500"
+LINE 1: INSERT INTO OID_TBL(f1) VALUES (' - 500');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('32958209582039852935');
+ERROR: value "32958209582039852935" is out of range for type oid
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('32958209582039852935');
+ ^
+INSERT INTO OID_TBL(f1) VALUES ('-23582358720398502385');
+ERROR: value "-23582358720398502385" is out of range for type oid
+LINE 1: INSERT INTO OID_TBL(f1) VALUES ('-23582358720398502385');
+ ^
+SELECT * FROM OID_TBL;
+ f1
+------------
+ 1234
+ 1235
+ 987
+ 4294966256
+ 99999999
+ 5
+ 10
+ 15
+(8 rows)
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 = 1234;
+ f1
+------
+ 1234
+(1 row)
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 <> '1234';
+ f1
+------------
+ 1235
+ 987
+ 4294966256
+ 99999999
+ 5
+ 10
+ 15
+(7 rows)
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 <= '1234';
+ f1
+------
+ 1234
+ 987
+ 5
+ 10
+ 15
+(5 rows)
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 < '1234';
+ f1
+-----
+ 987
+ 5
+ 10
+ 15
+(4 rows)
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 >= '1234';
+ f1
+------------
+ 1234
+ 1235
+ 4294966256
+ 99999999
+(4 rows)
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 > '1234';
+ f1
+------------
+ 1235
+ 4294966256
+ 99999999
+(3 rows)
+
+DROP TABLE OID_TBL;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/oid.sql b/ydb/library/yql/tests/postgresql/original/cases/oid.sql
new file mode 100644
index 0000000000..25b4b68a6a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/oid.sql
@@ -0,0 +1,43 @@
+--
+-- OID
+--
+
+CREATE TABLE OID_TBL(f1 oid);
+
+INSERT INTO OID_TBL(f1) VALUES ('1234');
+INSERT INTO OID_TBL(f1) VALUES ('1235');
+INSERT INTO OID_TBL(f1) VALUES ('987');
+INSERT INTO OID_TBL(f1) VALUES ('-1040');
+INSERT INTO OID_TBL(f1) VALUES ('99999999');
+INSERT INTO OID_TBL(f1) VALUES ('5 ');
+INSERT INTO OID_TBL(f1) VALUES (' 10 ');
+-- leading/trailing hard tab is also allowed
+INSERT INTO OID_TBL(f1) VALUES (' 15 ');
+
+-- bad inputs
+INSERT INTO OID_TBL(f1) VALUES ('');
+INSERT INTO OID_TBL(f1) VALUES (' ');
+INSERT INTO OID_TBL(f1) VALUES ('asdfasd');
+INSERT INTO OID_TBL(f1) VALUES ('99asdfasd');
+INSERT INTO OID_TBL(f1) VALUES ('5 d');
+INSERT INTO OID_TBL(f1) VALUES (' 5d');
+INSERT INTO OID_TBL(f1) VALUES ('5 5');
+INSERT INTO OID_TBL(f1) VALUES (' - 500');
+INSERT INTO OID_TBL(f1) VALUES ('32958209582039852935');
+INSERT INTO OID_TBL(f1) VALUES ('-23582358720398502385');
+
+SELECT * FROM OID_TBL;
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 = 1234;
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 <> '1234';
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 <= '1234';
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 < '1234';
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 >= '1234';
+
+SELECT o.* FROM OID_TBL o WHERE o.f1 > '1234';
+
+DROP TABLE OID_TBL;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select.out b/ydb/library/yql/tests/postgresql/original/cases/select.out
new file mode 100644
index 0000000000..6dcd4fa8f3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select.out
@@ -0,0 +1,973 @@
+--
+-- SELECT
+--
+-- btree index
+-- awk '{if($1<10){print;}else{next;}}' onek.data | sort +0n -1
+--
+SELECT * FROM onek
+ WHERE onek.unique1 < 10
+ ORDER BY onek.unique1;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 0 | 998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | AAAAAA | KMBAAA | OOOOxx
+ 1 | 214 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 2 | 3 | BAAAAA | GIAAAA | OOOOxx
+ 2 | 326 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 4 | 5 | CAAAAA | OMAAAA | OOOOxx
+ 3 | 431 | 1 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 6 | 7 | DAAAAA | PQAAAA | VVVVxx
+ 4 | 833 | 0 | 0 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 8 | 9 | EAAAAA | BGBAAA | HHHHxx
+ 5 | 541 | 1 | 1 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 10 | 11 | FAAAAA | VUAAAA | HHHHxx
+ 6 | 978 | 0 | 2 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 12 | 13 | GAAAAA | QLBAAA | OOOOxx
+ 7 | 647 | 1 | 3 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 14 | 15 | HAAAAA | XYAAAA | VVVVxx
+ 8 | 653 | 0 | 0 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 16 | 17 | IAAAAA | DZAAAA | HHHHxx
+ 9 | 49 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 18 | 19 | JAAAAA | XBAAAA | HHHHxx
+(10 rows)
+
+--
+-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1
+--
+SELECT onek.unique1, onek.stringu1 FROM onek
+ WHERE onek.unique1 < 20
+ ORDER BY unique1 using >;
+ unique1 | stringu1
+---------+----------
+ 19 | TAAAAA
+ 18 | SAAAAA
+ 17 | RAAAAA
+ 16 | QAAAAA
+ 15 | PAAAAA
+ 14 | OAAAAA
+ 13 | NAAAAA
+ 12 | MAAAAA
+ 11 | LAAAAA
+ 10 | KAAAAA
+ 9 | JAAAAA
+ 8 | IAAAAA
+ 7 | HAAAAA
+ 6 | GAAAAA
+ 5 | FAAAAA
+ 4 | EAAAAA
+ 3 | DAAAAA
+ 2 | CAAAAA
+ 1 | BAAAAA
+ 0 | AAAAAA
+(20 rows)
+
+--
+-- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2
+--
+SELECT onek.unique1, onek.stringu1 FROM onek
+ WHERE onek.unique1 > 980
+ ORDER BY stringu1 using <;
+ unique1 | stringu1
+---------+----------
+ 988 | AMAAAA
+ 989 | BMAAAA
+ 990 | CMAAAA
+ 991 | DMAAAA
+ 992 | EMAAAA
+ 993 | FMAAAA
+ 994 | GMAAAA
+ 995 | HMAAAA
+ 996 | IMAAAA
+ 997 | JMAAAA
+ 998 | KMAAAA
+ 999 | LMAAAA
+ 981 | TLAAAA
+ 982 | ULAAAA
+ 983 | VLAAAA
+ 984 | WLAAAA
+ 985 | XLAAAA
+ 986 | YLAAAA
+ 987 | ZLAAAA
+(19 rows)
+
+--
+-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data |
+-- sort +1d -2 +0nr -1
+--
+SELECT onek.unique1, onek.string4 FROM onek
+ WHERE onek.unique1 > 980
+ ORDER BY string4 using <, unique1 using >;
+ unique1 | string4
+---------+---------
+ 999 | AAAAxx
+ 995 | AAAAxx
+ 983 | AAAAxx
+ 982 | AAAAxx
+ 981 | AAAAxx
+ 998 | HHHHxx
+ 997 | HHHHxx
+ 993 | HHHHxx
+ 990 | HHHHxx
+ 986 | HHHHxx
+ 996 | OOOOxx
+ 991 | OOOOxx
+ 988 | OOOOxx
+ 987 | OOOOxx
+ 985 | OOOOxx
+ 994 | VVVVxx
+ 992 | VVVVxx
+ 989 | VVVVxx
+ 984 | VVVVxx
+(19 rows)
+
+--
+-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data |
+-- sort +1dr -2 +0n -1
+--
+SELECT onek.unique1, onek.string4 FROM onek
+ WHERE onek.unique1 > 980
+ ORDER BY string4 using >, unique1 using <;
+ unique1 | string4
+---------+---------
+ 984 | VVVVxx
+ 989 | VVVVxx
+ 992 | VVVVxx
+ 994 | VVVVxx
+ 985 | OOOOxx
+ 987 | OOOOxx
+ 988 | OOOOxx
+ 991 | OOOOxx
+ 996 | OOOOxx
+ 986 | HHHHxx
+ 990 | HHHHxx
+ 993 | HHHHxx
+ 997 | HHHHxx
+ 998 | HHHHxx
+ 981 | AAAAxx
+ 982 | AAAAxx
+ 983 | AAAAxx
+ 995 | AAAAxx
+ 999 | AAAAxx
+(19 rows)
+
+--
+-- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data |
+-- sort +0nr -1 +1d -2
+--
+SELECT onek.unique1, onek.string4 FROM onek
+ WHERE onek.unique1 < 20
+ ORDER BY unique1 using >, string4 using <;
+ unique1 | string4
+---------+---------
+ 19 | OOOOxx
+ 18 | VVVVxx
+ 17 | HHHHxx
+ 16 | OOOOxx
+ 15 | VVVVxx
+ 14 | AAAAxx
+ 13 | OOOOxx
+ 12 | AAAAxx
+ 11 | OOOOxx
+ 10 | AAAAxx
+ 9 | HHHHxx
+ 8 | HHHHxx
+ 7 | VVVVxx
+ 6 | OOOOxx
+ 5 | HHHHxx
+ 4 | HHHHxx
+ 3 | VVVVxx
+ 2 | OOOOxx
+ 1 | OOOOxx
+ 0 | OOOOxx
+(20 rows)
+
+--
+-- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data |
+-- sort +0n -1 +1dr -2
+--
+SELECT onek.unique1, onek.string4 FROM onek
+ WHERE onek.unique1 < 20
+ ORDER BY unique1 using <, string4 using >;
+ unique1 | string4
+---------+---------
+ 0 | OOOOxx
+ 1 | OOOOxx
+ 2 | OOOOxx
+ 3 | VVVVxx
+ 4 | HHHHxx
+ 5 | HHHHxx
+ 6 | OOOOxx
+ 7 | VVVVxx
+ 8 | HHHHxx
+ 9 | HHHHxx
+ 10 | AAAAxx
+ 11 | OOOOxx
+ 12 | AAAAxx
+ 13 | OOOOxx
+ 14 | AAAAxx
+ 15 | VVVVxx
+ 16 | OOOOxx
+ 17 | HHHHxx
+ 18 | VVVVxx
+ 19 | OOOOxx
+(20 rows)
+
+--
+-- test partial btree indexes
+--
+-- As of 7.2, planner probably won't pick an indexscan without stats,
+-- so ANALYZE first. Also, we want to prevent it from picking a bitmapscan
+-- followed by sort, because that could hide index ordering problems.
+--
+ANALYZE onek2;
+SET enable_seqscan TO off;
+SET enable_bitmapscan TO off;
+SET enable_sort TO off;
+--
+-- awk '{if($1<10){print $0;}else{next;}}' onek.data | sort +0n -1
+--
+SELECT onek2.* FROM onek2 WHERE onek2.unique1 < 10;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 0 | 998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | AAAAAA | KMBAAA | OOOOxx
+ 1 | 214 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 2 | 3 | BAAAAA | GIAAAA | OOOOxx
+ 2 | 326 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 4 | 5 | CAAAAA | OMAAAA | OOOOxx
+ 3 | 431 | 1 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 6 | 7 | DAAAAA | PQAAAA | VVVVxx
+ 4 | 833 | 0 | 0 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 8 | 9 | EAAAAA | BGBAAA | HHHHxx
+ 5 | 541 | 1 | 1 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 10 | 11 | FAAAAA | VUAAAA | HHHHxx
+ 6 | 978 | 0 | 2 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 12 | 13 | GAAAAA | QLBAAA | OOOOxx
+ 7 | 647 | 1 | 3 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 14 | 15 | HAAAAA | XYAAAA | VVVVxx
+ 8 | 653 | 0 | 0 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 16 | 17 | IAAAAA | DZAAAA | HHHHxx
+ 9 | 49 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 18 | 19 | JAAAAA | XBAAAA | HHHHxx
+(10 rows)
+
+--
+-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1
+--
+SELECT onek2.unique1, onek2.stringu1 FROM onek2
+ WHERE onek2.unique1 < 20
+ ORDER BY unique1 using >;
+ unique1 | stringu1
+---------+----------
+ 19 | TAAAAA
+ 18 | SAAAAA
+ 17 | RAAAAA
+ 16 | QAAAAA
+ 15 | PAAAAA
+ 14 | OAAAAA
+ 13 | NAAAAA
+ 12 | MAAAAA
+ 11 | LAAAAA
+ 10 | KAAAAA
+ 9 | JAAAAA
+ 8 | IAAAAA
+ 7 | HAAAAA
+ 6 | GAAAAA
+ 5 | FAAAAA
+ 4 | EAAAAA
+ 3 | DAAAAA
+ 2 | CAAAAA
+ 1 | BAAAAA
+ 0 | AAAAAA
+(20 rows)
+
+--
+-- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2
+--
+SELECT onek2.unique1, onek2.stringu1 FROM onek2
+ WHERE onek2.unique1 > 980;
+ unique1 | stringu1
+---------+----------
+ 981 | TLAAAA
+ 982 | ULAAAA
+ 983 | VLAAAA
+ 984 | WLAAAA
+ 985 | XLAAAA
+ 986 | YLAAAA
+ 987 | ZLAAAA
+ 988 | AMAAAA
+ 989 | BMAAAA
+ 990 | CMAAAA
+ 991 | DMAAAA
+ 992 | EMAAAA
+ 993 | FMAAAA
+ 994 | GMAAAA
+ 995 | HMAAAA
+ 996 | IMAAAA
+ 997 | JMAAAA
+ 998 | KMAAAA
+ 999 | LMAAAA
+(19 rows)
+
+RESET enable_seqscan;
+RESET enable_bitmapscan;
+RESET enable_sort;
+SELECT two, stringu1, ten, string4
+ INTO TABLE tmp
+ FROM onek;
+--
+-- awk '{print $1,$2;}' person.data |
+-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data |
+-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - student.data |
+-- awk 'BEGIN{FS=" ";}{if(NF!=2){print $4,$5;}else{print;}}' - stud_emp.data
+--
+-- SELECT name, age FROM person*; ??? check if different
+SELECT p.name, p.age FROM person* p;
+ name | age
+---------+-----
+ mike | 40
+ joe | 20
+ sally | 34
+ sandra | 19
+ alex | 30
+ sue | 50
+ denise | 24
+ sarah | 88
+ teresa | 38
+ nan | 28
+ leah | 68
+ wendy | 78
+ melissa | 28
+ joan | 18
+ mary | 8
+ jane | 58
+ liza | 38
+ jean | 28
+ jenifer | 38
+ juanita | 58
+ susan | 78
+ zena | 98
+ martie | 88
+ chris | 78
+ pat | 18
+ zola | 58
+ louise | 98
+ edna | 18
+ bertha | 88
+ sumi | 38
+ koko | 88
+ gina | 18
+ rean | 48
+ sharon | 78
+ paula | 68
+ julie | 68
+ belinda | 38
+ karen | 48
+ carina | 58
+ diane | 18
+ esther | 98
+ trudy | 88
+ fanny | 8
+ carmen | 78
+ lita | 25
+ pamela | 48
+ sandy | 38
+ trisha | 88
+ uma | 78
+ velma | 68
+ sharon | 25
+ sam | 30
+ bill | 20
+ fred | 28
+ larry | 60
+ jeff | 23
+ cim | 30
+ linda | 19
+(58 rows)
+
+--
+-- awk '{print $1,$2;}' person.data |
+-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data |
+-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - student.data |
+-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $4,$5;}else{print;}}' - stud_emp.data |
+-- sort +1nr -2
+--
+SELECT p.name, p.age FROM person* p ORDER BY age using >, name;
+ name | age
+---------+-----
+ esther | 98
+ louise | 98
+ zena | 98
+ bertha | 88
+ koko | 88
+ martie | 88
+ sarah | 88
+ trisha | 88
+ trudy | 88
+ carmen | 78
+ chris | 78
+ sharon | 78
+ susan | 78
+ uma | 78
+ wendy | 78
+ julie | 68
+ leah | 68
+ paula | 68
+ velma | 68
+ larry | 60
+ carina | 58
+ jane | 58
+ juanita | 58
+ zola | 58
+ sue | 50
+ karen | 48
+ pamela | 48
+ rean | 48
+ mike | 40
+ belinda | 38
+ jenifer | 38
+ liza | 38
+ sandy | 38
+ sumi | 38
+ teresa | 38
+ sally | 34
+ alex | 30
+ cim | 30
+ sam | 30
+ fred | 28
+ jean | 28
+ melissa | 28
+ nan | 28
+ lita | 25
+ sharon | 25
+ denise | 24
+ jeff | 23
+ bill | 20
+ joe | 20
+ linda | 19
+ sandra | 19
+ diane | 18
+ edna | 18
+ gina | 18
+ joan | 18
+ pat | 18
+ fanny | 8
+ mary | 8
+(58 rows)
+
+--
+-- Test some cases involving whole-row Var referencing a subquery
+--
+select foo from (select 1 offset 0) as foo;
+ foo
+-----
+ (1)
+(1 row)
+
+select foo from (select null offset 0) as foo;
+ foo
+-----
+ ()
+(1 row)
+
+select foo from (select 'xyzzy',1,null offset 0) as foo;
+ foo
+------------
+ (xyzzy,1,)
+(1 row)
+
+--
+-- Test VALUES lists
+--
+select * from onek, (values(147, 'RFAAAA'), (931, 'VJAAAA')) as v (i, j)
+ WHERE onek.unique1 = v.i and onek.stringu1 = v.j;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 | i | j
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------+-----+--------
+ 147 | 0 | 1 | 3 | 7 | 7 | 7 | 47 | 147 | 147 | 147 | 14 | 15 | RFAAAA | AAAAAA | AAAAxx | 147 | RFAAAA
+ 931 | 1 | 1 | 3 | 1 | 11 | 1 | 31 | 131 | 431 | 931 | 2 | 3 | VJAAAA | BAAAAA | HHHHxx | 931 | VJAAAA
+(2 rows)
+
+-- a more complex case
+-- looks like we're coding lisp :-)
+select * from onek,
+ (values ((select i from
+ (values(10000), (2), (389), (1000), (2000), ((select 10029))) as foo(i)
+ order by i asc limit 1))) bar (i)
+ where onek.unique1 = bar.i;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 | i
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------+---
+ 2 | 326 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 4 | 5 | CAAAAA | OMAAAA | OOOOxx | 2
+(1 row)
+
+-- try VALUES in a subquery
+select * from onek
+ where (unique1,ten) in (values (1,1), (20,0), (99,9), (17,99))
+ order by unique1;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 1 | 214 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 2 | 3 | BAAAAA | GIAAAA | OOOOxx
+ 20 | 306 | 0 | 0 | 0 | 0 | 0 | 20 | 20 | 20 | 20 | 0 | 1 | UAAAAA | ULAAAA | OOOOxx
+ 99 | 101 | 1 | 3 | 9 | 19 | 9 | 99 | 99 | 99 | 99 | 18 | 19 | VDAAAA | XDAAAA | HHHHxx
+(3 rows)
+
+-- VALUES is also legal as a standalone query or a set-operation member
+VALUES (1,2), (3,4+4), (7,77.7);
+ column1 | column2
+---------+---------
+ 1 | 2
+ 3 | 8
+ 7 | 77.7
+(3 rows)
+
+VALUES (1,2), (3,4+4), (7,77.7)
+UNION ALL
+SELECT 2+2, 57
+UNION ALL
+TABLE int8_tbl;
+ column1 | column2
+------------------+-------------------
+ 1 | 2
+ 3 | 8
+ 7 | 77.7
+ 4 | 57
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(9 rows)
+
+-- corner case: VALUES with no columns
+CREATE TEMP TABLE nocols();
+INSERT INTO nocols DEFAULT VALUES;
+SELECT * FROM nocols n, LATERAL (VALUES(n.*)) v;
+--
+(1 row)
+
+--
+-- Test ORDER BY options
+--
+CREATE TEMP TABLE foo (f1 int);
+INSERT INTO foo VALUES (42),(3),(10),(7),(null),(null),(1);
+SELECT * FROM foo ORDER BY f1;
+ f1
+----
+ 1
+ 3
+ 7
+ 10
+ 42
+
+
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 ASC; -- same thing
+ f1
+----
+ 1
+ 3
+ 7
+ 10
+ 42
+
+
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 NULLS FIRST;
+ f1
+----
+
+
+ 1
+ 3
+ 7
+ 10
+ 42
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 DESC;
+ f1
+----
+
+
+ 42
+ 10
+ 7
+ 3
+ 1
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
+ f1
+----
+ 42
+ 10
+ 7
+ 3
+ 1
+
+
+(7 rows)
+
+-- check if indexscans do the right things
+CREATE INDEX fooi ON foo (f1);
+SET enable_sort = false;
+SELECT * FROM foo ORDER BY f1;
+ f1
+----
+ 1
+ 3
+ 7
+ 10
+ 42
+
+
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 NULLS FIRST;
+ f1
+----
+
+
+ 1
+ 3
+ 7
+ 10
+ 42
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 DESC;
+ f1
+----
+
+
+ 42
+ 10
+ 7
+ 3
+ 1
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
+ f1
+----
+ 42
+ 10
+ 7
+ 3
+ 1
+
+
+(7 rows)
+
+DROP INDEX fooi;
+CREATE INDEX fooi ON foo (f1 DESC);
+SELECT * FROM foo ORDER BY f1;
+ f1
+----
+ 1
+ 3
+ 7
+ 10
+ 42
+
+
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 NULLS FIRST;
+ f1
+----
+
+
+ 1
+ 3
+ 7
+ 10
+ 42
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 DESC;
+ f1
+----
+
+
+ 42
+ 10
+ 7
+ 3
+ 1
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
+ f1
+----
+ 42
+ 10
+ 7
+ 3
+ 1
+
+
+(7 rows)
+
+DROP INDEX fooi;
+CREATE INDEX fooi ON foo (f1 DESC NULLS LAST);
+SELECT * FROM foo ORDER BY f1;
+ f1
+----
+ 1
+ 3
+ 7
+ 10
+ 42
+
+
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 NULLS FIRST;
+ f1
+----
+
+
+ 1
+ 3
+ 7
+ 10
+ 42
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 DESC;
+ f1
+----
+
+
+ 42
+ 10
+ 7
+ 3
+ 1
+(7 rows)
+
+SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
+ f1
+----
+ 42
+ 10
+ 7
+ 3
+ 1
+
+
+(7 rows)
+
+--
+-- Test planning of some cases with partial indexes
+--
+-- partial index is usable
+explain (costs off)
+select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+ QUERY PLAN
+-----------------------------------------
+ Index Scan using onek2_u2_prtl on onek2
+ Index Cond: (unique2 = 11)
+ Filter: (stringu1 = 'ATAAAA'::name)
+(3 rows)
+
+select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 494 | 11 | 0 | 2 | 4 | 14 | 4 | 94 | 94 | 494 | 494 | 8 | 9 | ATAAAA | LAAAAA | VVVVxx
+(1 row)
+
+-- actually run the query with an analyze to use the partial index
+explain (costs off, analyze on, timing off, summary off)
+select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+ QUERY PLAN
+-----------------------------------------------------------------
+ Index Scan using onek2_u2_prtl on onek2 (actual rows=1 loops=1)
+ Index Cond: (unique2 = 11)
+ Filter: (stringu1 = 'ATAAAA'::name)
+(3 rows)
+
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+ QUERY PLAN
+-----------------------------------------
+ Index Scan using onek2_u2_prtl on onek2
+ Index Cond: (unique2 = 11)
+ Filter: (stringu1 = 'ATAAAA'::name)
+(3 rows)
+
+select unique2 from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+ unique2
+---------
+ 11
+(1 row)
+
+-- partial index predicate implies clause, so no need for retest
+explain (costs off)
+select * from onek2 where unique2 = 11 and stringu1 < 'B';
+ QUERY PLAN
+-----------------------------------------
+ Index Scan using onek2_u2_prtl on onek2
+ Index Cond: (unique2 = 11)
+(2 rows)
+
+select * from onek2 where unique2 = 11 and stringu1 < 'B';
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 494 | 11 | 0 | 2 | 4 | 14 | 4 | 94 | 94 | 494 | 494 | 8 | 9 | ATAAAA | LAAAAA | VVVVxx
+(1 row)
+
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
+ QUERY PLAN
+----------------------------------------------
+ Index Only Scan using onek2_u2_prtl on onek2
+ Index Cond: (unique2 = 11)
+(2 rows)
+
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
+ unique2
+---------
+ 11
+(1 row)
+
+-- but if it's an update target, must retest anyway
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B' for update;
+ QUERY PLAN
+-----------------------------------------------
+ LockRows
+ -> Index Scan using onek2_u2_prtl on onek2
+ Index Cond: (unique2 = 11)
+ Filter: (stringu1 < 'B'::name)
+(4 rows)
+
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B' for update;
+ unique2
+---------
+ 11
+(1 row)
+
+-- partial index is not applicable
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'C';
+ QUERY PLAN
+-------------------------------------------------------
+ Seq Scan on onek2
+ Filter: ((stringu1 < 'C'::name) AND (unique2 = 11))
+(2 rows)
+
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'C';
+ unique2
+---------
+ 11
+(1 row)
+
+-- partial index implies clause, but bitmap scan must recheck predicate anyway
+SET enable_indexscan TO off;
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
+ QUERY PLAN
+-------------------------------------------------------------
+ Bitmap Heap Scan on onek2
+ Recheck Cond: ((unique2 = 11) AND (stringu1 < 'B'::name))
+ -> Bitmap Index Scan on onek2_u2_prtl
+ Index Cond: (unique2 = 11)
+(4 rows)
+
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
+ unique2
+---------
+ 11
+(1 row)
+
+RESET enable_indexscan;
+-- check multi-index cases too
+explain (costs off)
+select unique1, unique2 from onek2
+ where (unique2 = 11 or unique1 = 0) and stringu1 < 'B';
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Bitmap Heap Scan on onek2
+ Recheck Cond: (((unique2 = 11) AND (stringu1 < 'B'::name)) OR (unique1 = 0))
+ Filter: (stringu1 < 'B'::name)
+ -> BitmapOr
+ -> Bitmap Index Scan on onek2_u2_prtl
+ Index Cond: (unique2 = 11)
+ -> Bitmap Index Scan on onek2_u1_prtl
+ Index Cond: (unique1 = 0)
+(8 rows)
+
+select unique1, unique2 from onek2
+ where (unique2 = 11 or unique1 = 0) and stringu1 < 'B';
+ unique1 | unique2
+---------+---------
+ 494 | 11
+ 0 | 998
+(2 rows)
+
+explain (costs off)
+select unique1, unique2 from onek2
+ where (unique2 = 11 and stringu1 < 'B') or unique1 = 0;
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Bitmap Heap Scan on onek2
+ Recheck Cond: (((unique2 = 11) AND (stringu1 < 'B'::name)) OR (unique1 = 0))
+ -> BitmapOr
+ -> Bitmap Index Scan on onek2_u2_prtl
+ Index Cond: (unique2 = 11)
+ -> Bitmap Index Scan on onek2_u1_prtl
+ Index Cond: (unique1 = 0)
+(7 rows)
+
+select unique1, unique2 from onek2
+ where (unique2 = 11 and stringu1 < 'B') or unique1 = 0;
+ unique1 | unique2
+---------+---------
+ 494 | 11
+ 0 | 998
+(2 rows)
+
+--
+-- Test some corner cases that have been known to confuse the planner
+--
+-- ORDER BY on a constant doesn't really need any sorting
+SELECT 1 AS x ORDER BY x;
+ x
+---
+ 1
+(1 row)
+
+-- But ORDER BY on a set-valued expression does
+create function sillysrf(int) returns setof int as
+ 'values (1),(10),(2),($1)' language sql immutable;
+select sillysrf(42);
+ sillysrf
+----------
+ 1
+ 10
+ 2
+ 42
+(4 rows)
+
+select sillysrf(-1) order by 1;
+ sillysrf
+----------
+ -1
+ 1
+ 2
+ 10
+(4 rows)
+
+drop function sillysrf(int);
+-- X = X isn't a no-op, it's effectively X IS NOT NULL assuming = is strict
+-- (see bug #5084)
+select * from (values (2),(null),(1)) v(k) where k = k order by k;
+ k
+---
+ 1
+ 2
+(2 rows)
+
+select * from (values (2),(null),(1)) v(k) where k = k;
+ k
+---
+ 2
+ 1
+(2 rows)
+
+-- Test partitioned tables with no partitions, which should be handled the
+-- same as the non-inheritance case when expanding its RTE.
+create table list_parted_tbl (a int,b int) partition by list (a);
+create table list_parted_tbl1 partition of list_parted_tbl
+ for values in (1) partition by list(b);
+explain (costs off) select * from list_parted_tbl;
+ QUERY PLAN
+--------------------------
+ Result
+ One-Time Filter: false
+(2 rows)
+
+drop table list_parted_tbl;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select.sql b/ydb/library/yql/tests/postgresql/original/cases/select.sql
new file mode 100644
index 0000000000..d6f42aa0d4
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select.sql
@@ -0,0 +1,269 @@
+--
+-- SELECT
+--
+
+-- btree index
+-- awk '{if($1<10){print;}else{next;}}' onek.data | sort +0n -1
+--
+SELECT * FROM onek
+ WHERE onek.unique1 < 10
+ ORDER BY onek.unique1;
+
+--
+-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1
+--
+SELECT onek.unique1, onek.stringu1 FROM onek
+ WHERE onek.unique1 < 20
+ ORDER BY unique1 using >;
+
+--
+-- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2
+--
+SELECT onek.unique1, onek.stringu1 FROM onek
+ WHERE onek.unique1 > 980
+ ORDER BY stringu1 using <;
+
+--
+-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data |
+-- sort +1d -2 +0nr -1
+--
+SELECT onek.unique1, onek.string4 FROM onek
+ WHERE onek.unique1 > 980
+ ORDER BY string4 using <, unique1 using >;
+
+--
+-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data |
+-- sort +1dr -2 +0n -1
+--
+SELECT onek.unique1, onek.string4 FROM onek
+ WHERE onek.unique1 > 980
+ ORDER BY string4 using >, unique1 using <;
+
+--
+-- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data |
+-- sort +0nr -1 +1d -2
+--
+SELECT onek.unique1, onek.string4 FROM onek
+ WHERE onek.unique1 < 20
+ ORDER BY unique1 using >, string4 using <;
+
+--
+-- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data |
+-- sort +0n -1 +1dr -2
+--
+SELECT onek.unique1, onek.string4 FROM onek
+ WHERE onek.unique1 < 20
+ ORDER BY unique1 using <, string4 using >;
+
+--
+-- test partial btree indexes
+--
+-- As of 7.2, planner probably won't pick an indexscan without stats,
+-- so ANALYZE first. Also, we want to prevent it from picking a bitmapscan
+-- followed by sort, because that could hide index ordering problems.
+--
+ANALYZE onek2;
+
+SET enable_seqscan TO off;
+SET enable_bitmapscan TO off;
+SET enable_sort TO off;
+
+--
+-- awk '{if($1<10){print $0;}else{next;}}' onek.data | sort +0n -1
+--
+SELECT onek2.* FROM onek2 WHERE onek2.unique1 < 10;
+
+--
+-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1
+--
+SELECT onek2.unique1, onek2.stringu1 FROM onek2
+ WHERE onek2.unique1 < 20
+ ORDER BY unique1 using >;
+
+--
+-- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2
+--
+SELECT onek2.unique1, onek2.stringu1 FROM onek2
+ WHERE onek2.unique1 > 980;
+
+RESET enable_seqscan;
+RESET enable_bitmapscan;
+RESET enable_sort;
+
+
+SELECT two, stringu1, ten, string4
+ INTO TABLE tmp
+ FROM onek;
+
+--
+-- awk '{print $1,$2;}' person.data |
+-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data |
+-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - student.data |
+-- awk 'BEGIN{FS=" ";}{if(NF!=2){print $4,$5;}else{print;}}' - stud_emp.data
+--
+-- SELECT name, age FROM person*; ??? check if different
+SELECT p.name, p.age FROM person* p;
+
+--
+-- awk '{print $1,$2;}' person.data |
+-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data |
+-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - student.data |
+-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $4,$5;}else{print;}}' - stud_emp.data |
+-- sort +1nr -2
+--
+SELECT p.name, p.age FROM person* p ORDER BY age using >, name;
+
+--
+-- Test some cases involving whole-row Var referencing a subquery
+--
+select foo from (select 1 offset 0) as foo;
+select foo from (select null offset 0) as foo;
+select foo from (select 'xyzzy',1,null offset 0) as foo;
+
+--
+-- Test VALUES lists
+--
+select * from onek, (values(147, 'RFAAAA'), (931, 'VJAAAA')) as v (i, j)
+ WHERE onek.unique1 = v.i and onek.stringu1 = v.j;
+
+-- a more complex case
+-- looks like we're coding lisp :-)
+select * from onek,
+ (values ((select i from
+ (values(10000), (2), (389), (1000), (2000), ((select 10029))) as foo(i)
+ order by i asc limit 1))) bar (i)
+ where onek.unique1 = bar.i;
+
+-- try VALUES in a subquery
+select * from onek
+ where (unique1,ten) in (values (1,1), (20,0), (99,9), (17,99))
+ order by unique1;
+
+-- VALUES is also legal as a standalone query or a set-operation member
+VALUES (1,2), (3,4+4), (7,77.7);
+
+VALUES (1,2), (3,4+4), (7,77.7)
+UNION ALL
+SELECT 2+2, 57
+UNION ALL
+TABLE int8_tbl;
+
+-- corner case: VALUES with no columns
+CREATE TEMP TABLE nocols();
+INSERT INTO nocols DEFAULT VALUES;
+SELECT * FROM nocols n, LATERAL (VALUES(n.*)) v;
+
+--
+-- Test ORDER BY options
+--
+
+CREATE TEMP TABLE foo (f1 int);
+
+INSERT INTO foo VALUES (42),(3),(10),(7),(null),(null),(1);
+
+SELECT * FROM foo ORDER BY f1;
+SELECT * FROM foo ORDER BY f1 ASC; -- same thing
+SELECT * FROM foo ORDER BY f1 NULLS FIRST;
+SELECT * FROM foo ORDER BY f1 DESC;
+SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
+
+-- check if indexscans do the right things
+CREATE INDEX fooi ON foo (f1);
+SET enable_sort = false;
+
+SELECT * FROM foo ORDER BY f1;
+SELECT * FROM foo ORDER BY f1 NULLS FIRST;
+SELECT * FROM foo ORDER BY f1 DESC;
+SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
+
+DROP INDEX fooi;
+CREATE INDEX fooi ON foo (f1 DESC);
+
+SELECT * FROM foo ORDER BY f1;
+SELECT * FROM foo ORDER BY f1 NULLS FIRST;
+SELECT * FROM foo ORDER BY f1 DESC;
+SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
+
+DROP INDEX fooi;
+CREATE INDEX fooi ON foo (f1 DESC NULLS LAST);
+
+SELECT * FROM foo ORDER BY f1;
+SELECT * FROM foo ORDER BY f1 NULLS FIRST;
+SELECT * FROM foo ORDER BY f1 DESC;
+SELECT * FROM foo ORDER BY f1 DESC NULLS LAST;
+
+--
+-- Test planning of some cases with partial indexes
+--
+
+-- partial index is usable
+explain (costs off)
+select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+-- actually run the query with an analyze to use the partial index
+explain (costs off, analyze on, timing off, summary off)
+select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+select unique2 from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
+-- partial index predicate implies clause, so no need for retest
+explain (costs off)
+select * from onek2 where unique2 = 11 and stringu1 < 'B';
+select * from onek2 where unique2 = 11 and stringu1 < 'B';
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
+-- but if it's an update target, must retest anyway
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B' for update;
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B' for update;
+-- partial index is not applicable
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'C';
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'C';
+-- partial index implies clause, but bitmap scan must recheck predicate anyway
+SET enable_indexscan TO off;
+explain (costs off)
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
+select unique2 from onek2 where unique2 = 11 and stringu1 < 'B';
+RESET enable_indexscan;
+-- check multi-index cases too
+explain (costs off)
+select unique1, unique2 from onek2
+ where (unique2 = 11 or unique1 = 0) and stringu1 < 'B';
+select unique1, unique2 from onek2
+ where (unique2 = 11 or unique1 = 0) and stringu1 < 'B';
+explain (costs off)
+select unique1, unique2 from onek2
+ where (unique2 = 11 and stringu1 < 'B') or unique1 = 0;
+select unique1, unique2 from onek2
+ where (unique2 = 11 and stringu1 < 'B') or unique1 = 0;
+
+--
+-- Test some corner cases that have been known to confuse the planner
+--
+
+-- ORDER BY on a constant doesn't really need any sorting
+SELECT 1 AS x ORDER BY x;
+
+-- But ORDER BY on a set-valued expression does
+create function sillysrf(int) returns setof int as
+ 'values (1),(10),(2),($1)' language sql immutable;
+
+select sillysrf(42);
+select sillysrf(-1) order by 1;
+
+drop function sillysrf(int);
+
+-- X = X isn't a no-op, it's effectively X IS NOT NULL assuming = is strict
+-- (see bug #5084)
+select * from (values (2),(null),(1)) v(k) where k = k order by k;
+select * from (values (2),(null),(1)) v(k) where k = k;
+
+-- Test partitioned tables with no partitions, which should be handled the
+-- same as the non-inheritance case when expanding its RTE.
+create table list_parted_tbl (a int,b int) partition by list (a);
+create table list_parted_tbl1 partition of list_parted_tbl
+ for values in (1) partition by list(b);
+explain (costs off) select * from list_parted_tbl;
+drop table list_parted_tbl;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_distinct.out b/ydb/library/yql/tests/postgresql/original/cases/select_distinct.out
new file mode 100644
index 0000000000..11c6f50fbf
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_distinct.out
@@ -0,0 +1,308 @@
+--
+-- SELECT_DISTINCT
+--
+--
+-- awk '{print $3;}' onek.data | sort -n | uniq
+--
+SELECT DISTINCT two FROM tmp ORDER BY 1;
+ two
+-----
+ 0
+ 1
+(2 rows)
+
+--
+-- awk '{print $5;}' onek.data | sort -n | uniq
+--
+SELECT DISTINCT ten FROM tmp ORDER BY 1;
+ ten
+-----
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+(10 rows)
+
+--
+-- awk '{print $16;}' onek.data | sort -d | uniq
+--
+SELECT DISTINCT string4 FROM tmp ORDER BY 1;
+ string4
+---------
+ AAAAxx
+ HHHHxx
+ OOOOxx
+ VVVVxx
+(4 rows)
+
+--
+-- awk '{print $3,$16,$5;}' onek.data | sort -d | uniq |
+-- sort +0n -1 +1d -2 +2n -3
+--
+SELECT DISTINCT two, string4, ten
+ FROM tmp
+ ORDER BY two using <, string4 using <, ten using <;
+ two | string4 | ten
+-----+---------+-----
+ 0 | AAAAxx | 0
+ 0 | AAAAxx | 2
+ 0 | AAAAxx | 4
+ 0 | AAAAxx | 6
+ 0 | AAAAxx | 8
+ 0 | HHHHxx | 0
+ 0 | HHHHxx | 2
+ 0 | HHHHxx | 4
+ 0 | HHHHxx | 6
+ 0 | HHHHxx | 8
+ 0 | OOOOxx | 0
+ 0 | OOOOxx | 2
+ 0 | OOOOxx | 4
+ 0 | OOOOxx | 6
+ 0 | OOOOxx | 8
+ 0 | VVVVxx | 0
+ 0 | VVVVxx | 2
+ 0 | VVVVxx | 4
+ 0 | VVVVxx | 6
+ 0 | VVVVxx | 8
+ 1 | AAAAxx | 1
+ 1 | AAAAxx | 3
+ 1 | AAAAxx | 5
+ 1 | AAAAxx | 7
+ 1 | AAAAxx | 9
+ 1 | HHHHxx | 1
+ 1 | HHHHxx | 3
+ 1 | HHHHxx | 5
+ 1 | HHHHxx | 7
+ 1 | HHHHxx | 9
+ 1 | OOOOxx | 1
+ 1 | OOOOxx | 3
+ 1 | OOOOxx | 5
+ 1 | OOOOxx | 7
+ 1 | OOOOxx | 9
+ 1 | VVVVxx | 1
+ 1 | VVVVxx | 3
+ 1 | VVVVxx | 5
+ 1 | VVVVxx | 7
+ 1 | VVVVxx | 9
+(40 rows)
+
+--
+-- awk '{print $2;}' person.data |
+-- awk '{if(NF!=1){print $2;}else{print;}}' - emp.data |
+-- awk '{if(NF!=1){print $2;}else{print;}}' - student.data |
+-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $5;}else{print;}}' - stud_emp.data |
+-- sort -n -r | uniq
+--
+SELECT DISTINCT p.age FROM person* p ORDER BY age using >;
+ age
+-----
+ 98
+ 88
+ 78
+ 68
+ 60
+ 58
+ 50
+ 48
+ 40
+ 38
+ 34
+ 30
+ 28
+ 25
+ 24
+ 23
+ 20
+ 19
+ 18
+ 8
+(20 rows)
+
+--
+-- Check mentioning same column more than once
+--
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT count(*) FROM
+ (SELECT DISTINCT two, four, two FROM tenk1) ss;
+ QUERY PLAN
+--------------------------------------------------------
+ Aggregate
+ Output: count(*)
+ -> HashAggregate
+ Output: tenk1.two, tenk1.four, tenk1.two
+ Group Key: tenk1.two, tenk1.four, tenk1.two
+ -> Seq Scan on public.tenk1
+ Output: tenk1.two, tenk1.four, tenk1.two
+(7 rows)
+
+SELECT count(*) FROM
+ (SELECT DISTINCT two, four, two FROM tenk1) ss;
+ count
+-------
+ 4
+(1 row)
+
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+SET work_mem='64kB';
+-- Produce results with sorting.
+SET enable_hashagg=FALSE;
+SET jit_above_cost=0;
+EXPLAIN (costs off)
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+ QUERY PLAN
+------------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: ((g % 1000))
+ -> Function Scan on generate_series g
+(4 rows)
+
+CREATE TABLE distinct_group_1 AS
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+SET jit_above_cost TO DEFAULT;
+CREATE TABLE distinct_group_2 AS
+SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
+SET enable_hashagg=TRUE;
+-- Produce results with hash aggregation.
+SET enable_sort=FALSE;
+SET jit_above_cost=0;
+EXPLAIN (costs off)
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+ QUERY PLAN
+------------------------------------------
+ HashAggregate
+ Group Key: (g % 1000)
+ -> Function Scan on generate_series g
+(3 rows)
+
+CREATE TABLE distinct_hash_1 AS
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+SET jit_above_cost TO DEFAULT;
+CREATE TABLE distinct_hash_2 AS
+SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
+SET enable_sort=TRUE;
+SET work_mem TO DEFAULT;
+-- Compare results
+(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
+ UNION ALL
+(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);
+ ?column?
+----------
+(0 rows)
+
+(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
+ UNION ALL
+(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);
+ ?column?
+----------
+(0 rows)
+
+DROP TABLE distinct_hash_1;
+DROP TABLE distinct_hash_2;
+DROP TABLE distinct_group_1;
+DROP TABLE distinct_group_2;
+--
+-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
+-- very own regression file.
+--
+CREATE TEMP TABLE disttable (f1 integer);
+INSERT INTO DISTTABLE VALUES(1);
+INSERT INTO DISTTABLE VALUES(2);
+INSERT INTO DISTTABLE VALUES(3);
+INSERT INTO DISTTABLE VALUES(NULL);
+-- basic cases
+SELECT f1, f1 IS DISTINCT FROM 2 as "not 2" FROM disttable;
+ f1 | not 2
+----+-------
+ 1 | t
+ 2 | f
+ 3 | t
+ | t
+(4 rows)
+
+SELECT f1, f1 IS DISTINCT FROM NULL as "not null" FROM disttable;
+ f1 | not null
+----+----------
+ 1 | t
+ 2 | t
+ 3 | t
+ | f
+(4 rows)
+
+SELECT f1, f1 IS DISTINCT FROM f1 as "false" FROM disttable;
+ f1 | false
+----+-------
+ 1 | f
+ 2 | f
+ 3 | f
+ | f
+(4 rows)
+
+SELECT f1, f1 IS DISTINCT FROM f1+1 as "not null" FROM disttable;
+ f1 | not null
+----+----------
+ 1 | t
+ 2 | t
+ 3 | t
+ | f
+(4 rows)
+
+-- check that optimizer constant-folds it properly
+SELECT 1 IS DISTINCT FROM 2 as "yes";
+ yes
+-----
+ t
+(1 row)
+
+SELECT 2 IS DISTINCT FROM 2 as "no";
+ no
+----
+ f
+(1 row)
+
+SELECT 2 IS DISTINCT FROM null as "yes";
+ yes
+-----
+ t
+(1 row)
+
+SELECT null IS DISTINCT FROM null as "no";
+ no
+----
+ f
+(1 row)
+
+-- negated form
+SELECT 1 IS NOT DISTINCT FROM 2 as "no";
+ no
+----
+ f
+(1 row)
+
+SELECT 2 IS NOT DISTINCT FROM 2 as "yes";
+ yes
+-----
+ t
+(1 row)
+
+SELECT 2 IS NOT DISTINCT FROM null as "no";
+ no
+----
+ f
+(1 row)
+
+SELECT null IS NOT DISTINCT FROM null as "yes";
+ yes
+-----
+ t
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_distinct.sql b/ydb/library/yql/tests/postgresql/original/cases/select_distinct.sql
new file mode 100644
index 0000000000..33102744eb
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_distinct.sql
@@ -0,0 +1,137 @@
+--
+-- SELECT_DISTINCT
+--
+
+--
+-- awk '{print $3;}' onek.data | sort -n | uniq
+--
+SELECT DISTINCT two FROM tmp ORDER BY 1;
+
+--
+-- awk '{print $5;}' onek.data | sort -n | uniq
+--
+SELECT DISTINCT ten FROM tmp ORDER BY 1;
+
+--
+-- awk '{print $16;}' onek.data | sort -d | uniq
+--
+SELECT DISTINCT string4 FROM tmp ORDER BY 1;
+
+--
+-- awk '{print $3,$16,$5;}' onek.data | sort -d | uniq |
+-- sort +0n -1 +1d -2 +2n -3
+--
+SELECT DISTINCT two, string4, ten
+ FROM tmp
+ ORDER BY two using <, string4 using <, ten using <;
+
+--
+-- awk '{print $2;}' person.data |
+-- awk '{if(NF!=1){print $2;}else{print;}}' - emp.data |
+-- awk '{if(NF!=1){print $2;}else{print;}}' - student.data |
+-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $5;}else{print;}}' - stud_emp.data |
+-- sort -n -r | uniq
+--
+SELECT DISTINCT p.age FROM person* p ORDER BY age using >;
+
+--
+-- Check mentioning same column more than once
+--
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT count(*) FROM
+ (SELECT DISTINCT two, four, two FROM tenk1) ss;
+
+SELECT count(*) FROM
+ (SELECT DISTINCT two, four, two FROM tenk1) ss;
+
+--
+-- Compare results between plans using sorting and plans using hash
+-- aggregation. Force spilling in both cases by setting work_mem low.
+--
+
+SET work_mem='64kB';
+
+-- Produce results with sorting.
+
+SET enable_hashagg=FALSE;
+
+SET jit_above_cost=0;
+
+EXPLAIN (costs off)
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+
+CREATE TABLE distinct_group_1 AS
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+
+SET jit_above_cost TO DEFAULT;
+
+CREATE TABLE distinct_group_2 AS
+SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
+
+SET enable_hashagg=TRUE;
+
+-- Produce results with hash aggregation.
+
+SET enable_sort=FALSE;
+
+SET jit_above_cost=0;
+
+EXPLAIN (costs off)
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+
+CREATE TABLE distinct_hash_1 AS
+SELECT DISTINCT g%1000 FROM generate_series(0,9999) g;
+
+SET jit_above_cost TO DEFAULT;
+
+CREATE TABLE distinct_hash_2 AS
+SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g;
+
+SET enable_sort=TRUE;
+
+SET work_mem TO DEFAULT;
+
+-- Compare results
+
+(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
+ UNION ALL
+(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);
+
+(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1)
+ UNION ALL
+(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1);
+
+DROP TABLE distinct_hash_1;
+DROP TABLE distinct_hash_2;
+DROP TABLE distinct_group_1;
+DROP TABLE distinct_group_2;
+
+--
+-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its
+-- very own regression file.
+--
+
+CREATE TEMP TABLE disttable (f1 integer);
+INSERT INTO DISTTABLE VALUES(1);
+INSERT INTO DISTTABLE VALUES(2);
+INSERT INTO DISTTABLE VALUES(3);
+INSERT INTO DISTTABLE VALUES(NULL);
+
+-- basic cases
+SELECT f1, f1 IS DISTINCT FROM 2 as "not 2" FROM disttable;
+SELECT f1, f1 IS DISTINCT FROM NULL as "not null" FROM disttable;
+SELECT f1, f1 IS DISTINCT FROM f1 as "false" FROM disttable;
+SELECT f1, f1 IS DISTINCT FROM f1+1 as "not null" FROM disttable;
+
+-- check that optimizer constant-folds it properly
+SELECT 1 IS DISTINCT FROM 2 as "yes";
+SELECT 2 IS DISTINCT FROM 2 as "no";
+SELECT 2 IS DISTINCT FROM null as "yes";
+SELECT null IS DISTINCT FROM null as "no";
+
+-- negated form
+SELECT 1 IS NOT DISTINCT FROM 2 as "no";
+SELECT 2 IS NOT DISTINCT FROM 2 as "yes";
+SELECT 2 IS NOT DISTINCT FROM null as "no";
+SELECT null IS NOT DISTINCT FROM null as "yes";
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_distinct_on.out b/ydb/library/yql/tests/postgresql/original/cases/select_distinct_on.out
new file mode 100644
index 0000000000..b787b307f6
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_distinct_on.out
@@ -0,0 +1,75 @@
+--
+-- SELECT_DISTINCT_ON
+--
+SELECT DISTINCT ON (string4) string4, two, ten
+ FROM tmp
+ ORDER BY string4 using <, two using >, ten using <;
+ string4 | two | ten
+---------+-----+-----
+ AAAAxx | 1 | 1
+ HHHHxx | 1 | 1
+ OOOOxx | 1 | 1
+ VVVVxx | 1 | 1
+(4 rows)
+
+-- this will fail due to conflict of ordering requirements
+SELECT DISTINCT ON (string4, ten) string4, two, ten
+ FROM tmp
+ ORDER BY string4 using <, two using <, ten using <;
+ERROR: SELECT DISTINCT ON expressions must match initial ORDER BY expressions
+LINE 1: SELECT DISTINCT ON (string4, ten) string4, two, ten
+ ^
+SELECT DISTINCT ON (string4, ten) string4, ten, two
+ FROM tmp
+ ORDER BY string4 using <, ten using >, two using <;
+ string4 | ten | two
+---------+-----+-----
+ AAAAxx | 9 | 1
+ AAAAxx | 8 | 0
+ AAAAxx | 7 | 1
+ AAAAxx | 6 | 0
+ AAAAxx | 5 | 1
+ AAAAxx | 4 | 0
+ AAAAxx | 3 | 1
+ AAAAxx | 2 | 0
+ AAAAxx | 1 | 1
+ AAAAxx | 0 | 0
+ HHHHxx | 9 | 1
+ HHHHxx | 8 | 0
+ HHHHxx | 7 | 1
+ HHHHxx | 6 | 0
+ HHHHxx | 5 | 1
+ HHHHxx | 4 | 0
+ HHHHxx | 3 | 1
+ HHHHxx | 2 | 0
+ HHHHxx | 1 | 1
+ HHHHxx | 0 | 0
+ OOOOxx | 9 | 1
+ OOOOxx | 8 | 0
+ OOOOxx | 7 | 1
+ OOOOxx | 6 | 0
+ OOOOxx | 5 | 1
+ OOOOxx | 4 | 0
+ OOOOxx | 3 | 1
+ OOOOxx | 2 | 0
+ OOOOxx | 1 | 1
+ OOOOxx | 0 | 0
+ VVVVxx | 9 | 1
+ VVVVxx | 8 | 0
+ VVVVxx | 7 | 1
+ VVVVxx | 6 | 0
+ VVVVxx | 5 | 1
+ VVVVxx | 4 | 0
+ VVVVxx | 3 | 1
+ VVVVxx | 2 | 0
+ VVVVxx | 1 | 1
+ VVVVxx | 0 | 0
+(40 rows)
+
+-- bug #5049: early 8.4.x chokes on volatile DISTINCT ON clauses
+select distinct on (1) floor(random()) as r, f1 from int4_tbl order by 1,2;
+ r | f1
+---+-------------
+ 0 | -2147483647
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_distinct_on.sql b/ydb/library/yql/tests/postgresql/original/cases/select_distinct_on.sql
new file mode 100644
index 0000000000..d18733d274
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_distinct_on.sql
@@ -0,0 +1,19 @@
+--
+-- SELECT_DISTINCT_ON
+--
+
+SELECT DISTINCT ON (string4) string4, two, ten
+ FROM tmp
+ ORDER BY string4 using <, two using >, ten using <;
+
+-- this will fail due to conflict of ordering requirements
+SELECT DISTINCT ON (string4, ten) string4, two, ten
+ FROM tmp
+ ORDER BY string4 using <, two using <, ten using <;
+
+SELECT DISTINCT ON (string4, ten) string4, ten, two
+ FROM tmp
+ ORDER BY string4 using <, ten using >, two using <;
+
+-- bug #5049: early 8.4.x chokes on volatile DISTINCT ON clauses
+select distinct on (1) floor(random()) as r, f1 from int4_tbl order by 1,2;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_having.out b/ydb/library/yql/tests/postgresql/original/cases/select_having.out
new file mode 100644
index 0000000000..3950c0b404
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_having.out
@@ -0,0 +1,93 @@
+--
+-- SELECT_HAVING
+--
+-- load test data
+CREATE TABLE test_having (a int, b int, c char(8), d char);
+INSERT INTO test_having VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_having VALUES (1, 2, 'AAAA', 'b');
+INSERT INTO test_having VALUES (2, 2, 'AAAA', 'c');
+INSERT INTO test_having VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_having VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_having VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_having VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_having VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_having VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j');
+SELECT b, c FROM test_having
+ GROUP BY b, c HAVING count(*) = 1 ORDER BY b, c;
+ b | c
+---+----------
+ 1 | XXXX
+ 3 | bbbb
+(2 rows)
+
+-- HAVING is effectively equivalent to WHERE in this case
+SELECT b, c FROM test_having
+ GROUP BY b, c HAVING b = 3 ORDER BY b, c;
+ b | c
+---+----------
+ 3 | BBBB
+ 3 | bbbb
+(2 rows)
+
+SELECT lower(c), count(c) FROM test_having
+ GROUP BY lower(c) HAVING count(*) > 2 OR min(a) = max(a)
+ ORDER BY lower(c);
+ lower | count
+-------+-------
+ bbbb | 3
+ cccc | 4
+ xxxx | 1
+(3 rows)
+
+SELECT c, max(a) FROM test_having
+ GROUP BY c HAVING count(*) > 2 OR min(a) = max(a)
+ ORDER BY c;
+ c | max
+----------+-----
+ XXXX | 0
+ bbbb | 5
+(2 rows)
+
+-- test degenerate cases involving HAVING without GROUP BY
+-- Per SQL spec, these should generate 0 or 1 row, even without aggregates
+SELECT min(a), max(a) FROM test_having HAVING min(a) = max(a);
+ min | max
+-----+-----
+(0 rows)
+
+SELECT min(a), max(a) FROM test_having HAVING min(a) < max(a);
+ min | max
+-----+-----
+ 0 | 9
+(1 row)
+
+-- errors: ungrouped column references
+SELECT a FROM test_having HAVING min(a) < max(a);
+ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT a FROM test_having HAVING min(a) < max(a);
+ ^
+SELECT 1 AS one FROM test_having HAVING a > 1;
+ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT 1 AS one FROM test_having HAVING a > 1;
+ ^
+-- the really degenerate case: need not scan table at all
+SELECT 1 AS one FROM test_having HAVING 1 > 2;
+ one
+-----
+(0 rows)
+
+SELECT 1 AS one FROM test_having HAVING 1 < 2;
+ one
+-----
+ 1
+(1 row)
+
+-- and just to prove that we aren't scanning the table:
+SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2;
+ one
+-----
+ 1
+(1 row)
+
+DROP TABLE test_having;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_having.sql b/ydb/library/yql/tests/postgresql/original/cases/select_having.sql
new file mode 100644
index 0000000000..bc0cdc0630
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_having.sql
@@ -0,0 +1,50 @@
+--
+-- SELECT_HAVING
+--
+
+-- load test data
+CREATE TABLE test_having (a int, b int, c char(8), d char);
+INSERT INTO test_having VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_having VALUES (1, 2, 'AAAA', 'b');
+INSERT INTO test_having VALUES (2, 2, 'AAAA', 'c');
+INSERT INTO test_having VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_having VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_having VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_having VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_having VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_having VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j');
+
+SELECT b, c FROM test_having
+ GROUP BY b, c HAVING count(*) = 1 ORDER BY b, c;
+
+-- HAVING is effectively equivalent to WHERE in this case
+SELECT b, c FROM test_having
+ GROUP BY b, c HAVING b = 3 ORDER BY b, c;
+
+SELECT lower(c), count(c) FROM test_having
+ GROUP BY lower(c) HAVING count(*) > 2 OR min(a) = max(a)
+ ORDER BY lower(c);
+
+SELECT c, max(a) FROM test_having
+ GROUP BY c HAVING count(*) > 2 OR min(a) = max(a)
+ ORDER BY c;
+
+-- test degenerate cases involving HAVING without GROUP BY
+-- Per SQL spec, these should generate 0 or 1 row, even without aggregates
+
+SELECT min(a), max(a) FROM test_having HAVING min(a) = max(a);
+SELECT min(a), max(a) FROM test_having HAVING min(a) < max(a);
+
+-- errors: ungrouped column references
+SELECT a FROM test_having HAVING min(a) < max(a);
+SELECT 1 AS one FROM test_having HAVING a > 1;
+
+-- the really degenerate case: need not scan table at all
+SELECT 1 AS one FROM test_having HAVING 1 > 2;
+SELECT 1 AS one FROM test_having HAVING 1 < 2;
+
+-- and just to prove that we aren't scanning the table:
+SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2;
+
+DROP TABLE test_having;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_having_1.out b/ydb/library/yql/tests/postgresql/original/cases/select_having_1.out
new file mode 100644
index 0000000000..5c58da154c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_having_1.out
@@ -0,0 +1,93 @@
+--
+-- SELECT_HAVING
+--
+-- load test data
+CREATE TABLE test_having (a int, b int, c char(8), d char);
+INSERT INTO test_having VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_having VALUES (1, 2, 'AAAA', 'b');
+INSERT INTO test_having VALUES (2, 2, 'AAAA', 'c');
+INSERT INTO test_having VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_having VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_having VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_having VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_having VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_having VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j');
+SELECT b, c FROM test_having
+ GROUP BY b, c HAVING count(*) = 1 ORDER BY b, c;
+ b | c
+---+----------
+ 1 | XXXX
+ 3 | bbbb
+(2 rows)
+
+-- HAVING is effectively equivalent to WHERE in this case
+SELECT b, c FROM test_having
+ GROUP BY b, c HAVING b = 3 ORDER BY b, c;
+ b | c
+---+----------
+ 3 | BBBB
+ 3 | bbbb
+(2 rows)
+
+SELECT lower(c), count(c) FROM test_having
+ GROUP BY lower(c) HAVING count(*) > 2 OR min(a) = max(a)
+ ORDER BY lower(c);
+ lower | count
+-------+-------
+ bbbb | 3
+ cccc | 4
+ xxxx | 1
+(3 rows)
+
+SELECT c, max(a) FROM test_having
+ GROUP BY c HAVING count(*) > 2 OR min(a) = max(a)
+ ORDER BY c;
+ c | max
+----------+-----
+ bbbb | 5
+ XXXX | 0
+(2 rows)
+
+-- test degenerate cases involving HAVING without GROUP BY
+-- Per SQL spec, these should generate 0 or 1 row, even without aggregates
+SELECT min(a), max(a) FROM test_having HAVING min(a) = max(a);
+ min | max
+-----+-----
+(0 rows)
+
+SELECT min(a), max(a) FROM test_having HAVING min(a) < max(a);
+ min | max
+-----+-----
+ 0 | 9
+(1 row)
+
+-- errors: ungrouped column references
+SELECT a FROM test_having HAVING min(a) < max(a);
+ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT a FROM test_having HAVING min(a) < max(a);
+ ^
+SELECT 1 AS one FROM test_having HAVING a > 1;
+ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT 1 AS one FROM test_having HAVING a > 1;
+ ^
+-- the really degenerate case: need not scan table at all
+SELECT 1 AS one FROM test_having HAVING 1 > 2;
+ one
+-----
+(0 rows)
+
+SELECT 1 AS one FROM test_having HAVING 1 < 2;
+ one
+-----
+ 1
+(1 row)
+
+-- and just to prove that we aren't scanning the table:
+SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2;
+ one
+-----
+ 1
+(1 row)
+
+DROP TABLE test_having;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_having_2.out b/ydb/library/yql/tests/postgresql/original/cases/select_having_2.out
new file mode 100644
index 0000000000..7087fb1c0c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_having_2.out
@@ -0,0 +1,93 @@
+--
+-- SELECT_HAVING
+--
+-- load test data
+CREATE TABLE test_having (a int, b int, c char(8), d char);
+INSERT INTO test_having VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_having VALUES (1, 2, 'AAAA', 'b');
+INSERT INTO test_having VALUES (2, 2, 'AAAA', 'c');
+INSERT INTO test_having VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_having VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_having VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_having VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_having VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_having VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j');
+SELECT b, c FROM test_having
+ GROUP BY b, c HAVING count(*) = 1 ORDER BY b, c;
+ b | c
+---+----------
+ 1 | XXXX
+ 3 | bbbb
+(2 rows)
+
+-- HAVING is effectively equivalent to WHERE in this case
+SELECT b, c FROM test_having
+ GROUP BY b, c HAVING b = 3 ORDER BY b, c;
+ b | c
+---+----------
+ 3 | bbbb
+ 3 | BBBB
+(2 rows)
+
+SELECT lower(c), count(c) FROM test_having
+ GROUP BY lower(c) HAVING count(*) > 2 OR min(a) = max(a)
+ ORDER BY lower(c);
+ lower | count
+-------+-------
+ bbbb | 3
+ cccc | 4
+ xxxx | 1
+(3 rows)
+
+SELECT c, max(a) FROM test_having
+ GROUP BY c HAVING count(*) > 2 OR min(a) = max(a)
+ ORDER BY c;
+ c | max
+----------+-----
+ bbbb | 5
+ XXXX | 0
+(2 rows)
+
+-- test degenerate cases involving HAVING without GROUP BY
+-- Per SQL spec, these should generate 0 or 1 row, even without aggregates
+SELECT min(a), max(a) FROM test_having HAVING min(a) = max(a);
+ min | max
+-----+-----
+(0 rows)
+
+SELECT min(a), max(a) FROM test_having HAVING min(a) < max(a);
+ min | max
+-----+-----
+ 0 | 9
+(1 row)
+
+-- errors: ungrouped column references
+SELECT a FROM test_having HAVING min(a) < max(a);
+ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT a FROM test_having HAVING min(a) < max(a);
+ ^
+SELECT 1 AS one FROM test_having HAVING a > 1;
+ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: SELECT 1 AS one FROM test_having HAVING a > 1;
+ ^
+-- the really degenerate case: need not scan table at all
+SELECT 1 AS one FROM test_having HAVING 1 > 2;
+ one
+-----
+(0 rows)
+
+SELECT 1 AS one FROM test_having HAVING 1 < 2;
+ one
+-----
+ 1
+(1 row)
+
+-- and just to prove that we aren't scanning the table:
+SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2;
+ one
+-----
+ 1
+(1 row)
+
+DROP TABLE test_having;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_implicit.out b/ydb/library/yql/tests/postgresql/original/cases/select_implicit.out
new file mode 100644
index 0000000000..27c07de92c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_implicit.out
@@ -0,0 +1,338 @@
+--
+-- SELECT_IMPLICIT
+-- Test cases for queries with ordering terms missing from the target list.
+-- This used to be called "junkfilter.sql".
+-- The parser uses the term "resjunk" to handle these cases.
+-- - thomas 1998-07-09
+--
+-- load test data
+CREATE TABLE test_missing_target (a int, b int, c char(8), d char);
+INSERT INTO test_missing_target VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_missing_target VALUES (1, 2, 'ABAB', 'b');
+INSERT INTO test_missing_target VALUES (2, 2, 'ABAB', 'c');
+INSERT INTO test_missing_target VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_missing_target VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_missing_target VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_missing_target VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_missing_target VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_missing_target VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_missing_target VALUES (9, 4, 'CCCC', 'j');
+-- w/ existing GROUP BY target
+SELECT c, count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c;
+ c | count
+----------+-------
+ ABAB | 2
+ BBBB | 2
+ CCCC | 2
+ XXXX | 1
+ bbbb | 1
+ cccc | 2
+(6 rows)
+
+-- w/o existing GROUP BY target using a relation name in GROUP BY clause
+SELECT count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c;
+ count
+-------
+ 2
+ 2
+ 2
+ 1
+ 1
+ 2
+(6 rows)
+
+-- w/o existing GROUP BY target and w/o existing a different ORDER BY target
+-- failure expected
+SELECT count(*) FROM test_missing_target GROUP BY a ORDER BY b;
+ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: ...ECT count(*) FROM test_missing_target GROUP BY a ORDER BY b;
+ ^
+-- w/o existing GROUP BY target and w/o existing same ORDER BY target
+SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b;
+ count
+-------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+-- w/ existing GROUP BY target using a relation name in target
+SELECT test_missing_target.b, count(*)
+ FROM test_missing_target GROUP BY b ORDER BY b;
+ b | count
+---+-------
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(4 rows)
+
+-- w/o existing GROUP BY target
+SELECT c FROM test_missing_target ORDER BY a;
+ c
+----------
+ XXXX
+ ABAB
+ ABAB
+ BBBB
+ BBBB
+ bbbb
+ cccc
+ cccc
+ CCCC
+ CCCC
+(10 rows)
+
+-- w/o existing ORDER BY target
+SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b desc;
+ count
+-------
+ 4
+ 3
+ 2
+ 1
+(4 rows)
+
+-- group using reference number
+SELECT count(*) FROM test_missing_target ORDER BY 1 desc;
+ count
+-------
+ 10
+(1 row)
+
+-- order using reference number
+SELECT c, count(*) FROM test_missing_target GROUP BY 1 ORDER BY 1;
+ c | count
+----------+-------
+ ABAB | 2
+ BBBB | 2
+ CCCC | 2
+ XXXX | 1
+ bbbb | 1
+ cccc | 2
+(6 rows)
+
+-- group using reference number out of range
+-- failure expected
+SELECT c, count(*) FROM test_missing_target GROUP BY 3;
+ERROR: GROUP BY position 3 is not in select list
+LINE 1: SELECT c, count(*) FROM test_missing_target GROUP BY 3;
+ ^
+-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
+-- failure expected
+SELECT count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY b ORDER BY b;
+ERROR: column reference "b" is ambiguous
+LINE 3: GROUP BY b ORDER BY b;
+ ^
+-- order w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a, a FROM test_missing_target
+ ORDER BY a;
+ a | a
+---+---
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+(10 rows)
+
+-- order expression w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a/2, a/2 FROM test_missing_target
+ ORDER BY a/2;
+ ?column? | ?column?
+----------+----------
+ 0 | 0
+ 0 | 0
+ 1 | 1
+ 1 | 1
+ 2 | 2
+ 2 | 2
+ 3 | 3
+ 3 | 3
+ 4 | 4
+ 4 | 4
+(10 rows)
+
+-- group expression w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a/2, a/2 FROM test_missing_target
+ GROUP BY a/2 ORDER BY a/2;
+ ?column? | ?column?
+----------+----------
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(5 rows)
+
+-- group w/ existing GROUP BY target under ambiguous condition
+SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+ b | count
+---+-------
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(4 rows)
+
+-- group w/o existing GROUP BY target under ambiguous condition
+SELECT count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+ count
+-------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+-- group w/o existing GROUP BY target under ambiguous condition
+-- into a table
+CREATE TABLE test_missing_target2 AS
+SELECT count(*)
+FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+SELECT * FROM test_missing_target2;
+ count
+-------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+-- Functions and expressions
+-- w/ existing GROUP BY target
+SELECT a%2, count(b) FROM test_missing_target
+GROUP BY test_missing_target.a%2
+ORDER BY test_missing_target.a%2;
+ ?column? | count
+----------+-------
+ 0 | 5
+ 1 | 5
+(2 rows)
+
+-- w/o existing GROUP BY target using a relation name in GROUP BY clause
+SELECT count(c) FROM test_missing_target
+GROUP BY lower(test_missing_target.c)
+ORDER BY lower(test_missing_target.c);
+ count
+-------
+ 2
+ 3
+ 4
+ 1
+(4 rows)
+
+-- w/o existing GROUP BY target and w/o existing a different ORDER BY target
+-- failure expected
+SELECT count(a) FROM test_missing_target GROUP BY a ORDER BY b;
+ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: ...ECT count(a) FROM test_missing_target GROUP BY a ORDER BY b;
+ ^
+-- w/o existing GROUP BY target and w/o existing same ORDER BY target
+SELECT count(b) FROM test_missing_target GROUP BY b/2 ORDER BY b/2;
+ count
+-------
+ 1
+ 5
+ 4
+(3 rows)
+
+-- w/ existing GROUP BY target using a relation name in target
+SELECT lower(test_missing_target.c), count(c)
+ FROM test_missing_target GROUP BY lower(c) ORDER BY lower(c);
+ lower | count
+-------+-------
+ abab | 2
+ bbbb | 3
+ cccc | 4
+ xxxx | 1
+(4 rows)
+
+-- w/o existing GROUP BY target
+SELECT a FROM test_missing_target ORDER BY upper(d);
+ a
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+(10 rows)
+
+-- w/o existing ORDER BY target
+SELECT count(b) FROM test_missing_target
+ GROUP BY (b + 1) / 2 ORDER BY (b + 1) / 2 desc;
+ count
+-------
+ 7
+ 3
+(2 rows)
+
+-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
+-- failure expected
+SELECT count(x.a) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY b/2 ORDER BY b/2;
+ERROR: column reference "b" is ambiguous
+LINE 3: GROUP BY b/2 ORDER BY b/2;
+ ^
+-- group w/ existing GROUP BY target under ambiguous condition
+SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2 ORDER BY x.b/2;
+ ?column? | count
+----------+-------
+ 0 | 1
+ 1 | 5
+ 2 | 4
+(3 rows)
+
+-- group w/o existing GROUP BY target under ambiguous condition
+-- failure expected due to ambiguous b in count(b)
+SELECT count(b) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2;
+ERROR: column reference "b" is ambiguous
+LINE 1: SELECT count(b) FROM test_missing_target x, test_missing_tar...
+ ^
+-- group w/o existing GROUP BY target under ambiguous condition
+-- into a table
+CREATE TABLE test_missing_target3 AS
+SELECT count(x.b)
+FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2 ORDER BY x.b/2;
+SELECT * FROM test_missing_target3;
+ count
+-------
+ 1
+ 5
+ 4
+(3 rows)
+
+-- Cleanup
+DROP TABLE test_missing_target;
+DROP TABLE test_missing_target2;
+DROP TABLE test_missing_target3;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_implicit.sql b/ydb/library/yql/tests/postgresql/original/cases/select_implicit.sql
new file mode 100644
index 0000000000..de3aef8d81
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_implicit.sql
@@ -0,0 +1,156 @@
+--
+-- SELECT_IMPLICIT
+-- Test cases for queries with ordering terms missing from the target list.
+-- This used to be called "junkfilter.sql".
+-- The parser uses the term "resjunk" to handle these cases.
+-- - thomas 1998-07-09
+--
+
+-- load test data
+CREATE TABLE test_missing_target (a int, b int, c char(8), d char);
+INSERT INTO test_missing_target VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_missing_target VALUES (1, 2, 'ABAB', 'b');
+INSERT INTO test_missing_target VALUES (2, 2, 'ABAB', 'c');
+INSERT INTO test_missing_target VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_missing_target VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_missing_target VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_missing_target VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_missing_target VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_missing_target VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_missing_target VALUES (9, 4, 'CCCC', 'j');
+
+
+-- w/ existing GROUP BY target
+SELECT c, count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c;
+
+-- w/o existing GROUP BY target using a relation name in GROUP BY clause
+SELECT count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c;
+
+-- w/o existing GROUP BY target and w/o existing a different ORDER BY target
+-- failure expected
+SELECT count(*) FROM test_missing_target GROUP BY a ORDER BY b;
+
+-- w/o existing GROUP BY target and w/o existing same ORDER BY target
+SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b;
+
+-- w/ existing GROUP BY target using a relation name in target
+SELECT test_missing_target.b, count(*)
+ FROM test_missing_target GROUP BY b ORDER BY b;
+
+-- w/o existing GROUP BY target
+SELECT c FROM test_missing_target ORDER BY a;
+
+-- w/o existing ORDER BY target
+SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b desc;
+
+-- group using reference number
+SELECT count(*) FROM test_missing_target ORDER BY 1 desc;
+
+-- order using reference number
+SELECT c, count(*) FROM test_missing_target GROUP BY 1 ORDER BY 1;
+
+-- group using reference number out of range
+-- failure expected
+SELECT c, count(*) FROM test_missing_target GROUP BY 3;
+
+-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
+-- failure expected
+SELECT count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY b ORDER BY b;
+
+-- order w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a, a FROM test_missing_target
+ ORDER BY a;
+
+-- order expression w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a/2, a/2 FROM test_missing_target
+ ORDER BY a/2;
+
+-- group expression w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a/2, a/2 FROM test_missing_target
+ GROUP BY a/2 ORDER BY a/2;
+
+-- group w/ existing GROUP BY target under ambiguous condition
+SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+
+-- group w/o existing GROUP BY target under ambiguous condition
+SELECT count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+
+-- group w/o existing GROUP BY target under ambiguous condition
+-- into a table
+CREATE TABLE test_missing_target2 AS
+SELECT count(*)
+FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+SELECT * FROM test_missing_target2;
+
+
+-- Functions and expressions
+
+-- w/ existing GROUP BY target
+SELECT a%2, count(b) FROM test_missing_target
+GROUP BY test_missing_target.a%2
+ORDER BY test_missing_target.a%2;
+
+-- w/o existing GROUP BY target using a relation name in GROUP BY clause
+SELECT count(c) FROM test_missing_target
+GROUP BY lower(test_missing_target.c)
+ORDER BY lower(test_missing_target.c);
+
+-- w/o existing GROUP BY target and w/o existing a different ORDER BY target
+-- failure expected
+SELECT count(a) FROM test_missing_target GROUP BY a ORDER BY b;
+
+-- w/o existing GROUP BY target and w/o existing same ORDER BY target
+SELECT count(b) FROM test_missing_target GROUP BY b/2 ORDER BY b/2;
+
+-- w/ existing GROUP BY target using a relation name in target
+SELECT lower(test_missing_target.c), count(c)
+ FROM test_missing_target GROUP BY lower(c) ORDER BY lower(c);
+
+-- w/o existing GROUP BY target
+SELECT a FROM test_missing_target ORDER BY upper(d);
+
+-- w/o existing ORDER BY target
+SELECT count(b) FROM test_missing_target
+ GROUP BY (b + 1) / 2 ORDER BY (b + 1) / 2 desc;
+
+-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
+-- failure expected
+SELECT count(x.a) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY b/2 ORDER BY b/2;
+
+-- group w/ existing GROUP BY target under ambiguous condition
+SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2 ORDER BY x.b/2;
+
+-- group w/o existing GROUP BY target under ambiguous condition
+-- failure expected due to ambiguous b in count(b)
+SELECT count(b) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2;
+
+-- group w/o existing GROUP BY target under ambiguous condition
+-- into a table
+CREATE TABLE test_missing_target3 AS
+SELECT count(x.b)
+FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2 ORDER BY x.b/2;
+SELECT * FROM test_missing_target3;
+
+-- Cleanup
+DROP TABLE test_missing_target;
+DROP TABLE test_missing_target2;
+DROP TABLE test_missing_target3;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_implicit_1.out b/ydb/library/yql/tests/postgresql/original/cases/select_implicit_1.out
new file mode 100644
index 0000000000..d67521e8f8
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_implicit_1.out
@@ -0,0 +1,338 @@
+--
+-- SELECT_IMPLICIT
+-- Test cases for queries with ordering terms missing from the target list.
+-- This used to be called "junkfilter.sql".
+-- The parser uses the term "resjunk" to handle these cases.
+-- - thomas 1998-07-09
+--
+-- load test data
+CREATE TABLE test_missing_target (a int, b int, c char(8), d char);
+INSERT INTO test_missing_target VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_missing_target VALUES (1, 2, 'ABAB', 'b');
+INSERT INTO test_missing_target VALUES (2, 2, 'ABAB', 'c');
+INSERT INTO test_missing_target VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_missing_target VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_missing_target VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_missing_target VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_missing_target VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_missing_target VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_missing_target VALUES (9, 4, 'CCCC', 'j');
+-- w/ existing GROUP BY target
+SELECT c, count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c;
+ c | count
+----------+-------
+ ABAB | 2
+ BBBB | 2
+ bbbb | 1
+ CCCC | 2
+ cccc | 2
+ XXXX | 1
+(6 rows)
+
+-- w/o existing GROUP BY target using a relation name in GROUP BY clause
+SELECT count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c;
+ count
+-------
+ 2
+ 2
+ 1
+ 2
+ 2
+ 1
+(6 rows)
+
+-- w/o existing GROUP BY target and w/o existing a different ORDER BY target
+-- failure expected
+SELECT count(*) FROM test_missing_target GROUP BY a ORDER BY b;
+ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: ...ECT count(*) FROM test_missing_target GROUP BY a ORDER BY b;
+ ^
+-- w/o existing GROUP BY target and w/o existing same ORDER BY target
+SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b;
+ count
+-------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+-- w/ existing GROUP BY target using a relation name in target
+SELECT test_missing_target.b, count(*)
+ FROM test_missing_target GROUP BY b ORDER BY b;
+ b | count
+---+-------
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(4 rows)
+
+-- w/o existing GROUP BY target
+SELECT c FROM test_missing_target ORDER BY a;
+ c
+----------
+ XXXX
+ ABAB
+ ABAB
+ BBBB
+ BBBB
+ bbbb
+ cccc
+ cccc
+ CCCC
+ CCCC
+(10 rows)
+
+-- w/o existing ORDER BY target
+SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b desc;
+ count
+-------
+ 4
+ 3
+ 2
+ 1
+(4 rows)
+
+-- group using reference number
+SELECT count(*) FROM test_missing_target ORDER BY 1 desc;
+ count
+-------
+ 10
+(1 row)
+
+-- order using reference number
+SELECT c, count(*) FROM test_missing_target GROUP BY 1 ORDER BY 1;
+ c | count
+----------+-------
+ ABAB | 2
+ BBBB | 2
+ bbbb | 1
+ CCCC | 2
+ cccc | 2
+ XXXX | 1
+(6 rows)
+
+-- group using reference number out of range
+-- failure expected
+SELECT c, count(*) FROM test_missing_target GROUP BY 3;
+ERROR: GROUP BY position 3 is not in select list
+LINE 1: SELECT c, count(*) FROM test_missing_target GROUP BY 3;
+ ^
+-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
+-- failure expected
+SELECT count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY b ORDER BY b;
+ERROR: column reference "b" is ambiguous
+LINE 3: GROUP BY b ORDER BY b;
+ ^
+-- order w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a, a FROM test_missing_target
+ ORDER BY a;
+ a | a
+---+---
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+(10 rows)
+
+-- order expression w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a/2, a/2 FROM test_missing_target
+ ORDER BY a/2;
+ ?column? | ?column?
+----------+----------
+ 0 | 0
+ 0 | 0
+ 1 | 1
+ 1 | 1
+ 2 | 2
+ 2 | 2
+ 3 | 3
+ 3 | 3
+ 4 | 4
+ 4 | 4
+(10 rows)
+
+-- group expression w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a/2, a/2 FROM test_missing_target
+ GROUP BY a/2 ORDER BY a/2;
+ ?column? | ?column?
+----------+----------
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(5 rows)
+
+-- group w/ existing GROUP BY target under ambiguous condition
+SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+ b | count
+---+-------
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(4 rows)
+
+-- group w/o existing GROUP BY target under ambiguous condition
+SELECT count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+ count
+-------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+-- group w/o existing GROUP BY target under ambiguous condition
+-- into a table
+CREATE TABLE test_missing_target2 AS
+SELECT count(*)
+FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+SELECT * FROM test_missing_target2;
+ count
+-------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+-- Functions and expressions
+-- w/ existing GROUP BY target
+SELECT a%2, count(b) FROM test_missing_target
+GROUP BY test_missing_target.a%2
+ORDER BY test_missing_target.a%2;
+ ?column? | count
+----------+-------
+ 0 | 5
+ 1 | 5
+(2 rows)
+
+-- w/o existing GROUP BY target using a relation name in GROUP BY clause
+SELECT count(c) FROM test_missing_target
+GROUP BY lower(test_missing_target.c)
+ORDER BY lower(test_missing_target.c);
+ count
+-------
+ 2
+ 3
+ 4
+ 1
+(4 rows)
+
+-- w/o existing GROUP BY target and w/o existing a different ORDER BY target
+-- failure expected
+SELECT count(a) FROM test_missing_target GROUP BY a ORDER BY b;
+ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: ...ECT count(a) FROM test_missing_target GROUP BY a ORDER BY b;
+ ^
+-- w/o existing GROUP BY target and w/o existing same ORDER BY target
+SELECT count(b) FROM test_missing_target GROUP BY b/2 ORDER BY b/2;
+ count
+-------
+ 1
+ 5
+ 4
+(3 rows)
+
+-- w/ existing GROUP BY target using a relation name in target
+SELECT lower(test_missing_target.c), count(c)
+ FROM test_missing_target GROUP BY lower(c) ORDER BY lower(c);
+ lower | count
+-------+-------
+ abab | 2
+ bbbb | 3
+ cccc | 4
+ xxxx | 1
+(4 rows)
+
+-- w/o existing GROUP BY target
+SELECT a FROM test_missing_target ORDER BY upper(d);
+ a
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+(10 rows)
+
+-- w/o existing ORDER BY target
+SELECT count(b) FROM test_missing_target
+ GROUP BY (b + 1) / 2 ORDER BY (b + 1) / 2 desc;
+ count
+-------
+ 7
+ 3
+(2 rows)
+
+-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
+-- failure expected
+SELECT count(x.a) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY b/2 ORDER BY b/2;
+ERROR: column reference "b" is ambiguous
+LINE 3: GROUP BY b/2 ORDER BY b/2;
+ ^
+-- group w/ existing GROUP BY target under ambiguous condition
+SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2 ORDER BY x.b/2;
+ ?column? | count
+----------+-------
+ 0 | 1
+ 1 | 5
+ 2 | 4
+(3 rows)
+
+-- group w/o existing GROUP BY target under ambiguous condition
+-- failure expected due to ambiguous b in count(b)
+SELECT count(b) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2;
+ERROR: column reference "b" is ambiguous
+LINE 1: SELECT count(b) FROM test_missing_target x, test_missing_tar...
+ ^
+-- group w/o existing GROUP BY target under ambiguous condition
+-- into a table
+CREATE TABLE test_missing_target3 AS
+SELECT count(x.b)
+FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2 ORDER BY x.b/2;
+SELECT * FROM test_missing_target3;
+ count
+-------
+ 1
+ 5
+ 4
+(3 rows)
+
+-- Cleanup
+DROP TABLE test_missing_target;
+DROP TABLE test_missing_target2;
+DROP TABLE test_missing_target3;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_implicit_2.out b/ydb/library/yql/tests/postgresql/original/cases/select_implicit_2.out
new file mode 100644
index 0000000000..7a353d0862
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_implicit_2.out
@@ -0,0 +1,338 @@
+--
+-- SELECT_IMPLICIT
+-- Test cases for queries with ordering terms missing from the target list.
+-- This used to be called "junkfilter.sql".
+-- The parser uses the term "resjunk" to handle these cases.
+-- - thomas 1998-07-09
+--
+-- load test data
+CREATE TABLE test_missing_target (a int, b int, c char(8), d char);
+INSERT INTO test_missing_target VALUES (0, 1, 'XXXX', 'A');
+INSERT INTO test_missing_target VALUES (1, 2, 'ABAB', 'b');
+INSERT INTO test_missing_target VALUES (2, 2, 'ABAB', 'c');
+INSERT INTO test_missing_target VALUES (3, 3, 'BBBB', 'D');
+INSERT INTO test_missing_target VALUES (4, 3, 'BBBB', 'e');
+INSERT INTO test_missing_target VALUES (5, 3, 'bbbb', 'F');
+INSERT INTO test_missing_target VALUES (6, 4, 'cccc', 'g');
+INSERT INTO test_missing_target VALUES (7, 4, 'cccc', 'h');
+INSERT INTO test_missing_target VALUES (8, 4, 'CCCC', 'I');
+INSERT INTO test_missing_target VALUES (9, 4, 'CCCC', 'j');
+-- w/ existing GROUP BY target
+SELECT c, count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c;
+ c | count
+----------+-------
+ ABAB | 2
+ bbbb | 1
+ BBBB | 2
+ cccc | 2
+ CCCC | 2
+ XXXX | 1
+(6 rows)
+
+-- w/o existing GROUP BY target using a relation name in GROUP BY clause
+SELECT count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c;
+ count
+-------
+ 2
+ 1
+ 2
+ 2
+ 2
+ 1
+(6 rows)
+
+-- w/o existing GROUP BY target and w/o existing a different ORDER BY target
+-- failure expected
+SELECT count(*) FROM test_missing_target GROUP BY a ORDER BY b;
+ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: ...ECT count(*) FROM test_missing_target GROUP BY a ORDER BY b;
+ ^
+-- w/o existing GROUP BY target and w/o existing same ORDER BY target
+SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b;
+ count
+-------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+-- w/ existing GROUP BY target using a relation name in target
+SELECT test_missing_target.b, count(*)
+ FROM test_missing_target GROUP BY b ORDER BY b;
+ b | count
+---+-------
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(4 rows)
+
+-- w/o existing GROUP BY target
+SELECT c FROM test_missing_target ORDER BY a;
+ c
+----------
+ XXXX
+ ABAB
+ ABAB
+ BBBB
+ BBBB
+ bbbb
+ cccc
+ cccc
+ CCCC
+ CCCC
+(10 rows)
+
+-- w/o existing ORDER BY target
+SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b desc;
+ count
+-------
+ 4
+ 3
+ 2
+ 1
+(4 rows)
+
+-- group using reference number
+SELECT count(*) FROM test_missing_target ORDER BY 1 desc;
+ count
+-------
+ 10
+(1 row)
+
+-- order using reference number
+SELECT c, count(*) FROM test_missing_target GROUP BY 1 ORDER BY 1;
+ c | count
+----------+-------
+ ABAB | 2
+ bbbb | 1
+ BBBB | 2
+ cccc | 2
+ CCCC | 2
+ XXXX | 1
+(6 rows)
+
+-- group using reference number out of range
+-- failure expected
+SELECT c, count(*) FROM test_missing_target GROUP BY 3;
+ERROR: GROUP BY position 3 is not in select list
+LINE 1: SELECT c, count(*) FROM test_missing_target GROUP BY 3;
+ ^
+-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
+-- failure expected
+SELECT count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY b ORDER BY b;
+ERROR: column reference "b" is ambiguous
+LINE 3: GROUP BY b ORDER BY b;
+ ^
+-- order w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a, a FROM test_missing_target
+ ORDER BY a;
+ a | a
+---+---
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+ 5 | 5
+ 6 | 6
+ 7 | 7
+ 8 | 8
+ 9 | 9
+(10 rows)
+
+-- order expression w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a/2, a/2 FROM test_missing_target
+ ORDER BY a/2;
+ ?column? | ?column?
+----------+----------
+ 0 | 0
+ 0 | 0
+ 1 | 1
+ 1 | 1
+ 2 | 2
+ 2 | 2
+ 3 | 3
+ 3 | 3
+ 4 | 4
+ 4 | 4
+(10 rows)
+
+-- group expression w/ target under ambiguous condition
+-- failure NOT expected
+SELECT a/2, a/2 FROM test_missing_target
+ GROUP BY a/2 ORDER BY a/2;
+ ?column? | ?column?
+----------+----------
+ 0 | 0
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(5 rows)
+
+-- group w/ existing GROUP BY target under ambiguous condition
+SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+ b | count
+---+-------
+ 1 | 1
+ 2 | 2
+ 3 | 3
+ 4 | 4
+(4 rows)
+
+-- group w/o existing GROUP BY target under ambiguous condition
+SELECT count(*) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+ count
+-------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+-- group w/o existing GROUP BY target under ambiguous condition
+-- into a table
+CREATE TABLE test_missing_target2 AS
+SELECT count(*)
+FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b ORDER BY x.b;
+SELECT * FROM test_missing_target2;
+ count
+-------
+ 1
+ 2
+ 3
+ 4
+(4 rows)
+
+-- Functions and expressions
+-- w/ existing GROUP BY target
+SELECT a%2, count(b) FROM test_missing_target
+GROUP BY test_missing_target.a%2
+ORDER BY test_missing_target.a%2;
+ ?column? | count
+----------+-------
+ 0 | 5
+ 1 | 5
+(2 rows)
+
+-- w/o existing GROUP BY target using a relation name in GROUP BY clause
+SELECT count(c) FROM test_missing_target
+GROUP BY lower(test_missing_target.c)
+ORDER BY lower(test_missing_target.c);
+ count
+-------
+ 2
+ 3
+ 4
+ 1
+(4 rows)
+
+-- w/o existing GROUP BY target and w/o existing a different ORDER BY target
+-- failure expected
+SELECT count(a) FROM test_missing_target GROUP BY a ORDER BY b;
+ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function
+LINE 1: ...ECT count(a) FROM test_missing_target GROUP BY a ORDER BY b;
+ ^
+-- w/o existing GROUP BY target and w/o existing same ORDER BY target
+SELECT count(b) FROM test_missing_target GROUP BY b/2 ORDER BY b/2;
+ count
+-------
+ 1
+ 5
+ 4
+(3 rows)
+
+-- w/ existing GROUP BY target using a relation name in target
+SELECT lower(test_missing_target.c), count(c)
+ FROM test_missing_target GROUP BY lower(c) ORDER BY lower(c);
+ lower | count
+-------+-------
+ abab | 2
+ bbbb | 3
+ cccc | 4
+ xxxx | 1
+(4 rows)
+
+-- w/o existing GROUP BY target
+SELECT a FROM test_missing_target ORDER BY upper(d);
+ a
+---
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+(10 rows)
+
+-- w/o existing ORDER BY target
+SELECT count(b) FROM test_missing_target
+ GROUP BY (b + 1) / 2 ORDER BY (b + 1) / 2 desc;
+ count
+-------
+ 7
+ 3
+(2 rows)
+
+-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition
+-- failure expected
+SELECT count(x.a) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY b/2 ORDER BY b/2;
+ERROR: column reference "b" is ambiguous
+LINE 3: GROUP BY b/2 ORDER BY b/2;
+ ^
+-- group w/ existing GROUP BY target under ambiguous condition
+SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2 ORDER BY x.b/2;
+ ?column? | count
+----------+-------
+ 0 | 1
+ 1 | 5
+ 2 | 4
+(3 rows)
+
+-- group w/o existing GROUP BY target under ambiguous condition
+-- failure expected due to ambiguous b in count(b)
+SELECT count(b) FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2;
+ERROR: column reference "b" is ambiguous
+LINE 1: SELECT count(b) FROM test_missing_target x, test_missing_tar...
+ ^
+-- group w/o existing GROUP BY target under ambiguous condition
+-- into a table
+CREATE TABLE test_missing_target3 AS
+SELECT count(x.b)
+FROM test_missing_target x, test_missing_target y
+ WHERE x.a = y.a
+ GROUP BY x.b/2 ORDER BY x.b/2;
+SELECT * FROM test_missing_target3;
+ count
+-------
+ 1
+ 5
+ 4
+(3 rows)
+
+-- Cleanup
+DROP TABLE test_missing_target;
+DROP TABLE test_missing_target2;
+DROP TABLE test_missing_target3;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_into.out b/ydb/library/yql/tests/postgresql/original/cases/select_into.out
new file mode 100644
index 0000000000..43b8209d22
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_into.out
@@ -0,0 +1,222 @@
+--
+-- SELECT_INTO
+--
+SELECT *
+ INTO TABLE sitmp1
+ FROM onek
+ WHERE onek.unique1 < 2;
+DROP TABLE sitmp1;
+SELECT *
+ INTO TABLE sitmp1
+ FROM onek2
+ WHERE onek2.unique1 < 2;
+DROP TABLE sitmp1;
+--
+-- SELECT INTO and INSERT permission, if owner is not allowed to insert.
+--
+CREATE SCHEMA selinto_schema;
+CREATE USER regress_selinto_user;
+ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user
+ REVOKE INSERT ON TABLES FROM regress_selinto_user;
+GRANT ALL ON SCHEMA selinto_schema TO public;
+SET SESSION AUTHORIZATION regress_selinto_user;
+-- WITH DATA, passes.
+CREATE TABLE selinto_schema.tbl_withdata1 (a)
+ AS SELECT generate_series(1,3) WITH DATA;
+INSERT INTO selinto_schema.tbl_withdata1 VALUES (4);
+ERROR: permission denied for table tbl_withdata1
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE selinto_schema.tbl_withdata2 (a) AS
+ SELECT generate_series(1,3) WITH DATA;
+ QUERY PLAN
+--------------------------------------
+ ProjectSet (actual rows=3 loops=1)
+ -> Result (actual rows=1 loops=1)
+(2 rows)
+
+-- WITH NO DATA, passes.
+CREATE TABLE selinto_schema.tbl_nodata1 (a) AS
+ SELECT generate_series(1,3) WITH NO DATA;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE selinto_schema.tbl_nodata2 (a) AS
+ SELECT generate_series(1,3) WITH NO DATA;
+ QUERY PLAN
+-------------------------------
+ ProjectSet (never executed)
+ -> Result (never executed)
+(2 rows)
+
+-- EXECUTE and WITH DATA, passes.
+PREPARE data_sel AS SELECT generate_series(1,3);
+CREATE TABLE selinto_schema.tbl_withdata3 (a) AS
+ EXECUTE data_sel WITH DATA;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE selinto_schema.tbl_withdata4 (a) AS
+ EXECUTE data_sel WITH DATA;
+ QUERY PLAN
+--------------------------------------
+ ProjectSet (actual rows=3 loops=1)
+ -> Result (actual rows=1 loops=1)
+(2 rows)
+
+-- EXECUTE and WITH NO DATA, passes.
+CREATE TABLE selinto_schema.tbl_nodata3 (a) AS
+ EXECUTE data_sel WITH NO DATA;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE selinto_schema.tbl_nodata4 (a) AS
+ EXECUTE data_sel WITH NO DATA;
+ QUERY PLAN
+-------------------------------
+ ProjectSet (never executed)
+ -> Result (never executed)
+(2 rows)
+
+RESET SESSION AUTHORIZATION;
+ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user
+ GRANT INSERT ON TABLES TO regress_selinto_user;
+SET SESSION AUTHORIZATION regress_selinto_user;
+RESET SESSION AUTHORIZATION;
+DEALLOCATE data_sel;
+DROP SCHEMA selinto_schema CASCADE;
+NOTICE: drop cascades to 8 other objects
+DETAIL: drop cascades to table selinto_schema.tbl_withdata1
+drop cascades to table selinto_schema.tbl_withdata2
+drop cascades to table selinto_schema.tbl_nodata1
+drop cascades to table selinto_schema.tbl_nodata2
+drop cascades to table selinto_schema.tbl_withdata3
+drop cascades to table selinto_schema.tbl_withdata4
+drop cascades to table selinto_schema.tbl_nodata3
+drop cascades to table selinto_schema.tbl_nodata4
+DROP USER regress_selinto_user;
+-- Tests for WITH NO DATA and column name consistency
+CREATE TABLE ctas_base (i int, j int);
+INSERT INTO ctas_base VALUES (1, 2);
+CREATE TABLE ctas_nodata (ii, jj, kk) AS SELECT i, j FROM ctas_base; -- Error
+ERROR: too many column names were specified
+CREATE TABLE ctas_nodata (ii, jj, kk) AS SELECT i, j FROM ctas_base WITH NO DATA; -- Error
+ERROR: too many column names were specified
+CREATE TABLE ctas_nodata (ii, jj) AS SELECT i, j FROM ctas_base; -- OK
+CREATE TABLE ctas_nodata_2 (ii, jj) AS SELECT i, j FROM ctas_base WITH NO DATA; -- OK
+CREATE TABLE ctas_nodata_3 (ii) AS SELECT i, j FROM ctas_base; -- OK
+CREATE TABLE ctas_nodata_4 (ii) AS SELECT i, j FROM ctas_base WITH NO DATA; -- OK
+SELECT * FROM ctas_nodata;
+ ii | jj
+----+----
+ 1 | 2
+(1 row)
+
+SELECT * FROM ctas_nodata_2;
+ ii | jj
+----+----
+(0 rows)
+
+SELECT * FROM ctas_nodata_3;
+ ii | j
+----+---
+ 1 | 2
+(1 row)
+
+SELECT * FROM ctas_nodata_4;
+ ii | j
+----+---
+(0 rows)
+
+DROP TABLE ctas_base;
+DROP TABLE ctas_nodata;
+DROP TABLE ctas_nodata_2;
+DROP TABLE ctas_nodata_3;
+DROP TABLE ctas_nodata_4;
+--
+-- CREATE TABLE AS/SELECT INTO as last command in a SQL function
+-- have been known to cause problems
+--
+CREATE FUNCTION make_table() RETURNS VOID
+AS $$
+ CREATE TABLE created_table AS SELECT * FROM int8_tbl;
+$$ LANGUAGE SQL;
+SELECT make_table();
+ make_table
+------------
+
+(1 row)
+
+SELECT * FROM created_table;
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+-- Try EXPLAIN ANALYZE SELECT INTO and EXPLAIN ANALYZE CREATE TABLE AS
+-- WITH NO DATA, but hide the outputs since they won't be stable.
+DO $$
+BEGIN
+ EXECUTE 'EXPLAIN ANALYZE SELECT * INTO TABLE easi FROM int8_tbl';
+ EXECUTE 'EXPLAIN ANALYZE CREATE TABLE easi2 AS SELECT * FROM int8_tbl WITH NO DATA';
+END$$;
+DROP TABLE created_table;
+DROP TABLE easi, easi2;
+--
+-- Disallowed uses of SELECT ... INTO. All should fail
+--
+DECLARE foo CURSOR FOR SELECT 1 INTO b;
+ERROR: SELECT ... INTO is not allowed here
+LINE 1: DECLARE foo CURSOR FOR SELECT 1 INTO b;
+ ^
+COPY (SELECT 1 INTO frak UNION SELECT 2) TO 'blob';
+ERROR: COPY (SELECT INTO) is not supported
+SELECT * FROM (SELECT 1 INTO f) bar;
+ERROR: SELECT ... INTO is not allowed here
+LINE 1: SELECT * FROM (SELECT 1 INTO f) bar;
+ ^
+CREATE VIEW foo AS SELECT 1 INTO b;
+ERROR: views must not contain SELECT INTO
+INSERT INTO b SELECT 1 INTO f;
+ERROR: SELECT ... INTO is not allowed here
+LINE 1: INSERT INTO b SELECT 1 INTO f;
+ ^
+-- Test CREATE TABLE AS ... IF NOT EXISTS
+CREATE TABLE ctas_ine_tbl AS SELECT 1;
+CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0; -- error
+ERROR: relation "ctas_ine_tbl" already exists
+CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0; -- ok
+NOTICE: relation "ctas_ine_tbl" already exists, skipping
+CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- error
+ERROR: relation "ctas_ine_tbl" already exists
+CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- ok
+NOTICE: relation "ctas_ine_tbl" already exists, skipping
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0; -- error
+ERROR: relation "ctas_ine_tbl" already exists
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0; -- ok
+NOTICE: relation "ctas_ine_tbl" already exists, skipping
+ QUERY PLAN
+------------
+(0 rows)
+
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- error
+ERROR: relation "ctas_ine_tbl" already exists
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- ok
+NOTICE: relation "ctas_ine_tbl" already exists, skipping
+ QUERY PLAN
+------------
+(0 rows)
+
+PREPARE ctas_ine_query AS SELECT 1 / 0;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE ctas_ine_tbl AS EXECUTE ctas_ine_query; -- error
+ERROR: relation "ctas_ine_tbl" already exists
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS EXECUTE ctas_ine_query; -- ok
+NOTICE: relation "ctas_ine_tbl" already exists, skipping
+ QUERY PLAN
+------------
+(0 rows)
+
+DROP TABLE ctas_ine_tbl;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/select_into.sql b/ydb/library/yql/tests/postgresql/original/cases/select_into.sql
new file mode 100644
index 0000000000..7e903c339a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/select_into.sql
@@ -0,0 +1,138 @@
+--
+-- SELECT_INTO
+--
+
+SELECT *
+ INTO TABLE sitmp1
+ FROM onek
+ WHERE onek.unique1 < 2;
+
+DROP TABLE sitmp1;
+
+SELECT *
+ INTO TABLE sitmp1
+ FROM onek2
+ WHERE onek2.unique1 < 2;
+
+DROP TABLE sitmp1;
+
+--
+-- SELECT INTO and INSERT permission, if owner is not allowed to insert.
+--
+CREATE SCHEMA selinto_schema;
+CREATE USER regress_selinto_user;
+ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user
+ REVOKE INSERT ON TABLES FROM regress_selinto_user;
+GRANT ALL ON SCHEMA selinto_schema TO public;
+
+SET SESSION AUTHORIZATION regress_selinto_user;
+-- WITH DATA, passes.
+CREATE TABLE selinto_schema.tbl_withdata1 (a)
+ AS SELECT generate_series(1,3) WITH DATA;
+INSERT INTO selinto_schema.tbl_withdata1 VALUES (4);
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE selinto_schema.tbl_withdata2 (a) AS
+ SELECT generate_series(1,3) WITH DATA;
+-- WITH NO DATA, passes.
+CREATE TABLE selinto_schema.tbl_nodata1 (a) AS
+ SELECT generate_series(1,3) WITH NO DATA;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE selinto_schema.tbl_nodata2 (a) AS
+ SELECT generate_series(1,3) WITH NO DATA;
+-- EXECUTE and WITH DATA, passes.
+PREPARE data_sel AS SELECT generate_series(1,3);
+CREATE TABLE selinto_schema.tbl_withdata3 (a) AS
+ EXECUTE data_sel WITH DATA;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE selinto_schema.tbl_withdata4 (a) AS
+ EXECUTE data_sel WITH DATA;
+-- EXECUTE and WITH NO DATA, passes.
+CREATE TABLE selinto_schema.tbl_nodata3 (a) AS
+ EXECUTE data_sel WITH NO DATA;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE selinto_schema.tbl_nodata4 (a) AS
+ EXECUTE data_sel WITH NO DATA;
+RESET SESSION AUTHORIZATION;
+
+ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user
+ GRANT INSERT ON TABLES TO regress_selinto_user;
+
+SET SESSION AUTHORIZATION regress_selinto_user;
+RESET SESSION AUTHORIZATION;
+
+DEALLOCATE data_sel;
+DROP SCHEMA selinto_schema CASCADE;
+DROP USER regress_selinto_user;
+
+-- Tests for WITH NO DATA and column name consistency
+CREATE TABLE ctas_base (i int, j int);
+INSERT INTO ctas_base VALUES (1, 2);
+CREATE TABLE ctas_nodata (ii, jj, kk) AS SELECT i, j FROM ctas_base; -- Error
+CREATE TABLE ctas_nodata (ii, jj, kk) AS SELECT i, j FROM ctas_base WITH NO DATA; -- Error
+CREATE TABLE ctas_nodata (ii, jj) AS SELECT i, j FROM ctas_base; -- OK
+CREATE TABLE ctas_nodata_2 (ii, jj) AS SELECT i, j FROM ctas_base WITH NO DATA; -- OK
+CREATE TABLE ctas_nodata_3 (ii) AS SELECT i, j FROM ctas_base; -- OK
+CREATE TABLE ctas_nodata_4 (ii) AS SELECT i, j FROM ctas_base WITH NO DATA; -- OK
+SELECT * FROM ctas_nodata;
+SELECT * FROM ctas_nodata_2;
+SELECT * FROM ctas_nodata_3;
+SELECT * FROM ctas_nodata_4;
+DROP TABLE ctas_base;
+DROP TABLE ctas_nodata;
+DROP TABLE ctas_nodata_2;
+DROP TABLE ctas_nodata_3;
+DROP TABLE ctas_nodata_4;
+
+--
+-- CREATE TABLE AS/SELECT INTO as last command in a SQL function
+-- have been known to cause problems
+--
+CREATE FUNCTION make_table() RETURNS VOID
+AS $$
+ CREATE TABLE created_table AS SELECT * FROM int8_tbl;
+$$ LANGUAGE SQL;
+
+SELECT make_table();
+
+SELECT * FROM created_table;
+
+-- Try EXPLAIN ANALYZE SELECT INTO and EXPLAIN ANALYZE CREATE TABLE AS
+-- WITH NO DATA, but hide the outputs since they won't be stable.
+DO $$
+BEGIN
+ EXECUTE 'EXPLAIN ANALYZE SELECT * INTO TABLE easi FROM int8_tbl';
+ EXECUTE 'EXPLAIN ANALYZE CREATE TABLE easi2 AS SELECT * FROM int8_tbl WITH NO DATA';
+END$$;
+
+DROP TABLE created_table;
+DROP TABLE easi, easi2;
+
+--
+-- Disallowed uses of SELECT ... INTO. All should fail
+--
+DECLARE foo CURSOR FOR SELECT 1 INTO b;
+COPY (SELECT 1 INTO frak UNION SELECT 2) TO 'blob';
+SELECT * FROM (SELECT 1 INTO f) bar;
+CREATE VIEW foo AS SELECT 1 INTO b;
+INSERT INTO b SELECT 1 INTO f;
+
+-- Test CREATE TABLE AS ... IF NOT EXISTS
+CREATE TABLE ctas_ine_tbl AS SELECT 1;
+CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0; -- error
+CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0; -- ok
+CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- error
+CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- ok
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0; -- error
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0; -- ok
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- error
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- ok
+PREPARE ctas_ine_query AS SELECT 1 / 0;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE ctas_ine_tbl AS EXECUTE ctas_ine_query; -- error
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+ CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS EXECUTE ctas_ine_query; -- ok
+DROP TABLE ctas_ine_tbl;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/strings.out b/ydb/library/yql/tests/postgresql/original/cases/strings.out
new file mode 100644
index 0000000000..91aa819804
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/strings.out
@@ -0,0 +1,2272 @@
+--
+-- STRINGS
+-- Test various data entry syntaxes.
+--
+-- SQL string continuation syntax
+-- E021-03 character string literals
+SELECT 'first line'
+' - next line'
+ ' - third line'
+ AS "Three lines to one";
+ Three lines to one
+-------------------------------------
+ first line - next line - third line
+(1 row)
+
+-- illegal string continuation syntax
+SELECT 'first line'
+' - next line' /* this comment is not allowed here */
+' - third line'
+ AS "Illegal comment within continuation";
+ERROR: syntax error at or near "' - third line'"
+LINE 3: ' - third line'
+ ^
+-- Unicode escapes
+SET standard_conforming_strings TO on;
+SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
+ data
+------
+ data
+(1 row)
+
+SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*';
+ dat\+000061
+-------------
+ dat\+000061
+(1 row)
+
+SELECT U&'a\\b' AS "a\b";
+ a\b
+-----
+ a\b
+(1 row)
+
+SELECT U&' \' UESCAPE '!' AS "tricky";
+ tricky
+--------
+ \
+(1 row)
+
+SELECT 'tricky' AS U&"\" UESCAPE '!';
+ \
+--------
+ tricky
+(1 row)
+
+SELECT U&'wrong: \061';
+ERROR: invalid Unicode escape
+LINE 1: SELECT U&'wrong: \061';
+ ^
+HINT: Unicode escapes must be \XXXX or \+XXXXXX.
+SELECT U&'wrong: \+0061';
+ERROR: invalid Unicode escape
+LINE 1: SELECT U&'wrong: \+0061';
+ ^
+HINT: Unicode escapes must be \XXXX or \+XXXXXX.
+SELECT U&'wrong: +0061' UESCAPE +;
+ERROR: UESCAPE must be followed by a simple string literal at or near "+"
+LINE 1: SELECT U&'wrong: +0061' UESCAPE +;
+ ^
+SELECT U&'wrong: +0061' UESCAPE '+';
+ERROR: invalid Unicode escape character at or near "'+'"
+LINE 1: SELECT U&'wrong: +0061' UESCAPE '+';
+ ^
+SELECT U&'wrong: \db99';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \db99';
+ ^
+SELECT U&'wrong: \db99xy';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \db99xy';
+ ^
+SELECT U&'wrong: \db99\\';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \db99\\';
+ ^
+SELECT U&'wrong: \db99\0061';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \db99\0061';
+ ^
+SELECT U&'wrong: \+00db99\+000061';
+ERROR: invalid Unicode surrogate pair
+LINE 1: SELECT U&'wrong: \+00db99\+000061';
+ ^
+SELECT U&'wrong: \+2FFFFF';
+ERROR: invalid Unicode escape value
+LINE 1: SELECT U&'wrong: \+2FFFFF';
+ ^
+-- while we're here, check the same cases in E-style literals
+SELECT E'd\u0061t\U00000061' AS "data";
+ data
+------
+ data
+(1 row)
+
+SELECT E'a\\b' AS "a\b";
+ a\b
+-----
+ a\b
+(1 row)
+
+SELECT E'wrong: \u061';
+ERROR: invalid Unicode escape
+LINE 1: SELECT E'wrong: \u061';
+ ^
+HINT: Unicode escapes must be \uXXXX or \UXXXXXXXX.
+SELECT E'wrong: \U0061';
+ERROR: invalid Unicode escape
+LINE 1: SELECT E'wrong: \U0061';
+ ^
+HINT: Unicode escapes must be \uXXXX or \UXXXXXXXX.
+SELECT E'wrong: \udb99';
+ERROR: invalid Unicode surrogate pair at or near "'"
+LINE 1: SELECT E'wrong: \udb99';
+ ^
+SELECT E'wrong: \udb99xy';
+ERROR: invalid Unicode surrogate pair at or near "x"
+LINE 1: SELECT E'wrong: \udb99xy';
+ ^
+SELECT E'wrong: \udb99\\';
+ERROR: invalid Unicode surrogate pair at or near "\"
+LINE 1: SELECT E'wrong: \udb99\\';
+ ^
+SELECT E'wrong: \udb99\u0061';
+ERROR: invalid Unicode surrogate pair at or near "\u0061"
+LINE 1: SELECT E'wrong: \udb99\u0061';
+ ^
+SELECT E'wrong: \U0000db99\U00000061';
+ERROR: invalid Unicode surrogate pair at or near "\U00000061"
+LINE 1: SELECT E'wrong: \U0000db99\U00000061';
+ ^
+SELECT E'wrong: \U002FFFFF';
+ERROR: invalid Unicode escape value at or near "\U002FFFFF"
+LINE 1: SELECT E'wrong: \U002FFFFF';
+ ^
+SET standard_conforming_strings TO off;
+SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
+ERROR: unsafe use of string constant with Unicode escapes
+LINE 1: SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
+ ^
+DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*';
+ERROR: unsafe use of string constant with Unicode escapes
+LINE 1: SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061...
+ ^
+DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+SELECT U&' \' UESCAPE '!' AS "tricky";
+ERROR: unsafe use of string constant with Unicode escapes
+LINE 1: SELECT U&' \' UESCAPE '!' AS "tricky";
+ ^
+DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+SELECT 'tricky' AS U&"\" UESCAPE '!';
+ \
+--------
+ tricky
+(1 row)
+
+SELECT U&'wrong: \061';
+ERROR: unsafe use of string constant with Unicode escapes
+LINE 1: SELECT U&'wrong: \061';
+ ^
+DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+SELECT U&'wrong: \+0061';
+ERROR: unsafe use of string constant with Unicode escapes
+LINE 1: SELECT U&'wrong: \+0061';
+ ^
+DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+SELECT U&'wrong: +0061' UESCAPE '+';
+ERROR: unsafe use of string constant with Unicode escapes
+LINE 1: SELECT U&'wrong: +0061' UESCAPE '+';
+ ^
+DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off.
+RESET standard_conforming_strings;
+-- bytea
+SET bytea_output TO hex;
+SELECT E'\\xDeAdBeEf'::bytea;
+ bytea
+------------
+ \xdeadbeef
+(1 row)
+
+SELECT E'\\x De Ad Be Ef '::bytea;
+ bytea
+------------
+ \xdeadbeef
+(1 row)
+
+SELECT E'\\xDeAdBeE'::bytea;
+ERROR: invalid hexadecimal data: odd number of digits
+LINE 1: SELECT E'\\xDeAdBeE'::bytea;
+ ^
+SELECT E'\\xDeAdBeEx'::bytea;
+ERROR: invalid hexadecimal digit: "x"
+LINE 1: SELECT E'\\xDeAdBeEx'::bytea;
+ ^
+SELECT E'\\xDe00BeEf'::bytea;
+ bytea
+------------
+ \xde00beef
+(1 row)
+
+SELECT E'DeAdBeEf'::bytea;
+ bytea
+--------------------
+ \x4465416442654566
+(1 row)
+
+SELECT E'De\\000dBeEf'::bytea;
+ bytea
+--------------------
+ \x4465006442654566
+(1 row)
+
+SELECT E'De\123dBeEf'::bytea;
+ bytea
+--------------------
+ \x4465536442654566
+(1 row)
+
+SELECT E'De\\123dBeEf'::bytea;
+ bytea
+--------------------
+ \x4465536442654566
+(1 row)
+
+SELECT E'De\\678dBeEf'::bytea;
+ERROR: invalid input syntax for type bytea
+LINE 1: SELECT E'De\\678dBeEf'::bytea;
+ ^
+SET bytea_output TO escape;
+SELECT E'\\xDeAdBeEf'::bytea;
+ bytea
+------------------
+ \336\255\276\357
+(1 row)
+
+SELECT E'\\x De Ad Be Ef '::bytea;
+ bytea
+------------------
+ \336\255\276\357
+(1 row)
+
+SELECT E'\\xDe00BeEf'::bytea;
+ bytea
+------------------
+ \336\000\276\357
+(1 row)
+
+SELECT E'DeAdBeEf'::bytea;
+ bytea
+----------
+ DeAdBeEf
+(1 row)
+
+SELECT E'De\\000dBeEf'::bytea;
+ bytea
+-------------
+ De\000dBeEf
+(1 row)
+
+SELECT E'De\\123dBeEf'::bytea;
+ bytea
+----------
+ DeSdBeEf
+(1 row)
+
+--
+-- test conversions between various string types
+-- E021-10 implicit casting among the character data types
+--
+SELECT CAST(f1 AS text) AS "text(char)" FROM CHAR_TBL;
+ text(char)
+------------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
+SELECT CAST(f1 AS text) AS "text(varchar)" FROM VARCHAR_TBL;
+ text(varchar)
+---------------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
+SELECT CAST(name 'namefield' AS text) AS "text(name)";
+ text(name)
+------------
+ namefield
+(1 row)
+
+-- since this is an explicit cast, it should truncate w/o error:
+SELECT CAST(f1 AS char(10)) AS "char(text)" FROM TEXT_TBL;
+ char(text)
+------------
+ doh!
+ hi de ho n
+(2 rows)
+
+-- note: implicit-cast case is tested in char.sql
+SELECT CAST(f1 AS char(20)) AS "char(text)" FROM TEXT_TBL;
+ char(text)
+----------------------
+ doh!
+ hi de ho neighbor
+(2 rows)
+
+SELECT CAST(f1 AS char(10)) AS "char(varchar)" FROM VARCHAR_TBL;
+ char(varchar)
+---------------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
+SELECT CAST(name 'namefield' AS char(10)) AS "char(name)";
+ char(name)
+------------
+ namefield
+(1 row)
+
+SELECT CAST(f1 AS varchar) AS "varchar(text)" FROM TEXT_TBL;
+ varchar(text)
+-------------------
+ doh!
+ hi de ho neighbor
+(2 rows)
+
+SELECT CAST(f1 AS varchar) AS "varchar(char)" FROM CHAR_TBL;
+ varchar(char)
+---------------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
+SELECT CAST(name 'namefield' AS varchar) AS "varchar(name)";
+ varchar(name)
+---------------
+ namefield
+(1 row)
+
+--
+-- test SQL string functions
+-- E### and T### are feature reference numbers from SQL99
+--
+-- E021-09 trim function
+SELECT TRIM(BOTH FROM ' bunch o blanks ') = 'bunch o blanks' AS "bunch o blanks";
+ bunch o blanks
+----------------
+ t
+(1 row)
+
+SELECT TRIM(LEADING FROM ' bunch o blanks ') = 'bunch o blanks ' AS "bunch o blanks ";
+ bunch o blanks
+------------------
+ t
+(1 row)
+
+SELECT TRIM(TRAILING FROM ' bunch o blanks ') = ' bunch o blanks' AS " bunch o blanks";
+ bunch o blanks
+------------------
+ t
+(1 row)
+
+SELECT TRIM(BOTH 'x' FROM 'xxxxxsome Xsxxxxx') = 'some Xs' AS "some Xs";
+ some Xs
+---------
+ t
+(1 row)
+
+-- E021-06 substring expression
+SELECT SUBSTRING('1234567890' FROM 3) = '34567890' AS "34567890";
+ 34567890
+----------
+ t
+(1 row)
+
+SELECT SUBSTRING('1234567890' FROM 4 FOR 3) = '456' AS "456";
+ 456
+-----
+ t
+(1 row)
+
+-- test overflow cases
+SELECT SUBSTRING('string' FROM 2 FOR 2147483646) AS "tring";
+ tring
+-------
+ tring
+(1 row)
+
+SELECT SUBSTRING('string' FROM -10 FOR 2147483646) AS "string";
+ string
+--------
+ string
+(1 row)
+
+SELECT SUBSTRING('string' FROM -10 FOR -2147483646) AS "error";
+ERROR: negative substring length not allowed
+-- T581 regular expression substring (with SQL's bizarre regexp syntax)
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"(b_d)#"%' ESCAPE '#') AS "bcd";
+ bcd
+-----
+ bcd
+(1 row)
+
+-- obsolete SQL99 syntax
+SELECT SUBSTRING('abcdefg' FROM 'a#"(b_d)#"%' FOR '#') AS "bcd";
+ bcd
+-----
+ bcd
+(1 row)
+
+-- No match should return NULL
+SELECT SUBSTRING('abcdefg' SIMILAR '#"(b_d)#"%' ESCAPE '#') IS NULL AS "True";
+ True
+------
+ t
+(1 row)
+
+-- Null inputs should return NULL
+SELECT SUBSTRING('abcdefg' SIMILAR '%' ESCAPE NULL) IS NULL AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT SUBSTRING(NULL SIMILAR '%' ESCAPE '#') IS NULL AS "True";
+ True
+------
+ t
+(1 row)
+
+SELECT SUBSTRING('abcdefg' SIMILAR NULL ESCAPE '#') IS NULL AS "True";
+ True
+------
+ t
+(1 row)
+
+-- The first and last parts should act non-greedy
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%#"g' ESCAPE '#') AS "bcdef";
+ bcdef
+-------
+ bcdef
+(1 row)
+
+SELECT SUBSTRING('abcdefg' SIMILAR 'a*#"%#"g*' ESCAPE '#') AS "abcdefg";
+ abcdefg
+---------
+ abcdefg
+(1 row)
+
+-- Vertical bar in any part affects only that part
+SELECT SUBSTRING('abcdefg' SIMILAR 'a|b#"%#"g' ESCAPE '#') AS "bcdef";
+ bcdef
+-------
+ bcdef
+(1 row)
+
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%#"x|g' ESCAPE '#') AS "bcdef";
+ bcdef
+-------
+ bcdef
+(1 row)
+
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%|ab#"g' ESCAPE '#') AS "bcdef";
+ bcdef
+-------
+ bcdef
+(1 row)
+
+-- Can't have more than two part separators
+SELECT SUBSTRING('abcdefg' SIMILAR 'a*#"%#"g*#"x' ESCAPE '#') AS "error";
+ERROR: SQL regular expression may not contain more than two escape-double-quote separators
+CONTEXT: SQL function "substring" statement 1
+-- Postgres extension: with 0 or 1 separator, assume parts 1 and 3 are empty
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%g' ESCAPE '#') AS "bcdefg";
+ bcdefg
+--------
+ bcdefg
+(1 row)
+
+SELECT SUBSTRING('abcdefg' SIMILAR 'a%g' ESCAPE '#') AS "abcdefg";
+ abcdefg
+---------
+ abcdefg
+(1 row)
+
+-- substring() with just two arguments is not allowed by SQL spec;
+-- we accept it, but we interpret the pattern as a POSIX regexp not SQL
+SELECT SUBSTRING('abcdefg' FROM 'c.e') AS "cde";
+ cde
+-----
+ cde
+(1 row)
+
+-- With a parenthesized subexpression, return only what matches the subexpr
+SELECT SUBSTRING('abcdefg' FROM 'b(.*)f') AS "cde";
+ cde
+-----
+ cde
+(1 row)
+
+-- Check behavior of SIMILAR TO, which uses largely the same regexp variant
+SELECT 'abcdefg' SIMILAR TO '_bcd%' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT 'abcdefg' SIMILAR TO 'bcd%' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '#' AS false;
+ false
+-------
+ f
+(1 row)
+
+SELECT 'abcd%' SIMILAR TO '_bcd#%' ESCAPE '#' AS true;
+ true
+------
+ t
+(1 row)
+
+-- Postgres uses '\' as the default escape character, which is not per spec
+SELECT 'abcdefg' SIMILAR TO '_bcd\%' AS false;
+ false
+-------
+ f
+(1 row)
+
+-- and an empty string to mean "no escape", which is also not per spec
+SELECT 'abcd\efg' SIMILAR TO '_bcd\%' ESCAPE '' AS true;
+ true
+------
+ t
+(1 row)
+
+-- these behaviors are per spec, though:
+SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null;
+ null
+------
+
+(1 row)
+
+SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error;
+ERROR: invalid escape string
+HINT: Escape string must be empty or one character.
+-- Test back reference in regexp_replace
+SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3');
+ regexp_replace
+----------------
+ (111) 222-3333
+(1 row)
+
+SELECT regexp_replace('AAA BBB CCC ', E'\\s+', ' ', 'g');
+ regexp_replace
+----------------
+ AAA BBB CCC
+(1 row)
+
+SELECT regexp_replace('AAA', '^|$', 'Z', 'g');
+ regexp_replace
+----------------
+ ZAAAZ
+(1 row)
+
+SELECT regexp_replace('AAA aaa', 'A+', 'Z', 'gi');
+ regexp_replace
+----------------
+ Z Z
+(1 row)
+
+-- invalid regexp option
+SELECT regexp_replace('AAA aaa', 'A+', 'Z', 'z');
+ERROR: invalid regular expression option: "z"
+-- set so we can tell NULL from empty string
+\pset null '\\N'
+-- return all matches from regexp
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque)$re$);
+ regexp_matches
+----------------
+ {bar,beque}
+(1 row)
+
+-- test case insensitive
+SELECT regexp_matches('foObARbEqUEbAz', $re$(bar)(beque)$re$, 'i');
+ regexp_matches
+----------------
+ {bAR,bEqUE}
+(1 row)
+
+-- global option - more than one match
+SELECT regexp_matches('foobarbequebazilbarfbonk', $re$(b[^b]+)(b[^b]+)$re$, 'g');
+ regexp_matches
+----------------
+ {bar,beque}
+ {bazil,barf}
+(2 rows)
+
+-- empty capture group (matched empty string)
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(.*)(beque)$re$);
+ regexp_matches
+----------------
+ {bar,"",beque}
+(1 row)
+
+-- no match
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(.+)(beque)$re$);
+ regexp_matches
+----------------
+(0 rows)
+
+-- optional capture group did not match, null entry in array
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(.+)?(beque)$re$);
+ regexp_matches
+------------------
+ {bar,NULL,beque}
+(1 row)
+
+-- no capture groups
+SELECT regexp_matches('foobarbequebaz', $re$barbeque$re$);
+ regexp_matches
+----------------
+ {barbeque}
+(1 row)
+
+-- start/end-of-line matches are of zero length
+SELECT regexp_matches('foo' || chr(10) || 'bar' || chr(10) || 'bequq' || chr(10) || 'baz', '^', 'mg');
+ regexp_matches
+----------------
+ {""}
+ {""}
+ {""}
+ {""}
+(4 rows)
+
+SELECT regexp_matches('foo' || chr(10) || 'bar' || chr(10) || 'bequq' || chr(10) || 'baz', '$', 'mg');
+ regexp_matches
+----------------
+ {""}
+ {""}
+ {""}
+ {""}
+(4 rows)
+
+SELECT regexp_matches('1' || chr(10) || '2' || chr(10) || '3' || chr(10) || '4' || chr(10), '^.?', 'mg');
+ regexp_matches
+----------------
+ {1}
+ {2}
+ {3}
+ {4}
+ {""}
+(5 rows)
+
+SELECT regexp_matches(chr(10) || '1' || chr(10) || '2' || chr(10) || '3' || chr(10) || '4' || chr(10), '.?$', 'mg');
+ regexp_matches
+----------------
+ {""}
+ {1}
+ {""}
+ {2}
+ {""}
+ {3}
+ {""}
+ {4}
+ {""}
+ {""}
+(10 rows)
+
+SELECT regexp_matches(chr(10) || '1' || chr(10) || '2' || chr(10) || '3' || chr(10) || '4', '.?$', 'mg');
+ regexp_matches
+----------------
+ {""}
+ {1}
+ {""}
+ {2}
+ {""}
+ {3}
+ {""}
+ {4}
+ {""}
+(9 rows)
+
+-- give me errors
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque)$re$, 'gz');
+ERROR: invalid regular expression option: "z"
+SELECT regexp_matches('foobarbequebaz', $re$(barbeque$re$);
+ERROR: invalid regular expression: parentheses () not balanced
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque){2,1}$re$);
+ERROR: invalid regular expression: invalid repetition count(s)
+-- split string on regexp
+SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', $re$\s+$re$) AS foo;
+ foo | length
+-------+--------
+ the | 3
+ quick | 5
+ brown | 5
+ fox | 3
+ jumps | 5
+ over | 4
+ the | 3
+ lazy | 4
+ dog | 3
+(9 rows)
+
+SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', $re$\s+$re$);
+ regexp_split_to_array
+-----------------------------------------------
+ {the,quick,brown,fox,jumps,over,the,lazy,dog}
+(1 row)
+
+SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', $re$\s*$re$) AS foo;
+ foo | length
+-----+--------
+ t | 1
+ h | 1
+ e | 1
+ q | 1
+ u | 1
+ i | 1
+ c | 1
+ k | 1
+ b | 1
+ r | 1
+ o | 1
+ w | 1
+ n | 1
+ f | 1
+ o | 1
+ x | 1
+ j | 1
+ u | 1
+ m | 1
+ p | 1
+ s | 1
+ o | 1
+ v | 1
+ e | 1
+ r | 1
+ t | 1
+ h | 1
+ e | 1
+ l | 1
+ a | 1
+ z | 1
+ y | 1
+ d | 1
+ o | 1
+ g | 1
+(35 rows)
+
+SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', $re$\s*$re$);
+ regexp_split_to_array
+-------------------------------------------------------------------------
+ {t,h,e,q,u,i,c,k,b,r,o,w,n,f,o,x,j,u,m,p,s,o,v,e,r,t,h,e,l,a,z,y,d,o,g}
+(1 row)
+
+SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', '') AS foo;
+ foo | length
+-----+--------
+ t | 1
+ h | 1
+ e | 1
+ | 1
+ q | 1
+ u | 1
+ i | 1
+ c | 1
+ k | 1
+ | 1
+ b | 1
+ r | 1
+ o | 1
+ w | 1
+ n | 1
+ | 1
+ f | 1
+ o | 1
+ x | 1
+ | 1
+ j | 1
+ u | 1
+ m | 1
+ p | 1
+ s | 1
+ | 1
+ o | 1
+ v | 1
+ e | 1
+ r | 1
+ | 1
+ t | 1
+ h | 1
+ e | 1
+ | 1
+ l | 1
+ a | 1
+ z | 1
+ y | 1
+ | 1
+ d | 1
+ o | 1
+ g | 1
+(43 rows)
+
+SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', '');
+ regexp_split_to_array
+---------------------------------------------------------------------------------------------------------
+ {t,h,e," ",q,u,i,c,k," ",b,r,o,w,n," ",f,o,x," ",j,u,m,p,s," ",o,v,e,r," ",t,h,e," ",l,a,z,y," ",d,o,g}
+(1 row)
+
+-- case insensitive
+SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'i') AS foo;
+ foo | length
+---------------------------+--------
+ th | 2
+ QUick bROWn FOx jUMPs ov | 25
+ r Th | 4
+ lazy dOG | 9
+(4 rows)
+
+SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'i');
+ regexp_split_to_array
+-----------------------------------------------------
+ {th," QUick bROWn FOx jUMPs ov","r Th"," lazy dOG"}
+(1 row)
+
+-- no match of pattern
+SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', 'nomatch') AS foo;
+ foo | length
+---------------------------------------------+--------
+ the quick brown fox jumps over the lazy dog | 43
+(1 row)
+
+SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', 'nomatch');
+ regexp_split_to_array
+-------------------------------------------------
+ {"the quick brown fox jumps over the lazy dog"}
+(1 row)
+
+-- some corner cases
+SELECT regexp_split_to_array('123456','1');
+ regexp_split_to_array
+-----------------------
+ {"",23456}
+(1 row)
+
+SELECT regexp_split_to_array('123456','6');
+ regexp_split_to_array
+-----------------------
+ {12345,""}
+(1 row)
+
+SELECT regexp_split_to_array('123456','.');
+ regexp_split_to_array
+------------------------
+ {"","","","","","",""}
+(1 row)
+
+SELECT regexp_split_to_array('123456','');
+ regexp_split_to_array
+-----------------------
+ {1,2,3,4,5,6}
+(1 row)
+
+SELECT regexp_split_to_array('123456','(?:)');
+ regexp_split_to_array
+-----------------------
+ {1,2,3,4,5,6}
+(1 row)
+
+SELECT regexp_split_to_array('1','');
+ regexp_split_to_array
+-----------------------
+ {1}
+(1 row)
+
+-- errors
+SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'zippy') AS foo;
+ERROR: invalid regular expression option: "z"
+SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'iz');
+ERROR: invalid regular expression option: "z"
+-- global option meaningless for regexp_split
+SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'g') AS foo;
+ERROR: regexp_split_to_table() does not support the "global" option
+SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'g');
+ERROR: regexp_split_to_array() does not support the "global" option
+-- change NULL-display back
+\pset null ''
+-- E021-11 position expression
+SELECT POSITION('4' IN '1234567890') = '4' AS "4";
+ 4
+---
+ t
+(1 row)
+
+SELECT POSITION('5' IN '1234567890') = '5' AS "5";
+ 5
+---
+ t
+(1 row)
+
+-- T312 character overlay function
+SELECT OVERLAY('abcdef' PLACING '45' FROM 4) AS "abc45f";
+ abc45f
+--------
+ abc45f
+(1 row)
+
+SELECT OVERLAY('yabadoo' PLACING 'daba' FROM 5) AS "yabadaba";
+ yabadaba
+----------
+ yabadaba
+(1 row)
+
+SELECT OVERLAY('yabadoo' PLACING 'daba' FROM 5 FOR 0) AS "yabadabadoo";
+ yabadabadoo
+-------------
+ yabadabadoo
+(1 row)
+
+SELECT OVERLAY('babosa' PLACING 'ubb' FROM 2 FOR 4) AS "bubba";
+ bubba
+-------
+ bubba
+(1 row)
+
+--
+-- test LIKE
+-- Be sure to form every test as a LIKE/NOT LIKE pair.
+--
+-- simplest examples
+-- E061-04 like predicate
+SELECT 'hawkeye' LIKE 'h%' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'hawkeye' NOT LIKE 'h%' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'hawkeye' LIKE 'H%' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'hawkeye' NOT LIKE 'H%' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'hawkeye' LIKE 'indio%' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'hawkeye' NOT LIKE 'indio%' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'hawkeye' LIKE 'h%eye' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'hawkeye' NOT LIKE 'h%eye' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'indio' LIKE '_ndio' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'indio' NOT LIKE '_ndio' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'indio' LIKE 'in__o' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'indio' NOT LIKE 'in__o' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'indio' LIKE 'in_o' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'indio' NOT LIKE 'in_o' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'abc'::name LIKE '_b_' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'abc'::name NOT LIKE '_b_' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'abc'::bytea LIKE '_b_'::bytea AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'abc'::bytea NOT LIKE '_b_'::bytea AS "false";
+ false
+-------
+ f
+(1 row)
+
+-- unused escape character
+SELECT 'hawkeye' LIKE 'h%' ESCAPE '#' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'hawkeye' NOT LIKE 'h%' ESCAPE '#' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'indio' LIKE 'ind_o' ESCAPE '$' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'indio' NOT LIKE 'ind_o' ESCAPE '$' AS "false";
+ false
+-------
+ f
+(1 row)
+
+-- escape character
+-- E061-05 like predicate with escape clause
+SELECT 'h%' LIKE 'h#%' ESCAPE '#' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'h%' NOT LIKE 'h#%' ESCAPE '#' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'h%wkeye' LIKE 'h#%' ESCAPE '#' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'h%wkeye' NOT LIKE 'h#%' ESCAPE '#' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'h%wkeye' LIKE 'h#%%' ESCAPE '#' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'h%wkeye' NOT LIKE 'h#%%' ESCAPE '#' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'h%awkeye' LIKE 'h#%a%k%e' ESCAPE '#' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'h%awkeye' NOT LIKE 'h#%a%k%e' ESCAPE '#' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'indio' LIKE '_ndio' ESCAPE '$' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'indio' NOT LIKE '_ndio' ESCAPE '$' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'i_dio' LIKE 'i$_d_o' ESCAPE '$' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'i_dio' NOT LIKE 'i$_d_o' ESCAPE '$' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'i_dio' LIKE 'i$_nd_o' ESCAPE '$' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'i_dio' NOT LIKE 'i$_nd_o' ESCAPE '$' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'i_dio' LIKE 'i$_d%o' ESCAPE '$' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'i_dio' NOT LIKE 'i$_d%o' ESCAPE '$' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'a_c'::bytea LIKE 'a$__'::bytea ESCAPE '$'::bytea AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'a_c'::bytea NOT LIKE 'a$__'::bytea ESCAPE '$'::bytea AS "false";
+ false
+-------
+ f
+(1 row)
+
+-- escape character same as pattern character
+SELECT 'maca' LIKE 'm%aca' ESCAPE '%' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'maca' NOT LIKE 'm%aca' ESCAPE '%' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'ma%a' LIKE 'm%a%%a' ESCAPE '%' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'ma%a' NOT LIKE 'm%a%%a' ESCAPE '%' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'bear' LIKE 'b_ear' ESCAPE '_' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'bear' NOT LIKE 'b_ear' ESCAPE '_' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'be_r' LIKE 'b_e__r' ESCAPE '_' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'be_r' NOT LIKE 'b_e__r' ESCAPE '_' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'be_r' LIKE '__e__r' ESCAPE '_' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'be_r' NOT LIKE '__e__r' ESCAPE '_' AS "true";
+ true
+------
+ t
+(1 row)
+
+--
+-- test ILIKE (case-insensitive LIKE)
+-- Be sure to form every test as an ILIKE/NOT ILIKE pair.
+--
+SELECT 'hawkeye' ILIKE 'h%' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'hawkeye' NOT ILIKE 'h%' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'hawkeye' ILIKE 'H%' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'hawkeye' NOT ILIKE 'H%' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'hawkeye' ILIKE 'H%Eye' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'hawkeye' NOT ILIKE 'H%Eye' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'Hawkeye' ILIKE 'h%' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'Hawkeye' NOT ILIKE 'h%' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'ABC'::name ILIKE '_b_' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'ABC'::name NOT ILIKE '_b_' AS "false";
+ false
+-------
+ f
+(1 row)
+
+--
+-- test %/_ combination cases, cf bugs #4821 and #5478
+--
+SELECT 'foo' LIKE '_%' as t, 'f' LIKE '_%' as t, '' LIKE '_%' as f;
+ t | t | f
+---+---+---
+ t | t | f
+(1 row)
+
+SELECT 'foo' LIKE '%_' as t, 'f' LIKE '%_' as t, '' LIKE '%_' as f;
+ t | t | f
+---+---+---
+ t | t | f
+(1 row)
+
+SELECT 'foo' LIKE '__%' as t, 'foo' LIKE '___%' as t, 'foo' LIKE '____%' as f;
+ t | t | f
+---+---+---
+ t | t | f
+(1 row)
+
+SELECT 'foo' LIKE '%__' as t, 'foo' LIKE '%___' as t, 'foo' LIKE '%____' as f;
+ t | t | f
+---+---+---
+ t | t | f
+(1 row)
+
+SELECT 'jack' LIKE '%____%' AS t;
+ t
+---
+ t
+(1 row)
+
+--
+-- basic tests of LIKE with indexes
+--
+CREATE TABLE texttest (a text PRIMARY KEY, b int);
+SELECT * FROM texttest WHERE a LIKE '%1%';
+ a | b
+---+---
+(0 rows)
+
+CREATE TABLE byteatest (a bytea PRIMARY KEY, b int);
+SELECT * FROM byteatest WHERE a LIKE '%1%';
+ a | b
+---+---
+(0 rows)
+
+DROP TABLE texttest, byteatest;
+--
+-- test implicit type conversion
+--
+-- E021-07 character concatenation
+SELECT 'unknown' || ' and unknown' AS "Concat unknown types";
+ Concat unknown types
+----------------------
+ unknown and unknown
+(1 row)
+
+SELECT text 'text' || ' and unknown' AS "Concat text to unknown type";
+ Concat text to unknown type
+-----------------------------
+ text and unknown
+(1 row)
+
+SELECT char(20) 'characters' || ' and text' AS "Concat char to unknown type";
+ Concat char to unknown type
+-----------------------------
+ characters and text
+(1 row)
+
+SELECT text 'text' || char(20) ' and characters' AS "Concat text to char";
+ Concat text to char
+---------------------
+ text and characters
+(1 row)
+
+SELECT text 'text' || varchar ' and varchar' AS "Concat text to varchar";
+ Concat text to varchar
+------------------------
+ text and varchar
+(1 row)
+
+--
+-- test substr with toasted text values
+--
+CREATE TABLE toasttest(f1 text);
+insert into toasttest values(repeat('1234567890',10000));
+insert into toasttest values(repeat('1234567890',10000));
+--
+-- Ensure that some values are uncompressed, to test the faster substring
+-- operation used in that case
+--
+alter table toasttest alter column f1 set storage external;
+insert into toasttest values(repeat('1234567890',10000));
+insert into toasttest values(repeat('1234567890',10000));
+-- If the starting position is zero or less, then return from the start of the string
+-- adjusting the length to be consistent with the "negative start" per SQL.
+SELECT substr(f1, -1, 5) from toasttest;
+ substr
+--------
+ 123
+ 123
+ 123
+ 123
+(4 rows)
+
+-- If the length is less than zero, an ERROR is thrown.
+SELECT substr(f1, 5, -1) from toasttest;
+ERROR: negative substring length not allowed
+-- If no third argument (length) is provided, the length to the end of the
+-- string is assumed.
+SELECT substr(f1, 99995) from toasttest;
+ substr
+--------
+ 567890
+ 567890
+ 567890
+ 567890
+(4 rows)
+
+-- If start plus length is > string length, the result is truncated to
+-- string length
+SELECT substr(f1, 99995, 10) from toasttest;
+ substr
+--------
+ 567890
+ 567890
+ 567890
+ 567890
+(4 rows)
+
+TRUNCATE TABLE toasttest;
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+-- expect >0 blocks
+SELECT pg_relation_size(reltoastrelid) = 0 AS is_empty
+ FROM pg_class where relname = 'toasttest';
+ is_empty
+----------
+ f
+(1 row)
+
+TRUNCATE TABLE toasttest;
+ALTER TABLE toasttest set (toast_tuple_target = 4080);
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+-- expect 0 blocks
+SELECT pg_relation_size(reltoastrelid) = 0 AS is_empty
+ FROM pg_class where relname = 'toasttest';
+ is_empty
+----------
+ t
+(1 row)
+
+DROP TABLE toasttest;
+--
+-- test substr with toasted bytea values
+--
+CREATE TABLE toasttest(f1 bytea);
+insert into toasttest values(decode(repeat('1234567890',10000),'escape'));
+insert into toasttest values(decode(repeat('1234567890',10000),'escape'));
+--
+-- Ensure that some values are uncompressed, to test the faster substring
+-- operation used in that case
+--
+alter table toasttest alter column f1 set storage external;
+insert into toasttest values(decode(repeat('1234567890',10000),'escape'));
+insert into toasttest values(decode(repeat('1234567890',10000),'escape'));
+-- If the starting position is zero or less, then return from the start of the string
+-- adjusting the length to be consistent with the "negative start" per SQL.
+SELECT substr(f1, -1, 5) from toasttest;
+ substr
+--------
+ 123
+ 123
+ 123
+ 123
+(4 rows)
+
+-- If the length is less than zero, an ERROR is thrown.
+SELECT substr(f1, 5, -1) from toasttest;
+ERROR: negative substring length not allowed
+-- If no third argument (length) is provided, the length to the end of the
+-- string is assumed.
+SELECT substr(f1, 99995) from toasttest;
+ substr
+--------
+ 567890
+ 567890
+ 567890
+ 567890
+(4 rows)
+
+-- If start plus length is > string length, the result is truncated to
+-- string length
+SELECT substr(f1, 99995, 10) from toasttest;
+ substr
+--------
+ 567890
+ 567890
+ 567890
+ 567890
+(4 rows)
+
+DROP TABLE toasttest;
+-- test internally compressing datums
+-- this tests compressing a datum to a very small size which exercises a
+-- corner case in packed-varlena handling: even though small, the compressed
+-- datum must be given a 4-byte header because there are no bits to indicate
+-- compression in a 1-byte header
+CREATE TABLE toasttest (c char(4096));
+INSERT INTO toasttest VALUES('x');
+SELECT length(c), c::text FROM toasttest;
+ length | c
+--------+---
+ 1 | x
+(1 row)
+
+SELECT c FROM toasttest;
+ c
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ x
+(1 row)
+
+DROP TABLE toasttest;
+--
+-- test length
+--
+SELECT length('abcdef') AS "length_6";
+ length_6
+----------
+ 6
+(1 row)
+
+--
+-- test strpos
+--
+SELECT strpos('abcdef', 'cd') AS "pos_3";
+ pos_3
+-------
+ 3
+(1 row)
+
+SELECT strpos('abcdef', 'xy') AS "pos_0";
+ pos_0
+-------
+ 0
+(1 row)
+
+SELECT strpos('abcdef', '') AS "pos_1";
+ pos_1
+-------
+ 1
+(1 row)
+
+SELECT strpos('', 'xy') AS "pos_0";
+ pos_0
+-------
+ 0
+(1 row)
+
+SELECT strpos('', '') AS "pos_1";
+ pos_1
+-------
+ 1
+(1 row)
+
+--
+-- test replace
+--
+SELECT replace('abcdef', 'de', '45') AS "abc45f";
+ abc45f
+--------
+ abc45f
+(1 row)
+
+SELECT replace('yabadabadoo', 'ba', '123') AS "ya123da123doo";
+ ya123da123doo
+---------------
+ ya123da123doo
+(1 row)
+
+SELECT replace('yabadoo', 'bad', '') AS "yaoo";
+ yaoo
+------
+ yaoo
+(1 row)
+
+--
+-- test split_part
+--
+select split_part('','@',1) AS "empty string";
+ empty string
+--------------
+
+(1 row)
+
+select split_part('','@',-1) AS "empty string";
+ empty string
+--------------
+
+(1 row)
+
+select split_part('joeuser@mydatabase','',1) AS "joeuser@mydatabase";
+ joeuser@mydatabase
+--------------------
+ joeuser@mydatabase
+(1 row)
+
+select split_part('joeuser@mydatabase','',2) AS "empty string";
+ empty string
+--------------
+
+(1 row)
+
+select split_part('joeuser@mydatabase','',-1) AS "joeuser@mydatabase";
+ joeuser@mydatabase
+--------------------
+ joeuser@mydatabase
+(1 row)
+
+select split_part('joeuser@mydatabase','',-2) AS "empty string";
+ empty string
+--------------
+
+(1 row)
+
+select split_part('joeuser@mydatabase','@',0) AS "an error";
+ERROR: field position must not be zero
+select split_part('joeuser@mydatabase','@@',1) AS "joeuser@mydatabase";
+ joeuser@mydatabase
+--------------------
+ joeuser@mydatabase
+(1 row)
+
+select split_part('joeuser@mydatabase','@@',2) AS "empty string";
+ empty string
+--------------
+
+(1 row)
+
+select split_part('joeuser@mydatabase','@',1) AS "joeuser";
+ joeuser
+---------
+ joeuser
+(1 row)
+
+select split_part('joeuser@mydatabase','@',2) AS "mydatabase";
+ mydatabase
+------------
+ mydatabase
+(1 row)
+
+select split_part('joeuser@mydatabase','@',3) AS "empty string";
+ empty string
+--------------
+
+(1 row)
+
+select split_part('@joeuser@mydatabase@','@',2) AS "joeuser";
+ joeuser
+---------
+ joeuser
+(1 row)
+
+select split_part('joeuser@mydatabase','@',-1) AS "mydatabase";
+ mydatabase
+------------
+ mydatabase
+(1 row)
+
+select split_part('joeuser@mydatabase','@',-2) AS "joeuser";
+ joeuser
+---------
+ joeuser
+(1 row)
+
+select split_part('joeuser@mydatabase','@',-3) AS "empty string";
+ empty string
+--------------
+
+(1 row)
+
+select split_part('@joeuser@mydatabase@','@',-2) AS "mydatabase";
+ mydatabase
+------------
+ mydatabase
+(1 row)
+
+--
+-- test to_hex
+--
+select to_hex(256*256*256 - 1) AS "ffffff";
+ ffffff
+--------
+ ffffff
+(1 row)
+
+select to_hex(256::bigint*256::bigint*256::bigint*256::bigint - 1) AS "ffffffff";
+ ffffffff
+----------
+ ffffffff
+(1 row)
+
+--
+-- MD5 test suite - from IETF RFC 1321
+-- (see: ftp://ftp.rfc-editor.org/in-notes/rfc1321.txt)
+--
+select md5('') = 'd41d8cd98f00b204e9800998ecf8427e' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('a') = '0cc175b9c0f1b6a831c399e269772661' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('abc') = '900150983cd24fb0d6963f7d28e17f72' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('message digest') = 'f96b697d7cb7938d525a2f31aaf161d0' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('abcdefghijklmnopqrstuvwxyz') = 'c3fcd3d76192e4007dfb496cca67e13b' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789') = 'd174ab98d277d9f5a5611c2c9f419d9f' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('12345678901234567890123456789012345678901234567890123456789012345678901234567890') = '57edf4a22be3c955ac49da2e2107b67a' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5(''::bytea) = 'd41d8cd98f00b204e9800998ecf8427e' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('a'::bytea) = '0cc175b9c0f1b6a831c399e269772661' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('abc'::bytea) = '900150983cd24fb0d6963f7d28e17f72' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('message digest'::bytea) = 'f96b697d7cb7938d525a2f31aaf161d0' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('abcdefghijklmnopqrstuvwxyz'::bytea) = 'c3fcd3d76192e4007dfb496cca67e13b' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'::bytea) = 'd174ab98d277d9f5a5611c2c9f419d9f' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+select md5('12345678901234567890123456789012345678901234567890123456789012345678901234567890'::bytea) = '57edf4a22be3c955ac49da2e2107b67a' AS "TRUE";
+ TRUE
+------
+ t
+(1 row)
+
+--
+-- SHA-2
+--
+SET bytea_output TO hex;
+SELECT sha224('');
+ sha224
+------------------------------------------------------------
+ \xd14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f
+(1 row)
+
+SELECT sha224('The quick brown fox jumps over the lazy dog.');
+ sha224
+------------------------------------------------------------
+ \x619cba8e8e05826e9b8c519c0a5c68f4fb653e8a3d8aa04bb2c8cd4c
+(1 row)
+
+SELECT sha256('');
+ sha256
+--------------------------------------------------------------------
+ \xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+(1 row)
+
+SELECT sha256('The quick brown fox jumps over the lazy dog.');
+ sha256
+--------------------------------------------------------------------
+ \xef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c
+(1 row)
+
+SELECT sha384('');
+ sha384
+----------------------------------------------------------------------------------------------------
+ \x38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b
+(1 row)
+
+SELECT sha384('The quick brown fox jumps over the lazy dog.');
+ sha384
+----------------------------------------------------------------------------------------------------
+ \xed892481d8272ca6df370bf706e4d7bc1b5739fa2177aae6c50e946678718fc67a7af2819a021c2fc34e91bdb63409d7
+(1 row)
+
+SELECT sha512('');
+ sha512
+------------------------------------------------------------------------------------------------------------------------------------
+ \xcf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e
+(1 row)
+
+SELECT sha512('The quick brown fox jumps over the lazy dog.');
+ sha512
+------------------------------------------------------------------------------------------------------------------------------------
+ \x91ea1245f20d46ae9a037a989f54f1f790f0a47607eeb8a14d12890cea77a1bbc6c7ed9cf205e67b7f2b8fd4c7dfd3a7a8617e45f3c463d481c7e586c39ac1ed
+(1 row)
+
+--
+-- encode/decode
+--
+SELECT encode('\x1234567890abcdef00', 'hex');
+ encode
+--------------------
+ 1234567890abcdef00
+(1 row)
+
+SELECT decode('1234567890abcdef00', 'hex');
+ decode
+----------------------
+ \x1234567890abcdef00
+(1 row)
+
+SELECT encode(('\x' || repeat('1234567890abcdef0001', 7))::bytea, 'base64');
+ encode
+------------------------------------------------------------------------------
+ EjRWeJCrze8AARI0VniQq83vAAESNFZ4kKvN7wABEjRWeJCrze8AARI0VniQq83vAAESNFZ4kKvN+
+ 7wABEjRWeJCrze8AAQ==
+(1 row)
+
+SELECT decode(encode(('\x' || repeat('1234567890abcdef0001', 7))::bytea,
+ 'base64'), 'base64');
+ decode
+------------------------------------------------------------------------------------------------------------------------------------------------
+ \x1234567890abcdef00011234567890abcdef00011234567890abcdef00011234567890abcdef00011234567890abcdef00011234567890abcdef00011234567890abcdef0001
+(1 row)
+
+SELECT encode('\x1234567890abcdef00', 'escape');
+ encode
+-----------------------------
+ \x124Vx\220\253\315\357\000
+(1 row)
+
+SELECT decode(encode('\x1234567890abcdef00', 'escape'), 'escape');
+ decode
+----------------------
+ \x1234567890abcdef00
+(1 row)
+
+--
+-- get_bit/set_bit etc
+--
+SELECT get_bit('\x1234567890abcdef00'::bytea, 43);
+ get_bit
+---------
+ 1
+(1 row)
+
+SELECT get_bit('\x1234567890abcdef00'::bytea, 99); -- error
+ERROR: index 99 out of valid range, 0..71
+SELECT set_bit('\x1234567890abcdef00'::bytea, 43, 0);
+ set_bit
+----------------------
+ \x1234567890a3cdef00
+(1 row)
+
+SELECT set_bit('\x1234567890abcdef00'::bytea, 99, 0); -- error
+ERROR: index 99 out of valid range, 0..71
+SELECT get_byte('\x1234567890abcdef00'::bytea, 3);
+ get_byte
+----------
+ 120
+(1 row)
+
+SELECT get_byte('\x1234567890abcdef00'::bytea, 99); -- error
+ERROR: index 99 out of valid range, 0..8
+SELECT set_byte('\x1234567890abcdef00'::bytea, 7, 11);
+ set_byte
+----------------------
+ \x1234567890abcd0b00
+(1 row)
+
+SELECT set_byte('\x1234567890abcdef00'::bytea, 99, 11); -- error
+ERROR: index 99 out of valid range, 0..8
+--
+-- test behavior of escape_string_warning and standard_conforming_strings options
+--
+set escape_string_warning = off;
+set standard_conforming_strings = off;
+show escape_string_warning;
+ escape_string_warning
+-----------------------
+ off
+(1 row)
+
+show standard_conforming_strings;
+ standard_conforming_strings
+-----------------------------
+ off
+(1 row)
+
+set escape_string_warning = on;
+set standard_conforming_strings = on;
+show escape_string_warning;
+ escape_string_warning
+-----------------------
+ on
+(1 row)
+
+show standard_conforming_strings;
+ standard_conforming_strings
+-----------------------------
+ on
+(1 row)
+
+select 'a\bcd' as f1, 'a\b''cd' as f2, 'a\b''''cd' as f3, 'abcd\' as f4, 'ab\''cd' as f5, '\\' as f6;
+ f1 | f2 | f3 | f4 | f5 | f6
+-------+--------+---------+-------+--------+----
+ a\bcd | a\b'cd | a\b''cd | abcd\ | ab\'cd | \\
+(1 row)
+
+set standard_conforming_strings = off;
+select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as f6;
+WARNING: nonstandard use of \\ in a string literal
+LINE 1: select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3,...
+ ^
+HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
+WARNING: nonstandard use of \\ in a string literal
+LINE 1: select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3,...
+ ^
+HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
+WARNING: nonstandard use of \\ in a string literal
+LINE 1: select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3,...
+ ^
+HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
+WARNING: nonstandard use of \\ in a string literal
+LINE 1: ...bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' ...
+ ^
+HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
+WARNING: nonstandard use of \\ in a string literal
+LINE 1: ...'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd'...
+ ^
+HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
+WARNING: nonstandard use of \\ in a string literal
+LINE 1: ...'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as ...
+ ^
+HINT: Use the escape string syntax for backslashes, e.g., E'\\'.
+ f1 | f2 | f3 | f4 | f5 | f6
+-------+--------+---------+-------+--------+----
+ a\bcd | a\b'cd | a\b''cd | abcd\ | ab\'cd | \\
+(1 row)
+
+set escape_string_warning = off;
+set standard_conforming_strings = on;
+select 'a\bcd' as f1, 'a\b''cd' as f2, 'a\b''''cd' as f3, 'abcd\' as f4, 'ab\''cd' as f5, '\\' as f6;
+ f1 | f2 | f3 | f4 | f5 | f6
+-------+--------+---------+-------+--------+----
+ a\bcd | a\b'cd | a\b''cd | abcd\ | ab\'cd | \\
+(1 row)
+
+set standard_conforming_strings = off;
+select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as f6;
+ f1 | f2 | f3 | f4 | f5 | f6
+-------+--------+---------+-------+--------+----
+ a\bcd | a\b'cd | a\b''cd | abcd\ | ab\'cd | \\
+(1 row)
+
+reset standard_conforming_strings;
+--
+-- Additional string functions
+--
+SET bytea_output TO escape;
+SELECT initcap('hi THOMAS');
+ initcap
+-----------
+ Hi Thomas
+(1 row)
+
+SELECT lpad('hi', 5, 'xy');
+ lpad
+-------
+ xyxhi
+(1 row)
+
+SELECT lpad('hi', 5);
+ lpad
+-------
+ hi
+(1 row)
+
+SELECT lpad('hi', -5, 'xy');
+ lpad
+------
+
+(1 row)
+
+SELECT lpad('hello', 2);
+ lpad
+------
+ he
+(1 row)
+
+SELECT lpad('hi', 5, '');
+ lpad
+------
+ hi
+(1 row)
+
+SELECT rpad('hi', 5, 'xy');
+ rpad
+-------
+ hixyx
+(1 row)
+
+SELECT rpad('hi', 5);
+ rpad
+-------
+ hi
+(1 row)
+
+SELECT rpad('hi', -5, 'xy');
+ rpad
+------
+
+(1 row)
+
+SELECT rpad('hello', 2);
+ rpad
+------
+ he
+(1 row)
+
+SELECT rpad('hi', 5, '');
+ rpad
+------
+ hi
+(1 row)
+
+SELECT ltrim('zzzytrim', 'xyz');
+ ltrim
+-------
+ trim
+(1 row)
+
+SELECT translate('', '14', 'ax');
+ translate
+-----------
+
+(1 row)
+
+SELECT translate('12345', '14', 'ax');
+ translate
+-----------
+ a23x5
+(1 row)
+
+SELECT ascii('x');
+ ascii
+-------
+ 120
+(1 row)
+
+SELECT ascii('');
+ ascii
+-------
+ 0
+(1 row)
+
+SELECT chr(65);
+ chr
+-----
+ A
+(1 row)
+
+SELECT chr(0);
+ERROR: null character not permitted
+SELECT repeat('Pg', 4);
+ repeat
+----------
+ PgPgPgPg
+(1 row)
+
+SELECT repeat('Pg', -4);
+ repeat
+--------
+
+(1 row)
+
+SELECT SUBSTRING('1234567890'::bytea FROM 3) "34567890";
+ 34567890
+----------
+ 34567890
+(1 row)
+
+SELECT SUBSTRING('1234567890'::bytea FROM 4 FOR 3) AS "456";
+ 456
+-----
+ 456
+(1 row)
+
+SELECT SUBSTRING('string'::bytea FROM 2 FOR 2147483646) AS "tring";
+ tring
+-------
+ tring
+(1 row)
+
+SELECT SUBSTRING('string'::bytea FROM -10 FOR 2147483646) AS "string";
+ string
+--------
+ string
+(1 row)
+
+SELECT SUBSTRING('string'::bytea FROM -10 FOR -2147483646) AS "error";
+ERROR: negative substring length not allowed
+SELECT trim(E'\\000'::bytea from E'\\000Tom\\000'::bytea);
+ btrim
+-------
+ Tom
+(1 row)
+
+SELECT trim(leading E'\\000'::bytea from E'\\000Tom\\000'::bytea);
+ ltrim
+---------
+ Tom\000
+(1 row)
+
+SELECT trim(trailing E'\\000'::bytea from E'\\000Tom\\000'::bytea);
+ rtrim
+---------
+ \000Tom
+(1 row)
+
+SELECT btrim(E'\\000trim\\000'::bytea, E'\\000'::bytea);
+ btrim
+-------
+ trim
+(1 row)
+
+SELECT btrim(''::bytea, E'\\000'::bytea);
+ btrim
+-------
+
+(1 row)
+
+SELECT btrim(E'\\000trim\\000'::bytea, ''::bytea);
+ btrim
+--------------
+ \000trim\000
+(1 row)
+
+SELECT encode(overlay(E'Th\\000omas'::bytea placing E'Th\\001omas'::bytea from 2),'escape');
+ encode
+-------------
+ TTh\x01omas
+(1 row)
+
+SELECT encode(overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 8),'escape');
+ encode
+--------------------
+ Th\000omas\x02\x03
+(1 row)
+
+SELECT encode(overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 5 for 3),'escape');
+ encode
+-----------------
+ Th\000o\x02\x03
+(1 row)
+
+SELECT bit_count('\x1234567890'::bytea);
+ bit_count
+-----------
+ 15
+(1 row)
+
+SELECT unistr('\0064at\+0000610');
+ unistr
+--------
+ data0
+(1 row)
+
+SELECT unistr('d\u0061t\U000000610');
+ unistr
+--------
+ data0
+(1 row)
+
+SELECT unistr('a\\b');
+ unistr
+--------
+ a\b
+(1 row)
+
+-- errors:
+SELECT unistr('wrong: \db99');
+ERROR: invalid Unicode surrogate pair
+SELECT unistr('wrong: \db99\0061');
+ERROR: invalid Unicode surrogate pair
+SELECT unistr('wrong: \+00db99\+000061');
+ERROR: invalid Unicode surrogate pair
+SELECT unistr('wrong: \+2FFFFF');
+ERROR: invalid Unicode code point: 2FFFFF
+SELECT unistr('wrong: \udb99\u0061');
+ERROR: invalid Unicode surrogate pair
+SELECT unistr('wrong: \U0000db99\U00000061');
+ERROR: invalid Unicode surrogate pair
+SELECT unistr('wrong: \U002FFFFF');
+ERROR: invalid Unicode code point: 2FFFFF
+SELECT unistr('wrong: \xyz');
+ERROR: invalid Unicode escape
+HINT: Unicode escapes must be \XXXX, \+XXXXXX, \uXXXX, or \UXXXXXXXX.
diff --git a/ydb/library/yql/tests/postgresql/original/cases/strings.sql b/ydb/library/yql/tests/postgresql/original/cases/strings.sql
new file mode 100644
index 0000000000..2c502534c2
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/strings.sql
@@ -0,0 +1,761 @@
+--
+-- STRINGS
+-- Test various data entry syntaxes.
+--
+
+-- SQL string continuation syntax
+-- E021-03 character string literals
+SELECT 'first line'
+' - next line'
+ ' - third line'
+ AS "Three lines to one";
+
+-- illegal string continuation syntax
+SELECT 'first line'
+' - next line' /* this comment is not allowed here */
+' - third line'
+ AS "Illegal comment within continuation";
+
+-- Unicode escapes
+SET standard_conforming_strings TO on;
+
+SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
+SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*';
+SELECT U&'a\\b' AS "a\b";
+
+SELECT U&' \' UESCAPE '!' AS "tricky";
+SELECT 'tricky' AS U&"\" UESCAPE '!';
+
+SELECT U&'wrong: \061';
+SELECT U&'wrong: \+0061';
+SELECT U&'wrong: +0061' UESCAPE +;
+SELECT U&'wrong: +0061' UESCAPE '+';
+
+SELECT U&'wrong: \db99';
+SELECT U&'wrong: \db99xy';
+SELECT U&'wrong: \db99\\';
+SELECT U&'wrong: \db99\0061';
+SELECT U&'wrong: \+00db99\+000061';
+SELECT U&'wrong: \+2FFFFF';
+
+-- while we're here, check the same cases in E-style literals
+SELECT E'd\u0061t\U00000061' AS "data";
+SELECT E'a\\b' AS "a\b";
+SELECT E'wrong: \u061';
+SELECT E'wrong: \U0061';
+SELECT E'wrong: \udb99';
+SELECT E'wrong: \udb99xy';
+SELECT E'wrong: \udb99\\';
+SELECT E'wrong: \udb99\u0061';
+SELECT E'wrong: \U0000db99\U00000061';
+SELECT E'wrong: \U002FFFFF';
+
+SET standard_conforming_strings TO off;
+
+SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061";
+SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*';
+
+SELECT U&' \' UESCAPE '!' AS "tricky";
+SELECT 'tricky' AS U&"\" UESCAPE '!';
+
+SELECT U&'wrong: \061';
+SELECT U&'wrong: \+0061';
+SELECT U&'wrong: +0061' UESCAPE '+';
+
+RESET standard_conforming_strings;
+
+-- bytea
+SET bytea_output TO hex;
+SELECT E'\\xDeAdBeEf'::bytea;
+SELECT E'\\x De Ad Be Ef '::bytea;
+SELECT E'\\xDeAdBeE'::bytea;
+SELECT E'\\xDeAdBeEx'::bytea;
+SELECT E'\\xDe00BeEf'::bytea;
+SELECT E'DeAdBeEf'::bytea;
+SELECT E'De\\000dBeEf'::bytea;
+SELECT E'De\123dBeEf'::bytea;
+SELECT E'De\\123dBeEf'::bytea;
+SELECT E'De\\678dBeEf'::bytea;
+
+SET bytea_output TO escape;
+SELECT E'\\xDeAdBeEf'::bytea;
+SELECT E'\\x De Ad Be Ef '::bytea;
+SELECT E'\\xDe00BeEf'::bytea;
+SELECT E'DeAdBeEf'::bytea;
+SELECT E'De\\000dBeEf'::bytea;
+SELECT E'De\\123dBeEf'::bytea;
+
+--
+-- test conversions between various string types
+-- E021-10 implicit casting among the character data types
+--
+
+SELECT CAST(f1 AS text) AS "text(char)" FROM CHAR_TBL;
+
+SELECT CAST(f1 AS text) AS "text(varchar)" FROM VARCHAR_TBL;
+
+SELECT CAST(name 'namefield' AS text) AS "text(name)";
+
+-- since this is an explicit cast, it should truncate w/o error:
+SELECT CAST(f1 AS char(10)) AS "char(text)" FROM TEXT_TBL;
+-- note: implicit-cast case is tested in char.sql
+
+SELECT CAST(f1 AS char(20)) AS "char(text)" FROM TEXT_TBL;
+
+SELECT CAST(f1 AS char(10)) AS "char(varchar)" FROM VARCHAR_TBL;
+
+SELECT CAST(name 'namefield' AS char(10)) AS "char(name)";
+
+SELECT CAST(f1 AS varchar) AS "varchar(text)" FROM TEXT_TBL;
+
+SELECT CAST(f1 AS varchar) AS "varchar(char)" FROM CHAR_TBL;
+
+SELECT CAST(name 'namefield' AS varchar) AS "varchar(name)";
+
+--
+-- test SQL string functions
+-- E### and T### are feature reference numbers from SQL99
+--
+
+-- E021-09 trim function
+SELECT TRIM(BOTH FROM ' bunch o blanks ') = 'bunch o blanks' AS "bunch o blanks";
+
+SELECT TRIM(LEADING FROM ' bunch o blanks ') = 'bunch o blanks ' AS "bunch o blanks ";
+
+SELECT TRIM(TRAILING FROM ' bunch o blanks ') = ' bunch o blanks' AS " bunch o blanks";
+
+SELECT TRIM(BOTH 'x' FROM 'xxxxxsome Xsxxxxx') = 'some Xs' AS "some Xs";
+
+-- E021-06 substring expression
+SELECT SUBSTRING('1234567890' FROM 3) = '34567890' AS "34567890";
+
+SELECT SUBSTRING('1234567890' FROM 4 FOR 3) = '456' AS "456";
+
+-- test overflow cases
+SELECT SUBSTRING('string' FROM 2 FOR 2147483646) AS "tring";
+SELECT SUBSTRING('string' FROM -10 FOR 2147483646) AS "string";
+SELECT SUBSTRING('string' FROM -10 FOR -2147483646) AS "error";
+
+-- T581 regular expression substring (with SQL's bizarre regexp syntax)
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"(b_d)#"%' ESCAPE '#') AS "bcd";
+-- obsolete SQL99 syntax
+SELECT SUBSTRING('abcdefg' FROM 'a#"(b_d)#"%' FOR '#') AS "bcd";
+
+-- No match should return NULL
+SELECT SUBSTRING('abcdefg' SIMILAR '#"(b_d)#"%' ESCAPE '#') IS NULL AS "True";
+
+-- Null inputs should return NULL
+SELECT SUBSTRING('abcdefg' SIMILAR '%' ESCAPE NULL) IS NULL AS "True";
+SELECT SUBSTRING(NULL SIMILAR '%' ESCAPE '#') IS NULL AS "True";
+SELECT SUBSTRING('abcdefg' SIMILAR NULL ESCAPE '#') IS NULL AS "True";
+
+-- The first and last parts should act non-greedy
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%#"g' ESCAPE '#') AS "bcdef";
+SELECT SUBSTRING('abcdefg' SIMILAR 'a*#"%#"g*' ESCAPE '#') AS "abcdefg";
+
+-- Vertical bar in any part affects only that part
+SELECT SUBSTRING('abcdefg' SIMILAR 'a|b#"%#"g' ESCAPE '#') AS "bcdef";
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%#"x|g' ESCAPE '#') AS "bcdef";
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%|ab#"g' ESCAPE '#') AS "bcdef";
+
+-- Can't have more than two part separators
+SELECT SUBSTRING('abcdefg' SIMILAR 'a*#"%#"g*#"x' ESCAPE '#') AS "error";
+
+-- Postgres extension: with 0 or 1 separator, assume parts 1 and 3 are empty
+SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%g' ESCAPE '#') AS "bcdefg";
+SELECT SUBSTRING('abcdefg' SIMILAR 'a%g' ESCAPE '#') AS "abcdefg";
+
+-- substring() with just two arguments is not allowed by SQL spec;
+-- we accept it, but we interpret the pattern as a POSIX regexp not SQL
+SELECT SUBSTRING('abcdefg' FROM 'c.e') AS "cde";
+
+-- With a parenthesized subexpression, return only what matches the subexpr
+SELECT SUBSTRING('abcdefg' FROM 'b(.*)f') AS "cde";
+
+-- Check behavior of SIMILAR TO, which uses largely the same regexp variant
+SELECT 'abcdefg' SIMILAR TO '_bcd%' AS true;
+SELECT 'abcdefg' SIMILAR TO 'bcd%' AS false;
+SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '#' AS false;
+SELECT 'abcd%' SIMILAR TO '_bcd#%' ESCAPE '#' AS true;
+-- Postgres uses '\' as the default escape character, which is not per spec
+SELECT 'abcdefg' SIMILAR TO '_bcd\%' AS false;
+-- and an empty string to mean "no escape", which is also not per spec
+SELECT 'abcd\efg' SIMILAR TO '_bcd\%' ESCAPE '' AS true;
+-- these behaviors are per spec, though:
+SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null;
+SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error;
+
+-- Test back reference in regexp_replace
+SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3');
+SELECT regexp_replace('AAA BBB CCC ', E'\\s+', ' ', 'g');
+SELECT regexp_replace('AAA', '^|$', 'Z', 'g');
+SELECT regexp_replace('AAA aaa', 'A+', 'Z', 'gi');
+-- invalid regexp option
+SELECT regexp_replace('AAA aaa', 'A+', 'Z', 'z');
+
+-- set so we can tell NULL from empty string
+\pset null '\\N'
+
+-- return all matches from regexp
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque)$re$);
+
+-- test case insensitive
+SELECT regexp_matches('foObARbEqUEbAz', $re$(bar)(beque)$re$, 'i');
+
+-- global option - more than one match
+SELECT regexp_matches('foobarbequebazilbarfbonk', $re$(b[^b]+)(b[^b]+)$re$, 'g');
+
+-- empty capture group (matched empty string)
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(.*)(beque)$re$);
+-- no match
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(.+)(beque)$re$);
+-- optional capture group did not match, null entry in array
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(.+)?(beque)$re$);
+
+-- no capture groups
+SELECT regexp_matches('foobarbequebaz', $re$barbeque$re$);
+
+-- start/end-of-line matches are of zero length
+SELECT regexp_matches('foo' || chr(10) || 'bar' || chr(10) || 'bequq' || chr(10) || 'baz', '^', 'mg');
+SELECT regexp_matches('foo' || chr(10) || 'bar' || chr(10) || 'bequq' || chr(10) || 'baz', '$', 'mg');
+SELECT regexp_matches('1' || chr(10) || '2' || chr(10) || '3' || chr(10) || '4' || chr(10), '^.?', 'mg');
+SELECT regexp_matches(chr(10) || '1' || chr(10) || '2' || chr(10) || '3' || chr(10) || '4' || chr(10), '.?$', 'mg');
+SELECT regexp_matches(chr(10) || '1' || chr(10) || '2' || chr(10) || '3' || chr(10) || '4', '.?$', 'mg');
+
+-- give me errors
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque)$re$, 'gz');
+SELECT regexp_matches('foobarbequebaz', $re$(barbeque$re$);
+SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque){2,1}$re$);
+
+-- split string on regexp
+SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', $re$\s+$re$) AS foo;
+SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', $re$\s+$re$);
+
+SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', $re$\s*$re$) AS foo;
+SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', $re$\s*$re$);
+SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', '') AS foo;
+SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', '');
+-- case insensitive
+SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'i') AS foo;
+SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'i');
+-- no match of pattern
+SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', 'nomatch') AS foo;
+SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', 'nomatch');
+-- some corner cases
+SELECT regexp_split_to_array('123456','1');
+SELECT regexp_split_to_array('123456','6');
+SELECT regexp_split_to_array('123456','.');
+SELECT regexp_split_to_array('123456','');
+SELECT regexp_split_to_array('123456','(?:)');
+SELECT regexp_split_to_array('1','');
+-- errors
+SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'zippy') AS foo;
+SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'iz');
+-- global option meaningless for regexp_split
+SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'g') AS foo;
+SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'g');
+
+-- change NULL-display back
+\pset null ''
+
+-- E021-11 position expression
+SELECT POSITION('4' IN '1234567890') = '4' AS "4";
+
+SELECT POSITION('5' IN '1234567890') = '5' AS "5";
+
+-- T312 character overlay function
+SELECT OVERLAY('abcdef' PLACING '45' FROM 4) AS "abc45f";
+
+SELECT OVERLAY('yabadoo' PLACING 'daba' FROM 5) AS "yabadaba";
+
+SELECT OVERLAY('yabadoo' PLACING 'daba' FROM 5 FOR 0) AS "yabadabadoo";
+
+SELECT OVERLAY('babosa' PLACING 'ubb' FROM 2 FOR 4) AS "bubba";
+
+--
+-- test LIKE
+-- Be sure to form every test as a LIKE/NOT LIKE pair.
+--
+
+-- simplest examples
+-- E061-04 like predicate
+SELECT 'hawkeye' LIKE 'h%' AS "true";
+SELECT 'hawkeye' NOT LIKE 'h%' AS "false";
+
+SELECT 'hawkeye' LIKE 'H%' AS "false";
+SELECT 'hawkeye' NOT LIKE 'H%' AS "true";
+
+SELECT 'hawkeye' LIKE 'indio%' AS "false";
+SELECT 'hawkeye' NOT LIKE 'indio%' AS "true";
+
+SELECT 'hawkeye' LIKE 'h%eye' AS "true";
+SELECT 'hawkeye' NOT LIKE 'h%eye' AS "false";
+
+SELECT 'indio' LIKE '_ndio' AS "true";
+SELECT 'indio' NOT LIKE '_ndio' AS "false";
+
+SELECT 'indio' LIKE 'in__o' AS "true";
+SELECT 'indio' NOT LIKE 'in__o' AS "false";
+
+SELECT 'indio' LIKE 'in_o' AS "false";
+SELECT 'indio' NOT LIKE 'in_o' AS "true";
+
+SELECT 'abc'::name LIKE '_b_' AS "true";
+SELECT 'abc'::name NOT LIKE '_b_' AS "false";
+
+SELECT 'abc'::bytea LIKE '_b_'::bytea AS "true";
+SELECT 'abc'::bytea NOT LIKE '_b_'::bytea AS "false";
+
+-- unused escape character
+SELECT 'hawkeye' LIKE 'h%' ESCAPE '#' AS "true";
+SELECT 'hawkeye' NOT LIKE 'h%' ESCAPE '#' AS "false";
+
+SELECT 'indio' LIKE 'ind_o' ESCAPE '$' AS "true";
+SELECT 'indio' NOT LIKE 'ind_o' ESCAPE '$' AS "false";
+
+-- escape character
+-- E061-05 like predicate with escape clause
+SELECT 'h%' LIKE 'h#%' ESCAPE '#' AS "true";
+SELECT 'h%' NOT LIKE 'h#%' ESCAPE '#' AS "false";
+
+SELECT 'h%wkeye' LIKE 'h#%' ESCAPE '#' AS "false";
+SELECT 'h%wkeye' NOT LIKE 'h#%' ESCAPE '#' AS "true";
+
+SELECT 'h%wkeye' LIKE 'h#%%' ESCAPE '#' AS "true";
+SELECT 'h%wkeye' NOT LIKE 'h#%%' ESCAPE '#' AS "false";
+
+SELECT 'h%awkeye' LIKE 'h#%a%k%e' ESCAPE '#' AS "true";
+SELECT 'h%awkeye' NOT LIKE 'h#%a%k%e' ESCAPE '#' AS "false";
+
+SELECT 'indio' LIKE '_ndio' ESCAPE '$' AS "true";
+SELECT 'indio' NOT LIKE '_ndio' ESCAPE '$' AS "false";
+
+SELECT 'i_dio' LIKE 'i$_d_o' ESCAPE '$' AS "true";
+SELECT 'i_dio' NOT LIKE 'i$_d_o' ESCAPE '$' AS "false";
+
+SELECT 'i_dio' LIKE 'i$_nd_o' ESCAPE '$' AS "false";
+SELECT 'i_dio' NOT LIKE 'i$_nd_o' ESCAPE '$' AS "true";
+
+SELECT 'i_dio' LIKE 'i$_d%o' ESCAPE '$' AS "true";
+SELECT 'i_dio' NOT LIKE 'i$_d%o' ESCAPE '$' AS "false";
+
+SELECT 'a_c'::bytea LIKE 'a$__'::bytea ESCAPE '$'::bytea AS "true";
+SELECT 'a_c'::bytea NOT LIKE 'a$__'::bytea ESCAPE '$'::bytea AS "false";
+
+-- escape character same as pattern character
+SELECT 'maca' LIKE 'm%aca' ESCAPE '%' AS "true";
+SELECT 'maca' NOT LIKE 'm%aca' ESCAPE '%' AS "false";
+
+SELECT 'ma%a' LIKE 'm%a%%a' ESCAPE '%' AS "true";
+SELECT 'ma%a' NOT LIKE 'm%a%%a' ESCAPE '%' AS "false";
+
+SELECT 'bear' LIKE 'b_ear' ESCAPE '_' AS "true";
+SELECT 'bear' NOT LIKE 'b_ear' ESCAPE '_' AS "false";
+
+SELECT 'be_r' LIKE 'b_e__r' ESCAPE '_' AS "true";
+SELECT 'be_r' NOT LIKE 'b_e__r' ESCAPE '_' AS "false";
+
+SELECT 'be_r' LIKE '__e__r' ESCAPE '_' AS "false";
+SELECT 'be_r' NOT LIKE '__e__r' ESCAPE '_' AS "true";
+
+
+--
+-- test ILIKE (case-insensitive LIKE)
+-- Be sure to form every test as an ILIKE/NOT ILIKE pair.
+--
+
+SELECT 'hawkeye' ILIKE 'h%' AS "true";
+SELECT 'hawkeye' NOT ILIKE 'h%' AS "false";
+
+SELECT 'hawkeye' ILIKE 'H%' AS "true";
+SELECT 'hawkeye' NOT ILIKE 'H%' AS "false";
+
+SELECT 'hawkeye' ILIKE 'H%Eye' AS "true";
+SELECT 'hawkeye' NOT ILIKE 'H%Eye' AS "false";
+
+SELECT 'Hawkeye' ILIKE 'h%' AS "true";
+SELECT 'Hawkeye' NOT ILIKE 'h%' AS "false";
+
+SELECT 'ABC'::name ILIKE '_b_' AS "true";
+SELECT 'ABC'::name NOT ILIKE '_b_' AS "false";
+
+--
+-- test %/_ combination cases, cf bugs #4821 and #5478
+--
+
+SELECT 'foo' LIKE '_%' as t, 'f' LIKE '_%' as t, '' LIKE '_%' as f;
+SELECT 'foo' LIKE '%_' as t, 'f' LIKE '%_' as t, '' LIKE '%_' as f;
+
+SELECT 'foo' LIKE '__%' as t, 'foo' LIKE '___%' as t, 'foo' LIKE '____%' as f;
+SELECT 'foo' LIKE '%__' as t, 'foo' LIKE '%___' as t, 'foo' LIKE '%____' as f;
+
+SELECT 'jack' LIKE '%____%' AS t;
+
+
+--
+-- basic tests of LIKE with indexes
+--
+
+CREATE TABLE texttest (a text PRIMARY KEY, b int);
+SELECT * FROM texttest WHERE a LIKE '%1%';
+
+CREATE TABLE byteatest (a bytea PRIMARY KEY, b int);
+SELECT * FROM byteatest WHERE a LIKE '%1%';
+
+DROP TABLE texttest, byteatest;
+
+
+--
+-- test implicit type conversion
+--
+
+-- E021-07 character concatenation
+SELECT 'unknown' || ' and unknown' AS "Concat unknown types";
+
+SELECT text 'text' || ' and unknown' AS "Concat text to unknown type";
+
+SELECT char(20) 'characters' || ' and text' AS "Concat char to unknown type";
+
+SELECT text 'text' || char(20) ' and characters' AS "Concat text to char";
+
+SELECT text 'text' || varchar ' and varchar' AS "Concat text to varchar";
+
+--
+-- test substr with toasted text values
+--
+CREATE TABLE toasttest(f1 text);
+
+insert into toasttest values(repeat('1234567890',10000));
+insert into toasttest values(repeat('1234567890',10000));
+
+--
+-- Ensure that some values are uncompressed, to test the faster substring
+-- operation used in that case
+--
+alter table toasttest alter column f1 set storage external;
+insert into toasttest values(repeat('1234567890',10000));
+insert into toasttest values(repeat('1234567890',10000));
+
+-- If the starting position is zero or less, then return from the start of the string
+-- adjusting the length to be consistent with the "negative start" per SQL.
+SELECT substr(f1, -1, 5) from toasttest;
+
+-- If the length is less than zero, an ERROR is thrown.
+SELECT substr(f1, 5, -1) from toasttest;
+
+-- If no third argument (length) is provided, the length to the end of the
+-- string is assumed.
+SELECT substr(f1, 99995) from toasttest;
+
+-- If start plus length is > string length, the result is truncated to
+-- string length
+SELECT substr(f1, 99995, 10) from toasttest;
+
+TRUNCATE TABLE toasttest;
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+-- expect >0 blocks
+SELECT pg_relation_size(reltoastrelid) = 0 AS is_empty
+ FROM pg_class where relname = 'toasttest';
+
+TRUNCATE TABLE toasttest;
+ALTER TABLE toasttest set (toast_tuple_target = 4080);
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+INSERT INTO toasttest values (repeat('1234567890',300));
+-- expect 0 blocks
+SELECT pg_relation_size(reltoastrelid) = 0 AS is_empty
+ FROM pg_class where relname = 'toasttest';
+
+DROP TABLE toasttest;
+
+--
+-- test substr with toasted bytea values
+--
+CREATE TABLE toasttest(f1 bytea);
+
+insert into toasttest values(decode(repeat('1234567890',10000),'escape'));
+insert into toasttest values(decode(repeat('1234567890',10000),'escape'));
+
+--
+-- Ensure that some values are uncompressed, to test the faster substring
+-- operation used in that case
+--
+alter table toasttest alter column f1 set storage external;
+insert into toasttest values(decode(repeat('1234567890',10000),'escape'));
+insert into toasttest values(decode(repeat('1234567890',10000),'escape'));
+
+-- If the starting position is zero or less, then return from the start of the string
+-- adjusting the length to be consistent with the "negative start" per SQL.
+SELECT substr(f1, -1, 5) from toasttest;
+
+-- If the length is less than zero, an ERROR is thrown.
+SELECT substr(f1, 5, -1) from toasttest;
+
+-- If no third argument (length) is provided, the length to the end of the
+-- string is assumed.
+SELECT substr(f1, 99995) from toasttest;
+
+-- If start plus length is > string length, the result is truncated to
+-- string length
+SELECT substr(f1, 99995, 10) from toasttest;
+
+DROP TABLE toasttest;
+
+-- test internally compressing datums
+
+-- this tests compressing a datum to a very small size which exercises a
+-- corner case in packed-varlena handling: even though small, the compressed
+-- datum must be given a 4-byte header because there are no bits to indicate
+-- compression in a 1-byte header
+
+CREATE TABLE toasttest (c char(4096));
+INSERT INTO toasttest VALUES('x');
+SELECT length(c), c::text FROM toasttest;
+SELECT c FROM toasttest;
+DROP TABLE toasttest;
+
+--
+-- test length
+--
+
+SELECT length('abcdef') AS "length_6";
+
+--
+-- test strpos
+--
+
+SELECT strpos('abcdef', 'cd') AS "pos_3";
+
+SELECT strpos('abcdef', 'xy') AS "pos_0";
+
+SELECT strpos('abcdef', '') AS "pos_1";
+
+SELECT strpos('', 'xy') AS "pos_0";
+
+SELECT strpos('', '') AS "pos_1";
+
+--
+-- test replace
+--
+SELECT replace('abcdef', 'de', '45') AS "abc45f";
+
+SELECT replace('yabadabadoo', 'ba', '123') AS "ya123da123doo";
+
+SELECT replace('yabadoo', 'bad', '') AS "yaoo";
+
+--
+-- test split_part
+--
+select split_part('','@',1) AS "empty string";
+
+select split_part('','@',-1) AS "empty string";
+
+select split_part('joeuser@mydatabase','',1) AS "joeuser@mydatabase";
+
+select split_part('joeuser@mydatabase','',2) AS "empty string";
+
+select split_part('joeuser@mydatabase','',-1) AS "joeuser@mydatabase";
+
+select split_part('joeuser@mydatabase','',-2) AS "empty string";
+
+select split_part('joeuser@mydatabase','@',0) AS "an error";
+
+select split_part('joeuser@mydatabase','@@',1) AS "joeuser@mydatabase";
+
+select split_part('joeuser@mydatabase','@@',2) AS "empty string";
+
+select split_part('joeuser@mydatabase','@',1) AS "joeuser";
+
+select split_part('joeuser@mydatabase','@',2) AS "mydatabase";
+
+select split_part('joeuser@mydatabase','@',3) AS "empty string";
+
+select split_part('@joeuser@mydatabase@','@',2) AS "joeuser";
+
+select split_part('joeuser@mydatabase','@',-1) AS "mydatabase";
+
+select split_part('joeuser@mydatabase','@',-2) AS "joeuser";
+
+select split_part('joeuser@mydatabase','@',-3) AS "empty string";
+
+select split_part('@joeuser@mydatabase@','@',-2) AS "mydatabase";
+
+--
+-- test to_hex
+--
+select to_hex(256*256*256 - 1) AS "ffffff";
+
+select to_hex(256::bigint*256::bigint*256::bigint*256::bigint - 1) AS "ffffffff";
+
+--
+-- MD5 test suite - from IETF RFC 1321
+-- (see: ftp://ftp.rfc-editor.org/in-notes/rfc1321.txt)
+--
+select md5('') = 'd41d8cd98f00b204e9800998ecf8427e' AS "TRUE";
+
+select md5('a') = '0cc175b9c0f1b6a831c399e269772661' AS "TRUE";
+
+select md5('abc') = '900150983cd24fb0d6963f7d28e17f72' AS "TRUE";
+
+select md5('message digest') = 'f96b697d7cb7938d525a2f31aaf161d0' AS "TRUE";
+
+select md5('abcdefghijklmnopqrstuvwxyz') = 'c3fcd3d76192e4007dfb496cca67e13b' AS "TRUE";
+
+select md5('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789') = 'd174ab98d277d9f5a5611c2c9f419d9f' AS "TRUE";
+
+select md5('12345678901234567890123456789012345678901234567890123456789012345678901234567890') = '57edf4a22be3c955ac49da2e2107b67a' AS "TRUE";
+
+select md5(''::bytea) = 'd41d8cd98f00b204e9800998ecf8427e' AS "TRUE";
+
+select md5('a'::bytea) = '0cc175b9c0f1b6a831c399e269772661' AS "TRUE";
+
+select md5('abc'::bytea) = '900150983cd24fb0d6963f7d28e17f72' AS "TRUE";
+
+select md5('message digest'::bytea) = 'f96b697d7cb7938d525a2f31aaf161d0' AS "TRUE";
+
+select md5('abcdefghijklmnopqrstuvwxyz'::bytea) = 'c3fcd3d76192e4007dfb496cca67e13b' AS "TRUE";
+
+select md5('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'::bytea) = 'd174ab98d277d9f5a5611c2c9f419d9f' AS "TRUE";
+
+select md5('12345678901234567890123456789012345678901234567890123456789012345678901234567890'::bytea) = '57edf4a22be3c955ac49da2e2107b67a' AS "TRUE";
+
+--
+-- SHA-2
+--
+SET bytea_output TO hex;
+
+SELECT sha224('');
+SELECT sha224('The quick brown fox jumps over the lazy dog.');
+
+SELECT sha256('');
+SELECT sha256('The quick brown fox jumps over the lazy dog.');
+
+SELECT sha384('');
+SELECT sha384('The quick brown fox jumps over the lazy dog.');
+
+SELECT sha512('');
+SELECT sha512('The quick brown fox jumps over the lazy dog.');
+
+--
+-- encode/decode
+--
+SELECT encode('\x1234567890abcdef00', 'hex');
+SELECT decode('1234567890abcdef00', 'hex');
+SELECT encode(('\x' || repeat('1234567890abcdef0001', 7))::bytea, 'base64');
+SELECT decode(encode(('\x' || repeat('1234567890abcdef0001', 7))::bytea,
+ 'base64'), 'base64');
+SELECT encode('\x1234567890abcdef00', 'escape');
+SELECT decode(encode('\x1234567890abcdef00', 'escape'), 'escape');
+
+--
+-- get_bit/set_bit etc
+--
+SELECT get_bit('\x1234567890abcdef00'::bytea, 43);
+SELECT get_bit('\x1234567890abcdef00'::bytea, 99); -- error
+SELECT set_bit('\x1234567890abcdef00'::bytea, 43, 0);
+SELECT set_bit('\x1234567890abcdef00'::bytea, 99, 0); -- error
+SELECT get_byte('\x1234567890abcdef00'::bytea, 3);
+SELECT get_byte('\x1234567890abcdef00'::bytea, 99); -- error
+SELECT set_byte('\x1234567890abcdef00'::bytea, 7, 11);
+SELECT set_byte('\x1234567890abcdef00'::bytea, 99, 11); -- error
+
+--
+-- test behavior of escape_string_warning and standard_conforming_strings options
+--
+set escape_string_warning = off;
+set standard_conforming_strings = off;
+
+show escape_string_warning;
+show standard_conforming_strings;
+
+set escape_string_warning = on;
+set standard_conforming_strings = on;
+
+show escape_string_warning;
+show standard_conforming_strings;
+
+select 'a\bcd' as f1, 'a\b''cd' as f2, 'a\b''''cd' as f3, 'abcd\' as f4, 'ab\''cd' as f5, '\\' as f6;
+
+set standard_conforming_strings = off;
+
+select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as f6;
+
+set escape_string_warning = off;
+set standard_conforming_strings = on;
+
+select 'a\bcd' as f1, 'a\b''cd' as f2, 'a\b''''cd' as f3, 'abcd\' as f4, 'ab\''cd' as f5, '\\' as f6;
+
+set standard_conforming_strings = off;
+
+select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as f6;
+
+reset standard_conforming_strings;
+
+
+--
+-- Additional string functions
+--
+SET bytea_output TO escape;
+
+SELECT initcap('hi THOMAS');
+
+SELECT lpad('hi', 5, 'xy');
+SELECT lpad('hi', 5);
+SELECT lpad('hi', -5, 'xy');
+SELECT lpad('hello', 2);
+SELECT lpad('hi', 5, '');
+
+SELECT rpad('hi', 5, 'xy');
+SELECT rpad('hi', 5);
+SELECT rpad('hi', -5, 'xy');
+SELECT rpad('hello', 2);
+SELECT rpad('hi', 5, '');
+
+SELECT ltrim('zzzytrim', 'xyz');
+
+SELECT translate('', '14', 'ax');
+SELECT translate('12345', '14', 'ax');
+
+SELECT ascii('x');
+SELECT ascii('');
+
+SELECT chr(65);
+SELECT chr(0);
+
+SELECT repeat('Pg', 4);
+SELECT repeat('Pg', -4);
+
+SELECT SUBSTRING('1234567890'::bytea FROM 3) "34567890";
+SELECT SUBSTRING('1234567890'::bytea FROM 4 FOR 3) AS "456";
+SELECT SUBSTRING('string'::bytea FROM 2 FOR 2147483646) AS "tring";
+SELECT SUBSTRING('string'::bytea FROM -10 FOR 2147483646) AS "string";
+SELECT SUBSTRING('string'::bytea FROM -10 FOR -2147483646) AS "error";
+
+SELECT trim(E'\\000'::bytea from E'\\000Tom\\000'::bytea);
+SELECT trim(leading E'\\000'::bytea from E'\\000Tom\\000'::bytea);
+SELECT trim(trailing E'\\000'::bytea from E'\\000Tom\\000'::bytea);
+SELECT btrim(E'\\000trim\\000'::bytea, E'\\000'::bytea);
+SELECT btrim(''::bytea, E'\\000'::bytea);
+SELECT btrim(E'\\000trim\\000'::bytea, ''::bytea);
+SELECT encode(overlay(E'Th\\000omas'::bytea placing E'Th\\001omas'::bytea from 2),'escape');
+SELECT encode(overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 8),'escape');
+SELECT encode(overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 5 for 3),'escape');
+
+SELECT bit_count('\x1234567890'::bytea);
+
+SELECT unistr('\0064at\+0000610');
+SELECT unistr('d\u0061t\U000000610');
+SELECT unistr('a\\b');
+-- errors:
+SELECT unistr('wrong: \db99');
+SELECT unistr('wrong: \db99\0061');
+SELECT unistr('wrong: \+00db99\+000061');
+SELECT unistr('wrong: \+2FFFFF');
+SELECT unistr('wrong: \udb99\u0061');
+SELECT unistr('wrong: \U0000db99\U00000061');
+SELECT unistr('wrong: \U002FFFFF');
+SELECT unistr('wrong: \xyz');
diff --git a/ydb/library/yql/tests/postgresql/original/cases/subselect.out b/ydb/library/yql/tests/postgresql/original/cases/subselect.out
new file mode 100644
index 0000000000..45c75eecc5
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/subselect.out
@@ -0,0 +1,1859 @@
+--
+-- SUBSELECT
+--
+SELECT 1 AS one WHERE 1 IN (SELECT 1);
+ one
+-----
+ 1
+(1 row)
+
+SELECT 1 AS zero WHERE 1 NOT IN (SELECT 1);
+ zero
+------
+(0 rows)
+
+SELECT 1 AS zero WHERE 1 IN (SELECT 2);
+ zero
+------
+(0 rows)
+
+-- Check grammar's handling of extra parens in assorted contexts
+SELECT * FROM (SELECT 1 AS x) ss;
+ x
+---
+ 1
+(1 row)
+
+SELECT * FROM ((SELECT 1 AS x)) ss;
+ x
+---
+ 1
+(1 row)
+
+(SELECT 2) UNION SELECT 2;
+ ?column?
+----------
+ 2
+(1 row)
+
+((SELECT 2)) UNION SELECT 2;
+ ?column?
+----------
+ 2
+(1 row)
+
+SELECT ((SELECT 2) UNION SELECT 2);
+ ?column?
+----------
+ 2
+(1 row)
+
+SELECT (((SELECT 2)) UNION SELECT 2);
+ ?column?
+----------
+ 2
+(1 row)
+
+SELECT (SELECT ARRAY[1,2,3])[1];
+ array
+-------
+ 1
+(1 row)
+
+SELECT ((SELECT ARRAY[1,2,3]))[2];
+ array
+-------
+ 2
+(1 row)
+
+SELECT (((SELECT ARRAY[1,2,3])))[3];
+ array
+-------
+ 3
+(1 row)
+
+-- Set up some simple test tables
+CREATE TABLE SUBSELECT_TBL (
+ f1 integer,
+ f2 integer,
+ f3 float
+);
+INSERT INTO SUBSELECT_TBL VALUES (1, 2, 3);
+INSERT INTO SUBSELECT_TBL VALUES (2, 3, 4);
+INSERT INTO SUBSELECT_TBL VALUES (3, 4, 5);
+INSERT INTO SUBSELECT_TBL VALUES (1, 1, 1);
+INSERT INTO SUBSELECT_TBL VALUES (2, 2, 2);
+INSERT INTO SUBSELECT_TBL VALUES (3, 3, 3);
+INSERT INTO SUBSELECT_TBL VALUES (6, 7, 8);
+INSERT INTO SUBSELECT_TBL VALUES (8, 9, NULL);
+SELECT * FROM SUBSELECT_TBL;
+ f1 | f2 | f3
+----+----+----
+ 1 | 2 | 3
+ 2 | 3 | 4
+ 3 | 4 | 5
+ 1 | 1 | 1
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 6 | 7 | 8
+ 8 | 9 |
+(8 rows)
+
+-- Uncorrelated subselects
+SELECT f1 AS "Constant Select" FROM SUBSELECT_TBL
+ WHERE f1 IN (SELECT 1);
+ Constant Select
+-----------------
+ 1
+ 1
+(2 rows)
+
+SELECT f1 AS "Uncorrelated Field" FROM SUBSELECT_TBL
+ WHERE f1 IN (SELECT f2 FROM SUBSELECT_TBL);
+ Uncorrelated Field
+--------------------
+ 1
+ 2
+ 3
+ 1
+ 2
+ 3
+(6 rows)
+
+SELECT f1 AS "Uncorrelated Field" FROM SUBSELECT_TBL
+ WHERE f1 IN (SELECT f2 FROM SUBSELECT_TBL WHERE
+ f2 IN (SELECT f1 FROM SUBSELECT_TBL));
+ Uncorrelated Field
+--------------------
+ 1
+ 2
+ 3
+ 1
+ 2
+ 3
+(6 rows)
+
+SELECT f1, f2
+ FROM SUBSELECT_TBL
+ WHERE (f1, f2) NOT IN (SELECT f2, CAST(f3 AS int4) FROM SUBSELECT_TBL
+ WHERE f3 IS NOT NULL);
+ f1 | f2
+----+----
+ 1 | 2
+ 6 | 7
+ 8 | 9
+(3 rows)
+
+-- Correlated subselects
+SELECT f1 AS "Correlated Field", f2 AS "Second Field"
+ FROM SUBSELECT_TBL upper
+ WHERE f1 IN (SELECT f2 FROM SUBSELECT_TBL WHERE f1 = upper.f1);
+ Correlated Field | Second Field
+------------------+--------------
+ 1 | 2
+ 2 | 3
+ 3 | 4
+ 1 | 1
+ 2 | 2
+ 3 | 3
+(6 rows)
+
+SELECT f1 AS "Correlated Field", f3 AS "Second Field"
+ FROM SUBSELECT_TBL upper
+ WHERE f1 IN
+ (SELECT f2 FROM SUBSELECT_TBL WHERE CAST(upper.f2 AS float) = f3);
+ Correlated Field | Second Field
+------------------+--------------
+ 2 | 4
+ 3 | 5
+ 1 | 1
+ 2 | 2
+ 3 | 3
+(5 rows)
+
+SELECT f1 AS "Correlated Field", f3 AS "Second Field"
+ FROM SUBSELECT_TBL upper
+ WHERE f3 IN (SELECT upper.f1 + f2 FROM SUBSELECT_TBL
+ WHERE f2 = CAST(f3 AS integer));
+ Correlated Field | Second Field
+------------------+--------------
+ 1 | 3
+ 2 | 4
+ 3 | 5
+ 6 | 8
+(4 rows)
+
+SELECT f1 AS "Correlated Field"
+ FROM SUBSELECT_TBL
+ WHERE (f1, f2) IN (SELECT f2, CAST(f3 AS int4) FROM SUBSELECT_TBL
+ WHERE f3 IS NOT NULL);
+ Correlated Field
+------------------
+ 2
+ 3
+ 1
+ 2
+ 3
+(5 rows)
+
+--
+-- Use some existing tables in the regression test
+--
+SELECT ss.f1 AS "Correlated Field", ss.f3 AS "Second Field"
+ FROM SUBSELECT_TBL ss
+ WHERE f1 NOT IN (SELECT f1+1 FROM INT4_TBL
+ WHERE f1 != ss.f1 AND f1 < 2147483647);
+ Correlated Field | Second Field
+------------------+--------------
+ 2 | 4
+ 3 | 5
+ 2 | 2
+ 3 | 3
+ 6 | 8
+ 8 |
+(6 rows)
+
+select q1, float8(count(*)) / (select count(*) from int8_tbl)
+from int8_tbl group by q1 order by q1;
+ q1 | ?column?
+------------------+----------
+ 123 | 0.4
+ 4567890123456789 | 0.6
+(2 rows)
+
+-- Unspecified-type literals in output columns should resolve as text
+SELECT *, pg_typeof(f1) FROM
+ (SELECT 'foo' AS f1 FROM generate_series(1,3)) ss ORDER BY 1;
+ f1 | pg_typeof
+-----+-----------
+ foo | text
+ foo | text
+ foo | text
+(3 rows)
+
+-- ... unless there's context to suggest differently
+explain (verbose, costs off) select '42' union all select '43';
+ QUERY PLAN
+----------------------------
+ Append
+ -> Result
+ Output: '42'::text
+ -> Result
+ Output: '43'::text
+(5 rows)
+
+explain (verbose, costs off) select '42' union all select 43;
+ QUERY PLAN
+--------------------
+ Append
+ -> Result
+ Output: 42
+ -> Result
+ Output: 43
+(5 rows)
+
+-- check materialization of an initplan reference (bug #14524)
+explain (verbose, costs off)
+select 1 = all (select (select 1));
+ QUERY PLAN
+-----------------------------------
+ Result
+ Output: (SubPlan 2)
+ SubPlan 2
+ -> Materialize
+ Output: ($0)
+ InitPlan 1 (returns $0)
+ -> Result
+ Output: 1
+ -> Result
+ Output: $0
+(10 rows)
+
+select 1 = all (select (select 1));
+ ?column?
+----------
+ t
+(1 row)
+
+--
+-- Check EXISTS simplification with LIMIT
+--
+explain (costs off)
+select * from int4_tbl o where exists
+ (select 1 from int4_tbl i where i.f1=o.f1 limit null);
+ QUERY PLAN
+------------------------------------
+ Hash Semi Join
+ Hash Cond: (o.f1 = i.f1)
+ -> Seq Scan on int4_tbl o
+ -> Hash
+ -> Seq Scan on int4_tbl i
+(5 rows)
+
+explain (costs off)
+select * from int4_tbl o where not exists
+ (select 1 from int4_tbl i where i.f1=o.f1 limit 1);
+ QUERY PLAN
+------------------------------------
+ Hash Anti Join
+ Hash Cond: (o.f1 = i.f1)
+ -> Seq Scan on int4_tbl o
+ -> Hash
+ -> Seq Scan on int4_tbl i
+(5 rows)
+
+explain (costs off)
+select * from int4_tbl o where exists
+ (select 1 from int4_tbl i where i.f1=o.f1 limit 0);
+ QUERY PLAN
+--------------------------------------
+ Seq Scan on int4_tbl o
+ Filter: (SubPlan 1)
+ SubPlan 1
+ -> Limit
+ -> Seq Scan on int4_tbl i
+ Filter: (f1 = o.f1)
+(6 rows)
+
+--
+-- Test cases to catch unpleasant interactions between IN-join processing
+-- and subquery pullup.
+--
+select count(*) from
+ (select 1 from tenk1 a
+ where unique1 IN (select hundred from tenk1 b)) ss;
+ count
+-------
+ 100
+(1 row)
+
+select count(distinct ss.ten) from
+ (select ten from tenk1 a
+ where unique1 IN (select hundred from tenk1 b)) ss;
+ count
+-------
+ 10
+(1 row)
+
+select count(*) from
+ (select 1 from tenk1 a
+ where unique1 IN (select distinct hundred from tenk1 b)) ss;
+ count
+-------
+ 100
+(1 row)
+
+select count(distinct ss.ten) from
+ (select ten from tenk1 a
+ where unique1 IN (select distinct hundred from tenk1 b)) ss;
+ count
+-------
+ 10
+(1 row)
+
+--
+-- Test cases to check for overenthusiastic optimization of
+-- "IN (SELECT DISTINCT ...)" and related cases. Per example from
+-- Luca Pireddu and Michael Fuhr.
+--
+CREATE TEMP TABLE foo (id integer);
+CREATE TEMP TABLE bar (id1 integer, id2 integer);
+INSERT INTO foo VALUES (1);
+INSERT INTO bar VALUES (1, 1);
+INSERT INTO bar VALUES (2, 2);
+INSERT INTO bar VALUES (3, 1);
+-- These cases require an extra level of distinct-ing above subquery s
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT DISTINCT id1, id2 FROM bar) AS s);
+ id
+----
+ 1
+(1 row)
+
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT id1,id2 FROM bar GROUP BY id1,id2) AS s);
+ id
+----
+ 1
+(1 row)
+
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT id1, id2 FROM bar UNION
+ SELECT id1, id2 FROM bar) AS s);
+ id
+----
+ 1
+(1 row)
+
+-- These cases do not
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT DISTINCT ON (id2) id1, id2 FROM bar) AS s);
+ id
+----
+ 1
+(1 row)
+
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT id2 FROM bar GROUP BY id2) AS s);
+ id
+----
+ 1
+(1 row)
+
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT id2 FROM bar UNION
+ SELECT id2 FROM bar) AS s);
+ id
+----
+ 1
+(1 row)
+
+--
+-- Test case to catch problems with multiply nested sub-SELECTs not getting
+-- recalculated properly. Per bug report from Didier Moens.
+--
+CREATE TABLE orderstest (
+ approver_ref integer,
+ po_ref integer,
+ ordercanceled boolean
+);
+INSERT INTO orderstest VALUES (1, 1, false);
+INSERT INTO orderstest VALUES (66, 5, false);
+INSERT INTO orderstest VALUES (66, 6, false);
+INSERT INTO orderstest VALUES (66, 7, false);
+INSERT INTO orderstest VALUES (66, 1, true);
+INSERT INTO orderstest VALUES (66, 8, false);
+INSERT INTO orderstest VALUES (66, 1, false);
+INSERT INTO orderstest VALUES (77, 1, false);
+INSERT INTO orderstest VALUES (1, 1, false);
+INSERT INTO orderstest VALUES (66, 1, false);
+INSERT INTO orderstest VALUES (1, 1, false);
+CREATE VIEW orders_view AS
+SELECT *,
+(SELECT CASE
+ WHEN ord.approver_ref=1 THEN '---' ELSE 'Approved'
+ END) AS "Approved",
+(SELECT CASE
+ WHEN ord.ordercanceled
+ THEN 'Canceled'
+ ELSE
+ (SELECT CASE
+ WHEN ord.po_ref=1
+ THEN
+ (SELECT CASE
+ WHEN ord.approver_ref=1
+ THEN '---'
+ ELSE 'Approved'
+ END)
+ ELSE 'PO'
+ END)
+END) AS "Status",
+(CASE
+ WHEN ord.ordercanceled
+ THEN 'Canceled'
+ ELSE
+ (CASE
+ WHEN ord.po_ref=1
+ THEN
+ (CASE
+ WHEN ord.approver_ref=1
+ THEN '---'
+ ELSE 'Approved'
+ END)
+ ELSE 'PO'
+ END)
+END) AS "Status_OK"
+FROM orderstest ord;
+SELECT * FROM orders_view;
+ approver_ref | po_ref | ordercanceled | Approved | Status | Status_OK
+--------------+--------+---------------+----------+----------+-----------
+ 1 | 1 | f | --- | --- | ---
+ 66 | 5 | f | Approved | PO | PO
+ 66 | 6 | f | Approved | PO | PO
+ 66 | 7 | f | Approved | PO | PO
+ 66 | 1 | t | Approved | Canceled | Canceled
+ 66 | 8 | f | Approved | PO | PO
+ 66 | 1 | f | Approved | Approved | Approved
+ 77 | 1 | f | Approved | Approved | Approved
+ 1 | 1 | f | --- | --- | ---
+ 66 | 1 | f | Approved | Approved | Approved
+ 1 | 1 | f | --- | --- | ---
+(11 rows)
+
+DROP TABLE orderstest cascade;
+NOTICE: drop cascades to view orders_view
+--
+-- Test cases to catch situations where rule rewriter fails to propagate
+-- hasSubLinks flag correctly. Per example from Kyle Bateman.
+--
+create temp table parts (
+ partnum text,
+ cost float8
+);
+create temp table shipped (
+ ttype char(2),
+ ordnum int4,
+ partnum text,
+ value float8
+);
+create temp view shipped_view as
+ select * from shipped where ttype = 'wt';
+create rule shipped_view_insert as on insert to shipped_view do instead
+ insert into shipped values('wt', new.ordnum, new.partnum, new.value);
+insert into parts (partnum, cost) values (1, 1234.56);
+insert into shipped_view (ordnum, partnum, value)
+ values (0, 1, (select cost from parts where partnum = '1'));
+select * from shipped_view;
+ ttype | ordnum | partnum | value
+-------+--------+---------+---------
+ wt | 0 | 1 | 1234.56
+(1 row)
+
+create rule shipped_view_update as on update to shipped_view do instead
+ update shipped set partnum = new.partnum, value = new.value
+ where ttype = new.ttype and ordnum = new.ordnum;
+update shipped_view set value = 11
+ from int4_tbl a join int4_tbl b
+ on (a.f1 = (select f1 from int4_tbl c where c.f1=b.f1))
+ where ordnum = a.f1;
+select * from shipped_view;
+ ttype | ordnum | partnum | value
+-------+--------+---------+-------
+ wt | 0 | 1 | 11
+(1 row)
+
+select f1, ss1 as relabel from
+ (select *, (select sum(f1) from int4_tbl b where f1 >= a.f1) as ss1
+ from int4_tbl a) ss;
+ f1 | relabel
+-------------+------------
+ 0 | 2147607103
+ 123456 | 2147607103
+ -123456 | 2147483647
+ 2147483647 | 2147483647
+ -2147483647 | 0
+(5 rows)
+
+--
+-- Test cases involving PARAM_EXEC parameters and min/max index optimizations.
+-- Per bug report from David Sanchez i Gregori.
+--
+select * from (
+ select max(unique1) from tenk1 as a
+ where exists (select 1 from tenk1 as b where b.thousand = a.unique2)
+) ss;
+ max
+------
+ 9997
+(1 row)
+
+select * from (
+ select min(unique1) from tenk1 as a
+ where not exists (select 1 from tenk1 as b where b.unique2 = 10000)
+) ss;
+ min
+-----
+ 0
+(1 row)
+
+--
+-- Test that an IN implemented using a UniquePath does unique-ification
+-- with the right semantics, as per bug #4113. (Unfortunately we have
+-- no simple way to ensure that this test case actually chooses that type
+-- of plan, but it does in releases 7.4-8.3. Note that an ordering difference
+-- here might mean that some other plan type is being used, rendering the test
+-- pointless.)
+--
+create temp table numeric_table (num_col numeric);
+insert into numeric_table values (1), (1.000000000000000000001), (2), (3);
+create temp table float_table (float_col float8);
+insert into float_table values (1), (2), (3);
+select * from float_table
+ where float_col in (select num_col from numeric_table);
+ float_col
+-----------
+ 1
+ 2
+ 3
+(3 rows)
+
+select * from numeric_table
+ where num_col in (select float_col from float_table);
+ num_col
+-------------------------
+ 1
+ 1.000000000000000000001
+ 2
+ 3
+(4 rows)
+
+--
+-- Test case for bug #4290: bogus calculation of subplan param sets
+--
+create temp table ta (id int primary key, val int);
+insert into ta values(1,1);
+insert into ta values(2,2);
+create temp table tb (id int primary key, aval int);
+insert into tb values(1,1);
+insert into tb values(2,1);
+insert into tb values(3,2);
+insert into tb values(4,2);
+create temp table tc (id int primary key, aid int);
+insert into tc values(1,1);
+insert into tc values(2,2);
+select
+ ( select min(tb.id) from tb
+ where tb.aval = (select ta.val from ta where ta.id = tc.aid) ) as min_tb_id
+from tc;
+ min_tb_id
+-----------
+ 1
+ 3
+(2 rows)
+
+--
+-- Test case for 8.3 "failed to locate grouping columns" bug
+--
+create temp table t1 (f1 numeric(14,0), f2 varchar(30));
+select * from
+ (select distinct f1, f2, (select f2 from t1 x where x.f1 = up.f1) as fs
+ from t1 up) ss
+group by f1,f2,fs;
+ f1 | f2 | fs
+----+----+----
+(0 rows)
+
+--
+-- Test case for bug #5514 (mishandling of whole-row Vars in subselects)
+--
+create temp table table_a(id integer);
+insert into table_a values (42);
+create temp view view_a as select * from table_a;
+select view_a from view_a;
+ view_a
+--------
+ (42)
+(1 row)
+
+select (select view_a) from view_a;
+ view_a
+--------
+ (42)
+(1 row)
+
+select (select (select view_a)) from view_a;
+ view_a
+--------
+ (42)
+(1 row)
+
+select (select (a.*)::text) from view_a a;
+ a
+------
+ (42)
+(1 row)
+
+--
+-- Check that whole-row Vars reading the result of a subselect don't include
+-- any junk columns therein
+--
+select q from (select max(f1) from int4_tbl group by f1 order by f1) q;
+ q
+---------------
+ (-2147483647)
+ (-123456)
+ (0)
+ (123456)
+ (2147483647)
+(5 rows)
+
+with q as (select max(f1) from int4_tbl group by f1 order by f1)
+ select q from q;
+ q
+---------------
+ (-2147483647)
+ (-123456)
+ (0)
+ (123456)
+ (2147483647)
+(5 rows)
+
+--
+-- Test case for sublinks pulled up into joinaliasvars lists in an
+-- inherited update/delete query
+--
+begin; -- this shouldn't delete anything, but be safe
+delete from road
+where exists (
+ select 1
+ from
+ int4_tbl cross join
+ ( select f1, array(select q1 from int8_tbl) as arr
+ from text_tbl ) ss
+ where road.name = ss.f1 );
+rollback;
+--
+-- Test case for sublinks pushed down into subselects via join alias expansion
+--
+select
+ (select sq1) as qq1
+from
+ (select exists(select 1 from int4_tbl where f1 = q2) as sq1, 42 as dummy
+ from int8_tbl) sq0
+ join
+ int4_tbl i4 on dummy = i4.f1;
+ qq1
+-----
+(0 rows)
+
+--
+-- Test case for subselect within UPDATE of INSERT...ON CONFLICT DO UPDATE
+--
+create temp table upsert(key int4 primary key, val text);
+insert into upsert values(1, 'val') on conflict (key) do update set val = 'not seen';
+insert into upsert values(1, 'val') on conflict (key) do update set val = 'seen with subselect ' || (select f1 from int4_tbl where f1 != 0 limit 1)::text;
+select * from upsert;
+ key | val
+-----+----------------------------
+ 1 | seen with subselect 123456
+(1 row)
+
+with aa as (select 'int4_tbl' u from int4_tbl limit 1)
+insert into upsert values (1, 'x'), (999, 'y')
+on conflict (key) do update set val = (select u from aa)
+returning *;
+ key | val
+-----+----------
+ 1 | int4_tbl
+ 999 | y
+(2 rows)
+
+--
+-- Test case for cross-type partial matching in hashed subplan (bug #7597)
+--
+create temp table outer_7597 (f1 int4, f2 int4);
+insert into outer_7597 values (0, 0);
+insert into outer_7597 values (1, 0);
+insert into outer_7597 values (0, null);
+insert into outer_7597 values (1, null);
+create temp table inner_7597(c1 int8, c2 int8);
+insert into inner_7597 values(0, null);
+select * from outer_7597 where (f1, f2) not in (select * from inner_7597);
+ f1 | f2
+----+----
+ 1 | 0
+ 1 |
+(2 rows)
+
+--
+-- Similar test case using text that verifies that collation
+-- information is passed through by execTuplesEqual() in nodeSubplan.c
+-- (otherwise it would error in texteq())
+--
+create temp table outer_text (f1 text, f2 text);
+insert into outer_text values ('a', 'a');
+insert into outer_text values ('b', 'a');
+insert into outer_text values ('a', null);
+insert into outer_text values ('b', null);
+create temp table inner_text (c1 text, c2 text);
+insert into inner_text values ('a', null);
+insert into inner_text values ('123', '456');
+select * from outer_text where (f1, f2) not in (select * from inner_text);
+ f1 | f2
+----+----
+ b | a
+ b |
+(2 rows)
+
+--
+-- Another test case for cross-type hashed subplans: comparison of
+-- inner-side values must be done with appropriate operator
+--
+explain (verbose, costs off)
+select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
+ QUERY PLAN
+-------------------------------------
+ Result
+ Output: (hashed SubPlan 1)
+ SubPlan 1
+ -> Append
+ -> Result
+ Output: 'bar'::name
+ -> Result
+ Output: 'bar'::name
+(8 rows)
+
+select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
+ ?column?
+----------
+ f
+(1 row)
+
+--
+-- Test that we don't try to hash nested records (bug #17363)
+-- (Hashing could be supported, but for now we don't)
+--
+explain (verbose, costs off)
+select row(row(row(1))) = any (select row(row(1)));
+ QUERY PLAN
+-------------------------------------------
+ Result
+ Output: (SubPlan 1)
+ SubPlan 1
+ -> Materialize
+ Output: '("(1)")'::record
+ -> Result
+ Output: '("(1)")'::record
+(7 rows)
+
+select row(row(row(1))) = any (select row(row(1)));
+ ?column?
+----------
+ t
+(1 row)
+
+--
+-- Test case for premature memory release during hashing of subplan output
+--
+select '1'::text in (select '1'::name union all select '1'::name);
+ ?column?
+----------
+ t
+(1 row)
+
+--
+-- Test that we don't try to use a hashed subplan if the simplified
+-- testexpr isn't of the right shape
+--
+-- this fails by default, of course
+select * from int8_tbl where q1 in (select c1 from inner_text);
+ERROR: operator does not exist: bigint = text
+LINE 1: select * from int8_tbl where q1 in (select c1 from inner_tex...
+ ^
+HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
+begin;
+-- make an operator to allow it to succeed
+create function bogus_int8_text_eq(int8, text) returns boolean
+language sql as 'select $1::text = $2';
+create operator = (procedure=bogus_int8_text_eq, leftarg=int8, rightarg=text);
+explain (costs off)
+select * from int8_tbl where q1 in (select c1 from inner_text);
+ QUERY PLAN
+--------------------------------
+ Seq Scan on int8_tbl
+ Filter: (hashed SubPlan 1)
+ SubPlan 1
+ -> Seq Scan on inner_text
+(4 rows)
+
+select * from int8_tbl where q1 in (select c1 from inner_text);
+ q1 | q2
+-----+------------------
+ 123 | 456
+ 123 | 4567890123456789
+(2 rows)
+
+-- inlining of this function results in unusual number of hash clauses,
+-- which we can still cope with
+create or replace function bogus_int8_text_eq(int8, text) returns boolean
+language sql as 'select $1::text = $2 and $1::text = $2';
+explain (costs off)
+select * from int8_tbl where q1 in (select c1 from inner_text);
+ QUERY PLAN
+--------------------------------
+ Seq Scan on int8_tbl
+ Filter: (hashed SubPlan 1)
+ SubPlan 1
+ -> Seq Scan on inner_text
+(4 rows)
+
+select * from int8_tbl where q1 in (select c1 from inner_text);
+ q1 | q2
+-----+------------------
+ 123 | 456
+ 123 | 4567890123456789
+(2 rows)
+
+-- inlining of this function causes LHS and RHS to be switched,
+-- which we can't cope with, so hashing should be abandoned
+create or replace function bogus_int8_text_eq(int8, text) returns boolean
+language sql as 'select $2 = $1::text';
+explain (costs off)
+select * from int8_tbl where q1 in (select c1 from inner_text);
+ QUERY PLAN
+--------------------------------------
+ Seq Scan on int8_tbl
+ Filter: (SubPlan 1)
+ SubPlan 1
+ -> Materialize
+ -> Seq Scan on inner_text
+(5 rows)
+
+select * from int8_tbl where q1 in (select c1 from inner_text);
+ q1 | q2
+-----+------------------
+ 123 | 456
+ 123 | 4567890123456789
+(2 rows)
+
+rollback; -- to get rid of the bogus operator
+--
+-- Test resolution of hashed vs non-hashed implementation of EXISTS subplan
+--
+explain (costs off)
+select count(*) from tenk1 t
+where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0);
+ QUERY PLAN
+--------------------------------------------------------------
+ Aggregate
+ -> Seq Scan on tenk1 t
+ Filter: ((hashed SubPlan 2) OR (ten < 0))
+ SubPlan 2
+ -> Index Only Scan using tenk1_unique1 on tenk1 k
+(5 rows)
+
+select count(*) from tenk1 t
+where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0);
+ count
+-------
+ 10000
+(1 row)
+
+explain (costs off)
+select count(*) from tenk1 t
+where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0)
+ and thousand = 1;
+ QUERY PLAN
+--------------------------------------------------------------
+ Aggregate
+ -> Bitmap Heap Scan on tenk1 t
+ Recheck Cond: (thousand = 1)
+ Filter: ((SubPlan 1) OR (ten < 0))
+ -> Bitmap Index Scan on tenk1_thous_tenthous
+ Index Cond: (thousand = 1)
+ SubPlan 1
+ -> Index Only Scan using tenk1_unique1 on tenk1 k
+ Index Cond: (unique1 = t.unique2)
+(9 rows)
+
+select count(*) from tenk1 t
+where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0)
+ and thousand = 1;
+ count
+-------
+ 10
+(1 row)
+
+-- It's possible for the same EXISTS to get resolved both ways
+create temp table exists_tbl (c1 int, c2 int, c3 int) partition by list (c1);
+create temp table exists_tbl_null partition of exists_tbl for values in (null);
+create temp table exists_tbl_def partition of exists_tbl default;
+insert into exists_tbl select x, x/2, x+1 from generate_series(0,10) x;
+analyze exists_tbl;
+explain (costs off)
+select * from exists_tbl t1
+ where (exists(select 1 from exists_tbl t2 where t1.c1 = t2.c2) or c3 < 0);
+ QUERY PLAN
+------------------------------------------------------
+ Append
+ -> Seq Scan on exists_tbl_null t1_1
+ Filter: ((SubPlan 1) OR (c3 < 0))
+ SubPlan 1
+ -> Append
+ -> Seq Scan on exists_tbl_null t2_1
+ Filter: (t1_1.c1 = c2)
+ -> Seq Scan on exists_tbl_def t2_2
+ Filter: (t1_1.c1 = c2)
+ -> Seq Scan on exists_tbl_def t1_2
+ Filter: ((hashed SubPlan 2) OR (c3 < 0))
+ SubPlan 2
+ -> Append
+ -> Seq Scan on exists_tbl_null t2_4
+ -> Seq Scan on exists_tbl_def t2_5
+(15 rows)
+
+select * from exists_tbl t1
+ where (exists(select 1 from exists_tbl t2 where t1.c1 = t2.c2) or c3 < 0);
+ c1 | c2 | c3
+----+----+----
+ 0 | 0 | 1
+ 1 | 0 | 2
+ 2 | 1 | 3
+ 3 | 1 | 4
+ 4 | 2 | 5
+ 5 | 2 | 6
+(6 rows)
+
+--
+-- Test case for planner bug with nested EXISTS handling
+--
+select a.thousand from tenk1 a, tenk1 b
+where a.thousand = b.thousand
+ and exists ( select 1 from tenk1 c where b.hundred = c.hundred
+ and not exists ( select 1 from tenk1 d
+ where a.thousand = d.thousand ) );
+ thousand
+----------
+(0 rows)
+
+--
+-- Check that nested sub-selects are not pulled up if they contain volatiles
+--
+explain (verbose, costs off)
+ select x, x from
+ (select (select now()) as x from (values(1),(2)) v(y)) ss;
+ QUERY PLAN
+---------------------------
+ Values Scan on "*VALUES*"
+ Output: $0, $1
+ InitPlan 1 (returns $0)
+ -> Result
+ Output: now()
+ InitPlan 2 (returns $1)
+ -> Result
+ Output: now()
+(8 rows)
+
+explain (verbose, costs off)
+ select x, x from
+ (select (select random()) as x from (values(1),(2)) v(y)) ss;
+ QUERY PLAN
+----------------------------------
+ Subquery Scan on ss
+ Output: ss.x, ss.x
+ -> Values Scan on "*VALUES*"
+ Output: $0
+ InitPlan 1 (returns $0)
+ -> Result
+ Output: random()
+(7 rows)
+
+explain (verbose, costs off)
+ select x, x from
+ (select (select now() where y=y) as x from (values(1),(2)) v(y)) ss;
+ QUERY PLAN
+----------------------------------------------------------------------
+ Values Scan on "*VALUES*"
+ Output: (SubPlan 1), (SubPlan 2)
+ SubPlan 1
+ -> Result
+ Output: now()
+ One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1)
+ SubPlan 2
+ -> Result
+ Output: now()
+ One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1)
+(10 rows)
+
+explain (verbose, costs off)
+ select x, x from
+ (select (select random() where y=y) as x from (values(1),(2)) v(y)) ss;
+ QUERY PLAN
+----------------------------------------------------------------------------
+ Subquery Scan on ss
+ Output: ss.x, ss.x
+ -> Values Scan on "*VALUES*"
+ Output: (SubPlan 1)
+ SubPlan 1
+ -> Result
+ Output: random()
+ One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1)
+(8 rows)
+
+--
+-- Test rescan of a hashed subplan (the use of random() is to prevent the
+-- sub-select from being pulled up, which would result in not hashing)
+--
+explain (verbose, costs off)
+select sum(ss.tst::int) from
+ onek o cross join lateral (
+ select i.ten in (select f1 from int4_tbl where f1 <= o.hundred) as tst,
+ random() as r
+ from onek i where i.unique1 = o.unique1 ) ss
+where o.ten = 0;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Aggregate
+ Output: sum((((hashed SubPlan 1)))::integer)
+ -> Nested Loop
+ Output: ((hashed SubPlan 1))
+ -> Seq Scan on public.onek o
+ Output: o.unique1, o.unique2, o.two, o.four, o.ten, o.twenty, o.hundred, o.thousand, o.twothousand, o.fivethous, o.tenthous, o.odd, o.even, o.stringu1, o.stringu2, o.string4
+ Filter: (o.ten = 0)
+ -> Index Scan using onek_unique1 on public.onek i
+ Output: (hashed SubPlan 1), random()
+ Index Cond: (i.unique1 = o.unique1)
+ SubPlan 1
+ -> Seq Scan on public.int4_tbl
+ Output: int4_tbl.f1
+ Filter: (int4_tbl.f1 <= $0)
+(14 rows)
+
+select sum(ss.tst::int) from
+ onek o cross join lateral (
+ select i.ten in (select f1 from int4_tbl where f1 <= o.hundred) as tst,
+ random() as r
+ from onek i where i.unique1 = o.unique1 ) ss
+where o.ten = 0;
+ sum
+-----
+ 100
+(1 row)
+
+--
+-- Test rescan of a SetOp node
+--
+explain (costs off)
+select count(*) from
+ onek o cross join lateral (
+ select * from onek i1 where i1.unique1 = o.unique1
+ except
+ select * from onek i2 where i2.unique1 = o.unique2
+ ) ss
+where o.ten = 1;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Aggregate
+ -> Nested Loop
+ -> Seq Scan on onek o
+ Filter: (ten = 1)
+ -> Subquery Scan on ss
+ -> HashSetOp Except
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Index Scan using onek_unique1 on onek i1
+ Index Cond: (unique1 = o.unique1)
+ -> Subquery Scan on "*SELECT* 2"
+ -> Index Scan using onek_unique1 on onek i2
+ Index Cond: (unique1 = o.unique2)
+(13 rows)
+
+select count(*) from
+ onek o cross join lateral (
+ select * from onek i1 where i1.unique1 = o.unique1
+ except
+ select * from onek i2 where i2.unique1 = o.unique2
+ ) ss
+where o.ten = 1;
+ count
+-------
+ 100
+(1 row)
+
+--
+-- Test rescan of a RecursiveUnion node
+--
+explain (costs off)
+select sum(o.four), sum(ss.a) from
+ onek o cross join lateral (
+ with recursive x(a) as
+ (select o.four as a
+ union
+ select a + 1 from x
+ where a < 10)
+ select * from x
+ ) ss
+where o.ten = 1;
+ QUERY PLAN
+---------------------------------------------------------
+ Aggregate
+ -> Nested Loop
+ -> Seq Scan on onek o
+ Filter: (ten = 1)
+ -> Memoize
+ Cache Key: o.four
+ Cache Mode: binary
+ -> CTE Scan on x
+ CTE x
+ -> Recursive Union
+ -> Result
+ -> WorkTable Scan on x x_1
+ Filter: (a < 10)
+(13 rows)
+
+select sum(o.four), sum(ss.a) from
+ onek o cross join lateral (
+ with recursive x(a) as
+ (select o.four as a
+ union
+ select a + 1 from x
+ where a < 10)
+ select * from x
+ ) ss
+where o.ten = 1;
+ sum | sum
+------+------
+ 1700 | 5350
+(1 row)
+
+--
+-- Check we don't misoptimize a NOT IN where the subquery returns no rows.
+--
+create temp table notinouter (a int);
+create temp table notininner (b int not null);
+insert into notinouter values (null), (1);
+select * from notinouter where a not in (select b from notininner);
+ a
+---
+
+ 1
+(2 rows)
+
+--
+-- Check we behave sanely in corner case of empty SELECT list (bug #8648)
+--
+create temp table nocolumns();
+select exists(select * from nocolumns);
+ exists
+--------
+ f
+(1 row)
+
+--
+-- Check behavior with a SubPlan in VALUES (bug #14924)
+--
+select val.x
+ from generate_series(1,10) as s(i),
+ lateral (
+ values ((select s.i + 1)), (s.i + 101)
+ ) as val(x)
+where s.i < 10 and (select val.x) < 110;
+ x
+-----
+ 2
+ 102
+ 3
+ 103
+ 4
+ 104
+ 5
+ 105
+ 6
+ 106
+ 7
+ 107
+ 8
+ 108
+ 9
+ 109
+ 10
+(17 rows)
+
+-- another variant of that (bug #16213)
+explain (verbose, costs off)
+select * from
+(values
+ (3 not in (select * from (values (1), (2)) ss1)),
+ (false)
+) ss;
+ QUERY PLAN
+----------------------------------------
+ Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1
+ SubPlan 1
+ -> Values Scan on "*VALUES*_1"
+ Output: "*VALUES*_1".column1
+(5 rows)
+
+select * from
+(values
+ (3 not in (select * from (values (1), (2)) ss1)),
+ (false)
+) ss;
+ column1
+---------
+ t
+ f
+(2 rows)
+
+--
+-- Check sane behavior with nested IN SubLinks
+--
+explain (verbose, costs off)
+select * from int4_tbl where
+ (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
+ (select ten from tenk1 b);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop Semi Join
+ Output: int4_tbl.f1
+ Join Filter: (CASE WHEN (hashed SubPlan 1) THEN int4_tbl.f1 ELSE NULL::integer END = b.ten)
+ -> Seq Scan on public.int4_tbl
+ Output: int4_tbl.f1
+ -> Seq Scan on public.tenk1 b
+ Output: b.unique1, b.unique2, b.two, b.four, b.ten, b.twenty, b.hundred, b.thousand, b.twothousand, b.fivethous, b.tenthous, b.odd, b.even, b.stringu1, b.stringu2, b.string4
+ SubPlan 1
+ -> Index Only Scan using tenk1_unique1 on public.tenk1 a
+ Output: a.unique1
+(10 rows)
+
+select * from int4_tbl where
+ (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
+ (select ten from tenk1 b);
+ f1
+----
+ 0
+(1 row)
+
+--
+-- Check for incorrect optimization when IN subquery contains a SRF
+--
+explain (verbose, costs off)
+select * from int4_tbl o where (f1, f1) in
+ (select f1, generate_series(1,50) / 10 g from int4_tbl i group by f1);
+ QUERY PLAN
+-------------------------------------------------------------------
+ Nested Loop Semi Join
+ Output: o.f1
+ Join Filter: (o.f1 = "ANY_subquery".f1)
+ -> Seq Scan on public.int4_tbl o
+ Output: o.f1
+ -> Materialize
+ Output: "ANY_subquery".f1, "ANY_subquery".g
+ -> Subquery Scan on "ANY_subquery"
+ Output: "ANY_subquery".f1, "ANY_subquery".g
+ Filter: ("ANY_subquery".f1 = "ANY_subquery".g)
+ -> Result
+ Output: i.f1, ((generate_series(1, 50)) / 10)
+ -> ProjectSet
+ Output: generate_series(1, 50), i.f1
+ -> HashAggregate
+ Output: i.f1
+ Group Key: i.f1
+ -> Seq Scan on public.int4_tbl i
+ Output: i.f1
+(19 rows)
+
+select * from int4_tbl o where (f1, f1) in
+ (select f1, generate_series(1,50) / 10 g from int4_tbl i group by f1);
+ f1
+----
+ 0
+(1 row)
+
+--
+-- check for over-optimization of whole-row Var referencing an Append plan
+--
+select (select q from
+ (select 1,2,3 where f1 > 0
+ union all
+ select 4,5,6.0 where f1 <= 0
+ ) q )
+from int4_tbl;
+ q
+-----------
+ (4,5,6.0)
+ (1,2,3)
+ (4,5,6.0)
+ (1,2,3)
+ (4,5,6.0)
+(5 rows)
+
+--
+-- Check for sane handling of a lateral reference in a subquery's quals
+-- (most of the complication here is to prevent the test case from being
+-- flattened too much)
+--
+explain (verbose, costs off)
+select * from
+ int4_tbl i4,
+ lateral (
+ select i4.f1 > 1 as b, 1 as id
+ from (select random() order by 1) as t1
+ union all
+ select true as b, 2 as id
+ ) as t2
+where b and f1 >= 0;
+ QUERY PLAN
+--------------------------------------------
+ Nested Loop
+ Output: i4.f1, ((i4.f1 > 1)), (1)
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+ Filter: (i4.f1 >= 0)
+ -> Append
+ -> Subquery Scan on t1
+ Output: (i4.f1 > 1), 1
+ Filter: (i4.f1 > 1)
+ -> Sort
+ Output: (random())
+ Sort Key: (random())
+ -> Result
+ Output: random()
+ -> Result
+ Output: true, 2
+(16 rows)
+
+select * from
+ int4_tbl i4,
+ lateral (
+ select i4.f1 > 1 as b, 1 as id
+ from (select random() order by 1) as t1
+ union all
+ select true as b, 2 as id
+ ) as t2
+where b and f1 >= 0;
+ f1 | b | id
+------------+---+----
+ 0 | t | 2
+ 123456 | t | 1
+ 123456 | t | 2
+ 2147483647 | t | 1
+ 2147483647 | t | 2
+(5 rows)
+
+--
+-- Check that volatile quals aren't pushed down past a DISTINCT:
+-- nextval() should not be called more than the nominal number of times
+--
+create temp sequence ts1;
+select * from
+ (select distinct ten from tenk1) ss
+ where ten < 10 + nextval('ts1')
+ order by 1;
+ ten
+-----
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+(10 rows)
+
+select nextval('ts1');
+ nextval
+---------
+ 11
+(1 row)
+
+--
+-- Check that volatile quals aren't pushed down past a set-returning function;
+-- while a nonvolatile qual can be, if it doesn't reference the SRF.
+--
+create function tattle(x int, y int) returns bool
+volatile language plpgsql as $$
+begin
+ raise notice 'x = %, y = %', x, y;
+ return x > y;
+end$$;
+explain (verbose, costs off)
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, 8);
+ QUERY PLAN
+----------------------------------------------------------
+ Subquery Scan on ss
+ Output: ss.x, ss.u
+ Filter: tattle(ss.x, 8)
+ -> ProjectSet
+ Output: 9, unnest('{1,2,3,11,12,13}'::integer[])
+ -> Result
+(6 rows)
+
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, 8);
+NOTICE: x = 9, y = 8
+NOTICE: x = 9, y = 8
+NOTICE: x = 9, y = 8
+NOTICE: x = 9, y = 8
+NOTICE: x = 9, y = 8
+NOTICE: x = 9, y = 8
+ x | u
+---+----
+ 9 | 1
+ 9 | 2
+ 9 | 3
+ 9 | 11
+ 9 | 12
+ 9 | 13
+(6 rows)
+
+-- if we pretend it's stable, we get different results:
+alter function tattle(x int, y int) stable;
+explain (verbose, costs off)
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, 8);
+ QUERY PLAN
+----------------------------------------------------
+ ProjectSet
+ Output: 9, unnest('{1,2,3,11,12,13}'::integer[])
+ -> Result
+ One-Time Filter: tattle(9, 8)
+(4 rows)
+
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, 8);
+NOTICE: x = 9, y = 8
+ x | u
+---+----
+ 9 | 1
+ 9 | 2
+ 9 | 3
+ 9 | 11
+ 9 | 12
+ 9 | 13
+(6 rows)
+
+-- although even a stable qual should not be pushed down if it references SRF
+explain (verbose, costs off)
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, u);
+ QUERY PLAN
+----------------------------------------------------------
+ Subquery Scan on ss
+ Output: ss.x, ss.u
+ Filter: tattle(ss.x, ss.u)
+ -> ProjectSet
+ Output: 9, unnest('{1,2,3,11,12,13}'::integer[])
+ -> Result
+(6 rows)
+
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, u);
+NOTICE: x = 9, y = 1
+NOTICE: x = 9, y = 2
+NOTICE: x = 9, y = 3
+NOTICE: x = 9, y = 11
+NOTICE: x = 9, y = 12
+NOTICE: x = 9, y = 13
+ x | u
+---+---
+ 9 | 1
+ 9 | 2
+ 9 | 3
+(3 rows)
+
+drop function tattle(x int, y int);
+--
+-- Test that LIMIT can be pushed to SORT through a subquery that just projects
+-- columns. We check for that having happened by looking to see if EXPLAIN
+-- ANALYZE shows that a top-N sort was used. We must suppress or filter away
+-- all the non-invariant parts of the EXPLAIN ANALYZE output.
+--
+create table sq_limit (pk int primary key, c1 int, c2 int);
+insert into sq_limit values
+ (1, 1, 1),
+ (2, 2, 2),
+ (3, 3, 3),
+ (4, 4, 4),
+ (5, 1, 1),
+ (6, 2, 2),
+ (7, 3, 3),
+ (8, 4, 4);
+create function explain_sq_limit() returns setof text language plpgsql as
+$$
+declare ln text;
+begin
+ for ln in
+ explain (analyze, summary off, timing off, costs off)
+ select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3
+ loop
+ ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx');
+ return next ln;
+ end loop;
+end;
+$$;
+select * from explain_sq_limit();
+ explain_sq_limit
+----------------------------------------------------------------
+ Limit (actual rows=3 loops=1)
+ -> Subquery Scan on x (actual rows=3 loops=1)
+ -> Sort (actual rows=3 loops=1)
+ Sort Key: sq_limit.c1, sq_limit.pk
+ Sort Method: top-N heapsort Memory: xxx
+ -> Seq Scan on sq_limit (actual rows=8 loops=1)
+(6 rows)
+
+select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3;
+ pk | c2
+----+----
+ 1 | 1
+ 5 | 1
+ 2 | 2
+(3 rows)
+
+drop function explain_sq_limit();
+drop table sq_limit;
+--
+-- Ensure that backward scan direction isn't propagated into
+-- expression subqueries (bug #15336)
+--
+begin;
+declare c1 scroll cursor for
+ select * from generate_series(1,4) i
+ where i <> all (values (2),(3));
+move forward all in c1;
+fetch backward all in c1;
+ i
+---
+ 4
+ 1
+(2 rows)
+
+commit;
+--
+-- Tests for CTE inlining behavior
+--
+-- Basic subquery that can be inlined
+explain (verbose, costs off)
+with x as (select * from (select f1 from subselect_tbl) ss)
+select * from x where f1 = 1;
+ QUERY PLAN
+----------------------------------
+ Seq Scan on public.subselect_tbl
+ Output: subselect_tbl.f1
+ Filter: (subselect_tbl.f1 = 1)
+(3 rows)
+
+-- Explicitly request materialization
+explain (verbose, costs off)
+with x as materialized (select * from (select f1 from subselect_tbl) ss)
+select * from x where f1 = 1;
+ QUERY PLAN
+------------------------------------------
+ CTE Scan on x
+ Output: x.f1
+ Filter: (x.f1 = 1)
+ CTE x
+ -> Seq Scan on public.subselect_tbl
+ Output: subselect_tbl.f1
+(6 rows)
+
+-- Stable functions are safe to inline
+explain (verbose, costs off)
+with x as (select * from (select f1, now() from subselect_tbl) ss)
+select * from x where f1 = 1;
+ QUERY PLAN
+-----------------------------------
+ Seq Scan on public.subselect_tbl
+ Output: subselect_tbl.f1, now()
+ Filter: (subselect_tbl.f1 = 1)
+(3 rows)
+
+-- Volatile functions prevent inlining
+explain (verbose, costs off)
+with x as (select * from (select f1, random() from subselect_tbl) ss)
+select * from x where f1 = 1;
+ QUERY PLAN
+----------------------------------------------
+ CTE Scan on x
+ Output: x.f1, x.random
+ Filter: (x.f1 = 1)
+ CTE x
+ -> Seq Scan on public.subselect_tbl
+ Output: subselect_tbl.f1, random()
+(6 rows)
+
+-- SELECT FOR UPDATE cannot be inlined
+explain (verbose, costs off)
+with x as (select * from (select f1 from subselect_tbl for update) ss)
+select * from x where f1 = 1;
+ QUERY PLAN
+--------------------------------------------------------------------
+ CTE Scan on x
+ Output: x.f1
+ Filter: (x.f1 = 1)
+ CTE x
+ -> Subquery Scan on ss
+ Output: ss.f1
+ -> LockRows
+ Output: subselect_tbl.f1, subselect_tbl.ctid
+ -> Seq Scan on public.subselect_tbl
+ Output: subselect_tbl.f1, subselect_tbl.ctid
+(10 rows)
+
+-- Multiply-referenced CTEs are inlined only when requested
+explain (verbose, costs off)
+with x as (select * from (select f1, now() as n from subselect_tbl) ss)
+select * from x, x x2 where x.n = x2.n;
+ QUERY PLAN
+-------------------------------------------
+ Merge Join
+ Output: x.f1, x.n, x2.f1, x2.n
+ Merge Cond: (x.n = x2.n)
+ CTE x
+ -> Seq Scan on public.subselect_tbl
+ Output: subselect_tbl.f1, now()
+ -> Sort
+ Output: x.f1, x.n
+ Sort Key: x.n
+ -> CTE Scan on x
+ Output: x.f1, x.n
+ -> Sort
+ Output: x2.f1, x2.n
+ Sort Key: x2.n
+ -> CTE Scan on x x2
+ Output: x2.f1, x2.n
+(16 rows)
+
+explain (verbose, costs off)
+with x as not materialized (select * from (select f1, now() as n from subselect_tbl) ss)
+select * from x, x x2 where x.n = x2.n;
+ QUERY PLAN
+----------------------------------------------------------------------------
+ Result
+ Output: subselect_tbl.f1, now(), subselect_tbl_1.f1, now()
+ One-Time Filter: (now() = now())
+ -> Nested Loop
+ Output: subselect_tbl.f1, subselect_tbl_1.f1
+ -> Seq Scan on public.subselect_tbl
+ Output: subselect_tbl.f1, subselect_tbl.f2, subselect_tbl.f3
+ -> Materialize
+ Output: subselect_tbl_1.f1
+ -> Seq Scan on public.subselect_tbl subselect_tbl_1
+ Output: subselect_tbl_1.f1
+(11 rows)
+
+-- Multiply-referenced CTEs can't be inlined if they contain outer self-refs
+explain (verbose, costs off)
+with recursive x(a) as
+ ((values ('a'), ('b'))
+ union all
+ (with z as not materialized (select * from x)
+ select z.a || z1.a as a from z cross join z as z1
+ where length(z.a || z1.a) < 5))
+select * from x;
+ QUERY PLAN
+----------------------------------------------------------
+ CTE Scan on x
+ Output: x.a
+ CTE x
+ -> Recursive Union
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1
+ -> Nested Loop
+ Output: (z.a || z1.a)
+ Join Filter: (length((z.a || z1.a)) < 5)
+ CTE z
+ -> WorkTable Scan on x x_1
+ Output: x_1.a
+ -> CTE Scan on z
+ Output: z.a
+ -> CTE Scan on z z1
+ Output: z1.a
+(16 rows)
+
+with recursive x(a) as
+ ((values ('a'), ('b'))
+ union all
+ (with z as not materialized (select * from x)
+ select z.a || z1.a as a from z cross join z as z1
+ where length(z.a || z1.a) < 5))
+select * from x;
+ a
+------
+ a
+ b
+ aa
+ ab
+ ba
+ bb
+ aaaa
+ aaab
+ aaba
+ aabb
+ abaa
+ abab
+ abba
+ abbb
+ baaa
+ baab
+ baba
+ babb
+ bbaa
+ bbab
+ bbba
+ bbbb
+(22 rows)
+
+explain (verbose, costs off)
+with recursive x(a) as
+ ((values ('a'), ('b'))
+ union all
+ (with z as not materialized (select * from x)
+ select z.a || z.a as a from z
+ where length(z.a || z.a) < 5))
+select * from x;
+ QUERY PLAN
+--------------------------------------------------------
+ CTE Scan on x
+ Output: x.a
+ CTE x
+ -> Recursive Union
+ -> Values Scan on "*VALUES*"
+ Output: "*VALUES*".column1
+ -> WorkTable Scan on x x_1
+ Output: (x_1.a || x_1.a)
+ Filter: (length((x_1.a || x_1.a)) < 5)
+(9 rows)
+
+with recursive x(a) as
+ ((values ('a'), ('b'))
+ union all
+ (with z as not materialized (select * from x)
+ select z.a || z.a as a from z
+ where length(z.a || z.a) < 5))
+select * from x;
+ a
+------
+ a
+ b
+ aa
+ bb
+ aaaa
+ bbbb
+(6 rows)
+
+-- Check handling of outer references
+explain (verbose, costs off)
+with x as (select * from int4_tbl)
+select * from (with y as (select * from x) select * from y) ss;
+ QUERY PLAN
+-----------------------------
+ Seq Scan on public.int4_tbl
+ Output: int4_tbl.f1
+(2 rows)
+
+explain (verbose, costs off)
+with x as materialized (select * from int4_tbl)
+select * from (with y as (select * from x) select * from y) ss;
+ QUERY PLAN
+-------------------------------------
+ CTE Scan on x
+ Output: x.f1
+ CTE x
+ -> Seq Scan on public.int4_tbl
+ Output: int4_tbl.f1
+(5 rows)
+
+-- Ensure that we inline the currect CTE when there are
+-- multiple CTEs with the same name
+explain (verbose, costs off)
+with x as (select 1 as y)
+select * from (with x as (select 2 as y) select * from x) ss;
+ QUERY PLAN
+-------------
+ Result
+ Output: 2
+(2 rows)
+
+-- Row marks are not pushed into CTEs
+explain (verbose, costs off)
+with x as (select * from subselect_tbl)
+select * from x for update;
+ QUERY PLAN
+----------------------------------------------------------------
+ Seq Scan on public.subselect_tbl
+ Output: subselect_tbl.f1, subselect_tbl.f2, subselect_tbl.f3
+(2 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/subselect.sql b/ydb/library/yql/tests/postgresql/original/cases/subselect.sql
new file mode 100644
index 0000000000..94ba91f5bb
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/subselect.sql
@@ -0,0 +1,939 @@
+--
+-- SUBSELECT
+--
+
+SELECT 1 AS one WHERE 1 IN (SELECT 1);
+
+SELECT 1 AS zero WHERE 1 NOT IN (SELECT 1);
+
+SELECT 1 AS zero WHERE 1 IN (SELECT 2);
+
+-- Check grammar's handling of extra parens in assorted contexts
+
+SELECT * FROM (SELECT 1 AS x) ss;
+SELECT * FROM ((SELECT 1 AS x)) ss;
+
+(SELECT 2) UNION SELECT 2;
+((SELECT 2)) UNION SELECT 2;
+
+SELECT ((SELECT 2) UNION SELECT 2);
+SELECT (((SELECT 2)) UNION SELECT 2);
+
+SELECT (SELECT ARRAY[1,2,3])[1];
+SELECT ((SELECT ARRAY[1,2,3]))[2];
+SELECT (((SELECT ARRAY[1,2,3])))[3];
+
+-- Set up some simple test tables
+
+CREATE TABLE SUBSELECT_TBL (
+ f1 integer,
+ f2 integer,
+ f3 float
+);
+
+INSERT INTO SUBSELECT_TBL VALUES (1, 2, 3);
+INSERT INTO SUBSELECT_TBL VALUES (2, 3, 4);
+INSERT INTO SUBSELECT_TBL VALUES (3, 4, 5);
+INSERT INTO SUBSELECT_TBL VALUES (1, 1, 1);
+INSERT INTO SUBSELECT_TBL VALUES (2, 2, 2);
+INSERT INTO SUBSELECT_TBL VALUES (3, 3, 3);
+INSERT INTO SUBSELECT_TBL VALUES (6, 7, 8);
+INSERT INTO SUBSELECT_TBL VALUES (8, 9, NULL);
+
+SELECT * FROM SUBSELECT_TBL;
+
+-- Uncorrelated subselects
+
+SELECT f1 AS "Constant Select" FROM SUBSELECT_TBL
+ WHERE f1 IN (SELECT 1);
+
+SELECT f1 AS "Uncorrelated Field" FROM SUBSELECT_TBL
+ WHERE f1 IN (SELECT f2 FROM SUBSELECT_TBL);
+
+SELECT f1 AS "Uncorrelated Field" FROM SUBSELECT_TBL
+ WHERE f1 IN (SELECT f2 FROM SUBSELECT_TBL WHERE
+ f2 IN (SELECT f1 FROM SUBSELECT_TBL));
+
+SELECT f1, f2
+ FROM SUBSELECT_TBL
+ WHERE (f1, f2) NOT IN (SELECT f2, CAST(f3 AS int4) FROM SUBSELECT_TBL
+ WHERE f3 IS NOT NULL);
+
+-- Correlated subselects
+
+SELECT f1 AS "Correlated Field", f2 AS "Second Field"
+ FROM SUBSELECT_TBL upper
+ WHERE f1 IN (SELECT f2 FROM SUBSELECT_TBL WHERE f1 = upper.f1);
+
+SELECT f1 AS "Correlated Field", f3 AS "Second Field"
+ FROM SUBSELECT_TBL upper
+ WHERE f1 IN
+ (SELECT f2 FROM SUBSELECT_TBL WHERE CAST(upper.f2 AS float) = f3);
+
+SELECT f1 AS "Correlated Field", f3 AS "Second Field"
+ FROM SUBSELECT_TBL upper
+ WHERE f3 IN (SELECT upper.f1 + f2 FROM SUBSELECT_TBL
+ WHERE f2 = CAST(f3 AS integer));
+
+SELECT f1 AS "Correlated Field"
+ FROM SUBSELECT_TBL
+ WHERE (f1, f2) IN (SELECT f2, CAST(f3 AS int4) FROM SUBSELECT_TBL
+ WHERE f3 IS NOT NULL);
+
+--
+-- Use some existing tables in the regression test
+--
+
+SELECT ss.f1 AS "Correlated Field", ss.f3 AS "Second Field"
+ FROM SUBSELECT_TBL ss
+ WHERE f1 NOT IN (SELECT f1+1 FROM INT4_TBL
+ WHERE f1 != ss.f1 AND f1 < 2147483647);
+
+select q1, float8(count(*)) / (select count(*) from int8_tbl)
+from int8_tbl group by q1 order by q1;
+
+-- Unspecified-type literals in output columns should resolve as text
+
+SELECT *, pg_typeof(f1) FROM
+ (SELECT 'foo' AS f1 FROM generate_series(1,3)) ss ORDER BY 1;
+
+-- ... unless there's context to suggest differently
+
+explain (verbose, costs off) select '42' union all select '43';
+explain (verbose, costs off) select '42' union all select 43;
+
+-- check materialization of an initplan reference (bug #14524)
+explain (verbose, costs off)
+select 1 = all (select (select 1));
+select 1 = all (select (select 1));
+
+--
+-- Check EXISTS simplification with LIMIT
+--
+explain (costs off)
+select * from int4_tbl o where exists
+ (select 1 from int4_tbl i where i.f1=o.f1 limit null);
+explain (costs off)
+select * from int4_tbl o where not exists
+ (select 1 from int4_tbl i where i.f1=o.f1 limit 1);
+explain (costs off)
+select * from int4_tbl o where exists
+ (select 1 from int4_tbl i where i.f1=o.f1 limit 0);
+
+--
+-- Test cases to catch unpleasant interactions between IN-join processing
+-- and subquery pullup.
+--
+
+select count(*) from
+ (select 1 from tenk1 a
+ where unique1 IN (select hundred from tenk1 b)) ss;
+select count(distinct ss.ten) from
+ (select ten from tenk1 a
+ where unique1 IN (select hundred from tenk1 b)) ss;
+select count(*) from
+ (select 1 from tenk1 a
+ where unique1 IN (select distinct hundred from tenk1 b)) ss;
+select count(distinct ss.ten) from
+ (select ten from tenk1 a
+ where unique1 IN (select distinct hundred from tenk1 b)) ss;
+
+--
+-- Test cases to check for overenthusiastic optimization of
+-- "IN (SELECT DISTINCT ...)" and related cases. Per example from
+-- Luca Pireddu and Michael Fuhr.
+--
+
+CREATE TEMP TABLE foo (id integer);
+CREATE TEMP TABLE bar (id1 integer, id2 integer);
+
+INSERT INTO foo VALUES (1);
+
+INSERT INTO bar VALUES (1, 1);
+INSERT INTO bar VALUES (2, 2);
+INSERT INTO bar VALUES (3, 1);
+
+-- These cases require an extra level of distinct-ing above subquery s
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT DISTINCT id1, id2 FROM bar) AS s);
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT id1,id2 FROM bar GROUP BY id1,id2) AS s);
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT id1, id2 FROM bar UNION
+ SELECT id1, id2 FROM bar) AS s);
+
+-- These cases do not
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT DISTINCT ON (id2) id1, id2 FROM bar) AS s);
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT id2 FROM bar GROUP BY id2) AS s);
+SELECT * FROM foo WHERE id IN
+ (SELECT id2 FROM (SELECT id2 FROM bar UNION
+ SELECT id2 FROM bar) AS s);
+
+--
+-- Test case to catch problems with multiply nested sub-SELECTs not getting
+-- recalculated properly. Per bug report from Didier Moens.
+--
+
+CREATE TABLE orderstest (
+ approver_ref integer,
+ po_ref integer,
+ ordercanceled boolean
+);
+
+INSERT INTO orderstest VALUES (1, 1, false);
+INSERT INTO orderstest VALUES (66, 5, false);
+INSERT INTO orderstest VALUES (66, 6, false);
+INSERT INTO orderstest VALUES (66, 7, false);
+INSERT INTO orderstest VALUES (66, 1, true);
+INSERT INTO orderstest VALUES (66, 8, false);
+INSERT INTO orderstest VALUES (66, 1, false);
+INSERT INTO orderstest VALUES (77, 1, false);
+INSERT INTO orderstest VALUES (1, 1, false);
+INSERT INTO orderstest VALUES (66, 1, false);
+INSERT INTO orderstest VALUES (1, 1, false);
+
+CREATE VIEW orders_view AS
+SELECT *,
+(SELECT CASE
+ WHEN ord.approver_ref=1 THEN '---' ELSE 'Approved'
+ END) AS "Approved",
+(SELECT CASE
+ WHEN ord.ordercanceled
+ THEN 'Canceled'
+ ELSE
+ (SELECT CASE
+ WHEN ord.po_ref=1
+ THEN
+ (SELECT CASE
+ WHEN ord.approver_ref=1
+ THEN '---'
+ ELSE 'Approved'
+ END)
+ ELSE 'PO'
+ END)
+END) AS "Status",
+(CASE
+ WHEN ord.ordercanceled
+ THEN 'Canceled'
+ ELSE
+ (CASE
+ WHEN ord.po_ref=1
+ THEN
+ (CASE
+ WHEN ord.approver_ref=1
+ THEN '---'
+ ELSE 'Approved'
+ END)
+ ELSE 'PO'
+ END)
+END) AS "Status_OK"
+FROM orderstest ord;
+
+SELECT * FROM orders_view;
+
+DROP TABLE orderstest cascade;
+
+--
+-- Test cases to catch situations where rule rewriter fails to propagate
+-- hasSubLinks flag correctly. Per example from Kyle Bateman.
+--
+
+create temp table parts (
+ partnum text,
+ cost float8
+);
+
+create temp table shipped (
+ ttype char(2),
+ ordnum int4,
+ partnum text,
+ value float8
+);
+
+create temp view shipped_view as
+ select * from shipped where ttype = 'wt';
+
+create rule shipped_view_insert as on insert to shipped_view do instead
+ insert into shipped values('wt', new.ordnum, new.partnum, new.value);
+
+insert into parts (partnum, cost) values (1, 1234.56);
+
+insert into shipped_view (ordnum, partnum, value)
+ values (0, 1, (select cost from parts where partnum = '1'));
+
+select * from shipped_view;
+
+create rule shipped_view_update as on update to shipped_view do instead
+ update shipped set partnum = new.partnum, value = new.value
+ where ttype = new.ttype and ordnum = new.ordnum;
+
+update shipped_view set value = 11
+ from int4_tbl a join int4_tbl b
+ on (a.f1 = (select f1 from int4_tbl c where c.f1=b.f1))
+ where ordnum = a.f1;
+
+select * from shipped_view;
+
+select f1, ss1 as relabel from
+ (select *, (select sum(f1) from int4_tbl b where f1 >= a.f1) as ss1
+ from int4_tbl a) ss;
+
+--
+-- Test cases involving PARAM_EXEC parameters and min/max index optimizations.
+-- Per bug report from David Sanchez i Gregori.
+--
+
+select * from (
+ select max(unique1) from tenk1 as a
+ where exists (select 1 from tenk1 as b where b.thousand = a.unique2)
+) ss;
+
+select * from (
+ select min(unique1) from tenk1 as a
+ where not exists (select 1 from tenk1 as b where b.unique2 = 10000)
+) ss;
+
+--
+-- Test that an IN implemented using a UniquePath does unique-ification
+-- with the right semantics, as per bug #4113. (Unfortunately we have
+-- no simple way to ensure that this test case actually chooses that type
+-- of plan, but it does in releases 7.4-8.3. Note that an ordering difference
+-- here might mean that some other plan type is being used, rendering the test
+-- pointless.)
+--
+
+create temp table numeric_table (num_col numeric);
+insert into numeric_table values (1), (1.000000000000000000001), (2), (3);
+
+create temp table float_table (float_col float8);
+insert into float_table values (1), (2), (3);
+
+select * from float_table
+ where float_col in (select num_col from numeric_table);
+
+select * from numeric_table
+ where num_col in (select float_col from float_table);
+
+--
+-- Test case for bug #4290: bogus calculation of subplan param sets
+--
+
+create temp table ta (id int primary key, val int);
+
+insert into ta values(1,1);
+insert into ta values(2,2);
+
+create temp table tb (id int primary key, aval int);
+
+insert into tb values(1,1);
+insert into tb values(2,1);
+insert into tb values(3,2);
+insert into tb values(4,2);
+
+create temp table tc (id int primary key, aid int);
+
+insert into tc values(1,1);
+insert into tc values(2,2);
+
+select
+ ( select min(tb.id) from tb
+ where tb.aval = (select ta.val from ta where ta.id = tc.aid) ) as min_tb_id
+from tc;
+
+--
+-- Test case for 8.3 "failed to locate grouping columns" bug
+--
+
+create temp table t1 (f1 numeric(14,0), f2 varchar(30));
+
+select * from
+ (select distinct f1, f2, (select f2 from t1 x where x.f1 = up.f1) as fs
+ from t1 up) ss
+group by f1,f2,fs;
+
+--
+-- Test case for bug #5514 (mishandling of whole-row Vars in subselects)
+--
+
+create temp table table_a(id integer);
+insert into table_a values (42);
+
+create temp view view_a as select * from table_a;
+
+select view_a from view_a;
+select (select view_a) from view_a;
+select (select (select view_a)) from view_a;
+select (select (a.*)::text) from view_a a;
+
+--
+-- Check that whole-row Vars reading the result of a subselect don't include
+-- any junk columns therein
+--
+
+select q from (select max(f1) from int4_tbl group by f1 order by f1) q;
+with q as (select max(f1) from int4_tbl group by f1 order by f1)
+ select q from q;
+
+--
+-- Test case for sublinks pulled up into joinaliasvars lists in an
+-- inherited update/delete query
+--
+
+begin; -- this shouldn't delete anything, but be safe
+
+delete from road
+where exists (
+ select 1
+ from
+ int4_tbl cross join
+ ( select f1, array(select q1 from int8_tbl) as arr
+ from text_tbl ) ss
+ where road.name = ss.f1 );
+
+rollback;
+
+--
+-- Test case for sublinks pushed down into subselects via join alias expansion
+--
+
+select
+ (select sq1) as qq1
+from
+ (select exists(select 1 from int4_tbl where f1 = q2) as sq1, 42 as dummy
+ from int8_tbl) sq0
+ join
+ int4_tbl i4 on dummy = i4.f1;
+
+--
+-- Test case for subselect within UPDATE of INSERT...ON CONFLICT DO UPDATE
+--
+create temp table upsert(key int4 primary key, val text);
+insert into upsert values(1, 'val') on conflict (key) do update set val = 'not seen';
+insert into upsert values(1, 'val') on conflict (key) do update set val = 'seen with subselect ' || (select f1 from int4_tbl where f1 != 0 limit 1)::text;
+
+select * from upsert;
+
+with aa as (select 'int4_tbl' u from int4_tbl limit 1)
+insert into upsert values (1, 'x'), (999, 'y')
+on conflict (key) do update set val = (select u from aa)
+returning *;
+
+--
+-- Test case for cross-type partial matching in hashed subplan (bug #7597)
+--
+
+create temp table outer_7597 (f1 int4, f2 int4);
+insert into outer_7597 values (0, 0);
+insert into outer_7597 values (1, 0);
+insert into outer_7597 values (0, null);
+insert into outer_7597 values (1, null);
+
+create temp table inner_7597(c1 int8, c2 int8);
+insert into inner_7597 values(0, null);
+
+select * from outer_7597 where (f1, f2) not in (select * from inner_7597);
+
+--
+-- Similar test case using text that verifies that collation
+-- information is passed through by execTuplesEqual() in nodeSubplan.c
+-- (otherwise it would error in texteq())
+--
+
+create temp table outer_text (f1 text, f2 text);
+insert into outer_text values ('a', 'a');
+insert into outer_text values ('b', 'a');
+insert into outer_text values ('a', null);
+insert into outer_text values ('b', null);
+
+create temp table inner_text (c1 text, c2 text);
+insert into inner_text values ('a', null);
+insert into inner_text values ('123', '456');
+
+select * from outer_text where (f1, f2) not in (select * from inner_text);
+
+--
+-- Another test case for cross-type hashed subplans: comparison of
+-- inner-side values must be done with appropriate operator
+--
+
+explain (verbose, costs off)
+select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
+
+select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
+
+--
+-- Test that we don't try to hash nested records (bug #17363)
+-- (Hashing could be supported, but for now we don't)
+--
+
+explain (verbose, costs off)
+select row(row(row(1))) = any (select row(row(1)));
+
+select row(row(row(1))) = any (select row(row(1)));
+
+--
+-- Test case for premature memory release during hashing of subplan output
+--
+
+select '1'::text in (select '1'::name union all select '1'::name);
+
+--
+-- Test that we don't try to use a hashed subplan if the simplified
+-- testexpr isn't of the right shape
+--
+
+-- this fails by default, of course
+select * from int8_tbl where q1 in (select c1 from inner_text);
+
+begin;
+
+-- make an operator to allow it to succeed
+create function bogus_int8_text_eq(int8, text) returns boolean
+language sql as 'select $1::text = $2';
+
+create operator = (procedure=bogus_int8_text_eq, leftarg=int8, rightarg=text);
+
+explain (costs off)
+select * from int8_tbl where q1 in (select c1 from inner_text);
+select * from int8_tbl where q1 in (select c1 from inner_text);
+
+-- inlining of this function results in unusual number of hash clauses,
+-- which we can still cope with
+create or replace function bogus_int8_text_eq(int8, text) returns boolean
+language sql as 'select $1::text = $2 and $1::text = $2';
+
+explain (costs off)
+select * from int8_tbl where q1 in (select c1 from inner_text);
+select * from int8_tbl where q1 in (select c1 from inner_text);
+
+-- inlining of this function causes LHS and RHS to be switched,
+-- which we can't cope with, so hashing should be abandoned
+create or replace function bogus_int8_text_eq(int8, text) returns boolean
+language sql as 'select $2 = $1::text';
+
+explain (costs off)
+select * from int8_tbl where q1 in (select c1 from inner_text);
+select * from int8_tbl where q1 in (select c1 from inner_text);
+
+rollback; -- to get rid of the bogus operator
+
+--
+-- Test resolution of hashed vs non-hashed implementation of EXISTS subplan
+--
+explain (costs off)
+select count(*) from tenk1 t
+where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0);
+select count(*) from tenk1 t
+where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0);
+
+explain (costs off)
+select count(*) from tenk1 t
+where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0)
+ and thousand = 1;
+select count(*) from tenk1 t
+where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0)
+ and thousand = 1;
+
+-- It's possible for the same EXISTS to get resolved both ways
+create temp table exists_tbl (c1 int, c2 int, c3 int) partition by list (c1);
+create temp table exists_tbl_null partition of exists_tbl for values in (null);
+create temp table exists_tbl_def partition of exists_tbl default;
+insert into exists_tbl select x, x/2, x+1 from generate_series(0,10) x;
+analyze exists_tbl;
+explain (costs off)
+select * from exists_tbl t1
+ where (exists(select 1 from exists_tbl t2 where t1.c1 = t2.c2) or c3 < 0);
+select * from exists_tbl t1
+ where (exists(select 1 from exists_tbl t2 where t1.c1 = t2.c2) or c3 < 0);
+
+--
+-- Test case for planner bug with nested EXISTS handling
+--
+select a.thousand from tenk1 a, tenk1 b
+where a.thousand = b.thousand
+ and exists ( select 1 from tenk1 c where b.hundred = c.hundred
+ and not exists ( select 1 from tenk1 d
+ where a.thousand = d.thousand ) );
+
+--
+-- Check that nested sub-selects are not pulled up if they contain volatiles
+--
+explain (verbose, costs off)
+ select x, x from
+ (select (select now()) as x from (values(1),(2)) v(y)) ss;
+explain (verbose, costs off)
+ select x, x from
+ (select (select random()) as x from (values(1),(2)) v(y)) ss;
+explain (verbose, costs off)
+ select x, x from
+ (select (select now() where y=y) as x from (values(1),(2)) v(y)) ss;
+explain (verbose, costs off)
+ select x, x from
+ (select (select random() where y=y) as x from (values(1),(2)) v(y)) ss;
+
+--
+-- Test rescan of a hashed subplan (the use of random() is to prevent the
+-- sub-select from being pulled up, which would result in not hashing)
+--
+explain (verbose, costs off)
+select sum(ss.tst::int) from
+ onek o cross join lateral (
+ select i.ten in (select f1 from int4_tbl where f1 <= o.hundred) as tst,
+ random() as r
+ from onek i where i.unique1 = o.unique1 ) ss
+where o.ten = 0;
+
+select sum(ss.tst::int) from
+ onek o cross join lateral (
+ select i.ten in (select f1 from int4_tbl where f1 <= o.hundred) as tst,
+ random() as r
+ from onek i where i.unique1 = o.unique1 ) ss
+where o.ten = 0;
+
+--
+-- Test rescan of a SetOp node
+--
+explain (costs off)
+select count(*) from
+ onek o cross join lateral (
+ select * from onek i1 where i1.unique1 = o.unique1
+ except
+ select * from onek i2 where i2.unique1 = o.unique2
+ ) ss
+where o.ten = 1;
+
+select count(*) from
+ onek o cross join lateral (
+ select * from onek i1 where i1.unique1 = o.unique1
+ except
+ select * from onek i2 where i2.unique1 = o.unique2
+ ) ss
+where o.ten = 1;
+
+--
+-- Test rescan of a RecursiveUnion node
+--
+explain (costs off)
+select sum(o.four), sum(ss.a) from
+ onek o cross join lateral (
+ with recursive x(a) as
+ (select o.four as a
+ union
+ select a + 1 from x
+ where a < 10)
+ select * from x
+ ) ss
+where o.ten = 1;
+
+select sum(o.four), sum(ss.a) from
+ onek o cross join lateral (
+ with recursive x(a) as
+ (select o.four as a
+ union
+ select a + 1 from x
+ where a < 10)
+ select * from x
+ ) ss
+where o.ten = 1;
+
+--
+-- Check we don't misoptimize a NOT IN where the subquery returns no rows.
+--
+create temp table notinouter (a int);
+create temp table notininner (b int not null);
+insert into notinouter values (null), (1);
+
+select * from notinouter where a not in (select b from notininner);
+
+--
+-- Check we behave sanely in corner case of empty SELECT list (bug #8648)
+--
+create temp table nocolumns();
+select exists(select * from nocolumns);
+
+--
+-- Check behavior with a SubPlan in VALUES (bug #14924)
+--
+select val.x
+ from generate_series(1,10) as s(i),
+ lateral (
+ values ((select s.i + 1)), (s.i + 101)
+ ) as val(x)
+where s.i < 10 and (select val.x) < 110;
+
+-- another variant of that (bug #16213)
+explain (verbose, costs off)
+select * from
+(values
+ (3 not in (select * from (values (1), (2)) ss1)),
+ (false)
+) ss;
+
+select * from
+(values
+ (3 not in (select * from (values (1), (2)) ss1)),
+ (false)
+) ss;
+
+--
+-- Check sane behavior with nested IN SubLinks
+--
+explain (verbose, costs off)
+select * from int4_tbl where
+ (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
+ (select ten from tenk1 b);
+select * from int4_tbl where
+ (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
+ (select ten from tenk1 b);
+
+--
+-- Check for incorrect optimization when IN subquery contains a SRF
+--
+explain (verbose, costs off)
+select * from int4_tbl o where (f1, f1) in
+ (select f1, generate_series(1,50) / 10 g from int4_tbl i group by f1);
+select * from int4_tbl o where (f1, f1) in
+ (select f1, generate_series(1,50) / 10 g from int4_tbl i group by f1);
+
+--
+-- check for over-optimization of whole-row Var referencing an Append plan
+--
+select (select q from
+ (select 1,2,3 where f1 > 0
+ union all
+ select 4,5,6.0 where f1 <= 0
+ ) q )
+from int4_tbl;
+
+--
+-- Check for sane handling of a lateral reference in a subquery's quals
+-- (most of the complication here is to prevent the test case from being
+-- flattened too much)
+--
+explain (verbose, costs off)
+select * from
+ int4_tbl i4,
+ lateral (
+ select i4.f1 > 1 as b, 1 as id
+ from (select random() order by 1) as t1
+ union all
+ select true as b, 2 as id
+ ) as t2
+where b and f1 >= 0;
+
+select * from
+ int4_tbl i4,
+ lateral (
+ select i4.f1 > 1 as b, 1 as id
+ from (select random() order by 1) as t1
+ union all
+ select true as b, 2 as id
+ ) as t2
+where b and f1 >= 0;
+
+--
+-- Check that volatile quals aren't pushed down past a DISTINCT:
+-- nextval() should not be called more than the nominal number of times
+--
+create temp sequence ts1;
+
+select * from
+ (select distinct ten from tenk1) ss
+ where ten < 10 + nextval('ts1')
+ order by 1;
+
+select nextval('ts1');
+
+--
+-- Check that volatile quals aren't pushed down past a set-returning function;
+-- while a nonvolatile qual can be, if it doesn't reference the SRF.
+--
+create function tattle(x int, y int) returns bool
+volatile language plpgsql as $$
+begin
+ raise notice 'x = %, y = %', x, y;
+ return x > y;
+end$$;
+
+explain (verbose, costs off)
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, 8);
+
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, 8);
+
+-- if we pretend it's stable, we get different results:
+alter function tattle(x int, y int) stable;
+
+explain (verbose, costs off)
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, 8);
+
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, 8);
+
+-- although even a stable qual should not be pushed down if it references SRF
+explain (verbose, costs off)
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, u);
+
+select * from
+ (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss
+ where tattle(x, u);
+
+drop function tattle(x int, y int);
+
+--
+-- Test that LIMIT can be pushed to SORT through a subquery that just projects
+-- columns. We check for that having happened by looking to see if EXPLAIN
+-- ANALYZE shows that a top-N sort was used. We must suppress or filter away
+-- all the non-invariant parts of the EXPLAIN ANALYZE output.
+--
+create table sq_limit (pk int primary key, c1 int, c2 int);
+insert into sq_limit values
+ (1, 1, 1),
+ (2, 2, 2),
+ (3, 3, 3),
+ (4, 4, 4),
+ (5, 1, 1),
+ (6, 2, 2),
+ (7, 3, 3),
+ (8, 4, 4);
+
+create function explain_sq_limit() returns setof text language plpgsql as
+$$
+declare ln text;
+begin
+ for ln in
+ explain (analyze, summary off, timing off, costs off)
+ select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3
+ loop
+ ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx');
+ return next ln;
+ end loop;
+end;
+$$;
+
+select * from explain_sq_limit();
+
+select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3;
+
+drop function explain_sq_limit();
+
+drop table sq_limit;
+
+--
+-- Ensure that backward scan direction isn't propagated into
+-- expression subqueries (bug #15336)
+--
+
+begin;
+
+declare c1 scroll cursor for
+ select * from generate_series(1,4) i
+ where i <> all (values (2),(3));
+
+move forward all in c1;
+fetch backward all in c1;
+
+commit;
+
+--
+-- Tests for CTE inlining behavior
+--
+
+-- Basic subquery that can be inlined
+explain (verbose, costs off)
+with x as (select * from (select f1 from subselect_tbl) ss)
+select * from x where f1 = 1;
+
+-- Explicitly request materialization
+explain (verbose, costs off)
+with x as materialized (select * from (select f1 from subselect_tbl) ss)
+select * from x where f1 = 1;
+
+-- Stable functions are safe to inline
+explain (verbose, costs off)
+with x as (select * from (select f1, now() from subselect_tbl) ss)
+select * from x where f1 = 1;
+
+-- Volatile functions prevent inlining
+explain (verbose, costs off)
+with x as (select * from (select f1, random() from subselect_tbl) ss)
+select * from x where f1 = 1;
+
+-- SELECT FOR UPDATE cannot be inlined
+explain (verbose, costs off)
+with x as (select * from (select f1 from subselect_tbl for update) ss)
+select * from x where f1 = 1;
+
+-- Multiply-referenced CTEs are inlined only when requested
+explain (verbose, costs off)
+with x as (select * from (select f1, now() as n from subselect_tbl) ss)
+select * from x, x x2 where x.n = x2.n;
+
+explain (verbose, costs off)
+with x as not materialized (select * from (select f1, now() as n from subselect_tbl) ss)
+select * from x, x x2 where x.n = x2.n;
+
+-- Multiply-referenced CTEs can't be inlined if they contain outer self-refs
+explain (verbose, costs off)
+with recursive x(a) as
+ ((values ('a'), ('b'))
+ union all
+ (with z as not materialized (select * from x)
+ select z.a || z1.a as a from z cross join z as z1
+ where length(z.a || z1.a) < 5))
+select * from x;
+
+with recursive x(a) as
+ ((values ('a'), ('b'))
+ union all
+ (with z as not materialized (select * from x)
+ select z.a || z1.a as a from z cross join z as z1
+ where length(z.a || z1.a) < 5))
+select * from x;
+
+explain (verbose, costs off)
+with recursive x(a) as
+ ((values ('a'), ('b'))
+ union all
+ (with z as not materialized (select * from x)
+ select z.a || z.a as a from z
+ where length(z.a || z.a) < 5))
+select * from x;
+
+with recursive x(a) as
+ ((values ('a'), ('b'))
+ union all
+ (with z as not materialized (select * from x)
+ select z.a || z.a as a from z
+ where length(z.a || z.a) < 5))
+select * from x;
+
+-- Check handling of outer references
+explain (verbose, costs off)
+with x as (select * from int4_tbl)
+select * from (with y as (select * from x) select * from y) ss;
+
+explain (verbose, costs off)
+with x as materialized (select * from int4_tbl)
+select * from (with y as (select * from x) select * from y) ss;
+
+-- Ensure that we inline the currect CTE when there are
+-- multiple CTEs with the same name
+explain (verbose, costs off)
+with x as (select 1 as y)
+select * from (with x as (select 2 as y) select * from x) ss;
+
+-- Row marks are not pushed into CTEs
+explain (verbose, costs off)
+with x as (select * from subselect_tbl)
+select * from x for update;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/text.out b/ydb/library/yql/tests/postgresql/original/cases/text.out
new file mode 100644
index 0000000000..b625b09f32
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/text.out
@@ -0,0 +1,440 @@
+--
+-- TEXT
+--
+SELECT text 'this is a text string' = text 'this is a text string' AS true;
+ true
+------
+ t
+(1 row)
+
+SELECT text 'this is a text string' = text 'this is a text strin' AS false;
+ false
+-------
+ f
+(1 row)
+
+CREATE TABLE TEXT_TBL (f1 text);
+INSERT INTO TEXT_TBL VALUES ('doh!');
+INSERT INTO TEXT_TBL VALUES ('hi de ho neighbor');
+SELECT * FROM TEXT_TBL;
+ f1
+-------------------
+ doh!
+ hi de ho neighbor
+(2 rows)
+
+-- As of 8.3 we have removed most implicit casts to text, so that for example
+-- this no longer works:
+select length(42);
+ERROR: function length(integer) does not exist
+LINE 1: select length(42);
+ ^
+HINT: No function matches the given name and argument types. You might need to add explicit type casts.
+-- But as a special exception for usability's sake, we still allow implicit
+-- casting to text in concatenations, so long as the other input is text or
+-- an unknown literal. So these work:
+select 'four: '::text || 2+2;
+ ?column?
+----------
+ four: 4
+(1 row)
+
+select 'four: ' || 2+2;
+ ?column?
+----------
+ four: 4
+(1 row)
+
+-- but not this:
+select 3 || 4.0;
+ERROR: operator does not exist: integer || numeric
+LINE 1: select 3 || 4.0;
+ ^
+HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
+/*
+ * various string functions
+ */
+select concat('one');
+ concat
+--------
+ one
+(1 row)
+
+select concat(1,2,3,'hello',true, false, to_date('20100309','YYYYMMDD'));
+ concat
+----------------------
+ 123hellotf03-09-2010
+(1 row)
+
+select concat_ws('#','one');
+ concat_ws
+-----------
+ one
+(1 row)
+
+select concat_ws('#',1,2,3,'hello',true, false, to_date('20100309','YYYYMMDD'));
+ concat_ws
+----------------------------
+ 1#2#3#hello#t#f#03-09-2010
+(1 row)
+
+select concat_ws(',',10,20,null,30);
+ concat_ws
+-----------
+ 10,20,30
+(1 row)
+
+select concat_ws('',10,20,null,30);
+ concat_ws
+-----------
+ 102030
+(1 row)
+
+select concat_ws(NULL,10,20,null,30) is null;
+ ?column?
+----------
+ t
+(1 row)
+
+select reverse('abcde');
+ reverse
+---------
+ edcba
+(1 row)
+
+select i, left('ahoj', i), right('ahoj', i) from generate_series(-5, 5) t(i) order by i;
+ i | left | right
+----+------+-------
+ -5 | |
+ -4 | |
+ -3 | a | j
+ -2 | ah | oj
+ -1 | aho | hoj
+ 0 | |
+ 1 | a | j
+ 2 | ah | oj
+ 3 | aho | hoj
+ 4 | ahoj | ahoj
+ 5 | ahoj | ahoj
+(11 rows)
+
+select quote_literal('');
+ quote_literal
+---------------
+ ''
+(1 row)
+
+select quote_literal('abc''');
+ quote_literal
+---------------
+ 'abc'''
+(1 row)
+
+select quote_literal(e'\\');
+ quote_literal
+---------------
+ E'\\'
+(1 row)
+
+-- check variadic labeled argument
+select concat(variadic array[1,2,3]);
+ concat
+--------
+ 123
+(1 row)
+
+select concat_ws(',', variadic array[1,2,3]);
+ concat_ws
+-----------
+ 1,2,3
+(1 row)
+
+select concat_ws(',', variadic NULL::int[]);
+ concat_ws
+-----------
+
+(1 row)
+
+select concat(variadic NULL::int[]) is NULL;
+ ?column?
+----------
+ t
+(1 row)
+
+select concat(variadic '{}'::int[]) = '';
+ ?column?
+----------
+ t
+(1 row)
+
+--should fail
+select concat_ws(',', variadic 10);
+ERROR: VARIADIC argument must be an array
+LINE 1: select concat_ws(',', variadic 10);
+ ^
+/*
+ * format
+ */
+select format(NULL);
+ format
+--------
+
+(1 row)
+
+select format('Hello');
+ format
+--------
+ Hello
+(1 row)
+
+select format('Hello %s', 'World');
+ format
+-------------
+ Hello World
+(1 row)
+
+select format('Hello %%');
+ format
+---------
+ Hello %
+(1 row)
+
+select format('Hello %%%%');
+ format
+----------
+ Hello %%
+(1 row)
+
+-- should fail
+select format('Hello %s %s', 'World');
+ERROR: too few arguments for format()
+select format('Hello %s');
+ERROR: too few arguments for format()
+select format('Hello %x', 20);
+ERROR: unrecognized format() type specifier "x"
+HINT: For a single "%" use "%%".
+-- check literal and sql identifiers
+select format('INSERT INTO %I VALUES(%L,%L)', 'mytab', 10, 'Hello');
+ format
+----------------------------------------
+ INSERT INTO mytab VALUES('10','Hello')
+(1 row)
+
+select format('%s%s%s','Hello', NULL,'World');
+ format
+------------
+ HelloWorld
+(1 row)
+
+select format('INSERT INTO %I VALUES(%L,%L)', 'mytab', 10, NULL);
+ format
+-------------------------------------
+ INSERT INTO mytab VALUES('10',NULL)
+(1 row)
+
+select format('INSERT INTO %I VALUES(%L,%L)', 'mytab', NULL, 'Hello');
+ format
+----------------------------------------
+ INSERT INTO mytab VALUES(NULL,'Hello')
+(1 row)
+
+-- should fail, sql identifier cannot be NULL
+select format('INSERT INTO %I VALUES(%L,%L)', NULL, 10, 'Hello');
+ERROR: null values cannot be formatted as an SQL identifier
+-- check positional placeholders
+select format('%1$s %3$s', 1, 2, 3);
+ format
+--------
+ 1 3
+(1 row)
+
+select format('%1$s %12$s', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12);
+ format
+--------
+ 1 12
+(1 row)
+
+-- should fail
+select format('%1$s %4$s', 1, 2, 3);
+ERROR: too few arguments for format()
+select format('%1$s %13$s', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12);
+ERROR: too few arguments for format()
+select format('%0$s', 'Hello');
+ERROR: format specifies argument 0, but arguments are numbered from 1
+select format('%*0$s', 'Hello');
+ERROR: format specifies argument 0, but arguments are numbered from 1
+select format('%1$', 1);
+ERROR: unterminated format() type specifier
+HINT: For a single "%" use "%%".
+select format('%1$1', 1);
+ERROR: unterminated format() type specifier
+HINT: For a single "%" use "%%".
+-- check mix of positional and ordered placeholders
+select format('Hello %s %1$s %s', 'World', 'Hello again');
+ format
+-------------------------------
+ Hello World World Hello again
+(1 row)
+
+select format('Hello %s %s, %2$s %2$s', 'World', 'Hello again');
+ format
+--------------------------------------------------
+ Hello World Hello again, Hello again Hello again
+(1 row)
+
+-- check variadic labeled arguments
+select format('%s, %s', variadic array['Hello','World']);
+ format
+--------------
+ Hello, World
+(1 row)
+
+select format('%s, %s', variadic array[1, 2]);
+ format
+--------
+ 1, 2
+(1 row)
+
+select format('%s, %s', variadic array[true, false]);
+ format
+--------
+ t, f
+(1 row)
+
+select format('%s, %s', variadic array[true, false]::text[]);
+ format
+-------------
+ true, false
+(1 row)
+
+-- check variadic with positional placeholders
+select format('%2$s, %1$s', variadic array['first', 'second']);
+ format
+---------------
+ second, first
+(1 row)
+
+select format('%2$s, %1$s', variadic array[1, 2]);
+ format
+--------
+ 2, 1
+(1 row)
+
+-- variadic argument can be array type NULL, but should not be referenced
+select format('Hello', variadic NULL::int[]);
+ format
+--------
+ Hello
+(1 row)
+
+-- variadic argument allows simulating more than FUNC_MAX_ARGS parameters
+select format(string_agg('%s',','), variadic array_agg(i))
+from generate_series(1,200) g(i);
+ format
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200
+(1 row)
+
+-- check field widths and left, right alignment
+select format('>>%10s<<', 'Hello');
+ format
+----------------
+ >> Hello<<
+(1 row)
+
+select format('>>%10s<<', NULL);
+ format
+----------------
+ >> <<
+(1 row)
+
+select format('>>%10s<<', '');
+ format
+----------------
+ >> <<
+(1 row)
+
+select format('>>%-10s<<', '');
+ format
+----------------
+ >> <<
+(1 row)
+
+select format('>>%-10s<<', 'Hello');
+ format
+----------------
+ >>Hello <<
+(1 row)
+
+select format('>>%-10s<<', NULL);
+ format
+----------------
+ >> <<
+(1 row)
+
+select format('>>%1$10s<<', 'Hello');
+ format
+----------------
+ >> Hello<<
+(1 row)
+
+select format('>>%1$-10I<<', 'Hello');
+ format
+----------------
+ >>"Hello" <<
+(1 row)
+
+select format('>>%2$*1$L<<', 10, 'Hello');
+ format
+----------------
+ >> 'Hello'<<
+(1 row)
+
+select format('>>%2$*1$L<<', 10, NULL);
+ format
+----------------
+ >> NULL<<
+(1 row)
+
+select format('>>%2$*1$L<<', -10, NULL);
+ format
+----------------
+ >>NULL <<
+(1 row)
+
+select format('>>%*s<<', 10, 'Hello');
+ format
+----------------
+ >> Hello<<
+(1 row)
+
+select format('>>%*1$s<<', 10, 'Hello');
+ format
+----------------
+ >> Hello<<
+(1 row)
+
+select format('>>%-s<<', 'Hello');
+ format
+-----------
+ >>Hello<<
+(1 row)
+
+select format('>>%10L<<', NULL);
+ format
+----------------
+ >> NULL<<
+(1 row)
+
+select format('>>%2$*1$L<<', NULL, 'Hello');
+ format
+-------------
+ >>'Hello'<<
+(1 row)
+
+select format('>>%2$*1$L<<', 0, 'Hello');
+ format
+-------------
+ >>'Hello'<<
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/text.sql b/ydb/library/yql/tests/postgresql/original/cases/text.sql
new file mode 100644
index 0000000000..56eee69abc
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/text.sql
@@ -0,0 +1,118 @@
+--
+-- TEXT
+--
+
+SELECT text 'this is a text string' = text 'this is a text string' AS true;
+
+SELECT text 'this is a text string' = text 'this is a text strin' AS false;
+
+CREATE TABLE TEXT_TBL (f1 text);
+
+INSERT INTO TEXT_TBL VALUES ('doh!');
+INSERT INTO TEXT_TBL VALUES ('hi de ho neighbor');
+
+SELECT * FROM TEXT_TBL;
+
+-- As of 8.3 we have removed most implicit casts to text, so that for example
+-- this no longer works:
+
+select length(42);
+
+-- But as a special exception for usability's sake, we still allow implicit
+-- casting to text in concatenations, so long as the other input is text or
+-- an unknown literal. So these work:
+
+select 'four: '::text || 2+2;
+select 'four: ' || 2+2;
+
+-- but not this:
+
+select 3 || 4.0;
+
+/*
+ * various string functions
+ */
+select concat('one');
+select concat(1,2,3,'hello',true, false, to_date('20100309','YYYYMMDD'));
+select concat_ws('#','one');
+select concat_ws('#',1,2,3,'hello',true, false, to_date('20100309','YYYYMMDD'));
+select concat_ws(',',10,20,null,30);
+select concat_ws('',10,20,null,30);
+select concat_ws(NULL,10,20,null,30) is null;
+select reverse('abcde');
+select i, left('ahoj', i), right('ahoj', i) from generate_series(-5, 5) t(i) order by i;
+select quote_literal('');
+select quote_literal('abc''');
+select quote_literal(e'\\');
+-- check variadic labeled argument
+select concat(variadic array[1,2,3]);
+select concat_ws(',', variadic array[1,2,3]);
+select concat_ws(',', variadic NULL::int[]);
+select concat(variadic NULL::int[]) is NULL;
+select concat(variadic '{}'::int[]) = '';
+--should fail
+select concat_ws(',', variadic 10);
+
+/*
+ * format
+ */
+select format(NULL);
+select format('Hello');
+select format('Hello %s', 'World');
+select format('Hello %%');
+select format('Hello %%%%');
+-- should fail
+select format('Hello %s %s', 'World');
+select format('Hello %s');
+select format('Hello %x', 20);
+-- check literal and sql identifiers
+select format('INSERT INTO %I VALUES(%L,%L)', 'mytab', 10, 'Hello');
+select format('%s%s%s','Hello', NULL,'World');
+select format('INSERT INTO %I VALUES(%L,%L)', 'mytab', 10, NULL);
+select format('INSERT INTO %I VALUES(%L,%L)', 'mytab', NULL, 'Hello');
+-- should fail, sql identifier cannot be NULL
+select format('INSERT INTO %I VALUES(%L,%L)', NULL, 10, 'Hello');
+-- check positional placeholders
+select format('%1$s %3$s', 1, 2, 3);
+select format('%1$s %12$s', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12);
+-- should fail
+select format('%1$s %4$s', 1, 2, 3);
+select format('%1$s %13$s', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12);
+select format('%0$s', 'Hello');
+select format('%*0$s', 'Hello');
+select format('%1$', 1);
+select format('%1$1', 1);
+-- check mix of positional and ordered placeholders
+select format('Hello %s %1$s %s', 'World', 'Hello again');
+select format('Hello %s %s, %2$s %2$s', 'World', 'Hello again');
+-- check variadic labeled arguments
+select format('%s, %s', variadic array['Hello','World']);
+select format('%s, %s', variadic array[1, 2]);
+select format('%s, %s', variadic array[true, false]);
+select format('%s, %s', variadic array[true, false]::text[]);
+-- check variadic with positional placeholders
+select format('%2$s, %1$s', variadic array['first', 'second']);
+select format('%2$s, %1$s', variadic array[1, 2]);
+-- variadic argument can be array type NULL, but should not be referenced
+select format('Hello', variadic NULL::int[]);
+-- variadic argument allows simulating more than FUNC_MAX_ARGS parameters
+select format(string_agg('%s',','), variadic array_agg(i))
+from generate_series(1,200) g(i);
+-- check field widths and left, right alignment
+select format('>>%10s<<', 'Hello');
+select format('>>%10s<<', NULL);
+select format('>>%10s<<', '');
+select format('>>%-10s<<', '');
+select format('>>%-10s<<', 'Hello');
+select format('>>%-10s<<', NULL);
+select format('>>%1$10s<<', 'Hello');
+select format('>>%1$-10I<<', 'Hello');
+select format('>>%2$*1$L<<', 10, 'Hello');
+select format('>>%2$*1$L<<', 10, NULL);
+select format('>>%2$*1$L<<', -10, NULL);
+select format('>>%*s<<', 10, 'Hello');
+select format('>>%*1$s<<', 10, 'Hello');
+select format('>>%-s<<', 'Hello');
+select format('>>%10L<<', NULL);
+select format('>>%2$*1$L<<', NULL, 'Hello');
+select format('>>%2$*1$L<<', 0, 'Hello');
diff --git a/ydb/library/yql/tests/postgresql/original/cases/time.out b/ydb/library/yql/tests/postgresql/original/cases/time.out
new file mode 100644
index 0000000000..39b409feca
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/time.out
@@ -0,0 +1,200 @@
+--
+-- TIME
+--
+CREATE TABLE TIME_TBL (f1 time(2));
+INSERT INTO TIME_TBL VALUES ('00:00');
+INSERT INTO TIME_TBL VALUES ('01:00');
+-- as of 7.4, timezone spec should be accepted and ignored
+INSERT INTO TIME_TBL VALUES ('02:03 PST');
+INSERT INTO TIME_TBL VALUES ('11:59 EDT');
+INSERT INTO TIME_TBL VALUES ('12:00');
+INSERT INTO TIME_TBL VALUES ('12:01');
+INSERT INTO TIME_TBL VALUES ('23:59');
+INSERT INTO TIME_TBL VALUES ('11:59:59.99 PM');
+INSERT INTO TIME_TBL VALUES ('2003-03-07 15:36:39 America/New_York');
+INSERT INTO TIME_TBL VALUES ('2003-07-07 15:36:39 America/New_York');
+-- this should fail (the timezone offset is not known)
+INSERT INTO TIME_TBL VALUES ('15:36:39 America/New_York');
+ERROR: invalid input syntax for type time: "15:36:39 America/New_York"
+LINE 1: INSERT INTO TIME_TBL VALUES ('15:36:39 America/New_York');
+ ^
+SELECT f1 AS "Time" FROM TIME_TBL;
+ Time
+-------------
+ 00:00:00
+ 01:00:00
+ 02:03:00
+ 11:59:00
+ 12:00:00
+ 12:01:00
+ 23:59:00
+ 23:59:59.99
+ 15:36:39
+ 15:36:39
+(10 rows)
+
+SELECT f1 AS "Three" FROM TIME_TBL WHERE f1 < '05:06:07';
+ Three
+----------
+ 00:00:00
+ 01:00:00
+ 02:03:00
+(3 rows)
+
+SELECT f1 AS "Five" FROM TIME_TBL WHERE f1 > '05:06:07';
+ Five
+-------------
+ 11:59:00
+ 12:00:00
+ 12:01:00
+ 23:59:00
+ 23:59:59.99
+ 15:36:39
+ 15:36:39
+(7 rows)
+
+SELECT f1 AS "None" FROM TIME_TBL WHERE f1 < '00:00';
+ None
+------
+(0 rows)
+
+SELECT f1 AS "Eight" FROM TIME_TBL WHERE f1 >= '00:00';
+ Eight
+-------------
+ 00:00:00
+ 01:00:00
+ 02:03:00
+ 11:59:00
+ 12:00:00
+ 12:01:00
+ 23:59:00
+ 23:59:59.99
+ 15:36:39
+ 15:36:39
+(10 rows)
+
+-- Check edge cases
+SELECT '23:59:59.999999'::time;
+ time
+-----------------
+ 23:59:59.999999
+(1 row)
+
+SELECT '23:59:59.9999999'::time; -- rounds up
+ time
+----------
+ 24:00:00
+(1 row)
+
+SELECT '23:59:60'::time; -- rounds up
+ time
+----------
+ 24:00:00
+(1 row)
+
+SELECT '24:00:00'::time; -- allowed
+ time
+----------
+ 24:00:00
+(1 row)
+
+SELECT '24:00:00.01'::time; -- not allowed
+ERROR: date/time field value out of range: "24:00:00.01"
+LINE 1: SELECT '24:00:00.01'::time;
+ ^
+SELECT '23:59:60.01'::time; -- not allowed
+ERROR: date/time field value out of range: "23:59:60.01"
+LINE 1: SELECT '23:59:60.01'::time;
+ ^
+SELECT '24:01:00'::time; -- not allowed
+ERROR: date/time field value out of range: "24:01:00"
+LINE 1: SELECT '24:01:00'::time;
+ ^
+SELECT '25:00:00'::time; -- not allowed
+ERROR: date/time field value out of range: "25:00:00"
+LINE 1: SELECT '25:00:00'::time;
+ ^
+--
+-- TIME simple math
+--
+-- We now make a distinction between time and intervals,
+-- and adding two times together makes no sense at all.
+-- Leave in one query to show that it is rejected,
+-- and do the rest of the testing in horology.sql
+-- where we do mixed-type arithmetic. - thomas 2000-12-02
+SELECT f1 + time '00:01' AS "Illegal" FROM TIME_TBL;
+ERROR: operator is not unique: time without time zone + time without time zone
+LINE 1: SELECT f1 + time '00:01' AS "Illegal" FROM TIME_TBL;
+ ^
+HINT: Could not choose a best candidate operator. You might need to add explicit type casts.
+--
+-- test EXTRACT
+--
+SELECT EXTRACT(MICROSECOND FROM TIME '2020-05-26 13:30:25.575401');
+ extract
+----------
+ 25575401
+(1 row)
+
+SELECT EXTRACT(MILLISECOND FROM TIME '2020-05-26 13:30:25.575401');
+ extract
+-----------
+ 25575.401
+(1 row)
+
+SELECT EXTRACT(SECOND FROM TIME '2020-05-26 13:30:25.575401');
+ extract
+-----------
+ 25.575401
+(1 row)
+
+SELECT EXTRACT(MINUTE FROM TIME '2020-05-26 13:30:25.575401');
+ extract
+---------
+ 30
+(1 row)
+
+SELECT EXTRACT(HOUR FROM TIME '2020-05-26 13:30:25.575401');
+ extract
+---------
+ 13
+(1 row)
+
+SELECT EXTRACT(DAY FROM TIME '2020-05-26 13:30:25.575401'); -- error
+ERROR: "time" units "day" not recognized
+SELECT EXTRACT(FORTNIGHT FROM TIME '2020-05-26 13:30:25.575401'); -- error
+ERROR: "time" units "fortnight" not recognized
+SELECT EXTRACT(TIMEZONE FROM TIME '2020-05-26 13:30:25.575401'); -- error
+ERROR: "time" units "timezone" not recognized
+SELECT EXTRACT(EPOCH FROM TIME '2020-05-26 13:30:25.575401');
+ extract
+--------------
+ 48625.575401
+(1 row)
+
+-- date_part implementation is mostly the same as extract, so only
+-- test a few cases for additional coverage.
+SELECT date_part('microsecond', TIME '2020-05-26 13:30:25.575401');
+ date_part
+-----------
+ 25575401
+(1 row)
+
+SELECT date_part('millisecond', TIME '2020-05-26 13:30:25.575401');
+ date_part
+-----------
+ 25575.401
+(1 row)
+
+SELECT date_part('second', TIME '2020-05-26 13:30:25.575401');
+ date_part
+-----------
+ 25.575401
+(1 row)
+
+SELECT date_part('epoch', TIME '2020-05-26 13:30:25.575401');
+ date_part
+--------------
+ 48625.575401
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/time.sql b/ydb/library/yql/tests/postgresql/original/cases/time.sql
new file mode 100644
index 0000000000..3637f28798
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/time.sql
@@ -0,0 +1,72 @@
+--
+-- TIME
+--
+
+CREATE TABLE TIME_TBL (f1 time(2));
+
+INSERT INTO TIME_TBL VALUES ('00:00');
+INSERT INTO TIME_TBL VALUES ('01:00');
+-- as of 7.4, timezone spec should be accepted and ignored
+INSERT INTO TIME_TBL VALUES ('02:03 PST');
+INSERT INTO TIME_TBL VALUES ('11:59 EDT');
+INSERT INTO TIME_TBL VALUES ('12:00');
+INSERT INTO TIME_TBL VALUES ('12:01');
+INSERT INTO TIME_TBL VALUES ('23:59');
+INSERT INTO TIME_TBL VALUES ('11:59:59.99 PM');
+
+INSERT INTO TIME_TBL VALUES ('2003-03-07 15:36:39 America/New_York');
+INSERT INTO TIME_TBL VALUES ('2003-07-07 15:36:39 America/New_York');
+-- this should fail (the timezone offset is not known)
+INSERT INTO TIME_TBL VALUES ('15:36:39 America/New_York');
+
+
+SELECT f1 AS "Time" FROM TIME_TBL;
+
+SELECT f1 AS "Three" FROM TIME_TBL WHERE f1 < '05:06:07';
+
+SELECT f1 AS "Five" FROM TIME_TBL WHERE f1 > '05:06:07';
+
+SELECT f1 AS "None" FROM TIME_TBL WHERE f1 < '00:00';
+
+SELECT f1 AS "Eight" FROM TIME_TBL WHERE f1 >= '00:00';
+
+-- Check edge cases
+SELECT '23:59:59.999999'::time;
+SELECT '23:59:59.9999999'::time; -- rounds up
+SELECT '23:59:60'::time; -- rounds up
+SELECT '24:00:00'::time; -- allowed
+SELECT '24:00:00.01'::time; -- not allowed
+SELECT '23:59:60.01'::time; -- not allowed
+SELECT '24:01:00'::time; -- not allowed
+SELECT '25:00:00'::time; -- not allowed
+
+--
+-- TIME simple math
+--
+-- We now make a distinction between time and intervals,
+-- and adding two times together makes no sense at all.
+-- Leave in one query to show that it is rejected,
+-- and do the rest of the testing in horology.sql
+-- where we do mixed-type arithmetic. - thomas 2000-12-02
+
+SELECT f1 + time '00:01' AS "Illegal" FROM TIME_TBL;
+
+--
+-- test EXTRACT
+--
+SELECT EXTRACT(MICROSECOND FROM TIME '2020-05-26 13:30:25.575401');
+SELECT EXTRACT(MILLISECOND FROM TIME '2020-05-26 13:30:25.575401');
+SELECT EXTRACT(SECOND FROM TIME '2020-05-26 13:30:25.575401');
+SELECT EXTRACT(MINUTE FROM TIME '2020-05-26 13:30:25.575401');
+SELECT EXTRACT(HOUR FROM TIME '2020-05-26 13:30:25.575401');
+SELECT EXTRACT(DAY FROM TIME '2020-05-26 13:30:25.575401'); -- error
+SELECT EXTRACT(FORTNIGHT FROM TIME '2020-05-26 13:30:25.575401'); -- error
+SELECT EXTRACT(TIMEZONE FROM TIME '2020-05-26 13:30:25.575401'); -- error
+SELECT EXTRACT(EPOCH FROM TIME '2020-05-26 13:30:25.575401');
+
+-- date_part implementation is mostly the same as extract, so only
+-- test a few cases for additional coverage.
+SELECT date_part('microsecond', TIME '2020-05-26 13:30:25.575401');
+SELECT date_part('millisecond', TIME '2020-05-26 13:30:25.575401');
+SELECT date_part('second', TIME '2020-05-26 13:30:25.575401');
+SELECT date_part('epoch', TIME '2020-05-26 13:30:25.575401');
diff --git a/ydb/library/yql/tests/postgresql/original/cases/timestamp.out b/ydb/library/yql/tests/postgresql/original/cases/timestamp.out
new file mode 100644
index 0000000000..79f8180955
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/timestamp.out
@@ -0,0 +1,2081 @@
+--
+-- TIMESTAMP
+--
+CREATE TABLE TIMESTAMP_TBL (d1 timestamp(2) without time zone);
+-- Test shorthand input values
+-- We can't just "select" the results since they aren't constants; test for
+-- equality instead. We can do that by running the test inside a transaction
+-- block, within which the value of 'now' shouldn't change, and so these
+-- related values shouldn't either.
+BEGIN;
+INSERT INTO TIMESTAMP_TBL VALUES ('today');
+INSERT INTO TIMESTAMP_TBL VALUES ('yesterday');
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow');
+-- time zone should be ignored by this data type
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow EST');
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow zulu');
+SELECT count(*) AS One FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'today';
+ one
+-----
+ 1
+(1 row)
+
+SELECT count(*) AS Three FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'tomorrow';
+ three
+-------
+ 3
+(1 row)
+
+SELECT count(*) AS One FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'yesterday';
+ one
+-----
+ 1
+(1 row)
+
+COMMIT;
+DELETE FROM TIMESTAMP_TBL;
+-- Verify that 'now' *does* change over a reasonable interval such as 100 msec,
+-- and that it doesn't change over the same interval within a transaction block
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+BEGIN;
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+SELECT count(*) AS two FROM TIMESTAMP_TBL WHERE d1 = timestamp(2) without time zone 'now';
+ two
+-----
+ 2
+(1 row)
+
+SELECT count(d1) AS three, count(DISTINCT d1) AS two FROM TIMESTAMP_TBL;
+ three | two
+-------+-----
+ 3 | 2
+(1 row)
+
+COMMIT;
+TRUNCATE TIMESTAMP_TBL;
+-- Special values
+INSERT INTO TIMESTAMP_TBL VALUES ('-infinity');
+INSERT INTO TIMESTAMP_TBL VALUES ('infinity');
+INSERT INTO TIMESTAMP_TBL VALUES ('epoch');
+-- Postgres v6.0 standard output format
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST');
+-- Variations on Postgres v6.1 standard output format
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST');
+-- ISO 8601 format
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02 03:04:05');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-08');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 -08:00');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 -0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 17:32:01 -07:00');
+INSERT INTO TIMESTAMP_TBL VALUES ('2001-09-22T18:19:20');
+-- POSIX format (note that the timezone abbrev is just decoration here)
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 08:14:01 GMT+8');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 13:14:02 GMT-1');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 12:14:03 GMT-2');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 03:14:04 PST+8');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 02:14:05 MST+7:00');
+-- Variations for acceptable input formats
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997 -0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 5:32PM 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997/02/10 17:32:01-0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('02-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 PST');
+set datestyle to ymd;
+INSERT INTO TIMESTAMP_TBL VALUES ('97FEB10 5:32:01PM UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('97/02/10 17:32:01 UTC');
+reset datestyle;
+INSERT INTO TIMESTAMP_TBL VALUES ('1997.041 17:32:01 UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 America/New_York');
+-- this fails (even though TZ is a no-op, we still look it up)
+INSERT INTO TIMESTAMP_TBL VALUES ('19970710 173201 America/Does_not_exist');
+ERROR: time zone "america/does_not_exist" not recognized
+LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('19970710 173201 America/D...
+ ^
+-- Check date conversion and date arithmetic
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 18:32:01 PDT');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 11 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 12 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 13 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 14 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 15 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097 BC');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0597');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1697');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1797');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1897');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 2097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1997');
+ERROR: date/time field value out of range: "Feb 29 17:32:01 1997"
+LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1997');
+ ^
+INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1999');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2000');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 2000');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2001');
+-- Currently unsupported syntax and ranges
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 -0097');
+ERROR: time zone displacement out of range: "Feb 16 17:32:01 -0097"
+LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 -0097');
+ ^
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC');
+ERROR: timestamp out of range: "Feb 16 17:32:01 5097 BC"
+LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC')...
+ ^
+SELECT d1 FROM TIMESTAMP_TBL;
+ d1
+-----------------------------
+ -infinity
+ infinity
+ Thu Jan 01 00:00:00 1970
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:02 1997
+ Mon Feb 10 17:32:01.4 1997
+ Mon Feb 10 17:32:01.5 1997
+ Mon Feb 10 17:32:01.6 1997
+ Thu Jan 02 00:00:00 1997
+ Thu Jan 02 03:04:05 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 17:32:01 1997
+ Sat Sep 22 18:19:20 2001
+ Wed Mar 15 08:14:01 2000
+ Wed Mar 15 13:14:02 2000
+ Wed Mar 15 12:14:03 2000
+ Wed Mar 15 03:14:04 2000
+ Wed Mar 15 02:14:05 2000
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:00 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 18:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Feb 11 17:32:01 1997
+ Wed Feb 12 17:32:01 1997
+ Thu Feb 13 17:32:01 1997
+ Fri Feb 14 17:32:01 1997
+ Sat Feb 15 17:32:01 1997
+ Sun Feb 16 17:32:01 1997
+ Tue Feb 16 17:32:01 0097 BC
+ Sat Feb 16 17:32:01 0097
+ Thu Feb 16 17:32:01 0597
+ Tue Feb 16 17:32:01 1097
+ Sat Feb 16 17:32:01 1697
+ Thu Feb 16 17:32:01 1797
+ Tue Feb 16 17:32:01 1897
+ Sun Feb 16 17:32:01 1997
+ Sat Feb 16 17:32:01 2097
+ Wed Feb 28 17:32:01 1996
+ Thu Feb 29 17:32:01 1996
+ Fri Mar 01 17:32:01 1996
+ Mon Dec 30 17:32:01 1996
+ Tue Dec 31 17:32:01 1996
+ Wed Jan 01 17:32:01 1997
+ Fri Feb 28 17:32:01 1997
+ Sat Mar 01 17:32:01 1997
+ Tue Dec 30 17:32:01 1997
+ Wed Dec 31 17:32:01 1997
+ Fri Dec 31 17:32:01 1999
+ Sat Jan 01 17:32:01 2000
+ Sun Dec 31 17:32:01 2000
+ Mon Jan 01 17:32:01 2001
+(65 rows)
+
+-- Check behavior at the boundaries of the timestamp range
+SELECT '4714-11-24 00:00:00 BC'::timestamp;
+ timestamp
+-----------------------------
+ Mon Nov 24 00:00:00 4714 BC
+(1 row)
+
+SELECT '4714-11-23 23:59:59 BC'::timestamp; -- out of range
+ERROR: timestamp out of range: "4714-11-23 23:59:59 BC"
+LINE 1: SELECT '4714-11-23 23:59:59 BC'::timestamp;
+ ^
+SELECT '294276-12-31 23:59:59'::timestamp;
+ timestamp
+----------------------------
+ Sun Dec 31 23:59:59 294276
+(1 row)
+
+SELECT '294277-01-01 00:00:00'::timestamp; -- out of range
+ERROR: timestamp out of range: "294277-01-01 00:00:00"
+LINE 1: SELECT '294277-01-01 00:00:00'::timestamp;
+ ^
+-- Demonstrate functions and operators
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 > timestamp without time zone '1997-01-02';
+ d1
+----------------------------
+ infinity
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:02 1997
+ Mon Feb 10 17:32:01.4 1997
+ Mon Feb 10 17:32:01.5 1997
+ Mon Feb 10 17:32:01.6 1997
+ Thu Jan 02 03:04:05 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 17:32:01 1997
+ Sat Sep 22 18:19:20 2001
+ Wed Mar 15 08:14:01 2000
+ Wed Mar 15 13:14:02 2000
+ Wed Mar 15 12:14:03 2000
+ Wed Mar 15 03:14:04 2000
+ Wed Mar 15 02:14:05 2000
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:00 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 18:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Feb 11 17:32:01 1997
+ Wed Feb 12 17:32:01 1997
+ Thu Feb 13 17:32:01 1997
+ Fri Feb 14 17:32:01 1997
+ Sat Feb 15 17:32:01 1997
+ Sun Feb 16 17:32:01 1997
+ Sun Feb 16 17:32:01 1997
+ Sat Feb 16 17:32:01 2097
+ Fri Feb 28 17:32:01 1997
+ Sat Mar 01 17:32:01 1997
+ Tue Dec 30 17:32:01 1997
+ Wed Dec 31 17:32:01 1997
+ Fri Dec 31 17:32:01 1999
+ Sat Jan 01 17:32:01 2000
+ Sun Dec 31 17:32:01 2000
+ Mon Jan 01 17:32:01 2001
+(49 rows)
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 < timestamp without time zone '1997-01-02';
+ d1
+-----------------------------
+ -infinity
+ Thu Jan 01 00:00:00 1970
+ Tue Feb 16 17:32:01 0097 BC
+ Sat Feb 16 17:32:01 0097
+ Thu Feb 16 17:32:01 0597
+ Tue Feb 16 17:32:01 1097
+ Sat Feb 16 17:32:01 1697
+ Thu Feb 16 17:32:01 1797
+ Tue Feb 16 17:32:01 1897
+ Wed Feb 28 17:32:01 1996
+ Thu Feb 29 17:32:01 1996
+ Fri Mar 01 17:32:01 1996
+ Mon Dec 30 17:32:01 1996
+ Tue Dec 31 17:32:01 1996
+ Wed Jan 01 17:32:01 1997
+(15 rows)
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 = timestamp without time zone '1997-01-02';
+ d1
+--------------------------
+ Thu Jan 02 00:00:00 1997
+(1 row)
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 != timestamp without time zone '1997-01-02';
+ d1
+-----------------------------
+ -infinity
+ infinity
+ Thu Jan 01 00:00:00 1970
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:02 1997
+ Mon Feb 10 17:32:01.4 1997
+ Mon Feb 10 17:32:01.5 1997
+ Mon Feb 10 17:32:01.6 1997
+ Thu Jan 02 03:04:05 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 17:32:01 1997
+ Sat Sep 22 18:19:20 2001
+ Wed Mar 15 08:14:01 2000
+ Wed Mar 15 13:14:02 2000
+ Wed Mar 15 12:14:03 2000
+ Wed Mar 15 03:14:04 2000
+ Wed Mar 15 02:14:05 2000
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:00 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 18:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Feb 11 17:32:01 1997
+ Wed Feb 12 17:32:01 1997
+ Thu Feb 13 17:32:01 1997
+ Fri Feb 14 17:32:01 1997
+ Sat Feb 15 17:32:01 1997
+ Sun Feb 16 17:32:01 1997
+ Tue Feb 16 17:32:01 0097 BC
+ Sat Feb 16 17:32:01 0097
+ Thu Feb 16 17:32:01 0597
+ Tue Feb 16 17:32:01 1097
+ Sat Feb 16 17:32:01 1697
+ Thu Feb 16 17:32:01 1797
+ Tue Feb 16 17:32:01 1897
+ Sun Feb 16 17:32:01 1997
+ Sat Feb 16 17:32:01 2097
+ Wed Feb 28 17:32:01 1996
+ Thu Feb 29 17:32:01 1996
+ Fri Mar 01 17:32:01 1996
+ Mon Dec 30 17:32:01 1996
+ Tue Dec 31 17:32:01 1996
+ Wed Jan 01 17:32:01 1997
+ Fri Feb 28 17:32:01 1997
+ Sat Mar 01 17:32:01 1997
+ Tue Dec 30 17:32:01 1997
+ Wed Dec 31 17:32:01 1997
+ Fri Dec 31 17:32:01 1999
+ Sat Jan 01 17:32:01 2000
+ Sun Dec 31 17:32:01 2000
+ Mon Jan 01 17:32:01 2001
+(64 rows)
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 <= timestamp without time zone '1997-01-02';
+ d1
+-----------------------------
+ -infinity
+ Thu Jan 01 00:00:00 1970
+ Thu Jan 02 00:00:00 1997
+ Tue Feb 16 17:32:01 0097 BC
+ Sat Feb 16 17:32:01 0097
+ Thu Feb 16 17:32:01 0597
+ Tue Feb 16 17:32:01 1097
+ Sat Feb 16 17:32:01 1697
+ Thu Feb 16 17:32:01 1797
+ Tue Feb 16 17:32:01 1897
+ Wed Feb 28 17:32:01 1996
+ Thu Feb 29 17:32:01 1996
+ Fri Mar 01 17:32:01 1996
+ Mon Dec 30 17:32:01 1996
+ Tue Dec 31 17:32:01 1996
+ Wed Jan 01 17:32:01 1997
+(16 rows)
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 >= timestamp without time zone '1997-01-02';
+ d1
+----------------------------
+ infinity
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:02 1997
+ Mon Feb 10 17:32:01.4 1997
+ Mon Feb 10 17:32:01.5 1997
+ Mon Feb 10 17:32:01.6 1997
+ Thu Jan 02 00:00:00 1997
+ Thu Jan 02 03:04:05 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 17:32:01 1997
+ Sat Sep 22 18:19:20 2001
+ Wed Mar 15 08:14:01 2000
+ Wed Mar 15 13:14:02 2000
+ Wed Mar 15 12:14:03 2000
+ Wed Mar 15 03:14:04 2000
+ Wed Mar 15 02:14:05 2000
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:00 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Jun 10 18:32:01 1997
+ Mon Feb 10 17:32:01 1997
+ Tue Feb 11 17:32:01 1997
+ Wed Feb 12 17:32:01 1997
+ Thu Feb 13 17:32:01 1997
+ Fri Feb 14 17:32:01 1997
+ Sat Feb 15 17:32:01 1997
+ Sun Feb 16 17:32:01 1997
+ Sun Feb 16 17:32:01 1997
+ Sat Feb 16 17:32:01 2097
+ Fri Feb 28 17:32:01 1997
+ Sat Mar 01 17:32:01 1997
+ Tue Dec 30 17:32:01 1997
+ Wed Dec 31 17:32:01 1997
+ Fri Dec 31 17:32:01 1999
+ Sat Jan 01 17:32:01 2000
+ Sun Dec 31 17:32:01 2000
+ Mon Jan 01 17:32:01 2001
+(50 rows)
+
+SELECT d1 - timestamp without time zone '1997-01-02' AS diff
+ FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
+ diff
+----------------------------------------
+ @ 9863 days ago
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 2 secs
+ @ 39 days 17 hours 32 mins 1.4 secs
+ @ 39 days 17 hours 32 mins 1.5 secs
+ @ 39 days 17 hours 32 mins 1.6 secs
+ @ 0
+ @ 3 hours 4 mins 5 secs
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 159 days 17 hours 32 mins 1 sec
+ @ 1724 days 18 hours 19 mins 20 secs
+ @ 1168 days 8 hours 14 mins 1 sec
+ @ 1168 days 13 hours 14 mins 2 secs
+ @ 1168 days 12 hours 14 mins 3 secs
+ @ 1168 days 3 hours 14 mins 4 secs
+ @ 1168 days 2 hours 14 mins 5 secs
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 159 days 18 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 40 days 17 hours 32 mins 1 sec
+ @ 41 days 17 hours 32 mins 1 sec
+ @ 42 days 17 hours 32 mins 1 sec
+ @ 43 days 17 hours 32 mins 1 sec
+ @ 44 days 17 hours 32 mins 1 sec
+ @ 45 days 17 hours 32 mins 1 sec
+ @ 45 days 17 hours 32 mins 1 sec
+ @ 308 days 6 hours 27 mins 59 secs ago
+ @ 307 days 6 hours 27 mins 59 secs ago
+ @ 306 days 6 hours 27 mins 59 secs ago
+ @ 2 days 6 hours 27 mins 59 secs ago
+ @ 1 day 6 hours 27 mins 59 secs ago
+ @ 6 hours 27 mins 59 secs ago
+ @ 57 days 17 hours 32 mins 1 sec
+ @ 58 days 17 hours 32 mins 1 sec
+ @ 362 days 17 hours 32 mins 1 sec
+ @ 363 days 17 hours 32 mins 1 sec
+ @ 1093 days 17 hours 32 mins 1 sec
+ @ 1094 days 17 hours 32 mins 1 sec
+ @ 1459 days 17 hours 32 mins 1 sec
+ @ 1460 days 17 hours 32 mins 1 sec
+(55 rows)
+
+SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc;
+ week_trunc
+--------------------------
+ Mon Feb 23 00:00:00 2004
+(1 row)
+
+-- verify date_bin behaves the same as date_trunc for relevant intervals
+-- case 1: AD dates, origin < input
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '2001-01-01') AS equal
+FROM (
+ VALUES
+ ('week', '7 d'),
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamp '2020-02-29 15:44:17.71393')) ts (ts);
+ str | interval | equal
+-------------+----------+-------
+ week | 7 d | t
+ day | 1 d | t
+ hour | 1 h | t
+ minute | 1 m | t
+ second | 1 s | t
+ millisecond | 1 ms | t
+ microsecond | 1 us | t
+(7 rows)
+
+-- case 2: BC dates, origin < input
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '2000-01-01 BC') AS equal
+FROM (
+ VALUES
+ ('week', '7 d'),
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamp '0055-6-10 15:44:17.71393 BC')) ts (ts);
+ str | interval | equal
+-------------+----------+-------
+ week | 7 d | t
+ day | 1 d | t
+ hour | 1 h | t
+ minute | 1 m | t
+ second | 1 s | t
+ millisecond | 1 ms | t
+ microsecond | 1 us | t
+(7 rows)
+
+-- case 3: AD dates, origin > input
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '2020-03-02') AS equal
+FROM (
+ VALUES
+ ('week', '7 d'),
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamp '2020-02-29 15:44:17.71393')) ts (ts);
+ str | interval | equal
+-------------+----------+-------
+ week | 7 d | t
+ day | 1 d | t
+ hour | 1 h | t
+ minute | 1 m | t
+ second | 1 s | t
+ millisecond | 1 ms | t
+ microsecond | 1 us | t
+(7 rows)
+
+-- case 4: BC dates, origin > input
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '0055-06-17 BC') AS equal
+FROM (
+ VALUES
+ ('week', '7 d'),
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamp '0055-6-10 15:44:17.71393 BC')) ts (ts);
+ str | interval | equal
+-------------+----------+-------
+ week | 7 d | t
+ day | 1 d | t
+ hour | 1 h | t
+ minute | 1 m | t
+ second | 1 s | t
+ millisecond | 1 ms | t
+ microsecond | 1 us | t
+(7 rows)
+
+-- bin timestamps into arbitrary intervals
+SELECT
+ interval,
+ ts,
+ origin,
+ date_bin(interval::interval, ts, origin)
+FROM (
+ VALUES
+ ('15 days'),
+ ('2 hours'),
+ ('1 hour 30 minutes'),
+ ('15 minutes'),
+ ('10 seconds'),
+ ('100 milliseconds'),
+ ('250 microseconds')
+) intervals (interval),
+(VALUES (timestamp '2020-02-11 15:44:17.71393')) ts (ts),
+(VALUES (timestamp '2001-01-01')) origin (origin);
+ interval | ts | origin | date_bin
+-------------------+--------------------------------+--------------------------+--------------------------------
+ 15 days | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Thu Feb 06 00:00:00 2020
+ 2 hours | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 14:00:00 2020
+ 1 hour 30 minutes | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:00:00 2020
+ 15 minutes | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:30:00 2020
+ 10 seconds | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:44:10 2020
+ 100 milliseconds | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:44:17.7 2020
+ 250 microseconds | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:44:17.71375 2020
+(7 rows)
+
+-- shift bins using the origin parameter:
+SELECT date_bin('5 min'::interval, timestamp '2020-02-01 01:01:01', timestamp '2020-02-01 00:02:30');
+ date_bin
+--------------------------
+ Sat Feb 01 00:57:30 2020
+(1 row)
+
+-- disallow intervals with months or years
+SELECT date_bin('5 months'::interval, timestamp '2020-02-01 01:01:01', timestamp '2001-01-01');
+ERROR: timestamps cannot be binned into intervals containing months or years
+SELECT date_bin('5 years'::interval, timestamp '2020-02-01 01:01:01', timestamp '2001-01-01');
+ERROR: timestamps cannot be binned into intervals containing months or years
+-- disallow zero intervals
+SELECT date_bin('0 days'::interval, timestamp '1970-01-01 01:00:00' , timestamp '1970-01-01 00:00:00');
+ERROR: stride must be greater than zero
+-- disallow negative intervals
+SELECT date_bin('-2 days'::interval, timestamp '1970-01-01 01:00:00' , timestamp '1970-01-01 00:00:00');
+ERROR: stride must be greater than zero
+-- Test casting within a BETWEEN qualifier
+SELECT d1 - timestamp without time zone '1997-01-02' AS diff
+ FROM TIMESTAMP_TBL
+ WHERE d1 BETWEEN timestamp without time zone '1902-01-01'
+ AND timestamp without time zone '2038-01-01';
+ diff
+----------------------------------------
+ @ 9863 days ago
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 2 secs
+ @ 39 days 17 hours 32 mins 1.4 secs
+ @ 39 days 17 hours 32 mins 1.5 secs
+ @ 39 days 17 hours 32 mins 1.6 secs
+ @ 0
+ @ 3 hours 4 mins 5 secs
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 159 days 17 hours 32 mins 1 sec
+ @ 1724 days 18 hours 19 mins 20 secs
+ @ 1168 days 8 hours 14 mins 1 sec
+ @ 1168 days 13 hours 14 mins 2 secs
+ @ 1168 days 12 hours 14 mins 3 secs
+ @ 1168 days 3 hours 14 mins 4 secs
+ @ 1168 days 2 hours 14 mins 5 secs
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 159 days 18 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 40 days 17 hours 32 mins 1 sec
+ @ 41 days 17 hours 32 mins 1 sec
+ @ 42 days 17 hours 32 mins 1 sec
+ @ 43 days 17 hours 32 mins 1 sec
+ @ 44 days 17 hours 32 mins 1 sec
+ @ 45 days 17 hours 32 mins 1 sec
+ @ 45 days 17 hours 32 mins 1 sec
+ @ 308 days 6 hours 27 mins 59 secs ago
+ @ 307 days 6 hours 27 mins 59 secs ago
+ @ 306 days 6 hours 27 mins 59 secs ago
+ @ 2 days 6 hours 27 mins 59 secs ago
+ @ 1 day 6 hours 27 mins 59 secs ago
+ @ 6 hours 27 mins 59 secs ago
+ @ 57 days 17 hours 32 mins 1 sec
+ @ 58 days 17 hours 32 mins 1 sec
+ @ 362 days 17 hours 32 mins 1 sec
+ @ 363 days 17 hours 32 mins 1 sec
+ @ 1093 days 17 hours 32 mins 1 sec
+ @ 1094 days 17 hours 32 mins 1 sec
+ @ 1459 days 17 hours 32 mins 1 sec
+ @ 1460 days 17 hours 32 mins 1 sec
+(55 rows)
+
+-- DATE_PART (timestamp_part)
+SELECT d1 as "timestamp",
+ date_part( 'year', d1) AS year, date_part( 'month', d1) AS month,
+ date_part( 'day', d1) AS day, date_part( 'hour', d1) AS hour,
+ date_part( 'minute', d1) AS minute, date_part( 'second', d1) AS second
+ FROM TIMESTAMP_TBL;
+ timestamp | year | month | day | hour | minute | second
+-----------------------------+-----------+-------+-----+------+--------+--------
+ -infinity | -Infinity | | | | |
+ infinity | Infinity | | | | |
+ Thu Jan 01 00:00:00 1970 | 1970 | 1 | 1 | 0 | 0 | 0
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:02 1997 | 1997 | 2 | 10 | 17 | 32 | 2
+ Mon Feb 10 17:32:01.4 1997 | 1997 | 2 | 10 | 17 | 32 | 1.4
+ Mon Feb 10 17:32:01.5 1997 | 1997 | 2 | 10 | 17 | 32 | 1.5
+ Mon Feb 10 17:32:01.6 1997 | 1997 | 2 | 10 | 17 | 32 | 1.6
+ Thu Jan 02 00:00:00 1997 | 1997 | 1 | 2 | 0 | 0 | 0
+ Thu Jan 02 03:04:05 1997 | 1997 | 1 | 2 | 3 | 4 | 5
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Tue Jun 10 17:32:01 1997 | 1997 | 6 | 10 | 17 | 32 | 1
+ Sat Sep 22 18:19:20 2001 | 2001 | 9 | 22 | 18 | 19 | 20
+ Wed Mar 15 08:14:01 2000 | 2000 | 3 | 15 | 8 | 14 | 1
+ Wed Mar 15 13:14:02 2000 | 2000 | 3 | 15 | 13 | 14 | 2
+ Wed Mar 15 12:14:03 2000 | 2000 | 3 | 15 | 12 | 14 | 3
+ Wed Mar 15 03:14:04 2000 | 2000 | 3 | 15 | 3 | 14 | 4
+ Wed Mar 15 02:14:05 2000 | 2000 | 3 | 15 | 2 | 14 | 5
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:00 1997 | 1997 | 2 | 10 | 17 | 32 | 0
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Tue Jun 10 18:32:01 1997 | 1997 | 6 | 10 | 18 | 32 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1
+ Tue Feb 11 17:32:01 1997 | 1997 | 2 | 11 | 17 | 32 | 1
+ Wed Feb 12 17:32:01 1997 | 1997 | 2 | 12 | 17 | 32 | 1
+ Thu Feb 13 17:32:01 1997 | 1997 | 2 | 13 | 17 | 32 | 1
+ Fri Feb 14 17:32:01 1997 | 1997 | 2 | 14 | 17 | 32 | 1
+ Sat Feb 15 17:32:01 1997 | 1997 | 2 | 15 | 17 | 32 | 1
+ Sun Feb 16 17:32:01 1997 | 1997 | 2 | 16 | 17 | 32 | 1
+ Tue Feb 16 17:32:01 0097 BC | -97 | 2 | 16 | 17 | 32 | 1
+ Sat Feb 16 17:32:01 0097 | 97 | 2 | 16 | 17 | 32 | 1
+ Thu Feb 16 17:32:01 0597 | 597 | 2 | 16 | 17 | 32 | 1
+ Tue Feb 16 17:32:01 1097 | 1097 | 2 | 16 | 17 | 32 | 1
+ Sat Feb 16 17:32:01 1697 | 1697 | 2 | 16 | 17 | 32 | 1
+ Thu Feb 16 17:32:01 1797 | 1797 | 2 | 16 | 17 | 32 | 1
+ Tue Feb 16 17:32:01 1897 | 1897 | 2 | 16 | 17 | 32 | 1
+ Sun Feb 16 17:32:01 1997 | 1997 | 2 | 16 | 17 | 32 | 1
+ Sat Feb 16 17:32:01 2097 | 2097 | 2 | 16 | 17 | 32 | 1
+ Wed Feb 28 17:32:01 1996 | 1996 | 2 | 28 | 17 | 32 | 1
+ Thu Feb 29 17:32:01 1996 | 1996 | 2 | 29 | 17 | 32 | 1
+ Fri Mar 01 17:32:01 1996 | 1996 | 3 | 1 | 17 | 32 | 1
+ Mon Dec 30 17:32:01 1996 | 1996 | 12 | 30 | 17 | 32 | 1
+ Tue Dec 31 17:32:01 1996 | 1996 | 12 | 31 | 17 | 32 | 1
+ Wed Jan 01 17:32:01 1997 | 1997 | 1 | 1 | 17 | 32 | 1
+ Fri Feb 28 17:32:01 1997 | 1997 | 2 | 28 | 17 | 32 | 1
+ Sat Mar 01 17:32:01 1997 | 1997 | 3 | 1 | 17 | 32 | 1
+ Tue Dec 30 17:32:01 1997 | 1997 | 12 | 30 | 17 | 32 | 1
+ Wed Dec 31 17:32:01 1997 | 1997 | 12 | 31 | 17 | 32 | 1
+ Fri Dec 31 17:32:01 1999 | 1999 | 12 | 31 | 17 | 32 | 1
+ Sat Jan 01 17:32:01 2000 | 2000 | 1 | 1 | 17 | 32 | 1
+ Sun Dec 31 17:32:01 2000 | 2000 | 12 | 31 | 17 | 32 | 1
+ Mon Jan 01 17:32:01 2001 | 2001 | 1 | 1 | 17 | 32 | 1
+(65 rows)
+
+SELECT d1 as "timestamp",
+ date_part( 'quarter', d1) AS quarter, date_part( 'msec', d1) AS msec,
+ date_part( 'usec', d1) AS usec
+ FROM TIMESTAMP_TBL;
+ timestamp | quarter | msec | usec
+-----------------------------+---------+-------+----------
+ -infinity | | |
+ infinity | | |
+ Thu Jan 01 00:00:00 1970 | 1 | 0 | 0
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:02 1997 | 1 | 2000 | 2000000
+ Mon Feb 10 17:32:01.4 1997 | 1 | 1400 | 1400000
+ Mon Feb 10 17:32:01.5 1997 | 1 | 1500 | 1500000
+ Mon Feb 10 17:32:01.6 1997 | 1 | 1600 | 1600000
+ Thu Jan 02 00:00:00 1997 | 1 | 0 | 0
+ Thu Jan 02 03:04:05 1997 | 1 | 5000 | 5000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Tue Jun 10 17:32:01 1997 | 2 | 1000 | 1000000
+ Sat Sep 22 18:19:20 2001 | 3 | 20000 | 20000000
+ Wed Mar 15 08:14:01 2000 | 1 | 1000 | 1000000
+ Wed Mar 15 13:14:02 2000 | 1 | 2000 | 2000000
+ Wed Mar 15 12:14:03 2000 | 1 | 3000 | 3000000
+ Wed Mar 15 03:14:04 2000 | 1 | 4000 | 4000000
+ Wed Mar 15 02:14:05 2000 | 1 | 5000 | 5000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:00 1997 | 1 | 0 | 0
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Tue Jun 10 18:32:01 1997 | 2 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000
+ Tue Feb 11 17:32:01 1997 | 1 | 1000 | 1000000
+ Wed Feb 12 17:32:01 1997 | 1 | 1000 | 1000000
+ Thu Feb 13 17:32:01 1997 | 1 | 1000 | 1000000
+ Fri Feb 14 17:32:01 1997 | 1 | 1000 | 1000000
+ Sat Feb 15 17:32:01 1997 | 1 | 1000 | 1000000
+ Sun Feb 16 17:32:01 1997 | 1 | 1000 | 1000000
+ Tue Feb 16 17:32:01 0097 BC | 1 | 1000 | 1000000
+ Sat Feb 16 17:32:01 0097 | 1 | 1000 | 1000000
+ Thu Feb 16 17:32:01 0597 | 1 | 1000 | 1000000
+ Tue Feb 16 17:32:01 1097 | 1 | 1000 | 1000000
+ Sat Feb 16 17:32:01 1697 | 1 | 1000 | 1000000
+ Thu Feb 16 17:32:01 1797 | 1 | 1000 | 1000000
+ Tue Feb 16 17:32:01 1897 | 1 | 1000 | 1000000
+ Sun Feb 16 17:32:01 1997 | 1 | 1000 | 1000000
+ Sat Feb 16 17:32:01 2097 | 1 | 1000 | 1000000
+ Wed Feb 28 17:32:01 1996 | 1 | 1000 | 1000000
+ Thu Feb 29 17:32:01 1996 | 1 | 1000 | 1000000
+ Fri Mar 01 17:32:01 1996 | 1 | 1000 | 1000000
+ Mon Dec 30 17:32:01 1996 | 4 | 1000 | 1000000
+ Tue Dec 31 17:32:01 1996 | 4 | 1000 | 1000000
+ Wed Jan 01 17:32:01 1997 | 1 | 1000 | 1000000
+ Fri Feb 28 17:32:01 1997 | 1 | 1000 | 1000000
+ Sat Mar 01 17:32:01 1997 | 1 | 1000 | 1000000
+ Tue Dec 30 17:32:01 1997 | 4 | 1000 | 1000000
+ Wed Dec 31 17:32:01 1997 | 4 | 1000 | 1000000
+ Fri Dec 31 17:32:01 1999 | 4 | 1000 | 1000000
+ Sat Jan 01 17:32:01 2000 | 1 | 1000 | 1000000
+ Sun Dec 31 17:32:01 2000 | 4 | 1000 | 1000000
+ Mon Jan 01 17:32:01 2001 | 1 | 1000 | 1000000
+(65 rows)
+
+SELECT d1 as "timestamp",
+ date_part( 'isoyear', d1) AS isoyear, date_part( 'week', d1) AS week,
+ date_part( 'isodow', d1) AS isodow, date_part( 'dow', d1) AS dow,
+ date_part( 'doy', d1) AS doy
+ FROM TIMESTAMP_TBL;
+ timestamp | isoyear | week | isodow | dow | doy
+-----------------------------+-----------+------+--------+-----+-----
+ -infinity | -Infinity | | | |
+ infinity | Infinity | | | |
+ Thu Jan 01 00:00:00 1970 | 1970 | 1 | 4 | 4 | 1
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:02 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01.4 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01.5 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01.6 1997 | 1997 | 7 | 1 | 1 | 41
+ Thu Jan 02 00:00:00 1997 | 1997 | 1 | 4 | 4 | 2
+ Thu Jan 02 03:04:05 1997 | 1997 | 1 | 4 | 4 | 2
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Tue Jun 10 17:32:01 1997 | 1997 | 24 | 2 | 2 | 161
+ Sat Sep 22 18:19:20 2001 | 2001 | 38 | 6 | 6 | 265
+ Wed Mar 15 08:14:01 2000 | 2000 | 11 | 3 | 3 | 75
+ Wed Mar 15 13:14:02 2000 | 2000 | 11 | 3 | 3 | 75
+ Wed Mar 15 12:14:03 2000 | 2000 | 11 | 3 | 3 | 75
+ Wed Mar 15 03:14:04 2000 | 2000 | 11 | 3 | 3 | 75
+ Wed Mar 15 02:14:05 2000 | 2000 | 11 | 3 | 3 | 75
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:00 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Tue Jun 10 18:32:01 1997 | 1997 | 24 | 2 | 2 | 161
+ Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41
+ Tue Feb 11 17:32:01 1997 | 1997 | 7 | 2 | 2 | 42
+ Wed Feb 12 17:32:01 1997 | 1997 | 7 | 3 | 3 | 43
+ Thu Feb 13 17:32:01 1997 | 1997 | 7 | 4 | 4 | 44
+ Fri Feb 14 17:32:01 1997 | 1997 | 7 | 5 | 5 | 45
+ Sat Feb 15 17:32:01 1997 | 1997 | 7 | 6 | 6 | 46
+ Sun Feb 16 17:32:01 1997 | 1997 | 7 | 7 | 0 | 47
+ Tue Feb 16 17:32:01 0097 BC | -97 | 7 | 2 | 2 | 47
+ Sat Feb 16 17:32:01 0097 | 97 | 7 | 6 | 6 | 47
+ Thu Feb 16 17:32:01 0597 | 597 | 7 | 4 | 4 | 47
+ Tue Feb 16 17:32:01 1097 | 1097 | 7 | 2 | 2 | 47
+ Sat Feb 16 17:32:01 1697 | 1697 | 7 | 6 | 6 | 47
+ Thu Feb 16 17:32:01 1797 | 1797 | 7 | 4 | 4 | 47
+ Tue Feb 16 17:32:01 1897 | 1897 | 7 | 2 | 2 | 47
+ Sun Feb 16 17:32:01 1997 | 1997 | 7 | 7 | 0 | 47
+ Sat Feb 16 17:32:01 2097 | 2097 | 7 | 6 | 6 | 47
+ Wed Feb 28 17:32:01 1996 | 1996 | 9 | 3 | 3 | 59
+ Thu Feb 29 17:32:01 1996 | 1996 | 9 | 4 | 4 | 60
+ Fri Mar 01 17:32:01 1996 | 1996 | 9 | 5 | 5 | 61
+ Mon Dec 30 17:32:01 1996 | 1997 | 1 | 1 | 1 | 365
+ Tue Dec 31 17:32:01 1996 | 1997 | 1 | 2 | 2 | 366
+ Wed Jan 01 17:32:01 1997 | 1997 | 1 | 3 | 3 | 1
+ Fri Feb 28 17:32:01 1997 | 1997 | 9 | 5 | 5 | 59
+ Sat Mar 01 17:32:01 1997 | 1997 | 9 | 6 | 6 | 60
+ Tue Dec 30 17:32:01 1997 | 1998 | 1 | 2 | 2 | 364
+ Wed Dec 31 17:32:01 1997 | 1998 | 1 | 3 | 3 | 365
+ Fri Dec 31 17:32:01 1999 | 1999 | 52 | 5 | 5 | 365
+ Sat Jan 01 17:32:01 2000 | 1999 | 52 | 6 | 6 | 1
+ Sun Dec 31 17:32:01 2000 | 2000 | 52 | 7 | 0 | 366
+ Mon Jan 01 17:32:01 2001 | 2001 | 1 | 1 | 1 | 1
+(65 rows)
+
+SELECT d1 as "timestamp",
+ date_part( 'decade', d1) AS decade,
+ date_part( 'century', d1) AS century,
+ date_part( 'millennium', d1) AS millennium,
+ round(date_part( 'julian', d1)) AS julian,
+ date_part( 'epoch', d1) AS epoch
+ FROM TIMESTAMP_TBL;
+ timestamp | decade | century | millennium | julian | epoch
+-----------------------------+-----------+-----------+------------+-----------+--------------
+ -infinity | -Infinity | -Infinity | -Infinity | -Infinity | -Infinity
+ infinity | Infinity | Infinity | Infinity | Infinity | Infinity
+ Thu Jan 01 00:00:00 1970 | 197 | 20 | 2 | 2440588 | 0
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:02 1997 | 199 | 20 | 2 | 2450491 | 855595922
+ Mon Feb 10 17:32:01.4 1997 | 199 | 20 | 2 | 2450491 | 855595921.4
+ Mon Feb 10 17:32:01.5 1997 | 199 | 20 | 2 | 2450491 | 855595921.5
+ Mon Feb 10 17:32:01.6 1997 | 199 | 20 | 2 | 2450491 | 855595921.6
+ Thu Jan 02 00:00:00 1997 | 199 | 20 | 2 | 2450451 | 852163200
+ Thu Jan 02 03:04:05 1997 | 199 | 20 | 2 | 2450451 | 852174245
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Tue Jun 10 17:32:01 1997 | 199 | 20 | 2 | 2450611 | 865963921
+ Sat Sep 22 18:19:20 2001 | 200 | 21 | 3 | 2452176 | 1001182760
+ Wed Mar 15 08:14:01 2000 | 200 | 20 | 2 | 2451619 | 953108041
+ Wed Mar 15 13:14:02 2000 | 200 | 20 | 2 | 2451620 | 953126042
+ Wed Mar 15 12:14:03 2000 | 200 | 20 | 2 | 2451620 | 953122443
+ Wed Mar 15 03:14:04 2000 | 200 | 20 | 2 | 2451619 | 953090044
+ Wed Mar 15 02:14:05 2000 | 200 | 20 | 2 | 2451619 | 953086445
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:00 1997 | 199 | 20 | 2 | 2450491 | 855595920
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Tue Jun 10 18:32:01 1997 | 199 | 20 | 2 | 2450611 | 865967521
+ Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921
+ Tue Feb 11 17:32:01 1997 | 199 | 20 | 2 | 2450492 | 855682321
+ Wed Feb 12 17:32:01 1997 | 199 | 20 | 2 | 2450493 | 855768721
+ Thu Feb 13 17:32:01 1997 | 199 | 20 | 2 | 2450494 | 855855121
+ Fri Feb 14 17:32:01 1997 | 199 | 20 | 2 | 2450495 | 855941521
+ Sat Feb 15 17:32:01 1997 | 199 | 20 | 2 | 2450496 | 856027921
+ Sun Feb 16 17:32:01 1997 | 199 | 20 | 2 | 2450497 | 856114321
+ Tue Feb 16 17:32:01 0097 BC | -10 | -1 | -1 | 1686043 | -65192711279
+ Sat Feb 16 17:32:01 0097 | 9 | 1 | 1 | 1756537 | -59102029679
+ Thu Feb 16 17:32:01 0597 | 59 | 6 | 1 | 1939158 | -43323575279
+ Tue Feb 16 17:32:01 1097 | 109 | 11 | 2 | 2121779 | -27545120879
+ Sat Feb 16 17:32:01 1697 | 169 | 17 | 2 | 2340925 | -8610906479
+ Thu Feb 16 17:32:01 1797 | 179 | 18 | 2 | 2377449 | -5455232879
+ Tue Feb 16 17:32:01 1897 | 189 | 19 | 2 | 2413973 | -2299559279
+ Sun Feb 16 17:32:01 1997 | 199 | 20 | 2 | 2450497 | 856114321
+ Sat Feb 16 17:32:01 2097 | 209 | 21 | 3 | 2487022 | 4011874321
+ Wed Feb 28 17:32:01 1996 | 199 | 20 | 2 | 2450143 | 825528721
+ Thu Feb 29 17:32:01 1996 | 199 | 20 | 2 | 2450144 | 825615121
+ Fri Mar 01 17:32:01 1996 | 199 | 20 | 2 | 2450145 | 825701521
+ Mon Dec 30 17:32:01 1996 | 199 | 20 | 2 | 2450449 | 851967121
+ Tue Dec 31 17:32:01 1996 | 199 | 20 | 2 | 2450450 | 852053521
+ Wed Jan 01 17:32:01 1997 | 199 | 20 | 2 | 2450451 | 852139921
+ Fri Feb 28 17:32:01 1997 | 199 | 20 | 2 | 2450509 | 857151121
+ Sat Mar 01 17:32:01 1997 | 199 | 20 | 2 | 2450510 | 857237521
+ Tue Dec 30 17:32:01 1997 | 199 | 20 | 2 | 2450814 | 883503121
+ Wed Dec 31 17:32:01 1997 | 199 | 20 | 2 | 2450815 | 883589521
+ Fri Dec 31 17:32:01 1999 | 199 | 20 | 2 | 2451545 | 946661521
+ Sat Jan 01 17:32:01 2000 | 200 | 20 | 2 | 2451546 | 946747921
+ Sun Dec 31 17:32:01 2000 | 200 | 20 | 2 | 2451911 | 978283921
+ Mon Jan 01 17:32:01 2001 | 200 | 21 | 3 | 2451912 | 978370321
+(65 rows)
+
+-- extract implementation is mostly the same as date_part, so only
+-- test a few cases for additional coverage.
+SELECT d1 as "timestamp",
+ extract(microseconds from d1) AS microseconds,
+ extract(milliseconds from d1) AS milliseconds,
+ extract(seconds from d1) AS seconds,
+ round(extract(julian from d1)) AS julian,
+ extract(epoch from d1) AS epoch
+ FROM TIMESTAMP_TBL;
+ timestamp | microseconds | milliseconds | seconds | julian | epoch
+-----------------------------+--------------+--------------+-----------+-----------+---------------------
+ -infinity | | | | -Infinity | -Infinity
+ infinity | | | | Infinity | Infinity
+ Thu Jan 01 00:00:00 1970 | 0 | 0.000 | 0.000000 | 2440588 | 0.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:02 1997 | 2000000 | 2000.000 | 2.000000 | 2450491 | 855595922.000000
+ Mon Feb 10 17:32:01.4 1997 | 1400000 | 1400.000 | 1.400000 | 2450491 | 855595921.400000
+ Mon Feb 10 17:32:01.5 1997 | 1500000 | 1500.000 | 1.500000 | 2450491 | 855595921.500000
+ Mon Feb 10 17:32:01.6 1997 | 1600000 | 1600.000 | 1.600000 | 2450491 | 855595921.600000
+ Thu Jan 02 00:00:00 1997 | 0 | 0.000 | 0.000000 | 2450451 | 852163200.000000
+ Thu Jan 02 03:04:05 1997 | 5000000 | 5000.000 | 5.000000 | 2450451 | 852174245.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Tue Jun 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450611 | 865963921.000000
+ Sat Sep 22 18:19:20 2001 | 20000000 | 20000.000 | 20.000000 | 2452176 | 1001182760.000000
+ Wed Mar 15 08:14:01 2000 | 1000000 | 1000.000 | 1.000000 | 2451619 | 953108041.000000
+ Wed Mar 15 13:14:02 2000 | 2000000 | 2000.000 | 2.000000 | 2451620 | 953126042.000000
+ Wed Mar 15 12:14:03 2000 | 3000000 | 3000.000 | 3.000000 | 2451620 | 953122443.000000
+ Wed Mar 15 03:14:04 2000 | 4000000 | 4000.000 | 4.000000 | 2451619 | 953090044.000000
+ Wed Mar 15 02:14:05 2000 | 5000000 | 5000.000 | 5.000000 | 2451619 | 953086445.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:00 1997 | 0 | 0.000 | 0.000000 | 2450491 | 855595920.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Tue Jun 10 18:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450611 | 865967521.000000
+ Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000
+ Tue Feb 11 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450492 | 855682321.000000
+ Wed Feb 12 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450493 | 855768721.000000
+ Thu Feb 13 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450494 | 855855121.000000
+ Fri Feb 14 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450495 | 855941521.000000
+ Sat Feb 15 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450496 | 856027921.000000
+ Sun Feb 16 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450497 | 856114321.000000
+ Tue Feb 16 17:32:01 0097 BC | 1000000 | 1000.000 | 1.000000 | 1686043 | -65192711279.000000
+ Sat Feb 16 17:32:01 0097 | 1000000 | 1000.000 | 1.000000 | 1756537 | -59102029679.000000
+ Thu Feb 16 17:32:01 0597 | 1000000 | 1000.000 | 1.000000 | 1939158 | -43323575279.000000
+ Tue Feb 16 17:32:01 1097 | 1000000 | 1000.000 | 1.000000 | 2121779 | -27545120879.000000
+ Sat Feb 16 17:32:01 1697 | 1000000 | 1000.000 | 1.000000 | 2340925 | -8610906479.000000
+ Thu Feb 16 17:32:01 1797 | 1000000 | 1000.000 | 1.000000 | 2377449 | -5455232879.000000
+ Tue Feb 16 17:32:01 1897 | 1000000 | 1000.000 | 1.000000 | 2413973 | -2299559279.000000
+ Sun Feb 16 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450497 | 856114321.000000
+ Sat Feb 16 17:32:01 2097 | 1000000 | 1000.000 | 1.000000 | 2487022 | 4011874321.000000
+ Wed Feb 28 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450143 | 825528721.000000
+ Thu Feb 29 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450144 | 825615121.000000
+ Fri Mar 01 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450145 | 825701521.000000
+ Mon Dec 30 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450449 | 851967121.000000
+ Tue Dec 31 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450450 | 852053521.000000
+ Wed Jan 01 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450451 | 852139921.000000
+ Fri Feb 28 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450509 | 857151121.000000
+ Sat Mar 01 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450510 | 857237521.000000
+ Tue Dec 30 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450814 | 883503121.000000
+ Wed Dec 31 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450815 | 883589521.000000
+ Fri Dec 31 17:32:01 1999 | 1000000 | 1000.000 | 1.000000 | 2451545 | 946661521.000000
+ Sat Jan 01 17:32:01 2000 | 1000000 | 1000.000 | 1.000000 | 2451546 | 946747921.000000
+ Sun Dec 31 17:32:01 2000 | 1000000 | 1000.000 | 1.000000 | 2451911 | 978283921.000000
+ Mon Jan 01 17:32:01 2001 | 1000000 | 1000.000 | 1.000000 | 2451912 | 978370321.000000
+(65 rows)
+
+-- value near upper bound uses special case in code
+SELECT date_part('epoch', '294270-01-01 00:00:00'::timestamp);
+ date_part
+---------------
+ 9224097091200
+(1 row)
+
+SELECT extract(epoch from '294270-01-01 00:00:00'::timestamp);
+ extract
+----------------------
+ 9224097091200.000000
+(1 row)
+
+-- another internal overflow test case
+SELECT extract(epoch from '5000-01-01 00:00:00'::timestamp);
+ extract
+--------------------
+ 95617584000.000000
+(1 row)
+
+-- TO_CHAR()
+SELECT to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
+ FROM TIMESTAMP_TBL;
+ to_char
+------------------------------------------------------------------------------------------
+
+
+ THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan
+ THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue JUNE June june VI JUN Jun jun
+ SATURDAY Saturday saturday SAT Sat sat SEPTEMBER September september IX SEP Sep sep
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue JUNE June june VI JUN Jun jun
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb
+ WEDNESDAY Wednesday wednesday WED Wed wed FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb
+ FRIDAY Friday friday FRI Fri fri FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb
+ SUNDAY Sunday sunday SUN Sun sun FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb
+ SUNDAY Sunday sunday SUN Sun sun FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb
+ WEDNESDAY Wednesday wednesday WED Wed wed FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb
+ FRIDAY Friday friday FRI Fri fri MARCH March march III MAR Mar mar
+ MONDAY Monday monday MON Mon mon DECEMBER December december XII DEC Dec dec
+ TUESDAY Tuesday tuesday TUE Tue tue DECEMBER December december XII DEC Dec dec
+ WEDNESDAY Wednesday wednesday WED Wed wed JANUARY January january I JAN Jan jan
+ FRIDAY Friday friday FRI Fri fri FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat MARCH March march III MAR Mar mar
+ TUESDAY Tuesday tuesday TUE Tue tue DECEMBER December december XII DEC Dec dec
+ WEDNESDAY Wednesday wednesday WED Wed wed DECEMBER December december XII DEC Dec dec
+ FRIDAY Friday friday FRI Fri fri DECEMBER December december XII DEC Dec dec
+ SATURDAY Saturday saturday SAT Sat sat JANUARY January january I JAN Jan jan
+ SUNDAY Sunday sunday SUN Sun sun DECEMBER December december XII DEC Dec dec
+ MONDAY Monday monday MON Mon mon JANUARY January january I JAN Jan jan
+(65 rows)
+
+SELECT to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM')
+ FROM TIMESTAMP_TBL;
+ to_char
+--------------------------------------------------------------
+
+
+ THURSDAY Thursday thursday JANUARY January january I
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ THURSDAY Thursday thursday JANUARY January january I
+ THURSDAY Thursday thursday JANUARY January january I
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ TUESDAY Tuesday tuesday JUNE June june VI
+ SATURDAY Saturday saturday SEPTEMBER September september IX
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ TUESDAY Tuesday tuesday JUNE June june VI
+ MONDAY Monday monday FEBRUARY February february II
+ TUESDAY Tuesday tuesday FEBRUARY February february II
+ WEDNESDAY Wednesday wednesday FEBRUARY February february II
+ THURSDAY Thursday thursday FEBRUARY February february II
+ FRIDAY Friday friday FEBRUARY February february II
+ SATURDAY Saturday saturday FEBRUARY February february II
+ SUNDAY Sunday sunday FEBRUARY February february II
+ TUESDAY Tuesday tuesday FEBRUARY February february II
+ SATURDAY Saturday saturday FEBRUARY February february II
+ THURSDAY Thursday thursday FEBRUARY February february II
+ TUESDAY Tuesday tuesday FEBRUARY February february II
+ SATURDAY Saturday saturday FEBRUARY February february II
+ THURSDAY Thursday thursday FEBRUARY February february II
+ TUESDAY Tuesday tuesday FEBRUARY February february II
+ SUNDAY Sunday sunday FEBRUARY February february II
+ SATURDAY Saturday saturday FEBRUARY February february II
+ WEDNESDAY Wednesday wednesday FEBRUARY February february II
+ THURSDAY Thursday thursday FEBRUARY February february II
+ FRIDAY Friday friday MARCH March march III
+ MONDAY Monday monday DECEMBER December december XII
+ TUESDAY Tuesday tuesday DECEMBER December december XII
+ WEDNESDAY Wednesday wednesday JANUARY January january I
+ FRIDAY Friday friday FEBRUARY February february II
+ SATURDAY Saturday saturday MARCH March march III
+ TUESDAY Tuesday tuesday DECEMBER December december XII
+ WEDNESDAY Wednesday wednesday DECEMBER December december XII
+ FRIDAY Friday friday DECEMBER December december XII
+ SATURDAY Saturday saturday JANUARY January january I
+ SUNDAY Sunday sunday DECEMBER December december XII
+ MONDAY Monday monday JANUARY January january I
+(65 rows)
+
+SELECT to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J')
+ FROM TIMESTAMP_TBL;
+ to_char
+--------------------------------------------------
+
+
+ 1,970 1970 970 70 0 20 1 01 01 001 01 5 2440588
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451
+ 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
+ 2,001 2001 001 01 1 21 3 09 38 265 22 7 2452175
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 042 11 3 2450491
+ 1,997 1997 997 97 7 20 1 02 07 043 12 4 2450492
+ 1,997 1997 997 97 7 20 1 02 07 044 13 5 2450493
+ 1,997 1997 997 97 7 20 1 02 07 045 14 6 2450494
+ 1,997 1997 997 97 7 20 1 02 07 046 15 7 2450495
+ 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
+ 0,097 0097 097 97 7 -01 1 02 07 047 16 3 1686042
+ 0,097 0097 097 97 7 01 1 02 07 047 16 7 1756536
+ 0,597 0597 597 97 7 06 1 02 07 047 16 5 1939157
+ 1,097 1097 097 97 7 11 1 02 07 047 16 3 2121778
+ 1,697 1697 697 97 7 17 1 02 07 047 16 7 2340924
+ 1,797 1797 797 97 7 18 1 02 07 047 16 5 2377448
+ 1,897 1897 897 97 7 19 1 02 07 047 16 3 2413972
+ 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
+ 2,097 2097 097 97 7 21 1 02 07 047 16 7 2487021
+ 1,996 1996 996 96 6 20 1 02 09 059 28 4 2450142
+ 1,996 1996 996 96 6 20 1 02 09 060 29 5 2450143
+ 1,996 1996 996 96 6 20 1 03 09 061 01 6 2450144
+ 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448
+ 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449
+ 1,997 1997 997 97 7 20 1 01 01 001 01 4 2450450
+ 1,997 1997 997 97 7 20 1 02 09 059 28 6 2450508
+ 1,997 1997 997 97 7 20 1 03 09 060 01 7 2450509
+ 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813
+ 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814
+ 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544
+ 2,000 2000 000 00 0 20 1 01 01 001 01 7 2451545
+ 2,000 2000 000 00 0 20 4 12 53 366 31 1 2451910
+ 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911
+(65 rows)
+
+SELECT to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
+ FROM TIMESTAMP_TBL;
+ to_char
+-------------------------------------------------
+
+
+ 1,970 1970 970 70 0 20 1 1 1 1 1 5 2440588
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451
+ 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 2 6 23 161 10 3 2450610
+ 2,001 2001 1 1 1 21 3 9 38 265 22 7 2452175
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 2 6 23 161 10 3 2450610
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 42 11 3 2450491
+ 1,997 1997 997 97 7 20 1 2 7 43 12 4 2450492
+ 1,997 1997 997 97 7 20 1 2 7 44 13 5 2450493
+ 1,997 1997 997 97 7 20 1 2 7 45 14 6 2450494
+ 1,997 1997 997 97 7 20 1 2 7 46 15 7 2450495
+ 1,997 1997 997 97 7 20 1 2 7 47 16 1 2450496
+ 0,097 97 97 97 7 -1 1 2 7 47 16 3 1686042
+ 0,097 97 97 97 7 1 1 2 7 47 16 7 1756536
+ 0,597 597 597 97 7 6 1 2 7 47 16 5 1939157
+ 1,097 1097 97 97 7 11 1 2 7 47 16 3 2121778
+ 1,697 1697 697 97 7 17 1 2 7 47 16 7 2340924
+ 1,797 1797 797 97 7 18 1 2 7 47 16 5 2377448
+ 1,897 1897 897 97 7 19 1 2 7 47 16 3 2413972
+ 1,997 1997 997 97 7 20 1 2 7 47 16 1 2450496
+ 2,097 2097 97 97 7 21 1 2 7 47 16 7 2487021
+ 1,996 1996 996 96 6 20 1 2 9 59 28 4 2450142
+ 1,996 1996 996 96 6 20 1 2 9 60 29 5 2450143
+ 1,996 1996 996 96 6 20 1 3 9 61 1 6 2450144
+ 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448
+ 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449
+ 1,997 1997 997 97 7 20 1 1 1 1 1 4 2450450
+ 1,997 1997 997 97 7 20 1 2 9 59 28 6 2450508
+ 1,997 1997 997 97 7 20 1 3 9 60 1 7 2450509
+ 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813
+ 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814
+ 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544
+ 2,000 2000 0 0 0 20 1 1 1 1 1 7 2451545
+ 2,000 2000 0 0 0 20 4 12 53 366 31 1 2451910
+ 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911
+(65 rows)
+
+SELECT to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+ FROM TIMESTAMP_TBL;
+ to_char
+----------------------
+
+
+ 12 12 00 00 00 0
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 02 63122
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 12 12 00 00 00 0
+ 03 03 03 04 05 11045
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 06 06 18 19 20 65960
+ 08 08 08 14 01 29641
+ 01 01 13 14 02 47642
+ 12 12 12 14 03 44043
+ 03 03 03 14 04 11644
+ 02 02 02 14 05 8045
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 00 63120
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 06 06 18 32 01 66721
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+(65 rows)
+
+SELECT to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
+ FROM TIMESTAMP_TBL;
+ to_char
+-------------------------------------------------
+
+
+ HH:MI:SS is 12:00:00 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:02 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 12:00:00 "text between quote marks"
+ HH:MI:SS is 03:04:05 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 06:19:20 "text between quote marks"
+ HH:MI:SS is 08:14:01 "text between quote marks"
+ HH:MI:SS is 01:14:02 "text between quote marks"
+ HH:MI:SS is 12:14:03 "text between quote marks"
+ HH:MI:SS is 03:14:04 "text between quote marks"
+ HH:MI:SS is 02:14:05 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:00 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 06:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+(65 rows)
+
+SELECT to_char(d1, 'HH24--text--MI--text--SS')
+ FROM TIMESTAMP_TBL;
+ to_char
+------------------------
+
+
+ 00--text--00--text--00
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--02
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 00--text--00--text--00
+ 03--text--04--text--05
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 18--text--19--text--20
+ 08--text--14--text--01
+ 13--text--14--text--02
+ 12--text--14--text--03
+ 03--text--14--text--04
+ 02--text--14--text--05
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--00
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 18--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+(65 rows)
+
+SELECT to_char(d1, 'YYYYTH YYYYth Jth')
+ FROM TIMESTAMP_TBL;
+ to_char
+-------------------------
+
+
+ 1970TH 1970th 2440588th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450451st
+ 1997TH 1997th 2450451st
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450610th
+ 2001ST 2001st 2452175th
+ 2000TH 2000th 2451619th
+ 2000TH 2000th 2451619th
+ 2000TH 2000th 2451619th
+ 2000TH 2000th 2451619th
+ 2000TH 2000th 2451619th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450610th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450491st
+ 1997TH 1997th 2450492nd
+ 1997TH 1997th 2450493rd
+ 1997TH 1997th 2450494th
+ 1997TH 1997th 2450495th
+ 1997TH 1997th 2450496th
+ 0097TH 0097th 1686042nd
+ 0097TH 0097th 1756536th
+ 0597TH 0597th 1939157th
+ 1097TH 1097th 2121778th
+ 1697TH 1697th 2340924th
+ 1797TH 1797th 2377448th
+ 1897TH 1897th 2413972nd
+ 1997TH 1997th 2450496th
+ 2097TH 2097th 2487021st
+ 1996TH 1996th 2450142nd
+ 1996TH 1996th 2450143rd
+ 1996TH 1996th 2450144th
+ 1996TH 1996th 2450448th
+ 1996TH 1996th 2450449th
+ 1997TH 1997th 2450450th
+ 1997TH 1997th 2450508th
+ 1997TH 1997th 2450509th
+ 1997TH 1997th 2450813th
+ 1997TH 1997th 2450814th
+ 1999TH 1999th 2451544th
+ 2000TH 2000th 2451545th
+ 2000TH 2000th 2451910th
+ 2001ST 2001st 2451911th
+(65 rows)
+
+SELECT to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
+ FROM TIMESTAMP_TBL;
+ to_char
+---------------------------------------------------------------------
+
+
+ 1970 A.D. 1970 a.d. 1970 ad 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:02 P.M. 05:32:02 p.m. 05:32:02 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am
+ 1997 A.D. 1997 a.d. 1997 ad 03:04:05 A.M. 03:04:05 a.m. 03:04:05 am
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2001 A.D. 2001 a.d. 2001 ad 06:19:20 P.M. 06:19:20 p.m. 06:19:20 pm
+ 2000 A.D. 2000 a.d. 2000 ad 08:14:01 A.M. 08:14:01 a.m. 08:14:01 am
+ 2000 A.D. 2000 a.d. 2000 ad 01:14:02 P.M. 01:14:02 p.m. 01:14:02 pm
+ 2000 A.D. 2000 a.d. 2000 ad 12:14:03 P.M. 12:14:03 p.m. 12:14:03 pm
+ 2000 A.D. 2000 a.d. 2000 ad 03:14:04 A.M. 03:14:04 a.m. 03:14:04 am
+ 2000 A.D. 2000 a.d. 2000 ad 02:14:05 A.M. 02:14:05 a.m. 02:14:05 am
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:00 P.M. 05:32:00 p.m. 05:32:00 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 06:32:01 P.M. 06:32:01 p.m. 06:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 0097 B.C. 0097 b.c. 0097 bc 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 0097 A.D. 0097 a.d. 0097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 0597 A.D. 0597 a.d. 0597 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1097 A.D. 1097 a.d. 1097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1697 A.D. 1697 a.d. 1697 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1797 A.D. 1797 a.d. 1797 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1897 A.D. 1897 a.d. 1897 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2097 A.D. 2097 a.d. 2097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1999 A.D. 1999 a.d. 1999 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2001 A.D. 2001 a.d. 2001 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+(65 rows)
+
+SELECT to_char(d1, 'IYYY IYY IY I IW IDDD ID')
+ FROM TIMESTAMP_TBL;
+ to_char
+------------------------
+
+
+ 1970 970 70 0 01 004 4
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 01 004 4
+ 1997 997 97 7 01 004 4
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 24 163 2
+ 2001 001 01 1 38 265 6
+ 2000 000 00 0 11 073 3
+ 2000 000 00 0 11 073 3
+ 2000 000 00 0 11 073 3
+ 2000 000 00 0 11 073 3
+ 2000 000 00 0 11 073 3
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 24 163 2
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 044 2
+ 1997 997 97 7 07 045 3
+ 1997 997 97 7 07 046 4
+ 1997 997 97 7 07 047 5
+ 1997 997 97 7 07 048 6
+ 1997 997 97 7 07 049 7
+ 0097 097 97 7 07 044 2
+ 0097 097 97 7 07 048 6
+ 0597 597 97 7 07 046 4
+ 1097 097 97 7 07 044 2
+ 1697 697 97 7 07 048 6
+ 1797 797 97 7 07 046 4
+ 1897 897 97 7 07 044 2
+ 1997 997 97 7 07 049 7
+ 2097 097 97 7 07 048 6
+ 1996 996 96 6 09 059 3
+ 1996 996 96 6 09 060 4
+ 1996 996 96 6 09 061 5
+ 1997 997 97 7 01 001 1
+ 1997 997 97 7 01 002 2
+ 1997 997 97 7 01 003 3
+ 1997 997 97 7 09 061 5
+ 1997 997 97 7 09 062 6
+ 1998 998 98 8 01 002 2
+ 1998 998 98 8 01 003 3
+ 1999 999 99 9 52 362 5
+ 1999 999 99 9 52 363 6
+ 2000 000 00 0 52 364 7
+ 2001 001 01 1 01 001 1
+(65 rows)
+
+SELECT to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID')
+ FROM TIMESTAMP_TBL;
+ to_char
+------------------------
+
+
+ 1970 970 70 0 1 4 4
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 1 4 4
+ 1997 997 97 7 1 4 4
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 24 163 2
+ 2001 1 1 1 38 265 6
+ 2000 0 0 0 11 73 3
+ 2000 0 0 0 11 73 3
+ 2000 0 0 0 11 73 3
+ 2000 0 0 0 11 73 3
+ 2000 0 0 0 11 73 3
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 24 163 2
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 44 2
+ 1997 997 97 7 7 45 3
+ 1997 997 97 7 7 46 4
+ 1997 997 97 7 7 47 5
+ 1997 997 97 7 7 48 6
+ 1997 997 97 7 7 49 7
+ 97 97 97 7 7 44 2
+ 97 97 97 7 7 48 6
+ 597 597 97 7 7 46 4
+ 1097 97 97 7 7 44 2
+ 1697 697 97 7 7 48 6
+ 1797 797 97 7 7 46 4
+ 1897 897 97 7 7 44 2
+ 1997 997 97 7 7 49 7
+ 2097 97 97 7 7 48 6
+ 1996 996 96 6 9 59 3
+ 1996 996 96 6 9 60 4
+ 1996 996 96 6 9 61 5
+ 1997 997 97 7 1 1 1
+ 1997 997 97 7 1 2 2
+ 1997 997 97 7 1 3 3
+ 1997 997 97 7 9 61 5
+ 1997 997 97 7 9 62 6
+ 1998 998 98 8 1 2 2
+ 1998 998 98 8 1 3 3
+ 1999 999 99 9 52 362 5
+ 1999 999 99 9 52 363 6
+ 2000 0 0 0 52 364 7
+ 2001 1 1 1 1 1 1
+(65 rows)
+
+SELECT to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff5 ff6 MS US')
+ FROM (VALUES
+ ('2018-11-02 12:34:56'::timestamp),
+ ('2018-11-02 12:34:56.78'),
+ ('2018-11-02 12:34:56.78901'),
+ ('2018-11-02 12:34:56.78901234')
+ ) d(d);
+ to_char
+--------------------------------------------------------------------
+ 0 00 000 0000 00000 000000 0 00 000 0000 00000 000000 000 000000
+ 7 78 780 7800 78000 780000 7 78 780 7800 78000 780000 780 780000
+ 7 78 789 7890 78901 789010 7 78 789 7890 78901 789010 789 789010
+ 7 78 789 7890 78901 789012 7 78 789 7890 78901 789012 789 789012
+(4 rows)
+
+-- Roman months, with upper and lower case.
+SELECT i,
+ to_char(i * interval '1mon', 'rm'),
+ to_char(i * interval '1mon', 'RM')
+ FROM generate_series(-13, 13) i;
+ i | to_char | to_char
+-----+---------+---------
+ -13 | xii | XII
+ -12 | i | I
+ -11 | ii | II
+ -10 | iii | III
+ -9 | iv | IV
+ -8 | v | V
+ -7 | vi | VI
+ -6 | vii | VII
+ -5 | viii | VIII
+ -4 | ix | IX
+ -3 | x | X
+ -2 | xi | XI
+ -1 | xii | XII
+ 0 | |
+ 1 | i | I
+ 2 | ii | II
+ 3 | iii | III
+ 4 | iv | IV
+ 5 | v | V
+ 6 | vi | VI
+ 7 | vii | VII
+ 8 | viii | VIII
+ 9 | ix | IX
+ 10 | x | X
+ 11 | xi | XI
+ 12 | xii | XII
+ 13 | i | I
+(27 rows)
+
+-- timestamp numeric fields constructor
+SELECT make_timestamp(2014, 12, 28, 6, 30, 45.887);
+ make_timestamp
+------------------------------
+ Sun Dec 28 06:30:45.887 2014
+(1 row)
+
+SELECT make_timestamp(-44, 3, 15, 12, 30, 15);
+ make_timestamp
+-----------------------------
+ Fri Mar 15 12:30:15 0044 BC
+(1 row)
+
+-- should fail
+select make_timestamp(0, 7, 15, 12, 30, 15);
+ERROR: date field value out of range: 0-07-15
+-- generate_series for timestamp
+select * from generate_series('2020-01-01 00:00'::timestamp,
+ '2020-01-02 03:00'::timestamp,
+ '1 hour'::interval);
+ generate_series
+--------------------------
+ Wed Jan 01 00:00:00 2020
+ Wed Jan 01 01:00:00 2020
+ Wed Jan 01 02:00:00 2020
+ Wed Jan 01 03:00:00 2020
+ Wed Jan 01 04:00:00 2020
+ Wed Jan 01 05:00:00 2020
+ Wed Jan 01 06:00:00 2020
+ Wed Jan 01 07:00:00 2020
+ Wed Jan 01 08:00:00 2020
+ Wed Jan 01 09:00:00 2020
+ Wed Jan 01 10:00:00 2020
+ Wed Jan 01 11:00:00 2020
+ Wed Jan 01 12:00:00 2020
+ Wed Jan 01 13:00:00 2020
+ Wed Jan 01 14:00:00 2020
+ Wed Jan 01 15:00:00 2020
+ Wed Jan 01 16:00:00 2020
+ Wed Jan 01 17:00:00 2020
+ Wed Jan 01 18:00:00 2020
+ Wed Jan 01 19:00:00 2020
+ Wed Jan 01 20:00:00 2020
+ Wed Jan 01 21:00:00 2020
+ Wed Jan 01 22:00:00 2020
+ Wed Jan 01 23:00:00 2020
+ Thu Jan 02 00:00:00 2020
+ Thu Jan 02 01:00:00 2020
+ Thu Jan 02 02:00:00 2020
+ Thu Jan 02 03:00:00 2020
+(28 rows)
+
+-- the LIMIT should allow this to terminate in a reasonable amount of time
+-- (but that unfortunately doesn't work yet for SELECT * FROM ...)
+select generate_series('2022-01-01 00:00'::timestamp,
+ 'infinity'::timestamp,
+ '1 month'::interval) limit 10;
+ generate_series
+--------------------------
+ Sat Jan 01 00:00:00 2022
+ Tue Feb 01 00:00:00 2022
+ Tue Mar 01 00:00:00 2022
+ Fri Apr 01 00:00:00 2022
+ Sun May 01 00:00:00 2022
+ Wed Jun 01 00:00:00 2022
+ Fri Jul 01 00:00:00 2022
+ Mon Aug 01 00:00:00 2022
+ Thu Sep 01 00:00:00 2022
+ Sat Oct 01 00:00:00 2022
+(10 rows)
+
+-- errors
+select * from generate_series('2020-01-01 00:00'::timestamp,
+ '2020-01-02 03:00'::timestamp,
+ '0 hour'::interval);
+ERROR: step size cannot equal zero
diff --git a/ydb/library/yql/tests/postgresql/original/cases/timestamp.sql b/ydb/library/yql/tests/postgresql/original/cases/timestamp.sql
new file mode 100644
index 0000000000..ebc969f36c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/timestamp.sql
@@ -0,0 +1,386 @@
+--
+-- TIMESTAMP
+--
+
+CREATE TABLE TIMESTAMP_TBL (d1 timestamp(2) without time zone);
+
+-- Test shorthand input values
+-- We can't just "select" the results since they aren't constants; test for
+-- equality instead. We can do that by running the test inside a transaction
+-- block, within which the value of 'now' shouldn't change, and so these
+-- related values shouldn't either.
+
+BEGIN;
+
+INSERT INTO TIMESTAMP_TBL VALUES ('today');
+INSERT INTO TIMESTAMP_TBL VALUES ('yesterday');
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow');
+-- time zone should be ignored by this data type
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow EST');
+INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow zulu');
+
+SELECT count(*) AS One FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'today';
+SELECT count(*) AS Three FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'tomorrow';
+SELECT count(*) AS One FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'yesterday';
+
+COMMIT;
+
+DELETE FROM TIMESTAMP_TBL;
+
+-- Verify that 'now' *does* change over a reasonable interval such as 100 msec,
+-- and that it doesn't change over the same interval within a transaction block
+
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+
+BEGIN;
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+INSERT INTO TIMESTAMP_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+SELECT count(*) AS two FROM TIMESTAMP_TBL WHERE d1 = timestamp(2) without time zone 'now';
+SELECT count(d1) AS three, count(DISTINCT d1) AS two FROM TIMESTAMP_TBL;
+COMMIT;
+
+TRUNCATE TIMESTAMP_TBL;
+
+-- Special values
+INSERT INTO TIMESTAMP_TBL VALUES ('-infinity');
+INSERT INTO TIMESTAMP_TBL VALUES ('infinity');
+INSERT INTO TIMESTAMP_TBL VALUES ('epoch');
+
+-- Postgres v6.0 standard output format
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST');
+
+-- Variations on Postgres v6.1 standard output format
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST');
+
+-- ISO 8601 format
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02 03:04:05');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-08');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 -08:00');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 -0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 17:32:01 -07:00');
+INSERT INTO TIMESTAMP_TBL VALUES ('2001-09-22T18:19:20');
+
+-- POSIX format (note that the timezone abbrev is just decoration here)
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 08:14:01 GMT+8');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 13:14:02 GMT-1');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 12:14:03 GMT-2');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 03:14:04 PST+8');
+INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 02:14:05 MST+7:00');
+
+-- Variations for acceptable input formats
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997 -0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 5:32PM 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997/02/10 17:32:01-0800');
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('02-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 PST');
+set datestyle to ymd;
+INSERT INTO TIMESTAMP_TBL VALUES ('97FEB10 5:32:01PM UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('97/02/10 17:32:01 UTC');
+reset datestyle;
+INSERT INTO TIMESTAMP_TBL VALUES ('1997.041 17:32:01 UTC');
+INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 America/New_York');
+-- this fails (even though TZ is a no-op, we still look it up)
+INSERT INTO TIMESTAMP_TBL VALUES ('19970710 173201 America/Does_not_exist');
+
+-- Check date conversion and date arithmetic
+INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 18:32:01 PDT');
+
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 11 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 12 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 13 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 14 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 15 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997');
+
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097 BC');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0597');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1697');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1797');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1897');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 2097');
+
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1996');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1997');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1999');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2000');
+INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 2000');
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2001');
+
+-- Currently unsupported syntax and ranges
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 -0097');
+INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC');
+
+SELECT d1 FROM TIMESTAMP_TBL;
+
+-- Check behavior at the boundaries of the timestamp range
+SELECT '4714-11-24 00:00:00 BC'::timestamp;
+SELECT '4714-11-23 23:59:59 BC'::timestamp; -- out of range
+SELECT '294276-12-31 23:59:59'::timestamp;
+SELECT '294277-01-01 00:00:00'::timestamp; -- out of range
+
+-- Demonstrate functions and operators
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 > timestamp without time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 < timestamp without time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 = timestamp without time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 != timestamp without time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 <= timestamp without time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMP_TBL
+ WHERE d1 >= timestamp without time zone '1997-01-02';
+
+SELECT d1 - timestamp without time zone '1997-01-02' AS diff
+ FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
+
+SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc;
+
+-- verify date_bin behaves the same as date_trunc for relevant intervals
+
+-- case 1: AD dates, origin < input
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '2001-01-01') AS equal
+FROM (
+ VALUES
+ ('week', '7 d'),
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamp '2020-02-29 15:44:17.71393')) ts (ts);
+
+-- case 2: BC dates, origin < input
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '2000-01-01 BC') AS equal
+FROM (
+ VALUES
+ ('week', '7 d'),
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamp '0055-6-10 15:44:17.71393 BC')) ts (ts);
+
+-- case 3: AD dates, origin > input
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '2020-03-02') AS equal
+FROM (
+ VALUES
+ ('week', '7 d'),
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamp '2020-02-29 15:44:17.71393')) ts (ts);
+
+-- case 4: BC dates, origin > input
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '0055-06-17 BC') AS equal
+FROM (
+ VALUES
+ ('week', '7 d'),
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamp '0055-6-10 15:44:17.71393 BC')) ts (ts);
+
+-- bin timestamps into arbitrary intervals
+SELECT
+ interval,
+ ts,
+ origin,
+ date_bin(interval::interval, ts, origin)
+FROM (
+ VALUES
+ ('15 days'),
+ ('2 hours'),
+ ('1 hour 30 minutes'),
+ ('15 minutes'),
+ ('10 seconds'),
+ ('100 milliseconds'),
+ ('250 microseconds')
+) intervals (interval),
+(VALUES (timestamp '2020-02-11 15:44:17.71393')) ts (ts),
+(VALUES (timestamp '2001-01-01')) origin (origin);
+
+-- shift bins using the origin parameter:
+SELECT date_bin('5 min'::interval, timestamp '2020-02-01 01:01:01', timestamp '2020-02-01 00:02:30');
+
+-- disallow intervals with months or years
+SELECT date_bin('5 months'::interval, timestamp '2020-02-01 01:01:01', timestamp '2001-01-01');
+SELECT date_bin('5 years'::interval, timestamp '2020-02-01 01:01:01', timestamp '2001-01-01');
+
+-- disallow zero intervals
+SELECT date_bin('0 days'::interval, timestamp '1970-01-01 01:00:00' , timestamp '1970-01-01 00:00:00');
+
+-- disallow negative intervals
+SELECT date_bin('-2 days'::interval, timestamp '1970-01-01 01:00:00' , timestamp '1970-01-01 00:00:00');
+
+-- Test casting within a BETWEEN qualifier
+SELECT d1 - timestamp without time zone '1997-01-02' AS diff
+ FROM TIMESTAMP_TBL
+ WHERE d1 BETWEEN timestamp without time zone '1902-01-01'
+ AND timestamp without time zone '2038-01-01';
+
+-- DATE_PART (timestamp_part)
+SELECT d1 as "timestamp",
+ date_part( 'year', d1) AS year, date_part( 'month', d1) AS month,
+ date_part( 'day', d1) AS day, date_part( 'hour', d1) AS hour,
+ date_part( 'minute', d1) AS minute, date_part( 'second', d1) AS second
+ FROM TIMESTAMP_TBL;
+
+SELECT d1 as "timestamp",
+ date_part( 'quarter', d1) AS quarter, date_part( 'msec', d1) AS msec,
+ date_part( 'usec', d1) AS usec
+ FROM TIMESTAMP_TBL;
+
+SELECT d1 as "timestamp",
+ date_part( 'isoyear', d1) AS isoyear, date_part( 'week', d1) AS week,
+ date_part( 'isodow', d1) AS isodow, date_part( 'dow', d1) AS dow,
+ date_part( 'doy', d1) AS doy
+ FROM TIMESTAMP_TBL;
+
+SELECT d1 as "timestamp",
+ date_part( 'decade', d1) AS decade,
+ date_part( 'century', d1) AS century,
+ date_part( 'millennium', d1) AS millennium,
+ round(date_part( 'julian', d1)) AS julian,
+ date_part( 'epoch', d1) AS epoch
+ FROM TIMESTAMP_TBL;
+
+-- extract implementation is mostly the same as date_part, so only
+-- test a few cases for additional coverage.
+SELECT d1 as "timestamp",
+ extract(microseconds from d1) AS microseconds,
+ extract(milliseconds from d1) AS milliseconds,
+ extract(seconds from d1) AS seconds,
+ round(extract(julian from d1)) AS julian,
+ extract(epoch from d1) AS epoch
+ FROM TIMESTAMP_TBL;
+
+-- value near upper bound uses special case in code
+SELECT date_part('epoch', '294270-01-01 00:00:00'::timestamp);
+SELECT extract(epoch from '294270-01-01 00:00:00'::timestamp);
+-- another internal overflow test case
+SELECT extract(epoch from '5000-01-01 00:00:00'::timestamp);
+
+-- TO_CHAR()
+SELECT to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, 'HH24--text--MI--text--SS')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, 'YYYYTH YYYYth Jth')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, 'IYYY IYY IY I IW IDDD ID')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID')
+ FROM TIMESTAMP_TBL;
+
+SELECT to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff5 ff6 MS US')
+ FROM (VALUES
+ ('2018-11-02 12:34:56'::timestamp),
+ ('2018-11-02 12:34:56.78'),
+ ('2018-11-02 12:34:56.78901'),
+ ('2018-11-02 12:34:56.78901234')
+ ) d(d);
+
+-- Roman months, with upper and lower case.
+SELECT i,
+ to_char(i * interval '1mon', 'rm'),
+ to_char(i * interval '1mon', 'RM')
+ FROM generate_series(-13, 13) i;
+
+-- timestamp numeric fields constructor
+SELECT make_timestamp(2014, 12, 28, 6, 30, 45.887);
+SELECT make_timestamp(-44, 3, 15, 12, 30, 15);
+-- should fail
+select make_timestamp(0, 7, 15, 12, 30, 15);
+
+-- generate_series for timestamp
+select * from generate_series('2020-01-01 00:00'::timestamp,
+ '2020-01-02 03:00'::timestamp,
+ '1 hour'::interval);
+-- the LIMIT should allow this to terminate in a reasonable amount of time
+-- (but that unfortunately doesn't work yet for SELECT * FROM ...)
+select generate_series('2022-01-01 00:00'::timestamp,
+ 'infinity'::timestamp,
+ '1 month'::interval) limit 10;
+-- errors
+select * from generate_series('2020-01-01 00:00'::timestamp,
+ '2020-01-02 03:00'::timestamp,
+ '0 hour'::interval);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/timestamptz.out b/ydb/library/yql/tests/postgresql/original/cases/timestamptz.out
new file mode 100644
index 0000000000..a0df947e27
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/timestamptz.out
@@ -0,0 +1,2991 @@
+--
+-- TIMESTAMPTZ
+--
+CREATE TABLE TIMESTAMPTZ_TBL (d1 timestamp(2) with time zone);
+-- Test shorthand input values
+-- We can't just "select" the results since they aren't constants; test for
+-- equality instead. We can do that by running the test inside a transaction
+-- block, within which the value of 'now' shouldn't change, and so these
+-- related values shouldn't either.
+BEGIN;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('today');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('yesterday');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow EST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow zulu');
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'today';
+ one
+-----
+ 1
+(1 row)
+
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'tomorrow';
+ one
+-----
+ 1
+(1 row)
+
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'yesterday';
+ one
+-----
+ 1
+(1 row)
+
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'tomorrow EST';
+ one
+-----
+ 1
+(1 row)
+
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'tomorrow zulu';
+ one
+-----
+ 1
+(1 row)
+
+COMMIT;
+DELETE FROM TIMESTAMPTZ_TBL;
+-- Verify that 'now' *does* change over a reasonable interval such as 100 msec,
+-- and that it doesn't change over the same interval within a transaction block
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+BEGIN;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+ pg_sleep
+----------
+
+(1 row)
+
+SELECT count(*) AS two FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp(2) with time zone 'now';
+ two
+-----
+ 2
+(1 row)
+
+SELECT count(d1) AS three, count(DISTINCT d1) AS two FROM TIMESTAMPTZ_TBL;
+ three | two
+-------+-----
+ 3 | 2
+(1 row)
+
+COMMIT;
+TRUNCATE TIMESTAMPTZ_TBL;
+-- Special values
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('-infinity');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('infinity');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('epoch');
+-- Postgres v6.0 standard output format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST');
+-- Variations on Postgres v6.1 standard output format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST');
+-- ISO 8601 format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02 03:04:05');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-08');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 -08:00');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 -0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 17:32:01 -07:00');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2001-09-22T18:19:20');
+-- POSIX format (note that the timezone abbrev is just decoration here)
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 08:14:01 GMT+8');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 13:14:02 GMT-1');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 12:14:03 GMT-2');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 03:14:04 PST+8');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 02:14:05 MST+7:00');
+-- Variations for acceptable input formats
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997 -0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 5:32PM 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997/02/10 17:32:01-0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('02-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 PST');
+set datestyle to ymd;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('97FEB10 5:32:01PM UTC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('97/02/10 17:32:01 UTC');
+reset datestyle;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997.041 17:32:01 UTC');
+-- timestamps at different timezones
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 America/New_York');
+SELECT '19970210 173201' AT TIME ZONE 'America/New_York';
+ timezone
+--------------------------
+ Mon Feb 10 20:32:01 1997
+(1 row)
+
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America/New_York');
+SELECT '19970710 173201' AT TIME ZONE 'America/New_York';
+ timezone
+--------------------------
+ Thu Jul 10 20:32:01 1997
+(1 row)
+
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America/Does_not_exist');
+ERROR: time zone "america/does_not_exist" not recognized
+LINE 1: INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America...
+ ^
+SELECT '19970710 173201' AT TIME ZONE 'America/Does_not_exist';
+ERROR: time zone "America/Does_not_exist" not recognized
+-- Daylight saving time for timestamps beyond 32-bit time_t range.
+SELECT '20500710 173201 Europe/Helsinki'::timestamptz; -- DST
+ timestamptz
+------------------------------
+ Sun Jul 10 07:32:01 2050 PDT
+(1 row)
+
+SELECT '20500110 173201 Europe/Helsinki'::timestamptz; -- non-DST
+ timestamptz
+------------------------------
+ Mon Jan 10 07:32:01 2050 PST
+(1 row)
+
+SELECT '205000-07-10 17:32:01 Europe/Helsinki'::timestamptz; -- DST
+ timestamptz
+--------------------------------
+ Thu Jul 10 07:32:01 205000 PDT
+(1 row)
+
+SELECT '205000-01-10 17:32:01 Europe/Helsinki'::timestamptz; -- non-DST
+ timestamptz
+--------------------------------
+ Fri Jan 10 07:32:01 205000 PST
+(1 row)
+
+-- Check date conversion and date arithmetic
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 18:32:01 PDT');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 11 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 12 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 13 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 14 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 15 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097 BC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0597');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1697');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1797');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1897');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 2097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1997');
+ERROR: date/time field value out of range: "Feb 29 17:32:01 1997"
+LINE 1: INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1997');
+ ^
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1999');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2000');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 2000');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2001');
+-- Currently unsupported syntax and ranges
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 -0097');
+ERROR: time zone displacement out of range: "Feb 16 17:32:01 -0097"
+LINE 1: INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 -0097')...
+ ^
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 5097 BC');
+ERROR: timestamp out of range: "Feb 16 17:32:01 5097 BC"
+LINE 1: INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 5097 BC...
+ ^
+-- Alternative field order that we've historically supported (sort of)
+-- with regular and POSIXy timezone specs
+SELECT 'Wed Jul 11 10:51:14 America/New_York 2001'::timestamptz;
+ timestamptz
+------------------------------
+ Wed Jul 11 07:51:14 2001 PDT
+(1 row)
+
+SELECT 'Wed Jul 11 10:51:14 GMT-4 2001'::timestamptz;
+ timestamptz
+------------------------------
+ Tue Jul 10 23:51:14 2001 PDT
+(1 row)
+
+SELECT 'Wed Jul 11 10:51:14 GMT+4 2001'::timestamptz;
+ timestamptz
+------------------------------
+ Wed Jul 11 07:51:14 2001 PDT
+(1 row)
+
+SELECT 'Wed Jul 11 10:51:14 PST-03:00 2001'::timestamptz;
+ timestamptz
+------------------------------
+ Wed Jul 11 00:51:14 2001 PDT
+(1 row)
+
+SELECT 'Wed Jul 11 10:51:14 PST+03:00 2001'::timestamptz;
+ timestamptz
+------------------------------
+ Wed Jul 11 06:51:14 2001 PDT
+(1 row)
+
+SELECT d1 FROM TIMESTAMPTZ_TBL;
+ d1
+---------------------------------
+ -infinity
+ infinity
+ Wed Dec 31 16:00:00 1969 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:02 1997 PST
+ Mon Feb 10 17:32:01.4 1997 PST
+ Mon Feb 10 17:32:01.5 1997 PST
+ Mon Feb 10 17:32:01.6 1997 PST
+ Thu Jan 02 00:00:00 1997 PST
+ Thu Jan 02 03:04:05 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Tue Jun 10 17:32:01 1997 PDT
+ Sat Sep 22 18:19:20 2001 PDT
+ Wed Mar 15 08:14:01 2000 PST
+ Wed Mar 15 04:14:02 2000 PST
+ Wed Mar 15 02:14:03 2000 PST
+ Wed Mar 15 03:14:04 2000 PST
+ Wed Mar 15 01:14:05 2000 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:00 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 14:32:01 1997 PST
+ Thu Jul 10 14:32:01 1997 PDT
+ Tue Jun 10 18:32:01 1997 PDT
+ Mon Feb 10 17:32:01 1997 PST
+ Tue Feb 11 17:32:01 1997 PST
+ Wed Feb 12 17:32:01 1997 PST
+ Thu Feb 13 17:32:01 1997 PST
+ Fri Feb 14 17:32:01 1997 PST
+ Sat Feb 15 17:32:01 1997 PST
+ Sun Feb 16 17:32:01 1997 PST
+ Tue Feb 16 17:32:01 0097 PST BC
+ Sat Feb 16 17:32:01 0097 PST
+ Thu Feb 16 17:32:01 0597 PST
+ Tue Feb 16 17:32:01 1097 PST
+ Sat Feb 16 17:32:01 1697 PST
+ Thu Feb 16 17:32:01 1797 PST
+ Tue Feb 16 17:32:01 1897 PST
+ Sun Feb 16 17:32:01 1997 PST
+ Sat Feb 16 17:32:01 2097 PST
+ Wed Feb 28 17:32:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST
+ Wed Jan 01 17:32:01 1997 PST
+ Fri Feb 28 17:32:01 1997 PST
+ Sat Mar 01 17:32:01 1997 PST
+ Tue Dec 30 17:32:01 1997 PST
+ Wed Dec 31 17:32:01 1997 PST
+ Fri Dec 31 17:32:01 1999 PST
+ Sat Jan 01 17:32:01 2000 PST
+ Sun Dec 31 17:32:01 2000 PST
+ Mon Jan 01 17:32:01 2001 PST
+(66 rows)
+
+-- Check behavior at the boundaries of the timestamp range
+SELECT '4714-11-24 00:00:00+00 BC'::timestamptz;
+ timestamptz
+---------------------------------
+ Sun Nov 23 16:00:00 4714 PST BC
+(1 row)
+
+SELECT '4714-11-23 16:00:00-08 BC'::timestamptz;
+ timestamptz
+---------------------------------
+ Sun Nov 23 16:00:00 4714 PST BC
+(1 row)
+
+SELECT 'Sun Nov 23 16:00:00 4714 PST BC'::timestamptz;
+ timestamptz
+---------------------------------
+ Sun Nov 23 16:00:00 4714 PST BC
+(1 row)
+
+SELECT '4714-11-23 23:59:59+00 BC'::timestamptz; -- out of range
+ERROR: timestamp out of range: "4714-11-23 23:59:59+00 BC"
+LINE 1: SELECT '4714-11-23 23:59:59+00 BC'::timestamptz;
+ ^
+SELECT '294276-12-31 23:59:59+00'::timestamptz;
+ timestamptz
+--------------------------------
+ Sun Dec 31 15:59:59 294276 PST
+(1 row)
+
+SELECT '294276-12-31 15:59:59-08'::timestamptz;
+ timestamptz
+--------------------------------
+ Sun Dec 31 15:59:59 294276 PST
+(1 row)
+
+SELECT '294277-01-01 00:00:00+00'::timestamptz; -- out of range
+ERROR: timestamp out of range: "294277-01-01 00:00:00+00"
+LINE 1: SELECT '294277-01-01 00:00:00+00'::timestamptz;
+ ^
+SELECT '294277-12-31 16:00:00-08'::timestamptz; -- out of range
+ERROR: timestamp out of range: "294277-12-31 16:00:00-08"
+LINE 1: SELECT '294277-12-31 16:00:00-08'::timestamptz;
+ ^
+-- Demonstrate functions and operators
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 > timestamp with time zone '1997-01-02';
+ d1
+--------------------------------
+ infinity
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:02 1997 PST
+ Mon Feb 10 17:32:01.4 1997 PST
+ Mon Feb 10 17:32:01.5 1997 PST
+ Mon Feb 10 17:32:01.6 1997 PST
+ Thu Jan 02 03:04:05 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Tue Jun 10 17:32:01 1997 PDT
+ Sat Sep 22 18:19:20 2001 PDT
+ Wed Mar 15 08:14:01 2000 PST
+ Wed Mar 15 04:14:02 2000 PST
+ Wed Mar 15 02:14:03 2000 PST
+ Wed Mar 15 03:14:04 2000 PST
+ Wed Mar 15 01:14:05 2000 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:00 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 14:32:01 1997 PST
+ Thu Jul 10 14:32:01 1997 PDT
+ Tue Jun 10 18:32:01 1997 PDT
+ Mon Feb 10 17:32:01 1997 PST
+ Tue Feb 11 17:32:01 1997 PST
+ Wed Feb 12 17:32:01 1997 PST
+ Thu Feb 13 17:32:01 1997 PST
+ Fri Feb 14 17:32:01 1997 PST
+ Sat Feb 15 17:32:01 1997 PST
+ Sun Feb 16 17:32:01 1997 PST
+ Sun Feb 16 17:32:01 1997 PST
+ Sat Feb 16 17:32:01 2097 PST
+ Fri Feb 28 17:32:01 1997 PST
+ Sat Mar 01 17:32:01 1997 PST
+ Tue Dec 30 17:32:01 1997 PST
+ Wed Dec 31 17:32:01 1997 PST
+ Fri Dec 31 17:32:01 1999 PST
+ Sat Jan 01 17:32:01 2000 PST
+ Sun Dec 31 17:32:01 2000 PST
+ Mon Jan 01 17:32:01 2001 PST
+(50 rows)
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 < timestamp with time zone '1997-01-02';
+ d1
+---------------------------------
+ -infinity
+ Wed Dec 31 16:00:00 1969 PST
+ Tue Feb 16 17:32:01 0097 PST BC
+ Sat Feb 16 17:32:01 0097 PST
+ Thu Feb 16 17:32:01 0597 PST
+ Tue Feb 16 17:32:01 1097 PST
+ Sat Feb 16 17:32:01 1697 PST
+ Thu Feb 16 17:32:01 1797 PST
+ Tue Feb 16 17:32:01 1897 PST
+ Wed Feb 28 17:32:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST
+ Wed Jan 01 17:32:01 1997 PST
+(15 rows)
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 = timestamp with time zone '1997-01-02';
+ d1
+------------------------------
+ Thu Jan 02 00:00:00 1997 PST
+(1 row)
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 != timestamp with time zone '1997-01-02';
+ d1
+---------------------------------
+ -infinity
+ infinity
+ Wed Dec 31 16:00:00 1969 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:02 1997 PST
+ Mon Feb 10 17:32:01.4 1997 PST
+ Mon Feb 10 17:32:01.5 1997 PST
+ Mon Feb 10 17:32:01.6 1997 PST
+ Thu Jan 02 03:04:05 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Tue Jun 10 17:32:01 1997 PDT
+ Sat Sep 22 18:19:20 2001 PDT
+ Wed Mar 15 08:14:01 2000 PST
+ Wed Mar 15 04:14:02 2000 PST
+ Wed Mar 15 02:14:03 2000 PST
+ Wed Mar 15 03:14:04 2000 PST
+ Wed Mar 15 01:14:05 2000 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:00 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 14:32:01 1997 PST
+ Thu Jul 10 14:32:01 1997 PDT
+ Tue Jun 10 18:32:01 1997 PDT
+ Mon Feb 10 17:32:01 1997 PST
+ Tue Feb 11 17:32:01 1997 PST
+ Wed Feb 12 17:32:01 1997 PST
+ Thu Feb 13 17:32:01 1997 PST
+ Fri Feb 14 17:32:01 1997 PST
+ Sat Feb 15 17:32:01 1997 PST
+ Sun Feb 16 17:32:01 1997 PST
+ Tue Feb 16 17:32:01 0097 PST BC
+ Sat Feb 16 17:32:01 0097 PST
+ Thu Feb 16 17:32:01 0597 PST
+ Tue Feb 16 17:32:01 1097 PST
+ Sat Feb 16 17:32:01 1697 PST
+ Thu Feb 16 17:32:01 1797 PST
+ Tue Feb 16 17:32:01 1897 PST
+ Sun Feb 16 17:32:01 1997 PST
+ Sat Feb 16 17:32:01 2097 PST
+ Wed Feb 28 17:32:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST
+ Wed Jan 01 17:32:01 1997 PST
+ Fri Feb 28 17:32:01 1997 PST
+ Sat Mar 01 17:32:01 1997 PST
+ Tue Dec 30 17:32:01 1997 PST
+ Wed Dec 31 17:32:01 1997 PST
+ Fri Dec 31 17:32:01 1999 PST
+ Sat Jan 01 17:32:01 2000 PST
+ Sun Dec 31 17:32:01 2000 PST
+ Mon Jan 01 17:32:01 2001 PST
+(65 rows)
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 <= timestamp with time zone '1997-01-02';
+ d1
+---------------------------------
+ -infinity
+ Wed Dec 31 16:00:00 1969 PST
+ Thu Jan 02 00:00:00 1997 PST
+ Tue Feb 16 17:32:01 0097 PST BC
+ Sat Feb 16 17:32:01 0097 PST
+ Thu Feb 16 17:32:01 0597 PST
+ Tue Feb 16 17:32:01 1097 PST
+ Sat Feb 16 17:32:01 1697 PST
+ Thu Feb 16 17:32:01 1797 PST
+ Tue Feb 16 17:32:01 1897 PST
+ Wed Feb 28 17:32:01 1996 PST
+ Thu Feb 29 17:32:01 1996 PST
+ Fri Mar 01 17:32:01 1996 PST
+ Mon Dec 30 17:32:01 1996 PST
+ Tue Dec 31 17:32:01 1996 PST
+ Wed Jan 01 17:32:01 1997 PST
+(16 rows)
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 >= timestamp with time zone '1997-01-02';
+ d1
+--------------------------------
+ infinity
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:02 1997 PST
+ Mon Feb 10 17:32:01.4 1997 PST
+ Mon Feb 10 17:32:01.5 1997 PST
+ Mon Feb 10 17:32:01.6 1997 PST
+ Thu Jan 02 00:00:00 1997 PST
+ Thu Jan 02 03:04:05 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Tue Jun 10 17:32:01 1997 PDT
+ Sat Sep 22 18:19:20 2001 PDT
+ Wed Mar 15 08:14:01 2000 PST
+ Wed Mar 15 04:14:02 2000 PST
+ Wed Mar 15 02:14:03 2000 PST
+ Wed Mar 15 03:14:04 2000 PST
+ Wed Mar 15 01:14:05 2000 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:00 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 17:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 09:32:01 1997 PST
+ Mon Feb 10 14:32:01 1997 PST
+ Thu Jul 10 14:32:01 1997 PDT
+ Tue Jun 10 18:32:01 1997 PDT
+ Mon Feb 10 17:32:01 1997 PST
+ Tue Feb 11 17:32:01 1997 PST
+ Wed Feb 12 17:32:01 1997 PST
+ Thu Feb 13 17:32:01 1997 PST
+ Fri Feb 14 17:32:01 1997 PST
+ Sat Feb 15 17:32:01 1997 PST
+ Sun Feb 16 17:32:01 1997 PST
+ Sun Feb 16 17:32:01 1997 PST
+ Sat Feb 16 17:32:01 2097 PST
+ Fri Feb 28 17:32:01 1997 PST
+ Sat Mar 01 17:32:01 1997 PST
+ Tue Dec 30 17:32:01 1997 PST
+ Wed Dec 31 17:32:01 1997 PST
+ Fri Dec 31 17:32:01 1999 PST
+ Sat Jan 01 17:32:01 2000 PST
+ Sun Dec 31 17:32:01 2000 PST
+ Mon Jan 01 17:32:01 2001 PST
+(51 rows)
+
+SELECT d1 - timestamp with time zone '1997-01-02' AS diff
+ FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
+ diff
+----------------------------------------
+ @ 9863 days 8 hours ago
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 2 secs
+ @ 39 days 17 hours 32 mins 1.4 secs
+ @ 39 days 17 hours 32 mins 1.5 secs
+ @ 39 days 17 hours 32 mins 1.6 secs
+ @ 0
+ @ 3 hours 4 mins 5 secs
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 159 days 16 hours 32 mins 1 sec
+ @ 1724 days 17 hours 19 mins 20 secs
+ @ 1168 days 8 hours 14 mins 1 sec
+ @ 1168 days 4 hours 14 mins 2 secs
+ @ 1168 days 2 hours 14 mins 3 secs
+ @ 1168 days 3 hours 14 mins 4 secs
+ @ 1168 days 1 hour 14 mins 5 secs
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 9 hours 32 mins 1 sec
+ @ 39 days 9 hours 32 mins 1 sec
+ @ 39 days 9 hours 32 mins 1 sec
+ @ 39 days 14 hours 32 mins 1 sec
+ @ 189 days 13 hours 32 mins 1 sec
+ @ 159 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 40 days 17 hours 32 mins 1 sec
+ @ 41 days 17 hours 32 mins 1 sec
+ @ 42 days 17 hours 32 mins 1 sec
+ @ 43 days 17 hours 32 mins 1 sec
+ @ 44 days 17 hours 32 mins 1 sec
+ @ 45 days 17 hours 32 mins 1 sec
+ @ 45 days 17 hours 32 mins 1 sec
+ @ 308 days 6 hours 27 mins 59 secs ago
+ @ 307 days 6 hours 27 mins 59 secs ago
+ @ 306 days 6 hours 27 mins 59 secs ago
+ @ 2 days 6 hours 27 mins 59 secs ago
+ @ 1 day 6 hours 27 mins 59 secs ago
+ @ 6 hours 27 mins 59 secs ago
+ @ 57 days 17 hours 32 mins 1 sec
+ @ 58 days 17 hours 32 mins 1 sec
+ @ 362 days 17 hours 32 mins 1 sec
+ @ 363 days 17 hours 32 mins 1 sec
+ @ 1093 days 17 hours 32 mins 1 sec
+ @ 1094 days 17 hours 32 mins 1 sec
+ @ 1459 days 17 hours 32 mins 1 sec
+ @ 1460 days 17 hours 32 mins 1 sec
+(56 rows)
+
+SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS week_trunc;
+ week_trunc
+------------------------------
+ Mon Feb 23 00:00:00 2004 PST
+(1 row)
+
+SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name
+ sydney_trunc
+------------------------------
+ Fri Feb 16 05:00:00 2001 PST
+(1 row)
+
+SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'GMT') as gmt_trunc; -- fixed-offset abbreviation
+ gmt_trunc
+------------------------------
+ Thu Feb 15 16:00:00 2001 PST
+(1 row)
+
+SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET') as vet_trunc; -- variable-offset abbreviation
+ vet_trunc
+------------------------------
+ Thu Feb 15 20:00:00 2001 PST
+(1 row)
+
+-- verify date_bin behaves the same as date_trunc for relevant intervals
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts, 'Australia/Sydney') = date_bin(interval::interval, ts, timestamp with time zone '2001-01-01+11') AS equal
+FROM (
+ VALUES
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamptz '2020-02-29 15:44:17.71393+00')) ts (ts);
+ str | interval | equal
+-------------+----------+-------
+ day | 1 d | t
+ hour | 1 h | t
+ minute | 1 m | t
+ second | 1 s | t
+ millisecond | 1 ms | t
+ microsecond | 1 us | t
+(6 rows)
+
+-- bin timestamps into arbitrary intervals
+SELECT
+ interval,
+ ts,
+ origin,
+ date_bin(interval::interval, ts, origin)
+FROM (
+ VALUES
+ ('15 days'),
+ ('2 hours'),
+ ('1 hour 30 minutes'),
+ ('15 minutes'),
+ ('10 seconds'),
+ ('100 milliseconds'),
+ ('250 microseconds')
+) intervals (interval),
+(VALUES (timestamptz '2020-02-11 15:44:17.71393')) ts (ts),
+(VALUES (timestamptz '2001-01-01')) origin (origin);
+ interval | ts | origin | date_bin
+-------------------+------------------------------------+------------------------------+------------------------------------
+ 15 days | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Thu Feb 06 00:00:00 2020 PST
+ 2 hours | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 14:00:00 2020 PST
+ 1 hour 30 minutes | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:00:00 2020 PST
+ 15 minutes | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:30:00 2020 PST
+ 10 seconds | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:44:10 2020 PST
+ 100 milliseconds | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:44:17.7 2020 PST
+ 250 microseconds | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:44:17.71375 2020 PST
+(7 rows)
+
+-- shift bins using the origin parameter:
+SELECT date_bin('5 min'::interval, timestamptz '2020-02-01 01:01:01+00', timestamptz '2020-02-01 00:02:30+00');
+ date_bin
+------------------------------
+ Fri Jan 31 16:57:30 2020 PST
+(1 row)
+
+-- disallow intervals with months or years
+SELECT date_bin('5 months'::interval, timestamp with time zone '2020-02-01 01:01:01+00', timestamp with time zone '2001-01-01+00');
+ERROR: timestamps cannot be binned into intervals containing months or years
+SELECT date_bin('5 years'::interval, timestamp with time zone '2020-02-01 01:01:01+00', timestamp with time zone '2001-01-01+00');
+ERROR: timestamps cannot be binned into intervals containing months or years
+-- disallow zero intervals
+SELECT date_bin('0 days'::interval, timestamp with time zone '1970-01-01 01:00:00+00' , timestamp with time zone '1970-01-01 00:00:00+00');
+ERROR: stride must be greater than zero
+-- disallow negative intervals
+SELECT date_bin('-2 days'::interval, timestamp with time zone '1970-01-01 01:00:00+00' , timestamp with time zone '1970-01-01 00:00:00+00');
+ERROR: stride must be greater than zero
+-- Test casting within a BETWEEN qualifier
+SELECT d1 - timestamp with time zone '1997-01-02' AS diff
+ FROM TIMESTAMPTZ_TBL
+ WHERE d1 BETWEEN timestamp with time zone '1902-01-01' AND timestamp with time zone '2038-01-01';
+ diff
+----------------------------------------
+ @ 9863 days 8 hours ago
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 2 secs
+ @ 39 days 17 hours 32 mins 1.4 secs
+ @ 39 days 17 hours 32 mins 1.5 secs
+ @ 39 days 17 hours 32 mins 1.6 secs
+ @ 0
+ @ 3 hours 4 mins 5 secs
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 159 days 16 hours 32 mins 1 sec
+ @ 1724 days 17 hours 19 mins 20 secs
+ @ 1168 days 8 hours 14 mins 1 sec
+ @ 1168 days 4 hours 14 mins 2 secs
+ @ 1168 days 2 hours 14 mins 3 secs
+ @ 1168 days 3 hours 14 mins 4 secs
+ @ 1168 days 1 hour 14 mins 5 secs
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 39 days 9 hours 32 mins 1 sec
+ @ 39 days 9 hours 32 mins 1 sec
+ @ 39 days 9 hours 32 mins 1 sec
+ @ 39 days 14 hours 32 mins 1 sec
+ @ 189 days 13 hours 32 mins 1 sec
+ @ 159 days 17 hours 32 mins 1 sec
+ @ 39 days 17 hours 32 mins 1 sec
+ @ 40 days 17 hours 32 mins 1 sec
+ @ 41 days 17 hours 32 mins 1 sec
+ @ 42 days 17 hours 32 mins 1 sec
+ @ 43 days 17 hours 32 mins 1 sec
+ @ 44 days 17 hours 32 mins 1 sec
+ @ 45 days 17 hours 32 mins 1 sec
+ @ 45 days 17 hours 32 mins 1 sec
+ @ 308 days 6 hours 27 mins 59 secs ago
+ @ 307 days 6 hours 27 mins 59 secs ago
+ @ 306 days 6 hours 27 mins 59 secs ago
+ @ 2 days 6 hours 27 mins 59 secs ago
+ @ 1 day 6 hours 27 mins 59 secs ago
+ @ 6 hours 27 mins 59 secs ago
+ @ 57 days 17 hours 32 mins 1 sec
+ @ 58 days 17 hours 32 mins 1 sec
+ @ 362 days 17 hours 32 mins 1 sec
+ @ 363 days 17 hours 32 mins 1 sec
+ @ 1093 days 17 hours 32 mins 1 sec
+ @ 1094 days 17 hours 32 mins 1 sec
+ @ 1459 days 17 hours 32 mins 1 sec
+ @ 1460 days 17 hours 32 mins 1 sec
+(56 rows)
+
+-- DATE_PART (timestamptz_part)
+SELECT d1 as timestamptz,
+ date_part( 'year', d1) AS year, date_part( 'month', d1) AS month,
+ date_part( 'day', d1) AS day, date_part( 'hour', d1) AS hour,
+ date_part( 'minute', d1) AS minute, date_part( 'second', d1) AS second
+ FROM TIMESTAMPTZ_TBL;
+ timestamptz | year | month | day | hour | minute | second
+---------------------------------+-----------+-------+-----+------+--------+--------
+ -infinity | -Infinity | | | | |
+ infinity | Infinity | | | | |
+ Wed Dec 31 16:00:00 1969 PST | 1969 | 12 | 31 | 16 | 0 | 0
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:02 1997 PST | 1997 | 2 | 10 | 17 | 32 | 2
+ Mon Feb 10 17:32:01.4 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1.4
+ Mon Feb 10 17:32:01.5 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1.5
+ Mon Feb 10 17:32:01.6 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1.6
+ Thu Jan 02 00:00:00 1997 PST | 1997 | 1 | 2 | 0 | 0 | 0
+ Thu Jan 02 03:04:05 1997 PST | 1997 | 1 | 2 | 3 | 4 | 5
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Tue Jun 10 17:32:01 1997 PDT | 1997 | 6 | 10 | 17 | 32 | 1
+ Sat Sep 22 18:19:20 2001 PDT | 2001 | 9 | 22 | 18 | 19 | 20
+ Wed Mar 15 08:14:01 2000 PST | 2000 | 3 | 15 | 8 | 14 | 1
+ Wed Mar 15 04:14:02 2000 PST | 2000 | 3 | 15 | 4 | 14 | 2
+ Wed Mar 15 02:14:03 2000 PST | 2000 | 3 | 15 | 2 | 14 | 3
+ Wed Mar 15 03:14:04 2000 PST | 2000 | 3 | 15 | 3 | 14 | 4
+ Wed Mar 15 01:14:05 2000 PST | 2000 | 3 | 15 | 1 | 14 | 5
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:00 1997 PST | 1997 | 2 | 10 | 17 | 32 | 0
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Mon Feb 10 09:32:01 1997 PST | 1997 | 2 | 10 | 9 | 32 | 1
+ Mon Feb 10 09:32:01 1997 PST | 1997 | 2 | 10 | 9 | 32 | 1
+ Mon Feb 10 09:32:01 1997 PST | 1997 | 2 | 10 | 9 | 32 | 1
+ Mon Feb 10 14:32:01 1997 PST | 1997 | 2 | 10 | 14 | 32 | 1
+ Thu Jul 10 14:32:01 1997 PDT | 1997 | 7 | 10 | 14 | 32 | 1
+ Tue Jun 10 18:32:01 1997 PDT | 1997 | 6 | 10 | 18 | 32 | 1
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1
+ Tue Feb 11 17:32:01 1997 PST | 1997 | 2 | 11 | 17 | 32 | 1
+ Wed Feb 12 17:32:01 1997 PST | 1997 | 2 | 12 | 17 | 32 | 1
+ Thu Feb 13 17:32:01 1997 PST | 1997 | 2 | 13 | 17 | 32 | 1
+ Fri Feb 14 17:32:01 1997 PST | 1997 | 2 | 14 | 17 | 32 | 1
+ Sat Feb 15 17:32:01 1997 PST | 1997 | 2 | 15 | 17 | 32 | 1
+ Sun Feb 16 17:32:01 1997 PST | 1997 | 2 | 16 | 17 | 32 | 1
+ Tue Feb 16 17:32:01 0097 PST BC | -97 | 2 | 16 | 17 | 32 | 1
+ Sat Feb 16 17:32:01 0097 PST | 97 | 2 | 16 | 17 | 32 | 1
+ Thu Feb 16 17:32:01 0597 PST | 597 | 2 | 16 | 17 | 32 | 1
+ Tue Feb 16 17:32:01 1097 PST | 1097 | 2 | 16 | 17 | 32 | 1
+ Sat Feb 16 17:32:01 1697 PST | 1697 | 2 | 16 | 17 | 32 | 1
+ Thu Feb 16 17:32:01 1797 PST | 1797 | 2 | 16 | 17 | 32 | 1
+ Tue Feb 16 17:32:01 1897 PST | 1897 | 2 | 16 | 17 | 32 | 1
+ Sun Feb 16 17:32:01 1997 PST | 1997 | 2 | 16 | 17 | 32 | 1
+ Sat Feb 16 17:32:01 2097 PST | 2097 | 2 | 16 | 17 | 32 | 1
+ Wed Feb 28 17:32:01 1996 PST | 1996 | 2 | 28 | 17 | 32 | 1
+ Thu Feb 29 17:32:01 1996 PST | 1996 | 2 | 29 | 17 | 32 | 1
+ Fri Mar 01 17:32:01 1996 PST | 1996 | 3 | 1 | 17 | 32 | 1
+ Mon Dec 30 17:32:01 1996 PST | 1996 | 12 | 30 | 17 | 32 | 1
+ Tue Dec 31 17:32:01 1996 PST | 1996 | 12 | 31 | 17 | 32 | 1
+ Wed Jan 01 17:32:01 1997 PST | 1997 | 1 | 1 | 17 | 32 | 1
+ Fri Feb 28 17:32:01 1997 PST | 1997 | 2 | 28 | 17 | 32 | 1
+ Sat Mar 01 17:32:01 1997 PST | 1997 | 3 | 1 | 17 | 32 | 1
+ Tue Dec 30 17:32:01 1997 PST | 1997 | 12 | 30 | 17 | 32 | 1
+ Wed Dec 31 17:32:01 1997 PST | 1997 | 12 | 31 | 17 | 32 | 1
+ Fri Dec 31 17:32:01 1999 PST | 1999 | 12 | 31 | 17 | 32 | 1
+ Sat Jan 01 17:32:01 2000 PST | 2000 | 1 | 1 | 17 | 32 | 1
+ Sun Dec 31 17:32:01 2000 PST | 2000 | 12 | 31 | 17 | 32 | 1
+ Mon Jan 01 17:32:01 2001 PST | 2001 | 1 | 1 | 17 | 32 | 1
+(66 rows)
+
+SELECT d1 as timestamptz,
+ date_part( 'quarter', d1) AS quarter, date_part( 'msec', d1) AS msec,
+ date_part( 'usec', d1) AS usec
+ FROM TIMESTAMPTZ_TBL;
+ timestamptz | quarter | msec | usec
+---------------------------------+---------+-------+----------
+ -infinity | | |
+ infinity | | |
+ Wed Dec 31 16:00:00 1969 PST | 4 | 0 | 0
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:02 1997 PST | 1 | 2000 | 2000000
+ Mon Feb 10 17:32:01.4 1997 PST | 1 | 1400 | 1400000
+ Mon Feb 10 17:32:01.5 1997 PST | 1 | 1500 | 1500000
+ Mon Feb 10 17:32:01.6 1997 PST | 1 | 1600 | 1600000
+ Thu Jan 02 00:00:00 1997 PST | 1 | 0 | 0
+ Thu Jan 02 03:04:05 1997 PST | 1 | 5000 | 5000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Tue Jun 10 17:32:01 1997 PDT | 2 | 1000 | 1000000
+ Sat Sep 22 18:19:20 2001 PDT | 3 | 20000 | 20000000
+ Wed Mar 15 08:14:01 2000 PST | 1 | 1000 | 1000000
+ Wed Mar 15 04:14:02 2000 PST | 1 | 2000 | 2000000
+ Wed Mar 15 02:14:03 2000 PST | 1 | 3000 | 3000000
+ Wed Mar 15 03:14:04 2000 PST | 1 | 4000 | 4000000
+ Wed Mar 15 01:14:05 2000 PST | 1 | 5000 | 5000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:00 1997 PST | 1 | 0 | 0
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 09:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 09:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 09:32:01 1997 PST | 1 | 1000 | 1000000
+ Mon Feb 10 14:32:01 1997 PST | 1 | 1000 | 1000000
+ Thu Jul 10 14:32:01 1997 PDT | 3 | 1000 | 1000000
+ Tue Jun 10 18:32:01 1997 PDT | 2 | 1000 | 1000000
+ Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Tue Feb 11 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Wed Feb 12 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Thu Feb 13 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Fri Feb 14 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Sat Feb 15 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Sun Feb 16 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Tue Feb 16 17:32:01 0097 PST BC | 1 | 1000 | 1000000
+ Sat Feb 16 17:32:01 0097 PST | 1 | 1000 | 1000000
+ Thu Feb 16 17:32:01 0597 PST | 1 | 1000 | 1000000
+ Tue Feb 16 17:32:01 1097 PST | 1 | 1000 | 1000000
+ Sat Feb 16 17:32:01 1697 PST | 1 | 1000 | 1000000
+ Thu Feb 16 17:32:01 1797 PST | 1 | 1000 | 1000000
+ Tue Feb 16 17:32:01 1897 PST | 1 | 1000 | 1000000
+ Sun Feb 16 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Sat Feb 16 17:32:01 2097 PST | 1 | 1000 | 1000000
+ Wed Feb 28 17:32:01 1996 PST | 1 | 1000 | 1000000
+ Thu Feb 29 17:32:01 1996 PST | 1 | 1000 | 1000000
+ Fri Mar 01 17:32:01 1996 PST | 1 | 1000 | 1000000
+ Mon Dec 30 17:32:01 1996 PST | 4 | 1000 | 1000000
+ Tue Dec 31 17:32:01 1996 PST | 4 | 1000 | 1000000
+ Wed Jan 01 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Fri Feb 28 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Sat Mar 01 17:32:01 1997 PST | 1 | 1000 | 1000000
+ Tue Dec 30 17:32:01 1997 PST | 4 | 1000 | 1000000
+ Wed Dec 31 17:32:01 1997 PST | 4 | 1000 | 1000000
+ Fri Dec 31 17:32:01 1999 PST | 4 | 1000 | 1000000
+ Sat Jan 01 17:32:01 2000 PST | 1 | 1000 | 1000000
+ Sun Dec 31 17:32:01 2000 PST | 4 | 1000 | 1000000
+ Mon Jan 01 17:32:01 2001 PST | 1 | 1000 | 1000000
+(66 rows)
+
+SELECT d1 as timestamptz,
+ date_part( 'isoyear', d1) AS isoyear, date_part( 'week', d1) AS week,
+ date_part( 'isodow', d1) AS isodow, date_part( 'dow', d1) AS dow,
+ date_part( 'doy', d1) AS doy
+ FROM TIMESTAMPTZ_TBL;
+ timestamptz | isoyear | week | isodow | dow | doy
+---------------------------------+-----------+------+--------+-----+-----
+ -infinity | -Infinity | | | |
+ infinity | Infinity | | | |
+ Wed Dec 31 16:00:00 1969 PST | 1970 | 1 | 3 | 3 | 365
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:02 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01.4 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01.5 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01.6 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Thu Jan 02 00:00:00 1997 PST | 1997 | 1 | 4 | 4 | 2
+ Thu Jan 02 03:04:05 1997 PST | 1997 | 1 | 4 | 4 | 2
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Tue Jun 10 17:32:01 1997 PDT | 1997 | 24 | 2 | 2 | 161
+ Sat Sep 22 18:19:20 2001 PDT | 2001 | 38 | 6 | 6 | 265
+ Wed Mar 15 08:14:01 2000 PST | 2000 | 11 | 3 | 3 | 75
+ Wed Mar 15 04:14:02 2000 PST | 2000 | 11 | 3 | 3 | 75
+ Wed Mar 15 02:14:03 2000 PST | 2000 | 11 | 3 | 3 | 75
+ Wed Mar 15 03:14:04 2000 PST | 2000 | 11 | 3 | 3 | 75
+ Wed Mar 15 01:14:05 2000 PST | 2000 | 11 | 3 | 3 | 75
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:00 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 09:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 09:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 09:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Mon Feb 10 14:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Thu Jul 10 14:32:01 1997 PDT | 1997 | 28 | 4 | 4 | 191
+ Tue Jun 10 18:32:01 1997 PDT | 1997 | 24 | 2 | 2 | 161
+ Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41
+ Tue Feb 11 17:32:01 1997 PST | 1997 | 7 | 2 | 2 | 42
+ Wed Feb 12 17:32:01 1997 PST | 1997 | 7 | 3 | 3 | 43
+ Thu Feb 13 17:32:01 1997 PST | 1997 | 7 | 4 | 4 | 44
+ Fri Feb 14 17:32:01 1997 PST | 1997 | 7 | 5 | 5 | 45
+ Sat Feb 15 17:32:01 1997 PST | 1997 | 7 | 6 | 6 | 46
+ Sun Feb 16 17:32:01 1997 PST | 1997 | 7 | 7 | 0 | 47
+ Tue Feb 16 17:32:01 0097 PST BC | -97 | 7 | 2 | 2 | 47
+ Sat Feb 16 17:32:01 0097 PST | 97 | 7 | 6 | 6 | 47
+ Thu Feb 16 17:32:01 0597 PST | 597 | 7 | 4 | 4 | 47
+ Tue Feb 16 17:32:01 1097 PST | 1097 | 7 | 2 | 2 | 47
+ Sat Feb 16 17:32:01 1697 PST | 1697 | 7 | 6 | 6 | 47
+ Thu Feb 16 17:32:01 1797 PST | 1797 | 7 | 4 | 4 | 47
+ Tue Feb 16 17:32:01 1897 PST | 1897 | 7 | 2 | 2 | 47
+ Sun Feb 16 17:32:01 1997 PST | 1997 | 7 | 7 | 0 | 47
+ Sat Feb 16 17:32:01 2097 PST | 2097 | 7 | 6 | 6 | 47
+ Wed Feb 28 17:32:01 1996 PST | 1996 | 9 | 3 | 3 | 59
+ Thu Feb 29 17:32:01 1996 PST | 1996 | 9 | 4 | 4 | 60
+ Fri Mar 01 17:32:01 1996 PST | 1996 | 9 | 5 | 5 | 61
+ Mon Dec 30 17:32:01 1996 PST | 1997 | 1 | 1 | 1 | 365
+ Tue Dec 31 17:32:01 1996 PST | 1997 | 1 | 2 | 2 | 366
+ Wed Jan 01 17:32:01 1997 PST | 1997 | 1 | 3 | 3 | 1
+ Fri Feb 28 17:32:01 1997 PST | 1997 | 9 | 5 | 5 | 59
+ Sat Mar 01 17:32:01 1997 PST | 1997 | 9 | 6 | 6 | 60
+ Tue Dec 30 17:32:01 1997 PST | 1998 | 1 | 2 | 2 | 364
+ Wed Dec 31 17:32:01 1997 PST | 1998 | 1 | 3 | 3 | 365
+ Fri Dec 31 17:32:01 1999 PST | 1999 | 52 | 5 | 5 | 365
+ Sat Jan 01 17:32:01 2000 PST | 1999 | 52 | 6 | 6 | 1
+ Sun Dec 31 17:32:01 2000 PST | 2000 | 52 | 7 | 0 | 366
+ Mon Jan 01 17:32:01 2001 PST | 2001 | 1 | 1 | 1 | 1
+(66 rows)
+
+SELECT d1 as timestamptz,
+ date_part( 'decade', d1) AS decade,
+ date_part( 'century', d1) AS century,
+ date_part( 'millennium', d1) AS millennium,
+ round(date_part( 'julian', d1)) AS julian,
+ date_part( 'epoch', d1) AS epoch
+ FROM TIMESTAMPTZ_TBL;
+ timestamptz | decade | century | millennium | julian | epoch
+---------------------------------+-----------+-----------+------------+-----------+--------------
+ -infinity | -Infinity | -Infinity | -Infinity | -Infinity | -Infinity
+ infinity | Infinity | Infinity | Infinity | Infinity | Infinity
+ Wed Dec 31 16:00:00 1969 PST | 196 | 20 | 2 | 2440588 | 0
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:02 1997 PST | 199 | 20 | 2 | 2450491 | 855624722
+ Mon Feb 10 17:32:01.4 1997 PST | 199 | 20 | 2 | 2450491 | 855624721.4
+ Mon Feb 10 17:32:01.5 1997 PST | 199 | 20 | 2 | 2450491 | 855624721.5
+ Mon Feb 10 17:32:01.6 1997 PST | 199 | 20 | 2 | 2450491 | 855624721.6
+ Thu Jan 02 00:00:00 1997 PST | 199 | 20 | 2 | 2450451 | 852192000
+ Thu Jan 02 03:04:05 1997 PST | 199 | 20 | 2 | 2450451 | 852203045
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Tue Jun 10 17:32:01 1997 PDT | 199 | 20 | 2 | 2450611 | 865989121
+ Sat Sep 22 18:19:20 2001 PDT | 200 | 21 | 3 | 2452176 | 1001207960
+ Wed Mar 15 08:14:01 2000 PST | 200 | 20 | 2 | 2451619 | 953136841
+ Wed Mar 15 04:14:02 2000 PST | 200 | 20 | 2 | 2451619 | 953122442
+ Wed Mar 15 02:14:03 2000 PST | 200 | 20 | 2 | 2451619 | 953115243
+ Wed Mar 15 03:14:04 2000 PST | 200 | 20 | 2 | 2451619 | 953118844
+ Wed Mar 15 01:14:05 2000 PST | 200 | 20 | 2 | 2451619 | 953111645
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:00 1997 PST | 199 | 20 | 2 | 2450491 | 855624720
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Mon Feb 10 09:32:01 1997 PST | 199 | 20 | 2 | 2450490 | 855595921
+ Mon Feb 10 09:32:01 1997 PST | 199 | 20 | 2 | 2450490 | 855595921
+ Mon Feb 10 09:32:01 1997 PST | 199 | 20 | 2 | 2450490 | 855595921
+ Mon Feb 10 14:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855613921
+ Thu Jul 10 14:32:01 1997 PDT | 199 | 20 | 2 | 2450641 | 868570321
+ Tue Jun 10 18:32:01 1997 PDT | 199 | 20 | 2 | 2450611 | 865992721
+ Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721
+ Tue Feb 11 17:32:01 1997 PST | 199 | 20 | 2 | 2450492 | 855711121
+ Wed Feb 12 17:32:01 1997 PST | 199 | 20 | 2 | 2450493 | 855797521
+ Thu Feb 13 17:32:01 1997 PST | 199 | 20 | 2 | 2450494 | 855883921
+ Fri Feb 14 17:32:01 1997 PST | 199 | 20 | 2 | 2450495 | 855970321
+ Sat Feb 15 17:32:01 1997 PST | 199 | 20 | 2 | 2450496 | 856056721
+ Sun Feb 16 17:32:01 1997 PST | 199 | 20 | 2 | 2450497 | 856143121
+ Tue Feb 16 17:32:01 0097 PST BC | -10 | -1 | -1 | 1686043 | -65192682479
+ Sat Feb 16 17:32:01 0097 PST | 9 | 1 | 1 | 1756537 | -59102000879
+ Thu Feb 16 17:32:01 0597 PST | 59 | 6 | 1 | 1939158 | -43323546479
+ Tue Feb 16 17:32:01 1097 PST | 109 | 11 | 2 | 2121779 | -27545092079
+ Sat Feb 16 17:32:01 1697 PST | 169 | 17 | 2 | 2340925 | -8610877679
+ Thu Feb 16 17:32:01 1797 PST | 179 | 18 | 2 | 2377449 | -5455204079
+ Tue Feb 16 17:32:01 1897 PST | 189 | 19 | 2 | 2413973 | -2299530479
+ Sun Feb 16 17:32:01 1997 PST | 199 | 20 | 2 | 2450497 | 856143121
+ Sat Feb 16 17:32:01 2097 PST | 209 | 21 | 3 | 2487022 | 4011903121
+ Wed Feb 28 17:32:01 1996 PST | 199 | 20 | 2 | 2450143 | 825557521
+ Thu Feb 29 17:32:01 1996 PST | 199 | 20 | 2 | 2450144 | 825643921
+ Fri Mar 01 17:32:01 1996 PST | 199 | 20 | 2 | 2450145 | 825730321
+ Mon Dec 30 17:32:01 1996 PST | 199 | 20 | 2 | 2450449 | 851995921
+ Tue Dec 31 17:32:01 1996 PST | 199 | 20 | 2 | 2450450 | 852082321
+ Wed Jan 01 17:32:01 1997 PST | 199 | 20 | 2 | 2450451 | 852168721
+ Fri Feb 28 17:32:01 1997 PST | 199 | 20 | 2 | 2450509 | 857179921
+ Sat Mar 01 17:32:01 1997 PST | 199 | 20 | 2 | 2450510 | 857266321
+ Tue Dec 30 17:32:01 1997 PST | 199 | 20 | 2 | 2450814 | 883531921
+ Wed Dec 31 17:32:01 1997 PST | 199 | 20 | 2 | 2450815 | 883618321
+ Fri Dec 31 17:32:01 1999 PST | 199 | 20 | 2 | 2451545 | 946690321
+ Sat Jan 01 17:32:01 2000 PST | 200 | 20 | 2 | 2451546 | 946776721
+ Sun Dec 31 17:32:01 2000 PST | 200 | 20 | 2 | 2451911 | 978312721
+ Mon Jan 01 17:32:01 2001 PST | 200 | 21 | 3 | 2451912 | 978399121
+(66 rows)
+
+SELECT d1 as timestamptz,
+ date_part( 'timezone', d1) AS timezone,
+ date_part( 'timezone_hour', d1) AS timezone_hour,
+ date_part( 'timezone_minute', d1) AS timezone_minute
+ FROM TIMESTAMPTZ_TBL;
+ timestamptz | timezone | timezone_hour | timezone_minute
+---------------------------------+----------+---------------+-----------------
+ -infinity | | |
+ infinity | | |
+ Wed Dec 31 16:00:00 1969 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:02 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01.4 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01.5 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01.6 1997 PST | -28800 | -8 | 0
+ Thu Jan 02 00:00:00 1997 PST | -28800 | -8 | 0
+ Thu Jan 02 03:04:05 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Tue Jun 10 17:32:01 1997 PDT | -25200 | -7 | 0
+ Sat Sep 22 18:19:20 2001 PDT | -25200 | -7 | 0
+ Wed Mar 15 08:14:01 2000 PST | -28800 | -8 | 0
+ Wed Mar 15 04:14:02 2000 PST | -28800 | -8 | 0
+ Wed Mar 15 02:14:03 2000 PST | -28800 | -8 | 0
+ Wed Mar 15 03:14:04 2000 PST | -28800 | -8 | 0
+ Wed Mar 15 01:14:05 2000 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:00 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 09:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 09:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 09:32:01 1997 PST | -28800 | -8 | 0
+ Mon Feb 10 14:32:01 1997 PST | -28800 | -8 | 0
+ Thu Jul 10 14:32:01 1997 PDT | -25200 | -7 | 0
+ Tue Jun 10 18:32:01 1997 PDT | -25200 | -7 | 0
+ Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0
+ Tue Feb 11 17:32:01 1997 PST | -28800 | -8 | 0
+ Wed Feb 12 17:32:01 1997 PST | -28800 | -8 | 0
+ Thu Feb 13 17:32:01 1997 PST | -28800 | -8 | 0
+ Fri Feb 14 17:32:01 1997 PST | -28800 | -8 | 0
+ Sat Feb 15 17:32:01 1997 PST | -28800 | -8 | 0
+ Sun Feb 16 17:32:01 1997 PST | -28800 | -8 | 0
+ Tue Feb 16 17:32:01 0097 PST BC | -28800 | -8 | 0
+ Sat Feb 16 17:32:01 0097 PST | -28800 | -8 | 0
+ Thu Feb 16 17:32:01 0597 PST | -28800 | -8 | 0
+ Tue Feb 16 17:32:01 1097 PST | -28800 | -8 | 0
+ Sat Feb 16 17:32:01 1697 PST | -28800 | -8 | 0
+ Thu Feb 16 17:32:01 1797 PST | -28800 | -8 | 0
+ Tue Feb 16 17:32:01 1897 PST | -28800 | -8 | 0
+ Sun Feb 16 17:32:01 1997 PST | -28800 | -8 | 0
+ Sat Feb 16 17:32:01 2097 PST | -28800 | -8 | 0
+ Wed Feb 28 17:32:01 1996 PST | -28800 | -8 | 0
+ Thu Feb 29 17:32:01 1996 PST | -28800 | -8 | 0
+ Fri Mar 01 17:32:01 1996 PST | -28800 | -8 | 0
+ Mon Dec 30 17:32:01 1996 PST | -28800 | -8 | 0
+ Tue Dec 31 17:32:01 1996 PST | -28800 | -8 | 0
+ Wed Jan 01 17:32:01 1997 PST | -28800 | -8 | 0
+ Fri Feb 28 17:32:01 1997 PST | -28800 | -8 | 0
+ Sat Mar 01 17:32:01 1997 PST | -28800 | -8 | 0
+ Tue Dec 30 17:32:01 1997 PST | -28800 | -8 | 0
+ Wed Dec 31 17:32:01 1997 PST | -28800 | -8 | 0
+ Fri Dec 31 17:32:01 1999 PST | -28800 | -8 | 0
+ Sat Jan 01 17:32:01 2000 PST | -28800 | -8 | 0
+ Sun Dec 31 17:32:01 2000 PST | -28800 | -8 | 0
+ Mon Jan 01 17:32:01 2001 PST | -28800 | -8 | 0
+(66 rows)
+
+-- extract implementation is mostly the same as date_part, so only
+-- test a few cases for additional coverage.
+SELECT d1 as "timestamp",
+ extract(microseconds from d1) AS microseconds,
+ extract(milliseconds from d1) AS milliseconds,
+ extract(seconds from d1) AS seconds,
+ round(extract(julian from d1)) AS julian,
+ extract(epoch from d1) AS epoch
+ FROM TIMESTAMPTZ_TBL;
+ timestamp | microseconds | milliseconds | seconds | julian | epoch
+---------------------------------+--------------+--------------+-----------+-----------+---------------------
+ -infinity | | | | -Infinity | -Infinity
+ infinity | | | | Infinity | Infinity
+ Wed Dec 31 16:00:00 1969 PST | 0 | 0.000 | 0.000000 | 2440588 | 0.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:02 1997 PST | 2000000 | 2000.000 | 2.000000 | 2450491 | 855624722.000000
+ Mon Feb 10 17:32:01.4 1997 PST | 1400000 | 1400.000 | 1.400000 | 2450491 | 855624721.400000
+ Mon Feb 10 17:32:01.5 1997 PST | 1500000 | 1500.000 | 1.500000 | 2450491 | 855624721.500000
+ Mon Feb 10 17:32:01.6 1997 PST | 1600000 | 1600.000 | 1.600000 | 2450491 | 855624721.600000
+ Thu Jan 02 00:00:00 1997 PST | 0 | 0.000 | 0.000000 | 2450451 | 852192000.000000
+ Thu Jan 02 03:04:05 1997 PST | 5000000 | 5000.000 | 5.000000 | 2450451 | 852203045.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Tue Jun 10 17:32:01 1997 PDT | 1000000 | 1000.000 | 1.000000 | 2450611 | 865989121.000000
+ Sat Sep 22 18:19:20 2001 PDT | 20000000 | 20000.000 | 20.000000 | 2452176 | 1001207960.000000
+ Wed Mar 15 08:14:01 2000 PST | 1000000 | 1000.000 | 1.000000 | 2451619 | 953136841.000000
+ Wed Mar 15 04:14:02 2000 PST | 2000000 | 2000.000 | 2.000000 | 2451619 | 953122442.000000
+ Wed Mar 15 02:14:03 2000 PST | 3000000 | 3000.000 | 3.000000 | 2451619 | 953115243.000000
+ Wed Mar 15 03:14:04 2000 PST | 4000000 | 4000.000 | 4.000000 | 2451619 | 953118844.000000
+ Wed Mar 15 01:14:05 2000 PST | 5000000 | 5000.000 | 5.000000 | 2451619 | 953111645.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:00 1997 PST | 0 | 0.000 | 0.000000 | 2450491 | 855624720.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Mon Feb 10 09:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450490 | 855595921.000000
+ Mon Feb 10 09:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450490 | 855595921.000000
+ Mon Feb 10 09:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450490 | 855595921.000000
+ Mon Feb 10 14:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855613921.000000
+ Thu Jul 10 14:32:01 1997 PDT | 1000000 | 1000.000 | 1.000000 | 2450641 | 868570321.000000
+ Tue Jun 10 18:32:01 1997 PDT | 1000000 | 1000.000 | 1.000000 | 2450611 | 865992721.000000
+ Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000
+ Tue Feb 11 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450492 | 855711121.000000
+ Wed Feb 12 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450493 | 855797521.000000
+ Thu Feb 13 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450494 | 855883921.000000
+ Fri Feb 14 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450495 | 855970321.000000
+ Sat Feb 15 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450496 | 856056721.000000
+ Sun Feb 16 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450497 | 856143121.000000
+ Tue Feb 16 17:32:01 0097 PST BC | 1000000 | 1000.000 | 1.000000 | 1686043 | -65192682479.000000
+ Sat Feb 16 17:32:01 0097 PST | 1000000 | 1000.000 | 1.000000 | 1756537 | -59102000879.000000
+ Thu Feb 16 17:32:01 0597 PST | 1000000 | 1000.000 | 1.000000 | 1939158 | -43323546479.000000
+ Tue Feb 16 17:32:01 1097 PST | 1000000 | 1000.000 | 1.000000 | 2121779 | -27545092079.000000
+ Sat Feb 16 17:32:01 1697 PST | 1000000 | 1000.000 | 1.000000 | 2340925 | -8610877679.000000
+ Thu Feb 16 17:32:01 1797 PST | 1000000 | 1000.000 | 1.000000 | 2377449 | -5455204079.000000
+ Tue Feb 16 17:32:01 1897 PST | 1000000 | 1000.000 | 1.000000 | 2413973 | -2299530479.000000
+ Sun Feb 16 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450497 | 856143121.000000
+ Sat Feb 16 17:32:01 2097 PST | 1000000 | 1000.000 | 1.000000 | 2487022 | 4011903121.000000
+ Wed Feb 28 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450143 | 825557521.000000
+ Thu Feb 29 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450144 | 825643921.000000
+ Fri Mar 01 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450145 | 825730321.000000
+ Mon Dec 30 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450449 | 851995921.000000
+ Tue Dec 31 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450450 | 852082321.000000
+ Wed Jan 01 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450451 | 852168721.000000
+ Fri Feb 28 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450509 | 857179921.000000
+ Sat Mar 01 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450510 | 857266321.000000
+ Tue Dec 30 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450814 | 883531921.000000
+ Wed Dec 31 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450815 | 883618321.000000
+ Fri Dec 31 17:32:01 1999 PST | 1000000 | 1000.000 | 1.000000 | 2451545 | 946690321.000000
+ Sat Jan 01 17:32:01 2000 PST | 1000000 | 1000.000 | 1.000000 | 2451546 | 946776721.000000
+ Sun Dec 31 17:32:01 2000 PST | 1000000 | 1000.000 | 1.000000 | 2451911 | 978312721.000000
+ Mon Jan 01 17:32:01 2001 PST | 1000000 | 1000.000 | 1.000000 | 2451912 | 978399121.000000
+(66 rows)
+
+-- value near upper bound uses special case in code
+SELECT date_part('epoch', '294270-01-01 00:00:00+00'::timestamptz);
+ date_part
+---------------
+ 9224097091200
+(1 row)
+
+SELECT extract(epoch from '294270-01-01 00:00:00+00'::timestamptz);
+ extract
+----------------------
+ 9224097091200.000000
+(1 row)
+
+-- another internal overflow test case
+SELECT extract(epoch from '5000-01-01 00:00:00+00'::timestamptz);
+ extract
+--------------------
+ 95617584000.000000
+(1 row)
+
+-- TO_CHAR()
+SELECT to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+------------------------------------------------------------------------------------------
+
+
+ WEDNESDAY Wednesday wednesday WED Wed wed DECEMBER December december XII DEC Dec dec
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan
+ THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue JUNE June june VI JUN Jun jun
+ SATURDAY Saturday saturday SAT Sat sat SEPTEMBER September september IX SEP Sep sep
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu JULY July july VII JUL Jul jul
+ TUESDAY Tuesday tuesday TUE Tue tue JUNE June june VI JUN Jun jun
+ MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb
+ WEDNESDAY Wednesday wednesday WED Wed wed FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb
+ FRIDAY Friday friday FRI Fri fri FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb
+ SUNDAY Sunday sunday SUN Sun sun FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb
+ TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb
+ SUNDAY Sunday sunday SUN Sun sun FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb
+ WEDNESDAY Wednesday wednesday WED Wed wed FEBRUARY February february II FEB Feb feb
+ THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb
+ FRIDAY Friday friday FRI Fri fri MARCH March march III MAR Mar mar
+ MONDAY Monday monday MON Mon mon DECEMBER December december XII DEC Dec dec
+ TUESDAY Tuesday tuesday TUE Tue tue DECEMBER December december XII DEC Dec dec
+ WEDNESDAY Wednesday wednesday WED Wed wed JANUARY January january I JAN Jan jan
+ FRIDAY Friday friday FRI Fri fri FEBRUARY February february II FEB Feb feb
+ SATURDAY Saturday saturday SAT Sat sat MARCH March march III MAR Mar mar
+ TUESDAY Tuesday tuesday TUE Tue tue DECEMBER December december XII DEC Dec dec
+ WEDNESDAY Wednesday wednesday WED Wed wed DECEMBER December december XII DEC Dec dec
+ FRIDAY Friday friday FRI Fri fri DECEMBER December december XII DEC Dec dec
+ SATURDAY Saturday saturday SAT Sat sat JANUARY January january I JAN Jan jan
+ SUNDAY Sunday sunday SUN Sun sun DECEMBER December december XII DEC Dec dec
+ MONDAY Monday monday MON Mon mon JANUARY January january I JAN Jan jan
+(66 rows)
+
+SELECT to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+--------------------------------------------------------------
+
+
+ WEDNESDAY Wednesday wednesday DECEMBER December december XII
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ THURSDAY Thursday thursday JANUARY January january I
+ THURSDAY Thursday thursday JANUARY January january I
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ TUESDAY Tuesday tuesday JUNE June june VI
+ SATURDAY Saturday saturday SEPTEMBER September september IX
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ WEDNESDAY Wednesday wednesday MARCH March march III
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ MONDAY Monday monday FEBRUARY February february II
+ THURSDAY Thursday thursday JULY July july VII
+ TUESDAY Tuesday tuesday JUNE June june VI
+ MONDAY Monday monday FEBRUARY February february II
+ TUESDAY Tuesday tuesday FEBRUARY February february II
+ WEDNESDAY Wednesday wednesday FEBRUARY February february II
+ THURSDAY Thursday thursday FEBRUARY February february II
+ FRIDAY Friday friday FEBRUARY February february II
+ SATURDAY Saturday saturday FEBRUARY February february II
+ SUNDAY Sunday sunday FEBRUARY February february II
+ TUESDAY Tuesday tuesday FEBRUARY February february II
+ SATURDAY Saturday saturday FEBRUARY February february II
+ THURSDAY Thursday thursday FEBRUARY February february II
+ TUESDAY Tuesday tuesday FEBRUARY February february II
+ SATURDAY Saturday saturday FEBRUARY February february II
+ THURSDAY Thursday thursday FEBRUARY February february II
+ TUESDAY Tuesday tuesday FEBRUARY February february II
+ SUNDAY Sunday sunday FEBRUARY February february II
+ SATURDAY Saturday saturday FEBRUARY February february II
+ WEDNESDAY Wednesday wednesday FEBRUARY February february II
+ THURSDAY Thursday thursday FEBRUARY February february II
+ FRIDAY Friday friday MARCH March march III
+ MONDAY Monday monday DECEMBER December december XII
+ TUESDAY Tuesday tuesday DECEMBER December december XII
+ WEDNESDAY Wednesday wednesday JANUARY January january I
+ FRIDAY Friday friday FEBRUARY February february II
+ SATURDAY Saturday saturday MARCH March march III
+ TUESDAY Tuesday tuesday DECEMBER December december XII
+ WEDNESDAY Wednesday wednesday DECEMBER December december XII
+ FRIDAY Friday friday DECEMBER December december XII
+ SATURDAY Saturday saturday JANUARY January january I
+ SUNDAY Sunday sunday DECEMBER December december XII
+ MONDAY Monday monday JANUARY January january I
+(66 rows)
+
+SELECT to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+--------------------------------------------------
+
+
+ 1,969 1969 969 69 9 20 4 12 53 365 31 4 2440587
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451
+ 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
+ 2,001 2001 001 01 1 21 3 09 38 265 22 7 2452175
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 3 07 28 191 10 5 2450640
+ 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610
+ 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490
+ 1,997 1997 997 97 7 20 1 02 06 042 11 3 2450491
+ 1,997 1997 997 97 7 20 1 02 07 043 12 4 2450492
+ 1,997 1997 997 97 7 20 1 02 07 044 13 5 2450493
+ 1,997 1997 997 97 7 20 1 02 07 045 14 6 2450494
+ 1,997 1997 997 97 7 20 1 02 07 046 15 7 2450495
+ 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
+ 0,097 0097 097 97 7 -01 1 02 07 047 16 3 1686042
+ 0,097 0097 097 97 7 01 1 02 07 047 16 7 1756536
+ 0,597 0597 597 97 7 06 1 02 07 047 16 5 1939157
+ 1,097 1097 097 97 7 11 1 02 07 047 16 3 2121778
+ 1,697 1697 697 97 7 17 1 02 07 047 16 7 2340924
+ 1,797 1797 797 97 7 18 1 02 07 047 16 5 2377448
+ 1,897 1897 897 97 7 19 1 02 07 047 16 3 2413972
+ 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496
+ 2,097 2097 097 97 7 21 1 02 07 047 16 7 2487021
+ 1,996 1996 996 96 6 20 1 02 09 059 28 4 2450142
+ 1,996 1996 996 96 6 20 1 02 09 060 29 5 2450143
+ 1,996 1996 996 96 6 20 1 03 09 061 01 6 2450144
+ 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448
+ 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449
+ 1,997 1997 997 97 7 20 1 01 01 001 01 4 2450450
+ 1,997 1997 997 97 7 20 1 02 09 059 28 6 2450508
+ 1,997 1997 997 97 7 20 1 03 09 060 01 7 2450509
+ 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813
+ 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814
+ 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544
+ 2,000 2000 000 00 0 20 1 01 01 001 01 7 2451545
+ 2,000 2000 000 00 0 20 4 12 53 366 31 1 2451910
+ 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911
+(66 rows)
+
+SELECT to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+-------------------------------------------------
+
+
+ 1,969 1969 969 69 9 20 4 12 53 365 31 4 2440587
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451
+ 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 2 6 23 161 10 3 2450610
+ 2,001 2001 1 1 1 21 3 9 38 265 22 7 2452175
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 3 7 28 191 10 5 2450640
+ 1,997 1997 997 97 7 20 2 6 23 161 10 3 2450610
+ 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490
+ 1,997 1997 997 97 7 20 1 2 6 42 11 3 2450491
+ 1,997 1997 997 97 7 20 1 2 7 43 12 4 2450492
+ 1,997 1997 997 97 7 20 1 2 7 44 13 5 2450493
+ 1,997 1997 997 97 7 20 1 2 7 45 14 6 2450494
+ 1,997 1997 997 97 7 20 1 2 7 46 15 7 2450495
+ 1,997 1997 997 97 7 20 1 2 7 47 16 1 2450496
+ 0,097 97 97 97 7 -1 1 2 7 47 16 3 1686042
+ 0,097 97 97 97 7 1 1 2 7 47 16 7 1756536
+ 0,597 597 597 97 7 6 1 2 7 47 16 5 1939157
+ 1,097 1097 97 97 7 11 1 2 7 47 16 3 2121778
+ 1,697 1697 697 97 7 17 1 2 7 47 16 7 2340924
+ 1,797 1797 797 97 7 18 1 2 7 47 16 5 2377448
+ 1,897 1897 897 97 7 19 1 2 7 47 16 3 2413972
+ 1,997 1997 997 97 7 20 1 2 7 47 16 1 2450496
+ 2,097 2097 97 97 7 21 1 2 7 47 16 7 2487021
+ 1,996 1996 996 96 6 20 1 2 9 59 28 4 2450142
+ 1,996 1996 996 96 6 20 1 2 9 60 29 5 2450143
+ 1,996 1996 996 96 6 20 1 3 9 61 1 6 2450144
+ 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448
+ 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449
+ 1,997 1997 997 97 7 20 1 1 1 1 1 4 2450450
+ 1,997 1997 997 97 7 20 1 2 9 59 28 6 2450508
+ 1,997 1997 997 97 7 20 1 3 9 60 1 7 2450509
+ 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813
+ 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814
+ 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544
+ 2,000 2000 0 0 0 20 1 1 1 1 1 7 2451545
+ 2,000 2000 0 0 0 20 4 12 53 366 31 1 2451910
+ 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911
+(66 rows)
+
+SELECT to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+----------------------
+
+
+ 04 04 16 00 00 57600
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 02 63122
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 12 12 00 00 00 0
+ 03 03 03 04 05 11045
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 06 06 18 19 20 65960
+ 08 08 08 14 01 29641
+ 04 04 04 14 02 15242
+ 02 02 02 14 03 8043
+ 03 03 03 14 04 11644
+ 01 01 01 14 05 4445
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 00 63120
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 09 09 09 32 01 34321
+ 09 09 09 32 01 34321
+ 09 09 09 32 01 34321
+ 02 02 14 32 01 52321
+ 02 02 14 32 01 52321
+ 06 06 18 32 01 66721
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+ 05 05 17 32 01 63121
+(66 rows)
+
+SELECT to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+-------------------------------------------------
+
+
+ HH:MI:SS is 04:00:00 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:02 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 12:00:00 "text between quote marks"
+ HH:MI:SS is 03:04:05 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 06:19:20 "text between quote marks"
+ HH:MI:SS is 08:14:01 "text between quote marks"
+ HH:MI:SS is 04:14:02 "text between quote marks"
+ HH:MI:SS is 02:14:03 "text between quote marks"
+ HH:MI:SS is 03:14:04 "text between quote marks"
+ HH:MI:SS is 01:14:05 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:00 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 09:32:01 "text between quote marks"
+ HH:MI:SS is 09:32:01 "text between quote marks"
+ HH:MI:SS is 09:32:01 "text between quote marks"
+ HH:MI:SS is 02:32:01 "text between quote marks"
+ HH:MI:SS is 02:32:01 "text between quote marks"
+ HH:MI:SS is 06:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+ HH:MI:SS is 05:32:01 "text between quote marks"
+(66 rows)
+
+SELECT to_char(d1, 'HH24--text--MI--text--SS')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+------------------------
+
+
+ 16--text--00--text--00
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--02
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 00--text--00--text--00
+ 03--text--04--text--05
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 18--text--19--text--20
+ 08--text--14--text--01
+ 04--text--14--text--02
+ 02--text--14--text--03
+ 03--text--14--text--04
+ 01--text--14--text--05
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--00
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 09--text--32--text--01
+ 09--text--32--text--01
+ 09--text--32--text--01
+ 14--text--32--text--01
+ 14--text--32--text--01
+ 18--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+ 17--text--32--text--01
+(66 rows)
+
+SELECT to_char(d1, 'YYYYTH YYYYth Jth')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+-------------------------
+
+
+ 1969TH 1969th 2440587th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450451st
+ 1997TH 1997th 2450451st
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450610th
+ 2001ST 2001st 2452175th
+ 2000TH 2000th 2451619th
+ 2000TH 2000th 2451619th
+ 2000TH 2000th 2451619th
+ 2000TH 2000th 2451619th
+ 2000TH 2000th 2451619th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450640th
+ 1997TH 1997th 2450610th
+ 1997TH 1997th 2450490th
+ 1997TH 1997th 2450491st
+ 1997TH 1997th 2450492nd
+ 1997TH 1997th 2450493rd
+ 1997TH 1997th 2450494th
+ 1997TH 1997th 2450495th
+ 1997TH 1997th 2450496th
+ 0097TH 0097th 1686042nd
+ 0097TH 0097th 1756536th
+ 0597TH 0597th 1939157th
+ 1097TH 1097th 2121778th
+ 1697TH 1697th 2340924th
+ 1797TH 1797th 2377448th
+ 1897TH 1897th 2413972nd
+ 1997TH 1997th 2450496th
+ 2097TH 2097th 2487021st
+ 1996TH 1996th 2450142nd
+ 1996TH 1996th 2450143rd
+ 1996TH 1996th 2450144th
+ 1996TH 1996th 2450448th
+ 1996TH 1996th 2450449th
+ 1997TH 1997th 2450450th
+ 1997TH 1997th 2450508th
+ 1997TH 1997th 2450509th
+ 1997TH 1997th 2450813th
+ 1997TH 1997th 2450814th
+ 1999TH 1999th 2451544th
+ 2000TH 2000th 2451545th
+ 2000TH 2000th 2451910th
+ 2001ST 2001st 2451911th
+(66 rows)
+
+SELECT to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+---------------------------------------------------------------------
+
+
+ 1969 A.D. 1969 a.d. 1969 ad 04:00:00 P.M. 04:00:00 p.m. 04:00:00 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:02 P.M. 05:32:02 p.m. 05:32:02 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am
+ 1997 A.D. 1997 a.d. 1997 ad 03:04:05 A.M. 03:04:05 a.m. 03:04:05 am
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2001 A.D. 2001 a.d. 2001 ad 06:19:20 P.M. 06:19:20 p.m. 06:19:20 pm
+ 2000 A.D. 2000 a.d. 2000 ad 08:14:01 A.M. 08:14:01 a.m. 08:14:01 am
+ 2000 A.D. 2000 a.d. 2000 ad 04:14:02 A.M. 04:14:02 a.m. 04:14:02 am
+ 2000 A.D. 2000 a.d. 2000 ad 02:14:03 A.M. 02:14:03 a.m. 02:14:03 am
+ 2000 A.D. 2000 a.d. 2000 ad 03:14:04 A.M. 03:14:04 a.m. 03:14:04 am
+ 2000 A.D. 2000 a.d. 2000 ad 01:14:05 A.M. 01:14:05 a.m. 01:14:05 am
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:00 P.M. 05:32:00 p.m. 05:32:00 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 09:32:01 A.M. 09:32:01 a.m. 09:32:01 am
+ 1997 A.D. 1997 a.d. 1997 ad 09:32:01 A.M. 09:32:01 a.m. 09:32:01 am
+ 1997 A.D. 1997 a.d. 1997 ad 09:32:01 A.M. 09:32:01 a.m. 09:32:01 am
+ 1997 A.D. 1997 a.d. 1997 ad 02:32:01 P.M. 02:32:01 p.m. 02:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 02:32:01 P.M. 02:32:01 p.m. 02:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 06:32:01 P.M. 06:32:01 p.m. 06:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 0097 B.C. 0097 b.c. 0097 bc 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 0097 A.D. 0097 a.d. 0097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 0597 A.D. 0597 a.d. 0597 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1097 A.D. 1097 a.d. 1097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1697 A.D. 1697 a.d. 1697 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1797 A.D. 1797 a.d. 1797 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1897 A.D. 1897 a.d. 1897 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2097 A.D. 2097 a.d. 2097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 1999 A.D. 1999 a.d. 1999 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+ 2001 A.D. 2001 a.d. 2001 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm
+(66 rows)
+
+SELECT to_char(d1, 'IYYY IYY IY I IW IDDD ID')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+------------------------
+
+
+ 1970 970 70 0 01 003 3
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 01 004 4
+ 1997 997 97 7 01 004 4
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 24 163 2
+ 2001 001 01 1 38 265 6
+ 2000 000 00 0 11 073 3
+ 2000 000 00 0 11 073 3
+ 2000 000 00 0 11 073 3
+ 2000 000 00 0 11 073 3
+ 2000 000 00 0 11 073 3
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 28 193 4
+ 1997 997 97 7 24 163 2
+ 1997 997 97 7 07 043 1
+ 1997 997 97 7 07 044 2
+ 1997 997 97 7 07 045 3
+ 1997 997 97 7 07 046 4
+ 1997 997 97 7 07 047 5
+ 1997 997 97 7 07 048 6
+ 1997 997 97 7 07 049 7
+ 0097 097 97 7 07 044 2
+ 0097 097 97 7 07 048 6
+ 0597 597 97 7 07 046 4
+ 1097 097 97 7 07 044 2
+ 1697 697 97 7 07 048 6
+ 1797 797 97 7 07 046 4
+ 1897 897 97 7 07 044 2
+ 1997 997 97 7 07 049 7
+ 2097 097 97 7 07 048 6
+ 1996 996 96 6 09 059 3
+ 1996 996 96 6 09 060 4
+ 1996 996 96 6 09 061 5
+ 1997 997 97 7 01 001 1
+ 1997 997 97 7 01 002 2
+ 1997 997 97 7 01 003 3
+ 1997 997 97 7 09 061 5
+ 1997 997 97 7 09 062 6
+ 1998 998 98 8 01 002 2
+ 1998 998 98 8 01 003 3
+ 1999 999 99 9 52 362 5
+ 1999 999 99 9 52 363 6
+ 2000 000 00 0 52 364 7
+ 2001 001 01 1 01 001 1
+(66 rows)
+
+SELECT to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID')
+ FROM TIMESTAMPTZ_TBL;
+ to_char
+------------------------
+
+
+ 1970 970 70 0 1 3 3
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 1 4 4
+ 1997 997 97 7 1 4 4
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 24 163 2
+ 2001 1 1 1 38 265 6
+ 2000 0 0 0 11 73 3
+ 2000 0 0 0 11 73 3
+ 2000 0 0 0 11 73 3
+ 2000 0 0 0 11 73 3
+ 2000 0 0 0 11 73 3
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 28 193 4
+ 1997 997 97 7 24 163 2
+ 1997 997 97 7 7 43 1
+ 1997 997 97 7 7 44 2
+ 1997 997 97 7 7 45 3
+ 1997 997 97 7 7 46 4
+ 1997 997 97 7 7 47 5
+ 1997 997 97 7 7 48 6
+ 1997 997 97 7 7 49 7
+ 97 97 97 7 7 44 2
+ 97 97 97 7 7 48 6
+ 597 597 97 7 7 46 4
+ 1097 97 97 7 7 44 2
+ 1697 697 97 7 7 48 6
+ 1797 797 97 7 7 46 4
+ 1897 897 97 7 7 44 2
+ 1997 997 97 7 7 49 7
+ 2097 97 97 7 7 48 6
+ 1996 996 96 6 9 59 3
+ 1996 996 96 6 9 60 4
+ 1996 996 96 6 9 61 5
+ 1997 997 97 7 1 1 1
+ 1997 997 97 7 1 2 2
+ 1997 997 97 7 1 3 3
+ 1997 997 97 7 9 61 5
+ 1997 997 97 7 9 62 6
+ 1998 998 98 8 1 2 2
+ 1998 998 98 8 1 3 3
+ 1999 999 99 9 52 362 5
+ 1999 999 99 9 52 363 6
+ 2000 0 0 0 52 364 7
+ 2001 1 1 1 1 1 1
+(66 rows)
+
+SELECT to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff5 ff6 MS US')
+ FROM (VALUES
+ ('2018-11-02 12:34:56'::timestamptz),
+ ('2018-11-02 12:34:56.78'),
+ ('2018-11-02 12:34:56.78901'),
+ ('2018-11-02 12:34:56.78901234')
+ ) d(d);
+ to_char
+--------------------------------------------------------------------
+ 0 00 000 0000 00000 000000 0 00 000 0000 00000 000000 000 000000
+ 7 78 780 7800 78000 780000 7 78 780 7800 78000 780000 780 780000
+ 7 78 789 7890 78901 789010 7 78 789 7890 78901 789010 789 789010
+ 7 78 789 7890 78901 789012 7 78 789 7890 78901 789012 789 789012
+(4 rows)
+
+-- Check OF, TZH, TZM with various zone offsets, particularly fractional hours
+SET timezone = '00:00';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+ OF | TZH:TZM
+-----+---------
+ +00 | +00:00
+(1 row)
+
+SET timezone = '+02:00';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+ OF | TZH:TZM
+-----+---------
+ -02 | -02:00
+(1 row)
+
+SET timezone = '-13:00';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+ OF | TZH:TZM
+-----+---------
+ +13 | +13:00
+(1 row)
+
+SET timezone = '-00:30';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+ OF | TZH:TZM
+--------+---------
+ +00:30 | +00:30
+(1 row)
+
+SET timezone = '00:30';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+ OF | TZH:TZM
+--------+---------
+ -00:30 | -00:30
+(1 row)
+
+SET timezone = '-04:30';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+ OF | TZH:TZM
+--------+---------
+ +04:30 | +04:30
+(1 row)
+
+SET timezone = '04:30';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+ OF | TZH:TZM
+--------+---------
+ -04:30 | -04:30
+(1 row)
+
+SET timezone = '-04:15';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+ OF | TZH:TZM
+--------+---------
+ +04:15 | +04:15
+(1 row)
+
+SET timezone = '04:15';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+ OF | TZH:TZM
+--------+---------
+ -04:15 | -04:15
+(1 row)
+
+RESET timezone;
+CREATE TABLE TIMESTAMPTZ_TST (a int , b timestamptz);
+-- Test year field value with len > 4
+INSERT INTO TIMESTAMPTZ_TST VALUES(1, 'Sat Mar 12 23:58:48 1000 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(2, 'Sat Mar 12 23:58:48 10000 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(3, 'Sat Mar 12 23:58:48 100000 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(3, '10000 Mar 12 23:58:48 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(4, '100000312 23:58:48 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(4, '1000000312 23:58:48 IST');
+--Verify data
+SELECT * FROM TIMESTAMPTZ_TST ORDER BY a;
+ a | b
+---+--------------------------------
+ 1 | Wed Mar 12 13:58:48 1000 PST
+ 2 | Sun Mar 12 14:58:48 10000 PDT
+ 3 | Sun Mar 12 14:58:48 100000 PDT
+ 3 | Sun Mar 12 14:58:48 10000 PDT
+ 4 | Sun Mar 12 14:58:48 10000 PDT
+ 4 | Sun Mar 12 14:58:48 100000 PDT
+(6 rows)
+
+--Cleanup
+DROP TABLE TIMESTAMPTZ_TST;
+-- test timestamptz constructors
+set TimeZone to 'America/New_York';
+-- numeric timezone
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33);
+ make_timestamptz
+---------------------------------
+ Sun Jul 15 08:15:55.33 1973 EDT
+(1 row)
+
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '+2');
+ make_timestamptz
+---------------------------------
+ Sun Jul 15 02:15:55.33 1973 EDT
+(1 row)
+
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '-2');
+ make_timestamptz
+---------------------------------
+ Sun Jul 15 06:15:55.33 1973 EDT
+(1 row)
+
+WITH tzs (tz) AS (VALUES
+ ('+1'), ('+1:'), ('+1:0'), ('+100'), ('+1:00'), ('+01:00'),
+ ('+10'), ('+1000'), ('+10:'), ('+10:0'), ('+10:00'), ('+10:00:'),
+ ('+10:00:1'), ('+10:00:01'),
+ ('+10:00:10'))
+ SELECT make_timestamptz(2010, 2, 27, 3, 45, 00, tz), tz FROM tzs;
+ make_timestamptz | tz
+------------------------------+-----------
+ Fri Feb 26 21:45:00 2010 EST | +1
+ Fri Feb 26 21:45:00 2010 EST | +1:
+ Fri Feb 26 21:45:00 2010 EST | +1:0
+ Fri Feb 26 21:45:00 2010 EST | +100
+ Fri Feb 26 21:45:00 2010 EST | +1:00
+ Fri Feb 26 21:45:00 2010 EST | +01:00
+ Fri Feb 26 12:45:00 2010 EST | +10
+ Fri Feb 26 12:45:00 2010 EST | +1000
+ Fri Feb 26 12:45:00 2010 EST | +10:
+ Fri Feb 26 12:45:00 2010 EST | +10:0
+ Fri Feb 26 12:45:00 2010 EST | +10:00
+ Fri Feb 26 12:45:00 2010 EST | +10:00:
+ Fri Feb 26 12:44:59 2010 EST | +10:00:1
+ Fri Feb 26 12:44:59 2010 EST | +10:00:01
+ Fri Feb 26 12:44:50 2010 EST | +10:00:10
+(15 rows)
+
+-- these should fail
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '2');
+ERROR: invalid input syntax for type numeric time zone: "2"
+HINT: Numeric time zones must have "-" or "+" as first character.
+SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, '+16');
+ERROR: numeric time zone "+16" out of range
+SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, '-16');
+ERROR: numeric time zone "-16" out of range
+-- should be true
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '+2') = '1973-07-15 08:15:55.33+02'::timestamptz;
+ ?column?
+----------
+ t
+(1 row)
+
+-- full timezone names
+SELECT make_timestamptz(2014, 12, 10, 0, 0, 0, 'Europe/Prague') = timestamptz '2014-12-10 00:00:00 Europe/Prague';
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT make_timestamptz(2014, 12, 10, 0, 0, 0, 'Europe/Prague') AT TIME ZONE 'UTC';
+ timezone
+--------------------------
+ Tue Dec 09 23:00:00 2014
+(1 row)
+
+SELECT make_timestamptz(1846, 12, 10, 0, 0, 0, 'Asia/Manila') AT TIME ZONE 'UTC';
+ timezone
+--------------------------
+ Wed Dec 09 15:56:00 1846
+(1 row)
+
+SELECT make_timestamptz(1881, 12, 10, 0, 0, 0, 'Europe/Paris') AT TIME ZONE 'UTC';
+ timezone
+--------------------------
+ Fri Dec 09 23:50:39 1881
+(1 row)
+
+SELECT make_timestamptz(1910, 12, 24, 0, 0, 0, 'Nehwon/Lankhmar');
+ERROR: time zone "Nehwon/Lankhmar" not recognized
+-- abbreviations
+SELECT make_timestamptz(2008, 12, 10, 10, 10, 10, 'EST');
+ make_timestamptz
+------------------------------
+ Wed Dec 10 10:10:10 2008 EST
+(1 row)
+
+SELECT make_timestamptz(2008, 12, 10, 10, 10, 10, 'EDT');
+ make_timestamptz
+------------------------------
+ Wed Dec 10 09:10:10 2008 EST
+(1 row)
+
+SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, 'PST8PDT');
+ make_timestamptz
+------------------------------
+ Wed Dec 10 13:10:10 2014 EST
+(1 row)
+
+RESET TimeZone;
+-- generate_series for timestamptz
+select * from generate_series('2020-01-01 00:00'::timestamptz,
+ '2020-01-02 03:00'::timestamptz,
+ '1 hour'::interval);
+ generate_series
+------------------------------
+ Wed Jan 01 00:00:00 2020 PST
+ Wed Jan 01 01:00:00 2020 PST
+ Wed Jan 01 02:00:00 2020 PST
+ Wed Jan 01 03:00:00 2020 PST
+ Wed Jan 01 04:00:00 2020 PST
+ Wed Jan 01 05:00:00 2020 PST
+ Wed Jan 01 06:00:00 2020 PST
+ Wed Jan 01 07:00:00 2020 PST
+ Wed Jan 01 08:00:00 2020 PST
+ Wed Jan 01 09:00:00 2020 PST
+ Wed Jan 01 10:00:00 2020 PST
+ Wed Jan 01 11:00:00 2020 PST
+ Wed Jan 01 12:00:00 2020 PST
+ Wed Jan 01 13:00:00 2020 PST
+ Wed Jan 01 14:00:00 2020 PST
+ Wed Jan 01 15:00:00 2020 PST
+ Wed Jan 01 16:00:00 2020 PST
+ Wed Jan 01 17:00:00 2020 PST
+ Wed Jan 01 18:00:00 2020 PST
+ Wed Jan 01 19:00:00 2020 PST
+ Wed Jan 01 20:00:00 2020 PST
+ Wed Jan 01 21:00:00 2020 PST
+ Wed Jan 01 22:00:00 2020 PST
+ Wed Jan 01 23:00:00 2020 PST
+ Thu Jan 02 00:00:00 2020 PST
+ Thu Jan 02 01:00:00 2020 PST
+ Thu Jan 02 02:00:00 2020 PST
+ Thu Jan 02 03:00:00 2020 PST
+(28 rows)
+
+-- the LIMIT should allow this to terminate in a reasonable amount of time
+-- (but that unfortunately doesn't work yet for SELECT * FROM ...)
+select generate_series('2022-01-01 00:00'::timestamptz,
+ 'infinity'::timestamptz,
+ '1 month'::interval) limit 10;
+ generate_series
+------------------------------
+ Sat Jan 01 00:00:00 2022 PST
+ Tue Feb 01 00:00:00 2022 PST
+ Tue Mar 01 00:00:00 2022 PST
+ Fri Apr 01 00:00:00 2022 PDT
+ Sun May 01 00:00:00 2022 PDT
+ Wed Jun 01 00:00:00 2022 PDT
+ Fri Jul 01 00:00:00 2022 PDT
+ Mon Aug 01 00:00:00 2022 PDT
+ Thu Sep 01 00:00:00 2022 PDT
+ Sat Oct 01 00:00:00 2022 PDT
+(10 rows)
+
+-- errors
+select * from generate_series('2020-01-01 00:00'::timestamptz,
+ '2020-01-02 03:00'::timestamptz,
+ '0 hour'::interval);
+ERROR: step size cannot equal zero
+--
+-- Test behavior with a dynamic (time-varying) timezone abbreviation.
+-- These tests rely on the knowledge that MSK (Europe/Moscow standard time)
+-- moved forwards in Mar 2011 and backwards again in Oct 2014.
+--
+SET TimeZone to 'UTC';
+SELECT '2011-03-27 00:00:00 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 21:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 01:00:00 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 22:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 01:59:59 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 22:59:59 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:00:00 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 23:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:00:01 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 23:00:01 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:59:59 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 23:59:59 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 03:00:00 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 23:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 03:00:01 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 23:00:01 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 04:00:00 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Mar 27 00:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 00:00:00 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 21:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 01:00:00 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 22:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 01:59:59 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 22:59:59 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:00:00 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 22:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:00:01 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 22:00:01 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:59:59 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 22:59:59 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 03:00:00 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 23:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 03:00:01 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Mar 26 23:00:01 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 04:00:00 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Mar 27 00:00:00 2011 UTC
+(1 row)
+
+SELECT '2014-10-26 00:00:00 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 20:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 00:59:59 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 20:59:59 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 01:00:00 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 22:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 01:00:01 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 22:00:01 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 02:00:00 Europe/Moscow'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 23:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 00:00:00 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 20:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 00:59:59 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 20:59:59 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 01:00:00 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 22:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 01:00:01 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 22:00:01 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 02:00:00 MSK'::timestamptz;
+ timestamptz
+------------------------------
+ Sat Oct 25 23:00:00 2014 UTC
+(1 row)
+
+SELECT '2011-03-27 00:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Mar 26 21:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 01:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Mar 26 22:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 01:59:59'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Mar 26 22:59:59 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Mar 26 23:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:00:01'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Mar 26 23:00:01 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:59:59'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Mar 26 23:59:59 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 03:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Mar 26 23:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 03:00:01'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Mar 26 23:00:01 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 04:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sun Mar 27 00:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 00:00:00'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Mar 26 21:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 01:00:00'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Mar 26 22:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 01:59:59'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Mar 26 22:59:59 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:00:00'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Mar 26 22:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:00:01'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Mar 26 22:00:01 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 02:59:59'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Mar 26 22:59:59 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 03:00:00'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Mar 26 23:00:00 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 03:00:01'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Mar 26 23:00:01 2011 UTC
+(1 row)
+
+SELECT '2011-03-27 04:00:00'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sun Mar 27 00:00:00 2011 UTC
+(1 row)
+
+SELECT '2014-10-26 00:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Oct 25 20:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 00:59:59'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Oct 25 20:59:59 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 01:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Oct 25 22:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 01:00:01'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Oct 25 22:00:01 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 02:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+ timezone
+------------------------------
+ Sat Oct 25 23:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 00:00:00'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Oct 25 20:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 00:59:59'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Oct 25 20:59:59 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 01:00:00'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Oct 25 22:00:00 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 01:00:01'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Oct 25 22:00:01 2014 UTC
+(1 row)
+
+SELECT '2014-10-26 02:00:00'::timestamp AT TIME ZONE 'MSK';
+ timezone
+------------------------------
+ Sat Oct 25 23:00:00 2014 UTC
+(1 row)
+
+SELECT make_timestamptz(2014, 10, 26, 0, 0, 0, 'MSK');
+ make_timestamptz
+------------------------------
+ Sat Oct 25 20:00:00 2014 UTC
+(1 row)
+
+SELECT make_timestamptz(2014, 10, 26, 1, 0, 0, 'MSK');
+ make_timestamptz
+------------------------------
+ Sat Oct 25 22:00:00 2014 UTC
+(1 row)
+
+SELECT to_timestamp( 0); -- 1970-01-01 00:00:00+00
+ to_timestamp
+------------------------------
+ Thu Jan 01 00:00:00 1970 UTC
+(1 row)
+
+SELECT to_timestamp( 946684800); -- 2000-01-01 00:00:00+00
+ to_timestamp
+------------------------------
+ Sat Jan 01 00:00:00 2000 UTC
+(1 row)
+
+SELECT to_timestamp(1262349296.7890123); -- 2010-01-01 12:34:56.789012+00
+ to_timestamp
+-------------------------------------
+ Fri Jan 01 12:34:56.789012 2010 UTC
+(1 row)
+
+-- edge cases
+SELECT to_timestamp(-210866803200); -- 4714-11-24 00:00:00+00 BC
+ to_timestamp
+---------------------------------
+ Mon Nov 24 00:00:00 4714 UTC BC
+(1 row)
+
+-- upper limit varies between integer and float timestamps, so hard to test
+-- nonfinite values
+SELECT to_timestamp(' Infinity'::float);
+ to_timestamp
+--------------
+ infinity
+(1 row)
+
+SELECT to_timestamp('-Infinity'::float);
+ to_timestamp
+--------------
+ -infinity
+(1 row)
+
+SELECT to_timestamp('NaN'::float);
+ERROR: timestamp cannot be NaN
+SET TimeZone to 'Europe/Moscow';
+SELECT '2011-03-26 21:00:00 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Mar 27 00:00:00 2011 MSK
+(1 row)
+
+SELECT '2011-03-26 22:00:00 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Mar 27 01:00:00 2011 MSK
+(1 row)
+
+SELECT '2011-03-26 22:59:59 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Mar 27 01:59:59 2011 MSK
+(1 row)
+
+SELECT '2011-03-26 23:00:00 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Mar 27 03:00:00 2011 MSK
+(1 row)
+
+SELECT '2011-03-26 23:00:01 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Mar 27 03:00:01 2011 MSK
+(1 row)
+
+SELECT '2011-03-26 23:59:59 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Mar 27 03:59:59 2011 MSK
+(1 row)
+
+SELECT '2011-03-27 00:00:00 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Mar 27 04:00:00 2011 MSK
+(1 row)
+
+SELECT '2014-10-25 21:00:00 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Oct 26 01:00:00 2014 MSK
+(1 row)
+
+SELECT '2014-10-25 21:59:59 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Oct 26 01:59:59 2014 MSK
+(1 row)
+
+SELECT '2014-10-25 22:00:00 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Oct 26 01:00:00 2014 MSK
+(1 row)
+
+SELECT '2014-10-25 22:00:01 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Oct 26 01:00:01 2014 MSK
+(1 row)
+
+SELECT '2014-10-25 23:00:00 UTC'::timestamptz;
+ timestamptz
+------------------------------
+ Sun Oct 26 02:00:00 2014 MSK
+(1 row)
+
+RESET TimeZone;
+SELECT '2011-03-26 21:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Mar 27 00:00:00 2011
+(1 row)
+
+SELECT '2011-03-26 22:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Mar 27 01:00:00 2011
+(1 row)
+
+SELECT '2011-03-26 22:59:59 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Mar 27 01:59:59 2011
+(1 row)
+
+SELECT '2011-03-26 23:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Mar 27 03:00:00 2011
+(1 row)
+
+SELECT '2011-03-26 23:00:01 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Mar 27 03:00:01 2011
+(1 row)
+
+SELECT '2011-03-26 23:59:59 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Mar 27 03:59:59 2011
+(1 row)
+
+SELECT '2011-03-27 00:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Mar 27 04:00:00 2011
+(1 row)
+
+SELECT '2014-10-25 21:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Oct 26 01:00:00 2014
+(1 row)
+
+SELECT '2014-10-25 21:59:59 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Oct 26 01:59:59 2014
+(1 row)
+
+SELECT '2014-10-25 22:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Oct 26 01:00:00 2014
+(1 row)
+
+SELECT '2014-10-25 22:00:01 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Oct 26 01:00:01 2014
+(1 row)
+
+SELECT '2014-10-25 23:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+ timezone
+--------------------------
+ Sun Oct 26 02:00:00 2014
+(1 row)
+
+SELECT '2011-03-26 21:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Mar 27 00:00:00 2011
+(1 row)
+
+SELECT '2011-03-26 22:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Mar 27 01:00:00 2011
+(1 row)
+
+SELECT '2011-03-26 22:59:59 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Mar 27 01:59:59 2011
+(1 row)
+
+SELECT '2011-03-26 23:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Mar 27 03:00:00 2011
+(1 row)
+
+SELECT '2011-03-26 23:00:01 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Mar 27 03:00:01 2011
+(1 row)
+
+SELECT '2011-03-26 23:59:59 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Mar 27 03:59:59 2011
+(1 row)
+
+SELECT '2011-03-27 00:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Mar 27 04:00:00 2011
+(1 row)
+
+SELECT '2014-10-25 21:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Oct 26 01:00:00 2014
+(1 row)
+
+SELECT '2014-10-25 21:59:59 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Oct 26 01:59:59 2014
+(1 row)
+
+SELECT '2014-10-25 22:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Oct 26 01:00:00 2014
+(1 row)
+
+SELECT '2014-10-25 22:00:01 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Oct 26 01:00:01 2014
+(1 row)
+
+SELECT '2014-10-25 23:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+ timezone
+--------------------------
+ Sun Oct 26 02:00:00 2014
+(1 row)
+
+--
+-- Test that AT TIME ZONE isn't misoptimized when using an index (bug #14504)
+--
+create temp table tmptz (f1 timestamptz primary key);
+insert into tmptz values ('2017-01-18 00:00+00');
+explain (costs off)
+select * from tmptz where f1 at time zone 'utc' = '2017-01-18 00:00';
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Seq Scan on tmptz
+ Filter: ((f1 AT TIME ZONE 'utc'::text) = 'Wed Jan 18 00:00:00 2017'::timestamp without time zone)
+(2 rows)
+
+select * from tmptz where f1 at time zone 'utc' = '2017-01-18 00:00';
+ f1
+------------------------------
+ Tue Jan 17 16:00:00 2017 PST
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/timestamptz.sql b/ydb/library/yql/tests/postgresql/original/cases/timestamptz.sql
new file mode 100644
index 0000000000..bce70fb21f
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/timestamptz.sql
@@ -0,0 +1,567 @@
+--
+-- TIMESTAMPTZ
+--
+
+CREATE TABLE TIMESTAMPTZ_TBL (d1 timestamp(2) with time zone);
+
+-- Test shorthand input values
+-- We can't just "select" the results since they aren't constants; test for
+-- equality instead. We can do that by running the test inside a transaction
+-- block, within which the value of 'now' shouldn't change, and so these
+-- related values shouldn't either.
+
+BEGIN;
+
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('today');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('yesterday');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow EST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow zulu');
+
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'today';
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'tomorrow';
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'yesterday';
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'tomorrow EST';
+SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'tomorrow zulu';
+
+COMMIT;
+
+DELETE FROM TIMESTAMPTZ_TBL;
+
+-- Verify that 'now' *does* change over a reasonable interval such as 100 msec,
+-- and that it doesn't change over the same interval within a transaction block
+
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+
+BEGIN;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('now');
+SELECT pg_sleep(0.1);
+SELECT count(*) AS two FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp(2) with time zone 'now';
+SELECT count(d1) AS three, count(DISTINCT d1) AS two FROM TIMESTAMPTZ_TBL;
+COMMIT;
+
+TRUNCATE TIMESTAMPTZ_TBL;
+
+-- Special values
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('-infinity');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('infinity');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('epoch');
+
+-- Postgres v6.0 standard output format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST');
+
+-- Variations on Postgres v6.1 standard output format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST');
+
+-- ISO 8601 format
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02 03:04:05');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-08');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 -08:00');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 -0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 17:32:01 -07:00');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2001-09-22T18:19:20');
+
+-- POSIX format (note that the timezone abbrev is just decoration here)
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 08:14:01 GMT+8');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 13:14:02 GMT-1');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 12:14:03 GMT-2');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 03:14:04 PST+8');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 02:14:05 MST+7:00');
+
+-- Variations for acceptable input formats
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997 -0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 5:32PM 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997/02/10 17:32:01-0800');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('02-10-1997 17:32:01 PST');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 PST');
+set datestyle to ymd;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('97FEB10 5:32:01PM UTC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('97/02/10 17:32:01 UTC');
+reset datestyle;
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997.041 17:32:01 UTC');
+
+-- timestamps at different timezones
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 America/New_York');
+SELECT '19970210 173201' AT TIME ZONE 'America/New_York';
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America/New_York');
+SELECT '19970710 173201' AT TIME ZONE 'America/New_York';
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America/Does_not_exist');
+SELECT '19970710 173201' AT TIME ZONE 'America/Does_not_exist';
+
+-- Daylight saving time for timestamps beyond 32-bit time_t range.
+SELECT '20500710 173201 Europe/Helsinki'::timestamptz; -- DST
+SELECT '20500110 173201 Europe/Helsinki'::timestamptz; -- non-DST
+
+SELECT '205000-07-10 17:32:01 Europe/Helsinki'::timestamptz; -- DST
+SELECT '205000-01-10 17:32:01 Europe/Helsinki'::timestamptz; -- non-DST
+
+-- Check date conversion and date arithmetic
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 18:32:01 PDT');
+
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 11 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 12 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 13 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 14 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 15 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997');
+
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097 BC');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0597');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1697');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1797');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1897');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 2097');
+
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1996');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1997');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1999');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2000');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 2000');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2001');
+
+-- Currently unsupported syntax and ranges
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 -0097');
+INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 5097 BC');
+
+-- Alternative field order that we've historically supported (sort of)
+-- with regular and POSIXy timezone specs
+SELECT 'Wed Jul 11 10:51:14 America/New_York 2001'::timestamptz;
+SELECT 'Wed Jul 11 10:51:14 GMT-4 2001'::timestamptz;
+SELECT 'Wed Jul 11 10:51:14 GMT+4 2001'::timestamptz;
+SELECT 'Wed Jul 11 10:51:14 PST-03:00 2001'::timestamptz;
+SELECT 'Wed Jul 11 10:51:14 PST+03:00 2001'::timestamptz;
+
+SELECT d1 FROM TIMESTAMPTZ_TBL;
+
+-- Check behavior at the boundaries of the timestamp range
+SELECT '4714-11-24 00:00:00+00 BC'::timestamptz;
+SELECT '4714-11-23 16:00:00-08 BC'::timestamptz;
+SELECT 'Sun Nov 23 16:00:00 4714 PST BC'::timestamptz;
+SELECT '4714-11-23 23:59:59+00 BC'::timestamptz; -- out of range
+SELECT '294276-12-31 23:59:59+00'::timestamptz;
+SELECT '294276-12-31 15:59:59-08'::timestamptz;
+SELECT '294277-01-01 00:00:00+00'::timestamptz; -- out of range
+SELECT '294277-12-31 16:00:00-08'::timestamptz; -- out of range
+
+-- Demonstrate functions and operators
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 > timestamp with time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 < timestamp with time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 = timestamp with time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 != timestamp with time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 <= timestamp with time zone '1997-01-02';
+
+SELECT d1 FROM TIMESTAMPTZ_TBL
+ WHERE d1 >= timestamp with time zone '1997-01-02';
+
+SELECT d1 - timestamp with time zone '1997-01-02' AS diff
+ FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
+
+SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS week_trunc;
+
+SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name
+SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'GMT') as gmt_trunc; -- fixed-offset abbreviation
+SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET') as vet_trunc; -- variable-offset abbreviation
+
+-- verify date_bin behaves the same as date_trunc for relevant intervals
+SELECT
+ str,
+ interval,
+ date_trunc(str, ts, 'Australia/Sydney') = date_bin(interval::interval, ts, timestamp with time zone '2001-01-01+11') AS equal
+FROM (
+ VALUES
+ ('day', '1 d'),
+ ('hour', '1 h'),
+ ('minute', '1 m'),
+ ('second', '1 s'),
+ ('millisecond', '1 ms'),
+ ('microsecond', '1 us')
+) intervals (str, interval),
+(VALUES (timestamptz '2020-02-29 15:44:17.71393+00')) ts (ts);
+
+-- bin timestamps into arbitrary intervals
+SELECT
+ interval,
+ ts,
+ origin,
+ date_bin(interval::interval, ts, origin)
+FROM (
+ VALUES
+ ('15 days'),
+ ('2 hours'),
+ ('1 hour 30 minutes'),
+ ('15 minutes'),
+ ('10 seconds'),
+ ('100 milliseconds'),
+ ('250 microseconds')
+) intervals (interval),
+(VALUES (timestamptz '2020-02-11 15:44:17.71393')) ts (ts),
+(VALUES (timestamptz '2001-01-01')) origin (origin);
+
+-- shift bins using the origin parameter:
+SELECT date_bin('5 min'::interval, timestamptz '2020-02-01 01:01:01+00', timestamptz '2020-02-01 00:02:30+00');
+
+-- disallow intervals with months or years
+SELECT date_bin('5 months'::interval, timestamp with time zone '2020-02-01 01:01:01+00', timestamp with time zone '2001-01-01+00');
+SELECT date_bin('5 years'::interval, timestamp with time zone '2020-02-01 01:01:01+00', timestamp with time zone '2001-01-01+00');
+
+-- disallow zero intervals
+SELECT date_bin('0 days'::interval, timestamp with time zone '1970-01-01 01:00:00+00' , timestamp with time zone '1970-01-01 00:00:00+00');
+
+-- disallow negative intervals
+SELECT date_bin('-2 days'::interval, timestamp with time zone '1970-01-01 01:00:00+00' , timestamp with time zone '1970-01-01 00:00:00+00');
+
+-- Test casting within a BETWEEN qualifier
+SELECT d1 - timestamp with time zone '1997-01-02' AS diff
+ FROM TIMESTAMPTZ_TBL
+ WHERE d1 BETWEEN timestamp with time zone '1902-01-01' AND timestamp with time zone '2038-01-01';
+
+-- DATE_PART (timestamptz_part)
+SELECT d1 as timestamptz,
+ date_part( 'year', d1) AS year, date_part( 'month', d1) AS month,
+ date_part( 'day', d1) AS day, date_part( 'hour', d1) AS hour,
+ date_part( 'minute', d1) AS minute, date_part( 'second', d1) AS second
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT d1 as timestamptz,
+ date_part( 'quarter', d1) AS quarter, date_part( 'msec', d1) AS msec,
+ date_part( 'usec', d1) AS usec
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT d1 as timestamptz,
+ date_part( 'isoyear', d1) AS isoyear, date_part( 'week', d1) AS week,
+ date_part( 'isodow', d1) AS isodow, date_part( 'dow', d1) AS dow,
+ date_part( 'doy', d1) AS doy
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT d1 as timestamptz,
+ date_part( 'decade', d1) AS decade,
+ date_part( 'century', d1) AS century,
+ date_part( 'millennium', d1) AS millennium,
+ round(date_part( 'julian', d1)) AS julian,
+ date_part( 'epoch', d1) AS epoch
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT d1 as timestamptz,
+ date_part( 'timezone', d1) AS timezone,
+ date_part( 'timezone_hour', d1) AS timezone_hour,
+ date_part( 'timezone_minute', d1) AS timezone_minute
+ FROM TIMESTAMPTZ_TBL;
+
+-- extract implementation is mostly the same as date_part, so only
+-- test a few cases for additional coverage.
+SELECT d1 as "timestamp",
+ extract(microseconds from d1) AS microseconds,
+ extract(milliseconds from d1) AS milliseconds,
+ extract(seconds from d1) AS seconds,
+ round(extract(julian from d1)) AS julian,
+ extract(epoch from d1) AS epoch
+ FROM TIMESTAMPTZ_TBL;
+
+-- value near upper bound uses special case in code
+SELECT date_part('epoch', '294270-01-01 00:00:00+00'::timestamptz);
+SELECT extract(epoch from '294270-01-01 00:00:00+00'::timestamptz);
+-- another internal overflow test case
+SELECT extract(epoch from '5000-01-01 00:00:00+00'::timestamptz);
+
+-- TO_CHAR()
+SELECT to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, 'HH HH12 HH24 MI SS SSSS')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, 'HH24--text--MI--text--SS')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, 'YYYYTH YYYYth Jth')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, 'IYYY IYY IY I IW IDDD ID')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID')
+ FROM TIMESTAMPTZ_TBL;
+
+SELECT to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff5 ff6 MS US')
+ FROM (VALUES
+ ('2018-11-02 12:34:56'::timestamptz),
+ ('2018-11-02 12:34:56.78'),
+ ('2018-11-02 12:34:56.78901'),
+ ('2018-11-02 12:34:56.78901234')
+ ) d(d);
+
+-- Check OF, TZH, TZM with various zone offsets, particularly fractional hours
+SET timezone = '00:00';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+SET timezone = '+02:00';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+SET timezone = '-13:00';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+SET timezone = '-00:30';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+SET timezone = '00:30';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+SET timezone = '-04:30';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+SET timezone = '04:30';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+SET timezone = '-04:15';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+SET timezone = '04:15';
+SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM";
+RESET timezone;
+
+CREATE TABLE TIMESTAMPTZ_TST (a int , b timestamptz);
+
+-- Test year field value with len > 4
+INSERT INTO TIMESTAMPTZ_TST VALUES(1, 'Sat Mar 12 23:58:48 1000 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(2, 'Sat Mar 12 23:58:48 10000 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(3, 'Sat Mar 12 23:58:48 100000 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(3, '10000 Mar 12 23:58:48 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(4, '100000312 23:58:48 IST');
+INSERT INTO TIMESTAMPTZ_TST VALUES(4, '1000000312 23:58:48 IST');
+--Verify data
+SELECT * FROM TIMESTAMPTZ_TST ORDER BY a;
+--Cleanup
+DROP TABLE TIMESTAMPTZ_TST;
+
+-- test timestamptz constructors
+set TimeZone to 'America/New_York';
+
+-- numeric timezone
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33);
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '+2');
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '-2');
+WITH tzs (tz) AS (VALUES
+ ('+1'), ('+1:'), ('+1:0'), ('+100'), ('+1:00'), ('+01:00'),
+ ('+10'), ('+1000'), ('+10:'), ('+10:0'), ('+10:00'), ('+10:00:'),
+ ('+10:00:1'), ('+10:00:01'),
+ ('+10:00:10'))
+ SELECT make_timestamptz(2010, 2, 27, 3, 45, 00, tz), tz FROM tzs;
+
+-- these should fail
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '2');
+SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, '+16');
+SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, '-16');
+
+-- should be true
+SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '+2') = '1973-07-15 08:15:55.33+02'::timestamptz;
+
+-- full timezone names
+SELECT make_timestamptz(2014, 12, 10, 0, 0, 0, 'Europe/Prague') = timestamptz '2014-12-10 00:00:00 Europe/Prague';
+SELECT make_timestamptz(2014, 12, 10, 0, 0, 0, 'Europe/Prague') AT TIME ZONE 'UTC';
+SELECT make_timestamptz(1846, 12, 10, 0, 0, 0, 'Asia/Manila') AT TIME ZONE 'UTC';
+SELECT make_timestamptz(1881, 12, 10, 0, 0, 0, 'Europe/Paris') AT TIME ZONE 'UTC';
+SELECT make_timestamptz(1910, 12, 24, 0, 0, 0, 'Nehwon/Lankhmar');
+
+-- abbreviations
+SELECT make_timestamptz(2008, 12, 10, 10, 10, 10, 'EST');
+SELECT make_timestamptz(2008, 12, 10, 10, 10, 10, 'EDT');
+SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, 'PST8PDT');
+
+RESET TimeZone;
+
+-- generate_series for timestamptz
+select * from generate_series('2020-01-01 00:00'::timestamptz,
+ '2020-01-02 03:00'::timestamptz,
+ '1 hour'::interval);
+-- the LIMIT should allow this to terminate in a reasonable amount of time
+-- (but that unfortunately doesn't work yet for SELECT * FROM ...)
+select generate_series('2022-01-01 00:00'::timestamptz,
+ 'infinity'::timestamptz,
+ '1 month'::interval) limit 10;
+-- errors
+select * from generate_series('2020-01-01 00:00'::timestamptz,
+ '2020-01-02 03:00'::timestamptz,
+ '0 hour'::interval);
+
+--
+-- Test behavior with a dynamic (time-varying) timezone abbreviation.
+-- These tests rely on the knowledge that MSK (Europe/Moscow standard time)
+-- moved forwards in Mar 2011 and backwards again in Oct 2014.
+--
+
+SET TimeZone to 'UTC';
+
+SELECT '2011-03-27 00:00:00 Europe/Moscow'::timestamptz;
+SELECT '2011-03-27 01:00:00 Europe/Moscow'::timestamptz;
+SELECT '2011-03-27 01:59:59 Europe/Moscow'::timestamptz;
+SELECT '2011-03-27 02:00:00 Europe/Moscow'::timestamptz;
+SELECT '2011-03-27 02:00:01 Europe/Moscow'::timestamptz;
+SELECT '2011-03-27 02:59:59 Europe/Moscow'::timestamptz;
+SELECT '2011-03-27 03:00:00 Europe/Moscow'::timestamptz;
+SELECT '2011-03-27 03:00:01 Europe/Moscow'::timestamptz;
+SELECT '2011-03-27 04:00:00 Europe/Moscow'::timestamptz;
+
+SELECT '2011-03-27 00:00:00 MSK'::timestamptz;
+SELECT '2011-03-27 01:00:00 MSK'::timestamptz;
+SELECT '2011-03-27 01:59:59 MSK'::timestamptz;
+SELECT '2011-03-27 02:00:00 MSK'::timestamptz;
+SELECT '2011-03-27 02:00:01 MSK'::timestamptz;
+SELECT '2011-03-27 02:59:59 MSK'::timestamptz;
+SELECT '2011-03-27 03:00:00 MSK'::timestamptz;
+SELECT '2011-03-27 03:00:01 MSK'::timestamptz;
+SELECT '2011-03-27 04:00:00 MSK'::timestamptz;
+
+SELECT '2014-10-26 00:00:00 Europe/Moscow'::timestamptz;
+SELECT '2014-10-26 00:59:59 Europe/Moscow'::timestamptz;
+SELECT '2014-10-26 01:00:00 Europe/Moscow'::timestamptz;
+SELECT '2014-10-26 01:00:01 Europe/Moscow'::timestamptz;
+SELECT '2014-10-26 02:00:00 Europe/Moscow'::timestamptz;
+
+SELECT '2014-10-26 00:00:00 MSK'::timestamptz;
+SELECT '2014-10-26 00:59:59 MSK'::timestamptz;
+SELECT '2014-10-26 01:00:00 MSK'::timestamptz;
+SELECT '2014-10-26 01:00:01 MSK'::timestamptz;
+SELECT '2014-10-26 02:00:00 MSK'::timestamptz;
+
+SELECT '2011-03-27 00:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-27 01:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-27 01:59:59'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-27 02:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-27 02:00:01'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-27 02:59:59'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-27 03:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-27 03:00:01'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-27 04:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+
+SELECT '2011-03-27 00:00:00'::timestamp AT TIME ZONE 'MSK';
+SELECT '2011-03-27 01:00:00'::timestamp AT TIME ZONE 'MSK';
+SELECT '2011-03-27 01:59:59'::timestamp AT TIME ZONE 'MSK';
+SELECT '2011-03-27 02:00:00'::timestamp AT TIME ZONE 'MSK';
+SELECT '2011-03-27 02:00:01'::timestamp AT TIME ZONE 'MSK';
+SELECT '2011-03-27 02:59:59'::timestamp AT TIME ZONE 'MSK';
+SELECT '2011-03-27 03:00:00'::timestamp AT TIME ZONE 'MSK';
+SELECT '2011-03-27 03:00:01'::timestamp AT TIME ZONE 'MSK';
+SELECT '2011-03-27 04:00:00'::timestamp AT TIME ZONE 'MSK';
+
+SELECT '2014-10-26 00:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2014-10-26 00:59:59'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2014-10-26 01:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2014-10-26 01:00:01'::timestamp AT TIME ZONE 'Europe/Moscow';
+SELECT '2014-10-26 02:00:00'::timestamp AT TIME ZONE 'Europe/Moscow';
+
+SELECT '2014-10-26 00:00:00'::timestamp AT TIME ZONE 'MSK';
+SELECT '2014-10-26 00:59:59'::timestamp AT TIME ZONE 'MSK';
+SELECT '2014-10-26 01:00:00'::timestamp AT TIME ZONE 'MSK';
+SELECT '2014-10-26 01:00:01'::timestamp AT TIME ZONE 'MSK';
+SELECT '2014-10-26 02:00:00'::timestamp AT TIME ZONE 'MSK';
+
+SELECT make_timestamptz(2014, 10, 26, 0, 0, 0, 'MSK');
+SELECT make_timestamptz(2014, 10, 26, 1, 0, 0, 'MSK');
+
+SELECT to_timestamp( 0); -- 1970-01-01 00:00:00+00
+SELECT to_timestamp( 946684800); -- 2000-01-01 00:00:00+00
+SELECT to_timestamp(1262349296.7890123); -- 2010-01-01 12:34:56.789012+00
+-- edge cases
+SELECT to_timestamp(-210866803200); -- 4714-11-24 00:00:00+00 BC
+-- upper limit varies between integer and float timestamps, so hard to test
+-- nonfinite values
+SELECT to_timestamp(' Infinity'::float);
+SELECT to_timestamp('-Infinity'::float);
+SELECT to_timestamp('NaN'::float);
+
+
+SET TimeZone to 'Europe/Moscow';
+
+SELECT '2011-03-26 21:00:00 UTC'::timestamptz;
+SELECT '2011-03-26 22:00:00 UTC'::timestamptz;
+SELECT '2011-03-26 22:59:59 UTC'::timestamptz;
+SELECT '2011-03-26 23:00:00 UTC'::timestamptz;
+SELECT '2011-03-26 23:00:01 UTC'::timestamptz;
+SELECT '2011-03-26 23:59:59 UTC'::timestamptz;
+SELECT '2011-03-27 00:00:00 UTC'::timestamptz;
+
+SELECT '2014-10-25 21:00:00 UTC'::timestamptz;
+SELECT '2014-10-25 21:59:59 UTC'::timestamptz;
+SELECT '2014-10-25 22:00:00 UTC'::timestamptz;
+SELECT '2014-10-25 22:00:01 UTC'::timestamptz;
+SELECT '2014-10-25 23:00:00 UTC'::timestamptz;
+
+RESET TimeZone;
+
+SELECT '2011-03-26 21:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-26 22:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-26 22:59:59 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-26 23:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-26 23:00:01 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-26 23:59:59 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2011-03-27 00:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+
+SELECT '2014-10-25 21:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2014-10-25 21:59:59 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2014-10-25 22:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2014-10-25 22:00:01 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+SELECT '2014-10-25 23:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow';
+
+SELECT '2011-03-26 21:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2011-03-26 22:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2011-03-26 22:59:59 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2011-03-26 23:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2011-03-26 23:00:01 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2011-03-26 23:59:59 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2011-03-27 00:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+
+SELECT '2014-10-25 21:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2014-10-25 21:59:59 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2014-10-25 22:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2014-10-25 22:00:01 UTC'::timestamptz AT TIME ZONE 'MSK';
+SELECT '2014-10-25 23:00:00 UTC'::timestamptz AT TIME ZONE 'MSK';
+
+--
+-- Test that AT TIME ZONE isn't misoptimized when using an index (bug #14504)
+--
+create temp table tmptz (f1 timestamptz primary key);
+insert into tmptz values ('2017-01-18 00:00+00');
+explain (costs off)
+select * from tmptz where f1 at time zone 'utc' = '2017-01-18 00:00';
+select * from tmptz where f1 at time zone 'utc' = '2017-01-18 00:00';
diff --git a/ydb/library/yql/tests/postgresql/original/cases/timetz.out b/ydb/library/yql/tests/postgresql/original/cases/timetz.out
new file mode 100644
index 0000000000..f4960c0166
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/timetz.out
@@ -0,0 +1,233 @@
+--
+-- TIMETZ
+--
+CREATE TABLE TIMETZ_TBL (f1 time(2) with time zone);
+INSERT INTO TIMETZ_TBL VALUES ('00:01 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('01:00 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('02:03 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('07:07 PST');
+INSERT INTO TIMETZ_TBL VALUES ('08:08 EDT');
+INSERT INTO TIMETZ_TBL VALUES ('11:59 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('12:00 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('12:01 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('23:59 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('11:59:59.99 PM PDT');
+INSERT INTO TIMETZ_TBL VALUES ('2003-03-07 15:36:39 America/New_York');
+INSERT INTO TIMETZ_TBL VALUES ('2003-07-07 15:36:39 America/New_York');
+-- this should fail (the timezone offset is not known)
+INSERT INTO TIMETZ_TBL VALUES ('15:36:39 America/New_York');
+ERROR: invalid input syntax for type time with time zone: "15:36:39 America/New_York"
+LINE 1: INSERT INTO TIMETZ_TBL VALUES ('15:36:39 America/New_York');
+ ^
+-- this should fail (timezone not specified without a date)
+INSERT INTO TIMETZ_TBL VALUES ('15:36:39 m2');
+ERROR: invalid input syntax for type time with time zone: "15:36:39 m2"
+LINE 1: INSERT INTO TIMETZ_TBL VALUES ('15:36:39 m2');
+ ^
+-- this should fail (dynamic timezone abbreviation without a date)
+INSERT INTO TIMETZ_TBL VALUES ('15:36:39 MSK m2');
+ERROR: invalid input syntax for type time with time zone: "15:36:39 MSK m2"
+LINE 1: INSERT INTO TIMETZ_TBL VALUES ('15:36:39 MSK m2');
+ ^
+SELECT f1 AS "Time TZ" FROM TIMETZ_TBL;
+ Time TZ
+----------------
+ 00:01:00-07
+ 01:00:00-07
+ 02:03:00-07
+ 07:07:00-08
+ 08:08:00-04
+ 11:59:00-07
+ 12:00:00-07
+ 12:01:00-07
+ 23:59:00-07
+ 23:59:59.99-07
+ 15:36:39-05
+ 15:36:39-04
+(12 rows)
+
+SELECT f1 AS "Three" FROM TIMETZ_TBL WHERE f1 < '05:06:07-07';
+ Three
+-------------
+ 00:01:00-07
+ 01:00:00-07
+ 02:03:00-07
+(3 rows)
+
+SELECT f1 AS "Seven" FROM TIMETZ_TBL WHERE f1 > '05:06:07-07';
+ Seven
+----------------
+ 07:07:00-08
+ 08:08:00-04
+ 11:59:00-07
+ 12:00:00-07
+ 12:01:00-07
+ 23:59:00-07
+ 23:59:59.99-07
+ 15:36:39-05
+ 15:36:39-04
+(9 rows)
+
+SELECT f1 AS "None" FROM TIMETZ_TBL WHERE f1 < '00:00-07';
+ None
+------
+(0 rows)
+
+SELECT f1 AS "Ten" FROM TIMETZ_TBL WHERE f1 >= '00:00-07';
+ Ten
+----------------
+ 00:01:00-07
+ 01:00:00-07
+ 02:03:00-07
+ 07:07:00-08
+ 08:08:00-04
+ 11:59:00-07
+ 12:00:00-07
+ 12:01:00-07
+ 23:59:00-07
+ 23:59:59.99-07
+ 15:36:39-05
+ 15:36:39-04
+(12 rows)
+
+-- Check edge cases
+SELECT '23:59:59.999999 PDT'::timetz;
+ timetz
+--------------------
+ 23:59:59.999999-07
+(1 row)
+
+SELECT '23:59:59.9999999 PDT'::timetz; -- rounds up
+ timetz
+-------------
+ 24:00:00-07
+(1 row)
+
+SELECT '23:59:60 PDT'::timetz; -- rounds up
+ timetz
+-------------
+ 24:00:00-07
+(1 row)
+
+SELECT '24:00:00 PDT'::timetz; -- allowed
+ timetz
+-------------
+ 24:00:00-07
+(1 row)
+
+SELECT '24:00:00.01 PDT'::timetz; -- not allowed
+ERROR: date/time field value out of range: "24:00:00.01 PDT"
+LINE 1: SELECT '24:00:00.01 PDT'::timetz;
+ ^
+SELECT '23:59:60.01 PDT'::timetz; -- not allowed
+ERROR: date/time field value out of range: "23:59:60.01 PDT"
+LINE 1: SELECT '23:59:60.01 PDT'::timetz;
+ ^
+SELECT '24:01:00 PDT'::timetz; -- not allowed
+ERROR: date/time field value out of range: "24:01:00 PDT"
+LINE 1: SELECT '24:01:00 PDT'::timetz;
+ ^
+SELECT '25:00:00 PDT'::timetz; -- not allowed
+ERROR: date/time field value out of range: "25:00:00 PDT"
+LINE 1: SELECT '25:00:00 PDT'::timetz;
+ ^
+--
+-- TIME simple math
+--
+-- We now make a distinction between time and intervals,
+-- and adding two times together makes no sense at all.
+-- Leave in one query to show that it is rejected,
+-- and do the rest of the testing in horology.sql
+-- where we do mixed-type arithmetic. - thomas 2000-12-02
+SELECT f1 + time with time zone '00:01' AS "Illegal" FROM TIMETZ_TBL;
+ERROR: operator does not exist: time with time zone + time with time zone
+LINE 1: SELECT f1 + time with time zone '00:01' AS "Illegal" FROM TI...
+ ^
+HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
+--
+-- test EXTRACT
+--
+SELECT EXTRACT(MICROSECOND FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ extract
+----------
+ 25575401
+(1 row)
+
+SELECT EXTRACT(MILLISECOND FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ extract
+-----------
+ 25575.401
+(1 row)
+
+SELECT EXTRACT(SECOND FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ extract
+-----------
+ 25.575401
+(1 row)
+
+SELECT EXTRACT(MINUTE FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ extract
+---------
+ 30
+(1 row)
+
+SELECT EXTRACT(HOUR FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ extract
+---------
+ 13
+(1 row)
+
+SELECT EXTRACT(DAY FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); -- error
+ERROR: "time with time zone" units "day" not recognized
+SELECT EXTRACT(FORTNIGHT FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); -- error
+ERROR: "time with time zone" units "fortnight" not recognized
+SELECT EXTRACT(TIMEZONE FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04:30');
+ extract
+---------
+ -16200
+(1 row)
+
+SELECT EXTRACT(TIMEZONE_HOUR FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04:30');
+ extract
+---------
+ -4
+(1 row)
+
+SELECT EXTRACT(TIMEZONE_MINUTE FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04:30');
+ extract
+---------
+ -30
+(1 row)
+
+SELECT EXTRACT(EPOCH FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ extract
+--------------
+ 63025.575401
+(1 row)
+
+-- date_part implementation is mostly the same as extract, so only
+-- test a few cases for additional coverage.
+SELECT date_part('microsecond', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ date_part
+-----------
+ 25575401
+(1 row)
+
+SELECT date_part('millisecond', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ date_part
+-----------
+ 25575.401
+(1 row)
+
+SELECT date_part('second', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ date_part
+-----------
+ 25.575401
+(1 row)
+
+SELECT date_part('epoch', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+ date_part
+--------------
+ 63025.575401
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/timetz.sql b/ydb/library/yql/tests/postgresql/original/cases/timetz.sql
new file mode 100644
index 0000000000..7b70f4656c
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/timetz.sql
@@ -0,0 +1,79 @@
+--
+-- TIMETZ
+--
+
+CREATE TABLE TIMETZ_TBL (f1 time(2) with time zone);
+
+INSERT INTO TIMETZ_TBL VALUES ('00:01 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('01:00 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('02:03 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('07:07 PST');
+INSERT INTO TIMETZ_TBL VALUES ('08:08 EDT');
+INSERT INTO TIMETZ_TBL VALUES ('11:59 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('12:00 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('12:01 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('23:59 PDT');
+INSERT INTO TIMETZ_TBL VALUES ('11:59:59.99 PM PDT');
+
+INSERT INTO TIMETZ_TBL VALUES ('2003-03-07 15:36:39 America/New_York');
+INSERT INTO TIMETZ_TBL VALUES ('2003-07-07 15:36:39 America/New_York');
+-- this should fail (the timezone offset is not known)
+INSERT INTO TIMETZ_TBL VALUES ('15:36:39 America/New_York');
+-- this should fail (timezone not specified without a date)
+INSERT INTO TIMETZ_TBL VALUES ('15:36:39 m2');
+-- this should fail (dynamic timezone abbreviation without a date)
+INSERT INTO TIMETZ_TBL VALUES ('15:36:39 MSK m2');
+
+
+SELECT f1 AS "Time TZ" FROM TIMETZ_TBL;
+
+SELECT f1 AS "Three" FROM TIMETZ_TBL WHERE f1 < '05:06:07-07';
+
+SELECT f1 AS "Seven" FROM TIMETZ_TBL WHERE f1 > '05:06:07-07';
+
+SELECT f1 AS "None" FROM TIMETZ_TBL WHERE f1 < '00:00-07';
+
+SELECT f1 AS "Ten" FROM TIMETZ_TBL WHERE f1 >= '00:00-07';
+
+-- Check edge cases
+SELECT '23:59:59.999999 PDT'::timetz;
+SELECT '23:59:59.9999999 PDT'::timetz; -- rounds up
+SELECT '23:59:60 PDT'::timetz; -- rounds up
+SELECT '24:00:00 PDT'::timetz; -- allowed
+SELECT '24:00:00.01 PDT'::timetz; -- not allowed
+SELECT '23:59:60.01 PDT'::timetz; -- not allowed
+SELECT '24:01:00 PDT'::timetz; -- not allowed
+SELECT '25:00:00 PDT'::timetz; -- not allowed
+
+--
+-- TIME simple math
+--
+-- We now make a distinction between time and intervals,
+-- and adding two times together makes no sense at all.
+-- Leave in one query to show that it is rejected,
+-- and do the rest of the testing in horology.sql
+-- where we do mixed-type arithmetic. - thomas 2000-12-02
+
+SELECT f1 + time with time zone '00:01' AS "Illegal" FROM TIMETZ_TBL;
+
+--
+-- test EXTRACT
+--
+SELECT EXTRACT(MICROSECOND FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+SELECT EXTRACT(MILLISECOND FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+SELECT EXTRACT(SECOND FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+SELECT EXTRACT(MINUTE FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+SELECT EXTRACT(HOUR FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+SELECT EXTRACT(DAY FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); -- error
+SELECT EXTRACT(FORTNIGHT FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); -- error
+SELECT EXTRACT(TIMEZONE FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04:30');
+SELECT EXTRACT(TIMEZONE_HOUR FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04:30');
+SELECT EXTRACT(TIMEZONE_MINUTE FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04:30');
+SELECT EXTRACT(EPOCH FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+
+-- date_part implementation is mostly the same as extract, so only
+-- test a few cases for additional coverage.
+SELECT date_part('microsecond', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+SELECT date_part('millisecond', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+SELECT date_part('second', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
+SELECT date_part('epoch', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04');
diff --git a/ydb/library/yql/tests/postgresql/original/cases/truncate.out b/ydb/library/yql/tests/postgresql/original/cases/truncate.out
new file mode 100644
index 0000000000..1e88e867bf
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/truncate.out
@@ -0,0 +1,594 @@
+-- Test basic TRUNCATE functionality.
+CREATE TABLE truncate_a (col1 integer primary key);
+INSERT INTO truncate_a VALUES (1);
+INSERT INTO truncate_a VALUES (2);
+SELECT * FROM truncate_a;
+ col1
+------
+ 1
+ 2
+(2 rows)
+
+-- Roll truncate back
+BEGIN;
+TRUNCATE truncate_a;
+ROLLBACK;
+SELECT * FROM truncate_a;
+ col1
+------
+ 1
+ 2
+(2 rows)
+
+-- Commit the truncate this time
+BEGIN;
+TRUNCATE truncate_a;
+COMMIT;
+SELECT * FROM truncate_a;
+ col1
+------
+(0 rows)
+
+-- Test foreign-key checks
+CREATE TABLE trunc_b (a int REFERENCES truncate_a);
+CREATE TABLE trunc_c (a serial PRIMARY KEY);
+CREATE TABLE trunc_d (a int REFERENCES trunc_c);
+CREATE TABLE trunc_e (a int REFERENCES truncate_a, b int REFERENCES trunc_c);
+TRUNCATE TABLE truncate_a; -- fail
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_b" references "truncate_a".
+HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE truncate_a,trunc_b; -- fail
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_e" references "truncate_a".
+HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE truncate_a,trunc_b,trunc_e; -- ok
+TRUNCATE TABLE truncate_a,trunc_e; -- fail
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_b" references "truncate_a".
+HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE trunc_c; -- fail
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_d" references "trunc_c".
+HINT: Truncate table "trunc_d" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE trunc_c,trunc_d; -- fail
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_e" references "trunc_c".
+HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE trunc_c,trunc_d,trunc_e; -- ok
+TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a; -- fail
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_b" references "truncate_a".
+HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a,trunc_b; -- ok
+TRUNCATE TABLE truncate_a RESTRICT; -- fail
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_b" references "truncate_a".
+HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE truncate_a CASCADE; -- ok
+NOTICE: truncate cascades to table "trunc_b"
+NOTICE: truncate cascades to table "trunc_e"
+-- circular references
+ALTER TABLE truncate_a ADD FOREIGN KEY (col1) REFERENCES trunc_c;
+-- Add some data to verify that truncating actually works ...
+INSERT INTO trunc_c VALUES (1);
+INSERT INTO truncate_a VALUES (1);
+INSERT INTO trunc_b VALUES (1);
+INSERT INTO trunc_d VALUES (1);
+INSERT INTO trunc_e VALUES (1,1);
+TRUNCATE TABLE trunc_c;
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "truncate_a" references "trunc_c".
+HINT: Truncate table "truncate_a" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE trunc_c,truncate_a;
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_d" references "trunc_c".
+HINT: Truncate table "trunc_d" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE trunc_c,truncate_a,trunc_d;
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_e" references "trunc_c".
+HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e;
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "trunc_b" references "truncate_a".
+HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE.
+TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e,trunc_b;
+-- Verify that truncating did actually work
+SELECT * FROM truncate_a
+ UNION ALL
+ SELECT * FROM trunc_c
+ UNION ALL
+ SELECT * FROM trunc_b
+ UNION ALL
+ SELECT * FROM trunc_d;
+ col1
+------
+(0 rows)
+
+SELECT * FROM trunc_e;
+ a | b
+---+---
+(0 rows)
+
+-- Add data again to test TRUNCATE ... CASCADE
+INSERT INTO trunc_c VALUES (1);
+INSERT INTO truncate_a VALUES (1);
+INSERT INTO trunc_b VALUES (1);
+INSERT INTO trunc_d VALUES (1);
+INSERT INTO trunc_e VALUES (1,1);
+TRUNCATE TABLE trunc_c CASCADE; -- ok
+NOTICE: truncate cascades to table "truncate_a"
+NOTICE: truncate cascades to table "trunc_d"
+NOTICE: truncate cascades to table "trunc_e"
+NOTICE: truncate cascades to table "trunc_b"
+SELECT * FROM truncate_a
+ UNION ALL
+ SELECT * FROM trunc_c
+ UNION ALL
+ SELECT * FROM trunc_b
+ UNION ALL
+ SELECT * FROM trunc_d;
+ col1
+------
+(0 rows)
+
+SELECT * FROM trunc_e;
+ a | b
+---+---
+(0 rows)
+
+DROP TABLE truncate_a,trunc_c,trunc_b,trunc_d,trunc_e CASCADE;
+-- Test TRUNCATE with inheritance
+CREATE TABLE trunc_f (col1 integer primary key);
+INSERT INTO trunc_f VALUES (1);
+INSERT INTO trunc_f VALUES (2);
+CREATE TABLE trunc_fa (col2a text) INHERITS (trunc_f);
+INSERT INTO trunc_fa VALUES (3, 'three');
+CREATE TABLE trunc_fb (col2b int) INHERITS (trunc_f);
+INSERT INTO trunc_fb VALUES (4, 444);
+CREATE TABLE trunc_faa (col3 text) INHERITS (trunc_fa);
+INSERT INTO trunc_faa VALUES (5, 'five', 'FIVE');
+BEGIN;
+SELECT * FROM trunc_f;
+ col1
+------
+ 1
+ 2
+ 3
+ 4
+ 5
+(5 rows)
+
+TRUNCATE trunc_f;
+SELECT * FROM trunc_f;
+ col1
+------
+(0 rows)
+
+ROLLBACK;
+BEGIN;
+SELECT * FROM trunc_f;
+ col1
+------
+ 1
+ 2
+ 3
+ 4
+ 5
+(5 rows)
+
+TRUNCATE ONLY trunc_f;
+SELECT * FROM trunc_f;
+ col1
+------
+ 3
+ 4
+ 5
+(3 rows)
+
+ROLLBACK;
+BEGIN;
+SELECT * FROM trunc_f;
+ col1
+------
+ 1
+ 2
+ 3
+ 4
+ 5
+(5 rows)
+
+SELECT * FROM trunc_fa;
+ col1 | col2a
+------+-------
+ 3 | three
+ 5 | five
+(2 rows)
+
+SELECT * FROM trunc_faa;
+ col1 | col2a | col3
+------+-------+------
+ 5 | five | FIVE
+(1 row)
+
+TRUNCATE ONLY trunc_fb, ONLY trunc_fa;
+SELECT * FROM trunc_f;
+ col1
+------
+ 1
+ 2
+ 5
+(3 rows)
+
+SELECT * FROM trunc_fa;
+ col1 | col2a
+------+-------
+ 5 | five
+(1 row)
+
+SELECT * FROM trunc_faa;
+ col1 | col2a | col3
+------+-------+------
+ 5 | five | FIVE
+(1 row)
+
+ROLLBACK;
+BEGIN;
+SELECT * FROM trunc_f;
+ col1
+------
+ 1
+ 2
+ 3
+ 4
+ 5
+(5 rows)
+
+SELECT * FROM trunc_fa;
+ col1 | col2a
+------+-------
+ 3 | three
+ 5 | five
+(2 rows)
+
+SELECT * FROM trunc_faa;
+ col1 | col2a | col3
+------+-------+------
+ 5 | five | FIVE
+(1 row)
+
+TRUNCATE ONLY trunc_fb, trunc_fa;
+SELECT * FROM trunc_f;
+ col1
+------
+ 1
+ 2
+(2 rows)
+
+SELECT * FROM trunc_fa;
+ col1 | col2a
+------+-------
+(0 rows)
+
+SELECT * FROM trunc_faa;
+ col1 | col2a | col3
+------+-------+------
+(0 rows)
+
+ROLLBACK;
+DROP TABLE trunc_f CASCADE;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to table trunc_fa
+drop cascades to table trunc_faa
+drop cascades to table trunc_fb
+-- Test ON TRUNCATE triggers
+CREATE TABLE trunc_trigger_test (f1 int, f2 text, f3 text);
+CREATE TABLE trunc_trigger_log (tgop text, tglevel text, tgwhen text,
+ tgargv text, tgtable name, rowcount bigint);
+CREATE FUNCTION trunctrigger() RETURNS trigger as $$
+declare c bigint;
+begin
+ execute 'select count(*) from ' || quote_ident(tg_table_name) into c;
+ insert into trunc_trigger_log values
+ (TG_OP, TG_LEVEL, TG_WHEN, TG_ARGV[0], tg_table_name, c);
+ return null;
+end;
+$$ LANGUAGE plpgsql;
+-- basic before trigger
+INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux');
+CREATE TRIGGER t
+BEFORE TRUNCATE ON trunc_trigger_test
+FOR EACH STATEMENT
+EXECUTE PROCEDURE trunctrigger('before trigger truncate');
+SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
+ Row count in test table
+-------------------------
+ 2
+(1 row)
+
+SELECT * FROM trunc_trigger_log;
+ tgop | tglevel | tgwhen | tgargv | tgtable | rowcount
+------+---------+--------+--------+---------+----------
+(0 rows)
+
+TRUNCATE trunc_trigger_test;
+SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
+ Row count in test table
+-------------------------
+ 0
+(1 row)
+
+SELECT * FROM trunc_trigger_log;
+ tgop | tglevel | tgwhen | tgargv | tgtable | rowcount
+----------+-----------+--------+-------------------------+--------------------+----------
+ TRUNCATE | STATEMENT | BEFORE | before trigger truncate | trunc_trigger_test | 2
+(1 row)
+
+DROP TRIGGER t ON trunc_trigger_test;
+truncate trunc_trigger_log;
+-- same test with an after trigger
+INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux');
+CREATE TRIGGER tt
+AFTER TRUNCATE ON trunc_trigger_test
+FOR EACH STATEMENT
+EXECUTE PROCEDURE trunctrigger('after trigger truncate');
+SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
+ Row count in test table
+-------------------------
+ 2
+(1 row)
+
+SELECT * FROM trunc_trigger_log;
+ tgop | tglevel | tgwhen | tgargv | tgtable | rowcount
+------+---------+--------+--------+---------+----------
+(0 rows)
+
+TRUNCATE trunc_trigger_test;
+SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
+ Row count in test table
+-------------------------
+ 0
+(1 row)
+
+SELECT * FROM trunc_trigger_log;
+ tgop | tglevel | tgwhen | tgargv | tgtable | rowcount
+----------+-----------+--------+------------------------+--------------------+----------
+ TRUNCATE | STATEMENT | AFTER | after trigger truncate | trunc_trigger_test | 0
+(1 row)
+
+DROP TABLE trunc_trigger_test;
+DROP TABLE trunc_trigger_log;
+DROP FUNCTION trunctrigger();
+-- test TRUNCATE ... RESTART IDENTITY
+CREATE SEQUENCE truncate_a_id1 START WITH 33;
+CREATE TABLE truncate_a (id serial,
+ id1 integer default nextval('truncate_a_id1'));
+ALTER SEQUENCE truncate_a_id1 OWNED BY truncate_a.id1;
+INSERT INTO truncate_a DEFAULT VALUES;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+ id | id1
+----+-----
+ 1 | 33
+ 2 | 34
+(2 rows)
+
+TRUNCATE truncate_a;
+INSERT INTO truncate_a DEFAULT VALUES;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+ id | id1
+----+-----
+ 3 | 35
+ 4 | 36
+(2 rows)
+
+TRUNCATE truncate_a RESTART IDENTITY;
+INSERT INTO truncate_a DEFAULT VALUES;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+ id | id1
+----+-----
+ 1 | 33
+ 2 | 34
+(2 rows)
+
+CREATE TABLE truncate_b (id int GENERATED ALWAYS AS IDENTITY (START WITH 44));
+INSERT INTO truncate_b DEFAULT VALUES;
+INSERT INTO truncate_b DEFAULT VALUES;
+SELECT * FROM truncate_b;
+ id
+----
+ 44
+ 45
+(2 rows)
+
+TRUNCATE truncate_b;
+INSERT INTO truncate_b DEFAULT VALUES;
+INSERT INTO truncate_b DEFAULT VALUES;
+SELECT * FROM truncate_b;
+ id
+----
+ 46
+ 47
+(2 rows)
+
+TRUNCATE truncate_b RESTART IDENTITY;
+INSERT INTO truncate_b DEFAULT VALUES;
+INSERT INTO truncate_b DEFAULT VALUES;
+SELECT * FROM truncate_b;
+ id
+----
+ 44
+ 45
+(2 rows)
+
+-- check rollback of a RESTART IDENTITY operation
+BEGIN;
+TRUNCATE truncate_a RESTART IDENTITY;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+ id | id1
+----+-----
+ 1 | 33
+(1 row)
+
+ROLLBACK;
+INSERT INTO truncate_a DEFAULT VALUES;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+ id | id1
+----+-----
+ 1 | 33
+ 2 | 34
+ 3 | 35
+ 4 | 36
+(4 rows)
+
+DROP TABLE truncate_a;
+SELECT nextval('truncate_a_id1'); -- fail, seq should have been dropped
+ERROR: relation "truncate_a_id1" does not exist
+LINE 1: SELECT nextval('truncate_a_id1');
+ ^
+-- partitioned table
+CREATE TABLE truncparted (a int, b char) PARTITION BY LIST (a);
+-- error, can't truncate a partitioned table
+TRUNCATE ONLY truncparted;
+ERROR: cannot truncate only a partitioned table
+HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly.
+CREATE TABLE truncparted1 PARTITION OF truncparted FOR VALUES IN (1);
+INSERT INTO truncparted VALUES (1, 'a');
+-- error, must truncate partitions
+TRUNCATE ONLY truncparted;
+ERROR: cannot truncate only a partitioned table
+HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly.
+TRUNCATE truncparted;
+DROP TABLE truncparted;
+-- foreign key on partitioned table: partition key is referencing column.
+-- Make sure truncate did execute on all tables
+CREATE FUNCTION tp_ins_data() RETURNS void LANGUAGE plpgsql AS $$
+ BEGIN
+ INSERT INTO truncprim VALUES (1), (100), (150);
+ INSERT INTO truncpart VALUES (1), (100), (150);
+ END
+$$;
+CREATE FUNCTION tp_chk_data(OUT pktb regclass, OUT pkval int, OUT fktb regclass, OUT fkval int)
+ RETURNS SETOF record LANGUAGE plpgsql AS $$
+ BEGIN
+ RETURN QUERY SELECT
+ pk.tableoid::regclass, pk.a, fk.tableoid::regclass, fk.a
+ FROM truncprim pk FULL JOIN truncpart fk USING (a)
+ ORDER BY 2, 4;
+ END
+$$;
+CREATE TABLE truncprim (a int PRIMARY KEY);
+CREATE TABLE truncpart (a int REFERENCES truncprim)
+ PARTITION BY RANGE (a);
+CREATE TABLE truncpart_1 PARTITION OF truncpart FOR VALUES FROM (0) TO (100);
+CREATE TABLE truncpart_2 PARTITION OF truncpart FOR VALUES FROM (100) TO (200)
+ PARTITION BY RANGE (a);
+CREATE TABLE truncpart_2_1 PARTITION OF truncpart_2 FOR VALUES FROM (100) TO (150);
+CREATE TABLE truncpart_2_d PARTITION OF truncpart_2 DEFAULT;
+TRUNCATE TABLE truncprim; -- should fail
+ERROR: cannot truncate a table referenced in a foreign key constraint
+DETAIL: Table "truncpart" references "truncprim".
+HINT: Truncate table "truncpart" at the same time, or use TRUNCATE ... CASCADE.
+select tp_ins_data();
+ tp_ins_data
+-------------
+
+(1 row)
+
+-- should truncate everything
+TRUNCATE TABLE truncprim, truncpart;
+select * from tp_chk_data();
+ pktb | pkval | fktb | fkval
+------+-------+------+-------
+(0 rows)
+
+select tp_ins_data();
+ tp_ins_data
+-------------
+
+(1 row)
+
+-- should truncate everything
+TRUNCATE TABLE truncprim CASCADE;
+NOTICE: truncate cascades to table "truncpart"
+NOTICE: truncate cascades to table "truncpart_1"
+NOTICE: truncate cascades to table "truncpart_2"
+NOTICE: truncate cascades to table "truncpart_2_1"
+NOTICE: truncate cascades to table "truncpart_2_d"
+SELECT * FROM tp_chk_data();
+ pktb | pkval | fktb | fkval
+------+-------+------+-------
+(0 rows)
+
+SELECT tp_ins_data();
+ tp_ins_data
+-------------
+
+(1 row)
+
+-- should truncate all partitions
+TRUNCATE TABLE truncpart;
+SELECT * FROM tp_chk_data();
+ pktb | pkval | fktb | fkval
+-----------+-------+------+-------
+ truncprim | 1 | |
+ truncprim | 100 | |
+ truncprim | 150 | |
+(3 rows)
+
+DROP TABLE truncprim, truncpart;
+DROP FUNCTION tp_ins_data(), tp_chk_data();
+-- test cascade when referencing a partitioned table
+CREATE TABLE trunc_a (a INT PRIMARY KEY) PARTITION BY RANGE (a);
+CREATE TABLE trunc_a1 PARTITION OF trunc_a FOR VALUES FROM (0) TO (10);
+CREATE TABLE trunc_a2 PARTITION OF trunc_a FOR VALUES FROM (10) TO (20)
+ PARTITION BY RANGE (a);
+CREATE TABLE trunc_a21 PARTITION OF trunc_a2 FOR VALUES FROM (10) TO (12);
+CREATE TABLE trunc_a22 PARTITION OF trunc_a2 FOR VALUES FROM (12) TO (16);
+CREATE TABLE trunc_a2d PARTITION OF trunc_a2 DEFAULT;
+CREATE TABLE trunc_a3 PARTITION OF trunc_a FOR VALUES FROM (20) TO (30);
+INSERT INTO trunc_a VALUES (0), (5), (10), (15), (20), (25);
+-- truncate a partition cascading to a table
+CREATE TABLE ref_b (
+ b INT PRIMARY KEY,
+ a INT REFERENCES trunc_a(a) ON DELETE CASCADE
+);
+INSERT INTO ref_b VALUES (10, 0), (50, 5), (100, 10), (150, 15);
+TRUNCATE TABLE trunc_a1 CASCADE;
+NOTICE: truncate cascades to table "ref_b"
+SELECT a FROM ref_b;
+ a
+---
+(0 rows)
+
+DROP TABLE ref_b;
+-- truncate a partition cascading to a partitioned table
+CREATE TABLE ref_c (
+ c INT PRIMARY KEY,
+ a INT REFERENCES trunc_a(a) ON DELETE CASCADE
+) PARTITION BY RANGE (c);
+CREATE TABLE ref_c1 PARTITION OF ref_c FOR VALUES FROM (100) TO (200);
+CREATE TABLE ref_c2 PARTITION OF ref_c FOR VALUES FROM (200) TO (300);
+INSERT INTO ref_c VALUES (100, 10), (150, 15), (200, 20), (250, 25);
+TRUNCATE TABLE trunc_a21 CASCADE;
+NOTICE: truncate cascades to table "ref_c"
+NOTICE: truncate cascades to table "ref_c1"
+NOTICE: truncate cascades to table "ref_c2"
+SELECT a as "from table ref_c" FROM ref_c;
+ from table ref_c
+------------------
+(0 rows)
+
+SELECT a as "from table trunc_a" FROM trunc_a ORDER BY a;
+ from table trunc_a
+--------------------
+ 15
+ 20
+ 25
+(3 rows)
+
+DROP TABLE trunc_a, ref_c;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/truncate.sql b/ydb/library/yql/tests/postgresql/original/cases/truncate.sql
new file mode 100644
index 0000000000..54f26e3077
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/truncate.sql
@@ -0,0 +1,329 @@
+-- Test basic TRUNCATE functionality.
+CREATE TABLE truncate_a (col1 integer primary key);
+INSERT INTO truncate_a VALUES (1);
+INSERT INTO truncate_a VALUES (2);
+SELECT * FROM truncate_a;
+-- Roll truncate back
+BEGIN;
+TRUNCATE truncate_a;
+ROLLBACK;
+SELECT * FROM truncate_a;
+-- Commit the truncate this time
+BEGIN;
+TRUNCATE truncate_a;
+COMMIT;
+SELECT * FROM truncate_a;
+
+-- Test foreign-key checks
+CREATE TABLE trunc_b (a int REFERENCES truncate_a);
+CREATE TABLE trunc_c (a serial PRIMARY KEY);
+CREATE TABLE trunc_d (a int REFERENCES trunc_c);
+CREATE TABLE trunc_e (a int REFERENCES truncate_a, b int REFERENCES trunc_c);
+
+TRUNCATE TABLE truncate_a; -- fail
+TRUNCATE TABLE truncate_a,trunc_b; -- fail
+TRUNCATE TABLE truncate_a,trunc_b,trunc_e; -- ok
+TRUNCATE TABLE truncate_a,trunc_e; -- fail
+TRUNCATE TABLE trunc_c; -- fail
+TRUNCATE TABLE trunc_c,trunc_d; -- fail
+TRUNCATE TABLE trunc_c,trunc_d,trunc_e; -- ok
+TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a; -- fail
+TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a,trunc_b; -- ok
+
+TRUNCATE TABLE truncate_a RESTRICT; -- fail
+TRUNCATE TABLE truncate_a CASCADE; -- ok
+
+-- circular references
+ALTER TABLE truncate_a ADD FOREIGN KEY (col1) REFERENCES trunc_c;
+
+-- Add some data to verify that truncating actually works ...
+INSERT INTO trunc_c VALUES (1);
+INSERT INTO truncate_a VALUES (1);
+INSERT INTO trunc_b VALUES (1);
+INSERT INTO trunc_d VALUES (1);
+INSERT INTO trunc_e VALUES (1,1);
+TRUNCATE TABLE trunc_c;
+TRUNCATE TABLE trunc_c,truncate_a;
+TRUNCATE TABLE trunc_c,truncate_a,trunc_d;
+TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e;
+TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e,trunc_b;
+
+-- Verify that truncating did actually work
+SELECT * FROM truncate_a
+ UNION ALL
+ SELECT * FROM trunc_c
+ UNION ALL
+ SELECT * FROM trunc_b
+ UNION ALL
+ SELECT * FROM trunc_d;
+SELECT * FROM trunc_e;
+
+-- Add data again to test TRUNCATE ... CASCADE
+INSERT INTO trunc_c VALUES (1);
+INSERT INTO truncate_a VALUES (1);
+INSERT INTO trunc_b VALUES (1);
+INSERT INTO trunc_d VALUES (1);
+INSERT INTO trunc_e VALUES (1,1);
+
+TRUNCATE TABLE trunc_c CASCADE; -- ok
+
+SELECT * FROM truncate_a
+ UNION ALL
+ SELECT * FROM trunc_c
+ UNION ALL
+ SELECT * FROM trunc_b
+ UNION ALL
+ SELECT * FROM trunc_d;
+SELECT * FROM trunc_e;
+
+DROP TABLE truncate_a,trunc_c,trunc_b,trunc_d,trunc_e CASCADE;
+
+-- Test TRUNCATE with inheritance
+
+CREATE TABLE trunc_f (col1 integer primary key);
+INSERT INTO trunc_f VALUES (1);
+INSERT INTO trunc_f VALUES (2);
+
+CREATE TABLE trunc_fa (col2a text) INHERITS (trunc_f);
+INSERT INTO trunc_fa VALUES (3, 'three');
+
+CREATE TABLE trunc_fb (col2b int) INHERITS (trunc_f);
+INSERT INTO trunc_fb VALUES (4, 444);
+
+CREATE TABLE trunc_faa (col3 text) INHERITS (trunc_fa);
+INSERT INTO trunc_faa VALUES (5, 'five', 'FIVE');
+
+BEGIN;
+SELECT * FROM trunc_f;
+TRUNCATE trunc_f;
+SELECT * FROM trunc_f;
+ROLLBACK;
+
+BEGIN;
+SELECT * FROM trunc_f;
+TRUNCATE ONLY trunc_f;
+SELECT * FROM trunc_f;
+ROLLBACK;
+
+BEGIN;
+SELECT * FROM trunc_f;
+SELECT * FROM trunc_fa;
+SELECT * FROM trunc_faa;
+TRUNCATE ONLY trunc_fb, ONLY trunc_fa;
+SELECT * FROM trunc_f;
+SELECT * FROM trunc_fa;
+SELECT * FROM trunc_faa;
+ROLLBACK;
+
+BEGIN;
+SELECT * FROM trunc_f;
+SELECT * FROM trunc_fa;
+SELECT * FROM trunc_faa;
+TRUNCATE ONLY trunc_fb, trunc_fa;
+SELECT * FROM trunc_f;
+SELECT * FROM trunc_fa;
+SELECT * FROM trunc_faa;
+ROLLBACK;
+
+DROP TABLE trunc_f CASCADE;
+
+-- Test ON TRUNCATE triggers
+
+CREATE TABLE trunc_trigger_test (f1 int, f2 text, f3 text);
+CREATE TABLE trunc_trigger_log (tgop text, tglevel text, tgwhen text,
+ tgargv text, tgtable name, rowcount bigint);
+
+CREATE FUNCTION trunctrigger() RETURNS trigger as $$
+declare c bigint;
+begin
+ execute 'select count(*) from ' || quote_ident(tg_table_name) into c;
+ insert into trunc_trigger_log values
+ (TG_OP, TG_LEVEL, TG_WHEN, TG_ARGV[0], tg_table_name, c);
+ return null;
+end;
+$$ LANGUAGE plpgsql;
+
+-- basic before trigger
+INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux');
+
+CREATE TRIGGER t
+BEFORE TRUNCATE ON trunc_trigger_test
+FOR EACH STATEMENT
+EXECUTE PROCEDURE trunctrigger('before trigger truncate');
+
+SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
+SELECT * FROM trunc_trigger_log;
+TRUNCATE trunc_trigger_test;
+SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
+SELECT * FROM trunc_trigger_log;
+
+DROP TRIGGER t ON trunc_trigger_test;
+
+truncate trunc_trigger_log;
+
+-- same test with an after trigger
+INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux');
+
+CREATE TRIGGER tt
+AFTER TRUNCATE ON trunc_trigger_test
+FOR EACH STATEMENT
+EXECUTE PROCEDURE trunctrigger('after trigger truncate');
+
+SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
+SELECT * FROM trunc_trigger_log;
+TRUNCATE trunc_trigger_test;
+SELECT count(*) as "Row count in test table" FROM trunc_trigger_test;
+SELECT * FROM trunc_trigger_log;
+
+DROP TABLE trunc_trigger_test;
+DROP TABLE trunc_trigger_log;
+
+DROP FUNCTION trunctrigger();
+
+-- test TRUNCATE ... RESTART IDENTITY
+CREATE SEQUENCE truncate_a_id1 START WITH 33;
+CREATE TABLE truncate_a (id serial,
+ id1 integer default nextval('truncate_a_id1'));
+ALTER SEQUENCE truncate_a_id1 OWNED BY truncate_a.id1;
+
+INSERT INTO truncate_a DEFAULT VALUES;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+
+TRUNCATE truncate_a;
+
+INSERT INTO truncate_a DEFAULT VALUES;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+
+TRUNCATE truncate_a RESTART IDENTITY;
+
+INSERT INTO truncate_a DEFAULT VALUES;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+
+CREATE TABLE truncate_b (id int GENERATED ALWAYS AS IDENTITY (START WITH 44));
+
+INSERT INTO truncate_b DEFAULT VALUES;
+INSERT INTO truncate_b DEFAULT VALUES;
+SELECT * FROM truncate_b;
+
+TRUNCATE truncate_b;
+
+INSERT INTO truncate_b DEFAULT VALUES;
+INSERT INTO truncate_b DEFAULT VALUES;
+SELECT * FROM truncate_b;
+
+TRUNCATE truncate_b RESTART IDENTITY;
+
+INSERT INTO truncate_b DEFAULT VALUES;
+INSERT INTO truncate_b DEFAULT VALUES;
+SELECT * FROM truncate_b;
+
+-- check rollback of a RESTART IDENTITY operation
+BEGIN;
+TRUNCATE truncate_a RESTART IDENTITY;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+ROLLBACK;
+INSERT INTO truncate_a DEFAULT VALUES;
+INSERT INTO truncate_a DEFAULT VALUES;
+SELECT * FROM truncate_a;
+
+DROP TABLE truncate_a;
+
+SELECT nextval('truncate_a_id1'); -- fail, seq should have been dropped
+
+-- partitioned table
+CREATE TABLE truncparted (a int, b char) PARTITION BY LIST (a);
+-- error, can't truncate a partitioned table
+TRUNCATE ONLY truncparted;
+CREATE TABLE truncparted1 PARTITION OF truncparted FOR VALUES IN (1);
+INSERT INTO truncparted VALUES (1, 'a');
+-- error, must truncate partitions
+TRUNCATE ONLY truncparted;
+TRUNCATE truncparted;
+DROP TABLE truncparted;
+
+-- foreign key on partitioned table: partition key is referencing column.
+-- Make sure truncate did execute on all tables
+CREATE FUNCTION tp_ins_data() RETURNS void LANGUAGE plpgsql AS $$
+ BEGIN
+ INSERT INTO truncprim VALUES (1), (100), (150);
+ INSERT INTO truncpart VALUES (1), (100), (150);
+ END
+$$;
+CREATE FUNCTION tp_chk_data(OUT pktb regclass, OUT pkval int, OUT fktb regclass, OUT fkval int)
+ RETURNS SETOF record LANGUAGE plpgsql AS $$
+ BEGIN
+ RETURN QUERY SELECT
+ pk.tableoid::regclass, pk.a, fk.tableoid::regclass, fk.a
+ FROM truncprim pk FULL JOIN truncpart fk USING (a)
+ ORDER BY 2, 4;
+ END
+$$;
+CREATE TABLE truncprim (a int PRIMARY KEY);
+CREATE TABLE truncpart (a int REFERENCES truncprim)
+ PARTITION BY RANGE (a);
+CREATE TABLE truncpart_1 PARTITION OF truncpart FOR VALUES FROM (0) TO (100);
+CREATE TABLE truncpart_2 PARTITION OF truncpart FOR VALUES FROM (100) TO (200)
+ PARTITION BY RANGE (a);
+CREATE TABLE truncpart_2_1 PARTITION OF truncpart_2 FOR VALUES FROM (100) TO (150);
+CREATE TABLE truncpart_2_d PARTITION OF truncpart_2 DEFAULT;
+
+TRUNCATE TABLE truncprim; -- should fail
+
+select tp_ins_data();
+-- should truncate everything
+TRUNCATE TABLE truncprim, truncpart;
+select * from tp_chk_data();
+
+select tp_ins_data();
+-- should truncate everything
+TRUNCATE TABLE truncprim CASCADE;
+SELECT * FROM tp_chk_data();
+
+SELECT tp_ins_data();
+-- should truncate all partitions
+TRUNCATE TABLE truncpart;
+SELECT * FROM tp_chk_data();
+DROP TABLE truncprim, truncpart;
+DROP FUNCTION tp_ins_data(), tp_chk_data();
+
+-- test cascade when referencing a partitioned table
+CREATE TABLE trunc_a (a INT PRIMARY KEY) PARTITION BY RANGE (a);
+CREATE TABLE trunc_a1 PARTITION OF trunc_a FOR VALUES FROM (0) TO (10);
+CREATE TABLE trunc_a2 PARTITION OF trunc_a FOR VALUES FROM (10) TO (20)
+ PARTITION BY RANGE (a);
+CREATE TABLE trunc_a21 PARTITION OF trunc_a2 FOR VALUES FROM (10) TO (12);
+CREATE TABLE trunc_a22 PARTITION OF trunc_a2 FOR VALUES FROM (12) TO (16);
+CREATE TABLE trunc_a2d PARTITION OF trunc_a2 DEFAULT;
+CREATE TABLE trunc_a3 PARTITION OF trunc_a FOR VALUES FROM (20) TO (30);
+INSERT INTO trunc_a VALUES (0), (5), (10), (15), (20), (25);
+
+-- truncate a partition cascading to a table
+CREATE TABLE ref_b (
+ b INT PRIMARY KEY,
+ a INT REFERENCES trunc_a(a) ON DELETE CASCADE
+);
+INSERT INTO ref_b VALUES (10, 0), (50, 5), (100, 10), (150, 15);
+
+TRUNCATE TABLE trunc_a1 CASCADE;
+SELECT a FROM ref_b;
+
+DROP TABLE ref_b;
+
+-- truncate a partition cascading to a partitioned table
+CREATE TABLE ref_c (
+ c INT PRIMARY KEY,
+ a INT REFERENCES trunc_a(a) ON DELETE CASCADE
+) PARTITION BY RANGE (c);
+CREATE TABLE ref_c1 PARTITION OF ref_c FOR VALUES FROM (100) TO (200);
+CREATE TABLE ref_c2 PARTITION OF ref_c FOR VALUES FROM (200) TO (300);
+INSERT INTO ref_c VALUES (100, 10), (150, 15), (200, 20), (250, 25);
+
+TRUNCATE TABLE trunc_a21 CASCADE;
+SELECT a as "from table ref_c" FROM ref_c;
+SELECT a as "from table trunc_a" FROM trunc_a ORDER BY a;
+
+DROP TABLE trunc_a, ref_c;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/unicode.out b/ydb/library/yql/tests/postgresql/original/cases/unicode.out
new file mode 100644
index 0000000000..f2713a2326
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/unicode.out
@@ -0,0 +1,89 @@
+SELECT getdatabaseencoding() <> 'UTF8' AS skip_test \gset
+\if :skip_test
+\quit
+\endif
+SELECT U&'\0061\0308bc' <> U&'\00E4bc' COLLATE "C" AS sanity_check;
+ sanity_check
+--------------
+ t
+(1 row)
+
+SELECT normalize('');
+ normalize
+-----------
+
+(1 row)
+
+SELECT normalize(U&'\0061\0308\24D1c') = U&'\00E4\24D1c' COLLATE "C" AS test_default;
+ test_default
+--------------
+ t
+(1 row)
+
+SELECT normalize(U&'\0061\0308\24D1c', NFC) = U&'\00E4\24D1c' COLLATE "C" AS test_nfc;
+ test_nfc
+----------
+ t
+(1 row)
+
+SELECT normalize(U&'\00E4bc', NFC) = U&'\00E4bc' COLLATE "C" AS test_nfc_idem;
+ test_nfc_idem
+---------------
+ t
+(1 row)
+
+SELECT normalize(U&'\00E4\24D1c', NFD) = U&'\0061\0308\24D1c' COLLATE "C" AS test_nfd;
+ test_nfd
+----------
+ t
+(1 row)
+
+SELECT normalize(U&'\0061\0308\24D1c', NFKC) = U&'\00E4bc' COLLATE "C" AS test_nfkc;
+ test_nfkc
+-----------
+ t
+(1 row)
+
+SELECT normalize(U&'\00E4\24D1c', NFKD) = U&'\0061\0308bc' COLLATE "C" AS test_nfkd;
+ test_nfkd
+-----------
+ t
+(1 row)
+
+SELECT "normalize"('abc', 'def'); -- run-time error
+ERROR: invalid normalization form: def
+SELECT U&'\00E4\24D1c' IS NORMALIZED AS test_default;
+ test_default
+--------------
+ t
+(1 row)
+
+SELECT U&'\00E4\24D1c' IS NFC NORMALIZED AS test_nfc;
+ test_nfc
+----------
+ t
+(1 row)
+
+SELECT num, val,
+ val IS NFC NORMALIZED AS NFC,
+ val IS NFD NORMALIZED AS NFD,
+ val IS NFKC NORMALIZED AS NFKC,
+ val IS NFKD NORMALIZED AS NFKD
+FROM
+ (VALUES (1, U&'\00E4bc'),
+ (2, U&'\0061\0308bc'),
+ (3, U&'\00E4\24D1c'),
+ (4, U&'\0061\0308\24D1c'),
+ (5, '')) vals (num, val)
+ORDER BY num;
+ num | val | nfc | nfd | nfkc | nfkd
+-----+-----+-----+-----+------+------
+ 1 | äbc | t | f | t | f
+ 2 | äbc | f | t | f | t
+ 3 | äⓑc | t | f | f | f
+ 4 | äⓑc | f | t | f | f
+ 5 | | t | t | t | t
+(5 rows)
+
+SELECT is_normalized('abc', 'def'); -- run-time error
+ERROR: invalid normalization form: def
diff --git a/ydb/library/yql/tests/postgresql/original/cases/unicode.sql b/ydb/library/yql/tests/postgresql/original/cases/unicode.sql
new file mode 100644
index 0000000000..63cd523f85
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/unicode.sql
@@ -0,0 +1,34 @@
+SELECT getdatabaseencoding() <> 'UTF8' AS skip_test \gset
+\if :skip_test
+\quit
+\endif
+
+SELECT U&'\0061\0308bc' <> U&'\00E4bc' COLLATE "C" AS sanity_check;
+
+SELECT normalize('');
+SELECT normalize(U&'\0061\0308\24D1c') = U&'\00E4\24D1c' COLLATE "C" AS test_default;
+SELECT normalize(U&'\0061\0308\24D1c', NFC) = U&'\00E4\24D1c' COLLATE "C" AS test_nfc;
+SELECT normalize(U&'\00E4bc', NFC) = U&'\00E4bc' COLLATE "C" AS test_nfc_idem;
+SELECT normalize(U&'\00E4\24D1c', NFD) = U&'\0061\0308\24D1c' COLLATE "C" AS test_nfd;
+SELECT normalize(U&'\0061\0308\24D1c', NFKC) = U&'\00E4bc' COLLATE "C" AS test_nfkc;
+SELECT normalize(U&'\00E4\24D1c', NFKD) = U&'\0061\0308bc' COLLATE "C" AS test_nfkd;
+
+SELECT "normalize"('abc', 'def'); -- run-time error
+
+SELECT U&'\00E4\24D1c' IS NORMALIZED AS test_default;
+SELECT U&'\00E4\24D1c' IS NFC NORMALIZED AS test_nfc;
+
+SELECT num, val,
+ val IS NFC NORMALIZED AS NFC,
+ val IS NFD NORMALIZED AS NFD,
+ val IS NFKC NORMALIZED AS NFKC,
+ val IS NFKD NORMALIZED AS NFKD
+FROM
+ (VALUES (1, U&'\00E4bc'),
+ (2, U&'\0061\0308bc'),
+ (3, U&'\00E4\24D1c'),
+ (4, U&'\0061\0308\24D1c'),
+ (5, '')) vals (num, val)
+ORDER BY num;
+
+SELECT is_normalized('abc', 'def'); -- run-time error
diff --git a/ydb/library/yql/tests/postgresql/original/cases/union.out b/ydb/library/yql/tests/postgresql/original/cases/union.out
new file mode 100644
index 0000000000..dece7310cf
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/union.out
@@ -0,0 +1,1434 @@
+--
+-- UNION (also INTERSECT, EXCEPT)
+--
+-- Simple UNION constructs
+SELECT 1 AS two UNION SELECT 2 ORDER BY 1;
+ two
+-----
+ 1
+ 2
+(2 rows)
+
+SELECT 1 AS one UNION SELECT 1 ORDER BY 1;
+ one
+-----
+ 1
+(1 row)
+
+SELECT 1 AS two UNION ALL SELECT 2;
+ two
+-----
+ 1
+ 2
+(2 rows)
+
+SELECT 1 AS two UNION ALL SELECT 1;
+ two
+-----
+ 1
+ 1
+(2 rows)
+
+SELECT 1 AS three UNION SELECT 2 UNION SELECT 3 ORDER BY 1;
+ three
+-------
+ 1
+ 2
+ 3
+(3 rows)
+
+SELECT 1 AS two UNION SELECT 2 UNION SELECT 2 ORDER BY 1;
+ two
+-----
+ 1
+ 2
+(2 rows)
+
+SELECT 1 AS three UNION SELECT 2 UNION ALL SELECT 2 ORDER BY 1;
+ three
+-------
+ 1
+ 2
+ 2
+(3 rows)
+
+SELECT 1.1 AS two UNION SELECT 2.2 ORDER BY 1;
+ two
+-----
+ 1.1
+ 2.2
+(2 rows)
+
+-- Mixed types
+SELECT 1.1 AS two UNION SELECT 2 ORDER BY 1;
+ two
+-----
+ 1.1
+ 2
+(2 rows)
+
+SELECT 1 AS two UNION SELECT 2.2 ORDER BY 1;
+ two
+-----
+ 1
+ 2.2
+(2 rows)
+
+SELECT 1 AS one UNION SELECT 1.0::float8 ORDER BY 1;
+ one
+-----
+ 1
+(1 row)
+
+SELECT 1.1 AS two UNION ALL SELECT 2 ORDER BY 1;
+ two
+-----
+ 1.1
+ 2
+(2 rows)
+
+SELECT 1.0::float8 AS two UNION ALL SELECT 1 ORDER BY 1;
+ two
+-----
+ 1
+ 1
+(2 rows)
+
+SELECT 1.1 AS three UNION SELECT 2 UNION SELECT 3 ORDER BY 1;
+ three
+-------
+ 1.1
+ 2
+ 3
+(3 rows)
+
+SELECT 1.1::float8 AS two UNION SELECT 2 UNION SELECT 2.0::float8 ORDER BY 1;
+ two
+-----
+ 1.1
+ 2
+(2 rows)
+
+SELECT 1.1 AS three UNION SELECT 2 UNION ALL SELECT 2 ORDER BY 1;
+ three
+-------
+ 1.1
+ 2
+ 2
+(3 rows)
+
+SELECT 1.1 AS two UNION (SELECT 2 UNION ALL SELECT 2) ORDER BY 1;
+ two
+-----
+ 1.1
+ 2
+(2 rows)
+
+--
+-- Try testing from tables...
+--
+SELECT f1 AS five FROM FLOAT8_TBL
+UNION
+SELECT f1 FROM FLOAT8_TBL
+ORDER BY 1;
+ five
+-----------------------
+ -1.2345678901234e+200
+ -1004.3
+ -34.84
+ -1.2345678901234e-200
+ 0
+(5 rows)
+
+SELECT f1 AS ten FROM FLOAT8_TBL
+UNION ALL
+SELECT f1 FROM FLOAT8_TBL;
+ ten
+-----------------------
+ 0
+ -34.84
+ -1004.3
+ -1.2345678901234e+200
+ -1.2345678901234e-200
+ 0
+ -34.84
+ -1004.3
+ -1.2345678901234e+200
+ -1.2345678901234e-200
+(10 rows)
+
+SELECT f1 AS nine FROM FLOAT8_TBL
+UNION
+SELECT f1 FROM INT4_TBL
+ORDER BY 1;
+ nine
+-----------------------
+ -1.2345678901234e+200
+ -2147483647
+ -123456
+ -1004.3
+ -34.84
+ -1.2345678901234e-200
+ 0
+ 123456
+ 2147483647
+(9 rows)
+
+SELECT f1 AS ten FROM FLOAT8_TBL
+UNION ALL
+SELECT f1 FROM INT4_TBL;
+ ten
+-----------------------
+ 0
+ -34.84
+ -1004.3
+ -1.2345678901234e+200
+ -1.2345678901234e-200
+ 0
+ 123456
+ -123456
+ 2147483647
+ -2147483647
+(10 rows)
+
+SELECT f1 AS five FROM FLOAT8_TBL
+ WHERE f1 BETWEEN -1e6 AND 1e6
+UNION
+SELECT f1 FROM INT4_TBL
+ WHERE f1 BETWEEN 0 AND 1000000
+ORDER BY 1;
+ five
+-----------------------
+ -1004.3
+ -34.84
+ -1.2345678901234e-200
+ 0
+ 123456
+(5 rows)
+
+SELECT CAST(f1 AS char(4)) AS three FROM VARCHAR_TBL
+UNION
+SELECT f1 FROM CHAR_TBL
+ORDER BY 1;
+ three
+-------
+ a
+ ab
+ abcd
+(3 rows)
+
+SELECT f1 AS three FROM VARCHAR_TBL
+UNION
+SELECT CAST(f1 AS varchar) FROM CHAR_TBL
+ORDER BY 1;
+ three
+-------
+ a
+ ab
+ abcd
+(3 rows)
+
+SELECT f1 AS eight FROM VARCHAR_TBL
+UNION ALL
+SELECT f1 FROM CHAR_TBL;
+ eight
+-------
+ a
+ ab
+ abcd
+ abcd
+ a
+ ab
+ abcd
+ abcd
+(8 rows)
+
+SELECT f1 AS five FROM TEXT_TBL
+UNION
+SELECT f1 FROM VARCHAR_TBL
+UNION
+SELECT TRIM(TRAILING FROM f1) FROM CHAR_TBL
+ORDER BY 1;
+ five
+-------------------
+ a
+ ab
+ abcd
+ doh!
+ hi de ho neighbor
+(5 rows)
+
+--
+-- INTERSECT and EXCEPT
+--
+SELECT q2 FROM int8_tbl INTERSECT SELECT q1 FROM int8_tbl ORDER BY 1;
+ q2
+------------------
+ 123
+ 4567890123456789
+(2 rows)
+
+SELECT q2 FROM int8_tbl INTERSECT ALL SELECT q1 FROM int8_tbl ORDER BY 1;
+ q2
+------------------
+ 123
+ 4567890123456789
+ 4567890123456789
+(3 rows)
+
+SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1;
+ q2
+-------------------
+ -4567890123456789
+ 456
+(2 rows)
+
+SELECT q2 FROM int8_tbl EXCEPT ALL SELECT q1 FROM int8_tbl ORDER BY 1;
+ q2
+-------------------
+ -4567890123456789
+ 456
+(2 rows)
+
+SELECT q2 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q1 FROM int8_tbl ORDER BY 1;
+ q2
+-------------------
+ -4567890123456789
+ 456
+ 4567890123456789
+(3 rows)
+
+SELECT q1 FROM int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY 1;
+ q1
+----
+(0 rows)
+
+SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q2 FROM int8_tbl ORDER BY 1;
+ q1
+------------------
+ 123
+ 4567890123456789
+(2 rows)
+
+SELECT q1 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q2 FROM int8_tbl ORDER BY 1;
+ q1
+------------------
+ 123
+ 4567890123456789
+ 4567890123456789
+(3 rows)
+
+SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q1 FROM int8_tbl FOR NO KEY UPDATE;
+ERROR: FOR NO KEY UPDATE is not allowed with UNION/INTERSECT/EXCEPT
+-- nested cases
+(SELECT 1,2,3 UNION SELECT 4,5,6) INTERSECT SELECT 4,5,6;
+ ?column? | ?column? | ?column?
+----------+----------+----------
+ 4 | 5 | 6
+(1 row)
+
+(SELECT 1,2,3 UNION SELECT 4,5,6 ORDER BY 1,2) INTERSECT SELECT 4,5,6;
+ ?column? | ?column? | ?column?
+----------+----------+----------
+ 4 | 5 | 6
+(1 row)
+
+(SELECT 1,2,3 UNION SELECT 4,5,6) EXCEPT SELECT 4,5,6;
+ ?column? | ?column? | ?column?
+----------+----------+----------
+ 1 | 2 | 3
+(1 row)
+
+(SELECT 1,2,3 UNION SELECT 4,5,6 ORDER BY 1,2) EXCEPT SELECT 4,5,6;
+ ?column? | ?column? | ?column?
+----------+----------+----------
+ 1 | 2 | 3
+(1 row)
+
+-- exercise both hashed and sorted implementations of UNION/INTERSECT/EXCEPT
+set enable_hashagg to on;
+explain (costs off)
+select count(*) from
+ ( select unique1 from tenk1 union select fivethous from tenk1 ) ss;
+ QUERY PLAN
+----------------------------------------------------------------
+ Aggregate
+ -> HashAggregate
+ Group Key: tenk1.unique1
+ -> Append
+ -> Index Only Scan using tenk1_unique1 on tenk1
+ -> Seq Scan on tenk1 tenk1_1
+(6 rows)
+
+select count(*) from
+ ( select unique1 from tenk1 union select fivethous from tenk1 ) ss;
+ count
+-------
+ 10000
+(1 row)
+
+explain (costs off)
+select count(*) from
+ ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Aggregate
+ -> Subquery Scan on ss
+ -> HashSetOp Intersect
+ -> Append
+ -> Subquery Scan on "*SELECT* 2"
+ -> Seq Scan on tenk1
+ -> Subquery Scan on "*SELECT* 1"
+ -> Index Only Scan using tenk1_unique1 on tenk1 tenk1_1
+(8 rows)
+
+select count(*) from
+ ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
+ count
+-------
+ 5000
+(1 row)
+
+explain (costs off)
+select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
+ QUERY PLAN
+------------------------------------------------------------------------
+ HashSetOp Except
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Index Only Scan using tenk1_unique1 on tenk1
+ -> Subquery Scan on "*SELECT* 2"
+ -> Index Only Scan using tenk1_unique2 on tenk1 tenk1_1
+ Filter: (unique2 <> 10)
+(7 rows)
+
+select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
+ unique1
+---------
+ 10
+(1 row)
+
+set enable_hashagg to off;
+explain (costs off)
+select count(*) from
+ ( select unique1 from tenk1 union select fivethous from tenk1 ) ss;
+ QUERY PLAN
+----------------------------------------------------------------------
+ Aggregate
+ -> Unique
+ -> Sort
+ Sort Key: tenk1.unique1
+ -> Append
+ -> Index Only Scan using tenk1_unique1 on tenk1
+ -> Seq Scan on tenk1 tenk1_1
+(7 rows)
+
+select count(*) from
+ ( select unique1 from tenk1 union select fivethous from tenk1 ) ss;
+ count
+-------
+ 10000
+(1 row)
+
+explain (costs off)
+select count(*) from
+ ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
+ QUERY PLAN
+------------------------------------------------------------------------------------------
+ Aggregate
+ -> Subquery Scan on ss
+ -> SetOp Intersect
+ -> Sort
+ Sort Key: "*SELECT* 2".fivethous
+ -> Append
+ -> Subquery Scan on "*SELECT* 2"
+ -> Seq Scan on tenk1
+ -> Subquery Scan on "*SELECT* 1"
+ -> Index Only Scan using tenk1_unique1 on tenk1 tenk1_1
+(10 rows)
+
+select count(*) from
+ ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
+ count
+-------
+ 5000
+(1 row)
+
+explain (costs off)
+select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
+ QUERY PLAN
+------------------------------------------------------------------------------
+ SetOp Except
+ -> Sort
+ Sort Key: "*SELECT* 1".unique1
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Index Only Scan using tenk1_unique1 on tenk1
+ -> Subquery Scan on "*SELECT* 2"
+ -> Index Only Scan using tenk1_unique2 on tenk1 tenk1_1
+ Filter: (unique2 <> 10)
+(9 rows)
+
+select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
+ unique1
+---------
+ 10
+(1 row)
+
+reset enable_hashagg;
+-- non-hashable type
+set enable_hashagg to on;
+explain (costs off)
+select x from (values (100::money), (200::money)) _(x) union select x from (values (100::money), (300::money)) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: "*VALUES*".column1
+ -> Append
+ -> Values Scan on "*VALUES*"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+set enable_hashagg to off;
+explain (costs off)
+select x from (values (100::money), (200::money)) _(x) union select x from (values (100::money), (300::money)) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: "*VALUES*".column1
+ -> Append
+ -> Values Scan on "*VALUES*"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+reset enable_hashagg;
+-- arrays
+set enable_hashagg to on;
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ QUERY PLAN
+-----------------------------------------
+ HashAggregate
+ Group Key: "*VALUES*".column1
+ -> Append
+ -> Values Scan on "*VALUES*"
+ -> Values Scan on "*VALUES*_1"
+(5 rows)
+
+select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ x
+-------
+ {1,4}
+ {1,2}
+ {1,3}
+(3 rows)
+
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ HashSetOp Intersect
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Values Scan on "*VALUES*"
+ -> Subquery Scan on "*SELECT* 2"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ x
+-------
+ {1,2}
+(1 row)
+
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ HashSetOp Except
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Values Scan on "*VALUES*"
+ -> Subquery Scan on "*SELECT* 2"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ x
+-------
+ {1,3}
+(1 row)
+
+-- non-hashable type
+explain (costs off)
+select x from (values (array[100::money]), (array[200::money])) _(x) union select x from (values (array[100::money]), (array[300::money])) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: "*VALUES*".column1
+ -> Append
+ -> Values Scan on "*VALUES*"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+select x from (values (array[100::money]), (array[200::money])) _(x) union select x from (values (array[100::money]), (array[300::money])) _(x);
+ x
+-----------
+ {$100.00}
+ {$200.00}
+ {$300.00}
+(3 rows)
+
+set enable_hashagg to off;
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: "*VALUES*".column1
+ -> Append
+ -> Values Scan on "*VALUES*"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ x
+-------
+ {1,2}
+ {1,3}
+ {1,4}
+(3 rows)
+
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ QUERY PLAN
+-----------------------------------------------------
+ SetOp Intersect
+ -> Sort
+ Sort Key: "*SELECT* 1".x
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Values Scan on "*VALUES*"
+ -> Subquery Scan on "*SELECT* 2"
+ -> Values Scan on "*VALUES*_1"
+(8 rows)
+
+select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ x
+-------
+ {1,2}
+(1 row)
+
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ QUERY PLAN
+-----------------------------------------------------
+ SetOp Except
+ -> Sort
+ Sort Key: "*SELECT* 1".x
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Values Scan on "*VALUES*"
+ -> Subquery Scan on "*SELECT* 2"
+ -> Values Scan on "*VALUES*_1"
+(8 rows)
+
+select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x);
+ x
+-------
+ {1,3}
+(1 row)
+
+reset enable_hashagg;
+-- records
+set enable_hashagg to on;
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: "*VALUES*".column1
+ -> Append
+ -> Values Scan on "*VALUES*"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ x
+-------
+ (1,2)
+ (1,3)
+ (1,4)
+(3 rows)
+
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ QUERY PLAN
+-----------------------------------------------------
+ SetOp Intersect
+ -> Sort
+ Sort Key: "*SELECT* 1".x
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Values Scan on "*VALUES*"
+ -> Subquery Scan on "*SELECT* 2"
+ -> Values Scan on "*VALUES*_1"
+(8 rows)
+
+select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ x
+-------
+ (1,2)
+(1 row)
+
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ QUERY PLAN
+-----------------------------------------------------
+ SetOp Except
+ -> Sort
+ Sort Key: "*SELECT* 1".x
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Values Scan on "*VALUES*"
+ -> Subquery Scan on "*SELECT* 2"
+ -> Values Scan on "*VALUES*_1"
+(8 rows)
+
+select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ x
+-------
+ (1,3)
+(1 row)
+
+-- non-hashable type
+-- With an anonymous row type, the typcache does not report that the
+-- type is hashable. (Otherwise, this would fail at execution time.)
+explain (costs off)
+select x from (values (row(100::money)), (row(200::money))) _(x) union select x from (values (row(100::money)), (row(300::money))) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: "*VALUES*".column1
+ -> Append
+ -> Values Scan on "*VALUES*"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+select x from (values (row(100::money)), (row(200::money))) _(x) union select x from (values (row(100::money)), (row(300::money))) _(x);
+ x
+-----------
+ ($100.00)
+ ($200.00)
+ ($300.00)
+(3 rows)
+
+-- With a defined row type, the typcache can inspect the type's fields
+-- for hashability.
+create type ct1 as (f1 money);
+explain (costs off)
+select x from (values (row(100::money)::ct1), (row(200::money)::ct1)) _(x) union select x from (values (row(100::money)::ct1), (row(300::money)::ct1)) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: "*VALUES*".column1
+ -> Append
+ -> Values Scan on "*VALUES*"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+select x from (values (row(100::money)::ct1), (row(200::money)::ct1)) _(x) union select x from (values (row(100::money)::ct1), (row(300::money)::ct1)) _(x);
+ x
+-----------
+ ($100.00)
+ ($200.00)
+ ($300.00)
+(3 rows)
+
+drop type ct1;
+set enable_hashagg to off;
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ QUERY PLAN
+-----------------------------------------------
+ Unique
+ -> Sort
+ Sort Key: "*VALUES*".column1
+ -> Append
+ -> Values Scan on "*VALUES*"
+ -> Values Scan on "*VALUES*_1"
+(6 rows)
+
+select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ x
+-------
+ (1,2)
+ (1,3)
+ (1,4)
+(3 rows)
+
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ QUERY PLAN
+-----------------------------------------------------
+ SetOp Intersect
+ -> Sort
+ Sort Key: "*SELECT* 1".x
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Values Scan on "*VALUES*"
+ -> Subquery Scan on "*SELECT* 2"
+ -> Values Scan on "*VALUES*_1"
+(8 rows)
+
+select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ x
+-------
+ (1,2)
+(1 row)
+
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ QUERY PLAN
+-----------------------------------------------------
+ SetOp Except
+ -> Sort
+ Sort Key: "*SELECT* 1".x
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Values Scan on "*VALUES*"
+ -> Subquery Scan on "*SELECT* 2"
+ -> Values Scan on "*VALUES*_1"
+(8 rows)
+
+select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x);
+ x
+-------
+ (1,3)
+(1 row)
+
+reset enable_hashagg;
+--
+-- Mixed types
+--
+SELECT f1 FROM float8_tbl INTERSECT SELECT f1 FROM int4_tbl ORDER BY 1;
+ f1
+----
+ 0
+(1 row)
+
+SELECT f1 FROM float8_tbl EXCEPT SELECT f1 FROM int4_tbl ORDER BY 1;
+ f1
+-----------------------
+ -1.2345678901234e+200
+ -1004.3
+ -34.84
+ -1.2345678901234e-200
+(4 rows)
+
+--
+-- Operator precedence and (((((extra))))) parentheses
+--
+SELECT q1 FROM int8_tbl INTERSECT SELECT q2 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl ORDER BY 1;
+ q1
+-------------------
+ -4567890123456789
+ 123
+ 123
+ 456
+ 4567890123456789
+ 4567890123456789
+ 4567890123456789
+(7 rows)
+
+SELECT q1 FROM int8_tbl INTERSECT (((SELECT q2 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl))) ORDER BY 1;
+ q1
+------------------
+ 123
+ 4567890123456789
+(2 rows)
+
+(((SELECT q1 FROM int8_tbl INTERSECT SELECT q2 FROM int8_tbl ORDER BY 1))) UNION ALL SELECT q2 FROM int8_tbl;
+ q1
+-------------------
+ 123
+ 4567890123456789
+ 456
+ 4567890123456789
+ 123
+ 4567890123456789
+ -4567890123456789
+(7 rows)
+
+SELECT q1 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1;
+ q1
+-------------------
+ -4567890123456789
+ 456
+(2 rows)
+
+SELECT q1 FROM int8_tbl UNION ALL (((SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1)));
+ q1
+-------------------
+ 123
+ 123
+ 4567890123456789
+ 4567890123456789
+ 4567890123456789
+ -4567890123456789
+ 456
+(7 rows)
+
+(((SELECT q1 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl))) EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1;
+ q1
+-------------------
+ -4567890123456789
+ 456
+(2 rows)
+
+--
+-- Subqueries with ORDER BY & LIMIT clauses
+--
+-- In this syntax, ORDER BY/LIMIT apply to the result of the EXCEPT
+SELECT q1,q2 FROM int8_tbl EXCEPT SELECT q2,q1 FROM int8_tbl
+ORDER BY q2,q1;
+ q1 | q2
+------------------+-------------------
+ 4567890123456789 | -4567890123456789
+ 123 | 456
+(2 rows)
+
+-- This should fail, because q2 isn't a name of an EXCEPT output column
+SELECT q1 FROM int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1;
+ERROR: column "q2" does not exist
+LINE 1: ... int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1...
+ ^
+HINT: There is a column named "q2" in table "*SELECT* 2", but it cannot be referenced from this part of the query.
+-- But this should work:
+SELECT q1 FROM int8_tbl EXCEPT (((SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1))) ORDER BY 1;
+ q1
+------------------
+ 123
+ 4567890123456789
+(2 rows)
+
+--
+-- New syntaxes (7.1) permit new tests
+--
+(((((select * from int8_tbl)))));
+ q1 | q2
+------------------+-------------------
+ 123 | 456
+ 123 | 4567890123456789
+ 4567890123456789 | 123
+ 4567890123456789 | 4567890123456789
+ 4567890123456789 | -4567890123456789
+(5 rows)
+
+--
+-- Check behavior with empty select list (allowed since 9.4)
+--
+select union select;
+--
+(1 row)
+
+select intersect select;
+--
+(1 row)
+
+select except select;
+--
+(0 rows)
+
+-- check hashed implementation
+set enable_hashagg = true;
+set enable_sort = false;
+explain (costs off)
+select from generate_series(1,5) union select from generate_series(1,3);
+ QUERY PLAN
+----------------------------------------------------------------
+ HashAggregate
+ -> Append
+ -> Function Scan on generate_series
+ -> Function Scan on generate_series generate_series_1
+(4 rows)
+
+explain (costs off)
+select from generate_series(1,5) intersect select from generate_series(1,3);
+ QUERY PLAN
+----------------------------------------------------------------------
+ HashSetOp Intersect
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Function Scan on generate_series
+ -> Subquery Scan on "*SELECT* 2"
+ -> Function Scan on generate_series generate_series_1
+(6 rows)
+
+select from generate_series(1,5) union select from generate_series(1,3);
+--
+(1 row)
+
+select from generate_series(1,5) union all select from generate_series(1,3);
+--
+(8 rows)
+
+select from generate_series(1,5) intersect select from generate_series(1,3);
+--
+(1 row)
+
+select from generate_series(1,5) intersect all select from generate_series(1,3);
+--
+(3 rows)
+
+select from generate_series(1,5) except select from generate_series(1,3);
+--
+(0 rows)
+
+select from generate_series(1,5) except all select from generate_series(1,3);
+--
+(2 rows)
+
+-- check sorted implementation
+set enable_hashagg = false;
+set enable_sort = true;
+explain (costs off)
+select from generate_series(1,5) union select from generate_series(1,3);
+ QUERY PLAN
+----------------------------------------------------------------
+ Unique
+ -> Append
+ -> Function Scan on generate_series
+ -> Function Scan on generate_series generate_series_1
+(4 rows)
+
+explain (costs off)
+select from generate_series(1,5) intersect select from generate_series(1,3);
+ QUERY PLAN
+----------------------------------------------------------------------
+ SetOp Intersect
+ -> Append
+ -> Subquery Scan on "*SELECT* 1"
+ -> Function Scan on generate_series
+ -> Subquery Scan on "*SELECT* 2"
+ -> Function Scan on generate_series generate_series_1
+(6 rows)
+
+select from generate_series(1,5) union select from generate_series(1,3);
+--
+(1 row)
+
+select from generate_series(1,5) union all select from generate_series(1,3);
+--
+(8 rows)
+
+select from generate_series(1,5) intersect select from generate_series(1,3);
+--
+(1 row)
+
+select from generate_series(1,5) intersect all select from generate_series(1,3);
+--
+(3 rows)
+
+select from generate_series(1,5) except select from generate_series(1,3);
+--
+(0 rows)
+
+select from generate_series(1,5) except all select from generate_series(1,3);
+--
+(2 rows)
+
+reset enable_hashagg;
+reset enable_sort;
+--
+-- Check handling of a case with unknown constants. We don't guarantee
+-- an undecorated constant will work in all cases, but historically this
+-- usage has worked, so test we don't break it.
+--
+SELECT a.f1 FROM (SELECT 'test' AS f1 FROM varchar_tbl) a
+UNION
+SELECT b.f1 FROM (SELECT f1 FROM varchar_tbl) b
+ORDER BY 1;
+ f1
+------
+ a
+ ab
+ abcd
+ test
+(4 rows)
+
+-- This should fail, but it should produce an error cursor
+SELECT '3.4'::numeric UNION SELECT 'foo';
+ERROR: invalid input syntax for type numeric: "foo"
+LINE 1: SELECT '3.4'::numeric UNION SELECT 'foo';
+ ^
+--
+-- Test that expression-index constraints can be pushed down through
+-- UNION or UNION ALL
+--
+CREATE TEMP TABLE t1 (a text, b text);
+CREATE INDEX t1_ab_idx on t1 ((a || b));
+CREATE TEMP TABLE t2 (ab text primary key);
+INSERT INTO t1 VALUES ('a', 'b'), ('x', 'y');
+INSERT INTO t2 VALUES ('ab'), ('xy');
+set enable_seqscan = off;
+set enable_indexscan = on;
+set enable_bitmapscan = off;
+explain (costs off)
+ SELECT * FROM
+ (SELECT a || b AS ab FROM t1
+ UNION ALL
+ SELECT * FROM t2) t
+ WHERE ab = 'ab';
+ QUERY PLAN
+---------------------------------------------
+ Append
+ -> Index Scan using t1_ab_idx on t1
+ Index Cond: ((a || b) = 'ab'::text)
+ -> Index Only Scan using t2_pkey on t2
+ Index Cond: (ab = 'ab'::text)
+(5 rows)
+
+explain (costs off)
+ SELECT * FROM
+ (SELECT a || b AS ab FROM t1
+ UNION
+ SELECT * FROM t2) t
+ WHERE ab = 'ab';
+ QUERY PLAN
+---------------------------------------------------
+ HashAggregate
+ Group Key: ((t1.a || t1.b))
+ -> Append
+ -> Index Scan using t1_ab_idx on t1
+ Index Cond: ((a || b) = 'ab'::text)
+ -> Index Only Scan using t2_pkey on t2
+ Index Cond: (ab = 'ab'::text)
+(7 rows)
+
+--
+-- Test that ORDER BY for UNION ALL can be pushed down to inheritance
+-- children.
+--
+CREATE TEMP TABLE t1c (b text, a text);
+ALTER TABLE t1c INHERIT t1;
+CREATE TEMP TABLE t2c (primary key (ab)) INHERITS (t2);
+INSERT INTO t1c VALUES ('v', 'w'), ('c', 'd'), ('m', 'n'), ('e', 'f');
+INSERT INTO t2c VALUES ('vw'), ('cd'), ('mn'), ('ef');
+CREATE INDEX t1c_ab_idx on t1c ((a || b));
+set enable_seqscan = on;
+set enable_indexonlyscan = off;
+explain (costs off)
+ SELECT * FROM
+ (SELECT a || b AS ab FROM t1
+ UNION ALL
+ SELECT ab FROM t2) t
+ ORDER BY 1 LIMIT 8;
+ QUERY PLAN
+-----------------------------------------------------
+ Limit
+ -> Merge Append
+ Sort Key: ((t1.a || t1.b))
+ -> Index Scan using t1_ab_idx on t1
+ -> Index Scan using t1c_ab_idx on t1c t1_1
+ -> Index Scan using t2_pkey on t2
+ -> Index Scan using t2c_pkey on t2c t2_1
+(7 rows)
+
+ SELECT * FROM
+ (SELECT a || b AS ab FROM t1
+ UNION ALL
+ SELECT ab FROM t2) t
+ ORDER BY 1 LIMIT 8;
+ ab
+----
+ ab
+ ab
+ cd
+ dc
+ ef
+ fe
+ mn
+ nm
+(8 rows)
+
+reset enable_seqscan;
+reset enable_indexscan;
+reset enable_bitmapscan;
+-- This simpler variant of the above test has been observed to fail differently
+create table events (event_id int primary key);
+create table other_events (event_id int primary key);
+create table events_child () inherits (events);
+explain (costs off)
+select event_id
+ from (select event_id from events
+ union all
+ select event_id from other_events) ss
+ order by event_id;
+ QUERY PLAN
+----------------------------------------------------------
+ Merge Append
+ Sort Key: events.event_id
+ -> Index Scan using events_pkey on events
+ -> Sort
+ Sort Key: events_1.event_id
+ -> Seq Scan on events_child events_1
+ -> Index Scan using other_events_pkey on other_events
+(7 rows)
+
+drop table events_child, events, other_events;
+reset enable_indexonlyscan;
+-- Test constraint exclusion of UNION ALL subqueries
+explain (costs off)
+ SELECT * FROM
+ (SELECT 1 AS t, * FROM tenk1 a
+ UNION ALL
+ SELECT 2 AS t, * FROM tenk1 b) c
+ WHERE t = 2;
+ QUERY PLAN
+---------------------
+ Seq Scan on tenk1 b
+(1 row)
+
+-- Test that we push quals into UNION sub-selects only when it's safe
+explain (costs off)
+SELECT * FROM
+ (SELECT 1 AS t, 2 AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x < 4
+ORDER BY x;
+ QUERY PLAN
+--------------------------------------------------
+ Sort
+ Sort Key: (2)
+ -> Unique
+ -> Sort
+ Sort Key: (1), (2)
+ -> Append
+ -> Result
+ -> Result
+ One-Time Filter: false
+(9 rows)
+
+SELECT * FROM
+ (SELECT 1 AS t, 2 AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x < 4
+ORDER BY x;
+ t | x
+---+---
+ 1 | 2
+(1 row)
+
+explain (costs off)
+SELECT * FROM
+ (SELECT 1 AS t, generate_series(1,10) AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x < 4
+ORDER BY x;
+ QUERY PLAN
+--------------------------------------------------------
+ Sort
+ Sort Key: ss.x
+ -> Subquery Scan on ss
+ Filter: (ss.x < 4)
+ -> HashAggregate
+ Group Key: (1), (generate_series(1, 10))
+ -> Append
+ -> ProjectSet
+ -> Result
+ -> Result
+(10 rows)
+
+SELECT * FROM
+ (SELECT 1 AS t, generate_series(1,10) AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x < 4
+ORDER BY x;
+ t | x
+---+---
+ 1 | 1
+ 1 | 2
+ 1 | 3
+(3 rows)
+
+explain (costs off)
+SELECT * FROM
+ (SELECT 1 AS t, (random()*3)::int AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x > 3
+ORDER BY x;
+ QUERY PLAN
+------------------------------------------------------------------------------------
+ Sort
+ Sort Key: ss.x
+ -> Subquery Scan on ss
+ Filter: (ss.x > 3)
+ -> Unique
+ -> Sort
+ Sort Key: (1), (((random() * '3'::double precision))::integer)
+ -> Append
+ -> Result
+ -> Result
+(10 rows)
+
+SELECT * FROM
+ (SELECT 1 AS t, (random()*3)::int AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x > 3
+ORDER BY x;
+ t | x
+---+---
+ 2 | 4
+(1 row)
+
+-- Test cases where the native ordering of a sub-select has more pathkeys
+-- than the outer query cares about
+explain (costs off)
+select distinct q1 from
+ (select distinct * from int8_tbl i81
+ union all
+ select distinct * from int8_tbl i82) ss
+where q2 = q2;
+ QUERY PLAN
+----------------------------------------------------------
+ Unique
+ -> Merge Append
+ Sort Key: "*SELECT* 1".q1
+ -> Subquery Scan on "*SELECT* 1"
+ -> Unique
+ -> Sort
+ Sort Key: i81.q1, i81.q2
+ -> Seq Scan on int8_tbl i81
+ Filter: (q2 IS NOT NULL)
+ -> Subquery Scan on "*SELECT* 2"
+ -> Unique
+ -> Sort
+ Sort Key: i82.q1, i82.q2
+ -> Seq Scan on int8_tbl i82
+ Filter: (q2 IS NOT NULL)
+(15 rows)
+
+select distinct q1 from
+ (select distinct * from int8_tbl i81
+ union all
+ select distinct * from int8_tbl i82) ss
+where q2 = q2;
+ q1
+------------------
+ 123
+ 4567890123456789
+(2 rows)
+
+explain (costs off)
+select distinct q1 from
+ (select distinct * from int8_tbl i81
+ union all
+ select distinct * from int8_tbl i82) ss
+where -q1 = q2;
+ QUERY PLAN
+--------------------------------------------------------
+ Unique
+ -> Merge Append
+ Sort Key: "*SELECT* 1".q1
+ -> Subquery Scan on "*SELECT* 1"
+ -> Unique
+ -> Sort
+ Sort Key: i81.q1, i81.q2
+ -> Seq Scan on int8_tbl i81
+ Filter: ((- q1) = q2)
+ -> Subquery Scan on "*SELECT* 2"
+ -> Unique
+ -> Sort
+ Sort Key: i82.q1, i82.q2
+ -> Seq Scan on int8_tbl i82
+ Filter: ((- q1) = q2)
+(15 rows)
+
+select distinct q1 from
+ (select distinct * from int8_tbl i81
+ union all
+ select distinct * from int8_tbl i82) ss
+where -q1 = q2;
+ q1
+------------------
+ 4567890123456789
+(1 row)
+
+-- Test proper handling of parameterized appendrel paths when the
+-- potential join qual is expensive
+create function expensivefunc(int) returns int
+language plpgsql immutable strict cost 10000
+as $$begin return $1; end$$;
+create temp table t3 as select generate_series(-1000,1000) as x;
+create index t3i on t3 (expensivefunc(x));
+analyze t3;
+explain (costs off)
+select * from
+ (select * from t3 a union all select * from t3 b) ss
+ join int4_tbl on f1 = expensivefunc(x);
+ QUERY PLAN
+------------------------------------------------------------
+ Nested Loop
+ -> Seq Scan on int4_tbl
+ -> Append
+ -> Index Scan using t3i on t3 a
+ Index Cond: (expensivefunc(x) = int4_tbl.f1)
+ -> Index Scan using t3i on t3 b
+ Index Cond: (expensivefunc(x) = int4_tbl.f1)
+(7 rows)
+
+select * from
+ (select * from t3 a union all select * from t3 b) ss
+ join int4_tbl on f1 = expensivefunc(x);
+ x | f1
+---+----
+ 0 | 0
+ 0 | 0
+(2 rows)
+
+drop table t3;
+drop function expensivefunc(int);
+-- Test handling of appendrel quals that const-simplify into an AND
+explain (costs off)
+select * from
+ (select *, 0 as x from int8_tbl a
+ union all
+ select *, 1 as x from int8_tbl b) ss
+where (x = 0) or (q1 >= q2 and q1 <= q2);
+ QUERY PLAN
+---------------------------------------------
+ Append
+ -> Seq Scan on int8_tbl a
+ -> Seq Scan on int8_tbl b
+ Filter: ((q1 >= q2) AND (q1 <= q2))
+(4 rows)
+
+select * from
+ (select *, 0 as x from int8_tbl a
+ union all
+ select *, 1 as x from int8_tbl b) ss
+where (x = 0) or (q1 >= q2 and q1 <= q2);
+ q1 | q2 | x
+------------------+-------------------+---
+ 123 | 456 | 0
+ 123 | 4567890123456789 | 0
+ 4567890123456789 | 123 | 0
+ 4567890123456789 | 4567890123456789 | 0
+ 4567890123456789 | -4567890123456789 | 0
+ 4567890123456789 | 4567890123456789 | 1
+(6 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/union.sql b/ydb/library/yql/tests/postgresql/original/cases/union.sql
new file mode 100644
index 0000000000..ca8c9b4d12
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/union.sql
@@ -0,0 +1,542 @@
+--
+-- UNION (also INTERSECT, EXCEPT)
+--
+
+-- Simple UNION constructs
+
+SELECT 1 AS two UNION SELECT 2 ORDER BY 1;
+
+SELECT 1 AS one UNION SELECT 1 ORDER BY 1;
+
+SELECT 1 AS two UNION ALL SELECT 2;
+
+SELECT 1 AS two UNION ALL SELECT 1;
+
+SELECT 1 AS three UNION SELECT 2 UNION SELECT 3 ORDER BY 1;
+
+SELECT 1 AS two UNION SELECT 2 UNION SELECT 2 ORDER BY 1;
+
+SELECT 1 AS three UNION SELECT 2 UNION ALL SELECT 2 ORDER BY 1;
+
+SELECT 1.1 AS two UNION SELECT 2.2 ORDER BY 1;
+
+-- Mixed types
+
+SELECT 1.1 AS two UNION SELECT 2 ORDER BY 1;
+
+SELECT 1 AS two UNION SELECT 2.2 ORDER BY 1;
+
+SELECT 1 AS one UNION SELECT 1.0::float8 ORDER BY 1;
+
+SELECT 1.1 AS two UNION ALL SELECT 2 ORDER BY 1;
+
+SELECT 1.0::float8 AS two UNION ALL SELECT 1 ORDER BY 1;
+
+SELECT 1.1 AS three UNION SELECT 2 UNION SELECT 3 ORDER BY 1;
+
+SELECT 1.1::float8 AS two UNION SELECT 2 UNION SELECT 2.0::float8 ORDER BY 1;
+
+SELECT 1.1 AS three UNION SELECT 2 UNION ALL SELECT 2 ORDER BY 1;
+
+SELECT 1.1 AS two UNION (SELECT 2 UNION ALL SELECT 2) ORDER BY 1;
+
+--
+-- Try testing from tables...
+--
+
+SELECT f1 AS five FROM FLOAT8_TBL
+UNION
+SELECT f1 FROM FLOAT8_TBL
+ORDER BY 1;
+
+SELECT f1 AS ten FROM FLOAT8_TBL
+UNION ALL
+SELECT f1 FROM FLOAT8_TBL;
+
+SELECT f1 AS nine FROM FLOAT8_TBL
+UNION
+SELECT f1 FROM INT4_TBL
+ORDER BY 1;
+
+SELECT f1 AS ten FROM FLOAT8_TBL
+UNION ALL
+SELECT f1 FROM INT4_TBL;
+
+SELECT f1 AS five FROM FLOAT8_TBL
+ WHERE f1 BETWEEN -1e6 AND 1e6
+UNION
+SELECT f1 FROM INT4_TBL
+ WHERE f1 BETWEEN 0 AND 1000000
+ORDER BY 1;
+
+SELECT CAST(f1 AS char(4)) AS three FROM VARCHAR_TBL
+UNION
+SELECT f1 FROM CHAR_TBL
+ORDER BY 1;
+
+SELECT f1 AS three FROM VARCHAR_TBL
+UNION
+SELECT CAST(f1 AS varchar) FROM CHAR_TBL
+ORDER BY 1;
+
+SELECT f1 AS eight FROM VARCHAR_TBL
+UNION ALL
+SELECT f1 FROM CHAR_TBL;
+
+SELECT f1 AS five FROM TEXT_TBL
+UNION
+SELECT f1 FROM VARCHAR_TBL
+UNION
+SELECT TRIM(TRAILING FROM f1) FROM CHAR_TBL
+ORDER BY 1;
+
+--
+-- INTERSECT and EXCEPT
+--
+
+SELECT q2 FROM int8_tbl INTERSECT SELECT q1 FROM int8_tbl ORDER BY 1;
+
+SELECT q2 FROM int8_tbl INTERSECT ALL SELECT q1 FROM int8_tbl ORDER BY 1;
+
+SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1;
+
+SELECT q2 FROM int8_tbl EXCEPT ALL SELECT q1 FROM int8_tbl ORDER BY 1;
+
+SELECT q2 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q1 FROM int8_tbl ORDER BY 1;
+
+SELECT q1 FROM int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY 1;
+
+SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q2 FROM int8_tbl ORDER BY 1;
+
+SELECT q1 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q2 FROM int8_tbl ORDER BY 1;
+
+SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q1 FROM int8_tbl FOR NO KEY UPDATE;
+
+-- nested cases
+(SELECT 1,2,3 UNION SELECT 4,5,6) INTERSECT SELECT 4,5,6;
+(SELECT 1,2,3 UNION SELECT 4,5,6 ORDER BY 1,2) INTERSECT SELECT 4,5,6;
+(SELECT 1,2,3 UNION SELECT 4,5,6) EXCEPT SELECT 4,5,6;
+(SELECT 1,2,3 UNION SELECT 4,5,6 ORDER BY 1,2) EXCEPT SELECT 4,5,6;
+
+-- exercise both hashed and sorted implementations of UNION/INTERSECT/EXCEPT
+
+set enable_hashagg to on;
+
+explain (costs off)
+select count(*) from
+ ( select unique1 from tenk1 union select fivethous from tenk1 ) ss;
+select count(*) from
+ ( select unique1 from tenk1 union select fivethous from tenk1 ) ss;
+
+explain (costs off)
+select count(*) from
+ ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
+select count(*) from
+ ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
+
+explain (costs off)
+select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
+select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
+
+set enable_hashagg to off;
+
+explain (costs off)
+select count(*) from
+ ( select unique1 from tenk1 union select fivethous from tenk1 ) ss;
+select count(*) from
+ ( select unique1 from tenk1 union select fivethous from tenk1 ) ss;
+
+explain (costs off)
+select count(*) from
+ ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
+select count(*) from
+ ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
+
+explain (costs off)
+select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
+select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
+
+reset enable_hashagg;
+
+-- non-hashable type
+set enable_hashagg to on;
+
+explain (costs off)
+select x from (values (100::money), (200::money)) _(x) union select x from (values (100::money), (300::money)) _(x);
+
+set enable_hashagg to off;
+
+explain (costs off)
+select x from (values (100::money), (200::money)) _(x) union select x from (values (100::money), (300::money)) _(x);
+
+reset enable_hashagg;
+
+-- arrays
+set enable_hashagg to on;
+
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x);
+select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x);
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x);
+select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x);
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x);
+select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x);
+
+-- non-hashable type
+explain (costs off)
+select x from (values (array[100::money]), (array[200::money])) _(x) union select x from (values (array[100::money]), (array[300::money])) _(x);
+select x from (values (array[100::money]), (array[200::money])) _(x) union select x from (values (array[100::money]), (array[300::money])) _(x);
+
+set enable_hashagg to off;
+
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x);
+select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x);
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x);
+select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x);
+explain (costs off)
+select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x);
+select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x);
+
+reset enable_hashagg;
+
+-- records
+set enable_hashagg to on;
+
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x);
+select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x);
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x);
+select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x);
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x);
+select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x);
+
+-- non-hashable type
+
+-- With an anonymous row type, the typcache does not report that the
+-- type is hashable. (Otherwise, this would fail at execution time.)
+explain (costs off)
+select x from (values (row(100::money)), (row(200::money))) _(x) union select x from (values (row(100::money)), (row(300::money))) _(x);
+select x from (values (row(100::money)), (row(200::money))) _(x) union select x from (values (row(100::money)), (row(300::money))) _(x);
+
+-- With a defined row type, the typcache can inspect the type's fields
+-- for hashability.
+create type ct1 as (f1 money);
+explain (costs off)
+select x from (values (row(100::money)::ct1), (row(200::money)::ct1)) _(x) union select x from (values (row(100::money)::ct1), (row(300::money)::ct1)) _(x);
+select x from (values (row(100::money)::ct1), (row(200::money)::ct1)) _(x) union select x from (values (row(100::money)::ct1), (row(300::money)::ct1)) _(x);
+drop type ct1;
+
+set enable_hashagg to off;
+
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x);
+select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x);
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x);
+select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x);
+explain (costs off)
+select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x);
+select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x);
+
+reset enable_hashagg;
+
+--
+-- Mixed types
+--
+
+SELECT f1 FROM float8_tbl INTERSECT SELECT f1 FROM int4_tbl ORDER BY 1;
+
+SELECT f1 FROM float8_tbl EXCEPT SELECT f1 FROM int4_tbl ORDER BY 1;
+
+--
+-- Operator precedence and (((((extra))))) parentheses
+--
+
+SELECT q1 FROM int8_tbl INTERSECT SELECT q2 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl ORDER BY 1;
+
+SELECT q1 FROM int8_tbl INTERSECT (((SELECT q2 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl))) ORDER BY 1;
+
+(((SELECT q1 FROM int8_tbl INTERSECT SELECT q2 FROM int8_tbl ORDER BY 1))) UNION ALL SELECT q2 FROM int8_tbl;
+
+SELECT q1 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1;
+
+SELECT q1 FROM int8_tbl UNION ALL (((SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1)));
+
+(((SELECT q1 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl))) EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1;
+
+--
+-- Subqueries with ORDER BY & LIMIT clauses
+--
+
+-- In this syntax, ORDER BY/LIMIT apply to the result of the EXCEPT
+SELECT q1,q2 FROM int8_tbl EXCEPT SELECT q2,q1 FROM int8_tbl
+ORDER BY q2,q1;
+
+-- This should fail, because q2 isn't a name of an EXCEPT output column
+SELECT q1 FROM int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1;
+
+-- But this should work:
+SELECT q1 FROM int8_tbl EXCEPT (((SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1))) ORDER BY 1;
+
+--
+-- New syntaxes (7.1) permit new tests
+--
+
+(((((select * from int8_tbl)))));
+
+--
+-- Check behavior with empty select list (allowed since 9.4)
+--
+
+select union select;
+select intersect select;
+select except select;
+
+-- check hashed implementation
+set enable_hashagg = true;
+set enable_sort = false;
+
+explain (costs off)
+select from generate_series(1,5) union select from generate_series(1,3);
+explain (costs off)
+select from generate_series(1,5) intersect select from generate_series(1,3);
+
+select from generate_series(1,5) union select from generate_series(1,3);
+select from generate_series(1,5) union all select from generate_series(1,3);
+select from generate_series(1,5) intersect select from generate_series(1,3);
+select from generate_series(1,5) intersect all select from generate_series(1,3);
+select from generate_series(1,5) except select from generate_series(1,3);
+select from generate_series(1,5) except all select from generate_series(1,3);
+
+-- check sorted implementation
+set enable_hashagg = false;
+set enable_sort = true;
+
+explain (costs off)
+select from generate_series(1,5) union select from generate_series(1,3);
+explain (costs off)
+select from generate_series(1,5) intersect select from generate_series(1,3);
+
+select from generate_series(1,5) union select from generate_series(1,3);
+select from generate_series(1,5) union all select from generate_series(1,3);
+select from generate_series(1,5) intersect select from generate_series(1,3);
+select from generate_series(1,5) intersect all select from generate_series(1,3);
+select from generate_series(1,5) except select from generate_series(1,3);
+select from generate_series(1,5) except all select from generate_series(1,3);
+
+reset enable_hashagg;
+reset enable_sort;
+
+--
+-- Check handling of a case with unknown constants. We don't guarantee
+-- an undecorated constant will work in all cases, but historically this
+-- usage has worked, so test we don't break it.
+--
+
+SELECT a.f1 FROM (SELECT 'test' AS f1 FROM varchar_tbl) a
+UNION
+SELECT b.f1 FROM (SELECT f1 FROM varchar_tbl) b
+ORDER BY 1;
+
+-- This should fail, but it should produce an error cursor
+SELECT '3.4'::numeric UNION SELECT 'foo';
+
+--
+-- Test that expression-index constraints can be pushed down through
+-- UNION or UNION ALL
+--
+
+CREATE TEMP TABLE t1 (a text, b text);
+CREATE INDEX t1_ab_idx on t1 ((a || b));
+CREATE TEMP TABLE t2 (ab text primary key);
+INSERT INTO t1 VALUES ('a', 'b'), ('x', 'y');
+INSERT INTO t2 VALUES ('ab'), ('xy');
+
+set enable_seqscan = off;
+set enable_indexscan = on;
+set enable_bitmapscan = off;
+
+explain (costs off)
+ SELECT * FROM
+ (SELECT a || b AS ab FROM t1
+ UNION ALL
+ SELECT * FROM t2) t
+ WHERE ab = 'ab';
+
+explain (costs off)
+ SELECT * FROM
+ (SELECT a || b AS ab FROM t1
+ UNION
+ SELECT * FROM t2) t
+ WHERE ab = 'ab';
+
+--
+-- Test that ORDER BY for UNION ALL can be pushed down to inheritance
+-- children.
+--
+
+CREATE TEMP TABLE t1c (b text, a text);
+ALTER TABLE t1c INHERIT t1;
+CREATE TEMP TABLE t2c (primary key (ab)) INHERITS (t2);
+INSERT INTO t1c VALUES ('v', 'w'), ('c', 'd'), ('m', 'n'), ('e', 'f');
+INSERT INTO t2c VALUES ('vw'), ('cd'), ('mn'), ('ef');
+CREATE INDEX t1c_ab_idx on t1c ((a || b));
+
+set enable_seqscan = on;
+set enable_indexonlyscan = off;
+
+explain (costs off)
+ SELECT * FROM
+ (SELECT a || b AS ab FROM t1
+ UNION ALL
+ SELECT ab FROM t2) t
+ ORDER BY 1 LIMIT 8;
+
+ SELECT * FROM
+ (SELECT a || b AS ab FROM t1
+ UNION ALL
+ SELECT ab FROM t2) t
+ ORDER BY 1 LIMIT 8;
+
+reset enable_seqscan;
+reset enable_indexscan;
+reset enable_bitmapscan;
+
+-- This simpler variant of the above test has been observed to fail differently
+
+create table events (event_id int primary key);
+create table other_events (event_id int primary key);
+create table events_child () inherits (events);
+
+explain (costs off)
+select event_id
+ from (select event_id from events
+ union all
+ select event_id from other_events) ss
+ order by event_id;
+
+drop table events_child, events, other_events;
+
+reset enable_indexonlyscan;
+
+-- Test constraint exclusion of UNION ALL subqueries
+explain (costs off)
+ SELECT * FROM
+ (SELECT 1 AS t, * FROM tenk1 a
+ UNION ALL
+ SELECT 2 AS t, * FROM tenk1 b) c
+ WHERE t = 2;
+
+-- Test that we push quals into UNION sub-selects only when it's safe
+explain (costs off)
+SELECT * FROM
+ (SELECT 1 AS t, 2 AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x < 4
+ORDER BY x;
+
+SELECT * FROM
+ (SELECT 1 AS t, 2 AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x < 4
+ORDER BY x;
+
+explain (costs off)
+SELECT * FROM
+ (SELECT 1 AS t, generate_series(1,10) AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x < 4
+ORDER BY x;
+
+SELECT * FROM
+ (SELECT 1 AS t, generate_series(1,10) AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x < 4
+ORDER BY x;
+
+explain (costs off)
+SELECT * FROM
+ (SELECT 1 AS t, (random()*3)::int AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x > 3
+ORDER BY x;
+
+SELECT * FROM
+ (SELECT 1 AS t, (random()*3)::int AS x
+ UNION
+ SELECT 2 AS t, 4 AS x) ss
+WHERE x > 3
+ORDER BY x;
+
+-- Test cases where the native ordering of a sub-select has more pathkeys
+-- than the outer query cares about
+explain (costs off)
+select distinct q1 from
+ (select distinct * from int8_tbl i81
+ union all
+ select distinct * from int8_tbl i82) ss
+where q2 = q2;
+
+select distinct q1 from
+ (select distinct * from int8_tbl i81
+ union all
+ select distinct * from int8_tbl i82) ss
+where q2 = q2;
+
+explain (costs off)
+select distinct q1 from
+ (select distinct * from int8_tbl i81
+ union all
+ select distinct * from int8_tbl i82) ss
+where -q1 = q2;
+
+select distinct q1 from
+ (select distinct * from int8_tbl i81
+ union all
+ select distinct * from int8_tbl i82) ss
+where -q1 = q2;
+
+-- Test proper handling of parameterized appendrel paths when the
+-- potential join qual is expensive
+create function expensivefunc(int) returns int
+language plpgsql immutable strict cost 10000
+as $$begin return $1; end$$;
+
+create temp table t3 as select generate_series(-1000,1000) as x;
+create index t3i on t3 (expensivefunc(x));
+analyze t3;
+
+explain (costs off)
+select * from
+ (select * from t3 a union all select * from t3 b) ss
+ join int4_tbl on f1 = expensivefunc(x);
+select * from
+ (select * from t3 a union all select * from t3 b) ss
+ join int4_tbl on f1 = expensivefunc(x);
+
+drop table t3;
+drop function expensivefunc(int);
+
+-- Test handling of appendrel quals that const-simplify into an AND
+explain (costs off)
+select * from
+ (select *, 0 as x from int8_tbl a
+ union all
+ select *, 1 as x from int8_tbl b) ss
+where (x = 0) or (q1 >= q2 and q1 <= q2);
+select * from
+ (select *, 0 as x from int8_tbl a
+ union all
+ select *, 1 as x from int8_tbl b) ss
+where (x = 0) or (q1 >= q2 and q1 <= q2);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/update.out b/ydb/library/yql/tests/postgresql/original/cases/update.out
new file mode 100644
index 0000000000..c809f88f54
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/update.out
@@ -0,0 +1,1028 @@
+--
+-- UPDATE syntax tests
+--
+CREATE TABLE update_test (
+ a INT DEFAULT 10,
+ b INT,
+ c TEXT
+);
+CREATE TABLE upsert_test (
+ a INT PRIMARY KEY,
+ b TEXT
+);
+INSERT INTO update_test VALUES (5, 10, 'foo');
+INSERT INTO update_test(b, a) VALUES (15, 10);
+SELECT * FROM update_test;
+ a | b | c
+----+----+-----
+ 5 | 10 | foo
+ 10 | 15 |
+(2 rows)
+
+UPDATE update_test SET a = DEFAULT, b = DEFAULT;
+SELECT * FROM update_test;
+ a | b | c
+----+---+-----
+ 10 | | foo
+ 10 | |
+(2 rows)
+
+-- aliases for the UPDATE target table
+UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
+SELECT * FROM update_test;
+ a | b | c
+----+----+-----
+ 10 | 10 | foo
+ 10 | 10 |
+(2 rows)
+
+UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
+SELECT * FROM update_test;
+ a | b | c
+----+----+-----
+ 10 | 20 | foo
+ 10 | 20 |
+(2 rows)
+
+--
+-- Test VALUES in FROM
+--
+UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
+ WHERE update_test.b = v.j;
+SELECT * FROM update_test;
+ a | b | c
+-----+----+-----
+ 100 | 20 | foo
+ 100 | 20 |
+(2 rows)
+
+-- fail, wrong data type:
+UPDATE update_test SET a = v.* FROM (VALUES(100, 20)) AS v(i, j)
+ WHERE update_test.b = v.j;
+ERROR: column "a" is of type integer but expression is of type record
+LINE 1: UPDATE update_test SET a = v.* FROM (VALUES(100, 20)) AS v(i...
+ ^
+HINT: You will need to rewrite or cast the expression.
+--
+-- Test multiple-set-clause syntax
+--
+INSERT INTO update_test SELECT a,b+1,c FROM update_test;
+SELECT * FROM update_test;
+ a | b | c
+-----+----+-----
+ 100 | 20 | foo
+ 100 | 20 |
+ 100 | 21 | foo
+ 100 | 21 |
+(4 rows)
+
+UPDATE update_test SET (c,b,a) = ('bugle', b+11, DEFAULT) WHERE c = 'foo';
+SELECT * FROM update_test;
+ a | b | c
+-----+----+-------
+ 100 | 20 |
+ 100 | 21 |
+ 10 | 31 | bugle
+ 10 | 32 | bugle
+(4 rows)
+
+UPDATE update_test SET (c,b) = ('car', a+b), a = a + 1 WHERE a = 10;
+SELECT * FROM update_test;
+ a | b | c
+-----+----+-----
+ 100 | 20 |
+ 100 | 21 |
+ 11 | 41 | car
+ 11 | 42 | car
+(4 rows)
+
+-- fail, multi assignment to same column:
+UPDATE update_test SET (c,b) = ('car', a+b), b = a + 1 WHERE a = 10;
+ERROR: multiple assignments to same column "b"
+-- uncorrelated sub-select:
+UPDATE update_test
+ SET (b,a) = (select a,b from update_test where b = 41 and c = 'car')
+ WHERE a = 100 AND b = 20;
+SELECT * FROM update_test;
+ a | b | c
+-----+----+-----
+ 100 | 21 |
+ 11 | 41 | car
+ 11 | 42 | car
+ 41 | 11 |
+(4 rows)
+
+-- correlated sub-select:
+UPDATE update_test o
+ SET (b,a) = (select a+1,b from update_test i
+ where i.a=o.a and i.b=o.b and i.c is not distinct from o.c);
+SELECT * FROM update_test;
+ a | b | c
+----+-----+-----
+ 21 | 101 |
+ 41 | 12 | car
+ 42 | 12 | car
+ 11 | 42 |
+(4 rows)
+
+-- fail, multiple rows supplied:
+UPDATE update_test SET (b,a) = (select a+1,b from update_test);
+ERROR: more than one row returned by a subquery used as an expression
+-- set to null if no rows supplied:
+UPDATE update_test SET (b,a) = (select a+1,b from update_test where a = 1000)
+ WHERE a = 11;
+SELECT * FROM update_test;
+ a | b | c
+----+-----+-----
+ 21 | 101 |
+ 41 | 12 | car
+ 42 | 12 | car
+ | |
+(4 rows)
+
+-- *-expansion should work in this context:
+UPDATE update_test SET (a,b) = ROW(v.*) FROM (VALUES(21, 100)) AS v(i, j)
+ WHERE update_test.a = v.i;
+-- you might expect this to work, but syntactically it's not a RowExpr:
+UPDATE update_test SET (a,b) = (v.*) FROM (VALUES(21, 101)) AS v(i, j)
+ WHERE update_test.a = v.i;
+ERROR: source for a multiple-column UPDATE item must be a sub-SELECT or ROW() expression
+LINE 1: UPDATE update_test SET (a,b) = (v.*) FROM (VALUES(21, 101)) ...
+ ^
+-- if an alias for the target table is specified, don't allow references
+-- to the original table name
+UPDATE update_test AS t SET b = update_test.b + 10 WHERE t.a = 10;
+ERROR: invalid reference to FROM-clause entry for table "update_test"
+LINE 1: UPDATE update_test AS t SET b = update_test.b + 10 WHERE t.a...
+ ^
+HINT: Perhaps you meant to reference the table alias "t".
+-- Make sure that we can update to a TOASTed value.
+UPDATE update_test SET c = repeat('x', 10000) WHERE c = 'car';
+SELECT a, b, char_length(c) FROM update_test;
+ a | b | char_length
+----+-----+-------------
+ | |
+ 21 | 100 |
+ 41 | 12 | 10000
+ 42 | 12 | 10000
+(4 rows)
+
+-- Check multi-assignment with a Result node to handle a one-time filter.
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE update_test t
+ SET (a, b) = (SELECT b, a FROM update_test s WHERE s.a = t.a)
+ WHERE CURRENT_USER = SESSION_USER;
+ QUERY PLAN
+-------------------------------------------------------------
+ Update on public.update_test t
+ -> Result
+ Output: $1, $2, (SubPlan 1 (returns $1,$2)), t.ctid
+ One-Time Filter: (CURRENT_USER = SESSION_USER)
+ -> Seq Scan on public.update_test t
+ Output: t.a, t.ctid
+ SubPlan 1 (returns $1,$2)
+ -> Seq Scan on public.update_test s
+ Output: s.b, s.a
+ Filter: (s.a = t.a)
+(10 rows)
+
+UPDATE update_test t
+ SET (a, b) = (SELECT b, a FROM update_test s WHERE s.a = t.a)
+ WHERE CURRENT_USER = SESSION_USER;
+SELECT a, b, char_length(c) FROM update_test;
+ a | b | char_length
+-----+----+-------------
+ | |
+ 100 | 21 |
+ 12 | 41 | 10000
+ 12 | 42 | 10000
+(4 rows)
+
+-- Test ON CONFLICT DO UPDATE
+INSERT INTO upsert_test VALUES(1, 'Boo'), (3, 'Zoo');
+-- uncorrelated sub-select:
+WITH aaa AS (SELECT 1 AS a, 'Foo' AS b) INSERT INTO upsert_test
+ VALUES (1, 'Bar') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b, a FROM aaa) RETURNING *;
+ a | b
+---+-----
+ 1 | Foo
+(1 row)
+
+-- correlated sub-select:
+INSERT INTO upsert_test VALUES (1, 'Baz'), (3, 'Zaz') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Correlated', a from upsert_test i WHERE i.a = upsert_test.a)
+ RETURNING *;
+ a | b
+---+-----------------
+ 1 | Foo, Correlated
+ 3 | Zoo, Correlated
+(2 rows)
+
+-- correlated sub-select (EXCLUDED.* alias):
+INSERT INTO upsert_test VALUES (1, 'Bat'), (3, 'Zot') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a)
+ RETURNING *;
+ a | b
+---+---------------------------
+ 1 | Foo, Correlated, Excluded
+ 3 | Zoo, Correlated, Excluded
+(2 rows)
+
+-- ON CONFLICT using system attributes in RETURNING, testing both the
+-- inserting and updating paths. See bug report at:
+-- https://www.postgresql.org/message-id/73436355-6432-49B1-92ED-1FE4F7E7E100%40finefun.com.au
+INSERT INTO upsert_test VALUES (2, 'Beeble') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a)
+ RETURNING tableoid::regclass, xmin = pg_current_xact_id()::xid AS xmin_correct, xmax = 0 AS xmax_correct;
+ tableoid | xmin_correct | xmax_correct
+-------------+--------------+--------------
+ upsert_test | t | t
+(1 row)
+
+-- currently xmax is set after a conflict - that's probably not good,
+-- but it seems worthwhile to have to be explicit if that changes.
+INSERT INTO upsert_test VALUES (2, 'Brox') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a)
+ RETURNING tableoid::regclass, xmin = pg_current_xact_id()::xid AS xmin_correct, xmax = pg_current_xact_id()::xid AS xmax_correct;
+ tableoid | xmin_correct | xmax_correct
+-------------+--------------+--------------
+ upsert_test | t | t
+(1 row)
+
+DROP TABLE update_test;
+DROP TABLE upsert_test;
+-- Test ON CONFLICT DO UPDATE with partitioned table and non-identical children
+CREATE TABLE upsert_test (
+ a INT PRIMARY KEY,
+ b TEXT
+) PARTITION BY LIST (a);
+CREATE TABLE upsert_test_1 PARTITION OF upsert_test FOR VALUES IN (1);
+CREATE TABLE upsert_test_2 (b TEXT, a INT PRIMARY KEY);
+ALTER TABLE upsert_test ATTACH PARTITION upsert_test_2 FOR VALUES IN (2);
+INSERT INTO upsert_test VALUES(1, 'Boo'), (2, 'Zoo');
+-- uncorrelated sub-select:
+WITH aaa AS (SELECT 1 AS a, 'Foo' AS b) INSERT INTO upsert_test
+ VALUES (1, 'Bar') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b, a FROM aaa) RETURNING *;
+ a | b
+---+-----
+ 1 | Foo
+(1 row)
+
+-- correlated sub-select:
+WITH aaa AS (SELECT 1 AS ctea, ' Foo' AS cteb) INSERT INTO upsert_test
+ VALUES (1, 'Bar'), (2, 'Baz') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT upsert_test.b||cteb, upsert_test.a FROM aaa) RETURNING *;
+ a | b
+---+---------
+ 1 | Foo Foo
+ 2 | Zoo Foo
+(2 rows)
+
+DROP TABLE upsert_test;
+---------------------------
+-- UPDATE with row movement
+---------------------------
+-- When a partitioned table receives an UPDATE to the partitioned key and the
+-- new values no longer meet the partition's bound, the row must be moved to
+-- the correct partition for the new partition key (if one exists). We must
+-- also ensure that updatable views on partitioned tables properly enforce any
+-- WITH CHECK OPTION that is defined. The situation with triggers in this case
+-- also requires thorough testing as partition key updates causing row
+-- movement convert UPDATEs into DELETE+INSERT.
+CREATE TABLE range_parted (
+ a text,
+ b bigint,
+ c numeric,
+ d int,
+ e varchar
+) PARTITION BY RANGE (a, b);
+-- Create partitions intentionally in descending bound order, so as to test
+-- that update-row-movement works with the leaf partitions not in bound order.
+CREATE TABLE part_b_20_b_30 (e varchar, c numeric, a text, b bigint, d int);
+ALTER TABLE range_parted ATTACH PARTITION part_b_20_b_30 FOR VALUES FROM ('b', 20) TO ('b', 30);
+CREATE TABLE part_b_10_b_20 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY RANGE (c);
+CREATE TABLE part_b_1_b_10 PARTITION OF range_parted FOR VALUES FROM ('b', 1) TO ('b', 10);
+ALTER TABLE range_parted ATTACH PARTITION part_b_10_b_20 FOR VALUES FROM ('b', 10) TO ('b', 20);
+CREATE TABLE part_a_10_a_20 PARTITION OF range_parted FOR VALUES FROM ('a', 10) TO ('a', 20);
+CREATE TABLE part_a_1_a_10 PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('a', 10);
+-- Check that partition-key UPDATE works sanely on a partitioned table that
+-- does not have any child partitions.
+UPDATE part_b_10_b_20 set b = b - 6;
+-- Create some more partitions following the above pattern of descending bound
+-- order, but let's make the situation a bit more complex by having the
+-- attribute numbers of the columns vary from their parent partition.
+CREATE TABLE part_c_100_200 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY range (abs(d));
+ALTER TABLE part_c_100_200 DROP COLUMN e, DROP COLUMN c, DROP COLUMN a;
+ALTER TABLE part_c_100_200 ADD COLUMN c numeric, ADD COLUMN e varchar, ADD COLUMN a text;
+ALTER TABLE part_c_100_200 DROP COLUMN b;
+ALTER TABLE part_c_100_200 ADD COLUMN b bigint;
+CREATE TABLE part_d_1_15 PARTITION OF part_c_100_200 FOR VALUES FROM (1) TO (15);
+CREATE TABLE part_d_15_20 PARTITION OF part_c_100_200 FOR VALUES FROM (15) TO (20);
+ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_100_200 FOR VALUES FROM (100) TO (200);
+CREATE TABLE part_c_1_100 (e varchar, d int, c numeric, b bigint, a text);
+ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_1_100 FOR VALUES FROM (1) TO (100);
+\set init_range_parted 'truncate range_parted; insert into range_parted VALUES (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)'
+\set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted ORDER BY 1, 2, 3, 4, 5, 6'
+:init_range_parted;
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 200 | 1 |
+ part_a_1_a_10 | a | 1 | 1 | 1 |
+ part_c_1_100 | b | 12 | 96 | 1 |
+ part_c_1_100 | b | 13 | 97 | 2 |
+ part_d_15_20 | b | 15 | 105 | 16 |
+ part_d_15_20 | b | 17 | 105 | 19 |
+(6 rows)
+
+-- The order of subplans should be in bound order
+EXPLAIN (costs off) UPDATE range_parted set c = c - 50 WHERE c > 97;
+ QUERY PLAN
+-------------------------------------------------------
+ Update on range_parted
+ Update on part_a_1_a_10 range_parted_1
+ Update on part_a_10_a_20 range_parted_2
+ Update on part_b_1_b_10 range_parted_3
+ Update on part_c_1_100 range_parted_4
+ Update on part_d_1_15 range_parted_5
+ Update on part_d_15_20 range_parted_6
+ Update on part_b_20_b_30 range_parted_7
+ -> Append
+ -> Seq Scan on part_a_1_a_10 range_parted_1
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_a_10_a_20 range_parted_2
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_b_1_b_10 range_parted_3
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_c_1_100 range_parted_4
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_d_1_15 range_parted_5
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_d_15_20 range_parted_6
+ Filter: (c > '97'::numeric)
+ -> Seq Scan on part_b_20_b_30 range_parted_7
+ Filter: (c > '97'::numeric)
+(23 rows)
+
+-- fail, row movement happens only within the partition subtree.
+UPDATE part_c_100_200 set c = c - 20, d = c WHERE c = 105;
+ERROR: new row for relation "part_c_100_200" violates partition constraint
+DETAIL: Failing row contains (105, 85, null, b, 15).
+-- fail, no partition key update, so no attempt to move tuple,
+-- but "a = 'a'" violates partition constraint enforced by root partition)
+UPDATE part_b_10_b_20 set a = 'a';
+ERROR: new row for relation "part_b_10_b_20" violates partition constraint
+DETAIL: Failing row contains (null, 96, a, 12, 1).
+-- ok, partition key update, no constraint violation
+UPDATE range_parted set d = d - 10 WHERE d > 10;
+-- ok, no partition key update, no constraint violation
+UPDATE range_parted set e = d;
+-- No row found
+UPDATE part_c_1_100 set c = c + 20 WHERE c = 98;
+-- ok, row movement
+UPDATE part_b_10_b_20 set c = c + 20 returning c, b, a;
+ c | b | a
+-----+----+---
+ 116 | 12 | b
+ 117 | 13 | b
+ 125 | 15 | b
+ 125 | 17 | b
+(4 rows)
+
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+---+---
+ part_a_10_a_20 | a | 10 | 200 | 1 | 1
+ part_a_1_a_10 | a | 1 | 1 | 1 | 1
+ part_d_1_15 | b | 12 | 116 | 1 | 1
+ part_d_1_15 | b | 13 | 117 | 2 | 2
+ part_d_1_15 | b | 15 | 125 | 6 | 6
+ part_d_1_15 | b | 17 | 125 | 9 | 9
+(6 rows)
+
+-- fail, row movement happens only within the partition subtree.
+UPDATE part_b_10_b_20 set b = b - 6 WHERE c > 116 returning *;
+ERROR: new row for relation "part_b_10_b_20" violates partition constraint
+DETAIL: Failing row contains (2, 117, b, 7, 2).
+-- ok, row movement, with subset of rows moved into different partition.
+UPDATE range_parted set b = b - 6 WHERE c > 116 returning a, b + c;
+ a | ?column?
+---+----------
+ a | 204
+ b | 124
+ b | 134
+ b | 136
+(4 rows)
+
+:show_data;
+ partname | a | b | c | d | e
+---------------+---+----+-----+---+---
+ part_a_1_a_10 | a | 1 | 1 | 1 | 1
+ part_a_1_a_10 | a | 4 | 200 | 1 | 1
+ part_b_1_b_10 | b | 7 | 117 | 2 | 2
+ part_b_1_b_10 | b | 9 | 125 | 6 | 6
+ part_d_1_15 | b | 11 | 125 | 9 | 9
+ part_d_1_15 | b | 12 | 116 | 1 | 1
+(6 rows)
+
+-- Common table needed for multiple test scenarios.
+CREATE TABLE mintab(c1 int);
+INSERT into mintab VALUES (120);
+-- update partition key using updatable view.
+CREATE VIEW upview AS SELECT * FROM range_parted WHERE (select c > c1 FROM mintab) WITH CHECK OPTION;
+-- ok
+UPDATE upview set c = 199 WHERE b = 4;
+-- fail, check option violation
+UPDATE upview set c = 120 WHERE b = 4;
+ERROR: new row violates check option for view "upview"
+DETAIL: Failing row contains (a, 4, 120, 1, 1).
+-- fail, row movement with check option violation
+UPDATE upview set a = 'b', b = 15, c = 120 WHERE b = 4;
+ERROR: new row violates check option for view "upview"
+DETAIL: Failing row contains (b, 15, 120, 1, 1).
+-- ok, row movement, check option passes
+UPDATE upview set a = 'b', b = 15 WHERE b = 4;
+:show_data;
+ partname | a | b | c | d | e
+---------------+---+----+-----+---+---
+ part_a_1_a_10 | a | 1 | 1 | 1 | 1
+ part_b_1_b_10 | b | 7 | 117 | 2 | 2
+ part_b_1_b_10 | b | 9 | 125 | 6 | 6
+ part_d_1_15 | b | 11 | 125 | 9 | 9
+ part_d_1_15 | b | 12 | 116 | 1 | 1
+ part_d_1_15 | b | 15 | 199 | 1 | 1
+(6 rows)
+
+-- cleanup
+DROP VIEW upview;
+-- RETURNING having whole-row vars.
+:init_range_parted;
+UPDATE range_parted set c = 95 WHERE a = 'b' and b > 10 and c > 100 returning (range_parted), *;
+ range_parted | a | b | c | d | e
+---------------+---+----+----+----+---
+ (b,15,95,16,) | b | 15 | 95 | 16 |
+ (b,17,95,19,) | b | 17 | 95 | 19 |
+(2 rows)
+
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 200 | 1 |
+ part_a_1_a_10 | a | 1 | 1 | 1 |
+ part_c_1_100 | b | 12 | 96 | 1 |
+ part_c_1_100 | b | 13 | 97 | 2 |
+ part_c_1_100 | b | 15 | 95 | 16 |
+ part_c_1_100 | b | 17 | 95 | 19 |
+(6 rows)
+
+-- Transition tables with update row movement
+:init_range_parted;
+CREATE FUNCTION trans_updatetrigfunc() RETURNS trigger LANGUAGE plpgsql AS
+$$
+ begin
+ raise notice 'trigger = %, old table = %, new table = %',
+ TG_NAME,
+ (select string_agg(old_table::text, ', ' ORDER BY a) FROM old_table),
+ (select string_agg(new_table::text, ', ' ORDER BY a) FROM new_table);
+ return null;
+ end;
+$$;
+CREATE TRIGGER trans_updatetrig
+ AFTER UPDATE ON range_parted REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc();
+UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end ) WHERE a = 'b' and b > 10 and c >= 96;
+NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,110,1,), (b,13,98,2,), (b,15,106,16,), (b,17,106,19,)
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 200 | 1 |
+ part_a_1_a_10 | a | 1 | 1 | 1 |
+ part_c_1_100 | b | 13 | 98 | 2 |
+ part_d_15_20 | b | 15 | 106 | 16 |
+ part_d_15_20 | b | 17 | 106 | 19 |
+ part_d_1_15 | b | 12 | 110 | 1 |
+(6 rows)
+
+:init_range_parted;
+-- Enabling OLD TABLE capture for both DELETE as well as UPDATE stmt triggers
+-- should not cause DELETEd rows to be captured twice. Similar thing for
+-- INSERT triggers and inserted rows.
+CREATE TRIGGER trans_deletetrig
+ AFTER DELETE ON range_parted REFERENCING OLD TABLE AS old_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc();
+CREATE TRIGGER trans_inserttrig
+ AFTER INSERT ON range_parted REFERENCING NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc();
+UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96;
+NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,146,1,), (b,13,147,2,), (b,15,155,16,), (b,17,155,19,)
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 200 | 1 |
+ part_a_1_a_10 | a | 1 | 1 | 1 |
+ part_d_15_20 | b | 15 | 155 | 16 |
+ part_d_15_20 | b | 17 | 155 | 19 |
+ part_d_1_15 | b | 12 | 146 | 1 |
+ part_d_1_15 | b | 13 | 147 | 2 |
+(6 rows)
+
+DROP TRIGGER trans_deletetrig ON range_parted;
+DROP TRIGGER trans_inserttrig ON range_parted;
+-- Don't drop trans_updatetrig yet. It is required below.
+-- Test with transition tuple conversion happening for rows moved into the
+-- new partition. This requires a trigger that references transition table
+-- (we already have trans_updatetrig). For inserted rows, the conversion
+-- is not usually needed, because the original tuple is already compatible with
+-- the desired transition tuple format. But conversion happens when there is a
+-- BR trigger because the trigger can change the inserted row. So install a
+-- BR triggers on those child partitions where the rows will be moved.
+CREATE FUNCTION func_parted_mod_b() RETURNS trigger AS $$
+BEGIN
+ NEW.b = NEW.b + 1;
+ return NEW;
+END $$ language plpgsql;
+CREATE TRIGGER trig_c1_100 BEFORE UPDATE OR INSERT ON part_c_1_100
+ FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b();
+CREATE TRIGGER trig_d1_15 BEFORE UPDATE OR INSERT ON part_d_1_15
+ FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b();
+CREATE TRIGGER trig_d15_20 BEFORE UPDATE OR INSERT ON part_d_15_20
+ FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b();
+:init_range_parted;
+UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end) WHERE a = 'b' and b > 10 and c >= 96;
+NOTICE: trigger = trans_updatetrig, old table = (b,13,96,1,), (b,14,97,2,), (b,16,105,16,), (b,18,105,19,), new table = (b,15,110,1,), (b,15,98,2,), (b,17,106,16,), (b,19,106,19,)
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 200 | 1 |
+ part_a_1_a_10 | a | 1 | 1 | 1 |
+ part_c_1_100 | b | 15 | 98 | 2 |
+ part_d_15_20 | b | 17 | 106 | 16 |
+ part_d_15_20 | b | 19 | 106 | 19 |
+ part_d_1_15 | b | 15 | 110 | 1 |
+(6 rows)
+
+:init_range_parted;
+UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96;
+NOTICE: trigger = trans_updatetrig, old table = (b,13,96,1,), (b,14,97,2,), (b,16,105,16,), (b,18,105,19,), new table = (b,15,146,1,), (b,16,147,2,), (b,17,155,16,), (b,19,155,19,)
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 200 | 1 |
+ part_a_1_a_10 | a | 1 | 1 | 1 |
+ part_d_15_20 | b | 17 | 155 | 16 |
+ part_d_15_20 | b | 19 | 155 | 19 |
+ part_d_1_15 | b | 15 | 146 | 1 |
+ part_d_1_15 | b | 16 | 147 | 2 |
+(6 rows)
+
+-- Case where per-partition tuple conversion map array is allocated, but the
+-- map is not required for the particular tuple that is routed, thanks to
+-- matching table attributes of the partition and the target table.
+:init_range_parted;
+UPDATE range_parted set b = 15 WHERE b = 1;
+NOTICE: trigger = trans_updatetrig, old table = (a,1,1,1,), new table = (a,15,1,1,)
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 200 | 1 |
+ part_a_10_a_20 | a | 15 | 1 | 1 |
+ part_c_1_100 | b | 13 | 96 | 1 |
+ part_c_1_100 | b | 14 | 97 | 2 |
+ part_d_15_20 | b | 16 | 105 | 16 |
+ part_d_15_20 | b | 18 | 105 | 19 |
+(6 rows)
+
+DROP TRIGGER trans_updatetrig ON range_parted;
+DROP TRIGGER trig_c1_100 ON part_c_1_100;
+DROP TRIGGER trig_d1_15 ON part_d_1_15;
+DROP TRIGGER trig_d15_20 ON part_d_15_20;
+DROP FUNCTION func_parted_mod_b();
+-- RLS policies with update-row-movement
+-----------------------------------------
+ALTER TABLE range_parted ENABLE ROW LEVEL SECURITY;
+CREATE USER regress_range_parted_user;
+GRANT ALL ON range_parted, mintab TO regress_range_parted_user;
+CREATE POLICY seeall ON range_parted AS PERMISSIVE FOR SELECT USING (true);
+CREATE POLICY policy_range_parted ON range_parted for UPDATE USING (true) WITH CHECK (c % 2 = 0);
+:init_range_parted;
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- This should fail with RLS violation error while moving row from
+-- part_a_10_a_20 to part_d_1_15, because we are setting 'c' to an odd number.
+UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200;
+ERROR: new row violates row-level security policy for table "range_parted"
+RESET SESSION AUTHORIZATION;
+-- Create a trigger on part_d_1_15
+CREATE FUNCTION func_d_1_15() RETURNS trigger AS $$
+BEGIN
+ NEW.c = NEW.c + 1; -- Make even numbers odd, or vice versa
+ return NEW;
+END $$ LANGUAGE plpgsql;
+CREATE TRIGGER trig_d_1_15 BEFORE INSERT ON part_d_1_15
+ FOR EACH ROW EXECUTE PROCEDURE func_d_1_15();
+:init_range_parted;
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- Here, RLS checks should succeed while moving row from part_a_10_a_20 to
+-- part_d_1_15. Even though the UPDATE is setting 'c' to an odd number, the
+-- trigger at the destination partition again makes it an even number.
+UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200;
+RESET SESSION AUTHORIZATION;
+:init_range_parted;
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- This should fail with RLS violation error. Even though the UPDATE is setting
+-- 'c' to an even number, the trigger at the destination partition again makes
+-- it an odd number.
+UPDATE range_parted set a = 'b', c = 150 WHERE a = 'a' and c = 200;
+ERROR: new row violates row-level security policy for table "range_parted"
+-- Cleanup
+RESET SESSION AUTHORIZATION;
+DROP TRIGGER trig_d_1_15 ON part_d_1_15;
+DROP FUNCTION func_d_1_15();
+-- Policy expression contains SubPlan
+RESET SESSION AUTHORIZATION;
+:init_range_parted;
+CREATE POLICY policy_range_parted_subplan on range_parted
+ AS RESTRICTIVE for UPDATE USING (true)
+ WITH CHECK ((SELECT range_parted.c <= c1 FROM mintab));
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- fail, mintab has row with c1 = 120
+UPDATE range_parted set a = 'b', c = 122 WHERE a = 'a' and c = 200;
+ERROR: new row violates row-level security policy "policy_range_parted_subplan" for table "range_parted"
+-- ok
+UPDATE range_parted set a = 'b', c = 120 WHERE a = 'a' and c = 200;
+-- RLS policy expression contains whole row.
+RESET SESSION AUTHORIZATION;
+:init_range_parted;
+CREATE POLICY policy_range_parted_wholerow on range_parted AS RESTRICTIVE for UPDATE USING (true)
+ WITH CHECK (range_parted = row('b', 10, 112, 1, NULL)::range_parted);
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- ok, should pass the RLS check
+UPDATE range_parted set a = 'b', c = 112 WHERE a = 'a' and c = 200;
+RESET SESSION AUTHORIZATION;
+:init_range_parted;
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- fail, the whole row RLS check should fail
+UPDATE range_parted set a = 'b', c = 116 WHERE a = 'a' and c = 200;
+ERROR: new row violates row-level security policy "policy_range_parted_wholerow" for table "range_parted"
+-- Cleanup
+RESET SESSION AUTHORIZATION;
+DROP POLICY policy_range_parted ON range_parted;
+DROP POLICY policy_range_parted_subplan ON range_parted;
+DROP POLICY policy_range_parted_wholerow ON range_parted;
+REVOKE ALL ON range_parted, mintab FROM regress_range_parted_user;
+DROP USER regress_range_parted_user;
+DROP TABLE mintab;
+-- statement triggers with update row movement
+---------------------------------------------------
+:init_range_parted;
+CREATE FUNCTION trigfunc() returns trigger language plpgsql as
+$$
+ begin
+ raise notice 'trigger = % fired on table % during %',
+ TG_NAME, TG_TABLE_NAME, TG_OP;
+ return null;
+ end;
+$$;
+-- Triggers on root partition
+CREATE TRIGGER parent_delete_trig
+ AFTER DELETE ON range_parted for each statement execute procedure trigfunc();
+CREATE TRIGGER parent_update_trig
+ AFTER UPDATE ON range_parted for each statement execute procedure trigfunc();
+CREATE TRIGGER parent_insert_trig
+ AFTER INSERT ON range_parted for each statement execute procedure trigfunc();
+-- Triggers on leaf partition part_c_1_100
+CREATE TRIGGER c1_delete_trig
+ AFTER DELETE ON part_c_1_100 for each statement execute procedure trigfunc();
+CREATE TRIGGER c1_update_trig
+ AFTER UPDATE ON part_c_1_100 for each statement execute procedure trigfunc();
+CREATE TRIGGER c1_insert_trig
+ AFTER INSERT ON part_c_1_100 for each statement execute procedure trigfunc();
+-- Triggers on leaf partition part_d_1_15
+CREATE TRIGGER d1_delete_trig
+ AFTER DELETE ON part_d_1_15 for each statement execute procedure trigfunc();
+CREATE TRIGGER d1_update_trig
+ AFTER UPDATE ON part_d_1_15 for each statement execute procedure trigfunc();
+CREATE TRIGGER d1_insert_trig
+ AFTER INSERT ON part_d_1_15 for each statement execute procedure trigfunc();
+-- Triggers on leaf partition part_d_15_20
+CREATE TRIGGER d15_delete_trig
+ AFTER DELETE ON part_d_15_20 for each statement execute procedure trigfunc();
+CREATE TRIGGER d15_update_trig
+ AFTER UPDATE ON part_d_15_20 for each statement execute procedure trigfunc();
+CREATE TRIGGER d15_insert_trig
+ AFTER INSERT ON part_d_15_20 for each statement execute procedure trigfunc();
+-- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or
+-- insert statement triggers should be fired.
+UPDATE range_parted set c = c - 50 WHERE c > 97;
+NOTICE: trigger = parent_update_trig fired on table range_parted during UPDATE
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 150 | 1 |
+ part_a_1_a_10 | a | 1 | 1 | 1 |
+ part_c_1_100 | b | 12 | 96 | 1 |
+ part_c_1_100 | b | 13 | 97 | 2 |
+ part_c_1_100 | b | 15 | 55 | 16 |
+ part_c_1_100 | b | 17 | 55 | 19 |
+(6 rows)
+
+DROP TRIGGER parent_delete_trig ON range_parted;
+DROP TRIGGER parent_update_trig ON range_parted;
+DROP TRIGGER parent_insert_trig ON range_parted;
+DROP TRIGGER c1_delete_trig ON part_c_1_100;
+DROP TRIGGER c1_update_trig ON part_c_1_100;
+DROP TRIGGER c1_insert_trig ON part_c_1_100;
+DROP TRIGGER d1_delete_trig ON part_d_1_15;
+DROP TRIGGER d1_update_trig ON part_d_1_15;
+DROP TRIGGER d1_insert_trig ON part_d_1_15;
+DROP TRIGGER d15_delete_trig ON part_d_15_20;
+DROP TRIGGER d15_update_trig ON part_d_15_20;
+DROP TRIGGER d15_insert_trig ON part_d_15_20;
+-- Creating default partition for range
+:init_range_parted;
+create table part_def partition of range_parted default;
+\d+ part_def
+ Table "public.part_def"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+-------------------+-----------+----------+---------+----------+--------------+-------------
+ a | text | | | | extended | |
+ b | bigint | | | | plain | |
+ c | numeric | | | | main | |
+ d | integer | | | | plain | |
+ e | character varying | | | | extended | |
+Partition of: range_parted DEFAULT
+Partition constraint: (NOT ((a IS NOT NULL) AND (b IS NOT NULL) AND (((a = 'a'::text) AND (b >= '1'::bigint) AND (b < '10'::bigint)) OR ((a = 'a'::text) AND (b >= '10'::bigint) AND (b < '20'::bigint)) OR ((a = 'b'::text) AND (b >= '1'::bigint) AND (b < '10'::bigint)) OR ((a = 'b'::text) AND (b >= '10'::bigint) AND (b < '20'::bigint)) OR ((a = 'b'::text) AND (b >= '20'::bigint) AND (b < '30'::bigint)))))
+
+insert into range_parted values ('c', 9);
+-- ok
+update part_def set a = 'd' where a = 'c';
+-- fail
+update part_def set a = 'a' where a = 'd';
+ERROR: new row for relation "part_def" violates partition constraint
+DETAIL: Failing row contains (a, 9, null, null, null).
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 200 | 1 |
+ part_a_1_a_10 | a | 1 | 1 | 1 |
+ part_c_1_100 | b | 12 | 96 | 1 |
+ part_c_1_100 | b | 13 | 97 | 2 |
+ part_d_15_20 | b | 15 | 105 | 16 |
+ part_d_15_20 | b | 17 | 105 | 19 |
+ part_def | d | 9 | | |
+(7 rows)
+
+-- Update row movement from non-default to default partition.
+-- fail, default partition is not under part_a_10_a_20;
+UPDATE part_a_10_a_20 set a = 'ad' WHERE a = 'a';
+ERROR: new row for relation "part_a_10_a_20" violates partition constraint
+DETAIL: Failing row contains (ad, 10, 200, 1, null).
+-- ok
+UPDATE range_parted set a = 'ad' WHERE a = 'a';
+UPDATE range_parted set a = 'bd' WHERE a = 'b';
+:show_data;
+ partname | a | b | c | d | e
+----------+----+----+-----+----+---
+ part_def | ad | 1 | 1 | 1 |
+ part_def | ad | 10 | 200 | 1 |
+ part_def | bd | 12 | 96 | 1 |
+ part_def | bd | 13 | 97 | 2 |
+ part_def | bd | 15 | 105 | 16 |
+ part_def | bd | 17 | 105 | 19 |
+ part_def | d | 9 | | |
+(7 rows)
+
+-- Update row movement from default to non-default partitions.
+-- ok
+UPDATE range_parted set a = 'a' WHERE a = 'ad';
+UPDATE range_parted set a = 'b' WHERE a = 'bd';
+:show_data;
+ partname | a | b | c | d | e
+----------------+---+----+-----+----+---
+ part_a_10_a_20 | a | 10 | 200 | 1 |
+ part_a_1_a_10 | a | 1 | 1 | 1 |
+ part_c_1_100 | b | 12 | 96 | 1 |
+ part_c_1_100 | b | 13 | 97 | 2 |
+ part_d_15_20 | b | 15 | 105 | 16 |
+ part_d_15_20 | b | 17 | 105 | 19 |
+ part_def | d | 9 | | |
+(7 rows)
+
+-- Cleanup: range_parted no longer needed.
+DROP TABLE range_parted;
+CREATE TABLE list_parted (
+ a text,
+ b int
+) PARTITION BY list (a);
+CREATE TABLE list_part1 PARTITION OF list_parted for VALUES in ('a', 'b');
+CREATE TABLE list_default PARTITION OF list_parted default;
+INSERT into list_part1 VALUES ('a', 1);
+INSERT into list_default VALUES ('d', 10);
+-- fail
+UPDATE list_default set a = 'a' WHERE a = 'd';
+ERROR: new row for relation "list_default" violates partition constraint
+DETAIL: Failing row contains (a, 10).
+-- ok
+UPDATE list_default set a = 'x' WHERE a = 'd';
+DROP TABLE list_parted;
+-- Test retrieval of system columns with non-consistent partition row types.
+-- This is only partially supported, as seen in the results.
+create table utrtest (a int, b text) partition by list (a);
+create table utr1 (a int check (a in (1)), q text, b text);
+create table utr2 (a int check (a in (2)), b text);
+alter table utr1 drop column q;
+alter table utrtest attach partition utr1 for values in (1);
+alter table utrtest attach partition utr2 for values in (2);
+insert into utrtest values (1, 'foo')
+ returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok;
+ a | b | tableoid | xmin_ok
+---+-----+----------+---------
+ 1 | foo | utr1 | t
+(1 row)
+
+insert into utrtest values (2, 'bar')
+ returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok; -- fails
+ERROR: cannot retrieve a system column in this context
+insert into utrtest values (2, 'bar')
+ returning *, tableoid::regclass;
+ a | b | tableoid
+---+-----+----------
+ 2 | bar | utr2
+(1 row)
+
+update utrtest set b = b || b from (values (1), (2)) s(x) where a = s.x
+ returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok;
+ a | b | x | tableoid | xmin_ok
+---+--------+---+----------+---------
+ 1 | foofoo | 1 | utr1 | t
+ 2 | barbar | 2 | utr2 | t
+(2 rows)
+
+update utrtest set a = 3 - a from (values (1), (2)) s(x) where a = s.x
+ returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok; -- fails
+ERROR: cannot retrieve a system column in this context
+update utrtest set a = 3 - a from (values (1), (2)) s(x) where a = s.x
+ returning *, tableoid::regclass;
+ a | b | x | tableoid
+---+--------+---+----------
+ 2 | foofoo | 1 | utr2
+ 1 | barbar | 2 | utr1
+(2 rows)
+
+delete from utrtest
+ returning *, tableoid::regclass, xmax = pg_current_xact_id()::xid as xmax_ok;
+ a | b | tableoid | xmax_ok
+---+--------+----------+---------
+ 1 | barbar | utr1 | t
+ 2 | foofoo | utr2 | t
+(2 rows)
+
+drop table utrtest;
+--------------
+-- Some more update-partition-key test scenarios below. This time use list
+-- partitions.
+--------------
+-- Setup for list partitions
+CREATE TABLE list_parted (a numeric, b int, c int8) PARTITION BY list (a);
+CREATE TABLE sub_parted PARTITION OF list_parted for VALUES in (1) PARTITION BY list (b);
+CREATE TABLE sub_part1(b int, c int8, a numeric);
+ALTER TABLE sub_parted ATTACH PARTITION sub_part1 for VALUES in (1);
+CREATE TABLE sub_part2(b int, c int8, a numeric);
+ALTER TABLE sub_parted ATTACH PARTITION sub_part2 for VALUES in (2);
+CREATE TABLE list_part1(a numeric, b int, c int8);
+ALTER TABLE list_parted ATTACH PARTITION list_part1 for VALUES in (2,3);
+INSERT into list_parted VALUES (2,5,50);
+INSERT into list_parted VALUES (3,6,60);
+INSERT into sub_parted VALUES (1,1,60);
+INSERT into sub_parted VALUES (1,2,10);
+-- Test partition constraint violation when intermediate ancestor is used and
+-- constraint is inherited from upper root.
+UPDATE sub_parted set a = 2 WHERE c = 10;
+ERROR: new row for relation "sub_parted" violates partition constraint
+DETAIL: Failing row contains (2, 2, 10).
+-- Test update-partition-key, where the unpruned partitions do not have their
+-- partition keys updated.
+SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1;
+ tableoid | a | b | c
+------------+---+---+----
+ list_part1 | 2 | 5 | 50
+(1 row)
+
+UPDATE list_parted set b = c + a WHERE a = 2;
+SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 52 | 50
+(1 row)
+
+-- Test the case where BR UPDATE triggers change the partition key.
+CREATE FUNCTION func_parted_mod_b() returns trigger as $$
+BEGIN
+ NEW.b = 2; -- This is changing partition key column.
+ return NEW;
+END $$ LANGUAGE plpgsql;
+CREATE TRIGGER parted_mod_b before update on sub_part1
+ for each row execute procedure func_parted_mod_b();
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 52 | 50
+ list_part1 | 3 | 6 | 60
+ sub_part1 | 1 | 1 | 60
+ sub_part2 | 1 | 2 | 10
+(4 rows)
+
+-- This should do the tuple routing even though there is no explicit
+-- partition-key update, because there is a trigger on sub_part1.
+UPDATE list_parted set c = 70 WHERE b = 1;
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 52 | 50
+ list_part1 | 3 | 6 | 60
+ sub_part2 | 1 | 2 | 10
+ sub_part2 | 1 | 2 | 70
+(4 rows)
+
+DROP TRIGGER parted_mod_b ON sub_part1;
+-- If BR DELETE trigger prevented DELETE from happening, we should also skip
+-- the INSERT if that delete is part of UPDATE=>DELETE+INSERT.
+CREATE OR REPLACE FUNCTION func_parted_mod_b() returns trigger as $$
+BEGIN
+ raise notice 'Trigger: Got OLD row %, but returning NULL', OLD;
+ return NULL;
+END $$ LANGUAGE plpgsql;
+CREATE TRIGGER trig_skip_delete before delete on sub_part2
+ for each row execute procedure func_parted_mod_b();
+UPDATE list_parted set b = 1 WHERE c = 70;
+NOTICE: Trigger: Got OLD row (2,70,1), but returning NULL
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 52 | 50
+ list_part1 | 3 | 6 | 60
+ sub_part2 | 1 | 2 | 10
+ sub_part2 | 1 | 2 | 70
+(4 rows)
+
+-- Drop the trigger. Now the row should be moved.
+DROP TRIGGER trig_skip_delete ON sub_part2;
+UPDATE list_parted set b = 1 WHERE c = 70;
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 52 | 50
+ list_part1 | 3 | 6 | 60
+ sub_part1 | 1 | 1 | 70
+ sub_part2 | 1 | 2 | 10
+(4 rows)
+
+DROP FUNCTION func_parted_mod_b();
+-- UPDATE partition-key with FROM clause. If join produces multiple output
+-- rows for the same row to be modified, we should tuple-route the row only
+-- once. There should not be any rows inserted.
+CREATE TABLE non_parted (id int);
+INSERT into non_parted VALUES (1), (1), (1), (2), (2), (2), (3), (3), (3);
+UPDATE list_parted t1 set a = 2 FROM non_parted t2 WHERE t1.a = t2.id and a = 1;
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+ tableoid | a | b | c
+------------+---+----+----
+ list_part1 | 2 | 1 | 70
+ list_part1 | 2 | 2 | 10
+ list_part1 | 2 | 52 | 50
+ list_part1 | 3 | 6 | 60
+(4 rows)
+
+DROP TABLE non_parted;
+-- Cleanup: list_parted no longer needed.
+DROP TABLE list_parted;
+-- create custom operator class and hash function, for the same reason
+-- explained in alter_table.sql
+create or replace function dummy_hashint4(a int4, seed int8) returns int8 as
+$$ begin return (a + seed); end; $$ language 'plpgsql' immutable;
+create operator class custom_opclass for type int4 using hash as
+operator 1 = , function 2 dummy_hashint4(int4, int8);
+create table hash_parted (
+ a int,
+ b int
+) partition by hash (a custom_opclass, b custom_opclass);
+create table hpart1 partition of hash_parted for values with (modulus 2, remainder 1);
+create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2);
+create table hpart3 partition of hash_parted for values with (modulus 8, remainder 0);
+create table hpart4 partition of hash_parted for values with (modulus 8, remainder 4);
+insert into hpart1 values (1, 1);
+insert into hpart2 values (2, 5);
+insert into hpart4 values (3, 4);
+-- fail
+update hpart1 set a = 3, b=4 where a = 1;
+ERROR: new row for relation "hpart1" violates partition constraint
+DETAIL: Failing row contains (3, 4).
+-- ok, row movement
+update hash_parted set b = b - 1 where b = 1;
+-- ok
+update hash_parted set b = b + 8 where b = 1;
+-- cleanup
+drop table hash_parted;
+drop operator class custom_opclass using hash;
+drop function dummy_hashint4(a int4, seed int8);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/update.sql b/ydb/library/yql/tests/postgresql/original/cases/update.sql
new file mode 100644
index 0000000000..7a7bee77b9
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/update.sql
@@ -0,0 +1,669 @@
+--
+-- UPDATE syntax tests
+--
+
+CREATE TABLE update_test (
+ a INT DEFAULT 10,
+ b INT,
+ c TEXT
+);
+
+CREATE TABLE upsert_test (
+ a INT PRIMARY KEY,
+ b TEXT
+);
+
+INSERT INTO update_test VALUES (5, 10, 'foo');
+INSERT INTO update_test(b, a) VALUES (15, 10);
+
+SELECT * FROM update_test;
+
+UPDATE update_test SET a = DEFAULT, b = DEFAULT;
+
+SELECT * FROM update_test;
+
+-- aliases for the UPDATE target table
+UPDATE update_test AS t SET b = 10 WHERE t.a = 10;
+
+SELECT * FROM update_test;
+
+UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10;
+
+SELECT * FROM update_test;
+
+--
+-- Test VALUES in FROM
+--
+
+UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j)
+ WHERE update_test.b = v.j;
+
+SELECT * FROM update_test;
+
+-- fail, wrong data type:
+UPDATE update_test SET a = v.* FROM (VALUES(100, 20)) AS v(i, j)
+ WHERE update_test.b = v.j;
+
+--
+-- Test multiple-set-clause syntax
+--
+
+INSERT INTO update_test SELECT a,b+1,c FROM update_test;
+SELECT * FROM update_test;
+
+UPDATE update_test SET (c,b,a) = ('bugle', b+11, DEFAULT) WHERE c = 'foo';
+SELECT * FROM update_test;
+UPDATE update_test SET (c,b) = ('car', a+b), a = a + 1 WHERE a = 10;
+SELECT * FROM update_test;
+-- fail, multi assignment to same column:
+UPDATE update_test SET (c,b) = ('car', a+b), b = a + 1 WHERE a = 10;
+
+-- uncorrelated sub-select:
+UPDATE update_test
+ SET (b,a) = (select a,b from update_test where b = 41 and c = 'car')
+ WHERE a = 100 AND b = 20;
+SELECT * FROM update_test;
+-- correlated sub-select:
+UPDATE update_test o
+ SET (b,a) = (select a+1,b from update_test i
+ where i.a=o.a and i.b=o.b and i.c is not distinct from o.c);
+SELECT * FROM update_test;
+-- fail, multiple rows supplied:
+UPDATE update_test SET (b,a) = (select a+1,b from update_test);
+-- set to null if no rows supplied:
+UPDATE update_test SET (b,a) = (select a+1,b from update_test where a = 1000)
+ WHERE a = 11;
+SELECT * FROM update_test;
+-- *-expansion should work in this context:
+UPDATE update_test SET (a,b) = ROW(v.*) FROM (VALUES(21, 100)) AS v(i, j)
+ WHERE update_test.a = v.i;
+-- you might expect this to work, but syntactically it's not a RowExpr:
+UPDATE update_test SET (a,b) = (v.*) FROM (VALUES(21, 101)) AS v(i, j)
+ WHERE update_test.a = v.i;
+
+-- if an alias for the target table is specified, don't allow references
+-- to the original table name
+UPDATE update_test AS t SET b = update_test.b + 10 WHERE t.a = 10;
+
+-- Make sure that we can update to a TOASTed value.
+UPDATE update_test SET c = repeat('x', 10000) WHERE c = 'car';
+SELECT a, b, char_length(c) FROM update_test;
+
+-- Check multi-assignment with a Result node to handle a one-time filter.
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE update_test t
+ SET (a, b) = (SELECT b, a FROM update_test s WHERE s.a = t.a)
+ WHERE CURRENT_USER = SESSION_USER;
+UPDATE update_test t
+ SET (a, b) = (SELECT b, a FROM update_test s WHERE s.a = t.a)
+ WHERE CURRENT_USER = SESSION_USER;
+SELECT a, b, char_length(c) FROM update_test;
+
+-- Test ON CONFLICT DO UPDATE
+
+INSERT INTO upsert_test VALUES(1, 'Boo'), (3, 'Zoo');
+-- uncorrelated sub-select:
+WITH aaa AS (SELECT 1 AS a, 'Foo' AS b) INSERT INTO upsert_test
+ VALUES (1, 'Bar') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b, a FROM aaa) RETURNING *;
+-- correlated sub-select:
+INSERT INTO upsert_test VALUES (1, 'Baz'), (3, 'Zaz') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Correlated', a from upsert_test i WHERE i.a = upsert_test.a)
+ RETURNING *;
+-- correlated sub-select (EXCLUDED.* alias):
+INSERT INTO upsert_test VALUES (1, 'Bat'), (3, 'Zot') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a)
+ RETURNING *;
+
+-- ON CONFLICT using system attributes in RETURNING, testing both the
+-- inserting and updating paths. See bug report at:
+-- https://www.postgresql.org/message-id/73436355-6432-49B1-92ED-1FE4F7E7E100%40finefun.com.au
+INSERT INTO upsert_test VALUES (2, 'Beeble') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a)
+ RETURNING tableoid::regclass, xmin = pg_current_xact_id()::xid AS xmin_correct, xmax = 0 AS xmax_correct;
+-- currently xmax is set after a conflict - that's probably not good,
+-- but it seems worthwhile to have to be explicit if that changes.
+INSERT INTO upsert_test VALUES (2, 'Brox') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a)
+ RETURNING tableoid::regclass, xmin = pg_current_xact_id()::xid AS xmin_correct, xmax = pg_current_xact_id()::xid AS xmax_correct;
+
+DROP TABLE update_test;
+DROP TABLE upsert_test;
+
+-- Test ON CONFLICT DO UPDATE with partitioned table and non-identical children
+
+CREATE TABLE upsert_test (
+ a INT PRIMARY KEY,
+ b TEXT
+) PARTITION BY LIST (a);
+
+CREATE TABLE upsert_test_1 PARTITION OF upsert_test FOR VALUES IN (1);
+CREATE TABLE upsert_test_2 (b TEXT, a INT PRIMARY KEY);
+ALTER TABLE upsert_test ATTACH PARTITION upsert_test_2 FOR VALUES IN (2);
+
+INSERT INTO upsert_test VALUES(1, 'Boo'), (2, 'Zoo');
+-- uncorrelated sub-select:
+WITH aaa AS (SELECT 1 AS a, 'Foo' AS b) INSERT INTO upsert_test
+ VALUES (1, 'Bar') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT b, a FROM aaa) RETURNING *;
+-- correlated sub-select:
+WITH aaa AS (SELECT 1 AS ctea, ' Foo' AS cteb) INSERT INTO upsert_test
+ VALUES (1, 'Bar'), (2, 'Baz') ON CONFLICT(a)
+ DO UPDATE SET (b, a) = (SELECT upsert_test.b||cteb, upsert_test.a FROM aaa) RETURNING *;
+
+DROP TABLE upsert_test;
+
+
+---------------------------
+-- UPDATE with row movement
+---------------------------
+
+-- When a partitioned table receives an UPDATE to the partitioned key and the
+-- new values no longer meet the partition's bound, the row must be moved to
+-- the correct partition for the new partition key (if one exists). We must
+-- also ensure that updatable views on partitioned tables properly enforce any
+-- WITH CHECK OPTION that is defined. The situation with triggers in this case
+-- also requires thorough testing as partition key updates causing row
+-- movement convert UPDATEs into DELETE+INSERT.
+
+CREATE TABLE range_parted (
+ a text,
+ b bigint,
+ c numeric,
+ d int,
+ e varchar
+) PARTITION BY RANGE (a, b);
+
+-- Create partitions intentionally in descending bound order, so as to test
+-- that update-row-movement works with the leaf partitions not in bound order.
+CREATE TABLE part_b_20_b_30 (e varchar, c numeric, a text, b bigint, d int);
+ALTER TABLE range_parted ATTACH PARTITION part_b_20_b_30 FOR VALUES FROM ('b', 20) TO ('b', 30);
+CREATE TABLE part_b_10_b_20 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY RANGE (c);
+CREATE TABLE part_b_1_b_10 PARTITION OF range_parted FOR VALUES FROM ('b', 1) TO ('b', 10);
+ALTER TABLE range_parted ATTACH PARTITION part_b_10_b_20 FOR VALUES FROM ('b', 10) TO ('b', 20);
+CREATE TABLE part_a_10_a_20 PARTITION OF range_parted FOR VALUES FROM ('a', 10) TO ('a', 20);
+CREATE TABLE part_a_1_a_10 PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('a', 10);
+
+-- Check that partition-key UPDATE works sanely on a partitioned table that
+-- does not have any child partitions.
+UPDATE part_b_10_b_20 set b = b - 6;
+
+-- Create some more partitions following the above pattern of descending bound
+-- order, but let's make the situation a bit more complex by having the
+-- attribute numbers of the columns vary from their parent partition.
+CREATE TABLE part_c_100_200 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY range (abs(d));
+ALTER TABLE part_c_100_200 DROP COLUMN e, DROP COLUMN c, DROP COLUMN a;
+ALTER TABLE part_c_100_200 ADD COLUMN c numeric, ADD COLUMN e varchar, ADD COLUMN a text;
+ALTER TABLE part_c_100_200 DROP COLUMN b;
+ALTER TABLE part_c_100_200 ADD COLUMN b bigint;
+CREATE TABLE part_d_1_15 PARTITION OF part_c_100_200 FOR VALUES FROM (1) TO (15);
+CREATE TABLE part_d_15_20 PARTITION OF part_c_100_200 FOR VALUES FROM (15) TO (20);
+
+ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_100_200 FOR VALUES FROM (100) TO (200);
+
+CREATE TABLE part_c_1_100 (e varchar, d int, c numeric, b bigint, a text);
+ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_1_100 FOR VALUES FROM (1) TO (100);
+
+\set init_range_parted 'truncate range_parted; insert into range_parted VALUES (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)'
+\set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted ORDER BY 1, 2, 3, 4, 5, 6'
+:init_range_parted;
+:show_data;
+
+-- The order of subplans should be in bound order
+EXPLAIN (costs off) UPDATE range_parted set c = c - 50 WHERE c > 97;
+
+-- fail, row movement happens only within the partition subtree.
+UPDATE part_c_100_200 set c = c - 20, d = c WHERE c = 105;
+-- fail, no partition key update, so no attempt to move tuple,
+-- but "a = 'a'" violates partition constraint enforced by root partition)
+UPDATE part_b_10_b_20 set a = 'a';
+-- ok, partition key update, no constraint violation
+UPDATE range_parted set d = d - 10 WHERE d > 10;
+-- ok, no partition key update, no constraint violation
+UPDATE range_parted set e = d;
+-- No row found
+UPDATE part_c_1_100 set c = c + 20 WHERE c = 98;
+-- ok, row movement
+UPDATE part_b_10_b_20 set c = c + 20 returning c, b, a;
+:show_data;
+
+-- fail, row movement happens only within the partition subtree.
+UPDATE part_b_10_b_20 set b = b - 6 WHERE c > 116 returning *;
+-- ok, row movement, with subset of rows moved into different partition.
+UPDATE range_parted set b = b - 6 WHERE c > 116 returning a, b + c;
+
+:show_data;
+
+-- Common table needed for multiple test scenarios.
+CREATE TABLE mintab(c1 int);
+INSERT into mintab VALUES (120);
+
+-- update partition key using updatable view.
+CREATE VIEW upview AS SELECT * FROM range_parted WHERE (select c > c1 FROM mintab) WITH CHECK OPTION;
+-- ok
+UPDATE upview set c = 199 WHERE b = 4;
+-- fail, check option violation
+UPDATE upview set c = 120 WHERE b = 4;
+-- fail, row movement with check option violation
+UPDATE upview set a = 'b', b = 15, c = 120 WHERE b = 4;
+-- ok, row movement, check option passes
+UPDATE upview set a = 'b', b = 15 WHERE b = 4;
+
+:show_data;
+
+-- cleanup
+DROP VIEW upview;
+
+-- RETURNING having whole-row vars.
+:init_range_parted;
+UPDATE range_parted set c = 95 WHERE a = 'b' and b > 10 and c > 100 returning (range_parted), *;
+:show_data;
+
+
+-- Transition tables with update row movement
+:init_range_parted;
+
+CREATE FUNCTION trans_updatetrigfunc() RETURNS trigger LANGUAGE plpgsql AS
+$$
+ begin
+ raise notice 'trigger = %, old table = %, new table = %',
+ TG_NAME,
+ (select string_agg(old_table::text, ', ' ORDER BY a) FROM old_table),
+ (select string_agg(new_table::text, ', ' ORDER BY a) FROM new_table);
+ return null;
+ end;
+$$;
+
+CREATE TRIGGER trans_updatetrig
+ AFTER UPDATE ON range_parted REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc();
+
+UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end ) WHERE a = 'b' and b > 10 and c >= 96;
+:show_data;
+:init_range_parted;
+
+-- Enabling OLD TABLE capture for both DELETE as well as UPDATE stmt triggers
+-- should not cause DELETEd rows to be captured twice. Similar thing for
+-- INSERT triggers and inserted rows.
+CREATE TRIGGER trans_deletetrig
+ AFTER DELETE ON range_parted REFERENCING OLD TABLE AS old_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc();
+CREATE TRIGGER trans_inserttrig
+ AFTER INSERT ON range_parted REFERENCING NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc();
+UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96;
+:show_data;
+DROP TRIGGER trans_deletetrig ON range_parted;
+DROP TRIGGER trans_inserttrig ON range_parted;
+-- Don't drop trans_updatetrig yet. It is required below.
+
+-- Test with transition tuple conversion happening for rows moved into the
+-- new partition. This requires a trigger that references transition table
+-- (we already have trans_updatetrig). For inserted rows, the conversion
+-- is not usually needed, because the original tuple is already compatible with
+-- the desired transition tuple format. But conversion happens when there is a
+-- BR trigger because the trigger can change the inserted row. So install a
+-- BR triggers on those child partitions where the rows will be moved.
+CREATE FUNCTION func_parted_mod_b() RETURNS trigger AS $$
+BEGIN
+ NEW.b = NEW.b + 1;
+ return NEW;
+END $$ language plpgsql;
+CREATE TRIGGER trig_c1_100 BEFORE UPDATE OR INSERT ON part_c_1_100
+ FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b();
+CREATE TRIGGER trig_d1_15 BEFORE UPDATE OR INSERT ON part_d_1_15
+ FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b();
+CREATE TRIGGER trig_d15_20 BEFORE UPDATE OR INSERT ON part_d_15_20
+ FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b();
+:init_range_parted;
+UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end) WHERE a = 'b' and b > 10 and c >= 96;
+:show_data;
+:init_range_parted;
+UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96;
+:show_data;
+
+-- Case where per-partition tuple conversion map array is allocated, but the
+-- map is not required for the particular tuple that is routed, thanks to
+-- matching table attributes of the partition and the target table.
+:init_range_parted;
+UPDATE range_parted set b = 15 WHERE b = 1;
+:show_data;
+
+DROP TRIGGER trans_updatetrig ON range_parted;
+DROP TRIGGER trig_c1_100 ON part_c_1_100;
+DROP TRIGGER trig_d1_15 ON part_d_1_15;
+DROP TRIGGER trig_d15_20 ON part_d_15_20;
+DROP FUNCTION func_parted_mod_b();
+
+-- RLS policies with update-row-movement
+-----------------------------------------
+
+ALTER TABLE range_parted ENABLE ROW LEVEL SECURITY;
+CREATE USER regress_range_parted_user;
+GRANT ALL ON range_parted, mintab TO regress_range_parted_user;
+CREATE POLICY seeall ON range_parted AS PERMISSIVE FOR SELECT USING (true);
+CREATE POLICY policy_range_parted ON range_parted for UPDATE USING (true) WITH CHECK (c % 2 = 0);
+
+:init_range_parted;
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- This should fail with RLS violation error while moving row from
+-- part_a_10_a_20 to part_d_1_15, because we are setting 'c' to an odd number.
+UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200;
+
+RESET SESSION AUTHORIZATION;
+-- Create a trigger on part_d_1_15
+CREATE FUNCTION func_d_1_15() RETURNS trigger AS $$
+BEGIN
+ NEW.c = NEW.c + 1; -- Make even numbers odd, or vice versa
+ return NEW;
+END $$ LANGUAGE plpgsql;
+CREATE TRIGGER trig_d_1_15 BEFORE INSERT ON part_d_1_15
+ FOR EACH ROW EXECUTE PROCEDURE func_d_1_15();
+
+:init_range_parted;
+SET SESSION AUTHORIZATION regress_range_parted_user;
+
+-- Here, RLS checks should succeed while moving row from part_a_10_a_20 to
+-- part_d_1_15. Even though the UPDATE is setting 'c' to an odd number, the
+-- trigger at the destination partition again makes it an even number.
+UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200;
+
+RESET SESSION AUTHORIZATION;
+:init_range_parted;
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- This should fail with RLS violation error. Even though the UPDATE is setting
+-- 'c' to an even number, the trigger at the destination partition again makes
+-- it an odd number.
+UPDATE range_parted set a = 'b', c = 150 WHERE a = 'a' and c = 200;
+
+-- Cleanup
+RESET SESSION AUTHORIZATION;
+DROP TRIGGER trig_d_1_15 ON part_d_1_15;
+DROP FUNCTION func_d_1_15();
+
+-- Policy expression contains SubPlan
+RESET SESSION AUTHORIZATION;
+:init_range_parted;
+CREATE POLICY policy_range_parted_subplan on range_parted
+ AS RESTRICTIVE for UPDATE USING (true)
+ WITH CHECK ((SELECT range_parted.c <= c1 FROM mintab));
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- fail, mintab has row with c1 = 120
+UPDATE range_parted set a = 'b', c = 122 WHERE a = 'a' and c = 200;
+-- ok
+UPDATE range_parted set a = 'b', c = 120 WHERE a = 'a' and c = 200;
+
+-- RLS policy expression contains whole row.
+
+RESET SESSION AUTHORIZATION;
+:init_range_parted;
+CREATE POLICY policy_range_parted_wholerow on range_parted AS RESTRICTIVE for UPDATE USING (true)
+ WITH CHECK (range_parted = row('b', 10, 112, 1, NULL)::range_parted);
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- ok, should pass the RLS check
+UPDATE range_parted set a = 'b', c = 112 WHERE a = 'a' and c = 200;
+RESET SESSION AUTHORIZATION;
+:init_range_parted;
+SET SESSION AUTHORIZATION regress_range_parted_user;
+-- fail, the whole row RLS check should fail
+UPDATE range_parted set a = 'b', c = 116 WHERE a = 'a' and c = 200;
+
+-- Cleanup
+RESET SESSION AUTHORIZATION;
+DROP POLICY policy_range_parted ON range_parted;
+DROP POLICY policy_range_parted_subplan ON range_parted;
+DROP POLICY policy_range_parted_wholerow ON range_parted;
+REVOKE ALL ON range_parted, mintab FROM regress_range_parted_user;
+DROP USER regress_range_parted_user;
+DROP TABLE mintab;
+
+
+-- statement triggers with update row movement
+---------------------------------------------------
+
+:init_range_parted;
+
+CREATE FUNCTION trigfunc() returns trigger language plpgsql as
+$$
+ begin
+ raise notice 'trigger = % fired on table % during %',
+ TG_NAME, TG_TABLE_NAME, TG_OP;
+ return null;
+ end;
+$$;
+-- Triggers on root partition
+CREATE TRIGGER parent_delete_trig
+ AFTER DELETE ON range_parted for each statement execute procedure trigfunc();
+CREATE TRIGGER parent_update_trig
+ AFTER UPDATE ON range_parted for each statement execute procedure trigfunc();
+CREATE TRIGGER parent_insert_trig
+ AFTER INSERT ON range_parted for each statement execute procedure trigfunc();
+
+-- Triggers on leaf partition part_c_1_100
+CREATE TRIGGER c1_delete_trig
+ AFTER DELETE ON part_c_1_100 for each statement execute procedure trigfunc();
+CREATE TRIGGER c1_update_trig
+ AFTER UPDATE ON part_c_1_100 for each statement execute procedure trigfunc();
+CREATE TRIGGER c1_insert_trig
+ AFTER INSERT ON part_c_1_100 for each statement execute procedure trigfunc();
+
+-- Triggers on leaf partition part_d_1_15
+CREATE TRIGGER d1_delete_trig
+ AFTER DELETE ON part_d_1_15 for each statement execute procedure trigfunc();
+CREATE TRIGGER d1_update_trig
+ AFTER UPDATE ON part_d_1_15 for each statement execute procedure trigfunc();
+CREATE TRIGGER d1_insert_trig
+ AFTER INSERT ON part_d_1_15 for each statement execute procedure trigfunc();
+-- Triggers on leaf partition part_d_15_20
+CREATE TRIGGER d15_delete_trig
+ AFTER DELETE ON part_d_15_20 for each statement execute procedure trigfunc();
+CREATE TRIGGER d15_update_trig
+ AFTER UPDATE ON part_d_15_20 for each statement execute procedure trigfunc();
+CREATE TRIGGER d15_insert_trig
+ AFTER INSERT ON part_d_15_20 for each statement execute procedure trigfunc();
+
+-- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or
+-- insert statement triggers should be fired.
+UPDATE range_parted set c = c - 50 WHERE c > 97;
+:show_data;
+
+DROP TRIGGER parent_delete_trig ON range_parted;
+DROP TRIGGER parent_update_trig ON range_parted;
+DROP TRIGGER parent_insert_trig ON range_parted;
+DROP TRIGGER c1_delete_trig ON part_c_1_100;
+DROP TRIGGER c1_update_trig ON part_c_1_100;
+DROP TRIGGER c1_insert_trig ON part_c_1_100;
+DROP TRIGGER d1_delete_trig ON part_d_1_15;
+DROP TRIGGER d1_update_trig ON part_d_1_15;
+DROP TRIGGER d1_insert_trig ON part_d_1_15;
+DROP TRIGGER d15_delete_trig ON part_d_15_20;
+DROP TRIGGER d15_update_trig ON part_d_15_20;
+DROP TRIGGER d15_insert_trig ON part_d_15_20;
+
+
+-- Creating default partition for range
+:init_range_parted;
+create table part_def partition of range_parted default;
+\d+ part_def
+insert into range_parted values ('c', 9);
+-- ok
+update part_def set a = 'd' where a = 'c';
+-- fail
+update part_def set a = 'a' where a = 'd';
+
+:show_data;
+
+-- Update row movement from non-default to default partition.
+-- fail, default partition is not under part_a_10_a_20;
+UPDATE part_a_10_a_20 set a = 'ad' WHERE a = 'a';
+-- ok
+UPDATE range_parted set a = 'ad' WHERE a = 'a';
+UPDATE range_parted set a = 'bd' WHERE a = 'b';
+:show_data;
+-- Update row movement from default to non-default partitions.
+-- ok
+UPDATE range_parted set a = 'a' WHERE a = 'ad';
+UPDATE range_parted set a = 'b' WHERE a = 'bd';
+:show_data;
+
+-- Cleanup: range_parted no longer needed.
+DROP TABLE range_parted;
+
+CREATE TABLE list_parted (
+ a text,
+ b int
+) PARTITION BY list (a);
+CREATE TABLE list_part1 PARTITION OF list_parted for VALUES in ('a', 'b');
+CREATE TABLE list_default PARTITION OF list_parted default;
+INSERT into list_part1 VALUES ('a', 1);
+INSERT into list_default VALUES ('d', 10);
+
+-- fail
+UPDATE list_default set a = 'a' WHERE a = 'd';
+-- ok
+UPDATE list_default set a = 'x' WHERE a = 'd';
+
+DROP TABLE list_parted;
+
+-- Test retrieval of system columns with non-consistent partition row types.
+-- This is only partially supported, as seen in the results.
+
+create table utrtest (a int, b text) partition by list (a);
+create table utr1 (a int check (a in (1)), q text, b text);
+create table utr2 (a int check (a in (2)), b text);
+alter table utr1 drop column q;
+alter table utrtest attach partition utr1 for values in (1);
+alter table utrtest attach partition utr2 for values in (2);
+
+insert into utrtest values (1, 'foo')
+ returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok;
+insert into utrtest values (2, 'bar')
+ returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok; -- fails
+insert into utrtest values (2, 'bar')
+ returning *, tableoid::regclass;
+
+update utrtest set b = b || b from (values (1), (2)) s(x) where a = s.x
+ returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok;
+
+update utrtest set a = 3 - a from (values (1), (2)) s(x) where a = s.x
+ returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok; -- fails
+
+update utrtest set a = 3 - a from (values (1), (2)) s(x) where a = s.x
+ returning *, tableoid::regclass;
+
+delete from utrtest
+ returning *, tableoid::regclass, xmax = pg_current_xact_id()::xid as xmax_ok;
+
+drop table utrtest;
+
+
+--------------
+-- Some more update-partition-key test scenarios below. This time use list
+-- partitions.
+--------------
+
+-- Setup for list partitions
+CREATE TABLE list_parted (a numeric, b int, c int8) PARTITION BY list (a);
+CREATE TABLE sub_parted PARTITION OF list_parted for VALUES in (1) PARTITION BY list (b);
+
+CREATE TABLE sub_part1(b int, c int8, a numeric);
+ALTER TABLE sub_parted ATTACH PARTITION sub_part1 for VALUES in (1);
+CREATE TABLE sub_part2(b int, c int8, a numeric);
+ALTER TABLE sub_parted ATTACH PARTITION sub_part2 for VALUES in (2);
+
+CREATE TABLE list_part1(a numeric, b int, c int8);
+ALTER TABLE list_parted ATTACH PARTITION list_part1 for VALUES in (2,3);
+
+INSERT into list_parted VALUES (2,5,50);
+INSERT into list_parted VALUES (3,6,60);
+INSERT into sub_parted VALUES (1,1,60);
+INSERT into sub_parted VALUES (1,2,10);
+
+-- Test partition constraint violation when intermediate ancestor is used and
+-- constraint is inherited from upper root.
+UPDATE sub_parted set a = 2 WHERE c = 10;
+
+-- Test update-partition-key, where the unpruned partitions do not have their
+-- partition keys updated.
+SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1;
+UPDATE list_parted set b = c + a WHERE a = 2;
+SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1;
+
+
+-- Test the case where BR UPDATE triggers change the partition key.
+CREATE FUNCTION func_parted_mod_b() returns trigger as $$
+BEGIN
+ NEW.b = 2; -- This is changing partition key column.
+ return NEW;
+END $$ LANGUAGE plpgsql;
+CREATE TRIGGER parted_mod_b before update on sub_part1
+ for each row execute procedure func_parted_mod_b();
+
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+
+-- This should do the tuple routing even though there is no explicit
+-- partition-key update, because there is a trigger on sub_part1.
+UPDATE list_parted set c = 70 WHERE b = 1;
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+
+DROP TRIGGER parted_mod_b ON sub_part1;
+
+-- If BR DELETE trigger prevented DELETE from happening, we should also skip
+-- the INSERT if that delete is part of UPDATE=>DELETE+INSERT.
+CREATE OR REPLACE FUNCTION func_parted_mod_b() returns trigger as $$
+BEGIN
+ raise notice 'Trigger: Got OLD row %, but returning NULL', OLD;
+ return NULL;
+END $$ LANGUAGE plpgsql;
+CREATE TRIGGER trig_skip_delete before delete on sub_part2
+ for each row execute procedure func_parted_mod_b();
+UPDATE list_parted set b = 1 WHERE c = 70;
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+-- Drop the trigger. Now the row should be moved.
+DROP TRIGGER trig_skip_delete ON sub_part2;
+UPDATE list_parted set b = 1 WHERE c = 70;
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+DROP FUNCTION func_parted_mod_b();
+
+-- UPDATE partition-key with FROM clause. If join produces multiple output
+-- rows for the same row to be modified, we should tuple-route the row only
+-- once. There should not be any rows inserted.
+CREATE TABLE non_parted (id int);
+INSERT into non_parted VALUES (1), (1), (1), (2), (2), (2), (3), (3), (3);
+UPDATE list_parted t1 set a = 2 FROM non_parted t2 WHERE t1.a = t2.id and a = 1;
+SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4;
+DROP TABLE non_parted;
+
+-- Cleanup: list_parted no longer needed.
+DROP TABLE list_parted;
+
+-- create custom operator class and hash function, for the same reason
+-- explained in alter_table.sql
+create or replace function dummy_hashint4(a int4, seed int8) returns int8 as
+$$ begin return (a + seed); end; $$ language 'plpgsql' immutable;
+create operator class custom_opclass for type int4 using hash as
+operator 1 = , function 2 dummy_hashint4(int4, int8);
+
+create table hash_parted (
+ a int,
+ b int
+) partition by hash (a custom_opclass, b custom_opclass);
+create table hpart1 partition of hash_parted for values with (modulus 2, remainder 1);
+create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2);
+create table hpart3 partition of hash_parted for values with (modulus 8, remainder 0);
+create table hpart4 partition of hash_parted for values with (modulus 8, remainder 4);
+insert into hpart1 values (1, 1);
+insert into hpart2 values (2, 5);
+insert into hpart4 values (3, 4);
+
+-- fail
+update hpart1 set a = 3, b=4 where a = 1;
+-- ok, row movement
+update hash_parted set b = b - 1 where b = 1;
+-- ok
+update hash_parted set b = b + 8 where b = 1;
+
+-- cleanup
+drop table hash_parted;
+drop operator class custom_opclass using hash;
+drop function dummy_hashint4(a int4, seed int8);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/uuid.out b/ydb/library/yql/tests/postgresql/original/cases/uuid.out
new file mode 100644
index 0000000000..090103df48
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/uuid.out
@@ -0,0 +1,159 @@
+-- regression test for the uuid datatype
+-- creating test tables
+CREATE TABLE guid1
+(
+ guid_field UUID,
+ text_field TEXT DEFAULT(now())
+);
+CREATE TABLE guid2
+(
+ guid_field UUID,
+ text_field TEXT DEFAULT(now())
+);
+-- inserting invalid data tests
+-- too long
+INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111F');
+ERROR: invalid input syntax for type uuid: "11111111-1111-1111-1111-111111111111F"
+LINE 1: INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-111...
+ ^
+-- too short
+INSERT INTO guid1(guid_field) VALUES('{11111111-1111-1111-1111-11111111111}');
+ERROR: invalid input syntax for type uuid: "{11111111-1111-1111-1111-11111111111}"
+LINE 1: INSERT INTO guid1(guid_field) VALUES('{11111111-1111-1111-11...
+ ^
+-- valid data but invalid format
+INSERT INTO guid1(guid_field) VALUES('111-11111-1111-1111-1111-111111111111');
+ERROR: invalid input syntax for type uuid: "111-11111-1111-1111-1111-111111111111"
+LINE 1: INSERT INTO guid1(guid_field) VALUES('111-11111-1111-1111-11...
+ ^
+INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-2222-222222222222 ');
+ERROR: invalid input syntax for type uuid: "{22222222-2222-2222-2222-222222222222 "
+LINE 1: INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-22...
+ ^
+-- invalid data
+INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-G111-111111111111');
+ERROR: invalid input syntax for type uuid: "11111111-1111-1111-G111-111111111111"
+LINE 1: INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-G11...
+ ^
+INSERT INTO guid1(guid_field) VALUES('11+11111-1111-1111-1111-111111111111');
+ERROR: invalid input syntax for type uuid: "11+11111-1111-1111-1111-111111111111"
+LINE 1: INSERT INTO guid1(guid_field) VALUES('11+11111-1111-1111-111...
+ ^
+--inserting three input formats
+INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111');
+INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-2222-222222222222}');
+INSERT INTO guid1(guid_field) VALUES('3f3e3c3b3a3039383736353433a2313e');
+-- retrieving the inserted data
+SELECT guid_field FROM guid1;
+ guid_field
+--------------------------------------
+ 11111111-1111-1111-1111-111111111111
+ 22222222-2222-2222-2222-222222222222
+ 3f3e3c3b-3a30-3938-3736-353433a2313e
+(3 rows)
+
+-- ordering test
+SELECT guid_field FROM guid1 ORDER BY guid_field ASC;
+ guid_field
+--------------------------------------
+ 11111111-1111-1111-1111-111111111111
+ 22222222-2222-2222-2222-222222222222
+ 3f3e3c3b-3a30-3938-3736-353433a2313e
+(3 rows)
+
+SELECT guid_field FROM guid1 ORDER BY guid_field DESC;
+ guid_field
+--------------------------------------
+ 3f3e3c3b-3a30-3938-3736-353433a2313e
+ 22222222-2222-2222-2222-222222222222
+ 11111111-1111-1111-1111-111111111111
+(3 rows)
+
+-- = operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field = '3f3e3c3b-3a30-3938-3736-353433a2313e';
+ count
+-------
+ 1
+(1 row)
+
+-- <> operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field <> '11111111111111111111111111111111';
+ count
+-------
+ 2
+(1 row)
+
+-- < operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field < '22222222-2222-2222-2222-222222222222';
+ count
+-------
+ 1
+(1 row)
+
+-- <= operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field <= '22222222-2222-2222-2222-222222222222';
+ count
+-------
+ 2
+(1 row)
+
+-- > operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field > '22222222-2222-2222-2222-222222222222';
+ count
+-------
+ 1
+(1 row)
+
+-- >= operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field >= '22222222-2222-2222-2222-222222222222';
+ count
+-------
+ 2
+(1 row)
+
+-- btree and hash index creation test
+CREATE INDEX guid1_btree ON guid1 USING BTREE (guid_field);
+CREATE INDEX guid1_hash ON guid1 USING HASH (guid_field);
+-- unique index test
+CREATE UNIQUE INDEX guid1_unique_BTREE ON guid1 USING BTREE (guid_field);
+-- should fail
+INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111');
+ERROR: duplicate key value violates unique constraint "guid1_unique_btree"
+DETAIL: Key (guid_field)=(11111111-1111-1111-1111-111111111111) already exists.
+-- check to see whether the new indexes are actually there
+SELECT count(*) FROM pg_class WHERE relkind='i' AND relname LIKE 'guid%';
+ count
+-------
+ 3
+(1 row)
+
+-- populating the test tables with additional records
+INSERT INTO guid1(guid_field) VALUES('44444444-4444-4444-4444-444444444444');
+INSERT INTO guid2(guid_field) VALUES('11111111-1111-1111-1111-111111111111');
+INSERT INTO guid2(guid_field) VALUES('{22222222-2222-2222-2222-222222222222}');
+INSERT INTO guid2(guid_field) VALUES('3f3e3c3b3a3039383736353433a2313e');
+-- join test
+SELECT COUNT(*) FROM guid1 g1 INNER JOIN guid2 g2 ON g1.guid_field = g2.guid_field;
+ count
+-------
+ 3
+(1 row)
+
+SELECT COUNT(*) FROM guid1 g1 LEFT JOIN guid2 g2 ON g1.guid_field = g2.guid_field WHERE g2.guid_field IS NULL;
+ count
+-------
+ 1
+(1 row)
+
+-- generation test
+TRUNCATE guid1;
+INSERT INTO guid1 (guid_field) VALUES (gen_random_uuid());
+INSERT INTO guid1 (guid_field) VALUES (gen_random_uuid());
+SELECT count(DISTINCT guid_field) FROM guid1;
+ count
+-------
+ 2
+(1 row)
+
+-- clean up
+DROP TABLE guid1, guid2 CASCADE;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/uuid.sql b/ydb/library/yql/tests/postgresql/original/cases/uuid.sql
new file mode 100644
index 0000000000..3bd3b357c7
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/uuid.sql
@@ -0,0 +1,85 @@
+-- regression test for the uuid datatype
+-- creating test tables
+CREATE TABLE guid1
+(
+ guid_field UUID,
+ text_field TEXT DEFAULT(now())
+);
+CREATE TABLE guid2
+(
+ guid_field UUID,
+ text_field TEXT DEFAULT(now())
+);
+
+-- inserting invalid data tests
+-- too long
+INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111F');
+-- too short
+INSERT INTO guid1(guid_field) VALUES('{11111111-1111-1111-1111-11111111111}');
+-- valid data but invalid format
+INSERT INTO guid1(guid_field) VALUES('111-11111-1111-1111-1111-111111111111');
+INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-2222-222222222222 ');
+-- invalid data
+INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-G111-111111111111');
+INSERT INTO guid1(guid_field) VALUES('11+11111-1111-1111-1111-111111111111');
+
+--inserting three input formats
+INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111');
+INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-2222-222222222222}');
+INSERT INTO guid1(guid_field) VALUES('3f3e3c3b3a3039383736353433a2313e');
+
+-- retrieving the inserted data
+SELECT guid_field FROM guid1;
+
+-- ordering test
+SELECT guid_field FROM guid1 ORDER BY guid_field ASC;
+SELECT guid_field FROM guid1 ORDER BY guid_field DESC;
+
+-- = operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field = '3f3e3c3b-3a30-3938-3736-353433a2313e';
+
+-- <> operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field <> '11111111111111111111111111111111';
+
+-- < operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field < '22222222-2222-2222-2222-222222222222';
+
+-- <= operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field <= '22222222-2222-2222-2222-222222222222';
+
+-- > operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field > '22222222-2222-2222-2222-222222222222';
+
+-- >= operator test
+SELECT COUNT(*) FROM guid1 WHERE guid_field >= '22222222-2222-2222-2222-222222222222';
+
+-- btree and hash index creation test
+CREATE INDEX guid1_btree ON guid1 USING BTREE (guid_field);
+CREATE INDEX guid1_hash ON guid1 USING HASH (guid_field);
+
+-- unique index test
+CREATE UNIQUE INDEX guid1_unique_BTREE ON guid1 USING BTREE (guid_field);
+-- should fail
+INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111');
+
+-- check to see whether the new indexes are actually there
+SELECT count(*) FROM pg_class WHERE relkind='i' AND relname LIKE 'guid%';
+
+-- populating the test tables with additional records
+INSERT INTO guid1(guid_field) VALUES('44444444-4444-4444-4444-444444444444');
+INSERT INTO guid2(guid_field) VALUES('11111111-1111-1111-1111-111111111111');
+INSERT INTO guid2(guid_field) VALUES('{22222222-2222-2222-2222-222222222222}');
+INSERT INTO guid2(guid_field) VALUES('3f3e3c3b3a3039383736353433a2313e');
+
+-- join test
+SELECT COUNT(*) FROM guid1 g1 INNER JOIN guid2 g2 ON g1.guid_field = g2.guid_field;
+SELECT COUNT(*) FROM guid1 g1 LEFT JOIN guid2 g2 ON g1.guid_field = g2.guid_field WHERE g2.guid_field IS NULL;
+
+-- generation test
+TRUNCATE guid1;
+INSERT INTO guid1 (guid_field) VALUES (gen_random_uuid());
+INSERT INTO guid1 (guid_field) VALUES (gen_random_uuid());
+SELECT count(DISTINCT guid_field) FROM guid1;
+
+-- clean up
+DROP TABLE guid1, guid2 CASCADE;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/varchar.out b/ydb/library/yql/tests/postgresql/original/cases/varchar.out
new file mode 100644
index 0000000000..da23ae810b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/varchar.out
@@ -0,0 +1,111 @@
+--
+-- VARCHAR
+--
+CREATE TABLE VARCHAR_TBL(f1 varchar(1));
+INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
+-- any of the following three input formats are acceptable
+INSERT INTO VARCHAR_TBL (f1) VALUES ('1');
+INSERT INTO VARCHAR_TBL (f1) VALUES (2);
+INSERT INTO VARCHAR_TBL (f1) VALUES ('3');
+-- zero-length char
+INSERT INTO VARCHAR_TBL (f1) VALUES ('');
+-- try varchar's of greater than 1 length
+INSERT INTO VARCHAR_TBL (f1) VALUES ('cd');
+ERROR: value too long for type character varying(1)
+INSERT INTO VARCHAR_TBL (f1) VALUES ('c ');
+SELECT * FROM VARCHAR_TBL;
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+
+ c
+(7 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 <> 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+
+ c
+(6 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 = 'a';
+ f1
+----
+ a
+(1 row)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 < 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+
+(5 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 <= 'a';
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+
+(6 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 > 'a';
+ f1
+----
+ c
+(1 row)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 >= 'a';
+ f1
+----
+ a
+ c
+(2 rows)
+
+DROP TABLE VARCHAR_TBL;
+--
+-- Now test longer arrays of char
+--
+CREATE TABLE VARCHAR_TBL(f1 varchar(4));
+INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('ab');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde');
+ERROR: value too long for type character varying(4)
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd ');
+SELECT * FROM VARCHAR_TBL;
+ f1
+------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/varchar.sql b/ydb/library/yql/tests/postgresql/original/cases/varchar.sql
new file mode 100644
index 0000000000..35e24b84d3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/varchar.sql
@@ -0,0 +1,66 @@
+--
+-- VARCHAR
+--
+
+CREATE TABLE VARCHAR_TBL(f1 varchar(1));
+
+INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
+
+INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
+
+-- any of the following three input formats are acceptable
+INSERT INTO VARCHAR_TBL (f1) VALUES ('1');
+
+INSERT INTO VARCHAR_TBL (f1) VALUES (2);
+
+INSERT INTO VARCHAR_TBL (f1) VALUES ('3');
+
+-- zero-length char
+INSERT INTO VARCHAR_TBL (f1) VALUES ('');
+
+-- try varchar's of greater than 1 length
+INSERT INTO VARCHAR_TBL (f1) VALUES ('cd');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('c ');
+
+
+SELECT * FROM VARCHAR_TBL;
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 <> 'a';
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 = 'a';
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 < 'a';
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 <= 'a';
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 > 'a';
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 >= 'a';
+
+DROP TABLE VARCHAR_TBL;
+
+--
+-- Now test longer arrays of char
+--
+
+CREATE TABLE VARCHAR_TBL(f1 varchar(4));
+
+INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('ab');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd ');
+
+SELECT * FROM VARCHAR_TBL;
diff --git a/ydb/library/yql/tests/postgresql/original/cases/varchar_1.out b/ydb/library/yql/tests/postgresql/original/cases/varchar_1.out
new file mode 100644
index 0000000000..958f9c07e0
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/varchar_1.out
@@ -0,0 +1,111 @@
+--
+-- VARCHAR
+--
+CREATE TABLE VARCHAR_TBL(f1 varchar(1));
+INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
+-- any of the following three input formats are acceptable
+INSERT INTO VARCHAR_TBL (f1) VALUES ('1');
+INSERT INTO VARCHAR_TBL (f1) VALUES (2);
+INSERT INTO VARCHAR_TBL (f1) VALUES ('3');
+-- zero-length char
+INSERT INTO VARCHAR_TBL (f1) VALUES ('');
+-- try varchar's of greater than 1 length
+INSERT INTO VARCHAR_TBL (f1) VALUES ('cd');
+ERROR: value too long for type character varying(1)
+INSERT INTO VARCHAR_TBL (f1) VALUES ('c ');
+SELECT * FROM VARCHAR_TBL;
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+
+ c
+(7 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 <> 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+
+ c
+(6 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 = 'a';
+ f1
+----
+ a
+(1 row)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 < 'a';
+ f1
+----
+ 1
+ 2
+ 3
+
+(4 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 <= 'a';
+ f1
+----
+ a
+ 1
+ 2
+ 3
+
+(5 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 > 'a';
+ f1
+----
+ A
+ c
+(2 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 >= 'a';
+ f1
+----
+ a
+ A
+ c
+(3 rows)
+
+DROP TABLE VARCHAR_TBL;
+--
+-- Now test longer arrays of char
+--
+CREATE TABLE VARCHAR_TBL(f1 varchar(4));
+INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('ab');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde');
+ERROR: value too long for type character varying(4)
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd ');
+SELECT * FROM VARCHAR_TBL;
+ f1
+------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/varchar_2.out b/ydb/library/yql/tests/postgresql/original/cases/varchar_2.out
new file mode 100644
index 0000000000..b1d412ca00
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/varchar_2.out
@@ -0,0 +1,111 @@
+--
+-- VARCHAR
+--
+CREATE TABLE VARCHAR_TBL(f1 varchar(1));
+INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
+-- any of the following three input formats are acceptable
+INSERT INTO VARCHAR_TBL (f1) VALUES ('1');
+INSERT INTO VARCHAR_TBL (f1) VALUES (2);
+INSERT INTO VARCHAR_TBL (f1) VALUES ('3');
+-- zero-length char
+INSERT INTO VARCHAR_TBL (f1) VALUES ('');
+-- try varchar's of greater than 1 length
+INSERT INTO VARCHAR_TBL (f1) VALUES ('cd');
+ERROR: value too long for type character varying(1)
+INSERT INTO VARCHAR_TBL (f1) VALUES ('c ');
+SELECT * FROM VARCHAR_TBL;
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+
+ c
+(7 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 <> 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+
+ c
+(6 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 = 'a';
+ f1
+----
+ a
+(1 row)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 < 'a';
+ f1
+----
+
+(1 row)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 <= 'a';
+ f1
+----
+ a
+
+(2 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 > 'a';
+ f1
+----
+ A
+ 1
+ 2
+ 3
+ c
+(5 rows)
+
+SELECT c.*
+ FROM VARCHAR_TBL c
+ WHERE c.f1 >= 'a';
+ f1
+----
+ a
+ A
+ 1
+ 2
+ 3
+ c
+(6 rows)
+
+DROP TABLE VARCHAR_TBL;
+--
+-- Now test longer arrays of char
+--
+CREATE TABLE VARCHAR_TBL(f1 varchar(4));
+INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('ab');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd');
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde');
+ERROR: value too long for type character varying(4)
+INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd ');
+SELECT * FROM VARCHAR_TBL;
+ f1
+------
+ a
+ ab
+ abcd
+ abcd
+(4 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/window.out b/ydb/library/yql/tests/postgresql/original/cases/window.out
new file mode 100644
index 0000000000..19e2ac518a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/window.out
@@ -0,0 +1,4065 @@
+--
+-- WINDOW FUNCTIONS
+--
+CREATE TEMPORARY TABLE empsalary (
+ depname varchar,
+ empno bigint,
+ salary int,
+ enroll_date date
+);
+INSERT INTO empsalary VALUES
+('develop', 10, 5200, '2007-08-01'),
+('sales', 1, 5000, '2006-10-01'),
+('personnel', 5, 3500, '2007-12-10'),
+('sales', 4, 4800, '2007-08-08'),
+('personnel', 2, 3900, '2006-12-23'),
+('develop', 7, 4200, '2008-01-01'),
+('develop', 9, 4500, '2008-01-01'),
+('sales', 3, 4800, '2007-08-01'),
+('develop', 8, 6000, '2006-10-01'),
+('develop', 11, 5200, '2007-08-15');
+SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname) FROM empsalary ORDER BY depname, salary;
+ depname | empno | salary | sum
+-----------+-------+--------+-------
+ develop | 7 | 4200 | 25100
+ develop | 9 | 4500 | 25100
+ develop | 11 | 5200 | 25100
+ develop | 10 | 5200 | 25100
+ develop | 8 | 6000 | 25100
+ personnel | 5 | 3500 | 7400
+ personnel | 2 | 3900 | 7400
+ sales | 3 | 4800 | 14600
+ sales | 4 | 4800 | 14600
+ sales | 1 | 5000 | 14600
+(10 rows)
+
+SELECT depname, empno, salary, rank() OVER (PARTITION BY depname ORDER BY salary) FROM empsalary;
+ depname | empno | salary | rank
+-----------+-------+--------+------
+ develop | 7 | 4200 | 1
+ develop | 9 | 4500 | 2
+ develop | 11 | 5200 | 3
+ develop | 10 | 5200 | 3
+ develop | 8 | 6000 | 5
+ personnel | 5 | 3500 | 1
+ personnel | 2 | 3900 | 2
+ sales | 3 | 4800 | 1
+ sales | 4 | 4800 | 1
+ sales | 1 | 5000 | 3
+(10 rows)
+
+-- with GROUP BY
+SELECT four, ten, SUM(SUM(four)) OVER (PARTITION BY four), AVG(ten) FROM tenk1
+GROUP BY four, ten ORDER BY four, ten;
+ four | ten | sum | avg
+------+-----+------+------------------------
+ 0 | 0 | 0 | 0.00000000000000000000
+ 0 | 2 | 0 | 2.0000000000000000
+ 0 | 4 | 0 | 4.0000000000000000
+ 0 | 6 | 0 | 6.0000000000000000
+ 0 | 8 | 0 | 8.0000000000000000
+ 1 | 1 | 2500 | 1.00000000000000000000
+ 1 | 3 | 2500 | 3.0000000000000000
+ 1 | 5 | 2500 | 5.0000000000000000
+ 1 | 7 | 2500 | 7.0000000000000000
+ 1 | 9 | 2500 | 9.0000000000000000
+ 2 | 0 | 5000 | 0.00000000000000000000
+ 2 | 2 | 5000 | 2.0000000000000000
+ 2 | 4 | 5000 | 4.0000000000000000
+ 2 | 6 | 5000 | 6.0000000000000000
+ 2 | 8 | 5000 | 8.0000000000000000
+ 3 | 1 | 7500 | 1.00000000000000000000
+ 3 | 3 | 7500 | 3.0000000000000000
+ 3 | 5 | 7500 | 5.0000000000000000
+ 3 | 7 | 7500 | 7.0000000000000000
+ 3 | 9 | 7500 | 9.0000000000000000
+(20 rows)
+
+SELECT depname, empno, salary, sum(salary) OVER w FROM empsalary WINDOW w AS (PARTITION BY depname);
+ depname | empno | salary | sum
+-----------+-------+--------+-------
+ develop | 11 | 5200 | 25100
+ develop | 7 | 4200 | 25100
+ develop | 9 | 4500 | 25100
+ develop | 8 | 6000 | 25100
+ develop | 10 | 5200 | 25100
+ personnel | 5 | 3500 | 7400
+ personnel | 2 | 3900 | 7400
+ sales | 3 | 4800 | 14600
+ sales | 1 | 5000 | 14600
+ sales | 4 | 4800 | 14600
+(10 rows)
+
+SELECT depname, empno, salary, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary) ORDER BY rank() OVER w;
+ depname | empno | salary | rank
+-----------+-------+--------+------
+ develop | 7 | 4200 | 1
+ personnel | 5 | 3500 | 1
+ sales | 3 | 4800 | 1
+ sales | 4 | 4800 | 1
+ personnel | 2 | 3900 | 2
+ develop | 9 | 4500 | 2
+ sales | 1 | 5000 | 3
+ develop | 11 | 5200 | 3
+ develop | 10 | 5200 | 3
+ develop | 8 | 6000 | 5
+(10 rows)
+
+-- empty window specification
+SELECT COUNT(*) OVER () FROM tenk1 WHERE unique2 < 10;
+ count
+-------
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+(10 rows)
+
+SELECT COUNT(*) OVER w FROM tenk1 WHERE unique2 < 10 WINDOW w AS ();
+ count
+-------
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+ 10
+(10 rows)
+
+-- no window operation
+SELECT four FROM tenk1 WHERE FALSE WINDOW w AS (PARTITION BY ten);
+ four
+------
+(0 rows)
+
+-- cumulative aggregate
+SELECT sum(four) OVER (PARTITION BY ten ORDER BY unique2) AS sum_1, ten, four FROM tenk1 WHERE unique2 < 10;
+ sum_1 | ten | four
+-------+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 0
+ 2 | 0 | 2
+ 3 | 1 | 3
+ 4 | 1 | 1
+ 5 | 1 | 1
+ 3 | 3 | 3
+ 0 | 4 | 0
+ 1 | 7 | 1
+ 1 | 9 | 1
+(10 rows)
+
+SELECT row_number() OVER (ORDER BY unique2) FROM tenk1 WHERE unique2 < 10;
+ row_number
+------------
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ 10
+(10 rows)
+
+SELECT rank() OVER (PARTITION BY four ORDER BY ten) AS rank_1, ten, four FROM tenk1 WHERE unique2 < 10;
+ rank_1 | ten | four
+--------+-----+------
+ 1 | 0 | 0
+ 1 | 0 | 0
+ 3 | 4 | 0
+ 1 | 1 | 1
+ 1 | 1 | 1
+ 3 | 7 | 1
+ 4 | 9 | 1
+ 1 | 0 | 2
+ 1 | 1 | 3
+ 2 | 3 | 3
+(10 rows)
+
+SELECT dense_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ dense_rank | ten | four
+------------+-----+------
+ 1 | 0 | 0
+ 1 | 0 | 0
+ 2 | 4 | 0
+ 1 | 1 | 1
+ 1 | 1 | 1
+ 2 | 7 | 1
+ 3 | 9 | 1
+ 1 | 0 | 2
+ 1 | 1 | 3
+ 2 | 3 | 3
+(10 rows)
+
+SELECT percent_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ percent_rank | ten | four
+--------------------+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 0
+ 1 | 4 | 0
+ 0 | 1 | 1
+ 0 | 1 | 1
+ 0.6666666666666666 | 7 | 1
+ 1 | 9 | 1
+ 0 | 0 | 2
+ 0 | 1 | 3
+ 1 | 3 | 3
+(10 rows)
+
+SELECT cume_dist() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ cume_dist | ten | four
+--------------------+-----+------
+ 0.6666666666666666 | 0 | 0
+ 0.6666666666666666 | 0 | 0
+ 1 | 4 | 0
+ 0.5 | 1 | 1
+ 0.5 | 1 | 1
+ 0.75 | 7 | 1
+ 1 | 9 | 1
+ 1 | 0 | 2
+ 0.5 | 1 | 3
+ 1 | 3 | 3
+(10 rows)
+
+SELECT ntile(3) OVER (ORDER BY ten, four), ten, four FROM tenk1 WHERE unique2 < 10;
+ ntile | ten | four
+-------+-----+------
+ 1 | 0 | 0
+ 1 | 0 | 0
+ 1 | 0 | 2
+ 1 | 1 | 1
+ 2 | 1 | 1
+ 2 | 1 | 3
+ 2 | 3 | 3
+ 3 | 4 | 0
+ 3 | 7 | 1
+ 3 | 9 | 1
+(10 rows)
+
+SELECT ntile(NULL) OVER (ORDER BY ten, four), ten, four FROM tenk1 LIMIT 2;
+ ntile | ten | four
+-------+-----+------
+ | 0 | 0
+ | 0 | 0
+(2 rows)
+
+SELECT lag(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ lag | ten | four
+-----+-----+------
+ | 0 | 0
+ 0 | 0 | 0
+ 0 | 4 | 0
+ | 1 | 1
+ 1 | 1 | 1
+ 1 | 7 | 1
+ 7 | 9 | 1
+ | 0 | 2
+ | 1 | 3
+ 1 | 3 | 3
+(10 rows)
+
+SELECT lag(ten, four) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ lag | ten | four
+-----+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 0
+ 4 | 4 | 0
+ | 1 | 1
+ 1 | 1 | 1
+ 1 | 7 | 1
+ 7 | 9 | 1
+ | 0 | 2
+ | 1 | 3
+ | 3 | 3
+(10 rows)
+
+SELECT lag(ten, four, 0) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ lag | ten | four
+-----+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 0
+ 4 | 4 | 0
+ 0 | 1 | 1
+ 1 | 1 | 1
+ 1 | 7 | 1
+ 7 | 9 | 1
+ 0 | 0 | 2
+ 0 | 1 | 3
+ 0 | 3 | 3
+(10 rows)
+
+SELECT lag(ten, four, 0.7) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten;
+ lag | ten | four
+-----+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 0
+ 4 | 4 | 0
+ 0.7 | 1 | 1
+ 1 | 1 | 1
+ 1 | 7 | 1
+ 7 | 9 | 1
+ 0.7 | 0 | 2
+ 0.7 | 1 | 3
+ 0.7 | 3 | 3
+(10 rows)
+
+SELECT lead(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ lead | ten | four
+------+-----+------
+ 0 | 0 | 0
+ 4 | 0 | 0
+ | 4 | 0
+ 1 | 1 | 1
+ 7 | 1 | 1
+ 9 | 7 | 1
+ | 9 | 1
+ | 0 | 2
+ 3 | 1 | 3
+ | 3 | 3
+(10 rows)
+
+SELECT lead(ten * 2, 1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ lead | ten | four
+------+-----+------
+ 0 | 0 | 0
+ 8 | 0 | 0
+ | 4 | 0
+ 2 | 1 | 1
+ 14 | 1 | 1
+ 18 | 7 | 1
+ | 9 | 1
+ | 0 | 2
+ 6 | 1 | 3
+ | 3 | 3
+(10 rows)
+
+SELECT lead(ten * 2, 1, -1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ lead | ten | four
+------+-----+------
+ 0 | 0 | 0
+ 8 | 0 | 0
+ -1 | 4 | 0
+ 2 | 1 | 1
+ 14 | 1 | 1
+ 18 | 7 | 1
+ -1 | 9 | 1
+ -1 | 0 | 2
+ 6 | 1 | 3
+ -1 | 3 | 3
+(10 rows)
+
+SELECT lead(ten * 2, 1, -1.4) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten;
+ lead | ten | four
+------+-----+------
+ 0 | 0 | 0
+ 8 | 0 | 0
+ -1.4 | 4 | 0
+ 2 | 1 | 1
+ 14 | 1 | 1
+ 18 | 7 | 1
+ -1.4 | 9 | 1
+ -1.4 | 0 | 2
+ 6 | 1 | 3
+ -1.4 | 3 | 3
+(10 rows)
+
+SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ first_value | ten | four
+-------------+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 0
+ 0 | 4 | 0
+ 1 | 1 | 1
+ 1 | 1 | 1
+ 1 | 7 | 1
+ 1 | 9 | 1
+ 0 | 0 | 2
+ 1 | 1 | 3
+ 1 | 3 | 3
+(10 rows)
+
+-- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window.
+SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+ last_value | ten | four
+------------+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 2
+ 0 | 0 | 0
+ 1 | 1 | 1
+ 1 | 1 | 3
+ 1 | 1 | 1
+ 3 | 3 | 3
+ 0 | 4 | 0
+ 1 | 7 | 1
+ 1 | 9 | 1
+(10 rows)
+
+SELECT last_value(ten) OVER (PARTITION BY four), ten, four FROM
+ (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s
+ ORDER BY four, ten;
+ last_value | ten | four
+------------+-----+------
+ 4 | 0 | 0
+ 4 | 0 | 0
+ 4 | 4 | 0
+ 9 | 1 | 1
+ 9 | 1 | 1
+ 9 | 7 | 1
+ 9 | 9 | 1
+ 0 | 0 | 2
+ 3 | 1 | 3
+ 3 | 3 | 3
+(10 rows)
+
+SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four
+ FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s;
+ nth_value | ten | four
+-----------+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 0
+ 0 | 4 | 0
+ 1 | 1 | 1
+ 1 | 1 | 1
+ 1 | 7 | 1
+ 1 | 9 | 1
+ | 0 | 2
+ | 1 | 3
+ | 3 | 3
+(10 rows)
+
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum
+FROM tenk1 GROUP BY ten, two;
+ ten | two | gsum | wsum
+-----+-----+-------+--------
+ 0 | 0 | 45000 | 45000
+ 2 | 0 | 47000 | 92000
+ 4 | 0 | 49000 | 141000
+ 6 | 0 | 51000 | 192000
+ 8 | 0 | 53000 | 245000
+ 1 | 1 | 46000 | 46000
+ 3 | 1 | 48000 | 94000
+ 5 | 1 | 50000 | 144000
+ 7 | 1 | 52000 | 196000
+ 9 | 1 | 54000 | 250000
+(10 rows)
+
+SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE two = 1)s WHERE unique2 < 10;
+ count | four
+-------+------
+ 4 | 1
+ 4 | 1
+ 4 | 1
+ 4 | 1
+ 2 | 3
+ 2 | 3
+(6 rows)
+
+SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum
+ FROM tenk1 WHERE unique2 < 10;
+ cntsum
+--------
+ 22
+ 22
+ 87
+ 24
+ 24
+ 82
+ 92
+ 51
+ 92
+ 136
+(10 rows)
+
+-- opexpr with different windows evaluation.
+SELECT * FROM(
+ SELECT count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total,
+ count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount,
+ sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum
+ FROM tenk1
+)sub
+WHERE total <> fourcount + twosum;
+ total | fourcount | twosum
+-------+-----------+--------
+(0 rows)
+
+SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1 WHERE unique2 < 10;
+ avg
+------------------------
+ 0.00000000000000000000
+ 0.00000000000000000000
+ 0.00000000000000000000
+ 1.00000000000000000000
+ 1.00000000000000000000
+ 1.00000000000000000000
+ 1.00000000000000000000
+ 2.0000000000000000
+ 3.0000000000000000
+ 3.0000000000000000
+(10 rows)
+
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum
+FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten);
+ ten | two | gsum | wsum
+-----+-----+-------+--------
+ 0 | 0 | 45000 | 45000
+ 2 | 0 | 47000 | 92000
+ 4 | 0 | 49000 | 141000
+ 6 | 0 | 51000 | 192000
+ 8 | 0 | 53000 | 245000
+ 1 | 1 | 46000 | 46000
+ 3 | 1 | 48000 | 94000
+ 5 | 1 | 50000 | 144000
+ 7 | 1 | 52000 | 196000
+ 9 | 1 | 54000 | 250000
+(10 rows)
+
+-- more than one window with GROUP BY
+SELECT sum(salary),
+ row_number() OVER (ORDER BY depname),
+ sum(sum(salary)) OVER (ORDER BY depname DESC)
+FROM empsalary GROUP BY depname;
+ sum | row_number | sum
+-------+------------+-------
+ 25100 | 1 | 47100
+ 7400 | 2 | 22000
+ 14600 | 3 | 14600
+(3 rows)
+
+-- identical windows with different names
+SELECT sum(salary) OVER w1, count(*) OVER w2
+FROM empsalary WINDOW w1 AS (ORDER BY salary), w2 AS (ORDER BY salary);
+ sum | count
+-------+-------
+ 3500 | 1
+ 7400 | 2
+ 11600 | 3
+ 16100 | 4
+ 25700 | 6
+ 25700 | 6
+ 30700 | 7
+ 41100 | 9
+ 41100 | 9
+ 47100 | 10
+(10 rows)
+
+-- subplan
+SELECT lead(ten, (SELECT two FROM tenk1 WHERE s.unique2 = unique2)) OVER (PARTITION BY four ORDER BY ten)
+FROM tenk1 s WHERE unique2 < 10;
+ lead
+------
+ 0
+ 0
+ 4
+ 1
+ 7
+ 9
+
+ 0
+ 3
+
+(10 rows)
+
+-- empty table
+SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 WHERE FALSE)s;
+ count
+-------
+(0 rows)
+
+-- mixture of agg/wfunc in the same window
+SELECT sum(salary) OVER w, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary DESC);
+ sum | rank
+-------+------
+ 6000 | 1
+ 16400 | 2
+ 16400 | 2
+ 20900 | 4
+ 25100 | 5
+ 3900 | 1
+ 7400 | 2
+ 5000 | 1
+ 14600 | 2
+ 14600 | 2
+(10 rows)
+
+-- strict aggs
+SELECT empno, depname, salary, bonus, depadj, MIN(bonus) OVER (ORDER BY empno), MAX(depadj) OVER () FROM(
+ SELECT *,
+ CASE WHEN enroll_date < '2008-01-01' THEN 2008 - extract(YEAR FROM enroll_date) END * 500 AS bonus,
+ CASE WHEN
+ AVG(salary) OVER (PARTITION BY depname) < salary
+ THEN 200 END AS depadj FROM empsalary
+)s;
+ empno | depname | salary | bonus | depadj | min | max
+-------+-----------+--------+-------+--------+------+-----
+ 1 | sales | 5000 | 1000 | 200 | 1000 | 200
+ 2 | personnel | 3900 | 1000 | 200 | 1000 | 200
+ 3 | sales | 4800 | 500 | | 500 | 200
+ 4 | sales | 4800 | 500 | | 500 | 200
+ 5 | personnel | 3500 | 500 | | 500 | 200
+ 7 | develop | 4200 | | | 500 | 200
+ 8 | develop | 6000 | 1000 | 200 | 500 | 200
+ 9 | develop | 4500 | | | 500 | 200
+ 10 | develop | 5200 | 500 | 200 | 500 | 200
+ 11 | develop | 5200 | 500 | 200 | 500 | 200
+(10 rows)
+
+-- window function over ungrouped agg over empty row set (bug before 9.1)
+SELECT SUM(COUNT(f1)) OVER () FROM int4_tbl WHERE f1=42;
+ sum
+-----
+ 0
+(1 row)
+
+-- window function with ORDER BY an expression involving aggregates (9.1 bug)
+select ten,
+ sum(unique1) + sum(unique2) as res,
+ rank() over (order by sum(unique1) + sum(unique2)) as rank
+from tenk1
+group by ten order by ten;
+ ten | res | rank
+-----+----------+------
+ 0 | 9976146 | 4
+ 1 | 10114187 | 9
+ 2 | 10059554 | 8
+ 3 | 9878541 | 1
+ 4 | 9881005 | 2
+ 5 | 9981670 | 5
+ 6 | 9947099 | 3
+ 7 | 10120309 | 10
+ 8 | 9991305 | 6
+ 9 | 10040184 | 7
+(10 rows)
+
+-- window and aggregate with GROUP BY expression (9.2 bug)
+explain (costs off)
+select first_value(max(x)) over (), y
+ from (select unique1 as x, ten+four as y from tenk1) ss
+ group by y;
+ QUERY PLAN
+---------------------------------------------
+ WindowAgg
+ -> HashAggregate
+ Group Key: (tenk1.ten + tenk1.four)
+ -> Seq Scan on tenk1
+(4 rows)
+
+-- test non-default frame specifications
+SELECT four, ten,
+ sum(ten) over (partition by four order by ten),
+ last_value(ten) over (partition by four order by ten)
+FROM (select distinct ten, four from tenk1) ss;
+ four | ten | sum | last_value
+------+-----+-----+------------
+ 0 | 0 | 0 | 0
+ 0 | 2 | 2 | 2
+ 0 | 4 | 6 | 4
+ 0 | 6 | 12 | 6
+ 0 | 8 | 20 | 8
+ 1 | 1 | 1 | 1
+ 1 | 3 | 4 | 3
+ 1 | 5 | 9 | 5
+ 1 | 7 | 16 | 7
+ 1 | 9 | 25 | 9
+ 2 | 0 | 0 | 0
+ 2 | 2 | 2 | 2
+ 2 | 4 | 6 | 4
+ 2 | 6 | 12 | 6
+ 2 | 8 | 20 | 8
+ 3 | 1 | 1 | 1
+ 3 | 3 | 4 | 3
+ 3 | 5 | 9 | 5
+ 3 | 7 | 16 | 7
+ 3 | 9 | 25 | 9
+(20 rows)
+
+SELECT four, ten,
+ sum(ten) over (partition by four order by ten range between unbounded preceding and current row),
+ last_value(ten) over (partition by four order by ten range between unbounded preceding and current row)
+FROM (select distinct ten, four from tenk1) ss;
+ four | ten | sum | last_value
+------+-----+-----+------------
+ 0 | 0 | 0 | 0
+ 0 | 2 | 2 | 2
+ 0 | 4 | 6 | 4
+ 0 | 6 | 12 | 6
+ 0 | 8 | 20 | 8
+ 1 | 1 | 1 | 1
+ 1 | 3 | 4 | 3
+ 1 | 5 | 9 | 5
+ 1 | 7 | 16 | 7
+ 1 | 9 | 25 | 9
+ 2 | 0 | 0 | 0
+ 2 | 2 | 2 | 2
+ 2 | 4 | 6 | 4
+ 2 | 6 | 12 | 6
+ 2 | 8 | 20 | 8
+ 3 | 1 | 1 | 1
+ 3 | 3 | 4 | 3
+ 3 | 5 | 9 | 5
+ 3 | 7 | 16 | 7
+ 3 | 9 | 25 | 9
+(20 rows)
+
+SELECT four, ten,
+ sum(ten) over (partition by four order by ten range between unbounded preceding and unbounded following),
+ last_value(ten) over (partition by four order by ten range between unbounded preceding and unbounded following)
+FROM (select distinct ten, four from tenk1) ss;
+ four | ten | sum | last_value
+------+-----+-----+------------
+ 0 | 0 | 20 | 8
+ 0 | 2 | 20 | 8
+ 0 | 4 | 20 | 8
+ 0 | 6 | 20 | 8
+ 0 | 8 | 20 | 8
+ 1 | 1 | 25 | 9
+ 1 | 3 | 25 | 9
+ 1 | 5 | 25 | 9
+ 1 | 7 | 25 | 9
+ 1 | 9 | 25 | 9
+ 2 | 0 | 20 | 8
+ 2 | 2 | 20 | 8
+ 2 | 4 | 20 | 8
+ 2 | 6 | 20 | 8
+ 2 | 8 | 20 | 8
+ 3 | 1 | 25 | 9
+ 3 | 3 | 25 | 9
+ 3 | 5 | 25 | 9
+ 3 | 7 | 25 | 9
+ 3 | 9 | 25 | 9
+(20 rows)
+
+SELECT four, ten/4 as two,
+ sum(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row),
+ last_value(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row)
+FROM (select distinct ten, four from tenk1) ss;
+ four | two | sum | last_value
+------+-----+-----+------------
+ 0 | 0 | 0 | 0
+ 0 | 0 | 0 | 0
+ 0 | 1 | 2 | 1
+ 0 | 1 | 2 | 1
+ 0 | 2 | 4 | 2
+ 1 | 0 | 0 | 0
+ 1 | 0 | 0 | 0
+ 1 | 1 | 2 | 1
+ 1 | 1 | 2 | 1
+ 1 | 2 | 4 | 2
+ 2 | 0 | 0 | 0
+ 2 | 0 | 0 | 0
+ 2 | 1 | 2 | 1
+ 2 | 1 | 2 | 1
+ 2 | 2 | 4 | 2
+ 3 | 0 | 0 | 0
+ 3 | 0 | 0 | 0
+ 3 | 1 | 2 | 1
+ 3 | 1 | 2 | 1
+ 3 | 2 | 4 | 2
+(20 rows)
+
+SELECT four, ten/4 as two,
+ sum(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row),
+ last_value(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row)
+FROM (select distinct ten, four from tenk1) ss;
+ four | two | sum | last_value
+------+-----+-----+------------
+ 0 | 0 | 0 | 0
+ 0 | 0 | 0 | 0
+ 0 | 1 | 1 | 1
+ 0 | 1 | 2 | 1
+ 0 | 2 | 4 | 2
+ 1 | 0 | 0 | 0
+ 1 | 0 | 0 | 0
+ 1 | 1 | 1 | 1
+ 1 | 1 | 2 | 1
+ 1 | 2 | 4 | 2
+ 2 | 0 | 0 | 0
+ 2 | 0 | 0 | 0
+ 2 | 1 | 1 | 1
+ 2 | 1 | 2 | 1
+ 2 | 2 | 4 | 2
+ 3 | 0 | 0 | 0
+ 3 | 0 | 0 | 0
+ 3 | 1 | 1 | 1
+ 3 | 1 | 2 | 1
+ 3 | 2 | 4 | 2
+(20 rows)
+
+SELECT sum(unique1) over (order by four range between current row and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 45 | 0 | 0
+ 45 | 8 | 0
+ 45 | 4 | 0
+ 33 | 5 | 1
+ 33 | 9 | 1
+ 33 | 1 | 1
+ 18 | 6 | 2
+ 18 | 2 | 2
+ 10 | 3 | 3
+ 10 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (rows between current row and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 45 | 4 | 0
+ 41 | 2 | 2
+ 39 | 1 | 1
+ 38 | 6 | 2
+ 32 | 9 | 1
+ 23 | 8 | 0
+ 15 | 5 | 1
+ 10 | 3 | 3
+ 7 | 7 | 3
+ 0 | 0 | 0
+(10 rows)
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 7 | 4 | 0
+ 13 | 2 | 2
+ 22 | 1 | 1
+ 26 | 6 | 2
+ 29 | 9 | 1
+ 31 | 8 | 0
+ 32 | 5 | 1
+ 23 | 3 | 3
+ 15 | 7 | 3
+ 10 | 0 | 0
+(10 rows)
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 7 | 4 | 0
+ 13 | 2 | 2
+ 22 | 1 | 1
+ 26 | 6 | 2
+ 29 | 9 | 1
+ 31 | 8 | 0
+ 32 | 5 | 1
+ 23 | 3 | 3
+ 15 | 7 | 3
+ 10 | 0 | 0
+(10 rows)
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 3 | 4 | 0
+ 11 | 2 | 2
+ 21 | 1 | 1
+ 20 | 6 | 2
+ 20 | 9 | 1
+ 23 | 8 | 0
+ 27 | 5 | 1
+ 20 | 3 | 3
+ 8 | 7 | 3
+ 10 | 0 | 0
+(10 rows)
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ | 4 | 0
+ | 2 | 2
+ | 1 | 1
+ | 6 | 2
+ | 9 | 1
+ | 8 | 0
+ | 5 | 1
+ | 3 | 3
+ | 7 | 3
+ | 0 | 0
+(10 rows)
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 4 | 4 | 0
+ 2 | 2 | 2
+ 1 | 1 | 1
+ 6 | 6 | 2
+ 9 | 9 | 1
+ 8 | 8 | 0
+ 5 | 5 | 1
+ 3 | 3 | 3
+ 7 | 7 | 3
+ 0 | 0 | 0
+(10 rows)
+
+SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ first_value | unique1 | four
+-------------+---------+------
+ 8 | 0 | 0
+ 4 | 8 | 0
+ 5 | 4 | 0
+ 9 | 5 | 1
+ 1 | 9 | 1
+ 6 | 1 | 1
+ 2 | 6 | 2
+ 3 | 2 | 2
+ 7 | 3 | 3
+ | 7 | 3
+(10 rows)
+
+SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ first_value | unique1 | four
+-------------+---------+------
+ | 0 | 0
+ 5 | 8 | 0
+ 5 | 4 | 0
+ | 5 | 1
+ 6 | 9 | 1
+ 6 | 1 | 1
+ 3 | 6 | 2
+ 3 | 2 | 2
+ | 3 | 3
+ | 7 | 3
+(10 rows)
+
+SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ first_value | unique1 | four
+-------------+---------+------
+ 0 | 0 | 0
+ 8 | 8 | 0
+ 4 | 4 | 0
+ 5 | 5 | 1
+ 9 | 9 | 1
+ 1 | 1 | 1
+ 6 | 6 | 2
+ 2 | 2 | 2
+ 3 | 3 | 3
+ 7 | 7 | 3
+(10 rows)
+
+SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ last_value | unique1 | four
+------------+---------+------
+ 4 | 0 | 0
+ 5 | 8 | 0
+ 9 | 4 | 0
+ 1 | 5 | 1
+ 6 | 9 | 1
+ 2 | 1 | 1
+ 3 | 6 | 2
+ 7 | 2 | 2
+ 7 | 3 | 3
+ | 7 | 3
+(10 rows)
+
+SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ last_value | unique1 | four
+------------+---------+------
+ | 0 | 0
+ 5 | 8 | 0
+ 9 | 4 | 0
+ | 5 | 1
+ 6 | 9 | 1
+ 2 | 1 | 1
+ 3 | 6 | 2
+ 7 | 2 | 2
+ | 3 | 3
+ | 7 | 3
+(10 rows)
+
+SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ last_value | unique1 | four
+------------+---------+------
+ 0 | 0 | 0
+ 5 | 8 | 0
+ 9 | 4 | 0
+ 5 | 5 | 1
+ 6 | 9 | 1
+ 2 | 1 | 1
+ 3 | 6 | 2
+ 7 | 2 | 2
+ 3 | 3 | 3
+ 7 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (rows between 2 preceding and 1 preceding),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ | 4 | 0
+ 4 | 2 | 2
+ 6 | 1 | 1
+ 3 | 6 | 2
+ 7 | 9 | 1
+ 15 | 8 | 0
+ 17 | 5 | 1
+ 13 | 3 | 3
+ 8 | 7 | 3
+ 10 | 0 | 0
+(10 rows)
+
+SELECT sum(unique1) over (rows between 1 following and 3 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 9 | 4 | 0
+ 16 | 2 | 2
+ 23 | 1 | 1
+ 22 | 6 | 2
+ 16 | 9 | 1
+ 15 | 8 | 0
+ 10 | 5 | 1
+ 7 | 3 | 3
+ 0 | 7 | 3
+ | 0 | 0
+(10 rows)
+
+SELECT sum(unique1) over (rows between unbounded preceding and 1 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 6 | 4 | 0
+ 7 | 2 | 2
+ 13 | 1 | 1
+ 22 | 6 | 2
+ 30 | 9 | 1
+ 35 | 8 | 0
+ 38 | 5 | 1
+ 45 | 3 | 3
+ 45 | 7 | 3
+ 45 | 0 | 0
+(10 rows)
+
+SELECT sum(unique1) over (w range between current row and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
+ sum | unique1 | four
+-----+---------+------
+ 45 | 0 | 0
+ 45 | 8 | 0
+ 45 | 4 | 0
+ 33 | 5 | 1
+ 33 | 9 | 1
+ 33 | 1 | 1
+ 18 | 6 | 2
+ 18 | 2 | 2
+ 10 | 3 | 3
+ 10 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (w range between unbounded preceding and current row exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
+ sum | unique1 | four
+-----+---------+------
+ 12 | 0 | 0
+ 4 | 8 | 0
+ 8 | 4 | 0
+ 22 | 5 | 1
+ 18 | 9 | 1
+ 26 | 1 | 1
+ 29 | 6 | 2
+ 33 | 2 | 2
+ 42 | 3 | 3
+ 38 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (w range between unbounded preceding and current row exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
+ sum | unique1 | four
+-----+---------+------
+ | 0 | 0
+ | 8 | 0
+ | 4 | 0
+ 12 | 5 | 1
+ 12 | 9 | 1
+ 12 | 1 | 1
+ 27 | 6 | 2
+ 27 | 2 | 2
+ 35 | 3 | 3
+ 35 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (w range between unbounded preceding and current row exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
+ sum | unique1 | four
+-----+---------+------
+ 0 | 0 | 0
+ 8 | 8 | 0
+ 4 | 4 | 0
+ 17 | 5 | 1
+ 21 | 9 | 1
+ 13 | 1 | 1
+ 33 | 6 | 2
+ 29 | 2 | 2
+ 38 | 3 | 3
+ 42 | 7 | 3
+(10 rows)
+
+SELECT first_value(unique1) over w,
+ nth_value(unique1, 2) over w AS nth_2,
+ last_value(unique1) over w, unique1, four
+FROM tenk1 WHERE unique1 < 10
+WINDOW w AS (order by four range between current row and unbounded following);
+ first_value | nth_2 | last_value | unique1 | four
+-------------+-------+------------+---------+------
+ 0 | 8 | 7 | 0 | 0
+ 0 | 8 | 7 | 8 | 0
+ 0 | 8 | 7 | 4 | 0
+ 5 | 9 | 7 | 5 | 1
+ 5 | 9 | 7 | 9 | 1
+ 5 | 9 | 7 | 1 | 1
+ 6 | 2 | 7 | 6 | 2
+ 6 | 2 | 7 | 2 | 2
+ 3 | 7 | 7 | 3 | 3
+ 3 | 7 | 7 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over
+ (order by unique1
+ rows (SELECT unique1 FROM tenk1 ORDER BY unique1 LIMIT 1) + 1 PRECEDING),
+ unique1
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1
+-----+---------
+ 0 | 0
+ 1 | 1
+ 3 | 2
+ 5 | 3
+ 7 | 4
+ 9 | 5
+ 11 | 6
+ 13 | 7
+ 15 | 8
+ 17 | 9
+(10 rows)
+
+CREATE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows
+ FROM generate_series(1, 10) i;
+SELECT * FROM v_window;
+ i | sum_rows
+----+----------
+ 1 | 3
+ 2 | 6
+ 3 | 9
+ 4 | 12
+ 5 | 15
+ 6 | 18
+ 7 | 21
+ 8 | 24
+ 9 | 27
+ 10 | 19
+(10 rows)
+
+SELECT pg_get_viewdef('v_window');
+ pg_get_viewdef
+---------------------------------------------------------------------------------------
+ SELECT i.i, +
+ sum(i.i) OVER (ORDER BY i.i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+
+ FROM generate_series(1, 10) i(i);
+(1 row)
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
+ exclude current row) as sum_rows FROM generate_series(1, 10) i;
+SELECT * FROM v_window;
+ i | sum_rows
+----+----------
+ 1 | 2
+ 2 | 4
+ 3 | 6
+ 4 | 8
+ 5 | 10
+ 6 | 12
+ 7 | 14
+ 8 | 16
+ 9 | 18
+ 10 | 9
+(10 rows)
+
+SELECT pg_get_viewdef('v_window');
+ pg_get_viewdef
+-----------------------------------------------------------------------------------------------------------
+ SELECT i.i, +
+ sum(i.i) OVER (ORDER BY i.i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) AS sum_rows+
+ FROM generate_series(1, 10) i(i);
+(1 row)
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
+ exclude group) as sum_rows FROM generate_series(1, 10) i;
+SELECT * FROM v_window;
+ i | sum_rows
+----+----------
+ 1 | 2
+ 2 | 4
+ 3 | 6
+ 4 | 8
+ 5 | 10
+ 6 | 12
+ 7 | 14
+ 8 | 16
+ 9 | 18
+ 10 | 9
+(10 rows)
+
+SELECT pg_get_viewdef('v_window');
+ pg_get_viewdef
+-----------------------------------------------------------------------------------------------------
+ SELECT i.i, +
+ sum(i.i) OVER (ORDER BY i.i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE GROUP) AS sum_rows+
+ FROM generate_series(1, 10) i(i);
+(1 row)
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
+ exclude ties) as sum_rows FROM generate_series(1, 10) i;
+SELECT * FROM v_window;
+ i | sum_rows
+----+----------
+ 1 | 3
+ 2 | 6
+ 3 | 9
+ 4 | 12
+ 5 | 15
+ 6 | 18
+ 7 | 21
+ 8 | 24
+ 9 | 27
+ 10 | 19
+(10 rows)
+
+SELECT pg_get_viewdef('v_window');
+ pg_get_viewdef
+----------------------------------------------------------------------------------------------------
+ SELECT i.i, +
+ sum(i.i) OVER (ORDER BY i.i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE TIES) AS sum_rows+
+ FROM generate_series(1, 10) i(i);
+(1 row)
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
+ exclude no others) as sum_rows FROM generate_series(1, 10) i;
+SELECT * FROM v_window;
+ i | sum_rows
+----+----------
+ 1 | 3
+ 2 | 6
+ 3 | 9
+ 4 | 12
+ 5 | 15
+ 6 | 18
+ 7 | 21
+ 8 | 24
+ 9 | 27
+ 10 | 19
+(10 rows)
+
+SELECT pg_get_viewdef('v_window');
+ pg_get_viewdef
+---------------------------------------------------------------------------------------
+ SELECT i.i, +
+ sum(i.i) OVER (ORDER BY i.i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+
+ FROM generate_series(1, 10) i(i);
+(1 row)
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i groups between 1 preceding and 1 following) as sum_rows FROM generate_series(1, 10) i;
+SELECT * FROM v_window;
+ i | sum_rows
+----+----------
+ 1 | 3
+ 2 | 6
+ 3 | 9
+ 4 | 12
+ 5 | 15
+ 6 | 18
+ 7 | 21
+ 8 | 24
+ 9 | 27
+ 10 | 19
+(10 rows)
+
+SELECT pg_get_viewdef('v_window');
+ pg_get_viewdef
+-----------------------------------------------------------------------------------------
+ SELECT i.i, +
+ sum(i.i) OVER (ORDER BY i.i GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+
+ FROM generate_series(1, 10) i(i);
+(1 row)
+
+DROP VIEW v_window;
+CREATE TEMP VIEW v_window AS
+ SELECT i, min(i) over (order by i range between '1 day' preceding and '10 days' following) as min_i
+ FROM generate_series(now(), now()+'100 days'::interval, '1 hour') i;
+SELECT pg_get_viewdef('v_window');
+ pg_get_viewdef
+---------------------------------------------------------------------------------------------------------------------------
+ SELECT i.i, +
+ min(i.i) OVER (ORDER BY i.i RANGE BETWEEN '@ 1 day'::interval PRECEDING AND '@ 10 days'::interval FOLLOWING) AS min_i+
+ FROM generate_series(now(), (now() + '@ 100 days'::interval), '@ 1 hour'::interval) i(i);
+(1 row)
+
+-- RANGE offset PRECEDING/FOLLOWING tests
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ | 0 | 0
+ | 8 | 0
+ | 4 | 0
+ 12 | 5 | 1
+ 12 | 9 | 1
+ 12 | 1 | 1
+ 27 | 6 | 2
+ 27 | 2 | 2
+ 23 | 3 | 3
+ 23 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four desc range between 2::int8 preceding and 1::int2 preceding),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ | 3 | 3
+ | 7 | 3
+ 10 | 6 | 2
+ 10 | 2 | 2
+ 18 | 9 | 1
+ 18 | 5 | 1
+ 18 | 1 | 1
+ 23 | 0 | 0
+ 23 | 8 | 0
+ 23 | 4 | 0
+(10 rows)
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude no others),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ | 0 | 0
+ | 8 | 0
+ | 4 | 0
+ 12 | 5 | 1
+ 12 | 9 | 1
+ 12 | 1 | 1
+ 27 | 6 | 2
+ 27 | 2 | 2
+ 23 | 3 | 3
+ 23 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ | 0 | 0
+ | 8 | 0
+ | 4 | 0
+ 12 | 5 | 1
+ 12 | 9 | 1
+ 12 | 1 | 1
+ 27 | 6 | 2
+ 27 | 2 | 2
+ 23 | 3 | 3
+ 23 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ | 0 | 0
+ | 8 | 0
+ | 4 | 0
+ 12 | 5 | 1
+ 12 | 9 | 1
+ 12 | 1 | 1
+ 27 | 6 | 2
+ 27 | 2 | 2
+ 23 | 3 | 3
+ 23 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ | 0 | 0
+ | 8 | 0
+ | 4 | 0
+ 12 | 5 | 1
+ 12 | 9 | 1
+ 12 | 1 | 1
+ 27 | 6 | 2
+ 27 | 2 | 2
+ 23 | 3 | 3
+ 23 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 33 | 0 | 0
+ 41 | 8 | 0
+ 37 | 4 | 0
+ 35 | 5 | 1
+ 39 | 9 | 1
+ 31 | 1 | 1
+ 43 | 6 | 2
+ 39 | 2 | 2
+ 26 | 3 | 3
+ 30 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 33 | 0 | 0
+ 33 | 8 | 0
+ 33 | 4 | 0
+ 30 | 5 | 1
+ 30 | 9 | 1
+ 30 | 1 | 1
+ 37 | 6 | 2
+ 37 | 2 | 2
+ 23 | 3 | 3
+ 23 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 4 | 0 | 0
+ 12 | 4 | 0
+ 12 | 8 | 0
+ 6 | 1 | 1
+ 15 | 5 | 1
+ 14 | 9 | 1
+ 8 | 2 | 2
+ 8 | 6 | 2
+ 10 | 3 | 3
+ 10 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following
+ exclude current row),unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 4 | 0 | 0
+ 8 | 4 | 0
+ 4 | 8 | 0
+ 5 | 1 | 1
+ 10 | 5 | 1
+ 5 | 9 | 1
+ 6 | 2 | 2
+ 2 | 6 | 2
+ 7 | 3 | 3
+ 3 | 7 | 3
+(10 rows)
+
+select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following),
+ salary, enroll_date from empsalary;
+ sum | salary | enroll_date
+-------+--------+-------------
+ 34900 | 5000 | 10-01-2006
+ 34900 | 6000 | 10-01-2006
+ 38400 | 3900 | 12-23-2006
+ 47100 | 4800 | 08-01-2007
+ 47100 | 5200 | 08-01-2007
+ 47100 | 4800 | 08-08-2007
+ 47100 | 5200 | 08-15-2007
+ 36100 | 3500 | 12-10-2007
+ 32200 | 4500 | 01-01-2008
+ 32200 | 4200 | 01-01-2008
+(10 rows)
+
+select sum(salary) over (order by enroll_date desc range between '1 year'::interval preceding and '1 year'::interval following),
+ salary, enroll_date from empsalary;
+ sum | salary | enroll_date
+-------+--------+-------------
+ 32200 | 4200 | 01-01-2008
+ 32200 | 4500 | 01-01-2008
+ 36100 | 3500 | 12-10-2007
+ 47100 | 5200 | 08-15-2007
+ 47100 | 4800 | 08-08-2007
+ 47100 | 4800 | 08-01-2007
+ 47100 | 5200 | 08-01-2007
+ 38400 | 3900 | 12-23-2006
+ 34900 | 5000 | 10-01-2006
+ 34900 | 6000 | 10-01-2006
+(10 rows)
+
+select sum(salary) over (order by enroll_date desc range between '1 year'::interval following and '1 year'::interval following),
+ salary, enroll_date from empsalary;
+ sum | salary | enroll_date
+-----+--------+-------------
+ | 4200 | 01-01-2008
+ | 4500 | 01-01-2008
+ | 3500 | 12-10-2007
+ | 5200 | 08-15-2007
+ | 4800 | 08-08-2007
+ | 4800 | 08-01-2007
+ | 5200 | 08-01-2007
+ | 3900 | 12-23-2006
+ | 5000 | 10-01-2006
+ | 6000 | 10-01-2006
+(10 rows)
+
+select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following
+ exclude current row), salary, enroll_date from empsalary;
+ sum | salary | enroll_date
+-------+--------+-------------
+ 29900 | 5000 | 10-01-2006
+ 28900 | 6000 | 10-01-2006
+ 34500 | 3900 | 12-23-2006
+ 42300 | 4800 | 08-01-2007
+ 41900 | 5200 | 08-01-2007
+ 42300 | 4800 | 08-08-2007
+ 41900 | 5200 | 08-15-2007
+ 32600 | 3500 | 12-10-2007
+ 27700 | 4500 | 01-01-2008
+ 28000 | 4200 | 01-01-2008
+(10 rows)
+
+select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following
+ exclude group), salary, enroll_date from empsalary;
+ sum | salary | enroll_date
+-------+--------+-------------
+ 23900 | 5000 | 10-01-2006
+ 23900 | 6000 | 10-01-2006
+ 34500 | 3900 | 12-23-2006
+ 37100 | 4800 | 08-01-2007
+ 37100 | 5200 | 08-01-2007
+ 42300 | 4800 | 08-08-2007
+ 41900 | 5200 | 08-15-2007
+ 32600 | 3500 | 12-10-2007
+ 23500 | 4500 | 01-01-2008
+ 23500 | 4200 | 01-01-2008
+(10 rows)
+
+select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+ sum | salary | enroll_date
+-------+--------+-------------
+ 28900 | 5000 | 10-01-2006
+ 29900 | 6000 | 10-01-2006
+ 38400 | 3900 | 12-23-2006
+ 41900 | 4800 | 08-01-2007
+ 42300 | 5200 | 08-01-2007
+ 47100 | 4800 | 08-08-2007
+ 47100 | 5200 | 08-15-2007
+ 36100 | 3500 | 12-10-2007
+ 28000 | 4500 | 01-01-2008
+ 27700 | 4200 | 01-01-2008
+(10 rows)
+
+select first_value(salary) over(order by salary range between 1000 preceding and 1000 following),
+ lead(salary) over(order by salary range between 1000 preceding and 1000 following),
+ nth_value(salary, 1) over(order by salary range between 1000 preceding and 1000 following),
+ salary from empsalary;
+ first_value | lead | nth_value | salary
+-------------+------+-----------+--------
+ 3500 | 3900 | 3500 | 3500
+ 3500 | 4200 | 3500 | 3900
+ 3500 | 4500 | 3500 | 4200
+ 3500 | 4800 | 3500 | 4500
+ 3900 | 4800 | 3900 | 4800
+ 3900 | 5000 | 3900 | 4800
+ 4200 | 5200 | 4200 | 5000
+ 4200 | 5200 | 4200 | 5200
+ 4200 | 6000 | 4200 | 5200
+ 5000 | | 5000 | 6000
+(10 rows)
+
+select last_value(salary) over(order by salary range between 1000 preceding and 1000 following),
+ lag(salary) over(order by salary range between 1000 preceding and 1000 following),
+ salary from empsalary;
+ last_value | lag | salary
+------------+------+--------
+ 4500 | | 3500
+ 4800 | 3500 | 3900
+ 5200 | 3900 | 4200
+ 5200 | 4200 | 4500
+ 5200 | 4500 | 4800
+ 5200 | 4800 | 4800
+ 6000 | 4800 | 5000
+ 6000 | 5000 | 5200
+ 6000 | 5200 | 5200
+ 6000 | 5200 | 6000
+(10 rows)
+
+select first_value(salary) over(order by salary range between 1000 following and 3000 following
+ exclude current row),
+ lead(salary) over(order by salary range between 1000 following and 3000 following exclude ties),
+ nth_value(salary, 1) over(order by salary range between 1000 following and 3000 following
+ exclude ties),
+ salary from empsalary;
+ first_value | lead | nth_value | salary
+-------------+------+-----------+--------
+ 4500 | 3900 | 4500 | 3500
+ 5000 | 4200 | 5000 | 3900
+ 5200 | 4500 | 5200 | 4200
+ 6000 | 4800 | 6000 | 4500
+ 6000 | 4800 | 6000 | 4800
+ 6000 | 5000 | 6000 | 4800
+ 6000 | 5200 | 6000 | 5000
+ | 5200 | | 5200
+ | 6000 | | 5200
+ | | | 6000
+(10 rows)
+
+select last_value(salary) over(order by salary range between 1000 following and 3000 following
+ exclude group),
+ lag(salary) over(order by salary range between 1000 following and 3000 following exclude group),
+ salary from empsalary;
+ last_value | lag | salary
+------------+------+--------
+ 6000 | | 3500
+ 6000 | 3500 | 3900
+ 6000 | 3900 | 4200
+ 6000 | 4200 | 4500
+ 6000 | 4500 | 4800
+ 6000 | 4800 | 4800
+ 6000 | 4800 | 5000
+ | 5000 | 5200
+ | 5200 | 5200
+ | 5200 | 6000
+(10 rows)
+
+select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude ties),
+ last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following),
+ salary, enroll_date from empsalary;
+ first_value | last_value | salary | enroll_date
+-------------+------------+--------+-------------
+ 5000 | 5200 | 5000 | 10-01-2006
+ 6000 | 5200 | 6000 | 10-01-2006
+ 5000 | 3500 | 3900 | 12-23-2006
+ 5000 | 4200 | 4800 | 08-01-2007
+ 5000 | 4200 | 5200 | 08-01-2007
+ 5000 | 4200 | 4800 | 08-08-2007
+ 5000 | 4200 | 5200 | 08-15-2007
+ 5000 | 4200 | 3500 | 12-10-2007
+ 5000 | 4200 | 4500 | 01-01-2008
+ 5000 | 4200 | 4200 | 01-01-2008
+(10 rows)
+
+select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude ties),
+ last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude ties),
+ salary, enroll_date from empsalary;
+ first_value | last_value | salary | enroll_date
+-------------+------------+--------+-------------
+ 5000 | 5200 | 5000 | 10-01-2006
+ 6000 | 5200 | 6000 | 10-01-2006
+ 5000 | 3500 | 3900 | 12-23-2006
+ 5000 | 4200 | 4800 | 08-01-2007
+ 5000 | 4200 | 5200 | 08-01-2007
+ 5000 | 4200 | 4800 | 08-08-2007
+ 5000 | 4200 | 5200 | 08-15-2007
+ 5000 | 4200 | 3500 | 12-10-2007
+ 5000 | 4500 | 4500 | 01-01-2008
+ 5000 | 4200 | 4200 | 01-01-2008
+(10 rows)
+
+select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude group),
+ last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude group),
+ salary, enroll_date from empsalary;
+ first_value | last_value | salary | enroll_date
+-------------+------------+--------+-------------
+ 3900 | 5200 | 5000 | 10-01-2006
+ 3900 | 5200 | 6000 | 10-01-2006
+ 5000 | 3500 | 3900 | 12-23-2006
+ 5000 | 4200 | 4800 | 08-01-2007
+ 5000 | 4200 | 5200 | 08-01-2007
+ 5000 | 4200 | 4800 | 08-08-2007
+ 5000 | 4200 | 5200 | 08-15-2007
+ 5000 | 4200 | 3500 | 12-10-2007
+ 5000 | 3500 | 4500 | 01-01-2008
+ 5000 | 3500 | 4200 | 01-01-2008
+(10 rows)
+
+select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude current row),
+ last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude current row),
+ salary, enroll_date from empsalary;
+ first_value | last_value | salary | enroll_date
+-------------+------------+--------+-------------
+ 6000 | 5200 | 5000 | 10-01-2006
+ 5000 | 5200 | 6000 | 10-01-2006
+ 5000 | 3500 | 3900 | 12-23-2006
+ 5000 | 4200 | 4800 | 08-01-2007
+ 5000 | 4200 | 5200 | 08-01-2007
+ 5000 | 4200 | 4800 | 08-08-2007
+ 5000 | 4200 | 5200 | 08-15-2007
+ 5000 | 4200 | 3500 | 12-10-2007
+ 5000 | 4200 | 4500 | 01-01-2008
+ 5000 | 4500 | 4200 | 01-01-2008
+(10 rows)
+
+-- RANGE offset PRECEDING/FOLLOWING with null values
+select x, y,
+ first_value(y) over w,
+ last_value(y) over w
+from
+ (select x, x as y from generate_series(1,5) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by x asc nulls first range between 2 preceding and 2 following);
+ x | y | first_value | last_value
+---+----+-------------+------------
+ | 42 | 42 | 43
+ | 43 | 42 | 43
+ 1 | 1 | 1 | 3
+ 2 | 2 | 1 | 4
+ 3 | 3 | 1 | 5
+ 4 | 4 | 2 | 5
+ 5 | 5 | 3 | 5
+(7 rows)
+
+select x, y,
+ first_value(y) over w,
+ last_value(y) over w
+from
+ (select x, x as y from generate_series(1,5) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by x asc nulls last range between 2 preceding and 2 following);
+ x | y | first_value | last_value
+---+----+-------------+------------
+ 1 | 1 | 1 | 3
+ 2 | 2 | 1 | 4
+ 3 | 3 | 1 | 5
+ 4 | 4 | 2 | 5
+ 5 | 5 | 3 | 5
+ | 42 | 42 | 43
+ | 43 | 42 | 43
+(7 rows)
+
+select x, y,
+ first_value(y) over w,
+ last_value(y) over w
+from
+ (select x, x as y from generate_series(1,5) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by x desc nulls first range between 2 preceding and 2 following);
+ x | y | first_value | last_value
+---+----+-------------+------------
+ | 43 | 43 | 42
+ | 42 | 43 | 42
+ 5 | 5 | 5 | 3
+ 4 | 4 | 5 | 2
+ 3 | 3 | 5 | 1
+ 2 | 2 | 4 | 1
+ 1 | 1 | 3 | 1
+(7 rows)
+
+select x, y,
+ first_value(y) over w,
+ last_value(y) over w
+from
+ (select x, x as y from generate_series(1,5) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by x desc nulls last range between 2 preceding and 2 following);
+ x | y | first_value | last_value
+---+----+-------------+------------
+ 5 | 5 | 5 | 3
+ 4 | 4 | 5 | 2
+ 3 | 3 | 5 | 1
+ 2 | 2 | 4 | 1
+ 1 | 1 | 3 | 1
+ | 42 | 42 | 43
+ | 43 | 42 | 43
+(7 rows)
+
+-- Check overflow behavior for various integer sizes
+select x, last_value(x) over (order by x::smallint range between current row and 2147450884 following)
+from generate_series(32764, 32766) x;
+ x | last_value
+-------+------------
+ 32764 | 32766
+ 32765 | 32766
+ 32766 | 32766
+(3 rows)
+
+select x, last_value(x) over (order by x::smallint desc range between current row and 2147450885 following)
+from generate_series(-32766, -32764) x;
+ x | last_value
+--------+------------
+ -32764 | -32766
+ -32765 | -32766
+ -32766 | -32766
+(3 rows)
+
+select x, last_value(x) over (order by x range between current row and 4 following)
+from generate_series(2147483644, 2147483646) x;
+ x | last_value
+------------+------------
+ 2147483644 | 2147483646
+ 2147483645 | 2147483646
+ 2147483646 | 2147483646
+(3 rows)
+
+select x, last_value(x) over (order by x desc range between current row and 5 following)
+from generate_series(-2147483646, -2147483644) x;
+ x | last_value
+-------------+-------------
+ -2147483644 | -2147483646
+ -2147483645 | -2147483646
+ -2147483646 | -2147483646
+(3 rows)
+
+select x, last_value(x) over (order by x range between current row and 4 following)
+from generate_series(9223372036854775804, 9223372036854775806) x;
+ x | last_value
+---------------------+---------------------
+ 9223372036854775804 | 9223372036854775806
+ 9223372036854775805 | 9223372036854775806
+ 9223372036854775806 | 9223372036854775806
+(3 rows)
+
+select x, last_value(x) over (order by x desc range between current row and 5 following)
+from generate_series(-9223372036854775806, -9223372036854775804) x;
+ x | last_value
+----------------------+----------------------
+ -9223372036854775804 | -9223372036854775806
+ -9223372036854775805 | -9223372036854775806
+ -9223372036854775806 | -9223372036854775806
+(3 rows)
+
+-- Test in_range for other numeric datatypes
+create temp table numerics(
+ id int,
+ f_float4 float4,
+ f_float8 float8,
+ f_numeric numeric
+);
+insert into numerics values
+(0, '-infinity', '-infinity', '-infinity'),
+(1, -3, -3, -3),
+(2, -1, -1, -1),
+(3, 0, 0, 0),
+(4, 1.1, 1.1, 1.1),
+(5, 1.12, 1.12, 1.12),
+(6, 2, 2, 2),
+(7, 100, 100, 100),
+(8, 'infinity', 'infinity', 'infinity'),
+(9, 'NaN', 'NaN', 'NaN');
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 1 preceding and 1 following);
+ id | f_float4 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 0
+ 1 | -3 | 1 | 1
+ 2 | -1 | 2 | 3
+ 3 | 0 | 2 | 3
+ 4 | 1.1 | 4 | 6
+ 5 | 1.12 | 4 | 6
+ 6 | 2 | 4 | 6
+ 7 | 100 | 7 | 7
+ 8 | Infinity | 8 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 1 preceding and 1.1::float4 following);
+ id | f_float4 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 0
+ 1 | -3 | 1 | 1
+ 2 | -1 | 2 | 3
+ 3 | 0 | 2 | 4
+ 4 | 1.1 | 4 | 6
+ 5 | 1.12 | 4 | 6
+ 6 | 2 | 4 | 6
+ 7 | 100 | 7 | 7
+ 8 | Infinity | 8 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 'inf' preceding and 'inf' following);
+ id | f_float4 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 8
+ 1 | -3 | 0 | 8
+ 2 | -1 | 0 | 8
+ 3 | 0 | 0 | 8
+ 4 | 1.1 | 0 | 8
+ 5 | 1.12 | 0 | 8
+ 6 | 2 | 0 | 8
+ 7 | 100 | 0 | 8
+ 8 | Infinity | 0 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 'inf' preceding and 'inf' preceding);
+ id | f_float4 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 0
+ 1 | -3 | 0 | 0
+ 2 | -1 | 0 | 0
+ 3 | 0 | 0 | 0
+ 4 | 1.1 | 0 | 0
+ 5 | 1.12 | 0 | 0
+ 6 | 2 | 0 | 0
+ 7 | 100 | 0 | 0
+ 8 | Infinity | 0 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 'inf' following and 'inf' following);
+ id | f_float4 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 8
+ 1 | -3 | 8 | 8
+ 2 | -1 | 8 | 8
+ 3 | 0 | 8 | 8
+ 4 | 1.1 | 8 | 8
+ 5 | 1.12 | 8 | 8
+ 6 | 2 | 8 | 8
+ 7 | 100 | 8 | 8
+ 8 | Infinity | 8 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 1.1 preceding and 'NaN' following); -- error, NaN disallowed
+ERROR: invalid preceding or following size in window function
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 1 preceding and 1 following);
+ id | f_float8 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 0
+ 1 | -3 | 1 | 1
+ 2 | -1 | 2 | 3
+ 3 | 0 | 2 | 3
+ 4 | 1.1 | 4 | 6
+ 5 | 1.12 | 4 | 6
+ 6 | 2 | 4 | 6
+ 7 | 100 | 7 | 7
+ 8 | Infinity | 8 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 1 preceding and 1.1::float8 following);
+ id | f_float8 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 0
+ 1 | -3 | 1 | 1
+ 2 | -1 | 2 | 3
+ 3 | 0 | 2 | 4
+ 4 | 1.1 | 4 | 6
+ 5 | 1.12 | 4 | 6
+ 6 | 2 | 4 | 6
+ 7 | 100 | 7 | 7
+ 8 | Infinity | 8 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 'inf' preceding and 'inf' following);
+ id | f_float8 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 8
+ 1 | -3 | 0 | 8
+ 2 | -1 | 0 | 8
+ 3 | 0 | 0 | 8
+ 4 | 1.1 | 0 | 8
+ 5 | 1.12 | 0 | 8
+ 6 | 2 | 0 | 8
+ 7 | 100 | 0 | 8
+ 8 | Infinity | 0 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 'inf' preceding and 'inf' preceding);
+ id | f_float8 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 0
+ 1 | -3 | 0 | 0
+ 2 | -1 | 0 | 0
+ 3 | 0 | 0 | 0
+ 4 | 1.1 | 0 | 0
+ 5 | 1.12 | 0 | 0
+ 6 | 2 | 0 | 0
+ 7 | 100 | 0 | 0
+ 8 | Infinity | 0 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 'inf' following and 'inf' following);
+ id | f_float8 | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 8
+ 1 | -3 | 8 | 8
+ 2 | -1 | 8 | 8
+ 3 | 0 | 8 | 8
+ 4 | 1.1 | 8 | 8
+ 5 | 1.12 | 8 | 8
+ 6 | 2 | 8 | 8
+ 7 | 100 | 8 | 8
+ 8 | Infinity | 8 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 1.1 preceding and 'NaN' following); -- error, NaN disallowed
+ERROR: invalid preceding or following size in window function
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1 preceding and 1 following);
+ id | f_numeric | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 0
+ 1 | -3 | 1 | 1
+ 2 | -1 | 2 | 3
+ 3 | 0 | 2 | 3
+ 4 | 1.1 | 4 | 6
+ 5 | 1.12 | 4 | 6
+ 6 | 2 | 4 | 6
+ 7 | 100 | 7 | 7
+ 8 | Infinity | 8 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1 preceding and 1.1::numeric following);
+ id | f_numeric | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 0
+ 1 | -3 | 1 | 1
+ 2 | -1 | 2 | 3
+ 3 | 0 | 2 | 4
+ 4 | 1.1 | 4 | 6
+ 5 | 1.12 | 4 | 6
+ 6 | 2 | 4 | 6
+ 7 | 100 | 7 | 7
+ 8 | Infinity | 8 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1 preceding and 1.1::float8 following); -- currently unsupported
+ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type numeric and offset type double precision
+LINE 4: 1 preceding and 1.1::float8 following);
+ ^
+HINT: Cast the offset value to an appropriate type.
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 'inf' preceding and 'inf' following);
+ id | f_numeric | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 8
+ 1 | -3 | 0 | 8
+ 2 | -1 | 0 | 8
+ 3 | 0 | 0 | 8
+ 4 | 1.1 | 0 | 8
+ 5 | 1.12 | 0 | 8
+ 6 | 2 | 0 | 8
+ 7 | 100 | 0 | 8
+ 8 | Infinity | 0 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 'inf' preceding and 'inf' preceding);
+ id | f_numeric | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 0
+ 1 | -3 | 0 | 0
+ 2 | -1 | 0 | 0
+ 3 | 0 | 0 | 0
+ 4 | 1.1 | 0 | 0
+ 5 | 1.12 | 0 | 0
+ 6 | 2 | 0 | 0
+ 7 | 100 | 0 | 0
+ 8 | Infinity | 0 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 'inf' following and 'inf' following);
+ id | f_numeric | first_value | last_value
+----+-----------+-------------+------------
+ 0 | -Infinity | 0 | 8
+ 1 | -3 | 8 | 8
+ 2 | -1 | 8 | 8
+ 3 | 0 | 8 | 8
+ 4 | 1.1 | 8 | 8
+ 5 | 1.12 | 8 | 8
+ 6 | 2 | 8 | 8
+ 7 | 100 | 8 | 8
+ 8 | Infinity | 8 | 8
+ 9 | NaN | 9 | 9
+(10 rows)
+
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1.1 preceding and 'NaN' following); -- error, NaN disallowed
+ERROR: invalid preceding or following size in window function
+-- Test in_range for other datetime datatypes
+create temp table datetimes(
+ id int,
+ f_time time,
+ f_timetz timetz,
+ f_interval interval,
+ f_timestamptz timestamptz,
+ f_timestamp timestamp
+);
+insert into datetimes values
+(1, '11:00', '11:00 BST', '1 year', '2000-10-19 10:23:54+01', '2000-10-19 10:23:54'),
+(2, '12:00', '12:00 BST', '2 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'),
+(3, '13:00', '13:00 BST', '3 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'),
+(4, '14:00', '14:00 BST', '4 years', '2002-10-19 10:23:54+01', '2002-10-19 10:23:54'),
+(5, '15:00', '15:00 BST', '5 years', '2003-10-19 10:23:54+01', '2003-10-19 10:23:54'),
+(6, '15:00', '15:00 BST', '5 years', '2004-10-19 10:23:54+01', '2004-10-19 10:23:54'),
+(7, '17:00', '17:00 BST', '7 years', '2005-10-19 10:23:54+01', '2005-10-19 10:23:54'),
+(8, '18:00', '18:00 BST', '8 years', '2006-10-19 10:23:54+01', '2006-10-19 10:23:54'),
+(9, '19:00', '19:00 BST', '9 years', '2007-10-19 10:23:54+01', '2007-10-19 10:23:54'),
+(10, '20:00', '20:00 BST', '10 years', '2008-10-19 10:23:54+01', '2008-10-19 10:23:54');
+select id, f_time, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_time range between
+ '70 min'::interval preceding and '2 hours'::interval following);
+ id | f_time | first_value | last_value
+----+----------+-------------+------------
+ 1 | 11:00:00 | 1 | 3
+ 2 | 12:00:00 | 1 | 4
+ 3 | 13:00:00 | 2 | 6
+ 4 | 14:00:00 | 3 | 6
+ 5 | 15:00:00 | 4 | 7
+ 6 | 15:00:00 | 4 | 7
+ 7 | 17:00:00 | 7 | 9
+ 8 | 18:00:00 | 7 | 10
+ 9 | 19:00:00 | 8 | 10
+ 10 | 20:00:00 | 9 | 10
+(10 rows)
+
+select id, f_time, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_time desc range between
+ '70 min' preceding and '2 hours' following);
+ id | f_time | first_value | last_value
+----+----------+-------------+------------
+ 10 | 20:00:00 | 10 | 8
+ 9 | 19:00:00 | 10 | 7
+ 8 | 18:00:00 | 9 | 7
+ 7 | 17:00:00 | 8 | 5
+ 6 | 15:00:00 | 6 | 3
+ 5 | 15:00:00 | 6 | 3
+ 4 | 14:00:00 | 6 | 2
+ 3 | 13:00:00 | 4 | 1
+ 2 | 12:00:00 | 3 | 1
+ 1 | 11:00:00 | 2 | 1
+(10 rows)
+
+select id, f_timetz, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timetz range between
+ '70 min'::interval preceding and '2 hours'::interval following);
+ id | f_timetz | first_value | last_value
+----+-------------+-------------+------------
+ 1 | 11:00:00+01 | 1 | 3
+ 2 | 12:00:00+01 | 1 | 4
+ 3 | 13:00:00+01 | 2 | 6
+ 4 | 14:00:00+01 | 3 | 6
+ 5 | 15:00:00+01 | 4 | 7
+ 6 | 15:00:00+01 | 4 | 7
+ 7 | 17:00:00+01 | 7 | 9
+ 8 | 18:00:00+01 | 7 | 10
+ 9 | 19:00:00+01 | 8 | 10
+ 10 | 20:00:00+01 | 9 | 10
+(10 rows)
+
+select id, f_timetz, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timetz desc range between
+ '70 min' preceding and '2 hours' following);
+ id | f_timetz | first_value | last_value
+----+-------------+-------------+------------
+ 10 | 20:00:00+01 | 10 | 8
+ 9 | 19:00:00+01 | 10 | 7
+ 8 | 18:00:00+01 | 9 | 7
+ 7 | 17:00:00+01 | 8 | 5
+ 6 | 15:00:00+01 | 6 | 3
+ 5 | 15:00:00+01 | 6 | 3
+ 4 | 14:00:00+01 | 6 | 2
+ 3 | 13:00:00+01 | 4 | 1
+ 2 | 12:00:00+01 | 3 | 1
+ 1 | 11:00:00+01 | 2 | 1
+(10 rows)
+
+select id, f_interval, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_interval range between
+ '1 year'::interval preceding and '1 year'::interval following);
+ id | f_interval | first_value | last_value
+----+------------+-------------+------------
+ 1 | @ 1 year | 1 | 2
+ 2 | @ 2 years | 1 | 3
+ 3 | @ 3 years | 2 | 4
+ 4 | @ 4 years | 3 | 6
+ 5 | @ 5 years | 4 | 6
+ 6 | @ 5 years | 4 | 6
+ 7 | @ 7 years | 7 | 8
+ 8 | @ 8 years | 7 | 9
+ 9 | @ 9 years | 8 | 10
+ 10 | @ 10 years | 9 | 10
+(10 rows)
+
+select id, f_interval, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_interval desc range between
+ '1 year' preceding and '1 year' following);
+ id | f_interval | first_value | last_value
+----+------------+-------------+------------
+ 10 | @ 10 years | 10 | 9
+ 9 | @ 9 years | 10 | 8
+ 8 | @ 8 years | 9 | 7
+ 7 | @ 7 years | 8 | 7
+ 6 | @ 5 years | 6 | 4
+ 5 | @ 5 years | 6 | 4
+ 4 | @ 4 years | 6 | 3
+ 3 | @ 3 years | 4 | 2
+ 2 | @ 2 years | 3 | 1
+ 1 | @ 1 year | 2 | 1
+(10 rows)
+
+select id, f_timestamptz, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timestamptz range between
+ '1 year'::interval preceding and '1 year'::interval following);
+ id | f_timestamptz | first_value | last_value
+----+------------------------------+-------------+------------
+ 1 | Thu Oct 19 02:23:54 2000 PDT | 1 | 3
+ 2 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4
+ 3 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4
+ 4 | Sat Oct 19 02:23:54 2002 PDT | 2 | 5
+ 5 | Sun Oct 19 02:23:54 2003 PDT | 4 | 6
+ 6 | Tue Oct 19 02:23:54 2004 PDT | 5 | 7
+ 7 | Wed Oct 19 02:23:54 2005 PDT | 6 | 8
+ 8 | Thu Oct 19 02:23:54 2006 PDT | 7 | 9
+ 9 | Fri Oct 19 02:23:54 2007 PDT | 8 | 10
+ 10 | Sun Oct 19 02:23:54 2008 PDT | 9 | 10
+(10 rows)
+
+select id, f_timestamptz, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timestamptz desc range between
+ '1 year' preceding and '1 year' following);
+ id | f_timestamptz | first_value | last_value
+----+------------------------------+-------------+------------
+ 10 | Sun Oct 19 02:23:54 2008 PDT | 10 | 9
+ 9 | Fri Oct 19 02:23:54 2007 PDT | 10 | 8
+ 8 | Thu Oct 19 02:23:54 2006 PDT | 9 | 7
+ 7 | Wed Oct 19 02:23:54 2005 PDT | 8 | 6
+ 6 | Tue Oct 19 02:23:54 2004 PDT | 7 | 5
+ 5 | Sun Oct 19 02:23:54 2003 PDT | 6 | 4
+ 4 | Sat Oct 19 02:23:54 2002 PDT | 5 | 2
+ 3 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1
+ 2 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1
+ 1 | Thu Oct 19 02:23:54 2000 PDT | 3 | 1
+(10 rows)
+
+select id, f_timestamp, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timestamp range between
+ '1 year'::interval preceding and '1 year'::interval following);
+ id | f_timestamp | first_value | last_value
+----+--------------------------+-------------+------------
+ 1 | Thu Oct 19 10:23:54 2000 | 1 | 3
+ 2 | Fri Oct 19 10:23:54 2001 | 1 | 4
+ 3 | Fri Oct 19 10:23:54 2001 | 1 | 4
+ 4 | Sat Oct 19 10:23:54 2002 | 2 | 5
+ 5 | Sun Oct 19 10:23:54 2003 | 4 | 6
+ 6 | Tue Oct 19 10:23:54 2004 | 5 | 7
+ 7 | Wed Oct 19 10:23:54 2005 | 6 | 8
+ 8 | Thu Oct 19 10:23:54 2006 | 7 | 9
+ 9 | Fri Oct 19 10:23:54 2007 | 8 | 10
+ 10 | Sun Oct 19 10:23:54 2008 | 9 | 10
+(10 rows)
+
+select id, f_timestamp, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timestamp desc range between
+ '1 year' preceding and '1 year' following);
+ id | f_timestamp | first_value | last_value
+----+--------------------------+-------------+------------
+ 10 | Sun Oct 19 10:23:54 2008 | 10 | 9
+ 9 | Fri Oct 19 10:23:54 2007 | 10 | 8
+ 8 | Thu Oct 19 10:23:54 2006 | 9 | 7
+ 7 | Wed Oct 19 10:23:54 2005 | 8 | 6
+ 6 | Tue Oct 19 10:23:54 2004 | 7 | 5
+ 5 | Sun Oct 19 10:23:54 2003 | 6 | 4
+ 4 | Sat Oct 19 10:23:54 2002 | 5 | 2
+ 3 | Fri Oct 19 10:23:54 2001 | 4 | 1
+ 2 | Fri Oct 19 10:23:54 2001 | 4 | 1
+ 1 | Thu Oct 19 10:23:54 2000 | 3 | 1
+(10 rows)
+
+-- RANGE offset PRECEDING/FOLLOWING error cases
+select sum(salary) over (order by enroll_date, salary range between '1 year'::interval preceding and '2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column
+LINE 1: select sum(salary) over (order by enroll_date, salary range ...
+ ^
+select sum(salary) over (range between '1 year'::interval preceding and '2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column
+LINE 1: select sum(salary) over (range between '1 year'::interval pr...
+ ^
+select sum(salary) over (order by depname range between '1 year'::interval preceding and '2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type text
+LINE 1: ... sum(salary) over (order by depname range between '1 year'::...
+ ^
+select max(enroll_date) over (order by enroll_date range between 1 preceding and 2 following
+ exclude ties), salary, enroll_date from empsalary;
+ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type date and offset type integer
+LINE 1: ...ll_date) over (order by enroll_date range between 1 precedin...
+ ^
+HINT: Cast the offset value to an appropriate type.
+select max(enroll_date) over (order by salary range between -1 preceding and 2 following
+ exclude ties), salary, enroll_date from empsalary;
+ERROR: invalid preceding or following size in window function
+select max(enroll_date) over (order by salary range between 1 preceding and -2 following
+ exclude ties), salary, enroll_date from empsalary;
+ERROR: invalid preceding or following size in window function
+select max(enroll_date) over (order by salary range between '1 year'::interval preceding and '2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type integer and offset type interval
+LINE 1: ...(enroll_date) over (order by salary range between '1 year'::...
+ ^
+HINT: Cast the offset value to an appropriate type.
+select max(enroll_date) over (order by enroll_date range between '1 year'::interval preceding and '-2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+ERROR: invalid preceding or following size in window function
+-- GROUPS tests
+SELECT sum(unique1) over (order by four groups between unbounded preceding and current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 12 | 0 | 0
+ 12 | 8 | 0
+ 12 | 4 | 0
+ 27 | 5 | 1
+ 27 | 9 | 1
+ 27 | 1 | 1
+ 35 | 6 | 2
+ 35 | 2 | 2
+ 45 | 3 | 3
+ 45 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between unbounded preceding and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 45 | 0 | 0
+ 45 | 8 | 0
+ 45 | 4 | 0
+ 45 | 5 | 1
+ 45 | 9 | 1
+ 45 | 1 | 1
+ 45 | 6 | 2
+ 45 | 2 | 2
+ 45 | 3 | 3
+ 45 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between current row and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 45 | 0 | 0
+ 45 | 8 | 0
+ 45 | 4 | 0
+ 33 | 5 | 1
+ 33 | 9 | 1
+ 33 | 1 | 1
+ 18 | 6 | 2
+ 18 | 2 | 2
+ 10 | 3 | 3
+ 10 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between 1 preceding and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 45 | 0 | 0
+ 45 | 8 | 0
+ 45 | 4 | 0
+ 45 | 5 | 1
+ 45 | 9 | 1
+ 45 | 1 | 1
+ 33 | 6 | 2
+ 33 | 2 | 2
+ 18 | 3 | 3
+ 18 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between 1 following and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 33 | 0 | 0
+ 33 | 8 | 0
+ 33 | 4 | 0
+ 18 | 5 | 1
+ 18 | 9 | 1
+ 18 | 1 | 1
+ 10 | 6 | 2
+ 10 | 2 | 2
+ | 3 | 3
+ | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between unbounded preceding and 2 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 35 | 0 | 0
+ 35 | 8 | 0
+ 35 | 4 | 0
+ 45 | 5 | 1
+ 45 | 9 | 1
+ 45 | 1 | 1
+ 45 | 6 | 2
+ 45 | 2 | 2
+ 45 | 3 | 3
+ 45 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 preceding),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ | 0 | 0
+ | 8 | 0
+ | 4 | 0
+ 12 | 5 | 1
+ 12 | 9 | 1
+ 12 | 1 | 1
+ 27 | 6 | 2
+ 27 | 2 | 2
+ 23 | 3 | 3
+ 23 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 27 | 0 | 0
+ 27 | 8 | 0
+ 27 | 4 | 0
+ 35 | 5 | 1
+ 35 | 9 | 1
+ 35 | 1 | 1
+ 45 | 6 | 2
+ 45 | 2 | 2
+ 33 | 3 | 3
+ 33 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between 0 preceding and 0 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 12 | 0 | 0
+ 12 | 8 | 0
+ 12 | 4 | 0
+ 15 | 5 | 1
+ 15 | 9 | 1
+ 15 | 1 | 1
+ 8 | 6 | 2
+ 8 | 2 | 2
+ 10 | 3 | 3
+ 10 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following
+ exclude current row), unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 27 | 0 | 0
+ 19 | 8 | 0
+ 23 | 4 | 0
+ 30 | 5 | 1
+ 26 | 9 | 1
+ 34 | 1 | 1
+ 39 | 6 | 2
+ 43 | 2 | 2
+ 30 | 3 | 3
+ 26 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following
+ exclude group), unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 15 | 0 | 0
+ 15 | 8 | 0
+ 15 | 4 | 0
+ 20 | 5 | 1
+ 20 | 9 | 1
+ 20 | 1 | 1
+ 37 | 6 | 2
+ 37 | 2 | 2
+ 23 | 3 | 3
+ 23 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following
+ exclude ties), unique1, four
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four
+-----+---------+------
+ 15 | 0 | 0
+ 23 | 8 | 0
+ 19 | 4 | 0
+ 25 | 5 | 1
+ 29 | 9 | 1
+ 21 | 1 | 1
+ 43 | 6 | 2
+ 39 | 2 | 2
+ 26 | 3 | 3
+ 30 | 7 | 3
+(10 rows)
+
+SELECT sum(unique1) over (partition by ten
+ order by four groups between 0 preceding and 0 following),unique1, four, ten
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four | ten
+-----+---------+------+-----
+ 0 | 0 | 0 | 0
+ 1 | 1 | 1 | 1
+ 2 | 2 | 2 | 2
+ 3 | 3 | 3 | 3
+ 4 | 4 | 0 | 4
+ 5 | 5 | 1 | 5
+ 6 | 6 | 2 | 6
+ 7 | 7 | 3 | 7
+ 8 | 8 | 0 | 8
+ 9 | 9 | 1 | 9
+(10 rows)
+
+SELECT sum(unique1) over (partition by ten
+ order by four groups between 0 preceding and 0 following exclude current row), unique1, four, ten
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four | ten
+-----+---------+------+-----
+ | 0 | 0 | 0
+ | 1 | 1 | 1
+ | 2 | 2 | 2
+ | 3 | 3 | 3
+ | 4 | 0 | 4
+ | 5 | 1 | 5
+ | 6 | 2 | 6
+ | 7 | 3 | 7
+ | 8 | 0 | 8
+ | 9 | 1 | 9
+(10 rows)
+
+SELECT sum(unique1) over (partition by ten
+ order by four groups between 0 preceding and 0 following exclude group), unique1, four, ten
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four | ten
+-----+---------+------+-----
+ | 0 | 0 | 0
+ | 1 | 1 | 1
+ | 2 | 2 | 2
+ | 3 | 3 | 3
+ | 4 | 0 | 4
+ | 5 | 1 | 5
+ | 6 | 2 | 6
+ | 7 | 3 | 7
+ | 8 | 0 | 8
+ | 9 | 1 | 9
+(10 rows)
+
+SELECT sum(unique1) over (partition by ten
+ order by four groups between 0 preceding and 0 following exclude ties), unique1, four, ten
+FROM tenk1 WHERE unique1 < 10;
+ sum | unique1 | four | ten
+-----+---------+------+-----
+ 0 | 0 | 0 | 0
+ 1 | 1 | 1 | 1
+ 2 | 2 | 2 | 2
+ 3 | 3 | 3 | 3
+ 4 | 4 | 0 | 4
+ 5 | 5 | 1 | 5
+ 6 | 6 | 2 | 6
+ 7 | 7 | 3 | 7
+ 8 | 8 | 0 | 8
+ 9 | 9 | 1 | 9
+(10 rows)
+
+select first_value(salary) over(order by enroll_date groups between 1 preceding and 1 following),
+ lead(salary) over(order by enroll_date groups between 1 preceding and 1 following),
+ nth_value(salary, 1) over(order by enroll_date groups between 1 preceding and 1 following),
+ salary, enroll_date from empsalary;
+ first_value | lead | nth_value | salary | enroll_date
+-------------+------+-----------+--------+-------------
+ 5000 | 6000 | 5000 | 5000 | 10-01-2006
+ 5000 | 3900 | 5000 | 6000 | 10-01-2006
+ 5000 | 4800 | 5000 | 3900 | 12-23-2006
+ 3900 | 5200 | 3900 | 4800 | 08-01-2007
+ 3900 | 4800 | 3900 | 5200 | 08-01-2007
+ 4800 | 5200 | 4800 | 4800 | 08-08-2007
+ 4800 | 3500 | 4800 | 5200 | 08-15-2007
+ 5200 | 4500 | 5200 | 3500 | 12-10-2007
+ 3500 | 4200 | 3500 | 4500 | 01-01-2008
+ 3500 | | 3500 | 4200 | 01-01-2008
+(10 rows)
+
+select last_value(salary) over(order by enroll_date groups between 1 preceding and 1 following),
+ lag(salary) over(order by enroll_date groups between 1 preceding and 1 following),
+ salary, enroll_date from empsalary;
+ last_value | lag | salary | enroll_date
+------------+------+--------+-------------
+ 3900 | | 5000 | 10-01-2006
+ 3900 | 5000 | 6000 | 10-01-2006
+ 5200 | 6000 | 3900 | 12-23-2006
+ 4800 | 3900 | 4800 | 08-01-2007
+ 4800 | 4800 | 5200 | 08-01-2007
+ 5200 | 5200 | 4800 | 08-08-2007
+ 3500 | 4800 | 5200 | 08-15-2007
+ 4200 | 5200 | 3500 | 12-10-2007
+ 4200 | 3500 | 4500 | 01-01-2008
+ 4200 | 4500 | 4200 | 01-01-2008
+(10 rows)
+
+select first_value(salary) over(order by enroll_date groups between 1 following and 3 following
+ exclude current row),
+ lead(salary) over(order by enroll_date groups between 1 following and 3 following exclude ties),
+ nth_value(salary, 1) over(order by enroll_date groups between 1 following and 3 following
+ exclude ties),
+ salary, enroll_date from empsalary;
+ first_value | lead | nth_value | salary | enroll_date
+-------------+------+-----------+--------+-------------
+ 3900 | 6000 | 3900 | 5000 | 10-01-2006
+ 3900 | 3900 | 3900 | 6000 | 10-01-2006
+ 4800 | 4800 | 4800 | 3900 | 12-23-2006
+ 4800 | 5200 | 4800 | 4800 | 08-01-2007
+ 4800 | 4800 | 4800 | 5200 | 08-01-2007
+ 5200 | 5200 | 5200 | 4800 | 08-08-2007
+ 3500 | 3500 | 3500 | 5200 | 08-15-2007
+ 4500 | 4500 | 4500 | 3500 | 12-10-2007
+ | 4200 | | 4500 | 01-01-2008
+ | | | 4200 | 01-01-2008
+(10 rows)
+
+select last_value(salary) over(order by enroll_date groups between 1 following and 3 following
+ exclude group),
+ lag(salary) over(order by enroll_date groups between 1 following and 3 following exclude group),
+ salary, enroll_date from empsalary;
+ last_value | lag | salary | enroll_date
+------------+------+--------+-------------
+ 4800 | | 5000 | 10-01-2006
+ 4800 | 5000 | 6000 | 10-01-2006
+ 5200 | 6000 | 3900 | 12-23-2006
+ 3500 | 3900 | 4800 | 08-01-2007
+ 3500 | 4800 | 5200 | 08-01-2007
+ 4200 | 5200 | 4800 | 08-08-2007
+ 4200 | 4800 | 5200 | 08-15-2007
+ 4200 | 5200 | 3500 | 12-10-2007
+ | 3500 | 4500 | 01-01-2008
+ | 4500 | 4200 | 01-01-2008
+(10 rows)
+
+-- Show differences in offset interpretation between ROWS, RANGE, and GROUPS
+WITH cte (x) AS (
+ SELECT * FROM generate_series(1, 35, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following);
+ x | sum
+----+-----
+ 1 | 4
+ 3 | 9
+ 5 | 15
+ 7 | 21
+ 9 | 27
+ 11 | 33
+ 13 | 39
+ 15 | 45
+ 17 | 51
+ 19 | 57
+ 21 | 63
+ 23 | 69
+ 25 | 75
+ 27 | 81
+ 29 | 87
+ 31 | 93
+ 33 | 99
+ 35 | 68
+(18 rows)
+
+WITH cte (x) AS (
+ SELECT * FROM generate_series(1, 35, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x range between 1 preceding and 1 following);
+ x | sum
+----+-----
+ 1 | 1
+ 3 | 3
+ 5 | 5
+ 7 | 7
+ 9 | 9
+ 11 | 11
+ 13 | 13
+ 15 | 15
+ 17 | 17
+ 19 | 19
+ 21 | 21
+ 23 | 23
+ 25 | 25
+ 27 | 27
+ 29 | 29
+ 31 | 31
+ 33 | 33
+ 35 | 35
+(18 rows)
+
+WITH cte (x) AS (
+ SELECT * FROM generate_series(1, 35, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following);
+ x | sum
+----+-----
+ 1 | 4
+ 3 | 9
+ 5 | 15
+ 7 | 21
+ 9 | 27
+ 11 | 33
+ 13 | 39
+ 15 | 45
+ 17 | 51
+ 19 | 57
+ 21 | 63
+ 23 | 69
+ 25 | 75
+ 27 | 81
+ 29 | 87
+ 31 | 93
+ 33 | 99
+ 35 | 68
+(18 rows)
+
+WITH cte (x) AS (
+ select 1 union all select 1 union all select 1 union all
+ SELECT * FROM generate_series(5, 49, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following);
+ x | sum
+----+-----
+ 1 | 2
+ 1 | 3
+ 1 | 7
+ 5 | 13
+ 7 | 21
+ 9 | 27
+ 11 | 33
+ 13 | 39
+ 15 | 45
+ 17 | 51
+ 19 | 57
+ 21 | 63
+ 23 | 69
+ 25 | 75
+ 27 | 81
+ 29 | 87
+ 31 | 93
+ 33 | 99
+ 35 | 105
+ 37 | 111
+ 39 | 117
+ 41 | 123
+ 43 | 129
+ 45 | 135
+ 47 | 141
+ 49 | 96
+(26 rows)
+
+WITH cte (x) AS (
+ select 1 union all select 1 union all select 1 union all
+ SELECT * FROM generate_series(5, 49, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x range between 1 preceding and 1 following);
+ x | sum
+----+-----
+ 1 | 3
+ 1 | 3
+ 1 | 3
+ 5 | 5
+ 7 | 7
+ 9 | 9
+ 11 | 11
+ 13 | 13
+ 15 | 15
+ 17 | 17
+ 19 | 19
+ 21 | 21
+ 23 | 23
+ 25 | 25
+ 27 | 27
+ 29 | 29
+ 31 | 31
+ 33 | 33
+ 35 | 35
+ 37 | 37
+ 39 | 39
+ 41 | 41
+ 43 | 43
+ 45 | 45
+ 47 | 47
+ 49 | 49
+(26 rows)
+
+WITH cte (x) AS (
+ select 1 union all select 1 union all select 1 union all
+ SELECT * FROM generate_series(5, 49, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following);
+ x | sum
+----+-----
+ 1 | 8
+ 1 | 8
+ 1 | 8
+ 5 | 15
+ 7 | 21
+ 9 | 27
+ 11 | 33
+ 13 | 39
+ 15 | 45
+ 17 | 51
+ 19 | 57
+ 21 | 63
+ 23 | 69
+ 25 | 75
+ 27 | 81
+ 29 | 87
+ 31 | 93
+ 33 | 99
+ 35 | 105
+ 37 | 111
+ 39 | 117
+ 41 | 123
+ 43 | 129
+ 45 | 135
+ 47 | 141
+ 49 | 96
+(26 rows)
+
+-- with UNION
+SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0;
+ count
+-------
+(0 rows)
+
+-- check some degenerate cases
+create temp table t1 (f1 int, f2 int8);
+insert into t1 values (1,1),(1,2),(2,2);
+select f1, sum(f1) over (partition by f1
+ range between 1 preceding and 1 following)
+from t1 where f1 = f2; -- error, must have order by
+ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column
+LINE 1: select f1, sum(f1) over (partition by f1
+ ^
+explain (costs off)
+select f1, sum(f1) over (partition by f1 order by f2
+ range between 1 preceding and 1 following)
+from t1 where f1 = f2;
+ QUERY PLAN
+---------------------------------
+ WindowAgg
+ -> Sort
+ Sort Key: f1
+ -> Seq Scan on t1
+ Filter: (f1 = f2)
+(5 rows)
+
+select f1, sum(f1) over (partition by f1 order by f2
+ range between 1 preceding and 1 following)
+from t1 where f1 = f2;
+ f1 | sum
+----+-----
+ 1 | 1
+ 2 | 2
+(2 rows)
+
+select f1, sum(f1) over (partition by f1, f1 order by f2
+ range between 2 preceding and 1 preceding)
+from t1 where f1 = f2;
+ f1 | sum
+----+-----
+ 1 |
+ 2 |
+(2 rows)
+
+select f1, sum(f1) over (partition by f1, f2 order by f2
+ range between 1 following and 2 following)
+from t1 where f1 = f2;
+ f1 | sum
+----+-----
+ 1 |
+ 2 |
+(2 rows)
+
+select f1, sum(f1) over (partition by f1
+ groups between 1 preceding and 1 following)
+from t1 where f1 = f2; -- error, must have order by
+ERROR: GROUPS mode requires an ORDER BY clause
+LINE 1: select f1, sum(f1) over (partition by f1
+ ^
+explain (costs off)
+select f1, sum(f1) over (partition by f1 order by f2
+ groups between 1 preceding and 1 following)
+from t1 where f1 = f2;
+ QUERY PLAN
+---------------------------------
+ WindowAgg
+ -> Sort
+ Sort Key: f1
+ -> Seq Scan on t1
+ Filter: (f1 = f2)
+(5 rows)
+
+select f1, sum(f1) over (partition by f1 order by f2
+ groups between 1 preceding and 1 following)
+from t1 where f1 = f2;
+ f1 | sum
+----+-----
+ 1 | 1
+ 2 | 2
+(2 rows)
+
+select f1, sum(f1) over (partition by f1, f1 order by f2
+ groups between 2 preceding and 1 preceding)
+from t1 where f1 = f2;
+ f1 | sum
+----+-----
+ 1 |
+ 2 |
+(2 rows)
+
+select f1, sum(f1) over (partition by f1, f2 order by f2
+ groups between 1 following and 2 following)
+from t1 where f1 = f2;
+ f1 | sum
+----+-----
+ 1 |
+ 2 |
+(2 rows)
+
+-- ordering by a non-integer constant is allowed
+SELECT rank() OVER (ORDER BY length('abc'));
+ rank
+------
+ 1
+(1 row)
+
+-- can't order by another window function
+SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random()));
+ERROR: window functions are not allowed in window definitions
+LINE 1: SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())...
+ ^
+-- some other errors
+SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10;
+ERROR: window functions are not allowed in WHERE
+LINE 1: SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY sa...
+ ^
+SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10;
+ERROR: window functions are not allowed in JOIN conditions
+LINE 1: SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVE...
+ ^
+SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1;
+ERROR: window functions are not allowed in GROUP BY
+LINE 1: SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GRO...
+ ^
+SELECT * FROM rank() OVER (ORDER BY random());
+ERROR: syntax error at or near "ORDER"
+LINE 1: SELECT * FROM rank() OVER (ORDER BY random());
+ ^
+DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10;
+ERROR: window functions are not allowed in WHERE
+LINE 1: DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())...
+ ^
+DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random());
+ERROR: window functions are not allowed in RETURNING
+LINE 1: DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random...
+ ^
+SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1);
+ERROR: window "w" is already defined
+LINE 1: ...w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY ...
+ ^
+SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1;
+ERROR: syntax error at or near "ORDER"
+LINE 1: SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM te...
+ ^
+SELECT count() OVER () FROM tenk1;
+ERROR: count(*) must be used to call a parameterless aggregate function
+LINE 1: SELECT count() OVER () FROM tenk1;
+ ^
+SELECT generate_series(1, 100) OVER () FROM empsalary;
+ERROR: OVER specified, but generate_series is not a window function nor an aggregate function
+LINE 1: SELECT generate_series(1, 100) OVER () FROM empsalary;
+ ^
+SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1;
+ERROR: argument of ntile must be greater than zero
+SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1;
+ERROR: argument of nth_value must be greater than zero
+-- filter
+SELECT sum(salary), row_number() OVER (ORDER BY depname), sum(
+ sum(salary) FILTER (WHERE enroll_date > '2007-01-01')
+) FILTER (WHERE depname <> 'sales') OVER (ORDER BY depname DESC) AS "filtered_sum",
+ depname
+FROM empsalary GROUP BY depname;
+ sum | row_number | filtered_sum | depname
+-------+------------+--------------+-----------
+ 25100 | 1 | 22600 | develop
+ 7400 | 2 | 3500 | personnel
+ 14600 | 3 | | sales
+(3 rows)
+
+-- Test pushdown of quals into a subquery containing window functions
+-- pushdown is safe because all PARTITION BY clauses include depname:
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (SELECT depname,
+ sum(salary) OVER (PARTITION BY depname) depsalary,
+ min(salary) OVER (PARTITION BY depname || 'A', depname) depminsalary
+ FROM empsalary) emp
+WHERE depname = 'sales';
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Subquery Scan on emp
+ -> WindowAgg
+ -> WindowAgg
+ -> Sort
+ Sort Key: (((empsalary.depname)::text || 'A'::text))
+ -> Seq Scan on empsalary
+ Filter: ((depname)::text = 'sales'::text)
+(7 rows)
+
+-- pushdown is unsafe because there's a PARTITION BY clause without depname:
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (SELECT depname,
+ sum(salary) OVER (PARTITION BY enroll_date) enroll_salary,
+ min(salary) OVER (PARTITION BY depname) depminsalary
+ FROM empsalary) emp
+WHERE depname = 'sales';
+ QUERY PLAN
+-------------------------------------------------------
+ Subquery Scan on emp
+ Filter: ((emp.depname)::text = 'sales'::text)
+ -> WindowAgg
+ -> Sort
+ Sort Key: empsalary.enroll_date
+ -> WindowAgg
+ -> Sort
+ Sort Key: empsalary.depname
+ -> Seq Scan on empsalary
+(9 rows)
+
+-- Test Sort node collapsing
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (SELECT depname,
+ sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
+ min(salary) OVER (PARTITION BY depname, empno order by enroll_date) depminsalary
+ FROM empsalary) emp
+WHERE depname = 'sales';
+ QUERY PLAN
+----------------------------------------------------------------------
+ Subquery Scan on emp
+ -> WindowAgg
+ -> WindowAgg
+ -> Sort
+ Sort Key: empsalary.empno, empsalary.enroll_date
+ -> Seq Scan on empsalary
+ Filter: ((depname)::text = 'sales'::text)
+(7 rows)
+
+-- Test Sort node reordering
+EXPLAIN (COSTS OFF)
+SELECT
+ lead(1) OVER (PARTITION BY depname ORDER BY salary, enroll_date),
+ lag(1) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno)
+FROM empsalary;
+ QUERY PLAN
+-------------------------------------------------------------
+ WindowAgg
+ -> WindowAgg
+ -> Sort
+ Sort Key: depname, salary, enroll_date, empno
+ -> Seq Scan on empsalary
+(5 rows)
+
+-- Test incremental sorting
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (SELECT depname,
+ empno,
+ salary,
+ enroll_date,
+ row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp,
+ row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp
+ FROM empsalary) emp
+WHERE first_emp = 1 OR last_emp = 1;
+ QUERY PLAN
+-----------------------------------------------------------------------------------
+ Subquery Scan on emp
+ Filter: ((emp.first_emp = 1) OR (emp.last_emp = 1))
+ -> WindowAgg
+ -> Incremental Sort
+ Sort Key: empsalary.depname, empsalary.enroll_date
+ Presorted Key: empsalary.depname
+ -> WindowAgg
+ -> Sort
+ Sort Key: empsalary.depname, empsalary.enroll_date DESC
+ -> Seq Scan on empsalary
+(10 rows)
+
+SELECT * FROM
+ (SELECT depname,
+ empno,
+ salary,
+ enroll_date,
+ row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp,
+ row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp
+ FROM empsalary) emp
+WHERE first_emp = 1 OR last_emp = 1;
+ depname | empno | salary | enroll_date | first_emp | last_emp
+-----------+-------+--------+-------------+-----------+----------
+ develop | 8 | 6000 | 10-01-2006 | 1 | 5
+ develop | 7 | 4200 | 01-01-2008 | 5 | 1
+ personnel | 2 | 3900 | 12-23-2006 | 1 | 2
+ personnel | 5 | 3500 | 12-10-2007 | 2 | 1
+ sales | 1 | 5000 | 10-01-2006 | 1 | 3
+ sales | 4 | 4800 | 08-08-2007 | 3 | 1
+(6 rows)
+
+-- cleanup
+DROP TABLE empsalary;
+-- test user-defined window function with named args and default args
+CREATE FUNCTION nth_value_def(val anyelement, n integer = 1) RETURNS anyelement
+ LANGUAGE internal WINDOW IMMUTABLE STRICT AS 'window_nth_value';
+SELECT nth_value_def(n := 2, val := ten) OVER (PARTITION BY four), ten, four
+ FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s;
+ nth_value_def | ten | four
+---------------+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 0
+ 0 | 4 | 0
+ 1 | 1 | 1
+ 1 | 1 | 1
+ 1 | 7 | 1
+ 1 | 9 | 1
+ | 0 | 2
+ 3 | 1 | 3
+ 3 | 3 | 3
+(10 rows)
+
+SELECT nth_value_def(ten) OVER (PARTITION BY four), ten, four
+ FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s;
+ nth_value_def | ten | four
+---------------+-----+------
+ 0 | 0 | 0
+ 0 | 0 | 0
+ 0 | 4 | 0
+ 1 | 1 | 1
+ 1 | 1 | 1
+ 1 | 7 | 1
+ 1 | 9 | 1
+ 0 | 0 | 2
+ 1 | 1 | 3
+ 1 | 3 | 3
+(10 rows)
+
+--
+-- Test the basic moving-aggregate machinery
+--
+-- create aggregates that record the series of transform calls (these are
+-- intentionally not true inverses)
+CREATE FUNCTION logging_sfunc_nonstrict(text, anyelement) RETURNS text AS
+$$ SELECT COALESCE($1, '') || '*' || quote_nullable($2) $$
+LANGUAGE SQL IMMUTABLE;
+CREATE FUNCTION logging_msfunc_nonstrict(text, anyelement) RETURNS text AS
+$$ SELECT COALESCE($1, '') || '+' || quote_nullable($2) $$
+LANGUAGE SQL IMMUTABLE;
+CREATE FUNCTION logging_minvfunc_nonstrict(text, anyelement) RETURNS text AS
+$$ SELECT $1 || '-' || quote_nullable($2) $$
+LANGUAGE SQL IMMUTABLE;
+CREATE AGGREGATE logging_agg_nonstrict (anyelement)
+(
+ stype = text,
+ sfunc = logging_sfunc_nonstrict,
+ mstype = text,
+ msfunc = logging_msfunc_nonstrict,
+ minvfunc = logging_minvfunc_nonstrict
+);
+CREATE AGGREGATE logging_agg_nonstrict_initcond (anyelement)
+(
+ stype = text,
+ sfunc = logging_sfunc_nonstrict,
+ mstype = text,
+ msfunc = logging_msfunc_nonstrict,
+ minvfunc = logging_minvfunc_nonstrict,
+ initcond = 'I',
+ minitcond = 'MI'
+);
+CREATE FUNCTION logging_sfunc_strict(text, anyelement) RETURNS text AS
+$$ SELECT $1 || '*' || quote_nullable($2) $$
+LANGUAGE SQL STRICT IMMUTABLE;
+CREATE FUNCTION logging_msfunc_strict(text, anyelement) RETURNS text AS
+$$ SELECT $1 || '+' || quote_nullable($2) $$
+LANGUAGE SQL STRICT IMMUTABLE;
+CREATE FUNCTION logging_minvfunc_strict(text, anyelement) RETURNS text AS
+$$ SELECT $1 || '-' || quote_nullable($2) $$
+LANGUAGE SQL STRICT IMMUTABLE;
+CREATE AGGREGATE logging_agg_strict (text)
+(
+ stype = text,
+ sfunc = logging_sfunc_strict,
+ mstype = text,
+ msfunc = logging_msfunc_strict,
+ minvfunc = logging_minvfunc_strict
+);
+CREATE AGGREGATE logging_agg_strict_initcond (anyelement)
+(
+ stype = text,
+ sfunc = logging_sfunc_strict,
+ mstype = text,
+ msfunc = logging_msfunc_strict,
+ minvfunc = logging_minvfunc_strict,
+ initcond = 'I',
+ minitcond = 'MI'
+);
+-- test strict and non-strict cases
+SELECT
+ p::text || ',' || i::text || ':' || COALESCE(v::text, 'NULL') AS row,
+ logging_agg_nonstrict(v) over wnd as nstrict,
+ logging_agg_nonstrict_initcond(v) over wnd as nstrict_init,
+ logging_agg_strict(v::text) over wnd as strict,
+ logging_agg_strict_initcond(v) over wnd as strict_init
+FROM (VALUES
+ (1, 1, NULL),
+ (1, 2, 'a'),
+ (1, 3, 'b'),
+ (1, 4, NULL),
+ (1, 5, NULL),
+ (1, 6, 'c'),
+ (2, 1, NULL),
+ (2, 2, 'x'),
+ (3, 1, 'z')
+) AS t(p, i, v)
+WINDOW wnd AS (PARTITION BY P ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+ORDER BY p, i;
+ row | nstrict | nstrict_init | strict | strict_init
+----------+-----------------------------------------------+-------------------------------------------------+-----------+----------------
+ 1,1:NULL | +NULL | MI+NULL | | MI
+ 1,2:a | +NULL+'a' | MI+NULL+'a' | a | MI+'a'
+ 1,3:b | +NULL+'a'-NULL+'b' | MI+NULL+'a'-NULL+'b' | a+'b' | MI+'a'+'b'
+ 1,4:NULL | +NULL+'a'-NULL+'b'-'a'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL | a+'b'-'a' | MI+'a'+'b'-'a'
+ 1,5:NULL | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | | MI
+ 1,6:c | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | c | MI+'c'
+ 2,1:NULL | +NULL | MI+NULL | | MI
+ 2,2:x | +NULL+'x' | MI+NULL+'x' | x | MI+'x'
+ 3,1:z | +'z' | MI+'z' | z | MI+'z'
+(9 rows)
+
+-- and again, but with filter
+SELECT
+ p::text || ',' || i::text || ':' ||
+ CASE WHEN f THEN COALESCE(v::text, 'NULL') ELSE '-' END as row,
+ logging_agg_nonstrict(v) filter(where f) over wnd as nstrict_filt,
+ logging_agg_nonstrict_initcond(v) filter(where f) over wnd as nstrict_init_filt,
+ logging_agg_strict(v::text) filter(where f) over wnd as strict_filt,
+ logging_agg_strict_initcond(v) filter(where f) over wnd as strict_init_filt
+FROM (VALUES
+ (1, 1, true, NULL),
+ (1, 2, false, 'a'),
+ (1, 3, true, 'b'),
+ (1, 4, false, NULL),
+ (1, 5, false, NULL),
+ (1, 6, false, 'c'),
+ (2, 1, false, NULL),
+ (2, 2, true, 'x'),
+ (3, 1, true, 'z')
+) AS t(p, i, f, v)
+WINDOW wnd AS (PARTITION BY p ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+ORDER BY p, i;
+ row | nstrict_filt | nstrict_init_filt | strict_filt | strict_init_filt
+----------+--------------+-------------------+-------------+------------------
+ 1,1:NULL | +NULL | MI+NULL | | MI
+ 1,2:- | +NULL | MI+NULL | | MI
+ 1,3:b | +'b' | MI+'b' | b | MI+'b'
+ 1,4:- | +'b' | MI+'b' | b | MI+'b'
+ 1,5:- | | MI | | MI
+ 1,6:- | | MI | | MI
+ 2,1:- | | MI | | MI
+ 2,2:x | +'x' | MI+'x' | x | MI+'x'
+ 3,1:z | +'z' | MI+'z' | z | MI+'z'
+(9 rows)
+
+-- test that volatile arguments disable moving-aggregate mode
+SELECT
+ i::text || ':' || COALESCE(v::text, 'NULL') as row,
+ logging_agg_strict(v::text)
+ over wnd as inverse,
+ logging_agg_strict(v::text || CASE WHEN random() < 0 then '?' ELSE '' END)
+ over wnd as noinverse
+FROM (VALUES
+ (1, 'a'),
+ (2, 'b'),
+ (3, 'c')
+) AS t(i, v)
+WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+ORDER BY i;
+ row | inverse | noinverse
+-----+---------------+-----------
+ 1:a | a | a
+ 2:b | a+'b' | a*'b'
+ 3:c | a+'b'-'a'+'c' | b*'c'
+(3 rows)
+
+SELECT
+ i::text || ':' || COALESCE(v::text, 'NULL') as row,
+ logging_agg_strict(v::text) filter(where true)
+ over wnd as inverse,
+ logging_agg_strict(v::text) filter(where random() >= 0)
+ over wnd as noinverse
+FROM (VALUES
+ (1, 'a'),
+ (2, 'b'),
+ (3, 'c')
+) AS t(i, v)
+WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+ORDER BY i;
+ row | inverse | noinverse
+-----+---------------+-----------
+ 1:a | a | a
+ 2:b | a+'b' | a*'b'
+ 3:c | a+'b'-'a'+'c' | b*'c'
+(3 rows)
+
+-- test that non-overlapping windows don't use inverse transitions
+SELECT
+ logging_agg_strict(v::text) OVER wnd
+FROM (VALUES
+ (1, 'a'),
+ (2, 'b'),
+ (3, 'c')
+) AS t(i, v)
+WINDOW wnd AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW)
+ORDER BY i;
+ logging_agg_strict
+--------------------
+ a
+ b
+ c
+(3 rows)
+
+-- test that returning NULL from the inverse transition functions
+-- restarts the aggregation from scratch. The second aggregate is supposed
+-- to test cases where only some aggregates restart, the third one checks
+-- that one aggregate restarting doesn't cause others to restart.
+CREATE FUNCTION sum_int_randrestart_minvfunc(int4, int4) RETURNS int4 AS
+$$ SELECT CASE WHEN random() < 0.2 THEN NULL ELSE $1 - $2 END $$
+LANGUAGE SQL STRICT;
+CREATE AGGREGATE sum_int_randomrestart (int4)
+(
+ stype = int4,
+ sfunc = int4pl,
+ mstype = int4,
+ msfunc = int4pl,
+ minvfunc = sum_int_randrestart_minvfunc
+);
+WITH
+vs AS (
+ SELECT i, (random() * 100)::int4 AS v
+ FROM generate_series(1, 100) AS i
+),
+sum_following AS (
+ SELECT i, SUM(v) OVER
+ (ORDER BY i DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS s
+ FROM vs
+)
+SELECT DISTINCT
+ sum_following.s = sum_int_randomrestart(v) OVER fwd AS eq1,
+ -sum_following.s = sum_int_randomrestart(-v) OVER fwd AS eq2,
+ 100*3+(vs.i-1)*3 = length(logging_agg_nonstrict(''::text) OVER fwd) AS eq3
+FROM vs
+JOIN sum_following ON sum_following.i = vs.i
+WINDOW fwd AS (
+ ORDER BY vs.i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
+);
+ eq1 | eq2 | eq3
+-----+-----+-----
+ t | t | t
+(1 row)
+
+--
+-- Test various built-in aggregates that have moving-aggregate support
+--
+-- test inverse transition functions handle NULLs properly
+SELECT i,AVG(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | avg
+---+--------------------
+ 1 | 1.5000000000000000
+ 2 | 2.0000000000000000
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,AVG(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | avg
+---+--------------------
+ 1 | 1.5000000000000000
+ 2 | 2.0000000000000000
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,AVG(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | avg
+---+--------------------
+ 1 | 1.5000000000000000
+ 2 | 2.0000000000000000
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,AVG(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1.5),(2,2.5),(3,NULL),(4,NULL)) t(i,v);
+ i | avg
+---+--------------------
+ 1 | 2.0000000000000000
+ 2 | 2.5000000000000000
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,AVG(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v);
+ i | avg
+---+------------
+ 1 | @ 1.5 secs
+ 2 | @ 2 secs
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,SUM(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | sum
+---+-----
+ 1 | 3
+ 2 | 2
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | sum
+---+-----
+ 1 | 3
+ 2 | 2
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,SUM(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | sum
+---+-----
+ 1 | 3
+ 2 | 2
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,SUM(v::money) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,'1.10'),(2,'2.20'),(3,NULL),(4,NULL)) t(i,v);
+ i | sum
+---+-------
+ 1 | $3.30
+ 2 | $2.20
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,SUM(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v);
+ i | sum
+---+----------
+ 1 | @ 3 secs
+ 2 | @ 2 secs
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,SUM(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1.1),(2,2.2),(3,NULL),(4,NULL)) t(i,v);
+ i | sum
+---+-----
+ 1 | 3.3
+ 2 | 2.2
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT SUM(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1.01),(2,2),(3,3)) v(i,n);
+ sum
+------
+ 6.01
+ 5
+ 3
+(3 rows)
+
+SELECT i,COUNT(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | count
+---+-------
+ 1 | 2
+ 2 | 1
+ 3 | 0
+ 4 | 0
+(4 rows)
+
+SELECT i,COUNT(*) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | count
+---+-------
+ 1 | 4
+ 2 | 3
+ 3 | 2
+ 4 | 1
+(4 rows)
+
+SELECT VAR_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ var_pop
+-----------------------
+ 21704.000000000000
+ 13868.750000000000
+ 11266.666666666667
+ 4225.0000000000000000
+ 0
+(5 rows)
+
+SELECT VAR_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ var_pop
+-----------------------
+ 21704.000000000000
+ 13868.750000000000
+ 11266.666666666667
+ 4225.0000000000000000
+ 0
+(5 rows)
+
+SELECT VAR_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ var_pop
+-----------------------
+ 21704.000000000000
+ 13868.750000000000
+ 11266.666666666667
+ 4225.0000000000000000
+ 0
+(5 rows)
+
+SELECT VAR_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ var_pop
+-----------------------
+ 21704.000000000000
+ 13868.750000000000
+ 11266.666666666667
+ 4225.0000000000000000
+ 0
+(5 rows)
+
+SELECT VAR_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ var_samp
+-----------------------
+ 27130.000000000000
+ 18491.666666666667
+ 16900.000000000000
+ 8450.0000000000000000
+
+(5 rows)
+
+SELECT VAR_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ var_samp
+-----------------------
+ 27130.000000000000
+ 18491.666666666667
+ 16900.000000000000
+ 8450.0000000000000000
+
+(5 rows)
+
+SELECT VAR_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ var_samp
+-----------------------
+ 27130.000000000000
+ 18491.666666666667
+ 16900.000000000000
+ 8450.0000000000000000
+
+(5 rows)
+
+SELECT VAR_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ var_samp
+-----------------------
+ 27130.000000000000
+ 18491.666666666667
+ 16900.000000000000
+ 8450.0000000000000000
+
+(5 rows)
+
+SELECT VARIANCE(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ variance
+-----------------------
+ 27130.000000000000
+ 18491.666666666667
+ 16900.000000000000
+ 8450.0000000000000000
+
+(5 rows)
+
+SELECT VARIANCE(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ variance
+-----------------------
+ 27130.000000000000
+ 18491.666666666667
+ 16900.000000000000
+ 8450.0000000000000000
+
+(5 rows)
+
+SELECT VARIANCE(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ variance
+-----------------------
+ 27130.000000000000
+ 18491.666666666667
+ 16900.000000000000
+ 8450.0000000000000000
+
+(5 rows)
+
+SELECT VARIANCE(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ variance
+-----------------------
+ 27130.000000000000
+ 18491.666666666667
+ 16900.000000000000
+ 8450.0000000000000000
+
+(5 rows)
+
+SELECT STDDEV_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+ stddev_pop
+---------------------
+ 147.322774885623
+ 147.322774885623
+ 117.765657133139
+ 106.144555520604
+ 65.0000000000000000
+ 0
+(6 rows)
+
+SELECT STDDEV_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+ stddev_pop
+---------------------
+ 147.322774885623
+ 147.322774885623
+ 117.765657133139
+ 106.144555520604
+ 65.0000000000000000
+ 0
+(6 rows)
+
+SELECT STDDEV_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+ stddev_pop
+---------------------
+ 147.322774885623
+ 147.322774885623
+ 117.765657133139
+ 106.144555520604
+ 65.0000000000000000
+ 0
+(6 rows)
+
+SELECT STDDEV_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+ stddev_pop
+---------------------
+ 147.322774885623
+ 147.322774885623
+ 117.765657133139
+ 106.144555520604
+ 65.0000000000000000
+ 0
+(6 rows)
+
+SELECT STDDEV_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+ stddev_samp
+---------------------
+ 164.711869639076
+ 164.711869639076
+ 135.984067694222
+ 130.000000000000
+ 91.9238815542511782
+
+(6 rows)
+
+SELECT STDDEV_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+ stddev_samp
+---------------------
+ 164.711869639076
+ 164.711869639076
+ 135.984067694222
+ 130.000000000000
+ 91.9238815542511782
+
+(6 rows)
+
+SELECT STDDEV_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+ stddev_samp
+---------------------
+ 164.711869639076
+ 164.711869639076
+ 135.984067694222
+ 130.000000000000
+ 91.9238815542511782
+
+(6 rows)
+
+SELECT STDDEV_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+ stddev_samp
+---------------------
+ 164.711869639076
+ 164.711869639076
+ 135.984067694222
+ 130.000000000000
+ 91.9238815542511782
+
+(6 rows)
+
+SELECT STDDEV(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ stddev
+---------------------
+ 164.711869639076
+ 164.711869639076
+ 135.984067694222
+ 130.000000000000
+ 91.9238815542511782
+
+(6 rows)
+
+SELECT STDDEV(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ stddev
+---------------------
+ 164.711869639076
+ 164.711869639076
+ 135.984067694222
+ 130.000000000000
+ 91.9238815542511782
+
+(6 rows)
+
+SELECT STDDEV(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ stddev
+---------------------
+ 164.711869639076
+ 164.711869639076
+ 135.984067694222
+ 130.000000000000
+ 91.9238815542511782
+
+(6 rows)
+
+SELECT STDDEV(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+ stddev
+---------------------
+ 164.711869639076
+ 164.711869639076
+ 135.984067694222
+ 130.000000000000
+ 91.9238815542511782
+
+(6 rows)
+
+-- test that inverse transition functions work with various frame options
+SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | sum
+---+-----
+ 1 | 1
+ 2 | 2
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+ i | sum
+---+-----
+ 1 | 3
+ 2 | 2
+ 3 |
+ 4 |
+(4 rows)
+
+SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,3),(4,4)) t(i,v);
+ i | sum
+---+-----
+ 1 | 3
+ 2 | 6
+ 3 | 9
+ 4 | 7
+(4 rows)
+
+-- ensure aggregate over numeric properly recovers from NaN values
+SELECT a, b,
+ SUM(b) OVER(ORDER BY A ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+FROM (VALUES(1,1::numeric),(2,2),(3,'NaN'),(4,3),(5,4)) t(a,b);
+ a | b | sum
+---+-----+-----
+ 1 | 1 | 1
+ 2 | 2 | 3
+ 3 | NaN | NaN
+ 4 | 3 | NaN
+ 5 | 4 | 7
+(5 rows)
+
+-- It might be tempting for someone to add an inverse trans function for
+-- float and double precision. This should not be done as it can give incorrect
+-- results. This test should fail if anyone ever does this without thinking too
+-- hard about it.
+SELECT to_char(SUM(n::float8) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING),'999999999999999999999D9')
+ FROM (VALUES(1,1e20),(2,1)) n(i,n);
+ to_char
+--------------------------
+ 100000000000000000000
+ 1.0
+(2 rows)
+
+SELECT i, b, bool_and(b) OVER w, bool_or(b) OVER w
+ FROM (VALUES (1,true), (2,true), (3,false), (4,false), (5,true)) v(i,b)
+ WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING);
+ i | b | bool_and | bool_or
+---+---+----------+---------
+ 1 | t | t | t
+ 2 | t | f | t
+ 3 | f | f | f
+ 4 | f | f | t
+ 5 | t | t | t
+(5 rows)
+
+-- Tests for problems with failure to walk or mutate expressions
+-- within window frame clauses.
+-- test walker (fails with collation error if expressions are not walked)
+SELECT array_agg(i) OVER w
+ FROM generate_series(1,5) i
+WINDOW w AS (ORDER BY i ROWS BETWEEN (('foo' < 'foobar')::integer) PRECEDING AND CURRENT ROW);
+ array_agg
+-----------
+ {1}
+ {1,2}
+ {2,3}
+ {3,4}
+ {4,5}
+(5 rows)
+
+-- test mutator (fails when inlined if expressions are not mutated)
+CREATE FUNCTION pg_temp.f(group_size BIGINT) RETURNS SETOF integer[]
+AS $$
+ SELECT array_agg(s) OVER w
+ FROM generate_series(1,5) s
+ WINDOW w AS (ORDER BY s ROWS BETWEEN CURRENT ROW AND GROUP_SIZE FOLLOWING)
+$$ LANGUAGE SQL STABLE;
+EXPLAIN (costs off) SELECT * FROM pg_temp.f(2);
+ QUERY PLAN
+------------------------------------------------------
+ Subquery Scan on f
+ -> WindowAgg
+ -> Sort
+ Sort Key: s.s
+ -> Function Scan on generate_series s
+(5 rows)
+
+SELECT * FROM pg_temp.f(2);
+ f
+---------
+ {1,2,3}
+ {2,3,4}
+ {3,4,5}
+ {4,5}
+ {5}
+(5 rows)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/window.sql b/ydb/library/yql/tests/postgresql/original/cases/window.sql
new file mode 100644
index 0000000000..eae5fa6017
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/window.sql
@@ -0,0 +1,1330 @@
+--
+-- WINDOW FUNCTIONS
+--
+
+CREATE TEMPORARY TABLE empsalary (
+ depname varchar,
+ empno bigint,
+ salary int,
+ enroll_date date
+);
+
+INSERT INTO empsalary VALUES
+('develop', 10, 5200, '2007-08-01'),
+('sales', 1, 5000, '2006-10-01'),
+('personnel', 5, 3500, '2007-12-10'),
+('sales', 4, 4800, '2007-08-08'),
+('personnel', 2, 3900, '2006-12-23'),
+('develop', 7, 4200, '2008-01-01'),
+('develop', 9, 4500, '2008-01-01'),
+('sales', 3, 4800, '2007-08-01'),
+('develop', 8, 6000, '2006-10-01'),
+('develop', 11, 5200, '2007-08-15');
+
+SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname) FROM empsalary ORDER BY depname, salary;
+
+SELECT depname, empno, salary, rank() OVER (PARTITION BY depname ORDER BY salary) FROM empsalary;
+
+-- with GROUP BY
+SELECT four, ten, SUM(SUM(four)) OVER (PARTITION BY four), AVG(ten) FROM tenk1
+GROUP BY four, ten ORDER BY four, ten;
+
+SELECT depname, empno, salary, sum(salary) OVER w FROM empsalary WINDOW w AS (PARTITION BY depname);
+
+SELECT depname, empno, salary, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary) ORDER BY rank() OVER w;
+
+-- empty window specification
+SELECT COUNT(*) OVER () FROM tenk1 WHERE unique2 < 10;
+
+SELECT COUNT(*) OVER w FROM tenk1 WHERE unique2 < 10 WINDOW w AS ();
+
+-- no window operation
+SELECT four FROM tenk1 WHERE FALSE WINDOW w AS (PARTITION BY ten);
+
+-- cumulative aggregate
+SELECT sum(four) OVER (PARTITION BY ten ORDER BY unique2) AS sum_1, ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT row_number() OVER (ORDER BY unique2) FROM tenk1 WHERE unique2 < 10;
+
+SELECT rank() OVER (PARTITION BY four ORDER BY ten) AS rank_1, ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT dense_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT percent_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT cume_dist() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT ntile(3) OVER (ORDER BY ten, four), ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT ntile(NULL) OVER (ORDER BY ten, four), ten, four FROM tenk1 LIMIT 2;
+
+SELECT lag(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT lag(ten, four) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT lag(ten, four, 0) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+SELECT lag(ten, four, 0.7) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten;
+
+SELECT lead(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT lead(ten * 2, 1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT lead(ten * 2, 1, -1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+SELECT lead(ten * 2, 1, -1.4) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten;
+
+SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+
+-- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window.
+SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10;
+
+SELECT last_value(ten) OVER (PARTITION BY four), ten, four FROM
+ (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s
+ ORDER BY four, ten;
+
+SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four
+ FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s;
+
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum
+FROM tenk1 GROUP BY ten, two;
+
+SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE two = 1)s WHERE unique2 < 10;
+
+SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum
+ FROM tenk1 WHERE unique2 < 10;
+
+-- opexpr with different windows evaluation.
+SELECT * FROM(
+ SELECT count(*) OVER (PARTITION BY four ORDER BY ten) +
+ sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total,
+ count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount,
+ sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum
+ FROM tenk1
+)sub
+WHERE total <> fourcount + twosum;
+
+SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1 WHERE unique2 < 10;
+
+SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum
+FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten);
+
+-- more than one window with GROUP BY
+SELECT sum(salary),
+ row_number() OVER (ORDER BY depname),
+ sum(sum(salary)) OVER (ORDER BY depname DESC)
+FROM empsalary GROUP BY depname;
+
+-- identical windows with different names
+SELECT sum(salary) OVER w1, count(*) OVER w2
+FROM empsalary WINDOW w1 AS (ORDER BY salary), w2 AS (ORDER BY salary);
+
+-- subplan
+SELECT lead(ten, (SELECT two FROM tenk1 WHERE s.unique2 = unique2)) OVER (PARTITION BY four ORDER BY ten)
+FROM tenk1 s WHERE unique2 < 10;
+
+-- empty table
+SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 WHERE FALSE)s;
+
+-- mixture of agg/wfunc in the same window
+SELECT sum(salary) OVER w, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary DESC);
+
+-- strict aggs
+SELECT empno, depname, salary, bonus, depadj, MIN(bonus) OVER (ORDER BY empno), MAX(depadj) OVER () FROM(
+ SELECT *,
+ CASE WHEN enroll_date < '2008-01-01' THEN 2008 - extract(YEAR FROM enroll_date) END * 500 AS bonus,
+ CASE WHEN
+ AVG(salary) OVER (PARTITION BY depname) < salary
+ THEN 200 END AS depadj FROM empsalary
+)s;
+
+-- window function over ungrouped agg over empty row set (bug before 9.1)
+SELECT SUM(COUNT(f1)) OVER () FROM int4_tbl WHERE f1=42;
+
+-- window function with ORDER BY an expression involving aggregates (9.1 bug)
+select ten,
+ sum(unique1) + sum(unique2) as res,
+ rank() over (order by sum(unique1) + sum(unique2)) as rank
+from tenk1
+group by ten order by ten;
+
+-- window and aggregate with GROUP BY expression (9.2 bug)
+explain (costs off)
+select first_value(max(x)) over (), y
+ from (select unique1 as x, ten+four as y from tenk1) ss
+ group by y;
+
+-- test non-default frame specifications
+SELECT four, ten,
+ sum(ten) over (partition by four order by ten),
+ last_value(ten) over (partition by four order by ten)
+FROM (select distinct ten, four from tenk1) ss;
+
+SELECT four, ten,
+ sum(ten) over (partition by four order by ten range between unbounded preceding and current row),
+ last_value(ten) over (partition by four order by ten range between unbounded preceding and current row)
+FROM (select distinct ten, four from tenk1) ss;
+
+SELECT four, ten,
+ sum(ten) over (partition by four order by ten range between unbounded preceding and unbounded following),
+ last_value(ten) over (partition by four order by ten range between unbounded preceding and unbounded following)
+FROM (select distinct ten, four from tenk1) ss;
+
+SELECT four, ten/4 as two,
+ sum(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row),
+ last_value(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row)
+FROM (select distinct ten, four from tenk1) ss;
+
+SELECT four, ten/4 as two,
+ sum(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row),
+ last_value(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row)
+FROM (select distinct ten, four from tenk1) ss;
+
+SELECT sum(unique1) over (order by four range between current row and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (rows between current row and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (rows between 2 preceding and 1 preceding),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (rows between 1 following and 3 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (rows between unbounded preceding and 1 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (w range between current row and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
+
+SELECT sum(unique1) over (w range between unbounded preceding and current row exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
+
+SELECT sum(unique1) over (w range between unbounded preceding and current row exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
+
+SELECT sum(unique1) over (w range between unbounded preceding and current row exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four);
+
+SELECT first_value(unique1) over w,
+ nth_value(unique1, 2) over w AS nth_2,
+ last_value(unique1) over w, unique1, four
+FROM tenk1 WHERE unique1 < 10
+WINDOW w AS (order by four range between current row and unbounded following);
+
+SELECT sum(unique1) over
+ (order by unique1
+ rows (SELECT unique1 FROM tenk1 ORDER BY unique1 LIMIT 1) + 1 PRECEDING),
+ unique1
+FROM tenk1 WHERE unique1 < 10;
+
+CREATE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows
+ FROM generate_series(1, 10) i;
+
+SELECT * FROM v_window;
+
+SELECT pg_get_viewdef('v_window');
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
+ exclude current row) as sum_rows FROM generate_series(1, 10) i;
+
+SELECT * FROM v_window;
+
+SELECT pg_get_viewdef('v_window');
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
+ exclude group) as sum_rows FROM generate_series(1, 10) i;
+
+SELECT * FROM v_window;
+
+SELECT pg_get_viewdef('v_window');
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
+ exclude ties) as sum_rows FROM generate_series(1, 10) i;
+
+SELECT * FROM v_window;
+
+SELECT pg_get_viewdef('v_window');
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following
+ exclude no others) as sum_rows FROM generate_series(1, 10) i;
+
+SELECT * FROM v_window;
+
+SELECT pg_get_viewdef('v_window');
+
+CREATE OR REPLACE TEMP VIEW v_window AS
+ SELECT i, sum(i) over (order by i groups between 1 preceding and 1 following) as sum_rows FROM generate_series(1, 10) i;
+
+SELECT * FROM v_window;
+
+SELECT pg_get_viewdef('v_window');
+
+DROP VIEW v_window;
+
+CREATE TEMP VIEW v_window AS
+ SELECT i, min(i) over (order by i range between '1 day' preceding and '10 days' following) as min_i
+ FROM generate_series(now(), now()+'100 days'::interval, '1 hour') i;
+
+SELECT pg_get_viewdef('v_window');
+
+-- RANGE offset PRECEDING/FOLLOWING tests
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four desc range between 2::int8 preceding and 1::int2 preceding),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude no others),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude ties),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude group),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following
+ exclude current row),unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following),
+ salary, enroll_date from empsalary;
+
+select sum(salary) over (order by enroll_date desc range between '1 year'::interval preceding and '1 year'::interval following),
+ salary, enroll_date from empsalary;
+
+select sum(salary) over (order by enroll_date desc range between '1 year'::interval following and '1 year'::interval following),
+ salary, enroll_date from empsalary;
+
+select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following
+ exclude current row), salary, enroll_date from empsalary;
+
+select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following
+ exclude group), salary, enroll_date from empsalary;
+
+select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+
+select first_value(salary) over(order by salary range between 1000 preceding and 1000 following),
+ lead(salary) over(order by salary range between 1000 preceding and 1000 following),
+ nth_value(salary, 1) over(order by salary range between 1000 preceding and 1000 following),
+ salary from empsalary;
+
+select last_value(salary) over(order by salary range between 1000 preceding and 1000 following),
+ lag(salary) over(order by salary range between 1000 preceding and 1000 following),
+ salary from empsalary;
+
+select first_value(salary) over(order by salary range between 1000 following and 3000 following
+ exclude current row),
+ lead(salary) over(order by salary range between 1000 following and 3000 following exclude ties),
+ nth_value(salary, 1) over(order by salary range between 1000 following and 3000 following
+ exclude ties),
+ salary from empsalary;
+
+select last_value(salary) over(order by salary range between 1000 following and 3000 following
+ exclude group),
+ lag(salary) over(order by salary range between 1000 following and 3000 following exclude group),
+ salary from empsalary;
+
+select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude ties),
+ last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following),
+ salary, enroll_date from empsalary;
+
+select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude ties),
+ last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude ties),
+ salary, enroll_date from empsalary;
+
+select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude group),
+ last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude group),
+ salary, enroll_date from empsalary;
+
+select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude current row),
+ last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following
+ exclude current row),
+ salary, enroll_date from empsalary;
+
+-- RANGE offset PRECEDING/FOLLOWING with null values
+select x, y,
+ first_value(y) over w,
+ last_value(y) over w
+from
+ (select x, x as y from generate_series(1,5) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by x asc nulls first range between 2 preceding and 2 following);
+
+select x, y,
+ first_value(y) over w,
+ last_value(y) over w
+from
+ (select x, x as y from generate_series(1,5) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by x asc nulls last range between 2 preceding and 2 following);
+
+select x, y,
+ first_value(y) over w,
+ last_value(y) over w
+from
+ (select x, x as y from generate_series(1,5) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by x desc nulls first range between 2 preceding and 2 following);
+
+select x, y,
+ first_value(y) over w,
+ last_value(y) over w
+from
+ (select x, x as y from generate_series(1,5) as x
+ union all select null, 42
+ union all select null, 43) ss
+window w as
+ (order by x desc nulls last range between 2 preceding and 2 following);
+
+-- Check overflow behavior for various integer sizes
+
+select x, last_value(x) over (order by x::smallint range between current row and 2147450884 following)
+from generate_series(32764, 32766) x;
+
+select x, last_value(x) over (order by x::smallint desc range between current row and 2147450885 following)
+from generate_series(-32766, -32764) x;
+
+select x, last_value(x) over (order by x range between current row and 4 following)
+from generate_series(2147483644, 2147483646) x;
+
+select x, last_value(x) over (order by x desc range between current row and 5 following)
+from generate_series(-2147483646, -2147483644) x;
+
+select x, last_value(x) over (order by x range between current row and 4 following)
+from generate_series(9223372036854775804, 9223372036854775806) x;
+
+select x, last_value(x) over (order by x desc range between current row and 5 following)
+from generate_series(-9223372036854775806, -9223372036854775804) x;
+
+-- Test in_range for other numeric datatypes
+
+create temp table numerics(
+ id int,
+ f_float4 float4,
+ f_float8 float8,
+ f_numeric numeric
+);
+
+insert into numerics values
+(0, '-infinity', '-infinity', '-infinity'),
+(1, -3, -3, -3),
+(2, -1, -1, -1),
+(3, 0, 0, 0),
+(4, 1.1, 1.1, 1.1),
+(5, 1.12, 1.12, 1.12),
+(6, 2, 2, 2),
+(7, 100, 100, 100),
+(8, 'infinity', 'infinity', 'infinity'),
+(9, 'NaN', 'NaN', 'NaN');
+
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 1 preceding and 1 following);
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 1 preceding and 1.1::float4 following);
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 'inf' preceding and 'inf' following);
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 'inf' preceding and 'inf' preceding);
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 'inf' following and 'inf' following);
+select id, f_float4, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float4 range between
+ 1.1 preceding and 'NaN' following); -- error, NaN disallowed
+
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 1 preceding and 1 following);
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 1 preceding and 1.1::float8 following);
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 'inf' preceding and 'inf' following);
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 'inf' preceding and 'inf' preceding);
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 'inf' following and 'inf' following);
+select id, f_float8, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_float8 range between
+ 1.1 preceding and 'NaN' following); -- error, NaN disallowed
+
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1 preceding and 1 following);
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1 preceding and 1.1::numeric following);
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1 preceding and 1.1::float8 following); -- currently unsupported
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 'inf' preceding and 'inf' following);
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 'inf' preceding and 'inf' preceding);
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 'inf' following and 'inf' following);
+select id, f_numeric, first_value(id) over w, last_value(id) over w
+from numerics
+window w as (order by f_numeric range between
+ 1.1 preceding and 'NaN' following); -- error, NaN disallowed
+
+-- Test in_range for other datetime datatypes
+
+create temp table datetimes(
+ id int,
+ f_time time,
+ f_timetz timetz,
+ f_interval interval,
+ f_timestamptz timestamptz,
+ f_timestamp timestamp
+);
+
+insert into datetimes values
+(1, '11:00', '11:00 BST', '1 year', '2000-10-19 10:23:54+01', '2000-10-19 10:23:54'),
+(2, '12:00', '12:00 BST', '2 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'),
+(3, '13:00', '13:00 BST', '3 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'),
+(4, '14:00', '14:00 BST', '4 years', '2002-10-19 10:23:54+01', '2002-10-19 10:23:54'),
+(5, '15:00', '15:00 BST', '5 years', '2003-10-19 10:23:54+01', '2003-10-19 10:23:54'),
+(6, '15:00', '15:00 BST', '5 years', '2004-10-19 10:23:54+01', '2004-10-19 10:23:54'),
+(7, '17:00', '17:00 BST', '7 years', '2005-10-19 10:23:54+01', '2005-10-19 10:23:54'),
+(8, '18:00', '18:00 BST', '8 years', '2006-10-19 10:23:54+01', '2006-10-19 10:23:54'),
+(9, '19:00', '19:00 BST', '9 years', '2007-10-19 10:23:54+01', '2007-10-19 10:23:54'),
+(10, '20:00', '20:00 BST', '10 years', '2008-10-19 10:23:54+01', '2008-10-19 10:23:54');
+
+select id, f_time, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_time range between
+ '70 min'::interval preceding and '2 hours'::interval following);
+
+select id, f_time, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_time desc range between
+ '70 min' preceding and '2 hours' following);
+
+select id, f_timetz, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timetz range between
+ '70 min'::interval preceding and '2 hours'::interval following);
+
+select id, f_timetz, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timetz desc range between
+ '70 min' preceding and '2 hours' following);
+
+select id, f_interval, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_interval range between
+ '1 year'::interval preceding and '1 year'::interval following);
+
+select id, f_interval, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_interval desc range between
+ '1 year' preceding and '1 year' following);
+
+select id, f_timestamptz, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timestamptz range between
+ '1 year'::interval preceding and '1 year'::interval following);
+
+select id, f_timestamptz, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timestamptz desc range between
+ '1 year' preceding and '1 year' following);
+
+select id, f_timestamp, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timestamp range between
+ '1 year'::interval preceding and '1 year'::interval following);
+
+select id, f_timestamp, first_value(id) over w, last_value(id) over w
+from datetimes
+window w as (order by f_timestamp desc range between
+ '1 year' preceding and '1 year' following);
+
+-- RANGE offset PRECEDING/FOLLOWING error cases
+select sum(salary) over (order by enroll_date, salary range between '1 year'::interval preceding and '2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+
+select sum(salary) over (range between '1 year'::interval preceding and '2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+
+select sum(salary) over (order by depname range between '1 year'::interval preceding and '2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+
+select max(enroll_date) over (order by enroll_date range between 1 preceding and 2 following
+ exclude ties), salary, enroll_date from empsalary;
+
+select max(enroll_date) over (order by salary range between -1 preceding and 2 following
+ exclude ties), salary, enroll_date from empsalary;
+
+select max(enroll_date) over (order by salary range between 1 preceding and -2 following
+ exclude ties), salary, enroll_date from empsalary;
+
+select max(enroll_date) over (order by salary range between '1 year'::interval preceding and '2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+
+select max(enroll_date) over (order by enroll_date range between '1 year'::interval preceding and '-2 years'::interval following
+ exclude ties), salary, enroll_date from empsalary;
+
+-- GROUPS tests
+
+SELECT sum(unique1) over (order by four groups between unbounded preceding and current row),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between unbounded preceding and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between current row and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between 1 preceding and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between 1 following and unbounded following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between unbounded preceding and 2 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 preceding),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between 0 preceding and 0 following),
+ unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following
+ exclude current row), unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following
+ exclude group), unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following
+ exclude ties), unique1, four
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (partition by ten
+ order by four groups between 0 preceding and 0 following),unique1, four, ten
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (partition by ten
+ order by four groups between 0 preceding and 0 following exclude current row), unique1, four, ten
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (partition by ten
+ order by four groups between 0 preceding and 0 following exclude group), unique1, four, ten
+FROM tenk1 WHERE unique1 < 10;
+
+SELECT sum(unique1) over (partition by ten
+ order by four groups between 0 preceding and 0 following exclude ties), unique1, four, ten
+FROM tenk1 WHERE unique1 < 10;
+
+select first_value(salary) over(order by enroll_date groups between 1 preceding and 1 following),
+ lead(salary) over(order by enroll_date groups between 1 preceding and 1 following),
+ nth_value(salary, 1) over(order by enroll_date groups between 1 preceding and 1 following),
+ salary, enroll_date from empsalary;
+
+select last_value(salary) over(order by enroll_date groups between 1 preceding and 1 following),
+ lag(salary) over(order by enroll_date groups between 1 preceding and 1 following),
+ salary, enroll_date from empsalary;
+
+select first_value(salary) over(order by enroll_date groups between 1 following and 3 following
+ exclude current row),
+ lead(salary) over(order by enroll_date groups between 1 following and 3 following exclude ties),
+ nth_value(salary, 1) over(order by enroll_date groups between 1 following and 3 following
+ exclude ties),
+ salary, enroll_date from empsalary;
+
+select last_value(salary) over(order by enroll_date groups between 1 following and 3 following
+ exclude group),
+ lag(salary) over(order by enroll_date groups between 1 following and 3 following exclude group),
+ salary, enroll_date from empsalary;
+
+-- Show differences in offset interpretation between ROWS, RANGE, and GROUPS
+WITH cte (x) AS (
+ SELECT * FROM generate_series(1, 35, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following);
+
+WITH cte (x) AS (
+ SELECT * FROM generate_series(1, 35, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x range between 1 preceding and 1 following);
+
+WITH cte (x) AS (
+ SELECT * FROM generate_series(1, 35, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following);
+
+WITH cte (x) AS (
+ select 1 union all select 1 union all select 1 union all
+ SELECT * FROM generate_series(5, 49, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following);
+
+WITH cte (x) AS (
+ select 1 union all select 1 union all select 1 union all
+ SELECT * FROM generate_series(5, 49, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x range between 1 preceding and 1 following);
+
+WITH cte (x) AS (
+ select 1 union all select 1 union all select 1 union all
+ SELECT * FROM generate_series(5, 49, 2)
+)
+SELECT x, (sum(x) over w)
+FROM cte
+WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following);
+
+-- with UNION
+SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0;
+
+-- check some degenerate cases
+create temp table t1 (f1 int, f2 int8);
+insert into t1 values (1,1),(1,2),(2,2);
+
+select f1, sum(f1) over (partition by f1
+ range between 1 preceding and 1 following)
+from t1 where f1 = f2; -- error, must have order by
+explain (costs off)
+select f1, sum(f1) over (partition by f1 order by f2
+ range between 1 preceding and 1 following)
+from t1 where f1 = f2;
+select f1, sum(f1) over (partition by f1 order by f2
+ range between 1 preceding and 1 following)
+from t1 where f1 = f2;
+select f1, sum(f1) over (partition by f1, f1 order by f2
+ range between 2 preceding and 1 preceding)
+from t1 where f1 = f2;
+select f1, sum(f1) over (partition by f1, f2 order by f2
+ range between 1 following and 2 following)
+from t1 where f1 = f2;
+
+select f1, sum(f1) over (partition by f1
+ groups between 1 preceding and 1 following)
+from t1 where f1 = f2; -- error, must have order by
+explain (costs off)
+select f1, sum(f1) over (partition by f1 order by f2
+ groups between 1 preceding and 1 following)
+from t1 where f1 = f2;
+select f1, sum(f1) over (partition by f1 order by f2
+ groups between 1 preceding and 1 following)
+from t1 where f1 = f2;
+select f1, sum(f1) over (partition by f1, f1 order by f2
+ groups between 2 preceding and 1 preceding)
+from t1 where f1 = f2;
+select f1, sum(f1) over (partition by f1, f2 order by f2
+ groups between 1 following and 2 following)
+from t1 where f1 = f2;
+
+-- ordering by a non-integer constant is allowed
+SELECT rank() OVER (ORDER BY length('abc'));
+
+-- can't order by another window function
+SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random()));
+
+-- some other errors
+SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10;
+
+SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10;
+
+SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1;
+
+SELECT * FROM rank() OVER (ORDER BY random());
+
+DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10;
+
+DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random());
+
+SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1);
+
+SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1;
+
+SELECT count() OVER () FROM tenk1;
+
+SELECT generate_series(1, 100) OVER () FROM empsalary;
+
+SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1;
+
+SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1;
+
+-- filter
+
+SELECT sum(salary), row_number() OVER (ORDER BY depname), sum(
+ sum(salary) FILTER (WHERE enroll_date > '2007-01-01')
+) FILTER (WHERE depname <> 'sales') OVER (ORDER BY depname DESC) AS "filtered_sum",
+ depname
+FROM empsalary GROUP BY depname;
+
+-- Test pushdown of quals into a subquery containing window functions
+
+-- pushdown is safe because all PARTITION BY clauses include depname:
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (SELECT depname,
+ sum(salary) OVER (PARTITION BY depname) depsalary,
+ min(salary) OVER (PARTITION BY depname || 'A', depname) depminsalary
+ FROM empsalary) emp
+WHERE depname = 'sales';
+
+-- pushdown is unsafe because there's a PARTITION BY clause without depname:
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (SELECT depname,
+ sum(salary) OVER (PARTITION BY enroll_date) enroll_salary,
+ min(salary) OVER (PARTITION BY depname) depminsalary
+ FROM empsalary) emp
+WHERE depname = 'sales';
+
+-- Test Sort node collapsing
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (SELECT depname,
+ sum(salary) OVER (PARTITION BY depname order by empno) depsalary,
+ min(salary) OVER (PARTITION BY depname, empno order by enroll_date) depminsalary
+ FROM empsalary) emp
+WHERE depname = 'sales';
+
+-- Test Sort node reordering
+EXPLAIN (COSTS OFF)
+SELECT
+ lead(1) OVER (PARTITION BY depname ORDER BY salary, enroll_date),
+ lag(1) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno)
+FROM empsalary;
+
+-- Test incremental sorting
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (SELECT depname,
+ empno,
+ salary,
+ enroll_date,
+ row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp,
+ row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp
+ FROM empsalary) emp
+WHERE first_emp = 1 OR last_emp = 1;
+
+SELECT * FROM
+ (SELECT depname,
+ empno,
+ salary,
+ enroll_date,
+ row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp,
+ row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp
+ FROM empsalary) emp
+WHERE first_emp = 1 OR last_emp = 1;
+
+-- cleanup
+DROP TABLE empsalary;
+
+-- test user-defined window function with named args and default args
+CREATE FUNCTION nth_value_def(val anyelement, n integer = 1) RETURNS anyelement
+ LANGUAGE internal WINDOW IMMUTABLE STRICT AS 'window_nth_value';
+
+SELECT nth_value_def(n := 2, val := ten) OVER (PARTITION BY four), ten, four
+ FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s;
+
+SELECT nth_value_def(ten) OVER (PARTITION BY four), ten, four
+ FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s;
+
+--
+-- Test the basic moving-aggregate machinery
+--
+
+-- create aggregates that record the series of transform calls (these are
+-- intentionally not true inverses)
+
+CREATE FUNCTION logging_sfunc_nonstrict(text, anyelement) RETURNS text AS
+$$ SELECT COALESCE($1, '') || '*' || quote_nullable($2) $$
+LANGUAGE SQL IMMUTABLE;
+
+CREATE FUNCTION logging_msfunc_nonstrict(text, anyelement) RETURNS text AS
+$$ SELECT COALESCE($1, '') || '+' || quote_nullable($2) $$
+LANGUAGE SQL IMMUTABLE;
+
+CREATE FUNCTION logging_minvfunc_nonstrict(text, anyelement) RETURNS text AS
+$$ SELECT $1 || '-' || quote_nullable($2) $$
+LANGUAGE SQL IMMUTABLE;
+
+CREATE AGGREGATE logging_agg_nonstrict (anyelement)
+(
+ stype = text,
+ sfunc = logging_sfunc_nonstrict,
+ mstype = text,
+ msfunc = logging_msfunc_nonstrict,
+ minvfunc = logging_minvfunc_nonstrict
+);
+
+CREATE AGGREGATE logging_agg_nonstrict_initcond (anyelement)
+(
+ stype = text,
+ sfunc = logging_sfunc_nonstrict,
+ mstype = text,
+ msfunc = logging_msfunc_nonstrict,
+ minvfunc = logging_minvfunc_nonstrict,
+ initcond = 'I',
+ minitcond = 'MI'
+);
+
+CREATE FUNCTION logging_sfunc_strict(text, anyelement) RETURNS text AS
+$$ SELECT $1 || '*' || quote_nullable($2) $$
+LANGUAGE SQL STRICT IMMUTABLE;
+
+CREATE FUNCTION logging_msfunc_strict(text, anyelement) RETURNS text AS
+$$ SELECT $1 || '+' || quote_nullable($2) $$
+LANGUAGE SQL STRICT IMMUTABLE;
+
+CREATE FUNCTION logging_minvfunc_strict(text, anyelement) RETURNS text AS
+$$ SELECT $1 || '-' || quote_nullable($2) $$
+LANGUAGE SQL STRICT IMMUTABLE;
+
+CREATE AGGREGATE logging_agg_strict (text)
+(
+ stype = text,
+ sfunc = logging_sfunc_strict,
+ mstype = text,
+ msfunc = logging_msfunc_strict,
+ minvfunc = logging_minvfunc_strict
+);
+
+CREATE AGGREGATE logging_agg_strict_initcond (anyelement)
+(
+ stype = text,
+ sfunc = logging_sfunc_strict,
+ mstype = text,
+ msfunc = logging_msfunc_strict,
+ minvfunc = logging_minvfunc_strict,
+ initcond = 'I',
+ minitcond = 'MI'
+);
+
+-- test strict and non-strict cases
+SELECT
+ p::text || ',' || i::text || ':' || COALESCE(v::text, 'NULL') AS row,
+ logging_agg_nonstrict(v) over wnd as nstrict,
+ logging_agg_nonstrict_initcond(v) over wnd as nstrict_init,
+ logging_agg_strict(v::text) over wnd as strict,
+ logging_agg_strict_initcond(v) over wnd as strict_init
+FROM (VALUES
+ (1, 1, NULL),
+ (1, 2, 'a'),
+ (1, 3, 'b'),
+ (1, 4, NULL),
+ (1, 5, NULL),
+ (1, 6, 'c'),
+ (2, 1, NULL),
+ (2, 2, 'x'),
+ (3, 1, 'z')
+) AS t(p, i, v)
+WINDOW wnd AS (PARTITION BY P ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+ORDER BY p, i;
+
+-- and again, but with filter
+SELECT
+ p::text || ',' || i::text || ':' ||
+ CASE WHEN f THEN COALESCE(v::text, 'NULL') ELSE '-' END as row,
+ logging_agg_nonstrict(v) filter(where f) over wnd as nstrict_filt,
+ logging_agg_nonstrict_initcond(v) filter(where f) over wnd as nstrict_init_filt,
+ logging_agg_strict(v::text) filter(where f) over wnd as strict_filt,
+ logging_agg_strict_initcond(v) filter(where f) over wnd as strict_init_filt
+FROM (VALUES
+ (1, 1, true, NULL),
+ (1, 2, false, 'a'),
+ (1, 3, true, 'b'),
+ (1, 4, false, NULL),
+ (1, 5, false, NULL),
+ (1, 6, false, 'c'),
+ (2, 1, false, NULL),
+ (2, 2, true, 'x'),
+ (3, 1, true, 'z')
+) AS t(p, i, f, v)
+WINDOW wnd AS (PARTITION BY p ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+ORDER BY p, i;
+
+-- test that volatile arguments disable moving-aggregate mode
+SELECT
+ i::text || ':' || COALESCE(v::text, 'NULL') as row,
+ logging_agg_strict(v::text)
+ over wnd as inverse,
+ logging_agg_strict(v::text || CASE WHEN random() < 0 then '?' ELSE '' END)
+ over wnd as noinverse
+FROM (VALUES
+ (1, 'a'),
+ (2, 'b'),
+ (3, 'c')
+) AS t(i, v)
+WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+ORDER BY i;
+
+SELECT
+ i::text || ':' || COALESCE(v::text, 'NULL') as row,
+ logging_agg_strict(v::text) filter(where true)
+ over wnd as inverse,
+ logging_agg_strict(v::text) filter(where random() >= 0)
+ over wnd as noinverse
+FROM (VALUES
+ (1, 'a'),
+ (2, 'b'),
+ (3, 'c')
+) AS t(i, v)
+WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+ORDER BY i;
+
+-- test that non-overlapping windows don't use inverse transitions
+SELECT
+ logging_agg_strict(v::text) OVER wnd
+FROM (VALUES
+ (1, 'a'),
+ (2, 'b'),
+ (3, 'c')
+) AS t(i, v)
+WINDOW wnd AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW)
+ORDER BY i;
+
+-- test that returning NULL from the inverse transition functions
+-- restarts the aggregation from scratch. The second aggregate is supposed
+-- to test cases where only some aggregates restart, the third one checks
+-- that one aggregate restarting doesn't cause others to restart.
+
+CREATE FUNCTION sum_int_randrestart_minvfunc(int4, int4) RETURNS int4 AS
+$$ SELECT CASE WHEN random() < 0.2 THEN NULL ELSE $1 - $2 END $$
+LANGUAGE SQL STRICT;
+
+CREATE AGGREGATE sum_int_randomrestart (int4)
+(
+ stype = int4,
+ sfunc = int4pl,
+ mstype = int4,
+ msfunc = int4pl,
+ minvfunc = sum_int_randrestart_minvfunc
+);
+
+WITH
+vs AS (
+ SELECT i, (random() * 100)::int4 AS v
+ FROM generate_series(1, 100) AS i
+),
+sum_following AS (
+ SELECT i, SUM(v) OVER
+ (ORDER BY i DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS s
+ FROM vs
+)
+SELECT DISTINCT
+ sum_following.s = sum_int_randomrestart(v) OVER fwd AS eq1,
+ -sum_following.s = sum_int_randomrestart(-v) OVER fwd AS eq2,
+ 100*3+(vs.i-1)*3 = length(logging_agg_nonstrict(''::text) OVER fwd) AS eq3
+FROM vs
+JOIN sum_following ON sum_following.i = vs.i
+WINDOW fwd AS (
+ ORDER BY vs.i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING
+);
+
+--
+-- Test various built-in aggregates that have moving-aggregate support
+--
+
+-- test inverse transition functions handle NULLs properly
+SELECT i,AVG(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,AVG(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,AVG(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,AVG(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1.5),(2,2.5),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,AVG(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,SUM(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,SUM(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,SUM(v::money) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,'1.10'),(2,'2.20'),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,SUM(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,SUM(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1.1),(2,2.2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT SUM(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1.01),(2,2),(3,3)) v(i,n);
+
+SELECT i,COUNT(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,COUNT(*) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT VAR_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VAR_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VAR_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VAR_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VAR_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VAR_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VAR_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VAR_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VARIANCE(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VARIANCE(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VARIANCE(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT VARIANCE(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT STDDEV_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+
+SELECT STDDEV_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+
+SELECT STDDEV_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+
+SELECT STDDEV_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+
+SELECT STDDEV_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+
+SELECT STDDEV_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+
+SELECT STDDEV_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+
+SELECT STDDEV_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n);
+
+SELECT STDDEV(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT STDDEV(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT STDDEV(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+SELECT STDDEV(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING)
+ FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n);
+
+-- test that inverse transition functions work with various frame options
+SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v);
+
+SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
+ FROM (VALUES(1,1),(2,2),(3,3),(4,4)) t(i,v);
+
+-- ensure aggregate over numeric properly recovers from NaN values
+SELECT a, b,
+ SUM(b) OVER(ORDER BY A ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
+FROM (VALUES(1,1::numeric),(2,2),(3,'NaN'),(4,3),(5,4)) t(a,b);
+
+-- It might be tempting for someone to add an inverse trans function for
+-- float and double precision. This should not be done as it can give incorrect
+-- results. This test should fail if anyone ever does this without thinking too
+-- hard about it.
+SELECT to_char(SUM(n::float8) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING),'999999999999999999999D9')
+ FROM (VALUES(1,1e20),(2,1)) n(i,n);
+
+SELECT i, b, bool_and(b) OVER w, bool_or(b) OVER w
+ FROM (VALUES (1,true), (2,true), (3,false), (4,false), (5,true)) v(i,b)
+ WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING);
+
+-- Tests for problems with failure to walk or mutate expressions
+-- within window frame clauses.
+
+-- test walker (fails with collation error if expressions are not walked)
+SELECT array_agg(i) OVER w
+ FROM generate_series(1,5) i
+WINDOW w AS (ORDER BY i ROWS BETWEEN (('foo' < 'foobar')::integer) PRECEDING AND CURRENT ROW);
+
+-- test mutator (fails when inlined if expressions are not mutated)
+CREATE FUNCTION pg_temp.f(group_size BIGINT) RETURNS SETOF integer[]
+AS $$
+ SELECT array_agg(s) OVER w
+ FROM generate_series(1,5) s
+ WINDOW w AS (ORDER BY s ROWS BETWEEN CURRENT ROW AND GROUP_SIZE FOLLOWING)
+$$ LANGUAGE SQL STABLE;
+
+EXPLAIN (costs off) SELECT * FROM pg_temp.f(2);
+SELECT * FROM pg_temp.f(2);
diff --git a/ydb/library/yql/tests/postgresql/original/cases/xml.out b/ydb/library/yql/tests/postgresql/original/cases/xml.out
new file mode 100644
index 0000000000..55ac49be26
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/xml.out
@@ -0,0 +1,1570 @@
+CREATE TABLE xmltest (
+ id int,
+ data xml
+);
+INSERT INTO xmltest VALUES (1, '<value>one</value>');
+INSERT INTO xmltest VALUES (2, '<value>two</value>');
+INSERT INTO xmltest VALUES (3, '<wrong');
+ERROR: invalid XML content
+LINE 1: INSERT INTO xmltest VALUES (3, '<wrong');
+ ^
+DETAIL: line 1: Couldn't find end of Start Tag wrong line 1
+<wrong
+ ^
+SELECT * FROM xmltest;
+ id | data
+----+--------------------
+ 1 | <value>one</value>
+ 2 | <value>two</value>
+(2 rows)
+
+SELECT xmlcomment('test');
+ xmlcomment
+-------------
+ <!--test-->
+(1 row)
+
+SELECT xmlcomment('-test');
+ xmlcomment
+--------------
+ <!---test-->
+(1 row)
+
+SELECT xmlcomment('test-');
+ERROR: invalid XML comment
+SELECT xmlcomment('--test');
+ERROR: invalid XML comment
+SELECT xmlcomment('te st');
+ xmlcomment
+--------------
+ <!--te st-->
+(1 row)
+
+SELECT xmlconcat(xmlcomment('hello'),
+ xmlelement(NAME qux, 'foo'),
+ xmlcomment('world'));
+ xmlconcat
+----------------------------------------
+ <!--hello--><qux>foo</qux><!--world-->
+(1 row)
+
+SELECT xmlconcat('hello', 'you');
+ xmlconcat
+-----------
+ helloyou
+(1 row)
+
+SELECT xmlconcat(1, 2);
+ERROR: argument of XMLCONCAT must be type xml, not type integer
+LINE 1: SELECT xmlconcat(1, 2);
+ ^
+SELECT xmlconcat('bad', '<syntax');
+ERROR: invalid XML content
+LINE 1: SELECT xmlconcat('bad', '<syntax');
+ ^
+DETAIL: line 1: Couldn't find end of Start Tag syntax line 1
+<syntax
+ ^
+SELECT xmlconcat('<foo/>', NULL, '<?xml version="1.1" standalone="no"?><bar/>');
+ xmlconcat
+--------------
+ <foo/><bar/>
+(1 row)
+
+SELECT xmlconcat('<?xml version="1.1"?><foo/>', NULL, '<?xml version="1.1" standalone="no"?><bar/>');
+ xmlconcat
+-----------------------------------
+ <?xml version="1.1"?><foo/><bar/>
+(1 row)
+
+SELECT xmlconcat(NULL);
+ xmlconcat
+-----------
+
+(1 row)
+
+SELECT xmlconcat(NULL, NULL);
+ xmlconcat
+-----------
+
+(1 row)
+
+SELECT xmlelement(name element,
+ xmlattributes (1 as one, 'deuce' as two),
+ 'content');
+ xmlelement
+------------------------------------------------
+ <element one="1" two="deuce">content</element>
+(1 row)
+
+SELECT xmlelement(name element,
+ xmlattributes ('unnamed and wrong'));
+ERROR: unnamed XML attribute value must be a column reference
+LINE 2: xmlattributes ('unnamed and wrong'));
+ ^
+SELECT xmlelement(name element, xmlelement(name nested, 'stuff'));
+ xmlelement
+-------------------------------------------
+ <element><nested>stuff</nested></element>
+(1 row)
+
+SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
+ xmlelement
+----------------------------------------------------------------------
+ <employee><name>sharon</name><age>25</age><pay>1000</pay></employee>
+ <employee><name>sam</name><age>30</age><pay>2000</pay></employee>
+ <employee><name>bill</name><age>20</age><pay>1000</pay></employee>
+ <employee><name>jeff</name><age>23</age><pay>600</pay></employee>
+ <employee><name>cim</name><age>30</age><pay>400</pay></employee>
+ <employee><name>linda</name><age>19</age><pay>100</pay></employee>
+(6 rows)
+
+SELECT xmlelement(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a));
+ERROR: XML attribute name "a" appears more than once
+LINE 1: ...ment(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a));
+ ^
+SELECT xmlelement(name num, 37);
+ xmlelement
+---------------
+ <num>37</num>
+(1 row)
+
+SELECT xmlelement(name foo, text 'bar');
+ xmlelement
+----------------
+ <foo>bar</foo>
+(1 row)
+
+SELECT xmlelement(name foo, xml 'bar');
+ xmlelement
+----------------
+ <foo>bar</foo>
+(1 row)
+
+SELECT xmlelement(name foo, text 'b<a/>r');
+ xmlelement
+-------------------------
+ <foo>b&lt;a/&gt;r</foo>
+(1 row)
+
+SELECT xmlelement(name foo, xml 'b<a/>r');
+ xmlelement
+-------------------
+ <foo>b<a/>r</foo>
+(1 row)
+
+SELECT xmlelement(name foo, array[1, 2, 3]);
+ xmlelement
+-------------------------------------------------------------------------
+ <foo><element>1</element><element>2</element><element>3</element></foo>
+(1 row)
+
+SET xmlbinary TO base64;
+SELECT xmlelement(name foo, bytea 'bar');
+ xmlelement
+-----------------
+ <foo>YmFy</foo>
+(1 row)
+
+SET xmlbinary TO hex;
+SELECT xmlelement(name foo, bytea 'bar');
+ xmlelement
+-------------------
+ <foo>626172</foo>
+(1 row)
+
+SELECT xmlelement(name foo, xmlattributes(true as bar));
+ xmlelement
+-------------------
+ <foo bar="true"/>
+(1 row)
+
+SELECT xmlelement(name foo, xmlattributes('2009-04-09 00:24:37'::timestamp as bar));
+ xmlelement
+----------------------------------
+ <foo bar="2009-04-09T00:24:37"/>
+(1 row)
+
+SELECT xmlelement(name foo, xmlattributes('infinity'::timestamp as bar));
+ERROR: timestamp out of range
+DETAIL: XML does not support infinite timestamp values.
+SELECT xmlelement(name foo, xmlattributes('<>&"''' as funny, xml 'b<a/>r' as funnier));
+ xmlelement
+------------------------------------------------------------
+ <foo funny="&lt;&gt;&amp;&quot;'" funnier="b&lt;a/&gt;r"/>
+(1 row)
+
+SELECT xmlparse(content '');
+ xmlparse
+----------
+
+(1 row)
+
+SELECT xmlparse(content ' ');
+ xmlparse
+----------
+
+(1 row)
+
+SELECT xmlparse(content 'abc');
+ xmlparse
+----------
+ abc
+(1 row)
+
+SELECT xmlparse(content '<abc>x</abc>');
+ xmlparse
+--------------
+ <abc>x</abc>
+(1 row)
+
+SELECT xmlparse(content '<invalidentity>&</invalidentity>');
+ERROR: invalid XML content
+DETAIL: line 1: xmlParseEntityRef: no name
+<invalidentity>&</invalidentity>
+ ^
+line 1: chunk is not well balanced
+<invalidentity>&</invalidentity>
+ ^
+SELECT xmlparse(content '<undefinedentity>&idontexist;</undefinedentity>');
+ERROR: invalid XML content
+DETAIL: line 1: Entity 'idontexist' not defined
+<undefinedentity>&idontexist;</undefinedentity>
+ ^
+line 1: chunk is not well balanced
+<undefinedentity>&idontexist;</undefinedentity>
+ ^
+SELECT xmlparse(content '<invalidns xmlns=''&lt;''/>');
+ xmlparse
+---------------------------
+ <invalidns xmlns='&lt;'/>
+(1 row)
+
+SELECT xmlparse(content '<relativens xmlns=''relative''/>');
+ xmlparse
+--------------------------------
+ <relativens xmlns='relative'/>
+(1 row)
+
+SELECT xmlparse(content '<twoerrors>&idontexist;</unbalanced>');
+ERROR: invalid XML content
+DETAIL: line 1: Entity 'idontexist' not defined
+<twoerrors>&idontexist;</unbalanced>
+ ^
+line 1: Opening and ending tag mismatch: twoerrors line 1 and unbalanced
+<twoerrors>&idontexist;</unbalanced>
+ ^
+line 1: chunk is not well balanced
+<twoerrors>&idontexist;</unbalanced>
+ ^
+SELECT xmlparse(content '<nosuchprefix:tag/>');
+ xmlparse
+---------------------
+ <nosuchprefix:tag/>
+(1 row)
+
+SELECT xmlparse(document ' ');
+ERROR: invalid XML document
+DETAIL: line 1: Start tag expected, '<' not found
+
+ ^
+SELECT xmlparse(document 'abc');
+ERROR: invalid XML document
+DETAIL: line 1: Start tag expected, '<' not found
+abc
+^
+SELECT xmlparse(document '<abc>x</abc>');
+ xmlparse
+--------------
+ <abc>x</abc>
+(1 row)
+
+SELECT xmlparse(document '<invalidentity>&</abc>');
+ERROR: invalid XML document
+DETAIL: line 1: xmlParseEntityRef: no name
+<invalidentity>&</abc>
+ ^
+line 1: Opening and ending tag mismatch: invalidentity line 1 and abc
+<invalidentity>&</abc>
+ ^
+SELECT xmlparse(document '<undefinedentity>&idontexist;</abc>');
+ERROR: invalid XML document
+DETAIL: line 1: Entity 'idontexist' not defined
+<undefinedentity>&idontexist;</abc>
+ ^
+line 1: Opening and ending tag mismatch: undefinedentity line 1 and abc
+<undefinedentity>&idontexist;</abc>
+ ^
+SELECT xmlparse(document '<invalidns xmlns=''&lt;''/>');
+ xmlparse
+---------------------------
+ <invalidns xmlns='&lt;'/>
+(1 row)
+
+SELECT xmlparse(document '<relativens xmlns=''relative''/>');
+ xmlparse
+--------------------------------
+ <relativens xmlns='relative'/>
+(1 row)
+
+SELECT xmlparse(document '<twoerrors>&idontexist;</unbalanced>');
+ERROR: invalid XML document
+DETAIL: line 1: Entity 'idontexist' not defined
+<twoerrors>&idontexist;</unbalanced>
+ ^
+line 1: Opening and ending tag mismatch: twoerrors line 1 and unbalanced
+<twoerrors>&idontexist;</unbalanced>
+ ^
+SELECT xmlparse(document '<nosuchprefix:tag/>');
+ xmlparse
+---------------------
+ <nosuchprefix:tag/>
+(1 row)
+
+SELECT xmlpi(name foo);
+ xmlpi
+---------
+ <?foo?>
+(1 row)
+
+SELECT xmlpi(name xml);
+ERROR: invalid XML processing instruction
+DETAIL: XML processing instruction target name cannot be "xml".
+SELECT xmlpi(name xmlstuff);
+ xmlpi
+--------------
+ <?xmlstuff?>
+(1 row)
+
+SELECT xmlpi(name foo, 'bar');
+ xmlpi
+-------------
+ <?foo bar?>
+(1 row)
+
+SELECT xmlpi(name foo, 'in?>valid');
+ERROR: invalid XML processing instruction
+DETAIL: XML processing instruction cannot contain "?>".
+SELECT xmlpi(name foo, null);
+ xmlpi
+-------
+
+(1 row)
+
+SELECT xmlpi(name xml, null);
+ERROR: invalid XML processing instruction
+DETAIL: XML processing instruction target name cannot be "xml".
+SELECT xmlpi(name xmlstuff, null);
+ xmlpi
+-------
+
+(1 row)
+
+SELECT xmlpi(name "xml-stylesheet", 'href="mystyle.css" type="text/css"');
+ xmlpi
+-------------------------------------------------------
+ <?xml-stylesheet href="mystyle.css" type="text/css"?>
+(1 row)
+
+SELECT xmlpi(name foo, ' bar');
+ xmlpi
+-------------
+ <?foo bar?>
+(1 row)
+
+SELECT xmlroot(xml '<foo/>', version no value, standalone no value);
+ xmlroot
+---------
+ <foo/>
+(1 row)
+
+SELECT xmlroot(xml '<foo/>', version '2.0');
+ xmlroot
+-----------------------------
+ <?xml version="2.0"?><foo/>
+(1 row)
+
+SELECT xmlroot(xml '<foo/>', version no value, standalone yes);
+ xmlroot
+----------------------------------------------
+ <?xml version="1.0" standalone="yes"?><foo/>
+(1 row)
+
+SELECT xmlroot(xml '<?xml version="1.1"?><foo/>', version no value, standalone yes);
+ xmlroot
+----------------------------------------------
+ <?xml version="1.0" standalone="yes"?><foo/>
+(1 row)
+
+SELECT xmlroot(xmlroot(xml '<foo/>', version '1.0'), version '1.1', standalone no);
+ xmlroot
+---------------------------------------------
+ <?xml version="1.1" standalone="no"?><foo/>
+(1 row)
+
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value, standalone no);
+ xmlroot
+---------------------------------------------
+ <?xml version="1.0" standalone="no"?><foo/>
+(1 row)
+
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value, standalone no value);
+ xmlroot
+---------
+ <foo/>
+(1 row)
+
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value);
+ xmlroot
+----------------------------------------------
+ <?xml version="1.0" standalone="yes"?><foo/>
+(1 row)
+
+SELECT xmlroot (
+ xmlelement (
+ name gazonk,
+ xmlattributes (
+ 'val' AS name,
+ 1 + 1 AS num
+ ),
+ xmlelement (
+ NAME qux,
+ 'foo'
+ )
+ ),
+ version '1.0',
+ standalone yes
+);
+ xmlroot
+------------------------------------------------------------------------------------------
+ <?xml version="1.0" standalone="yes"?><gazonk name="val" num="2"><qux>foo</qux></gazonk>
+(1 row)
+
+SELECT xmlserialize(content data as character varying(20)) FROM xmltest;
+ xmlserialize
+--------------------
+ <value>one</value>
+ <value>two</value>
+(2 rows)
+
+SELECT xmlserialize(content 'good' as char(10));
+ xmlserialize
+--------------
+ good
+(1 row)
+
+SELECT xmlserialize(document 'bad' as text);
+ERROR: not an XML document
+SELECT xml '<foo>bar</foo>' IS DOCUMENT;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT xml '<foo>bar</foo><bar>foo</bar>' IS DOCUMENT;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT xml '<abc/>' IS NOT DOCUMENT;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT xml 'abc' IS NOT DOCUMENT;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '<>' IS NOT DOCUMENT;
+ERROR: invalid XML content
+LINE 1: SELECT '<>' IS NOT DOCUMENT;
+ ^
+DETAIL: line 1: StartTag: invalid element name
+<>
+ ^
+SELECT xmlagg(data) FROM xmltest;
+ xmlagg
+--------------------------------------
+ <value>one</value><value>two</value>
+(1 row)
+
+SELECT xmlagg(data) FROM xmltest WHERE id > 10;
+ xmlagg
+--------
+
+(1 row)
+
+SELECT xmlelement(name employees, xmlagg(xmlelement(name name, name))) FROM emp;
+ xmlelement
+--------------------------------------------------------------------------------------------------------------------------------
+ <employees><name>sharon</name><name>sam</name><name>bill</name><name>jeff</name><name>cim</name><name>linda</name></employees>
+(1 row)
+
+-- Check mapping SQL identifier to XML name
+SELECT xmlpi(name ":::_xml_abc135.%-&_");
+ xmlpi
+-------------------------------------------------
+ <?_x003A_::_x005F_xml_abc135._x0025_-_x0026__?>
+(1 row)
+
+SELECT xmlpi(name "123");
+ xmlpi
+---------------
+ <?_x0031_23?>
+(1 row)
+
+PREPARE foo (xml) AS SELECT xmlconcat('<foo/>', $1);
+SET XML OPTION DOCUMENT;
+EXECUTE foo ('<bar/>');
+ xmlconcat
+--------------
+ <foo/><bar/>
+(1 row)
+
+EXECUTE foo ('bad');
+ERROR: invalid XML document
+LINE 1: EXECUTE foo ('bad');
+ ^
+DETAIL: line 1: Start tag expected, '<' not found
+bad
+^
+SELECT xml '<!DOCTYPE a><a/><b/>';
+ERROR: invalid XML document
+LINE 1: SELECT xml '<!DOCTYPE a><a/><b/>';
+ ^
+DETAIL: line 1: Extra content at the end of the document
+<!DOCTYPE a><a/><b/>
+ ^
+SET XML OPTION CONTENT;
+EXECUTE foo ('<bar/>');
+ xmlconcat
+--------------
+ <foo/><bar/>
+(1 row)
+
+EXECUTE foo ('good');
+ xmlconcat
+------------
+ <foo/>good
+(1 row)
+
+SELECT xml '<!-- in SQL:2006+ a doc is content too--> <?y z?> <!DOCTYPE a><a/>';
+ xml
+--------------------------------------------------------------------
+ <!-- in SQL:2006+ a doc is content too--> <?y z?> <!DOCTYPE a><a/>
+(1 row)
+
+SELECT xml '<?xml version="1.0"?> <!-- hi--> <!DOCTYPE a><a/>';
+ xml
+------------------------------
+ <!-- hi--> <!DOCTYPE a><a/>
+(1 row)
+
+SELECT xml '<!DOCTYPE a><a/>';
+ xml
+------------------
+ <!DOCTYPE a><a/>
+(1 row)
+
+SELECT xml '<!-- hi--> oops <!DOCTYPE a><a/>';
+ERROR: invalid XML content
+LINE 1: SELECT xml '<!-- hi--> oops <!DOCTYPE a><a/>';
+ ^
+DETAIL: line 1: StartTag: invalid element name
+<!-- hi--> oops <!DOCTYPE a><a/>
+ ^
+SELECT xml '<!-- hi--> <oops/> <!DOCTYPE a><a/>';
+ERROR: invalid XML content
+LINE 1: SELECT xml '<!-- hi--> <oops/> <!DOCTYPE a><a/>';
+ ^
+DETAIL: line 1: StartTag: invalid element name
+<!-- hi--> <oops/> <!DOCTYPE a><a/>
+ ^
+SELECT xml '<!DOCTYPE a><a/><b/>';
+ERROR: invalid XML content
+LINE 1: SELECT xml '<!DOCTYPE a><a/><b/>';
+ ^
+DETAIL: line 1: Extra content at the end of the document
+<!DOCTYPE a><a/><b/>
+ ^
+-- Test backwards parsing
+CREATE VIEW xmlview1 AS SELECT xmlcomment('test');
+CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you');
+CREATE VIEW xmlview3 AS SELECT xmlelement(name element, xmlattributes (1 as ":one:", 'deuce' as two), 'content&');
+CREATE VIEW xmlview4 AS SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
+CREATE VIEW xmlview5 AS SELECT xmlparse(content '<abc>x</abc>');
+CREATE VIEW xmlview6 AS SELECT xmlpi(name foo, 'bar');
+CREATE VIEW xmlview7 AS SELECT xmlroot(xml '<foo/>', version no value, standalone yes);
+CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10));
+CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text);
+SELECT table_name, view_definition FROM information_schema.views
+ WHERE table_name LIKE 'xmlview%' ORDER BY 1;
+ table_name | view_definition
+------------+-------------------------------------------------------------------------------------------------------------------
+ xmlview1 | SELECT xmlcomment('test'::text) AS xmlcomment;
+ xmlview2 | SELECT XMLCONCAT('hello'::xml, 'you'::xml) AS "xmlconcat";
+ xmlview3 | SELECT XMLELEMENT(NAME element, XMLATTRIBUTES(1 AS ":one:", 'deuce' AS two), 'content&') AS "xmlelement";
+ xmlview4 | SELECT XMLELEMENT(NAME employee, XMLFOREST(emp.name AS name, emp.age AS age, emp.salary AS pay)) AS "xmlelement"+
+ | FROM emp;
+ xmlview5 | SELECT XMLPARSE(CONTENT '<abc>x</abc>'::text STRIP WHITESPACE) AS "xmlparse";
+ xmlview6 | SELECT XMLPI(NAME foo, 'bar'::text) AS "xmlpi";
+ xmlview7 | SELECT XMLROOT('<foo/>'::xml, VERSION NO VALUE, STANDALONE YES) AS "xmlroot";
+ xmlview8 | SELECT (XMLSERIALIZE(CONTENT 'good'::xml AS character(10)))::character(10) AS "xmlserialize";
+ xmlview9 | SELECT XMLSERIALIZE(CONTENT 'good'::xml AS text) AS "xmlserialize";
+(9 rows)
+
+-- Text XPath expressions evaluation
+SELECT xpath('/value', data) FROM xmltest;
+ xpath
+----------------------
+ {<value>one</value>}
+ {<value>two</value>}
+(2 rows)
+
+SELECT xpath(NULL, NULL) IS NULL FROM xmltest;
+ ?column?
+----------
+ t
+ t
+(2 rows)
+
+SELECT xpath('', '<!-- error -->');
+ERROR: empty XPath expression
+CONTEXT: SQL function "xpath" statement 1
+SELECT xpath('//text()', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>');
+ xpath
+----------------
+ {"number one"}
+(1 row)
+
+SELECT xpath('//loc:piece/@id', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+ xpath
+-------
+ {1,2}
+(1 row)
+
+SELECT xpath('//loc:piece', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+ xpath
+------------------------------------------------------------------------------------------------------------------------------------------------
+ {"<local:piece xmlns:local=\"http://127.0.0.1\" id=\"1\">number one</local:piece>","<local:piece xmlns:local=\"http://127.0.0.1\" id=\"2\"/>"}
+(1 row)
+
+SELECT xpath('//loc:piece', '<local:data xmlns:local="http://127.0.0.1" xmlns="http://127.0.0.2"><local:piece id="1"><internal>number one</internal><internal2/></local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+ xpath
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"<local:piece xmlns:local=\"http://127.0.0.1\" xmlns=\"http://127.0.0.2\" id=\"1\"><internal>number one</internal><internal2/></local:piece>","<local:piece xmlns:local=\"http://127.0.0.1\" id=\"2\"/>"}
+(1 row)
+
+SELECT xpath('//b', '<a>one <b>two</b> three <b>etc</b></a>');
+ xpath
+-------------------------
+ {<b>two</b>,<b>etc</b>}
+(1 row)
+
+SELECT xpath('//text()', '<root>&lt;</root>');
+ xpath
+--------
+ {&lt;}
+(1 row)
+
+SELECT xpath('//@value', '<root value="&lt;"/>');
+ xpath
+--------
+ {&lt;}
+(1 row)
+
+SELECT xpath('''<<invalid>>''', '<root/>');
+ xpath
+---------------------------
+ {&lt;&lt;invalid&gt;&gt;}
+(1 row)
+
+SELECT xpath('count(//*)', '<root><sub/><sub/></root>');
+ xpath
+-------
+ {3}
+(1 row)
+
+SELECT xpath('count(//*)=0', '<root><sub/><sub/></root>');
+ xpath
+---------
+ {false}
+(1 row)
+
+SELECT xpath('count(//*)=3', '<root><sub/><sub/></root>');
+ xpath
+--------
+ {true}
+(1 row)
+
+SELECT xpath('name(/*)', '<root><sub/><sub/></root>');
+ xpath
+--------
+ {root}
+(1 row)
+
+SELECT xpath('/nosuchtag', '<root/>');
+ xpath
+-------
+ {}
+(1 row)
+
+SELECT xpath('root', '<root/>');
+ xpath
+-----------
+ {<root/>}
+(1 row)
+
+-- Round-trip non-ASCII data through xpath().
+DO $$
+DECLARE
+ xml_declaration text := '<?xml version="1.0" encoding="ISO-8859-1"?>';
+ degree_symbol text;
+ res xml[];
+BEGIN
+ -- Per the documentation, except when the server encoding is UTF8, xpath()
+ -- may not work on non-ASCII data. The untranslatable_character and
+ -- undefined_function traps below, currently dead code, will become relevant
+ -- if we remove this limitation.
+ IF current_setting('server_encoding') <> 'UTF8' THEN
+ RAISE LOG 'skip: encoding % unsupported for xpath',
+ current_setting('server_encoding');
+ RETURN;
+ END IF;
+
+ degree_symbol := convert_from('\xc2b0', 'UTF8');
+ res := xpath('text()', (xml_declaration ||
+ '<x>' || degree_symbol || '</x>')::xml);
+ IF degree_symbol <> res[1]::text THEN
+ RAISE 'expected % (%), got % (%)',
+ degree_symbol, convert_to(degree_symbol, 'UTF8'),
+ res[1], convert_to(res[1]::text, 'UTF8');
+ END IF;
+EXCEPTION
+ -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8"
+ WHEN untranslatable_character
+ -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist
+ OR undefined_function
+ -- unsupported XML feature
+ OR feature_not_supported THEN
+ RAISE LOG 'skip: %', SQLERRM;
+END
+$$;
+-- Test xmlexists and xpath_exists
+SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF '<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>');
+ xmlexists
+-----------
+ f
+(1 row)
+
+SELECT xmlexists('//town[text() = ''Cwmbran'']' PASSING BY REF '<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>');
+ xmlexists
+-----------
+ t
+(1 row)
+
+SELECT xmlexists('count(/nosuchtag)' PASSING BY REF '<root/>');
+ xmlexists
+-----------
+ t
+(1 row)
+
+SELECT xpath_exists('//town[text() = ''Toronto'']','<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>'::xml);
+ xpath_exists
+--------------
+ f
+(1 row)
+
+SELECT xpath_exists('//town[text() = ''Cwmbran'']','<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>'::xml);
+ xpath_exists
+--------------
+ t
+(1 row)
+
+SELECT xpath_exists('count(/nosuchtag)', '<root/>'::xml);
+ xpath_exists
+--------------
+ t
+(1 row)
+
+INSERT INTO xmltest VALUES (4, '<menu><beers><name>Budvar</name><cost>free</cost><name>Carling</name><cost>lots</cost></beers></menu>'::xml);
+INSERT INTO xmltest VALUES (5, '<menu><beers><name>Molson</name><cost>free</cost><name>Carling</name><cost>lots</cost></beers></menu>'::xml);
+INSERT INTO xmltest VALUES (6, '<myns:menu xmlns:myns="http://myns.com"><myns:beers><myns:name>Budvar</myns:name><myns:cost>free</myns:cost><myns:name>Carling</myns:name><myns:cost>lots</myns:cost></myns:beers></myns:menu>'::xml);
+INSERT INTO xmltest VALUES (7, '<myns:menu xmlns:myns="http://myns.com"><myns:beers><myns:name>Molson</myns:name><myns:cost>free</myns:cost><myns:name>Carling</myns:name><myns:cost>lots</myns:cost></myns:beers></myns:menu>'::xml);
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING BY REF data BY REF);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers' PASSING BY REF data);
+ count
+-------
+ 2
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers/name[text() = ''Molson'']' PASSING BY REF data);
+ count
+-------
+ 1
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beer',data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers',data);
+ count
+-------
+ 2
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers/name[text() = ''Molson'']',data);
+ count
+-------
+ 1
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beer',data,ARRAY[ARRAY['myns','http://myns.com']]);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers',data,ARRAY[ARRAY['myns','http://myns.com']]);
+ count
+-------
+ 2
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers/myns:name[text() = ''Molson'']',data,ARRAY[ARRAY['myns','http://myns.com']]);
+ count
+-------
+ 1
+(1 row)
+
+CREATE TABLE query ( expr TEXT );
+INSERT INTO query VALUES ('/menu/beers/cost[text() = ''lots'']');
+SELECT COUNT(id) FROM xmltest, query WHERE xmlexists(expr PASSING BY REF data);
+ count
+-------
+ 2
+(1 row)
+
+-- Test xml_is_well_formed and variants
+SELECT xml_is_well_formed_document('<foo>bar</foo>');
+ xml_is_well_formed_document
+-----------------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed_document('abc');
+ xml_is_well_formed_document
+-----------------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed_content('<foo>bar</foo>');
+ xml_is_well_formed_content
+----------------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed_content('abc');
+ xml_is_well_formed_content
+----------------------------
+ t
+(1 row)
+
+SET xmloption TO DOCUMENT;
+SELECT xml_is_well_formed('abc');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<abc/>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<foo>bar</foo>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<foo>bar</foo');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<foo><bar>baz</foo>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<pg:foo xmlns:pg="http://postgresql.org/stuff">bar</my:foo>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<pg:foo xmlns:pg="http://postgresql.org/stuff">bar</pg:foo>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<invalidentity>&</abc>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<undefinedentity>&idontexist;</abc>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<invalidns xmlns=''&lt;''/>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<relativens xmlns=''relative''/>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<twoerrors>&idontexist;</unbalanced>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SET xmloption TO CONTENT;
+SELECT xml_is_well_formed('abc');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+-- Since xpath() deals with namespaces, it's a bit stricter about
+-- what's well-formed and what's not. If we don't obey these rules
+-- (i.e. ignore namespace-related errors from libxml), xpath()
+-- fails in subtle ways. The following would for example produce
+-- the xml value
+-- <invalidns xmlns='<'/>
+-- which is invalid because '<' may not appear un-escaped in
+-- attribute values.
+-- Since different libxml versions emit slightly different
+-- error messages, we suppress the DETAIL in this test.
+\set VERBOSITY terse
+SELECT xpath('/*', '<invalidns xmlns=''&lt;''/>');
+ERROR: could not parse XML document
+\set VERBOSITY default
+-- Again, the XML isn't well-formed for namespace purposes
+SELECT xpath('/*', '<nosuchprefix:tag/>');
+ERROR: could not parse XML document
+DETAIL: line 1: Namespace prefix nosuchprefix on tag is not defined
+<nosuchprefix:tag/>
+ ^
+CONTEXT: SQL function "xpath" statement 1
+-- XPath deprecates relative namespaces, but they're not supposed to
+-- throw an error, only a warning.
+SELECT xpath('/*', '<relativens xmlns=''relative''/>');
+WARNING: line 1: xmlns: URI relative is not absolute
+<relativens xmlns='relative'/>
+ ^
+ xpath
+--------------------------------------
+ {"<relativens xmlns=\"relative\"/>"}
+(1 row)
+
+-- External entity references should not leak filesystem information.
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/passwd">]><foo>&c;</foo>');
+ xmlparse
+-----------------------------------------------------------------
+ <!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/passwd">]><foo>&c;</foo>
+(1 row)
+
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/no.such.file">]><foo>&c;</foo>');
+ xmlparse
+-----------------------------------------------------------------------
+ <!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/no.such.file">]><foo>&c;</foo>
+(1 row)
+
+-- This might or might not load the requested DTD, but it mustn't throw error.
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd"><chapter>&nbsp;</chapter>');
+ xmlparse
+------------------------------------------------------------------------------------------------------------------------------------------------------
+ <!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd"><chapter>&nbsp;</chapter>
+(1 row)
+
+-- XMLPATH tests
+CREATE TABLE xmldata(data xml);
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="1">
+ <COUNTRY_ID>AU</COUNTRY_ID>
+ <COUNTRY_NAME>Australia</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="2">
+ <COUNTRY_ID>CN</COUNTRY_ID>
+ <COUNTRY_NAME>China</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="3">
+ <COUNTRY_ID>HK</COUNTRY_ID>
+ <COUNTRY_NAME>HongKong</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="4">
+ <COUNTRY_ID>IN</COUNTRY_ID>
+ <COUNTRY_NAME>India</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="5">
+ <COUNTRY_ID>JP</COUNTRY_ID>
+ <COUNTRY_NAME>Japan</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID><PREMIER_NAME>Sinzo Abe</PREMIER_NAME>
+</ROW>
+<ROW id="6">
+ <COUNTRY_ID>SG</COUNTRY_ID>
+ <COUNTRY_NAME>Singapore</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID><SIZE unit="km">791</SIZE>
+</ROW>
+</ROWS>');
+-- XMLTABLE with columns
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME/text()' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+---------------
+ 1 | 1 | Australia | AU | 3 | | | not specified
+ 2 | 2 | China | CN | 3 | | | not specified
+ 3 | 3 | HongKong | HK | 3 | | | not specified
+ 4 | 4 | India | IN | 3 | | | not specified
+ 5 | 5 | Japan | JP | 3 | | | Sinzo Abe
+ 6 | 6 | Singapore | SG | 3 | 791 | km | not specified
+(6 rows)
+
+CREATE VIEW xmltableview1 AS SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME/text()' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+SELECT * FROM xmltableview1;
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+---------------
+ 1 | 1 | Australia | AU | 3 | | | not specified
+ 2 | 2 | China | CN | 3 | | | not specified
+ 3 | 3 | HongKong | HK | 3 | | | not specified
+ 4 | 4 | India | IN | 3 | | | not specified
+ 5 | 5 | Japan | JP | 3 | | | Sinzo Abe
+ 6 | 6 | Singapore | SG | 3 | 791 | km | not specified
+(6 rows)
+
+\sv xmltableview1
+CREATE OR REPLACE VIEW public.xmltableview1 AS
+ SELECT "xmltable".id,
+ "xmltable"._id,
+ "xmltable".country_name,
+ "xmltable".country_id,
+ "xmltable".region_id,
+ "xmltable".size,
+ "xmltable".unit,
+ "xmltable".premier_name
+ FROM ( SELECT xmldata.data
+ FROM xmldata) x,
+ LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1;
+ QUERY PLAN
+-----------------------------------------
+ Nested Loop
+ -> Seq Scan on xmldata
+ -> Table Function Scan on "xmltable"
+(3 rows)
+
+EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+(7 rows)
+
+-- errors
+SELECT * FROM XMLTABLE (ROW () PASSING null COLUMNS v1 timestamp) AS f (v1, v2);
+ERROR: XMLTABLE function has 1 columns available but 2 columns specified
+-- XMLNAMESPACES tests
+SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
+ '/zz:rows/zz:row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'zz:a');
+ a
+----
+ 10
+(1 row)
+
+CREATE VIEW xmltableview2 AS SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
+ '/zz:rows/zz:row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'zz:a');
+SELECT * FROM xmltableview2;
+ a
+----
+ 10
+(1 row)
+
+SELECT * FROM XMLTABLE(XMLNAMESPACES(DEFAULT 'http://x.y'),
+ '/rows/row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'a');
+ERROR: DEFAULT namespace is not supported
+SELECT * FROM XMLTABLE('.'
+ PASSING '<foo/>'
+ COLUMNS a text PATH 'foo/namespace::node()');
+ a
+--------------------------------------
+ http://www.w3.org/XML/1998/namespace
+(1 row)
+
+-- used in prepare statements
+PREPARE pp AS
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+EXECUTE pp;
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+---------------
+ 1 | 1 | Australia | AU | 3 | | | not specified
+ 2 | 2 | China | CN | 3 | | | not specified
+ 3 | 3 | HongKong | HK | 3 | | | not specified
+ 4 | 4 | India | IN | 3 | | | not specified
+ 5 | 5 | Japan | JP | 3 | | | Sinzo Abe
+ 6 | 6 | Singapore | SG | 3 | 791 | km | not specified
+(6 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int);
+ COUNTRY_NAME | REGION_ID
+--------------+-----------
+ India | 3
+ Japan | 3
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY, "COUNTRY_NAME" text, "REGION_ID" int);
+ id | COUNTRY_NAME | REGION_ID
+----+--------------+-----------
+ 1 | India | 3
+ 2 | Japan | 3
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int);
+ id | COUNTRY_NAME | REGION_ID
+----+--------------+-----------
+ 4 | India | 3
+ 5 | Japan | 3
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id');
+ id
+----
+ 4
+ 5
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY);
+ id
+----
+ 1
+ 2
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH '.');
+ id | COUNTRY_NAME | REGION_ID | rawdata
+----+--------------+-----------+------------------------------------------------------------------
+ 4 | India | 3 | <ROW id="4"> +
+ | | | <COUNTRY_ID>IN</COUNTRY_ID> +
+ | | | <COUNTRY_NAME>India</COUNTRY_NAME> +
+ | | | <REGION_ID>3</REGION_ID> +
+ | | | </ROW>
+ 5 | Japan | 3 | <ROW id="5"> +
+ | | | <COUNTRY_ID>JP</COUNTRY_ID> +
+ | | | <COUNTRY_NAME>Japan</COUNTRY_NAME> +
+ | | | <REGION_ID>3</REGION_ID><PREMIER_NAME>Sinzo Abe</PREMIER_NAME>+
+ | | | </ROW>
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH './*');
+ id | COUNTRY_NAME | REGION_ID | rawdata
+----+--------------+-----------+-----------------------------------------------------------------------------------------------------------------------------
+ 4 | India | 3 | <COUNTRY_ID>IN</COUNTRY_ID><COUNTRY_NAME>India</COUNTRY_NAME><REGION_ID>3</REGION_ID>
+ 5 | Japan | 3 | <COUNTRY_ID>JP</COUNTRY_ID><COUNTRY_NAME>Japan</COUNTRY_NAME><REGION_ID>3</REGION_ID><PREMIER_NAME>Sinzo Abe</PREMIER_NAME>
+(2 rows)
+
+SELECT * FROM xmltable('/root' passing '<root><element>a1a<!-- aaaa -->a2a<?aaaaa?> <!--z--> bbbb<x>xxx</x>cccc</element></root>' COLUMNS element text);
+ element
+----------------------
+ a1aa2a bbbbxxxcccc
+(1 row)
+
+SELECT * FROM xmltable('/root' passing '<root><element>a1a<!-- aaaa -->a2a<?aaaaa?> <!--z--> bbbb<x>xxx</x>cccc</element></root>' COLUMNS element text PATH 'element/text()'); -- should fail
+ERROR: more than one value returned by column XPath expression
+-- CDATA test
+select * from xmltable('d/r' passing '<d><r><c><![CDATA[<hello> &"<>!<a>foo</a>]]></c></r><r><c>2</c></r></d>' columns c text);
+ c
+-------------------------
+ <hello> &"<>!<a>foo</a>
+ 2
+(2 rows)
+
+-- XML builtin entities
+SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</ent></a><a><ent>&quot;</ent></a><a><ent>&amp;</ent></a><a><ent>&lt;</ent></a><a><ent>&gt;</ent></a></x>' COLUMNS ent text);
+ ent
+-----
+ '
+ "
+ &
+ <
+ >
+(5 rows)
+
+SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</ent></a><a><ent>&quot;</ent></a><a><ent>&amp;</ent></a><a><ent>&lt;</ent></a><a><ent>&gt;</ent></a></x>' COLUMNS ent xml);
+ ent
+------------------
+ <ent>'</ent>
+ <ent>"</ent>
+ <ent>&amp;</ent>
+ <ent>&lt;</ent>
+ <ent>&gt;</ent>
+(5 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+(7 rows)
+
+-- test qual
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan';
+ COUNTRY_NAME | REGION_ID
+--------------+-----------
+ Japan | 3
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan';
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID"
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID"
+ Table Function Call: XMLTABLE(('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]'::text) PASSING (xmldata.data) COLUMNS "COUNTRY_NAME" text, "REGION_ID" integer)
+ Filter: ("xmltable"."COUNTRY_NAME" = 'Japan'::text)
+(8 rows)
+
+-- should to work with more data
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="10">
+ <COUNTRY_ID>CZ</COUNTRY_ID>
+ <COUNTRY_NAME>Czech Republic</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID><PREMIER_NAME>Milos Zeman</PREMIER_NAME>
+</ROW>
+<ROW id="11">
+ <COUNTRY_ID>DE</COUNTRY_ID>
+ <COUNTRY_NAME>Germany</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID>
+</ROW>
+<ROW id="12">
+ <COUNTRY_ID>FR</COUNTRY_ID>
+ <COUNTRY_NAME>France</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID>
+</ROW>
+</ROWS>');
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="20">
+ <COUNTRY_ID>EG</COUNTRY_ID>
+ <COUNTRY_NAME>Egypt</COUNTRY_NAME>
+ <REGION_ID>1</REGION_ID>
+</ROW>
+<ROW id="21">
+ <COUNTRY_ID>SD</COUNTRY_ID>
+ <COUNTRY_NAME>Sudan</COUNTRY_NAME>
+ <REGION_ID>1</REGION_ID>
+</ROW>
+</ROWS>');
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+----------------+------------+-----------+------+------+---------------
+ 1 | 1 | Australia | AU | 3 | | | not specified
+ 2 | 2 | China | CN | 3 | | | not specified
+ 3 | 3 | HongKong | HK | 3 | | | not specified
+ 4 | 4 | India | IN | 3 | | | not specified
+ 5 | 5 | Japan | JP | 3 | | | Sinzo Abe
+ 6 | 6 | Singapore | SG | 3 | 791 | km | not specified
+ 10 | 1 | Czech Republic | CZ | 2 | | | Milos Zeman
+ 11 | 2 | Germany | DE | 2 | | | not specified
+ 12 | 3 | France | FR | 2 | | | not specified
+ 20 | 1 | Egypt | EG | 1 | | | not specified
+ 21 | 2 | Sudan | SD | 1 | | | not specified
+(11 rows)
+
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified')
+ WHERE region_id = 2;
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+----------------+------------+-----------+------+------+---------------
+ 10 | 1 | Czech Republic | CZ | 2 | | | Milos Zeman
+ 11 | 2 | Germany | DE | 2 | | | not specified
+ 12 | 3 | France | FR | 2 | | | not specified
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified')
+ WHERE region_id = 2;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+ Filter: ("xmltable".region_id = 2)
+(8 rows)
+
+-- should fail, NULL value
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE' NOT NULL,
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ERROR: null is not allowed in column "size"
+-- if all is ok, then result is empty
+-- one line xml test
+WITH
+ x AS (SELECT proname, proowner, procost::numeric, pronargs,
+ array_to_string(proargnames,',') as proargnames,
+ case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes
+ FROM pg_proc WHERE proname = 'f_leak'),
+ y AS (SELECT xmlelement(name proc,
+ xmlforest(proname, proowner,
+ procost, pronargs,
+ proargnames, proargtypes)) as proc
+ FROM x),
+ z AS (SELECT xmltable.*
+ FROM y,
+ LATERAL xmltable('/proc' PASSING proc
+ COLUMNS proname name,
+ proowner oid,
+ procost float,
+ pronargs int,
+ proargnames text,
+ proargtypes text))
+ SELECT * FROM z
+ EXCEPT SELECT * FROM x;
+ proname | proowner | procost | pronargs | proargnames | proargtypes
+---------+----------+---------+----------+-------------+-------------
+(0 rows)
+
+-- multi line xml test, result should be empty too
+WITH
+ x AS (SELECT proname, proowner, procost::numeric, pronargs,
+ array_to_string(proargnames,',') as proargnames,
+ case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes
+ FROM pg_proc),
+ y AS (SELECT xmlelement(name data,
+ xmlagg(xmlelement(name proc,
+ xmlforest(proname, proowner, procost,
+ pronargs, proargnames, proargtypes)))) as doc
+ FROM x),
+ z AS (SELECT xmltable.*
+ FROM y,
+ LATERAL xmltable('/data/proc' PASSING doc
+ COLUMNS proname name,
+ proowner oid,
+ procost float,
+ pronargs int,
+ proargnames text,
+ proargtypes text))
+ SELECT * FROM z
+ EXCEPT SELECT * FROM x;
+ proname | proowner | procost | pronargs | proargnames | proargtypes
+---------+----------+---------+----------+-------------+-------------
+(0 rows)
+
+CREATE TABLE xmltest2(x xml, _path text);
+INSERT INTO xmltest2 VALUES('<d><r><ac>1</ac></r></d>', 'A');
+INSERT INTO xmltest2 VALUES('<d><r><bc>2</bc></r></d>', 'B');
+INSERT INTO xmltest2 VALUES('<d><r><cc>3</cc></r></d>', 'C');
+INSERT INTO xmltest2 VALUES('<d><r><dc>2</dc></r></d>', 'D');
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable('/d/r' PASSING x COLUMNS a int PATH '' || lower(_path) || 'c');
+ a
+---
+ 1
+ 2
+ 3
+ 2
+(4 rows)
+
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH '.');
+ a
+---
+ 1
+ 2
+ 3
+ 2
+(4 rows)
+
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH 'x' DEFAULT ascii(_path) - 54);
+ a
+----
+ 11
+ 12
+ 13
+ 14
+(4 rows)
+
+-- XPath result can be boolean or number too
+SELECT * FROM XMLTABLE('*' PASSING '<a>a</a>' COLUMNS a xml PATH '.', b text PATH '.', c text PATH '"hi"', d boolean PATH '. = "a"', e integer PATH 'string-length(.)');
+ a | b | c | d | e
+----------+---+----+---+---
+ <a>a</a> | a | hi | t | 1
+(1 row)
+
+\x
+SELECT * FROM XMLTABLE('*' PASSING '<e>pre<!--c1--><?pi arg?><![CDATA[&ent1]]><n2>&amp;deep</n2>post</e>' COLUMNS x xml PATH 'node()', y xml PATH '/');
+-[ RECORD 1 ]-----------------------------------------------------------
+x | pre<!--c1--><?pi arg?><![CDATA[&ent1]]><n2>&amp;deep</n2>post
+y | <e>pre<!--c1--><?pi arg?><![CDATA[&ent1]]><n2>&amp;deep</n2>post</e>+
+ |
+
+\x
+SELECT * FROM XMLTABLE('.' PASSING XMLELEMENT(NAME a) columns a varchar(20) PATH '"<foo/>"', b xml PATH '"<foo/>"');
+ a | b
+--------+--------------
+ <foo/> | &lt;foo/&gt;
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/original/cases/xml.sql b/ydb/library/yql/tests/postgresql/original/cases/xml.sql
new file mode 100644
index 0000000000..e3f90db4d5
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/xml.sql
@@ -0,0 +1,619 @@
+CREATE TABLE xmltest (
+ id int,
+ data xml
+);
+
+INSERT INTO xmltest VALUES (1, '<value>one</value>');
+INSERT INTO xmltest VALUES (2, '<value>two</value>');
+INSERT INTO xmltest VALUES (3, '<wrong');
+
+SELECT * FROM xmltest;
+
+
+SELECT xmlcomment('test');
+SELECT xmlcomment('-test');
+SELECT xmlcomment('test-');
+SELECT xmlcomment('--test');
+SELECT xmlcomment('te st');
+
+
+SELECT xmlconcat(xmlcomment('hello'),
+ xmlelement(NAME qux, 'foo'),
+ xmlcomment('world'));
+
+SELECT xmlconcat('hello', 'you');
+SELECT xmlconcat(1, 2);
+SELECT xmlconcat('bad', '<syntax');
+SELECT xmlconcat('<foo/>', NULL, '<?xml version="1.1" standalone="no"?><bar/>');
+SELECT xmlconcat('<?xml version="1.1"?><foo/>', NULL, '<?xml version="1.1" standalone="no"?><bar/>');
+SELECT xmlconcat(NULL);
+SELECT xmlconcat(NULL, NULL);
+
+
+SELECT xmlelement(name element,
+ xmlattributes (1 as one, 'deuce' as two),
+ 'content');
+
+SELECT xmlelement(name element,
+ xmlattributes ('unnamed and wrong'));
+
+SELECT xmlelement(name element, xmlelement(name nested, 'stuff'));
+
+SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
+
+SELECT xmlelement(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a));
+
+SELECT xmlelement(name num, 37);
+SELECT xmlelement(name foo, text 'bar');
+SELECT xmlelement(name foo, xml 'bar');
+SELECT xmlelement(name foo, text 'b<a/>r');
+SELECT xmlelement(name foo, xml 'b<a/>r');
+SELECT xmlelement(name foo, array[1, 2, 3]);
+SET xmlbinary TO base64;
+SELECT xmlelement(name foo, bytea 'bar');
+SET xmlbinary TO hex;
+SELECT xmlelement(name foo, bytea 'bar');
+
+SELECT xmlelement(name foo, xmlattributes(true as bar));
+SELECT xmlelement(name foo, xmlattributes('2009-04-09 00:24:37'::timestamp as bar));
+SELECT xmlelement(name foo, xmlattributes('infinity'::timestamp as bar));
+SELECT xmlelement(name foo, xmlattributes('<>&"''' as funny, xml 'b<a/>r' as funnier));
+
+
+SELECT xmlparse(content '');
+SELECT xmlparse(content ' ');
+SELECT xmlparse(content 'abc');
+SELECT xmlparse(content '<abc>x</abc>');
+SELECT xmlparse(content '<invalidentity>&</invalidentity>');
+SELECT xmlparse(content '<undefinedentity>&idontexist;</undefinedentity>');
+SELECT xmlparse(content '<invalidns xmlns=''&lt;''/>');
+SELECT xmlparse(content '<relativens xmlns=''relative''/>');
+SELECT xmlparse(content '<twoerrors>&idontexist;</unbalanced>');
+SELECT xmlparse(content '<nosuchprefix:tag/>');
+
+SELECT xmlparse(document ' ');
+SELECT xmlparse(document 'abc');
+SELECT xmlparse(document '<abc>x</abc>');
+SELECT xmlparse(document '<invalidentity>&</abc>');
+SELECT xmlparse(document '<undefinedentity>&idontexist;</abc>');
+SELECT xmlparse(document '<invalidns xmlns=''&lt;''/>');
+SELECT xmlparse(document '<relativens xmlns=''relative''/>');
+SELECT xmlparse(document '<twoerrors>&idontexist;</unbalanced>');
+SELECT xmlparse(document '<nosuchprefix:tag/>');
+
+
+SELECT xmlpi(name foo);
+SELECT xmlpi(name xml);
+SELECT xmlpi(name xmlstuff);
+SELECT xmlpi(name foo, 'bar');
+SELECT xmlpi(name foo, 'in?>valid');
+SELECT xmlpi(name foo, null);
+SELECT xmlpi(name xml, null);
+SELECT xmlpi(name xmlstuff, null);
+SELECT xmlpi(name "xml-stylesheet", 'href="mystyle.css" type="text/css"');
+SELECT xmlpi(name foo, ' bar');
+
+
+SELECT xmlroot(xml '<foo/>', version no value, standalone no value);
+SELECT xmlroot(xml '<foo/>', version '2.0');
+SELECT xmlroot(xml '<foo/>', version no value, standalone yes);
+SELECT xmlroot(xml '<?xml version="1.1"?><foo/>', version no value, standalone yes);
+SELECT xmlroot(xmlroot(xml '<foo/>', version '1.0'), version '1.1', standalone no);
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value, standalone no);
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value, standalone no value);
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value);
+
+
+SELECT xmlroot (
+ xmlelement (
+ name gazonk,
+ xmlattributes (
+ 'val' AS name,
+ 1 + 1 AS num
+ ),
+ xmlelement (
+ NAME qux,
+ 'foo'
+ )
+ ),
+ version '1.0',
+ standalone yes
+);
+
+
+SELECT xmlserialize(content data as character varying(20)) FROM xmltest;
+SELECT xmlserialize(content 'good' as char(10));
+SELECT xmlserialize(document 'bad' as text);
+
+
+SELECT xml '<foo>bar</foo>' IS DOCUMENT;
+SELECT xml '<foo>bar</foo><bar>foo</bar>' IS DOCUMENT;
+SELECT xml '<abc/>' IS NOT DOCUMENT;
+SELECT xml 'abc' IS NOT DOCUMENT;
+SELECT '<>' IS NOT DOCUMENT;
+
+
+SELECT xmlagg(data) FROM xmltest;
+SELECT xmlagg(data) FROM xmltest WHERE id > 10;
+SELECT xmlelement(name employees, xmlagg(xmlelement(name name, name))) FROM emp;
+
+
+-- Check mapping SQL identifier to XML name
+
+SELECT xmlpi(name ":::_xml_abc135.%-&_");
+SELECT xmlpi(name "123");
+
+
+PREPARE foo (xml) AS SELECT xmlconcat('<foo/>', $1);
+
+SET XML OPTION DOCUMENT;
+EXECUTE foo ('<bar/>');
+EXECUTE foo ('bad');
+SELECT xml '<!DOCTYPE a><a/><b/>';
+
+SET XML OPTION CONTENT;
+EXECUTE foo ('<bar/>');
+EXECUTE foo ('good');
+SELECT xml '<!-- in SQL:2006+ a doc is content too--> <?y z?> <!DOCTYPE a><a/>';
+SELECT xml '<?xml version="1.0"?> <!-- hi--> <!DOCTYPE a><a/>';
+SELECT xml '<!DOCTYPE a><a/>';
+SELECT xml '<!-- hi--> oops <!DOCTYPE a><a/>';
+SELECT xml '<!-- hi--> <oops/> <!DOCTYPE a><a/>';
+SELECT xml '<!DOCTYPE a><a/><b/>';
+
+
+-- Test backwards parsing
+
+CREATE VIEW xmlview1 AS SELECT xmlcomment('test');
+CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you');
+CREATE VIEW xmlview3 AS SELECT xmlelement(name element, xmlattributes (1 as ":one:", 'deuce' as two), 'content&');
+CREATE VIEW xmlview4 AS SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
+CREATE VIEW xmlview5 AS SELECT xmlparse(content '<abc>x</abc>');
+CREATE VIEW xmlview6 AS SELECT xmlpi(name foo, 'bar');
+CREATE VIEW xmlview7 AS SELECT xmlroot(xml '<foo/>', version no value, standalone yes);
+CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10));
+CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text);
+
+SELECT table_name, view_definition FROM information_schema.views
+ WHERE table_name LIKE 'xmlview%' ORDER BY 1;
+
+-- Text XPath expressions evaluation
+
+SELECT xpath('/value', data) FROM xmltest;
+SELECT xpath(NULL, NULL) IS NULL FROM xmltest;
+SELECT xpath('', '<!-- error -->');
+SELECT xpath('//text()', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>');
+SELECT xpath('//loc:piece/@id', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+SELECT xpath('//loc:piece', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+SELECT xpath('//loc:piece', '<local:data xmlns:local="http://127.0.0.1" xmlns="http://127.0.0.2"><local:piece id="1"><internal>number one</internal><internal2/></local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+SELECT xpath('//b', '<a>one <b>two</b> three <b>etc</b></a>');
+SELECT xpath('//text()', '<root>&lt;</root>');
+SELECT xpath('//@value', '<root value="&lt;"/>');
+SELECT xpath('''<<invalid>>''', '<root/>');
+SELECT xpath('count(//*)', '<root><sub/><sub/></root>');
+SELECT xpath('count(//*)=0', '<root><sub/><sub/></root>');
+SELECT xpath('count(//*)=3', '<root><sub/><sub/></root>');
+SELECT xpath('name(/*)', '<root><sub/><sub/></root>');
+SELECT xpath('/nosuchtag', '<root/>');
+SELECT xpath('root', '<root/>');
+
+-- Round-trip non-ASCII data through xpath().
+DO $$
+DECLARE
+ xml_declaration text := '<?xml version="1.0" encoding="ISO-8859-1"?>';
+ degree_symbol text;
+ res xml[];
+BEGIN
+ -- Per the documentation, except when the server encoding is UTF8, xpath()
+ -- may not work on non-ASCII data. The untranslatable_character and
+ -- undefined_function traps below, currently dead code, will become relevant
+ -- if we remove this limitation.
+ IF current_setting('server_encoding') <> 'UTF8' THEN
+ RAISE LOG 'skip: encoding % unsupported for xpath',
+ current_setting('server_encoding');
+ RETURN;
+ END IF;
+
+ degree_symbol := convert_from('\xc2b0', 'UTF8');
+ res := xpath('text()', (xml_declaration ||
+ '<x>' || degree_symbol || '</x>')::xml);
+ IF degree_symbol <> res[1]::text THEN
+ RAISE 'expected % (%), got % (%)',
+ degree_symbol, convert_to(degree_symbol, 'UTF8'),
+ res[1], convert_to(res[1]::text, 'UTF8');
+ END IF;
+EXCEPTION
+ -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8"
+ WHEN untranslatable_character
+ -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist
+ OR undefined_function
+ -- unsupported XML feature
+ OR feature_not_supported THEN
+ RAISE LOG 'skip: %', SQLERRM;
+END
+$$;
+
+-- Test xmlexists and xpath_exists
+SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF '<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>');
+SELECT xmlexists('//town[text() = ''Cwmbran'']' PASSING BY REF '<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>');
+SELECT xmlexists('count(/nosuchtag)' PASSING BY REF '<root/>');
+SELECT xpath_exists('//town[text() = ''Toronto'']','<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>'::xml);
+SELECT xpath_exists('//town[text() = ''Cwmbran'']','<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>'::xml);
+SELECT xpath_exists('count(/nosuchtag)', '<root/>'::xml);
+
+INSERT INTO xmltest VALUES (4, '<menu><beers><name>Budvar</name><cost>free</cost><name>Carling</name><cost>lots</cost></beers></menu>'::xml);
+INSERT INTO xmltest VALUES (5, '<menu><beers><name>Molson</name><cost>free</cost><name>Carling</name><cost>lots</cost></beers></menu>'::xml);
+INSERT INTO xmltest VALUES (6, '<myns:menu xmlns:myns="http://myns.com"><myns:beers><myns:name>Budvar</myns:name><myns:cost>free</myns:cost><myns:name>Carling</myns:name><myns:cost>lots</myns:cost></myns:beers></myns:menu>'::xml);
+INSERT INTO xmltest VALUES (7, '<myns:menu xmlns:myns="http://myns.com"><myns:beers><myns:name>Molson</myns:name><myns:cost>free</myns:cost><myns:name>Carling</myns:name><myns:cost>lots</myns:cost></myns:beers></myns:menu>'::xml);
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING data);
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING BY REF data BY REF);
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers' PASSING BY REF data);
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers/name[text() = ''Molson'']' PASSING BY REF data);
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beer',data);
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers',data);
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers/name[text() = ''Molson'']',data);
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beer',data,ARRAY[ARRAY['myns','http://myns.com']]);
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers',data,ARRAY[ARRAY['myns','http://myns.com']]);
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers/myns:name[text() = ''Molson'']',data,ARRAY[ARRAY['myns','http://myns.com']]);
+
+CREATE TABLE query ( expr TEXT );
+INSERT INTO query VALUES ('/menu/beers/cost[text() = ''lots'']');
+SELECT COUNT(id) FROM xmltest, query WHERE xmlexists(expr PASSING BY REF data);
+
+-- Test xml_is_well_formed and variants
+
+SELECT xml_is_well_formed_document('<foo>bar</foo>');
+SELECT xml_is_well_formed_document('abc');
+SELECT xml_is_well_formed_content('<foo>bar</foo>');
+SELECT xml_is_well_formed_content('abc');
+
+SET xmloption TO DOCUMENT;
+SELECT xml_is_well_formed('abc');
+SELECT xml_is_well_formed('<>');
+SELECT xml_is_well_formed('<abc/>');
+SELECT xml_is_well_formed('<foo>bar</foo>');
+SELECT xml_is_well_formed('<foo>bar</foo');
+SELECT xml_is_well_formed('<foo><bar>baz</foo>');
+SELECT xml_is_well_formed('<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>');
+SELECT xml_is_well_formed('<pg:foo xmlns:pg="http://postgresql.org/stuff">bar</my:foo>');
+SELECT xml_is_well_formed('<pg:foo xmlns:pg="http://postgresql.org/stuff">bar</pg:foo>');
+SELECT xml_is_well_formed('<invalidentity>&</abc>');
+SELECT xml_is_well_formed('<undefinedentity>&idontexist;</abc>');
+SELECT xml_is_well_formed('<invalidns xmlns=''&lt;''/>');
+SELECT xml_is_well_formed('<relativens xmlns=''relative''/>');
+SELECT xml_is_well_formed('<twoerrors>&idontexist;</unbalanced>');
+
+SET xmloption TO CONTENT;
+SELECT xml_is_well_formed('abc');
+
+-- Since xpath() deals with namespaces, it's a bit stricter about
+-- what's well-formed and what's not. If we don't obey these rules
+-- (i.e. ignore namespace-related errors from libxml), xpath()
+-- fails in subtle ways. The following would for example produce
+-- the xml value
+-- <invalidns xmlns='<'/>
+-- which is invalid because '<' may not appear un-escaped in
+-- attribute values.
+-- Since different libxml versions emit slightly different
+-- error messages, we suppress the DETAIL in this test.
+\set VERBOSITY terse
+SELECT xpath('/*', '<invalidns xmlns=''&lt;''/>');
+\set VERBOSITY default
+
+-- Again, the XML isn't well-formed for namespace purposes
+SELECT xpath('/*', '<nosuchprefix:tag/>');
+
+-- XPath deprecates relative namespaces, but they're not supposed to
+-- throw an error, only a warning.
+SELECT xpath('/*', '<relativens xmlns=''relative''/>');
+
+-- External entity references should not leak filesystem information.
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/passwd">]><foo>&c;</foo>');
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/no.such.file">]><foo>&c;</foo>');
+-- This might or might not load the requested DTD, but it mustn't throw error.
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd"><chapter>&nbsp;</chapter>');
+
+-- XMLPATH tests
+CREATE TABLE xmldata(data xml);
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="1">
+ <COUNTRY_ID>AU</COUNTRY_ID>
+ <COUNTRY_NAME>Australia</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="2">
+ <COUNTRY_ID>CN</COUNTRY_ID>
+ <COUNTRY_NAME>China</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="3">
+ <COUNTRY_ID>HK</COUNTRY_ID>
+ <COUNTRY_NAME>HongKong</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="4">
+ <COUNTRY_ID>IN</COUNTRY_ID>
+ <COUNTRY_NAME>India</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="5">
+ <COUNTRY_ID>JP</COUNTRY_ID>
+ <COUNTRY_NAME>Japan</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID><PREMIER_NAME>Sinzo Abe</PREMIER_NAME>
+</ROW>
+<ROW id="6">
+ <COUNTRY_ID>SG</COUNTRY_ID>
+ <COUNTRY_NAME>Singapore</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID><SIZE unit="km">791</SIZE>
+</ROW>
+</ROWS>');
+
+-- XMLTABLE with columns
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME/text()' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+
+CREATE VIEW xmltableview1 AS SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME/text()' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+
+SELECT * FROM xmltableview1;
+
+\sv xmltableview1
+
+EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1;
+EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1;
+
+-- errors
+SELECT * FROM XMLTABLE (ROW () PASSING null COLUMNS v1 timestamp) AS f (v1, v2);
+
+-- XMLNAMESPACES tests
+SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
+ '/zz:rows/zz:row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'zz:a');
+
+CREATE VIEW xmltableview2 AS SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
+ '/zz:rows/zz:row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'zz:a');
+
+SELECT * FROM xmltableview2;
+
+SELECT * FROM XMLTABLE(XMLNAMESPACES(DEFAULT 'http://x.y'),
+ '/rows/row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'a');
+
+SELECT * FROM XMLTABLE('.'
+ PASSING '<foo/>'
+ COLUMNS a text PATH 'foo/namespace::node()');
+
+-- used in prepare statements
+PREPARE pp AS
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+
+EXECUTE pp;
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int);
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY, "COUNTRY_NAME" text, "REGION_ID" int);
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int);
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id');
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY);
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH '.');
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH './*');
+
+SELECT * FROM xmltable('/root' passing '<root><element>a1a<!-- aaaa -->a2a<?aaaaa?> <!--z--> bbbb<x>xxx</x>cccc</element></root>' COLUMNS element text);
+SELECT * FROM xmltable('/root' passing '<root><element>a1a<!-- aaaa -->a2a<?aaaaa?> <!--z--> bbbb<x>xxx</x>cccc</element></root>' COLUMNS element text PATH 'element/text()'); -- should fail
+
+-- CDATA test
+select * from xmltable('d/r' passing '<d><r><c><![CDATA[<hello> &"<>!<a>foo</a>]]></c></r><r><c>2</c></r></d>' columns c text);
+
+-- XML builtin entities
+SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</ent></a><a><ent>&quot;</ent></a><a><ent>&amp;</ent></a><a><ent>&lt;</ent></a><a><ent>&gt;</ent></a></x>' COLUMNS ent text);
+SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</ent></a><a><ent>&quot;</ent></a><a><ent>&amp;</ent></a><a><ent>&lt;</ent></a><a><ent>&gt;</ent></a></x>' COLUMNS ent xml);
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+
+-- test qual
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan';
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan';
+
+-- should to work with more data
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="10">
+ <COUNTRY_ID>CZ</COUNTRY_ID>
+ <COUNTRY_NAME>Czech Republic</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID><PREMIER_NAME>Milos Zeman</PREMIER_NAME>
+</ROW>
+<ROW id="11">
+ <COUNTRY_ID>DE</COUNTRY_ID>
+ <COUNTRY_NAME>Germany</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID>
+</ROW>
+<ROW id="12">
+ <COUNTRY_ID>FR</COUNTRY_ID>
+ <COUNTRY_NAME>France</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID>
+</ROW>
+</ROWS>');
+
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="20">
+ <COUNTRY_ID>EG</COUNTRY_ID>
+ <COUNTRY_NAME>Egypt</COUNTRY_NAME>
+ <REGION_ID>1</REGION_ID>
+</ROW>
+<ROW id="21">
+ <COUNTRY_ID>SD</COUNTRY_ID>
+ <COUNTRY_NAME>Sudan</COUNTRY_NAME>
+ <REGION_ID>1</REGION_ID>
+</ROW>
+</ROWS>');
+
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified')
+ WHERE region_id = 2;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified')
+ WHERE region_id = 2;
+
+-- should fail, NULL value
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE' NOT NULL,
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+
+-- if all is ok, then result is empty
+-- one line xml test
+WITH
+ x AS (SELECT proname, proowner, procost::numeric, pronargs,
+ array_to_string(proargnames,',') as proargnames,
+ case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes
+ FROM pg_proc WHERE proname = 'f_leak'),
+ y AS (SELECT xmlelement(name proc,
+ xmlforest(proname, proowner,
+ procost, pronargs,
+ proargnames, proargtypes)) as proc
+ FROM x),
+ z AS (SELECT xmltable.*
+ FROM y,
+ LATERAL xmltable('/proc' PASSING proc
+ COLUMNS proname name,
+ proowner oid,
+ procost float,
+ pronargs int,
+ proargnames text,
+ proargtypes text))
+ SELECT * FROM z
+ EXCEPT SELECT * FROM x;
+
+-- multi line xml test, result should be empty too
+WITH
+ x AS (SELECT proname, proowner, procost::numeric, pronargs,
+ array_to_string(proargnames,',') as proargnames,
+ case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes
+ FROM pg_proc),
+ y AS (SELECT xmlelement(name data,
+ xmlagg(xmlelement(name proc,
+ xmlforest(proname, proowner, procost,
+ pronargs, proargnames, proargtypes)))) as doc
+ FROM x),
+ z AS (SELECT xmltable.*
+ FROM y,
+ LATERAL xmltable('/data/proc' PASSING doc
+ COLUMNS proname name,
+ proowner oid,
+ procost float,
+ pronargs int,
+ proargnames text,
+ proargtypes text))
+ SELECT * FROM z
+ EXCEPT SELECT * FROM x;
+
+CREATE TABLE xmltest2(x xml, _path text);
+
+INSERT INTO xmltest2 VALUES('<d><r><ac>1</ac></r></d>', 'A');
+INSERT INTO xmltest2 VALUES('<d><r><bc>2</bc></r></d>', 'B');
+INSERT INTO xmltest2 VALUES('<d><r><cc>3</cc></r></d>', 'C');
+INSERT INTO xmltest2 VALUES('<d><r><dc>2</dc></r></d>', 'D');
+
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable('/d/r' PASSING x COLUMNS a int PATH '' || lower(_path) || 'c');
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH '.');
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH 'x' DEFAULT ascii(_path) - 54);
+
+-- XPath result can be boolean or number too
+SELECT * FROM XMLTABLE('*' PASSING '<a>a</a>' COLUMNS a xml PATH '.', b text PATH '.', c text PATH '"hi"', d boolean PATH '. = "a"', e integer PATH 'string-length(.)');
+\x
+SELECT * FROM XMLTABLE('*' PASSING '<e>pre<!--c1--><?pi arg?><![CDATA[&ent1]]><n2>&amp;deep</n2>post</e>' COLUMNS x xml PATH 'node()', y xml PATH '/');
+\x
+
+SELECT * FROM XMLTABLE('.' PASSING XMLELEMENT(NAME a) columns a varchar(20) PATH '"<foo/>"', b xml PATH '"<foo/>"');
diff --git a/ydb/library/yql/tests/postgresql/original/cases/xml_1.out b/ydb/library/yql/tests/postgresql/original/cases/xml_1.out
new file mode 100644
index 0000000000..897ed382f1
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/xml_1.out
@@ -0,0 +1,1417 @@
+CREATE TABLE xmltest (
+ id int,
+ data xml
+);
+INSERT INTO xmltest VALUES (1, '<value>one</value>');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest VALUES (1, '<value>one</value>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmltest VALUES (2, '<value>two</value>');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest VALUES (2, '<value>two</value>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmltest VALUES (3, '<wrong');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest VALUES (3, '<wrong');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT * FROM xmltest;
+ id | data
+----+------
+(0 rows)
+
+SELECT xmlcomment('test');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlcomment('-test');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlcomment('test-');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlcomment('--test');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlcomment('te st');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlconcat(xmlcomment('hello'),
+ xmlelement(NAME qux, 'foo'),
+ xmlcomment('world'));
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlconcat('hello', 'you');
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlconcat('hello', 'you');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlconcat(1, 2);
+ERROR: argument of XMLCONCAT must be type xml, not type integer
+LINE 1: SELECT xmlconcat(1, 2);
+ ^
+SELECT xmlconcat('bad', '<syntax');
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlconcat('bad', '<syntax');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlconcat('<foo/>', NULL, '<?xml version="1.1" standalone="no"?><bar/>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlconcat('<foo/>', NULL, '<?xml version="1.1" standa...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlconcat('<?xml version="1.1"?><foo/>', NULL, '<?xml version="1.1" standalone="no"?><bar/>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlconcat('<?xml version="1.1"?><foo/>', NULL, '<?xml...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlconcat(NULL);
+ xmlconcat
+-----------
+
+(1 row)
+
+SELECT xmlconcat(NULL, NULL);
+ xmlconcat
+-----------
+
+(1 row)
+
+SELECT xmlelement(name element,
+ xmlattributes (1 as one, 'deuce' as two),
+ 'content');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name element,
+ xmlattributes ('unnamed and wrong'));
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name element, xmlelement(name nested, 'stuff'));
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a));
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name num, 37);
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name foo, text 'bar');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name foo, xml 'bar');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name foo, text 'b<a/>r');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name foo, xml 'b<a/>r');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name foo, array[1, 2, 3]);
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SET xmlbinary TO base64;
+SELECT xmlelement(name foo, bytea 'bar');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SET xmlbinary TO hex;
+SELECT xmlelement(name foo, bytea 'bar');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name foo, xmlattributes(true as bar));
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name foo, xmlattributes('2009-04-09 00:24:37'::timestamp as bar));
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name foo, xmlattributes('infinity'::timestamp as bar));
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlelement(name foo, xmlattributes('<>&"''' as funny, xml 'b<a/>r' as funnier));
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content '');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content ' ');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content 'abc');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content '<abc>x</abc>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content '<invalidentity>&</invalidentity>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content '<undefinedentity>&idontexist;</undefinedentity>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content '<invalidns xmlns=''&lt;''/>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content '<relativens xmlns=''relative''/>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content '<twoerrors>&idontexist;</unbalanced>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(content '<nosuchprefix:tag/>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(document ' ');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(document 'abc');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(document '<abc>x</abc>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(document '<invalidentity>&</abc>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(document '<undefinedentity>&idontexist;</abc>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(document '<invalidns xmlns=''&lt;''/>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(document '<relativens xmlns=''relative''/>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(document '<twoerrors>&idontexist;</unbalanced>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlparse(document '<nosuchprefix:tag/>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name foo);
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name xml);
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name xmlstuff);
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name foo, 'bar');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name foo, 'in?>valid');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name foo, null);
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name xml, null);
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name xmlstuff, null);
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name "xml-stylesheet", 'href="mystyle.css" type="text/css"');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name foo, ' bar');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlroot(xml '<foo/>', version no value, standalone no value);
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlroot(xml '<foo/>', version no value, standalone no...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlroot(xml '<foo/>', version '2.0');
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlroot(xml '<foo/>', version '2.0');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlroot(xml '<foo/>', version no value, standalone yes);
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlroot(xml '<foo/>', version no value, standalone ye...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlroot(xml '<?xml version="1.1"?><foo/>', version no value, standalone yes);
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlroot(xml '<?xml version="1.1"?><foo/>', version no...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlroot(xmlroot(xml '<foo/>', version '1.0'), version '1.1', standalone no);
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlroot(xmlroot(xml '<foo/>', version '1.0'), version...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value, standalone no);
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value, standalone no value);
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value);
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlroot (
+ xmlelement (
+ name gazonk,
+ xmlattributes (
+ 'val' AS name,
+ 1 + 1 AS num
+ ),
+ xmlelement (
+ NAME qux,
+ 'foo'
+ )
+ ),
+ version '1.0',
+ standalone yes
+);
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlserialize(content data as character varying(20)) FROM xmltest;
+ xmlserialize
+--------------
+(0 rows)
+
+SELECT xmlserialize(content 'good' as char(10));
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlserialize(content 'good' as char(10));
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlserialize(document 'bad' as text);
+ERROR: unsupported XML feature
+LINE 1: SELECT xmlserialize(document 'bad' as text);
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml '<foo>bar</foo>' IS DOCUMENT;
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<foo>bar</foo>' IS DOCUMENT;
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml '<foo>bar</foo><bar>foo</bar>' IS DOCUMENT;
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<foo>bar</foo><bar>foo</bar>' IS DOCUMENT;
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml '<abc/>' IS NOT DOCUMENT;
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<abc/>' IS NOT DOCUMENT;
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml 'abc' IS NOT DOCUMENT;
+ERROR: unsupported XML feature
+LINE 1: SELECT xml 'abc' IS NOT DOCUMENT;
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT '<>' IS NOT DOCUMENT;
+ERROR: unsupported XML feature
+LINE 1: SELECT '<>' IS NOT DOCUMENT;
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlagg(data) FROM xmltest;
+ xmlagg
+--------
+
+(1 row)
+
+SELECT xmlagg(data) FROM xmltest WHERE id > 10;
+ xmlagg
+--------
+
+(1 row)
+
+SELECT xmlelement(name employees, xmlagg(xmlelement(name name, name))) FROM emp;
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- Check mapping SQL identifier to XML name
+SELECT xmlpi(name ":::_xml_abc135.%-&_");
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlpi(name "123");
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+PREPARE foo (xml) AS SELECT xmlconcat('<foo/>', $1);
+ERROR: unsupported XML feature
+LINE 1: PREPARE foo (xml) AS SELECT xmlconcat('<foo/>', $1);
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SET XML OPTION DOCUMENT;
+EXECUTE foo ('<bar/>');
+ERROR: prepared statement "foo" does not exist
+EXECUTE foo ('bad');
+ERROR: prepared statement "foo" does not exist
+SELECT xml '<!DOCTYPE a><a/><b/>';
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<!DOCTYPE a><a/><b/>';
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SET XML OPTION CONTENT;
+EXECUTE foo ('<bar/>');
+ERROR: prepared statement "foo" does not exist
+EXECUTE foo ('good');
+ERROR: prepared statement "foo" does not exist
+SELECT xml '<!-- in SQL:2006+ a doc is content too--> <?y z?> <!DOCTYPE a><a/>';
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<!-- in SQL:2006+ a doc is content too--> <?y z?...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml '<?xml version="1.0"?> <!-- hi--> <!DOCTYPE a><a/>';
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<?xml version="1.0"?> <!-- hi--> <!DOCTYPE a><a/...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml '<!DOCTYPE a><a/>';
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<!DOCTYPE a><a/>';
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml '<!-- hi--> oops <!DOCTYPE a><a/>';
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<!-- hi--> oops <!DOCTYPE a><a/>';
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml '<!-- hi--> <oops/> <!DOCTYPE a><a/>';
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<!-- hi--> <oops/> <!DOCTYPE a><a/>';
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml '<!DOCTYPE a><a/><b/>';
+ERROR: unsupported XML feature
+LINE 1: SELECT xml '<!DOCTYPE a><a/><b/>';
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- Test backwards parsing
+CREATE VIEW xmlview1 AS SELECT xmlcomment('test');
+CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you');
+ERROR: unsupported XML feature
+LINE 1: CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+CREATE VIEW xmlview3 AS SELECT xmlelement(name element, xmlattributes (1 as ":one:", 'deuce' as two), 'content&');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+CREATE VIEW xmlview4 AS SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+CREATE VIEW xmlview5 AS SELECT xmlparse(content '<abc>x</abc>');
+CREATE VIEW xmlview6 AS SELECT xmlpi(name foo, 'bar');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+CREATE VIEW xmlview7 AS SELECT xmlroot(xml '<foo/>', version no value, standalone yes);
+ERROR: unsupported XML feature
+LINE 1: CREATE VIEW xmlview7 AS SELECT xmlroot(xml '<foo/>', version...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10));
+ERROR: unsupported XML feature
+LINE 1: ...EATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as ...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text);
+ERROR: unsupported XML feature
+LINE 1: ...EATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as ...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT table_name, view_definition FROM information_schema.views
+ WHERE table_name LIKE 'xmlview%' ORDER BY 1;
+ table_name | view_definition
+------------+--------------------------------------------------------------------------------
+ xmlview1 | SELECT xmlcomment('test'::text) AS xmlcomment;
+ xmlview5 | SELECT XMLPARSE(CONTENT '<abc>x</abc>'::text STRIP WHITESPACE) AS "xmlparse";
+(2 rows)
+
+-- Text XPath expressions evaluation
+SELECT xpath('/value', data) FROM xmltest;
+ xpath
+-------
+(0 rows)
+
+SELECT xpath(NULL, NULL) IS NULL FROM xmltest;
+ ?column?
+----------
+(0 rows)
+
+SELECT xpath('', '<!-- error -->');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('', '<!-- error -->');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('//text()', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('//text()', '<local:data xmlns:local="http://12...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('//loc:piece/@id', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('//loc:piece/@id', '<local:data xmlns:local="ht...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('//loc:piece', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('//loc:piece', '<local:data xmlns:local="http:/...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('//loc:piece', '<local:data xmlns:local="http://127.0.0.1" xmlns="http://127.0.0.2"><local:piece id="1"><internal>number one</internal><internal2/></local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('//loc:piece', '<local:data xmlns:local="http:/...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('//b', '<a>one <b>two</b> three <b>etc</b></a>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('//b', '<a>one <b>two</b> three <b>etc</b></a>'...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('//text()', '<root>&lt;</root>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('//text()', '<root>&lt;</root>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('//@value', '<root value="&lt;"/>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('//@value', '<root value="&lt;"/>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('''<<invalid>>''', '<root/>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('''<<invalid>>''', '<root/>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('count(//*)', '<root><sub/><sub/></root>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('count(//*)', '<root><sub/><sub/></root>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('count(//*)=0', '<root><sub/><sub/></root>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('count(//*)=0', '<root><sub/><sub/></root>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('count(//*)=3', '<root><sub/><sub/></root>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('count(//*)=3', '<root><sub/><sub/></root>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('name(/*)', '<root><sub/><sub/></root>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('name(/*)', '<root><sub/><sub/></root>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('/nosuchtag', '<root/>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('/nosuchtag', '<root/>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath('root', '<root/>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('root', '<root/>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- Round-trip non-ASCII data through xpath().
+DO $$
+DECLARE
+ xml_declaration text := '<?xml version="1.0" encoding="ISO-8859-1"?>';
+ degree_symbol text;
+ res xml[];
+BEGIN
+ -- Per the documentation, except when the server encoding is UTF8, xpath()
+ -- may not work on non-ASCII data. The untranslatable_character and
+ -- undefined_function traps below, currently dead code, will become relevant
+ -- if we remove this limitation.
+ IF current_setting('server_encoding') <> 'UTF8' THEN
+ RAISE LOG 'skip: encoding % unsupported for xpath',
+ current_setting('server_encoding');
+ RETURN;
+ END IF;
+
+ degree_symbol := convert_from('\xc2b0', 'UTF8');
+ res := xpath('text()', (xml_declaration ||
+ '<x>' || degree_symbol || '</x>')::xml);
+ IF degree_symbol <> res[1]::text THEN
+ RAISE 'expected % (%), got % (%)',
+ degree_symbol, convert_to(degree_symbol, 'UTF8'),
+ res[1], convert_to(res[1]::text, 'UTF8');
+ END IF;
+EXCEPTION
+ -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8"
+ WHEN untranslatable_character
+ -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist
+ OR undefined_function
+ -- unsupported XML feature
+ OR feature_not_supported THEN
+ RAISE LOG 'skip: %', SQLERRM;
+END
+$$;
+-- Test xmlexists and xpath_exists
+SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF '<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>');
+ERROR: unsupported XML feature
+LINE 1: ...sts('//town[text() = ''Toronto'']' PASSING BY REF '<towns><t...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlexists('//town[text() = ''Cwmbran'']' PASSING BY REF '<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>');
+ERROR: unsupported XML feature
+LINE 1: ...sts('//town[text() = ''Cwmbran'']' PASSING BY REF '<towns><t...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmlexists('count(/nosuchtag)' PASSING BY REF '<root/>');
+ERROR: unsupported XML feature
+LINE 1: ...LECT xmlexists('count(/nosuchtag)' PASSING BY REF '<root/>')...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath_exists('//town[text() = ''Toronto'']','<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>'::xml);
+ERROR: unsupported XML feature
+LINE 1: ...ELECT xpath_exists('//town[text() = ''Toronto'']','<towns><t...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath_exists('//town[text() = ''Cwmbran'']','<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>'::xml);
+ERROR: unsupported XML feature
+LINE 1: ...ELECT xpath_exists('//town[text() = ''Cwmbran'']','<towns><t...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xpath_exists('count(/nosuchtag)', '<root/>'::xml);
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath_exists('count(/nosuchtag)', '<root/>'::xml);
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmltest VALUES (4, '<menu><beers><name>Budvar</name><cost>free</cost><name>Carling</name><cost>lots</cost></beers></menu>'::xml);
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest VALUES (4, '<menu><beers><name>Budvar</n...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmltest VALUES (5, '<menu><beers><name>Molson</name><cost>free</cost><name>Carling</name><cost>lots</cost></beers></menu>'::xml);
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest VALUES (5, '<menu><beers><name>Molson</n...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmltest VALUES (6, '<myns:menu xmlns:myns="http://myns.com"><myns:beers><myns:name>Budvar</myns:name><myns:cost>free</myns:cost><myns:name>Carling</myns:name><myns:cost>lots</myns:cost></myns:beers></myns:menu>'::xml);
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest VALUES (6, '<myns:menu xmlns:myns="http:...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmltest VALUES (7, '<myns:menu xmlns:myns="http://myns.com"><myns:beers><myns:name>Molson</myns:name><myns:cost>free</myns:cost><myns:name>Carling</myns:name><myns:cost>lots</myns:cost></myns:beers></myns:menu>'::xml);
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest VALUES (7, '<myns:menu xmlns:myns="http:...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING BY REF data BY REF);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers' PASSING BY REF data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers/name[text() = ''Molson'']' PASSING BY REF data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beer',data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers',data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers/name[text() = ''Molson'']',data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beer',data,ARRAY[ARRAY['myns','http://myns.com']]);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers',data,ARRAY[ARRAY['myns','http://myns.com']]);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers/myns:name[text() = ''Molson'']',data,ARRAY[ARRAY['myns','http://myns.com']]);
+ count
+-------
+ 0
+(1 row)
+
+CREATE TABLE query ( expr TEXT );
+INSERT INTO query VALUES ('/menu/beers/cost[text() = ''lots'']');
+SELECT COUNT(id) FROM xmltest, query WHERE xmlexists(expr PASSING BY REF data);
+ count
+-------
+ 0
+(1 row)
+
+-- Test xml_is_well_formed and variants
+SELECT xml_is_well_formed_document('<foo>bar</foo>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed_document('abc');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed_content('<foo>bar</foo>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed_content('abc');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SET xmloption TO DOCUMENT;
+SELECT xml_is_well_formed('abc');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<abc/>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<foo>bar</foo>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<foo>bar</foo');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<foo><bar>baz</foo>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<pg:foo xmlns:pg="http://postgresql.org/stuff">bar</my:foo>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<pg:foo xmlns:pg="http://postgresql.org/stuff">bar</pg:foo>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<invalidentity>&</abc>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<undefinedentity>&idontexist;</abc>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<invalidns xmlns=''&lt;''/>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<relativens xmlns=''relative''/>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xml_is_well_formed('<twoerrors>&idontexist;</unbalanced>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SET xmloption TO CONTENT;
+SELECT xml_is_well_formed('abc');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- Since xpath() deals with namespaces, it's a bit stricter about
+-- what's well-formed and what's not. If we don't obey these rules
+-- (i.e. ignore namespace-related errors from libxml), xpath()
+-- fails in subtle ways. The following would for example produce
+-- the xml value
+-- <invalidns xmlns='<'/>
+-- which is invalid because '<' may not appear un-escaped in
+-- attribute values.
+-- Since different libxml versions emit slightly different
+-- error messages, we suppress the DETAIL in this test.
+\set VERBOSITY terse
+SELECT xpath('/*', '<invalidns xmlns=''&lt;''/>');
+ERROR: unsupported XML feature at character 20
+\set VERBOSITY default
+-- Again, the XML isn't well-formed for namespace purposes
+SELECT xpath('/*', '<nosuchprefix:tag/>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('/*', '<nosuchprefix:tag/>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- XPath deprecates relative namespaces, but they're not supposed to
+-- throw an error, only a warning.
+SELECT xpath('/*', '<relativens xmlns=''relative''/>');
+ERROR: unsupported XML feature
+LINE 1: SELECT xpath('/*', '<relativens xmlns=''relative''/>');
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- External entity references should not leak filesystem information.
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/passwd">]><foo>&c;</foo>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/no.such.file">]><foo>&c;</foo>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- This might or might not load the requested DTD, but it mustn't throw error.
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd"><chapter>&nbsp;</chapter>');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- XMLPATH tests
+CREATE TABLE xmldata(data xml);
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="1">
+ <COUNTRY_ID>AU</COUNTRY_ID>
+ <COUNTRY_NAME>Australia</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="2">
+ <COUNTRY_ID>CN</COUNTRY_ID>
+ <COUNTRY_NAME>China</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="3">
+ <COUNTRY_ID>HK</COUNTRY_ID>
+ <COUNTRY_NAME>HongKong</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="4">
+ <COUNTRY_ID>IN</COUNTRY_ID>
+ <COUNTRY_NAME>India</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="5">
+ <COUNTRY_ID>JP</COUNTRY_ID>
+ <COUNTRY_NAME>Japan</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID><PREMIER_NAME>Sinzo Abe</PREMIER_NAME>
+</ROW>
+<ROW id="6">
+ <COUNTRY_ID>SG</COUNTRY_ID>
+ <COUNTRY_NAME>Singapore</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID><SIZE unit="km">791</SIZE>
+</ROW>
+</ROWS>');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmldata VALUES('<ROWS>
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- XMLTABLE with columns
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME/text()' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+--------------
+(0 rows)
+
+CREATE VIEW xmltableview1 AS SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME/text()' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+SELECT * FROM xmltableview1;
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+--------------
+(0 rows)
+
+\sv xmltableview1
+CREATE OR REPLACE VIEW public.xmltableview1 AS
+ SELECT "xmltable".id,
+ "xmltable"._id,
+ "xmltable".country_name,
+ "xmltable".country_id,
+ "xmltable".region_id,
+ "xmltable".size,
+ "xmltable".unit,
+ "xmltable".premier_name
+ FROM ( SELECT xmldata.data
+ FROM xmldata) x,
+ LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1;
+ QUERY PLAN
+-----------------------------------------
+ Nested Loop
+ -> Seq Scan on xmldata
+ -> Table Function Scan on "xmltable"
+(3 rows)
+
+EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+(7 rows)
+
+-- errors
+SELECT * FROM XMLTABLE (ROW () PASSING null COLUMNS v1 timestamp) AS f (v1, v2);
+ERROR: XMLTABLE function has 1 columns available but 2 columns specified
+-- XMLNAMESPACES tests
+SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
+ '/zz:rows/zz:row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'zz:a');
+ERROR: unsupported XML feature
+LINE 3: PASSING '<rows xmlns="http://x.y"><row...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+CREATE VIEW xmltableview2 AS SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
+ '/zz:rows/zz:row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'zz:a');
+ERROR: unsupported XML feature
+LINE 3: PASSING '<rows xmlns="http://x.y"><row...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT * FROM xmltableview2;
+ERROR: relation "xmltableview2" does not exist
+LINE 1: SELECT * FROM xmltableview2;
+ ^
+SELECT * FROM XMLTABLE(XMLNAMESPACES(DEFAULT 'http://x.y'),
+ '/rows/row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'a');
+ERROR: unsupported XML feature
+LINE 3: PASSING '<rows xmlns="http://x.y"><row...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT * FROM XMLTABLE('.'
+ PASSING '<foo/>'
+ COLUMNS a text PATH 'foo/namespace::node()');
+ERROR: unsupported XML feature
+LINE 2: PASSING '<foo/>'
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- used in prepare statements
+PREPARE pp AS
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+EXECUTE pp;
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+--------------
+(0 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int);
+ COUNTRY_NAME | REGION_ID
+--------------+-----------
+(0 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY, "COUNTRY_NAME" text, "REGION_ID" int);
+ id | COUNTRY_NAME | REGION_ID
+----+--------------+-----------
+(0 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int);
+ id | COUNTRY_NAME | REGION_ID
+----+--------------+-----------
+(0 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id');
+ id
+----
+(0 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY);
+ id
+----
+(0 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH '.');
+ id | COUNTRY_NAME | REGION_ID | rawdata
+----+--------------+-----------+---------
+(0 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH './*');
+ id | COUNTRY_NAME | REGION_ID | rawdata
+----+--------------+-----------+---------
+(0 rows)
+
+SELECT * FROM xmltable('/root' passing '<root><element>a1a<!-- aaaa -->a2a<?aaaaa?> <!--z--> bbbb<x>xxx</x>cccc</element></root>' COLUMNS element text);
+ERROR: unsupported XML feature
+LINE 1: SELECT * FROM xmltable('/root' passing '<root><element>a1a<!...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT * FROM xmltable('/root' passing '<root><element>a1a<!-- aaaa -->a2a<?aaaaa?> <!--z--> bbbb<x>xxx</x>cccc</element></root>' COLUMNS element text PATH 'element/text()'); -- should fail
+ERROR: unsupported XML feature
+LINE 1: SELECT * FROM xmltable('/root' passing '<root><element>a1a<!...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- CDATA test
+select * from xmltable('d/r' passing '<d><r><c><![CDATA[<hello> &"<>!<a>foo</a>]]></c></r><r><c>2</c></r></d>' columns c text);
+ERROR: unsupported XML feature
+LINE 1: select * from xmltable('d/r' passing '<d><r><c><![CDATA[<hel...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- XML builtin entities
+SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</ent></a><a><ent>&quot;</ent></a><a><ent>&amp;</ent></a><a><ent>&lt;</ent></a><a><ent>&gt;</ent></a></x>' COLUMNS ent text);
+ERROR: unsupported XML feature
+LINE 1: SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</en...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</ent></a><a><ent>&quot;</ent></a><a><ent>&amp;</ent></a><a><ent>&lt;</ent></a><a><ent>&gt;</ent></a></x>' COLUMNS ent xml);
+ERROR: unsupported XML feature
+LINE 1: SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</en...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+(7 rows)
+
+-- test qual
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan';
+ COUNTRY_NAME | REGION_ID
+--------------+-----------
+(0 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan';
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID"
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID"
+ Table Function Call: XMLTABLE(('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]'::text) PASSING (xmldata.data) COLUMNS "COUNTRY_NAME" text, "REGION_ID" integer)
+ Filter: ("xmltable"."COUNTRY_NAME" = 'Japan'::text)
+(8 rows)
+
+-- should to work with more data
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="10">
+ <COUNTRY_ID>CZ</COUNTRY_ID>
+ <COUNTRY_NAME>Czech Republic</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID><PREMIER_NAME>Milos Zeman</PREMIER_NAME>
+</ROW>
+<ROW id="11">
+ <COUNTRY_ID>DE</COUNTRY_ID>
+ <COUNTRY_NAME>Germany</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID>
+</ROW>
+<ROW id="12">
+ <COUNTRY_ID>FR</COUNTRY_ID>
+ <COUNTRY_NAME>France</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID>
+</ROW>
+</ROWS>');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmldata VALUES('<ROWS>
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="20">
+ <COUNTRY_ID>EG</COUNTRY_ID>
+ <COUNTRY_NAME>Egypt</COUNTRY_NAME>
+ <REGION_ID>1</REGION_ID>
+</ROW>
+<ROW id="21">
+ <COUNTRY_ID>SD</COUNTRY_ID>
+ <COUNTRY_NAME>Sudan</COUNTRY_NAME>
+ <REGION_ID>1</REGION_ID>
+</ROW>
+</ROWS>');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmldata VALUES('<ROWS>
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+--------------
+(0 rows)
+
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified')
+ WHERE region_id = 2;
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+--------------
+(0 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified')
+ WHERE region_id = 2;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+ Filter: ("xmltable".region_id = 2)
+(8 rows)
+
+-- should fail, NULL value
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE' NOT NULL,
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+--------------
+(0 rows)
+
+-- if all is ok, then result is empty
+-- one line xml test
+WITH
+ x AS (SELECT proname, proowner, procost::numeric, pronargs,
+ array_to_string(proargnames,',') as proargnames,
+ case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes
+ FROM pg_proc WHERE proname = 'f_leak'),
+ y AS (SELECT xmlelement(name proc,
+ xmlforest(proname, proowner,
+ procost, pronargs,
+ proargnames, proargtypes)) as proc
+ FROM x),
+ z AS (SELECT xmltable.*
+ FROM y,
+ LATERAL xmltable('/proc' PASSING proc
+ COLUMNS proname name,
+ proowner oid,
+ procost float,
+ pronargs int,
+ proargnames text,
+ proargtypes text))
+ SELECT * FROM z
+ EXCEPT SELECT * FROM x;
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+-- multi line xml test, result should be empty too
+WITH
+ x AS (SELECT proname, proowner, procost::numeric, pronargs,
+ array_to_string(proargnames,',') as proargnames,
+ case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes
+ FROM pg_proc),
+ y AS (SELECT xmlelement(name data,
+ xmlagg(xmlelement(name proc,
+ xmlforest(proname, proowner, procost,
+ pronargs, proargnames, proargtypes)))) as doc
+ FROM x),
+ z AS (SELECT xmltable.*
+ FROM y,
+ LATERAL xmltable('/data/proc' PASSING doc
+ COLUMNS proname name,
+ proowner oid,
+ procost float,
+ pronargs int,
+ proargnames text,
+ proargtypes text))
+ SELECT * FROM z
+ EXCEPT SELECT * FROM x;
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+CREATE TABLE xmltest2(x xml, _path text);
+INSERT INTO xmltest2 VALUES('<d><r><ac>1</ac></r></d>', 'A');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest2 VALUES('<d><r><ac>1</ac></r></d>', 'A')...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmltest2 VALUES('<d><r><bc>2</bc></r></d>', 'B');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest2 VALUES('<d><r><bc>2</bc></r></d>', 'B')...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmltest2 VALUES('<d><r><cc>3</cc></r></d>', 'C');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest2 VALUES('<d><r><cc>3</cc></r></d>', 'C')...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+INSERT INTO xmltest2 VALUES('<d><r><dc>2</dc></r></d>', 'D');
+ERROR: unsupported XML feature
+LINE 1: INSERT INTO xmltest2 VALUES('<d><r><dc>2</dc></r></d>', 'D')...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable('/d/r' PASSING x COLUMNS a int PATH '' || lower(_path) || 'c');
+ a
+---
+(0 rows)
+
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH '.');
+ a
+---
+(0 rows)
+
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH 'x' DEFAULT ascii(_path) - 54);
+ a
+---
+(0 rows)
+
+-- XPath result can be boolean or number too
+SELECT * FROM XMLTABLE('*' PASSING '<a>a</a>' COLUMNS a xml PATH '.', b text PATH '.', c text PATH '"hi"', d boolean PATH '. = "a"', e integer PATH 'string-length(.)');
+ERROR: unsupported XML feature
+LINE 1: SELECT * FROM XMLTABLE('*' PASSING '<a>a</a>' COLUMNS a xml ...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+\x
+SELECT * FROM XMLTABLE('*' PASSING '<e>pre<!--c1--><?pi arg?><![CDATA[&ent1]]><n2>&amp;deep</n2>post</e>' COLUMNS x xml PATH 'node()', y xml PATH '/');
+ERROR: unsupported XML feature
+LINE 1: SELECT * FROM XMLTABLE('*' PASSING '<e>pre<!--c1--><?pi arg?...
+ ^
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
+\x
+SELECT * FROM XMLTABLE('.' PASSING XMLELEMENT(NAME a) columns a varchar(20) PATH '"<foo/>"', b xml PATH '"<foo/>"');
+ERROR: unsupported XML feature
+DETAIL: This functionality requires the server to be built with libxml support.
+HINT: You need to rebuild PostgreSQL using --with-libxml.
diff --git a/ydb/library/yql/tests/postgresql/original/cases/xml_2.out b/ydb/library/yql/tests/postgresql/original/cases/xml_2.out
new file mode 100644
index 0000000000..493c6186e1
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/original/cases/xml_2.out
@@ -0,0 +1,1550 @@
+CREATE TABLE xmltest (
+ id int,
+ data xml
+);
+INSERT INTO xmltest VALUES (1, '<value>one</value>');
+INSERT INTO xmltest VALUES (2, '<value>two</value>');
+INSERT INTO xmltest VALUES (3, '<wrong');
+ERROR: invalid XML content
+LINE 1: INSERT INTO xmltest VALUES (3, '<wrong');
+ ^
+DETAIL: line 1: Couldn't find end of Start Tag wrong line 1
+SELECT * FROM xmltest;
+ id | data
+----+--------------------
+ 1 | <value>one</value>
+ 2 | <value>two</value>
+(2 rows)
+
+SELECT xmlcomment('test');
+ xmlcomment
+-------------
+ <!--test-->
+(1 row)
+
+SELECT xmlcomment('-test');
+ xmlcomment
+--------------
+ <!---test-->
+(1 row)
+
+SELECT xmlcomment('test-');
+ERROR: invalid XML comment
+SELECT xmlcomment('--test');
+ERROR: invalid XML comment
+SELECT xmlcomment('te st');
+ xmlcomment
+--------------
+ <!--te st-->
+(1 row)
+
+SELECT xmlconcat(xmlcomment('hello'),
+ xmlelement(NAME qux, 'foo'),
+ xmlcomment('world'));
+ xmlconcat
+----------------------------------------
+ <!--hello--><qux>foo</qux><!--world-->
+(1 row)
+
+SELECT xmlconcat('hello', 'you');
+ xmlconcat
+-----------
+ helloyou
+(1 row)
+
+SELECT xmlconcat(1, 2);
+ERROR: argument of XMLCONCAT must be type xml, not type integer
+LINE 1: SELECT xmlconcat(1, 2);
+ ^
+SELECT xmlconcat('bad', '<syntax');
+ERROR: invalid XML content
+LINE 1: SELECT xmlconcat('bad', '<syntax');
+ ^
+DETAIL: line 1: Couldn't find end of Start Tag syntax line 1
+SELECT xmlconcat('<foo/>', NULL, '<?xml version="1.1" standalone="no"?><bar/>');
+ xmlconcat
+--------------
+ <foo/><bar/>
+(1 row)
+
+SELECT xmlconcat('<?xml version="1.1"?><foo/>', NULL, '<?xml version="1.1" standalone="no"?><bar/>');
+ xmlconcat
+-----------------------------------
+ <?xml version="1.1"?><foo/><bar/>
+(1 row)
+
+SELECT xmlconcat(NULL);
+ xmlconcat
+-----------
+
+(1 row)
+
+SELECT xmlconcat(NULL, NULL);
+ xmlconcat
+-----------
+
+(1 row)
+
+SELECT xmlelement(name element,
+ xmlattributes (1 as one, 'deuce' as two),
+ 'content');
+ xmlelement
+------------------------------------------------
+ <element one="1" two="deuce">content</element>
+(1 row)
+
+SELECT xmlelement(name element,
+ xmlattributes ('unnamed and wrong'));
+ERROR: unnamed XML attribute value must be a column reference
+LINE 2: xmlattributes ('unnamed and wrong'));
+ ^
+SELECT xmlelement(name element, xmlelement(name nested, 'stuff'));
+ xmlelement
+-------------------------------------------
+ <element><nested>stuff</nested></element>
+(1 row)
+
+SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
+ xmlelement
+----------------------------------------------------------------------
+ <employee><name>sharon</name><age>25</age><pay>1000</pay></employee>
+ <employee><name>sam</name><age>30</age><pay>2000</pay></employee>
+ <employee><name>bill</name><age>20</age><pay>1000</pay></employee>
+ <employee><name>jeff</name><age>23</age><pay>600</pay></employee>
+ <employee><name>cim</name><age>30</age><pay>400</pay></employee>
+ <employee><name>linda</name><age>19</age><pay>100</pay></employee>
+(6 rows)
+
+SELECT xmlelement(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a));
+ERROR: XML attribute name "a" appears more than once
+LINE 1: ...ment(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a));
+ ^
+SELECT xmlelement(name num, 37);
+ xmlelement
+---------------
+ <num>37</num>
+(1 row)
+
+SELECT xmlelement(name foo, text 'bar');
+ xmlelement
+----------------
+ <foo>bar</foo>
+(1 row)
+
+SELECT xmlelement(name foo, xml 'bar');
+ xmlelement
+----------------
+ <foo>bar</foo>
+(1 row)
+
+SELECT xmlelement(name foo, text 'b<a/>r');
+ xmlelement
+-------------------------
+ <foo>b&lt;a/&gt;r</foo>
+(1 row)
+
+SELECT xmlelement(name foo, xml 'b<a/>r');
+ xmlelement
+-------------------
+ <foo>b<a/>r</foo>
+(1 row)
+
+SELECT xmlelement(name foo, array[1, 2, 3]);
+ xmlelement
+-------------------------------------------------------------------------
+ <foo><element>1</element><element>2</element><element>3</element></foo>
+(1 row)
+
+SET xmlbinary TO base64;
+SELECT xmlelement(name foo, bytea 'bar');
+ xmlelement
+-----------------
+ <foo>YmFy</foo>
+(1 row)
+
+SET xmlbinary TO hex;
+SELECT xmlelement(name foo, bytea 'bar');
+ xmlelement
+-------------------
+ <foo>626172</foo>
+(1 row)
+
+SELECT xmlelement(name foo, xmlattributes(true as bar));
+ xmlelement
+-------------------
+ <foo bar="true"/>
+(1 row)
+
+SELECT xmlelement(name foo, xmlattributes('2009-04-09 00:24:37'::timestamp as bar));
+ xmlelement
+----------------------------------
+ <foo bar="2009-04-09T00:24:37"/>
+(1 row)
+
+SELECT xmlelement(name foo, xmlattributes('infinity'::timestamp as bar));
+ERROR: timestamp out of range
+DETAIL: XML does not support infinite timestamp values.
+SELECT xmlelement(name foo, xmlattributes('<>&"''' as funny, xml 'b<a/>r' as funnier));
+ xmlelement
+------------------------------------------------------------
+ <foo funny="&lt;&gt;&amp;&quot;'" funnier="b&lt;a/&gt;r"/>
+(1 row)
+
+SELECT xmlparse(content '');
+ xmlparse
+----------
+
+(1 row)
+
+SELECT xmlparse(content ' ');
+ xmlparse
+----------
+
+(1 row)
+
+SELECT xmlparse(content 'abc');
+ xmlparse
+----------
+ abc
+(1 row)
+
+SELECT xmlparse(content '<abc>x</abc>');
+ xmlparse
+--------------
+ <abc>x</abc>
+(1 row)
+
+SELECT xmlparse(content '<invalidentity>&</invalidentity>');
+ERROR: invalid XML content
+DETAIL: line 1: xmlParseEntityRef: no name
+<invalidentity>&</invalidentity>
+ ^
+line 1: chunk is not well balanced
+SELECT xmlparse(content '<undefinedentity>&idontexist;</undefinedentity>');
+ERROR: invalid XML content
+DETAIL: line 1: Entity 'idontexist' not defined
+<undefinedentity>&idontexist;</undefinedentity>
+ ^
+line 1: chunk is not well balanced
+SELECT xmlparse(content '<invalidns xmlns=''&lt;''/>');
+ xmlparse
+---------------------------
+ <invalidns xmlns='&lt;'/>
+(1 row)
+
+SELECT xmlparse(content '<relativens xmlns=''relative''/>');
+ xmlparse
+--------------------------------
+ <relativens xmlns='relative'/>
+(1 row)
+
+SELECT xmlparse(content '<twoerrors>&idontexist;</unbalanced>');
+ERROR: invalid XML content
+DETAIL: line 1: Entity 'idontexist' not defined
+<twoerrors>&idontexist;</unbalanced>
+ ^
+line 1: Opening and ending tag mismatch: twoerrors line 1 and unbalanced
+line 1: chunk is not well balanced
+SELECT xmlparse(content '<nosuchprefix:tag/>');
+ xmlparse
+---------------------
+ <nosuchprefix:tag/>
+(1 row)
+
+SELECT xmlparse(document ' ');
+ERROR: invalid XML document
+DETAIL: line 1: Start tag expected, '<' not found
+SELECT xmlparse(document 'abc');
+ERROR: invalid XML document
+DETAIL: line 1: Start tag expected, '<' not found
+abc
+^
+SELECT xmlparse(document '<abc>x</abc>');
+ xmlparse
+--------------
+ <abc>x</abc>
+(1 row)
+
+SELECT xmlparse(document '<invalidentity>&</abc>');
+ERROR: invalid XML document
+DETAIL: line 1: xmlParseEntityRef: no name
+<invalidentity>&</abc>
+ ^
+line 1: Opening and ending tag mismatch: invalidentity line 1 and abc
+SELECT xmlparse(document '<undefinedentity>&idontexist;</abc>');
+ERROR: invalid XML document
+DETAIL: line 1: Entity 'idontexist' not defined
+<undefinedentity>&idontexist;</abc>
+ ^
+line 1: Opening and ending tag mismatch: undefinedentity line 1 and abc
+SELECT xmlparse(document '<invalidns xmlns=''&lt;''/>');
+ xmlparse
+---------------------------
+ <invalidns xmlns='&lt;'/>
+(1 row)
+
+SELECT xmlparse(document '<relativens xmlns=''relative''/>');
+ xmlparse
+--------------------------------
+ <relativens xmlns='relative'/>
+(1 row)
+
+SELECT xmlparse(document '<twoerrors>&idontexist;</unbalanced>');
+ERROR: invalid XML document
+DETAIL: line 1: Entity 'idontexist' not defined
+<twoerrors>&idontexist;</unbalanced>
+ ^
+line 1: Opening and ending tag mismatch: twoerrors line 1 and unbalanced
+SELECT xmlparse(document '<nosuchprefix:tag/>');
+ xmlparse
+---------------------
+ <nosuchprefix:tag/>
+(1 row)
+
+SELECT xmlpi(name foo);
+ xmlpi
+---------
+ <?foo?>
+(1 row)
+
+SELECT xmlpi(name xml);
+ERROR: invalid XML processing instruction
+DETAIL: XML processing instruction target name cannot be "xml".
+SELECT xmlpi(name xmlstuff);
+ xmlpi
+--------------
+ <?xmlstuff?>
+(1 row)
+
+SELECT xmlpi(name foo, 'bar');
+ xmlpi
+-------------
+ <?foo bar?>
+(1 row)
+
+SELECT xmlpi(name foo, 'in?>valid');
+ERROR: invalid XML processing instruction
+DETAIL: XML processing instruction cannot contain "?>".
+SELECT xmlpi(name foo, null);
+ xmlpi
+-------
+
+(1 row)
+
+SELECT xmlpi(name xml, null);
+ERROR: invalid XML processing instruction
+DETAIL: XML processing instruction target name cannot be "xml".
+SELECT xmlpi(name xmlstuff, null);
+ xmlpi
+-------
+
+(1 row)
+
+SELECT xmlpi(name "xml-stylesheet", 'href="mystyle.css" type="text/css"');
+ xmlpi
+-------------------------------------------------------
+ <?xml-stylesheet href="mystyle.css" type="text/css"?>
+(1 row)
+
+SELECT xmlpi(name foo, ' bar');
+ xmlpi
+-------------
+ <?foo bar?>
+(1 row)
+
+SELECT xmlroot(xml '<foo/>', version no value, standalone no value);
+ xmlroot
+---------
+ <foo/>
+(1 row)
+
+SELECT xmlroot(xml '<foo/>', version '2.0');
+ xmlroot
+-----------------------------
+ <?xml version="2.0"?><foo/>
+(1 row)
+
+SELECT xmlroot(xml '<foo/>', version no value, standalone yes);
+ xmlroot
+----------------------------------------------
+ <?xml version="1.0" standalone="yes"?><foo/>
+(1 row)
+
+SELECT xmlroot(xml '<?xml version="1.1"?><foo/>', version no value, standalone yes);
+ xmlroot
+----------------------------------------------
+ <?xml version="1.0" standalone="yes"?><foo/>
+(1 row)
+
+SELECT xmlroot(xmlroot(xml '<foo/>', version '1.0'), version '1.1', standalone no);
+ xmlroot
+---------------------------------------------
+ <?xml version="1.1" standalone="no"?><foo/>
+(1 row)
+
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value, standalone no);
+ xmlroot
+---------------------------------------------
+ <?xml version="1.0" standalone="no"?><foo/>
+(1 row)
+
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value, standalone no value);
+ xmlroot
+---------
+ <foo/>
+(1 row)
+
+SELECT xmlroot('<?xml version="1.1" standalone="yes"?><foo/>', version no value);
+ xmlroot
+----------------------------------------------
+ <?xml version="1.0" standalone="yes"?><foo/>
+(1 row)
+
+SELECT xmlroot (
+ xmlelement (
+ name gazonk,
+ xmlattributes (
+ 'val' AS name,
+ 1 + 1 AS num
+ ),
+ xmlelement (
+ NAME qux,
+ 'foo'
+ )
+ ),
+ version '1.0',
+ standalone yes
+);
+ xmlroot
+------------------------------------------------------------------------------------------
+ <?xml version="1.0" standalone="yes"?><gazonk name="val" num="2"><qux>foo</qux></gazonk>
+(1 row)
+
+SELECT xmlserialize(content data as character varying(20)) FROM xmltest;
+ xmlserialize
+--------------------
+ <value>one</value>
+ <value>two</value>
+(2 rows)
+
+SELECT xmlserialize(content 'good' as char(10));
+ xmlserialize
+--------------
+ good
+(1 row)
+
+SELECT xmlserialize(document 'bad' as text);
+ERROR: not an XML document
+SELECT xml '<foo>bar</foo>' IS DOCUMENT;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT xml '<foo>bar</foo><bar>foo</bar>' IS DOCUMENT;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT xml '<abc/>' IS NOT DOCUMENT;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT xml 'abc' IS NOT DOCUMENT;
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT '<>' IS NOT DOCUMENT;
+ERROR: invalid XML content
+LINE 1: SELECT '<>' IS NOT DOCUMENT;
+ ^
+DETAIL: line 1: StartTag: invalid element name
+<>
+ ^
+SELECT xmlagg(data) FROM xmltest;
+ xmlagg
+--------------------------------------
+ <value>one</value><value>two</value>
+(1 row)
+
+SELECT xmlagg(data) FROM xmltest WHERE id > 10;
+ xmlagg
+--------
+
+(1 row)
+
+SELECT xmlelement(name employees, xmlagg(xmlelement(name name, name))) FROM emp;
+ xmlelement
+--------------------------------------------------------------------------------------------------------------------------------
+ <employees><name>sharon</name><name>sam</name><name>bill</name><name>jeff</name><name>cim</name><name>linda</name></employees>
+(1 row)
+
+-- Check mapping SQL identifier to XML name
+SELECT xmlpi(name ":::_xml_abc135.%-&_");
+ xmlpi
+-------------------------------------------------
+ <?_x003A_::_x005F_xml_abc135._x0025_-_x0026__?>
+(1 row)
+
+SELECT xmlpi(name "123");
+ xmlpi
+---------------
+ <?_x0031_23?>
+(1 row)
+
+PREPARE foo (xml) AS SELECT xmlconcat('<foo/>', $1);
+SET XML OPTION DOCUMENT;
+EXECUTE foo ('<bar/>');
+ xmlconcat
+--------------
+ <foo/><bar/>
+(1 row)
+
+EXECUTE foo ('bad');
+ERROR: invalid XML document
+LINE 1: EXECUTE foo ('bad');
+ ^
+DETAIL: line 1: Start tag expected, '<' not found
+bad
+^
+SELECT xml '<!DOCTYPE a><a/><b/>';
+ERROR: invalid XML document
+LINE 1: SELECT xml '<!DOCTYPE a><a/><b/>';
+ ^
+DETAIL: line 1: Extra content at the end of the document
+<!DOCTYPE a><a/><b/>
+ ^
+SET XML OPTION CONTENT;
+EXECUTE foo ('<bar/>');
+ xmlconcat
+--------------
+ <foo/><bar/>
+(1 row)
+
+EXECUTE foo ('good');
+ xmlconcat
+------------
+ <foo/>good
+(1 row)
+
+SELECT xml '<!-- in SQL:2006+ a doc is content too--> <?y z?> <!DOCTYPE a><a/>';
+ xml
+--------------------------------------------------------------------
+ <!-- in SQL:2006+ a doc is content too--> <?y z?> <!DOCTYPE a><a/>
+(1 row)
+
+SELECT xml '<?xml version="1.0"?> <!-- hi--> <!DOCTYPE a><a/>';
+ xml
+------------------------------
+ <!-- hi--> <!DOCTYPE a><a/>
+(1 row)
+
+SELECT xml '<!DOCTYPE a><a/>';
+ xml
+------------------
+ <!DOCTYPE a><a/>
+(1 row)
+
+SELECT xml '<!-- hi--> oops <!DOCTYPE a><a/>';
+ERROR: invalid XML content
+LINE 1: SELECT xml '<!-- hi--> oops <!DOCTYPE a><a/>';
+ ^
+DETAIL: line 1: StartTag: invalid element name
+<!-- hi--> oops <!DOCTYPE a><a/>
+ ^
+SELECT xml '<!-- hi--> <oops/> <!DOCTYPE a><a/>';
+ERROR: invalid XML content
+LINE 1: SELECT xml '<!-- hi--> <oops/> <!DOCTYPE a><a/>';
+ ^
+DETAIL: line 1: StartTag: invalid element name
+<!-- hi--> <oops/> <!DOCTYPE a><a/>
+ ^
+SELECT xml '<!DOCTYPE a><a/><b/>';
+ERROR: invalid XML content
+LINE 1: SELECT xml '<!DOCTYPE a><a/><b/>';
+ ^
+DETAIL: line 1: Extra content at the end of the document
+<!DOCTYPE a><a/><b/>
+ ^
+-- Test backwards parsing
+CREATE VIEW xmlview1 AS SELECT xmlcomment('test');
+CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you');
+CREATE VIEW xmlview3 AS SELECT xmlelement(name element, xmlattributes (1 as ":one:", 'deuce' as two), 'content&');
+CREATE VIEW xmlview4 AS SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp;
+CREATE VIEW xmlview5 AS SELECT xmlparse(content '<abc>x</abc>');
+CREATE VIEW xmlview6 AS SELECT xmlpi(name foo, 'bar');
+CREATE VIEW xmlview7 AS SELECT xmlroot(xml '<foo/>', version no value, standalone yes);
+CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10));
+CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text);
+SELECT table_name, view_definition FROM information_schema.views
+ WHERE table_name LIKE 'xmlview%' ORDER BY 1;
+ table_name | view_definition
+------------+-------------------------------------------------------------------------------------------------------------------
+ xmlview1 | SELECT xmlcomment('test'::text) AS xmlcomment;
+ xmlview2 | SELECT XMLCONCAT('hello'::xml, 'you'::xml) AS "xmlconcat";
+ xmlview3 | SELECT XMLELEMENT(NAME element, XMLATTRIBUTES(1 AS ":one:", 'deuce' AS two), 'content&') AS "xmlelement";
+ xmlview4 | SELECT XMLELEMENT(NAME employee, XMLFOREST(emp.name AS name, emp.age AS age, emp.salary AS pay)) AS "xmlelement"+
+ | FROM emp;
+ xmlview5 | SELECT XMLPARSE(CONTENT '<abc>x</abc>'::text STRIP WHITESPACE) AS "xmlparse";
+ xmlview6 | SELECT XMLPI(NAME foo, 'bar'::text) AS "xmlpi";
+ xmlview7 | SELECT XMLROOT('<foo/>'::xml, VERSION NO VALUE, STANDALONE YES) AS "xmlroot";
+ xmlview8 | SELECT (XMLSERIALIZE(CONTENT 'good'::xml AS character(10)))::character(10) AS "xmlserialize";
+ xmlview9 | SELECT XMLSERIALIZE(CONTENT 'good'::xml AS text) AS "xmlserialize";
+(9 rows)
+
+-- Text XPath expressions evaluation
+SELECT xpath('/value', data) FROM xmltest;
+ xpath
+----------------------
+ {<value>one</value>}
+ {<value>two</value>}
+(2 rows)
+
+SELECT xpath(NULL, NULL) IS NULL FROM xmltest;
+ ?column?
+----------
+ t
+ t
+(2 rows)
+
+SELECT xpath('', '<!-- error -->');
+ERROR: empty XPath expression
+CONTEXT: SQL function "xpath" statement 1
+SELECT xpath('//text()', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>');
+ xpath
+----------------
+ {"number one"}
+(1 row)
+
+SELECT xpath('//loc:piece/@id', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+ xpath
+-------
+ {1,2}
+(1 row)
+
+SELECT xpath('//loc:piece', '<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+ xpath
+------------------------------------------------------------------------------------------------------------------------------------------------
+ {"<local:piece xmlns:local=\"http://127.0.0.1\" id=\"1\">number one</local:piece>","<local:piece xmlns:local=\"http://127.0.0.1\" id=\"2\"/>"}
+(1 row)
+
+SELECT xpath('//loc:piece', '<local:data xmlns:local="http://127.0.0.1" xmlns="http://127.0.0.2"><local:piece id="1"><internal>number one</internal><internal2/></local:piece><local:piece id="2" /></local:data>', ARRAY[ARRAY['loc', 'http://127.0.0.1']]);
+ xpath
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ {"<local:piece xmlns:local=\"http://127.0.0.1\" xmlns=\"http://127.0.0.2\" id=\"1\"><internal>number one</internal><internal2/></local:piece>","<local:piece xmlns:local=\"http://127.0.0.1\" id=\"2\"/>"}
+(1 row)
+
+SELECT xpath('//b', '<a>one <b>two</b> three <b>etc</b></a>');
+ xpath
+-------------------------
+ {<b>two</b>,<b>etc</b>}
+(1 row)
+
+SELECT xpath('//text()', '<root>&lt;</root>');
+ xpath
+--------
+ {&lt;}
+(1 row)
+
+SELECT xpath('//@value', '<root value="&lt;"/>');
+ xpath
+--------
+ {&lt;}
+(1 row)
+
+SELECT xpath('''<<invalid>>''', '<root/>');
+ xpath
+---------------------------
+ {&lt;&lt;invalid&gt;&gt;}
+(1 row)
+
+SELECT xpath('count(//*)', '<root><sub/><sub/></root>');
+ xpath
+-------
+ {3}
+(1 row)
+
+SELECT xpath('count(//*)=0', '<root><sub/><sub/></root>');
+ xpath
+---------
+ {false}
+(1 row)
+
+SELECT xpath('count(//*)=3', '<root><sub/><sub/></root>');
+ xpath
+--------
+ {true}
+(1 row)
+
+SELECT xpath('name(/*)', '<root><sub/><sub/></root>');
+ xpath
+--------
+ {root}
+(1 row)
+
+SELECT xpath('/nosuchtag', '<root/>');
+ xpath
+-------
+ {}
+(1 row)
+
+SELECT xpath('root', '<root/>');
+ xpath
+-----------
+ {<root/>}
+(1 row)
+
+-- Round-trip non-ASCII data through xpath().
+DO $$
+DECLARE
+ xml_declaration text := '<?xml version="1.0" encoding="ISO-8859-1"?>';
+ degree_symbol text;
+ res xml[];
+BEGIN
+ -- Per the documentation, except when the server encoding is UTF8, xpath()
+ -- may not work on non-ASCII data. The untranslatable_character and
+ -- undefined_function traps below, currently dead code, will become relevant
+ -- if we remove this limitation.
+ IF current_setting('server_encoding') <> 'UTF8' THEN
+ RAISE LOG 'skip: encoding % unsupported for xpath',
+ current_setting('server_encoding');
+ RETURN;
+ END IF;
+
+ degree_symbol := convert_from('\xc2b0', 'UTF8');
+ res := xpath('text()', (xml_declaration ||
+ '<x>' || degree_symbol || '</x>')::xml);
+ IF degree_symbol <> res[1]::text THEN
+ RAISE 'expected % (%), got % (%)',
+ degree_symbol, convert_to(degree_symbol, 'UTF8'),
+ res[1], convert_to(res[1]::text, 'UTF8');
+ END IF;
+EXCEPTION
+ -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8"
+ WHEN untranslatable_character
+ -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist
+ OR undefined_function
+ -- unsupported XML feature
+ OR feature_not_supported THEN
+ RAISE LOG 'skip: %', SQLERRM;
+END
+$$;
+-- Test xmlexists and xpath_exists
+SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF '<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>');
+ xmlexists
+-----------
+ f
+(1 row)
+
+SELECT xmlexists('//town[text() = ''Cwmbran'']' PASSING BY REF '<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>');
+ xmlexists
+-----------
+ t
+(1 row)
+
+SELECT xmlexists('count(/nosuchtag)' PASSING BY REF '<root/>');
+ xmlexists
+-----------
+ t
+(1 row)
+
+SELECT xpath_exists('//town[text() = ''Toronto'']','<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>'::xml);
+ xpath_exists
+--------------
+ f
+(1 row)
+
+SELECT xpath_exists('//town[text() = ''Cwmbran'']','<towns><town>Bidford-on-Avon</town><town>Cwmbran</town><town>Bristol</town></towns>'::xml);
+ xpath_exists
+--------------
+ t
+(1 row)
+
+SELECT xpath_exists('count(/nosuchtag)', '<root/>'::xml);
+ xpath_exists
+--------------
+ t
+(1 row)
+
+INSERT INTO xmltest VALUES (4, '<menu><beers><name>Budvar</name><cost>free</cost><name>Carling</name><cost>lots</cost></beers></menu>'::xml);
+INSERT INTO xmltest VALUES (5, '<menu><beers><name>Molson</name><cost>free</cost><name>Carling</name><cost>lots</cost></beers></menu>'::xml);
+INSERT INTO xmltest VALUES (6, '<myns:menu xmlns:myns="http://myns.com"><myns:beers><myns:name>Budvar</myns:name><myns:cost>free</myns:cost><myns:name>Carling</myns:name><myns:cost>lots</myns:cost></myns:beers></myns:menu>'::xml);
+INSERT INTO xmltest VALUES (7, '<myns:menu xmlns:myns="http://myns.com"><myns:beers><myns:name>Molson</myns:name><myns:cost>free</myns:cost><myns:name>Carling</myns:name><myns:cost>lots</myns:cost></myns:beers></myns:menu>'::xml);
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING BY REF data BY REF);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers' PASSING BY REF data);
+ count
+-------
+ 2
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers/name[text() = ''Molson'']' PASSING BY REF data);
+ count
+-------
+ 1
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beer',data);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers',data);
+ count
+-------
+ 2
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers/name[text() = ''Molson'']',data);
+ count
+-------
+ 1
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beer',data,ARRAY[ARRAY['myns','http://myns.com']]);
+ count
+-------
+ 0
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers',data,ARRAY[ARRAY['myns','http://myns.com']]);
+ count
+-------
+ 2
+(1 row)
+
+SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers/myns:name[text() = ''Molson'']',data,ARRAY[ARRAY['myns','http://myns.com']]);
+ count
+-------
+ 1
+(1 row)
+
+CREATE TABLE query ( expr TEXT );
+INSERT INTO query VALUES ('/menu/beers/cost[text() = ''lots'']');
+SELECT COUNT(id) FROM xmltest, query WHERE xmlexists(expr PASSING BY REF data);
+ count
+-------
+ 2
+(1 row)
+
+-- Test xml_is_well_formed and variants
+SELECT xml_is_well_formed_document('<foo>bar</foo>');
+ xml_is_well_formed_document
+-----------------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed_document('abc');
+ xml_is_well_formed_document
+-----------------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed_content('<foo>bar</foo>');
+ xml_is_well_formed_content
+----------------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed_content('abc');
+ xml_is_well_formed_content
+----------------------------
+ t
+(1 row)
+
+SET xmloption TO DOCUMENT;
+SELECT xml_is_well_formed('abc');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<abc/>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<foo>bar</foo>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<foo>bar</foo');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<foo><bar>baz</foo>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<local:data xmlns:local="http://127.0.0.1"><local:piece id="1">number one</local:piece><local:piece id="2" /></local:data>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<pg:foo xmlns:pg="http://postgresql.org/stuff">bar</my:foo>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<pg:foo xmlns:pg="http://postgresql.org/stuff">bar</pg:foo>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<invalidentity>&</abc>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<undefinedentity>&idontexist;</abc>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SELECT xml_is_well_formed('<invalidns xmlns=''&lt;''/>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<relativens xmlns=''relative''/>');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+SELECT xml_is_well_formed('<twoerrors>&idontexist;</unbalanced>');
+ xml_is_well_formed
+--------------------
+ f
+(1 row)
+
+SET xmloption TO CONTENT;
+SELECT xml_is_well_formed('abc');
+ xml_is_well_formed
+--------------------
+ t
+(1 row)
+
+-- Since xpath() deals with namespaces, it's a bit stricter about
+-- what's well-formed and what's not. If we don't obey these rules
+-- (i.e. ignore namespace-related errors from libxml), xpath()
+-- fails in subtle ways. The following would for example produce
+-- the xml value
+-- <invalidns xmlns='<'/>
+-- which is invalid because '<' may not appear un-escaped in
+-- attribute values.
+-- Since different libxml versions emit slightly different
+-- error messages, we suppress the DETAIL in this test.
+\set VERBOSITY terse
+SELECT xpath('/*', '<invalidns xmlns=''&lt;''/>');
+ERROR: could not parse XML document
+\set VERBOSITY default
+-- Again, the XML isn't well-formed for namespace purposes
+SELECT xpath('/*', '<nosuchprefix:tag/>');
+ERROR: could not parse XML document
+DETAIL: line 1: Namespace prefix nosuchprefix on tag is not defined
+<nosuchprefix:tag/>
+ ^
+CONTEXT: SQL function "xpath" statement 1
+-- XPath deprecates relative namespaces, but they're not supposed to
+-- throw an error, only a warning.
+SELECT xpath('/*', '<relativens xmlns=''relative''/>');
+WARNING: line 1: xmlns: URI relative is not absolute
+<relativens xmlns='relative'/>
+ ^
+ xpath
+--------------------------------------
+ {"<relativens xmlns=\"relative\"/>"}
+(1 row)
+
+-- External entity references should not leak filesystem information.
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/passwd">]><foo>&c;</foo>');
+ xmlparse
+-----------------------------------------------------------------
+ <!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/passwd">]><foo>&c;</foo>
+(1 row)
+
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/no.such.file">]><foo>&c;</foo>');
+ xmlparse
+-----------------------------------------------------------------------
+ <!DOCTYPE foo [<!ENTITY c SYSTEM "/etc/no.such.file">]><foo>&c;</foo>
+(1 row)
+
+-- This might or might not load the requested DTD, but it mustn't throw error.
+SELECT XMLPARSE(DOCUMENT '<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd"><chapter>&nbsp;</chapter>');
+ xmlparse
+------------------------------------------------------------------------------------------------------------------------------------------------------
+ <!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd"><chapter>&nbsp;</chapter>
+(1 row)
+
+-- XMLPATH tests
+CREATE TABLE xmldata(data xml);
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="1">
+ <COUNTRY_ID>AU</COUNTRY_ID>
+ <COUNTRY_NAME>Australia</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="2">
+ <COUNTRY_ID>CN</COUNTRY_ID>
+ <COUNTRY_NAME>China</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="3">
+ <COUNTRY_ID>HK</COUNTRY_ID>
+ <COUNTRY_NAME>HongKong</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="4">
+ <COUNTRY_ID>IN</COUNTRY_ID>
+ <COUNTRY_NAME>India</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID>
+</ROW>
+<ROW id="5">
+ <COUNTRY_ID>JP</COUNTRY_ID>
+ <COUNTRY_NAME>Japan</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID><PREMIER_NAME>Sinzo Abe</PREMIER_NAME>
+</ROW>
+<ROW id="6">
+ <COUNTRY_ID>SG</COUNTRY_ID>
+ <COUNTRY_NAME>Singapore</COUNTRY_NAME>
+ <REGION_ID>3</REGION_ID><SIZE unit="km">791</SIZE>
+</ROW>
+</ROWS>');
+-- XMLTABLE with columns
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME/text()' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+---------------
+ 1 | 1 | Australia | AU | 3 | | | not specified
+ 2 | 2 | China | CN | 3 | | | not specified
+ 3 | 3 | HongKong | HK | 3 | | | not specified
+ 4 | 4 | India | IN | 3 | | | not specified
+ 5 | 5 | Japan | JP | 3 | | | Sinzo Abe
+ 6 | 6 | Singapore | SG | 3 | 791 | km | not specified
+(6 rows)
+
+CREATE VIEW xmltableview1 AS SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME/text()' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+SELECT * FROM xmltableview1;
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+---------------
+ 1 | 1 | Australia | AU | 3 | | | not specified
+ 2 | 2 | China | CN | 3 | | | not specified
+ 3 | 3 | HongKong | HK | 3 | | | not specified
+ 4 | 4 | India | IN | 3 | | | not specified
+ 5 | 5 | Japan | JP | 3 | | | Sinzo Abe
+ 6 | 6 | Singapore | SG | 3 | 791 | km | not specified
+(6 rows)
+
+\sv xmltableview1
+CREATE OR REPLACE VIEW public.xmltableview1 AS
+ SELECT "xmltable".id,
+ "xmltable"._id,
+ "xmltable".country_name,
+ "xmltable".country_id,
+ "xmltable".region_id,
+ "xmltable".size,
+ "xmltable".unit,
+ "xmltable".premier_name
+ FROM ( SELECT xmldata.data
+ FROM xmldata) x,
+ LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1;
+ QUERY PLAN
+-----------------------------------------
+ Nested Loop
+ -> Seq Scan on xmldata
+ -> Table Function Scan on "xmltable"
+(3 rows)
+
+EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+(7 rows)
+
+-- errors
+SELECT * FROM XMLTABLE (ROW () PASSING null COLUMNS v1 timestamp) AS f (v1, v2);
+ERROR: XMLTABLE function has 1 columns available but 2 columns specified
+-- XMLNAMESPACES tests
+SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
+ '/zz:rows/zz:row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'zz:a');
+ a
+----
+ 10
+(1 row)
+
+CREATE VIEW xmltableview2 AS SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz),
+ '/zz:rows/zz:row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'zz:a');
+SELECT * FROM xmltableview2;
+ a
+----
+ 10
+(1 row)
+
+SELECT * FROM XMLTABLE(XMLNAMESPACES(DEFAULT 'http://x.y'),
+ '/rows/row'
+ PASSING '<rows xmlns="http://x.y"><row><a>10</a></row></rows>'
+ COLUMNS a int PATH 'a');
+ERROR: DEFAULT namespace is not supported
+SELECT * FROM XMLTABLE('.'
+ PASSING '<foo/>'
+ COLUMNS a text PATH 'foo/namespace::node()');
+ a
+--------------------------------------
+ http://www.w3.org/XML/1998/namespace
+(1 row)
+
+-- used in prepare statements
+PREPARE pp AS
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+EXECUTE pp;
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+--------------+------------+-----------+------+------+---------------
+ 1 | 1 | Australia | AU | 3 | | | not specified
+ 2 | 2 | China | CN | 3 | | | not specified
+ 3 | 3 | HongKong | HK | 3 | | | not specified
+ 4 | 4 | India | IN | 3 | | | not specified
+ 5 | 5 | Japan | JP | 3 | | | Sinzo Abe
+ 6 | 6 | Singapore | SG | 3 | 791 | km | not specified
+(6 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int);
+ COUNTRY_NAME | REGION_ID
+--------------+-----------
+ India | 3
+ Japan | 3
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY, "COUNTRY_NAME" text, "REGION_ID" int);
+ id | COUNTRY_NAME | REGION_ID
+----+--------------+-----------
+ 1 | India | 3
+ 2 | Japan | 3
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int);
+ id | COUNTRY_NAME | REGION_ID
+----+--------------+-----------
+ 4 | India | 3
+ 5 | Japan | 3
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id');
+ id
+----
+ 4
+ 5
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY);
+ id
+----
+ 1
+ 2
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH '.');
+ id | COUNTRY_NAME | REGION_ID | rawdata
+----+--------------+-----------+------------------------------------------------------------------
+ 4 | India | 3 | <ROW id="4"> +
+ | | | <COUNTRY_ID>IN</COUNTRY_ID> +
+ | | | <COUNTRY_NAME>India</COUNTRY_NAME> +
+ | | | <REGION_ID>3</REGION_ID> +
+ | | | </ROW>
+ 5 | Japan | 3 | <ROW id="5"> +
+ | | | <COUNTRY_ID>JP</COUNTRY_ID> +
+ | | | <COUNTRY_NAME>Japan</COUNTRY_NAME> +
+ | | | <REGION_ID>3</REGION_ID><PREMIER_NAME>Sinzo Abe</PREMIER_NAME>+
+ | | | </ROW>
+(2 rows)
+
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH './*');
+ id | COUNTRY_NAME | REGION_ID | rawdata
+----+--------------+-----------+-----------------------------------------------------------------------------------------------------------------------------
+ 4 | India | 3 | <COUNTRY_ID>IN</COUNTRY_ID><COUNTRY_NAME>India</COUNTRY_NAME><REGION_ID>3</REGION_ID>
+ 5 | Japan | 3 | <COUNTRY_ID>JP</COUNTRY_ID><COUNTRY_NAME>Japan</COUNTRY_NAME><REGION_ID>3</REGION_ID><PREMIER_NAME>Sinzo Abe</PREMIER_NAME>
+(2 rows)
+
+SELECT * FROM xmltable('/root' passing '<root><element>a1a<!-- aaaa -->a2a<?aaaaa?> <!--z--> bbbb<x>xxx</x>cccc</element></root>' COLUMNS element text);
+ element
+----------------------
+ a1aa2a bbbbxxxcccc
+(1 row)
+
+SELECT * FROM xmltable('/root' passing '<root><element>a1a<!-- aaaa -->a2a<?aaaaa?> <!--z--> bbbb<x>xxx</x>cccc</element></root>' COLUMNS element text PATH 'element/text()'); -- should fail
+ERROR: more than one value returned by column XPath expression
+-- CDATA test
+select * from xmltable('d/r' passing '<d><r><c><![CDATA[<hello> &"<>!<a>foo</a>]]></c></r><r><c>2</c></r></d>' columns c text);
+ c
+-------------------------
+ <hello> &"<>!<a>foo</a>
+ 2
+(2 rows)
+
+-- XML builtin entities
+SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</ent></a><a><ent>&quot;</ent></a><a><ent>&amp;</ent></a><a><ent>&lt;</ent></a><a><ent>&gt;</ent></a></x>' COLUMNS ent text);
+ ent
+-----
+ '
+ "
+ &
+ <
+ >
+(5 rows)
+
+SELECT * FROM xmltable('/x/a' PASSING '<x><a><ent>&apos;</ent></a><a><ent>&quot;</ent></a><a><ent>&amp;</ent></a><a><ent>&lt;</ent></a><a><ent>&gt;</ent></a></x>' COLUMNS ent xml);
+ ent
+------------------
+ <ent>'</ent>
+ <ent>"</ent>
+ <ent>&amp;</ent>
+ <ent>&lt;</ent>
+ <ent>&gt;</ent>
+(5 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+(7 rows)
+
+-- test qual
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan';
+ COUNTRY_NAME | REGION_ID
+--------------+-----------
+ Japan | 3
+(1 row)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan';
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID"
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID"
+ Table Function Call: XMLTABLE(('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]'::text) PASSING (xmldata.data) COLUMNS "COUNTRY_NAME" text, "REGION_ID" integer)
+ Filter: ("xmltable"."COUNTRY_NAME" = 'Japan'::text)
+(8 rows)
+
+-- should to work with more data
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="10">
+ <COUNTRY_ID>CZ</COUNTRY_ID>
+ <COUNTRY_NAME>Czech Republic</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID><PREMIER_NAME>Milos Zeman</PREMIER_NAME>
+</ROW>
+<ROW id="11">
+ <COUNTRY_ID>DE</COUNTRY_ID>
+ <COUNTRY_NAME>Germany</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID>
+</ROW>
+<ROW id="12">
+ <COUNTRY_ID>FR</COUNTRY_ID>
+ <COUNTRY_NAME>France</COUNTRY_NAME>
+ <REGION_ID>2</REGION_ID>
+</ROW>
+</ROWS>');
+INSERT INTO xmldata VALUES('<ROWS>
+<ROW id="20">
+ <COUNTRY_ID>EG</COUNTRY_ID>
+ <COUNTRY_NAME>Egypt</COUNTRY_NAME>
+ <REGION_ID>1</REGION_ID>
+</ROW>
+<ROW id="21">
+ <COUNTRY_ID>SD</COUNTRY_ID>
+ <COUNTRY_NAME>Sudan</COUNTRY_NAME>
+ <REGION_ID>1</REGION_ID>
+</ROW>
+</ROWS>');
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+----------------+------------+-----------+------+------+---------------
+ 1 | 1 | Australia | AU | 3 | | | not specified
+ 2 | 2 | China | CN | 3 | | | not specified
+ 3 | 3 | HongKong | HK | 3 | | | not specified
+ 4 | 4 | India | IN | 3 | | | not specified
+ 5 | 5 | Japan | JP | 3 | | | Sinzo Abe
+ 6 | 6 | Singapore | SG | 3 | 791 | km | not specified
+ 10 | 1 | Czech Republic | CZ | 2 | | | Milos Zeman
+ 11 | 2 | Germany | DE | 2 | | | not specified
+ 12 | 3 | France | FR | 2 | | | not specified
+ 20 | 1 | Egypt | EG | 1 | | | not specified
+ 21 | 2 | Sudan | SD | 1 | | | not specified
+(11 rows)
+
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified')
+ WHERE region_id = 2;
+ id | _id | country_name | country_id | region_id | size | unit | premier_name
+----+-----+----------------+------------+-----------+------+------+---------------
+ 10 | 1 | Czech Republic | CZ | 2 | | | Milos Zeman
+ 11 | 2 | Germany | DE | 2 | | | not specified
+ 12 | 3 | France | FR | 2 | | | not specified
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE',
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified')
+ WHERE region_id = 2;
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ -> Seq Scan on public.xmldata
+ Output: xmldata.data
+ -> Table Function Scan on "xmltable"
+ Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name
+ Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text))
+ Filter: ("xmltable".region_id = 2)
+(8 rows)
+
+-- should fail, NULL value
+SELECT xmltable.*
+ FROM (SELECT data FROM xmldata) x,
+ LATERAL XMLTABLE('/ROWS/ROW'
+ PASSING data
+ COLUMNS id int PATH '@id',
+ _id FOR ORDINALITY,
+ country_name text PATH 'COUNTRY_NAME' NOT NULL,
+ country_id text PATH 'COUNTRY_ID',
+ region_id int PATH 'REGION_ID',
+ size float PATH 'SIZE' NOT NULL,
+ unit text PATH 'SIZE/@unit',
+ premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified');
+ERROR: null is not allowed in column "size"
+-- if all is ok, then result is empty
+-- one line xml test
+WITH
+ x AS (SELECT proname, proowner, procost::numeric, pronargs,
+ array_to_string(proargnames,',') as proargnames,
+ case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes
+ FROM pg_proc WHERE proname = 'f_leak'),
+ y AS (SELECT xmlelement(name proc,
+ xmlforest(proname, proowner,
+ procost, pronargs,
+ proargnames, proargtypes)) as proc
+ FROM x),
+ z AS (SELECT xmltable.*
+ FROM y,
+ LATERAL xmltable('/proc' PASSING proc
+ COLUMNS proname name,
+ proowner oid,
+ procost float,
+ pronargs int,
+ proargnames text,
+ proargtypes text))
+ SELECT * FROM z
+ EXCEPT SELECT * FROM x;
+ proname | proowner | procost | pronargs | proargnames | proargtypes
+---------+----------+---------+----------+-------------+-------------
+(0 rows)
+
+-- multi line xml test, result should be empty too
+WITH
+ x AS (SELECT proname, proowner, procost::numeric, pronargs,
+ array_to_string(proargnames,',') as proargnames,
+ case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes
+ FROM pg_proc),
+ y AS (SELECT xmlelement(name data,
+ xmlagg(xmlelement(name proc,
+ xmlforest(proname, proowner, procost,
+ pronargs, proargnames, proargtypes)))) as doc
+ FROM x),
+ z AS (SELECT xmltable.*
+ FROM y,
+ LATERAL xmltable('/data/proc' PASSING doc
+ COLUMNS proname name,
+ proowner oid,
+ procost float,
+ pronargs int,
+ proargnames text,
+ proargtypes text))
+ SELECT * FROM z
+ EXCEPT SELECT * FROM x;
+ proname | proowner | procost | pronargs | proargnames | proargtypes
+---------+----------+---------+----------+-------------+-------------
+(0 rows)
+
+CREATE TABLE xmltest2(x xml, _path text);
+INSERT INTO xmltest2 VALUES('<d><r><ac>1</ac></r></d>', 'A');
+INSERT INTO xmltest2 VALUES('<d><r><bc>2</bc></r></d>', 'B');
+INSERT INTO xmltest2 VALUES('<d><r><cc>3</cc></r></d>', 'C');
+INSERT INTO xmltest2 VALUES('<d><r><dc>2</dc></r></d>', 'D');
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable('/d/r' PASSING x COLUMNS a int PATH '' || lower(_path) || 'c');
+ a
+---
+ 1
+ 2
+ 3
+ 2
+(4 rows)
+
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH '.');
+ a
+---
+ 1
+ 2
+ 3
+ 2
+(4 rows)
+
+SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH 'x' DEFAULT ascii(_path) - 54);
+ a
+----
+ 11
+ 12
+ 13
+ 14
+(4 rows)
+
+-- XPath result can be boolean or number too
+SELECT * FROM XMLTABLE('*' PASSING '<a>a</a>' COLUMNS a xml PATH '.', b text PATH '.', c text PATH '"hi"', d boolean PATH '. = "a"', e integer PATH 'string-length(.)');
+ a | b | c | d | e
+----------+---+----+---+---
+ <a>a</a> | a | hi | t | 1
+(1 row)
+
+\x
+SELECT * FROM XMLTABLE('*' PASSING '<e>pre<!--c1--><?pi arg?><![CDATA[&ent1]]><n2>&amp;deep</n2>post</e>' COLUMNS x xml PATH 'node()', y xml PATH '/');
+-[ RECORD 1 ]-----------------------------------------------------------
+x | pre<!--c1--><?pi arg?><![CDATA[&ent1]]><n2>&amp;deep</n2>post
+y | <e>pre<!--c1--><?pi arg?><![CDATA[&ent1]]><n2>&amp;deep</n2>post</e>+
+ |
+
+\x
+SELECT * FROM XMLTABLE('.' PASSING XMLELEMENT(NAME a) columns a varchar(20) PATH '"<foo/>"', b xml PATH '"<foo/>"');
+ a | b
+--------+--------------
+ <foo/> | &lt;foo/&gt;
+(1 row)
+
diff --git a/ydb/library/yql/tests/postgresql/patches/alter_table.out.patch b/ydb/library/yql/tests/postgresql/patches/alter_table.out.patch
new file mode 100644
index 0000000000..5b5418ce4e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/alter_table.out.patch
@@ -0,0 +1,112 @@
+--- alter_table.out (40977c8f0a0a339864b620914542e2d90392919b)
++++ alter_table.out (8a1083b9b852a7dbb540208bd92787ee92f311f7)
+@@ -2671,7 +2671,8 @@ create table alterlock (f1 int primary key, f2 text);
+ insert into alterlock values (1, 'foo');
+ create table alterlock2 (f3 int primary key, f1 int);
+ insert into alterlock2 values (1, 1);
+-begin; alter table alterlock alter column f2 set statistics 150;
++begin;
++alter table alterlock alter column f2 set statistics 150;
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+--------------------------
+@@ -2679,7 +2680,8 @@ select * from my_locks order by 1;
+ (1 row)
+
+ rollback;
+-begin; alter table alterlock cluster on alterlock_pkey;
++begin;
++alter table alterlock cluster on alterlock_pkey;
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ ----------------+--------------------------
+@@ -2688,7 +2690,8 @@ select * from my_locks order by 1;
+ (2 rows)
+
+ commit;
+-begin; alter table alterlock set without cluster;
++begin;
++alter table alterlock set without cluster;
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+--------------------------
+@@ -2696,7 +2699,8 @@ select * from my_locks order by 1;
+ (1 row)
+
+ commit;
+-begin; alter table alterlock set (fillfactor = 100);
++begin;
++alter table alterlock set (fillfactor = 100);
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+--------------------------
+@@ -2705,7 +2709,8 @@ select * from my_locks order by 1;
+ (2 rows)
+
+ commit;
+-begin; alter table alterlock reset (fillfactor);
++begin;
++alter table alterlock reset (fillfactor);
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+--------------------------
+@@ -2714,7 +2719,8 @@ select * from my_locks order by 1;
+ (2 rows)
+
+ commit;
+-begin; alter table alterlock set (toast.autovacuum_enabled = off);
++begin;
++alter table alterlock set (toast.autovacuum_enabled = off);
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+--------------------------
+@@ -2723,7 +2729,8 @@ select * from my_locks order by 1;
+ (2 rows)
+
+ commit;
+-begin; alter table alterlock set (autovacuum_enabled = off);
++begin;
++alter table alterlock set (autovacuum_enabled = off);
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+--------------------------
+@@ -2732,7 +2739,8 @@ select * from my_locks order by 1;
+ (2 rows)
+
+ commit;
+-begin; alter table alterlock alter column f2 set (n_distinct = 1);
++begin;
++alter table alterlock alter column f2 set (n_distinct = 1);
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+--------------------------
+@@ -2741,7 +2749,8 @@ select * from my_locks order by 1;
+
+ rollback;
+ -- test that mixing options with different lock levels works as expected
+-begin; alter table alterlock set (autovacuum_enabled = off, fillfactor = 80);
++begin;
++alter table alterlock set (autovacuum_enabled = off, fillfactor = 80);
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+--------------------------
+@@ -2750,7 +2759,8 @@ select * from my_locks order by 1;
+ (2 rows)
+
+ commit;
+-begin; alter table alterlock alter column f2 set storage extended;
++begin;
++alter table alterlock alter column f2 set storage extended;
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+---------------------
+@@ -2758,7 +2768,8 @@ select * from my_locks order by 1;
+ (1 row)
+
+ rollback;
+-begin; alter table alterlock alter column f2 set default 'x';
++begin;
++alter table alterlock alter column f2 set default 'x';
+ select * from my_locks order by 1;
+ relname | max_lockmode
+ -----------+---------------------
diff --git a/ydb/library/yql/tests/postgresql/patches/alter_table.sql.patch b/ydb/library/yql/tests/postgresql/patches/alter_table.sql.patch
new file mode 100644
index 0000000000..28e98d0a77
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/alter_table.sql.patch
@@ -0,0 +1,73 @@
+--- alter_table.sql (40977c8f0a0a339864b620914542e2d90392919b)
++++ alter_table.sql (8a1083b9b852a7dbb540208bd92787ee92f311f7)
+@@ -1730,48 +1730,59 @@ insert into alterlock values (1, 'foo');
+ create table alterlock2 (f3 int primary key, f1 int);
+ insert into alterlock2 values (1, 1);
+
+-begin; alter table alterlock alter column f2 set statistics 150;
++begin;
++alter table alterlock alter column f2 set statistics 150;
+ select * from my_locks order by 1;
+ rollback;
+
+-begin; alter table alterlock cluster on alterlock_pkey;
++begin;
++alter table alterlock cluster on alterlock_pkey;
+ select * from my_locks order by 1;
+ commit;
+
+-begin; alter table alterlock set without cluster;
++begin;
++alter table alterlock set without cluster;
+ select * from my_locks order by 1;
+ commit;
+
+-begin; alter table alterlock set (fillfactor = 100);
++begin;
++alter table alterlock set (fillfactor = 100);
+ select * from my_locks order by 1;
+ commit;
+
+-begin; alter table alterlock reset (fillfactor);
++begin;
++alter table alterlock reset (fillfactor);
+ select * from my_locks order by 1;
+ commit;
+
+-begin; alter table alterlock set (toast.autovacuum_enabled = off);
++begin;
++alter table alterlock set (toast.autovacuum_enabled = off);
+ select * from my_locks order by 1;
+ commit;
+
+-begin; alter table alterlock set (autovacuum_enabled = off);
++begin;
++alter table alterlock set (autovacuum_enabled = off);
+ select * from my_locks order by 1;
+ commit;
+
+-begin; alter table alterlock alter column f2 set (n_distinct = 1);
++begin;
++alter table alterlock alter column f2 set (n_distinct = 1);
+ select * from my_locks order by 1;
+ rollback;
+
+ -- test that mixing options with different lock levels works as expected
+-begin; alter table alterlock set (autovacuum_enabled = off, fillfactor = 80);
++begin;
++alter table alterlock set (autovacuum_enabled = off, fillfactor = 80);
+ select * from my_locks order by 1;
+ commit;
+
+-begin; alter table alterlock alter column f2 set storage extended;
++begin;
++alter table alterlock alter column f2 set storage extended;
+ select * from my_locks order by 1;
+ rollback;
+
+-begin; alter table alterlock alter column f2 set default 'x';
++begin;
++alter table alterlock alter column f2 set default 'x';
+ select * from my_locks order by 1;
+ rollback;
+
diff --git a/ydb/library/yql/tests/postgresql/patches/create_table.out.patch b/ydb/library/yql/tests/postgresql/patches/create_table.out.patch
new file mode 100644
index 0000000000..7bffbf189e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/create_table.out.patch
@@ -0,0 +1,96 @@
+--- create_table.out 2023-07-24 18:02:30.058334000 +0300
++++ create_table.out 2023-07-27 11:27:15.779766000 +0300
+@@ -218,43 +218,7 @@
+ -- invalid: non-lowercase quoted reloptions identifiers
+ CREATE TABLE tas_case WITH ("Fillfactor" = 10) AS SELECT 1 a;
+ ERROR: unrecognized parameter "Fillfactor"
+-CREATE UNLOGGED TABLE unlogged1 (a int primary key); -- OK
+-CREATE TEMPORARY TABLE unlogged2 (a int primary key); -- OK
+-SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname;
+- relname | relkind | relpersistence
+-----------------+---------+----------------
+- unlogged1 | r | u
+- unlogged1_pkey | i | u
+- unlogged2 | r | t
+- unlogged2_pkey | i | t
+-(4 rows)
+-
+-REINDEX INDEX unlogged1_pkey;
+-REINDEX INDEX unlogged2_pkey;
+-SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname;
+- relname | relkind | relpersistence
+-----------------+---------+----------------
+- unlogged1 | r | u
+- unlogged1_pkey | i | u
+- unlogged2 | r | t
+- unlogged2_pkey | i | t
+-(4 rows)
+-
+-DROP TABLE unlogged2;
+-INSERT INTO unlogged1 VALUES (42);
+-CREATE UNLOGGED TABLE public.unlogged2 (a int primary key); -- also OK
+-CREATE UNLOGGED TABLE pg_temp.unlogged3 (a int primary key); -- not OK
+-ERROR: only temporary relations may be created in temporary schemas
+-LINE 1: CREATE UNLOGGED TABLE pg_temp.unlogged3 (a int primary key);
+- ^
+-CREATE TABLE pg_temp.implicitly_temp (a int primary key); -- OK
+ CREATE TEMP TABLE explicitly_temp (a int primary key); -- also OK
+-CREATE TEMP TABLE pg_temp.doubly_temp (a int primary key); -- also OK
+-CREATE TEMP TABLE public.temp_to_perm (a int primary key); -- not OK
+-ERROR: cannot create temporary relation in non-temporary schema
+-LINE 1: CREATE TEMP TABLE public.temp_to_perm (a int primary key);
+- ^
+-DROP TABLE unlogged1, public.unlogged2;
+ CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+ CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+ ERROR: relation "as_select1" already exists
+@@ -295,8 +259,10 @@
+ CREATE TABLE withoid() WITH (oids = true);
+ ERROR: tables declared WITH OIDS are not supported
+ -- but explicitly not adding oids is still supported
+-CREATE TEMP TABLE withoutoid() WITHOUT OIDS; DROP TABLE withoutoid;
+-CREATE TEMP TABLE withoutoid() WITH (oids = false); DROP TABLE withoutoid;
++CREATE TEMP TABLE withoutoid() WITHOUT OIDS;
++DROP TABLE withoutoid;
++CREATE TEMP TABLE withoutoid() WITH (oids = false);
++DROP TABLE withoutoid;
+ -- check restriction with default expressions
+ -- invalid use of column reference in default expressions
+ CREATE TABLE default_expr_column (id int DEFAULT (id));
+@@ -334,14 +300,18 @@
+ -- Verify that subtransaction rollback restores rd_createSubid.
+ BEGIN;
+ CREATE TABLE remember_create_subid (c int);
+-SAVEPOINT q; DROP TABLE remember_create_subid; ROLLBACK TO q;
++SAVEPOINT q;
++DROP TABLE remember_create_subid;
++ROLLBACK TO q;
+ COMMIT;
+ DROP TABLE remember_create_subid;
+ -- Verify that subtransaction rollback restores rd_firstRelfilenodeSubid.
+ CREATE TABLE remember_node_subid (c int);
+ BEGIN;
+ ALTER TABLE remember_node_subid ALTER c TYPE bigint;
+-SAVEPOINT q; DROP TABLE remember_node_subid; ROLLBACK TO q;
++SAVEPOINT q;
++DROP TABLE remember_node_subid;
++ROLLBACK TO q;
+ COMMIT;
+ DROP TABLE remember_node_subid;
+ --
+@@ -945,15 +915,6 @@
+ CONSTRAINT check_a CHECK (length(a) > 0)
+ ) PARTITION BY LIST (a);
+ CREATE TABLE part_a PARTITION OF parted FOR VALUES IN ('a');
+--- only inherited attributes (never local ones)
+-SELECT attname, attislocal, attinhcount FROM pg_attribute
+- WHERE attrelid = 'part_a'::regclass and attnum > 0
+- ORDER BY attnum;
+- attname | attislocal | attinhcount
+----------+------------+-------------
+- a | f | 1
+- b | f | 1
+-(2 rows)
+
+ -- able to specify column default, column constraint, and table constraint
+ -- first check the "column specified more than once" error
diff --git a/ydb/library/yql/tests/postgresql/patches/create_table.sql.patch b/ydb/library/yql/tests/postgresql/patches/create_table.sql.patch
new file mode 100644
index 0000000000..e496d21e76
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/create_table.sql.patch
@@ -0,0 +1,71 @@
+--- create_table.sql 2023-07-24 18:02:30.058334000 +0300
++++ create_table.sql 2023-07-27 11:26:22.063095000 +0300
+@@ -256,21 +256,7 @@
+ -- invalid: non-lowercase quoted reloptions identifiers
+ CREATE TABLE tas_case WITH ("Fillfactor" = 10) AS SELECT 1 a;
+
+-CREATE UNLOGGED TABLE unlogged1 (a int primary key); -- OK
+-CREATE TEMPORARY TABLE unlogged2 (a int primary key); -- OK
+-SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname;
+-REINDEX INDEX unlogged1_pkey;
+-REINDEX INDEX unlogged2_pkey;
+-SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname;
+-DROP TABLE unlogged2;
+-INSERT INTO unlogged1 VALUES (42);
+-CREATE UNLOGGED TABLE public.unlogged2 (a int primary key); -- also OK
+-CREATE UNLOGGED TABLE pg_temp.unlogged3 (a int primary key); -- not OK
+-CREATE TABLE pg_temp.implicitly_temp (a int primary key); -- OK
+ CREATE TEMP TABLE explicitly_temp (a int primary key); -- also OK
+-CREATE TEMP TABLE pg_temp.doubly_temp (a int primary key); -- also OK
+-CREATE TEMP TABLE public.temp_to_perm (a int primary key); -- not OK
+-DROP TABLE unlogged1, public.unlogged2;
+
+ CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+ CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r';
+@@ -301,8 +287,10 @@
+ CREATE TABLE withoid() WITH (oids = true);
+
+ -- but explicitly not adding oids is still supported
+-CREATE TEMP TABLE withoutoid() WITHOUT OIDS; DROP TABLE withoutoid;
+-CREATE TEMP TABLE withoutoid() WITH (oids = false); DROP TABLE withoutoid;
++CREATE TEMP TABLE withoutoid() WITHOUT OIDS;
++DROP TABLE withoutoid;
++CREATE TEMP TABLE withoutoid() WITH (oids = false);
++DROP TABLE withoutoid;
+
+ -- check restriction with default expressions
+ -- invalid use of column reference in default expressions
+@@ -321,7 +309,9 @@
+ -- Verify that subtransaction rollback restores rd_createSubid.
+ BEGIN;
+ CREATE TABLE remember_create_subid (c int);
+-SAVEPOINT q; DROP TABLE remember_create_subid; ROLLBACK TO q;
++SAVEPOINT q;
++DROP TABLE remember_create_subid;
++ROLLBACK TO q;
+ COMMIT;
+ DROP TABLE remember_create_subid;
+
+@@ -329,7 +319,9 @@
+ CREATE TABLE remember_node_subid (c int);
+ BEGIN;
+ ALTER TABLE remember_node_subid ALTER c TYPE bigint;
+-SAVEPOINT q; DROP TABLE remember_node_subid; ROLLBACK TO q;
++SAVEPOINT q;
++DROP TABLE remember_node_subid;
++ROLLBACK TO q;
+ COMMIT;
+ DROP TABLE remember_node_subid;
+
+@@ -753,11 +745,6 @@
+
+ CREATE TABLE part_a PARTITION OF parted FOR VALUES IN ('a');
+
+--- only inherited attributes (never local ones)
+-SELECT attname, attislocal, attinhcount FROM pg_attribute
+- WHERE attrelid = 'part_a'::regclass and attnum > 0
+- ORDER BY attnum;
+-
+ -- able to specify column default, column constraint, and table constraint
+
+ -- first check the "column specified more than once" error
diff --git a/ydb/library/yql/tests/postgresql/patches/json_encoding.out.patch b/ydb/library/yql/tests/postgresql/patches/json_encoding.out.patch
new file mode 100644
index 0000000000..fb758ac64a
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/json_encoding.out.patch
@@ -0,0 +1,16 @@
+--- json_encoding.out (645054971474b440c91194f741c40b6d7be0b93d)
++++ json_encoding.out (8a1083b9b852a7dbb540208bd92787ee92f311f7)
+@@ -1,13 +1,6 @@
+ --
+ -- encoding-sensitive tests for json and jsonb
+ --
+--- We provide expected-results files for UTF8 (json_encoding.out)
+--- and for SQL_ASCII (json_encoding_1.out). Skip otherwise.
+-SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
+- AS skip_test \gset
+-\if :skip_test
+-\quit
+-\endif
+ SELECT getdatabaseencoding(); -- just to label the results files
+ getdatabaseencoding
+ ---------------------
diff --git a/ydb/library/yql/tests/postgresql/patches/json_encoding.sql.patch b/ydb/library/yql/tests/postgresql/patches/json_encoding.sql.patch
new file mode 100644
index 0000000000..e248455baa
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/json_encoding.sql.patch
@@ -0,0 +1,17 @@
+--- json_encoding.sql (645054971474b440c91194f741c40b6d7be0b93d)
++++ json_encoding.sql (8a1083b9b852a7dbb540208bd92787ee92f311f7)
+@@ -2,14 +2,6 @@
+ -- encoding-sensitive tests for json and jsonb
+ --
+
+--- We provide expected-results files for UTF8 (json_encoding.out)
+--- and for SQL_ASCII (json_encoding_1.out). Skip otherwise.
+-SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
+- AS skip_test \gset
+-\if :skip_test
+-\quit
+-\endif
+-
+ SELECT getdatabaseencoding(); -- just to label the results files
+
+ -- first json
diff --git a/ydb/library/yql/tests/postgresql/patches/jsonpath_encoding.out.patch b/ydb/library/yql/tests/postgresql/patches/jsonpath_encoding.out.patch
new file mode 100644
index 0000000000..b2479a9c1b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/jsonpath_encoding.out.patch
@@ -0,0 +1,16 @@
+--- jsonpath_encoding.out (645054971474b440c91194f741c40b6d7be0b93d)
++++ jsonpath_encoding.out (8a1083b9b852a7dbb540208bd92787ee92f311f7)
+@@ -1,13 +1,6 @@
+ --
+ -- encoding-sensitive tests for jsonpath
+ --
+--- We provide expected-results files for UTF8 (jsonpath_encoding.out)
+--- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise.
+-SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
+- AS skip_test \gset
+-\if :skip_test
+-\quit
+-\endif
+ SELECT getdatabaseencoding(); -- just to label the results files
+ getdatabaseencoding
+ ---------------------
diff --git a/ydb/library/yql/tests/postgresql/patches/jsonpath_encoding.sql.patch b/ydb/library/yql/tests/postgresql/patches/jsonpath_encoding.sql.patch
new file mode 100644
index 0000000000..2de56bcba3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/jsonpath_encoding.sql.patch
@@ -0,0 +1,17 @@
+--- jsonpath_encoding.sql (645054971474b440c91194f741c40b6d7be0b93d)
++++ jsonpath_encoding.sql (8a1083b9b852a7dbb540208bd92787ee92f311f7)
+@@ -2,14 +2,6 @@
+ -- encoding-sensitive tests for jsonpath
+ --
+
+--- We provide expected-results files for UTF8 (jsonpath_encoding.out)
+--- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise.
+-SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII')
+- AS skip_test \gset
+-\if :skip_test
+-\quit
+-\endif
+-
+ SELECT getdatabaseencoding(); -- just to label the results files
+
+ -- checks for double-quoted values
diff --git a/ydb/library/yql/tests/postgresql/patches/unicode.out.patch b/ydb/library/yql/tests/postgresql/patches/unicode.out.patch
new file mode 100644
index 0000000000..8d79a5701e
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/unicode.out.patch
@@ -0,0 +1,10 @@
+--- unicode.out (645054971474b440c91194f741c40b6d7be0b93d)
++++ unicode.out (8a1083b9b852a7dbb540208bd92787ee92f311f7)
+@@ -1,7 +1,3 @@
+-SELECT getdatabaseencoding() <> 'UTF8' AS skip_test \gset
+-\if :skip_test
+-\quit
+-\endif
+ SELECT U&'\0061\0308bc' <> U&'\00E4bc' COLLATE "C" AS sanity_check;
+ sanity_check
+ --------------
diff --git a/ydb/library/yql/tests/postgresql/patches/unicode.sql.patch b/ydb/library/yql/tests/postgresql/patches/unicode.sql.patch
new file mode 100644
index 0000000000..c89a1e5e67
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/patches/unicode.sql.patch
@@ -0,0 +1,11 @@
+--- unicode.sql (645054971474b440c91194f741c40b6d7be0b93d)
++++ unicode.sql (8a1083b9b852a7dbb540208bd92787ee92f311f7)
+@@ -1,8 +1,3 @@
+-SELECT getdatabaseencoding() <> 'UTF8' AS skip_test \gset
+-\if :skip_test
+-\quit
+-\endif
+-
+ SELECT U&'\0061\0308bc' <> U&'\00E4bc' COLLATE "C" AS sanity_check;
+
+ SELECT normalize('');
diff --git a/ydb/library/yql/tests/postgresql/pg_tests.csv b/ydb/library/yql/tests/postgresql/pg_tests.csv
new file mode 100644
index 0000000000..6996cc19e3
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/pg_tests.csv
@@ -0,0 +1,54 @@
+testcase,statements,successful,ratio
+functional_deps,40,3,7.5
+comments,7,7,100.0
+expressions,63,8,12.7
+limit,84,5,5.95
+jsonpath_encoding,31,31,100.0
+name,40,4,10.0
+create_misc,76,1,1.32
+dbsize,24,22,91.67
+select_distinct,46,1,2.17
+json_encoding,42,42,100.0
+jsonb_jsonpath,427,0,0.0
+oid,27,1,3.7
+numerology,24,4,16.67
+select_distinct_on,4,0,0.0
+float4,96,17,17.71
+int2,49,8,16.33
+select,88,2,2.27
+insert,357,8,2.24
+unicode,13,4,30.77
+strings,390,28,7.18
+int4,70,27,38.57
+float8,168,49,29.17
+interval,168,87,51.79
+jsonpath,169,152,89.94
+select_into,67,2,2.99
+alter_table,1679,2,0.12
+bit,115,13,11.3
+aggregates,416,0,0.0
+text,76,5,6.58
+arrays,410,6,1.46
+truncate,193,3,1.55
+horology,306,76,24.84
+int8,142,6,4.23
+update,288,13,4.51
+time,39,11,28.21
+case,63,13,20.63
+subselect,234,2,0.85
+create_table,383,41,10.7
+json,454,44,9.69
+jsonb,1017,43,4.23
+union,186,0,0.0
+date,264,17,6.44
+timetz,45,13,28.89
+xml,234,3,1.28
+delete,10,0,0.0
+boolean,93,62,66.67
+window,298,2,0.67
+join,591,23,3.89
+select_implicit,44,11,25.0
+timestamp,145,78,53.79
+timestamptz,315,79,25.08
+select_having,23,11,47.83
+numeric,915,490,53.55
diff --git a/ydb/library/yql/tests/postgresql/status.md b/ydb/library/yql/tests/postgresql/status.md
new file mode 100644
index 0000000000..5603c33126
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/status.md
@@ -0,0 +1,61 @@
+==Статус прохождения регрессионных тестов PG в YQL==
+
+#|
+||№ п/п | Имя теста|Число операторов| Из них выполняется| % выполнения | Последнее обновление | Основные проблемы ||
+|| 1 | boolean | 93 | 64 (+1) | 68.82 | 29.09.2023 | DROP TABLE, implicit casts, \pset (null) ||
+|| 2 | char | 25 | 3 | 12.0 | 25.05.2023 | implicit cast, pgbpchar vs pgtext ||
+|| 3 | name | 40 | 22 (+17) | 55.0 | 29.09.2023 | parse_ident, implicit casts ||
+|| 4 | varchar | 24 | 2 | 8.33 | 25.05.2023 | ||
+|| 5 | text | 76 | 5 | 6.58 | 25.05.2023 | строковые функции (format, concat, concat_ws, length) и оператор конкатенации, implicit casts, отличаются сообщения об ошибках для оператора конкатенации с аргументами неподходящих типов ||
+|| 6 | int2 | 49 | 47 (+39) | 95.92 | 29.09.2023 | ||
+|| 7 | int4 | 70 | 70 (+42) | 100.0 | 29.09.2023 | ||
+|| 8 | int8 | 142 | 13 (+7) | 9.15 | 29.09.2023 | generate_series, pg_type, gcd, implicit casts ||
+|| 9 | oid | 27 | 21 (+19) | 77.78 | 29.09.2023 | ||
+|| 10 | float4 | 96 | 48 (+31) | 50.0 | 29.09.2023 | CREATE TYPE, CREATE FUNCTION, WITH, форматирование NaN и Infinity, float4send ||
+|| 11 | float8 | 168 | 96 (+1) | 57.14 | 25.10.2023 | CREATE CAST, форматирование NaN и Infinity, extra_float_digits, implicit casts, float8send ||
+|| 12 | bit | 115 | 4 (+4) | 3.48 | 25.10.2023 | substring, COPY FROM stdin, битовые константы ||
+|| 13 | numeric | 915 | 526 (+23) | 57.49 | 10.08.2023 | CREATE UNIQUE INDEX, VACUUM ANALYZE, implicit casts, ошибочно проходит cast в int2 и int8, форматирование NaN и Infinity, COPY FROM stdin, SET lc_numeric, умножение больших целых чисел не дает в результате число с плавающей точкой, sum(), округление, nullif, форматирование чисел ||
+|| 14 | uuid | 36 | 0 | 0.0 | 02.05.2023 | ||·
+|| 15 | strings | 390 | 31 (+3) | 7.95 | 25.08.2023 | SET, RESET, standard_conforming_strings, bytea_output, \pset, неинициализированная поддержка регулярок, pg_class ||
+|| 16 | numerology | 24 | 8 (+4) | 33.33 | 26.07.2023 | ||
+|| 17 | date | 264 | 17 | 6.44 | 25.05.2023 | ||
+|| 18 | time | 39 | 11 | 28.21 | 25.05.2023 | ||
+|| 19 | timetz | 45 | 13 | 28.89 | 25.05.2023 | ||
+|| 20 | timestamp | 145 | 81 (+3) | 55.86 | 10.08.2023 | ||
+|| 21 | timestamptz | 315 | 83 (+4) | 26.35 | 10.08.2023 | ||
+|| 22 | interval | 168 | 115 (+1) | 68.45 | 25.10.2023 | ||
+|| 23 | horology | 306 | 79 (+28) | 25.82 | 10.08.2023 | SET, DateStyle, TimeZone, автоматически назначаемые имена колонкам-выражениям, SET TIME ZOME, RESET TIME ZONE, интервальный тип ПГ, ||
+|| 24 | comments | 7 | 7 | 100.0 | 25.05.2023 | ||
+|| 25 | expressions | 63 | 14 (+4) | 22.22 | 25.10.2023 | ||
+|| 26 | unicode | 13 | 4 (+4) | 30.77 | 10.08.2023 | ||
+|| 27 | create_table | 368 | 47 (+1) | 12.77 | 25.10.2023 | CREATE UNLOGGED TABLE, REINDEX, PREPARE ... SELECT, DEALLOCATE, \gexec, pg_class, pg_attribute, CREATE TABLE PARTITION OF ||
+|| 28 | insert | 357 | 10 (+10) | 2.8 | 25.10.2023 | CREATE TEMP TABLE, ALTER TABLE, DROP TABLE, CREATE TYPE, CREATE RULE, \d+, DROP TYPE, create table...partition by range, create table ... partition of ..., tableoid::regclass, create or replace function, create operator, ||
+|| 29 | create_misc | 76 | 3 (+2) | 3.95 | 29.09.2023 | ||
+|| 30 | select | 88 | 4 (+4) | 4.55 | 25.10.2023 | порядок сортировки в виде ORDER BY поле using > или <, а также NULLS FIRST/LAST; ANALYZE, переменные enable_seqscan, enable_bitmapscan, enable_sort, whole-row Var referencing a subquery, подзапросы внутри values, INSERT INTO ... DEFAULT VALUES, Range sub select unsupported lateral, CREATE INDEX, DROP INDEX, explain (опции costs, analyze, timing, summary), SELECT 1 AS x ORDER BY x; CREATE FUNCTION, DROP FUNCTION, table inheritance, PARTITION BY ||
+|| 31 | select_into | 67 | 3 | 4.48 | 27.07.2023 | ||
+|| 32 | select_distinct | 46 | 1 | 2.17 | 27.07.2023 | ||
+|| 33 | select_distinct_on | 4 | 0 | 0.0 | 25.05.2023 | ||
+|| 34 | select_implicit | 44 | 11 | 25.0 | 25.05.2023 | ||
+|| 35 | select_having | 23 | 11 | 47.83 | 25.05.2023 | ||
+|| 36 | subselect | 234 | 2 | 0.85 | 25.05.2023 | ||
+|| 37 | union | 186 | 0 | 0.0 | 25.05.2023 | ||
+|| 38 | case | 63 | 13 | 20.63 | 25.05.2023 | implicit casts, create function volatile ||
+|| 39 | join | 591 | 23 (+1) | 3.89 | 14.06.2023 | ||
+|| 40 | aggregates | 416 | 1 (+1) | 0.24 | 25.08.2023 | ||
+|| 41 | arrays | 410 | 1 | 0.24 | 29.09.2023 | ||
+|| 42 | update | 288 | 17 | 5.9 | 27.07.2023 | :-переменные ||
+|| 43 | delete | 10 | 0 | 0.0 | 25.05.2023 | ||
+|| 44 | dbsize | 24 | 24 (+16) | 100.0 | 10.08.2023 | ||
+|| 45 | window | 298 | 2 (+2) | 0.67 | 14.06.2023 | ||
+|| 46 | functional_deps | 40 | 4 (+1) | 10.0 | 29.09.2023 | ||
+|| 47 | json | 454 | 51 (+1) | 11.23 | 25.10.2023 | ||
+|| 48 | jsonb | 1017 | 50 (+1) | 4.92 | 25.10.2023 | ||
+|| 49 | json_encoding | 42 | 42 | 100.0 | 29.05.2023 | ||
+|| 50 | jsonpath | 169 | 152 | 89.94 | 29.05.2023 | числа с точкой без целой части (например .1), литерал '00' ||
+|| 51 | jsonpath_encoding | 31 | 31 | 100.0 | 29.05.2023 | ||
+|| 52 | jsonb_jsonpath | 427 | 5 (+5) | 1.17 | 27.07.2023 | ||
+|| 53 | limit | 84 | 5 (+4) | 5.95 | 10.08.2023 | ||
+|| 54 | truncate | 193 | 3 | 1.55 | 25.05.2023 | ||
+|| 55 | alter_table | 1679 | 4 (+2) | 0.24 | 25.08.2023 | COMMENT ON TABLE ||
+|| 56 | xml | 234 | 3 | 1.28 | 25.05.2023 | \set VERBOSITY ||
+|#
diff --git a/ydb/library/yql/tests/postgresql/status.old b/ydb/library/yql/tests/postgresql/status.old
new file mode 100644
index 0000000000..e74e294ea7
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/status.old
@@ -0,0 +1,61 @@
+==Статус прохождения регрессионных тестов PG в YQL==
+
+#|
+||№ п/п | Имя теста|Число операторов| Из них выполняется| % выполнения | Последнее обновление | Основные проблемы ||
+|| 1 | boolean | 93 | 62 | 66.67 | 29.05.2023 | DROP TABLE, implicit casts, \pset (null) ||
+|| 2 | char | 25 | 3 | 12.0 | 25.05.2023 | implicit cast, pgbpchar vs pgtext ||
+|| 3 | name | 40 | 4 | 10.0 | 25.05.2023 | parse_ident, implicit casts ||
+|| 4 | varchar | 24 | 2 | 8.33 | 25.05.2023 | ||
+|| 5 | text | 76 | 5 | 6.58 | 25.05.2023 | строковые функции (format, concat, concat_ws, length) и оператор конкатенации, implicit casts, отличаются сообщения об ошибках для оператора конкатенации с аргументами неподходящих типов ||
+|| 6 | int2 | 49 | 8 | 16.33 | 26.04.2023 | ||
+|| 7 | int4 | 70 | 27 | 38.57 | 29.05.2023 | implicit casts ||
+|| 8 | int8 | 142 | 6 | 4.23 | 25.05.2023 | generate_series, pg_type, gcd, implicit casts ||
+|| 9 | oid | 27 | 1 | 3.7 | 25.05.2023 | ||
+|| 10 | float4 | 96 | 17 | 17.71 | 29.05.2023 | CREATE TYPE, CREATE FUNCTION, WITH, форматирование NaN и Infinity, float4send ||
+|| 11 | float8 | 168 | 49 | 29.17 | 29.05.2023 | CREATE CAST, форматирование NaN и Infinity, extra_float_digits, implicit casts, float8send ||
+|| 12 | bit | 115 | 13 | 11.3 | 28.04.2023 | substring, COPY FROM stdin, битовые константы ||
+|| 13 | numeric | 915 | 490 (+12) | 53.55 | 14.06.2023 | CREATE UNIQUE INDEX, VACUUM ANALYZE, implicit casts, ошибочно проходит cast в int2 и int8, форматирование NaN и Infinity, COPY FROM stdin, SET lc_numeric, умножение больших целых чисел не дает в результате число с плавающей точкой, sum(), округление, nullif, форматирование чисел ||
+|| 14 | uuid | 36 | 0 | 0.0 | 02.05.2023 | ||·
+|| 15 | strings | 390 | 28 | 7.18 | 29.05.2023 | SET, RESET, standard_conforming_strings, bytea_output, \pset, неинициализированная поддержка регулярок, pg_class ||
+|| 16 | numerology | 24 | 4 | 16.67 | 25.05.2023 | ||
+|| 17 | date | 264 | 17 | 6.44 | 25.05.2023 | ||
+|| 18 | time | 39 | 11 | 28.21 | 25.05.2023 | ||
+|| 19 | timetz | 45 | 13 | 28.89 | 25.05.2023 | ||
+|| 20 | timestamp | 145 | 78 (+1) | 53.79 | 14.06.2023 | ||
+|| 21 | timestamptz | 315 | 79 (+1) | 25.08 | 14.06.2023 | ||
+|| 22 | interval | 168 | 87 (+1) | 51.79 | 14.06.2023 | ||
+|| 23 | horology | 306 | 76 | 24.84 | 29.05.2023 | SET, DateStyle, TimeZone, автоматически назначаемые имена колонкам-выражениям, SET TIME ZOME, RESET TIME ZONE, интервальный тип ПГ, ||
+|| 24 | comments | 7 | 7 | 100.0 | 25.05.2023 | ||
+|| 25 | expressions | 63 | 8 (+6) | 12.7 | 14.06.2023 | ||
+|| 26 | unicode | 13 | 4 | 30.77 | 03.05.2023 | ||
+|| 27 | create_table | 383 | 41 (+11) | 10.7 | 14.06.2023 | CREATE UNLOGGED TABLE, REINDEX, PREPARE ... SELECT, DEALLOCATE, \gexec, ||
+|| 28 | insert | 357 | 8 | 2.24 | 25.05.2023 | CREATE TEMP TABLE, ALTER TABLE, DROP TABLE, CREATE TYPE, CREATE RULE, \d+, DROP TYPE, create table...partition by range, create table ... partition of ..., tableoid::regclass, create or replace function, create operator, ||
+|| 29 | create_misc | 76 | 1 | 1.32 | 25.05.2023 | ||
+|| 30 | select | 85 | 1 (+1) | 1.18 | 14.06.2023 | порядок сортировки в виде ORDER BY поле using > или <, а также NULLS FIRST/LAST; ANALYZE, переменные enable_seqscan, enable_bitmapscan, enable_sort, whole-row Var referencing a subquery, подзапросы внутри values, INSERT INTO ... DEFAULT VALUES, Range sub select unsupported lateral, CREATE INDEX, DROP INDEX, explain (опции costs, analyze, timing, summary), SELECT 1 AS x ORDER BY x; CREATE FUNCTION, DROP FUNCTION, table inheritance, PARTITION BY ||
+|| 31 | select_into | 67 | 2 | 2.99 | 25.05.2023 | ||
+|| 32 | select_distinct | 46 | 1 (+1) | 2.17 | 14.06.2023 | ||
+|| 33 | select_distinct_on | 4 | 0 | 0.0 | 25.05.2023 | ||
+|| 34 | select_implicit | 44 | 11 | 25.0 | 25.05.2023 | ||
+|| 35 | select_having | 23 | 11 | 47.83 | 25.05.2023 | ||
+|| 36 | subselect | 234 | 2 | 0.85 | 25.05.2023 | ||
+|| 37 | union | 186 | 0 | 0.0 | 25.05.2023 | ||
+|| 38 | case | 63 | 13 | 20.63 | 25.05.2023 | implicit casts, create function volatile ||
+|| 39 | join | 591 | 23 (+1) | 3.89 | 14.06.2023 | ||
+|| 40 | aggregates | 416 | 0 | 0.0 | 25.05.2023 | ||
+|| 41 | arrays | 410 | 6 | 1.46 | 14.06.2023 | ||
+|| 42 | update | 288 | 13 | 4.51 | 25.05.2023 | :-переменные ||
+|| 43 | delete | 10 | 0 | 0.0 | 25.05.2023 | ||
+|| 44 | dbsize | 24 | 22 (+1) | 91.67 | 14.06.2023 | ||
+|| 45 | window | 298 | 2 (+2) | 0.67 | 14.06.2023 | ||
+|| 46 | functional_deps | 40 | 3 (+3) | 7.5 | 14.06.2023 | ||
+|| 47 | json | 454 | 44 (+2) | 9.69 | 14.06.2023 | ||
+|| 48 | jsonb | 1017 | 43 (+2) | 4.23 | 14.06.2023 | ||
+|| 49 | json_encoding | 42 | 42 | 100.0 | 29.05.2023 | ||
+|| 50 | jsonpath | 169 | 152 | 89.94 | 29.05.2023 | числа с точкой без целой части (например .1), литерал '00' ||
+|| 51 | jsonpath_encoding | 31 | 31 | 100.0 | 29.05.2023 | ||
+|| 52 | jsonb_jsonpath | 427 | 0 | 0.0 | 25.05.2023 | ||
+|| 53 | limit | 84 | 5 (+2) | 5.95 | 14.06.2023 | ||
+|| 54 | truncate | 193 | 3 | 1.55 | 25.05.2023 | ||
+|| 55 | alter_table | 1679 | 2 | 0.12 | 29.05.2023 | COMMENT ON TABLE ||
+|| 56 | xml | 234 | 3 | 1.28 | 25.05.2023 | \set VERBOSITY ||
+|#
diff --git a/ydb/library/yql/tests/postgresql/test.sh b/ydb/library/yql/tests/postgresql/test.sh
new file mode 100755
index 0000000000..27fe140d78
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/test.sh
@@ -0,0 +1,2 @@
+#! /bin/sh
+ya make -tt $@
diff --git a/ydb/library/yql/tests/postgresql/test_postgres.py b/ydb/library/yql/tests/postgresql/test_postgres.py
new file mode 100644
index 0000000000..c872abd22b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/test_postgres.py
@@ -0,0 +1,14 @@
+import yatest.common
+from common import find_sql_tests, run_sql_test
+
+DATA_PATH = yatest.common.test_source_path('cases')
+PGRUN = yatest.common.binary_path('ydb/library/yql/tools/pgrun/pgrun')
+
+
+def pytest_generate_tests(metafunc):
+ ids, tests = zip(*find_sql_tests(DATA_PATH))
+ metafunc.parametrize(['sql', 'out'], tests, ids=ids)
+
+
+def test(sql, out, tmp_path):
+ run_sql_test(sql, out, tmp_path, PGRUN)
diff --git a/ydb/library/yql/tests/postgresql/test_postgres_original.py b/ydb/library/yql/tests/postgresql/test_postgres_original.py
new file mode 100644
index 0000000000..e92af45d92
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/test_postgres_original.py
@@ -0,0 +1,14 @@
+import yatest.common
+from common import find_sql_tests, run_sql_test
+
+DATA_PATH = yatest.common.test_source_path('original/cases')
+PGRUN = yatest.common.binary_path('ydb/library/yql/tools/pgrun/pgrun')
+
+
+def pytest_generate_tests(metafunc):
+ ids, tests = zip(*find_sql_tests(DATA_PATH))
+ metafunc.parametrize(['sql', 'out'], tests, ids=ids)
+
+
+def test(sql, out, tmp_path):
+ run_sql_test(sql, out, tmp_path, PGRUN)
diff --git a/ydb/library/yql/tests/postgresql/update-status.sh b/ydb/library/yql/tests/postgresql/update-status.sh
new file mode 100755
index 0000000000..992808604b
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/update-status.sh
@@ -0,0 +1,3 @@
+#! /bin/sh
+
+../../tools/pg-make-test/update-test-status/update-test-status pg_tests.csv status.md
diff --git a/ydb/library/yql/tests/postgresql/ya.make b/ydb/library/yql/tests/postgresql/ya.make
new file mode 100644
index 0000000000..a999196219
--- /dev/null
+++ b/ydb/library/yql/tests/postgresql/ya.make
@@ -0,0 +1,41 @@
+PY3TEST()
+
+IF(ORIGINAL)
+
+TEST_SRCS(
+ test_postgres_original.py
+)
+
+DATA(
+ arcadia/ydb/library/yql/tests/postgresql/original/cases
+)
+
+ELSE()
+
+TEST_SRCS(
+ test_postgres.py
+)
+
+DATA(
+ arcadia/ydb/library/yql/tests/postgresql/cases
+)
+
+ENDIF()
+
+DATA(
+ arcadia/ydb/library/yql/tests/postgresql/patches
+)
+
+SIZE(MEDIUM)
+TIMEOUT(600)
+
+FORK_TESTS()
+FORK_SUBTESTS()
+SPLIT_FACTOR(10)
+
+DEPENDS(
+ ydb/library/yql/tests/postgresql/common
+ ydb/library/yql/tools/pgrun
+)
+
+END()
diff --git a/ydb/library/yql/tests/ya.make b/ydb/library/yql/tests/ya.make
index e9d720412a..302e284b22 100644
--- a/ydb/library/yql/tests/ya.make
+++ b/ydb/library/yql/tests/ya.make
@@ -2,4 +2,5 @@ RECURSE(
common
s-expressions
sql
+ postgresql
)
diff --git a/ydb/library/yql/tools/CMakeLists.txt b/ydb/library/yql/tools/CMakeLists.txt
index 1d7c5f669c..b662ca8893 100644
--- a/ydb/library/yql/tools/CMakeLists.txt
+++ b/ydb/library/yql/tools/CMakeLists.txt
@@ -10,6 +10,7 @@ add_subdirectory(astdiff)
add_subdirectory(dq)
add_subdirectory(dqrun)
add_subdirectory(mrjob)
+add_subdirectory(pgrun)
add_subdirectory(sql2yql)
add_subdirectory(sql_formatter)
add_subdirectory(udf_dep_stub)
diff --git a/ydb/library/yql/tools/pg-make-test/README.md b/ydb/library/yql/tools/pg-make-test/README.md
new file mode 100644
index 0000000000..12167806db
--- /dev/null
+++ b/ydb/library/yql/tools/pg-make-test/README.md
@@ -0,0 +1,10 @@
+# pg-make-test
+
+This tool extracts a subset of SQL statements which work as expected
+from Postgres' regression tests located in arcadia/yql/tests/postgresql/original/cases.
+The extracted tests are normally saved into arcadia/yql/tests/postgresql/cases.
+The tests could optionally get patched.
+
+For each .sql test file the tool expects a matching .out file(s) in the
+same dir. For each pair of .sql/.out files the working subset of SQL
+statements is written to the output dir.
diff --git a/ydb/library/yql/tools/pg-make-test/__main__.py b/ydb/library/yql/tools/pg-make-test/__main__.py
new file mode 100644
index 0000000000..017fd8e38c
--- /dev/null
+++ b/ydb/library/yql/tools/pg-make-test/__main__.py
@@ -0,0 +1,439 @@
+import os
+import os.path
+import sys
+import logging
+import subprocess
+from multiprocessing import Pool
+from pathlib import Path
+import tempfile
+import shutil
+import re
+import csv
+import click
+import patch
+from library.python.svn_version import svn_version
+from ydb.library.yql.tests.postgresql.common import get_out_files, Differ
+
+
+PROGRAM_NAME = "pg-make-test"
+RUNNER = "../pgrun/pgrun"
+SPLITTER = "../pgrun/pgrun split-statements"
+REPORT_FILE = "pg_tests.csv"
+LOGGER = None
+
+
+def get_logger(name, logfile, is_debug):
+ logger = logging.getLogger(name)
+ logger.setLevel(logging.DEBUG if is_debug else logging.INFO)
+
+ if logfile is not None:
+ logger.addHandler(logging.FileHandler(logfile, encoding="utf-8"))
+
+ return logger
+
+
+def setup_logging(logfile, is_debug):
+ global LOGGER
+
+ LOGGER = get_logger(__file__, logfile, is_debug)
+
+
+class Configuration:
+ def __init__(
+ self, srcdir, dstdir, patchdir, skip_tests, runner, splitter, report_path, parallel, logfile, is_debug
+ ):
+ self.srcdir = srcdir
+ self.dstdir = dstdir
+ self.patchdir = patchdir
+ self.skip_tests = skip_tests
+ self.runner = runner
+ self.splitter = splitter
+ self.report_path = report_path
+ self.parallel = parallel
+ self.logfile = logfile
+ self.is_debug = is_debug
+
+
+def save_strings(fname, lst):
+ with open(fname, 'wb') as f:
+ for line in lst:
+ f.write(line)
+
+
+def argwhere1(predicate, collection, default):
+ """Returns index of the first element in collection, which satisfies the predicate."""
+
+ try:
+ pos, _ = next(enumerate(item for item in collection if predicate(item)))
+ except StopIteration:
+ return default
+ else:
+ return pos
+
+
+class TestCaseBuilder:
+ def __init__(self, config):
+ self.config = config
+
+ def build(self, sqlfile):
+ is_split_logging = self.config.logfile is not None and self.config.parallel
+ if is_split_logging:
+ logger = get_logger(
+ sqlfile.stem,
+ f"{self.config.logfile.parent}/{sqlfile.stem}-{self.config.logfile.name}",
+ self.config.is_debug,
+ )
+ else:
+ logger = LOGGER
+
+ splitted_stmts = list(self.split_sql_file(sqlfile))
+ stmts_count = len(splitted_stmts)
+
+ ressqlfile = self.config.dstdir / sqlfile.name
+ resoutfile = ressqlfile.with_suffix('.out')
+
+ max_stmts_run = 0
+ ressql = None
+ resout = None
+
+ for outfile in get_out_files(sqlfile):
+ test_name = Path(sqlfile).name
+ LOGGER.info("Processing (%d) %s -> %s", os.getpid(), test_name, Path(outfile).name)
+ if is_split_logging:
+ logger.info("Processing (%d) %s -> %s", os.getpid(), test_name, Path(outfile).name)
+
+ with open(outfile, 'rb') as fout:
+ outdata = fout.readlines()
+
+ statements = list(self.split_out_file(splitted_stmts, outdata, logger))
+ for (s_sql, s_out) in statements:
+ logger.debug(
+ "<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n%s\n============================",
+ '\n'.join(str(sql_line) for sql_line in s_sql),
+ '\n'.join(str(out_line) for out_line in s_out),
+ )
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ test_out_name = Path(tempdir) / "test.out"
+ test_err_name = test_out_name.with_suffix(".err")
+
+ logger.debug("Running %s '%s' -> [%s]", self.config.runner, sqlfile, outfile)
+ with open(sqlfile, 'rb') as f, open(test_out_name, 'wb') as fout, open(test_err_name, 'wb') as ferr:
+ pi = subprocess.run(self.config.runner + ["--datadir", tempdir], stdin=f, stdout=fout, stderr=ferr)
+
+ if pi.returncode != 0:
+ logger.warning("%s returned error code %d", self.config.runner, pi.returncode)
+
+ with open(test_out_name, 'rb') as fresult:
+ out = fresult.readlines()
+ logger.debug("Run result:\n%s", str(b'\n'.join(out)))
+
+ real_statements = list(self.split_out_file(splitted_stmts, out, logger))
+ for (s_sql, s_out) in real_statements:
+ logger.debug(
+ "<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n%s\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n%s\n============================",
+ '\n'.join(str(sql_line) for sql_line in s_sql),
+ '\n'.join(str(out_line) for out_line in s_out),
+ )
+
+ stmts_run = 0
+ stmts = []
+ outs = []
+ assert len(statements) == len(real_statements), f"Incorrect statements split in {test_name}. Statements in out-file: {len(statements)}, statements in pgrun output: {len(real_statements)}"
+ for ((l_sql, out), (r_sql, res)) in zip(statements, real_statements):
+ if l_sql != r_sql:
+ logger.warning("out SQL <> pgrun SQL:\n <: %s\n >: %s", l_sql, r_sql)
+ break
+
+ if len(Differ.diff(b''.join(out), b''.join(res))) == 0:
+ stmts.extend(l_sql)
+ outs.extend(out)
+ stmts_run += 1
+ else:
+ logger.warning("out result differs from pgrun result:\n <<: %s\n >>: %s", out, res)
+
+ if max_stmts_run < stmts_run:
+ max_stmts_run = stmts_run
+ ressql = stmts
+ resout = outs
+
+ LOGGER.info('Case built: %s', sqlfile.name)
+ if is_split_logging:
+ logger.info('Case built: %s', sqlfile.name)
+
+ if ressql is not None and resout is not None:
+ save_strings(ressqlfile, ressql)
+ save_strings(resoutfile, resout)
+
+ return Path(sqlfile).stem, stmts_count, stmts_run, round(stmts_run * 100 / stmts_count, 2)
+
+ def split_sql_file(self, sqlfile):
+ with open(sqlfile, "rb") as f:
+ pi = subprocess.run(self.config.splitter, stdin=f, stdout=subprocess.PIPE, stderr=sys.stderr, check=True)
+
+ lines = iter(pi.stdout.splitlines(keepends=True))
+ delimiter = next(lines)
+
+ cur_stmt = []
+
+ for line in lines:
+ if line == delimiter:
+ yield cur_stmt
+ cur_stmt = []
+ continue
+
+ cur_stmt.append(line)
+
+ if cur_stmt:
+ yield cur_stmt
+
+ reCopyFromStdin = re.compile(b"COPY[^;]+FROM std(?:in|out)", re.I)
+
+ def split_out_file(self, stmts, outdata, logger):
+ """Matches SQL & its output in outdata with individual SQL statements in stmts.
+
+ Args:
+ stmts ([[str]]): Iterator of SQL statements.
+ outdata ([str]): Contents of out-file.
+
+ Yields:
+ [([str], [str])]: Sequence of matching parts in sql & out files.
+ """
+
+ cur_stmt_out = []
+ out_iter = enumerate(outdata)
+ echo_none = False
+ in_copy_from = False
+ no_more_stmts_expected = False
+
+ try:
+ line_no, out_line = next(out_iter)
+ except StopIteration:
+ no_more_stmts_expected = True
+
+ in_line_no = 0
+
+ for i, stmt in enumerate(stmts):
+ if no_more_stmts_expected:
+ yield stmt, cur_stmt_out
+ cur_stmt_out = []
+ continue
+
+ try:
+ for stmt_line in stmt:
+ in_line_no += 1
+
+ if echo_none:
+ if stmt_line.startswith(b"\\set ECHO ") and not stmt_line.rstrip().endswith(b"none"):
+ echo_none = False
+ continue
+
+ if stmt_line.startswith(b"\\set ECHO none"):
+ echo_none = True
+
+ # We skip data lines of copy ... from stdin, since they aren't in the out-file
+ if in_copy_from:
+ if stmt_line.startswith(b"\\."):
+ in_copy_from = False
+ continue
+
+ if self.reCopyFromStdin.match(stmt_line):
+ in_copy_from = True
+
+ logger.debug("Line %d: %s -> %d: %s", in_line_no, stmt_line, line_no, out_line)
+
+ if stmt_line != out_line:
+ raise Exception(f"Mismatch at {line_no}: '{stmt_line}' != '{out_line}'")
+ cur_stmt_out.append(out_line)
+
+ line_no, out_line = next(out_iter)
+
+ assert not in_copy_from, f"Missing copy from stdout table end marker \\. at line {in_line_no}"
+
+ if echo_none:
+ continue
+
+ try:
+ next_stmt = stmts[i + 1]
+ except IndexError:
+ cur_stmt_out.append(out_line)
+ cur_stmt_out.extend(l for _, l in out_iter)
+ logger.debug("Last out:\n%s", str(b'\n'.join(cur_stmt_out)))
+ yield stmt, cur_stmt_out
+ return
+
+ while True:
+ while out_line != next_stmt[0]:
+ logger.debug("Out: %s -> %s", next_stmt[0], out_line)
+ cur_stmt_out.append(out_line)
+ line_no, out_line = next(out_iter)
+
+ last_pos = argwhere1(lambda s: self.reCopyFromStdin.match(s), next_stmt, default=len(next_stmt))
+ maybe_next_stmt = outdata[line_no : line_no + last_pos]
+ logger.debug("Left: %s\nRight: %s", next_stmt, maybe_next_stmt)
+ if next_stmt[:last_pos] == maybe_next_stmt:
+ break
+
+ cur_stmt_out.append(out_line)
+ line_no, out_line = next(out_iter)
+ yield stmt, cur_stmt_out
+ cur_stmt_out = []
+ except StopIteration:
+ no_more_stmts_expected = True
+ yield stmt, cur_stmt_out
+ cur_stmt_out = []
+
+
+def load_patches(patchdir):
+ for p in patchdir.glob("*.patch"):
+ ps = patch.fromfile(p)
+ if ps is not False:
+ yield p.stem, ps
+
+
+def patch_cases(cases, patches, patchdir):
+ for i, sql_full_name in enumerate(cases):
+ sql_name = sql_full_name.name
+ p = patches.get(sql_name, None)
+ if p is None:
+ continue
+
+ patched_sql_full_name = patchdir / sql_name
+ shutil.copyfile(sql_full_name, patched_sql_full_name)
+ success = p.apply(root=patchdir)
+ if not success:
+ LOGGER.warning(
+ "Failed to patch %s testcase. Original version %s will be used", patched_sql_full_name, sql_full_name
+ )
+ continue
+
+ out_full_name = sql_full_name.with_suffix('.out')
+ out_name = out_full_name.name
+ patched_out_full_name = patchdir / out_name
+ # .out file should be in the same dir as .sql file, so copy it anyway
+ shutil.copyfile(out_full_name, patched_out_full_name)
+
+ p = patches.get(out_name, None)
+ if p is None:
+ LOGGER.warning(
+ "Out-file patch for %s testcase is not found. Original version %s will be used",
+ patched_sql_full_name,
+ sql_full_name,
+ )
+ continue
+
+ success = p.apply(root=patchdir)
+ if not success:
+ LOGGER.warning(
+ "Failed to patch out-file for %s testcase. Original version %s will be used",
+ patched_sql_full_name,
+ sql_full_name,
+ )
+ continue
+
+ cases[i] = patched_sql_full_name
+ LOGGER.info("Patched", sql_full_name, "->", cases[i])
+
+
+@click.command()
+@click.argument("cases", type=str, nargs=-1)
+@click.option(
+ "--srcdir",
+ "-i",
+ help="Directory with SQL suits to process",
+ required=True,
+ multiple=False,
+ type=click.Path(exists=True, file_okay=False, resolve_path=True, path_type=Path),
+)
+@click.option(
+ "--dstdir",
+ "-o",
+ help="Output directory",
+ required=True,
+ multiple=False,
+ type=click.Path(exists=True, file_okay=False, resolve_path=True, writable=True, path_type=Path),
+)
+@click.option(
+ "--patchdir",
+ "-p",
+ help="Directory with patches for SQL suits",
+ required=False,
+ multiple=False,
+ type=click.Path(exists=True, file_okay=False, resolve_path=True, path_type=Path),
+)
+@click.option("--skip", "-s", help="Comma-separated list of testsuits to skip", multiple=False, type=click.STRING)
+@click.option("--runner", help="Test runner", default=RUNNER, required=False, multiple=False, type=click.STRING)
+@click.option(
+ "--splitter", help="SQL statements splitter", default=SPLITTER, required=False, multiple=False, type=click.STRING
+)
+@click.option(
+ "--report",
+ "-r",
+ help="Report file name",
+ default=REPORT_FILE,
+ required=False,
+ multiple=False,
+ type=click.Path(dir_okay=False, resolve_path=True, writable=True, path_type=Path),
+)
+@click.option("--parallel/--no-parallel", help="Tests build mode", default=True, required=False)
+@click.option(
+ "--logfile",
+ "-l",
+ help="Log file",
+ default=None,
+ required=False,
+ multiple=False,
+ type=click.Path(dir_okay=False, resolve_path=True, writable=True, path_type=Path),
+)
+@click.option("--debug/--no-debug", help="Logs verbosity", default=False, required=False)
+@click.version_option(version=svn_version(), prog_name=PROGRAM_NAME)
+def cli(cases, srcdir, dstdir, patchdir, skip, runner, splitter, report, parallel, logfile, debug):
+ setup_logging(logfile, debug)
+
+ if skip is not None:
+ skip_tests = frozenset(
+ test_name if not (test_name := s.strip()).endswith(".sql") else test_name[:-4] for s in skip.split(",")
+ )
+ else:
+ skip_tests = frozenset()
+
+ config = Configuration(
+ srcdir, dstdir, patchdir, skip_tests, runner.split(), splitter.split(), report, parallel, logfile, debug
+ )
+
+ if not cases:
+ cases = [c for c in config.srcdir.glob("*.sql") if c.stem not in skip_tests]
+ else:
+ cases = [Path(c) if os.path.isabs(c) else config.srcdir / c for c in cases]
+
+ if config.patchdir is not None:
+ patches = dict(load_patches(config.patchdir))
+ LOGGER.info("Patches: %s", ", ".join(p for p in patches))
+ else:
+ patches = {}
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ patch_cases(cases, patches, Path(tempdir))
+
+ LOGGER.info("Test cases: %s", ", ".join(c.as_posix() for c in cases))
+
+ builder = TestCaseBuilder(config)
+ if config.parallel:
+ with Pool() as pool:
+ results = list(pool.imap_unordered(builder.build, cases))
+ else:
+ results = [builder.build(c) for c in cases]
+
+ with open(config.report_path, "w", newline='') as f:
+ writer = csv.writer(f, dialect="excel")
+ writer.writerow(["testcase", "statements", "successful", "ratio"])
+ writer.writerows(results)
+
+
+if __name__ == "__main__":
+ try:
+ cli()
+ finally:
+ logging.shutdown()
+
+# vim:tw=78:sw=4:et:ai:si
diff --git a/ydb/library/yql/tools/pg-make-test/update-test-status/__main__.py b/ydb/library/yql/tools/pg-make-test/update-test-status/__main__.py
new file mode 100644
index 0000000000..0a1f9852a4
--- /dev/null
+++ b/ydb/library/yql/tools/pg-make-test/update-test-status/__main__.py
@@ -0,0 +1,88 @@
+import os
+from collections import namedtuple
+from datetime import date
+import csv
+from pathlib import Path
+import re
+import click
+
+
+TestCase = namedtuple('TestCase', 'statements successful ratio')
+
+
+def load_testcases(filename):
+ testcases = {}
+
+ with open(filename, newline='') as infile:
+ reader = csv.DictReader(infile, delimiter=',')
+ for row in reader:
+ testcases[row['testcase']] = TestCase(row['statements'], row['successful'], row['ratio'])
+
+ return testcases
+
+
+reIncrement = re.compile(r'(\d+)(?: ?\(+ ?(\d+)\))?')
+
+
+def format_success_count(old, new):
+ m = reIncrement.search(old)
+ if m is None:
+ return str(new)
+
+ old_count = int(m[1])
+ new_count = int(new)
+
+ if old_count < new_count:
+ return f'{new_count} (+{new_count - old_count})'
+
+ # we need it to keep old increments, if count stays the same
+ if old_count == new_count:
+ return str(old)
+
+ return str(new)
+
+
+reTableRow = re.compile(r'^\|\| \d+ \|')
+
+
+@click.command()
+@click.argument("reportfile", type=str, nargs=1)
+@click.argument("statusfile", type=str, nargs=1)
+def cli(reportfile, statusfile):
+ today = date.today().strftime('%d.%m.%Y')
+
+ testcases = load_testcases(reportfile)
+
+ outfile = Path(statusfile).with_suffix('.tmp')
+ with open(statusfile, 'r') as fin, open(outfile, 'w') as fout:
+ for line in fin:
+ if reTableRow.match(line):
+ testno, testname, stmts_count, success_count, ratio, when_updated, notes = line.split(' | ')
+ try:
+ testcase = testcases[testname]
+ new_success_count = format_success_count(success_count, testcase.successful)
+ fout.write(
+ ' | '.join(
+ [
+ testno,
+ testname,
+ testcase.statements,
+ new_success_count,
+ testcase.ratio,
+ today if new_success_count != success_count else when_updated,
+ notes,
+ ]
+ )
+ )
+ except KeyError:
+ fout.write(line)
+ else:
+ fout.write(line)
+
+ oldfile = Path(statusfile).with_suffix('.old')
+ os.rename(statusfile, oldfile)
+ os.rename(outfile, statusfile)
+
+
+if __name__ == '__main__':
+ cli()
diff --git a/ydb/library/yql/tools/pg-make-test/update-test-status/ya.make b/ydb/library/yql/tools/pg-make-test/update-test-status/ya.make
new file mode 100644
index 0000000000..353b8acfc5
--- /dev/null
+++ b/ydb/library/yql/tools/pg-make-test/update-test-status/ya.make
@@ -0,0 +1,11 @@
+PY3_PROGRAM(update-test-status)
+
+PY_SRCS(
+ __main__.py
+)
+
+PEERDIR(
+ contrib/python/click
+)
+
+END()
diff --git a/ydb/library/yql/tools/pg-make-test/ya.make b/ydb/library/yql/tools/pg-make-test/ya.make
new file mode 100644
index 0000000000..a2ccfaf752
--- /dev/null
+++ b/ydb/library/yql/tools/pg-make-test/ya.make
@@ -0,0 +1,21 @@
+PY3_PROGRAM(pg-make-test)
+
+PY_SRCS(
+ __main__.py
+)
+
+PEERDIR(
+ contrib/python/click
+ contrib/python/PyYAML
+ contrib/python/patch
+ library/python/svn_version
+ ydb/library/yql/tests/postgresql/common
+)
+
+END()
+
+RECURSE(
+ ../pgrun
+ update-test-status
+)
+
diff --git a/ydb/library/yql/tools/pgrun/CMakeLists.darwin-x86_64.txt b/ydb/library/yql/tools/pgrun/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..3f9820811f
--- /dev/null
+++ b/ydb/library/yql/tools/pgrun/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,76 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_rescompiler_bin
+ TOOL_rescompiler_dependency
+ tools/rescompiler/bin
+ rescompiler
+)
+
+add_executable(pgrun)
+target_compile_options(pgrun PRIVATE
+ -DUSE_CURRENT_UDF_ABI_VERSION
+)
+target_link_libraries(pgrun PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ contrib-libs-protobuf
+ library-cpp-getopt
+ contrib-libs-fmt
+ library-cpp-yson
+ yql-sql-pg
+ yql-core-facade
+ yql-core-file_storage
+ core-file_storage-proto
+ core-file_storage-http_download
+ core-services-mounts
+ minikql-comp_nodes-llvm
+ library-yql-protos
+ udf-service-exception_policy
+ yql-utils-backtrace
+ library-yql-core
+ sql-v1-format
+ providers-common-codec
+ providers-common-comp_nodes
+ providers-common-proto
+ providers-common-provider
+ providers-common-udf_resolve
+ providers-dq-provider
+ yt-gateway-file
+ yql-core-url_preprocessing
+ yql-parser-pg_wrapper
+ library-cpp-resource
+)
+target_link_options(pgrun PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(pgrun PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/yql/tools/pgrun/pgrun.cpp
+ ${CMAKE_BINARY_DIR}/ydb/library/yql/tools/pgrun/bad9f76ae5ff92ff1d60596e5ab477d0.cpp
+)
+resources(pgrun
+ ${CMAKE_BINARY_DIR}/ydb/library/yql/tools/pgrun/bad9f76ae5ff92ff1d60596e5ab477d0.cpp
+ INPUTS
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/gateways.conf
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/fs.conf
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/fs_arc.conf
+ KEYS
+ gateways.conf
+ fs.conf
+ fs_arc.conf
+)
+target_allocator(pgrun
+ cpp-malloc-jemalloc
+)
+vcs_info(pgrun)
diff --git a/ydb/library/yql/tools/pgrun/CMakeLists.linux-aarch64.txt b/ydb/library/yql/tools/pgrun/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..5dcfdb63b4
--- /dev/null
+++ b/ydb/library/yql/tools/pgrun/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,80 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_rescompiler_bin
+ TOOL_rescompiler_dependency
+ tools/rescompiler/bin
+ rescompiler
+)
+
+add_executable(pgrun)
+target_compile_options(pgrun PRIVATE
+ -DUSE_CURRENT_UDF_ABI_VERSION
+)
+target_link_libraries(pgrun PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ contrib-libs-protobuf
+ library-cpp-getopt
+ contrib-libs-fmt
+ library-cpp-yson
+ yql-sql-pg
+ yql-core-facade
+ yql-core-file_storage
+ core-file_storage-proto
+ core-file_storage-http_download
+ core-services-mounts
+ minikql-comp_nodes-llvm
+ library-yql-protos
+ udf-service-exception_policy
+ yql-utils-backtrace
+ library-yql-core
+ sql-v1-format
+ providers-common-codec
+ providers-common-comp_nodes
+ providers-common-proto
+ providers-common-provider
+ providers-common-udf_resolve
+ providers-dq-provider
+ yt-gateway-file
+ yql-core-url_preprocessing
+ yql-parser-pg_wrapper
+ library-cpp-resource
+)
+target_link_options(pgrun PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+ -lutil
+)
+target_sources(pgrun PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/yql/tools/pgrun/pgrun.cpp
+ ${CMAKE_BINARY_DIR}/ydb/library/yql/tools/pgrun/bad9f76ae5ff92ff1d60596e5ab477d0.cpp
+)
+resources(pgrun
+ ${CMAKE_BINARY_DIR}/ydb/library/yql/tools/pgrun/bad9f76ae5ff92ff1d60596e5ab477d0.cpp
+ INPUTS
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/gateways.conf
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/fs.conf
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/fs_arc.conf
+ KEYS
+ gateways.conf
+ fs.conf
+ fs_arc.conf
+)
+target_allocator(pgrun
+ cpp-malloc-jemalloc
+)
+vcs_info(pgrun)
diff --git a/ydb/library/yql/tools/pgrun/CMakeLists.linux-x86_64.txt b/ydb/library/yql/tools/pgrun/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..c95061b781
--- /dev/null
+++ b/ydb/library/yql/tools/pgrun/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,81 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_rescompiler_bin
+ TOOL_rescompiler_dependency
+ tools/rescompiler/bin
+ rescompiler
+)
+
+add_executable(pgrun)
+target_compile_options(pgrun PRIVATE
+ -DUSE_CURRENT_UDF_ABI_VERSION
+)
+target_link_libraries(pgrun PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ contrib-libs-protobuf
+ library-cpp-getopt
+ contrib-libs-fmt
+ library-cpp-yson
+ yql-sql-pg
+ yql-core-facade
+ yql-core-file_storage
+ core-file_storage-proto
+ core-file_storage-http_download
+ core-services-mounts
+ minikql-comp_nodes-llvm
+ library-yql-protos
+ udf-service-exception_policy
+ yql-utils-backtrace
+ library-yql-core
+ sql-v1-format
+ providers-common-codec
+ providers-common-comp_nodes
+ providers-common-proto
+ providers-common-provider
+ providers-common-udf_resolve
+ providers-dq-provider
+ yt-gateway-file
+ yql-core-url_preprocessing
+ yql-parser-pg_wrapper
+ library-cpp-resource
+)
+target_link_options(pgrun PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+ -lutil
+)
+target_sources(pgrun PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/yql/tools/pgrun/pgrun.cpp
+ ${CMAKE_BINARY_DIR}/ydb/library/yql/tools/pgrun/bad9f76ae5ff92ff1d60596e5ab477d0.cpp
+)
+resources(pgrun
+ ${CMAKE_BINARY_DIR}/ydb/library/yql/tools/pgrun/bad9f76ae5ff92ff1d60596e5ab477d0.cpp
+ INPUTS
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/gateways.conf
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/fs.conf
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/fs_arc.conf
+ KEYS
+ gateways.conf
+ fs.conf
+ fs_arc.conf
+)
+target_allocator(pgrun
+ cpp-malloc-jemalloc
+)
+vcs_info(pgrun)
diff --git a/ydb/library/yql/tools/pgrun/CMakeLists.txt b/ydb/library/yql/tools/pgrun/CMakeLists.txt
new file mode 100644
index 0000000000..f8b31df0c1
--- /dev/null
+++ b/ydb/library/yql/tools/pgrun/CMakeLists.txt
@@ -0,0 +1,17 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/ydb/library/yql/tools/pgrun/CMakeLists.windows-x86_64.txt b/ydb/library/yql/tools/pgrun/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..29b9db9bca
--- /dev/null
+++ b/ydb/library/yql/tools/pgrun/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,69 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_rescompiler_bin
+ TOOL_rescompiler_dependency
+ tools/rescompiler/bin
+ rescompiler
+)
+
+add_executable(pgrun)
+target_compile_options(pgrun PRIVATE
+ -DUSE_CURRENT_UDF_ABI_VERSION
+)
+target_link_libraries(pgrun PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ contrib-libs-protobuf
+ library-cpp-getopt
+ contrib-libs-fmt
+ library-cpp-yson
+ yql-sql-pg
+ yql-core-facade
+ yql-core-file_storage
+ core-file_storage-proto
+ core-file_storage-http_download
+ core-services-mounts
+ minikql-comp_nodes-llvm
+ library-yql-protos
+ udf-service-exception_policy
+ yql-utils-backtrace
+ library-yql-core
+ sql-v1-format
+ providers-common-codec
+ providers-common-comp_nodes
+ providers-common-proto
+ providers-common-provider
+ providers-common-udf_resolve
+ providers-dq-provider
+ yt-gateway-file
+ yql-core-url_preprocessing
+ yql-parser-pg_wrapper
+ library-cpp-resource
+)
+target_sources(pgrun PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/yql/tools/pgrun/pgrun.cpp
+ ${CMAKE_BINARY_DIR}/ydb/library/yql/tools/pgrun/bad9f76ae5ff92ff1d60596e5ab477d0.cpp
+)
+resources(pgrun
+ ${CMAKE_BINARY_DIR}/ydb/library/yql/tools/pgrun/bad9f76ae5ff92ff1d60596e5ab477d0.cpp
+ INPUTS
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/gateways.conf
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/fs.conf
+ ${CMAKE_SOURCE_DIR}/yql/cfg/local/fs_arc.conf
+ KEYS
+ gateways.conf
+ fs.conf
+ fs_arc.conf
+)
+target_allocator(pgrun
+ system_allocator
+)
+vcs_info(pgrun)
diff --git a/ydb/library/yql/tools/pgrun/pgrun.cpp b/ydb/library/yql/tools/pgrun/pgrun.cpp
new file mode 100644
index 0000000000..0802f519f1
--- /dev/null
+++ b/ydb/library/yql/tools/pgrun/pgrun.cpp
@@ -0,0 +1,1099 @@
+#include <ydb/library/yql/utils/backtrace/backtrace.h>
+#include <ydb/library/yql/minikql/invoke_builtins/mkql_builtins.h>
+#include <ydb/library/yql/minikql/mkql_function_registry.h>
+#include <ydb/library/yql/core/facade/yql_facade.h>
+#include <ydb/library/yql/core/yql_opt_utils.h>
+#include <ydb/library/yql/core/yql_expr_optimize.h>
+#include <ydb/library/yql/providers/yt/gateway/file/yql_yt_file.h>
+#include <ydb/library/yql/providers/yt/gateway/file/yql_yt_file_services.h>
+#include <ydb/library/yql/providers/common/provider/yql_provider_names.h>
+#include <ydb/library/yql/providers/common/proto/gateways_config.pb.h>
+#include <ydb/library/yql/providers/yt/provider/yql_yt_provider.h>
+#include <ydb/library/yql/public/issue/yql_issue.h>
+
+#include <library/cpp/getopt/last_getopt.h>
+#include <library/cpp/yson/public.h>
+#include <library/cpp/yt/yson_string/string.h>
+#include <fmt/format.h>
+
+#include <util/system/user.h>
+#include <util/stream/file.h>
+#include <util/system/fs.h>
+#include <util/folder/path.h>
+#include <util/folder/tempdir.h>
+#include <util/string/split.h>
+#include <util/generic/yexception.h>
+#include <util/generic/iterator.h>
+#include <util/generic/string.h>
+#include <util/generic/strbuf.h>
+#include <library/cpp/yson/parser.h>
+#include <library/cpp/yson/node/node.h>
+#include <library/cpp/yson/node/node_builder.h>
+
+using namespace NYql;
+using namespace NKikimr::NMiniKQL;
+
+namespace NMiniKQL = NKikimr::NMiniKQL;
+
+const ui32 PRETTY_FLAGS = NYql::TAstPrintFlags::PerLine | NYql::TAstPrintFlags::ShortQuote |
+ NYql::TAstPrintFlags::AdaptArbitraryContent;
+
+bool IsEscapedChar(const TString& s, size_t pos) {
+ bool escaped = false;
+ while (s[--pos] == '\\') {
+ escaped = !escaped;
+ }
+ return escaped;
+}
+
+class TStatementIterator final
+ : public TInputRangeAdaptor<TStatementIterator>
+{
+ enum class State {
+ InOperator,
+ EndOfOperator,
+ LineComment,
+ BlockComment,
+ QuotedIdentifier,
+ StringLiteral,
+ EscapedStringLiteral,
+ DollarStringLiteral,
+ InMetaCommand,
+ InCopyFromStdin,
+ InVar,
+ };
+
+public:
+ TStatementIterator(const TString&& program)
+ : Program_(std::move(program))
+ , Cur_()
+ , Pos_(0)
+ , State_(State::InOperator)
+ , AtStmtStart_(true)
+ , Mode_(State::InOperator)
+ , Depth_(0)
+ , Tag_()
+ , StandardConformingStrings_(true)
+ {
+ }
+
+ static bool IsInWsSignificantState(State state) {
+ switch (state) {
+ case State::QuotedIdentifier:
+ case State::StringLiteral:
+ case State::EscapedStringLiteral:
+ case State::DollarStringLiteral:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ TString RemoveEmptyLines(const TString& s, bool inStatement) {
+ if (s.empty()) {
+ return {};
+ }
+
+ TStringBuilder sb;
+ auto isFirstLine = true;
+
+ if (inStatement && s[0] == '\n') {
+ sb << '\n';
+ }
+
+ for (TStringBuf line : StringSplitter(s).SplitBySet("\r\n").SkipEmpty()) {
+ if (isFirstLine) {
+ isFirstLine = false;
+ } else {
+ sb << '\n';
+ }
+ sb << line;
+ }
+ return sb;
+ }
+
+ const TString* Next()
+ {
+ if (TStringBuf::npos == Pos_)
+ return nullptr;
+
+ size_t startPos = Pos_;
+ size_t curPos = Pos_;
+ size_t endPos;
+ auto prevState = State_;
+
+ TStringBuilder stmt;
+ TStringBuilder rawStmt;
+ auto inStatement = false;
+
+ while (!CallParser(startPos)) {
+ endPos = (TStringBuf::npos != Pos_) ? Pos_ : Program_.length();
+
+ TStringBuf part{&Program_[curPos], endPos - curPos};
+
+ if (IsInWsSignificantState(prevState)) {
+ if (!rawStmt.empty()) {
+ stmt << RemoveEmptyLines(rawStmt, inStatement);
+ rawStmt.clear();
+ }
+ stmt << part;
+ inStatement = true;
+ } else {
+ rawStmt << part;
+ }
+ curPos = endPos;
+ prevState = State_;
+ }
+ endPos = (TStringBuf::npos != Pos_) ? Pos_ : Program_.length();
+
+ TStringBuf part{&Program_[curPos], endPos - curPos};
+
+ if (IsInWsSignificantState(prevState)) {
+ if (!rawStmt.empty()) {
+ stmt << RemoveEmptyLines(rawStmt, inStatement);
+ rawStmt.clear();
+ }
+ stmt << part;
+ inStatement = true;
+ } else {
+ rawStmt << part;
+ }
+
+#if 0
+ if (0 < Pos_ && !(Pos_ == TStringBuf::npos || Program_[Pos_-1] == '\n')) {
+ Cerr << "Last char: '" << Program_[Pos_-1] << "'\n";
+ }
+#endif
+
+ stmt << RemoveEmptyLines(rawStmt, inStatement);
+ // inv: Pos_ is at the start of next token
+ if (startPos == endPos)
+ return nullptr;
+
+ stmt << '\n';
+ Cur_ = stmt;
+
+ ApplyStateFromStatement(Cur_);
+
+ return &Cur_;
+ }
+
+private:
+
+ // States:
+ // - in-operator
+ // - line comment
+ // - block comment
+ // - quoted identifier (U& quoted identifier is no difference)
+ // - string literal (U& string literal is the same for our purpose)
+ // - E string literal
+ // - $ string literal
+ // - end-of-operator
+
+ // Rules:
+ // - in-operator
+ // -- -> next: line comment
+ // /* -> depth := 1, next: block comment
+ // " -> next: quoted identifier
+ // ' -> next: string literal
+ // E' -> next: E string literal
+ // $tag$, not preceded by alnum char (a bit of simplification here but sufficient) -> tag := tag, next: $ string literal
+ // ; -> current_mode := end-of-operator, next: end-of-operator
+
+ // - line comment
+ // EOL -> next: current_mode
+
+ // - block comment
+ // /* -> ++depth
+ // */ -> --depth, if (depth == 0) -> next: current_mode
+
+ // - quoted identifier
+ // " -> next: in-operator
+
+ // - string literal
+ // ' -> next: in-operator
+
+ // - E string literal
+ // ' -> if not preceeded by \ next: in-operator
+
+ // - $ string literal
+ // $tag$ -> next: in-operator
+
+ // - end-of-operator
+ // -- -> next: line comment, just once
+ // /* -> depth := 1, next: block comment
+ // non-space char -> unget, emit, current_mode := in-operator, next: in-operator
+
+ // In every state:
+ // EOS -> emit if consumed part of the input is not empty
+
+ bool SaveDollarTag() {
+ if (Pos_ + 1 == Program_.length())
+ return false;
+
+ auto p = Program_.cbegin() + (Pos_ + 1);
+
+ if (std::isdigit(*p))
+ return false;
+
+ for (;p != Program_.cend(); ++p) {
+ if (*p == '$') {
+ auto bp = &Program_[Pos_];
+ auto l = p - bp;
+ Tag_ = TStringBuf(bp, l + 1);
+ Pos_ += l;
+
+ return true;
+ }
+ if (!(std::isalpha(*p) || std::isdigit(*p) || *p == '_'))
+ return false;
+ }
+ return false;
+ }
+
+ bool IsCopyFromStdin(size_t startPos, size_t endPos) {
+ TString stmt(Program_, startPos, endPos - startPos + 1);
+ stmt.to_upper();
+ // FROM STDOUT is used in insert.sql testcase, probably a bug
+ return stmt.Contains(" FROM STDIN") || stmt.Contains(" FROM STDOUT");
+ }
+
+ bool InOperatorParser(size_t startPos) {
+ // need \ to detect psql meta-commands
+ static const TString midNextTokens{"'\";-/$\\"};
+ // need : for basic psql-vars support
+ static const TString initNextTokens{"'\";-/$\\:"};
+ const auto& nextTokens = (AtStmtStart_) ? initNextTokens : midNextTokens;
+
+ if (AtStmtStart_) {
+ Pos_ = Program_.find_first_not_of(" \t\n\r\v", Pos_);
+ if (TString::npos == Pos_) {
+ return true;
+ }
+ }
+
+ Pos_ = Program_.find_first_of(nextTokens, Pos_);
+
+ if (TString::npos == Pos_) {
+ return true;
+ }
+
+ switch (Program_[Pos_]) {
+ case '\'':
+ State_ = (!StandardConformingStrings_ || 0 < Pos_ && std::toupper(Program_[Pos_ - 1]) == 'E')
+ ? State::EscapedStringLiteral
+ : State::StringLiteral;
+ break;
+
+ case '"':
+ State_ = State::QuotedIdentifier;
+ break;
+
+ case ';':
+ State_ = Mode_ = IsCopyFromStdin(startPos, Pos_)
+ ? State::InCopyFromStdin
+ : State::EndOfOperator;
+ break;
+
+ case '-':
+ if (Pos_ < Program_.length() && Program_[Pos_ + 1] == '-') {
+ State_ = State::LineComment;
+ ++Pos_;
+ }
+ break;
+
+ case '/':
+ if (Pos_ < Program_.length() && Program_[Pos_ + 1] == '*') {
+ State_ = State::BlockComment;
+ ++Depth_;
+ ++Pos_;
+ }
+ break;
+
+ case '$':
+ if (Pos_ == 0 || std::isspace(Program_[Pos_ - 1])) {
+ if (SaveDollarTag())
+ State_ = State::DollarStringLiteral;
+ }
+ break;
+
+ case '\\':
+ if (AtStmtStart_) {
+ State_ = State::InMetaCommand;
+ } else if (Program_.Contains("\\gexec", Pos_)) {
+ Pos_ += 6;
+ return Emit(Program_[Pos_] == '\n');
+ }
+ break;
+
+ case ':':
+ if (Pos_ == 0 || Program_[Pos_-1] == '\n') {
+ State_ = State::InVar;
+ }
+ break;
+ }
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ return false;
+ }
+
+ bool Emit(bool atEol) {
+ State_ = Mode_ = State::InOperator;
+ AtStmtStart_ = true;
+
+ if (atEol) {
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+ }
+ // else do not consume as we're expected to be on the first char of the next statement
+
+ return true;
+ }
+
+ bool EndOfOperatorParser() {
+ const auto p = std::find_if_not(Program_.cbegin() + Pos_, Program_.cend(), [](const auto& c) {
+ return c == ' ' || c == '\t' || c == '\r';
+ });
+
+ if (p == Program_.cend()) {
+ Pos_ = TStringBuf::npos;
+ return true;
+ }
+
+ Pos_ = p - Program_.cbegin();
+
+ switch (*p) {
+ case '-':
+ if (Pos_ < Program_.length() && Program_[Pos_ + 1] == '-') {
+ State_ = State::LineComment;
+ ++Pos_;
+ }
+ break;
+
+ case '/':
+ if (Pos_ < Program_.length() && Program_[Pos_ + 1] == '*') {
+ State_ = State::BlockComment;
+ ++Depth_;
+ ++Pos_;
+ }
+ break;
+
+ default:
+ return Emit(*p == '\n');
+ }
+
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ return false;
+ }
+
+ bool LineCommentParser() {
+ Pos_ = Program_.find('\n', Pos_);
+
+ if (TString::npos == Pos_)
+ return true;
+
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ if (Mode_ == State::EndOfOperator) {
+ return Emit(false);
+ }
+
+ State_ = Mode_;
+
+ return false;
+ }
+
+ bool BlockCommentParser() {
+ Pos_ = Program_.find_first_of("*/", Pos_);
+
+ if (TString::npos == Pos_)
+ return true;
+
+ switch(Program_[Pos_]) {
+ case '/':
+ if (Pos_ < Program_.length() && Program_[Pos_ + 1] == '*') {
+ ++Depth_;
+ ++Pos_;
+ }
+ break;
+
+ case '*':
+ if (Pos_ < Program_.length() && Program_[Pos_ + 1] == '/') {
+ --Depth_;
+ ++Pos_;
+
+ if (0 == Depth_) {
+ State_ = Mode_;
+ }
+ }
+ break;
+ }
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ return false;
+ }
+
+ bool QuotedIdentifierParser() {
+ Pos_ = Program_.find('"', Pos_);
+
+ if (TString::npos == Pos_)
+ return true;
+
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ State_ = State::InOperator;
+ AtStmtStart_ = false;
+
+ return false;
+ }
+
+ bool StringLiteralParser() {
+ Pos_ = Program_.find('\'', Pos_);
+
+ if (TString::npos == Pos_)
+ return true;
+
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ State_ = State::InOperator;
+ AtStmtStart_ = false;
+
+ return false;
+ }
+
+ bool EscapedStringLiteralParser() {
+ Pos_ = Program_.find('\'', Pos_);
+
+ if (TString::npos == Pos_)
+ return true;
+
+ if (IsEscapedChar(Program_, Pos_)) {
+ ++Pos_;
+ return false;
+ }
+
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ State_ = State::InOperator;
+ AtStmtStart_ = false;
+
+ return false;
+ }
+
+ bool DollarStringLiteralParser() {
+ Y_ENSURE(Tag_ != nullptr && 2 <= Tag_.length());
+
+ Pos_ = Program_.find(Tag_, Pos_);
+
+ if (TString::npos == Pos_)
+ return true;
+
+ Pos_ += Tag_.length();
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ Tag_.Clear();
+
+ State_ = State::InOperator;
+ AtStmtStart_ = false;
+
+ return false;
+ }
+
+ bool MetaCommandParser() {
+ Pos_ = Program_.find('\n', Pos_);
+
+ if (TString::npos == Pos_)
+ return true;
+
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ return Emit(false);
+ }
+
+ bool InCopyFromStdinParser() {
+ Pos_ = Program_.find("\n\\.\n", Pos_);
+
+ if (TString::npos == Pos_)
+ return true;
+
+ Pos_ += 4;
+ return Emit(false);
+ }
+
+ // For now we support vars occupying a whole line only
+ bool VarParser() {
+ // TODO: validate var name
+ Pos_ = Program_.find('\n', Pos_);
+
+ if (TString::npos == Pos_)
+ return true;
+
+ ++Pos_;
+ if (Program_.length() == Pos_) {
+ Pos_ = TString::npos;
+ return true;
+ }
+
+ return Emit(false);
+ }
+
+ bool CallParser(size_t startPos) {
+ switch (State_) {
+ case State::InOperator:
+ return InOperatorParser(startPos);
+
+ case State::EndOfOperator:
+ return EndOfOperatorParser();
+
+ case State::LineComment:
+ return LineCommentParser();
+
+ case State::BlockComment:
+ return BlockCommentParser();
+
+ case State::QuotedIdentifier:
+ return QuotedIdentifierParser();
+
+ case State::StringLiteral:
+ return StringLiteralParser();
+
+ case State::EscapedStringLiteral:
+ return EscapedStringLiteralParser();
+
+ case State::DollarStringLiteral:
+ return DollarStringLiteralParser();
+
+ case State::InMetaCommand:
+ return MetaCommandParser();
+
+ case State::InCopyFromStdin:
+ return InCopyFromStdinParser();
+
+ case State::InVar:
+ return VarParser();
+
+ default:
+ Y_UNREACHABLE();
+ }
+ }
+
+ void ApplyStateFromStatement(const TStringBuf& stmt) {
+ if (stmt.contains("set standard_conforming_strings = on;") ||
+ stmt.contains("reset standard_conforming_strings;"))
+ {
+ StandardConformingStrings_ = true;
+ } else if (stmt.contains("set standard_conforming_strings = off;")) {
+ StandardConformingStrings_ = false;
+ }
+ }
+
+ TString Program_;
+ TString Cur_;
+ size_t Pos_;
+ State State_;
+ bool AtStmtStart_;
+
+ State Mode_;
+ ui16 Depth_;
+ TStringBuf Tag_;
+ bool StandardConformingStrings_;
+};
+
+TString GetFormattedStmt(const TStringBuf& stmt) {
+ TString result;
+ result.reserve(stmt.length());
+ size_t pos = 0, next_pos = TStringBuf::npos;
+
+ while (TStringBuf::npos != (next_pos = stmt.find('\n', pos))) {
+ if (1 < next_pos - pos) {
+ if (!(stmt[pos] == '\r' && 2 == next_pos - pos)) {
+ result += stmt.substr(pos, next_pos - pos + 1);
+ }
+ }
+ pos = next_pos + 1;
+ }
+ if (pos < stmt.length())
+ result += stmt.substr(pos);
+
+ if (0 < result.length() && '\n' == result.back())
+ result.pop_back();
+
+ if (0 < result.length() && '\r' == result.back())
+ result.pop_back();
+
+ return result;
+}
+
+void PrintExprTo(const TProgramPtr& program, IOutputStream& out) {
+ TStringStream baseSS;
+
+ auto baseAst = ConvertToAst(*program->ExprRoot(), program->ExprCtx(),
+ NYql::TExprAnnotationFlags::None, true);
+ baseAst.Root->PrettyPrintTo(baseSS, PRETTY_FLAGS);
+
+ out << baseSS.Data();
+}
+
+NYT::TNode ParseYson(const TString& yson)
+{
+ NYT::TNode root;
+ NYT::TNodeBuilder builder(&root);
+ NYson::TStatelessYsonParser resultParser(&builder);
+ resultParser.Parse(yson);
+
+ return root;
+}
+
+TString GetPgErrorMessage(const TIssue& issue) {
+ const TString anchor("reason: ");
+ const auto& msg = issue.GetMessage();
+
+ auto pos = msg.find(anchor);
+
+ if (TString::npos == pos)
+ return TString(msg);
+
+ return msg.substr(pos + anchor.length());
+}
+
+void WriteErrorToStream(const TProgramPtr program)
+{
+ program->PrintErrorsTo(Cerr);
+
+ for (const auto& topIssue: program->Issues()) {
+ WalkThroughIssues(topIssue, true, [&](const TIssue& issue, ui16 /*level*/) {
+ const auto msg = GetPgErrorMessage(issue);
+ Cout << msg;
+ if (msg.back() != '\n') {
+ Cout << '\n';
+ }
+ });
+ }
+}
+
+using CellFormatter = std::function<const TString(const TString&)>;
+using TColumnType = TString;
+
+inline const TString FormatBool(const TString& value)
+{
+ static const TString T = "t";
+ static const TString F = "f";
+
+ return (value == "true") ? T
+ : (value == "false") ? F
+ : ythrow yexception() << "Unexpected bool literal: " << value;
+}
+
+inline const TString FormatNumeric(const TString& value)
+{
+ static const TString Zero = "0.0";
+
+ return (value == "0") ? Zero : value;
+}
+
+inline const TString FormatTransparent(const TString& value)
+{
+ return value;
+}
+
+static const THashMap<TColumnType, CellFormatter> ColumnFormatters {
+ { "bool", FormatBool },
+ { "numeric", FormatNumeric },
+};
+
+static const THashSet<TColumnType> RightAlignedTypes {
+ "int2",
+ "int4",
+ "int8",
+ "float4",
+ "float8",
+ "numeric",
+};
+
+struct TColumn {
+ TString Name;
+ size_t Width;
+ CellFormatter Formatter;
+ bool RightAligned;
+};
+
+std::string FormatCell(const TString& data, const TColumn& column, size_t index, size_t numberOfColumns) {
+ const auto delim = (index == 0) ? " " : " | ";
+
+ if (column.RightAligned)
+ return fmt::format("{0}{1:>{2}}", delim, data, column.Width);
+
+ if (index == numberOfColumns - 1)
+ return fmt::format("{0}{1}", delim, data);
+
+ return fmt::format("{0}{1:<{2}}", delim, data, column.Width);
+}
+
+void WriteTableToStream(IOutputStream& stream, const NYT::TNode::TListType& cols, const NYT::TNode::TListType& rows)
+{
+ TVector<TColumn> columns;
+ TList<TVector<TString>> formattedData;
+
+ for (const auto& col: cols) {
+ const auto& colName = col[0].AsString();
+ const auto& colType = col[1][1].AsString();
+
+ auto& c = columns.emplace_back();
+
+ c.Name = colName;
+ c.Width = colName.length();
+ c.Formatter = ColumnFormatters.Value(colType, FormatTransparent);
+ c.RightAligned = RightAlignedTypes.contains(colType);
+ }
+
+ for (const auto& row : rows) {
+ auto& rowData = formattedData.emplace_back();
+
+ { int i = 0;
+ for (const auto& col : row.AsList()) {
+ const auto& cellData = col.AsString();
+ auto& c = columns[i];
+
+ rowData.emplace_back(c.Formatter(cellData));
+ c.Width = std::max(c.Width, rowData.back().length());
+
+ ++i;
+ }}
+ }
+
+ if (columns.empty()) {
+ stream << "--";
+ } else {
+ const auto totalTableWidth =
+ std::accumulate(columns.cbegin(), columns.cend(), std::size_t{0},
+ [] (const auto& sum, const auto& elem) { return sum + elem.Width; }) + columns.size() * 3 - 1;
+ TString filler(totalTableWidth, '-');
+ stream << fmt::format(" {0:^{1}} ", columns[0].Name, columns[0].Width);
+ for (size_t i = 1, pos = columns[0].Width + 2; i < columns.size(); ++i) {
+ const auto& c = columns[i];
+
+ stream << fmt::format("| {0:^{1}} ", c.Name, c.Width);
+ filler[pos] = '+';
+ pos += c.Width + 3;
+ }
+ stream << '\n' << filler;
+ }
+
+ for (const auto& row : formattedData) {
+ stream << '\n';
+
+ for (size_t i = 0; i < row.size(); ++i) {
+ stream << FormatCell(row[i], columns[i], i, columns.size());
+ }
+ }
+ stream << fmt::format("\n({} {})\n", formattedData.size(), (formattedData.size() == 1) ? "row" : "rows");
+}
+
+std::pair<TString, TString> GetYtTableDataPaths(const TFsPath& dataDir, const TString tableName) {
+ const auto dataFileName = dataDir / tableName;
+ const auto attrFileName = dataDir / (tableName + ".attr");
+ return {dataFileName, attrFileName};
+}
+
+void CreateYtFileTable(const TFsPath& dataDir, const TString tableName, const TExprNode::TPtr columnsNode, THashMap<TString, TString>& tablesMapping) {
+ const auto [dataFilePath, attrFilePath] =
+ GetYtTableDataPaths(dataDir, tableName);
+
+ TFile dataFile{dataFilePath, CreateNew};
+ TFile attrFile{attrFilePath, CreateNew};
+
+ THolder<TFixedBufferFileOutput> fo;
+ fo.Reset(new TFixedBufferFileOutput{attrFile.GetName()});
+ IOutputStream *attrS{fo.Get()};
+
+ *attrS << R"__({
+ "_yql_row_spec"={
+ "Type"=["StructType";[
+)__";
+
+ for (const auto &columnNode : columnsNode->Children()) {
+ const auto &colName = columnNode->Child(0)->Content();
+ const auto &colTypeNode = columnNode->Child(1);
+
+ *attrS << fmt::format(R"__( ["{0}";["{1}";"{2}";];];
+)__",
+ colName, colTypeNode->Content(),
+ colTypeNode->Child(0)->Content());
+ }
+ *attrS << R"__( ];];
+ };
+})__";
+
+ tablesMapping[TString("yt.plato.") + tableName] = dataFile.GetName();
+}
+
+bool RemoveFile(const TString& fileName) {
+ if (NFs::Remove(fileName)) {
+ return true;
+ }
+
+ switch (errno) {
+ case ENOENT:
+ return false;
+ default:
+ throw yexception() << "Cannot remove existing table file \"" << fileName << "\"\n";
+ }
+}
+
+void DeleteYtFileTable(const TFsPath& dataDir, const TString tableName, THashMap<TString, TString>& tablesMapping) {
+ const auto [dataFilePath, attrFilePath] = GetYtTableDataPaths(dataDir, tableName);
+
+ if (!RemoveFile(dataFilePath)) {
+ Cout << "table \"" << tableName << "\" does not exist\n";
+ }
+ RemoveFile(attrFilePath);
+
+ tablesMapping.erase(TString("yt.plato.") + tableName);
+}
+
+int SplitStatements(int argc, char* argv[]) {
+ Y_UNUSED(argc);
+ Y_UNUSED(argv);
+
+ const TString delimiter{"===a738dc70-2d81-45b4-88f2-738d09b186b7===\n"};
+
+ for (const auto& stmt : TStatementIterator{Cin.ReadAll()}) {
+ Cout << delimiter << stmt;
+ }
+ return 0;
+}
+
+void WriteToYtTableScheme(
+ const NYql::TExprNode& writeNode,
+ const TTempDir& tempDir,
+ const TIntrusivePtr<class NYql::NFile::TYtFileServices> yqlNativeServices) {
+ const auto* keyNode = writeNode.Child(2);
+
+ const auto* tableNameNode = keyNode->Child(0)->Child(1);
+ Y_ENSURE(tableNameNode->IsCallable("String"));
+
+ const auto& tableName = tableNameNode->Child(0)->Content();
+ Y_ENSURE(!tableName.empty());
+
+ const auto* optionsNode = writeNode.Child(4);
+ Y_ENSURE(optionsNode);
+
+ const auto modeNode = GetSetting(*optionsNode, "mode");
+ Y_ENSURE(modeNode);
+ const auto mode = modeNode->Child(1)->Content();
+
+ if (mode == "create") {
+ const auto columnsNode = GetSetting(*optionsNode, "columns");
+ Y_ENSURE(columnsNode);
+
+ CreateYtFileTable(tempDir.Path(), TString(tableName), columnsNode->ChildPtr(1),
+ yqlNativeServices->GetTablesMapping());
+ }
+ else if (mode == "drop") {
+ DeleteYtFileTable(tempDir.Path(), TString(tableName), yqlNativeServices->GetTablesMapping());
+ }
+}
+
+void ProcessMetaCmd(const TStringBuf& cmd) {
+ Cerr << "Metacommand " << cmd << " is not supported\n";
+}
+
+int Main(int argc, char* argv[])
+{
+ using namespace NLastGetopt;
+ TOpts opts = TOpts::Default();
+
+ const TString runnerName{"pgrun"};
+ TVector<TString> udfsPaths;
+ TString rawDataDir;
+ THashMap<TString, TString> clusterMapping;
+
+ static const TString DefaultCluster{"plato"};
+ clusterMapping[DefaultCluster] = YtProviderName;
+
+ opts.AddHelpOption();
+ opts.AddLongOption("datadir", "directory for tables").StoreResult<TString>(&rawDataDir);
+ opts.SetFreeArgsMax(0);
+
+ TOptsParseResult res(&opts, argc, argv);
+
+ const bool tempDirExists = !rawDataDir.empty() && NFs::Exists(rawDataDir);
+ TTempDir tempDir{rawDataDir.empty() ? TTempDir{} : TTempDir{rawDataDir}};
+ if (tempDirExists) {
+ tempDir.DoNotRemove();
+ }
+
+ auto fsConfig = MakeHolder<TFileStorageConfig>();
+
+ THolder<TGatewaysConfig> gatewaysConfig;
+
+ auto fileStorage = CreateFileStorage(*fsConfig);
+ fileStorage = WithAsync(fileStorage);
+
+ auto funcRegistry = CreateFunctionRegistry(&NYql::NBacktrace::KikimrBackTrace, CreateBuiltinRegistry(), false, udfsPaths);
+
+ bool keepTempFiles = true;
+ bool emulateOutputForMultirun = false;
+
+ auto yqlNativeServices = NFile::TYtFileServices::Make(funcRegistry.Get(), {}, fileStorage, tempDir.Path(), keepTempFiles);
+ auto ytNativeGateway = CreateYtFileGateway(yqlNativeServices, &emulateOutputForMultirun);
+
+ TVector<TDataProviderInitializer> dataProvidersInit;
+ dataProvidersInit.push_back(GetYtNativeDataProviderInitializer(ytNativeGateway));
+
+ TExprContext ctx;
+ TExprContext::TFreezeGuard freezeGuard(ctx);
+
+ TProgramFactory factory(true, funcRegistry.Get(), ctx.NextUniqueId, dataProvidersInit, runnerName);
+ factory.SetGatewaysConfig(gatewaysConfig.Get());
+
+ const TString username = GetUsername();
+ THashSet<TString> sqlFlags;
+
+ for (const auto& raw_stmt : TStatementIterator{Cin.ReadAll()}) {
+ const auto stmt = GetFormattedStmt(raw_stmt);
+ Cout << stmt << '\n';
+
+ if (stmt[0] == '\\') {
+ ProcessMetaCmd(stmt);
+ continue;
+ }
+
+ google::protobuf::Arena arena;
+ NSQLTranslation::TTranslationSettings settings;
+ settings.Arena = &arena;
+ settings.ClusterMapping = clusterMapping;
+ settings.DefaultCluster = DefaultCluster;
+ settings.Flags = sqlFlags;
+ settings.SyntaxVersion = 1;
+ settings.AnsiLexer = false;
+ settings.V0Behavior = NSQLTranslation::EV0Behavior::Report;
+ settings.AssumeYdbOnClusterWithSlash = false;
+ settings.PgParser = true;
+
+ auto program = factory.Create("-stdin-", stmt);
+
+ if (!program->ParseSql(settings)) {
+ WriteErrorToStream(program);
+
+ continue;
+ }
+
+ if (!program->Compile(username)) {
+ WriteErrorToStream(program);
+
+ continue;
+ }
+
+#if 0
+ auto validate_status = program->Validate(username, &Cout, true);
+ program->PrintErrorsTo(Cerr);
+ if (validate_status == TProgram::TStatus::Error) {
+ return 1;
+ }
+
+ auto optimize_status = program->Optimize(username, nullptr, &Cerr, &Cout, true);
+ program->PrintErrorsTo(Cerr);
+ if (optimize_status == TProgram::TStatus::Error) {
+ return 1;
+ }
+#endif
+
+ static const THashSet<TString> ignoredNodes{"CommitAll!", "Commit!" };
+ const auto opNode = NYql::FindNode(program->ExprRoot(),
+ [] (const TExprNode::TPtr& node) { return !ignoredNodes.contains(node->Content()); });
+ if (opNode->IsCallable("Write!")) {
+ Y_ENSURE(opNode->ChildrenSize() == 5);
+
+ const auto* keyNode = opNode->Child(2);
+ const bool isWriteToTableSchemeNode = keyNode->IsCallable("Key") && 0 < keyNode->ChildrenSize() &&
+ keyNode->Child(0)->Child(0)->IsAtom("tablescheme");
+
+ if (isWriteToTableSchemeNode) {
+ WriteToYtTableScheme(*opNode, tempDir, yqlNativeServices);
+ continue;
+ }
+ }
+
+ auto status = program->Run(username, nullptr, nullptr, nullptr, true);
+ program->ConfigureYsonResultFormat(NYson::EYsonFormat::Text);
+
+ if (status == TProgram::TStatus::Error) {
+ WriteErrorToStream(program);
+ continue;
+ }
+
+ if (program->HasResults()) {
+ // Cout << program->ResultsAsString() << Endl;
+
+ const auto root = ParseYson(program->ResultsAsString());
+
+ const auto& cols = root[0]["Write"][0]["Type"][1][1].AsList();
+ const auto& data = root[0]["Write"][0]["Data"].AsList();
+
+ WriteTableToStream(Cout, cols, data);
+ Cout << Endl;
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char* argv[])
+{
+ NYql::NBacktrace::RegisterKikimrFatalActions();
+ NYql::NBacktrace::EnableKikimrSymbolize();
+
+ try {
+ if (1 < argc) {
+ if (TString(argv[1]) == "split-statements") {
+ return SplitStatements(argc, argv);
+ }
+ }
+ return Main(argc, argv);
+ }
+ catch (...) {
+ Cerr << CurrentExceptionMessage() << Endl;
+ return 1;
+ }
+}
+
diff --git a/ydb/library/yql/tools/pgrun/ya.make b/ydb/library/yql/tools/pgrun/ya.make
new file mode 100644
index 0000000000..f663021562
--- /dev/null
+++ b/ydb/library/yql/tools/pgrun/ya.make
@@ -0,0 +1,46 @@
+PROGRAM(pgrun)
+
+ALLOCATOR(J)
+
+SRCS(
+ pgrun.cpp
+)
+
+PEERDIR(
+ contrib/libs/protobuf
+ library/cpp/getopt
+ contrib/libs/fmt
+ library/cpp/yson
+ ydb/library/yql/sql/pg
+ ydb/library/yql/core/facade
+ ydb/library/yql/core/file_storage
+ ydb/library/yql/core/file_storage/proto
+ ydb/library/yql/core/file_storage/http_download
+ ydb/library/yql/core/services/mounts
+ ydb/library/yql/minikql/comp_nodes/llvm
+ ydb/library/yql/protos
+ ydb/library/yql/public/udf/service/exception_policy
+ ydb/library/yql/utils/backtrace
+ ydb/library/yql/core
+ ydb/library/yql/sql/v1/format
+ ydb/library/yql/providers/common/codec
+ ydb/library/yql/providers/common/comp_nodes
+ ydb/library/yql/providers/common/proto
+ ydb/library/yql/providers/common/provider
+ ydb/library/yql/providers/common/udf_resolve
+ ydb/library/yql/providers/dq/provider
+ ydb/library/yql/providers/yt/gateway/file
+ ydb/library/yql/core/url_preprocessing
+ ydb/library/yql/parser/pg_wrapper
+)
+
+
+RESOURCE(
+yql/cfg/local/gateways.conf gateways.conf
+yql/cfg/local/fs.conf fs.conf
+yql/cfg/local/fs_arc.conf fs_arc.conf
+)
+
+YQL_LAST_ABI_VERSION()
+
+END()
diff --git a/ydb/library/yql/tools/ya.make b/ydb/library/yql/tools/ya.make
index 4631e3b04e..88b6d225db 100644
--- a/ydb/library/yql/tools/ya.make
+++ b/ydb/library/yql/tools/ya.make
@@ -3,6 +3,8 @@ RECURSE(
dqrun
dq
mrjob
+ pgrun
+ pg-make-test
sql2yql
sql_formatter
udf_dep_stub