author    thegeorg <thegeorg@yandex-team.ru>  2022-02-10 16:45:12 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:12 +0300
commit    49116032d905455a7b1c994e4a696afc885c1e71 (patch)
tree      be835aa92c6248212e705f25388ebafcf84bc7a1 /contrib/libs/hyperscan
parent    4e839db24a3bbc9f1c610c43d6faaaa99824dcca (diff)
download  ydb-49116032d905455a7b1c994e4a696afc885c1e71.tar.gz
Restoring authorship annotation for <thegeorg@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/hyperscan')
-rw-r--r--  contrib/libs/hyperscan/.yandex_meta/devtools.copyrights.report | 502
-rw-r--r--  contrib/libs/hyperscan/.yandex_meta/devtools.licenses.report | 44
-rw-r--r--  contrib/libs/hyperscan/.yandex_meta/licenses.list.txt | 510
-rw-r--r--  contrib/libs/hyperscan/CHANGELOG.md | 600
-rw-r--r--  contrib/libs/hyperscan/README.md | 4
-rw-r--r--  contrib/libs/hyperscan/config-linux.h | 218
-rw-r--r--  contrib/libs/hyperscan/config-win.h | 32
-rw-r--r--  contrib/libs/hyperscan/config.h | 14
-rw-r--r--  contrib/libs/hyperscan/hs_version.h | 4
-rw-r--r--  contrib/libs/hyperscan/runtime_avx2/.yandex_meta/licenses.list.txt | 64
-rw-r--r--  contrib/libs/hyperscan/runtime_avx2/hs_common.h | 1192
-rw-r--r--  contrib/libs/hyperscan/runtime_avx2/hs_runtime.h | 1242
-rw-r--r--  contrib/libs/hyperscan/runtime_avx2/ya.make | 996
-rw-r--r--  contrib/libs/hyperscan/runtime_avx512/.yandex_meta/licenses.list.txt | 64
-rw-r--r--  contrib/libs/hyperscan/runtime_avx512/hs_common.h | 1192
-rw-r--r--  contrib/libs/hyperscan/runtime_avx512/hs_runtime.h | 1242
-rw-r--r--  contrib/libs/hyperscan/runtime_avx512/ya.make | 1000
-rw-r--r--  contrib/libs/hyperscan/runtime_core2/.yandex_meta/licenses.list.txt | 64
-rw-r--r--  contrib/libs/hyperscan/runtime_core2/hs_common.h | 1192
-rw-r--r--  contrib/libs/hyperscan/runtime_core2/hs_runtime.h | 1242
-rw-r--r--  contrib/libs/hyperscan/runtime_core2/ya.make | 976
-rw-r--r--  contrib/libs/hyperscan/runtime_corei7/.yandex_meta/licenses.list.txt | 64
-rw-r--r--  contrib/libs/hyperscan/runtime_corei7/hs_common.h | 1192
-rw-r--r--  contrib/libs/hyperscan/runtime_corei7/hs_runtime.h | 1242
-rw-r--r--  contrib/libs/hyperscan/runtime_corei7/ya.make | 988
-rw-r--r--  contrib/libs/hyperscan/src/compiler/compiler.cpp | 282
-rw-r--r--  contrib/libs/hyperscan/src/compiler/compiler.h | 50
-rw-r--r--  contrib/libs/hyperscan/src/compiler/expression_info.h | 12
-rw-r--r--  contrib/libs/hyperscan/src/database.c | 18
-rw-r--r--  contrib/libs/hyperscan/src/database.h | 26
-rw-r--r--  contrib/libs/hyperscan/src/fdr/fdr_compile.cpp | 6
-rw-r--r--  contrib/libs/hyperscan/src/fdr/fdr_confirm.h | 2
-rw-r--r--  contrib/libs/hyperscan/src/fdr/fdr_confirm_compile.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/fdr/fdr_confirm_runtime.h | 2
-rw-r--r--  contrib/libs/hyperscan/src/fdr/teddy.c | 590
-rw-r--r--  contrib/libs/hyperscan/src/fdr/teddy_avx2.c | 436
-rw-r--r--  contrib/libs/hyperscan/src/fdr/teddy_compile.cpp | 200
-rw-r--r--  contrib/libs/hyperscan/src/fdr/teddy_runtime_common.h | 32
-rw-r--r--  contrib/libs/hyperscan/src/grey.cpp | 24
-rw-r--r--  contrib/libs/hyperscan/src/grey.h | 4
-rw-r--r--  contrib/libs/hyperscan/src/hs.cpp | 338
-rw-r--r--  contrib/libs/hyperscan/src/hs.h | 14
-rw-r--r--  contrib/libs/hyperscan/src/hs_common.h | 22
-rw-r--r--  contrib/libs/hyperscan/src/hs_compile.h | 454
-rw-r--r--  contrib/libs/hyperscan/src/hs_internal.h | 24
-rw-r--r--  contrib/libs/hyperscan/src/hs_runtime.h | 12
-rw-r--r--  contrib/libs/hyperscan/src/hwlm/hwlm_literal.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/hwlm/hwlm_literal.h | 8
-rw-r--r--  contrib/libs/hyperscan/src/hwlm/noodle_engine_avx2.c | 2
-rw-r--r--  contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.h | 16
-rw-r--r--  contrib/libs/hyperscan/src/nfa/goughcompile.cpp | 6
-rw-r--r--  contrib/libs/hyperscan/src/nfa/limex_compile.cpp | 270
-rw-r--r--  contrib/libs/hyperscan/src/nfa/limex_compile.h | 4
-rw-r--r--  contrib/libs/hyperscan/src/nfa/limex_exceptional.h | 146
-rw-r--r--  contrib/libs/hyperscan/src/nfa/limex_internal.h | 10
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcclellan.c | 344
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcclellan_common_impl.h | 212
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcclellan_internal.h | 106
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcclellancompile.cpp | 1142
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcclellancompile.h | 4
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcclellandump.h | 124
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcsheng.c | 2672
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcsheng.h | 148
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcsheng_compile.cpp | 928
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcsheng_compile.h | 4
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcsheng_data.c | 26
-rw-r--r--  contrib/libs/hyperscan/src/nfa/mcsheng_internal.h | 62
-rw-r--r--  contrib/libs/hyperscan/src/nfa/nfa_api_dispatch.c | 10
-rw-r--r--  contrib/libs/hyperscan/src/nfa/nfa_build_util.cpp | 120
-rw-r--r--  contrib/libs/hyperscan/src/nfa/nfa_build_util.h | 2
-rw-r--r--  contrib/libs/hyperscan/src/nfa/nfa_internal.h | 46
-rw-r--r--  contrib/libs/hyperscan/src/nfa/sheng.c | 2412
-rw-r--r--  contrib/libs/hyperscan/src/nfa/sheng.h | 166
-rw-r--r--  contrib/libs/hyperscan/src/nfa/sheng_defs.h | 804
-rw-r--r--  contrib/libs/hyperscan/src/nfa/sheng_impl.h | 250
-rw-r--r--  contrib/libs/hyperscan/src/nfa/sheng_impl4.h | 856
-rw-r--r--  contrib/libs/hyperscan/src/nfa/sheng_internal.h | 76
-rw-r--r--  contrib/libs/hyperscan/src/nfa/shengcompile.cpp | 626
-rw-r--r--  contrib/libs/hyperscan/src/nfa/shengcompile.h | 20
-rw-r--r--  contrib/libs/hyperscan/src/nfa/vermicelli.h | 274
-rw-r--r--  contrib/libs/hyperscan/src/nfa/vermicelli_sse.h | 994
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng.cpp | 8
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng.h | 4
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_calc_components.cpp | 22
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_cyclic_redundancy.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_equivalence.cpp | 6
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_haig.cpp | 4
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_is_equal.h | 20
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_limex.cpp | 22
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_limex.h | 6
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_limex_accel.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_literal_analysis.cpp | 4
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_mcclellan.cpp | 6
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_repeat.cpp | 66
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_stop.cpp | 12
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_stop.h | 12
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_violet.cpp | 42
-rw-r--r--  contrib/libs/hyperscan/src/nfagraph/ng_width.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/parser/logical_combination.cpp | 672
-rw-r--r--  contrib/libs/hyperscan/src/parser/logical_combination.h | 224
-rw-r--r--  contrib/libs/hyperscan/src/parser/shortcut_literal.cpp | 4
-rw-r--r--  contrib/libs/hyperscan/src/parser/utf8_validate.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/parser/utf8_validate.h | 6
-rw-r--r--  contrib/libs/hyperscan/src/report.h | 282
-rw-r--r--  contrib/libs/hyperscan/src/rose/block.c | 4
-rw-r--r--  contrib/libs/hyperscan/src/rose/catchup.c | 14
-rw-r--r--  contrib/libs/hyperscan/src/rose/catchup.h | 28
-rw-r--r--  contrib/libs/hyperscan/src/rose/match.c | 90
-rw-r--r--  contrib/libs/hyperscan/src/rose/match.h | 6
-rw-r--r--  contrib/libs/hyperscan/src/rose/program_runtime.c | 6926
-rw-r--r--  contrib/libs/hyperscan/src/rose/program_runtime.h | 8
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose.h | 14
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_add.cpp | 18
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_bytecode.cpp | 150
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_dedupe.cpp | 10
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_groups.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_impl.h | 2
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_instructions.cpp | 134
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_instructions.h | 474
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_lookaround.cpp | 4
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_matchers.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_merge.cpp | 50
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_misc.cpp | 16
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_program.cpp | 578
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_build_program.h | 6
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_graph.h | 2
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_in_graph.h | 18
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_internal.h | 54
-rw-r--r--  contrib/libs/hyperscan/src/rose/rose_program.h | 174
-rw-r--r--  contrib/libs/hyperscan/src/rose/runtime.h | 20
-rw-r--r--  contrib/libs/hyperscan/src/rose/stream.c | 6
-rw-r--r--  contrib/libs/hyperscan/src/rose/stream_long_lit.h | 6
-rw-r--r--  contrib/libs/hyperscan/src/rose/validate_mask.h | 74
-rw-r--r--  contrib/libs/hyperscan/src/rose/validate_shufti.h | 158
-rw-r--r--  contrib/libs/hyperscan/src/runtime.c | 184
-rw-r--r--  contrib/libs/hyperscan/src/scratch.c | 8
-rw-r--r--  contrib/libs/hyperscan/src/scratch.h | 36
-rw-r--r--  contrib/libs/hyperscan/src/smallwrite/smallwrite_build.cpp | 14
-rw-r--r--  contrib/libs/hyperscan/src/som/slot_manager_internal.h | 2
-rw-r--r--  contrib/libs/hyperscan/src/stream_compress_impl.h | 16
-rw-r--r--  contrib/libs/hyperscan/src/ue2common.h | 16
-rw-r--r--  contrib/libs/hyperscan/src/util/arch.h | 4
-rw-r--r--  contrib/libs/hyperscan/src/util/bitfield.h | 18
-rw-r--r--  contrib/libs/hyperscan/src/util/copybytes.h | 64
-rw-r--r--  contrib/libs/hyperscan/src/util/cpuid_flags.c | 36
-rw-r--r--  contrib/libs/hyperscan/src/util/cpuid_inline.h | 98
-rw-r--r--  contrib/libs/hyperscan/src/util/dump_util.h | 126
-rw-r--r--  contrib/libs/hyperscan/src/util/graph.h | 2
-rw-r--r--  contrib/libs/hyperscan/src/util/graph_small_color_map.h | 32
-rw-r--r--  contrib/libs/hyperscan/src/util/graph_undirected.h | 1002
-rw-r--r--  contrib/libs/hyperscan/src/util/logical.h | 154
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit.h | 26
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit_build.cpp | 4
-rw-r--r--  contrib/libs/hyperscan/src/util/multibit_build.h | 2
-rw-r--r--  contrib/libs/hyperscan/src/util/report.h | 30
-rw-r--r--  contrib/libs/hyperscan/src/util/report_manager.cpp | 78
-rw-r--r--  contrib/libs/hyperscan/src/util/report_manager.h | 54
-rw-r--r--  contrib/libs/hyperscan/src/util/simd_utils.h | 192
-rw-r--r--  contrib/libs/hyperscan/src/util/target_info.cpp | 18
-rw-r--r--  contrib/libs/hyperscan/src/util/target_info.h | 6
-rw-r--r--  contrib/libs/hyperscan/src/util/ue2_graph.h | 286
-rw-r--r--  contrib/libs/hyperscan/src/util/ue2string.cpp | 2
-rw-r--r--  contrib/libs/hyperscan/src/util/ue2string.h | 2
-rw-r--r--  contrib/libs/hyperscan/src/util/uniform_ops.h | 26
-rw-r--r--  contrib/libs/hyperscan/ya.make | 208
166 files changed, 22274 insertions(+), 22274 deletions(-)
diff --git a/contrib/libs/hyperscan/.yandex_meta/devtools.copyrights.report b/contrib/libs/hyperscan/.yandex_meta/devtools.copyrights.report
index c74f520feb..dca295ec03 100644
--- a/contrib/libs/hyperscan/.yandex_meta/devtools.copyrights.report
+++ b/contrib/libs/hyperscan/.yandex_meta/devtools.copyrights.report
@@ -29,21 +29,21 @@
# FILE_INCLUDE - include all file data into licenses text file
# =======================
-KEEP COPYRIGHT_SERVICE_LABEL 05894dd5e4177359f7667049b9377980
-BELONGS ya.make
- License text:
- * Copyright (c) 2018, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- src/parser/logical_combination.h [2:2]
- src/util/graph_undirected.h [2:2]
- src/util/logical.h [2:2]
-
+KEEP COPYRIGHT_SERVICE_LABEL 05894dd5e4177359f7667049b9377980
+BELONGS ya.make
+ License text:
+ * Copyright (c) 2018, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ src/parser/logical_combination.h [2:2]
+ src/util/graph_undirected.h [2:2]
+ src/util/logical.h [2:2]
+
KEEP COPYRIGHT_SERVICE_LABEL 1942d1946b0cb94f5ed6cff68e676e12
-BELONGS ya.make
+BELONGS ya.make
License text:
* Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
Scancode info:
@@ -54,7 +54,7 @@ BELONGS ya.make
src/crc32.c [43:43]
KEEP COPYRIGHT_SERVICE_LABEL 2594e47a0a16675bf535be6215e19672
-BELONGS ya.make
+BELONGS ya.make
License text:
* Copyright (c) 2016, Intel Corporation
Scancode info:
@@ -75,18 +75,18 @@ BELONGS ya.make
src/util/fatbit_build.cpp [2:2]
src/util/fatbit_build.h [2:2]
-KEEP COPYRIGHT_SERVICE_LABEL 2b68f540d88813a56538df81b21ad0bd
-BELONGS ya.make
- License text:
- * Copyright (c) 2016-2018, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- src/nfagraph/ng_violet.cpp [2:2]
- src/util/ue2_graph.h [2:2]
-
+KEEP COPYRIGHT_SERVICE_LABEL 2b68f540d88813a56538df81b21ad0bd
+BELONGS ya.make
+ License text:
+ * Copyright (c) 2016-2018, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ src/nfagraph/ng_violet.cpp [2:2]
+ src/util/ue2_graph.h [2:2]
+
KEEP COPYRIGHT_SERVICE_LABEL 46f8b1e28887be66473a94b9fff421ef
BELONGS ya.make
License text:
@@ -100,7 +100,7 @@ BELONGS ya.make
LICENSE [37:38]
KEEP COPYRIGHT_SERVICE_LABEL 59904f3d4998fdc080793ad19c4c54cb
-BELONGS ya.make
+BELONGS ya.make
License text:
* Copyright (c) 2017, Intel Corporation
Scancode info:
@@ -111,7 +111,7 @@ BELONGS ya.make
src/hwlm/noodle_engine_avx512.c [2:2]
src/nfa/rdfa.cpp [2:2]
src/parser/control_verbs.h [2:2]
- src/parser/control_verbs.rl6 [2:2]
+ src/parser/control_verbs.rl6 [2:2]
src/rose/rose_build_dedupe.cpp [2:2]
src/rose/rose_build_engine_blob.cpp [2:2]
src/rose/rose_build_exclusive.h [2:2]
@@ -132,170 +132,170 @@ BELONGS ya.make
src/util/small_vector.h [2:2]
src/util/unordered.h [2:2]
-KEEP COPYRIGHT_SERVICE_LABEL 6391196b1fd203d7ad6956b8ef6b669b
-BELONGS ya.make
- License text:
- * Copyright (c) 2017-2020, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- src/rose/rose_build_instructions.cpp [2:2]
- src/rose/rose_build_instructions.h [2:2]
- src/util/arch.h [2:2]
- src/util/cpuid_inline.h [2:2]
-
-KEEP COPYRIGHT_SERVICE_LABEL 6443950cec567e72e1bbf863aa7faf00
-BELONGS ya.make
- License text:
- * Copyright (c) 2015-2020, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- src/compiler/compiler.cpp [2:2]
- src/database.c [2:2]
- src/database.h [2:2]
- src/fdr/teddy.c [2:2]
- src/fdr/teddy_compile.cpp [2:2]
- src/hs.cpp [2:2]
- src/hs.h [2:2]
- src/hs_compile.h [2:2]
- src/nfa/limex_compile.cpp [2:2]
- src/nfa/limex_compile.h [2:2]
- src/nfa/limex_exceptional.h [2:2]
- src/nfa/limex_internal.h [2:2]
- src/nfa/mcclellancompile.cpp [2:2]
- src/nfa/nfa_api_dispatch.c [2:2]
- src/nfa/nfa_build_util.cpp [2:2]
- src/nfa/nfa_build_util.h [2:2]
- src/nfa/nfa_internal.h [2:2]
- src/nfa/vermicelli.h [2:2]
- src/nfa/vermicelli_sse.h [2:2]
- src/nfagraph/ng_limex.cpp [2:2]
- src/nfagraph/ng_limex.h [2:2]
- src/rose/program_runtime.c [2:2]
- src/rose/rose_build_bytecode.cpp [2:2]
- src/rose/rose_build_lookaround.cpp [2:2]
- src/rose/rose_program.h [2:2]
- src/smallwrite/smallwrite_build.cpp [2:2]
- src/util/cpuid_flags.c [2:2]
- src/util/simd_utils.h [2:2]
- src/util/target_info.cpp [2:2]
- src/util/target_info.h [2:2]
- src/util/uniform_ops.h [2:2]
-
-KEEP COPYRIGHT_SERVICE_LABEL 64781db6332ddf13b4473282805e75ad
-BELONGS ya.make
- License text:
- * Copyright (c) 2016-2019, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- src/report.h [2:2]
- src/rose/rose_build_matchers.cpp [2:2]
- src/rose/rose_build_program.h [2:2]
-
-KEEP COPYRIGHT_SERVICE_LABEL 653f52025a0e666c760b8ff771c18d09
-BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtime_corei7/ya.make ya.make
- License text:
- * Copyright (c) 2015-2019, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- runtime_avx2/hs_common.h [2:2]
- runtime_avx512/hs_common.h [2:2]
- runtime_core2/hs_common.h [2:2]
- runtime_corei7/hs_common.h [2:2]
- src/compiler/compiler.h [2:2]
- src/fdr/fdr_compile.cpp [2:2]
- src/fdr/fdr_confirm.h [2:2]
- src/fdr/fdr_confirm_compile.cpp [2:2]
- src/fdr/fdr_confirm_runtime.h [2:2]
- src/hs_common.h [2:2]
- src/hwlm/hwlm_literal.cpp [2:2]
- src/hwlm/hwlm_literal.h [2:2]
- src/parser/shortcut_literal.cpp [2:2]
- src/rose/block.c [2:2]
- src/rose/match.c [2:2]
- src/rose/program_runtime.h [2:2]
- src/rose/rose.h [2:2]
- src/rose/rose_build_impl.h [2:2]
- src/rose/rose_internal.h [2:2]
- src/runtime.c [2:2]
- src/scratch.c [2:2]
- src/scratch.h [2:2]
- src/util/ue2string.cpp [2:2]
- src/util/ue2string.h [2:2]
-
-KEEP COPYRIGHT_SERVICE_LABEL 6dbc40c4bf9c0f89c1d7a1064c7cd4b3
-BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtime_corei7/ya.make ya.make
- License text:
- * Copyright (c) 2015-2018, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- runtime_avx2/hs_runtime.h [2:2]
- runtime_avx512/hs_runtime.h [2:2]
- runtime_core2/hs_runtime.h [2:2]
- runtime_corei7/hs_runtime.h [2:2]
- src/grey.cpp [2:2]
- src/grey.h [2:2]
- src/hs_runtime.h [2:2]
- src/nfa/accel_dfa_build_strat.h [2:2]
- src/nfa/goughcompile.cpp [2:2]
- src/nfa/mcclellan.c [2:2]
- src/nfa/mcclellan_common_impl.h [2:2]
- src/nfa/mcclellan_internal.h [2:2]
- src/nfa/mcclellancompile.h [2:2]
- src/nfagraph/ng.cpp [2:2]
- src/nfagraph/ng.h [2:2]
- src/nfagraph/ng_calc_components.cpp [2:2]
- src/nfagraph/ng_haig.cpp [2:2]
- src/nfagraph/ng_mcclellan.cpp [2:2]
- src/nfagraph/ng_repeat.cpp [2:2]
- src/nfagraph/ng_stop.cpp [2:2]
- src/nfagraph/ng_stop.h [2:2]
- src/rose/catchup.c [2:2]
- src/rose/catchup.h [2:2]
- src/rose/match.h [2:2]
- src/rose/rose_build_add.cpp [2:2]
- src/rose/rose_build_merge.cpp [2:2]
- src/rose/rose_build_misc.cpp [2:2]
- src/rose/rose_graph.h [2:2]
- src/rose/rose_in_graph.h [2:2]
- src/rose/runtime.h [2:2]
- src/rose/stream.c [2:2]
- src/ue2common.h [2:2]
- src/util/bitfield.h [2:2]
- src/util/multibit.h [2:2]
- src/util/report.h [2:2]
- src/util/report_manager.cpp [2:2]
- src/util/report_manager.h [2:2]
-
-KEEP COPYRIGHT_SERVICE_LABEL 6fb2319e3a2b91a95a6fe913104899d7
-BELONGS ya.make
- License text:
- * Copyright (c) 2017-2018, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- src/compiler/expression_info.h [2:2]
- src/stream_compress_impl.h [2:2]
- src/util/graph_small_color_map.h [2:2]
-
+KEEP COPYRIGHT_SERVICE_LABEL 6391196b1fd203d7ad6956b8ef6b669b
+BELONGS ya.make
+ License text:
+ * Copyright (c) 2017-2020, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ src/rose/rose_build_instructions.cpp [2:2]
+ src/rose/rose_build_instructions.h [2:2]
+ src/util/arch.h [2:2]
+ src/util/cpuid_inline.h [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL 6443950cec567e72e1bbf863aa7faf00
+BELONGS ya.make
+ License text:
+ * Copyright (c) 2015-2020, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ src/compiler/compiler.cpp [2:2]
+ src/database.c [2:2]
+ src/database.h [2:2]
+ src/fdr/teddy.c [2:2]
+ src/fdr/teddy_compile.cpp [2:2]
+ src/hs.cpp [2:2]
+ src/hs.h [2:2]
+ src/hs_compile.h [2:2]
+ src/nfa/limex_compile.cpp [2:2]
+ src/nfa/limex_compile.h [2:2]
+ src/nfa/limex_exceptional.h [2:2]
+ src/nfa/limex_internal.h [2:2]
+ src/nfa/mcclellancompile.cpp [2:2]
+ src/nfa/nfa_api_dispatch.c [2:2]
+ src/nfa/nfa_build_util.cpp [2:2]
+ src/nfa/nfa_build_util.h [2:2]
+ src/nfa/nfa_internal.h [2:2]
+ src/nfa/vermicelli.h [2:2]
+ src/nfa/vermicelli_sse.h [2:2]
+ src/nfagraph/ng_limex.cpp [2:2]
+ src/nfagraph/ng_limex.h [2:2]
+ src/rose/program_runtime.c [2:2]
+ src/rose/rose_build_bytecode.cpp [2:2]
+ src/rose/rose_build_lookaround.cpp [2:2]
+ src/rose/rose_program.h [2:2]
+ src/smallwrite/smallwrite_build.cpp [2:2]
+ src/util/cpuid_flags.c [2:2]
+ src/util/simd_utils.h [2:2]
+ src/util/target_info.cpp [2:2]
+ src/util/target_info.h [2:2]
+ src/util/uniform_ops.h [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL 64781db6332ddf13b4473282805e75ad
+BELONGS ya.make
+ License text:
+ * Copyright (c) 2016-2019, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ src/report.h [2:2]
+ src/rose/rose_build_matchers.cpp [2:2]
+ src/rose/rose_build_program.h [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL 653f52025a0e666c760b8ff771c18d09
+BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtime_corei7/ya.make ya.make
+ License text:
+ * Copyright (c) 2015-2019, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ runtime_avx2/hs_common.h [2:2]
+ runtime_avx512/hs_common.h [2:2]
+ runtime_core2/hs_common.h [2:2]
+ runtime_corei7/hs_common.h [2:2]
+ src/compiler/compiler.h [2:2]
+ src/fdr/fdr_compile.cpp [2:2]
+ src/fdr/fdr_confirm.h [2:2]
+ src/fdr/fdr_confirm_compile.cpp [2:2]
+ src/fdr/fdr_confirm_runtime.h [2:2]
+ src/hs_common.h [2:2]
+ src/hwlm/hwlm_literal.cpp [2:2]
+ src/hwlm/hwlm_literal.h [2:2]
+ src/parser/shortcut_literal.cpp [2:2]
+ src/rose/block.c [2:2]
+ src/rose/match.c [2:2]
+ src/rose/program_runtime.h [2:2]
+ src/rose/rose.h [2:2]
+ src/rose/rose_build_impl.h [2:2]
+ src/rose/rose_internal.h [2:2]
+ src/runtime.c [2:2]
+ src/scratch.c [2:2]
+ src/scratch.h [2:2]
+ src/util/ue2string.cpp [2:2]
+ src/util/ue2string.h [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL 6dbc40c4bf9c0f89c1d7a1064c7cd4b3
+BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtime_corei7/ya.make ya.make
+ License text:
+ * Copyright (c) 2015-2018, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ runtime_avx2/hs_runtime.h [2:2]
+ runtime_avx512/hs_runtime.h [2:2]
+ runtime_core2/hs_runtime.h [2:2]
+ runtime_corei7/hs_runtime.h [2:2]
+ src/grey.cpp [2:2]
+ src/grey.h [2:2]
+ src/hs_runtime.h [2:2]
+ src/nfa/accel_dfa_build_strat.h [2:2]
+ src/nfa/goughcompile.cpp [2:2]
+ src/nfa/mcclellan.c [2:2]
+ src/nfa/mcclellan_common_impl.h [2:2]
+ src/nfa/mcclellan_internal.h [2:2]
+ src/nfa/mcclellancompile.h [2:2]
+ src/nfagraph/ng.cpp [2:2]
+ src/nfagraph/ng.h [2:2]
+ src/nfagraph/ng_calc_components.cpp [2:2]
+ src/nfagraph/ng_haig.cpp [2:2]
+ src/nfagraph/ng_mcclellan.cpp [2:2]
+ src/nfagraph/ng_repeat.cpp [2:2]
+ src/nfagraph/ng_stop.cpp [2:2]
+ src/nfagraph/ng_stop.h [2:2]
+ src/rose/catchup.c [2:2]
+ src/rose/catchup.h [2:2]
+ src/rose/match.h [2:2]
+ src/rose/rose_build_add.cpp [2:2]
+ src/rose/rose_build_merge.cpp [2:2]
+ src/rose/rose_build_misc.cpp [2:2]
+ src/rose/rose_graph.h [2:2]
+ src/rose/rose_in_graph.h [2:2]
+ src/rose/runtime.h [2:2]
+ src/rose/stream.c [2:2]
+ src/ue2common.h [2:2]
+ src/util/bitfield.h [2:2]
+ src/util/multibit.h [2:2]
+ src/util/report.h [2:2]
+ src/util/report_manager.cpp [2:2]
+ src/util/report_manager.h [2:2]
+
+KEEP COPYRIGHT_SERVICE_LABEL 6fb2319e3a2b91a95a6fe913104899d7
+BELONGS ya.make
+ License text:
+ * Copyright (c) 2017-2018, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ src/compiler/expression_info.h [2:2]
+ src/stream_compress_impl.h [2:2]
+ src/util/graph_small_color_map.h [2:2]
+
KEEP COPYRIGHT_SERVICE_LABEL ba9cb80d3dee6e526379bccc8de74870
-BELONGS ya.make
+BELONGS ya.make
License text:
* Copyright (c) 2016-2017, Intel Corporation
Scancode info:
@@ -317,41 +317,41 @@ BELONGS ya.make
src/rose/rose_build_matchers.h [2:2]
src/util/accel_scheme.h [2:2]
src/util/clique.cpp [2:2]
- src/util/dump_util.h [2:2]
+ src/util/dump_util.h [2:2]
src/util/hash.h [2:2]
src/util/simd_utils.c [2:2]
-KEEP COPYRIGHT_SERVICE_LABEL badc23068da09e9ee5d2d616e9a1161c
-BELONGS ya.make
- License text:
- * Copyright (c) 2016-2020, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- src/fdr/teddy_avx2.c [2:2]
- src/fdr/teddy_runtime_common.h [2:2]
- src/nfa/mcsheng.c [2:2]
- src/nfa/mcsheng.h [2:2]
- src/nfa/mcsheng_compile.cpp [2:2]
- src/nfa/mcsheng_data.c [2:2]
- src/nfa/mcsheng_internal.h [2:2]
- src/nfa/sheng.c [2:2]
- src/nfa/sheng.h [2:2]
- src/nfa/sheng_defs.h [2:2]
- src/nfa/sheng_impl.h [2:2]
- src/nfa/sheng_impl4.h [2:2]
- src/nfa/sheng_internal.h [2:2]
- src/nfa/shengcompile.cpp [2:2]
- src/nfa/shengcompile.h [2:2]
- src/rose/rose_build_program.cpp [2:2]
- src/rose/validate_mask.h [2:2]
- src/rose/validate_shufti.h [2:2]
- src/util/copybytes.h [2:2]
-
+KEEP COPYRIGHT_SERVICE_LABEL badc23068da09e9ee5d2d616e9a1161c
+BELONGS ya.make
+ License text:
+ * Copyright (c) 2016-2020, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ src/fdr/teddy_avx2.c [2:2]
+ src/fdr/teddy_runtime_common.h [2:2]
+ src/nfa/mcsheng.c [2:2]
+ src/nfa/mcsheng.h [2:2]
+ src/nfa/mcsheng_compile.cpp [2:2]
+ src/nfa/mcsheng_data.c [2:2]
+ src/nfa/mcsheng_internal.h [2:2]
+ src/nfa/sheng.c [2:2]
+ src/nfa/sheng.h [2:2]
+ src/nfa/sheng_defs.h [2:2]
+ src/nfa/sheng_impl.h [2:2]
+ src/nfa/sheng_impl4.h [2:2]
+ src/nfa/sheng_internal.h [2:2]
+ src/nfa/shengcompile.cpp [2:2]
+ src/nfa/shengcompile.h [2:2]
+ src/rose/rose_build_program.cpp [2:2]
+ src/rose/validate_mask.h [2:2]
+ src/rose/validate_shufti.h [2:2]
+ src/util/copybytes.h [2:2]
+
KEEP COPYRIGHT_SERVICE_LABEL ce190d0dc63fbede469d35d6d09b1aab
-BELONGS ya.make
+BELONGS ya.make
License text:
* Copyright (c) 2015-2016, Intel Corporation
Scancode info:
@@ -384,7 +384,7 @@ BELONGS ya.make
src/nfa/limex_simd512.c [2:2]
src/nfa/limex_state_impl.h [2:2]
src/nfa/mcclellan.h [2:2]
- src/nfa/mcclellandump.h [2:2]
+ src/nfa/mcclellandump.h [2:2]
src/nfa/mpv.c [2:2]
src/nfa/mpv.h [2:2]
src/nfa/mpv_internal.h [2:2]
@@ -418,19 +418,19 @@ BELONGS ya.make
src/util/multibit.c [2:2]
src/util/multibit_internal.h [2:2]
-KEEP COPYRIGHT_SERVICE_LABEL e6a7da132551d7d672f392b8cbebda6f
-BELONGS ya.make
- License text:
- * Copyright (c) 2018-2020, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- src/parser/logical_combination.cpp [2:2]
-
+KEEP COPYRIGHT_SERVICE_LABEL e6a7da132551d7d672f392b8cbebda6f
+BELONGS ya.make
+ License text:
+ * Copyright (c) 2018-2020, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ src/parser/logical_combination.cpp [2:2]
+
KEEP COPYRIGHT_SERVICE_LABEL ec0537a229e1880f54c1d10e8cd723c3
-BELONGS ya.make
+BELONGS ya.make
License text:
Copyright (c) 2015, Intel Corporation
Scancode info:
@@ -440,7 +440,7 @@ BELONGS ya.make
Files with this license:
COPYING [1:1]
LICENSE [3:3]
- hs_version.h [2:2]
+ hs_version.h [2:2]
src/allocator.h [2:2]
src/compiler/error.h [2:2]
src/crc32.h [2:2]
@@ -562,19 +562,19 @@ BELONGS ya.make
src/util/unicode_def.h [2:2]
src/util/unicode_set.h [2:2]
-KEEP COPYRIGHT_SERVICE_LABEL f115acc88879ae751ea4c79bbdd7668d
-BELONGS ya.make
- License text:
- * Copyright (c) 2019, Intel Corporation
- Scancode info:
- Original SPDX id: COPYRIGHT_SERVICE_LABEL
- Score : 100.00
- Match type : COPYRIGHT
- Files with this license:
- src/hs_internal.h [2:2]
-
+KEEP COPYRIGHT_SERVICE_LABEL f115acc88879ae751ea4c79bbdd7668d
+BELONGS ya.make
+ License text:
+ * Copyright (c) 2019, Intel Corporation
+ Scancode info:
+ Original SPDX id: COPYRIGHT_SERVICE_LABEL
+ Score : 100.00
+ Match type : COPYRIGHT
+ Files with this license:
+ src/hs_internal.h [2:2]
+
KEEP COPYRIGHT_SERVICE_LABEL f5216e1f34680f615cf0259d3a834876
-BELONGS ya.make
+BELONGS ya.make
License text:
* Copyright (c) 2015-2017, Intel Corporation
Scancode info:
@@ -708,7 +708,7 @@ BELONGS ya.make
src/nfagraph/ng_vacuous.h [2:2]
src/nfagraph/ng_width.cpp [2:2]
src/parser/Parser.h [2:2]
- src/parser/Parser.rl6 [2:2]
+ src/parser/Parser.rl6 [2:2]
src/parser/buildstate.cpp [2:2]
src/parser/buildstate.h [2:2]
src/parser/check_refs.cpp [2:2]
@@ -764,7 +764,7 @@ BELONGS ya.make
src/util/verify_types.h [2:2]
KEEP COPYRIGHT_SERVICE_LABEL fd6578dd286e9257f73d8cc59e377eb7
-BELONGS ya.make
+BELONGS ya.make
License text:
// Copyright (C) 2005-2009 Jongsoo Park <jongsoo.park -at- gmail.com>
Scancode info:
diff --git a/contrib/libs/hyperscan/.yandex_meta/devtools.licenses.report b/contrib/libs/hyperscan/.yandex_meta/devtools.licenses.report
index 9d775af0c8..be13596dd2 100644
--- a/contrib/libs/hyperscan/.yandex_meta/devtools.licenses.report
+++ b/contrib/libs/hyperscan/.yandex_meta/devtools.licenses.report
@@ -30,7 +30,7 @@
# =======================
KEEP BSD-3-Clause 0a4ed1ba60401e7adddd4627f0742271
-BELONGS ya.make
+BELONGS ya.make
License text:
*** What follows is derived from Intel's Slicing-by-8 CRC32 impl, which is BSD
*** licensed and available from http://sourceforge.net/projects/slicing-by-8/
@@ -54,7 +54,7 @@ BELONGS ya.make
LICENSE [92:116]
KEEP BSD-3-Clause 1cf0b95cc97a0b670f31f920d6a0cc89
-BELONGS ya.make
+BELONGS ya.make
License text:
* This software program is licensed subject to the BSD License,
* available at http://www.opensource.org/licenses/bsd-license.html.
@@ -78,7 +78,7 @@ BELONGS ya.make
LICENSE [64:86]
KEEP BSL-1.0 2cc71fe4bd12718a9884bf7ff37269f3
-BELONGS ya.make
+BELONGS ya.make
License text:
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
@@ -103,7 +103,7 @@ BELONGS ya.make
LICENSE [5:26]
KEEP BSD-3-Clause 4bd413230c8a7f23e3bb7671e2416c1c
-BELONGS ya.make
+BELONGS ya.make
License text:
// BSD-licensed code, with additions to handled aligned case automatically.
Scancode info:
@@ -141,7 +141,7 @@ BELONGS ya.make
LICENSE [1:1]
KEEP BSD-3-Clause 6e500f366f90c01e0d7da6e88327038a
-BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtime_corei7/ya.make ya.make
+BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtime_corei7/ya.make ya.make
Note: matched license text is too long. Read it in the source files.
Scancode info:
Original SPDX id: BSD-3-Clause
@@ -149,15 +149,15 @@ BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtim
Match type : TEXT
Links : http://www.opensource.org/licenses/BSD-3-Clause, https://spdx.org/licenses/BSD-3-Clause
Files with this license:
- hs_version.h [4:26]
- runtime_avx2/hs_common.h [4:26]
- runtime_avx2/hs_runtime.h [4:26]
- runtime_avx512/hs_common.h [4:26]
- runtime_avx512/hs_runtime.h [4:26]
- runtime_core2/hs_common.h [4:26]
- runtime_core2/hs_runtime.h [4:26]
- runtime_corei7/hs_common.h [4:26]
- runtime_corei7/hs_runtime.h [4:26]
+ hs_version.h [4:26]
+ runtime_avx2/hs_common.h [4:26]
+ runtime_avx2/hs_runtime.h [4:26]
+ runtime_avx512/hs_common.h [4:26]
+ runtime_avx512/hs_runtime.h [4:26]
+ runtime_core2/hs_common.h [4:26]
+ runtime_core2/hs_runtime.h [4:26]
+ runtime_corei7/hs_common.h [4:26]
+ runtime_corei7/hs_runtime.h [4:26]
src/alloc.c [4:26]
src/allocator.h [4:26]
src/compiler/asserts.cpp [4:26]
@@ -280,7 +280,7 @@ BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtim
src/nfa/mcclellancompile.h [4:26]
src/nfa/mcclellancompile_util.cpp [4:26]
src/nfa/mcclellancompile_util.h [4:26]
- src/nfa/mcclellandump.h [4:26]
+ src/nfa/mcclellandump.h [4:26]
src/nfa/mcsheng.c [4:26]
src/nfa/mcsheng.h [4:26]
src/nfa/mcsheng_compile.cpp [4:26]
@@ -477,7 +477,7 @@ BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtim
src/parser/ConstComponentVisitor.cpp [4:26]
src/parser/ConstComponentVisitor.h [4:26]
src/parser/Parser.h [4:26]
- src/parser/Parser.rl6 [4:26]
+ src/parser/Parser.rl6 [4:26]
src/parser/Utf8ComponentClass.cpp [4:26]
src/parser/Utf8ComponentClass.h [4:26]
src/parser/buildstate.cpp [4:26]
@@ -485,10 +485,10 @@ BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtim
src/parser/check_refs.cpp [4:26]
src/parser/check_refs.h [4:26]
src/parser/control_verbs.h [4:26]
- src/parser/control_verbs.rl6 [4:26]
+ src/parser/control_verbs.rl6 [4:26]
src/parser/dump.h [4:26]
- src/parser/logical_combination.cpp [4:26]
- src/parser/logical_combination.h [4:26]
+ src/parser/logical_combination.cpp [4:26]
+ src/parser/logical_combination.h [4:26]
src/parser/parse_error.cpp [4:26]
src/parser/parse_error.h [4:26]
src/parser/parser_util.cpp [4:26]
@@ -631,7 +631,7 @@ BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtim
src/util/dump_charclass.h [4:26]
src/util/dump_mask.cpp [4:26]
src/util/dump_mask.h [4:26]
- src/util/dump_util.h [4:26]
+ src/util/dump_util.h [4:26]
src/util/exhaust.h [4:26]
src/util/fatbit.h [4:26]
src/util/fatbit_build.cpp [4:26]
@@ -640,13 +640,13 @@ BELONGS runtime_avx2/ya.make runtime_avx512/ya.make runtime_core2/ya.make runtim
src/util/graph.h [4:26]
src/util/graph_range.h [4:26]
src/util/graph_small_color_map.h [4:26]
- src/util/graph_undirected.h [4:26]
+ src/util/graph_undirected.h [4:26]
src/util/hash.h [4:26]
src/util/hash_dynamic_bitset.h [4:26]
src/util/insertion_ordered.h [4:26]
src/util/intrinsics.h [4:26]
src/util/join.h [4:26]
- src/util/logical.h [4:26]
+ src/util/logical.h [4:26]
src/util/make_unique.h [4:26]
src/util/masked_move.c [4:26]
src/util/masked_move.h [4:26]
diff --git a/contrib/libs/hyperscan/.yandex_meta/licenses.list.txt b/contrib/libs/hyperscan/.yandex_meta/licenses.list.txt
index a8851e4b11..323496da79 100644
--- a/contrib/libs/hyperscan/.yandex_meta/licenses.list.txt
+++ b/contrib/libs/hyperscan/.yandex_meta/licenses.list.txt
@@ -1,255 +1,255 @@
-====================BSD-2-Clause====================
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-====================BSD-3-Clause====================
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
-
-
-====================BSD-3-Clause====================
- * This software program is licensed subject to the BSD License,
- * available at http://www.opensource.org/licenses/bsd-license.html.
-
-
-====================BSD-3-Clause====================
- *** What follows is derived from Intel's Slicing-by-8 CRC32 impl, which is BSD
- *** licensed and available from http://sourceforge.net/projects/slicing-by-8/
-
-
-====================BSD-3-Clause====================
-// BSD-licensed code, with additions to handled aligned case automatically.
-
-
-====================BSD-3-Clause====================
-Hyperscan is licensed under the BSD License.
-
-
-====================BSD-3-Clause====================
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-====================BSD-3-Clause====================
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of Intel Corporation nor the names of its contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-====================BSD-3-Clause====================
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of Intel Corporation nor the names of its contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-====================BSL-1.0====================
-// Distributed under the Boost Software License, Version 1.0.
-// (See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-
-====================BSL-1.0====================
-Boost Software License - Version 1.0 - August 17th, 2003
-
-Permission is hereby granted, free of charge, to any person or organization
-obtaining a copy of the software and accompanying documentation covered by
-this license (the "Software") to use, reproduce, display, distribute,
-execute, and transmit the Software, and to prepare derivative works of the
-Software, and to permit third-parties to whom the Software is furnished to
-do so, all subject to the following:
-
-The copyright notices in the Software and this entire statement, including
-the above license grant, this restriction and the following disclaimer,
-must be included in all copies of the Software, in whole or in part, and
-all derivative works of the Software, unless such copies or derivative
-works are solely in the form of machine-executable object code generated by
-a source language processor.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
-SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
-FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2016, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2017, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2018, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2019, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2020, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2016, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2016-2017, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2016-2018, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2016-2019, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2016-2020, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2017, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2017-2018, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2017-2020, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2018, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2018-2020, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2019, Intel Corporation
-
-
-====================COPYRIGHT====================
-// Copyright (C) 2005-2009 Jongsoo Park <jongsoo.park -at- gmail.com>
-
-
-====================COPYRIGHT====================
-Copyright (c) 2004-2006, Intel Corporation
-All rights reserved.
-
-
-====================COPYRIGHT====================
-Copyright (c) 2015, Intel Corporation
+====================BSD-2-Clause====================
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+====================BSD-3-Clause====================
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+
+
+====================BSD-3-Clause====================
+ * This software program is licensed subject to the BSD License,
+ * available at http://www.opensource.org/licenses/bsd-license.html.
+
+
+====================BSD-3-Clause====================
+ *** What follows is derived from Intel's Slicing-by-8 CRC32 impl, which is BSD
+ *** licensed and available from http://sourceforge.net/projects/slicing-by-8/
+
+
+====================BSD-3-Clause====================
+// BSD-licensed code, with additions to handled aligned case automatically.
+
+
+====================BSD-3-Clause====================
+Hyperscan is licensed under the BSD License.
+
+
+====================BSD-3-Clause====================
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+====================BSD-3-Clause====================
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+====================BSD-3-Clause====================
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+====================BSL-1.0====================
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+
+====================BSL-1.0====================
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2016, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2017, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2018, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2019, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2020, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2016, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2016-2017, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2016-2018, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2016-2019, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2016-2020, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2017, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2017-2018, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2017-2020, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2018, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2018-2020, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2019, Intel Corporation
+
+
+====================COPYRIGHT====================
+// Copyright (C) 2005-2009 Jongsoo Park <jongsoo.park -at- gmail.com>
+
+
+====================COPYRIGHT====================
+Copyright (c) 2004-2006, Intel Corporation
+All rights reserved.
+
+
+====================COPYRIGHT====================
+Copyright (c) 2015, Intel Corporation
diff --git a/contrib/libs/hyperscan/CHANGELOG.md b/contrib/libs/hyperscan/CHANGELOG.md
index 3da2c27ee1..8de3a8d6c9 100644
--- a/contrib/libs/hyperscan/CHANGELOG.md
+++ b/contrib/libs/hyperscan/CHANGELOG.md
@@ -2,306 +2,306 @@
This is a list of notable changes to Hyperscan, in reverse chronological order.
-## [5.4.0] 2020-12-31
-- Improvement on literal matcher "Fat Teddy" performance, including
- support for Intel(R) AVX-512 Vector Byte Manipulation Instructions (Intel(R)
- AVX-512 VBMI).
-- Introduce a new 32-state shuffle-based DFA engine ("Sheng32"). This improves
- scanning performance by leveraging AVX-512 VBMI.
-- Introduce a new 64-state shuffle-based DFA engine ("Sheng64"). This improves
- scanning performance by leveraging AVX-512 VBMI.
-- Introduce a new shuffle-based hybrid DFA engine ("McSheng64"). This improves
- scanning performance by leveraging AVX-512 VBMI.
-- Improvement on exceptional state handling performance for LimEx NFA, including
- support for AVX-512 VBMI.
-- Improvement on lookaround performance with new models, including support for
- AVX-512.
-- Improvement on DFA state space efficiency.
-- Optimization on decision of NFA/DFA generation.
-- hsbench: add CSV dump support.
-- Bugfix for cmake error on Icelake under release mode.
-- Bugfix in find_vertices_in_cycles() to avoid self-loop checking in SCC.
-- Bugfix for issue #270: fix return value handling in chimera.
-- Bugfix for issue #284: use correct free function in logical combination.
-- Add BUILD_EXAMPLES cmake option to enable example code compilation. (#260)
-- Some typo fixing. (#242, #259)
-
-## [5.3.0] 2020-05-15
-- Improvement on literal matcher "Teddy" performance, including support for
- Intel(R) AVX-512 Vector Byte Manipulation Instructions (Intel(R) AVX-512
- VBMI).
-- Improvement on single-byte/two-byte matching performance, including support
- for Intel(R) Advanced Vector Extensions 512 (Intel(R) AVX-512).
-- hsbench: add hyphen support for -T option.
-- tools/fuzz: add test scripts for synthetic pattern generation.
-- Bugfix for acceleration path analysis in LimEx NFA.
-- Bugfix for duplicate matches for Small-write engine.
-- Bugfix for UTF8 checking problem for hscollider.
-- Bugfix for issue #205: avoid crash of `hs_compile_lit_multi()` with clang and
- ASAN.
-- Bugfix for issue #211: fix error in `db_check_platform()` function.
-- Bugfix for issue #217: fix cmake parsing issue of CPU arch for non-English
- locale.
-- Bugfix for issue #228: avoid undefined behavior when calling `close()` after
- `fdopendir()` in `loadExpressions()`.
-- Bugfix for issue #239: fix hyperscan compile issue under gcc-10.
-- Add VLAN packets processing capability in pcap analysis script. (#214)
-- Avoid extra convert instruction for "Noodle". (#221)
-- Add Hyperscan version macro in `hs.h`. (#222)
-
-## [5.2.1] 2019-10-13
-- Bugfix for issue #186: fix compile issue when `BUILD_SHARED_LIBS` is on in
- release mode.
-- Disable redundant move check for older compiler versions.
-
-## [5.2.0] 2019-07-12
-- Literal API: add new API `hs_compile_lit()` and `hs_compile_lit_multi()` to
- process pure literal rule sets. The two literal APIs treat each expression
- as verbatim text, without recognizing any regular-expression grammar.
-- Logical combination: add support for purely negative combinations, which
- report match at EOD in case of no sub-expressions matched.
-- Windows porting: support shared library (DLL) on Windows with available tools
- hscheck, hsbench and hsdump.
-- Bugfix for issue #148: fix uninitialized use of `scatter_unit_uX` due to
- padding.
-- Bugfix for issue #155: fix numerical result out of range error.
-- Bugfix for issue #165: avoid corruption of pending combination report in
- streaming mode.
-- Bugfix for issue #174: fix scratch free issue when memory allocation fails.
-
-## [5.1.1] 2019-04-03
-- Add extra detection and handling when invalid rose programs are triggered.
-- Bugfix for issue #136: fix CMake parsing of CPU architecture for GCC-9.
-- Bugfix for issue #137: avoid file path impact on fat runtime build.
-- Bugfix for issue #141: fix rose literal programs for multi-pattern
- matching when no pattern ids are provided.
-- Bugfix for issue #144: fix library install path in pkg-config files.
-
-## [5.1.0] 2019-01-17
-- Improve DFA state compression by wide-state optimization to reduce bytecode
- size.
-- Create specific interpreter runtime handling to boost the performance of pure
- literal matching.
-- Optimize the original presentation of the interpreter (the "Rose" engine)
- to increase overall performance.
-- Bugfix for logical combinations: fix erroneous reporting of a combination's
- match when a sub-expression has an EOD match in streaming mode.
-- Bugfix for logical combinations: fix missed reporting of a combination's
- match on vacuous input.
-- Bugfix for issue #104: fix compile error with Boost 1.68.0.
-- Bugfix for issue #127: avoid pcre error for hscollider with installed PCRE
- package.
-- Update version of PCRE used by testing tools as a syntax and semantic
- reference to PCRE 8.41 or above.
-- Fix github repo address in doc.
-
-## [5.0.0] 2018-07-09
-- Introduce Chimera, a hybrid engine combining Hyperscan and PCRE, which
- fully supports PCRE syntax while taking advantage of Hyperscan's high
- performance.
-- New API feature: logical combinations (AND, OR and NOT) of patterns in a
- given pattern set.
-- Windows porting: hsbench, hscheck, hscollider and hsdump tools now available
- on Windows 8 or newer.
-- Improve undirected graph implementation to avoid graph copy and reduce
- compile time.
-- Bugfix for issue #86: enable hscollider for installed PCRE package.
-
-## [4.7.0] 2018-01-24
-- Introduced hscollider pattern testing tool, for validating Hyperscan match
- behaviour against PCRE.
-- Introduced hscheck pattern compilation tool.
-- Introduced hsdump development tool for producing information about Hyperscan
- pattern compilation.
-- New API feature: extended approximate matching support for Hamming distance.
-- Bugfix for issue #69: Force C++ linkage in Xcode.
-- Bugfix for issue #73: More documentation for `hs_close_stream()`.
-- Bugfix for issue #78: Fix for fat runtime initialisation when used as a
- shared library.
-
-## [4.6.0] 2017-09-22
-- New API feature: stream state compression. This allows the user to compress
- and restore state for streams to reduce memory usage.
-- Many improvements to literal matching performance, including more support
- for Intel(R) Advanced Vector Extensions 512 (Intel(R) AVX-512).
-- Compile time improvements, mainly reducing compiler memory allocation.
- Also results in reduced compile time for some pattern sets.
-- Bugfix for issue #62: fix error building Hyperscan using older versions of
- Boost.
-- Small updates to fix warnings identified by Coverity.
-
-## [4.5.2] 2017-07-26
-- Bugfix for issue #57: Treat characters between `\Q.\E` as codepoints in
- UTF8 mode.
-- Bugfix for issue #60: Use a portable flag for mktemp for fat runtime builds.
-- Bugfix for fat runtime builds on AVX-512 capable machines with Hyperscan's
- AVX-512 support disabled.
-
-## [4.5.1] 2017-06-16
-- Bugfix for issue #56: workaround for gcc-4.8 C++11 defect.
-- Bugfix for literal matching table generation, reversing a regression in
- performance for some literal matching cases.
-- Bugfixes for hsbench, related to multicore benchmarking, portability fixes
- for FreeBSD, and clarifying output results.
-- CMake: removed a duplicate else branch that causes very recent (v3.9) builds
- of CMake to fail.
-
-## [4.5.0] 2017-06-09
-- New API feature: approximate matching using the "edit distance" extended
- parameter. This allows the user to request all matches that are a given edit
- distance from an exact match for a pattern.
-- Initial support for Intel(R) Advanced Vector Extensions 512 (Intel(R)
- AVX-512), disabled by default. To enable it, pass `-DBUILD_AVX512=1` to
- `cmake`.
-- Major compile time improvements in many subsystems, reducing compile time
- significantly for many large pattern sets.
-- Internal reworking of literal matchers to operate on literals of at
- most eight characters, with subsequent confirmation done in the Rose
- interpreter. This reduces complexity and bytecode size and improves
- performance for many pattern sets.
-- Improve performance of the FDR literal matcher front end.
-- Improve bucket assignment and other heuristics governing the FDR literal
- matcher.
-- Improve optimisation passes that take advantage of extended parameter
- constraints (`min_offset`, etc).
-- Introduce further lookaround specialisations to improve scanning performance.
-- Optimise Rose interpreter construction to reduce the length of programs
- generated in some situations.
-- Remove the old "Rose" pattern decomposition analysis pass in favour of the
- new "Violet" pass introduced in Hyperscan 4.3.0.
-- In streaming mode, allow exhaustion (where the stream can no longer produce
- matches) to be detected in more situations, improving scanning performance.
-- Improve parsing of control verbs (such as `(*UTF8)`) that can only occur at
- the beginning of the pattern. Combinations of supported verbs in any order
- are now permitted.
-- Update version of PCRE used by testing tools as a syntax and semantic
- reference to PCRE 8.40.
-- Tuning support for Intel(R) microarchitecture code names Skylake, Skylake
- Server, Goldmont.
-- CMake: when building a native build with a version of GCC that doesn't
- recognise the host compiler, tune for the microarch selected by
- `-march=native`.
-- CMake: don't fail if SQLite (which is only required to build the `hsbench`
- tool) is not present.
-- CMake: detect libc++ directly and use that to inform the Boost version
- requirement.
-- Bugfix for issue #51: make the fat runtime build wrapper less fragile.
-- Bugfix for issues #46, #52: use `sqlite3_errmsg()` to allow SQLite 3.6.x to
- be used. Thanks to @EaseTheWorld for the PR.
-
-## [4.4.1] 2017-02-28
-- Bugfixes for issues where stale data was being referenced in scratch
- memory. In particular, this may have resulted in `hs_close_stream()`
- referencing data from other previously scanned streams, which could cause
- incorrect matches to be reported.
-
-## [4.4.0] 2017-01-20
-- Introduce the "fat runtime" build. This will build several variants of the
- Hyperscan scanning engine specialised for different processor feature sets,
- and use the appropriate one for the host at runtime. This uses the "ifunc"
- indirect function attribute provided by GCC and is currently available on
- Linux only, where it is the default for release builds.
-- New API function: add the `hs_valid_platform()` function. This function tests
- whether the host provides the SSSE3 instruction set required by Hyperscan.
-- Introduce a new standard benchmarking tool, "hsbench". This provides an easy
- way to measure Hyperscan's performance for a particular set of patterns and
- corpus of data to be scanned.
-- Introduce a 64-bit GPR LimEx NFA model, which uses 64-bit GPRs on 64-bit
- hosts and SSE registers on 32-bit hosts.
-- Introduce a new DFA model ("McSheng") which is a hybrid of the existing
- McClellan and Sheng models. This improves scanning performance for some
- cases.
-- Introduce lookaround specialisations to improve scanning performance.
-- Improve the handling of long literals by moving confirmation to the Rose
- interpreter and simplifying the hash table used to track them in streaming
- mode.
-- Improve compile time optimisation for removing redundant paths from
- expression graphs.
-- Build: improve support for building with MSVC toolchain.
-- Reduce the size of small write DFAs used for small scans in block mode.
-- Introduce a custom graph type (`ue2_graph`) used in place of the Boost Graph
- Library's `adjacency_list` type. Improves compile time performance and type
- safety.
-- Improve scanning performance of the McClellan DFA.
-- Bugfix for a very unusual SOM case where the incorrect start offset was
- reported for a match.
-- Bugfix for issue #37, removing execute permissions from some source files.
-- Bugfix for issue #41, handle Windows line endings in pattern files.
-
-## [4.3.2] 2016-11-15
-- Bugfix for issue #39. This small change is a workaround for an issue in
- Boost 1.62. The fix has been submitted to Boost for inclusion in a future
- release.
-
-## [4.3.1] 2016-08-29
-- Bugfix for issue #30. In recent versions of Clang, a write to a variable was
- being elided, resulting in corrupted stream state after calling
- `hs_reset_stream()`.
-
-## [4.3.0] 2016-08-24
-- Introduce a new analysis pass ("Violet") used for decomposition of patterns
- into literals and smaller engines.
-- Introduce a new container engine ("Tamarama") for infix and suffix engines
- that can be proven to run exclusively of one another. This reduces stream
- state for pattern sets with many such engines.
-- Introduce a new shuffle-based DFA engine ("Sheng"). This improves scanning
- performance for pattern sets where small engines are generated.
-- Improve the analysis used to extract extra mask information from short
- literals.
-- Reduced compile time spent in equivalence class analysis.
-- Build: frame pointers are now only omitted for 32-bit release builds.
-- Build: Workaround for C++ issues reported on FreeBSD/libc++ platforms.
- (github issue #27)
-- Simplify the LimEx NFA with a unified "variable shift" model, which reduces
- the number of different NFA code paths to one per model size.
-- Allow some anchored prefixes that may squash the literal to which they are
- attached to run eagerly. This improves scanning performance for some
- patterns.
-- Simplify and improve EOD ("end of data") matching, using the interpreter for
- all operations.
-- Elide unnecessary instructions in the Rose interpreter at compile time.
-- Reduce the number of inlined instantiations of the Rose interpreter in order
- to reduce instruction cache pressure.
-- Small improvements to literal matcher acceleration.
-- Parser: ignore `\E` metacharacters that are not preceded by `\Q`. This
- conforms to PCRE's behaviour, rather than returning a compile error.
-- Check for misaligned memory when allocating an error structure in Hyperscan's
- compile path and return an appropriate error if detected.
-
-## [4.2.0] 2016-05-31
-- Introduce an interpreter for many complex actions to replace the use of
- internal reports within the core of Hyperscan (the "Rose" engine). This
- improves scanning performance and reduces database size for many pattern
- sets.
-- Many enhancements to the acceleration framework used by NFA and DFA engines,
- including more flexible multibyte implementations and more AVX2 support. This
- improves scanning performance for many pattern sets.
-- Improved prefiltering support for complex patterns containing very large
- bounded repeats (`R{M,N}` with large `N`).
-- Improve scanning performance of pattern sets with a very large number of
- EOD-anchored patterns.
-- Improve scanning performance of large pattern sets that use the
- `HS_FLAG_SINGLEMATCH` flag.
-- Improve scanning performance of pattern sets that contain a single literal by
- improving the "Noodle" literal matcher.
-- Small reductions in total stream state for many pattern sets.
-- Improve runtime detection of AVX2 support.
-- Disable -Werror for release builds, in order to behave better for packagers
- and users with different compiler combinations than those that we test.
-- Improve support for building on Windows with MSVC 2015 (github issue #14).
- Support for Hyperscan on Windows is still experimental.
-- Small updates to fix warnings identified by Coverity.
-- Remove Python codegen for the "FDR" and "Teddy" literal matchers. These are
- now implemented directly in C code.
-- Remove the specialist "Sidecar" engine in favour of using our more general
- repeat engines.
-- New API function: add the `hs_expression_ext_info()` function. This is a
- variant of `hs_expression_info()` that can accept patterns with extended
- parameters.
-- New API error value: add the `HS_SCRATCH_IN_USE` error, which is returned
- when Hyperscan detects that a scratch region is already in use on entry to an
- API function.
-
+## [5.4.0] 2020-12-31
+- Improvement on literal matcher "Fat Teddy" performance, including
+ support for Intel(R) AVX-512 Vector Byte Manipulation Instructions (Intel(R)
+ AVX-512 VBMI).
+- Introduce a new 32-state shuffle-based DFA engine ("Sheng32"). This improves
+ scanning performance by leveraging AVX-512 VBMI.
+- Introduce a new 64-state shuffle-based DFA engine ("Sheng64"). This improves
+ scanning performance by leveraging AVX-512 VBMI.
+- Introduce a new shuffle-based hybrid DFA engine ("McSheng64"). This improves
+ scanning performance by leveraging AVX-512 VBMI.
+- Improvement on exceptional state handling performance for LimEx NFA, including
+ support for AVX-512 VBMI.
+- Improvement on lookaround performance with new models, including support for
+ AVX-512.
+- Improvement on DFA state space efficiency.
+- Optimization on decision of NFA/DFA generation.
+- hsbench: add CSV dump support.
+- Bugfix for cmake error on Icelake under release mode.
+- Bugfix in find_vertices_in_cycles() to avoid self-loop checking in SCC.
+- Bugfix for issue #270: fix return value handling in chimera.
+- Bugfix for issue #284: use correct free function in logical combination.
+- Add BUILD_EXAMPLES cmake option to enable example code compilation. (#260)
+- Some typo fixing. (#242, #259)
+
+## [5.3.0] 2020-05-15
+- Improvement on literal matcher "Teddy" performance, including support for
+ Intel(R) AVX-512 Vector Byte Manipulation Instructions (Intel(R) AVX-512
+ VBMI).
+- Improvement on single-byte/two-byte matching performance, including support
+ for Intel(R) Advanced Vector Extensions 512 (Intel(R) AVX-512).
+- hsbench: add hyphen support for -T option.
+- tools/fuzz: add test scripts for synthetic pattern generation.
+- Bugfix for acceleration path analysis in LimEx NFA.
+- Bugfix for duplicate matches for Small-write engine.
+- Bugfix for UTF8 checking problem for hscollider.
+- Bugfix for issue #205: avoid crash of `hs_compile_lit_multi()` with clang and
+ ASAN.
+- Bugfix for issue #211: fix error in `db_check_platform()` function.
+- Bugfix for issue #217: fix cmake parsing issue of CPU arch for non-English
+ locale.
+- Bugfix for issue #228: avoid undefined behavior when calling `close()` after
+ `fdopendir()` in `loadExpressions()`.
+- Bugfix for issue #239: fix hyperscan compile issue under gcc-10.
+- Add VLAN packets processing capability in pcap analysis script. (#214)
+- Avoid extra convert instruction for "Noodle". (#221)
+- Add Hyperscan version macro in `hs.h`. (#222)
+
+## [5.2.1] 2019-10-13
+- Bugfix for issue #186: fix compile issue when `BUILD_SHARED_LIBS` is on in
+ release mode.
+- Disable redundant move check for older compiler versions.
+
+## [5.2.0] 2019-07-12
+- Literal API: add new API `hs_compile_lit()` and `hs_compile_lit_multi()` to
+ process pure literal rule sets. The two literal APIs treat each expression
+ as verbatim text, without recognizing any regular-expression grammar. A
+ usage sketch follows this list.
+- Logical combination: add support for purely negative combinations, which
+ report match at EOD in case of no sub-expressions matched.
+- Windows porting: support shared library (DLL) on Windows with available tools
+ hscheck, hsbench and hsdump.
+- Bugfix for issue #148: fix uninitialized use of `scatter_unit_uX` due to
+ padding.
+- Bugfix for issue #155: fix numerical result out of range error.
+- Bugfix for issue #165: avoid corruption of pending combination report in
+ streaming mode.
+- Bugfix for issue #174: fix scratch free issue when memory allocation fails.
+
+## [5.1.1] 2019-04-03
+- Add extra detection and handling when invalid rose programs are triggered.
+- Bugfix for issue #136: fix CMake parsing of CPU architecture for GCC-9.
+- Bugfix for issue #137: avoid file path impact on fat runtime build.
+- Bugfix for issue #141: fix rose literal programs for multi-pattern
+ matching when no pattern ids are provided.
+- Bugfix for issue #144: fix library install path in pkg-config files.
+
+## [5.1.0] 2019-01-17
+- Improve DFA state compression by wide-state optimization to reduce bytecode
+ size.
+- Create specific interpreter runtime handling to boost the performance of pure
+ literal matching.
+- Optimize the original presentation of the interpreter (the "Rose" engine)
+ to increase overall performance.
+- Bugfix for logical combinations: fix erroneous reporting of a combination's
+ match when a sub-expression has an EOD match in streaming mode.
+- Bugfix for logical combinations: fix missed reporting of a combination's
+ match on vacuous input.
+- Bugfix for issue #104: fix compile error with Boost 1.68.0.
+- Bugfix for issue #127: avoid pcre error for hscollider with installed PCRE
+ package.
+- Update version of PCRE used by testing tools as a syntax and semantic
+ reference to PCRE 8.41 or above.
+- Fix github repo address in doc.
+
+## [5.0.0] 2018-07-09
+- Introduce Chimera, a hybrid engine combining Hyperscan and PCRE, which
+ fully supports PCRE syntax while taking advantage of Hyperscan's high
+ performance.
+- New API feature: logical combinations (AND, OR and NOT) of patterns in a
+ given pattern set; a usage sketch follows this list.
+- Windows porting: hsbench, hscheck, hscollider and hsdump tools now available
+ on Windows 8 or newer.
+- Improve undirected graph implementation to avoid graph copy and reduce
+ compile time.
+- Bugfix for issue #86: enable hscollider for installed PCRE package.
+
+## [4.7.0] 2018-01-24
+- Introduced hscollider pattern testing tool, for validating Hyperscan match
+ behaviour against PCRE.
+- Introduced hscheck pattern compilation tool.
+- Introduced hsdump development tool for producing information about Hyperscan
+ pattern compilation.
+- New API feature: extended approximate matching support for Hamming distance
+ (a usage sketch follows this list).
+- Bugfix for issue #69: Force C++ linkage in Xcode.
+- Bugfix for issue #73: More documentation for `hs_close_stream()`.
+- Bugfix for issue #78: Fix for fat runtime initialisation when used as a
+ shared library.
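A sketch of the Hamming-distance extended parameter via
`hs_compile_ext_multi()`; the pattern, ID and helper name are illustrative. The
4.5.0 `edit_distance` field (further down) is set the same way.

```c
#include <string.h>
#include <hs/hs.h>

/* Illustrative: match "hyperscan" with at most one substituted byte. */
static hs_database_t *build_hamming_db(hs_compile_error_t **err) {
    const char *expr = "hyperscan";
    unsigned int flags = 0, id = 1;
    hs_expr_ext_t ext;
    memset(&ext, 0, sizeof(ext));
    ext.flags = HS_EXT_FLAG_HAMMING_DISTANCE;
    ext.hamming_distance = 1;

    const hs_expr_ext_t *exts[] = { &ext };
    hs_database_t *db = NULL;
    if (hs_compile_ext_multi(&expr, &flags, &id, exts, 1, HS_MODE_BLOCK,
                             NULL, &db, err) != HS_SUCCESS) {
        return NULL;
    }
    return db;
}
```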
+
+## [4.6.0] 2017-09-22
+- New API feature: stream state compression. This allows the user to compress
+ and restore state for streams to reduce memory usage (sketch below).
+- Many improvements to literal matching performance, including more support
+ for Intel(R) Advanced Vector Extensions 512 (Intel(R) AVX-512).
+- Compile time improvements, mainly reducing compiler memory allocation.
+ Also results in reduced compile time for some pattern sets.
+- Bugfix for issue #62: fix error building Hyperscan using older versions of
+ Boost.
+- Small updates to fix warnings identified by Coverity.
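A sketch of the stream state compression API (`hs_compress_stream()` /
`hs_expand_stream()`), assuming the documented behaviour that an undersized
buffer yields `HS_INSUFFICIENT_SPACE` with the required size written to
`used_space`; the helper name is illustrative.

```c
#include <stdlib.h>
#include <hs/hs.h>

/* Illustrative: park an idle stream's state in a caller-owned buffer. */
static char *park_stream(const hs_stream_t *stream, size_t *used) {
    *used = 0;
    hs_compress_stream(stream, NULL, 0, used);      /* size query */
    char *buf = malloc(*used);
    if (!buf || hs_compress_stream(stream, buf, *used, used) != HS_SUCCESS) {
        free(buf);
        return NULL;
    }
    return buf;  /* later: hs_expand_stream(db, &stream, buf, *used) */
}
```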
+
+## [4.5.2] 2017-07-26
+- Bugfix for issue #57: Treat characters between `\Q.\E` as codepoints in
+ UTF8 mode.
+- Bugfix for issue #60: Use a portable flag for mktemp for fat runtime builds.
+- Bugfix for fat runtime builds on AVX-512 capable machines with Hyperscan's
+ AVX-512 support disabled.
+
+## [4.5.1] 2017-06-16
+- Bugfix for issue #56: workaround for gcc-4.8 C++11 defect.
+- Bugfix for literal matching table generation, reversing a regression in
+ performance for some literal matching cases.
+- Bugfixes for hsbench, related to multicore benchmarking, portability fixes
+ for FreeBSD, and clarifying output results.
+- CMake: removed a duplicate else branch that causes very recent (v3.9) builds
+ of CMake to fail.
+
+## [4.5.0] 2017-06-09
+- New API feature: approximate matching using the "edit distance" extended
+ parameter. This allows the user to request all matches that are a given edit
+ distance from an exact match for a pattern.
+- Initial support for Intel(R) Advanced Vector Extensions 512 (Intel(R)
+ AVX-512), disabled by default. To enable it, pass `-DBUILD_AVX512=1` to
+ `cmake`.
+- Major compile time improvements in many subsystems, reducing compile time
+ significantly for many large pattern sets.
+- Internal reworking of literal matchers to operate on literals of at
+ most eight characters, with subsequent confirmation done in the Rose
+ interpreter. This reduces complexity and bytecode size and improves
+ performance for many pattern sets.
+- Improve performance of the FDR literal matcher front end.
+- Improve bucket assignment and other heuristics governing the FDR literal
+ matcher.
+- Improve optimisation passes that take advantage of extended parameter
+ constraints (`min_offset`, etc).
+- Introduce further lookaround specialisations to improve scanning performance.
+- Optimise Rose interpreter construction to reduce the length of programs
+ generated in some situations.
+- Remove the old "Rose" pattern decomposition analysis pass in favour of the
+ new "Violet" pass introduced in Hyperscan 4.3.0.
+- In streaming mode, allow exhaustion (where the stream can no longer produce
+ matches) to be detected in more situations, improving scanning performance.
+- Improve parsing of control verbs (such as `(*UTF8)`) that can only occur at
+ the beginning of the pattern. Combinations of supported verbs in any order
+ are now permitted.
+- Update version of PCRE used by testing tools as a syntax and semantic
+ reference to PCRE 8.40.
+- Tuning support for Intel(R) microarchitecture code names Skylake, Skylake
+ Server, Goldmont.
+- CMake: when building a native build with a version of GCC that doesn't
+ recognise the host compiler, tune for the microarch selected by
+ `-march=native`.
+- CMake: don't fail if SQLite (which is only required to build the `hsbench`
+ tool) is not present.
+- CMake: detect libc++ directly and use that to inform the Boost version
+ requirement.
+- Bugfix for issue #51: make the fat runtime build wrapper less fragile.
+- Bugfix for issues #46, #52: use `sqlite3_errmsg()` to allow SQLite 3.6.x to
+ be used. Thanks to @EaseTheWorld for the PR.
+
+## [4.4.1] 2017-02-28
+- Bugfixes for issues where stale data was being referenced in scratch
+ memory. In particular, this may have resulted in `hs_close_stream()`
+ referencing data from other previously scanned streams, which could cause
+ incorrect matches to be reported.
+
+## [4.4.0] 2017-01-20
+- Introduce the "fat runtime" build. This will build several variants of the
+ Hyperscan scanning engine specialised for different processor feature sets,
+ and use the appropriate one for the host at runtime. This uses the "ifunc"
+ indirect function attribute provided by GCC and is currently available on
+ Linux only, where it is the default for release builds.
+- New API function: add the `hs_valid_platform()` function. This function tests
+ whether the host provides the SSSE3 instruction set required by Hyperscan
+ (a sketch follows this list).
+- Introduce a new standard benchmarking tool, "hsbench". This provides an easy
+ way to measure Hyperscan's performance for a particular set of patterns and
+ corpus of data to be scanned.
+- Introduce a 64-bit GPR LimEx NFA model, which uses 64-bit GPRs on 64-bit
+ hosts and SSE registers on 32-bit hosts.
+- Introduce a new DFA model ("McSheng") which is a hybrid of the existing
+ McClellan and Sheng models. This improves scanning performance for some
+ cases.
+- Introduce lookaround specialisations to improve scanning performance.
+- Improve the handling of long literals by moving confirmation to the Rose
+ interpreter and simplifying the hash table used to track them in streaming
+ mode.
+- Improve compile time optimisation for removing redundant paths from
+ expression graphs.
+- Build: improve support for building with MSVC toolchain.
+- Reduce the size of small write DFAs used for small scans in block mode.
+- Introduce a custom graph type (`ue2_graph`) used in place of the Boost Graph
+ Library's `adjacency_list` type. Improves compile time performance and type
+ safety.
+- Improve scanning performance of the McClellan DFA.
+- Bugfix for a very unusual SOM case where the incorrect start offset was
+ reported for a match.
+- Bugfix for issue #37, removing execute permissions from some source files.
+- Bugfix for issue #41, handle Windows line endings in pattern files.
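A sketch of the runtime platform guard added here; the message text is
illustrative.

```c
#include <stdio.h>
#include <hs/hs.h>

int main(void) {
    /* Check the host CPU before calling any other Hyperscan API. */
    if (hs_valid_platform() != HS_SUCCESS) {
        fprintf(stderr, "host CPU lacks SSSE3; Hyperscan cannot run\n");
        return 1;
    }
    return 0;
}
```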
+
+## [4.3.2] 2016-11-15
+- Bugfix for issue #39. This small change is a workaround for an issue in
+ Boost 1.62. The fix has been submitted to Boost for inclusion in a future
+ release.
+
+## [4.3.1] 2016-08-29
+- Bugfix for issue #30. In recent versions of Clang, a write to a variable was
+ being elided, resulting in corrupted stream state after calling
+ `hs_reset_stream()`.
+
+## [4.3.0] 2016-08-24
+- Introduce a new analysis pass ("Violet") used for decomposition of patterns
+ into literals and smaller engines.
+- Introduce a new container engine ("Tamarama") for infix and suffix engines
+ that can be proven to run exclusively of one another. This reduces stream
+ state for pattern sets with many such engines.
+- Introduce a new shuffle-based DFA engine ("Sheng"). This improves scanning
+ performance for pattern sets where small engines are generated.
+- Improve the analysis used to extract extra mask information from short
+ literals.
+- Reduced compile time spent in equivalence class analysis.
+- Build: frame pointers are now only omitted for 32-bit release builds.
+- Build: Workaround for C++ issues reported on FreeBSD/libc++ platforms.
+ (github issue #27)
+- Simplify the LimEx NFA with a unified "variable shift" model, which reduces
+ the number of different NFA code paths to one per model size.
+- Allow some anchored prefixes that may squash the literal to which they are
+ attached to run eagerly. This improves scanning performance for some
+ patterns.
+- Simplify and improve EOD ("end of data") matching, using the interpreter for
+ all operations.
+- Elide unnecessary instructions in the Rose interpreter at compile time.
+- Reduce the number of inlined instantiations of the Rose interpreter in order
+ to reduce instruction cache pressure.
+- Small improvements to literal matcher acceleration.
+- Parser: ignore `\E` metacharacters that are not preceded by `\Q`. This
+ conforms to PCRE's behaviour, rather than returning a compile error.
+- Check for misaligned memory when allocating an error structure in Hyperscan's
+ compile path and return an appropriate error if detected.
+
+## [4.2.0] 2016-05-31
+- Introduce an interpreter for many complex actions to replace the use of
+ internal reports within the core of Hyperscan (the "Rose" engine). This
+ improves scanning performance and reduces database size for many pattern
+ sets.
+- Many enhancements to the acceleration framework used by NFA and DFA engines,
+ including more flexible multibyte implementations and more AVX2 support. This
+ improves scanning performance for many pattern sets.
+- Improved prefiltering support for complex patterns containing very large
+ bounded repeats (`R{M,N}` with large `N`).
+- Improve scanning performance of pattern sets with a very large number of
+ EOD-anchored patterns.
+- Improve scanning performance of large pattern sets that use the
+ `HS_FLAG_SINGLEMATCH` flag.
+- Improve scanning performance of pattern sets that contain a single literal by
+ improving the "Noodle" literal matcher.
+- Small reductions in total stream state for many pattern sets.
+- Improve runtime detection of AVX2 support.
+- Disable -Werror for release builds, in order to behave better for packagers
+ and users with different compiler combinations than those that we test.
+- Improve support for building on Windows with MSVC 2015 (github issue #14).
+ Support for Hyperscan on Windows is still experimental.
+- Small updates to fix warnings identified by Coverity.
+- Remove Python codegen for the "FDR" and "Teddy" literal matchers. These are
+ now implemented directly in C code.
+- Remove the specialist "Sidecar" engine in favour of using our more general
+ repeat engines.
+- New API function: add the `hs_expression_ext_info()` function. This is a
+ variant of `hs_expression_info()` that can accept patterns with extended
+ parameters; a usage sketch follows this list.
+- New API error value: add the `HS_SCRATCH_IN_USE` error, which is returned
+ when Hyperscan detects that a scratch region is already in use on entry to an
+ API function.
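A sketch of `hs_expression_ext_info()` with an extended-parameter constraint
applied; the pattern, constraint value and helper name are illustrative, and
the `free()` call assumes the default (malloc-based) misc allocator.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hs/hs.h>

/* Illustrative: query pattern properties under a min_offset constraint,
 * which plain hs_expression_info() cannot express. */
static void show_widths(void) {
    hs_expr_ext_t ext;
    memset(&ext, 0, sizeof(ext));
    ext.flags = HS_EXT_FLAG_MIN_OFFSET;
    ext.min_offset = 16;

    hs_expr_info_t *info = NULL;
    hs_compile_error_t *err = NULL;
    if (hs_expression_ext_info("foo[0-9]+", 0, &ext, &info, &err)
            != HS_SUCCESS) {
        hs_free_compile_error(err);
        return;
    }
    printf("width: min %u, max %u\n", info->min_width, info->max_width);
    free(info);   /* allocated via the misc allocator */
}
```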
+
## [4.1.0] 2015-12-18
- Update version of PCRE used by testing tools as a syntax and semantic
reference to PCRE 8.38.
diff --git a/contrib/libs/hyperscan/README.md b/contrib/libs/hyperscan/README.md
index ae0d2ce0d2..9f4c03723c 100644
--- a/contrib/libs/hyperscan/README.md
+++ b/contrib/libs/hyperscan/README.md
@@ -13,7 +13,7 @@ Hyperscan is typically used in a DPI library stack.
# Documentation
Information on building the Hyperscan library and using its API is available in
-the [Developer Reference Guide](http://intel.github.io/hyperscan/dev-reference/).
+the [Developer Reference Guide](http://intel.github.io/hyperscan/dev-reference/).
# License
@@ -32,7 +32,7 @@ branch.
# Get Involved
-The official homepage for Hyperscan is at [www.hyperscan.io](https://www.hyperscan.io).
+The official homepage for Hyperscan is at [www.hyperscan.io](https://www.hyperscan.io).
If you have questions or comments, we encourage you to [join the mailing
list](https://lists.01.org/mailman/listinfo/hyperscan). Bugs can be filed by
diff --git a/contrib/libs/hyperscan/config-linux.h b/contrib/libs/hyperscan/config-linux.h
index d4f8794cab..56951c3881 100644
--- a/contrib/libs/hyperscan/config-linux.h
+++ b/contrib/libs/hyperscan/config-linux.h
@@ -1,109 +1,109 @@
-/* used by cmake */
-
-#ifndef CONFIG_H_
-#define CONFIG_H_
-
-/* "Define if the build is 32 bit" */
-/* #undef ARCH_32_BIT */
-
-/* "Define if the build is 64 bit" */
-#define ARCH_64_BIT
-
-/* "Define if building for IA32" */
-/* #undef ARCH_IA32 */
-
-/* "Define if building for EM64T" */
-#define ARCH_X86_64
-
-/* internal build, switch on dump support. */
-/* #undef DUMP_SUPPORT */
-
-/* Define if building "fat" runtime. */
-#define FAT_RUNTIME
-
-/* Define if building AVX-512 in the fat runtime. */
-#define BUILD_AVX512
-
-/* Define if building AVX512VBMI in the fat runtime. */
-/* #undef BUILD_AVX512VBMI */
-
-/* Define to 1 if `backtrace' works. */
-/* #undef HAVE_BACKTRACE */
-
-/* C compiler has __builtin_assume_aligned */
-#define HAVE_CC_BUILTIN_ASSUME_ALIGNED
-
-/* C++ compiler has __builtin_assume_aligned */
-#define HAVE_CXX_BUILTIN_ASSUME_ALIGNED
-
-/* C++ compiler has x86intrin.h */
-#define HAVE_CXX_X86INTRIN_H
-
-/* C compiler has x86intrin.h */
-#define HAVE_C_X86INTRIN_H
-
-/* C++ compiler has intrin.h */
-/* #undef HAVE_CXX_INTRIN_H */
-
-/* C compiler has intrin.h */
-/* #undef HAVE_C_INTRIN_H */
-
-/* Define to 1 if you have the declaration of `pthread_setaffinity_np', and to
- 0 if you don't. */
-/* #undef HAVE_DECL_PTHREAD_SETAFFINITY_NP */
-
-/* #undef HAVE_PTHREAD_NP_H */
-
-/* Define to 1 if you have the `malloc_info' function. */
-/* #undef HAVE_MALLOC_INFO */
-
-/* Define to 1 if you have the `memmem' function. */
-/* #undef HAVE_MEMMEM */
-
-/* Define to 1 if you have a working `mmap' system call. */
-#define HAVE_MMAP
-
-/* Define to 1 if `posix_memalign' works. */
-#define HAVE_POSIX_MEMALIGN
-
-/* Define to 1 if you have the `setrlimit' function. */
-/* #undef HAVE_SETRLIMIT */
-
-/* Define to 1 if you have the `shmget' function. */
-/* #undef HAVE_SHMGET */
-
-/* Define to 1 if you have the `sigaction' function. */
-/* #undef HAVE_SIGACTION */
-
-/* Define to 1 if you have the `sigaltstack' function. */
-/* #undef HAVE_SIGALTSTACK */
-
-/* Define if the sqlite3_open_v2 call is available */
-/* #undef HAVE_SQLITE3_OPEN_V2 */
-
-/* Define to 1 if you have the <unistd.h> header file. */
-#define HAVE_UNISTD_H
-
-/* Define to 1 if you have the `_aligned_malloc' function. */
-/* #undef HAVE__ALIGNED_MALLOC */
-
-/* Define if compiler has __builtin_constant_p */
-#define HAVE__BUILTIN_CONSTANT_P
-
-/* Optimize, inline critical functions */
-#define HS_OPTIMIZE
-
-#define HS_VERSION
-#define HS_MAJOR_VERSION
-#define HS_MINOR_VERSION
-/* #undef HS_PATCH_VERSION */
-
-#define BUILD_DATE
-
-/* define if this is a release build. */
-#define RELEASE_BUILD
-
-/* define if reverse_graph requires patch for boost 1.62.0 */
-/* #undef BOOST_REVGRAPH_PATCH */
-
-#endif /* CONFIG_H_ */
+/* used by cmake */
+
+#ifndef CONFIG_H_
+#define CONFIG_H_
+
+/* "Define if the build is 32 bit" */
+/* #undef ARCH_32_BIT */
+
+/* "Define if the build is 64 bit" */
+#define ARCH_64_BIT
+
+/* "Define if building for IA32" */
+/* #undef ARCH_IA32 */
+
+/* "Define if building for EM64T" */
+#define ARCH_X86_64
+
+/* internal build, switch on dump support. */
+/* #undef DUMP_SUPPORT */
+
+/* Define if building "fat" runtime. */
+#define FAT_RUNTIME
+
+/* Define if building AVX-512 in the fat runtime. */
+#define BUILD_AVX512
+
+/* Define if building AVX512VBMI in the fat runtime. */
+/* #undef BUILD_AVX512VBMI */
+
+/* Define to 1 if `backtrace' works. */
+/* #undef HAVE_BACKTRACE */
+
+/* C compiler has __builtin_assume_aligned */
+#define HAVE_CC_BUILTIN_ASSUME_ALIGNED
+
+/* C++ compiler has __builtin_assume_aligned */
+#define HAVE_CXX_BUILTIN_ASSUME_ALIGNED
+
+/* C++ compiler has x86intrin.h */
+#define HAVE_CXX_X86INTRIN_H
+
+/* C compiler has x86intrin.h */
+#define HAVE_C_X86INTRIN_H
+
+/* C++ compiler has intrin.h */
+/* #undef HAVE_CXX_INTRIN_H */
+
+/* C compiler has intrin.h */
+/* #undef HAVE_C_INTRIN_H */
+
+/* Define to 1 if you have the declaration of `pthread_setaffinity_np', and to
+ 0 if you don't. */
+/* #undef HAVE_DECL_PTHREAD_SETAFFINITY_NP */
+
+/* #undef HAVE_PTHREAD_NP_H */
+
+/* Define to 1 if you have the `malloc_info' function. */
+/* #undef HAVE_MALLOC_INFO */
+
+/* Define to 1 if you have the `memmem' function. */
+/* #undef HAVE_MEMMEM */
+
+/* Define to 1 if you have a working `mmap' system call. */
+#define HAVE_MMAP
+
+/* Define to 1 if `posix_memalign' works. */
+#define HAVE_POSIX_MEMALIGN
+
+/* Define to 1 if you have the `setrlimit' function. */
+/* #undef HAVE_SETRLIMIT */
+
+/* Define to 1 if you have the `shmget' function. */
+/* #undef HAVE_SHMGET */
+
+/* Define to 1 if you have the `sigaction' function. */
+/* #undef HAVE_SIGACTION */
+
+/* Define to 1 if you have the `sigaltstack' function. */
+/* #undef HAVE_SIGALTSTACK */
+
+/* Define if the sqlite3_open_v2 call is available */
+/* #undef HAVE_SQLITE3_OPEN_V2 */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H
+
+/* Define to 1 if you have the `_aligned_malloc' function. */
+/* #undef HAVE__ALIGNED_MALLOC */
+
+/* Define if compiler has __builtin_constant_p */
+#define HAVE__BUILTIN_CONSTANT_P
+
+/* Optimize, inline critical functions */
+#define HS_OPTIMIZE
+
+#define HS_VERSION
+#define HS_MAJOR_VERSION
+#define HS_MINOR_VERSION
+/* #undef HS_PATCH_VERSION */
+
+#define BUILD_DATE
+
+/* define if this is a release build. */
+#define RELEASE_BUILD
+
+/* define if reverse_graph requires patch for boost 1.62.0 */
+/* #undef BOOST_REVGRAPH_PATCH */
+
+#endif /* CONFIG_H_ */
diff --git a/contrib/libs/hyperscan/config-win.h b/contrib/libs/hyperscan/config-win.h
index d688243027..d7a751af09 100644
--- a/contrib/libs/hyperscan/config-win.h
+++ b/contrib/libs/hyperscan/config-win.h
@@ -1,16 +1,16 @@
-#include "config-linux.h"
-
-/* C++ compiler has x86intrin.h */
-#undef HAVE_CXX_X86INTRIN_H
-
-/* C compiler has x86intrin.h */
-#undef HAVE_C_X86INTRIN_H
-
-/* C++ compiler has intrin.h */
-#define HAVE_CXX_INTRIN_H
-
-/* C compiler has intrin.h */
-#define HAVE_C_INTRIN_H
-
-/* Define if compiler has __builtin_constant_p */
-#undef HAVE__BUILTIN_CONSTANT_P
+#include "config-linux.h"
+
+/* C++ compiler has x86intrin.h */
+#undef HAVE_CXX_X86INTRIN_H
+
+/* C compiler has x86intrin.h */
+#undef HAVE_C_X86INTRIN_H
+
+/* C++ compiler has intrin.h */
+#define HAVE_CXX_INTRIN_H
+
+/* C compiler has intrin.h */
+#define HAVE_C_INTRIN_H
+
+/* Define if compiler has __builtin_constant_p */
+#undef HAVE__BUILTIN_CONSTANT_P
diff --git a/contrib/libs/hyperscan/config.h b/contrib/libs/hyperscan/config.h
index b27f1cf733..5623f311fa 100644
--- a/contrib/libs/hyperscan/config.h
+++ b/contrib/libs/hyperscan/config.h
@@ -1,7 +1,7 @@
-#pragma once
-
-#if defined(_MSC_VER)
-# include "config-win.h"
-#else
-# include "config-linux.h"
-#endif
+#pragma once
+
+#if defined(_MSC_VER)
+# include "config-win.h"
+#else
+# include "config-linux.h"
+#endif
diff --git a/contrib/libs/hyperscan/hs_version.h b/contrib/libs/hyperscan/hs_version.h
index 74abe45a47..af41f33bbc 100644
--- a/contrib/libs/hyperscan/hs_version.h
+++ b/contrib/libs/hyperscan/hs_version.h
@@ -32,9 +32,9 @@
/**
* A version string to identify this release of Hyperscan.
*/
-#define HS_VERSION_STRING "5.4.0 1980-01-01"
+#define HS_VERSION_STRING "5.4.0 1980-01-01"
-#define HS_VERSION_32BIT ((5 << 24) | (4 << 16) | (0 << 8) | 0)
+#define HS_VERSION_32BIT ((5 << 24) | (4 << 16) | (0 << 8) | 0)
#endif /* HS_VERSION_H_C6428FAF8E3713 */
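The packed version word above stores the major, minor and patch numbers in its
top three bytes. A sketch (helper name hypothetical) of unpacking it with the
same shift layout:

```c
#include <stdio.h>

/* Decode the ((major << 24) | (minor << 16) | (patch << 8)) layout above. */
static void print_hs_version(unsigned int v) {
    printf("%u.%u.%u\n",
           (v >> 24) & 0xff,    /* major: 5 */
           (v >> 16) & 0xff,    /* minor: 4 */
           (v >> 8) & 0xff);    /* patch: 0 */
}
```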
diff --git a/contrib/libs/hyperscan/runtime_avx2/.yandex_meta/licenses.list.txt b/contrib/libs/hyperscan/runtime_avx2/.yandex_meta/licenses.list.txt
index 358c19fe4a..b2ced66bbd 100644
--- a/contrib/libs/hyperscan/runtime_avx2/.yandex_meta/licenses.list.txt
+++ b/contrib/libs/hyperscan/runtime_avx2/.yandex_meta/licenses.list.txt
@@ -1,32 +1,32 @@
-====================BSD-3-Clause====================
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2018, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2019, Intel Corporation
+====================BSD-3-Clause====================
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2018, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2019, Intel Corporation
diff --git a/contrib/libs/hyperscan/runtime_avx2/hs_common.h b/contrib/libs/hyperscan/runtime_avx2/hs_common.h
index 597a341396..c19b8f0149 100644
--- a/contrib/libs/hyperscan/runtime_avx2/hs_common.h
+++ b/contrib/libs/hyperscan/runtime_avx2/hs_common.h
@@ -1,596 +1,596 @@
-/*
- * Copyright (c) 2015-2019, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HS_AVX2_COMMON_H
-#define HS_AVX2_COMMON_H
-
-#if defined(_WIN32)
-#define HS_CDECL __cdecl
-#else
-#define HS_CDECL
-#endif
-#include <stdlib.h>
-
-/**
- * @file
- * @brief The Hyperscan common API definition.
- *
- * Hyperscan is a high speed regular expression engine.
- *
- * This header contains functions available to both the Hyperscan compiler and
- * runtime.
- */
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-struct hs_database;
-
-/**
- * A Hyperscan pattern database.
- *
- * Generated by one of the Hyperscan compiler functions:
- * - @ref hs_compile()
- * - @ref hs_compile_multi()
- * - @ref hs_compile_ext_multi()
- */
-typedef struct hs_database hs_database_t;
-
-/**
- * A type for errors returned by Hyperscan functions.
- */
-typedef int hs_error_t;
-
-/**
- * Free a compiled pattern database.
- *
- * The free callback set by @ref hs_set_database_allocator() (or @ref
- * hs_set_allocator()) will be used by this function.
- *
- * @param db
- * A compiled pattern database. NULL may also be safely provided, in which
- * case the function does nothing.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_free_database(hs_database_t *db);
-
-/**
- * Serialize a pattern database to a stream of bytes.
- *
- * The allocator callback set by @ref hs_set_misc_allocator() (or @ref
- * hs_set_allocator()) will be used by this function.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param bytes
- * On success, a pointer to an array of bytes will be returned here.
- * These bytes can be subsequently relocated or written to disk. The
- * caller is responsible for freeing this block.
- *
- * @param length
- * On success, the number of bytes in the generated byte array will be
- * returned here.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_NOMEM if the byte array cannot be
- * allocated, other values may be returned if errors are detected.
- */
-hs_error_t avx2_hs_serialize_database(const hs_database_t *db, char **bytes,
- size_t *length);
-
-/**
- * Reconstruct a pattern database from a stream of bytes previously generated
- * by @ref hs_serialize_database().
- *
- * This function will allocate sufficient space for the database using the
- * allocator set with @ref hs_set_database_allocator() (or @ref
- * hs_set_allocator()); to use a pre-allocated region of memory, use the @ref
- * hs_deserialize_database_at() function.
- *
- * @param bytes
- * A byte array generated by @ref hs_serialize_database() representing a
- * compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param db
- * On success, a pointer to a newly allocated @ref hs_database_t will be
- * returned here. This database can then be used for scanning, and
- * eventually freed by the caller using @ref hs_free_database().
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_deserialize_database(const char *bytes,
- const size_t length,
- hs_database_t **db);
-
-/**
- * Reconstruct a pattern database from a stream of bytes previously generated
- * by @ref hs_serialize_database() at a given memory location.
- *
- * This function (unlike @ref hs_deserialize_database()) will write the
- * reconstructed database to the memory location given in the @p db parameter.
- * The amount of space required at this location can be determined with the
- * @ref hs_serialized_database_size() function.
- *
- * @param bytes
- * A byte array generated by @ref hs_serialize_database() representing a
- * compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param db
- * Pointer to an 8-byte aligned block of memory of sufficient size to hold
- * the deserialized database. On success, the reconstructed database will
- * be written to this location. This database can then be used for pattern
- * matching. The user is responsible for freeing this memory; the @ref
- * hs_free_database() call should not be used.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_deserialize_database_at(const char *bytes,
- const size_t length,
- hs_database_t *db);
-
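A sketch of the serialize/deserialize round trip these declarations document,
written against the plain `hs_*` entry points that the fat runtime dispatches
to these `avx2_`-prefixed implementations; the helper name is illustrative and
the `free()` assumes the default misc allocator.

```c
#include <stdlib.h>
#include <hs/hs.h>

/* Illustrative: relocate a compiled database through a byte buffer,
 * e.g. to persist it to disk or hand it to another process. */
static hs_database_t *round_trip(const hs_database_t *db) {
    char *bytes = NULL;
    size_t len = 0;
    if (hs_serialize_database(db, &bytes, &len) != HS_SUCCESS) {
        return NULL;
    }
    hs_database_t *copy = NULL;
    if (hs_deserialize_database(bytes, len, &copy) != HS_SUCCESS) {
        copy = NULL;
    }
    free(bytes);   /* serialized buffer comes from the misc allocator */
    return copy;
}
```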
-/**
- * Provides the size of the stream state allocated by a single stream opened
- * against the given database.
- *
- * @param database
- * Pointer to a compiled (streaming mode) pattern database.
- *
- * @param stream_size
- * On success, the size in bytes of an individual stream opened against the
- * given database is placed in this parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_stream_size(const hs_database_t *database,
- size_t *stream_size);
-
-/**
- * Provides the size of the given database in bytes.
- *
- * @param database
- * Pointer to compiled pattern database.
- *
- * @param database_size
- * On success, the size of the compiled database in bytes is placed in this
- * parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_database_size(const hs_database_t *database,
- size_t *database_size);
-
-/**
- * Utility function for reporting the size that would be required by a
- * database if it were deserialized.
- *
- * This can be used to allocate a shared memory region or other "special"
- * allocation prior to deserializing with the @ref hs_deserialize_database_at()
- * function.
- *
- * @param bytes
- * Pointer to a byte array generated by @ref hs_serialize_database()
- * representing a compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param deserialized_size
- * On success, the size of the compiled database that would be generated
- * by @ref hs_deserialize_database_at() is returned here.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_serialized_database_size(const char *bytes,
- const size_t length,
- size_t *deserialized_size);
-
-/**
- * Utility function providing information about a database.
- *
- * @param database
- * Pointer to a compiled database.
- *
- * @param info
- * On success, a string containing the version and platform information for
- * the supplied database is placed in the parameter. The string is
- * allocated using the allocator supplied in @ref hs_set_misc_allocator()
- * (or malloc() if no allocator was set) and should be freed by the caller.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_database_info(const hs_database_t *database,
- char **info);
-
-/**
- * Utility function providing information about a serialized database.
- *
- * @param bytes
- * Pointer to a serialized database.
- *
- * @param length
- * Length in bytes of the serialized database.
- *
- * @param info
- * On success, a string containing the version and platform information
- * for the supplied serialized database is placed in the parameter. The
- * string is allocated using the allocator supplied in @ref
- * hs_set_misc_allocator() (or malloc() if no allocator was set) and
- * should be freed by the caller.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_serialized_database_info(const char *bytes,
- size_t length, char **info);
-
-/**
- * The type of the callback function that will be used by Hyperscan to allocate
- * more memory at runtime as required, for example in @ref hs_open_stream() to
- * allocate stream state.
- *
- * If Hyperscan is to be used in a multi-threaded, or similarly concurrent
- * environment, the allocation function will need to be re-entrant, or
- * similarly safe for concurrent use.
- *
- * @param size
- * The number of bytes to allocate.
- * @return
- * A pointer to the region of memory allocated, or NULL on error.
- */
-typedef void *(HS_CDECL *hs_alloc_t)(size_t size);
-
-/**
- * The type of the callback function that will be used by Hyperscan to free
- * memory regions previously allocated using the @ref hs_alloc_t function.
- *
- * @param ptr
- * The region of memory to be freed.
- */
-typedef void (HS_CDECL *hs_free_t)(void *ptr);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating
- * memory at runtime for stream state, scratch space, database bytecode,
- * and various other data structures returned by the Hyperscan API.
- *
- * The function is equivalent to calling @ref hs_set_stream_allocator(),
- * @ref hs_set_scratch_allocator(), @ref hs_set_database_allocator() and
- * @ref hs_set_misc_allocator() with the provided parameters.
- *
- * This call will override any previous allocators that have been set.
- *
- * Note: there is no way to change the allocator used for temporary objects
- * created during the various compile calls (@ref hs_compile(), @ref
- * hs_compile_multi(), @ref hs_compile_ext_multi()).
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_set_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for database bytecode produced by the compile calls (@ref hs_compile(), @ref
- * hs_compile_multi(), @ref hs_compile_ext_multi()) and by database
- * deserialization (@ref hs_deserialize_database()).
- *
- * If no database allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous database allocators that have been set.
- *
- * Note: the database allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * Note: there is no way to change how temporary objects created during the
- * various compile calls (@ref hs_compile(), @ref hs_compile_multi(), @ref
- * hs_compile_ext_multi()) are allocated.
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_set_database_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for items returned by the Hyperscan API such as @ref hs_compile_error_t, @ref
- * hs_expr_info_t and serialized databases.
- *
- * If no misc allocation functions are set, or if NULL is used in place of both
- * parameters, then memory allocation will default to standard methods (such as
- * the system malloc() and free() calls).
- *
- * This call will override any previous misc allocators that have been set.
- *
- * Note: the misc allocator may also be set by calling @ref hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_set_misc_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for scratch space by @ref hs_alloc_scratch() and @ref hs_clone_scratch().
- *
- * If no scratch allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous scratch allocators that have been set.
- *
- * Note: the scratch allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_set_scratch_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for stream state by @ref hs_open_stream().
- *
- * If no stream allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous stream allocators that have been set.
- *
- * Note: the stream allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_set_stream_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Utility function for identifying this release version.
- *
- * @return
- * A string containing the version number of this release build and the
- * date of the build. It is allocated statically, so it does not need to
- * be freed by the caller.
- */
-const char * avx2_hs_version(void);
-
-/**
- * Utility function to test the current system architecture.
- *
- * Hyperscan requires the Supplemental Streaming SIMD Extensions 3 instruction
- * set. This function can be called on any x86 platform to determine if the
- * system provides the required instruction set.
- *
- * This function does not test for more advanced features if Hyperscan has
- * been built for a more specific architecture, for example the AVX2
- * instruction set.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_ARCH_ERROR if system does not
- * support Hyperscan.
- */
-hs_error_t avx2_hs_valid_platform(void);
-
-/**
- * @defgroup HS_ERROR hs_error_t values
- *
- * @{
- */
-
-/**
- * The engine completed normally.
- */
-#define HS_SUCCESS 0
-
-/**
- * A parameter passed to this function was invalid.
- *
- * This error is only returned in cases where the function can detect an
- * invalid parameter -- it cannot be relied upon to detect (for example)
- * pointers to freed memory or other invalid data.
- */
-#define HS_INVALID (-1)
-
-/**
- * A memory allocation failed.
- */
-#define HS_NOMEM (-2)
-
-/**
- * The engine was terminated by callback.
- *
- * This return value indicates that the target buffer was partially scanned,
- * but that the callback function requested that scanning cease after a match
- * was located.
- */
-#define HS_SCAN_TERMINATED (-3)
-
-/**
- * The pattern compiler failed, and the @ref hs_compile_error_t should be
- * inspected for more detail.
- */
-#define HS_COMPILER_ERROR (-4)
-
-/**
- * The given database was built for a different version of Hyperscan.
- */
-#define HS_DB_VERSION_ERROR (-5)
-
-/**
- * The given database was built for a different platform (i.e., CPU type).
- */
-#define HS_DB_PLATFORM_ERROR (-6)
-
-/**
- * The given database was built for a different mode of operation. This error
- * is returned when streaming calls are used with a block or vectored database
- * and vice versa.
- */
-#define HS_DB_MODE_ERROR (-7)
-
-/**
- * A parameter passed to this function was not correctly aligned.
- */
-#define HS_BAD_ALIGN (-8)
-
-/**
- * The memory allocator (either malloc() or the allocator set with @ref
- * hs_set_allocator()) did not correctly return memory suitably aligned for the
- * largest representable data type on this platform.
- */
-#define HS_BAD_ALLOC (-9)
-
-/**
- * The scratch region was already in use.
- *
- * This error is returned when Hyperscan is able to detect that the scratch
- * region given is already in use by another Hyperscan API call.
- *
- * A separate scratch region, allocated with @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch(), is required for every concurrent caller of the Hyperscan
- * API.
- *
- * For example, this error might be returned when @ref hs_scan() has been
- * called inside a callback delivered by a currently-executing @ref hs_scan()
- * call using the same scratch region.
- *
- * Note: Not all concurrent uses of scratch regions may be detected. This error
- * is intended as a best-effort debugging tool, not a guarantee.
- */
-#define HS_SCRATCH_IN_USE (-10)
-
-/**
- * Unsupported CPU architecture.
- *
- * This error is returned when Hyperscan is able to detect that the current
- * system does not support the required instruction set.
- *
- * At a minimum, Hyperscan requires Supplemental Streaming SIMD Extensions 3
- * (SSSE3).
- */
-#define HS_ARCH_ERROR (-11)
-
-/**
- * Provided buffer was too small.
- *
- * This error indicates that there was insufficient space in the buffer. The
- * call should be repeated with a larger provided buffer.
- *
- * Note: in this situation, it is normal for the amount of space required to be
- * returned in the same manner as the used space would have been returned if the
- * call was successful.
- */
-#define HS_INSUFFICIENT_SPACE (-12)
-
-/**
- * Unexpected internal error.
- *
- * This error indicates that unexpected matching behavior occurred. This may
- * be caused by invalid use of stream or scratch space, or by invalid memory
- * operations on the caller's part.
- */
-#define HS_UNKNOWN_ERROR (-13)
-
-/** @} */
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* HS_AVX2_COMMON_H */
+/*
+ * Copyright (c) 2015-2019, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HS_AVX2_COMMON_H
+#define HS_AVX2_COMMON_H
+
+#if defined(_WIN32)
+#define HS_CDECL __cdecl
+#else
+#define HS_CDECL
+#endif
+#include <stdlib.h>
+
+/**
+ * @file
+ * @brief The Hyperscan common API definition.
+ *
+ * Hyperscan is a high speed regular expression engine.
+ *
+ * This header contains functions available to both the Hyperscan compiler and
+ * runtime.
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+struct hs_database;
+
+/**
+ * A Hyperscan pattern database.
+ *
+ * Generated by one of the Hyperscan compiler functions:
+ * - @ref hs_compile()
+ * - @ref hs_compile_multi()
+ * - @ref hs_compile_ext_multi()
+ */
+typedef struct hs_database hs_database_t;
+
+/**
+ * A type for errors returned by Hyperscan functions.
+ */
+typedef int hs_error_t;
+
+/**
+ * Free a compiled pattern database.
+ *
+ * The free callback set by @ref hs_set_database_allocator() (or @ref
+ * hs_set_allocator()) will be used by this function.
+ *
+ * @param db
+ * A compiled pattern database. NULL may also be safely provided, in which
+ * case the function does nothing.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_free_database(hs_database_t *db);
+
+/**
+ * Serialize a pattern database to a stream of bytes.
+ *
+ * The allocator callback set by @ref hs_set_misc_allocator() (or @ref
+ * hs_set_allocator()) will be used by this function.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param bytes
+ * On success, a pointer to an array of bytes will be returned here.
+ * These bytes can be subsequently relocated or written to disk. The
+ * caller is responsible for freeing this block.
+ *
+ * @param length
+ * On success, the number of bytes in the generated byte array will be
+ * returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_NOMEM if the byte array cannot be
+ * allocated, other values may be returned if errors are detected.
+ */
+hs_error_t avx2_hs_serialize_database(const hs_database_t *db, char **bytes,
+ size_t *length);
+
+/**
+ * Reconstruct a pattern database from a stream of bytes previously generated
+ * by @ref hs_serialize_database().
+ *
+ * This function will allocate sufficient space for the database using the
+ * allocator set with @ref hs_set_database_allocator() (or @ref
+ * hs_set_allocator()); to use a pre-allocated region of memory, use the @ref
+ * hs_deserialize_database_at() function.
+ *
+ * @param bytes
+ * A byte array generated by @ref hs_serialize_database() representing a
+ * compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param db
+ * On success, a pointer to a newly allocated @ref hs_database_t will be
+ * returned here. This database can then be used for scanning, and
+ * eventually freed by the caller using @ref hs_free_database().
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_deserialize_database(const char *bytes,
+ const size_t length,
+ hs_database_t **db);
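+
+/*
+ * A minimal round-trip sketch for the two calls above, assuming a compiled
+ * database `db`, the default (malloc-based) misc allocator, and the public
+ * hs_* names that these avx2_-prefixed entry points implement in this build:
+ *
+ * @code
+ *     char *bytes = NULL;
+ *     size_t length = 0;
+ *     if (hs_serialize_database(db, &bytes, &length) == HS_SUCCESS) {
+ *         hs_database_t *copy = NULL;
+ *         hs_error_t err = hs_deserialize_database(bytes, length, &copy);
+ *         free(bytes);              // byte array came from the misc allocator
+ *         if (err == HS_SUCCESS) {
+ *             // ... scan with `copy` ...
+ *             hs_free_database(copy);
+ *         }
+ *     }
+ * @endcode
+ */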
+
+/**
+ * Reconstruct a pattern database from a stream of bytes previously generated
+ * by @ref hs_serialize_database() at a given memory location.
+ *
+ * This function (unlike @ref hs_deserialize_database()) will write the
+ * reconstructed database to the memory location given in the @p db parameter.
+ * The amount of space required at this location can be determined with the
+ * @ref hs_serialized_database_size() function.
+ *
+ * @param bytes
+ * A byte array generated by @ref hs_serialize_database() representing a
+ * compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param db
+ * Pointer to an 8-byte aligned block of memory of sufficient size to hold
+ * the deserialized database. On success, the reconstructed database will
+ * be written to this location. This database can then be used for pattern
+ * matching. The user is responsible for freeing this memory; the @ref
+ * hs_free_database() call should not be used.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_deserialize_database_at(const char *bytes,
+ const size_t length,
+ hs_database_t *db);
+
+/**
+ * Provides the size of the stream state allocated by a single stream opened
+ * against the given database.
+ *
+ * @param database
+ * Pointer to a compiled (streaming mode) pattern database.
+ *
+ * @param stream_size
+ * On success, the size in bytes of an individual stream opened against the
+ * given database is placed in this parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_stream_size(const hs_database_t *database,
+ size_t *stream_size);
+
+/**
+ * Provides the size of the given database in bytes.
+ *
+ * @param database
+ * Pointer to compiled pattern database.
+ *
+ * @param database_size
+ * On success, the size of the compiled database in bytes is placed in this
+ * parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_database_size(const hs_database_t *database,
+ size_t *database_size);
+
+/**
+ * Utility function for reporting the size that would be required by a
+ * database if it were deserialized.
+ *
+ * This can be used to allocate a shared memory region or other "special"
+ * allocation prior to deserializing with the @ref hs_deserialize_database_at()
+ * function.
+ *
+ * @param bytes
+ * Pointer to a byte array generated by @ref hs_serialize_database()
+ * representing a compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param deserialized_size
+ * On success, the size of the compiled database that would be generated
+ * by @ref hs_deserialize_database_at() is returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_serialized_database_size(const char *bytes,
+ const size_t length,
+ size_t *deserialized_size);
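+
+/*
+ * A sketch of the pre-allocated deserialization flow, assuming the serialized
+ * image `bytes`/`length` from the round-trip sketch above; plain malloc() is
+ * used here on the assumption that it returns at least 8-byte aligned memory,
+ * as it does on mainstream platforms:
+ *
+ * @code
+ *     size_t db_size = 0;
+ *     if (hs_serialized_database_size(bytes, length, &db_size) == HS_SUCCESS) {
+ *         void *mem = malloc(db_size); // could equally be shared memory
+ *         if (mem && hs_deserialize_database_at(bytes, length,
+ *                        (hs_database_t *)mem) == HS_SUCCESS) {
+ *             // ... scan with (hs_database_t *)mem ...
+ *         }
+ *         free(mem);                // caller-owned: do not hs_free_database()
+ *     }
+ * @endcode
+ */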
+
+/**
+ * Utility function providing information about a database.
+ *
+ * @param database
+ * Pointer to a compiled database.
+ *
+ * @param info
+ * On success, a string containing the version and platform information for
+ * the supplied database is placed in the parameter. The string is
+ * allocated using the allocator supplied in @ref hs_set_misc_allocator()
+ * (or malloc() if no allocator was set) and should be freed by the caller.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_database_info(const hs_database_t *database,
+ char **info);
+
+/**
+ * Utility function providing information about a serialized database.
+ *
+ * @param bytes
+ * Pointer to a serialized database.
+ *
+ * @param length
+ * Length in bytes of the serialized database.
+ *
+ * @param info
+ * On success, a string containing the version and platform information
+ * for the supplied serialized database is placed in the parameter. The
+ * string is allocated using the allocator supplied in @ref
+ * hs_set_misc_allocator() (or malloc() if no allocator was set) and
+ * should be freed by the caller.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_serialized_database_info(const char *bytes,
+ size_t length, char **info);
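+
+/*
+ * A short sketch of both info calls, assuming `db`, `bytes` and `length` as
+ * above and <stdio.h> for printf(); the returned strings come from the misc
+ * allocator (malloc() by default) and must be freed by the caller:
+ *
+ * @code
+ *     char *info = NULL;
+ *     if (hs_database_info(db, &info) == HS_SUCCESS) {
+ *         printf("database: %s\n", info);
+ *         free(info);
+ *     }
+ *     if (hs_serialized_database_info(bytes, length, &info) == HS_SUCCESS) {
+ *         printf("serialized: %s\n", info);
+ *         free(info);
+ *     }
+ * @endcode
+ */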
+
+/**
+ * The type of the callback function that will be used by Hyperscan to allocate
+ * more memory at runtime as required, for example in @ref hs_open_stream() to
+ * allocate stream state.
+ *
+ * If Hyperscan is to be used in a multi-threaded, or similarly concurrent
+ * environment, the allocation function will need to be re-entrant, or
+ * similarly safe for concurrent use.
+ *
+ * @param size
+ * The number of bytes to allocate.
+ * @return
+ * A pointer to the region of memory allocated, or NULL on error.
+ */
+typedef void *(HS_CDECL *hs_alloc_t)(size_t size);
+
+/**
+ * The type of the callback function that will be used by Hyperscan to free
+ * memory regions previously allocated using the @ref hs_alloc_t function.
+ *
+ * @param ptr
+ * The region of memory to be freed.
+ */
+typedef void (HS_CDECL *hs_free_t)(void *ptr);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating
+ * memory at runtime for stream state, scratch space, database bytecode,
+ * and various other data structures returned by the Hyperscan API.
+ *
+ * The function is equivalent to calling @ref hs_set_stream_allocator(),
+ * @ref hs_set_scratch_allocator(), @ref hs_set_database_allocator() and
+ * @ref hs_set_misc_allocator() with the provided parameters.
+ *
+ * This call will override any previous allocators that have been set.
+ *
+ * Note: there is no way to change the allocator used for temporary objects
+ * created during the various compile calls (@ref hs_compile(), @ref
+ * hs_compile_multi(), @ref hs_compile_ext_multi()).
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_set_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
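+
+/*
+ * A minimal custom-allocator sketch for the call above; the logging wrapper
+ * names are illustrative only (<stdio.h> assumed), and malloc()/free() are
+ * assumed to satisfy the alignment contract described for @p alloc_func:
+ *
+ * @code
+ *     static void *HS_CDECL log_alloc(size_t size) {
+ *         void *p = malloc(size);
+ *         fprintf(stderr, "hs alloc %zu -> %p\n", size, p);
+ *         return p;
+ *     }
+ *
+ *     static void HS_CDECL log_free(void *p) {
+ *         fprintf(stderr, "hs free %p\n", p);
+ *         free(p);
+ *     }
+ *
+ *     hs_set_allocator(log_alloc, log_free); // applies to all four allocators
+ * @endcode
+ */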
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for database bytecode produced by the compile calls (@ref hs_compile(), @ref
+ * hs_compile_multi(), @ref hs_compile_ext_multi()) and by database
+ * deserialization (@ref hs_deserialize_database()).
+ *
+ * If no database allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous database allocators that have been set.
+ *
+ * Note: the database allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * Note: there is no way to change how temporary objects created during the
+ * various compile calls (@ref hs_compile(), @ref hs_compile_multi(), @ref
+ * hs_compile_ext_multi()) are allocated.
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_set_database_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for items returned by the Hyperscan API such as @ref hs_compile_error_t, @ref
+ * hs_expr_info_t and serialized databases.
+ *
+ * If no misc allocation functions are set, or if NULL is used in place of both
+ * parameters, then memory allocation will default to standard methods (such as
+ * the system malloc() and free() calls).
+ *
+ * This call will override any previous misc allocators that have been set.
+ *
+ * Note: the misc allocator may also be set by calling @ref hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_set_misc_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for scratch space by @ref hs_alloc_scratch() and @ref hs_clone_scratch().
+ *
+ * If no scratch allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous scratch allocators that have been set.
+ *
+ * Note: the scratch allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_set_scratch_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for stream state by @ref hs_open_stream().
+ *
+ * If no stream allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous stream allocators that have been set.
+ *
+ * Note: the stream allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_set_stream_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Utility function for identifying this release version.
+ *
+ * @return
+ * A string containing the version number of this release build and the
+ * date of the build. It is allocated statically, so it does not need to
+ * be freed by the caller.
+ */
+const char * avx2_hs_version(void);
+
+/**
+ * Utility function to test the current system architecture.
+ *
+ * Hyperscan requires the Supplemental Streaming SIMD Extensions 3 instruction
+ * set. This function can be called on any x86 platform to determine if the
+ * system provides the required instruction set.
+ *
+ * This function does not test for more advanced features if Hyperscan has
+ * been built for a more specific architecture, for example the AVX2
+ * instruction set.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_ARCH_ERROR if system does not
+ * support Hyperscan.
+ */
+hs_error_t avx2_hs_valid_platform(void);
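+
+/*
+ * A typical guard using the call above, run once at start-up before any
+ * database is compiled or deserialized (sketch only; <stdio.h> assumed):
+ *
+ * @code
+ *     if (hs_valid_platform() != HS_SUCCESS) {
+ *         fprintf(stderr, "this CPU lacks SSSE3; Hyperscan cannot run\n");
+ *         exit(EXIT_FAILURE);
+ *     }
+ * @endcode
+ */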
+
+/**
+ * @defgroup HS_ERROR hs_error_t values
+ *
+ * @{
+ */
+
+/**
+ * The engine completed normally.
+ */
+#define HS_SUCCESS 0
+
+/**
+ * A parameter passed to this function was invalid.
+ *
+ * This error is only returned in cases where the function can detect an
+ * invalid parameter -- it cannot be relied upon to detect (for example)
+ * pointers to freed memory or other invalid data.
+ */
+#define HS_INVALID (-1)
+
+/**
+ * A memory allocation failed.
+ */
+#define HS_NOMEM (-2)
+
+/**
+ * The engine was terminated by callback.
+ *
+ * This return value indicates that the target buffer was partially scanned,
+ * but that the callback function requested that scanning cease after a match
+ * was located.
+ */
+#define HS_SCAN_TERMINATED (-3)
+
+/**
+ * The pattern compiler failed, and the @ref hs_compile_error_t should be
+ * inspected for more detail.
+ */
+#define HS_COMPILER_ERROR (-4)
+
+/**
+ * The given database was built for a different version of Hyperscan.
+ */
+#define HS_DB_VERSION_ERROR (-5)
+
+/**
+ * The given database was built for a different platform (i.e., CPU type).
+ */
+#define HS_DB_PLATFORM_ERROR (-6)
+
+/**
+ * The given database was built for a different mode of operation. This error
+ * is returned when streaming calls are used with a block or vectored database
+ * and vice versa.
+ */
+#define HS_DB_MODE_ERROR (-7)
+
+/**
+ * A parameter passed to this function was not correctly aligned.
+ */
+#define HS_BAD_ALIGN (-8)
+
+/**
+ * The memory allocator (either malloc() or the allocator set with @ref
+ * hs_set_allocator()) did not correctly return memory suitably aligned for the
+ * largest representable data type on this platform.
+ */
+#define HS_BAD_ALLOC (-9)
+
+/**
+ * The scratch region was already in use.
+ *
+ * This error is returned when Hyperscan is able to detect that the scratch
+ * region given is already in use by another Hyperscan API call.
+ *
+ * A separate scratch region, allocated with @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch(), is required for every concurrent caller of the Hyperscan
+ * API.
+ *
+ * For example, this error might be returned when @ref hs_scan() has been
+ * called inside a callback delivered by a currently-executing @ref hs_scan()
+ * call using the same scratch region.
+ *
+ * Note: Not all concurrent uses of scratch regions may be detected. This error
+ * is intended as a best-effort debugging tool, not a guarantee.
+ */
+#define HS_SCRATCH_IN_USE (-10)
+
+/**
+ * Unsupported CPU architecture.
+ *
+ * This error is returned when Hyperscan is able to detect that the current
+ * system does not support the required instruction set.
+ *
+ * At a minimum, Hyperscan requires Supplemental Streaming SIMD Extensions 3
+ * (SSSE3).
+ */
+#define HS_ARCH_ERROR (-11)
+
+/**
+ * Provided buffer was too small.
+ *
+ * This error indicates that there was insufficient space in the buffer. The
+ * call should be repeated with a larger provided buffer.
+ *
+ * Note: in this situation, it is normal for the amount of space required to be
+ * returned in the same manner as the used space would have been returned if the
+ * call was successful.
+ */
+#define HS_INSUFFICIENT_SPACE (-12)
+
+/**
+ * Unexpected internal error.
+ *
+ * This error indicates that unexpected matching behavior occurred. This may
+ * be caused by invalid use of stream or scratch space, or by invalid memory
+ * operations on the caller's part.
+ */
+#define HS_UNKNOWN_ERROR (-13)
+
+/** @} */
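+
+/*
+ * A sketch of a caller-side helper mapping the codes above to messages; the
+ * helper name is illustrative, not part of the API:
+ *
+ * @code
+ *     static const char *describe_hs_error(hs_error_t err) {
+ *         switch (err) {
+ *         case HS_SUCCESS:            return "success";
+ *         case HS_INVALID:            return "invalid parameter";
+ *         case HS_NOMEM:              return "allocation failed";
+ *         case HS_SCAN_TERMINATED:    return "scan stopped by callback";
+ *         case HS_COMPILER_ERROR:     return "pattern compilation failed";
+ *         case HS_DB_VERSION_ERROR:   return "database version mismatch";
+ *         case HS_DB_PLATFORM_ERROR:  return "database platform mismatch";
+ *         case HS_DB_MODE_ERROR:      return "database mode mismatch";
+ *         case HS_BAD_ALIGN:          return "misaligned parameter";
+ *         case HS_BAD_ALLOC:          return "allocator returned misaligned memory";
+ *         case HS_SCRATCH_IN_USE:     return "scratch region already in use";
+ *         case HS_ARCH_ERROR:         return "unsupported CPU architecture";
+ *         case HS_INSUFFICIENT_SPACE: return "buffer too small";
+ *         case HS_UNKNOWN_ERROR:      return "unexpected internal error";
+ *         default:                    return "unrecognized error";
+ *         }
+ *     }
+ * @endcode
+ */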
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* HS_AVX2_COMMON_H */
diff --git a/contrib/libs/hyperscan/runtime_avx2/hs_runtime.h b/contrib/libs/hyperscan/runtime_avx2/hs_runtime.h
index 4e35c9e3f8..8f4dcc1a60 100644
--- a/contrib/libs/hyperscan/runtime_avx2/hs_runtime.h
+++ b/contrib/libs/hyperscan/runtime_avx2/hs_runtime.h
@@ -1,621 +1,621 @@
-/*
- * Copyright (c) 2015-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HS_AVX2_RUNTIME_H
-#define HS_AVX2_RUNTIME_H
-
-#include <stdlib.h>
-
-/**
- * @file
- * @brief The Hyperscan runtime API definition.
- *
- * Hyperscan is a high speed regular expression engine.
- *
- * This header contains functions for using compiled Hyperscan databases for
- * scanning data at runtime.
- */
-
-#include "hs_common.h"
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/**
- * Definition of the stream identifier type.
- */
-struct hs_stream;
-
-/**
- * The stream identifier returned by @ref hs_open_stream().
- */
-typedef struct hs_stream hs_stream_t;
-
-struct hs_scratch;
-
-/**
- * A Hyperscan scratch space.
- */
-typedef struct hs_scratch hs_scratch_t;
-
-/**
- * Definition of the match event callback function type.
- *
- * A callback function matching the defined type must be provided by the
- * application calling the @ref hs_scan(), @ref hs_scan_vector() or @ref
- * hs_scan_stream() functions (or other streaming calls which can produce
- * matches).
- *
- * This callback function will be invoked whenever a match is located in the
- * target data during the execution of a scan. The details of the match are
- * passed in as parameters to the callback function, and the callback function
- * should return a value indicating whether or not matching should continue on
- * the target data. If no callbacks are desired from a scan call, NULL may be
- * provided in order to suppress match production.
- *
- * This callback function should not attempt to call Hyperscan API functions on
- * the same stream nor should it attempt to reuse the scratch space allocated
- * for the API calls that caused it to be triggered. Making another call to the
- * Hyperscan library with completely independent parameters should work (for
- * example, scanning a different database in a new stream and with new scratch
- * space), but reusing data structures like stream state and/or scratch space
- * will produce undefined behavior.
- *
- * @param id
- * The ID number of the expression that matched. If the expression was a
- * single expression compiled with @ref hs_compile(), this value will be
- * zero.
- *
- * @param from
- * - If a start of match flag is enabled for the current pattern, this
- *     argument will be set to the start of match for the pattern, assuming
- *     that the start of match value lies within the current 'start of match
- *     horizon' chosen by one of the SOM_HORIZON mode flags.
- *
- *   - If the start of match value lies outside this horizon (possible only
- *     when the SOM_HORIZON value is not @ref HS_MODE_SOM_HORIZON_LARGE),
- *     the @p from value will be set to @ref HS_OFFSET_PAST_HORIZON.
- *
- * - This argument will be set to zero if the Start of Match flag is not
- * enabled for the given pattern.
- *
- * @param to
- * The offset after the last byte that matches the expression.
- *
- * @param flags
- * This is provided for future use and is unused at present.
- *
- * @param context
- * The pointer supplied by the user to the @ref hs_scan(), @ref
- * hs_scan_vector() or @ref hs_scan_stream() function.
- *
- * @return
- * Non-zero if the matching should cease, else zero. If scanning is
- * performed in streaming mode and a non-zero value is returned, any
- * subsequent calls to @ref hs_scan_stream() for that stream will
- * immediately return with @ref HS_SCAN_TERMINATED.
- */
-typedef int (HS_CDECL *match_event_handler)(unsigned int id,
- unsigned long long from,
- unsigned long long to,
- unsigned int flags,
- void *context);
-
-/**
- * Open and initialise a stream.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param stream
- * On success, a pointer to the generated @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_open_stream(const hs_database_t *db, unsigned int flags,
- hs_stream_t **stream);
-
-/**
- * Write data to be scanned to the opened stream.
- *
- * This is the function call in which the actual pattern matching takes place
- * as data is written to the stream. Matches will be returned via the @ref
- * match_event_handler callback supplied.
- *
- * @param id
- * The stream ID (returned by @ref hs_open_stream()) to which the data
- * will be written.
- *
- * @param data
- * Pointer to the data to be scanned.
- *
- * @param length
- * The number of bytes to scan.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch().
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param ctxt
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
- * match callback indicated that scanning should stop; other values on
- * error.
- */
-hs_error_t avx2_hs_scan_stream(hs_stream_t *id, const char *data,
- unsigned int length, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *ctxt);
-
-/**
- * Close a stream.
- *
- * This function completes matching on the given stream and frees the memory
- * associated with the stream state. After this call, the stream pointed to by
- * @p id is invalid and can no longer be used. To reuse the stream state after
- * completion, rather than closing it, the @ref hs_reset_stream function can be
- * used.
- *
- * This function must be called for any stream created with @ref
- * hs_open_stream(), even if scanning has been terminated by a non-zero return
- * from the match callback function.
- *
- * Note: This operation may result in matches being returned (via calls to the
- * match event callback) for expressions anchored to the end of the data stream
- * (for example, via the use of the `$` meta-character). If these matches are
- * not desired, NULL may be provided as the @ref match_event_handler callback.
- *
- * If NULL is provided as the @ref match_event_handler callback, it is
- * permissible to provide a NULL scratch.
- *
- * @param id
- * The stream ID returned by @ref hs_open_stream().
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param ctxt
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * Returns @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_close_stream(hs_stream_t *id, hs_scratch_t *scratch,
- match_event_handler onEvent, void *ctxt);
-
-/**
- * Reset a stream to an initial state.
- *
- * Conceptually, this is equivalent to performing @ref hs_close_stream() on the
- * given stream, followed by a @ref hs_open_stream(). This new stream replaces
- * the original stream in memory, avoiding the overhead of freeing the old
- * stream and allocating the new one.
- *
- * Note: This operation may result in matches being returned (via calls to the
- * match event callback) for expressions anchored to the end of the original
- * data stream (for example, via the use of the `$` meta-character). If these
- * matches are not desired, NULL may be provided as the @ref match_event_handler
- * callback.
- *
- * Note: the stream will also be tied to the same database.
- *
- * @param id
- * The stream (as created by @ref hs_open_stream()) to be replaced.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_reset_stream(hs_stream_t *id, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *context);
-
-/**
- * Duplicate the given stream. The new stream will have the same state as the
- * original including the current stream offset.
- *
- * @param to_id
- * On success, a pointer to the new, copied @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param from_id
- * The stream (as created by @ref hs_open_stream()) to be copied.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_copy_stream(hs_stream_t **to_id,
- const hs_stream_t *from_id);
-
-/**
- * Duplicate the given 'from' stream state onto the 'to' stream. The 'to' stream
- * will first be reset (reporting any EOD matches if a non-NULL @p onEvent
- * callback handler is provided).
- *
- * Note: the 'to' stream and the 'from' stream must be open against the same
- * database.
- *
- * @param to_id
- * On success, a pointer to the new, copied @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param from_id
- * The stream (as created by @ref hs_open_stream()) to be copied.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_reset_and_copy_stream(hs_stream_t *to_id,
- const hs_stream_t *from_id,
- hs_scratch_t *scratch,
- match_event_handler onEvent,
- void *context);
-
-/**
- * Creates a compressed representation of the provided stream in the buffer
- * provided. This compressed representation can be converted back into a stream
- * state by using @ref hs_expand_stream() or @ref hs_reset_and_expand_stream().
- * The size of the compressed representation will be placed into @p used_space.
- *
- * If there is not sufficient space in the buffer to hold the compressed
- * representation, @ref HS_INSUFFICIENT_SPACE will be returned and @p used_space
- * will be populated with the amount of space required.
- *
- * Note: this function does not close the provided stream, you may continue to
- * use the stream or to free it with @ref hs_close_stream().
- *
- * @param stream
- * The stream (as created by @ref hs_open_stream()) to be compressed.
- *
- * @param buf
- * Buffer to write the compressed representation into. Note: if the call is
- * just being used to determine the amount of space required, it is allowed
- * to pass NULL here and @p buf_space as 0.
- *
- * @param buf_space
- * The number of bytes in @p buf. If buf_space is too small, the call will
- * fail with @ref HS_INSUFFICIENT_SPACE.
- *
- * @param used_space
- * Pointer to where the amount of used space will be written to. The used
- * buffer space is always less than or equal to @p buf_space. If the call
- * fails with @ref HS_INSUFFICIENT_SPACE, this pointer will be used to
- * write out the amount of buffer space required.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_INSUFFICIENT_SPACE if the provided
- * buffer is too small.
- */
-hs_error_t avx2_hs_compress_stream(const hs_stream_t *stream, char *buf,
- size_t buf_space, size_t *used_space);
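-
-/*
- * A sizing-then-retry sketch for the call above, assuming an open `stream`;
- * the first call passes NULL/0 purely to learn the required space:
- *
- * @code
- *     size_t used = 0;
- *     hs_error_t err = hs_compress_stream(stream, NULL, 0, &used);
- *     if (err == HS_INSUFFICIENT_SPACE) {
- *         char *buf = malloc(used);
- *         if (buf) {
- *             err = hs_compress_stream(stream, buf, used, &used);
- *             // on HS_SUCCESS, `buf` holds `used` bytes of compressed state
- *         }
- *     }
- * @endcode
- */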
-
-/**
- * Decompresses a compressed representation created by @ref hs_compress_stream()
- * into a new stream.
- *
- * Note: @p buf must correspond to a complete compressed representation created
- * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
- * not always possible to detect misuse of this API and behaviour is undefined
- * if these properties are not satisfied.
- *
- * @param db
- * The compiled pattern database that the compressed stream was opened
- * against.
- *
- * @param stream
- * On success, a pointer to the expanded @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param buf
- * A compressed representation of a stream. These compressed forms are
- * created by @ref hs_compress_stream().
- *
- * @param buf_size
- * The size in bytes of the compressed representation.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_expand_stream(const hs_database_t *db,
- hs_stream_t **stream, const char *buf,
- size_t buf_size);
-
-/**
- * Decompresses a compressed representation created by @ref hs_compress_stream()
- * on top of the 'to' stream. The 'to' stream will first be reset (reporting
- * any EOD matches if a non-NULL @p onEvent callback handler is provided).
- *
- * Note: the 'to' stream must be opened against the same database as the
- * compressed stream.
- *
- * Note: @p buf must correspond to a complete compressed representation created
- * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
- * not always possible to detect misuse of this API and behaviour is undefined
- * if these properties are not satisfied.
- *
- * @param to_stream
- * A pointer to a valid stream state. A pointer to the expanded @ref
- * hs_stream_t will be returned; NULL on failure.
- *
- * @param buf
- * A compressed representation of a stream. These compressed forms are
- * created by @ref hs_compress_stream().
- *
- * @param buf_size
- * The size in bytes of the compressed representation.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_reset_and_expand_stream(hs_stream_t *to_stream,
- const char *buf, size_t buf_size,
- hs_scratch_t *scratch,
- match_event_handler onEvent,
- void *context);
-
-/**
- * The block (non-streaming) regular expression scanner.
- *
- * This is the function call in which the actual pattern matching takes place
- * for block-mode pattern databases.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param data
- * Pointer to the data to be scanned.
- *
- * @param length
- * The number of bytes to scan.
- *
- * @param flags
- * Flags modifying the behaviour of this function. This parameter is
- * provided for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() for this
- * database.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
- * match callback indicated that scanning should stop; other values on
- * error.
- */
-hs_error_t avx2_hs_scan(const hs_database_t *db, const char *data,
- unsigned int length, unsigned int flags,
- hs_scratch_t *scratch, match_event_handler onEvent,
- void *context);
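-
-/*
- * A minimal block-mode sketch for the call above, assuming a block-mode
- * database `db` and an `on_match` callback of type @ref match_event_handler
- * that returns 0 to continue scanning (<string.h> assumed for strlen):
- *
- * @code
- *     hs_scratch_t *scratch = NULL;
- *     if (hs_alloc_scratch(db, &scratch) == HS_SUCCESS) {
- *         const char *data = "some input to scan";
- *         hs_scan(db, data, (unsigned int)strlen(data), 0, scratch,
- *                 on_match, NULL);
- *         hs_free_scratch(scratch);
- *     }
- * @endcode
- */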
-
-/**
- * The vectored regular expression scanner.
- *
- * This is the function call in which the actual pattern matching takes place
- * for vectoring-mode pattern databases.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param data
- * An array of pointers to the data blocks to be scanned.
- *
- * @param length
- * An array of lengths (in bytes) of each data block to scan.
- *
- * @param count
- *     Number of data blocks to scan. This should correspond to the size of
- *     the @p data and @p length arrays.
- *
- * @param flags
- * Flags modifying the behaviour of this function. This parameter is
- * provided for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() for
- * this database.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the match
- * callback indicated that scanning should stop; other values on error.
- */
-hs_error_t avx2_hs_scan_vector(const hs_database_t *db,
- const char *const *data,
- const unsigned int *length,
- unsigned int count, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *context);
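-
-/*
- * A vectored-mode sketch for the call above, assuming a database compiled in
- * vectored mode and the same `scratch`/`on_match` as in the block-mode
- * sketch; the two blocks are scanned as if concatenated:
-
- * @code
- *     const char *blocks[2] = { "part one, ", "part two" };
- *     const unsigned int lens[2] = { 10, 8 };
- *     hs_scan_vector(db, blocks, lens, 2, 0, scratch, on_match, NULL);
- * @endcode
- */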
-
-/**
- * Allocate a "scratch" space for use by Hyperscan.
- *
- * This is required for runtime use, and one scratch space per thread, or
- * concurrent caller, is required. Any allocator callback set by @ref
- * hs_set_scratch_allocator() or @ref hs_set_allocator() will be used by this
- * function.
- *
- * @param db
- * The database, as produced by @ref hs_compile().
- *
- * @param scratch
- * On first allocation, a pointer to NULL should be provided so a new
- * scratch can be allocated. If a scratch block has been previously
- * allocated, then a pointer to it should be passed back in to see if it
- * is valid for this database block. If a new scratch block is required,
- * the original will be freed and the new one returned, otherwise the
- * previous scratch block will be returned. On success, the scratch block
- * will be suitable for use with the provided database in addition to any
- * databases that original scratch space was suitable for.
- *
- * @return
- * @ref HS_SUCCESS on successful allocation; @ref HS_NOMEM if the
- * allocation fails. Other errors may be returned if invalid parameters
- * are specified.
- */
-hs_error_t avx2_hs_alloc_scratch(const hs_database_t *db,
- hs_scratch_t **scratch);
-
-/**
- * Allocate a scratch space that is a clone of an existing scratch space.
- *
- * This is useful when multiple concurrent threads will be using the same set
- * of compiled databases, and another scratch space is required. Any allocator
- * callback set by @ref hs_set_scratch_allocator() or @ref hs_set_allocator()
- * will be used by this function.
- *
- * @param src
- * The existing @ref hs_scratch_t to be cloned.
- *
- * @param dest
- * A pointer to the new scratch space will be returned here.
- *
- * @return
- * @ref HS_SUCCESS on success; @ref HS_NOMEM if the allocation fails.
- * Other errors may be returned if invalid parameters are specified.
- */
-hs_error_t avx2_hs_clone_scratch(const hs_scratch_t *src,
- hs_scratch_t **dest);
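-
-/*
- * A sketch of the one-scratch-per-thread pattern served by the call above:
- * allocate a prototype once, then clone it for each worker thread:
-
- * @code
- *     hs_scratch_t *proto = NULL;
- *     hs_scratch_t *per_thread = NULL;
- *     if (hs_alloc_scratch(db, &proto) == HS_SUCCESS &&
- *         hs_clone_scratch(proto, &per_thread) == HS_SUCCESS) {
- *         // hand `per_thread` to a worker; every concurrent caller
- *         // needs its own scratch space
- *     }
- * @endcode
- */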
-
-/**
- * Provides the size of the given scratch space.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch().
- *
- * @param scratch_size
- * On success, the size of the scratch space in bytes is placed in this
- * parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_scratch_size(const hs_scratch_t *scratch,
- size_t *scratch_size);
-
-/**
- * Free a scratch block previously allocated by @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch().
- *
- * The free callback set by @ref hs_set_scratch_allocator() or @ref
- * hs_set_allocator() will be used by this function.
- *
- * @param scratch
- * The scratch block to be freed. NULL may also be safely provided.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx2_hs_free_scratch(hs_scratch_t *scratch);
-
-/**
- * Callback 'from' return value, indicating that the start of this match was
- * too early to be tracked with the requested SOM_HORIZON precision.
- */
-#define HS_OFFSET_PAST_HORIZON (~0ULL)
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* HS_AVX2_RUNTIME_H */
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HS_AVX2_RUNTIME_H
+#define HS_AVX2_RUNTIME_H
+
+#include <stdlib.h>
+
+/**
+ * @file
+ * @brief The Hyperscan runtime API definition.
+ *
+ * Hyperscan is a high speed regular expression engine.
+ *
+ * This header contains functions for using compiled Hyperscan databases for
+ * scanning data at runtime.
+ */
+
+#include "hs_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Definition of the stream identifier type.
+ */
+struct hs_stream;
+
+/**
+ * The stream identifier returned by @ref hs_open_stream().
+ */
+typedef struct hs_stream hs_stream_t;
+
+struct hs_scratch;
+
+/**
+ * A Hyperscan scratch space.
+ */
+typedef struct hs_scratch hs_scratch_t;
+
+/**
+ * Definition of the match event callback function type.
+ *
+ * A callback function matching the defined type must be provided by the
+ * application calling the @ref hs_scan(), @ref hs_scan_vector() or @ref
+ * hs_scan_stream() functions (or other streaming calls which can produce
+ * matches).
+ *
+ * This callback function will be invoked whenever a match is located in the
+ * target data during the execution of a scan. The details of the match are
+ * passed in as parameters to the callback function, and the callback function
+ * should return a value indicating whether or not matching should continue on
+ * the target data. If no callbacks are desired from a scan call, NULL may be
+ * provided in order to suppress match production.
+ *
+ * This callback function should not attempt to call Hyperscan API functions on
+ * the same stream nor should it attempt to reuse the scratch space allocated
+ * for the API calls that caused it to be triggered. Making another call to the
+ * Hyperscan library with completely independent parameters should work (for
+ * example, scanning a different database in a new stream and with new scratch
+ * space), but reusing data structures like stream state and/or scratch space
+ * will produce undefined behavior.
+ *
+ * @param id
+ * The ID number of the expression that matched. If the expression was a
+ * single expression compiled with @ref hs_compile(), this value will be
+ * zero.
+ *
+ * @param from
+ * - If a start of match flag is enabled for the current pattern, this
+ *     argument will be set to the start of match for the pattern, assuming
+ *     that the start of match value lies within the current 'start of match
+ *     horizon' chosen by one of the SOM_HORIZON mode flags.
+ *
+ *   - If the start of match value lies outside this horizon (possible only
+ *     when the SOM_HORIZON value is not @ref HS_MODE_SOM_HORIZON_LARGE),
+ *     the @p from value will be set to @ref HS_OFFSET_PAST_HORIZON.
+ *
+ * - This argument will be set to zero if the Start of Match flag is not
+ * enabled for the given pattern.
+ *
+ * @param to
+ * The offset after the last byte that matches the expression.
+ *
+ * @param flags
+ * This is provided for future use and is unused at present.
+ *
+ * @param context
+ * The pointer supplied by the user to the @ref hs_scan(), @ref
+ * hs_scan_vector() or @ref hs_scan_stream() function.
+ *
+ * @return
+ * Non-zero if the matching should cease, else zero. If scanning is
+ * performed in streaming mode and a non-zero value is returned, any
+ * subsequent calls to @ref hs_scan_stream() for that stream will
+ * immediately return with @ref HS_SCAN_TERMINATED.
+ */
+typedef int (HS_CDECL *match_event_handler)(unsigned int id,
+ unsigned long long from,
+ unsigned long long to,
+ unsigned int flags,
+ void *context);
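+
+/*
+ * A minimal conforming callback sketch for the type above; it prints each
+ * match (<stdio.h> assumed) and returns 0 so that scanning continues:
+ *
+ * @code
+ *     static int HS_CDECL on_match(unsigned int id, unsigned long long from,
+ *                                  unsigned long long to, unsigned int flags,
+ *                                  void *context) {
+ *         (void)from; (void)flags; (void)context;
+ *         printf("pattern %u matched, ending at offset %llu\n", id, to);
+ *         return 0;   // non-zero would terminate the scan
+ *     }
+ * @endcode
+ */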
+
+/**
+ * Open and initialise a stream.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param stream
+ * On success, a pointer to the generated @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_open_stream(const hs_database_t *db, unsigned int flags,
+ hs_stream_t **stream);
+
+/**
+ * Write data to be scanned to the opened stream.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * as data is written to the stream. Matches will be returned via the @ref
+ * match_event_handler callback supplied.
+ *
+ * @param id
+ * The stream ID (returned by @ref hs_open_stream()) to which the data
+ * will be written.
+ *
+ * @param data
+ * Pointer to the data to be scanned.
+ *
+ * @param length
+ * The number of bytes to scan.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch().
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param ctxt
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
+ * match callback indicated that scanning should stop; other values on
+ * error.
+ */
+hs_error_t avx2_hs_scan_stream(hs_stream_t *id, const char *data,
+ unsigned int length, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *ctxt);
+
+/**
+ * Close a stream.
+ *
+ * This function completes matching on the given stream and frees the memory
+ * associated with the stream state. After this call, the stream pointed to by
+ * @p id is invalid and can no longer be used. To reuse the stream state after
+ * completion, rather than closing it, the @ref hs_reset_stream function can be
+ * used.
+ *
+ * This function must be called for any stream created with @ref
+ * hs_open_stream(), even if scanning has been terminated by a non-zero return
+ * from the match callback function.
+ *
+ * Note: This operation may result in matches being returned (via calls to the
+ * match event callback) for expressions anchored to the end of the data stream
+ * (for example, via the use of the `$` meta-character). If these matches are
+ * not desired, NULL may be provided as the @ref match_event_handler callback.
+ *
+ * If NULL is provided as the @ref match_event_handler callback, it is
+ * permissible to provide a NULL scratch.
+ *
+ * @param id
+ * The stream ID returned by @ref hs_open_stream().
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param ctxt
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_close_stream(hs_stream_t *id, hs_scratch_t *scratch,
+ match_event_handler onEvent, void *ctxt);
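+
+/*
+ * Sketch of the full streaming lifecycle (open, scan, close) against a
+ * streaming-mode database. Illustrative only: `db`, `scratch`, `on_match` and
+ * the `read_chunk` data source are assumed to exist, and error handling is
+ * abbreviated. Portable code calls the plain hs_* names; this runtime remaps
+ * them to the avx2_* symbols via the flags in the ya.make below.
+ *
+ *     const char *buf; unsigned int len;
+ *     hs_stream_t *stream = NULL;
+ *     hs_error_t err = hs_open_stream(db, 0, &stream);
+ *     while (err == HS_SUCCESS && read_chunk(&buf, &len)) {
+ *         err = hs_scan_stream(stream, buf, len, 0, scratch, on_match, NULL);
+ *     }
+ *     // Required even if a callback terminated the scan; may report matches
+ *     // for patterns anchored to the end of the data.
+ *     hs_close_stream(stream, scratch, on_match, NULL);
+ */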
+
+/**
+ * Reset a stream to an initial state.
+ *
+ * Conceptually, this is equivalent to performing @ref hs_close_stream() on the
+ * given stream, followed by a @ref hs_open_stream(). This new stream replaces
+ * the original stream in memory, avoiding the overhead of freeing the old
+ * stream and allocating the new one.
+ *
+ * Note: This operation may result in matches being returned (via calls to the
+ * match event callback) for expressions anchored to the end of the original
+ * data stream (for example, via the use of the `$` meta-character). If these
+ * matches are not desired, NULL may be provided as the @ref match_event_handler
+ * callback.
+ *
+ * Note: the stream will also be tied to the same database.
+ *
+ * @param id
+ * The stream (as created by @ref hs_open_stream()) to be replaced.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_reset_stream(hs_stream_t *id, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *context);
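+
+/*
+ * Sketch: reusing a single stream allocation across many logical streams
+ * (e.g. one per connection) instead of repeated close/open pairs.
+ * Illustrative only; `stream`, `scratch` and `on_match` are assumed:
+ *
+ *     // Finished with the previous connection's data:
+ *     hs_reset_stream(stream, 0, scratch, on_match, NULL); // may fire EOD matches
+ *     // The stream now behaves like a freshly opened one on the same database.
+ */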
+
+/**
+ * Duplicate the given stream. The new stream will have the same state as the
+ * original, including the current stream offset.
+ *
+ * @param to_id
+ * On success, a pointer to the new, copied @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param from_id
+ * The stream (as created by @ref hs_open_stream()) to be copied.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_copy_stream(hs_stream_t **to_id,
+ const hs_stream_t *from_id);
+
+/**
+ * Duplicate the given 'from' stream state onto the 'to' stream. The 'to' stream
+ * will first be reset (reporting any EOD matches if a non-NULL @p onEvent
+ * callback handler is provided).
+ *
+ * Note: the 'to' stream and the 'from' stream must be open against the same
+ * database.
+ *
+ * @param to_id
+ * On success, a pointer to the new, copied @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param from_id
+ * The stream (as created by @ref hs_open_stream()) to be copied.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_reset_and_copy_stream(hs_stream_t *to_id,
+ const hs_stream_t *from_id,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent,
+ void *context);
+
+/**
+ * Creates a compressed representation of the provided stream in the buffer
+ * provided. This compressed representation can be converted back into a stream
+ * state by using @ref hs_expand_stream() or @ref hs_reset_and_expand_stream().
+ * The size of the compressed representation will be placed into @p used_space.
+ *
+ * If there is not sufficient space in the buffer to hold the compressed
+ * representation, @ref HS_INSUFFICIENT_SPACE will be returned and @p used_space
+ * will be populated with the amount of space required.
+ *
+ * Note: this function does not close the provided stream; you may continue to
+ * use the stream, or free it with @ref hs_close_stream().
+ *
+ * @param stream
+ * The stream (as created by @ref hs_open_stream()) to be compressed.
+ *
+ * @param buf
+ * Buffer to write the compressed representation into. Note: if the call is
+ * just being used to determine the amount of space required, it is allowed
+ * to pass NULL here and @p buf_space as 0.
+ *
+ * @param buf_space
+ *      The number of bytes in @p buf. If @p buf_space is too small, the call
+ *      will fail with @ref HS_INSUFFICIENT_SPACE.
+ *
+ * @param used_space
+ * Pointer to where the amount of used space will be written to. The used
+ * buffer space is always less than or equal to @p buf_space. If the call
+ * fails with @ref HS_INSUFFICIENT_SPACE, this pointer will be used to
+ * write out the amount of buffer space required.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_INSUFFICIENT_SPACE if the provided
+ * buffer is too small.
+ */
+hs_error_t avx2_hs_compress_stream(const hs_stream_t *stream, char *buf,
+ size_t buf_space, size_t *used_space);
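+
+/*
+ * Sketch of the two-call sizing idiom the return codes above allow.
+ * Illustrative only; error handling is abbreviated:
+ *
+ *     size_t needed = 0;
+ *     // Sizing call: NULL buffer, zero space; expect HS_INSUFFICIENT_SPACE
+ *     // and the required size written to `needed`.
+ *     hs_compress_stream(stream, NULL, 0, &needed);
+ *     char *buf = malloc(needed);
+ *     size_t used = 0;
+ *     hs_error_t err = hs_compress_stream(stream, buf, needed, &used);
+ */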
+
+/**
+ * Decompresses a compressed representation created by @ref hs_compress_stream()
+ * into a new stream.
+ *
+ * Note: @p buf must correspond to a complete compressed representation created
+ * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
+ * not always possible to detect misuse of this API and behaviour is undefined
+ * if these properties are not satisfied.
+ *
+ * @param db
+ * The compiled pattern database that the compressed stream was opened
+ * against.
+ *
+ * @param stream
+ * On success, a pointer to the expanded @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param buf
+ * A compressed representation of a stream. These compressed forms are
+ * created by @ref hs_compress_stream().
+ *
+ * @param buf_size
+ * The size in bytes of the compressed representation.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_expand_stream(const hs_database_t *db,
+ hs_stream_t **stream, const char *buf,
+ size_t buf_size);
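+
+/*
+ * Sketch: rehydrating a compressed representation into a new stream.
+ * Illustrative only; `buf` and `used` are as produced by the
+ * hs_compress_stream() sketch above, and `db` must be the database the
+ * original stream was opened against:
+ *
+ *     hs_stream_t *stream = NULL;
+ *     hs_error_t err = hs_expand_stream(db, &stream, buf, used);
+ */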
+
+/**
+ * Decompresses a compressed representation created by @ref hs_compress_stream()
+ * on top of the 'to' stream. The 'to' stream will first be reset (reporting
+ * any EOD matches if a non-NULL @p onEvent callback handler is provided).
+ *
+ * Note: the 'to' stream must be opened against the same database as the
+ * compressed stream.
+ *
+ * Note: @p buf must correspond to a complete compressed representation created
+ * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
+ * not always possible to detect misuse of this API and behaviour is undefined
+ * if these properties are not satisfied.
+ *
+ * @param to_stream
+ * A pointer to a valid stream state. A pointer to the expanded @ref
+ * hs_stream_t will be returned; NULL on failure.
+ *
+ * @param buf
+ * A compressed representation of a stream. These compressed forms are
+ * created by @ref hs_compress_stream().
+ *
+ * @param buf_size
+ * The size in bytes of the compressed representation.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_reset_and_expand_stream(hs_stream_t *to_stream,
+ const char *buf, size_t buf_size,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent,
+ void *context);
+
+/**
+ * The block (non-streaming) regular expression scanner.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * for block-mode pattern databases.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param data
+ * Pointer to the data to be scanned.
+ *
+ * @param length
+ * The number of bytes to scan.
+ *
+ * @param flags
+ * Flags modifying the behaviour of this function. This parameter is
+ * provided for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() for this
+ * database.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
+ * match callback indicated that scanning should stop; other values on
+ * error.
+ */
+hs_error_t avx2_hs_scan(const hs_database_t *db, const char *data,
+ unsigned int length, unsigned int flags,
+ hs_scratch_t *scratch, match_event_handler onEvent,
+ void *context);
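+
+/*
+ * Block-mode sketch. Illustrative only; assumes `db` was compiled with
+ * HS_MODE_BLOCK and that `scratch`/`on_match` are set up as above:
+ *
+ *     const char *data = "some input to scan";
+ *     hs_error_t err = hs_scan(db, data, (unsigned int)strlen(data), 0,
+ *                              scratch, on_match, NULL);
+ *     // err == HS_SCAN_TERMINATED when on_match returned non-zero.
+ */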
+
+/**
+ * The vectored regular expression scanner.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * for vectored-mode pattern databases.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param data
+ * An array of pointers to the data blocks to be scanned.
+ *
+ * @param length
+ * An array of lengths (in bytes) of each data block to scan.
+ *
+ * @param count
+ *      Number of data blocks to scan. This should correspond to the size
+ *      of the @p data and @p length arrays.
+ *
+ * @param flags
+ * Flags modifying the behaviour of this function. This parameter is
+ * provided for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() for
+ * this database.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the match
+ * callback indicated that scanning should stop; other values on error.
+ */
+hs_error_t avx2_hs_scan_vector(const hs_database_t *db,
+ const char *const *data,
+ const unsigned int *length,
+ unsigned int count, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *context);
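+
+/*
+ * Vectored-mode sketch: the blocks are scanned as if concatenated, so match
+ * offsets run across block boundaries. Illustrative only; assumes `db` was
+ * compiled with HS_MODE_VECTORED:
+ *
+ *     const char *blocks[] = { "first piece, ", "second piece" };
+ *     const unsigned int lens[] = { 13, 12 };
+ *     hs_error_t err = hs_scan_vector(db, blocks, lens, 2, 0,
+ *                                     scratch, on_match, NULL);
+ */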
+
+/**
+ * Allocate a "scratch" space for use by Hyperscan.
+ *
+ * Scratch space is required for runtime use; one scratch space is needed per
+ * thread or concurrent caller. Any allocator callback set by @ref
+ * hs_set_scratch_allocator() or @ref hs_set_allocator() will be used by this
+ * function.
+ *
+ * @param db
+ * The database, as produced by @ref hs_compile().
+ *
+ * @param scratch
+ * On first allocation, a pointer to NULL should be provided so a new
+ * scratch can be allocated. If a scratch block has been previously
+ * allocated, then a pointer to it should be passed back in to see if it
+ *      is valid for this database. If a new scratch block is required,
+ * the original will be freed and the new one returned, otherwise the
+ * previous scratch block will be returned. On success, the scratch block
+ * will be suitable for use with the provided database in addition to any
+ *      databases that the original scratch space was suitable for.
+ *
+ * @return
+ * @ref HS_SUCCESS on successful allocation; @ref HS_NOMEM if the
+ * allocation fails. Other errors may be returned if invalid parameters
+ * are specified.
+ */
+hs_error_t avx2_hs_alloc_scratch(const hs_database_t *db,
+ hs_scratch_t **scratch);
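+
+/*
+ * Sketch of the grow-in-place pattern described above: pass the same pointer
+ * for each database and the scratch is enlarged only when necessary.
+ * Illustrative only; `db_block` and `db_stream` are hypothetical databases:
+ *
+ *     hs_scratch_t *scratch = NULL;                 // NULL on first allocation
+ *     hs_error_t err = hs_alloc_scratch(db_block, &scratch);
+ *     if (err == HS_SUCCESS)
+ *         err = hs_alloc_scratch(db_stream, &scratch); // now fits both
+ */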
+
+/**
+ * Allocate a scratch space that is a clone of an existing scratch space.
+ *
+ * This is useful when multiple concurrent threads will be using the same set
+ * of compiled databases, and another scratch space is required. Any allocator
+ * callback set by @ref hs_set_scratch_allocator() or @ref hs_set_allocator()
+ * will be used by this function.
+ *
+ * @param src
+ * The existing @ref hs_scratch_t to be cloned.
+ *
+ * @param dest
+ * A pointer to the new scratch space will be returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success; @ref HS_NOMEM if the allocation fails.
+ * Other errors may be returned if invalid parameters are specified.
+ */
+hs_error_t avx2_hs_clone_scratch(const hs_scratch_t *src,
+ hs_scratch_t **dest);
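+
+/*
+ * Sketch: giving each worker thread its own scratch by cloning a prototype.
+ * Illustrative only; `proto` and NTHREADS are assumptions:
+ *
+ *     hs_scratch_t *clones[NTHREADS] = { NULL };
+ *     for (int i = 0; i < NTHREADS; i++)
+ *         if (hs_clone_scratch(proto, &clones[i]) != HS_SUCCESS)
+ *             break; // allocation failure; free what was already cloned
+ */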
+
+/**
+ * Provides the size of the given scratch space.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch().
+ *
+ * @param scratch_size
+ * On success, the size of the scratch space in bytes is placed in this
+ * parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_scratch_size(const hs_scratch_t *scratch,
+ size_t *scratch_size);
+
+/**
+ * Free a scratch block previously allocated by @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch().
+ *
+ * The free callback set by @ref hs_set_scratch_allocator() or @ref
+ * hs_set_allocator() will be used by this function.
+ *
+ * @param scratch
+ * The scratch block to be freed. NULL may also be safely provided.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx2_hs_free_scratch(hs_scratch_t *scratch);
+
+/**
+ * Callback 'from' return value, indicating that the start of this match was
+ * too early to be tracked with the requested SOM_HORIZON precision.
+ */
+#define HS_OFFSET_PAST_HORIZON (~0ULL)
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* HS_AVX2_RUNTIME_H */
diff --git a/contrib/libs/hyperscan/runtime_avx2/ya.make b/contrib/libs/hyperscan/runtime_avx2/ya.make
index c50690c5e9..2fc6d7f163 100644
--- a/contrib/libs/hyperscan/runtime_avx2/ya.make
+++ b/contrib/libs/hyperscan/runtime_avx2/ya.make
@@ -1,498 +1,498 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(
- galtsev
- g:antiinfra
- g:cpp-contrib
- g:yql
-)
-
-LICENSE(BSD-3-Clause)
-
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-ADDINCL(
- contrib/libs/hyperscan
- contrib/libs/hyperscan/src
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_RUNTIME()
-
-CFLAGS(
- ${SSE41_CFLAGS}
- -DHAVE_SSE41
- ${SSE42_CFLAGS}
- -DHAVE_SSE42
- ${POPCNT_CFLAGS}
- -DHAVE_POPCOUNT_INSTR
- ${AVX_CFLAGS}
- -DHAVE_AVX
- ${AVX2_CFLAGS}
- -DHAVE_AVX2
- -DCrc32c_ComputeBuf=avx2_Crc32c_ComputeBuf
- -DblockInitSufPQ=avx2_blockInitSufPQ
- -Dcompress_stream=avx2_compress_stream
- -Dcpuid_flags=avx2_cpuid_flags
- -Dcpuid_tune=avx2_cpuid_tune
- -DdbIsValid=avx2_dbIsValid
- -DdoAccel128=avx2_doAccel128
- -DdoAccel256=avx2_doAccel256
- -DdoAccel32=avx2_doAccel32
- -DdoAccel384=avx2_doAccel384
- -DdoAccel512=avx2_doAccel512
- -DdoAccel64=avx2_doAccel64
- -Dexpand_stream=avx2_expand_stream
- -DfdrExec=avx2_fdrExec
- -DfdrExecStreaming=avx2_fdrExecStreaming
- -Dfdr_exec_fat_teddy_msks1=avx2_fdr_exec_fat_teddy_msks1
- -Dfdr_exec_fat_teddy_msks1_pck=avx2_fdr_exec_fat_teddy_msks1_pck
- -Dfdr_exec_fat_teddy_msks2=avx2_fdr_exec_fat_teddy_msks2
- -Dfdr_exec_fat_teddy_msks2_pck=avx2_fdr_exec_fat_teddy_msks2_pck
- -Dfdr_exec_fat_teddy_msks3=avx2_fdr_exec_fat_teddy_msks3
- -Dfdr_exec_fat_teddy_msks3_pck=avx2_fdr_exec_fat_teddy_msks3_pck
- -Dfdr_exec_fat_teddy_msks4=avx2_fdr_exec_fat_teddy_msks4
- -Dfdr_exec_fat_teddy_msks4_pck=avx2_fdr_exec_fat_teddy_msks4_pck
- -Dfdr_exec_teddy_msks1=avx2_fdr_exec_teddy_msks1
- -Dfdr_exec_teddy_msks1_pck=avx2_fdr_exec_teddy_msks1_pck
- -Dfdr_exec_teddy_msks2=avx2_fdr_exec_teddy_msks2
- -Dfdr_exec_teddy_msks2_pck=avx2_fdr_exec_teddy_msks2_pck
- -Dfdr_exec_teddy_msks3=avx2_fdr_exec_teddy_msks3
- -Dfdr_exec_teddy_msks3_pck=avx2_fdr_exec_teddy_msks3_pck
- -Dfdr_exec_teddy_msks4=avx2_fdr_exec_teddy_msks4
- -Dfdr_exec_teddy_msks4_pck=avx2_fdr_exec_teddy_msks4_pck
- -DflushQueuedLiterals_i=avx2_flushQueuedLiterals_i
- -DflushStoredSomMatches_i=avx2_flushStoredSomMatches_i
- -DhandleSomExternal=avx2_handleSomExternal
- -DhandleSomInternal=avx2_handleSomInternal
- -Dhs_alloc_scratch=avx2_hs_alloc_scratch
- -Dhs_clone_scratch=avx2_hs_clone_scratch
- -Dhs_close_stream=avx2_hs_close_stream
- -Dhs_compress_stream=avx2_hs_compress_stream
- -Dhs_copy_stream=avx2_hs_copy_stream
- -Dhs_database_alloc=avx2_hs_database_alloc
- -Dhs_database_free=avx2_hs_database_free
- -Dhs_database_info=avx2_hs_database_info
- -Dhs_database_size=avx2_hs_database_size
- -Dhs_deserialize_database=avx2_hs_deserialize_database
- -Dhs_deserialize_database_at=avx2_hs_deserialize_database_at
- -Dhs_expand_stream=avx2_hs_expand_stream
- -Dhs_free_database=avx2_hs_free_database
- -Dhs_free_scratch=avx2_hs_free_scratch
- -Dhs_misc_alloc=avx2_hs_misc_alloc
- -Dhs_misc_free=avx2_hs_misc_free
- -Dhs_open_stream=avx2_hs_open_stream
- -Dhs_reset_and_copy_stream=avx2_hs_reset_and_copy_stream
- -Dhs_reset_and_expand_stream=avx2_hs_reset_and_expand_stream
- -Dhs_reset_stream=avx2_hs_reset_stream
- -Dhs_scan=avx2_hs_scan
- -Dhs_scan_stream=avx2_hs_scan_stream
- -Dhs_scan_vector=avx2_hs_scan_vector
- -Dhs_scratch_alloc=avx2_hs_scratch_alloc
- -Dhs_scratch_free=avx2_hs_scratch_free
- -Dhs_scratch_size=avx2_hs_scratch_size
- -Dhs_serialize_database=avx2_hs_serialize_database
- -Dhs_serialized_database_info=avx2_hs_serialized_database_info
- -Dhs_serialized_database_size=avx2_hs_serialized_database_size
- -Dhs_set_allocator=avx2_hs_set_allocator
- -Dhs_set_database_allocator=avx2_hs_set_database_allocator
- -Dhs_set_misc_allocator=avx2_hs_set_misc_allocator
- -Dhs_set_scratch_allocator=avx2_hs_set_scratch_allocator
- -Dhs_set_stream_allocator=avx2_hs_set_stream_allocator
- -Dhs_stream_alloc=avx2_hs_stream_alloc
- -Dhs_stream_free=avx2_hs_stream_free
- -Dhs_stream_size=avx2_hs_stream_size
- -Dhs_valid_platform=avx2_hs_valid_platform
- -Dhs_version=avx2_hs_version
- -DhwlmExec=avx2_hwlmExec
- -DhwlmExecStreaming=avx2_hwlmExecStreaming
- -DloadSomFromStream=avx2_loadSomFromStream
- -Dloadcompressed128=avx2_loadcompressed128
- -Dloadcompressed256=avx2_loadcompressed256
- -Dloadcompressed32=avx2_loadcompressed32
- -Dloadcompressed384=avx2_loadcompressed384
- -Dloadcompressed512=avx2_loadcompressed512
- -Dloadcompressed64=avx2_loadcompressed64
- -Dmcsheng_pext_mask=avx2_mcsheng_pext_mask
- -Dmm_mask_mask=avx2_mm_mask_mask
- -Dmm_shuffle_end=avx2_mm_shuffle_end
- -Dmmbit_keyshift_lut=avx2_mmbit_keyshift_lut
- -Dmmbit_maxlevel_direct_lut=avx2_mmbit_maxlevel_direct_lut
- -Dmmbit_maxlevel_from_keyshift_lut=avx2_mmbit_maxlevel_from_keyshift_lut
- -Dmmbit_root_offset_from_level=avx2_mmbit_root_offset_from_level
- -Dmmbit_zero_to_lut=avx2_mmbit_zero_to_lut
- -DnfaBlockExecReverse=avx2_nfaBlockExecReverse
- -DnfaCheckFinalState=avx2_nfaCheckFinalState
- -DnfaExecCastle_Q=avx2_nfaExecCastle_Q
- -DnfaExecCastle_Q2=avx2_nfaExecCastle_Q2
- -DnfaExecCastle_QR=avx2_nfaExecCastle_QR
- -DnfaExecCastle_expandState=avx2_nfaExecCastle_expandState
- -DnfaExecCastle_inAccept=avx2_nfaExecCastle_inAccept
- -DnfaExecCastle_inAnyAccept=avx2_nfaExecCastle_inAnyAccept
- -DnfaExecCastle_initCompressedState=avx2_nfaExecCastle_initCompressedState
- -DnfaExecCastle_queueCompressState=avx2_nfaExecCastle_queueCompressState
- -DnfaExecCastle_queueInitState=avx2_nfaExecCastle_queueInitState
- -DnfaExecCastle_reportCurrent=avx2_nfaExecCastle_reportCurrent
- -DnfaExecGough16_Q=avx2_nfaExecGough16_Q
- -DnfaExecGough16_Q2=avx2_nfaExecGough16_Q2
- -DnfaExecGough16_QR=avx2_nfaExecGough16_QR
- -DnfaExecGough16_expandState=avx2_nfaExecGough16_expandState
- -DnfaExecGough16_inAccept=avx2_nfaExecGough16_inAccept
- -DnfaExecGough16_inAnyAccept=avx2_nfaExecGough16_inAnyAccept
- -DnfaExecGough16_initCompressedState=avx2_nfaExecGough16_initCompressedState
- -DnfaExecGough16_queueCompressState=avx2_nfaExecGough16_queueCompressState
- -DnfaExecGough16_queueInitState=avx2_nfaExecGough16_queueInitState
- -DnfaExecGough16_reportCurrent=avx2_nfaExecGough16_reportCurrent
- -DnfaExecGough16_testEOD=avx2_nfaExecGough16_testEOD
- -DnfaExecGough8_Q=avx2_nfaExecGough8_Q
- -DnfaExecGough8_Q2=avx2_nfaExecGough8_Q2
- -DnfaExecGough8_QR=avx2_nfaExecGough8_QR
- -DnfaExecGough8_expandState=avx2_nfaExecGough8_expandState
- -DnfaExecGough8_inAccept=avx2_nfaExecGough8_inAccept
- -DnfaExecGough8_inAnyAccept=avx2_nfaExecGough8_inAnyAccept
- -DnfaExecGough8_initCompressedState=avx2_nfaExecGough8_initCompressedState
- -DnfaExecGough8_queueCompressState=avx2_nfaExecGough8_queueCompressState
- -DnfaExecGough8_queueInitState=avx2_nfaExecGough8_queueInitState
- -DnfaExecGough8_reportCurrent=avx2_nfaExecGough8_reportCurrent
- -DnfaExecGough8_testEOD=avx2_nfaExecGough8_testEOD
- -DnfaExecLbrDot_Q=avx2_nfaExecLbrDot_Q
- -DnfaExecLbrDot_Q2=avx2_nfaExecLbrDot_Q2
- -DnfaExecLbrDot_QR=avx2_nfaExecLbrDot_QR
- -DnfaExecLbrDot_expandState=avx2_nfaExecLbrDot_expandState
- -DnfaExecLbrDot_inAccept=avx2_nfaExecLbrDot_inAccept
- -DnfaExecLbrDot_inAnyAccept=avx2_nfaExecLbrDot_inAnyAccept
- -DnfaExecLbrDot_initCompressedState=avx2_nfaExecLbrDot_initCompressedState
- -DnfaExecLbrDot_queueCompressState=avx2_nfaExecLbrDot_queueCompressState
- -DnfaExecLbrDot_queueInitState=avx2_nfaExecLbrDot_queueInitState
- -DnfaExecLbrDot_reportCurrent=avx2_nfaExecLbrDot_reportCurrent
- -DnfaExecLbrNVerm_Q=avx2_nfaExecLbrNVerm_Q
- -DnfaExecLbrNVerm_Q2=avx2_nfaExecLbrNVerm_Q2
- -DnfaExecLbrNVerm_QR=avx2_nfaExecLbrNVerm_QR
- -DnfaExecLbrNVerm_expandState=avx2_nfaExecLbrNVerm_expandState
- -DnfaExecLbrNVerm_inAccept=avx2_nfaExecLbrNVerm_inAccept
- -DnfaExecLbrNVerm_inAnyAccept=avx2_nfaExecLbrNVerm_inAnyAccept
- -DnfaExecLbrNVerm_initCompressedState=avx2_nfaExecLbrNVerm_initCompressedState
- -DnfaExecLbrNVerm_queueCompressState=avx2_nfaExecLbrNVerm_queueCompressState
- -DnfaExecLbrNVerm_queueInitState=avx2_nfaExecLbrNVerm_queueInitState
- -DnfaExecLbrNVerm_reportCurrent=avx2_nfaExecLbrNVerm_reportCurrent
- -DnfaExecLbrShuf_Q=avx2_nfaExecLbrShuf_Q
- -DnfaExecLbrShuf_Q2=avx2_nfaExecLbrShuf_Q2
- -DnfaExecLbrShuf_QR=avx2_nfaExecLbrShuf_QR
- -DnfaExecLbrShuf_expandState=avx2_nfaExecLbrShuf_expandState
- -DnfaExecLbrShuf_inAccept=avx2_nfaExecLbrShuf_inAccept
- -DnfaExecLbrShuf_inAnyAccept=avx2_nfaExecLbrShuf_inAnyAccept
- -DnfaExecLbrShuf_initCompressedState=avx2_nfaExecLbrShuf_initCompressedState
- -DnfaExecLbrShuf_queueCompressState=avx2_nfaExecLbrShuf_queueCompressState
- -DnfaExecLbrShuf_queueInitState=avx2_nfaExecLbrShuf_queueInitState
- -DnfaExecLbrShuf_reportCurrent=avx2_nfaExecLbrShuf_reportCurrent
- -DnfaExecLbrTruf_Q=avx2_nfaExecLbrTruf_Q
- -DnfaExecLbrTruf_Q2=avx2_nfaExecLbrTruf_Q2
- -DnfaExecLbrTruf_QR=avx2_nfaExecLbrTruf_QR
- -DnfaExecLbrTruf_expandState=avx2_nfaExecLbrTruf_expandState
- -DnfaExecLbrTruf_inAccept=avx2_nfaExecLbrTruf_inAccept
- -DnfaExecLbrTruf_inAnyAccept=avx2_nfaExecLbrTruf_inAnyAccept
- -DnfaExecLbrTruf_initCompressedState=avx2_nfaExecLbrTruf_initCompressedState
- -DnfaExecLbrTruf_queueCompressState=avx2_nfaExecLbrTruf_queueCompressState
- -DnfaExecLbrTruf_queueInitState=avx2_nfaExecLbrTruf_queueInitState
- -DnfaExecLbrTruf_reportCurrent=avx2_nfaExecLbrTruf_reportCurrent
- -DnfaExecLbrVerm_Q=avx2_nfaExecLbrVerm_Q
- -DnfaExecLbrVerm_Q2=avx2_nfaExecLbrVerm_Q2
- -DnfaExecLbrVerm_QR=avx2_nfaExecLbrVerm_QR
- -DnfaExecLbrVerm_expandState=avx2_nfaExecLbrVerm_expandState
- -DnfaExecLbrVerm_inAccept=avx2_nfaExecLbrVerm_inAccept
- -DnfaExecLbrVerm_inAnyAccept=avx2_nfaExecLbrVerm_inAnyAccept
- -DnfaExecLbrVerm_initCompressedState=avx2_nfaExecLbrVerm_initCompressedState
- -DnfaExecLbrVerm_queueCompressState=avx2_nfaExecLbrVerm_queueCompressState
- -DnfaExecLbrVerm_queueInitState=avx2_nfaExecLbrVerm_queueInitState
- -DnfaExecLbrVerm_reportCurrent=avx2_nfaExecLbrVerm_reportCurrent
- -DnfaExecLimEx128_B_Reverse=avx2_nfaExecLimEx128_B_Reverse
- -DnfaExecLimEx128_Q=avx2_nfaExecLimEx128_Q
- -DnfaExecLimEx128_Q2=avx2_nfaExecLimEx128_Q2
- -DnfaExecLimEx128_QR=avx2_nfaExecLimEx128_QR
- -DnfaExecLimEx128_expandState=avx2_nfaExecLimEx128_expandState
- -DnfaExecLimEx128_inAccept=avx2_nfaExecLimEx128_inAccept
- -DnfaExecLimEx128_inAnyAccept=avx2_nfaExecLimEx128_inAnyAccept
- -DnfaExecLimEx128_initCompressedState=avx2_nfaExecLimEx128_initCompressedState
- -DnfaExecLimEx128_queueCompressState=avx2_nfaExecLimEx128_queueCompressState
- -DnfaExecLimEx128_queueInitState=avx2_nfaExecLimEx128_queueInitState
- -DnfaExecLimEx128_reportCurrent=avx2_nfaExecLimEx128_reportCurrent
- -DnfaExecLimEx128_testEOD=avx2_nfaExecLimEx128_testEOD
- -DnfaExecLimEx128_zombie_status=avx2_nfaExecLimEx128_zombie_status
- -DnfaExecLimEx256_B_Reverse=avx2_nfaExecLimEx256_B_Reverse
- -DnfaExecLimEx256_Q=avx2_nfaExecLimEx256_Q
- -DnfaExecLimEx256_Q2=avx2_nfaExecLimEx256_Q2
- -DnfaExecLimEx256_QR=avx2_nfaExecLimEx256_QR
- -DnfaExecLimEx256_expandState=avx2_nfaExecLimEx256_expandState
- -DnfaExecLimEx256_inAccept=avx2_nfaExecLimEx256_inAccept
- -DnfaExecLimEx256_inAnyAccept=avx2_nfaExecLimEx256_inAnyAccept
- -DnfaExecLimEx256_initCompressedState=avx2_nfaExecLimEx256_initCompressedState
- -DnfaExecLimEx256_queueCompressState=avx2_nfaExecLimEx256_queueCompressState
- -DnfaExecLimEx256_queueInitState=avx2_nfaExecLimEx256_queueInitState
- -DnfaExecLimEx256_reportCurrent=avx2_nfaExecLimEx256_reportCurrent
- -DnfaExecLimEx256_testEOD=avx2_nfaExecLimEx256_testEOD
- -DnfaExecLimEx256_zombie_status=avx2_nfaExecLimEx256_zombie_status
- -DnfaExecLimEx32_B_Reverse=avx2_nfaExecLimEx32_B_Reverse
- -DnfaExecLimEx32_Q=avx2_nfaExecLimEx32_Q
- -DnfaExecLimEx32_Q2=avx2_nfaExecLimEx32_Q2
- -DnfaExecLimEx32_QR=avx2_nfaExecLimEx32_QR
- -DnfaExecLimEx32_expandState=avx2_nfaExecLimEx32_expandState
- -DnfaExecLimEx32_inAccept=avx2_nfaExecLimEx32_inAccept
- -DnfaExecLimEx32_inAnyAccept=avx2_nfaExecLimEx32_inAnyAccept
- -DnfaExecLimEx32_initCompressedState=avx2_nfaExecLimEx32_initCompressedState
- -DnfaExecLimEx32_queueCompressState=avx2_nfaExecLimEx32_queueCompressState
- -DnfaExecLimEx32_queueInitState=avx2_nfaExecLimEx32_queueInitState
- -DnfaExecLimEx32_reportCurrent=avx2_nfaExecLimEx32_reportCurrent
- -DnfaExecLimEx32_testEOD=avx2_nfaExecLimEx32_testEOD
- -DnfaExecLimEx32_zombie_status=avx2_nfaExecLimEx32_zombie_status
- -DnfaExecLimEx384_B_Reverse=avx2_nfaExecLimEx384_B_Reverse
- -DnfaExecLimEx384_Q=avx2_nfaExecLimEx384_Q
- -DnfaExecLimEx384_Q2=avx2_nfaExecLimEx384_Q2
- -DnfaExecLimEx384_QR=avx2_nfaExecLimEx384_QR
- -DnfaExecLimEx384_expandState=avx2_nfaExecLimEx384_expandState
- -DnfaExecLimEx384_inAccept=avx2_nfaExecLimEx384_inAccept
- -DnfaExecLimEx384_inAnyAccept=avx2_nfaExecLimEx384_inAnyAccept
- -DnfaExecLimEx384_initCompressedState=avx2_nfaExecLimEx384_initCompressedState
- -DnfaExecLimEx384_queueCompressState=avx2_nfaExecLimEx384_queueCompressState
- -DnfaExecLimEx384_queueInitState=avx2_nfaExecLimEx384_queueInitState
- -DnfaExecLimEx384_reportCurrent=avx2_nfaExecLimEx384_reportCurrent
- -DnfaExecLimEx384_testEOD=avx2_nfaExecLimEx384_testEOD
- -DnfaExecLimEx384_zombie_status=avx2_nfaExecLimEx384_zombie_status
- -DnfaExecLimEx512_B_Reverse=avx2_nfaExecLimEx512_B_Reverse
- -DnfaExecLimEx512_Q=avx2_nfaExecLimEx512_Q
- -DnfaExecLimEx512_Q2=avx2_nfaExecLimEx512_Q2
- -DnfaExecLimEx512_QR=avx2_nfaExecLimEx512_QR
- -DnfaExecLimEx512_expandState=avx2_nfaExecLimEx512_expandState
- -DnfaExecLimEx512_inAccept=avx2_nfaExecLimEx512_inAccept
- -DnfaExecLimEx512_inAnyAccept=avx2_nfaExecLimEx512_inAnyAccept
- -DnfaExecLimEx512_initCompressedState=avx2_nfaExecLimEx512_initCompressedState
- -DnfaExecLimEx512_queueCompressState=avx2_nfaExecLimEx512_queueCompressState
- -DnfaExecLimEx512_queueInitState=avx2_nfaExecLimEx512_queueInitState
- -DnfaExecLimEx512_reportCurrent=avx2_nfaExecLimEx512_reportCurrent
- -DnfaExecLimEx512_testEOD=avx2_nfaExecLimEx512_testEOD
- -DnfaExecLimEx512_zombie_status=avx2_nfaExecLimEx512_zombie_status
- -DnfaExecLimEx64_B_Reverse=avx2_nfaExecLimEx64_B_Reverse
- -DnfaExecLimEx64_Q=avx2_nfaExecLimEx64_Q
- -DnfaExecLimEx64_Q2=avx2_nfaExecLimEx64_Q2
- -DnfaExecLimEx64_QR=avx2_nfaExecLimEx64_QR
- -DnfaExecLimEx64_expandState=avx2_nfaExecLimEx64_expandState
- -DnfaExecLimEx64_inAccept=avx2_nfaExecLimEx64_inAccept
- -DnfaExecLimEx64_inAnyAccept=avx2_nfaExecLimEx64_inAnyAccept
- -DnfaExecLimEx64_initCompressedState=avx2_nfaExecLimEx64_initCompressedState
- -DnfaExecLimEx64_queueCompressState=avx2_nfaExecLimEx64_queueCompressState
- -DnfaExecLimEx64_queueInitState=avx2_nfaExecLimEx64_queueInitState
- -DnfaExecLimEx64_reportCurrent=avx2_nfaExecLimEx64_reportCurrent
- -DnfaExecLimEx64_testEOD=avx2_nfaExecLimEx64_testEOD
- -DnfaExecLimEx64_zombie_status=avx2_nfaExecLimEx64_zombie_status
- -DnfaExecMcClellan16_B=avx2_nfaExecMcClellan16_B
- -DnfaExecMcClellan16_Q=avx2_nfaExecMcClellan16_Q
- -DnfaExecMcClellan16_Q2=avx2_nfaExecMcClellan16_Q2
- -DnfaExecMcClellan16_QR=avx2_nfaExecMcClellan16_QR
- -DnfaExecMcClellan16_SimpStream=avx2_nfaExecMcClellan16_SimpStream
- -DnfaExecMcClellan16_expandState=avx2_nfaExecMcClellan16_expandState
- -DnfaExecMcClellan16_inAccept=avx2_nfaExecMcClellan16_inAccept
- -DnfaExecMcClellan16_inAnyAccept=avx2_nfaExecMcClellan16_inAnyAccept
- -DnfaExecMcClellan16_initCompressedState=avx2_nfaExecMcClellan16_initCompressedState
- -DnfaExecMcClellan16_queueCompressState=avx2_nfaExecMcClellan16_queueCompressState
- -DnfaExecMcClellan16_queueInitState=avx2_nfaExecMcClellan16_queueInitState
- -DnfaExecMcClellan16_reportCurrent=avx2_nfaExecMcClellan16_reportCurrent
- -DnfaExecMcClellan16_testEOD=avx2_nfaExecMcClellan16_testEOD
- -DnfaExecMcClellan8_B=avx2_nfaExecMcClellan8_B
- -DnfaExecMcClellan8_Q=avx2_nfaExecMcClellan8_Q
- -DnfaExecMcClellan8_Q2=avx2_nfaExecMcClellan8_Q2
- -DnfaExecMcClellan8_QR=avx2_nfaExecMcClellan8_QR
- -DnfaExecMcClellan8_SimpStream=avx2_nfaExecMcClellan8_SimpStream
- -DnfaExecMcClellan8_expandState=avx2_nfaExecMcClellan8_expandState
- -DnfaExecMcClellan8_inAccept=avx2_nfaExecMcClellan8_inAccept
- -DnfaExecMcClellan8_inAnyAccept=avx2_nfaExecMcClellan8_inAnyAccept
- -DnfaExecMcClellan8_initCompressedState=avx2_nfaExecMcClellan8_initCompressedState
- -DnfaExecMcClellan8_queueCompressState=avx2_nfaExecMcClellan8_queueCompressState
- -DnfaExecMcClellan8_queueInitState=avx2_nfaExecMcClellan8_queueInitState
- -DnfaExecMcClellan8_reportCurrent=avx2_nfaExecMcClellan8_reportCurrent
- -DnfaExecMcClellan8_testEOD=avx2_nfaExecMcClellan8_testEOD
- -DnfaExecMcSheng16_Q=avx2_nfaExecMcSheng16_Q
- -DnfaExecMcSheng16_Q2=avx2_nfaExecMcSheng16_Q2
- -DnfaExecMcSheng16_QR=avx2_nfaExecMcSheng16_QR
- -DnfaExecMcSheng16_expandState=avx2_nfaExecMcSheng16_expandState
- -DnfaExecMcSheng16_inAccept=avx2_nfaExecMcSheng16_inAccept
- -DnfaExecMcSheng16_inAnyAccept=avx2_nfaExecMcSheng16_inAnyAccept
- -DnfaExecMcSheng16_initCompressedState=avx2_nfaExecMcSheng16_initCompressedState
- -DnfaExecMcSheng16_queueCompressState=avx2_nfaExecMcSheng16_queueCompressState
- -DnfaExecMcSheng16_queueInitState=avx2_nfaExecMcSheng16_queueInitState
- -DnfaExecMcSheng16_reportCurrent=avx2_nfaExecMcSheng16_reportCurrent
- -DnfaExecMcSheng16_testEOD=avx2_nfaExecMcSheng16_testEOD
- -DnfaExecMcSheng8_Q=avx2_nfaExecMcSheng8_Q
- -DnfaExecMcSheng8_Q2=avx2_nfaExecMcSheng8_Q2
- -DnfaExecMcSheng8_QR=avx2_nfaExecMcSheng8_QR
- -DnfaExecMcSheng8_expandState=avx2_nfaExecMcSheng8_expandState
- -DnfaExecMcSheng8_inAccept=avx2_nfaExecMcSheng8_inAccept
- -DnfaExecMcSheng8_inAnyAccept=avx2_nfaExecMcSheng8_inAnyAccept
- -DnfaExecMcSheng8_initCompressedState=avx2_nfaExecMcSheng8_initCompressedState
- -DnfaExecMcSheng8_queueCompressState=avx2_nfaExecMcSheng8_queueCompressState
- -DnfaExecMcSheng8_queueInitState=avx2_nfaExecMcSheng8_queueInitState
- -DnfaExecMcSheng8_reportCurrent=avx2_nfaExecMcSheng8_reportCurrent
- -DnfaExecMcSheng8_testEOD=avx2_nfaExecMcSheng8_testEOD
- -DnfaExecMpv_Q=avx2_nfaExecMpv_Q
- -DnfaExecMpv_QueueExecRaw=avx2_nfaExecMpv_QueueExecRaw
- -DnfaExecMpv_expandState=avx2_nfaExecMpv_expandState
- -DnfaExecMpv_initCompressedState=avx2_nfaExecMpv_initCompressedState
- -DnfaExecMpv_queueCompressState=avx2_nfaExecMpv_queueCompressState
- -DnfaExecMpv_queueInitState=avx2_nfaExecMpv_queueInitState
- -DnfaExecMpv_reportCurrent=avx2_nfaExecMpv_reportCurrent
- -DnfaExecSheng_B=avx2_nfaExecSheng_B
- -DnfaExecSheng_Q=avx2_nfaExecSheng_Q
- -DnfaExecSheng_Q2=avx2_nfaExecSheng_Q2
- -DnfaExecSheng_QR=avx2_nfaExecSheng_QR
- -DnfaExecSheng_expandState=avx2_nfaExecSheng_expandState
- -DnfaExecSheng_inAccept=avx2_nfaExecSheng_inAccept
- -DnfaExecSheng_inAnyAccept=avx2_nfaExecSheng_inAnyAccept
- -DnfaExecSheng_initCompressedState=avx2_nfaExecSheng_initCompressedState
- -DnfaExecSheng_queueCompressState=avx2_nfaExecSheng_queueCompressState
- -DnfaExecSheng_queueInitState=avx2_nfaExecSheng_queueInitState
- -DnfaExecSheng_reportCurrent=avx2_nfaExecSheng_reportCurrent
- -DnfaExecSheng_testEOD=avx2_nfaExecSheng_testEOD
- -DnfaExecTamarama_Q=avx2_nfaExecTamarama_Q
- -DnfaExecTamarama_Q2=avx2_nfaExecTamarama_Q2
- -DnfaExecTamarama_QR=avx2_nfaExecTamarama_QR
- -DnfaExecTamarama_expandState=avx2_nfaExecTamarama_expandState
- -DnfaExecTamarama_inAccept=avx2_nfaExecTamarama_inAccept
- -DnfaExecTamarama_inAnyAccept=avx2_nfaExecTamarama_inAnyAccept
- -DnfaExecTamarama_queueCompressState=avx2_nfaExecTamarama_queueCompressState
- -DnfaExecTamarama_queueInitState=avx2_nfaExecTamarama_queueInitState
- -DnfaExecTamarama_reportCurrent=avx2_nfaExecTamarama_reportCurrent
- -DnfaExecTamarama_testEOD=avx2_nfaExecTamarama_testEOD
- -DnfaExecTamarama_zombie_status=avx2_nfaExecTamarama_zombie_status
- -DnfaExpandState=avx2_nfaExpandState
- -DnfaGetZombieStatus=avx2_nfaGetZombieStatus
- -DnfaInAcceptState=avx2_nfaInAcceptState
- -DnfaInAnyAcceptState=avx2_nfaInAnyAcceptState
- -DnfaInitCompressedState=avx2_nfaInitCompressedState
- -DnfaQueueCompressState=avx2_nfaQueueCompressState
- -DnfaQueueExec=avx2_nfaQueueExec
- -DnfaQueueExec2_raw=avx2_nfaQueueExec2_raw
- -DnfaQueueExecRose=avx2_nfaQueueExecRose
- -DnfaQueueExecToMatch=avx2_nfaQueueExecToMatch
- -DnfaQueueExec_raw=avx2_nfaQueueExec_raw
- -DnfaQueueInitState=avx2_nfaQueueInitState
- -DnfaReportCurrentMatches=avx2_nfaReportCurrentMatches
- -DnoodExec=avx2_noodExec
- -DnoodExecStreaming=avx2_noodExecStreaming
- -Dp_mask_arr=avx2_p_mask_arr
- -Dp_mask_arr256=avx2_p_mask_arr256
- -DrepeatHasMatchBitmap=avx2_repeatHasMatchBitmap
- -DrepeatHasMatchRange=avx2_repeatHasMatchRange
- -DrepeatHasMatchRing=avx2_repeatHasMatchRing
- -DrepeatHasMatchSparseOptimalP=avx2_repeatHasMatchSparseOptimalP
- -DrepeatHasMatchTrailer=avx2_repeatHasMatchTrailer
- -DrepeatLastTopBitmap=avx2_repeatLastTopBitmap
- -DrepeatLastTopRange=avx2_repeatLastTopRange
- -DrepeatLastTopRing=avx2_repeatLastTopRing
- -DrepeatLastTopSparseOptimalP=avx2_repeatLastTopSparseOptimalP
- -DrepeatLastTopTrailer=avx2_repeatLastTopTrailer
- -DrepeatNextMatchBitmap=avx2_repeatNextMatchBitmap
- -DrepeatNextMatchRange=avx2_repeatNextMatchRange
- -DrepeatNextMatchRing=avx2_repeatNextMatchRing
- -DrepeatNextMatchSparseOptimalP=avx2_repeatNextMatchSparseOptimalP
- -DrepeatNextMatchTrailer=avx2_repeatNextMatchTrailer
- -DrepeatPack=avx2_repeatPack
- -DrepeatStoreBitmap=avx2_repeatStoreBitmap
- -DrepeatStoreRange=avx2_repeatStoreRange
- -DrepeatStoreRing=avx2_repeatStoreRing
- -DrepeatStoreSparseOptimalP=avx2_repeatStoreSparseOptimalP
- -DrepeatStoreTrailer=avx2_repeatStoreTrailer
- -DrepeatUnpack=avx2_repeatUnpack
- -DroseAnchoredCallback=avx2_roseAnchoredCallback
- -DroseBlockExec=avx2_roseBlockExec
- -DroseCallback=avx2_roseCallback
- -DroseCatchUpAll=avx2_roseCatchUpAll
- -DroseCatchUpMPV_i=avx2_roseCatchUpMPV_i
- -DroseCatchUpSuf=avx2_roseCatchUpSuf
- -DroseDelayRebuildCallback=avx2_roseDelayRebuildCallback
- -DroseFloatingCallback=avx2_roseFloatingCallback
- -DroseHandleChainMatch=avx2_roseHandleChainMatch
- -DroseInitState=avx2_roseInitState
- -DroseNfaAdaptor=avx2_roseNfaAdaptor
- -DroseNfaEarliestSom=avx2_roseNfaEarliestSom
- -DroseReportAdaptor=avx2_roseReportAdaptor
- -DroseRunBoundaryProgram=avx2_roseRunBoundaryProgram
- -DroseRunFlushCombProgram=avx2_roseRunFlushCombProgram
- -DroseRunLastFlushCombProgram=avx2_roseRunLastFlushCombProgram
- -DroseRunProgram=avx2_roseRunProgram
- -DroseRunProgram_l=avx2_roseRunProgram_l
- -DroseStreamEodExec=avx2_roseStreamEodExec
- -DroseStreamExec=avx2_roseStreamExec
- -DrshuftiExec=avx2_rshuftiExec
- -DrtruffleExec=avx2_rtruffleExec
- -Drun_accel=avx2_run_accel
- -DsetSomFromSomAware=avx2_setSomFromSomAware
- -DshuftiDoubleExec=avx2_shuftiDoubleExec
- -DshuftiExec=avx2_shuftiExec
- -Dsimd_onebit_masks=avx2_simd_onebit_masks
- -Dsize_compress_stream=avx2_size_compress_stream
- -DstoreSomToStream=avx2_storeSomToStream
- -Dstorecompressed128=avx2_storecompressed128
- -Dstorecompressed256=avx2_storecompressed256
- -Dstorecompressed32=avx2_storecompressed32
- -Dstorecompressed384=avx2_storecompressed384
- -Dstorecompressed512=avx2_storecompressed512
- -Dstorecompressed64=avx2_storecompressed64
- -DstreamInitSufPQ=avx2_streamInitSufPQ
- -DtruffleExec=avx2_truffleExec
- -Dvbs_mask_data=avx2_vbs_mask_data
-)
-
-SRCDIR(contrib/libs/hyperscan)
-
-SRCS(
- src/alloc.c
- src/crc32.c
- src/database.c
- src/fdr/fdr.c
- src/fdr/teddy.c
- src/fdr/teddy_avx2.c
- src/hs_valid_platform.c
- src/hs_version.c
- src/hwlm/hwlm.c
- src/hwlm/noodle_engine.c
- src/nfa/accel.c
- src/nfa/castle.c
- src/nfa/gough.c
- src/nfa/lbr.c
- src/nfa/limex_64.c
- src/nfa/limex_accel.c
- src/nfa/limex_native.c
- src/nfa/limex_simd128.c
- src/nfa/limex_simd256.c
- src/nfa/limex_simd384.c
- src/nfa/limex_simd512.c
- src/nfa/mcclellan.c
- src/nfa/mcsheng.c
- src/nfa/mcsheng_data.c
- src/nfa/mpv.c
- src/nfa/nfa_api_dispatch.c
- src/nfa/repeat.c
- src/nfa/sheng.c
- src/nfa/shufti.c
- src/nfa/tamarama.c
- src/nfa/truffle.c
- src/rose/block.c
- src/rose/catchup.c
- src/rose/init.c
- src/rose/match.c
- src/rose/program_runtime.c
- src/rose/stream.c
- src/runtime.c
- src/scratch.c
- src/som/som_runtime.c
- src/som/som_stream.c
- src/stream_compress.c
- src/util/cpuid_flags.c
- src/util/masked_move.c
- src/util/multibit.c
- src/util/simd_utils.c
- src/util/state_compress.c
-)
-
-END()
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ galtsev
+ g:antiinfra
+ g:cpp-contrib
+ g:yql
+)
+
+LICENSE(BSD-3-Clause)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+ADDINCL(
+ contrib/libs/hyperscan
+ contrib/libs/hyperscan/src
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ ${SSE41_CFLAGS}
+ -DHAVE_SSE41
+ ${SSE42_CFLAGS}
+ -DHAVE_SSE42
+ ${POPCNT_CFLAGS}
+ -DHAVE_POPCOUNT_INSTR
+ ${AVX_CFLAGS}
+ -DHAVE_AVX
+ ${AVX2_CFLAGS}
+ -DHAVE_AVX2
+ -DCrc32c_ComputeBuf=avx2_Crc32c_ComputeBuf
+ -DblockInitSufPQ=avx2_blockInitSufPQ
+ -Dcompress_stream=avx2_compress_stream
+ -Dcpuid_flags=avx2_cpuid_flags
+ -Dcpuid_tune=avx2_cpuid_tune
+ -DdbIsValid=avx2_dbIsValid
+ -DdoAccel128=avx2_doAccel128
+ -DdoAccel256=avx2_doAccel256
+ -DdoAccel32=avx2_doAccel32
+ -DdoAccel384=avx2_doAccel384
+ -DdoAccel512=avx2_doAccel512
+ -DdoAccel64=avx2_doAccel64
+ -Dexpand_stream=avx2_expand_stream
+ -DfdrExec=avx2_fdrExec
+ -DfdrExecStreaming=avx2_fdrExecStreaming
+ -Dfdr_exec_fat_teddy_msks1=avx2_fdr_exec_fat_teddy_msks1
+ -Dfdr_exec_fat_teddy_msks1_pck=avx2_fdr_exec_fat_teddy_msks1_pck
+ -Dfdr_exec_fat_teddy_msks2=avx2_fdr_exec_fat_teddy_msks2
+ -Dfdr_exec_fat_teddy_msks2_pck=avx2_fdr_exec_fat_teddy_msks2_pck
+ -Dfdr_exec_fat_teddy_msks3=avx2_fdr_exec_fat_teddy_msks3
+ -Dfdr_exec_fat_teddy_msks3_pck=avx2_fdr_exec_fat_teddy_msks3_pck
+ -Dfdr_exec_fat_teddy_msks4=avx2_fdr_exec_fat_teddy_msks4
+ -Dfdr_exec_fat_teddy_msks4_pck=avx2_fdr_exec_fat_teddy_msks4_pck
+ -Dfdr_exec_teddy_msks1=avx2_fdr_exec_teddy_msks1
+ -Dfdr_exec_teddy_msks1_pck=avx2_fdr_exec_teddy_msks1_pck
+ -Dfdr_exec_teddy_msks2=avx2_fdr_exec_teddy_msks2
+ -Dfdr_exec_teddy_msks2_pck=avx2_fdr_exec_teddy_msks2_pck
+ -Dfdr_exec_teddy_msks3=avx2_fdr_exec_teddy_msks3
+ -Dfdr_exec_teddy_msks3_pck=avx2_fdr_exec_teddy_msks3_pck
+ -Dfdr_exec_teddy_msks4=avx2_fdr_exec_teddy_msks4
+ -Dfdr_exec_teddy_msks4_pck=avx2_fdr_exec_teddy_msks4_pck
+ -DflushQueuedLiterals_i=avx2_flushQueuedLiterals_i
+ -DflushStoredSomMatches_i=avx2_flushStoredSomMatches_i
+ -DhandleSomExternal=avx2_handleSomExternal
+ -DhandleSomInternal=avx2_handleSomInternal
+ -Dhs_alloc_scratch=avx2_hs_alloc_scratch
+ -Dhs_clone_scratch=avx2_hs_clone_scratch
+ -Dhs_close_stream=avx2_hs_close_stream
+ -Dhs_compress_stream=avx2_hs_compress_stream
+ -Dhs_copy_stream=avx2_hs_copy_stream
+ -Dhs_database_alloc=avx2_hs_database_alloc
+ -Dhs_database_free=avx2_hs_database_free
+ -Dhs_database_info=avx2_hs_database_info
+ -Dhs_database_size=avx2_hs_database_size
+ -Dhs_deserialize_database=avx2_hs_deserialize_database
+ -Dhs_deserialize_database_at=avx2_hs_deserialize_database_at
+ -Dhs_expand_stream=avx2_hs_expand_stream
+ -Dhs_free_database=avx2_hs_free_database
+ -Dhs_free_scratch=avx2_hs_free_scratch
+ -Dhs_misc_alloc=avx2_hs_misc_alloc
+ -Dhs_misc_free=avx2_hs_misc_free
+ -Dhs_open_stream=avx2_hs_open_stream
+ -Dhs_reset_and_copy_stream=avx2_hs_reset_and_copy_stream
+ -Dhs_reset_and_expand_stream=avx2_hs_reset_and_expand_stream
+ -Dhs_reset_stream=avx2_hs_reset_stream
+ -Dhs_scan=avx2_hs_scan
+ -Dhs_scan_stream=avx2_hs_scan_stream
+ -Dhs_scan_vector=avx2_hs_scan_vector
+ -Dhs_scratch_alloc=avx2_hs_scratch_alloc
+ -Dhs_scratch_free=avx2_hs_scratch_free
+ -Dhs_scratch_size=avx2_hs_scratch_size
+ -Dhs_serialize_database=avx2_hs_serialize_database
+ -Dhs_serialized_database_info=avx2_hs_serialized_database_info
+ -Dhs_serialized_database_size=avx2_hs_serialized_database_size
+ -Dhs_set_allocator=avx2_hs_set_allocator
+ -Dhs_set_database_allocator=avx2_hs_set_database_allocator
+ -Dhs_set_misc_allocator=avx2_hs_set_misc_allocator
+ -Dhs_set_scratch_allocator=avx2_hs_set_scratch_allocator
+ -Dhs_set_stream_allocator=avx2_hs_set_stream_allocator
+ -Dhs_stream_alloc=avx2_hs_stream_alloc
+ -Dhs_stream_free=avx2_hs_stream_free
+ -Dhs_stream_size=avx2_hs_stream_size
+ -Dhs_valid_platform=avx2_hs_valid_platform
+ -Dhs_version=avx2_hs_version
+ -DhwlmExec=avx2_hwlmExec
+ -DhwlmExecStreaming=avx2_hwlmExecStreaming
+ -DloadSomFromStream=avx2_loadSomFromStream
+ -Dloadcompressed128=avx2_loadcompressed128
+ -Dloadcompressed256=avx2_loadcompressed256
+ -Dloadcompressed32=avx2_loadcompressed32
+ -Dloadcompressed384=avx2_loadcompressed384
+ -Dloadcompressed512=avx2_loadcompressed512
+ -Dloadcompressed64=avx2_loadcompressed64
+ -Dmcsheng_pext_mask=avx2_mcsheng_pext_mask
+ -Dmm_mask_mask=avx2_mm_mask_mask
+ -Dmm_shuffle_end=avx2_mm_shuffle_end
+ -Dmmbit_keyshift_lut=avx2_mmbit_keyshift_lut
+ -Dmmbit_maxlevel_direct_lut=avx2_mmbit_maxlevel_direct_lut
+ -Dmmbit_maxlevel_from_keyshift_lut=avx2_mmbit_maxlevel_from_keyshift_lut
+ -Dmmbit_root_offset_from_level=avx2_mmbit_root_offset_from_level
+ -Dmmbit_zero_to_lut=avx2_mmbit_zero_to_lut
+ -DnfaBlockExecReverse=avx2_nfaBlockExecReverse
+ -DnfaCheckFinalState=avx2_nfaCheckFinalState
+ -DnfaExecCastle_Q=avx2_nfaExecCastle_Q
+ -DnfaExecCastle_Q2=avx2_nfaExecCastle_Q2
+ -DnfaExecCastle_QR=avx2_nfaExecCastle_QR
+ -DnfaExecCastle_expandState=avx2_nfaExecCastle_expandState
+ -DnfaExecCastle_inAccept=avx2_nfaExecCastle_inAccept
+ -DnfaExecCastle_inAnyAccept=avx2_nfaExecCastle_inAnyAccept
+ -DnfaExecCastle_initCompressedState=avx2_nfaExecCastle_initCompressedState
+ -DnfaExecCastle_queueCompressState=avx2_nfaExecCastle_queueCompressState
+ -DnfaExecCastle_queueInitState=avx2_nfaExecCastle_queueInitState
+ -DnfaExecCastle_reportCurrent=avx2_nfaExecCastle_reportCurrent
+ -DnfaExecGough16_Q=avx2_nfaExecGough16_Q
+ -DnfaExecGough16_Q2=avx2_nfaExecGough16_Q2
+ -DnfaExecGough16_QR=avx2_nfaExecGough16_QR
+ -DnfaExecGough16_expandState=avx2_nfaExecGough16_expandState
+ -DnfaExecGough16_inAccept=avx2_nfaExecGough16_inAccept
+ -DnfaExecGough16_inAnyAccept=avx2_nfaExecGough16_inAnyAccept
+ -DnfaExecGough16_initCompressedState=avx2_nfaExecGough16_initCompressedState
+ -DnfaExecGough16_queueCompressState=avx2_nfaExecGough16_queueCompressState
+ -DnfaExecGough16_queueInitState=avx2_nfaExecGough16_queueInitState
+ -DnfaExecGough16_reportCurrent=avx2_nfaExecGough16_reportCurrent
+ -DnfaExecGough16_testEOD=avx2_nfaExecGough16_testEOD
+ -DnfaExecGough8_Q=avx2_nfaExecGough8_Q
+ -DnfaExecGough8_Q2=avx2_nfaExecGough8_Q2
+ -DnfaExecGough8_QR=avx2_nfaExecGough8_QR
+ -DnfaExecGough8_expandState=avx2_nfaExecGough8_expandState
+ -DnfaExecGough8_inAccept=avx2_nfaExecGough8_inAccept
+ -DnfaExecGough8_inAnyAccept=avx2_nfaExecGough8_inAnyAccept
+ -DnfaExecGough8_initCompressedState=avx2_nfaExecGough8_initCompressedState
+ -DnfaExecGough8_queueCompressState=avx2_nfaExecGough8_queueCompressState
+ -DnfaExecGough8_queueInitState=avx2_nfaExecGough8_queueInitState
+ -DnfaExecGough8_reportCurrent=avx2_nfaExecGough8_reportCurrent
+ -DnfaExecGough8_testEOD=avx2_nfaExecGough8_testEOD
+ -DnfaExecLbrDot_Q=avx2_nfaExecLbrDot_Q
+ -DnfaExecLbrDot_Q2=avx2_nfaExecLbrDot_Q2
+ -DnfaExecLbrDot_QR=avx2_nfaExecLbrDot_QR
+ -DnfaExecLbrDot_expandState=avx2_nfaExecLbrDot_expandState
+ -DnfaExecLbrDot_inAccept=avx2_nfaExecLbrDot_inAccept
+ -DnfaExecLbrDot_inAnyAccept=avx2_nfaExecLbrDot_inAnyAccept
+ -DnfaExecLbrDot_initCompressedState=avx2_nfaExecLbrDot_initCompressedState
+ -DnfaExecLbrDot_queueCompressState=avx2_nfaExecLbrDot_queueCompressState
+ -DnfaExecLbrDot_queueInitState=avx2_nfaExecLbrDot_queueInitState
+ -DnfaExecLbrDot_reportCurrent=avx2_nfaExecLbrDot_reportCurrent
+ -DnfaExecLbrNVerm_Q=avx2_nfaExecLbrNVerm_Q
+ -DnfaExecLbrNVerm_Q2=avx2_nfaExecLbrNVerm_Q2
+ -DnfaExecLbrNVerm_QR=avx2_nfaExecLbrNVerm_QR
+ -DnfaExecLbrNVerm_expandState=avx2_nfaExecLbrNVerm_expandState
+ -DnfaExecLbrNVerm_inAccept=avx2_nfaExecLbrNVerm_inAccept
+ -DnfaExecLbrNVerm_inAnyAccept=avx2_nfaExecLbrNVerm_inAnyAccept
+ -DnfaExecLbrNVerm_initCompressedState=avx2_nfaExecLbrNVerm_initCompressedState
+ -DnfaExecLbrNVerm_queueCompressState=avx2_nfaExecLbrNVerm_queueCompressState
+ -DnfaExecLbrNVerm_queueInitState=avx2_nfaExecLbrNVerm_queueInitState
+ -DnfaExecLbrNVerm_reportCurrent=avx2_nfaExecLbrNVerm_reportCurrent
+ -DnfaExecLbrShuf_Q=avx2_nfaExecLbrShuf_Q
+ -DnfaExecLbrShuf_Q2=avx2_nfaExecLbrShuf_Q2
+ -DnfaExecLbrShuf_QR=avx2_nfaExecLbrShuf_QR
+ -DnfaExecLbrShuf_expandState=avx2_nfaExecLbrShuf_expandState
+ -DnfaExecLbrShuf_inAccept=avx2_nfaExecLbrShuf_inAccept
+ -DnfaExecLbrShuf_inAnyAccept=avx2_nfaExecLbrShuf_inAnyAccept
+ -DnfaExecLbrShuf_initCompressedState=avx2_nfaExecLbrShuf_initCompressedState
+ -DnfaExecLbrShuf_queueCompressState=avx2_nfaExecLbrShuf_queueCompressState
+ -DnfaExecLbrShuf_queueInitState=avx2_nfaExecLbrShuf_queueInitState
+ -DnfaExecLbrShuf_reportCurrent=avx2_nfaExecLbrShuf_reportCurrent
+ -DnfaExecLbrTruf_Q=avx2_nfaExecLbrTruf_Q
+ -DnfaExecLbrTruf_Q2=avx2_nfaExecLbrTruf_Q2
+ -DnfaExecLbrTruf_QR=avx2_nfaExecLbrTruf_QR
+ -DnfaExecLbrTruf_expandState=avx2_nfaExecLbrTruf_expandState
+ -DnfaExecLbrTruf_inAccept=avx2_nfaExecLbrTruf_inAccept
+ -DnfaExecLbrTruf_inAnyAccept=avx2_nfaExecLbrTruf_inAnyAccept
+ -DnfaExecLbrTruf_initCompressedState=avx2_nfaExecLbrTruf_initCompressedState
+ -DnfaExecLbrTruf_queueCompressState=avx2_nfaExecLbrTruf_queueCompressState
+ -DnfaExecLbrTruf_queueInitState=avx2_nfaExecLbrTruf_queueInitState
+ -DnfaExecLbrTruf_reportCurrent=avx2_nfaExecLbrTruf_reportCurrent
+ -DnfaExecLbrVerm_Q=avx2_nfaExecLbrVerm_Q
+ -DnfaExecLbrVerm_Q2=avx2_nfaExecLbrVerm_Q2
+ -DnfaExecLbrVerm_QR=avx2_nfaExecLbrVerm_QR
+ -DnfaExecLbrVerm_expandState=avx2_nfaExecLbrVerm_expandState
+ -DnfaExecLbrVerm_inAccept=avx2_nfaExecLbrVerm_inAccept
+ -DnfaExecLbrVerm_inAnyAccept=avx2_nfaExecLbrVerm_inAnyAccept
+ -DnfaExecLbrVerm_initCompressedState=avx2_nfaExecLbrVerm_initCompressedState
+ -DnfaExecLbrVerm_queueCompressState=avx2_nfaExecLbrVerm_queueCompressState
+ -DnfaExecLbrVerm_queueInitState=avx2_nfaExecLbrVerm_queueInitState
+ -DnfaExecLbrVerm_reportCurrent=avx2_nfaExecLbrVerm_reportCurrent
+ -DnfaExecLimEx128_B_Reverse=avx2_nfaExecLimEx128_B_Reverse
+ -DnfaExecLimEx128_Q=avx2_nfaExecLimEx128_Q
+ -DnfaExecLimEx128_Q2=avx2_nfaExecLimEx128_Q2
+ -DnfaExecLimEx128_QR=avx2_nfaExecLimEx128_QR
+ -DnfaExecLimEx128_expandState=avx2_nfaExecLimEx128_expandState
+ -DnfaExecLimEx128_inAccept=avx2_nfaExecLimEx128_inAccept
+ -DnfaExecLimEx128_inAnyAccept=avx2_nfaExecLimEx128_inAnyAccept
+ -DnfaExecLimEx128_initCompressedState=avx2_nfaExecLimEx128_initCompressedState
+ -DnfaExecLimEx128_queueCompressState=avx2_nfaExecLimEx128_queueCompressState
+ -DnfaExecLimEx128_queueInitState=avx2_nfaExecLimEx128_queueInitState
+ -DnfaExecLimEx128_reportCurrent=avx2_nfaExecLimEx128_reportCurrent
+ -DnfaExecLimEx128_testEOD=avx2_nfaExecLimEx128_testEOD
+ -DnfaExecLimEx128_zombie_status=avx2_nfaExecLimEx128_zombie_status
+ -DnfaExecLimEx256_B_Reverse=avx2_nfaExecLimEx256_B_Reverse
+ -DnfaExecLimEx256_Q=avx2_nfaExecLimEx256_Q
+ -DnfaExecLimEx256_Q2=avx2_nfaExecLimEx256_Q2
+ -DnfaExecLimEx256_QR=avx2_nfaExecLimEx256_QR
+ -DnfaExecLimEx256_expandState=avx2_nfaExecLimEx256_expandState
+ -DnfaExecLimEx256_inAccept=avx2_nfaExecLimEx256_inAccept
+ -DnfaExecLimEx256_inAnyAccept=avx2_nfaExecLimEx256_inAnyAccept
+ -DnfaExecLimEx256_initCompressedState=avx2_nfaExecLimEx256_initCompressedState
+ -DnfaExecLimEx256_queueCompressState=avx2_nfaExecLimEx256_queueCompressState
+ -DnfaExecLimEx256_queueInitState=avx2_nfaExecLimEx256_queueInitState
+ -DnfaExecLimEx256_reportCurrent=avx2_nfaExecLimEx256_reportCurrent
+ -DnfaExecLimEx256_testEOD=avx2_nfaExecLimEx256_testEOD
+ -DnfaExecLimEx256_zombie_status=avx2_nfaExecLimEx256_zombie_status
+ -DnfaExecLimEx32_B_Reverse=avx2_nfaExecLimEx32_B_Reverse
+ -DnfaExecLimEx32_Q=avx2_nfaExecLimEx32_Q
+ -DnfaExecLimEx32_Q2=avx2_nfaExecLimEx32_Q2
+ -DnfaExecLimEx32_QR=avx2_nfaExecLimEx32_QR
+ -DnfaExecLimEx32_expandState=avx2_nfaExecLimEx32_expandState
+ -DnfaExecLimEx32_inAccept=avx2_nfaExecLimEx32_inAccept
+ -DnfaExecLimEx32_inAnyAccept=avx2_nfaExecLimEx32_inAnyAccept
+ -DnfaExecLimEx32_initCompressedState=avx2_nfaExecLimEx32_initCompressedState
+ -DnfaExecLimEx32_queueCompressState=avx2_nfaExecLimEx32_queueCompressState
+ -DnfaExecLimEx32_queueInitState=avx2_nfaExecLimEx32_queueInitState
+ -DnfaExecLimEx32_reportCurrent=avx2_nfaExecLimEx32_reportCurrent
+ -DnfaExecLimEx32_testEOD=avx2_nfaExecLimEx32_testEOD
+ -DnfaExecLimEx32_zombie_status=avx2_nfaExecLimEx32_zombie_status
+ -DnfaExecLimEx384_B_Reverse=avx2_nfaExecLimEx384_B_Reverse
+ -DnfaExecLimEx384_Q=avx2_nfaExecLimEx384_Q
+ -DnfaExecLimEx384_Q2=avx2_nfaExecLimEx384_Q2
+ -DnfaExecLimEx384_QR=avx2_nfaExecLimEx384_QR
+ -DnfaExecLimEx384_expandState=avx2_nfaExecLimEx384_expandState
+ -DnfaExecLimEx384_inAccept=avx2_nfaExecLimEx384_inAccept
+ -DnfaExecLimEx384_inAnyAccept=avx2_nfaExecLimEx384_inAnyAccept
+ -DnfaExecLimEx384_initCompressedState=avx2_nfaExecLimEx384_initCompressedState
+ -DnfaExecLimEx384_queueCompressState=avx2_nfaExecLimEx384_queueCompressState
+ -DnfaExecLimEx384_queueInitState=avx2_nfaExecLimEx384_queueInitState
+ -DnfaExecLimEx384_reportCurrent=avx2_nfaExecLimEx384_reportCurrent
+ -DnfaExecLimEx384_testEOD=avx2_nfaExecLimEx384_testEOD
+ -DnfaExecLimEx384_zombie_status=avx2_nfaExecLimEx384_zombie_status
+ -DnfaExecLimEx512_B_Reverse=avx2_nfaExecLimEx512_B_Reverse
+ -DnfaExecLimEx512_Q=avx2_nfaExecLimEx512_Q
+ -DnfaExecLimEx512_Q2=avx2_nfaExecLimEx512_Q2
+ -DnfaExecLimEx512_QR=avx2_nfaExecLimEx512_QR
+ -DnfaExecLimEx512_expandState=avx2_nfaExecLimEx512_expandState
+ -DnfaExecLimEx512_inAccept=avx2_nfaExecLimEx512_inAccept
+ -DnfaExecLimEx512_inAnyAccept=avx2_nfaExecLimEx512_inAnyAccept
+ -DnfaExecLimEx512_initCompressedState=avx2_nfaExecLimEx512_initCompressedState
+ -DnfaExecLimEx512_queueCompressState=avx2_nfaExecLimEx512_queueCompressState
+ -DnfaExecLimEx512_queueInitState=avx2_nfaExecLimEx512_queueInitState
+ -DnfaExecLimEx512_reportCurrent=avx2_nfaExecLimEx512_reportCurrent
+ -DnfaExecLimEx512_testEOD=avx2_nfaExecLimEx512_testEOD
+ -DnfaExecLimEx512_zombie_status=avx2_nfaExecLimEx512_zombie_status
+ -DnfaExecLimEx64_B_Reverse=avx2_nfaExecLimEx64_B_Reverse
+ -DnfaExecLimEx64_Q=avx2_nfaExecLimEx64_Q
+ -DnfaExecLimEx64_Q2=avx2_nfaExecLimEx64_Q2
+ -DnfaExecLimEx64_QR=avx2_nfaExecLimEx64_QR
+ -DnfaExecLimEx64_expandState=avx2_nfaExecLimEx64_expandState
+ -DnfaExecLimEx64_inAccept=avx2_nfaExecLimEx64_inAccept
+ -DnfaExecLimEx64_inAnyAccept=avx2_nfaExecLimEx64_inAnyAccept
+ -DnfaExecLimEx64_initCompressedState=avx2_nfaExecLimEx64_initCompressedState
+ -DnfaExecLimEx64_queueCompressState=avx2_nfaExecLimEx64_queueCompressState
+ -DnfaExecLimEx64_queueInitState=avx2_nfaExecLimEx64_queueInitState
+ -DnfaExecLimEx64_reportCurrent=avx2_nfaExecLimEx64_reportCurrent
+ -DnfaExecLimEx64_testEOD=avx2_nfaExecLimEx64_testEOD
+ -DnfaExecLimEx64_zombie_status=avx2_nfaExecLimEx64_zombie_status
+ -DnfaExecMcClellan16_B=avx2_nfaExecMcClellan16_B
+ -DnfaExecMcClellan16_Q=avx2_nfaExecMcClellan16_Q
+ -DnfaExecMcClellan16_Q2=avx2_nfaExecMcClellan16_Q2
+ -DnfaExecMcClellan16_QR=avx2_nfaExecMcClellan16_QR
+ -DnfaExecMcClellan16_SimpStream=avx2_nfaExecMcClellan16_SimpStream
+ -DnfaExecMcClellan16_expandState=avx2_nfaExecMcClellan16_expandState
+ -DnfaExecMcClellan16_inAccept=avx2_nfaExecMcClellan16_inAccept
+ -DnfaExecMcClellan16_inAnyAccept=avx2_nfaExecMcClellan16_inAnyAccept
+ -DnfaExecMcClellan16_initCompressedState=avx2_nfaExecMcClellan16_initCompressedState
+ -DnfaExecMcClellan16_queueCompressState=avx2_nfaExecMcClellan16_queueCompressState
+ -DnfaExecMcClellan16_queueInitState=avx2_nfaExecMcClellan16_queueInitState
+ -DnfaExecMcClellan16_reportCurrent=avx2_nfaExecMcClellan16_reportCurrent
+ -DnfaExecMcClellan16_testEOD=avx2_nfaExecMcClellan16_testEOD
+ -DnfaExecMcClellan8_B=avx2_nfaExecMcClellan8_B
+ -DnfaExecMcClellan8_Q=avx2_nfaExecMcClellan8_Q
+ -DnfaExecMcClellan8_Q2=avx2_nfaExecMcClellan8_Q2
+ -DnfaExecMcClellan8_QR=avx2_nfaExecMcClellan8_QR
+ -DnfaExecMcClellan8_SimpStream=avx2_nfaExecMcClellan8_SimpStream
+ -DnfaExecMcClellan8_expandState=avx2_nfaExecMcClellan8_expandState
+ -DnfaExecMcClellan8_inAccept=avx2_nfaExecMcClellan8_inAccept
+ -DnfaExecMcClellan8_inAnyAccept=avx2_nfaExecMcClellan8_inAnyAccept
+ -DnfaExecMcClellan8_initCompressedState=avx2_nfaExecMcClellan8_initCompressedState
+ -DnfaExecMcClellan8_queueCompressState=avx2_nfaExecMcClellan8_queueCompressState
+ -DnfaExecMcClellan8_queueInitState=avx2_nfaExecMcClellan8_queueInitState
+ -DnfaExecMcClellan8_reportCurrent=avx2_nfaExecMcClellan8_reportCurrent
+ -DnfaExecMcClellan8_testEOD=avx2_nfaExecMcClellan8_testEOD
+ -DnfaExecMcSheng16_Q=avx2_nfaExecMcSheng16_Q
+ -DnfaExecMcSheng16_Q2=avx2_nfaExecMcSheng16_Q2
+ -DnfaExecMcSheng16_QR=avx2_nfaExecMcSheng16_QR
+ -DnfaExecMcSheng16_expandState=avx2_nfaExecMcSheng16_expandState
+ -DnfaExecMcSheng16_inAccept=avx2_nfaExecMcSheng16_inAccept
+ -DnfaExecMcSheng16_inAnyAccept=avx2_nfaExecMcSheng16_inAnyAccept
+ -DnfaExecMcSheng16_initCompressedState=avx2_nfaExecMcSheng16_initCompressedState
+ -DnfaExecMcSheng16_queueCompressState=avx2_nfaExecMcSheng16_queueCompressState
+ -DnfaExecMcSheng16_queueInitState=avx2_nfaExecMcSheng16_queueInitState
+ -DnfaExecMcSheng16_reportCurrent=avx2_nfaExecMcSheng16_reportCurrent
+ -DnfaExecMcSheng16_testEOD=avx2_nfaExecMcSheng16_testEOD
+ -DnfaExecMcSheng8_Q=avx2_nfaExecMcSheng8_Q
+ -DnfaExecMcSheng8_Q2=avx2_nfaExecMcSheng8_Q2
+ -DnfaExecMcSheng8_QR=avx2_nfaExecMcSheng8_QR
+ -DnfaExecMcSheng8_expandState=avx2_nfaExecMcSheng8_expandState
+ -DnfaExecMcSheng8_inAccept=avx2_nfaExecMcSheng8_inAccept
+ -DnfaExecMcSheng8_inAnyAccept=avx2_nfaExecMcSheng8_inAnyAccept
+ -DnfaExecMcSheng8_initCompressedState=avx2_nfaExecMcSheng8_initCompressedState
+ -DnfaExecMcSheng8_queueCompressState=avx2_nfaExecMcSheng8_queueCompressState
+ -DnfaExecMcSheng8_queueInitState=avx2_nfaExecMcSheng8_queueInitState
+ -DnfaExecMcSheng8_reportCurrent=avx2_nfaExecMcSheng8_reportCurrent
+ -DnfaExecMcSheng8_testEOD=avx2_nfaExecMcSheng8_testEOD
+ -DnfaExecMpv_Q=avx2_nfaExecMpv_Q
+ -DnfaExecMpv_QueueExecRaw=avx2_nfaExecMpv_QueueExecRaw
+ -DnfaExecMpv_expandState=avx2_nfaExecMpv_expandState
+ -DnfaExecMpv_initCompressedState=avx2_nfaExecMpv_initCompressedState
+ -DnfaExecMpv_queueCompressState=avx2_nfaExecMpv_queueCompressState
+ -DnfaExecMpv_queueInitState=avx2_nfaExecMpv_queueInitState
+ -DnfaExecMpv_reportCurrent=avx2_nfaExecMpv_reportCurrent
+ -DnfaExecSheng_B=avx2_nfaExecSheng_B
+ -DnfaExecSheng_Q=avx2_nfaExecSheng_Q
+ -DnfaExecSheng_Q2=avx2_nfaExecSheng_Q2
+ -DnfaExecSheng_QR=avx2_nfaExecSheng_QR
+ -DnfaExecSheng_expandState=avx2_nfaExecSheng_expandState
+ -DnfaExecSheng_inAccept=avx2_nfaExecSheng_inAccept
+ -DnfaExecSheng_inAnyAccept=avx2_nfaExecSheng_inAnyAccept
+ -DnfaExecSheng_initCompressedState=avx2_nfaExecSheng_initCompressedState
+ -DnfaExecSheng_queueCompressState=avx2_nfaExecSheng_queueCompressState
+ -DnfaExecSheng_queueInitState=avx2_nfaExecSheng_queueInitState
+ -DnfaExecSheng_reportCurrent=avx2_nfaExecSheng_reportCurrent
+ -DnfaExecSheng_testEOD=avx2_nfaExecSheng_testEOD
+ -DnfaExecTamarama_Q=avx2_nfaExecTamarama_Q
+ -DnfaExecTamarama_Q2=avx2_nfaExecTamarama_Q2
+ -DnfaExecTamarama_QR=avx2_nfaExecTamarama_QR
+ -DnfaExecTamarama_expandState=avx2_nfaExecTamarama_expandState
+ -DnfaExecTamarama_inAccept=avx2_nfaExecTamarama_inAccept
+ -DnfaExecTamarama_inAnyAccept=avx2_nfaExecTamarama_inAnyAccept
+ -DnfaExecTamarama_queueCompressState=avx2_nfaExecTamarama_queueCompressState
+ -DnfaExecTamarama_queueInitState=avx2_nfaExecTamarama_queueInitState
+ -DnfaExecTamarama_reportCurrent=avx2_nfaExecTamarama_reportCurrent
+ -DnfaExecTamarama_testEOD=avx2_nfaExecTamarama_testEOD
+ -DnfaExecTamarama_zombie_status=avx2_nfaExecTamarama_zombie_status
+ -DnfaExpandState=avx2_nfaExpandState
+ -DnfaGetZombieStatus=avx2_nfaGetZombieStatus
+ -DnfaInAcceptState=avx2_nfaInAcceptState
+ -DnfaInAnyAcceptState=avx2_nfaInAnyAcceptState
+ -DnfaInitCompressedState=avx2_nfaInitCompressedState
+ -DnfaQueueCompressState=avx2_nfaQueueCompressState
+ -DnfaQueueExec=avx2_nfaQueueExec
+ -DnfaQueueExec2_raw=avx2_nfaQueueExec2_raw
+ -DnfaQueueExecRose=avx2_nfaQueueExecRose
+ -DnfaQueueExecToMatch=avx2_nfaQueueExecToMatch
+ -DnfaQueueExec_raw=avx2_nfaQueueExec_raw
+ -DnfaQueueInitState=avx2_nfaQueueInitState
+ -DnfaReportCurrentMatches=avx2_nfaReportCurrentMatches
+ -DnoodExec=avx2_noodExec
+ -DnoodExecStreaming=avx2_noodExecStreaming
+ -Dp_mask_arr=avx2_p_mask_arr
+ -Dp_mask_arr256=avx2_p_mask_arr256
+ -DrepeatHasMatchBitmap=avx2_repeatHasMatchBitmap
+ -DrepeatHasMatchRange=avx2_repeatHasMatchRange
+ -DrepeatHasMatchRing=avx2_repeatHasMatchRing
+ -DrepeatHasMatchSparseOptimalP=avx2_repeatHasMatchSparseOptimalP
+ -DrepeatHasMatchTrailer=avx2_repeatHasMatchTrailer
+ -DrepeatLastTopBitmap=avx2_repeatLastTopBitmap
+ -DrepeatLastTopRange=avx2_repeatLastTopRange
+ -DrepeatLastTopRing=avx2_repeatLastTopRing
+ -DrepeatLastTopSparseOptimalP=avx2_repeatLastTopSparseOptimalP
+ -DrepeatLastTopTrailer=avx2_repeatLastTopTrailer
+ -DrepeatNextMatchBitmap=avx2_repeatNextMatchBitmap
+ -DrepeatNextMatchRange=avx2_repeatNextMatchRange
+ -DrepeatNextMatchRing=avx2_repeatNextMatchRing
+ -DrepeatNextMatchSparseOptimalP=avx2_repeatNextMatchSparseOptimalP
+ -DrepeatNextMatchTrailer=avx2_repeatNextMatchTrailer
+ -DrepeatPack=avx2_repeatPack
+ -DrepeatStoreBitmap=avx2_repeatStoreBitmap
+ -DrepeatStoreRange=avx2_repeatStoreRange
+ -DrepeatStoreRing=avx2_repeatStoreRing
+ -DrepeatStoreSparseOptimalP=avx2_repeatStoreSparseOptimalP
+ -DrepeatStoreTrailer=avx2_repeatStoreTrailer
+ -DrepeatUnpack=avx2_repeatUnpack
+ -DroseAnchoredCallback=avx2_roseAnchoredCallback
+ -DroseBlockExec=avx2_roseBlockExec
+ -DroseCallback=avx2_roseCallback
+ -DroseCatchUpAll=avx2_roseCatchUpAll
+ -DroseCatchUpMPV_i=avx2_roseCatchUpMPV_i
+ -DroseCatchUpSuf=avx2_roseCatchUpSuf
+ -DroseDelayRebuildCallback=avx2_roseDelayRebuildCallback
+ -DroseFloatingCallback=avx2_roseFloatingCallback
+ -DroseHandleChainMatch=avx2_roseHandleChainMatch
+ -DroseInitState=avx2_roseInitState
+ -DroseNfaAdaptor=avx2_roseNfaAdaptor
+ -DroseNfaEarliestSom=avx2_roseNfaEarliestSom
+ -DroseReportAdaptor=avx2_roseReportAdaptor
+ -DroseRunBoundaryProgram=avx2_roseRunBoundaryProgram
+ -DroseRunFlushCombProgram=avx2_roseRunFlushCombProgram
+ -DroseRunLastFlushCombProgram=avx2_roseRunLastFlushCombProgram
+ -DroseRunProgram=avx2_roseRunProgram
+ -DroseRunProgram_l=avx2_roseRunProgram_l
+ -DroseStreamEodExec=avx2_roseStreamEodExec
+ -DroseStreamExec=avx2_roseStreamExec
+ -DrshuftiExec=avx2_rshuftiExec
+ -DrtruffleExec=avx2_rtruffleExec
+ -Drun_accel=avx2_run_accel
+ -DsetSomFromSomAware=avx2_setSomFromSomAware
+ -DshuftiDoubleExec=avx2_shuftiDoubleExec
+ -DshuftiExec=avx2_shuftiExec
+ -Dsimd_onebit_masks=avx2_simd_onebit_masks
+ -Dsize_compress_stream=avx2_size_compress_stream
+ -DstoreSomToStream=avx2_storeSomToStream
+ -Dstorecompressed128=avx2_storecompressed128
+ -Dstorecompressed256=avx2_storecompressed256
+ -Dstorecompressed32=avx2_storecompressed32
+ -Dstorecompressed384=avx2_storecompressed384
+ -Dstorecompressed512=avx2_storecompressed512
+ -Dstorecompressed64=avx2_storecompressed64
+ -DstreamInitSufPQ=avx2_streamInitSufPQ
+ -DtruffleExec=avx2_truffleExec
+ -Dvbs_mask_data=avx2_vbs_mask_data
+)
+
+SRCDIR(contrib/libs/hyperscan)
+
+SRCS(
+ src/alloc.c
+ src/crc32.c
+ src/database.c
+ src/fdr/fdr.c
+ src/fdr/teddy.c
+ src/fdr/teddy_avx2.c
+ src/hs_valid_platform.c
+ src/hs_version.c
+ src/hwlm/hwlm.c
+ src/hwlm/noodle_engine.c
+ src/nfa/accel.c
+ src/nfa/castle.c
+ src/nfa/gough.c
+ src/nfa/lbr.c
+ src/nfa/limex_64.c
+ src/nfa/limex_accel.c
+ src/nfa/limex_native.c
+ src/nfa/limex_simd128.c
+ src/nfa/limex_simd256.c
+ src/nfa/limex_simd384.c
+ src/nfa/limex_simd512.c
+ src/nfa/mcclellan.c
+ src/nfa/mcsheng.c
+ src/nfa/mcsheng_data.c
+ src/nfa/mpv.c
+ src/nfa/nfa_api_dispatch.c
+ src/nfa/repeat.c
+ src/nfa/sheng.c
+ src/nfa/shufti.c
+ src/nfa/tamarama.c
+ src/nfa/truffle.c
+ src/rose/block.c
+ src/rose/catchup.c
+ src/rose/init.c
+ src/rose/match.c
+ src/rose/program_runtime.c
+ src/rose/stream.c
+ src/runtime.c
+ src/scratch.c
+ src/som/som_runtime.c
+ src/som/som_stream.c
+ src/stream_compress.c
+ src/util/cpuid_flags.c
+ src/util/masked_move.c
+ src/util/multibit.c
+ src/util/simd_utils.c
+ src/util/state_compress.c
+)
+
+END()
diff --git a/contrib/libs/hyperscan/runtime_avx512/.yandex_meta/licenses.list.txt b/contrib/libs/hyperscan/runtime_avx512/.yandex_meta/licenses.list.txt
index 358c19fe4a..b2ced66bbd 100644
--- a/contrib/libs/hyperscan/runtime_avx512/.yandex_meta/licenses.list.txt
+++ b/contrib/libs/hyperscan/runtime_avx512/.yandex_meta/licenses.list.txt
@@ -1,32 +1,32 @@
-====================BSD-3-Clause====================
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2018, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2019, Intel Corporation
+====================BSD-3-Clause====================
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2018, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2019, Intel Corporation
diff --git a/contrib/libs/hyperscan/runtime_avx512/hs_common.h b/contrib/libs/hyperscan/runtime_avx512/hs_common.h
index cfaa9a0eab..13bcfa7003 100644
--- a/contrib/libs/hyperscan/runtime_avx512/hs_common.h
+++ b/contrib/libs/hyperscan/runtime_avx512/hs_common.h
@@ -1,596 +1,596 @@
-/*
- * Copyright (c) 2015-2019, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HS_AVX512_COMMON_H
-#define HS_AVX512_COMMON_H
-
-#if defined(_WIN32)
-#define HS_CDECL __cdecl
-#else
-#define HS_CDECL
-#endif
-#include <stdlib.h>
-
-/**
- * @file
- * @brief The Hyperscan common API definition.
- *
- * Hyperscan is a high speed regular expression engine.
- *
- * This header contains functions available to both the Hyperscan compiler and
- * runtime.
- */
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-struct hs_database;
-
-/**
- * A Hyperscan pattern database.
- *
- * Generated by one of the Hyperscan compiler functions:
- * - @ref hs_compile()
- * - @ref hs_compile_multi()
- * - @ref hs_compile_ext_multi()
- */
-typedef struct hs_database hs_database_t;
-
-/**
- * A type for errors returned by Hyperscan functions.
- */
-typedef int hs_error_t;
-
-/**
- * Free a compiled pattern database.
- *
- * The free callback set by @ref hs_set_database_allocator() (or @ref
- * hs_set_allocator()) will be used by this function.
- *
- * @param db
- * A compiled pattern database. NULL may also be safely provided, in which
- * case the function does nothing.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_free_database(hs_database_t *db);
-
-/**
- * Serialize a pattern database to a stream of bytes.
- *
- * The allocator callback set by @ref hs_set_misc_allocator() (or @ref
- * hs_set_allocator()) will be used by this function.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param bytes
- * On success, a pointer to an array of bytes will be returned here.
- * These bytes can be subsequently relocated or written to disk. The
- * caller is responsible for freeing this block.
- *
- * @param length
- * On success, the number of bytes in the generated byte array will be
- * returned here.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_NOMEM if the byte array cannot be
- * allocated, other values may be returned if errors are detected.
- */
-hs_error_t avx512_hs_serialize_database(const hs_database_t *db, char **bytes,
- size_t *length);
-
-/**
- * Reconstruct a pattern database from a stream of bytes previously generated
- * by @ref hs_serialize_database().
- *
- * This function will allocate sufficient space for the database using the
- * allocator set with @ref hs_set_database_allocator() (or @ref
- * hs_set_allocator()); to use a pre-allocated region of memory, use the @ref
- * hs_deserialize_database_at() function.
- *
- * @param bytes
- * A byte array generated by @ref hs_serialize_database() representing a
- * compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param db
- * On success, a pointer to a newly allocated @ref hs_database_t will be
- * returned here. This database can then be used for scanning, and
- * eventually freed by the caller using @ref hs_free_database().
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_deserialize_database(const char *bytes,
- const size_t length,
- hs_database_t **db);
-
-/**
- * Reconstruct a pattern database from a stream of bytes previously generated
- * by @ref hs_serialize_database() at a given memory location.
- *
- * This function (unlike @ref hs_deserialize_database()) will write the
- * reconstructed database to the memory location given in the @p db parameter.
- * The amount of space required at this location can be determined with the
- * @ref hs_serialized_database_size() function.
- *
- * @param bytes
- * A byte array generated by @ref hs_serialize_database() representing a
- * compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param db
- * Pointer to an 8-byte aligned block of memory of sufficient size to hold
- * the deserialized database. On success, the reconstructed database will
- * be written to this location. This database can then be used for pattern
- * matching. The user is responsible for freeing this memory; the @ref
- * hs_free_database() call should not be used.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_deserialize_database_at(const char *bytes,
- const size_t length,
- hs_database_t *db);
-
-/**
- * Provides the size of the stream state allocated by a single stream opened
- * against the given database.
- *
- * @param database
- * Pointer to a compiled (streaming mode) pattern database.
- *
- * @param stream_size
- * On success, the size in bytes of an individual stream opened against the
- * given database is placed in this parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_stream_size(const hs_database_t *database,
- size_t *stream_size);
-
-/**
- * Provides the size of the given database in bytes.
- *
- * @param database
- * Pointer to compiled pattern database.
- *
- * @param database_size
- * On success, the size of the compiled database in bytes is placed in this
- * parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_database_size(const hs_database_t *database,
- size_t *database_size);
-
-/**
- * Utility function for reporting the size that would be required by a
- * database if it were deserialized.
- *
- * This can be used to allocate a shared memory region or other "special"
- * allocation prior to deserializing with the @ref hs_deserialize_database_at()
- * function.
- *
- * @param bytes
- * Pointer to a byte array generated by @ref hs_serialize_database()
- * representing a compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param deserialized_size
- * On success, the size of the compiled database that would be generated
- * by @ref hs_deserialize_database_at() is returned here.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_serialized_database_size(const char *bytes,
- const size_t length,
- size_t *deserialized_size);
-
-/**
- * Utility function providing information about a database.
- *
- * @param database
- * Pointer to a compiled database.
- *
- * @param info
- * On success, a string containing the version and platform information for
- * the supplied database is placed in the parameter. The string is
- * allocated using the allocator supplied in @ref hs_set_misc_allocator()
- * (or malloc() if no allocator was set) and should be freed by the caller.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_database_info(const hs_database_t *database,
- char **info);
-
-/**
- * Utility function providing information about a serialized database.
- *
- * @param bytes
- * Pointer to a serialized database.
- *
- * @param length
- * Length in bytes of the serialized database.
- *
- * @param info
- * On success, a string containing the version and platform information
- * for the supplied serialized database is placed in the parameter. The
- * string is allocated using the allocator supplied in @ref
- * hs_set_misc_allocator() (or malloc() if no allocator was set) and
- * should be freed by the caller.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_serialized_database_info(const char *bytes,
- size_t length, char **info);
-
-/**
- * The type of the callback function that will be used by Hyperscan to allocate
- * more memory at runtime as required, for example in @ref hs_open_stream() to
- * allocate stream state.
- *
- * If Hyperscan is to be used in a multi-threaded, or similarly concurrent
- * environment, the allocation function will need to be re-entrant, or
- * similarly safe for concurrent use.
- *
- * @param size
- * The number of bytes to allocate.
- * @return
- * A pointer to the region of memory allocated, or NULL on error.
- */
-typedef void *(HS_CDECL *hs_alloc_t)(size_t size);
-
-/**
- * The type of the callback function that will be used by Hyperscan to free
- * memory regions previously allocated using the @ref hs_alloc_t function.
- *
- * @param ptr
- * The region of memory to be freed.
- */
-typedef void (HS_CDECL *hs_free_t)(void *ptr);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating
- * memory at runtime for stream state, scratch space, database bytecode,
- * and various other data structures returned by the Hyperscan API.
- *
- * The function is equivalent to calling @ref hs_set_stream_allocator(),
- * @ref hs_set_scratch_allocator(), @ref hs_set_database_allocator() and
- * @ref hs_set_misc_allocator() with the provided parameters.
- *
- * This call will override any previous allocators that have been set.
- *
- * Note: there is no way to change the allocator used for temporary objects
- * created during the various compile calls (@ref hs_compile(), @ref
- * hs_compile_multi(), @ref hs_compile_ext_multi()).
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_set_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for database bytecode produced by the compile calls (@ref hs_compile(), @ref
- * hs_compile_multi(), @ref hs_compile_ext_multi()) and by database
- * deserialization (@ref hs_deserialize_database()).
- *
- * If no database allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous database allocators that have been set.
- *
- * Note: the database allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * Note: there is no way to change how temporary objects created during the
- * various compile calls (@ref hs_compile(), @ref hs_compile_multi(), @ref
- * hs_compile_ext_multi()) are allocated.
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_set_database_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for items returned by the Hyperscan API such as @ref hs_compile_error_t, @ref
- * hs_expr_info_t and serialized databases.
- *
- * If no misc allocation functions are set, or if NULL is used in place of both
- * parameters, then memory allocation will default to standard methods (such as
- * the system malloc() and free() calls).
- *
- * This call will override any previous misc allocators that have been set.
- *
- * Note: the misc allocator may also be set by calling @ref hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_set_misc_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for scratch space by @ref hs_alloc_scratch() and @ref hs_clone_scratch().
- *
- * If no scratch allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous scratch allocators that have been set.
- *
- * Note: the scratch allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_set_scratch_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for stream state by @ref hs_open_stream().
- *
- * If no stream allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous stream allocators that have been set.
- *
- * Note: the stream allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_set_stream_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Utility function for identifying this release version.
- *
- * @return
- * A string containing the version number of this release build and the
- * date of the build. It is allocated statically, so it does not need to
- * be freed by the caller.
- */
-const char * avx512_hs_version(void);
-
-/**
- * Utility function to test the current system architecture.
- *
- * Hyperscan requires the Supplemental Streaming SIMD Extensions 3 instruction
- * set. This function can be called on any x86 platform to determine if the
- * system provides the required instruction set.
- *
- * This function does not test for more advanced features if Hyperscan has
- * been built for a more specific architecture, for example the AVX2
- * instruction set.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_ARCH_ERROR if the system does not
- * support Hyperscan.
- */
-hs_error_t avx512_hs_valid_platform(void);
-
-/**
- * @defgroup HS_ERROR hs_error_t values
- *
- * @{
- */
-
-/**
- * The engine completed normally.
- */
-#define HS_SUCCESS 0
-
-/**
- * A parameter passed to this function was invalid.
- *
- * This error is only returned in cases where the function can detect an
- * invalid parameter -- it cannot be relied upon to detect (for example)
- * pointers to freed memory or other invalid data.
- */
-#define HS_INVALID (-1)
-
-/**
- * A memory allocation failed.
- */
-#define HS_NOMEM (-2)
-
-/**
- * The engine was terminated by callback.
- *
- * This return value indicates that the target buffer was partially scanned,
- * but that the callback function requested that scanning cease after a match
- * was located.
- */
-#define HS_SCAN_TERMINATED (-3)
-
-/**
- * The pattern compiler failed, and the @ref hs_compile_error_t should be
- * inspected for more detail.
- */
-#define HS_COMPILER_ERROR (-4)
-
-/**
- * The given database was built for a different version of Hyperscan.
- */
-#define HS_DB_VERSION_ERROR (-5)
-
-/**
- * The given database was built for a different platform (i.e., CPU type).
- */
-#define HS_DB_PLATFORM_ERROR (-6)
-
-/**
- * The given database was built for a different mode of operation. This error
- * is returned when streaming calls are used with a block or vectored database
- * and vice versa.
- */
-#define HS_DB_MODE_ERROR (-7)
-
-/**
- * A parameter passed to this function was not correctly aligned.
- */
-#define HS_BAD_ALIGN (-8)
-
-/**
- * The memory allocator (either malloc() or the allocator set with @ref
- * hs_set_allocator()) did not correctly return memory suitably aligned for the
- * largest representable data type on this platform.
- */
-#define HS_BAD_ALLOC (-9)
-
-/**
- * The scratch region was already in use.
- *
- * This error is returned when Hyperscan is able to detect that the scratch
- * region given is already in use by another Hyperscan API call.
- *
- * A separate scratch region, allocated with @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch(), is required for every concurrent caller of the Hyperscan
- * API.
- *
- * For example, this error might be returned when @ref hs_scan() has been
- * called inside a callback delivered by a currently-executing @ref hs_scan()
- * call using the same scratch region.
- *
- * Note: Not all concurrent uses of scratch regions may be detected. This error
- * is intended as a best-effort debugging tool, not a guarantee.
- */
-#define HS_SCRATCH_IN_USE (-10)
-
-/**
- * Unsupported CPU architecture.
- *
- * This error is returned when Hyperscan is able to detect that the current
- * system does not support the required instruction set.
- *
- * At a minimum, Hyperscan requires Supplemental Streaming SIMD Extensions 3
- * (SSSE3).
- */
-#define HS_ARCH_ERROR (-11)
-
-/**
- * Provided buffer was too small.
- *
- * This error indicates that there was insufficient space in the buffer. The
- * call should be repeated with a larger provided buffer.
- *
- * Note: in this situation, it is normal for the amount of space required to be
- * returned in the same manner as the used space would have been returned if the
- * call was successful.
- */
-#define HS_INSUFFICIENT_SPACE (-12)
-
-/**
- * Unexpected internal error.
- *
- * This error indicates unexpected matching behavior. This could be related
- * to invalid usage of stream or scratch space, or to invalid memory
- * operations by users.
- *
- */
-#define HS_UNKNOWN_ERROR (-13)
-
-/** @} */
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* HS_AVX512_COMMON_H */
+/*
+ * Copyright (c) 2015-2019, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HS_AVX512_COMMON_H
+#define HS_AVX512_COMMON_H
+
+#if defined(_WIN32)
+#define HS_CDECL __cdecl
+#else
+#define HS_CDECL
+#endif
+#include <stdlib.h>
+
+/**
+ * @file
+ * @brief The Hyperscan common API definition.
+ *
+ * Hyperscan is a high speed regular expression engine.
+ *
+ * This header contains functions available to both the Hyperscan compiler and
+ * runtime.
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+struct hs_database;
+
+/**
+ * A Hyperscan pattern database.
+ *
+ * Generated by one of the Hyperscan compiler functions:
+ * - @ref hs_compile()
+ * - @ref hs_compile_multi()
+ * - @ref hs_compile_ext_multi()
+ */
+typedef struct hs_database hs_database_t;
+
+/**
+ * A type for errors returned by Hyperscan functions.
+ */
+typedef int hs_error_t;
+
+/**
+ * Free a compiled pattern database.
+ *
+ * The free callback set by @ref hs_set_database_allocator() (or @ref
+ * hs_set_allocator()) will be used by this function.
+ *
+ * @param db
+ * A compiled pattern database. NULL may also be safely provided, in which
+ * case the function does nothing.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_free_database(hs_database_t *db);
+
+/**
+ * Serialize a pattern database to a stream of bytes.
+ *
+ * The allocator callback set by @ref hs_set_misc_allocator() (or @ref
+ * hs_set_allocator()) will be used by this function.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param bytes
+ * On success, a pointer to an array of bytes will be returned here.
+ * These bytes can be subsequently relocated or written to disk. The
+ * caller is responsible for freeing this block.
+ *
+ * @param length
+ * On success, the number of bytes in the generated byte array will be
+ * returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_NOMEM if the byte array cannot be
+ * allocated, other values may be returned if errors are detected.
+ */
+hs_error_t avx512_hs_serialize_database(const hs_database_t *db, char **bytes,
+ size_t *length);
+
+/**
+ * Reconstruct a pattern database from a stream of bytes previously generated
+ * by @ref hs_serialize_database().
+ *
+ * This function will allocate sufficient space for the database using the
+ * allocator set with @ref hs_set_database_allocator() (or @ref
+ * hs_set_allocator()); to use a pre-allocated region of memory, use the @ref
+ * hs_deserialize_database_at() function.
+ *
+ * @param bytes
+ * A byte array generated by @ref hs_serialize_database() representing a
+ * compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param db
+ * On success, a pointer to a newly allocated @ref hs_database_t will be
+ * returned here. This database can then be used for scanning, and
+ * eventually freed by the caller using @ref hs_free_database().
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_deserialize_database(const char *bytes,
+ const size_t length,
+ hs_database_t **db);
+
+/**
+ * Reconstruct a pattern database from a stream of bytes previously generated
+ * by @ref hs_serialize_database() at a given memory location.
+ *
+ * This function (unlike @ref hs_deserialize_database()) will write the
+ * reconstructed database to the memory location given in the @p db parameter.
+ * The amount of space required at this location can be determined with the
+ * @ref hs_serialized_database_size() function.
+ *
+ * @param bytes
+ * A byte array generated by @ref hs_serialize_database() representing a
+ * compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param db
+ * Pointer to an 8-byte aligned block of memory of sufficient size to hold
+ * the deserialized database. On success, the reconstructed database will
+ * be written to this location. This database can then be used for pattern
+ * matching. The user is responsible for freeing this memory; the @ref
+ * hs_free_database() call should not be used.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_deserialize_database_at(const char *bytes,
+ const size_t length,
+ hs_database_t *db);
+
+/**
+ * Provides the size of the stream state allocated by a single stream opened
+ * against the given database.
+ *
+ * @param database
+ * Pointer to a compiled (streaming mode) pattern database.
+ *
+ * @param stream_size
+ * On success, the size in bytes of an individual stream opened against the
+ * given database is placed in this parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_stream_size(const hs_database_t *database,
+ size_t *stream_size);
+
+/**
+ * Provides the size of the given database in bytes.
+ *
+ * @param database
+ * Pointer to compiled pattern database.
+ *
+ * @param database_size
+ * On success, the size of the compiled database in bytes is placed in this
+ * parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_database_size(const hs_database_t *database,
+ size_t *database_size);
+
+/**
+ * Utility function for reporting the size that would be required by a
+ * database if it were deserialized.
+ *
+ * This can be used to allocate a shared memory region or other "special"
+ * allocation prior to deserializing with the @ref hs_deserialize_database_at()
+ * function.
+ *
+ * @param bytes
+ * Pointer to a byte array generated by @ref hs_serialize_database()
+ * representing a compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param deserialized_size
+ * On success, the size of the compiled database that would be generated
+ * by @ref hs_deserialize_database_at() is returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_serialized_database_size(const char *bytes,
+ const size_t length,
+ size_t *deserialized_size);
+
+/**
+ * Utility function providing information about a database.
+ *
+ * @param database
+ * Pointer to a compiled database.
+ *
+ * @param info
+ * On success, a string containing the version and platform information for
+ * the supplied database is placed in the parameter. The string is
+ * allocated using the allocator supplied in @ref hs_set_misc_allocator()
+ * (or malloc() if no allocator was set) and should be freed by the caller.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_database_info(const hs_database_t *database,
+ char **info);
+
+/**
+ * Utility function providing information about a serialized database.
+ *
+ * @param bytes
+ * Pointer to a serialized database.
+ *
+ * @param length
+ * Length in bytes of the serialized database.
+ *
+ * @param info
+ * On success, a string containing the version and platform information
+ * for the supplied serialized database is placed in the parameter. The
+ * string is allocated using the allocator supplied in @ref
+ * hs_set_misc_allocator() (or malloc() if no allocator was set) and
+ * should be freed by the caller.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_serialized_database_info(const char *bytes,
+ size_t length, char **info);
+
+/**
+ * The type of the callback function that will be used by Hyperscan to allocate
+ * more memory at runtime as required, for example in @ref hs_open_stream() to
+ * allocate stream state.
+ *
+ * If Hyperscan is to be used in a multi-threaded, or similarly concurrent
+ * environment, the allocation function will need to be re-entrant, or
+ * similarly safe for concurrent use.
+ *
+ * @param size
+ * The number of bytes to allocate.
+ * @return
+ * A pointer to the region of memory allocated, or NULL on error.
+ */
+typedef void *(HS_CDECL *hs_alloc_t)(size_t size);
+
+/**
+ * The type of the callback function that will be used by Hyperscan to free
+ * memory regions previously allocated using the @ref hs_alloc_t function.
+ *
+ * @param ptr
+ * The region of memory to be freed.
+ */
+typedef void (HS_CDECL *hs_free_t)(void *ptr);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating
+ * memory at runtime for stream state, scratch space, database bytecode,
+ * and various other data structures returned by the Hyperscan API.
+ *
+ * The function is equivalent to calling @ref hs_set_stream_allocator(),
+ * @ref hs_set_scratch_allocator(), @ref hs_set_database_allocator() and
+ * @ref hs_set_misc_allocator() with the provided parameters.
+ *
+ * This call will override any previous allocators that have been set.
+ *
+ * Note: there is no way to change the allocator used for temporary objects
+ * created during the various compile calls (@ref hs_compile(), @ref
+ * hs_compile_multi(), @ref hs_compile_ext_multi()).
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_set_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for database bytecode produced by the compile calls (@ref hs_compile(), @ref
+ * hs_compile_multi(), @ref hs_compile_ext_multi()) and by database
+ * deserialization (@ref hs_deserialize_database()).
+ *
+ * If no database allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous database allocators that have been set.
+ *
+ * Note: the database allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * Note: there is no way to change how temporary objects created during the
+ * various compile calls (@ref hs_compile(), @ref hs_compile_multi(), @ref
+ * hs_compile_ext_multi()) are allocated.
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_set_database_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for items returned by the Hyperscan API such as @ref hs_compile_error_t, @ref
+ * hs_expr_info_t and serialized databases.
+ *
+ * If no misc allocation functions are set, or if NULL is used in place of both
+ * parameters, then memory allocation will default to standard methods (such as
+ * the system malloc() and free() calls).
+ *
+ * This call will override any previous misc allocators that have been set.
+ *
+ * Note: the misc allocator may also be set by calling @ref hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_set_misc_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for scratch space by @ref hs_alloc_scratch() and @ref hs_clone_scratch().
+ *
+ * If no scratch allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous scratch allocators that have been set.
+ *
+ * Note: the scratch allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_set_scratch_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for stream state by @ref hs_open_stream().
+ *
+ * If no stream allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous stream allocators that have been set.
+ *
+ * Note: the stream allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_set_stream_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Utility function for identifying this release version.
+ *
+ * @return
+ * A string containing the version number of this release build and the
+ * date of the build. It is allocated statically, so it does not need to
+ * be freed by the caller.
+ */
+const char * avx512_hs_version(void);
+
+/**
+ * Utility function to test the current system architecture.
+ *
+ * Hyperscan requires the Supplemental Streaming SIMD Extensions 3 instruction
+ * set. This function can be called on any x86 platform to determine if the
+ * system provides the required instruction set.
+ *
+ * This function does not test for more advanced features if Hyperscan has
+ * been built for a more specific architecture, for example the AVX2
+ * instruction set.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_ARCH_ERROR if the system does not
+ * support Hyperscan.
+ */
+hs_error_t avx512_hs_valid_platform(void);
+
+/**
+ * @defgroup HS_ERROR hs_error_t values
+ *
+ * @{
+ */
+
+/**
+ * The engine completed normally.
+ */
+#define HS_SUCCESS 0
+
+/**
+ * A parameter passed to this function was invalid.
+ *
+ * This error is only returned in cases where the function can detect an
+ * invalid parameter -- it cannot be relied upon to detect (for example)
+ * pointers to freed memory or other invalid data.
+ */
+#define HS_INVALID (-1)
+
+/**
+ * A memory allocation failed.
+ */
+#define HS_NOMEM (-2)
+
+/**
+ * The engine was terminated by callback.
+ *
+ * This return value indicates that the target buffer was partially scanned,
+ * but that the callback function requested that scanning cease after a match
+ * was located.
+ */
+#define HS_SCAN_TERMINATED (-3)
+
+/**
+ * The pattern compiler failed, and the @ref hs_compile_error_t should be
+ * inspected for more detail.
+ */
+#define HS_COMPILER_ERROR (-4)
+
+/**
+ * The given database was built for a different version of Hyperscan.
+ */
+#define HS_DB_VERSION_ERROR (-5)
+
+/**
+ * The given database was built for a different platform (i.e., CPU type).
+ */
+#define HS_DB_PLATFORM_ERROR (-6)
+
+/**
+ * The given database was built for a different mode of operation. This error
+ * is returned when streaming calls are used with a block or vectored database
+ * and vice versa.
+ */
+#define HS_DB_MODE_ERROR (-7)
+
+/**
+ * A parameter passed to this function was not correctly aligned.
+ */
+#define HS_BAD_ALIGN (-8)
+
+/**
+ * The memory allocator (either malloc() or the allocator set with @ref
+ * hs_set_allocator()) did not correctly return memory suitably aligned for the
+ * largest representable data type on this platform.
+ */
+#define HS_BAD_ALLOC (-9)
+
+/**
+ * The scratch region was already in use.
+ *
+ * This error is returned when Hyperscan is able to detect that the scratch
+ * region given is already in use by another Hyperscan API call.
+ *
+ * A separate scratch region, allocated with @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch(), is required for every concurrent caller of the Hyperscan
+ * API.
+ *
+ * For example, this error might be returned when @ref hs_scan() has been
+ * called inside a callback delivered by a currently-executing @ref hs_scan()
+ * call using the same scratch region.
+ *
+ * Note: Not all concurrent uses of scratch regions may be detected. This error
+ * is intended as a best-effort debugging tool, not a guarantee.
+ */
+#define HS_SCRATCH_IN_USE (-10)
+
+/**
+ * Unsupported CPU architecture.
+ *
+ * This error is returned when Hyperscan is able to detect that the current
+ * system does not support the required instruction set.
+ *
+ * At a minimum, Hyperscan requires Supplemental Streaming SIMD Extensions 3
+ * (SSSE3).
+ */
+#define HS_ARCH_ERROR (-11)
+
+/**
+ * Provided buffer was too small.
+ *
+ * This error indicates that there was insufficient space in the buffer. The
+ * call should be repeated with a larger provided buffer.
+ *
+ * Note: in this situation, it is normal for the amount of space required to be
+ * returned in the same manner as the used space would have been returned if the
+ * call was successful.
+ */
+#define HS_INSUFFICIENT_SPACE (-12)
+
+/**
+ * Unexpected internal error.
+ *
+ * This error indicates unexpected matching behavior. This could be related
+ * to invalid usage of stream or scratch space, or to invalid memory
+ * operations by users.
+ *
+ */
+#define HS_UNKNOWN_ERROR (-13)
+
+/** @} */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* HS_AVX512_COMMON_H */
diff --git a/contrib/libs/hyperscan/runtime_avx512/hs_runtime.h b/contrib/libs/hyperscan/runtime_avx512/hs_runtime.h
index 8fcb5d48f7..843cde7dd3 100644
--- a/contrib/libs/hyperscan/runtime_avx512/hs_runtime.h
+++ b/contrib/libs/hyperscan/runtime_avx512/hs_runtime.h
@@ -1,621 +1,621 @@
-/*
- * Copyright (c) 2015-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HS_AVX512_RUNTIME_H
-#define HS_AVX512_RUNTIME_H
-
-#include <stdlib.h>
-
-/**
- * @file
- * @brief The Hyperscan runtime API definition.
- *
- * Hyperscan is a high speed regular expression engine.
- *
- * This header contains functions for using compiled Hyperscan databases for
- * scanning data at runtime.
- */
-
-#include "hs_common.h"
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/**
- * Definition of the stream identifier type.
- */
-struct hs_stream;
-
-/**
- * The stream identifier returned by @ref hs_open_stream().
- */
-typedef struct hs_stream hs_stream_t;
-
-struct hs_scratch;
-
-/**
- * A Hyperscan scratch space.
- */
-typedef struct hs_scratch hs_scratch_t;
-
-/**
- * Definition of the match event callback function type.
- *
- * A callback function matching the defined type must be provided by the
- * application calling the @ref hs_scan(), @ref hs_scan_vector() or @ref
- * hs_scan_stream() functions (or other streaming calls which can produce
- * matches).
- *
- * This callback function will be invoked whenever a match is located in the
- * target data during the execution of a scan. The details of the match are
- * passed in as parameters to the callback function, and the callback function
- * should return a value indicating whether or not matching should continue on
- * the target data. If no callbacks are desired from a scan call, NULL may be
- * provided in order to suppress match production.
- *
- * This callback function should not attempt to call Hyperscan API functions on
- * the same stream nor should it attempt to reuse the scratch space allocated
- * for the API calls that caused it to be triggered. Making another call to the
- * Hyperscan library with completely independent parameters should work (for
- * example, scanning a different database in a new stream and with new scratch
- * space), but reusing data structures like stream state and/or scratch space
- * will produce undefined behavior.
- *
- * @param id
- * The ID number of the expression that matched. If the expression was a
- * single expression compiled with @ref hs_compile(), this value will be
- * zero.
- *
- * @param from
- * - If a start of match flag is enabled for the current pattern, this
- * argument will be set to the start of match for the pattern assuming
- * that that start of match value lies within the current 'start of match
- * horizon' chosen by one of the SOM_HORIZON mode flags.
-
- * - If the start of match value lies outside this horizon (possible only
- * when the SOM_HORIZON value is not @ref HS_MODE_SOM_HORIZON_LARGE),
- * the @p from value will be set to @ref HS_OFFSET_PAST_HORIZON.
- *
- * - This argument will be set to zero if the Start of Match flag is not
- * enabled for the given pattern.
- *
- * @param to
- * The offset after the last byte that matches the expression.
- *
- * @param flags
- * This is provided for future use and is unused at present.
- *
- * @param context
- * The pointer supplied by the user to the @ref hs_scan(), @ref
- * hs_scan_vector() or @ref hs_scan_stream() function.
- *
- * @return
- * Non-zero if the matching should cease, else zero. If scanning is
- * performed in streaming mode and a non-zero value is returned, any
- * subsequent calls to @ref hs_scan_stream() for that stream will
- * immediately return with @ref HS_SCAN_TERMINATED.
- */
-typedef int (HS_CDECL *match_event_handler)(unsigned int id,
- unsigned long long from,
- unsigned long long to,
- unsigned int flags,
- void *context);
-
-/**
- * Open and initialise a stream.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param stream
- * On success, a pointer to the generated @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_open_stream(const hs_database_t *db, unsigned int flags,
- hs_stream_t **stream);
-
-/**
- * Write data to be scanned to the opened stream.
- *
- * This is the function call in which the actual pattern matching takes place
- * as data is written to the stream. Matches will be returned via the @ref
- * match_event_handler callback supplied.
- *
- * @param id
- * The stream ID (returned by @ref hs_open_stream()) to which the data
- * will be written.
- *
- * @param data
- * Pointer to the data to be scanned.
- *
- * @param length
- * The number of bytes to scan.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch().
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param ctxt
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
- * match callback indicated that scanning should stop; other values on
- * error.
- */
-hs_error_t avx512_hs_scan_stream(hs_stream_t *id, const char *data,
- unsigned int length, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *ctxt);
-
-/**
- * Close a stream.
- *
- * This function completes matching on the given stream and frees the memory
- * associated with the stream state. After this call, the stream pointed to by
- * @p id is invalid and can no longer be used. To reuse the stream state after
- * completion, rather than closing it, the @ref hs_reset_stream function can be
- * used.
- *
- * This function must be called for any stream created with @ref
- * hs_open_stream(), even if scanning has been terminated by a non-zero return
- * from the match callback function.
- *
- * Note: This operation may result in matches being returned (via calls to the
- * match event callback) for expressions anchored to the end of the data stream
- * (for example, via the use of the `$` meta-character). If these matches are
- * not desired, NULL may be provided as the @ref match_event_handler callback.
- *
- * If NULL is provided as the @ref match_event_handler callback, it is
- * permissible to provide a NULL scratch.
- *
- * @param id
- * The stream ID returned by @ref hs_open_stream().
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param ctxt
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * Returns @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_close_stream(hs_stream_t *id, hs_scratch_t *scratch,
- match_event_handler onEvent, void *ctxt);
-
-/**
- * Reset a stream to an initial state.
- *
- * Conceptually, this is equivalent to performing @ref hs_close_stream() on the
- * given stream, followed by a @ref hs_open_stream(). This new stream replaces
- * the original stream in memory, avoiding the overhead of freeing the old
- * stream and allocating the new one.
- *
- * Note: This operation may result in matches being returned (via calls to the
- * match event callback) for expressions anchored to the end of the original
- * data stream (for example, via the use of the `$` meta-character). If these
- * matches are not desired, NULL may be provided as the @ref match_event_handler
- * callback.
- *
- * Note: the stream will also be tied to the same database.
- *
- * @param id
- * The stream (as created by @ref hs_open_stream()) to be replaced.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_reset_stream(hs_stream_t *id, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *context);
-
-/**
- * Duplicate the given stream. The new stream will have the same state as the
- * original, including the current stream offset.
- *
- * @param to_id
- * On success, a pointer to the new, copied @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param from_id
- * The stream (as created by @ref hs_open_stream()) to be copied.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_copy_stream(hs_stream_t **to_id,
- const hs_stream_t *from_id);
-
-/**
- * Duplicate the given 'from' stream state onto the 'to' stream. The 'to' stream
- * will first be reset (reporting any EOD matches if a non-NULL @p onEvent
- * callback handler is provided).
- *
- * Note: the 'to' stream and the 'from' stream must be open against the same
- * database.
- *
- * @param to_id
- * On success, a pointer to the new, copied @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param from_id
- * The stream (as created by @ref hs_open_stream()) to be copied.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_reset_and_copy_stream(hs_stream_t *to_id,
- const hs_stream_t *from_id,
- hs_scratch_t *scratch,
- match_event_handler onEvent,
- void *context);
-
-/**
- * Creates a compressed representation of the provided stream in the buffer
- * provided. This compressed representation can be converted back into a stream
- * state by using @ref hs_expand_stream() or @ref hs_reset_and_expand_stream().
- * The size of the compressed representation will be placed into @p used_space.
- *
- * If there is not sufficient space in the buffer to hold the compressed
- * representation, @ref HS_INSUFFICIENT_SPACE will be returned and @p used_space
- * will be populated with the amount of space required.
- *
- * Note: this function does not close the provided stream; you may continue to
- * use the stream or free it with @ref hs_close_stream().
- *
- * @param stream
- * The stream (as created by @ref hs_open_stream()) to be compressed.
- *
- * @param buf
- * Buffer to write the compressed representation into. Note: if the call is
- * just being used to determine the amount of space required, it is allowed
- * to pass NULL here and @p buf_space as 0.
- *
- * @param buf_space
- * The number of bytes in @p buf. If buf_space is too small, the call will
- * fail with @ref HS_INSUFFICIENT_SPACE.
- *
- * @param used_space
- * Pointer to where the amount of used space will be written to. The used
- * buffer space is always less than or equal to @p buf_space. If the call
- * fails with @ref HS_INSUFFICIENT_SPACE, this pointer will be used to
- * write out the amount of buffer space required.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_INSUFFICIENT_SPACE if the provided
- * buffer is too small.
- */
-hs_error_t avx512_hs_compress_stream(const hs_stream_t *stream, char *buf,
- size_t buf_space, size_t *used_space);
-
-/**
- * Decompresses a compressed representation created by @ref hs_compress_stream()
- * into a new stream.
- *
- * Note: @p buf must correspond to a complete compressed representation created
- * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
- * not always possible to detect misuse of this API and behaviour is undefined
- * if these properties are not satisfied.
- *
- * @param db
- * The compiled pattern database that the compressed stream was opened
- * against.
- *
- * @param stream
- * On success, a pointer to the expanded @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param buf
- * A compressed representation of a stream. These compressed forms are
- * created by @ref hs_compress_stream().
- *
- * @param buf_size
- * The size in bytes of the compressed representation.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_expand_stream(const hs_database_t *db,
- hs_stream_t **stream, const char *buf,
- size_t buf_size);
-
-/**
- * Decompresses a compressed representation created by @ref hs_compress_stream()
- * on top of the 'to' stream. The 'to' stream will first be reset (reporting
- * any EOD matches if a non-NULL @p onEvent callback handler is provided).
- *
- * Note: the 'to' stream must be opened against the same database as the
- * compressed stream.
- *
- * Note: @p buf must correspond to a complete compressed representation created
- * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
- * not always possible to detect misuse of this API and behaviour is undefined
- * if these properties are not satisfied.
- *
- * @param to_stream
- * A pointer to a valid stream state. A pointer to the expanded @ref
- * hs_stream_t will be returned; NULL on failure.
- *
- * @param buf
- * A compressed representation of a stream. These compressed forms are
- * created by @ref hs_compress_stream().
- *
- * @param buf_size
- * The size in bytes of the compressed representation.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_reset_and_expand_stream(hs_stream_t *to_stream,
- const char *buf, size_t buf_size,
- hs_scratch_t *scratch,
- match_event_handler onEvent,
- void *context);
-
-/**
- * The block (non-streaming) regular expression scanner.
- *
- * This is the function call in which the actual pattern matching takes place
- * for block-mode pattern databases.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param data
- * Pointer to the data to be scanned.
- *
- * @param length
- * The number of bytes to scan.
- *
- * @param flags
- * Flags modifying the behaviour of this function. This parameter is
- * provided for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() for this
- * database.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
- * match callback indicated that scanning should stop; other values on
- * error.
- */
-hs_error_t avx512_hs_scan(const hs_database_t *db, const char *data,
- unsigned int length, unsigned int flags,
- hs_scratch_t *scratch, match_event_handler onEvent,
- void *context);
-
-/**
- * The vectored regular expression scanner.
- *
- * This is the function call in which the actual pattern matching takes place
- * for vectoring-mode pattern databases.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param data
- * An array of pointers to the data blocks to be scanned.
- *
- * @param length
- * An array of lengths (in bytes) of each data block to scan.
- *
- * @param count
- *       Number of data blocks to scan. This should correspond to the size
- *       of the @p data and @p length arrays.
- *
- * @param flags
- * Flags modifying the behaviour of this function. This parameter is
- * provided for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() for
- * this database.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the match
- * callback indicated that scanning should stop; other values on error.
- */
-hs_error_t avx512_hs_scan_vector(const hs_database_t *db,
- const char *const *data,
- const unsigned int *length,
- unsigned int count, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *context);
-
-/**
- * Allocate a "scratch" space for use by Hyperscan.
- *
- * This is required for runtime use, and one scratch space per thread, or
- * concurrent caller, is needed. Any allocator callback set by @ref
- * hs_set_scratch_allocator() or @ref hs_set_allocator() will be used by this
- * function.
- *
- * @param db
- * The database, as produced by @ref hs_compile().
- *
- * @param scratch
- * On first allocation, a pointer to NULL should be provided so a new
- * scratch can be allocated. If a scratch block has been previously
- * allocated, then a pointer to it should be passed back in to see if it
- * is valid for this database block. If a new scratch block is required,
- * the original will be freed and the new one returned, otherwise the
- * previous scratch block will be returned. On success, the scratch block
- * will be suitable for use with the provided database in addition to any
- *       databases that the original scratch space was suitable for.
- *
- * @return
- * @ref HS_SUCCESS on successful allocation; @ref HS_NOMEM if the
- * allocation fails. Other errors may be returned if invalid parameters
- * are specified.
- */
-hs_error_t avx512_hs_alloc_scratch(const hs_database_t *db,
- hs_scratch_t **scratch);
-
-/**
- * Allocate a scratch space that is a clone of an existing scratch space.
- *
- * This is useful when multiple concurrent threads will be using the same set
- * of compiled databases, and another scratch space is required. Any allocator
- * callback set by @ref hs_set_scratch_allocator() or @ref hs_set_allocator()
- * will be used by this function.
- *
- * @param src
- * The existing @ref hs_scratch_t to be cloned.
- *
- * @param dest
- * A pointer to the new scratch space will be returned here.
- *
- * @return
- * @ref HS_SUCCESS on success; @ref HS_NOMEM if the allocation fails.
- * Other errors may be returned if invalid parameters are specified.
- */
-hs_error_t avx512_hs_clone_scratch(const hs_scratch_t *src,
- hs_scratch_t **dest);
-
-/**
- * Provides the size of the given scratch space.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch().
- *
- * @param scratch_size
- * On success, the size of the scratch space in bytes is placed in this
- * parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_scratch_size(const hs_scratch_t *scratch,
- size_t *scratch_size);
-
-/**
- * Free a scratch block previously allocated by @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch().
- *
- * The free callback set by @ref hs_set_scratch_allocator() or @ref
- * hs_set_allocator() will be used by this function.
- *
- * @param scratch
- * The scratch block to be freed. NULL may also be safely provided.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t avx512_hs_free_scratch(hs_scratch_t *scratch);
-
-/**
- * Callback 'from' return value, indicating that the start of this match was
- * too early to be tracked with the requested SOM_HORIZON precision.
- */
-#define HS_OFFSET_PAST_HORIZON (~0ULL)
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* HS_AVX512_RUNTIME_H */
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HS_AVX512_RUNTIME_H
+#define HS_AVX512_RUNTIME_H
+
+#include <stdlib.h>
+
+/**
+ * @file
+ * @brief The Hyperscan runtime API definition.
+ *
+ * Hyperscan is a high speed regular expression engine.
+ *
+ * This header contains functions for using compiled Hyperscan databases for
+ * scanning data at runtime.
+ */
+
+#include "hs_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Definition of the stream identifier type.
+ */
+struct hs_stream;
+
+/**
+ * The stream identifier returned by @ref hs_open_stream().
+ */
+typedef struct hs_stream hs_stream_t;
+
+struct hs_scratch;
+
+/**
+ * A Hyperscan scratch space.
+ */
+typedef struct hs_scratch hs_scratch_t;
+
+/**
+ * Definition of the match event callback function type.
+ *
+ * A callback function matching the defined type must be provided by the
+ * application calling the @ref hs_scan(), @ref hs_scan_vector() or @ref
+ * hs_scan_stream() functions (or other streaming calls which can produce
+ * matches).
+ *
+ * This callback function will be invoked whenever a match is located in the
+ * target data during the execution of a scan. The details of the match are
+ * passed in as parameters to the callback function, and the callback function
+ * should return a value indicating whether or not matching should continue on
+ * the target data. If no callbacks are desired from a scan call, NULL may be
+ * provided in order to suppress match production.
+ *
+ * This callback function should not attempt to call Hyperscan API functions on
+ * the same stream nor should it attempt to reuse the scratch space allocated
+ * for the API calls that caused it to be triggered. Making another call to the
+ * Hyperscan library with completely independent parameters should work (for
+ * example, scanning a different database in a new stream and with new scratch
+ * space), but reusing data structures like stream state and/or scratch space
+ * will produce undefined behavior.
+ *
+ * @param id
+ * The ID number of the expression that matched. If the expression was a
+ * single expression compiled with @ref hs_compile(), this value will be
+ * zero.
+ *
+ * @param from
+ * - If a start of match flag is enabled for the current pattern, this
+ * argument will be set to the start of match for the pattern assuming
+ *       that the start of match value lies within the current 'start of match
+ * horizon' chosen by one of the SOM_HORIZON mode flags.
+ *
+ * - If the start of match value lies outside this horizon (possible only
+ * when the SOM_HORIZON value is not @ref HS_MODE_SOM_HORIZON_LARGE),
+ * the @p from value will be set to @ref HS_OFFSET_PAST_HORIZON.
+ *
+ * - This argument will be set to zero if the Start of Match flag is not
+ * enabled for the given pattern.
+ *
+ * @param to
+ * The offset after the last byte that matches the expression.
+ *
+ * @param flags
+ * This is provided for future use and is unused at present.
+ *
+ * @param context
+ * The pointer supplied by the user to the @ref hs_scan(), @ref
+ * hs_scan_vector() or @ref hs_scan_stream() function.
+ *
+ * @return
+ * Non-zero if the matching should cease, else zero. If scanning is
+ * performed in streaming mode and a non-zero value is returned, any
+ * subsequent calls to @ref hs_scan_stream() for that stream will
+ * immediately return with @ref HS_SCAN_TERMINATED.
+ */
+typedef int (HS_CDECL *match_event_handler)(unsigned int id,
+ unsigned long long from,
+ unsigned long long to,
+ unsigned int flags,
+ void *context);
+
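+/*
+ * Editor's note: a minimal sketch of a conforming callback, added for
+ * illustration only and not part of the original header. It assumes the
+ * caller passes a pointer to a counter as the context argument.
+ *
+ *     static int HS_CDECL on_match(unsigned int id, unsigned long long from,
+ *                                  unsigned long long to, unsigned int flags,
+ *                                  void *context) {
+ *         unsigned long long *count = context; // user-supplied counter
+ *         ++*count;
+ *         return 0; // zero continues the scan; non-zero terminates it
+ *     }
+ */
+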
+/**
+ * Open and initialise a stream.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param stream
+ * On success, a pointer to the generated @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_open_stream(const hs_database_t *db, unsigned int flags,
+ hs_stream_t **stream);
+
+/**
+ * Write data to be scanned to the opened stream.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * as data is written to the stream. Matches will be returned via the @ref
+ * match_event_handler callback supplied.
+ *
+ * @param id
+ * The stream ID (returned by @ref hs_open_stream()) to which the data
+ * will be written.
+ *
+ * @param data
+ * Pointer to the data to be scanned.
+ *
+ * @param length
+ * The number of bytes to scan.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch().
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param ctxt
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
+ * match callback indicated that scanning should stop; other values on
+ * error.
+ */
+hs_error_t avx512_hs_scan_stream(hs_stream_t *id, const char *data,
+ unsigned int length, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *ctxt);
+
+/**
+ * Close a stream.
+ *
+ * This function completes matching on the given stream and frees the memory
+ * associated with the stream state. After this call, the stream pointed to by
+ * @p id is invalid and can no longer be used. To reuse the stream state after
+ * completion, rather than closing it, the @ref hs_reset_stream function can be
+ * used.
+ *
+ * This function must be called for any stream created with @ref
+ * hs_open_stream(), even if scanning has been terminated by a non-zero return
+ * from the match callback function.
+ *
+ * Note: This operation may result in matches being returned (via calls to the
+ * match event callback) for expressions anchored to the end of the data stream
+ * (for example, via the use of the `$` meta-character). If these matches are
+ * not desired, NULL may be provided as the @ref match_event_handler callback.
+ *
+ * If NULL is provided as the @ref match_event_handler callback, it is
+ * permissible to provide a NULL scratch.
+ *
+ * @param id
+ * The stream ID returned by @ref hs_open_stream().
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param ctxt
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_close_stream(hs_stream_t *id, hs_scratch_t *scratch,
+ match_event_handler onEvent, void *ctxt);
+
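+/*
+ * Editor's note: an illustrative streaming lifecycle, not part of the
+ * original header. `db` is assumed to be a streaming-mode database,
+ * `scratch` a matching scratch space, and `on_match`/`ctx` a hypothetical
+ * callback and context.
+ *
+ *     hs_stream_t *stream = NULL;
+ *     if (avx512_hs_open_stream(db, 0, &stream) != HS_SUCCESS) {
+ *         return; // handle the error
+ *     }
+ *     // Feed the data in chunks; matches may span chunk boundaries.
+ *     avx512_hs_scan_stream(stream, chunk1, len1, 0, scratch, on_match, ctx);
+ *     avx512_hs_scan_stream(stream, chunk2, len2, 0, scratch, on_match, ctx);
+ *     // Always close the stream, even after HS_SCAN_TERMINATED; this may
+ *     // report end-of-data matches through the callback.
+ *     avx512_hs_close_stream(stream, scratch, on_match, ctx);
+ */
+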
+/**
+ * Reset a stream to an initial state.
+ *
+ * Conceptually, this is equivalent to performing @ref hs_close_stream() on the
+ * given stream, followed by a @ref hs_open_stream(). This new stream replaces
+ * the original stream in memory, avoiding the overhead of freeing the old
+ * stream and allocating the new one.
+ *
+ * Note: This operation may result in matches being returned (via calls to the
+ * match event callback) for expressions anchored to the end of the original
+ * data stream (for example, via the use of the `$` meta-character). If these
+ * matches are not desired, NULL may be provided as the @ref match_event_handler
+ * callback.
+ *
+ * Note: the stream will also be tied to the same database.
+ *
+ * @param id
+ * The stream (as created by @ref hs_open_stream()) to be replaced.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_reset_stream(hs_stream_t *id, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *context);
+
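+/*
+ * Editor's note: an illustrative reuse pattern, not part of the original
+ * header. Resetting avoids a close/open pair when one stream object scans
+ * many independent units of work:
+ *
+ *     // Passing NULL for both scratch and onEvent is permitted and
+ *     // suppresses any end-of-data match reports from the reset.
+ *     avx512_hs_reset_stream(stream, 0, NULL, NULL, NULL);
+ */
+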
+/**
+ * Duplicate the given stream. The new stream will have the same state as the
+ * original, including the current stream offset.
+ *
+ * @param to_id
+ * On success, a pointer to the new, copied @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param from_id
+ * The stream (as created by @ref hs_open_stream()) to be copied.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_copy_stream(hs_stream_t **to_id,
+ const hs_stream_t *from_id);
+
+/**
+ * Duplicate the given 'from' stream state onto the 'to' stream. The 'to' stream
+ * will first be reset (reporting any EOD matches if a non-NULL @p onEvent
+ * callback handler is provided).
+ *
+ * Note: the 'to' stream and the 'from' stream must be open against the same
+ * database.
+ *
+ * @param to_id
+ * On success, a pointer to the new, copied @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param from_id
+ * The stream (as created by @ref hs_open_stream()) to be copied.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_reset_and_copy_stream(hs_stream_t *to_id,
+ const hs_stream_t *from_id,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent,
+ void *context);
+
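+/*
+ * Editor's note: an illustrative fork of stream state, not part of the
+ * original header. `existing` is a hypothetical stream already open
+ * against the same database as `stream`.
+ *
+ *     hs_stream_t *fork = NULL;
+ *     if (avx512_hs_copy_stream(&fork, stream) == HS_SUCCESS) {
+ *         // `fork` now scans independently from the same offset.
+ *     }
+ *     // Or overwrite an already-open stream in place, suppressing any
+ *     // end-of-data matches from the reset:
+ *     avx512_hs_reset_and_copy_stream(existing, stream, NULL, NULL, NULL);
+ */
+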
+/**
+ * Creates a compressed representation of the provided stream in the buffer
+ * provided. This compressed representation can be converted back into a stream
+ * state by using @ref hs_expand_stream() or @ref hs_reset_and_expand_stream().
+ * The size of the compressed representation will be placed into @p used_space.
+ *
+ * If there is not sufficient space in the buffer to hold the compressed
+ * representation, @ref HS_INSUFFICIENT_SPACE will be returned and @p used_space
+ * will be populated with the amount of space required.
+ *
+ * Note: this function does not close the provided stream; you may continue to
+ * use the stream or free it with @ref hs_close_stream().
+ *
+ * @param stream
+ * The stream (as created by @ref hs_open_stream()) to be compressed.
+ *
+ * @param buf
+ * Buffer to write the compressed representation into. Note: if the call is
+ * just being used to determine the amount of space required, it is allowed
+ * to pass NULL here and @p buf_space as 0.
+ *
+ * @param buf_space
+ * The number of bytes in @p buf. If buf_space is too small, the call will
+ * fail with @ref HS_INSUFFICIENT_SPACE.
+ *
+ * @param used_space
+ * Pointer to where the amount of used space will be written to. The used
+ * buffer space is always less than or equal to @p buf_space. If the call
+ * fails with @ref HS_INSUFFICIENT_SPACE, this pointer will be used to
+ * write out the amount of buffer space required.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_INSUFFICIENT_SPACE if the provided
+ * buffer is too small.
+ */
+hs_error_t avx512_hs_compress_stream(const hs_stream_t *stream, char *buf,
+ size_t buf_space, size_t *used_space);
+
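+/*
+ * Editor's note: a sketch of the two-call sizing pattern described above,
+ * not part of the original header.
+ *
+ *     size_t needed = 0;
+ *     // A NULL buffer with zero space just queries the size required.
+ *     hs_error_t err = avx512_hs_compress_stream(stream, NULL, 0, &needed);
+ *     if (err == HS_INSUFFICIENT_SPACE) {
+ *         char *buf = malloc(needed); // <stdlib.h> is included above
+ *         if (buf) {
+ *             err = avx512_hs_compress_stream(stream, buf, needed, &needed);
+ *             // on HS_SUCCESS, buf holds `needed` bytes of compressed state
+ *         }
+ *     }
+ */
+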
+/**
+ * Decompresses a compressed representation created by @ref hs_compress_stream()
+ * into a new stream.
+ *
+ * Note: @p buf must correspond to a complete compressed representation created
+ * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
+ * not always possible to detect misuse of this API and behaviour is undefined
+ * if these properties are not satisfied.
+ *
+ * @param db
+ * The compiled pattern database that the compressed stream was opened
+ * against.
+ *
+ * @param stream
+ * On success, a pointer to the expanded @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param buf
+ * A compressed representation of a stream. These compressed forms are
+ * created by @ref hs_compress_stream().
+ *
+ * @param buf_size
+ * The size in bytes of the compressed representation.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_expand_stream(const hs_database_t *db,
+ hs_stream_t **stream, const char *buf,
+ size_t buf_size);
+
+/**
+ * Decompresses a compressed representation created by @ref hs_compress_stream()
+ * on top of the 'to' stream. The 'to' stream will first be reset (reporting
+ * any EOD matches if a non-NULL @p onEvent callback handler is provided).
+ *
+ * Note: the 'to' stream must be opened against the same database as the
+ * compressed stream.
+ *
+ * Note: @p buf must correspond to a complete compressed representation created
+ * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
+ * not always possible to detect misuse of this API and behaviour is undefined
+ * if these properties are not satisfied.
+ *
+ * @param to_stream
+ * A pointer to a valid stream state. A pointer to the expanded @ref
+ * hs_stream_t will be returned; NULL on failure.
+ *
+ * @param buf
+ * A compressed representation of a stream. These compressed forms are
+ * created by @ref hs_compress_stream().
+ *
+ * @param buf_size
+ * The size in bytes of the compressed representation.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_reset_and_expand_stream(hs_stream_t *to_stream,
+ const char *buf, size_t buf_size,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent,
+ void *context);
+
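+/*
+ * Editor's note: an illustrative round trip for the compressed form, not
+ * part of the original header. `db` must be the database the compressed
+ * stream was opened against; `existing` is a hypothetical open stream.
+ *
+ *     hs_stream_t *expanded = NULL;
+ *     if (avx512_hs_expand_stream(db, &expanded, buf, needed) == HS_SUCCESS) {
+ *         // `expanded` resumes exactly where the source stream was
+ *         // when it was compressed.
+ *     }
+ *     // Or decompress onto an already-open stream, discarding its state:
+ *     avx512_hs_reset_and_expand_stream(existing, buf, needed, NULL, NULL,
+ *                                       NULL);
+ */
+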
+/**
+ * The block (non-streaming) regular expression scanner.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * for block-mode pattern databases.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param data
+ * Pointer to the data to be scanned.
+ *
+ * @param length
+ * The number of bytes to scan.
+ *
+ * @param flags
+ * Flags modifying the behaviour of this function. This parameter is
+ * provided for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() for this
+ * database.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
+ * match callback indicated that scanning should stop; other values on
+ * error.
+ */
+hs_error_t avx512_hs_scan(const hs_database_t *db, const char *data,
+ unsigned int length, unsigned int flags,
+ hs_scratch_t *scratch, match_event_handler onEvent,
+ void *context);
+
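+/*
+ * Editor's note: a minimal block-mode sketch, not part of the original
+ * header. `db` is assumed to be a block-mode database; strlen() assumes
+ * <string.h>.
+ *
+ *     const char *data = "some input";
+ *     hs_error_t err = avx512_hs_scan(db, data, (unsigned int)strlen(data),
+ *                                     0, scratch, on_match, ctx);
+ *     if (err != HS_SUCCESS && err != HS_SCAN_TERMINATED) {
+ *         // handle the error
+ *     }
+ */
+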
+/**
+ * The vectored regular expression scanner.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * for vectoring-mode pattern databases.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param data
+ * An array of pointers to the data blocks to be scanned.
+ *
+ * @param length
+ * An array of lengths (in bytes) of each data block to scan.
+ *
+ * @param count
+ *       Number of data blocks to scan. This should correspond to the size
+ *       of the @p data and @p length arrays.
+ *
+ * @param flags
+ * Flags modifying the behaviour of this function. This parameter is
+ * provided for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() for
+ * this database.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the match
+ * callback indicated that scanning should stop; other values on error.
+ */
+hs_error_t avx512_hs_scan_vector(const hs_database_t *db,
+ const char *const *data,
+ const unsigned int *length,
+ unsigned int count, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *context);
+
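+/*
+ * Editor's note: an illustrative vectored scan over discontiguous blocks,
+ * not part of the original header; the block pointers and lengths are
+ * hypothetical. Both arrays must have `count` entries.
+ *
+ *     const char *blocks[2] = { header_bytes, body_bytes };
+ *     unsigned int lengths[2] = { header_len, body_len };
+ *     avx512_hs_scan_vector(db, blocks, lengths, 2, 0, scratch,
+ *                           on_match, ctx);
+ */
+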
+/**
+ * Allocate a "scratch" space for use by Hyperscan.
+ *
+ * This is required for runtime use, and one scratch space per thread, or
+ * concurrent caller, is needed. Any allocator callback set by @ref
+ * hs_set_scratch_allocator() or @ref hs_set_allocator() will be used by this
+ * function.
+ *
+ * @param db
+ * The database, as produced by @ref hs_compile().
+ *
+ * @param scratch
+ * On first allocation, a pointer to NULL should be provided so a new
+ * scratch can be allocated. If a scratch block has been previously
+ * allocated, then a pointer to it should be passed back in to see if it
+ * is valid for this database block. If a new scratch block is required,
+ * the original will be freed and the new one returned, otherwise the
+ * previous scratch block will be returned. On success, the scratch block
+ * will be suitable for use with the provided database in addition to any
+ *       databases that the original scratch space was suitable for.
+ *
+ * @return
+ * @ref HS_SUCCESS on successful allocation; @ref HS_NOMEM if the
+ * allocation fails. Other errors may be returned if invalid parameters
+ * are specified.
+ */
+hs_error_t avx512_hs_alloc_scratch(const hs_database_t *db,
+ hs_scratch_t **scratch);
+
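+/*
+ * Editor's note: a sketch of the grow-in-place pattern described above,
+ * not part of the original header. `db1` and `db2` are hypothetical
+ * compiled databases.
+ *
+ *     hs_scratch_t *scratch = NULL; // NULL on first allocation
+ *     if (avx512_hs_alloc_scratch(db1, &scratch) != HS_SUCCESS) {
+ *         return; // handle the error
+ *     }
+ *     // Passing the same pointer back resizes it if db2 needs more room;
+ *     // the result remains valid for both databases.
+ *     if (avx512_hs_alloc_scratch(db2, &scratch) != HS_SUCCESS) {
+ *         return; // handle the error
+ *     }
+ */
+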
+/**
+ * Allocate a scratch space that is a clone of an existing scratch space.
+ *
+ * This is useful when multiple concurrent threads will be using the same set
+ * of compiled databases, and another scratch space is required. Any allocator
+ * callback set by @ref hs_set_scratch_allocator() or @ref hs_set_allocator()
+ * will be used by this function.
+ *
+ * @param src
+ * The existing @ref hs_scratch_t to be cloned.
+ *
+ * @param dest
+ * A pointer to the new scratch space will be returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success; @ref HS_NOMEM if the allocation fails.
+ * Other errors may be returned if invalid parameters are specified.
+ */
+hs_error_t avx512_hs_clone_scratch(const hs_scratch_t *src,
+ hs_scratch_t **dest);
+
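+/*
+ * Editor's note: an illustrative per-thread clone, not part of the
+ * original header. A scratch space must not be shared by concurrent
+ * scans, so each extra thread clones its own.
+ *
+ *     hs_scratch_t *thread_scratch = NULL;
+ *     if (avx512_hs_clone_scratch(scratch, &thread_scratch) == HS_SUCCESS) {
+ *         // hand thread_scratch to the new thread; release it later
+ *         // with avx512_hs_free_scratch()
+ *     }
+ */
+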
+/**
+ * Provides the size of the given scratch space.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch().
+ *
+ * @param scratch_size
+ * On success, the size of the scratch space in bytes is placed in this
+ * parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_scratch_size(const hs_scratch_t *scratch,
+ size_t *scratch_size);
+
+/**
+ * Free a scratch block previously allocated by @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch().
+ *
+ * The free callback set by @ref hs_set_scratch_allocator() or @ref
+ * hs_set_allocator() will be used by this function.
+ *
+ * @param scratch
+ * The scratch block to be freed. NULL may also be safely provided.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t avx512_hs_free_scratch(hs_scratch_t *scratch);
+
+/**
+ * Callback 'from' return value, indicating that the start of this match was
+ * too early to be tracked with the requested SOM_HORIZON precision.
+ */
+#define HS_OFFSET_PAST_HORIZON (~0ULL)
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* HS_AVX512_RUNTIME_H */
diff --git a/contrib/libs/hyperscan/runtime_avx512/ya.make b/contrib/libs/hyperscan/runtime_avx512/ya.make
index 36c3aff12a..5602b274f8 100644
--- a/contrib/libs/hyperscan/runtime_avx512/ya.make
+++ b/contrib/libs/hyperscan/runtime_avx512/ya.make
@@ -1,500 +1,500 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(
- galtsev
- g:antiinfra
- g:cpp-contrib
- g:yql
-)
-
-LICENSE(BSD-3-Clause)
-
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-ADDINCL(
- contrib/libs/hyperscan
- contrib/libs/hyperscan/src
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_RUNTIME()
-
-CFLAGS(
- ${SSE41_CFLAGS}
- -DHAVE_SSE41
- ${SSE42_CFLAGS}
- -DHAVE_SSE42
- -DHAVE_POPCOUNT_INSTR
- ${POPCNT_CFLAGS}
- ${AVX_CFLAGS}
- -DHAVE_AVX
- ${AVX2_CFLAGS}
- -DHAVE_AVX2
- ${AVX512_CFLAGS}
- -DHAVE_AVX512
- -DCrc32c_ComputeBuf=avx512_Crc32c_ComputeBuf
- -DblockInitSufPQ=avx512_blockInitSufPQ
- -Dcompress_stream=avx512_compress_stream
- -Dcpuid_flags=avx512_cpuid_flags
- -Dcpuid_tune=avx512_cpuid_tune
- -DdbIsValid=avx512_dbIsValid
- -DdoAccel128=avx512_doAccel128
- -DdoAccel256=avx512_doAccel256
- -DdoAccel32=avx512_doAccel32
- -DdoAccel384=avx512_doAccel384
- -DdoAccel512=avx512_doAccel512
- -DdoAccel64=avx512_doAccel64
- -Dexpand_stream=avx512_expand_stream
- -DfdrExec=avx512_fdrExec
- -DfdrExecStreaming=avx512_fdrExecStreaming
- -Dfdr_exec_fat_teddy_msks1=avx512_fdr_exec_fat_teddy_msks1
- -Dfdr_exec_fat_teddy_msks1_pck=avx512_fdr_exec_fat_teddy_msks1_pck
- -Dfdr_exec_fat_teddy_msks2=avx512_fdr_exec_fat_teddy_msks2
- -Dfdr_exec_fat_teddy_msks2_pck=avx512_fdr_exec_fat_teddy_msks2_pck
- -Dfdr_exec_fat_teddy_msks3=avx512_fdr_exec_fat_teddy_msks3
- -Dfdr_exec_fat_teddy_msks3_pck=avx512_fdr_exec_fat_teddy_msks3_pck
- -Dfdr_exec_fat_teddy_msks4=avx512_fdr_exec_fat_teddy_msks4
- -Dfdr_exec_fat_teddy_msks4_pck=avx512_fdr_exec_fat_teddy_msks4_pck
- -Dfdr_exec_teddy_msks1=avx512_fdr_exec_teddy_msks1
- -Dfdr_exec_teddy_msks1_pck=avx512_fdr_exec_teddy_msks1_pck
- -Dfdr_exec_teddy_msks2=avx512_fdr_exec_teddy_msks2
- -Dfdr_exec_teddy_msks2_pck=avx512_fdr_exec_teddy_msks2_pck
- -Dfdr_exec_teddy_msks3=avx512_fdr_exec_teddy_msks3
- -Dfdr_exec_teddy_msks3_pck=avx512_fdr_exec_teddy_msks3_pck
- -Dfdr_exec_teddy_msks4=avx512_fdr_exec_teddy_msks4
- -Dfdr_exec_teddy_msks4_pck=avx512_fdr_exec_teddy_msks4_pck
- -DflushQueuedLiterals_i=avx512_flushQueuedLiterals_i
- -DflushStoredSomMatches_i=avx512_flushStoredSomMatches_i
- -DhandleSomExternal=avx512_handleSomExternal
- -DhandleSomInternal=avx512_handleSomInternal
- -Dhs_alloc_scratch=avx512_hs_alloc_scratch
- -Dhs_clone_scratch=avx512_hs_clone_scratch
- -Dhs_close_stream=avx512_hs_close_stream
- -Dhs_compress_stream=avx512_hs_compress_stream
- -Dhs_copy_stream=avx512_hs_copy_stream
- -Dhs_database_alloc=avx512_hs_database_alloc
- -Dhs_database_free=avx512_hs_database_free
- -Dhs_database_info=avx512_hs_database_info
- -Dhs_database_size=avx512_hs_database_size
- -Dhs_deserialize_database=avx512_hs_deserialize_database
- -Dhs_deserialize_database_at=avx512_hs_deserialize_database_at
- -Dhs_expand_stream=avx512_hs_expand_stream
- -Dhs_free_database=avx512_hs_free_database
- -Dhs_free_scratch=avx512_hs_free_scratch
- -Dhs_misc_alloc=avx512_hs_misc_alloc
- -Dhs_misc_free=avx512_hs_misc_free
- -Dhs_open_stream=avx512_hs_open_stream
- -Dhs_reset_and_copy_stream=avx512_hs_reset_and_copy_stream
- -Dhs_reset_and_expand_stream=avx512_hs_reset_and_expand_stream
- -Dhs_reset_stream=avx512_hs_reset_stream
- -Dhs_scan=avx512_hs_scan
- -Dhs_scan_stream=avx512_hs_scan_stream
- -Dhs_scan_vector=avx512_hs_scan_vector
- -Dhs_scratch_alloc=avx512_hs_scratch_alloc
- -Dhs_scratch_free=avx512_hs_scratch_free
- -Dhs_scratch_size=avx512_hs_scratch_size
- -Dhs_serialize_database=avx512_hs_serialize_database
- -Dhs_serialized_database_info=avx512_hs_serialized_database_info
- -Dhs_serialized_database_size=avx512_hs_serialized_database_size
- -Dhs_set_allocator=avx512_hs_set_allocator
- -Dhs_set_database_allocator=avx512_hs_set_database_allocator
- -Dhs_set_misc_allocator=avx512_hs_set_misc_allocator
- -Dhs_set_scratch_allocator=avx512_hs_set_scratch_allocator
- -Dhs_set_stream_allocator=avx512_hs_set_stream_allocator
- -Dhs_stream_alloc=avx512_hs_stream_alloc
- -Dhs_stream_free=avx512_hs_stream_free
- -Dhs_stream_size=avx512_hs_stream_size
- -Dhs_valid_platform=avx512_hs_valid_platform
- -Dhs_version=avx512_hs_version
- -DhwlmExec=avx512_hwlmExec
- -DhwlmExecStreaming=avx512_hwlmExecStreaming
- -DloadSomFromStream=avx512_loadSomFromStream
- -Dloadcompressed128=avx512_loadcompressed128
- -Dloadcompressed256=avx512_loadcompressed256
- -Dloadcompressed32=avx512_loadcompressed32
- -Dloadcompressed384=avx512_loadcompressed384
- -Dloadcompressed512=avx512_loadcompressed512
- -Dloadcompressed64=avx512_loadcompressed64
- -Dmcsheng_pext_mask=avx512_mcsheng_pext_mask
- -Dmm_mask_mask=avx512_mm_mask_mask
- -Dmm_shuffle_end=avx512_mm_shuffle_end
- -Dmmbit_keyshift_lut=avx512_mmbit_keyshift_lut
- -Dmmbit_maxlevel_direct_lut=avx512_mmbit_maxlevel_direct_lut
- -Dmmbit_maxlevel_from_keyshift_lut=avx512_mmbit_maxlevel_from_keyshift_lut
- -Dmmbit_root_offset_from_level=avx512_mmbit_root_offset_from_level
- -Dmmbit_zero_to_lut=avx512_mmbit_zero_to_lut
- -DnfaBlockExecReverse=avx512_nfaBlockExecReverse
- -DnfaCheckFinalState=avx512_nfaCheckFinalState
- -DnfaExecCastle_Q=avx512_nfaExecCastle_Q
- -DnfaExecCastle_Q2=avx512_nfaExecCastle_Q2
- -DnfaExecCastle_QR=avx512_nfaExecCastle_QR
- -DnfaExecCastle_expandState=avx512_nfaExecCastle_expandState
- -DnfaExecCastle_inAccept=avx512_nfaExecCastle_inAccept
- -DnfaExecCastle_inAnyAccept=avx512_nfaExecCastle_inAnyAccept
- -DnfaExecCastle_initCompressedState=avx512_nfaExecCastle_initCompressedState
- -DnfaExecCastle_queueCompressState=avx512_nfaExecCastle_queueCompressState
- -DnfaExecCastle_queueInitState=avx512_nfaExecCastle_queueInitState
- -DnfaExecCastle_reportCurrent=avx512_nfaExecCastle_reportCurrent
- -DnfaExecGough16_Q=avx512_nfaExecGough16_Q
- -DnfaExecGough16_Q2=avx512_nfaExecGough16_Q2
- -DnfaExecGough16_QR=avx512_nfaExecGough16_QR
- -DnfaExecGough16_expandState=avx512_nfaExecGough16_expandState
- -DnfaExecGough16_inAccept=avx512_nfaExecGough16_inAccept
- -DnfaExecGough16_inAnyAccept=avx512_nfaExecGough16_inAnyAccept
- -DnfaExecGough16_initCompressedState=avx512_nfaExecGough16_initCompressedState
- -DnfaExecGough16_queueCompressState=avx512_nfaExecGough16_queueCompressState
- -DnfaExecGough16_queueInitState=avx512_nfaExecGough16_queueInitState
- -DnfaExecGough16_reportCurrent=avx512_nfaExecGough16_reportCurrent
- -DnfaExecGough16_testEOD=avx512_nfaExecGough16_testEOD
- -DnfaExecGough8_Q=avx512_nfaExecGough8_Q
- -DnfaExecGough8_Q2=avx512_nfaExecGough8_Q2
- -DnfaExecGough8_QR=avx512_nfaExecGough8_QR
- -DnfaExecGough8_expandState=avx512_nfaExecGough8_expandState
- -DnfaExecGough8_inAccept=avx512_nfaExecGough8_inAccept
- -DnfaExecGough8_inAnyAccept=avx512_nfaExecGough8_inAnyAccept
- -DnfaExecGough8_initCompressedState=avx512_nfaExecGough8_initCompressedState
- -DnfaExecGough8_queueCompressState=avx512_nfaExecGough8_queueCompressState
- -DnfaExecGough8_queueInitState=avx512_nfaExecGough8_queueInitState
- -DnfaExecGough8_reportCurrent=avx512_nfaExecGough8_reportCurrent
- -DnfaExecGough8_testEOD=avx512_nfaExecGough8_testEOD
- -DnfaExecLbrDot_Q=avx512_nfaExecLbrDot_Q
- -DnfaExecLbrDot_Q2=avx512_nfaExecLbrDot_Q2
- -DnfaExecLbrDot_QR=avx512_nfaExecLbrDot_QR
- -DnfaExecLbrDot_expandState=avx512_nfaExecLbrDot_expandState
- -DnfaExecLbrDot_inAccept=avx512_nfaExecLbrDot_inAccept
- -DnfaExecLbrDot_inAnyAccept=avx512_nfaExecLbrDot_inAnyAccept
- -DnfaExecLbrDot_initCompressedState=avx512_nfaExecLbrDot_initCompressedState
- -DnfaExecLbrDot_queueCompressState=avx512_nfaExecLbrDot_queueCompressState
- -DnfaExecLbrDot_queueInitState=avx512_nfaExecLbrDot_queueInitState
- -DnfaExecLbrDot_reportCurrent=avx512_nfaExecLbrDot_reportCurrent
- -DnfaExecLbrNVerm_Q=avx512_nfaExecLbrNVerm_Q
- -DnfaExecLbrNVerm_Q2=avx512_nfaExecLbrNVerm_Q2
- -DnfaExecLbrNVerm_QR=avx512_nfaExecLbrNVerm_QR
- -DnfaExecLbrNVerm_expandState=avx512_nfaExecLbrNVerm_expandState
- -DnfaExecLbrNVerm_inAccept=avx512_nfaExecLbrNVerm_inAccept
- -DnfaExecLbrNVerm_inAnyAccept=avx512_nfaExecLbrNVerm_inAnyAccept
- -DnfaExecLbrNVerm_initCompressedState=avx512_nfaExecLbrNVerm_initCompressedState
- -DnfaExecLbrNVerm_queueCompressState=avx512_nfaExecLbrNVerm_queueCompressState
- -DnfaExecLbrNVerm_queueInitState=avx512_nfaExecLbrNVerm_queueInitState
- -DnfaExecLbrNVerm_reportCurrent=avx512_nfaExecLbrNVerm_reportCurrent
- -DnfaExecLbrShuf_Q=avx512_nfaExecLbrShuf_Q
- -DnfaExecLbrShuf_Q2=avx512_nfaExecLbrShuf_Q2
- -DnfaExecLbrShuf_QR=avx512_nfaExecLbrShuf_QR
- -DnfaExecLbrShuf_expandState=avx512_nfaExecLbrShuf_expandState
- -DnfaExecLbrShuf_inAccept=avx512_nfaExecLbrShuf_inAccept
- -DnfaExecLbrShuf_inAnyAccept=avx512_nfaExecLbrShuf_inAnyAccept
- -DnfaExecLbrShuf_initCompressedState=avx512_nfaExecLbrShuf_initCompressedState
- -DnfaExecLbrShuf_queueCompressState=avx512_nfaExecLbrShuf_queueCompressState
- -DnfaExecLbrShuf_queueInitState=avx512_nfaExecLbrShuf_queueInitState
- -DnfaExecLbrShuf_reportCurrent=avx512_nfaExecLbrShuf_reportCurrent
- -DnfaExecLbrTruf_Q=avx512_nfaExecLbrTruf_Q
- -DnfaExecLbrTruf_Q2=avx512_nfaExecLbrTruf_Q2
- -DnfaExecLbrTruf_QR=avx512_nfaExecLbrTruf_QR
- -DnfaExecLbrTruf_expandState=avx512_nfaExecLbrTruf_expandState
- -DnfaExecLbrTruf_inAccept=avx512_nfaExecLbrTruf_inAccept
- -DnfaExecLbrTruf_inAnyAccept=avx512_nfaExecLbrTruf_inAnyAccept
- -DnfaExecLbrTruf_initCompressedState=avx512_nfaExecLbrTruf_initCompressedState
- -DnfaExecLbrTruf_queueCompressState=avx512_nfaExecLbrTruf_queueCompressState
- -DnfaExecLbrTruf_queueInitState=avx512_nfaExecLbrTruf_queueInitState
- -DnfaExecLbrTruf_reportCurrent=avx512_nfaExecLbrTruf_reportCurrent
- -DnfaExecLbrVerm_Q=avx512_nfaExecLbrVerm_Q
- -DnfaExecLbrVerm_Q2=avx512_nfaExecLbrVerm_Q2
- -DnfaExecLbrVerm_QR=avx512_nfaExecLbrVerm_QR
- -DnfaExecLbrVerm_expandState=avx512_nfaExecLbrVerm_expandState
- -DnfaExecLbrVerm_inAccept=avx512_nfaExecLbrVerm_inAccept
- -DnfaExecLbrVerm_inAnyAccept=avx512_nfaExecLbrVerm_inAnyAccept
- -DnfaExecLbrVerm_initCompressedState=avx512_nfaExecLbrVerm_initCompressedState
- -DnfaExecLbrVerm_queueCompressState=avx512_nfaExecLbrVerm_queueCompressState
- -DnfaExecLbrVerm_queueInitState=avx512_nfaExecLbrVerm_queueInitState
- -DnfaExecLbrVerm_reportCurrent=avx512_nfaExecLbrVerm_reportCurrent
- -DnfaExecLimEx128_B_Reverse=avx512_nfaExecLimEx128_B_Reverse
- -DnfaExecLimEx128_Q=avx512_nfaExecLimEx128_Q
- -DnfaExecLimEx128_Q2=avx512_nfaExecLimEx128_Q2
- -DnfaExecLimEx128_QR=avx512_nfaExecLimEx128_QR
- -DnfaExecLimEx128_expandState=avx512_nfaExecLimEx128_expandState
- -DnfaExecLimEx128_inAccept=avx512_nfaExecLimEx128_inAccept
- -DnfaExecLimEx128_inAnyAccept=avx512_nfaExecLimEx128_inAnyAccept
- -DnfaExecLimEx128_initCompressedState=avx512_nfaExecLimEx128_initCompressedState
- -DnfaExecLimEx128_queueCompressState=avx512_nfaExecLimEx128_queueCompressState
- -DnfaExecLimEx128_queueInitState=avx512_nfaExecLimEx128_queueInitState
- -DnfaExecLimEx128_reportCurrent=avx512_nfaExecLimEx128_reportCurrent
- -DnfaExecLimEx128_testEOD=avx512_nfaExecLimEx128_testEOD
- -DnfaExecLimEx128_zombie_status=avx512_nfaExecLimEx128_zombie_status
- -DnfaExecLimEx256_B_Reverse=avx512_nfaExecLimEx256_B_Reverse
- -DnfaExecLimEx256_Q=avx512_nfaExecLimEx256_Q
- -DnfaExecLimEx256_Q2=avx512_nfaExecLimEx256_Q2
- -DnfaExecLimEx256_QR=avx512_nfaExecLimEx256_QR
- -DnfaExecLimEx256_expandState=avx512_nfaExecLimEx256_expandState
- -DnfaExecLimEx256_inAccept=avx512_nfaExecLimEx256_inAccept
- -DnfaExecLimEx256_inAnyAccept=avx512_nfaExecLimEx256_inAnyAccept
- -DnfaExecLimEx256_initCompressedState=avx512_nfaExecLimEx256_initCompressedState
- -DnfaExecLimEx256_queueCompressState=avx512_nfaExecLimEx256_queueCompressState
- -DnfaExecLimEx256_queueInitState=avx512_nfaExecLimEx256_queueInitState
- -DnfaExecLimEx256_reportCurrent=avx512_nfaExecLimEx256_reportCurrent
- -DnfaExecLimEx256_testEOD=avx512_nfaExecLimEx256_testEOD
- -DnfaExecLimEx256_zombie_status=avx512_nfaExecLimEx256_zombie_status
- -DnfaExecLimEx32_B_Reverse=avx512_nfaExecLimEx32_B_Reverse
- -DnfaExecLimEx32_Q=avx512_nfaExecLimEx32_Q
- -DnfaExecLimEx32_Q2=avx512_nfaExecLimEx32_Q2
- -DnfaExecLimEx32_QR=avx512_nfaExecLimEx32_QR
- -DnfaExecLimEx32_expandState=avx512_nfaExecLimEx32_expandState
- -DnfaExecLimEx32_inAccept=avx512_nfaExecLimEx32_inAccept
- -DnfaExecLimEx32_inAnyAccept=avx512_nfaExecLimEx32_inAnyAccept
- -DnfaExecLimEx32_initCompressedState=avx512_nfaExecLimEx32_initCompressedState
- -DnfaExecLimEx32_queueCompressState=avx512_nfaExecLimEx32_queueCompressState
- -DnfaExecLimEx32_queueInitState=avx512_nfaExecLimEx32_queueInitState
- -DnfaExecLimEx32_reportCurrent=avx512_nfaExecLimEx32_reportCurrent
- -DnfaExecLimEx32_testEOD=avx512_nfaExecLimEx32_testEOD
- -DnfaExecLimEx32_zombie_status=avx512_nfaExecLimEx32_zombie_status
- -DnfaExecLimEx384_B_Reverse=avx512_nfaExecLimEx384_B_Reverse
- -DnfaExecLimEx384_Q=avx512_nfaExecLimEx384_Q
- -DnfaExecLimEx384_Q2=avx512_nfaExecLimEx384_Q2
- -DnfaExecLimEx384_QR=avx512_nfaExecLimEx384_QR
- -DnfaExecLimEx384_expandState=avx512_nfaExecLimEx384_expandState
- -DnfaExecLimEx384_inAccept=avx512_nfaExecLimEx384_inAccept
- -DnfaExecLimEx384_inAnyAccept=avx512_nfaExecLimEx384_inAnyAccept
- -DnfaExecLimEx384_initCompressedState=avx512_nfaExecLimEx384_initCompressedState
- -DnfaExecLimEx384_queueCompressState=avx512_nfaExecLimEx384_queueCompressState
- -DnfaExecLimEx384_queueInitState=avx512_nfaExecLimEx384_queueInitState
- -DnfaExecLimEx384_reportCurrent=avx512_nfaExecLimEx384_reportCurrent
- -DnfaExecLimEx384_testEOD=avx512_nfaExecLimEx384_testEOD
- -DnfaExecLimEx384_zombie_status=avx512_nfaExecLimEx384_zombie_status
- -DnfaExecLimEx512_B_Reverse=avx512_nfaExecLimEx512_B_Reverse
- -DnfaExecLimEx512_Q=avx512_nfaExecLimEx512_Q
- -DnfaExecLimEx512_Q2=avx512_nfaExecLimEx512_Q2
- -DnfaExecLimEx512_QR=avx512_nfaExecLimEx512_QR
- -DnfaExecLimEx512_expandState=avx512_nfaExecLimEx512_expandState
- -DnfaExecLimEx512_inAccept=avx512_nfaExecLimEx512_inAccept
- -DnfaExecLimEx512_inAnyAccept=avx512_nfaExecLimEx512_inAnyAccept
- -DnfaExecLimEx512_initCompressedState=avx512_nfaExecLimEx512_initCompressedState
- -DnfaExecLimEx512_queueCompressState=avx512_nfaExecLimEx512_queueCompressState
- -DnfaExecLimEx512_queueInitState=avx512_nfaExecLimEx512_queueInitState
- -DnfaExecLimEx512_reportCurrent=avx512_nfaExecLimEx512_reportCurrent
- -DnfaExecLimEx512_testEOD=avx512_nfaExecLimEx512_testEOD
- -DnfaExecLimEx512_zombie_status=avx512_nfaExecLimEx512_zombie_status
- -DnfaExecLimEx64_B_Reverse=avx512_nfaExecLimEx64_B_Reverse
- -DnfaExecLimEx64_Q=avx512_nfaExecLimEx64_Q
- -DnfaExecLimEx64_Q2=avx512_nfaExecLimEx64_Q2
- -DnfaExecLimEx64_QR=avx512_nfaExecLimEx64_QR
- -DnfaExecLimEx64_expandState=avx512_nfaExecLimEx64_expandState
- -DnfaExecLimEx64_inAccept=avx512_nfaExecLimEx64_inAccept
- -DnfaExecLimEx64_inAnyAccept=avx512_nfaExecLimEx64_inAnyAccept
- -DnfaExecLimEx64_initCompressedState=avx512_nfaExecLimEx64_initCompressedState
- -DnfaExecLimEx64_queueCompressState=avx512_nfaExecLimEx64_queueCompressState
- -DnfaExecLimEx64_queueInitState=avx512_nfaExecLimEx64_queueInitState
- -DnfaExecLimEx64_reportCurrent=avx512_nfaExecLimEx64_reportCurrent
- -DnfaExecLimEx64_testEOD=avx512_nfaExecLimEx64_testEOD
- -DnfaExecLimEx64_zombie_status=avx512_nfaExecLimEx64_zombie_status
- -DnfaExecMcClellan16_B=avx512_nfaExecMcClellan16_B
- -DnfaExecMcClellan16_Q=avx512_nfaExecMcClellan16_Q
- -DnfaExecMcClellan16_Q2=avx512_nfaExecMcClellan16_Q2
- -DnfaExecMcClellan16_QR=avx512_nfaExecMcClellan16_QR
- -DnfaExecMcClellan16_SimpStream=avx512_nfaExecMcClellan16_SimpStream
- -DnfaExecMcClellan16_expandState=avx512_nfaExecMcClellan16_expandState
- -DnfaExecMcClellan16_inAccept=avx512_nfaExecMcClellan16_inAccept
- -DnfaExecMcClellan16_inAnyAccept=avx512_nfaExecMcClellan16_inAnyAccept
- -DnfaExecMcClellan16_initCompressedState=avx512_nfaExecMcClellan16_initCompressedState
- -DnfaExecMcClellan16_queueCompressState=avx512_nfaExecMcClellan16_queueCompressState
- -DnfaExecMcClellan16_queueInitState=avx512_nfaExecMcClellan16_queueInitState
- -DnfaExecMcClellan16_reportCurrent=avx512_nfaExecMcClellan16_reportCurrent
- -DnfaExecMcClellan16_testEOD=avx512_nfaExecMcClellan16_testEOD
- -DnfaExecMcClellan8_B=avx512_nfaExecMcClellan8_B
- -DnfaExecMcClellan8_Q=avx512_nfaExecMcClellan8_Q
- -DnfaExecMcClellan8_Q2=avx512_nfaExecMcClellan8_Q2
- -DnfaExecMcClellan8_QR=avx512_nfaExecMcClellan8_QR
- -DnfaExecMcClellan8_SimpStream=avx512_nfaExecMcClellan8_SimpStream
- -DnfaExecMcClellan8_expandState=avx512_nfaExecMcClellan8_expandState
- -DnfaExecMcClellan8_inAccept=avx512_nfaExecMcClellan8_inAccept
- -DnfaExecMcClellan8_inAnyAccept=avx512_nfaExecMcClellan8_inAnyAccept
- -DnfaExecMcClellan8_initCompressedState=avx512_nfaExecMcClellan8_initCompressedState
- -DnfaExecMcClellan8_queueCompressState=avx512_nfaExecMcClellan8_queueCompressState
- -DnfaExecMcClellan8_queueInitState=avx512_nfaExecMcClellan8_queueInitState
- -DnfaExecMcClellan8_reportCurrent=avx512_nfaExecMcClellan8_reportCurrent
- -DnfaExecMcClellan8_testEOD=avx512_nfaExecMcClellan8_testEOD
- -DnfaExecMcSheng16_Q=avx512_nfaExecMcSheng16_Q
- -DnfaExecMcSheng16_Q2=avx512_nfaExecMcSheng16_Q2
- -DnfaExecMcSheng16_QR=avx512_nfaExecMcSheng16_QR
- -DnfaExecMcSheng16_expandState=avx512_nfaExecMcSheng16_expandState
- -DnfaExecMcSheng16_inAccept=avx512_nfaExecMcSheng16_inAccept
- -DnfaExecMcSheng16_inAnyAccept=avx512_nfaExecMcSheng16_inAnyAccept
- -DnfaExecMcSheng16_initCompressedState=avx512_nfaExecMcSheng16_initCompressedState
- -DnfaExecMcSheng16_queueCompressState=avx512_nfaExecMcSheng16_queueCompressState
- -DnfaExecMcSheng16_queueInitState=avx512_nfaExecMcSheng16_queueInitState
- -DnfaExecMcSheng16_reportCurrent=avx512_nfaExecMcSheng16_reportCurrent
- -DnfaExecMcSheng16_testEOD=avx512_nfaExecMcSheng16_testEOD
- -DnfaExecMcSheng8_Q=avx512_nfaExecMcSheng8_Q
- -DnfaExecMcSheng8_Q2=avx512_nfaExecMcSheng8_Q2
- -DnfaExecMcSheng8_QR=avx512_nfaExecMcSheng8_QR
- -DnfaExecMcSheng8_expandState=avx512_nfaExecMcSheng8_expandState
- -DnfaExecMcSheng8_inAccept=avx512_nfaExecMcSheng8_inAccept
- -DnfaExecMcSheng8_inAnyAccept=avx512_nfaExecMcSheng8_inAnyAccept
- -DnfaExecMcSheng8_initCompressedState=avx512_nfaExecMcSheng8_initCompressedState
- -DnfaExecMcSheng8_queueCompressState=avx512_nfaExecMcSheng8_queueCompressState
- -DnfaExecMcSheng8_queueInitState=avx512_nfaExecMcSheng8_queueInitState
- -DnfaExecMcSheng8_reportCurrent=avx512_nfaExecMcSheng8_reportCurrent
- -DnfaExecMcSheng8_testEOD=avx512_nfaExecMcSheng8_testEOD
- -DnfaExecMpv_Q=avx512_nfaExecMpv_Q
- -DnfaExecMpv_QueueExecRaw=avx512_nfaExecMpv_QueueExecRaw
- -DnfaExecMpv_expandState=avx512_nfaExecMpv_expandState
- -DnfaExecMpv_initCompressedState=avx512_nfaExecMpv_initCompressedState
- -DnfaExecMpv_queueCompressState=avx512_nfaExecMpv_queueCompressState
- -DnfaExecMpv_queueInitState=avx512_nfaExecMpv_queueInitState
- -DnfaExecMpv_reportCurrent=avx512_nfaExecMpv_reportCurrent
- -DnfaExecSheng_B=avx512_nfaExecSheng_B
- -DnfaExecSheng_Q=avx512_nfaExecSheng_Q
- -DnfaExecSheng_Q2=avx512_nfaExecSheng_Q2
- -DnfaExecSheng_QR=avx512_nfaExecSheng_QR
- -DnfaExecSheng_expandState=avx512_nfaExecSheng_expandState
- -DnfaExecSheng_inAccept=avx512_nfaExecSheng_inAccept
- -DnfaExecSheng_inAnyAccept=avx512_nfaExecSheng_inAnyAccept
- -DnfaExecSheng_initCompressedState=avx512_nfaExecSheng_initCompressedState
- -DnfaExecSheng_queueCompressState=avx512_nfaExecSheng_queueCompressState
- -DnfaExecSheng_queueInitState=avx512_nfaExecSheng_queueInitState
- -DnfaExecSheng_reportCurrent=avx512_nfaExecSheng_reportCurrent
- -DnfaExecSheng_testEOD=avx512_nfaExecSheng_testEOD
- -DnfaExecTamarama_Q=avx512_nfaExecTamarama_Q
- -DnfaExecTamarama_Q2=avx512_nfaExecTamarama_Q2
- -DnfaExecTamarama_QR=avx512_nfaExecTamarama_QR
- -DnfaExecTamarama_expandState=avx512_nfaExecTamarama_expandState
- -DnfaExecTamarama_inAccept=avx512_nfaExecTamarama_inAccept
- -DnfaExecTamarama_inAnyAccept=avx512_nfaExecTamarama_inAnyAccept
- -DnfaExecTamarama_queueCompressState=avx512_nfaExecTamarama_queueCompressState
- -DnfaExecTamarama_queueInitState=avx512_nfaExecTamarama_queueInitState
- -DnfaExecTamarama_reportCurrent=avx512_nfaExecTamarama_reportCurrent
- -DnfaExecTamarama_testEOD=avx512_nfaExecTamarama_testEOD
- -DnfaExecTamarama_zombie_status=avx512_nfaExecTamarama_zombie_status
- -DnfaExpandState=avx512_nfaExpandState
- -DnfaGetZombieStatus=avx512_nfaGetZombieStatus
- -DnfaInAcceptState=avx512_nfaInAcceptState
- -DnfaInAnyAcceptState=avx512_nfaInAnyAcceptState
- -DnfaInitCompressedState=avx512_nfaInitCompressedState
- -DnfaQueueCompressState=avx512_nfaQueueCompressState
- -DnfaQueueExec=avx512_nfaQueueExec
- -DnfaQueueExec2_raw=avx512_nfaQueueExec2_raw
- -DnfaQueueExecRose=avx512_nfaQueueExecRose
- -DnfaQueueExecToMatch=avx512_nfaQueueExecToMatch
- -DnfaQueueExec_raw=avx512_nfaQueueExec_raw
- -DnfaQueueInitState=avx512_nfaQueueInitState
- -DnfaReportCurrentMatches=avx512_nfaReportCurrentMatches
- -DnoodExec=avx512_noodExec
- -DnoodExecStreaming=avx512_noodExecStreaming
- -Dp_mask_arr=avx512_p_mask_arr
- -Dp_mask_arr256=avx512_p_mask_arr256
- -DrepeatHasMatchBitmap=avx512_repeatHasMatchBitmap
- -DrepeatHasMatchRange=avx512_repeatHasMatchRange
- -DrepeatHasMatchRing=avx512_repeatHasMatchRing
- -DrepeatHasMatchSparseOptimalP=avx512_repeatHasMatchSparseOptimalP
- -DrepeatHasMatchTrailer=avx512_repeatHasMatchTrailer
- -DrepeatLastTopBitmap=avx512_repeatLastTopBitmap
- -DrepeatLastTopRange=avx512_repeatLastTopRange
- -DrepeatLastTopRing=avx512_repeatLastTopRing
- -DrepeatLastTopSparseOptimalP=avx512_repeatLastTopSparseOptimalP
- -DrepeatLastTopTrailer=avx512_repeatLastTopTrailer
- -DrepeatNextMatchBitmap=avx512_repeatNextMatchBitmap
- -DrepeatNextMatchRange=avx512_repeatNextMatchRange
- -DrepeatNextMatchRing=avx512_repeatNextMatchRing
- -DrepeatNextMatchSparseOptimalP=avx512_repeatNextMatchSparseOptimalP
- -DrepeatNextMatchTrailer=avx512_repeatNextMatchTrailer
- -DrepeatPack=avx512_repeatPack
- -DrepeatStoreBitmap=avx512_repeatStoreBitmap
- -DrepeatStoreRange=avx512_repeatStoreRange
- -DrepeatStoreRing=avx512_repeatStoreRing
- -DrepeatStoreSparseOptimalP=avx512_repeatStoreSparseOptimalP
- -DrepeatStoreTrailer=avx512_repeatStoreTrailer
- -DrepeatUnpack=avx512_repeatUnpack
- -DroseAnchoredCallback=avx512_roseAnchoredCallback
- -DroseBlockExec=avx512_roseBlockExec
- -DroseCallback=avx512_roseCallback
- -DroseCatchUpAll=avx512_roseCatchUpAll
- -DroseCatchUpMPV_i=avx512_roseCatchUpMPV_i
- -DroseCatchUpSuf=avx512_roseCatchUpSuf
- -DroseDelayRebuildCallback=avx512_roseDelayRebuildCallback
- -DroseFloatingCallback=avx512_roseFloatingCallback
- -DroseHandleChainMatch=avx512_roseHandleChainMatch
- -DroseInitState=avx512_roseInitState
- -DroseNfaAdaptor=avx512_roseNfaAdaptor
- -DroseNfaEarliestSom=avx512_roseNfaEarliestSom
- -DroseReportAdaptor=avx512_roseReportAdaptor
- -DroseRunBoundaryProgram=avx512_roseRunBoundaryProgram
- -DroseRunFlushCombProgram=avx512_roseRunFlushCombProgram
- -DroseRunLastFlushCombProgram=avx512_roseRunLastFlushCombProgram
- -DroseRunProgram=avx512_roseRunProgram
- -DroseRunProgram_l=avx512_roseRunProgram_l
- -DroseStreamEodExec=avx512_roseStreamEodExec
- -DroseStreamExec=avx512_roseStreamExec
- -DrshuftiExec=avx512_rshuftiExec
- -DrtruffleExec=avx512_rtruffleExec
- -Drun_accel=avx512_run_accel
- -DsetSomFromSomAware=avx512_setSomFromSomAware
- -DshuftiDoubleExec=avx512_shuftiDoubleExec
- -DshuftiExec=avx512_shuftiExec
- -Dsimd_onebit_masks=avx512_simd_onebit_masks
- -Dsize_compress_stream=avx512_size_compress_stream
- -DstoreSomToStream=avx512_storeSomToStream
- -Dstorecompressed128=avx512_storecompressed128
- -Dstorecompressed256=avx512_storecompressed256
- -Dstorecompressed32=avx512_storecompressed32
- -Dstorecompressed384=avx512_storecompressed384
- -Dstorecompressed512=avx512_storecompressed512
- -Dstorecompressed64=avx512_storecompressed64
- -DstreamInitSufPQ=avx512_streamInitSufPQ
- -DtruffleExec=avx512_truffleExec
- -Dvbs_mask_data=avx512_vbs_mask_data
-)
-
-SRCDIR(contrib/libs/hyperscan)
-
-SRCS(
- src/alloc.c
- src/crc32.c
- src/database.c
- src/fdr/fdr.c
- src/fdr/teddy.c
- src/fdr/teddy_avx2.c
- src/hs_valid_platform.c
- src/hs_version.c
- src/hwlm/hwlm.c
- src/hwlm/noodle_engine.c
- src/nfa/accel.c
- src/nfa/castle.c
- src/nfa/gough.c
- src/nfa/lbr.c
- src/nfa/limex_64.c
- src/nfa/limex_accel.c
- src/nfa/limex_native.c
- src/nfa/limex_simd128.c
- src/nfa/limex_simd256.c
- src/nfa/limex_simd384.c
- src/nfa/limex_simd512.c
- src/nfa/mcclellan.c
- src/nfa/mcsheng.c
- src/nfa/mcsheng_data.c
- src/nfa/mpv.c
- src/nfa/nfa_api_dispatch.c
- src/nfa/repeat.c
- src/nfa/sheng.c
- src/nfa/shufti.c
- src/nfa/tamarama.c
- src/nfa/truffle.c
- src/rose/block.c
- src/rose/catchup.c
- src/rose/init.c
- src/rose/match.c
- src/rose/program_runtime.c
- src/rose/stream.c
- src/runtime.c
- src/scratch.c
- src/som/som_runtime.c
- src/som/som_stream.c
- src/stream_compress.c
- src/util/cpuid_flags.c
- src/util/masked_move.c
- src/util/multibit.c
- src/util/simd_utils.c
- src/util/state_compress.c
-)
-
-END()
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ galtsev
+ g:antiinfra
+ g:cpp-contrib
+ g:yql
+)
+
+LICENSE(BSD-3-Clause)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+ADDINCL(
+ contrib/libs/hyperscan
+ contrib/libs/hyperscan/src
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ ${SSE41_CFLAGS}
+ -DHAVE_SSE41
+ ${SSE42_CFLAGS}
+ -DHAVE_SSE42
+ -DHAVE_POPCOUNT_INSTR
+ ${POPCNT_CFLAGS}
+ ${AVX_CFLAGS}
+ -DHAVE_AVX
+ ${AVX2_CFLAGS}
+ -DHAVE_AVX2
+ ${AVX512_CFLAGS}
+ -DHAVE_AVX512
+ -DCrc32c_ComputeBuf=avx512_Crc32c_ComputeBuf
+ -DblockInitSufPQ=avx512_blockInitSufPQ
+ -Dcompress_stream=avx512_compress_stream
+ -Dcpuid_flags=avx512_cpuid_flags
+ -Dcpuid_tune=avx512_cpuid_tune
+ -DdbIsValid=avx512_dbIsValid
+ -DdoAccel128=avx512_doAccel128
+ -DdoAccel256=avx512_doAccel256
+ -DdoAccel32=avx512_doAccel32
+ -DdoAccel384=avx512_doAccel384
+ -DdoAccel512=avx512_doAccel512
+ -DdoAccel64=avx512_doAccel64
+ -Dexpand_stream=avx512_expand_stream
+ -DfdrExec=avx512_fdrExec
+ -DfdrExecStreaming=avx512_fdrExecStreaming
+ -Dfdr_exec_fat_teddy_msks1=avx512_fdr_exec_fat_teddy_msks1
+ -Dfdr_exec_fat_teddy_msks1_pck=avx512_fdr_exec_fat_teddy_msks1_pck
+ -Dfdr_exec_fat_teddy_msks2=avx512_fdr_exec_fat_teddy_msks2
+ -Dfdr_exec_fat_teddy_msks2_pck=avx512_fdr_exec_fat_teddy_msks2_pck
+ -Dfdr_exec_fat_teddy_msks3=avx512_fdr_exec_fat_teddy_msks3
+ -Dfdr_exec_fat_teddy_msks3_pck=avx512_fdr_exec_fat_teddy_msks3_pck
+ -Dfdr_exec_fat_teddy_msks4=avx512_fdr_exec_fat_teddy_msks4
+ -Dfdr_exec_fat_teddy_msks4_pck=avx512_fdr_exec_fat_teddy_msks4_pck
+ -Dfdr_exec_teddy_msks1=avx512_fdr_exec_teddy_msks1
+ -Dfdr_exec_teddy_msks1_pck=avx512_fdr_exec_teddy_msks1_pck
+ -Dfdr_exec_teddy_msks2=avx512_fdr_exec_teddy_msks2
+ -Dfdr_exec_teddy_msks2_pck=avx512_fdr_exec_teddy_msks2_pck
+ -Dfdr_exec_teddy_msks3=avx512_fdr_exec_teddy_msks3
+ -Dfdr_exec_teddy_msks3_pck=avx512_fdr_exec_teddy_msks3_pck
+ -Dfdr_exec_teddy_msks4=avx512_fdr_exec_teddy_msks4
+ -Dfdr_exec_teddy_msks4_pck=avx512_fdr_exec_teddy_msks4_pck
+ -DflushQueuedLiterals_i=avx512_flushQueuedLiterals_i
+ -DflushStoredSomMatches_i=avx512_flushStoredSomMatches_i
+ -DhandleSomExternal=avx512_handleSomExternal
+ -DhandleSomInternal=avx512_handleSomInternal
+ -Dhs_alloc_scratch=avx512_hs_alloc_scratch
+ -Dhs_clone_scratch=avx512_hs_clone_scratch
+ -Dhs_close_stream=avx512_hs_close_stream
+ -Dhs_compress_stream=avx512_hs_compress_stream
+ -Dhs_copy_stream=avx512_hs_copy_stream
+ -Dhs_database_alloc=avx512_hs_database_alloc
+ -Dhs_database_free=avx512_hs_database_free
+ -Dhs_database_info=avx512_hs_database_info
+ -Dhs_database_size=avx512_hs_database_size
+ -Dhs_deserialize_database=avx512_hs_deserialize_database
+ -Dhs_deserialize_database_at=avx512_hs_deserialize_database_at
+ -Dhs_expand_stream=avx512_hs_expand_stream
+ -Dhs_free_database=avx512_hs_free_database
+ -Dhs_free_scratch=avx512_hs_free_scratch
+ -Dhs_misc_alloc=avx512_hs_misc_alloc
+ -Dhs_misc_free=avx512_hs_misc_free
+ -Dhs_open_stream=avx512_hs_open_stream
+ -Dhs_reset_and_copy_stream=avx512_hs_reset_and_copy_stream
+ -Dhs_reset_and_expand_stream=avx512_hs_reset_and_expand_stream
+ -Dhs_reset_stream=avx512_hs_reset_stream
+ -Dhs_scan=avx512_hs_scan
+ -Dhs_scan_stream=avx512_hs_scan_stream
+ -Dhs_scan_vector=avx512_hs_scan_vector
+ -Dhs_scratch_alloc=avx512_hs_scratch_alloc
+ -Dhs_scratch_free=avx512_hs_scratch_free
+ -Dhs_scratch_size=avx512_hs_scratch_size
+ -Dhs_serialize_database=avx512_hs_serialize_database
+ -Dhs_serialized_database_info=avx512_hs_serialized_database_info
+ -Dhs_serialized_database_size=avx512_hs_serialized_database_size
+ -Dhs_set_allocator=avx512_hs_set_allocator
+ -Dhs_set_database_allocator=avx512_hs_set_database_allocator
+ -Dhs_set_misc_allocator=avx512_hs_set_misc_allocator
+ -Dhs_set_scratch_allocator=avx512_hs_set_scratch_allocator
+ -Dhs_set_stream_allocator=avx512_hs_set_stream_allocator
+ -Dhs_stream_alloc=avx512_hs_stream_alloc
+ -Dhs_stream_free=avx512_hs_stream_free
+ -Dhs_stream_size=avx512_hs_stream_size
+ -Dhs_valid_platform=avx512_hs_valid_platform
+ -Dhs_version=avx512_hs_version
+ -DhwlmExec=avx512_hwlmExec
+ -DhwlmExecStreaming=avx512_hwlmExecStreaming
+ -DloadSomFromStream=avx512_loadSomFromStream
+ -Dloadcompressed128=avx512_loadcompressed128
+ -Dloadcompressed256=avx512_loadcompressed256
+ -Dloadcompressed32=avx512_loadcompressed32
+ -Dloadcompressed384=avx512_loadcompressed384
+ -Dloadcompressed512=avx512_loadcompressed512
+ -Dloadcompressed64=avx512_loadcompressed64
+ -Dmcsheng_pext_mask=avx512_mcsheng_pext_mask
+ -Dmm_mask_mask=avx512_mm_mask_mask
+ -Dmm_shuffle_end=avx512_mm_shuffle_end
+ -Dmmbit_keyshift_lut=avx512_mmbit_keyshift_lut
+ -Dmmbit_maxlevel_direct_lut=avx512_mmbit_maxlevel_direct_lut
+ -Dmmbit_maxlevel_from_keyshift_lut=avx512_mmbit_maxlevel_from_keyshift_lut
+ -Dmmbit_root_offset_from_level=avx512_mmbit_root_offset_from_level
+ -Dmmbit_zero_to_lut=avx512_mmbit_zero_to_lut
+ -DnfaBlockExecReverse=avx512_nfaBlockExecReverse
+ -DnfaCheckFinalState=avx512_nfaCheckFinalState
+ -DnfaExecCastle_Q=avx512_nfaExecCastle_Q
+ -DnfaExecCastle_Q2=avx512_nfaExecCastle_Q2
+ -DnfaExecCastle_QR=avx512_nfaExecCastle_QR
+ -DnfaExecCastle_expandState=avx512_nfaExecCastle_expandState
+ -DnfaExecCastle_inAccept=avx512_nfaExecCastle_inAccept
+ -DnfaExecCastle_inAnyAccept=avx512_nfaExecCastle_inAnyAccept
+ -DnfaExecCastle_initCompressedState=avx512_nfaExecCastle_initCompressedState
+ -DnfaExecCastle_queueCompressState=avx512_nfaExecCastle_queueCompressState
+ -DnfaExecCastle_queueInitState=avx512_nfaExecCastle_queueInitState
+ -DnfaExecCastle_reportCurrent=avx512_nfaExecCastle_reportCurrent
+ -DnfaExecGough16_Q=avx512_nfaExecGough16_Q
+ -DnfaExecGough16_Q2=avx512_nfaExecGough16_Q2
+ -DnfaExecGough16_QR=avx512_nfaExecGough16_QR
+ -DnfaExecGough16_expandState=avx512_nfaExecGough16_expandState
+ -DnfaExecGough16_inAccept=avx512_nfaExecGough16_inAccept
+ -DnfaExecGough16_inAnyAccept=avx512_nfaExecGough16_inAnyAccept
+ -DnfaExecGough16_initCompressedState=avx512_nfaExecGough16_initCompressedState
+ -DnfaExecGough16_queueCompressState=avx512_nfaExecGough16_queueCompressState
+ -DnfaExecGough16_queueInitState=avx512_nfaExecGough16_queueInitState
+ -DnfaExecGough16_reportCurrent=avx512_nfaExecGough16_reportCurrent
+ -DnfaExecGough16_testEOD=avx512_nfaExecGough16_testEOD
+ -DnfaExecGough8_Q=avx512_nfaExecGough8_Q
+ -DnfaExecGough8_Q2=avx512_nfaExecGough8_Q2
+ -DnfaExecGough8_QR=avx512_nfaExecGough8_QR
+ -DnfaExecGough8_expandState=avx512_nfaExecGough8_expandState
+ -DnfaExecGough8_inAccept=avx512_nfaExecGough8_inAccept
+ -DnfaExecGough8_inAnyAccept=avx512_nfaExecGough8_inAnyAccept
+ -DnfaExecGough8_initCompressedState=avx512_nfaExecGough8_initCompressedState
+ -DnfaExecGough8_queueCompressState=avx512_nfaExecGough8_queueCompressState
+ -DnfaExecGough8_queueInitState=avx512_nfaExecGough8_queueInitState
+ -DnfaExecGough8_reportCurrent=avx512_nfaExecGough8_reportCurrent
+ -DnfaExecGough8_testEOD=avx512_nfaExecGough8_testEOD
+ -DnfaExecLbrDot_Q=avx512_nfaExecLbrDot_Q
+ -DnfaExecLbrDot_Q2=avx512_nfaExecLbrDot_Q2
+ -DnfaExecLbrDot_QR=avx512_nfaExecLbrDot_QR
+ -DnfaExecLbrDot_expandState=avx512_nfaExecLbrDot_expandState
+ -DnfaExecLbrDot_inAccept=avx512_nfaExecLbrDot_inAccept
+ -DnfaExecLbrDot_inAnyAccept=avx512_nfaExecLbrDot_inAnyAccept
+ -DnfaExecLbrDot_initCompressedState=avx512_nfaExecLbrDot_initCompressedState
+ -DnfaExecLbrDot_queueCompressState=avx512_nfaExecLbrDot_queueCompressState
+ -DnfaExecLbrDot_queueInitState=avx512_nfaExecLbrDot_queueInitState
+ -DnfaExecLbrDot_reportCurrent=avx512_nfaExecLbrDot_reportCurrent
+ -DnfaExecLbrNVerm_Q=avx512_nfaExecLbrNVerm_Q
+ -DnfaExecLbrNVerm_Q2=avx512_nfaExecLbrNVerm_Q2
+ -DnfaExecLbrNVerm_QR=avx512_nfaExecLbrNVerm_QR
+ -DnfaExecLbrNVerm_expandState=avx512_nfaExecLbrNVerm_expandState
+ -DnfaExecLbrNVerm_inAccept=avx512_nfaExecLbrNVerm_inAccept
+ -DnfaExecLbrNVerm_inAnyAccept=avx512_nfaExecLbrNVerm_inAnyAccept
+ -DnfaExecLbrNVerm_initCompressedState=avx512_nfaExecLbrNVerm_initCompressedState
+ -DnfaExecLbrNVerm_queueCompressState=avx512_nfaExecLbrNVerm_queueCompressState
+ -DnfaExecLbrNVerm_queueInitState=avx512_nfaExecLbrNVerm_queueInitState
+ -DnfaExecLbrNVerm_reportCurrent=avx512_nfaExecLbrNVerm_reportCurrent
+ -DnfaExecLbrShuf_Q=avx512_nfaExecLbrShuf_Q
+ -DnfaExecLbrShuf_Q2=avx512_nfaExecLbrShuf_Q2
+ -DnfaExecLbrShuf_QR=avx512_nfaExecLbrShuf_QR
+ -DnfaExecLbrShuf_expandState=avx512_nfaExecLbrShuf_expandState
+ -DnfaExecLbrShuf_inAccept=avx512_nfaExecLbrShuf_inAccept
+ -DnfaExecLbrShuf_inAnyAccept=avx512_nfaExecLbrShuf_inAnyAccept
+ -DnfaExecLbrShuf_initCompressedState=avx512_nfaExecLbrShuf_initCompressedState
+ -DnfaExecLbrShuf_queueCompressState=avx512_nfaExecLbrShuf_queueCompressState
+ -DnfaExecLbrShuf_queueInitState=avx512_nfaExecLbrShuf_queueInitState
+ -DnfaExecLbrShuf_reportCurrent=avx512_nfaExecLbrShuf_reportCurrent
+ -DnfaExecLbrTruf_Q=avx512_nfaExecLbrTruf_Q
+ -DnfaExecLbrTruf_Q2=avx512_nfaExecLbrTruf_Q2
+ -DnfaExecLbrTruf_QR=avx512_nfaExecLbrTruf_QR
+ -DnfaExecLbrTruf_expandState=avx512_nfaExecLbrTruf_expandState
+ -DnfaExecLbrTruf_inAccept=avx512_nfaExecLbrTruf_inAccept
+ -DnfaExecLbrTruf_inAnyAccept=avx512_nfaExecLbrTruf_inAnyAccept
+ -DnfaExecLbrTruf_initCompressedState=avx512_nfaExecLbrTruf_initCompressedState
+ -DnfaExecLbrTruf_queueCompressState=avx512_nfaExecLbrTruf_queueCompressState
+ -DnfaExecLbrTruf_queueInitState=avx512_nfaExecLbrTruf_queueInitState
+ -DnfaExecLbrTruf_reportCurrent=avx512_nfaExecLbrTruf_reportCurrent
+ -DnfaExecLbrVerm_Q=avx512_nfaExecLbrVerm_Q
+ -DnfaExecLbrVerm_Q2=avx512_nfaExecLbrVerm_Q2
+ -DnfaExecLbrVerm_QR=avx512_nfaExecLbrVerm_QR
+ -DnfaExecLbrVerm_expandState=avx512_nfaExecLbrVerm_expandState
+ -DnfaExecLbrVerm_inAccept=avx512_nfaExecLbrVerm_inAccept
+ -DnfaExecLbrVerm_inAnyAccept=avx512_nfaExecLbrVerm_inAnyAccept
+ -DnfaExecLbrVerm_initCompressedState=avx512_nfaExecLbrVerm_initCompressedState
+ -DnfaExecLbrVerm_queueCompressState=avx512_nfaExecLbrVerm_queueCompressState
+ -DnfaExecLbrVerm_queueInitState=avx512_nfaExecLbrVerm_queueInitState
+ -DnfaExecLbrVerm_reportCurrent=avx512_nfaExecLbrVerm_reportCurrent
+ -DnfaExecLimEx128_B_Reverse=avx512_nfaExecLimEx128_B_Reverse
+ -DnfaExecLimEx128_Q=avx512_nfaExecLimEx128_Q
+ -DnfaExecLimEx128_Q2=avx512_nfaExecLimEx128_Q2
+ -DnfaExecLimEx128_QR=avx512_nfaExecLimEx128_QR
+ -DnfaExecLimEx128_expandState=avx512_nfaExecLimEx128_expandState
+ -DnfaExecLimEx128_inAccept=avx512_nfaExecLimEx128_inAccept
+ -DnfaExecLimEx128_inAnyAccept=avx512_nfaExecLimEx128_inAnyAccept
+ -DnfaExecLimEx128_initCompressedState=avx512_nfaExecLimEx128_initCompressedState
+ -DnfaExecLimEx128_queueCompressState=avx512_nfaExecLimEx128_queueCompressState
+ -DnfaExecLimEx128_queueInitState=avx512_nfaExecLimEx128_queueInitState
+ -DnfaExecLimEx128_reportCurrent=avx512_nfaExecLimEx128_reportCurrent
+ -DnfaExecLimEx128_testEOD=avx512_nfaExecLimEx128_testEOD
+ -DnfaExecLimEx128_zombie_status=avx512_nfaExecLimEx128_zombie_status
+ -DnfaExecLimEx256_B_Reverse=avx512_nfaExecLimEx256_B_Reverse
+ -DnfaExecLimEx256_Q=avx512_nfaExecLimEx256_Q
+ -DnfaExecLimEx256_Q2=avx512_nfaExecLimEx256_Q2
+ -DnfaExecLimEx256_QR=avx512_nfaExecLimEx256_QR
+ -DnfaExecLimEx256_expandState=avx512_nfaExecLimEx256_expandState
+ -DnfaExecLimEx256_inAccept=avx512_nfaExecLimEx256_inAccept
+ -DnfaExecLimEx256_inAnyAccept=avx512_nfaExecLimEx256_inAnyAccept
+ -DnfaExecLimEx256_initCompressedState=avx512_nfaExecLimEx256_initCompressedState
+ -DnfaExecLimEx256_queueCompressState=avx512_nfaExecLimEx256_queueCompressState
+ -DnfaExecLimEx256_queueInitState=avx512_nfaExecLimEx256_queueInitState
+ -DnfaExecLimEx256_reportCurrent=avx512_nfaExecLimEx256_reportCurrent
+ -DnfaExecLimEx256_testEOD=avx512_nfaExecLimEx256_testEOD
+ -DnfaExecLimEx256_zombie_status=avx512_nfaExecLimEx256_zombie_status
+ -DnfaExecLimEx32_B_Reverse=avx512_nfaExecLimEx32_B_Reverse
+ -DnfaExecLimEx32_Q=avx512_nfaExecLimEx32_Q
+ -DnfaExecLimEx32_Q2=avx512_nfaExecLimEx32_Q2
+ -DnfaExecLimEx32_QR=avx512_nfaExecLimEx32_QR
+ -DnfaExecLimEx32_expandState=avx512_nfaExecLimEx32_expandState
+ -DnfaExecLimEx32_inAccept=avx512_nfaExecLimEx32_inAccept
+ -DnfaExecLimEx32_inAnyAccept=avx512_nfaExecLimEx32_inAnyAccept
+ -DnfaExecLimEx32_initCompressedState=avx512_nfaExecLimEx32_initCompressedState
+ -DnfaExecLimEx32_queueCompressState=avx512_nfaExecLimEx32_queueCompressState
+ -DnfaExecLimEx32_queueInitState=avx512_nfaExecLimEx32_queueInitState
+ -DnfaExecLimEx32_reportCurrent=avx512_nfaExecLimEx32_reportCurrent
+ -DnfaExecLimEx32_testEOD=avx512_nfaExecLimEx32_testEOD
+ -DnfaExecLimEx32_zombie_status=avx512_nfaExecLimEx32_zombie_status
+ -DnfaExecLimEx384_B_Reverse=avx512_nfaExecLimEx384_B_Reverse
+ -DnfaExecLimEx384_Q=avx512_nfaExecLimEx384_Q
+ -DnfaExecLimEx384_Q2=avx512_nfaExecLimEx384_Q2
+ -DnfaExecLimEx384_QR=avx512_nfaExecLimEx384_QR
+ -DnfaExecLimEx384_expandState=avx512_nfaExecLimEx384_expandState
+ -DnfaExecLimEx384_inAccept=avx512_nfaExecLimEx384_inAccept
+ -DnfaExecLimEx384_inAnyAccept=avx512_nfaExecLimEx384_inAnyAccept
+ -DnfaExecLimEx384_initCompressedState=avx512_nfaExecLimEx384_initCompressedState
+ -DnfaExecLimEx384_queueCompressState=avx512_nfaExecLimEx384_queueCompressState
+ -DnfaExecLimEx384_queueInitState=avx512_nfaExecLimEx384_queueInitState
+ -DnfaExecLimEx384_reportCurrent=avx512_nfaExecLimEx384_reportCurrent
+ -DnfaExecLimEx384_testEOD=avx512_nfaExecLimEx384_testEOD
+ -DnfaExecLimEx384_zombie_status=avx512_nfaExecLimEx384_zombie_status
+ -DnfaExecLimEx512_B_Reverse=avx512_nfaExecLimEx512_B_Reverse
+ -DnfaExecLimEx512_Q=avx512_nfaExecLimEx512_Q
+ -DnfaExecLimEx512_Q2=avx512_nfaExecLimEx512_Q2
+ -DnfaExecLimEx512_QR=avx512_nfaExecLimEx512_QR
+ -DnfaExecLimEx512_expandState=avx512_nfaExecLimEx512_expandState
+ -DnfaExecLimEx512_inAccept=avx512_nfaExecLimEx512_inAccept
+ -DnfaExecLimEx512_inAnyAccept=avx512_nfaExecLimEx512_inAnyAccept
+ -DnfaExecLimEx512_initCompressedState=avx512_nfaExecLimEx512_initCompressedState
+ -DnfaExecLimEx512_queueCompressState=avx512_nfaExecLimEx512_queueCompressState
+ -DnfaExecLimEx512_queueInitState=avx512_nfaExecLimEx512_queueInitState
+ -DnfaExecLimEx512_reportCurrent=avx512_nfaExecLimEx512_reportCurrent
+ -DnfaExecLimEx512_testEOD=avx512_nfaExecLimEx512_testEOD
+ -DnfaExecLimEx512_zombie_status=avx512_nfaExecLimEx512_zombie_status
+ -DnfaExecLimEx64_B_Reverse=avx512_nfaExecLimEx64_B_Reverse
+ -DnfaExecLimEx64_Q=avx512_nfaExecLimEx64_Q
+ -DnfaExecLimEx64_Q2=avx512_nfaExecLimEx64_Q2
+ -DnfaExecLimEx64_QR=avx512_nfaExecLimEx64_QR
+ -DnfaExecLimEx64_expandState=avx512_nfaExecLimEx64_expandState
+ -DnfaExecLimEx64_inAccept=avx512_nfaExecLimEx64_inAccept
+ -DnfaExecLimEx64_inAnyAccept=avx512_nfaExecLimEx64_inAnyAccept
+ -DnfaExecLimEx64_initCompressedState=avx512_nfaExecLimEx64_initCompressedState
+ -DnfaExecLimEx64_queueCompressState=avx512_nfaExecLimEx64_queueCompressState
+ -DnfaExecLimEx64_queueInitState=avx512_nfaExecLimEx64_queueInitState
+ -DnfaExecLimEx64_reportCurrent=avx512_nfaExecLimEx64_reportCurrent
+ -DnfaExecLimEx64_testEOD=avx512_nfaExecLimEx64_testEOD
+ -DnfaExecLimEx64_zombie_status=avx512_nfaExecLimEx64_zombie_status
+ -DnfaExecMcClellan16_B=avx512_nfaExecMcClellan16_B
+ -DnfaExecMcClellan16_Q=avx512_nfaExecMcClellan16_Q
+ -DnfaExecMcClellan16_Q2=avx512_nfaExecMcClellan16_Q2
+ -DnfaExecMcClellan16_QR=avx512_nfaExecMcClellan16_QR
+ -DnfaExecMcClellan16_SimpStream=avx512_nfaExecMcClellan16_SimpStream
+ -DnfaExecMcClellan16_expandState=avx512_nfaExecMcClellan16_expandState
+ -DnfaExecMcClellan16_inAccept=avx512_nfaExecMcClellan16_inAccept
+ -DnfaExecMcClellan16_inAnyAccept=avx512_nfaExecMcClellan16_inAnyAccept
+ -DnfaExecMcClellan16_initCompressedState=avx512_nfaExecMcClellan16_initCompressedState
+ -DnfaExecMcClellan16_queueCompressState=avx512_nfaExecMcClellan16_queueCompressState
+ -DnfaExecMcClellan16_queueInitState=avx512_nfaExecMcClellan16_queueInitState
+ -DnfaExecMcClellan16_reportCurrent=avx512_nfaExecMcClellan16_reportCurrent
+ -DnfaExecMcClellan16_testEOD=avx512_nfaExecMcClellan16_testEOD
+ -DnfaExecMcClellan8_B=avx512_nfaExecMcClellan8_B
+ -DnfaExecMcClellan8_Q=avx512_nfaExecMcClellan8_Q
+ -DnfaExecMcClellan8_Q2=avx512_nfaExecMcClellan8_Q2
+ -DnfaExecMcClellan8_QR=avx512_nfaExecMcClellan8_QR
+ -DnfaExecMcClellan8_SimpStream=avx512_nfaExecMcClellan8_SimpStream
+ -DnfaExecMcClellan8_expandState=avx512_nfaExecMcClellan8_expandState
+ -DnfaExecMcClellan8_inAccept=avx512_nfaExecMcClellan8_inAccept
+ -DnfaExecMcClellan8_inAnyAccept=avx512_nfaExecMcClellan8_inAnyAccept
+ -DnfaExecMcClellan8_initCompressedState=avx512_nfaExecMcClellan8_initCompressedState
+ -DnfaExecMcClellan8_queueCompressState=avx512_nfaExecMcClellan8_queueCompressState
+ -DnfaExecMcClellan8_queueInitState=avx512_nfaExecMcClellan8_queueInitState
+ -DnfaExecMcClellan8_reportCurrent=avx512_nfaExecMcClellan8_reportCurrent
+ -DnfaExecMcClellan8_testEOD=avx512_nfaExecMcClellan8_testEOD
+ -DnfaExecMcSheng16_Q=avx512_nfaExecMcSheng16_Q
+ -DnfaExecMcSheng16_Q2=avx512_nfaExecMcSheng16_Q2
+ -DnfaExecMcSheng16_QR=avx512_nfaExecMcSheng16_QR
+ -DnfaExecMcSheng16_expandState=avx512_nfaExecMcSheng16_expandState
+ -DnfaExecMcSheng16_inAccept=avx512_nfaExecMcSheng16_inAccept
+ -DnfaExecMcSheng16_inAnyAccept=avx512_nfaExecMcSheng16_inAnyAccept
+ -DnfaExecMcSheng16_initCompressedState=avx512_nfaExecMcSheng16_initCompressedState
+ -DnfaExecMcSheng16_queueCompressState=avx512_nfaExecMcSheng16_queueCompressState
+ -DnfaExecMcSheng16_queueInitState=avx512_nfaExecMcSheng16_queueInitState
+ -DnfaExecMcSheng16_reportCurrent=avx512_nfaExecMcSheng16_reportCurrent
+ -DnfaExecMcSheng16_testEOD=avx512_nfaExecMcSheng16_testEOD
+ -DnfaExecMcSheng8_Q=avx512_nfaExecMcSheng8_Q
+ -DnfaExecMcSheng8_Q2=avx512_nfaExecMcSheng8_Q2
+ -DnfaExecMcSheng8_QR=avx512_nfaExecMcSheng8_QR
+ -DnfaExecMcSheng8_expandState=avx512_nfaExecMcSheng8_expandState
+ -DnfaExecMcSheng8_inAccept=avx512_nfaExecMcSheng8_inAccept
+ -DnfaExecMcSheng8_inAnyAccept=avx512_nfaExecMcSheng8_inAnyAccept
+ -DnfaExecMcSheng8_initCompressedState=avx512_nfaExecMcSheng8_initCompressedState
+ -DnfaExecMcSheng8_queueCompressState=avx512_nfaExecMcSheng8_queueCompressState
+ -DnfaExecMcSheng8_queueInitState=avx512_nfaExecMcSheng8_queueInitState
+ -DnfaExecMcSheng8_reportCurrent=avx512_nfaExecMcSheng8_reportCurrent
+ -DnfaExecMcSheng8_testEOD=avx512_nfaExecMcSheng8_testEOD
+ -DnfaExecMpv_Q=avx512_nfaExecMpv_Q
+ -DnfaExecMpv_QueueExecRaw=avx512_nfaExecMpv_QueueExecRaw
+ -DnfaExecMpv_expandState=avx512_nfaExecMpv_expandState
+ -DnfaExecMpv_initCompressedState=avx512_nfaExecMpv_initCompressedState
+ -DnfaExecMpv_queueCompressState=avx512_nfaExecMpv_queueCompressState
+ -DnfaExecMpv_queueInitState=avx512_nfaExecMpv_queueInitState
+ -DnfaExecMpv_reportCurrent=avx512_nfaExecMpv_reportCurrent
+ -DnfaExecSheng_B=avx512_nfaExecSheng_B
+ -DnfaExecSheng_Q=avx512_nfaExecSheng_Q
+ -DnfaExecSheng_Q2=avx512_nfaExecSheng_Q2
+ -DnfaExecSheng_QR=avx512_nfaExecSheng_QR
+ -DnfaExecSheng_expandState=avx512_nfaExecSheng_expandState
+ -DnfaExecSheng_inAccept=avx512_nfaExecSheng_inAccept
+ -DnfaExecSheng_inAnyAccept=avx512_nfaExecSheng_inAnyAccept
+ -DnfaExecSheng_initCompressedState=avx512_nfaExecSheng_initCompressedState
+ -DnfaExecSheng_queueCompressState=avx512_nfaExecSheng_queueCompressState
+ -DnfaExecSheng_queueInitState=avx512_nfaExecSheng_queueInitState
+ -DnfaExecSheng_reportCurrent=avx512_nfaExecSheng_reportCurrent
+ -DnfaExecSheng_testEOD=avx512_nfaExecSheng_testEOD
+ -DnfaExecTamarama_Q=avx512_nfaExecTamarama_Q
+ -DnfaExecTamarama_Q2=avx512_nfaExecTamarama_Q2
+ -DnfaExecTamarama_QR=avx512_nfaExecTamarama_QR
+ -DnfaExecTamarama_expandState=avx512_nfaExecTamarama_expandState
+ -DnfaExecTamarama_inAccept=avx512_nfaExecTamarama_inAccept
+ -DnfaExecTamarama_inAnyAccept=avx512_nfaExecTamarama_inAnyAccept
+ -DnfaExecTamarama_queueCompressState=avx512_nfaExecTamarama_queueCompressState
+ -DnfaExecTamarama_queueInitState=avx512_nfaExecTamarama_queueInitState
+ -DnfaExecTamarama_reportCurrent=avx512_nfaExecTamarama_reportCurrent
+ -DnfaExecTamarama_testEOD=avx512_nfaExecTamarama_testEOD
+ -DnfaExecTamarama_zombie_status=avx512_nfaExecTamarama_zombie_status
+ -DnfaExpandState=avx512_nfaExpandState
+ -DnfaGetZombieStatus=avx512_nfaGetZombieStatus
+ -DnfaInAcceptState=avx512_nfaInAcceptState
+ -DnfaInAnyAcceptState=avx512_nfaInAnyAcceptState
+ -DnfaInitCompressedState=avx512_nfaInitCompressedState
+ -DnfaQueueCompressState=avx512_nfaQueueCompressState
+ -DnfaQueueExec=avx512_nfaQueueExec
+ -DnfaQueueExec2_raw=avx512_nfaQueueExec2_raw
+ -DnfaQueueExecRose=avx512_nfaQueueExecRose
+ -DnfaQueueExecToMatch=avx512_nfaQueueExecToMatch
+ -DnfaQueueExec_raw=avx512_nfaQueueExec_raw
+ -DnfaQueueInitState=avx512_nfaQueueInitState
+ -DnfaReportCurrentMatches=avx512_nfaReportCurrentMatches
+ -DnoodExec=avx512_noodExec
+ -DnoodExecStreaming=avx512_noodExecStreaming
+ -Dp_mask_arr=avx512_p_mask_arr
+ -Dp_mask_arr256=avx512_p_mask_arr256
+ -DrepeatHasMatchBitmap=avx512_repeatHasMatchBitmap
+ -DrepeatHasMatchRange=avx512_repeatHasMatchRange
+ -DrepeatHasMatchRing=avx512_repeatHasMatchRing
+ -DrepeatHasMatchSparseOptimalP=avx512_repeatHasMatchSparseOptimalP
+ -DrepeatHasMatchTrailer=avx512_repeatHasMatchTrailer
+ -DrepeatLastTopBitmap=avx512_repeatLastTopBitmap
+ -DrepeatLastTopRange=avx512_repeatLastTopRange
+ -DrepeatLastTopRing=avx512_repeatLastTopRing
+ -DrepeatLastTopSparseOptimalP=avx512_repeatLastTopSparseOptimalP
+ -DrepeatLastTopTrailer=avx512_repeatLastTopTrailer
+ -DrepeatNextMatchBitmap=avx512_repeatNextMatchBitmap
+ -DrepeatNextMatchRange=avx512_repeatNextMatchRange
+ -DrepeatNextMatchRing=avx512_repeatNextMatchRing
+ -DrepeatNextMatchSparseOptimalP=avx512_repeatNextMatchSparseOptimalP
+ -DrepeatNextMatchTrailer=avx512_repeatNextMatchTrailer
+ -DrepeatPack=avx512_repeatPack
+ -DrepeatStoreBitmap=avx512_repeatStoreBitmap
+ -DrepeatStoreRange=avx512_repeatStoreRange
+ -DrepeatStoreRing=avx512_repeatStoreRing
+ -DrepeatStoreSparseOptimalP=avx512_repeatStoreSparseOptimalP
+ -DrepeatStoreTrailer=avx512_repeatStoreTrailer
+ -DrepeatUnpack=avx512_repeatUnpack
+ -DroseAnchoredCallback=avx512_roseAnchoredCallback
+ -DroseBlockExec=avx512_roseBlockExec
+ -DroseCallback=avx512_roseCallback
+ -DroseCatchUpAll=avx512_roseCatchUpAll
+ -DroseCatchUpMPV_i=avx512_roseCatchUpMPV_i
+ -DroseCatchUpSuf=avx512_roseCatchUpSuf
+ -DroseDelayRebuildCallback=avx512_roseDelayRebuildCallback
+ -DroseFloatingCallback=avx512_roseFloatingCallback
+ -DroseHandleChainMatch=avx512_roseHandleChainMatch
+ -DroseInitState=avx512_roseInitState
+ -DroseNfaAdaptor=avx512_roseNfaAdaptor
+ -DroseNfaEarliestSom=avx512_roseNfaEarliestSom
+ -DroseReportAdaptor=avx512_roseReportAdaptor
+ -DroseRunBoundaryProgram=avx512_roseRunBoundaryProgram
+ -DroseRunFlushCombProgram=avx512_roseRunFlushCombProgram
+ -DroseRunLastFlushCombProgram=avx512_roseRunLastFlushCombProgram
+ -DroseRunProgram=avx512_roseRunProgram
+ -DroseRunProgram_l=avx512_roseRunProgram_l
+ -DroseStreamEodExec=avx512_roseStreamEodExec
+ -DroseStreamExec=avx512_roseStreamExec
+ -DrshuftiExec=avx512_rshuftiExec
+ -DrtruffleExec=avx512_rtruffleExec
+ -Drun_accel=avx512_run_accel
+ -DsetSomFromSomAware=avx512_setSomFromSomAware
+ -DshuftiDoubleExec=avx512_shuftiDoubleExec
+ -DshuftiExec=avx512_shuftiExec
+ -Dsimd_onebit_masks=avx512_simd_onebit_masks
+ -Dsize_compress_stream=avx512_size_compress_stream
+ -DstoreSomToStream=avx512_storeSomToStream
+ -Dstorecompressed128=avx512_storecompressed128
+ -Dstorecompressed256=avx512_storecompressed256
+ -Dstorecompressed32=avx512_storecompressed32
+ -Dstorecompressed384=avx512_storecompressed384
+ -Dstorecompressed512=avx512_storecompressed512
+ -Dstorecompressed64=avx512_storecompressed64
+ -DstreamInitSufPQ=avx512_streamInitSufPQ
+ -DtruffleExec=avx512_truffleExec
+ -Dvbs_mask_data=avx512_vbs_mask_data
+)
+
+SRCDIR(contrib/libs/hyperscan)
+
+SRCS(
+ src/alloc.c
+ src/crc32.c
+ src/database.c
+ src/fdr/fdr.c
+ src/fdr/teddy.c
+ src/fdr/teddy_avx2.c
+ src/hs_valid_platform.c
+ src/hs_version.c
+ src/hwlm/hwlm.c
+ src/hwlm/noodle_engine.c
+ src/nfa/accel.c
+ src/nfa/castle.c
+ src/nfa/gough.c
+ src/nfa/lbr.c
+ src/nfa/limex_64.c
+ src/nfa/limex_accel.c
+ src/nfa/limex_native.c
+ src/nfa/limex_simd128.c
+ src/nfa/limex_simd256.c
+ src/nfa/limex_simd384.c
+ src/nfa/limex_simd512.c
+ src/nfa/mcclellan.c
+ src/nfa/mcsheng.c
+ src/nfa/mcsheng_data.c
+ src/nfa/mpv.c
+ src/nfa/nfa_api_dispatch.c
+ src/nfa/repeat.c
+ src/nfa/sheng.c
+ src/nfa/shufti.c
+ src/nfa/tamarama.c
+ src/nfa/truffle.c
+ src/rose/block.c
+ src/rose/catchup.c
+ src/rose/init.c
+ src/rose/match.c
+ src/rose/program_runtime.c
+ src/rose/stream.c
+ src/runtime.c
+ src/scratch.c
+ src/som/som_runtime.c
+ src/som/som_stream.c
+ src/stream_compress.c
+ src/util/cpuid_flags.c
+ src/util/masked_move.c
+ src/util/multibit.c
+ src/util/simd_utils.c
+ src/util/state_compress.c
+)
+
+END()
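
The CFLAGS block above implements the symbol-prefixing scheme used by all of the runtime_* variants: every externally visible runtime symbol is renamed at preprocessing time via -Dname=avx512_name, so the same sources can be compiled once per instruction set (core2, corei7, avx2, avx512) and linked into a single binary without collisions. A minimal sketch of the mechanism, under assumed names — the files engine.c/dispatch.c and the functions scan_buf/scan_buf_dispatch are illustrative, not part of hyperscan:

    /* engine.c -- compiled once per ISA variant */
    #include <stddef.h>

    /* With -Dscan_buf=avx512_scan_buf on the command line, the preprocessor
     * rewrites this definition so the object file exports avx512_scan_buf. */
    int scan_buf(const char *data, size_t len) {
        (void)data;
        (void)len;
        return 0; /* ISA-specific implementation would go here */
    }

    /* dispatch.c -- selects a variant at run time */
    int core2_scan_buf(const char *data, size_t len);
    int avx512_scan_buf(const char *data, size_t len);

    int scan_buf_dispatch(const char *data, size_t len, int have_avx512) {
        return have_avx512 ? avx512_scan_buf(data, len)
                           : core2_scan_buf(data, len);
    }

Compiled roughly as:

    cc -c -Dscan_buf=avx512_scan_buf -mavx512f engine.c -o engine_avx512.o
    cc -c -Dscan_buf=core2_scan_buf engine.c -o engine_core2.o

Which prefixed symbol set actually runs is presumably chosen from CPUID results at startup; note that cpuid_flags.c itself is compiled into each variant under its own prefix (avx512_cpuid_flags above).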
diff --git a/contrib/libs/hyperscan/runtime_core2/.yandex_meta/licenses.list.txt b/contrib/libs/hyperscan/runtime_core2/.yandex_meta/licenses.list.txt
index 358c19fe4a..b2ced66bbd 100644
--- a/contrib/libs/hyperscan/runtime_core2/.yandex_meta/licenses.list.txt
+++ b/contrib/libs/hyperscan/runtime_core2/.yandex_meta/licenses.list.txt
@@ -1,32 +1,32 @@
-====================BSD-3-Clause====================
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2018, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2019, Intel Corporation
+====================BSD-3-Clause====================
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2018, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2019, Intel Corporation
diff --git a/contrib/libs/hyperscan/runtime_core2/hs_common.h b/contrib/libs/hyperscan/runtime_core2/hs_common.h
index d5fa5c69e8..bda6959659 100644
--- a/contrib/libs/hyperscan/runtime_core2/hs_common.h
+++ b/contrib/libs/hyperscan/runtime_core2/hs_common.h
@@ -1,596 +1,596 @@
-/*
- * Copyright (c) 2015-2019, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HS_CORE2_COMMON_H
-#define HS_CORE2_COMMON_H
-
-#if defined(_WIN32)
-#define HS_CDECL __cdecl
-#else
-#define HS_CDECL
-#endif
-#include <stdlib.h>
-
-/**
- * @file
- * @brief The Hyperscan common API definition.
- *
- * Hyperscan is a high-speed regular expression engine.
- *
- * This header contains functions available to both the Hyperscan compiler and
- * runtime.
- */
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-struct hs_database;
-
-/**
- * A Hyperscan pattern database.
- *
- * Generated by one of the Hyperscan compiler functions:
- * - @ref hs_compile()
- * - @ref hs_compile_multi()
- * - @ref hs_compile_ext_multi()
- */
-typedef struct hs_database hs_database_t;
-
-/**
- * A type for errors returned by Hyperscan functions.
- */
-typedef int hs_error_t;
-
-/**
- * Free a compiled pattern database.
- *
- * The free callback set by @ref hs_set_database_allocator() (or @ref
- * hs_set_allocator()) will be used by this function.
- *
- * @param db
- * A compiled pattern database. NULL may also be safely provided, in which
- * case the function does nothing.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_free_database(hs_database_t *db);
-
-/**
- * Serialize a pattern database to a stream of bytes.
- *
- * The allocator callback set by @ref hs_set_misc_allocator() (or @ref
- * hs_set_allocator()) will be used by this function.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param bytes
- * On success, a pointer to an array of bytes will be returned here.
- * These bytes can be subsequently relocated or written to disk. The
- * caller is responsible for freeing this block.
- *
- * @param length
- * On success, the number of bytes in the generated byte array will be
- * returned here.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_NOMEM if the byte array cannot be
- * allocated, other values may be returned if errors are detected.
- */
-hs_error_t core2_hs_serialize_database(const hs_database_t *db, char **bytes,
- size_t *length);
-
-/**
- * Reconstruct a pattern database from a stream of bytes previously generated
- * by @ref hs_serialize_database().
- *
- * This function will allocate sufficient space for the database using the
- * allocator set with @ref hs_set_database_allocator() (or @ref
- * hs_set_allocator()); to use a pre-allocated region of memory, use the @ref
- * hs_deserialize_database_at() function.
- *
- * @param bytes
- * A byte array generated by @ref hs_serialize_database() representing a
- * compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param db
- * On success, a pointer to a newly allocated @ref hs_database_t will be
- * returned here. This database can then be used for scanning, and
- * eventually freed by the caller using @ref hs_free_database().
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_deserialize_database(const char *bytes,
- const size_t length,
- hs_database_t **db);
-
-/**
- * Reconstruct a pattern database from a stream of bytes previously generated
- * by @ref hs_serialize_database() at a given memory location.
- *
- * This function (unlike @ref hs_deserialize_database()) will write the
- * reconstructed database to the memory location given in the @p db parameter.
- * The amount of space required at this location can be determined with the
- * @ref hs_serialized_database_size() function.
- *
- * @param bytes
- * A byte array generated by @ref hs_serialize_database() representing a
- * compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param db
- * Pointer to an 8-byte aligned block of memory of sufficient size to hold
- * the deserialized database. On success, the reconstructed database will
- * be written to this location. This database can then be used for pattern
- * matching. The user is responsible for freeing this memory; the @ref
- * hs_free_database() call should not be used.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_deserialize_database_at(const char *bytes,
- const size_t length,
- hs_database_t *db);
-
-/**
- * Provides the size of the stream state allocated by a single stream opened
- * against the given database.
- *
- * @param database
- * Pointer to a compiled (streaming mode) pattern database.
- *
- * @param stream_size
- * On success, the size in bytes of an individual stream opened against the
- * given database is placed in this parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_stream_size(const hs_database_t *database,
- size_t *stream_size);
-
-/**
- * Provides the size of the given database in bytes.
- *
- * @param database
- * Pointer to compiled pattern database.
- *
- * @param database_size
- * On success, the size of the compiled database in bytes is placed in this
- * parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_database_size(const hs_database_t *database,
- size_t *database_size);
-
-/**
- * Utility function for reporting the size that would be required by a
- * database if it were deserialized.
- *
- * This can be used to allocate a shared memory region or other "special"
- * allocation prior to deserializing with the @ref hs_deserialize_database_at()
- * function.
- *
- * @param bytes
- * Pointer to a byte array generated by @ref hs_serialize_database()
- * representing a compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param deserialized_size
- * On success, the size of the compiled database that would be generated
- * by @ref hs_deserialize_database_at() is returned here.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_serialized_database_size(const char *bytes,
- const size_t length,
- size_t *deserialized_size);
-
-/**
- * Utility function providing information about a database.
- *
- * @param database
- * Pointer to a compiled database.
- *
- * @param info
- * On success, a string containing the version and platform information for
- * the supplied database is placed in the parameter. The string is
- * allocated using the allocator supplied in @ref hs_set_misc_allocator()
- * (or malloc() if no allocator was set) and should be freed by the caller.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_database_info(const hs_database_t *database,
- char **info);
-
-/**
- * Utility function providing information about a serialized database.
- *
- * @param bytes
- * Pointer to a serialized database.
- *
- * @param length
- * Length in bytes of the serialized database.
- *
- * @param info
- * On success, a string containing the version and platform information
- * for the supplied serialized database is placed in the parameter. The
- * string is allocated using the allocator supplied in @ref
- * hs_set_misc_allocator() (or malloc() if no allocator was set) and
- * should be freed by the caller.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_serialized_database_info(const char *bytes,
- size_t length, char **info);
-
-/**
- * The type of the callback function that will be used by Hyperscan to allocate
- * more memory at runtime as required, for example in @ref hs_open_stream() to
- * allocate stream state.
- *
- * If Hyperscan is to be used in a multi-threaded or similarly concurrent
- * environment, the allocation function will need to be re-entrant, or
- * similarly safe for concurrent use.
- *
- * @param size
- * The number of bytes to allocate.
- * @return
- * A pointer to the region of memory allocated, or NULL on error.
- */
-typedef void *(HS_CDECL *hs_alloc_t)(size_t size);
-
-/**
- * The type of the callback function that will be used by Hyperscan to free
- * memory regions previously allocated using the @ref hs_alloc_t function.
- *
- * @param ptr
- * The region of memory to be freed.
- */
-typedef void (HS_CDECL *hs_free_t)(void *ptr);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating
- * memory at runtime for stream state, scratch space, database bytecode,
- * and various other data structures returned by the Hyperscan API.
- *
- * The function is equivalent to calling @ref hs_set_stream_allocator(),
- * @ref hs_set_scratch_allocator(), @ref hs_set_database_allocator() and
- * @ref hs_set_misc_allocator() with the provided parameters.
- *
- * This call will override any previous allocators that have been set.
- *
- * Note: there is no way to change the allocator used for temporary objects
- * created during the various compile calls (@ref hs_compile(), @ref
- * hs_compile_multi(), @ref hs_compile_ext_multi()).
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_set_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for database bytecode produced by the compile calls (@ref hs_compile(), @ref
- * hs_compile_multi(), @ref hs_compile_ext_multi()) and by database
- * deserialization (@ref hs_deserialize_database()).
- *
- * If no database allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous database allocators that have been set.
- *
- * Note: the database allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * Note: there is no way to change how temporary objects created during the
- * various compile calls (@ref hs_compile(), @ref hs_compile_multi(), @ref
- * hs_compile_ext_multi()) are allocated.
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_set_database_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for items returned by the Hyperscan API such as @ref hs_compile_error_t, @ref
- * hs_expr_info_t and serialized databases.
- *
- * If no misc allocation functions are set, or if NULL is used in place of both
- * parameters, then memory allocation will default to standard methods (such as
- * the system malloc() and free() calls).
- *
- * This call will override any previous misc allocators that have been set.
- *
- * Note: the misc allocator may also be set by calling @ref hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_set_misc_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for scratch space by @ref hs_alloc_scratch() and @ref hs_clone_scratch().
- *
- * If no scratch allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous scratch allocators that have been set.
- *
- * Note: the scratch allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_set_scratch_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for stream state by @ref hs_open_stream().
- *
- * If no stream allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous stream allocators that have been set.
- *
- * Note: the stream allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_set_stream_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
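/*
 * Editor's aside: a minimal sketch of installing custom allocators through
 * the hs_alloc_t/hs_free_t callbacks documented above. The names my_alloc
 * and my_free are illustrative; aligned_alloc (C11, with size rounded up to
 * a multiple of the alignment) is one way to satisfy the requirement that
 * returned memory be suitably aligned for the largest representable type:
 *
 *     #include <stdlib.h>
 *
 *     static void *my_alloc(size_t size) {
 *         return aligned_alloc(16, (size + 15) & ~(size_t)15);
 *     }
 *
 *     static void my_free(void *ptr) {
 *         free(ptr);
 *     }
 *
 *     ...
 *     hs_set_allocator(my_alloc, my_free);
 */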
-/**
- * Utility function for identifying this release version.
- *
- * @return
- * A string containing the version number of this release build and the
- * date of the build. It is allocated statically, so it does not need to
- * be freed by the caller.
- */
-const char * core2_hs_version(void);
-
-/**
- * Utility function to test the current system architecture.
- *
- * Hyperscan requires the Supplemental Streaming SIMD Extensions 3 instruction
- * set. This function can be called on any x86 platform to determine if the
- * system provides the required instruction set.
- *
- * This function does not test for more advanced features if Hyperscan has
- * been built for a more specific architecture, for example the AVX2
- * instruction set.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_ARCH_ERROR if the system does not
- * support Hyperscan.
- */
-hs_error_t core2_hs_valid_platform(void);
-
-/**
- * @defgroup HS_ERROR hs_error_t values
- *
- * @{
- */
-
-/**
- * The engine completed normally.
- */
-#define HS_SUCCESS 0
-
-/**
- * A parameter passed to this function was invalid.
- *
- * This error is only returned in cases where the function can detect an
- * invalid parameter -- it cannot be relied upon to detect (for example)
- * pointers to freed memory or other invalid data.
- */
-#define HS_INVALID (-1)
-
-/**
- * A memory allocation failed.
- */
-#define HS_NOMEM (-2)
-
-/**
- * The engine was terminated by callback.
- *
- * This return value indicates that the target buffer was partially scanned,
- * but that the callback function requested that scanning cease after a match
- * was located.
- */
-#define HS_SCAN_TERMINATED (-3)
-
-/**
- * The pattern compiler failed, and the @ref hs_compile_error_t should be
- * inspected for more detail.
- */
-#define HS_COMPILER_ERROR (-4)
-
-/**
- * The given database was built for a different version of Hyperscan.
- */
-#define HS_DB_VERSION_ERROR (-5)
-
-/**
- * The given database was built for a different platform (i.e., CPU type).
- */
-#define HS_DB_PLATFORM_ERROR (-6)
-
-/**
- * The given database was built for a different mode of operation. This error
- * is returned when streaming calls are used with a block or vectored database
- * and vice versa.
- */
-#define HS_DB_MODE_ERROR (-7)
-
-/**
- * A parameter passed to this function was not correctly aligned.
- */
-#define HS_BAD_ALIGN (-8)
-
-/**
- * The memory allocator (either malloc() or the allocator set with @ref
- * hs_set_allocator()) did not correctly return memory suitably aligned for the
- * largest representable data type on this platform.
- */
-#define HS_BAD_ALLOC (-9)
-
-/**
- * The scratch region was already in use.
- *
- * This error is returned when Hyperscan is able to detect that the scratch
- * region given is already in use by another Hyperscan API call.
- *
- * A separate scratch region, allocated with @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch(), is required for every concurrent caller of the Hyperscan
- * API.
- *
- * For example, this error might be returned when @ref hs_scan() has been
- * called inside a callback delivered by a currently-executing @ref hs_scan()
- * call using the same scratch region.
- *
- * Note: Not all concurrent uses of scratch regions may be detected. This error
- * is intended as a best-effort debugging tool, not a guarantee.
- */
-#define HS_SCRATCH_IN_USE (-10)
-
-/**
- * Unsupported CPU architecture.
- *
- * This error is returned when Hyperscan is able to detect that the current
- * system does not support the required instruction set.
- *
- * At a minimum, Hyperscan requires Supplemental Streaming SIMD Extensions 3
- * (SSSE3).
- */
-#define HS_ARCH_ERROR (-11)
-
-/**
- * Provided buffer was too small.
- *
- * This error indicates that there was insufficient space in the buffer. The
- * call should be repeated with a larger provided buffer.
- *
- * Note: in this situation, it is normal for the amount of space required to be
- * returned in the same manner as the used space would have been returned if the
- * call was successful.
- */
-#define HS_INSUFFICIENT_SPACE (-12)
-
-/**
- * Unexpected internal error.
- *
- * This error indicates that unexpected matching behavior was detected. This
- * could be related to invalid usage of stream or scratch space, or to invalid
- * memory operations by users.
- */
-#define HS_UNKNOWN_ERROR (-13)
-
-/** @} */
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* HS_CORE2_COMMON_H */
+/*
+ * Copyright (c) 2015-2019, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HS_CORE2_COMMON_H
+#define HS_CORE2_COMMON_H
+
+#if defined(_WIN32)
+#define HS_CDECL __cdecl
+#else
+#define HS_CDECL
+#endif
+#include <stdlib.h>
+
+/**
+ * @file
+ * @brief The Hyperscan common API definition.
+ *
+ * Hyperscan is a high-speed regular expression engine.
+ *
+ * This header contains functions available to both the Hyperscan compiler and
+ * runtime.
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+struct hs_database;
+
+/**
+ * A Hyperscan pattern database.
+ *
+ * Generated by one of the Hyperscan compiler functions:
+ * - @ref hs_compile()
+ * - @ref hs_compile_multi()
+ * - @ref hs_compile_ext_multi()
+ */
+typedef struct hs_database hs_database_t;
+
+/**
+ * A type for errors returned by Hyperscan functions.
+ */
+typedef int hs_error_t;
+
+/**
+ * Free a compiled pattern database.
+ *
+ * The free callback set by @ref hs_set_database_allocator() (or @ref
+ * hs_set_allocator()) will be used by this function.
+ *
+ * @param db
+ * A compiled pattern database. NULL may also be safely provided, in which
+ * case the function does nothing.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_free_database(hs_database_t *db);
+
+/**
+ * Serialize a pattern database to a stream of bytes.
+ *
+ * The allocator callback set by @ref hs_set_misc_allocator() (or @ref
+ * hs_set_allocator()) will be used by this function.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param bytes
+ * On success, a pointer to an array of bytes will be returned here.
+ * These bytes can be subsequently relocated or written to disk. The
+ * caller is responsible for freeing this block.
+ *
+ * @param length
+ * On success, the number of bytes in the generated byte array will be
+ * returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_NOMEM if the byte array cannot be
+ * allocated, other values may be returned if errors are detected.
+ */
+hs_error_t core2_hs_serialize_database(const hs_database_t *db, char **bytes,
+ size_t *length);
+
+/**
+ * Reconstruct a pattern database from a stream of bytes previously generated
+ * by @ref hs_serialize_database().
+ *
+ * This function will allocate sufficient space for the database using the
+ * allocator set with @ref hs_set_database_allocator() (or @ref
+ * hs_set_allocator()); to use a pre-allocated region of memory, use the @ref
+ * hs_deserialize_database_at() function.
+ *
+ * @param bytes
+ * A byte array generated by @ref hs_serialize_database() representing a
+ * compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param db
+ * On success, a pointer to a newly allocated @ref hs_database_t will be
+ * returned here. This database can then be used for scanning, and
+ * eventually freed by the caller using @ref hs_free_database().
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_deserialize_database(const char *bytes,
+ const size_t length,
+ hs_database_t **db);
+
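As a usage illustration (not part of the header): the serialize/deserialize pair above is the usual way to persist a compiled database or move it between processes on compatible hardware. The sketch below assumes `db` was compiled elsewhere (the compiler entry points are not declared in this runtime-only header), uses the core2_-prefixed names declared here, and assumes the default malloc-based allocators so the serialized buffer can be released with free(). The helper name is invented for the example.

#include <stdlib.h>

/* Sketch: round-trip a compiled database through its serialized form.
 * Returns 0 on success, -1 on any failure. */
static int roundtrip_database(const hs_database_t *db) {
    char *bytes = NULL;
    size_t length = 0;
    if (core2_hs_serialize_database(db, &bytes, &length) != HS_SUCCESS) {
        return -1;
    }
    /* `bytes`/`length` could now be written to disk or sent elsewhere. */
    hs_database_t *copy = NULL;
    hs_error_t err = core2_hs_deserialize_database(bytes, length, &copy);
    free(bytes); /* caller owns the serialized buffer (default allocator) */
    if (err != HS_SUCCESS) {
        return -1;
    }
    core2_hs_free_database(copy);
    return 0;
}
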
+/**
+ * Reconstruct a pattern database from a stream of bytes previously generated
+ * by @ref hs_serialize_database() at a given memory location.
+ *
+ * This function (unlike @ref hs_deserialize_database()) will write the
+ * reconstructed database to the memory location given in the @p db parameter.
+ * The amount of space required at this location can be determined with the
+ * @ref hs_serialized_database_size() function.
+ *
+ * @param bytes
+ * A byte array generated by @ref hs_serialize_database() representing a
+ * compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param db
+ * Pointer to an 8-byte aligned block of memory of sufficient size to hold
+ * the deserialized database. On success, the reconstructed database will
+ * be written to this location. This database can then be used for pattern
+ * matching. The user is responsible for freeing this memory; the @ref
+ * hs_free_database() call should not be used.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_deserialize_database_at(const char *bytes,
+ const size_t length,
+ hs_database_t *db);
+
+/**
+ * Provides the size of the stream state allocated by a single stream opened
+ * against the given database.
+ *
+ * @param database
+ * Pointer to a compiled (streaming mode) pattern database.
+ *
+ * @param stream_size
+ * On success, the size in bytes of an individual stream opened against the
+ * given database is placed in this parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_stream_size(const hs_database_t *database,
+ size_t *stream_size);
+
+/**
+ * Provides the size of the given database in bytes.
+ *
+ * @param database
+ * Pointer to compiled pattern database.
+ *
+ * @param database_size
+ * On success, the size of the compiled database in bytes is placed in this
+ * parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_database_size(const hs_database_t *database,
+ size_t *database_size);
+
+/**
+ * Utility function for reporting the size that would be required by a
+ * database if it were deserialized.
+ *
+ * This can be used to allocate a shared memory region or other "special"
+ * allocation prior to deserializing with the @ref hs_deserialize_database_at()
+ * function.
+ *
+ * @param bytes
+ * Pointer to a byte array generated by @ref hs_serialize_database()
+ * representing a compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param deserialized_size
+ * On success, the size of the compiled database that would be generated
+ * by @ref hs_deserialize_database_at() is returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_serialized_database_size(const char *bytes,
+ const size_t length,
+ size_t *deserialized_size);
+
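A hedged sketch of the pre-allocated variant: hs_serialized_database_size() reports the space hs_deserialize_database_at() needs, so the caller can place the database in memory it manages itself (a shared-memory segment, for instance). The helper name and the use of C11 aligned_alloc() are illustrative choices, not part of the API.

#include <stdlib.h>

/* Sketch: rebuild a serialized database into caller-owned, 8-byte aligned
 * memory. On success *mem_out holds the region the caller must eventually
 * free(); core2_hs_free_database() must NOT be used on such a database. */
static hs_database_t *place_database(const char *bytes, size_t length,
                                     void **mem_out) {
    size_t needed = 0;
    if (core2_hs_serialized_database_size(bytes, length, &needed) != HS_SUCCESS) {
        return NULL;
    }
    /* C11 aligned_alloc requires the size to be a multiple of the alignment,
     * so round up to the next multiple of 8. */
    void *mem = aligned_alloc(8, (needed + 7) & ~(size_t)7);
    if (mem == NULL) {
        return NULL;
    }
    hs_database_t *db = (hs_database_t *)mem;
    if (core2_hs_deserialize_database_at(bytes, length, db) != HS_SUCCESS) {
        free(mem);
        return NULL;
    }
    *mem_out = mem;
    return db;
}
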
+/**
+ * Utility function providing information about a database.
+ *
+ * @param database
+ * Pointer to a compiled database.
+ *
+ * @param info
+ * On success, a string containing the version and platform information for
+ * the supplied database is placed in the parameter. The string is
+ * allocated using the allocator supplied in @ref hs_set_misc_allocator()
+ * (or malloc() if no allocator was set) and should be freed by the caller.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_database_info(const hs_database_t *database,
+ char **info);
+
+/**
+ * Utility function providing information about a serialized database.
+ *
+ * @param bytes
+ * Pointer to a serialized database.
+ *
+ * @param length
+ * Length in bytes of the serialized database.
+ *
+ * @param info
+ * On success, a string containing the version and platform information
+ * for the supplied serialized database is placed in the parameter. The
+ * string is allocated using the allocator supplied in @ref
+ * hs_set_misc_allocator() (or malloc() if no allocator was set) and
+ * should be freed by the caller.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_serialized_database_info(const char *bytes,
+ size_t length, char **info);
+
+/**
+ * The type of the callback function that will be used by Hyperscan to allocate
+ * more memory at runtime as required, for example in @ref hs_open_stream() to
+ * allocate stream state.
+ *
+ * If Hyperscan is to be used in a multi-threaded, or similarly concurrent
+ * environment, the allocation function will need to be re-entrant, or
+ * similarly safe for concurrent use.
+ *
+ * @param size
+ * The number of bytes to allocate.
+ * @return
+ * A pointer to the region of memory allocated, or NULL on error.
+ */
+typedef void *(HS_CDECL *hs_alloc_t)(size_t size);
+
+/**
+ * The type of the callback function that will be used by Hyperscan to free
+ * memory regions previously allocated using the @ref hs_alloc_t function.
+ *
+ * @param ptr
+ * The region of memory to be freed.
+ */
+typedef void (HS_CDECL *hs_free_t)(void *ptr);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating
+ * memory at runtime for stream state, scratch space, database bytecode,
+ * and various other data structures returned by the Hyperscan API.
+ *
+ * The function is equivalent to calling @ref hs_set_stream_allocator(),
+ * @ref hs_set_scratch_allocator(), @ref hs_set_database_allocator() and
+ * @ref hs_set_misc_allocator() with the provided parameters.
+ *
+ * This call will override any previous allocators that have been set.
+ *
+ * Note: there is no way to change the allocator used for temporary objects
+ * created during the various compile calls (@ref hs_compile(), @ref
+ * hs_compile_multi(), @ref hs_compile_ext_multi()).
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_set_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
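For illustration, a minimal tracking allocator built on the typedefs above; the counter and function names are invented for the example, and the callbacks use HS_CDECL to match hs_alloc_t/hs_free_t. As written the counter is not thread-safe; a real allocator for concurrent use would need atomics.

#include <stdlib.h>

/* Sketch: count live allocations made by the library. malloc() returns
 * memory suitably aligned for any fundamental type, as required above. */
static size_t g_live_allocations; /* illustration only; not thread-safe */

static void *HS_CDECL counting_alloc(size_t size) {
    void *p = malloc(size);
    if (p != NULL) {
        ++g_live_allocations;
    }
    return p;
}

static void HS_CDECL counting_free(void *ptr) {
    if (ptr != NULL) {
        --g_live_allocations;
    }
    free(ptr);
}

/* Install before any call that allocates (scratch, streams, etc.). */
static hs_error_t install_counting_allocator(void) {
    return core2_hs_set_allocator(counting_alloc, counting_free);
}
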
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for database bytecode produced by the compile calls (@ref hs_compile(), @ref
+ * hs_compile_multi(), @ref hs_compile_ext_multi()) and by database
+ * deserialization (@ref hs_deserialize_database()).
+ *
+ * If no database allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous database allocators that have been set.
+ *
+ * Note: the database allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * Note: there is no way to change how temporary objects created during the
+ * various compile calls (@ref hs_compile(), @ref hs_compile_multi(), @ref
+ * hs_compile_ext_multi()) are allocated.
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_set_database_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for items returned by the Hyperscan API such as @ref hs_compile_error_t, @ref
+ * hs_expr_info_t and serialized databases.
+ *
+ * If no misc allocation functions are set, or if NULL is used in place of both
+ * parameters, then memory allocation will default to standard methods (such as
+ * the system malloc() and free() calls).
+ *
+ * This call will override any previous misc allocators that have been set.
+ *
+ * Note: the misc allocator may also be set by calling @ref hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_set_misc_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for scratch space by @ref hs_alloc_scratch() and @ref hs_clone_scratch().
+ *
+ * If no scratch allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous scratch allocators that have been set.
+ *
+ * Note: the scratch allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_set_scratch_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for stream state by @ref hs_open_stream().
+ *
+ * If no stream allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous stream allocators that have been set.
+ *
+ * Note: the stream allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_set_stream_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Utility function for identifying this release version.
+ *
+ * @return
+ * A string containing the version number of this release build and the
+ * date of the build. It is allocated statically, so it does not need to
+ * be freed by the caller.
+ */
+const char * core2_hs_version(void);
+
+/**
+ * Utility function to test the current system architecture.
+ *
+ * Hyperscan requires the Supplemental Streaming SIMD Extensions 3 instruction
+ * set. This function can be called on any x86 platform to determine if the
+ * system provides the required instruction set.
+ *
+ * This function does not test for more advanced features if Hyperscan has
+ * been built for a more specific architecture, for example the AVX2
+ * instruction set.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_ARCH_ERROR if system does not
+ * support Hyperscan.
+ */
+hs_error_t core2_hs_valid_platform(void);
+
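A plausible startup check combining the two utility functions above (sketch only; the helper name is invented):

#include <stdio.h>

/* Sketch: verify the CPU before any scanning work and log the library
 * version. The version string is statically allocated, so it is not freed. */
static int check_hyperscan_runtime(void) {
    if (core2_hs_valid_platform() != HS_SUCCESS) {
        fprintf(stderr, "CPU lacks SSSE3; Hyperscan cannot run here\n");
        return -1;
    }
    printf("using Hyperscan %s\n", core2_hs_version());
    return 0;
}
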
+/**
+ * @defgroup HS_ERROR hs_error_t values
+ *
+ * @{
+ */
+
+/**
+ * The engine completed normally.
+ */
+#define HS_SUCCESS 0
+
+/**
+ * A parameter passed to this function was invalid.
+ *
+ * This error is only returned in cases where the function can detect an
+ * invalid parameter -- it cannot be relied upon to detect (for example)
+ * pointers to freed memory or other invalid data.
+ */
+#define HS_INVALID (-1)
+
+/**
+ * A memory allocation failed.
+ */
+#define HS_NOMEM (-2)
+
+/**
+ * The engine was terminated by callback.
+ *
+ * This return value indicates that the target buffer was partially scanned,
+ * but that the callback function requested that scanning cease after a match
+ * was located.
+ */
+#define HS_SCAN_TERMINATED (-3)
+
+/**
+ * The pattern compiler failed, and the @ref hs_compile_error_t should be
+ * inspected for more detail.
+ */
+#define HS_COMPILER_ERROR (-4)
+
+/**
+ * The given database was built for a different version of Hyperscan.
+ */
+#define HS_DB_VERSION_ERROR (-5)
+
+/**
+ * The given database was built for a different platform (i.e., CPU type).
+ */
+#define HS_DB_PLATFORM_ERROR (-6)
+
+/**
+ * The given database was built for a different mode of operation. This error
+ * is returned when streaming calls are used with a block or vectored database
+ * and vice versa.
+ */
+#define HS_DB_MODE_ERROR (-7)
+
+/**
+ * A parameter passed to this function was not correctly aligned.
+ */
+#define HS_BAD_ALIGN (-8)
+
+/**
+ * The memory allocator (either malloc() or the allocator set with @ref
+ * hs_set_allocator()) did not correctly return memory suitably aligned for the
+ * largest representable data type on this platform.
+ */
+#define HS_BAD_ALLOC (-9)
+
+/**
+ * The scratch region was already in use.
+ *
+ * This error is returned when Hyperscan is able to detect that the scratch
+ * region given is already in use by another Hyperscan API call.
+ *
+ * A separate scratch region, allocated with @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch(), is required for every concurrent caller of the Hyperscan
+ * API.
+ *
+ * For example, this error might be returned when @ref hs_scan() has been
+ * called inside a callback delivered by a currently-executing @ref hs_scan()
+ * call using the same scratch region.
+ *
+ * Note: Not all concurrent uses of scratch regions may be detected. This error
+ * is intended as a best-effort debugging tool, not a guarantee.
+ */
+#define HS_SCRATCH_IN_USE (-10)
+
+/**
+ * Unsupported CPU architecture.
+ *
+ * This error is returned when Hyperscan is able to detect that the current
+ * system does not support the required instruction set.
+ *
+ * At a minimum, Hyperscan requires Supplemental Streaming SIMD Extensions 3
+ * (SSSE3).
+ */
+#define HS_ARCH_ERROR (-11)
+
+/**
+ * Provided buffer was too small.
+ *
+ * This error indicates that there was insufficient space in the buffer. The
+ * call should be repeated with a larger provided buffer.
+ *
+ * Note: in this situation, it is normal for the amount of space required to be
+ * returned in the same manner as the used space would have been returned if the
+ * call was successful.
+ */
+#define HS_INSUFFICIENT_SPACE (-12)
+
+/**
+ * Unexpected internal error.
+ *
+ * This error indicates that unexpected matching behavior was detected. This
+ * could be related to invalid usage of stream or scratch space, or to invalid
+ * memory operations by users.
+ */
+#define HS_UNKNOWN_ERROR (-13)
+
+/** @} */
+
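The codes above are plain negative integers, which makes logs hard to read; a small name-mapping helper like the hypothetical one below is often handy. The header itself provides no such function.

/* Sketch: map the hs_error_t values above to symbolic names for diagnostics. */
static const char *hs_error_name(hs_error_t err) {
    switch (err) {
    case HS_SUCCESS:            return "HS_SUCCESS";
    case HS_INVALID:            return "HS_INVALID";
    case HS_NOMEM:              return "HS_NOMEM";
    case HS_SCAN_TERMINATED:    return "HS_SCAN_TERMINATED";
    case HS_COMPILER_ERROR:     return "HS_COMPILER_ERROR";
    case HS_DB_VERSION_ERROR:   return "HS_DB_VERSION_ERROR";
    case HS_DB_PLATFORM_ERROR:  return "HS_DB_PLATFORM_ERROR";
    case HS_DB_MODE_ERROR:      return "HS_DB_MODE_ERROR";
    case HS_BAD_ALIGN:          return "HS_BAD_ALIGN";
    case HS_BAD_ALLOC:          return "HS_BAD_ALLOC";
    case HS_SCRATCH_IN_USE:     return "HS_SCRATCH_IN_USE";
    case HS_ARCH_ERROR:         return "HS_ARCH_ERROR";
    case HS_INSUFFICIENT_SPACE: return "HS_INSUFFICIENT_SPACE";
    case HS_UNKNOWN_ERROR:      return "HS_UNKNOWN_ERROR";
    default:                    return "unrecognized hs_error_t";
    }
}
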
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* HS_CORE2_COMMON_H */
diff --git a/contrib/libs/hyperscan/runtime_core2/hs_runtime.h b/contrib/libs/hyperscan/runtime_core2/hs_runtime.h
index 656ceb0a76..2504a16186 100644
--- a/contrib/libs/hyperscan/runtime_core2/hs_runtime.h
+++ b/contrib/libs/hyperscan/runtime_core2/hs_runtime.h
@@ -1,621 +1,621 @@
-/*
- * Copyright (c) 2015-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HS_CORE2_RUNTIME_H
-#define HS_CORE2_RUNTIME_H
-
-#include <stdlib.h>
-
-/**
- * @file
- * @brief The Hyperscan runtime API definition.
- *
- * Hyperscan is a high-speed regular expression engine.
- *
- * This header contains functions for using compiled Hyperscan databases for
- * scanning data at runtime.
- */
-
-#include "hs_common.h"
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/**
- * Definition of the stream identifier type.
- */
-struct hs_stream;
-
-/**
- * The stream identifier returned by @ref hs_open_stream().
- */
-typedef struct hs_stream hs_stream_t;
-
-struct hs_scratch;
-
-/**
- * A Hyperscan scratch space.
- */
-typedef struct hs_scratch hs_scratch_t;
-
-/**
- * Definition of the match event callback function type.
- *
- * A callback function matching the defined type must be provided by the
- * application calling the @ref hs_scan(), @ref hs_scan_vector() or @ref
- * hs_scan_stream() functions (or other streaming calls which can produce
- * matches).
- *
- * This callback function will be invoked whenever a match is located in the
- * target data during the execution of a scan. The details of the match are
- * passed in as parameters to the callback function, and the callback function
- * should return a value indicating whether or not matching should continue on
- * the target data. If no callbacks are desired from a scan call, NULL may be
- * provided in order to suppress match production.
- *
- * This callback function should not attempt to call Hyperscan API functions on
- * the same stream nor should it attempt to reuse the scratch space allocated
- * for the API calls that caused it to be triggered. Making another call to the
- * Hyperscan library with completely independent parameters should work (for
- * example, scanning a different database in a new stream and with new scratch
- * space), but reusing data structures like stream state and/or scratch space
- * will produce undefined behavior.
- *
- * @param id
- * The ID number of the expression that matched. If the expression was a
- * single expression compiled with @ref hs_compile(), this value will be
- * zero.
- *
- * @param from
- * - If a start of match flag is enabled for the current pattern, this
- *        argument will be set to the start of match for the pattern, assuming
- *        that the start of match value lies within the current 'start of match
- *        horizon' chosen by one of the SOM_HORIZON mode flags.
- *
- *      - If the start of match value lies outside this horizon (possible only
- *        when the SOM_HORIZON value is not @ref HS_MODE_SOM_HORIZON_LARGE),
- *        the @p from value will be set to @ref HS_OFFSET_PAST_HORIZON.
- *
- * - This argument will be set to zero if the Start of Match flag is not
- * enabled for the given pattern.
- *
- * @param to
- * The offset after the last byte that matches the expression.
- *
- * @param flags
- * This is provided for future use and is unused at present.
- *
- * @param context
- * The pointer supplied by the user to the @ref hs_scan(), @ref
- * hs_scan_vector() or @ref hs_scan_stream() function.
- *
- * @return
- * Non-zero if the matching should cease, else zero. If scanning is
- * performed in streaming mode and a non-zero value is returned, any
- * subsequent calls to @ref hs_scan_stream() for that stream will
- * immediately return with @ref HS_SCAN_TERMINATED.
- */
-typedef int (HS_CDECL *match_event_handler)(unsigned int id,
- unsigned long long from,
- unsigned long long to,
- unsigned int flags,
- void *context);
-
-/**
- * Open and initialise a stream.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param stream
- * On success, a pointer to the generated @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_open_stream(const hs_database_t *db, unsigned int flags,
- hs_stream_t **stream);
-
-/**
- * Write data to be scanned to the opened stream.
- *
- * This is the function call in which the actual pattern matching takes place
- * as data is written to the stream. Matches will be returned via the @ref
- * match_event_handler callback supplied.
- *
- * @param id
- * The stream ID (returned by @ref hs_open_stream()) to which the data
- * will be written.
- *
- * @param data
- * Pointer to the data to be scanned.
- *
- * @param length
- * The number of bytes to scan.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch().
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param ctxt
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
- * match callback indicated that scanning should stop; other values on
- * error.
- */
-hs_error_t core2_hs_scan_stream(hs_stream_t *id, const char *data,
- unsigned int length, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *ctxt);
-
-/**
- * Close a stream.
- *
- * This function completes matching on the given stream and frees the memory
- * associated with the stream state. After this call, the stream pointed to by
- * @p id is invalid and can no longer be used. To reuse the stream state after
- * completion, rather than closing it, the @ref hs_reset_stream function can be
- * used.
- *
- * This function must be called for any stream created with @ref
- * hs_open_stream(), even if scanning has been terminated by a non-zero return
- * from the match callback function.
- *
- * Note: This operation may result in matches being returned (via calls to the
- * match event callback) for expressions anchored to the end of the data stream
- * (for example, via the use of the `$` meta-character). If these matches are
- * not desired, NULL may be provided as the @ref match_event_handler callback.
- *
- * If NULL is provided as the @ref match_event_handler callback, it is
- * permissible to provide a NULL scratch.
- *
- * @param id
- * The stream ID returned by @ref hs_open_stream().
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param ctxt
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * Returns @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_close_stream(hs_stream_t *id, hs_scratch_t *scratch,
- match_event_handler onEvent, void *ctxt);
-
-/**
- * Reset a stream to an initial state.
- *
- * Conceptually, this is equivalent to performing @ref hs_close_stream() on the
- * given stream, followed by a @ref hs_open_stream(). This new stream replaces
- * the original stream in memory, avoiding the overhead of freeing the old
- * stream and allocating the new one.
- *
- * Note: This operation may result in matches being returned (via calls to the
- * match event callback) for expressions anchored to the end of the original
- * data stream (for example, via the use of the `$` meta-character). If these
- * matches are not desired, NULL may be provided as the @ref match_event_handler
- * callback.
- *
- * Note: the stream will also be tied to the same database.
- *
- * @param id
- * The stream (as created by @ref hs_open_stream()) to be replaced.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_reset_stream(hs_stream_t *id, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *context);
-
-/**
- * Duplicate the given stream. The new stream will have the same state as the
- * original including the current stream offset.
- *
- * @param to_id
- * On success, a pointer to the new, copied @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param from_id
- * The stream (as created by @ref hs_open_stream()) to be copied.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_copy_stream(hs_stream_t **to_id,
- const hs_stream_t *from_id);
-
-/**
- * Duplicate the given 'from' stream state onto the 'to' stream. The 'to' stream
- * will first be reset (reporting any EOD matches if a non-NULL @p onEvent
- * callback handler is provided).
- *
- * Note: the 'to' stream and the 'from' stream must be open against the same
- * database.
- *
- * @param to_id
- * On success, a pointer to the new, copied @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param from_id
- * The stream (as created by @ref hs_open_stream()) to be copied.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_reset_and_copy_stream(hs_stream_t *to_id,
- const hs_stream_t *from_id,
- hs_scratch_t *scratch,
- match_event_handler onEvent,
- void *context);
-
-/**
- * Creates a compressed representation of the provided stream in the buffer
- * provided. This compressed representation can be converted back into a stream
- * state by using @ref hs_expand_stream() or @ref hs_reset_and_expand_stream().
- * The size of the compressed representation will be placed into @p used_space.
- *
- * If there is not sufficient space in the buffer to hold the compressed
- * representation, @ref HS_INSUFFICIENT_SPACE will be returned and @p used_space
- * will be populated with the amount of space required.
- *
- * Note: this function does not close the provided stream; you may continue to
- * use the stream, or free it with @ref hs_close_stream().
- *
- * @param stream
- * The stream (as created by @ref hs_open_stream()) to be compressed.
- *
- * @param buf
- * Buffer to write the compressed representation into. Note: if the call is
- * just being used to determine the amount of space required, it is allowed
- * to pass NULL here and @p buf_space as 0.
- *
- * @param buf_space
- * The number of bytes in @p buf. If buf_space is too small, the call will
- * fail with @ref HS_INSUFFICIENT_SPACE.
- *
- * @param used_space
- * Pointer to where the amount of used space will be written to. The used
- * buffer space is always less than or equal to @p buf_space. If the call
- * fails with @ref HS_INSUFFICIENT_SPACE, this pointer will be used to
- * write out the amount of buffer space required.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_INSUFFICIENT_SPACE if the provided
- * buffer is too small.
- */
-hs_error_t core2_hs_compress_stream(const hs_stream_t *stream, char *buf,
- size_t buf_space, size_t *used_space);
-
-/**
- * Decompresses a compressed representation created by @ref hs_compress_stream()
- * into a new stream.
- *
- * Note: @p buf must correspond to a complete compressed representation created
- * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
- * not always possible to detect misuse of this API and behaviour is undefined
- * if these properties are not satisfied.
- *
- * @param db
- * The compiled pattern database that the compressed stream was opened
- * against.
- *
- * @param stream
- * On success, a pointer to the expanded @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param buf
- * A compressed representation of a stream. These compressed forms are
- * created by @ref hs_compress_stream().
- *
- * @param buf_size
- * The size in bytes of the compressed representation.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_expand_stream(const hs_database_t *db,
- hs_stream_t **stream, const char *buf,
- size_t buf_size);
-
-/**
- * Decompresses a compressed representation created by @ref hs_compress_stream()
- * on top of the 'to' stream. The 'to' stream will first be reset (reporting
- * any EOD matches if a non-NULL @p onEvent callback handler is provided).
- *
- * Note: the 'to' stream must be opened against the same database as the
- * compressed stream.
- *
- * Note: @p buf must correspond to a complete compressed representation created
- * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
- * not always possible to detect misuse of this API and behaviour is undefined
- * if these properties are not satisfied.
- *
- * @param to_stream
- * A pointer to a valid stream state. A pointer to the expanded @ref
- * hs_stream_t will be returned; NULL on failure.
- *
- * @param buf
- * A compressed representation of a stream. These compressed forms are
- * created by @ref hs_compress_stream().
- *
- * @param buf_size
- * The size in bytes of the compressed representation.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_reset_and_expand_stream(hs_stream_t *to_stream,
- const char *buf, size_t buf_size,
- hs_scratch_t *scratch,
- match_event_handler onEvent,
- void *context);
-
-/**
- * The block (non-streaming) regular expression scanner.
- *
- * This is the function call in which the actual pattern matching takes place
- * for block-mode pattern databases.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param data
- * Pointer to the data to be scanned.
- *
- * @param length
- * The number of bytes to scan.
- *
- * @param flags
- * Flags modifying the behaviour of this function. This parameter is
- * provided for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() for this
- * database.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
- * match callback indicated that scanning should stop; other values on
- * error.
- */
-hs_error_t core2_hs_scan(const hs_database_t *db, const char *data,
- unsigned int length, unsigned int flags,
- hs_scratch_t *scratch, match_event_handler onEvent,
- void *context);
-
-/**
- * The vectored regular expression scanner.
- *
- * This is the function call in which the actual pattern matching takes place
- * for vectoring-mode pattern databases.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param data
- * An array of pointers to the data blocks to be scanned.
- *
- * @param length
- * An array of lengths (in bytes) of each data block to scan.
- *
- * @param count
- *        Number of data blocks to scan. This should correspond to the size
- *        of the @p data and @p length arrays.
- *
- * @param flags
- * Flags modifying the behaviour of this function. This parameter is
- * provided for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() for
- * this database.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the match
- * callback indicated that scanning should stop; other values on error.
- */
-hs_error_t core2_hs_scan_vector(const hs_database_t *db,
- const char *const *data,
- const unsigned int *length,
- unsigned int count, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *context);
-
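Illustrative only: vectored scanning treats the supplied blocks as if they were concatenated, so a match may span block boundaries. The sketch assumes `db` was compiled for vectored operation and that `scratch` and `on_match` come from the caller; the helper name is invented.

/* Sketch: scan two non-contiguous buffers as one logical sequence. */
static hs_error_t scan_two_pieces(const hs_database_t *db,
                                  hs_scratch_t *scratch,
                                  match_event_handler on_match) {
    const char *const pieces[2] = { "hello ", "world" };
    const unsigned int lengths[2] = { 6, 5 };
    return core2_hs_scan_vector(db, pieces, lengths, 2, 0, scratch,
                                on_match, NULL);
}
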
-/**
- * Allocate a "scratch" space for use by Hyperscan.
- *
- * This is required for runtime use, and one scratch space per thread, or
- * concurrent caller, is required. Any allocator callback set by @ref
- * hs_set_scratch_allocator() or @ref hs_set_allocator() will be used by this
- * function.
- *
- * @param db
- * The database, as produced by @ref hs_compile().
- *
- * @param scratch
- * On first allocation, a pointer to NULL should be provided so a new
- * scratch can be allocated. If a scratch block has been previously
- * allocated, then a pointer to it should be passed back in to see if it
- * is valid for this database block. If a new scratch block is required,
- * the original will be freed and the new one returned, otherwise the
- * previous scratch block will be returned. On success, the scratch block
- * will be suitable for use with the provided database in addition to any
- * databases that original scratch space was suitable for.
- *
- * @return
- * @ref HS_SUCCESS on successful allocation; @ref HS_NOMEM if the
- * allocation fails. Other errors may be returned if invalid parameters
- * are specified.
- */
-hs_error_t core2_hs_alloc_scratch(const hs_database_t *db,
- hs_scratch_t **scratch);
-
-/**
- * Allocate a scratch space that is a clone of an existing scratch space.
- *
- * This is useful when multiple concurrent threads will be using the same set
- * of compiled databases, and another scratch space is required. Any allocator
- * callback set by @ref hs_set_scratch_allocator() or @ref hs_set_allocator()
- * will be used by this function.
- *
- * @param src
- * The existing @ref hs_scratch_t to be cloned.
- *
- * @param dest
- * A pointer to the new scratch space will be returned here.
- *
- * @return
- * @ref HS_SUCCESS on success; @ref HS_NOMEM if the allocation fails.
- * Other errors may be returned if invalid parameters are specified.
- */
-hs_error_t core2_hs_clone_scratch(const hs_scratch_t *src,
- hs_scratch_t **dest);
-
-/**
- * Provides the size of the given scratch space.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch().
- *
- * @param scratch_size
- * On success, the size of the scratch space in bytes is placed in this
- * parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_scratch_size(const hs_scratch_t *scratch,
- size_t *scratch_size);
-
-/**
- * Free a scratch block previously allocated by @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch().
- *
- * The free callback set by @ref hs_set_scratch_allocator() or @ref
- * hs_set_allocator() will be used by this function.
- *
- * @param scratch
- * The scratch block to be freed. NULL may also be safely provided.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t core2_hs_free_scratch(hs_scratch_t *scratch);
-
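A hedged sketch of the per-thread scratch discipline described above: allocate one scratch against the database, clone it for each extra worker, and free both when done. Thread management is elided and the helper name is invented.

/* Sketch: one scratch for the main thread plus a clone for a worker. */
static hs_error_t make_worker_scratches(const hs_database_t *db,
                                        hs_scratch_t **main_scratch,
                                        hs_scratch_t **worker_scratch) {
    *main_scratch = NULL;
    *worker_scratch = NULL;
    hs_error_t err = core2_hs_alloc_scratch(db, main_scratch);
    if (err != HS_SUCCESS) {
        return err;
    }
    err = core2_hs_clone_scratch(*main_scratch, worker_scratch);
    if (err != HS_SUCCESS) {
        core2_hs_free_scratch(*main_scratch);
        *main_scratch = NULL;
    }
    return err;
}
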
-/**
- * Callback 'from' return value, indicating that the start of this match was
- * too early to be tracked with the requested SOM_HORIZON precision.
- */
-#define HS_OFFSET_PAST_HORIZON (~0ULL)
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* HS_CORE2_RUNTIME_H */
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HS_CORE2_RUNTIME_H
+#define HS_CORE2_RUNTIME_H
+
+#include <stdlib.h>
+
+/**
+ * @file
+ * @brief The Hyperscan runtime API definition.
+ *
+ * Hyperscan is a high-speed regular expression engine.
+ *
+ * This header contains functions for using compiled Hyperscan databases for
+ * scanning data at runtime.
+ */
+
+#include "hs_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Definition of the stream identifier type.
+ */
+struct hs_stream;
+
+/**
+ * The stream identifier returned by @ref hs_open_stream().
+ */
+typedef struct hs_stream hs_stream_t;
+
+struct hs_scratch;
+
+/**
+ * A Hyperscan scratch space.
+ */
+typedef struct hs_scratch hs_scratch_t;
+
+/**
+ * Definition of the match event callback function type.
+ *
+ * A callback function matching the defined type must be provided by the
+ * application calling the @ref hs_scan(), @ref hs_scan_vector() or @ref
+ * hs_scan_stream() functions (or other streaming calls which can produce
+ * matches).
+ *
+ * This callback function will be invoked whenever a match is located in the
+ * target data during the execution of a scan. The details of the match are
+ * passed in as parameters to the callback function, and the callback function
+ * should return a value indicating whether or not matching should continue on
+ * the target data. If no callbacks are desired from a scan call, NULL may be
+ * provided in order to suppress match production.
+ *
+ * This callback function should not attempt to call Hyperscan API functions on
+ * the same stream nor should it attempt to reuse the scratch space allocated
+ * for the API calls that caused it to be triggered. Making another call to the
+ * Hyperscan library with completely independent parameters should work (for
+ * example, scanning a different database in a new stream and with new scratch
+ * space), but reusing data structures like stream state and/or scratch space
+ * will produce undefined behavior.
+ *
+ * @param id
+ * The ID number of the expression that matched. If the expression was a
+ * single expression compiled with @ref hs_compile(), this value will be
+ * zero.
+ *
+ * @param from
+ * - If a start of match flag is enabled for the current pattern, this
+ *        argument will be set to the start of match for the pattern, assuming
+ *        that the start of match value lies within the current 'start of match
+ *        horizon' chosen by one of the SOM_HORIZON mode flags.
+ *
+ *      - If the start of match value lies outside this horizon (possible only
+ *        when the SOM_HORIZON value is not @ref HS_MODE_SOM_HORIZON_LARGE),
+ *        the @p from value will be set to @ref HS_OFFSET_PAST_HORIZON.
+ *
+ * - This argument will be set to zero if the Start of Match flag is not
+ * enabled for the given pattern.
+ *
+ * @param to
+ * The offset after the last byte that matches the expression.
+ *
+ * @param flags
+ * This is provided for future use and is unused at present.
+ *
+ * @param context
+ * The pointer supplied by the user to the @ref hs_scan(), @ref
+ * hs_scan_vector() or @ref hs_scan_stream() function.
+ *
+ * @return
+ * Non-zero if the matching should cease, else zero. If scanning is
+ * performed in streaming mode and a non-zero value is returned, any
+ * subsequent calls to @ref hs_scan_stream() for that stream will
+ * immediately return with @ref HS_SCAN_TERMINATED.
+ */
+typedef int (HS_CDECL *match_event_handler)(unsigned int id,
+ unsigned long long from,
+ unsigned long long to,
+ unsigned int flags,
+ void *context);
+
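For illustration, a minimal handler matching the typedef above; the name print_match is invented. Returning zero keeps the scan going, while returning non-zero would end it with HS_SCAN_TERMINATED.

#include <stdio.h>

/* Sketch: print each match and continue scanning. */
static int HS_CDECL print_match(unsigned int id, unsigned long long from,
                                unsigned long long to, unsigned int flags,
                                void *context) {
    (void)from;    /* zero unless a start-of-match flag was requested */
    (void)flags;   /* reserved for future use */
    (void)context;
    printf("pattern %u matched, ending at offset %llu\n", id, to);
    return 0; /* continue matching */
}
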
+/**
+ * Open and initialise a stream.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param stream
+ * On success, a pointer to the generated @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_open_stream(const hs_database_t *db, unsigned int flags,
+ hs_stream_t **stream);
+
+/**
+ * Write data to be scanned to the opened stream.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * as data is written to the stream. Matches will be returned via the @ref
+ * match_event_handler callback supplied.
+ *
+ * @param id
+ * The stream ID (returned by @ref hs_open_stream()) to which the data
+ * will be written.
+ *
+ * @param data
+ * Pointer to the data to be scanned.
+ *
+ * @param length
+ * The number of bytes to scan.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch().
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param ctxt
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
+ * match callback indicated that scanning should stop; other values on
+ * error.
+ */
+hs_error_t core2_hs_scan_stream(hs_stream_t *id, const char *data,
+ unsigned int length, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *ctxt);
+
+/**
+ * Close a stream.
+ *
+ * This function completes matching on the given stream and frees the memory
+ * associated with the stream state. After this call, the stream pointed to by
+ * @p id is invalid and can no longer be used. To reuse the stream state after
+ * completion, rather than closing it, the @ref hs_reset_stream function can be
+ * used.
+ *
+ * This function must be called for any stream created with @ref
+ * hs_open_stream(), even if scanning has been terminated by a non-zero return
+ * from the match callback function.
+ *
+ * Note: This operation may result in matches being returned (via calls to the
+ * match event callback) for expressions anchored to the end of the data stream
+ * (for example, via the use of the `$` meta-character). If these matches are
+ * not desired, NULL may be provided as the @ref match_event_handler callback.
+ *
+ * If NULL is provided as the @ref match_event_handler callback, it is
+ * permissible to provide a NULL scratch.
+ *
+ * @param id
+ * The stream ID returned by @ref hs_open_stream().
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param ctxt
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_close_stream(hs_stream_t *id, hs_scratch_t *scratch,
+ match_event_handler onEvent, void *ctxt);
+
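Taken together, the three calls above form the streaming lifecycle. The hedged sketch below scans two chunks so that a pattern straddling the boundary is still found, and closes the stream even if the callback terminated the scan. It assumes a streaming-mode `db` and a caller-supplied `scratch` and `on_match`.

/* Sketch: open a stream, write two chunks, then close it. */
static hs_error_t scan_chunks(const hs_database_t *db, hs_scratch_t *scratch,
                              match_event_handler on_match) {
    hs_stream_t *stream = NULL;
    hs_error_t err = core2_hs_open_stream(db, 0, &stream);
    if (err != HS_SUCCESS) {
        return err;
    }
    err = core2_hs_scan_stream(stream, "foo", 3, 0, scratch, on_match, NULL);
    if (err == HS_SUCCESS) {
        err = core2_hs_scan_stream(stream, "bar", 3, 0, scratch, on_match, NULL);
    }
    /* Required even after HS_SCAN_TERMINATED; may report end-of-data matches. */
    hs_error_t close_err = core2_hs_close_stream(stream, scratch, on_match, NULL);
    return (err != HS_SUCCESS) ? err : close_err;
}
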
+/**
+ * Reset a stream to an initial state.
+ *
+ * Conceptually, this is equivalent to performing @ref hs_close_stream() on the
+ * given stream, followed by a @ref hs_open_stream(). This new stream replaces
+ * the original stream in memory, avoiding the overhead of freeing the old
+ * stream and allocating the new one.
+ *
+ * Note: This operation may result in matches being returned (via calls to the
+ * match event callback) for expressions anchored to the end of the original
+ * data stream (for example, via the use of the `$` meta-character). If these
+ * matches are not desired, NULL may be provided as the @ref match_event_handler
+ * callback.
+ *
+ * Note: the stream will also be tied to the same database.
+ *
+ * @param id
+ * The stream (as created by @ref hs_open_stream()) to be replaced.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_reset_stream(hs_stream_t *id, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *context);
+
+/**
+ * Duplicate the given stream. The new stream will have the same state as the
+ * original including the current stream offset.
+ *
+ * @param to_id
+ * On success, a pointer to the new, copied @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param from_id
+ * The stream (as created by @ref hs_open_stream()) to be copied.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_copy_stream(hs_stream_t **to_id,
+ const hs_stream_t *from_id);
+
+/**
+ * Duplicate the given 'from' stream state onto the 'to' stream. The 'to' stream
+ * will first be reset (reporting any EOD matches if a non-NULL @p onEvent
+ * callback handler is provided).
+ *
+ * Note: the 'to' stream and the 'from' stream must be open against the same
+ * database.
+ *
+ * @param to_id
+ * On success, a pointer to the new, copied @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param from_id
+ * The stream (as created by @ref hs_open_stream()) to be copied.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_reset_and_copy_stream(hs_stream_t *to_id,
+ const hs_stream_t *from_id,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent,
+ void *context);
+
+/**
+ * Creates a compressed representation of the provided stream in the buffer
+ * provided. This compressed representation can be converted back into a stream
+ * state by using @ref hs_expand_stream() or @ref hs_reset_and_expand_stream().
+ * The size of the compressed representation will be placed into @p used_space.
+ *
+ * If there is not sufficient space in the buffer to hold the compressed
+ * representation, @ref HS_INSUFFICIENT_SPACE will be returned and @p used_space
+ * will be populated with the amount of space required.
+ *
+ * Note: this function does not close the provided stream; you may continue to
+ * use the stream, or free it with @ref hs_close_stream().
+ *
+ * @param stream
+ * The stream (as created by @ref hs_open_stream()) to be compressed.
+ *
+ * @param buf
+ * Buffer to write the compressed representation into. Note: if the call is
+ * just being used to determine the amount of space required, it is allowed
+ * to pass NULL here and @p buf_space as 0.
+ *
+ * @param buf_space
+ * The number of bytes in @p buf. If buf_space is too small, the call will
+ * fail with @ref HS_INSUFFICIENT_SPACE.
+ *
+ * @param used_space
+ * Pointer to where the amount of used space will be written to. The used
+ * buffer space is always less than or equal to @p buf_space. If the call
+ * fails with @ref HS_INSUFFICIENT_SPACE, this pointer will be used to
+ * write out the amount of buffer space required.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_INSUFFICIENT_SPACE if the provided
+ * buffer is too small.
+ */
+hs_error_t core2_hs_compress_stream(const hs_stream_t *stream, char *buf,
+ size_t buf_space, size_t *used_space);
+
+/**
+ * Decompresses a compressed representation created by @ref hs_compress_stream()
+ * into a new stream.
+ *
+ * Note: @p buf must correspond to a complete compressed representation created
+ * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
+ * not always possible to detect misuse of this API and behaviour is undefined
+ * if these properties are not satisfied.
+ *
+ * @param db
+ * The compiled pattern database that the compressed stream was opened
+ * against.
+ *
+ * @param stream
+ * On success, a pointer to the expanded @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param buf
+ * A compressed representation of a stream. These compressed forms are
+ * created by @ref hs_compress_stream().
+ *
+ * @param buf_size
+ * The size in bytes of the compressed representation.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_expand_stream(const hs_database_t *db,
+ hs_stream_t **stream, const char *buf,
+ size_t buf_size);
+
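+/*
+ * Usage sketch (illustrative only; assumes `buf`/`used` hold a compressed
+ * representation produced against `db`, as in the sketch above):
+ *
+ *     hs_stream_t *stream = NULL;
+ *     hs_error_t err = core2_hs_expand_stream(db, &stream, buf, used);
+ */
+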
+/**
+ * Decompresses a compressed representation created by @ref hs_compress_stream()
+ * on top of the 'to' stream. The 'to' stream will first be reset (reporting
+ * any EOD matches if a non-NULL @p onEvent callback handler is provided).
+ *
+ * Note: the 'to' stream must be opened against the same database as the
+ * compressed stream.
+ *
+ * Note: @p buf must correspond to a complete compressed representation created
+ * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
+ * not always possible to detect misuse of this API and behaviour is undefined
+ * if these properties are not satisfied.
+ *
+ * @param to_stream
+ * A pointer to a valid stream state (as created by @ref
+ * hs_open_stream()) whose contents will be overwritten with the expanded
+ * stream state.
+ *
+ * @param buf
+ * A compressed representation of a stream. These compressed forms are
+ * created by @ref hs_compress_stream().
+ *
+ * @param buf_size
+ * The size in bytes of the compressed representation.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user-defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_reset_and_expand_stream(hs_stream_t *to_stream,
+ const char *buf, size_t buf_size,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent,
+ void *context);
+
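+/*
+ * Usage sketch (illustrative only): reuse an already-open `to` stream
+ * instead of allocating a new one as core2_hs_expand_stream() does.
+ * `buf`, `used`, `scratch` and `on_match` are assumed from the sketches
+ * above.
+ *
+ *     hs_error_t err = core2_hs_reset_and_expand_stream(to, buf, used,
+ *                                                       scratch, on_match,
+ *                                                       NULL);
+ */
+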
+/**
+ * The block (non-streaming) regular expression scanner.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * for block-mode pattern databases.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param data
+ * Pointer to the data to be scanned.
+ *
+ * @param length
+ * The number of bytes to scan.
+ *
+ * @param flags
+ * Flags modifying the behaviour of this function. This parameter is
+ * provided for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() for this
+ * database.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user-defined pointer which will be passed to the callback function.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
+ * match callback indicated that scanning should stop; other values on
+ * error.
+ */
+hs_error_t core2_hs_scan(const hs_database_t *db, const char *data,
+ unsigned int length, unsigned int flags,
+ hs_scratch_t *scratch, match_event_handler onEvent,
+ void *context);
+
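+/*
+ * Usage sketch (illustrative only; `db` is a block-mode database,
+ * `scratch` was allocated for it, and `on_match` is a user-defined
+ * match_event_handler):
+ *
+ *     static int on_match(unsigned int id, unsigned long long from,
+ *                         unsigned long long to, unsigned int flags,
+ *                         void *ctx) {
+ *         return 0;  // return non-zero to terminate the scan
+ *     }
+ *
+ *     hs_error_t err = core2_hs_scan(db, data, len, 0, scratch,
+ *                                    on_match, NULL);
+ */
+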
+/**
+ * The vectored regular expression scanner.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * for vectoring-mode pattern databases.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param data
+ * An array of pointers to the data blocks to be scanned.
+ *
+ * @param length
+ * An array of lengths (in bytes) of each data block to scan.
+ *
+ * @param count
+ * Number of data blocks to scan. This should correspond to the size of
+ * the @p data and @p length arrays.
+ *
+ * @param flags
+ * Flags modifying the behaviour of this function. This parameter is
+ * provided for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() for
+ * this database.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user-defined pointer which will be passed to the callback function.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the match
+ * callback indicated that scanning should stop; other values on error.
+ */
+hs_error_t core2_hs_scan_vector(const hs_database_t *db,
+ const char *const *data,
+ const unsigned int *length,
+ unsigned int count, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *context);
+
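+/*
+ * Usage sketch (illustrative only): scan two blocks as one logical stretch
+ * of data against a vectored-mode `db`, with `scratch` and `on_match` as
+ * in the block-mode sketch above.
+ *
+ *     const char *blocks[] = { "foo", "barbaz" };
+ *     const unsigned int lens[] = { 3, 6 };
+ *     hs_error_t err = core2_hs_scan_vector(db, blocks, lens, 2, 0,
+ *                                           scratch, on_match, NULL);
+ */
+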
+/**
+ * Allocate a "scratch" space for use by Hyperscan.
+ *
+ * This is required for runtime use; one scratch space is needed per
+ * thread or concurrent caller.
+ * hs_set_scratch_allocator() or @ref hs_set_allocator() will be used by this
+ * function.
+ *
+ * @param db
+ * The database, as produced by @ref hs_compile().
+ *
+ * @param scratch
+ * On first allocation, a pointer to NULL should be provided so a new
+ * scratch can be allocated. If a scratch block has been previously
+ * allocated, then a pointer to it should be passed back in to check
+ * whether it is valid for this database. If a new scratch block is
+ * required, the original will be freed and the new one returned;
+ * otherwise, the previous scratch block will be returned. On success,
+ * the scratch block will be suitable for use with the provided database
+ * in addition to any databases that the original scratch space was
+ * suitable for.
+ *
+ * @return
+ * @ref HS_SUCCESS on successful allocation; @ref HS_NOMEM if the
+ * allocation fails. Other errors may be returned if invalid parameters
+ * are specified.
+ */
+hs_error_t core2_hs_alloc_scratch(const hs_database_t *db,
+ hs_scratch_t **scratch);
+
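+/*
+ * Usage sketch (illustrative only): grow one scratch to cover several
+ * databases by passing the same pointer back in. `db1` and `db2` are
+ * assumed to be compiled databases.
+ *
+ *     hs_scratch_t *scratch = NULL;
+ *     core2_hs_alloc_scratch(db1, &scratch);  // first allocation
+ *     core2_hs_alloc_scratch(db2, &scratch);  // now valid for db1 and db2
+ */
+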
+/**
+ * Allocate a scratch space that is a clone of an existing scratch space.
+ *
+ * This is useful when multiple concurrent threads will be using the same set
+ * of compiled databases, and another scratch space is required. Any allocator
+ * callback set by @ref hs_set_scratch_allocator() or @ref hs_set_allocator()
+ * will be used by this function.
+ *
+ * @param src
+ * The existing @ref hs_scratch_t to be cloned.
+ *
+ * @param dest
+ * A pointer to the new scratch space will be returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success; @ref HS_NOMEM if the allocation fails.
+ * Other errors may be returned if invalid parameters are specified.
+ */
+hs_error_t core2_hs_clone_scratch(const hs_scratch_t *src,
+ hs_scratch_t **dest);
+
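+/*
+ * Usage sketch (illustrative only): give each worker thread its own
+ * scratch cloned from a prototype scratch `proto`.
+ *
+ *     hs_scratch_t *per_thread = NULL;
+ *     hs_error_t err = core2_hs_clone_scratch(proto, &per_thread);
+ */
+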
+/**
+ * Provides the size of the given scratch space.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch().
+ *
+ * @param scratch_size
+ * On success, the size of the scratch space in bytes is placed in this
+ * parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_scratch_size(const hs_scratch_t *scratch,
+ size_t *scratch_size);
+
+/**
+ * Free a scratch block previously allocated by @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch().
+ *
+ * The free callback set by @ref hs_set_scratch_allocator() or @ref
+ * hs_set_allocator() will be used by this function.
+ *
+ * @param scratch
+ * The scratch block to be freed. NULL may also be safely provided.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t core2_hs_free_scratch(hs_scratch_t *scratch);
+
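+/*
+ * Usage sketch (illustrative only): report the scratch footprint, then
+ * release it.
+ *
+ *     size_t sz = 0;
+ *     if (core2_hs_scratch_size(scratch, &sz) == HS_SUCCESS)
+ *         printf("scratch uses %zu bytes\n", sz);
+ *     core2_hs_free_scratch(scratch);  // passing NULL is also safe
+ */
+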
+/**
+ * Callback 'from' return value, indicating that the start of this match was
+ * too early to be tracked with the requested SOM_HORIZON precision.
+ */
+#define HS_OFFSET_PAST_HORIZON (~0ULL)
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* HS_CORE2_RUNTIME_H */
diff --git a/contrib/libs/hyperscan/runtime_core2/ya.make b/contrib/libs/hyperscan/runtime_core2/ya.make
index d484071ad9..10401f5f44 100644
--- a/contrib/libs/hyperscan/runtime_core2/ya.make
+++ b/contrib/libs/hyperscan/runtime_core2/ya.make
@@ -1,488 +1,488 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(
- galtsev
- g:antiinfra
- g:cpp-contrib
- g:yql
-)
-
-LICENSE(BSD-3-Clause)
-
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-ADDINCL(
- contrib/libs/hyperscan
- contrib/libs/hyperscan/src
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_RUNTIME()
-
-CFLAGS(
- -DCrc32c_ComputeBuf=core2_Crc32c_ComputeBuf
- -DblockInitSufPQ=core2_blockInitSufPQ
- -Dcompress_stream=core2_compress_stream
- -Dcpuid_flags=core2_cpuid_flags
- -Dcpuid_tune=core2_cpuid_tune
- -DdbIsValid=core2_dbIsValid
- -DdoAccel128=core2_doAccel128
- -DdoAccel256=core2_doAccel256
- -DdoAccel32=core2_doAccel32
- -DdoAccel384=core2_doAccel384
- -DdoAccel512=core2_doAccel512
- -DdoAccel64=core2_doAccel64
- -Dexpand_stream=core2_expand_stream
- -DfdrExec=core2_fdrExec
- -DfdrExecStreaming=core2_fdrExecStreaming
- -Dfdr_exec_fat_teddy_msks1=core2_fdr_exec_fat_teddy_msks1
- -Dfdr_exec_fat_teddy_msks1_pck=core2_fdr_exec_fat_teddy_msks1_pck
- -Dfdr_exec_fat_teddy_msks2=core2_fdr_exec_fat_teddy_msks2
- -Dfdr_exec_fat_teddy_msks2_pck=core2_fdr_exec_fat_teddy_msks2_pck
- -Dfdr_exec_fat_teddy_msks3=core2_fdr_exec_fat_teddy_msks3
- -Dfdr_exec_fat_teddy_msks3_pck=core2_fdr_exec_fat_teddy_msks3_pck
- -Dfdr_exec_fat_teddy_msks4=core2_fdr_exec_fat_teddy_msks4
- -Dfdr_exec_fat_teddy_msks4_pck=core2_fdr_exec_fat_teddy_msks4_pck
- -Dfdr_exec_teddy_msks1=core2_fdr_exec_teddy_msks1
- -Dfdr_exec_teddy_msks1_pck=core2_fdr_exec_teddy_msks1_pck
- -Dfdr_exec_teddy_msks2=core2_fdr_exec_teddy_msks2
- -Dfdr_exec_teddy_msks2_pck=core2_fdr_exec_teddy_msks2_pck
- -Dfdr_exec_teddy_msks3=core2_fdr_exec_teddy_msks3
- -Dfdr_exec_teddy_msks3_pck=core2_fdr_exec_teddy_msks3_pck
- -Dfdr_exec_teddy_msks4=core2_fdr_exec_teddy_msks4
- -Dfdr_exec_teddy_msks4_pck=core2_fdr_exec_teddy_msks4_pck
- -DflushQueuedLiterals_i=core2_flushQueuedLiterals_i
- -DflushStoredSomMatches_i=core2_flushStoredSomMatches_i
- -DhandleSomExternal=core2_handleSomExternal
- -DhandleSomInternal=core2_handleSomInternal
- -Dhs_alloc_scratch=core2_hs_alloc_scratch
- -Dhs_clone_scratch=core2_hs_clone_scratch
- -Dhs_close_stream=core2_hs_close_stream
- -Dhs_compress_stream=core2_hs_compress_stream
- -Dhs_copy_stream=core2_hs_copy_stream
- -Dhs_database_alloc=core2_hs_database_alloc
- -Dhs_database_free=core2_hs_database_free
- -Dhs_database_info=core2_hs_database_info
- -Dhs_database_size=core2_hs_database_size
- -Dhs_deserialize_database=core2_hs_deserialize_database
- -Dhs_deserialize_database_at=core2_hs_deserialize_database_at
- -Dhs_expand_stream=core2_hs_expand_stream
- -Dhs_free_database=core2_hs_free_database
- -Dhs_free_scratch=core2_hs_free_scratch
- -Dhs_misc_alloc=core2_hs_misc_alloc
- -Dhs_misc_free=core2_hs_misc_free
- -Dhs_open_stream=core2_hs_open_stream
- -Dhs_reset_and_copy_stream=core2_hs_reset_and_copy_stream
- -Dhs_reset_and_expand_stream=core2_hs_reset_and_expand_stream
- -Dhs_reset_stream=core2_hs_reset_stream
- -Dhs_scan=core2_hs_scan
- -Dhs_scan_stream=core2_hs_scan_stream
- -Dhs_scan_vector=core2_hs_scan_vector
- -Dhs_scratch_alloc=core2_hs_scratch_alloc
- -Dhs_scratch_free=core2_hs_scratch_free
- -Dhs_scratch_size=core2_hs_scratch_size
- -Dhs_serialize_database=core2_hs_serialize_database
- -Dhs_serialized_database_info=core2_hs_serialized_database_info
- -Dhs_serialized_database_size=core2_hs_serialized_database_size
- -Dhs_set_allocator=core2_hs_set_allocator
- -Dhs_set_database_allocator=core2_hs_set_database_allocator
- -Dhs_set_misc_allocator=core2_hs_set_misc_allocator
- -Dhs_set_scratch_allocator=core2_hs_set_scratch_allocator
- -Dhs_set_stream_allocator=core2_hs_set_stream_allocator
- -Dhs_stream_alloc=core2_hs_stream_alloc
- -Dhs_stream_free=core2_hs_stream_free
- -Dhs_stream_size=core2_hs_stream_size
- -Dhs_valid_platform=core2_hs_valid_platform
- -Dhs_version=core2_hs_version
- -DhwlmExec=core2_hwlmExec
- -DhwlmExecStreaming=core2_hwlmExecStreaming
- -DloadSomFromStream=core2_loadSomFromStream
- -Dloadcompressed128=core2_loadcompressed128
- -Dloadcompressed256=core2_loadcompressed256
- -Dloadcompressed32=core2_loadcompressed32
- -Dloadcompressed384=core2_loadcompressed384
- -Dloadcompressed512=core2_loadcompressed512
- -Dloadcompressed64=core2_loadcompressed64
- -Dmcsheng_pext_mask=core2_mcsheng_pext_mask
- -Dmm_mask_mask=core2_mm_mask_mask
- -Dmm_shuffle_end=core2_mm_shuffle_end
- -Dmmbit_keyshift_lut=core2_mmbit_keyshift_lut
- -Dmmbit_maxlevel_direct_lut=core2_mmbit_maxlevel_direct_lut
- -Dmmbit_maxlevel_from_keyshift_lut=core2_mmbit_maxlevel_from_keyshift_lut
- -Dmmbit_root_offset_from_level=core2_mmbit_root_offset_from_level
- -Dmmbit_zero_to_lut=core2_mmbit_zero_to_lut
- -DnfaBlockExecReverse=core2_nfaBlockExecReverse
- -DnfaCheckFinalState=core2_nfaCheckFinalState
- -DnfaExecCastle_Q=core2_nfaExecCastle_Q
- -DnfaExecCastle_Q2=core2_nfaExecCastle_Q2
- -DnfaExecCastle_QR=core2_nfaExecCastle_QR
- -DnfaExecCastle_expandState=core2_nfaExecCastle_expandState
- -DnfaExecCastle_inAccept=core2_nfaExecCastle_inAccept
- -DnfaExecCastle_inAnyAccept=core2_nfaExecCastle_inAnyAccept
- -DnfaExecCastle_initCompressedState=core2_nfaExecCastle_initCompressedState
- -DnfaExecCastle_queueCompressState=core2_nfaExecCastle_queueCompressState
- -DnfaExecCastle_queueInitState=core2_nfaExecCastle_queueInitState
- -DnfaExecCastle_reportCurrent=core2_nfaExecCastle_reportCurrent
- -DnfaExecGough16_Q=core2_nfaExecGough16_Q
- -DnfaExecGough16_Q2=core2_nfaExecGough16_Q2
- -DnfaExecGough16_QR=core2_nfaExecGough16_QR
- -DnfaExecGough16_expandState=core2_nfaExecGough16_expandState
- -DnfaExecGough16_inAccept=core2_nfaExecGough16_inAccept
- -DnfaExecGough16_inAnyAccept=core2_nfaExecGough16_inAnyAccept
- -DnfaExecGough16_initCompressedState=core2_nfaExecGough16_initCompressedState
- -DnfaExecGough16_queueCompressState=core2_nfaExecGough16_queueCompressState
- -DnfaExecGough16_queueInitState=core2_nfaExecGough16_queueInitState
- -DnfaExecGough16_reportCurrent=core2_nfaExecGough16_reportCurrent
- -DnfaExecGough16_testEOD=core2_nfaExecGough16_testEOD
- -DnfaExecGough8_Q=core2_nfaExecGough8_Q
- -DnfaExecGough8_Q2=core2_nfaExecGough8_Q2
- -DnfaExecGough8_QR=core2_nfaExecGough8_QR
- -DnfaExecGough8_expandState=core2_nfaExecGough8_expandState
- -DnfaExecGough8_inAccept=core2_nfaExecGough8_inAccept
- -DnfaExecGough8_inAnyAccept=core2_nfaExecGough8_inAnyAccept
- -DnfaExecGough8_initCompressedState=core2_nfaExecGough8_initCompressedState
- -DnfaExecGough8_queueCompressState=core2_nfaExecGough8_queueCompressState
- -DnfaExecGough8_queueInitState=core2_nfaExecGough8_queueInitState
- -DnfaExecGough8_reportCurrent=core2_nfaExecGough8_reportCurrent
- -DnfaExecGough8_testEOD=core2_nfaExecGough8_testEOD
- -DnfaExecLbrDot_Q=core2_nfaExecLbrDot_Q
- -DnfaExecLbrDot_Q2=core2_nfaExecLbrDot_Q2
- -DnfaExecLbrDot_QR=core2_nfaExecLbrDot_QR
- -DnfaExecLbrDot_expandState=core2_nfaExecLbrDot_expandState
- -DnfaExecLbrDot_inAccept=core2_nfaExecLbrDot_inAccept
- -DnfaExecLbrDot_inAnyAccept=core2_nfaExecLbrDot_inAnyAccept
- -DnfaExecLbrDot_initCompressedState=core2_nfaExecLbrDot_initCompressedState
- -DnfaExecLbrDot_queueCompressState=core2_nfaExecLbrDot_queueCompressState
- -DnfaExecLbrDot_queueInitState=core2_nfaExecLbrDot_queueInitState
- -DnfaExecLbrDot_reportCurrent=core2_nfaExecLbrDot_reportCurrent
- -DnfaExecLbrNVerm_Q=core2_nfaExecLbrNVerm_Q
- -DnfaExecLbrNVerm_Q2=core2_nfaExecLbrNVerm_Q2
- -DnfaExecLbrNVerm_QR=core2_nfaExecLbrNVerm_QR
- -DnfaExecLbrNVerm_expandState=core2_nfaExecLbrNVerm_expandState
- -DnfaExecLbrNVerm_inAccept=core2_nfaExecLbrNVerm_inAccept
- -DnfaExecLbrNVerm_inAnyAccept=core2_nfaExecLbrNVerm_inAnyAccept
- -DnfaExecLbrNVerm_initCompressedState=core2_nfaExecLbrNVerm_initCompressedState
- -DnfaExecLbrNVerm_queueCompressState=core2_nfaExecLbrNVerm_queueCompressState
- -DnfaExecLbrNVerm_queueInitState=core2_nfaExecLbrNVerm_queueInitState
- -DnfaExecLbrNVerm_reportCurrent=core2_nfaExecLbrNVerm_reportCurrent
- -DnfaExecLbrShuf_Q=core2_nfaExecLbrShuf_Q
- -DnfaExecLbrShuf_Q2=core2_nfaExecLbrShuf_Q2
- -DnfaExecLbrShuf_QR=core2_nfaExecLbrShuf_QR
- -DnfaExecLbrShuf_expandState=core2_nfaExecLbrShuf_expandState
- -DnfaExecLbrShuf_inAccept=core2_nfaExecLbrShuf_inAccept
- -DnfaExecLbrShuf_inAnyAccept=core2_nfaExecLbrShuf_inAnyAccept
- -DnfaExecLbrShuf_initCompressedState=core2_nfaExecLbrShuf_initCompressedState
- -DnfaExecLbrShuf_queueCompressState=core2_nfaExecLbrShuf_queueCompressState
- -DnfaExecLbrShuf_queueInitState=core2_nfaExecLbrShuf_queueInitState
- -DnfaExecLbrShuf_reportCurrent=core2_nfaExecLbrShuf_reportCurrent
- -DnfaExecLbrTruf_Q=core2_nfaExecLbrTruf_Q
- -DnfaExecLbrTruf_Q2=core2_nfaExecLbrTruf_Q2
- -DnfaExecLbrTruf_QR=core2_nfaExecLbrTruf_QR
- -DnfaExecLbrTruf_expandState=core2_nfaExecLbrTruf_expandState
- -DnfaExecLbrTruf_inAccept=core2_nfaExecLbrTruf_inAccept
- -DnfaExecLbrTruf_inAnyAccept=core2_nfaExecLbrTruf_inAnyAccept
- -DnfaExecLbrTruf_initCompressedState=core2_nfaExecLbrTruf_initCompressedState
- -DnfaExecLbrTruf_queueCompressState=core2_nfaExecLbrTruf_queueCompressState
- -DnfaExecLbrTruf_queueInitState=core2_nfaExecLbrTruf_queueInitState
- -DnfaExecLbrTruf_reportCurrent=core2_nfaExecLbrTruf_reportCurrent
- -DnfaExecLbrVerm_Q=core2_nfaExecLbrVerm_Q
- -DnfaExecLbrVerm_Q2=core2_nfaExecLbrVerm_Q2
- -DnfaExecLbrVerm_QR=core2_nfaExecLbrVerm_QR
- -DnfaExecLbrVerm_expandState=core2_nfaExecLbrVerm_expandState
- -DnfaExecLbrVerm_inAccept=core2_nfaExecLbrVerm_inAccept
- -DnfaExecLbrVerm_inAnyAccept=core2_nfaExecLbrVerm_inAnyAccept
- -DnfaExecLbrVerm_initCompressedState=core2_nfaExecLbrVerm_initCompressedState
- -DnfaExecLbrVerm_queueCompressState=core2_nfaExecLbrVerm_queueCompressState
- -DnfaExecLbrVerm_queueInitState=core2_nfaExecLbrVerm_queueInitState
- -DnfaExecLbrVerm_reportCurrent=core2_nfaExecLbrVerm_reportCurrent
- -DnfaExecLimEx128_B_Reverse=core2_nfaExecLimEx128_B_Reverse
- -DnfaExecLimEx128_Q=core2_nfaExecLimEx128_Q
- -DnfaExecLimEx128_Q2=core2_nfaExecLimEx128_Q2
- -DnfaExecLimEx128_QR=core2_nfaExecLimEx128_QR
- -DnfaExecLimEx128_expandState=core2_nfaExecLimEx128_expandState
- -DnfaExecLimEx128_inAccept=core2_nfaExecLimEx128_inAccept
- -DnfaExecLimEx128_inAnyAccept=core2_nfaExecLimEx128_inAnyAccept
- -DnfaExecLimEx128_initCompressedState=core2_nfaExecLimEx128_initCompressedState
- -DnfaExecLimEx128_queueCompressState=core2_nfaExecLimEx128_queueCompressState
- -DnfaExecLimEx128_queueInitState=core2_nfaExecLimEx128_queueInitState
- -DnfaExecLimEx128_reportCurrent=core2_nfaExecLimEx128_reportCurrent
- -DnfaExecLimEx128_testEOD=core2_nfaExecLimEx128_testEOD
- -DnfaExecLimEx128_zombie_status=core2_nfaExecLimEx128_zombie_status
- -DnfaExecLimEx256_B_Reverse=core2_nfaExecLimEx256_B_Reverse
- -DnfaExecLimEx256_Q=core2_nfaExecLimEx256_Q
- -DnfaExecLimEx256_Q2=core2_nfaExecLimEx256_Q2
- -DnfaExecLimEx256_QR=core2_nfaExecLimEx256_QR
- -DnfaExecLimEx256_expandState=core2_nfaExecLimEx256_expandState
- -DnfaExecLimEx256_inAccept=core2_nfaExecLimEx256_inAccept
- -DnfaExecLimEx256_inAnyAccept=core2_nfaExecLimEx256_inAnyAccept
- -DnfaExecLimEx256_initCompressedState=core2_nfaExecLimEx256_initCompressedState
- -DnfaExecLimEx256_queueCompressState=core2_nfaExecLimEx256_queueCompressState
- -DnfaExecLimEx256_queueInitState=core2_nfaExecLimEx256_queueInitState
- -DnfaExecLimEx256_reportCurrent=core2_nfaExecLimEx256_reportCurrent
- -DnfaExecLimEx256_testEOD=core2_nfaExecLimEx256_testEOD
- -DnfaExecLimEx256_zombie_status=core2_nfaExecLimEx256_zombie_status
- -DnfaExecLimEx32_B_Reverse=core2_nfaExecLimEx32_B_Reverse
- -DnfaExecLimEx32_Q=core2_nfaExecLimEx32_Q
- -DnfaExecLimEx32_Q2=core2_nfaExecLimEx32_Q2
- -DnfaExecLimEx32_QR=core2_nfaExecLimEx32_QR
- -DnfaExecLimEx32_expandState=core2_nfaExecLimEx32_expandState
- -DnfaExecLimEx32_inAccept=core2_nfaExecLimEx32_inAccept
- -DnfaExecLimEx32_inAnyAccept=core2_nfaExecLimEx32_inAnyAccept
- -DnfaExecLimEx32_initCompressedState=core2_nfaExecLimEx32_initCompressedState
- -DnfaExecLimEx32_queueCompressState=core2_nfaExecLimEx32_queueCompressState
- -DnfaExecLimEx32_queueInitState=core2_nfaExecLimEx32_queueInitState
- -DnfaExecLimEx32_reportCurrent=core2_nfaExecLimEx32_reportCurrent
- -DnfaExecLimEx32_testEOD=core2_nfaExecLimEx32_testEOD
- -DnfaExecLimEx32_zombie_status=core2_nfaExecLimEx32_zombie_status
- -DnfaExecLimEx384_B_Reverse=core2_nfaExecLimEx384_B_Reverse
- -DnfaExecLimEx384_Q=core2_nfaExecLimEx384_Q
- -DnfaExecLimEx384_Q2=core2_nfaExecLimEx384_Q2
- -DnfaExecLimEx384_QR=core2_nfaExecLimEx384_QR
- -DnfaExecLimEx384_expandState=core2_nfaExecLimEx384_expandState
- -DnfaExecLimEx384_inAccept=core2_nfaExecLimEx384_inAccept
- -DnfaExecLimEx384_inAnyAccept=core2_nfaExecLimEx384_inAnyAccept
- -DnfaExecLimEx384_initCompressedState=core2_nfaExecLimEx384_initCompressedState
- -DnfaExecLimEx384_queueCompressState=core2_nfaExecLimEx384_queueCompressState
- -DnfaExecLimEx384_queueInitState=core2_nfaExecLimEx384_queueInitState
- -DnfaExecLimEx384_reportCurrent=core2_nfaExecLimEx384_reportCurrent
- -DnfaExecLimEx384_testEOD=core2_nfaExecLimEx384_testEOD
- -DnfaExecLimEx384_zombie_status=core2_nfaExecLimEx384_zombie_status
- -DnfaExecLimEx512_B_Reverse=core2_nfaExecLimEx512_B_Reverse
- -DnfaExecLimEx512_Q=core2_nfaExecLimEx512_Q
- -DnfaExecLimEx512_Q2=core2_nfaExecLimEx512_Q2
- -DnfaExecLimEx512_QR=core2_nfaExecLimEx512_QR
- -DnfaExecLimEx512_expandState=core2_nfaExecLimEx512_expandState
- -DnfaExecLimEx512_inAccept=core2_nfaExecLimEx512_inAccept
- -DnfaExecLimEx512_inAnyAccept=core2_nfaExecLimEx512_inAnyAccept
- -DnfaExecLimEx512_initCompressedState=core2_nfaExecLimEx512_initCompressedState
- -DnfaExecLimEx512_queueCompressState=core2_nfaExecLimEx512_queueCompressState
- -DnfaExecLimEx512_queueInitState=core2_nfaExecLimEx512_queueInitState
- -DnfaExecLimEx512_reportCurrent=core2_nfaExecLimEx512_reportCurrent
- -DnfaExecLimEx512_testEOD=core2_nfaExecLimEx512_testEOD
- -DnfaExecLimEx512_zombie_status=core2_nfaExecLimEx512_zombie_status
- -DnfaExecLimEx64_B_Reverse=core2_nfaExecLimEx64_B_Reverse
- -DnfaExecLimEx64_Q=core2_nfaExecLimEx64_Q
- -DnfaExecLimEx64_Q2=core2_nfaExecLimEx64_Q2
- -DnfaExecLimEx64_QR=core2_nfaExecLimEx64_QR
- -DnfaExecLimEx64_expandState=core2_nfaExecLimEx64_expandState
- -DnfaExecLimEx64_inAccept=core2_nfaExecLimEx64_inAccept
- -DnfaExecLimEx64_inAnyAccept=core2_nfaExecLimEx64_inAnyAccept
- -DnfaExecLimEx64_initCompressedState=core2_nfaExecLimEx64_initCompressedState
- -DnfaExecLimEx64_queueCompressState=core2_nfaExecLimEx64_queueCompressState
- -DnfaExecLimEx64_queueInitState=core2_nfaExecLimEx64_queueInitState
- -DnfaExecLimEx64_reportCurrent=core2_nfaExecLimEx64_reportCurrent
- -DnfaExecLimEx64_testEOD=core2_nfaExecLimEx64_testEOD
- -DnfaExecLimEx64_zombie_status=core2_nfaExecLimEx64_zombie_status
- -DnfaExecMcClellan16_B=core2_nfaExecMcClellan16_B
- -DnfaExecMcClellan16_Q=core2_nfaExecMcClellan16_Q
- -DnfaExecMcClellan16_Q2=core2_nfaExecMcClellan16_Q2
- -DnfaExecMcClellan16_QR=core2_nfaExecMcClellan16_QR
- -DnfaExecMcClellan16_SimpStream=core2_nfaExecMcClellan16_SimpStream
- -DnfaExecMcClellan16_expandState=core2_nfaExecMcClellan16_expandState
- -DnfaExecMcClellan16_inAccept=core2_nfaExecMcClellan16_inAccept
- -DnfaExecMcClellan16_inAnyAccept=core2_nfaExecMcClellan16_inAnyAccept
- -DnfaExecMcClellan16_initCompressedState=core2_nfaExecMcClellan16_initCompressedState
- -DnfaExecMcClellan16_queueCompressState=core2_nfaExecMcClellan16_queueCompressState
- -DnfaExecMcClellan16_queueInitState=core2_nfaExecMcClellan16_queueInitState
- -DnfaExecMcClellan16_reportCurrent=core2_nfaExecMcClellan16_reportCurrent
- -DnfaExecMcClellan16_testEOD=core2_nfaExecMcClellan16_testEOD
- -DnfaExecMcClellan8_B=core2_nfaExecMcClellan8_B
- -DnfaExecMcClellan8_Q=core2_nfaExecMcClellan8_Q
- -DnfaExecMcClellan8_Q2=core2_nfaExecMcClellan8_Q2
- -DnfaExecMcClellan8_QR=core2_nfaExecMcClellan8_QR
- -DnfaExecMcClellan8_SimpStream=core2_nfaExecMcClellan8_SimpStream
- -DnfaExecMcClellan8_expandState=core2_nfaExecMcClellan8_expandState
- -DnfaExecMcClellan8_inAccept=core2_nfaExecMcClellan8_inAccept
- -DnfaExecMcClellan8_inAnyAccept=core2_nfaExecMcClellan8_inAnyAccept
- -DnfaExecMcClellan8_initCompressedState=core2_nfaExecMcClellan8_initCompressedState
- -DnfaExecMcClellan8_queueCompressState=core2_nfaExecMcClellan8_queueCompressState
- -DnfaExecMcClellan8_queueInitState=core2_nfaExecMcClellan8_queueInitState
- -DnfaExecMcClellan8_reportCurrent=core2_nfaExecMcClellan8_reportCurrent
- -DnfaExecMcClellan8_testEOD=core2_nfaExecMcClellan8_testEOD
- -DnfaExecMcSheng16_Q=core2_nfaExecMcSheng16_Q
- -DnfaExecMcSheng16_Q2=core2_nfaExecMcSheng16_Q2
- -DnfaExecMcSheng16_QR=core2_nfaExecMcSheng16_QR
- -DnfaExecMcSheng16_expandState=core2_nfaExecMcSheng16_expandState
- -DnfaExecMcSheng16_inAccept=core2_nfaExecMcSheng16_inAccept
- -DnfaExecMcSheng16_inAnyAccept=core2_nfaExecMcSheng16_inAnyAccept
- -DnfaExecMcSheng16_initCompressedState=core2_nfaExecMcSheng16_initCompressedState
- -DnfaExecMcSheng16_queueCompressState=core2_nfaExecMcSheng16_queueCompressState
- -DnfaExecMcSheng16_queueInitState=core2_nfaExecMcSheng16_queueInitState
- -DnfaExecMcSheng16_reportCurrent=core2_nfaExecMcSheng16_reportCurrent
- -DnfaExecMcSheng16_testEOD=core2_nfaExecMcSheng16_testEOD
- -DnfaExecMcSheng8_Q=core2_nfaExecMcSheng8_Q
- -DnfaExecMcSheng8_Q2=core2_nfaExecMcSheng8_Q2
- -DnfaExecMcSheng8_QR=core2_nfaExecMcSheng8_QR
- -DnfaExecMcSheng8_expandState=core2_nfaExecMcSheng8_expandState
- -DnfaExecMcSheng8_inAccept=core2_nfaExecMcSheng8_inAccept
- -DnfaExecMcSheng8_inAnyAccept=core2_nfaExecMcSheng8_inAnyAccept
- -DnfaExecMcSheng8_initCompressedState=core2_nfaExecMcSheng8_initCompressedState
- -DnfaExecMcSheng8_queueCompressState=core2_nfaExecMcSheng8_queueCompressState
- -DnfaExecMcSheng8_queueInitState=core2_nfaExecMcSheng8_queueInitState
- -DnfaExecMcSheng8_reportCurrent=core2_nfaExecMcSheng8_reportCurrent
- -DnfaExecMcSheng8_testEOD=core2_nfaExecMcSheng8_testEOD
- -DnfaExecMpv_Q=core2_nfaExecMpv_Q
- -DnfaExecMpv_QueueExecRaw=core2_nfaExecMpv_QueueExecRaw
- -DnfaExecMpv_expandState=core2_nfaExecMpv_expandState
- -DnfaExecMpv_initCompressedState=core2_nfaExecMpv_initCompressedState
- -DnfaExecMpv_queueCompressState=core2_nfaExecMpv_queueCompressState
- -DnfaExecMpv_queueInitState=core2_nfaExecMpv_queueInitState
- -DnfaExecMpv_reportCurrent=core2_nfaExecMpv_reportCurrent
- -DnfaExecSheng_B=core2_nfaExecSheng_B
- -DnfaExecSheng_Q=core2_nfaExecSheng_Q
- -DnfaExecSheng_Q2=core2_nfaExecSheng_Q2
- -DnfaExecSheng_QR=core2_nfaExecSheng_QR
- -DnfaExecSheng_expandState=core2_nfaExecSheng_expandState
- -DnfaExecSheng_inAccept=core2_nfaExecSheng_inAccept
- -DnfaExecSheng_inAnyAccept=core2_nfaExecSheng_inAnyAccept
- -DnfaExecSheng_initCompressedState=core2_nfaExecSheng_initCompressedState
- -DnfaExecSheng_queueCompressState=core2_nfaExecSheng_queueCompressState
- -DnfaExecSheng_queueInitState=core2_nfaExecSheng_queueInitState
- -DnfaExecSheng_reportCurrent=core2_nfaExecSheng_reportCurrent
- -DnfaExecSheng_testEOD=core2_nfaExecSheng_testEOD
- -DnfaExecTamarama_Q=core2_nfaExecTamarama_Q
- -DnfaExecTamarama_Q2=core2_nfaExecTamarama_Q2
- -DnfaExecTamarama_QR=core2_nfaExecTamarama_QR
- -DnfaExecTamarama_expandState=core2_nfaExecTamarama_expandState
- -DnfaExecTamarama_inAccept=core2_nfaExecTamarama_inAccept
- -DnfaExecTamarama_inAnyAccept=core2_nfaExecTamarama_inAnyAccept
- -DnfaExecTamarama_queueCompressState=core2_nfaExecTamarama_queueCompressState
- -DnfaExecTamarama_queueInitState=core2_nfaExecTamarama_queueInitState
- -DnfaExecTamarama_reportCurrent=core2_nfaExecTamarama_reportCurrent
- -DnfaExecTamarama_testEOD=core2_nfaExecTamarama_testEOD
- -DnfaExecTamarama_zombie_status=core2_nfaExecTamarama_zombie_status
- -DnfaExpandState=core2_nfaExpandState
- -DnfaGetZombieStatus=core2_nfaGetZombieStatus
- -DnfaInAcceptState=core2_nfaInAcceptState
- -DnfaInAnyAcceptState=core2_nfaInAnyAcceptState
- -DnfaInitCompressedState=core2_nfaInitCompressedState
- -DnfaQueueCompressState=core2_nfaQueueCompressState
- -DnfaQueueExec=core2_nfaQueueExec
- -DnfaQueueExec2_raw=core2_nfaQueueExec2_raw
- -DnfaQueueExecRose=core2_nfaQueueExecRose
- -DnfaQueueExecToMatch=core2_nfaQueueExecToMatch
- -DnfaQueueExec_raw=core2_nfaQueueExec_raw
- -DnfaQueueInitState=core2_nfaQueueInitState
- -DnfaReportCurrentMatches=core2_nfaReportCurrentMatches
- -DnoodExec=core2_noodExec
- -DnoodExecStreaming=core2_noodExecStreaming
- -Dp_mask_arr=core2_p_mask_arr
- -Dp_mask_arr256=core2_p_mask_arr256
- -DrepeatHasMatchBitmap=core2_repeatHasMatchBitmap
- -DrepeatHasMatchRange=core2_repeatHasMatchRange
- -DrepeatHasMatchRing=core2_repeatHasMatchRing
- -DrepeatHasMatchSparseOptimalP=core2_repeatHasMatchSparseOptimalP
- -DrepeatHasMatchTrailer=core2_repeatHasMatchTrailer
- -DrepeatLastTopBitmap=core2_repeatLastTopBitmap
- -DrepeatLastTopRange=core2_repeatLastTopRange
- -DrepeatLastTopRing=core2_repeatLastTopRing
- -DrepeatLastTopSparseOptimalP=core2_repeatLastTopSparseOptimalP
- -DrepeatLastTopTrailer=core2_repeatLastTopTrailer
- -DrepeatNextMatchBitmap=core2_repeatNextMatchBitmap
- -DrepeatNextMatchRange=core2_repeatNextMatchRange
- -DrepeatNextMatchRing=core2_repeatNextMatchRing
- -DrepeatNextMatchSparseOptimalP=core2_repeatNextMatchSparseOptimalP
- -DrepeatNextMatchTrailer=core2_repeatNextMatchTrailer
- -DrepeatPack=core2_repeatPack
- -DrepeatStoreBitmap=core2_repeatStoreBitmap
- -DrepeatStoreRange=core2_repeatStoreRange
- -DrepeatStoreRing=core2_repeatStoreRing
- -DrepeatStoreSparseOptimalP=core2_repeatStoreSparseOptimalP
- -DrepeatStoreTrailer=core2_repeatStoreTrailer
- -DrepeatUnpack=core2_repeatUnpack
- -DroseAnchoredCallback=core2_roseAnchoredCallback
- -DroseBlockExec=core2_roseBlockExec
- -DroseCallback=core2_roseCallback
- -DroseCatchUpAll=core2_roseCatchUpAll
- -DroseCatchUpMPV_i=core2_roseCatchUpMPV_i
- -DroseCatchUpSuf=core2_roseCatchUpSuf
- -DroseDelayRebuildCallback=core2_roseDelayRebuildCallback
- -DroseFloatingCallback=core2_roseFloatingCallback
- -DroseHandleChainMatch=core2_roseHandleChainMatch
- -DroseInitState=core2_roseInitState
- -DroseNfaAdaptor=core2_roseNfaAdaptor
- -DroseNfaEarliestSom=core2_roseNfaEarliestSom
- -DroseReportAdaptor=core2_roseReportAdaptor
- -DroseRunBoundaryProgram=core2_roseRunBoundaryProgram
- -DroseRunFlushCombProgram=core2_roseRunFlushCombProgram
- -DroseRunLastFlushCombProgram=core2_roseRunLastFlushCombProgram
- -DroseRunProgram=core2_roseRunProgram
- -DroseRunProgram_l=core2_roseRunProgram_l
- -DroseStreamEodExec=core2_roseStreamEodExec
- -DroseStreamExec=core2_roseStreamExec
- -DrshuftiExec=core2_rshuftiExec
- -DrtruffleExec=core2_rtruffleExec
- -Drun_accel=core2_run_accel
- -DsetSomFromSomAware=core2_setSomFromSomAware
- -DshuftiDoubleExec=core2_shuftiDoubleExec
- -DshuftiExec=core2_shuftiExec
- -Dsimd_onebit_masks=core2_simd_onebit_masks
- -Dsize_compress_stream=core2_size_compress_stream
- -DstoreSomToStream=core2_storeSomToStream
- -Dstorecompressed128=core2_storecompressed128
- -Dstorecompressed256=core2_storecompressed256
- -Dstorecompressed32=core2_storecompressed32
- -Dstorecompressed384=core2_storecompressed384
- -Dstorecompressed512=core2_storecompressed512
- -Dstorecompressed64=core2_storecompressed64
- -DstreamInitSufPQ=core2_streamInitSufPQ
- -DtruffleExec=core2_truffleExec
- -Dvbs_mask_data=core2_vbs_mask_data
-)
-
-SRCDIR(contrib/libs/hyperscan)
-
-SRCS(
- src/alloc.c
- src/crc32.c
- src/database.c
- src/fdr/fdr.c
- src/fdr/teddy.c
- src/fdr/teddy_avx2.c
- src/hs_valid_platform.c
- src/hs_version.c
- src/hwlm/hwlm.c
- src/hwlm/noodle_engine.c
- src/nfa/accel.c
- src/nfa/castle.c
- src/nfa/gough.c
- src/nfa/lbr.c
- src/nfa/limex_64.c
- src/nfa/limex_accel.c
- src/nfa/limex_native.c
- src/nfa/limex_simd128.c
- src/nfa/limex_simd256.c
- src/nfa/limex_simd384.c
- src/nfa/limex_simd512.c
- src/nfa/mcclellan.c
- src/nfa/mcsheng.c
- src/nfa/mcsheng_data.c
- src/nfa/mpv.c
- src/nfa/nfa_api_dispatch.c
- src/nfa/repeat.c
- src/nfa/sheng.c
- src/nfa/shufti.c
- src/nfa/tamarama.c
- src/nfa/truffle.c
- src/rose/block.c
- src/rose/catchup.c
- src/rose/init.c
- src/rose/match.c
- src/rose/program_runtime.c
- src/rose/stream.c
- src/runtime.c
- src/scratch.c
- src/som/som_runtime.c
- src/som/som_stream.c
- src/stream_compress.c
- src/util/cpuid_flags.c
- src/util/masked_move.c
- src/util/multibit.c
- src/util/simd_utils.c
- src/util/state_compress.c
-)
-
-END()
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ galtsev
+ g:antiinfra
+ g:cpp-contrib
+ g:yql
+)
+
+LICENSE(BSD-3-Clause)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+ADDINCL(
+ contrib/libs/hyperscan
+ contrib/libs/hyperscan/src
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ -DCrc32c_ComputeBuf=core2_Crc32c_ComputeBuf
+ -DblockInitSufPQ=core2_blockInitSufPQ
+ -Dcompress_stream=core2_compress_stream
+ -Dcpuid_flags=core2_cpuid_flags
+ -Dcpuid_tune=core2_cpuid_tune
+ -DdbIsValid=core2_dbIsValid
+ -DdoAccel128=core2_doAccel128
+ -DdoAccel256=core2_doAccel256
+ -DdoAccel32=core2_doAccel32
+ -DdoAccel384=core2_doAccel384
+ -DdoAccel512=core2_doAccel512
+ -DdoAccel64=core2_doAccel64
+ -Dexpand_stream=core2_expand_stream
+ -DfdrExec=core2_fdrExec
+ -DfdrExecStreaming=core2_fdrExecStreaming
+ -Dfdr_exec_fat_teddy_msks1=core2_fdr_exec_fat_teddy_msks1
+ -Dfdr_exec_fat_teddy_msks1_pck=core2_fdr_exec_fat_teddy_msks1_pck
+ -Dfdr_exec_fat_teddy_msks2=core2_fdr_exec_fat_teddy_msks2
+ -Dfdr_exec_fat_teddy_msks2_pck=core2_fdr_exec_fat_teddy_msks2_pck
+ -Dfdr_exec_fat_teddy_msks3=core2_fdr_exec_fat_teddy_msks3
+ -Dfdr_exec_fat_teddy_msks3_pck=core2_fdr_exec_fat_teddy_msks3_pck
+ -Dfdr_exec_fat_teddy_msks4=core2_fdr_exec_fat_teddy_msks4
+ -Dfdr_exec_fat_teddy_msks4_pck=core2_fdr_exec_fat_teddy_msks4_pck
+ -Dfdr_exec_teddy_msks1=core2_fdr_exec_teddy_msks1
+ -Dfdr_exec_teddy_msks1_pck=core2_fdr_exec_teddy_msks1_pck
+ -Dfdr_exec_teddy_msks2=core2_fdr_exec_teddy_msks2
+ -Dfdr_exec_teddy_msks2_pck=core2_fdr_exec_teddy_msks2_pck
+ -Dfdr_exec_teddy_msks3=core2_fdr_exec_teddy_msks3
+ -Dfdr_exec_teddy_msks3_pck=core2_fdr_exec_teddy_msks3_pck
+ -Dfdr_exec_teddy_msks4=core2_fdr_exec_teddy_msks4
+ -Dfdr_exec_teddy_msks4_pck=core2_fdr_exec_teddy_msks4_pck
+ -DflushQueuedLiterals_i=core2_flushQueuedLiterals_i
+ -DflushStoredSomMatches_i=core2_flushStoredSomMatches_i
+ -DhandleSomExternal=core2_handleSomExternal
+ -DhandleSomInternal=core2_handleSomInternal
+ -Dhs_alloc_scratch=core2_hs_alloc_scratch
+ -Dhs_clone_scratch=core2_hs_clone_scratch
+ -Dhs_close_stream=core2_hs_close_stream
+ -Dhs_compress_stream=core2_hs_compress_stream
+ -Dhs_copy_stream=core2_hs_copy_stream
+ -Dhs_database_alloc=core2_hs_database_alloc
+ -Dhs_database_free=core2_hs_database_free
+ -Dhs_database_info=core2_hs_database_info
+ -Dhs_database_size=core2_hs_database_size
+ -Dhs_deserialize_database=core2_hs_deserialize_database
+ -Dhs_deserialize_database_at=core2_hs_deserialize_database_at
+ -Dhs_expand_stream=core2_hs_expand_stream
+ -Dhs_free_database=core2_hs_free_database
+ -Dhs_free_scratch=core2_hs_free_scratch
+ -Dhs_misc_alloc=core2_hs_misc_alloc
+ -Dhs_misc_free=core2_hs_misc_free
+ -Dhs_open_stream=core2_hs_open_stream
+ -Dhs_reset_and_copy_stream=core2_hs_reset_and_copy_stream
+ -Dhs_reset_and_expand_stream=core2_hs_reset_and_expand_stream
+ -Dhs_reset_stream=core2_hs_reset_stream
+ -Dhs_scan=core2_hs_scan
+ -Dhs_scan_stream=core2_hs_scan_stream
+ -Dhs_scan_vector=core2_hs_scan_vector
+ -Dhs_scratch_alloc=core2_hs_scratch_alloc
+ -Dhs_scratch_free=core2_hs_scratch_free
+ -Dhs_scratch_size=core2_hs_scratch_size
+ -Dhs_serialize_database=core2_hs_serialize_database
+ -Dhs_serialized_database_info=core2_hs_serialized_database_info
+ -Dhs_serialized_database_size=core2_hs_serialized_database_size
+ -Dhs_set_allocator=core2_hs_set_allocator
+ -Dhs_set_database_allocator=core2_hs_set_database_allocator
+ -Dhs_set_misc_allocator=core2_hs_set_misc_allocator
+ -Dhs_set_scratch_allocator=core2_hs_set_scratch_allocator
+ -Dhs_set_stream_allocator=core2_hs_set_stream_allocator
+ -Dhs_stream_alloc=core2_hs_stream_alloc
+ -Dhs_stream_free=core2_hs_stream_free
+ -Dhs_stream_size=core2_hs_stream_size
+ -Dhs_valid_platform=core2_hs_valid_platform
+ -Dhs_version=core2_hs_version
+ -DhwlmExec=core2_hwlmExec
+ -DhwlmExecStreaming=core2_hwlmExecStreaming
+ -DloadSomFromStream=core2_loadSomFromStream
+ -Dloadcompressed128=core2_loadcompressed128
+ -Dloadcompressed256=core2_loadcompressed256
+ -Dloadcompressed32=core2_loadcompressed32
+ -Dloadcompressed384=core2_loadcompressed384
+ -Dloadcompressed512=core2_loadcompressed512
+ -Dloadcompressed64=core2_loadcompressed64
+ -Dmcsheng_pext_mask=core2_mcsheng_pext_mask
+ -Dmm_mask_mask=core2_mm_mask_mask
+ -Dmm_shuffle_end=core2_mm_shuffle_end
+ -Dmmbit_keyshift_lut=core2_mmbit_keyshift_lut
+ -Dmmbit_maxlevel_direct_lut=core2_mmbit_maxlevel_direct_lut
+ -Dmmbit_maxlevel_from_keyshift_lut=core2_mmbit_maxlevel_from_keyshift_lut
+ -Dmmbit_root_offset_from_level=core2_mmbit_root_offset_from_level
+ -Dmmbit_zero_to_lut=core2_mmbit_zero_to_lut
+ -DnfaBlockExecReverse=core2_nfaBlockExecReverse
+ -DnfaCheckFinalState=core2_nfaCheckFinalState
+ -DnfaExecCastle_Q=core2_nfaExecCastle_Q
+ -DnfaExecCastle_Q2=core2_nfaExecCastle_Q2
+ -DnfaExecCastle_QR=core2_nfaExecCastle_QR
+ -DnfaExecCastle_expandState=core2_nfaExecCastle_expandState
+ -DnfaExecCastle_inAccept=core2_nfaExecCastle_inAccept
+ -DnfaExecCastle_inAnyAccept=core2_nfaExecCastle_inAnyAccept
+ -DnfaExecCastle_initCompressedState=core2_nfaExecCastle_initCompressedState
+ -DnfaExecCastle_queueCompressState=core2_nfaExecCastle_queueCompressState
+ -DnfaExecCastle_queueInitState=core2_nfaExecCastle_queueInitState
+ -DnfaExecCastle_reportCurrent=core2_nfaExecCastle_reportCurrent
+ -DnfaExecGough16_Q=core2_nfaExecGough16_Q
+ -DnfaExecGough16_Q2=core2_nfaExecGough16_Q2
+ -DnfaExecGough16_QR=core2_nfaExecGough16_QR
+ -DnfaExecGough16_expandState=core2_nfaExecGough16_expandState
+ -DnfaExecGough16_inAccept=core2_nfaExecGough16_inAccept
+ -DnfaExecGough16_inAnyAccept=core2_nfaExecGough16_inAnyAccept
+ -DnfaExecGough16_initCompressedState=core2_nfaExecGough16_initCompressedState
+ -DnfaExecGough16_queueCompressState=core2_nfaExecGough16_queueCompressState
+ -DnfaExecGough16_queueInitState=core2_nfaExecGough16_queueInitState
+ -DnfaExecGough16_reportCurrent=core2_nfaExecGough16_reportCurrent
+ -DnfaExecGough16_testEOD=core2_nfaExecGough16_testEOD
+ -DnfaExecGough8_Q=core2_nfaExecGough8_Q
+ -DnfaExecGough8_Q2=core2_nfaExecGough8_Q2
+ -DnfaExecGough8_QR=core2_nfaExecGough8_QR
+ -DnfaExecGough8_expandState=core2_nfaExecGough8_expandState
+ -DnfaExecGough8_inAccept=core2_nfaExecGough8_inAccept
+ -DnfaExecGough8_inAnyAccept=core2_nfaExecGough8_inAnyAccept
+ -DnfaExecGough8_initCompressedState=core2_nfaExecGough8_initCompressedState
+ -DnfaExecGough8_queueCompressState=core2_nfaExecGough8_queueCompressState
+ -DnfaExecGough8_queueInitState=core2_nfaExecGough8_queueInitState
+ -DnfaExecGough8_reportCurrent=core2_nfaExecGough8_reportCurrent
+ -DnfaExecGough8_testEOD=core2_nfaExecGough8_testEOD
+ -DnfaExecLbrDot_Q=core2_nfaExecLbrDot_Q
+ -DnfaExecLbrDot_Q2=core2_nfaExecLbrDot_Q2
+ -DnfaExecLbrDot_QR=core2_nfaExecLbrDot_QR
+ -DnfaExecLbrDot_expandState=core2_nfaExecLbrDot_expandState
+ -DnfaExecLbrDot_inAccept=core2_nfaExecLbrDot_inAccept
+ -DnfaExecLbrDot_inAnyAccept=core2_nfaExecLbrDot_inAnyAccept
+ -DnfaExecLbrDot_initCompressedState=core2_nfaExecLbrDot_initCompressedState
+ -DnfaExecLbrDot_queueCompressState=core2_nfaExecLbrDot_queueCompressState
+ -DnfaExecLbrDot_queueInitState=core2_nfaExecLbrDot_queueInitState
+ -DnfaExecLbrDot_reportCurrent=core2_nfaExecLbrDot_reportCurrent
+ -DnfaExecLbrNVerm_Q=core2_nfaExecLbrNVerm_Q
+ -DnfaExecLbrNVerm_Q2=core2_nfaExecLbrNVerm_Q2
+ -DnfaExecLbrNVerm_QR=core2_nfaExecLbrNVerm_QR
+ -DnfaExecLbrNVerm_expandState=core2_nfaExecLbrNVerm_expandState
+ -DnfaExecLbrNVerm_inAccept=core2_nfaExecLbrNVerm_inAccept
+ -DnfaExecLbrNVerm_inAnyAccept=core2_nfaExecLbrNVerm_inAnyAccept
+ -DnfaExecLbrNVerm_initCompressedState=core2_nfaExecLbrNVerm_initCompressedState
+ -DnfaExecLbrNVerm_queueCompressState=core2_nfaExecLbrNVerm_queueCompressState
+ -DnfaExecLbrNVerm_queueInitState=core2_nfaExecLbrNVerm_queueInitState
+ -DnfaExecLbrNVerm_reportCurrent=core2_nfaExecLbrNVerm_reportCurrent
+ -DnfaExecLbrShuf_Q=core2_nfaExecLbrShuf_Q
+ -DnfaExecLbrShuf_Q2=core2_nfaExecLbrShuf_Q2
+ -DnfaExecLbrShuf_QR=core2_nfaExecLbrShuf_QR
+ -DnfaExecLbrShuf_expandState=core2_nfaExecLbrShuf_expandState
+ -DnfaExecLbrShuf_inAccept=core2_nfaExecLbrShuf_inAccept
+ -DnfaExecLbrShuf_inAnyAccept=core2_nfaExecLbrShuf_inAnyAccept
+ -DnfaExecLbrShuf_initCompressedState=core2_nfaExecLbrShuf_initCompressedState
+ -DnfaExecLbrShuf_queueCompressState=core2_nfaExecLbrShuf_queueCompressState
+ -DnfaExecLbrShuf_queueInitState=core2_nfaExecLbrShuf_queueInitState
+ -DnfaExecLbrShuf_reportCurrent=core2_nfaExecLbrShuf_reportCurrent
+ -DnfaExecLbrTruf_Q=core2_nfaExecLbrTruf_Q
+ -DnfaExecLbrTruf_Q2=core2_nfaExecLbrTruf_Q2
+ -DnfaExecLbrTruf_QR=core2_nfaExecLbrTruf_QR
+ -DnfaExecLbrTruf_expandState=core2_nfaExecLbrTruf_expandState
+ -DnfaExecLbrTruf_inAccept=core2_nfaExecLbrTruf_inAccept
+ -DnfaExecLbrTruf_inAnyAccept=core2_nfaExecLbrTruf_inAnyAccept
+ -DnfaExecLbrTruf_initCompressedState=core2_nfaExecLbrTruf_initCompressedState
+ -DnfaExecLbrTruf_queueCompressState=core2_nfaExecLbrTruf_queueCompressState
+ -DnfaExecLbrTruf_queueInitState=core2_nfaExecLbrTruf_queueInitState
+ -DnfaExecLbrTruf_reportCurrent=core2_nfaExecLbrTruf_reportCurrent
+ -DnfaExecLbrVerm_Q=core2_nfaExecLbrVerm_Q
+ -DnfaExecLbrVerm_Q2=core2_nfaExecLbrVerm_Q2
+ -DnfaExecLbrVerm_QR=core2_nfaExecLbrVerm_QR
+ -DnfaExecLbrVerm_expandState=core2_nfaExecLbrVerm_expandState
+ -DnfaExecLbrVerm_inAccept=core2_nfaExecLbrVerm_inAccept
+ -DnfaExecLbrVerm_inAnyAccept=core2_nfaExecLbrVerm_inAnyAccept
+ -DnfaExecLbrVerm_initCompressedState=core2_nfaExecLbrVerm_initCompressedState
+ -DnfaExecLbrVerm_queueCompressState=core2_nfaExecLbrVerm_queueCompressState
+ -DnfaExecLbrVerm_queueInitState=core2_nfaExecLbrVerm_queueInitState
+ -DnfaExecLbrVerm_reportCurrent=core2_nfaExecLbrVerm_reportCurrent
+ -DnfaExecLimEx128_B_Reverse=core2_nfaExecLimEx128_B_Reverse
+ -DnfaExecLimEx128_Q=core2_nfaExecLimEx128_Q
+ -DnfaExecLimEx128_Q2=core2_nfaExecLimEx128_Q2
+ -DnfaExecLimEx128_QR=core2_nfaExecLimEx128_QR
+ -DnfaExecLimEx128_expandState=core2_nfaExecLimEx128_expandState
+ -DnfaExecLimEx128_inAccept=core2_nfaExecLimEx128_inAccept
+ -DnfaExecLimEx128_inAnyAccept=core2_nfaExecLimEx128_inAnyAccept
+ -DnfaExecLimEx128_initCompressedState=core2_nfaExecLimEx128_initCompressedState
+ -DnfaExecLimEx128_queueCompressState=core2_nfaExecLimEx128_queueCompressState
+ -DnfaExecLimEx128_queueInitState=core2_nfaExecLimEx128_queueInitState
+ -DnfaExecLimEx128_reportCurrent=core2_nfaExecLimEx128_reportCurrent
+ -DnfaExecLimEx128_testEOD=core2_nfaExecLimEx128_testEOD
+ -DnfaExecLimEx128_zombie_status=core2_nfaExecLimEx128_zombie_status
+ -DnfaExecLimEx256_B_Reverse=core2_nfaExecLimEx256_B_Reverse
+ -DnfaExecLimEx256_Q=core2_nfaExecLimEx256_Q
+ -DnfaExecLimEx256_Q2=core2_nfaExecLimEx256_Q2
+ -DnfaExecLimEx256_QR=core2_nfaExecLimEx256_QR
+ -DnfaExecLimEx256_expandState=core2_nfaExecLimEx256_expandState
+ -DnfaExecLimEx256_inAccept=core2_nfaExecLimEx256_inAccept
+ -DnfaExecLimEx256_inAnyAccept=core2_nfaExecLimEx256_inAnyAccept
+ -DnfaExecLimEx256_initCompressedState=core2_nfaExecLimEx256_initCompressedState
+ -DnfaExecLimEx256_queueCompressState=core2_nfaExecLimEx256_queueCompressState
+ -DnfaExecLimEx256_queueInitState=core2_nfaExecLimEx256_queueInitState
+ -DnfaExecLimEx256_reportCurrent=core2_nfaExecLimEx256_reportCurrent
+ -DnfaExecLimEx256_testEOD=core2_nfaExecLimEx256_testEOD
+ -DnfaExecLimEx256_zombie_status=core2_nfaExecLimEx256_zombie_status
+ -DnfaExecLimEx32_B_Reverse=core2_nfaExecLimEx32_B_Reverse
+ -DnfaExecLimEx32_Q=core2_nfaExecLimEx32_Q
+ -DnfaExecLimEx32_Q2=core2_nfaExecLimEx32_Q2
+ -DnfaExecLimEx32_QR=core2_nfaExecLimEx32_QR
+ -DnfaExecLimEx32_expandState=core2_nfaExecLimEx32_expandState
+ -DnfaExecLimEx32_inAccept=core2_nfaExecLimEx32_inAccept
+ -DnfaExecLimEx32_inAnyAccept=core2_nfaExecLimEx32_inAnyAccept
+ -DnfaExecLimEx32_initCompressedState=core2_nfaExecLimEx32_initCompressedState
+ -DnfaExecLimEx32_queueCompressState=core2_nfaExecLimEx32_queueCompressState
+ -DnfaExecLimEx32_queueInitState=core2_nfaExecLimEx32_queueInitState
+ -DnfaExecLimEx32_reportCurrent=core2_nfaExecLimEx32_reportCurrent
+ -DnfaExecLimEx32_testEOD=core2_nfaExecLimEx32_testEOD
+ -DnfaExecLimEx32_zombie_status=core2_nfaExecLimEx32_zombie_status
+ -DnfaExecLimEx384_B_Reverse=core2_nfaExecLimEx384_B_Reverse
+ -DnfaExecLimEx384_Q=core2_nfaExecLimEx384_Q
+ -DnfaExecLimEx384_Q2=core2_nfaExecLimEx384_Q2
+ -DnfaExecLimEx384_QR=core2_nfaExecLimEx384_QR
+ -DnfaExecLimEx384_expandState=core2_nfaExecLimEx384_expandState
+ -DnfaExecLimEx384_inAccept=core2_nfaExecLimEx384_inAccept
+ -DnfaExecLimEx384_inAnyAccept=core2_nfaExecLimEx384_inAnyAccept
+ -DnfaExecLimEx384_initCompressedState=core2_nfaExecLimEx384_initCompressedState
+ -DnfaExecLimEx384_queueCompressState=core2_nfaExecLimEx384_queueCompressState
+ -DnfaExecLimEx384_queueInitState=core2_nfaExecLimEx384_queueInitState
+ -DnfaExecLimEx384_reportCurrent=core2_nfaExecLimEx384_reportCurrent
+ -DnfaExecLimEx384_testEOD=core2_nfaExecLimEx384_testEOD
+ -DnfaExecLimEx384_zombie_status=core2_nfaExecLimEx384_zombie_status
+ -DnfaExecLimEx512_B_Reverse=core2_nfaExecLimEx512_B_Reverse
+ -DnfaExecLimEx512_Q=core2_nfaExecLimEx512_Q
+ -DnfaExecLimEx512_Q2=core2_nfaExecLimEx512_Q2
+ -DnfaExecLimEx512_QR=core2_nfaExecLimEx512_QR
+ -DnfaExecLimEx512_expandState=core2_nfaExecLimEx512_expandState
+ -DnfaExecLimEx512_inAccept=core2_nfaExecLimEx512_inAccept
+ -DnfaExecLimEx512_inAnyAccept=core2_nfaExecLimEx512_inAnyAccept
+ -DnfaExecLimEx512_initCompressedState=core2_nfaExecLimEx512_initCompressedState
+ -DnfaExecLimEx512_queueCompressState=core2_nfaExecLimEx512_queueCompressState
+ -DnfaExecLimEx512_queueInitState=core2_nfaExecLimEx512_queueInitState
+ -DnfaExecLimEx512_reportCurrent=core2_nfaExecLimEx512_reportCurrent
+ -DnfaExecLimEx512_testEOD=core2_nfaExecLimEx512_testEOD
+ -DnfaExecLimEx512_zombie_status=core2_nfaExecLimEx512_zombie_status
+ -DnfaExecLimEx64_B_Reverse=core2_nfaExecLimEx64_B_Reverse
+ -DnfaExecLimEx64_Q=core2_nfaExecLimEx64_Q
+ -DnfaExecLimEx64_Q2=core2_nfaExecLimEx64_Q2
+ -DnfaExecLimEx64_QR=core2_nfaExecLimEx64_QR
+ -DnfaExecLimEx64_expandState=core2_nfaExecLimEx64_expandState
+ -DnfaExecLimEx64_inAccept=core2_nfaExecLimEx64_inAccept
+ -DnfaExecLimEx64_inAnyAccept=core2_nfaExecLimEx64_inAnyAccept
+ -DnfaExecLimEx64_initCompressedState=core2_nfaExecLimEx64_initCompressedState
+ -DnfaExecLimEx64_queueCompressState=core2_nfaExecLimEx64_queueCompressState
+ -DnfaExecLimEx64_queueInitState=core2_nfaExecLimEx64_queueInitState
+ -DnfaExecLimEx64_reportCurrent=core2_nfaExecLimEx64_reportCurrent
+ -DnfaExecLimEx64_testEOD=core2_nfaExecLimEx64_testEOD
+ -DnfaExecLimEx64_zombie_status=core2_nfaExecLimEx64_zombie_status
+ -DnfaExecMcClellan16_B=core2_nfaExecMcClellan16_B
+ -DnfaExecMcClellan16_Q=core2_nfaExecMcClellan16_Q
+ -DnfaExecMcClellan16_Q2=core2_nfaExecMcClellan16_Q2
+ -DnfaExecMcClellan16_QR=core2_nfaExecMcClellan16_QR
+ -DnfaExecMcClellan16_SimpStream=core2_nfaExecMcClellan16_SimpStream
+ -DnfaExecMcClellan16_expandState=core2_nfaExecMcClellan16_expandState
+ -DnfaExecMcClellan16_inAccept=core2_nfaExecMcClellan16_inAccept
+ -DnfaExecMcClellan16_inAnyAccept=core2_nfaExecMcClellan16_inAnyAccept
+ -DnfaExecMcClellan16_initCompressedState=core2_nfaExecMcClellan16_initCompressedState
+ -DnfaExecMcClellan16_queueCompressState=core2_nfaExecMcClellan16_queueCompressState
+ -DnfaExecMcClellan16_queueInitState=core2_nfaExecMcClellan16_queueInitState
+ -DnfaExecMcClellan16_reportCurrent=core2_nfaExecMcClellan16_reportCurrent
+ -DnfaExecMcClellan16_testEOD=core2_nfaExecMcClellan16_testEOD
+ -DnfaExecMcClellan8_B=core2_nfaExecMcClellan8_B
+ -DnfaExecMcClellan8_Q=core2_nfaExecMcClellan8_Q
+ -DnfaExecMcClellan8_Q2=core2_nfaExecMcClellan8_Q2
+ -DnfaExecMcClellan8_QR=core2_nfaExecMcClellan8_QR
+ -DnfaExecMcClellan8_SimpStream=core2_nfaExecMcClellan8_SimpStream
+ -DnfaExecMcClellan8_expandState=core2_nfaExecMcClellan8_expandState
+ -DnfaExecMcClellan8_inAccept=core2_nfaExecMcClellan8_inAccept
+ -DnfaExecMcClellan8_inAnyAccept=core2_nfaExecMcClellan8_inAnyAccept
+ -DnfaExecMcClellan8_initCompressedState=core2_nfaExecMcClellan8_initCompressedState
+ -DnfaExecMcClellan8_queueCompressState=core2_nfaExecMcClellan8_queueCompressState
+ -DnfaExecMcClellan8_queueInitState=core2_nfaExecMcClellan8_queueInitState
+ -DnfaExecMcClellan8_reportCurrent=core2_nfaExecMcClellan8_reportCurrent
+ -DnfaExecMcClellan8_testEOD=core2_nfaExecMcClellan8_testEOD
+ -DnfaExecMcSheng16_Q=core2_nfaExecMcSheng16_Q
+ -DnfaExecMcSheng16_Q2=core2_nfaExecMcSheng16_Q2
+ -DnfaExecMcSheng16_QR=core2_nfaExecMcSheng16_QR
+ -DnfaExecMcSheng16_expandState=core2_nfaExecMcSheng16_expandState
+ -DnfaExecMcSheng16_inAccept=core2_nfaExecMcSheng16_inAccept
+ -DnfaExecMcSheng16_inAnyAccept=core2_nfaExecMcSheng16_inAnyAccept
+ -DnfaExecMcSheng16_initCompressedState=core2_nfaExecMcSheng16_initCompressedState
+ -DnfaExecMcSheng16_queueCompressState=core2_nfaExecMcSheng16_queueCompressState
+ -DnfaExecMcSheng16_queueInitState=core2_nfaExecMcSheng16_queueInitState
+ -DnfaExecMcSheng16_reportCurrent=core2_nfaExecMcSheng16_reportCurrent
+ -DnfaExecMcSheng16_testEOD=core2_nfaExecMcSheng16_testEOD
+ -DnfaExecMcSheng8_Q=core2_nfaExecMcSheng8_Q
+ -DnfaExecMcSheng8_Q2=core2_nfaExecMcSheng8_Q2
+ -DnfaExecMcSheng8_QR=core2_nfaExecMcSheng8_QR
+ -DnfaExecMcSheng8_expandState=core2_nfaExecMcSheng8_expandState
+ -DnfaExecMcSheng8_inAccept=core2_nfaExecMcSheng8_inAccept
+ -DnfaExecMcSheng8_inAnyAccept=core2_nfaExecMcSheng8_inAnyAccept
+ -DnfaExecMcSheng8_initCompressedState=core2_nfaExecMcSheng8_initCompressedState
+ -DnfaExecMcSheng8_queueCompressState=core2_nfaExecMcSheng8_queueCompressState
+ -DnfaExecMcSheng8_queueInitState=core2_nfaExecMcSheng8_queueInitState
+ -DnfaExecMcSheng8_reportCurrent=core2_nfaExecMcSheng8_reportCurrent
+ -DnfaExecMcSheng8_testEOD=core2_nfaExecMcSheng8_testEOD
+ -DnfaExecMpv_Q=core2_nfaExecMpv_Q
+ -DnfaExecMpv_QueueExecRaw=core2_nfaExecMpv_QueueExecRaw
+ -DnfaExecMpv_expandState=core2_nfaExecMpv_expandState
+ -DnfaExecMpv_initCompressedState=core2_nfaExecMpv_initCompressedState
+ -DnfaExecMpv_queueCompressState=core2_nfaExecMpv_queueCompressState
+ -DnfaExecMpv_queueInitState=core2_nfaExecMpv_queueInitState
+ -DnfaExecMpv_reportCurrent=core2_nfaExecMpv_reportCurrent
+ -DnfaExecSheng_B=core2_nfaExecSheng_B
+ -DnfaExecSheng_Q=core2_nfaExecSheng_Q
+ -DnfaExecSheng_Q2=core2_nfaExecSheng_Q2
+ -DnfaExecSheng_QR=core2_nfaExecSheng_QR
+ -DnfaExecSheng_expandState=core2_nfaExecSheng_expandState
+ -DnfaExecSheng_inAccept=core2_nfaExecSheng_inAccept
+ -DnfaExecSheng_inAnyAccept=core2_nfaExecSheng_inAnyAccept
+ -DnfaExecSheng_initCompressedState=core2_nfaExecSheng_initCompressedState
+ -DnfaExecSheng_queueCompressState=core2_nfaExecSheng_queueCompressState
+ -DnfaExecSheng_queueInitState=core2_nfaExecSheng_queueInitState
+ -DnfaExecSheng_reportCurrent=core2_nfaExecSheng_reportCurrent
+ -DnfaExecSheng_testEOD=core2_nfaExecSheng_testEOD
+ -DnfaExecTamarama_Q=core2_nfaExecTamarama_Q
+ -DnfaExecTamarama_Q2=core2_nfaExecTamarama_Q2
+ -DnfaExecTamarama_QR=core2_nfaExecTamarama_QR
+ -DnfaExecTamarama_expandState=core2_nfaExecTamarama_expandState
+ -DnfaExecTamarama_inAccept=core2_nfaExecTamarama_inAccept
+ -DnfaExecTamarama_inAnyAccept=core2_nfaExecTamarama_inAnyAccept
+ -DnfaExecTamarama_queueCompressState=core2_nfaExecTamarama_queueCompressState
+ -DnfaExecTamarama_queueInitState=core2_nfaExecTamarama_queueInitState
+ -DnfaExecTamarama_reportCurrent=core2_nfaExecTamarama_reportCurrent
+ -DnfaExecTamarama_testEOD=core2_nfaExecTamarama_testEOD
+ -DnfaExecTamarama_zombie_status=core2_nfaExecTamarama_zombie_status
+ -DnfaExpandState=core2_nfaExpandState
+ -DnfaGetZombieStatus=core2_nfaGetZombieStatus
+ -DnfaInAcceptState=core2_nfaInAcceptState
+ -DnfaInAnyAcceptState=core2_nfaInAnyAcceptState
+ -DnfaInitCompressedState=core2_nfaInitCompressedState
+ -DnfaQueueCompressState=core2_nfaQueueCompressState
+ -DnfaQueueExec=core2_nfaQueueExec
+ -DnfaQueueExec2_raw=core2_nfaQueueExec2_raw
+ -DnfaQueueExecRose=core2_nfaQueueExecRose
+ -DnfaQueueExecToMatch=core2_nfaQueueExecToMatch
+ -DnfaQueueExec_raw=core2_nfaQueueExec_raw
+ -DnfaQueueInitState=core2_nfaQueueInitState
+ -DnfaReportCurrentMatches=core2_nfaReportCurrentMatches
+ -DnoodExec=core2_noodExec
+ -DnoodExecStreaming=core2_noodExecStreaming
+ -Dp_mask_arr=core2_p_mask_arr
+ -Dp_mask_arr256=core2_p_mask_arr256
+ -DrepeatHasMatchBitmap=core2_repeatHasMatchBitmap
+ -DrepeatHasMatchRange=core2_repeatHasMatchRange
+ -DrepeatHasMatchRing=core2_repeatHasMatchRing
+ -DrepeatHasMatchSparseOptimalP=core2_repeatHasMatchSparseOptimalP
+ -DrepeatHasMatchTrailer=core2_repeatHasMatchTrailer
+ -DrepeatLastTopBitmap=core2_repeatLastTopBitmap
+ -DrepeatLastTopRange=core2_repeatLastTopRange
+ -DrepeatLastTopRing=core2_repeatLastTopRing
+ -DrepeatLastTopSparseOptimalP=core2_repeatLastTopSparseOptimalP
+ -DrepeatLastTopTrailer=core2_repeatLastTopTrailer
+ -DrepeatNextMatchBitmap=core2_repeatNextMatchBitmap
+ -DrepeatNextMatchRange=core2_repeatNextMatchRange
+ -DrepeatNextMatchRing=core2_repeatNextMatchRing
+ -DrepeatNextMatchSparseOptimalP=core2_repeatNextMatchSparseOptimalP
+ -DrepeatNextMatchTrailer=core2_repeatNextMatchTrailer
+ -DrepeatPack=core2_repeatPack
+ -DrepeatStoreBitmap=core2_repeatStoreBitmap
+ -DrepeatStoreRange=core2_repeatStoreRange
+ -DrepeatStoreRing=core2_repeatStoreRing
+ -DrepeatStoreSparseOptimalP=core2_repeatStoreSparseOptimalP
+ -DrepeatStoreTrailer=core2_repeatStoreTrailer
+ -DrepeatUnpack=core2_repeatUnpack
+ -DroseAnchoredCallback=core2_roseAnchoredCallback
+ -DroseBlockExec=core2_roseBlockExec
+ -DroseCallback=core2_roseCallback
+ -DroseCatchUpAll=core2_roseCatchUpAll
+ -DroseCatchUpMPV_i=core2_roseCatchUpMPV_i
+ -DroseCatchUpSuf=core2_roseCatchUpSuf
+ -DroseDelayRebuildCallback=core2_roseDelayRebuildCallback
+ -DroseFloatingCallback=core2_roseFloatingCallback
+ -DroseHandleChainMatch=core2_roseHandleChainMatch
+ -DroseInitState=core2_roseInitState
+ -DroseNfaAdaptor=core2_roseNfaAdaptor
+ -DroseNfaEarliestSom=core2_roseNfaEarliestSom
+ -DroseReportAdaptor=core2_roseReportAdaptor
+ -DroseRunBoundaryProgram=core2_roseRunBoundaryProgram
+ -DroseRunFlushCombProgram=core2_roseRunFlushCombProgram
+ -DroseRunLastFlushCombProgram=core2_roseRunLastFlushCombProgram
+ -DroseRunProgram=core2_roseRunProgram
+ -DroseRunProgram_l=core2_roseRunProgram_l
+ -DroseStreamEodExec=core2_roseStreamEodExec
+ -DroseStreamExec=core2_roseStreamExec
+ -DrshuftiExec=core2_rshuftiExec
+ -DrtruffleExec=core2_rtruffleExec
+ -Drun_accel=core2_run_accel
+ -DsetSomFromSomAware=core2_setSomFromSomAware
+ -DshuftiDoubleExec=core2_shuftiDoubleExec
+ -DshuftiExec=core2_shuftiExec
+ -Dsimd_onebit_masks=core2_simd_onebit_masks
+ -Dsize_compress_stream=core2_size_compress_stream
+ -DstoreSomToStream=core2_storeSomToStream
+ -Dstorecompressed128=core2_storecompressed128
+ -Dstorecompressed256=core2_storecompressed256
+ -Dstorecompressed32=core2_storecompressed32
+ -Dstorecompressed384=core2_storecompressed384
+ -Dstorecompressed512=core2_storecompressed512
+ -Dstorecompressed64=core2_storecompressed64
+ -DstreamInitSufPQ=core2_streamInitSufPQ
+ -DtruffleExec=core2_truffleExec
+ -Dvbs_mask_data=core2_vbs_mask_data
+)
+
+SRCDIR(contrib/libs/hyperscan)
+
+SRCS(
+ src/alloc.c
+ src/crc32.c
+ src/database.c
+ src/fdr/fdr.c
+ src/fdr/teddy.c
+ src/fdr/teddy_avx2.c
+ src/hs_valid_platform.c
+ src/hs_version.c
+ src/hwlm/hwlm.c
+ src/hwlm/noodle_engine.c
+ src/nfa/accel.c
+ src/nfa/castle.c
+ src/nfa/gough.c
+ src/nfa/lbr.c
+ src/nfa/limex_64.c
+ src/nfa/limex_accel.c
+ src/nfa/limex_native.c
+ src/nfa/limex_simd128.c
+ src/nfa/limex_simd256.c
+ src/nfa/limex_simd384.c
+ src/nfa/limex_simd512.c
+ src/nfa/mcclellan.c
+ src/nfa/mcsheng.c
+ src/nfa/mcsheng_data.c
+ src/nfa/mpv.c
+ src/nfa/nfa_api_dispatch.c
+ src/nfa/repeat.c
+ src/nfa/sheng.c
+ src/nfa/shufti.c
+ src/nfa/tamarama.c
+ src/nfa/truffle.c
+ src/rose/block.c
+ src/rose/catchup.c
+ src/rose/init.c
+ src/rose/match.c
+ src/rose/program_runtime.c
+ src/rose/stream.c
+ src/runtime.c
+ src/scratch.c
+ src/som/som_runtime.c
+ src/som/som_stream.c
+ src/stream_compress.c
+ src/util/cpuid_flags.c
+ src/util/masked_move.c
+ src/util/multibit.c
+ src/util/simd_utils.c
+ src/util/state_compress.c
+)
+
+END()
diff --git a/contrib/libs/hyperscan/runtime_corei7/.yandex_meta/licenses.list.txt b/contrib/libs/hyperscan/runtime_corei7/.yandex_meta/licenses.list.txt
index 358c19fe4a..b2ced66bbd 100644
--- a/contrib/libs/hyperscan/runtime_corei7/.yandex_meta/licenses.list.txt
+++ b/contrib/libs/hyperscan/runtime_corei7/.yandex_meta/licenses.list.txt
@@ -1,32 +1,32 @@
-====================BSD-3-Clause====================
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2018, Intel Corporation
-
-
-====================COPYRIGHT====================
- * Copyright (c) 2015-2019, Intel Corporation
+====================BSD-3-Clause====================
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2018, Intel Corporation
+
+
+====================COPYRIGHT====================
+ * Copyright (c) 2015-2019, Intel Corporation
diff --git a/contrib/libs/hyperscan/runtime_corei7/hs_common.h b/contrib/libs/hyperscan/runtime_corei7/hs_common.h
index c0b5f79fd0..174bb2a250 100644
--- a/contrib/libs/hyperscan/runtime_corei7/hs_common.h
+++ b/contrib/libs/hyperscan/runtime_corei7/hs_common.h
@@ -1,596 +1,596 @@
-/*
- * Copyright (c) 2015-2019, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HS_COREI7_COMMON_H
-#define HS_COREI7_COMMON_H
-
-#if defined(_WIN32)
-#define HS_CDECL __cdecl
-#else
-#define HS_CDECL
-#endif
-#include <stdlib.h>
-
-/**
- * @file
- * @brief The Hyperscan common API definition.
- *
- * Hyperscan is a high-speed regular expression engine.
- *
- * This header contains functions available to both the Hyperscan compiler and
- * runtime.
- */
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-struct hs_database;
-
-/**
- * A Hyperscan pattern database.
- *
- * Generated by one of the Hyperscan compiler functions:
- * - @ref hs_compile()
- * - @ref hs_compile_multi()
- * - @ref hs_compile_ext_multi()
- */
-typedef struct hs_database hs_database_t;
-
-/**
- * A type for errors returned by Hyperscan functions.
- */
-typedef int hs_error_t;
-
-/**
- * Free a compiled pattern database.
- *
- * The free callback set by @ref hs_set_database_allocator() (or @ref
- * hs_set_allocator()) will be used by this function.
- *
- * @param db
- * A compiled pattern database. NULL may also be safely provided, in which
- * case the function does nothing.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_free_database(hs_database_t *db);
-
-/**
- * Serialize a pattern database to a stream of bytes.
- *
- * The allocator callback set by @ref hs_set_misc_allocator() (or @ref
- * hs_set_allocator()) will be used by this function.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param bytes
- * On success, a pointer to an array of bytes will be returned here.
- * These bytes can be subsequently relocated or written to disk. The
- * caller is responsible for freeing this block.
- *
- * @param length
- * On success, the number of bytes in the generated byte array will be
- * returned here.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_NOMEM if the byte array cannot be
- * allocated, other values may be returned if errors are detected.
- */
-hs_error_t corei7_hs_serialize_database(const hs_database_t *db, char **bytes,
- size_t *length);
-
-/**
- * Reconstruct a pattern database from a stream of bytes previously generated
- * by @ref hs_serialize_database().
- *
- * This function will allocate sufficient space for the database using the
- * allocator set with @ref hs_set_database_allocator() (or @ref
- * hs_set_allocator()); to use a pre-allocated region of memory, use the @ref
- * hs_deserialize_database_at() function.
- *
- * @param bytes
- * A byte array generated by @ref hs_serialize_database() representing a
- * compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param db
- * On success, a pointer to a newly allocated @ref hs_database_t will be
- * returned here. This database can then be used for scanning, and
- * eventually freed by the caller using @ref hs_free_database().
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_deserialize_database(const char *bytes,
- const size_t length,
- hs_database_t **db);
-
-/**
- * Reconstruct a pattern database from a stream of bytes previously generated
- * by @ref hs_serialize_database() at a given memory location.
- *
- * This function (unlike @ref hs_deserialize_database()) will write the
- * reconstructed database to the memory location given in the @p db parameter.
- * The amount of space required at this location can be determined with the
- * @ref hs_serialized_database_size() function.
- *
- * @param bytes
- * A byte array generated by @ref hs_serialize_database() representing a
- * compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param db
- * Pointer to an 8-byte aligned block of memory of sufficient size to hold
- * the deserialized database. On success, the reconstructed database will
- * be written to this location. This database can then be used for pattern
- * matching. The user is responsible for freeing this memory; the @ref
- * hs_free_database() call should not be used.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_deserialize_database_at(const char *bytes,
- const size_t length,
- hs_database_t *db);
-
-/**
- * Provides the size of the stream state allocated by a single stream opened
- * against the given database.
- *
- * @param database
- * Pointer to a compiled (streaming mode) pattern database.
- *
- * @param stream_size
- * On success, the size in bytes of an individual stream opened against the
- * given database is placed in this parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_stream_size(const hs_database_t *database,
- size_t *stream_size);
-
-/**
- * Provides the size of the given database in bytes.
- *
- * @param database
- * Pointer to compiled pattern database.
- *
- * @param database_size
- * On success, the size of the compiled database in bytes is placed in this
- * parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_database_size(const hs_database_t *database,
- size_t *database_size);
-
-/**
- * Utility function for reporting the size that would be required by a
- * database if it were deserialized.
- *
- * This can be used to allocate a shared memory region or other "special"
- * allocation prior to deserializing with the @ref hs_deserialize_database_at()
- * function.
- *
- * @param bytes
- * Pointer to a byte array generated by @ref hs_serialize_database()
- * representing a compiled pattern database.
- *
- * @param length
- * The length of the byte array generated by @ref hs_serialize_database().
- * This should be the same value as that returned by @ref
- * hs_serialize_database().
- *
- * @param deserialized_size
- * On success, the size of the compiled database that would be generated
- * by @ref hs_deserialize_database_at() is returned here.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_serialized_database_size(const char *bytes,
- const size_t length,
- size_t *deserialized_size);
-
-/**
- * Utility function providing information about a database.
- *
- * @param database
- * Pointer to a compiled database.
- *
- * @param info
- * On success, a string containing the version and platform information for
- * the supplied database is placed in the parameter. The string is
- * allocated using the allocator supplied in @ref hs_set_misc_allocator()
- * (or malloc() if no allocator was set) and should be freed by the caller.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_database_info(const hs_database_t *database,
- char **info);
-
-/**
- * Utility function providing information about a serialized database.
- *
- * @param bytes
- * Pointer to a serialized database.
- *
- * @param length
- * Length in bytes of the serialized database.
- *
- * @param info
- * On success, a string containing the version and platform information
- * for the supplied serialized database is placed in the parameter. The
- * string is allocated using the allocator supplied in @ref
- * hs_set_misc_allocator() (or malloc() if no allocator was set) and
- * should be freed by the caller.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_serialized_database_info(const char *bytes,
- size_t length, char **info);
-
-/**
- * The type of the callback function that will be used by Hyperscan to allocate
- * more memory at runtime as required, for example in @ref hs_open_stream() to
- * allocate stream state.
- *
- * If Hyperscan is to be used in a multi-threaded, or similarly concurrent
- * environment, the allocation function will need to be re-entrant, or
- * similarly safe for concurrent use.
- *
- * @param size
- * The number of bytes to allocate.
- * @return
- * A pointer to the region of memory allocated, or NULL on error.
- */
-typedef void *(HS_CDECL *hs_alloc_t)(size_t size);
-
-/**
- * The type of the callback function that will be used by Hyperscan to free
- * memory regions previously allocated using the @ref hs_alloc_t function.
- *
- * @param ptr
- * The region of memory to be freed.
- */
-typedef void (HS_CDECL *hs_free_t)(void *ptr);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating
- * memory at runtime for stream state, scratch space, database bytecode,
- * and various other data structures returned by the Hyperscan API.
- *
- * The function is equivalent to calling @ref hs_set_stream_allocator(),
- * @ref hs_set_scratch_allocator(), @ref hs_set_database_allocator() and
- * @ref hs_set_misc_allocator() with the provided parameters.
- *
- * This call will override any previous allocators that have been set.
- *
- * Note: there is no way to change the allocator used for temporary objects
- * created during the various compile calls (@ref hs_compile(), @ref
- * hs_compile_multi(), @ref hs_compile_ext_multi()).
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_set_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for database bytecode produced by the compile calls (@ref hs_compile(), @ref
- * hs_compile_multi(), @ref hs_compile_ext_multi()) and by database
- * deserialization (@ref hs_deserialize_database()).
- *
- * If no database allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous database allocators that have been set.
- *
- * Note: the database allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * Note: there is no way to change how temporary objects created during the
- * various compile calls (@ref hs_compile(), @ref hs_compile_multi(), @ref
- * hs_compile_ext_multi()) are allocated.
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_set_database_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for items returned by the Hyperscan API such as @ref hs_compile_error_t, @ref
- * hs_expr_info_t and serialized databases.
- *
- * If no misc allocation functions are set, or if NULL is used in place of both
- * parameters, then memory allocation will default to standard methods (such as
- * the system malloc() and free() calls).
- *
- * This call will override any previous misc allocators that have been set.
- *
- * Note: the misc allocator may also be set by calling @ref hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_set_misc_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for scratch space by @ref hs_alloc_scratch() and @ref hs_clone_scratch().
- *
- * If no scratch allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous scratch allocators that have been set.
- *
- * Note: the scratch allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_set_scratch_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Set the allocate and free functions used by Hyperscan for allocating memory
- * for stream state by @ref hs_open_stream().
- *
- * If no stream allocation functions are set, or if NULL is used in place of
- * both parameters, then memory allocation will default to standard methods
- * (such as the system malloc() and free() calls).
- *
- * This call will override any previous stream allocators that have been set.
- *
- * Note: the stream allocator may also be set by calling @ref
- * hs_set_allocator().
- *
- * @param alloc_func
- * A callback function pointer that allocates memory. This function must
- * return memory suitably aligned for the largest representable data type
- * on this platform.
- *
- * @param free_func
- * A callback function pointer that frees allocated memory.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_set_stream_allocator(hs_alloc_t alloc_func,
- hs_free_t free_func);
-
-/**
- * Utility function for identifying this release version.
- *
- * @return
- * A string containing the version number of this release build and the
- * date of the build. It is allocated statically, so it does not need to
- * be freed by the caller.
- */
-const char * corei7_hs_version(void);
-
-/**
- * Utility function to test the current system architecture.
- *
- * Hyperscan requires the Supplemental Streaming SIMD Extensions 3 instruction
- * set. This function can be called on any x86 platform to determine if the
- * system provides the required instruction set.
- *
- * This function does not test for more advanced features if Hyperscan has
- * been built for a more specific architecture, for example the AVX2
- * instruction set.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_ARCH_ERROR if system does not
- * support Hyperscan.
- */
-hs_error_t corei7_hs_valid_platform(void);
-
-/**
- * @defgroup HS_ERROR hs_error_t values
- *
- * @{
- */
-
-/**
- * The engine completed normally.
- */
-#define HS_SUCCESS 0
-
-/**
- * A parameter passed to this function was invalid.
- *
- * This error is only returned in cases where the function can detect an
- * invalid parameter -- it cannot be relied upon to detect (for example)
- * pointers to freed memory or other invalid data.
- */
-#define HS_INVALID (-1)
-
-/**
- * A memory allocation failed.
- */
-#define HS_NOMEM (-2)
-
-/**
- * The engine was terminated by callback.
- *
- * This return value indicates that the target buffer was partially scanned,
- * but that the callback function requested that scanning cease after a match
- * was located.
- */
-#define HS_SCAN_TERMINATED (-3)
-
-/**
- * The pattern compiler failed, and the @ref hs_compile_error_t should be
- * inspected for more detail.
- */
-#define HS_COMPILER_ERROR (-4)
-
-/**
- * The given database was built for a different version of Hyperscan.
- */
-#define HS_DB_VERSION_ERROR (-5)
-
-/**
- * The given database was built for a different platform (i.e., CPU type).
- */
-#define HS_DB_PLATFORM_ERROR (-6)
-
-/**
- * The given database was built for a different mode of operation. This error
- * is returned when streaming calls are used with a block or vectored database
- * and vice versa.
- */
-#define HS_DB_MODE_ERROR (-7)
-
-/**
- * A parameter passed to this function was not correctly aligned.
- */
-#define HS_BAD_ALIGN (-8)
-
-/**
- * The memory allocator (either malloc() or the allocator set with @ref
- * hs_set_allocator()) did not correctly return memory suitably aligned for the
- * largest representable data type on this platform.
- */
-#define HS_BAD_ALLOC (-9)
-
-/**
- * The scratch region was already in use.
- *
- * This error is returned when Hyperscan is able to detect that the scratch
- * region given is already in use by another Hyperscan API call.
- *
- * A separate scratch region, allocated with @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch(), is required for every concurrent caller of the Hyperscan
- * API.
- *
- * For example, this error might be returned when @ref hs_scan() has been
- * called inside a callback delivered by a currently-executing @ref hs_scan()
- * call using the same scratch region.
- *
- * Note: Not all concurrent uses of scratch regions may be detected. This error
- * is intended as a best-effort debugging tool, not a guarantee.
- */
-#define HS_SCRATCH_IN_USE (-10)
-
-/**
- * Unsupported CPU architecture.
- *
- * This error is returned when Hyperscan is able to detect that the current
- * system does not support the required instruction set.
- *
- * At a minimum, Hyperscan requires Supplemental Streaming SIMD Extensions 3
- * (SSSE3).
- */
-#define HS_ARCH_ERROR (-11)
-
-/**
- * Provided buffer was too small.
- *
- * This error indicates that there was insufficient space in the buffer. The
- * call should be repeated with a larger provided buffer.
- *
- * Note: in this situation, it is normal for the amount of space required to be
- * returned in the same manner as the used space would have been returned if the
- * call was successful.
- */
-#define HS_INSUFFICIENT_SPACE (-12)
-
-/**
- * Unexpected internal error.
- *
- * This error indicates that there was unexpected matching behaviors. This
- * could be related to invalid usage of stream and scratch space or invalid memory
- * operations by users.
- *
- */
-#define HS_UNKNOWN_ERROR (-13)
-
-/** @} */
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* HS_COREI7_COMMON_H */
+/*
+ * Copyright (c) 2015-2019, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HS_COREI7_COMMON_H
+#define HS_COREI7_COMMON_H
+
+#if defined(_WIN32)
+#define HS_CDECL __cdecl
+#else
+#define HS_CDECL
+#endif
+#include <stdlib.h>
+
+/**
+ * @file
+ * @brief The Hyperscan common API definition.
+ *
+ * Hyperscan is a high-speed regular expression engine.
+ *
+ * This header contains functions available to both the Hyperscan compiler and
+ * runtime.
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+struct hs_database;
+
+/**
+ * A Hyperscan pattern database.
+ *
+ * Generated by one of the Hyperscan compiler functions:
+ * - @ref hs_compile()
+ * - @ref hs_compile_multi()
+ * - @ref hs_compile_ext_multi()
+ */
+typedef struct hs_database hs_database_t;
+
+/**
+ * A type for errors returned by Hyperscan functions.
+ */
+typedef int hs_error_t;
+
+/**
+ * Free a compiled pattern database.
+ *
+ * The free callback set by @ref hs_set_database_allocator() (or @ref
+ * hs_set_allocator()) will be used by this function.
+ *
+ * @param db
+ * A compiled pattern database. NULL may also be safely provided, in which
+ * case the function does nothing.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_free_database(hs_database_t *db);
+
+/**
+ * Serialize a pattern database to a stream of bytes.
+ *
+ * The allocator callback set by @ref hs_set_misc_allocator() (or @ref
+ * hs_set_allocator()) will be used by this function.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param bytes
+ * On success, a pointer to an array of bytes will be returned here.
+ * These bytes can be subsequently relocated or written to disk. The
+ * caller is responsible for freeing this block.
+ *
+ * @param length
+ * On success, the number of bytes in the generated byte array will be
+ * returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_NOMEM if the byte array cannot be
+ * allocated, other values may be returned if errors are detected.
+ */
+hs_error_t corei7_hs_serialize_database(const hs_database_t *db, char **bytes,
+ size_t *length);
+
+/**
+ * Reconstruct a pattern database from a stream of bytes previously generated
+ * by @ref hs_serialize_database().
+ *
+ * This function will allocate sufficient space for the database using the
+ * allocator set with @ref hs_set_database_allocator() (or @ref
+ * hs_set_allocator()); to use a pre-allocated region of memory, use the @ref
+ * hs_deserialize_database_at() function.
+ *
+ * @param bytes
+ * A byte array generated by @ref hs_serialize_database() representing a
+ * compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param db
+ * On success, a pointer to a newly allocated @ref hs_database_t will be
+ * returned here. This database can then be used for scanning, and
+ * eventually freed by the caller using @ref hs_free_database().
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_deserialize_database(const char *bytes,
+ const size_t length,
+ hs_database_t **db);
+
+/**
+ * Reconstruct a pattern database from a stream of bytes previously generated
+ * by @ref hs_serialize_database() at a given memory location.
+ *
+ * This function (unlike @ref hs_deserialize_database()) will write the
+ * reconstructed database to the memory location given in the @p db parameter.
+ * The amount of space required at this location can be determined with the
+ * @ref hs_serialized_database_size() function.
+ *
+ * @param bytes
+ * A byte array generated by @ref hs_serialize_database() representing a
+ * compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param db
+ * Pointer to an 8-byte aligned block of memory of sufficient size to hold
+ * the deserialized database. On success, the reconstructed database will
+ * be written to this location. This database can then be used for pattern
+ * matching. The user is responsible for freeing this memory; the @ref
+ * hs_free_database() call should not be used.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_deserialize_database_at(const char *bytes,
+ const size_t length,
+ hs_database_t *db);
+
+/**
+ * Provides the size of the stream state allocated by a single stream opened
+ * against the given database.
+ *
+ * @param database
+ * Pointer to a compiled (streaming mode) pattern database.
+ *
+ * @param stream_size
+ * On success, the size in bytes of an individual stream opened against the
+ * given database is placed in this parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_stream_size(const hs_database_t *database,
+ size_t *stream_size);
+
+/**
+ * Provides the size of the given database in bytes.
+ *
+ * @param database
+ * Pointer to compiled pattern database.
+ *
+ * @param database_size
+ * On success, the size of the compiled database in bytes is placed in this
+ * parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_database_size(const hs_database_t *database,
+ size_t *database_size);
+
+/**
+ * Utility function for reporting the size that would be required by a
+ * database if it were deserialized.
+ *
+ * This can be used to allocate a shared memory region or other "special"
+ * allocation prior to deserializing with the @ref hs_deserialize_database_at()
+ * function.
+ *
+ * @param bytes
+ * Pointer to a byte array generated by @ref hs_serialize_database()
+ * representing a compiled pattern database.
+ *
+ * @param length
+ * The length of the byte array generated by @ref hs_serialize_database().
+ * This should be the same value as that returned by @ref
+ * hs_serialize_database().
+ *
+ * @param deserialized_size
+ * On success, the size of the compiled database that would be generated
+ * by @ref hs_deserialize_database_at() is returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_serialized_database_size(const char *bytes,
+ const size_t length,
+ size_t *deserialized_size);
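
The three calls above compose into a full round trip: serialize, size, then rebuild in caller-owned memory. A sketch assuming the default malloc()-based allocators and a valid compiled database; in ordinary client code the unprefixed hs_* names are used, the corei7_ prefix being this build's rename:

    #include <stdlib.h>
    #include "hs_common.h"

    /* Clone a database via its serialized form; error paths are collapsed. */
    static hs_database_t *clone_via_bytes(const hs_database_t *db) {
        char *bytes = NULL;
        size_t len = 0, need = 0;
        if (corei7_hs_serialize_database(db, &bytes, &len) != HS_SUCCESS)
            return NULL;
        corei7_hs_serialized_database_size(bytes, len, &need);

        /* malloc() is at least 8-byte aligned on x86-64, which satisfies
         * the alignment contract of hs_deserialize_database_at() above. */
        hs_database_t *copy = malloc(need);
        hs_error_t err = copy
            ? corei7_hs_deserialize_database_at(bytes, len, copy)
            : HS_NOMEM;
        free(bytes);     /* serialized form came from the misc allocator */
        if (err != HS_SUCCESS) {
            free(copy);
            return NULL;
        }
        return copy;     /* free() it; hs_free_database() must not be used */
    }
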
+
+/**
+ * Utility function providing information about a database.
+ *
+ * @param database
+ * Pointer to a compiled database.
+ *
+ * @param info
+ * On success, a string containing the version and platform information for
+ * the supplied database is placed in the parameter. The string is
+ * allocated using the allocator supplied in @ref hs_set_misc_allocator()
+ * (or malloc() if no allocator was set) and should be freed by the caller.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_database_info(const hs_database_t *database,
+ char **info);
+
+/**
+ * Utility function providing information about a serialized database.
+ *
+ * @param bytes
+ * Pointer to a serialized database.
+ *
+ * @param length
+ * Length in bytes of the serialized database.
+ *
+ * @param info
+ * On success, a string containing the version and platform information
+ * for the supplied serialized database is placed in the parameter. The
+ * string is allocated using the allocator supplied in @ref
+ * hs_set_misc_allocator() (or malloc() if no allocator was set) and
+ * should be freed by the caller.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_serialized_database_info(const char *bytes,
+ size_t length, char **info);
+
+/**
+ * The type of the callback function that will be used by Hyperscan to allocate
+ * more memory at runtime as required, for example in @ref hs_open_stream() to
+ * allocate stream state.
+ *
+ * If Hyperscan is to be used in a multi-threaded, or similarly concurrent
+ * environment, the allocation function will need to be re-entrant, or
+ * similarly safe for concurrent use.
+ *
+ * @param size
+ * The number of bytes to allocate.
+ * @return
+ * A pointer to the region of memory allocated, or NULL on error.
+ */
+typedef void *(HS_CDECL *hs_alloc_t)(size_t size);
+
+/**
+ * The type of the callback function that will be used by Hyperscan to free
+ * memory regions previously allocated using the @ref hs_alloc_t function.
+ *
+ * @param ptr
+ * The region of memory to be freed.
+ */
+typedef void (HS_CDECL *hs_free_t)(void *ptr);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating
+ * memory at runtime for stream state, scratch space, database bytecode,
+ * and various other data structures returned by the Hyperscan API.
+ *
+ * The function is equivalent to calling @ref hs_set_stream_allocator(),
+ * @ref hs_set_scratch_allocator(), @ref hs_set_database_allocator() and
+ * @ref hs_set_misc_allocator() with the provided parameters.
+ *
+ * This call will override any previous allocators that have been set.
+ *
+ * Note: there is no way to change the allocator used for temporary objects
+ * created during the various compile calls (@ref hs_compile(), @ref
+ * hs_compile_multi(), @ref hs_compile_ext_multi()).
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_set_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for database bytecode produced by the compile calls (@ref hs_compile(), @ref
+ * hs_compile_multi(), @ref hs_compile_ext_multi()) and by database
+ * deserialization (@ref hs_deserialize_database()).
+ *
+ * If no database allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous database allocators that have been set.
+ *
+ * Note: the database allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * Note: there is no way to change how temporary objects created during the
+ * various compile calls (@ref hs_compile(), @ref hs_compile_multi(), @ref
+ * hs_compile_ext_multi()) are allocated.
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_set_database_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for items returned by the Hyperscan API such as @ref hs_compile_error_t, @ref
+ * hs_expr_info_t and serialized databases.
+ *
+ * If no misc allocation functions are set, or if NULL is used in place of both
+ * parameters, then memory allocation will default to standard methods (such as
+ * the system malloc() and free() calls).
+ *
+ * This call will override any previous misc allocators that have been set.
+ *
+ * Note: the misc allocator may also be set by calling @ref hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_set_misc_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for scratch space by @ref hs_alloc_scratch() and @ref hs_clone_scratch().
+ *
+ * If no scratch allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous scratch allocators that have been set.
+ *
+ * Note: the scratch allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_set_scratch_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
+
+/**
+ * Set the allocate and free functions used by Hyperscan for allocating memory
+ * for stream state by @ref hs_open_stream().
+ *
+ * If no stream allocation functions are set, or if NULL is used in place of
+ * both parameters, then memory allocation will default to standard methods
+ * (such as the system malloc() and free() calls).
+ *
+ * This call will override any previous stream allocators that have been set.
+ *
+ * Note: the stream allocator may also be set by calling @ref
+ * hs_set_allocator().
+ *
+ * @param alloc_func
+ * A callback function pointer that allocates memory. This function must
+ * return memory suitably aligned for the largest representable data type
+ * on this platform.
+ *
+ * @param free_func
+ * A callback function pointer that frees allocated memory.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_set_stream_allocator(hs_alloc_t alloc_func,
+ hs_free_t free_func);
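
A counting wrapper around malloc()/free() is a simple way to exercise these hooks. A sketch with illustrative names; HS_CDECL expands to nothing outside _WIN32, per the top of this header:

    #include <stdlib.h>

    static size_t live_allocs;   /* not thread-safe; a sketch only */

    static void *HS_CDECL counting_alloc(size_t size) {
        void *p = malloc(size);  /* suitably aligned, as the contract requires */
        if (p)
            live_allocs++;
        return p;
    }

    static void HS_CDECL counting_free(void *ptr) {
        if (ptr)
            live_allocs--;
        free(ptr);
    }

    static void install_allocators(void) {
        /* Equivalent to setting the stream, scratch, database and misc
         * allocators individually, per the documentation above. */
        corei7_hs_set_allocator(counting_alloc, counting_free);
    }
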
+
+/**
+ * Utility function for identifying this release version.
+ *
+ * @return
+ * A string containing the version number of this release build and the
+ * date of the build. It is allocated statically, so it does not need to
+ * be freed by the caller.
+ */
+const char * corei7_hs_version(void);
+
+/**
+ * Utility function to test the current system architecture.
+ *
+ * Hyperscan requires the Supplemental Streaming SIMD Extensions 3 instruction
+ * set. This function can be called on any x86 platform to determine if the
+ * system provides the required instruction set.
+ *
+ * This function does not test for more advanced features if Hyperscan has
+ * been built for a more specific architecture, for example the AVX2
+ * instruction set.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_ARCH_ERROR if system does not
+ * support Hyperscan.
+ */
+hs_error_t corei7_hs_valid_platform(void);
+
+/**
+ * @defgroup HS_ERROR hs_error_t values
+ *
+ * @{
+ */
+
+/**
+ * The engine completed normally.
+ */
+#define HS_SUCCESS 0
+
+/**
+ * A parameter passed to this function was invalid.
+ *
+ * This error is only returned in cases where the function can detect an
+ * invalid parameter -- it cannot be relied upon to detect (for example)
+ * pointers to freed memory or other invalid data.
+ */
+#define HS_INVALID (-1)
+
+/**
+ * A memory allocation failed.
+ */
+#define HS_NOMEM (-2)
+
+/**
+ * The engine was terminated by callback.
+ *
+ * This return value indicates that the target buffer was partially scanned,
+ * but that the callback function requested that scanning cease after a match
+ * was located.
+ */
+#define HS_SCAN_TERMINATED (-3)
+
+/**
+ * The pattern compiler failed, and the @ref hs_compile_error_t should be
+ * inspected for more detail.
+ */
+#define HS_COMPILER_ERROR (-4)
+
+/**
+ * The given database was built for a different version of Hyperscan.
+ */
+#define HS_DB_VERSION_ERROR (-5)
+
+/**
+ * The given database was built for a different platform (i.e., CPU type).
+ */
+#define HS_DB_PLATFORM_ERROR (-6)
+
+/**
+ * The given database was built for a different mode of operation. This error
+ * is returned when streaming calls are used with a block or vectored database
+ * and vice versa.
+ */
+#define HS_DB_MODE_ERROR (-7)
+
+/**
+ * A parameter passed to this function was not correctly aligned.
+ */
+#define HS_BAD_ALIGN (-8)
+
+/**
+ * The memory allocator (either malloc() or the allocator set with @ref
+ * hs_set_allocator()) did not correctly return memory suitably aligned for the
+ * largest representable data type on this platform.
+ */
+#define HS_BAD_ALLOC (-9)
+
+/**
+ * The scratch region was already in use.
+ *
+ * This error is returned when Hyperscan is able to detect that the scratch
+ * region given is already in use by another Hyperscan API call.
+ *
+ * A separate scratch region, allocated with @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch(), is required for every concurrent caller of the Hyperscan
+ * API.
+ *
+ * For example, this error might be returned when @ref hs_scan() has been
+ * called inside a callback delivered by a currently-executing @ref hs_scan()
+ * call using the same scratch region.
+ *
+ * Note: Not all concurrent uses of scratch regions may be detected. This error
+ * is intended as a best-effort debugging tool, not a guarantee.
+ */
+#define HS_SCRATCH_IN_USE (-10)
+
+/**
+ * Unsupported CPU architecture.
+ *
+ * This error is returned when Hyperscan is able to detect that the current
+ * system does not support the required instruction set.
+ *
+ * At a minimum, Hyperscan requires Supplemental Streaming SIMD Extensions 3
+ * (SSSE3).
+ */
+#define HS_ARCH_ERROR (-11)
+
+/**
+ * Provided buffer was too small.
+ *
+ * This error indicates that there was insufficient space in the buffer. The
+ * call should be repeated with a larger provided buffer.
+ *
+ * Note: in this situation, it is normal for the amount of space required to be
+ * returned in the same manner as the used space would have been returned if the
+ * call was successful.
+ */
+#define HS_INSUFFICIENT_SPACE (-12)
+
+/**
+ * Unexpected internal error.
+ *
+ * This error indicates unexpected matching behavior. This could be related
+ * to invalid usage of stream or scratch space, or to invalid memory
+ * operations by users.
+ */
+#define HS_UNKNOWN_ERROR (-13)
+
+/** @} */
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* HS_COREI7_COMMON_H */
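
The library provides no error-to-string helper, so callers typically map the codes above by hand when logging. A sketch grounded only in the macros defined in this header:

    static const char *hs_err_str(hs_error_t e) {
        switch (e) {
        case HS_SUCCESS:            return "success";
        case HS_INVALID:            return "invalid parameter";
        case HS_NOMEM:              return "memory allocation failed";
        case HS_SCAN_TERMINATED:    return "scan terminated by callback";
        case HS_COMPILER_ERROR:     return "pattern compilation failed";
        case HS_DB_VERSION_ERROR:   return "database built for another version";
        case HS_DB_PLATFORM_ERROR:  return "database built for another platform";
        case HS_DB_MODE_ERROR:      return "database built for another mode";
        case HS_BAD_ALIGN:          return "parameter not correctly aligned";
        case HS_BAD_ALLOC:          return "allocator returned misaligned memory";
        case HS_SCRATCH_IN_USE:     return "scratch region already in use";
        case HS_ARCH_ERROR:         return "unsupported CPU architecture";
        case HS_INSUFFICIENT_SPACE: return "provided buffer too small";
        case HS_UNKNOWN_ERROR:      return "unexpected internal error";
        default:                    return "unrecognised error code";
        }
    }
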
diff --git a/contrib/libs/hyperscan/runtime_corei7/hs_runtime.h b/contrib/libs/hyperscan/runtime_corei7/hs_runtime.h
index e38245e1c2..a55b183ce3 100644
--- a/contrib/libs/hyperscan/runtime_corei7/hs_runtime.h
+++ b/contrib/libs/hyperscan/runtime_corei7/hs_runtime.h
@@ -1,621 +1,621 @@
-/*
- * Copyright (c) 2015-2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef HS_COREI7_RUNTIME_H
-#define HS_COREI7_RUNTIME_H
-
-#include <stdlib.h>
-
-/**
- * @file
- * @brief The Hyperscan runtime API definition.
- *
- * Hyperscan is a high-speed regular expression engine.
- *
- * This header contains functions for using compiled Hyperscan databases for
- * scanning data at runtime.
- */
-
-#include "hs_common.h"
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/**
- * Definition of the stream identifier type.
- */
-struct hs_stream;
-
-/**
- * The stream identifier returned by @ref hs_open_stream().
- */
-typedef struct hs_stream hs_stream_t;
-
-struct hs_scratch;
-
-/**
- * A Hyperscan scratch space.
- */
-typedef struct hs_scratch hs_scratch_t;
-
-/**
- * Definition of the match event callback function type.
- *
- * A callback function matching the defined type must be provided by the
- * application calling the @ref hs_scan(), @ref hs_scan_vector() or @ref
- * hs_scan_stream() functions (or other streaming calls which can produce
- * matches).
- *
- * This callback function will be invoked whenever a match is located in the
- * target data during the execution of a scan. The details of the match are
- * passed in as parameters to the callback function, and the callback function
- * should return a value indicating whether or not matching should continue on
- * the target data. If no callbacks are desired from a scan call, NULL may be
- * provided in order to suppress match production.
- *
- * This callback function should not attempt to call Hyperscan API functions on
- * the same stream nor should it attempt to reuse the scratch space allocated
- * for the API calls that caused it to be triggered. Making another call to the
- * Hyperscan library with completely independent parameters should work (for
- * example, scanning a different database in a new stream and with new scratch
- * space), but reusing data structures like stream state and/or scratch space
- * will produce undefined behavior.
- *
- * @param id
- * The ID number of the expression that matched. If the expression was a
- * single expression compiled with @ref hs_compile(), this value will be
- * zero.
- *
- * @param from
- * - If a start of match flag is enabled for the current pattern, this
- *     argument will be set to the start of match for the pattern, assuming
- *     that the start of match value lies within the current 'start of match
- *     horizon' chosen by one of the SOM_HORIZON mode flags.
- *
- *   - If the start of match value lies outside this horizon (possible only
- *     when the SOM_HORIZON value is not @ref HS_MODE_SOM_HORIZON_LARGE),
- *     the @p from value will be set to @ref HS_OFFSET_PAST_HORIZON.
- *
- * - This argument will be set to zero if the Start of Match flag is not
- * enabled for the given pattern.
- *
- * @param to
- * The offset after the last byte that matches the expression.
- *
- * @param flags
- * This is provided for future use and is unused at present.
- *
- * @param context
- * The pointer supplied by the user to the @ref hs_scan(), @ref
- * hs_scan_vector() or @ref hs_scan_stream() function.
- *
- * @return
- * Non-zero if the matching should cease, else zero. If scanning is
- * performed in streaming mode and a non-zero value is returned, any
- * subsequent calls to @ref hs_scan_stream() for that stream will
- * immediately return with @ref HS_SCAN_TERMINATED.
- */
-typedef int (HS_CDECL *match_event_handler)(unsigned int id,
- unsigned long long from,
- unsigned long long to,
- unsigned int flags,
- void *context);
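
A conforming handler is short: inspect the parameters, record what is needed, and return zero to continue or non-zero to stop. A sketch with illustrative names (first_match, on_match):

    #include "hs_runtime.h"

    struct first_match {
        unsigned int id;
        unsigned long long to;
        int seen;
    };

    /* Record the first match and ask the engine to stop scanning. */
    static int HS_CDECL on_match(unsigned int id, unsigned long long from,
                                 unsigned long long to, unsigned int flags,
                                 void *context) {
        (void)from;
        (void)flags;                 /* reserved, unused at present */
        struct first_match *fm = context;
        fm->id = id;
        fm->to = to;
        fm->seen = 1;
        return 1;   /* non-zero: the caller sees HS_SCAN_TERMINATED */
    }
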
-
-/**
- * Open and initialise a stream.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param stream
- * On success, a pointer to the generated @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_open_stream(const hs_database_t *db, unsigned int flags,
- hs_stream_t **stream);
-
-/**
- * Write data to be scanned to the opened stream.
- *
- * This is the function call in which the actual pattern matching takes place
- * as data is written to the stream. Matches will be returned via the @ref
- * match_event_handler callback supplied.
- *
- * @param id
- * The stream ID (returned by @ref hs_open_stream()) to which the data
- * will be written.
- *
- * @param data
- * Pointer to the data to be scanned.
- *
- * @param length
- * The number of bytes to scan.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch().
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param ctxt
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
- * match callback indicated that scanning should stop; other values on
- * error.
- */
-hs_error_t corei7_hs_scan_stream(hs_stream_t *id, const char *data,
- unsigned int length, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *ctxt);
-
-/**
- * Close a stream.
- *
- * This function completes matching on the given stream and frees the memory
- * associated with the stream state. After this call, the stream pointed to by
- * @p id is invalid and can no longer be used. To reuse the stream state after
- * completion, rather than closing it, the @ref hs_reset_stream function can be
- * used.
- *
- * This function must be called for any stream created with @ref
- * hs_open_stream(), even if scanning has been terminated by a non-zero return
- * from the match callback function.
- *
- * Note: This operation may result in matches being returned (via calls to the
- * match event callback) for expressions anchored to the end of the data stream
- * (for example, via the use of the `$` meta-character). If these matches are
- * not desired, NULL may be provided as the @ref match_event_handler callback.
- *
- * If NULL is provided as the @ref match_event_handler callback, it is
- * permissible to provide a NULL scratch.
- *
- * @param id
- * The stream ID returned by @ref hs_open_stream().
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param ctxt
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * Returns @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_close_stream(hs_stream_t *id, hs_scratch_t *scratch,
- match_event_handler onEvent, void *ctxt);
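
These three calls compose into the usual streaming loop. A sketch continuing the on_match example above, assuming db is a streaming-mode database and scratch was allocated with hs_alloc_scratch():

    static hs_error_t scan_two_chunks(const hs_database_t *db,
                                      hs_scratch_t *scratch,
                                      struct first_match *fm) {
        hs_stream_t *stream = NULL;
        hs_error_t err = corei7_hs_open_stream(db, 0, &stream);
        if (err != HS_SUCCESS)
            return err;

        err = corei7_hs_scan_stream(stream, "foo", 3, 0, scratch, on_match, fm);
        if (err == HS_SUCCESS)
            err = corei7_hs_scan_stream(stream, "bar", 3, 0, scratch,
                                        on_match, fm);

        /* The stream must always be closed, even after HS_SCAN_TERMINATED;
         * EOD matches (e.g. for '$') are reported here unless the callback
         * passed is NULL. */
        hs_error_t cerr = corei7_hs_close_stream(stream, scratch, on_match, fm);
        return err != HS_SUCCESS ? err : cerr;
    }
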
-
-/**
- * Reset a stream to an initial state.
- *
- * Conceptually, this is equivalent to performing @ref hs_close_stream() on the
- * given stream, followed by a @ref hs_open_stream(). This new stream replaces
- * the original stream in memory, avoiding the overhead of freeing the old
- * stream and allocating the new one.
- *
- * Note: This operation may result in matches being returned (via calls to the
- * match event callback) for expressions anchored to the end of the original
- * data stream (for example, via the use of the `$` meta-character). If these
- * matches are not desired, NULL may be provided as the @ref match_event_handler
- * callback.
- *
- * Note: the stream will also be tied to the same database.
- *
- * @param id
- * The stream (as created by @ref hs_open_stream()) to be replaced.
- *
- * @param flags
- * Flags modifying the behaviour of the stream. This parameter is provided
- * for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_reset_stream(hs_stream_t *id, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *context);
-
-/**
- * Duplicate the given stream. The new stream will have the same state as the
- * original including the current stream offset.
- *
- * @param to_id
- * On success, a pointer to the new, copied @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param from_id
- * The stream (as created by @ref hs_open_stream()) to be copied.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_copy_stream(hs_stream_t **to_id,
- const hs_stream_t *from_id);
-
-/**
- * Duplicate the given 'from' stream state onto the 'to' stream. The 'to' stream
- * will first be reset (reporting any EOD matches if a non-NULL @p onEvent
- * callback handler is provided).
- *
- * Note: the 'to' stream and the 'from' stream must be open against the same
- * database.
- *
- * @param to_id
- * On success, a pointer to the new, copied @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param from_id
- * The stream (as created by @ref hs_open_stream()) to be copied.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_reset_and_copy_stream(hs_stream_t *to_id,
- const hs_stream_t *from_id,
- hs_scratch_t *scratch,
- match_event_handler onEvent,
- void *context);
-
-/**
- * Creates a compressed representation of the provided stream in the buffer
- * provided. This compressed representation can be converted back into a stream
- * state by using @ref hs_expand_stream() or @ref hs_reset_and_expand_stream().
- * The size of the compressed representation will be placed into @p used_space.
- *
- * If there is not sufficient space in the buffer to hold the compressed
- * representation, @ref HS_INSUFFICIENT_SPACE will be returned and @p used_space
- * will be populated with the amount of space required.
- *
- * Note: this function does not close the provided stream; you may continue to
- * use the stream or free it with @ref hs_close_stream().
- *
- * @param stream
- * The stream (as created by @ref hs_open_stream()) to be compressed.
- *
- * @param buf
- * Buffer to write the compressed representation into. Note: if the call is
- * just being used to determine the amount of space required, it is allowed
- * to pass NULL here and @p buf_space as 0.
- *
- * @param buf_space
- *     The number of bytes in @p buf. If @p buf_space is too small, the call
- *     will fail with @ref HS_INSUFFICIENT_SPACE.
- *
- * @param used_space
- *     Pointer to where the amount of used space will be written. The used
- * buffer space is always less than or equal to @p buf_space. If the call
- * fails with @ref HS_INSUFFICIENT_SPACE, this pointer will be used to
- * write out the amount of buffer space required.
- *
- * @return
- * @ref HS_SUCCESS on success, @ref HS_INSUFFICIENT_SPACE if the provided
- * buffer is too small.
- */
-hs_error_t corei7_hs_compress_stream(const hs_stream_t *stream, char *buf,
- size_t buf_space, size_t *used_space);
-
-/**
- * Decompresses a compressed representation created by @ref hs_compress_stream()
- * into a new stream.
- *
- * Note: @p buf must correspond to a complete compressed representation created
- * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
- * not always possible to detect misuse of this API and behaviour is undefined
- * if these properties are not satisfied.
- *
- * @param db
- * The compiled pattern database that the compressed stream was opened
- * against.
- *
- * @param stream
- * On success, a pointer to the expanded @ref hs_stream_t will be
- * returned; NULL on failure.
- *
- * @param buf
- * A compressed representation of a stream. These compressed forms are
- * created by @ref hs_compress_stream().
- *
- * @param buf_size
- * The size in bytes of the compressed representation.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_expand_stream(const hs_database_t *db,
- hs_stream_t **stream, const char *buf,
- size_t buf_size);
-
-/**
- * Decompresses a compressed representation created by @ref hs_compress_stream()
- * on top of the 'to' stream. The 'to' stream will first be reset (reporting
- * any EOD matches if a non-NULL @p onEvent callback handler is provided).
- *
- * Note: the 'to' stream must be opened against the same database as the
- * compressed stream.
- *
- * Note: @p buf must correspond to a complete compressed representation created
- * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
- * not always possible to detect misuse of this API and behaviour is undefined
- * if these properties are not satisfied.
- *
- * @param to_stream
- *     A pointer to a valid stream state. On success, the expanded stream
- *     state will be written on top of this stream.
- *
- * @param buf
- * A compressed representation of a stream. These compressed forms are
- * created by @ref hs_compress_stream().
- *
- * @param buf_size
- * The size in bytes of the compressed representation.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
- * allowed to be NULL only if the @p onEvent callback is also NULL.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function
- * when a match occurs.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_reset_and_expand_stream(hs_stream_t *to_stream,
- const char *buf, size_t buf_size,
- hs_scratch_t *scratch,
- match_event_handler onEvent,
- void *context);
-
-/**
- * The block (non-streaming) regular expression scanner.
- *
- * This is the function call in which the actual pattern matching takes place
- * for block-mode pattern databases.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param data
- * Pointer to the data to be scanned.
- *
- * @param length
- * The number of bytes to scan.
- *
- * @param flags
- * Flags modifying the behaviour of this function. This parameter is
- * provided for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() for this
- * database.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
- * match callback indicated that scanning should stop; other values on
- * error.
- */
-hs_error_t corei7_hs_scan(const hs_database_t *db, const char *data,
- unsigned int length, unsigned int flags,
- hs_scratch_t *scratch, match_event_handler onEvent,
- void *context);
-
-/**
- * The vectored regular expression scanner.
- *
- * This is the function call in which the actual pattern matching takes place
- * for vectoring-mode pattern databases.
- *
- * @param db
- * A compiled pattern database.
- *
- * @param data
- * An array of pointers to the data blocks to be scanned.
- *
- * @param length
- * An array of lengths (in bytes) of each data block to scan.
- *
- * @param count
- *     Number of data blocks to scan. This should correspond to the size
- *     of the @p data and @p length arrays.
- *
- * @param flags
- * Flags modifying the behaviour of this function. This parameter is
- * provided for future use and is unused at present.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() for
- * this database.
- *
- * @param onEvent
- * Pointer to a match event callback function. If a NULL pointer is given,
- * no matches will be returned.
- *
- * @param context
- * The user defined pointer which will be passed to the callback function.
- *
- * @return
- * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the match
- * callback indicated that scanning should stop; other values on error.
- */
-hs_error_t corei7_hs_scan_vector(const hs_database_t *db,
- const char *const *data,
- const unsigned int *length,
- unsigned int count, unsigned int flags,
- hs_scratch_t *scratch,
- match_event_handler onEvent, void *context);
-
-/**
- * Allocate a "scratch" space for use by Hyperscan.
- *
- * Scratch space is required for runtime use; one scratch space is needed per
- * thread or concurrent caller. Any allocator callback set by @ref
- * hs_set_scratch_allocator() or @ref hs_set_allocator() will be used by this
- * function.
- *
- * @param db
- * The database, as produced by @ref hs_compile().
- *
- * @param scratch
- * On first allocation, a pointer to NULL should be provided so a new
- * scratch can be allocated. If a scratch block has been previously
- * allocated, then a pointer to it should be passed back in to see if it
- *     is valid for this database. If a new scratch block is required,
- * the original will be freed and the new one returned, otherwise the
- * previous scratch block will be returned. On success, the scratch block
- * will be suitable for use with the provided database in addition to any
- *     databases that the original scratch space was suitable for.
- *
- * @return
- * @ref HS_SUCCESS on successful allocation; @ref HS_NOMEM if the
- * allocation fails. Other errors may be returned if invalid parameters
- * are specified.
- */
-hs_error_t corei7_hs_alloc_scratch(const hs_database_t *db,
- hs_scratch_t **scratch);
-
-/**
- * Allocate a scratch space that is a clone of an existing scratch space.
- *
- * This is useful when multiple concurrent threads will be using the same set
- * of compiled databases, and another scratch space is required. Any allocator
- * callback set by @ref hs_set_scratch_allocator() or @ref hs_set_allocator()
- * will be used by this function.
- *
- * @param src
- * The existing @ref hs_scratch_t to be cloned.
- *
- * @param dest
- * A pointer to the new scratch space will be returned here.
- *
- * @return
- * @ref HS_SUCCESS on success; @ref HS_NOMEM if the allocation fails.
- * Other errors may be returned if invalid parameters are specified.
- */
-hs_error_t corei7_hs_clone_scratch(const hs_scratch_t *src,
- hs_scratch_t **dest);
-
-/**
- * Provides the size of the given scratch space.
- *
- * @param scratch
- * A per-thread scratch space allocated by @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch().
- *
- * @param scratch_size
- * On success, the size of the scratch space in bytes is placed in this
- * parameter.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_scratch_size(const hs_scratch_t *scratch,
- size_t *scratch_size);
-
-/**
- * Free a scratch block previously allocated by @ref hs_alloc_scratch() or @ref
- * hs_clone_scratch().
- *
- * The free callback set by @ref hs_set_scratch_allocator() or @ref
- * hs_set_allocator() will be used by this function.
- *
- * @param scratch
- * The scratch block to be freed. NULL may also be safely provided.
- *
- * @return
- * @ref HS_SUCCESS on success, other values on failure.
- */
-hs_error_t corei7_hs_free_scratch(hs_scratch_t *scratch);
-
-/**
- * Callback 'from' return value, indicating that the start of this match was
- * too early to be tracked with the requested SOM_HORIZON precision.
- */
-#define HS_OFFSET_PAST_HORIZON (~0ULL)
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif /* HS_COREI7_RUNTIME_H */
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HS_COREI7_RUNTIME_H
+#define HS_COREI7_RUNTIME_H
+
+#include <stdlib.h>
+
+/**
+ * @file
+ * @brief The Hyperscan runtime API definition.
+ *
+ * Hyperscan is a high speed regular expression engine.
+ *
+ * This header contains functions for using compiled Hyperscan databases for
+ * scanning data at runtime.
+ */
+
+#include "hs_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Definition of the stream identifier type.
+ */
+struct hs_stream;
+
+/**
+ * The stream identifier returned by @ref hs_open_stream().
+ */
+typedef struct hs_stream hs_stream_t;
+
+struct hs_scratch;
+
+/**
+ * A Hyperscan scratch space.
+ */
+typedef struct hs_scratch hs_scratch_t;
+
+/**
+ * Definition of the match event callback function type.
+ *
+ * A callback function matching the defined type must be provided by the
+ * application calling the @ref hs_scan(), @ref hs_scan_vector() or @ref
+ * hs_scan_stream() functions (or other streaming calls which can produce
+ * matches).
+ *
+ * This callback function will be invoked whenever a match is located in the
+ * target data during the execution of a scan. The details of the match are
+ * passed in as parameters to the callback function, and the callback function
+ * should return a value indicating whether or not matching should continue on
+ * the target data. If no callbacks are desired from a scan call, NULL may be
+ * provided in order to suppress match production.
+ *
+ * This callback function should not attempt to call Hyperscan API functions on
+ * the same stream, nor should it attempt to reuse the scratch space allocated
+ * for the API calls that caused it to be triggered. Making another call to the
+ * Hyperscan library with completely independent parameters should work (for
+ * example, scanning a different database in a new stream and with new scratch
+ * space), but reusing data structures like stream state and/or scratch space
+ * will produce undefined behavior.
+ *
+ * @param id
+ * The ID number of the expression that matched. If the expression was a
+ * single expression compiled with @ref hs_compile(), this value will be
+ * zero.
+ *
+ * @param from
+ * - If a start of match flag is enabled for the current pattern, this
+ * argument will be set to the start of match for the pattern assuming
+ *       that the start of match value lies within the current 'start of match
+ * horizon' chosen by one of the SOM_HORIZON mode flags.
+ *
+ * - If the start of match value lies outside this horizon (possible only
+ * when the SOM_HORIZON value is not @ref HS_MODE_SOM_HORIZON_LARGE),
+ * the @p from value will be set to @ref HS_OFFSET_PAST_HORIZON.
+ *
+ * - This argument will be set to zero if the Start of Match flag is not
+ * enabled for the given pattern.
+ *
+ * @param to
+ * The offset after the last byte that matches the expression.
+ *
+ * @param flags
+ * This is provided for future use and is unused at present.
+ *
+ * @param context
+ * The pointer supplied by the user to the @ref hs_scan(), @ref
+ * hs_scan_vector() or @ref hs_scan_stream() function.
+ *
+ * @return
+ * Non-zero if the matching should cease, else zero. If scanning is
+ * performed in streaming mode and a non-zero value is returned, any
+ * subsequent calls to @ref hs_scan_stream() for that stream will
+ * immediately return with @ref HS_SCAN_TERMINATED.
+ */
+typedef int (HS_CDECL *match_event_handler)(unsigned int id,
+ unsigned long long from,
+ unsigned long long to,
+ unsigned int flags,
+ void *context);
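+
+/*
+ * Example: a minimal callback of the type above (an illustrative sketch, not
+ * part of the original header). The names `on_match` and `match_count` are
+ * placeholders; the sketches in this header use the public hs_* entry points,
+ * which this build maps onto the corei7_-prefixed symbols declared below.
+ *
+ * @code
+ *     static int HS_CDECL on_match(unsigned int id, unsigned long long from,
+ *                                  unsigned long long to, unsigned int flags,
+ *                                  void *context) {
+ *         (void)id; (void)from; (void)to; (void)flags;
+ *         unsigned int *match_count = context;
+ *         ++*match_count;
+ *         return 0; // zero: keep scanning; non-zero: terminate the scan
+ *     }
+ * @endcode
+ */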
+
+/**
+ * Open and initialise a stream.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param stream
+ * On success, a pointer to the generated @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_open_stream(const hs_database_t *db, unsigned int flags,
+ hs_stream_t **stream);
+
+/**
+ * Write data to be scanned to the opened stream.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * as data is written to the stream. Matches will be returned via the @ref
+ * match_event_handler callback supplied.
+ *
+ * @param id
+ * The stream ID (returned by @ref hs_open_stream()) to which the data
+ * will be written.
+ *
+ * @param data
+ * Pointer to the data to be scanned.
+ *
+ * @param length
+ * The number of bytes to scan.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch().
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param ctxt
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
+ * match callback indicated that scanning should stop; other values on
+ * error.
+ */
+hs_error_t corei7_hs_scan_stream(hs_stream_t *id, const char *data,
+ unsigned int length, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *ctxt);
+
+/**
+ * Close a stream.
+ *
+ * This function completes matching on the given stream and frees the memory
+ * associated with the stream state. After this call, the stream pointed to by
+ * @p id is invalid and can no longer be used. To reuse the stream state after
+ * completion, rather than closing it, the @ref hs_reset_stream function can be
+ * used.
+ *
+ * This function must be called for any stream created with @ref
+ * hs_open_stream(), even if scanning has been terminated by a non-zero return
+ * from the match callback function.
+ *
+ * Note: This operation may result in matches being returned (via calls to the
+ * match event callback) for expressions anchored to the end of the data stream
+ * (for example, via the use of the `$` meta-character). If these matches are
+ * not desired, NULL may be provided as the @ref match_event_handler callback.
+ *
+ * If NULL is provided as the @ref match_event_handler callback, it is
+ * permissible to provide a NULL scratch.
+ *
+ * @param id
+ * The stream ID returned by @ref hs_open_stream().
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param ctxt
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_close_stream(hs_stream_t *id, hs_scratch_t *scratch,
+ match_event_handler onEvent, void *ctxt);
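+
+/*
+ * Example: the basic stream lifecycle using the three calls above (an
+ * illustrative sketch, not part of the original header). `db`, `scratch`,
+ * `on_match`, `ctx` and the chunk variables are placeholders assumed to be
+ * set up elsewhere; `db` must be a streaming-mode database.
+ *
+ * @code
+ *     hs_stream_t *stream = NULL;
+ *     if (hs_open_stream(db, 0, &stream) != HS_SUCCESS) {
+ *         return; // no stream could be allocated
+ *     }
+ *     // Feed the data in whatever chunks it arrives in; matches may span
+ *     // chunk boundaries.
+ *     hs_scan_stream(stream, chunk1, chunk1_len, 0, scratch, on_match, ctx);
+ *     hs_scan_stream(stream, chunk2, chunk2_len, 0, scratch, on_match, ctx);
+ *     // Closing reports any end-of-data matches and frees the stream state.
+ *     hs_close_stream(stream, scratch, on_match, ctx);
+ * @endcode
+ */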
+
+/**
+ * Reset a stream to an initial state.
+ *
+ * Conceptually, this is equivalent to performing @ref hs_close_stream() on the
+ * given stream, followed by a @ref hs_open_stream(). This new stream replaces
+ * the original stream in memory, avoiding the overhead of freeing the old
+ * stream and allocating the new one.
+ *
+ * Note: This operation may result in matches being returned (via calls to the
+ * match event callback) for expressions anchored to the end of the original
+ * data stream (for example, via the use of the `$` meta-character). If these
+ * matches are not desired, NULL may be provided as the @ref match_event_handler
+ * callback.
+ *
+ * Note: the stream will also be tied to the same database.
+ *
+ * @param id
+ * The stream (as created by @ref hs_open_stream()) to be replaced.
+ *
+ * @param flags
+ * Flags modifying the behaviour of the stream. This parameter is provided
+ * for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_reset_stream(hs_stream_t *id, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *context);
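+
+/*
+ * Example: reusing one stream's state for a sequence of logical streams via
+ * @ref hs_reset_stream(), avoiding an open/close pair per input (an
+ * illustrative sketch, not part of the original header; all names are
+ * placeholders).
+ *
+ * @code
+ *     hs_stream_t *stream = NULL;
+ *     hs_open_stream(db, 0, &stream);
+ *     for (size_t i = 0; i < n_inputs; i++) {
+ *         hs_scan_stream(stream, inputs[i], input_lens[i], 0, scratch,
+ *                        on_match, ctx);
+ *         // Reset reports EOD matches for this input, then rewinds the state.
+ *         hs_reset_stream(stream, 0, scratch, on_match, ctx);
+ *     }
+ *     hs_close_stream(stream, scratch, NULL, NULL);
+ * @endcode
+ */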
+
+/**
+ * Duplicate the given stream. The new stream will have the same state as the
+ * original, including the current stream offset.
+ *
+ * @param to_id
+ * On success, a pointer to the new, copied @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param from_id
+ * The stream (as created by @ref hs_open_stream()) to be copied.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_copy_stream(hs_stream_t **to_id,
+ const hs_stream_t *from_id);
+
+/**
+ * Duplicate the given 'from' stream state onto the 'to' stream. The 'to' stream
+ * will first be reset (reporting any EOD matches if a non-NULL @p onEvent
+ * callback handler is provided).
+ *
+ * Note: the 'to' stream and the 'from' stream must be open against the same
+ * database.
+ *
+ * @param to_id
+ * On success, a pointer to the new, copied @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param from_id
+ * The stream (as created by @ref hs_open_stream()) to be copied.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_reset_and_copy_stream(hs_stream_t *to_id,
+ const hs_stream_t *from_id,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent,
+ void *context);
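+
+/*
+ * Example: forking a stream so two alternative continuations of the same
+ * scanned prefix can be explored (an illustrative sketch, not part of the
+ * original header; `stream` and `scratch` are placeholders).
+ *
+ * @code
+ *     hs_stream_t *fork = NULL;
+ *     hs_copy_stream(&fork, stream);   // allocates an independent duplicate
+ *     // ... scan divergent data into `stream` and `fork` here ...
+ *     // Later, rewind `fork` back onto the current state of `stream` in
+ *     // place, without another allocation:
+ *     hs_reset_and_copy_stream(fork, stream, scratch, NULL, NULL);
+ * @endcode
+ */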
+
+/**
+ * Creates a compressed representation of the provided stream in the given
+ * buffer. This compressed representation can be converted back into a stream
+ * state by using @ref hs_expand_stream() or @ref hs_reset_and_expand_stream().
+ * The size of the compressed representation will be placed into @p used_space.
+ *
+ * If there is not sufficient space in the buffer to hold the compressed
+ * representation, @ref HS_INSUFFICIENT_SPACE will be returned and @p used_space
+ * will be populated with the amount of space required.
+ *
+ * Note: this function does not close the provided stream; you may continue to
+ * use the stream or free it with @ref hs_close_stream().
+ *
+ * @param stream
+ * The stream (as created by @ref hs_open_stream()) to be compressed.
+ *
+ * @param buf
+ * Buffer to write the compressed representation into. Note: if the call is
+ * just being used to determine the amount of space required, it is allowed
+ * to pass NULL here and @p buf_space as 0.
+ *
+ * @param buf_space
+ *     The number of bytes in @p buf. If @p buf_space is too small, the call
+ *     will fail with @ref HS_INSUFFICIENT_SPACE.
+ *
+ * @param used_space
+ *     Pointer to where the amount of used space will be written. The used
+ * buffer space is always less than or equal to @p buf_space. If the call
+ * fails with @ref HS_INSUFFICIENT_SPACE, this pointer will be used to
+ * write out the amount of buffer space required.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, @ref HS_INSUFFICIENT_SPACE if the provided
+ * buffer is too small.
+ */
+hs_error_t corei7_hs_compress_stream(const hs_stream_t *stream, char *buf,
+ size_t buf_space, size_t *used_space);
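+
+/*
+ * Example: the two-call pattern described above -- query the required size
+ * with a NULL buffer, then compress (an illustrative sketch, not part of the
+ * original header).
+ *
+ * @code
+ *     size_t needed = 0;
+ *     // First call fails with HS_INSUFFICIENT_SPACE but fills in `needed`.
+ *     hs_compress_stream(stream, NULL, 0, &needed);
+ *     char *buf = malloc(needed);
+ *     size_t used = 0;
+ *     if (buf != NULL &&
+ *         hs_compress_stream(stream, buf, needed, &used) == HS_SUCCESS) {
+ *         // The first `used` bytes of `buf` hold the compressed state.
+ *     }
+ * @endcode
+ */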
+
+/**
+ * Decompresses a compressed representation created by @ref hs_compress_stream()
+ * into a new stream.
+ *
+ * Note: @p buf must correspond to a complete compressed representation created
+ * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
+ * not always possible to detect misuse of this API and behaviour is undefined
+ * if these properties are not satisfied.
+ *
+ * @param db
+ * The compiled pattern database that the compressed stream was opened
+ * against.
+ *
+ * @param stream
+ * On success, a pointer to the expanded @ref hs_stream_t will be
+ * returned; NULL on failure.
+ *
+ * @param buf
+ * A compressed representation of a stream. These compressed forms are
+ * created by @ref hs_compress_stream().
+ *
+ * @param buf_size
+ * The size in bytes of the compressed representation.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_expand_stream(const hs_database_t *db,
+ hs_stream_t **stream, const char *buf,
+ size_t buf_size);
+
+/**
+ * Decompresses a compressed representation created by @ref hs_compress_stream()
+ * on top of the 'to' stream. The 'to' stream will first be reset (reporting
+ * any EOD matches if a non-NULL @p onEvent callback handler is provided).
+ *
+ * Note: the 'to' stream must be opened against the same database as the
+ * compressed stream.
+ *
+ * Note: @p buf must correspond to a complete compressed representation created
+ * by @ref hs_compress_stream() of a stream that was opened against @p db. It is
+ * not always possible to detect misuse of this API and behaviour is undefined
+ * if these properties are not satisfied.
+ *
+ * @param to_stream
+ *     A pointer to a valid stream state. On success, the expanded stream
+ *     state will be written on top of this stream.
+ *
+ * @param buf
+ * A compressed representation of a stream. These compressed forms are
+ * created by @ref hs_compress_stream().
+ *
+ * @param buf_size
+ * The size in bytes of the compressed representation.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch(). This is
+ * allowed to be NULL only if the @p onEvent callback is also NULL.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function
+ * when a match occurs.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_reset_and_expand_stream(hs_stream_t *to_stream,
+ const char *buf, size_t buf_size,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent,
+ void *context);
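+
+/*
+ * Example: restoring compressed state either into a freshly allocated stream
+ * or on top of an already-open one (an illustrative sketch, not part of the
+ * original header; `buf`/`used` come from the compression sketch above and
+ * `existing` is a placeholder for an open stream).
+ *
+ * @code
+ *     hs_stream_t *restored = NULL;
+ *     hs_expand_stream(db, &restored, buf, used);   // allocates a new stream
+ *
+ *     // Alternatively, overwrite an existing stream's state in place:
+ *     hs_reset_and_expand_stream(existing, buf, used, scratch, NULL, NULL);
+ * @endcode
+ */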
+
+/**
+ * The block (non-streaming) regular expression scanner.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * for block-mode pattern databases.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param data
+ * Pointer to the data to be scanned.
+ *
+ * @param length
+ * The number of bytes to scan.
+ *
+ * @param flags
+ * Flags modifying the behaviour of this function. This parameter is
+ * provided for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() for this
+ * database.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the
+ * match callback indicated that scanning should stop; other values on
+ * error.
+ */
+hs_error_t corei7_hs_scan(const hs_database_t *db, const char *data,
+ unsigned int length, unsigned int flags,
+ hs_scratch_t *scratch, match_event_handler onEvent,
+ void *context);
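+
+/*
+ * Example: a single block-mode scan (an illustrative sketch, not part of the
+ * original header; assumes `db` was compiled in block mode).
+ *
+ * @code
+ *     const char data[] = "some input to scan";
+ *     hs_error_t err = hs_scan(db, data, sizeof(data) - 1, 0, scratch,
+ *                              on_match, ctx);
+ *     if (err != HS_SUCCESS && err != HS_SCAN_TERMINATED) {
+ *         // handle the error
+ *     }
+ * @endcode
+ */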
+
+/**
+ * The vectored regular expression scanner.
+ *
+ * This is the function call in which the actual pattern matching takes place
+ * for vectoring-mode pattern databases.
+ *
+ * @param db
+ * A compiled pattern database.
+ *
+ * @param data
+ * An array of pointers to the data blocks to be scanned.
+ *
+ * @param length
+ * An array of lengths (in bytes) of each data block to scan.
+ *
+ * @param count
+ *     Number of data blocks to scan. This should correspond to the size
+ *     of the @p data and @p length arrays.
+ *
+ * @param flags
+ * Flags modifying the behaviour of this function. This parameter is
+ * provided for future use and is unused at present.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() for
+ * this database.
+ *
+ * @param onEvent
+ * Pointer to a match event callback function. If a NULL pointer is given,
+ * no matches will be returned.
+ *
+ * @param context
+ * The user defined pointer which will be passed to the callback function.
+ *
+ * @return
+ * Returns @ref HS_SUCCESS on success; @ref HS_SCAN_TERMINATED if the match
+ * callback indicated that scanning should stop; other values on error.
+ */
+hs_error_t corei7_hs_scan_vector(const hs_database_t *db,
+ const char *const *data,
+ const unsigned int *length,
+ unsigned int count, unsigned int flags,
+ hs_scratch_t *scratch,
+ match_event_handler onEvent, void *context);
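+
+/*
+ * Example: scanning two separate buffers as one logical sequence of bytes
+ * with a vectored-mode database (an illustrative sketch, not part of the
+ * original header; the block pointers and lengths are placeholders).
+ *
+ * @code
+ *     const char *const blocks[] = { header_bytes, body_bytes };
+ *     const unsigned int lens[] = { header_len, body_len };
+ *     hs_scan_vector(db, blocks, lens, 2, 0, scratch, on_match, ctx);
+ * @endcode
+ */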
+
+/**
+ * Allocate a "scratch" space for use by Hyperscan.
+ *
+ * Scratch space is required for runtime use; one scratch space is needed per
+ * thread or concurrent caller. Any allocator callback set by @ref
+ * hs_set_scratch_allocator() or @ref hs_set_allocator() will be used by this
+ * function.
+ *
+ * @param db
+ * The database, as produced by @ref hs_compile().
+ *
+ * @param scratch
+ * On first allocation, a pointer to NULL should be provided so a new
+ * scratch can be allocated. If a scratch block has been previously
+ * allocated, then a pointer to it should be passed back in to see if it
+ *     is valid for this database. If a new scratch block is required,
+ * the original will be freed and the new one returned, otherwise the
+ * previous scratch block will be returned. On success, the scratch block
+ * will be suitable for use with the provided database in addition to any
+ *     databases that the original scratch space was suitable for.
+ *
+ * @return
+ * @ref HS_SUCCESS on successful allocation; @ref HS_NOMEM if the
+ * allocation fails. Other errors may be returned if invalid parameters
+ * are specified.
+ */
+hs_error_t corei7_hs_alloc_scratch(const hs_database_t *db,
+ hs_scratch_t **scratch);
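+
+/*
+ * Example: growing a single scratch space to cover several databases by
+ * passing the same pointer back in (an illustrative sketch, not part of the
+ * original header; `db_block` and `db_stream` are placeholders).
+ *
+ * @code
+ *     hs_scratch_t *scratch = NULL;           // must start out as NULL
+ *     hs_alloc_scratch(db_block, &scratch);
+ *     hs_alloc_scratch(db_stream, &scratch);  // resized if necessary; now
+ *                                             // valid for both databases
+ * @endcode
+ */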
+
+/**
+ * Allocate a scratch space that is a clone of an existing scratch space.
+ *
+ * This is useful when multiple concurrent threads will be using the same set
+ * of compiled databases, and another scratch space is required. Any allocator
+ * callback set by @ref hs_set_scratch_allocator() or @ref hs_set_allocator()
+ * will be used by this function.
+ *
+ * @param src
+ * The existing @ref hs_scratch_t to be cloned.
+ *
+ * @param dest
+ * A pointer to the new scratch space will be returned here.
+ *
+ * @return
+ * @ref HS_SUCCESS on success; @ref HS_NOMEM if the allocation fails.
+ * Other errors may be returned if invalid parameters are specified.
+ */
+hs_error_t corei7_hs_clone_scratch(const hs_scratch_t *src,
+ hs_scratch_t **dest);
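+
+/*
+ * Example: giving each worker thread its own scratch by cloning a prototype
+ * rather than re-deriving one from every database (an illustrative sketch,
+ * not part of the original header; NUM_THREADS and `proto_scratch` are
+ * placeholders).
+ *
+ * @code
+ *     hs_scratch_t *per_thread[NUM_THREADS] = { NULL };
+ *     for (int i = 0; i < NUM_THREADS; i++) {
+ *         hs_clone_scratch(proto_scratch, &per_thread[i]);
+ *     }
+ * @endcode
+ */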
+
+/**
+ * Provides the size of the given scratch space.
+ *
+ * @param scratch
+ * A per-thread scratch space allocated by @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch().
+ *
+ * @param scratch_size
+ * On success, the size of the scratch space in bytes is placed in this
+ * parameter.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_scratch_size(const hs_scratch_t *scratch,
+ size_t *scratch_size);
+
+/**
+ * Free a scratch block previously allocated by @ref hs_alloc_scratch() or @ref
+ * hs_clone_scratch().
+ *
+ * The free callback set by @ref hs_set_scratch_allocator() or @ref
+ * hs_set_allocator() will be used by this function.
+ *
+ * @param scratch
+ * The scratch block to be freed. NULL may also be safely provided.
+ *
+ * @return
+ * @ref HS_SUCCESS on success, other values on failure.
+ */
+hs_error_t corei7_hs_free_scratch(hs_scratch_t *scratch);
+
+/**
+ * Value reported in the callback's 'from' argument when the start of this
+ * match was too early to be tracked with the requested SOM_HORIZON precision.
+ */
+#define HS_OFFSET_PAST_HORIZON (~0ULL)
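+
+/*
+ * Example: handling the sentinel inside a callback (an illustrative sketch,
+ * not part of the original header).
+ *
+ * @code
+ *     if (from == HS_OFFSET_PAST_HORIZON) {
+ *         // The start of this match predates the configured SOM horizon;
+ *         // only the end offset `to` is reliable.
+ *     }
+ * @endcode
+ */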
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* HS_COREI7_RUNTIME_H */
diff --git a/contrib/libs/hyperscan/runtime_corei7/ya.make b/contrib/libs/hyperscan/runtime_corei7/ya.make
index 30e834e767..efdd466c09 100644
--- a/contrib/libs/hyperscan/runtime_corei7/ya.make
+++ b/contrib/libs/hyperscan/runtime_corei7/ya.make
@@ -1,494 +1,494 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-OWNER(
- galtsev
- g:antiinfra
- g:cpp-contrib
- g:yql
-)
-
-LICENSE(BSD-3-Clause)
-
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-ADDINCL(
- contrib/libs/hyperscan
- contrib/libs/hyperscan/src
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_RUNTIME()
-
-CFLAGS(
- ${SSE41_CFLAGS}
- -DHAVE_SSE41
- ${SSE42_CFLAGS}
- -DHAVE_SSE42
- ${POPCNT_CFLAGS}
- -DHAVE_POPCOUNT_INSTR
- -DCrc32c_ComputeBuf=corei7_Crc32c_ComputeBuf
- -DblockInitSufPQ=corei7_blockInitSufPQ
- -Dcompress_stream=corei7_compress_stream
- -Dcpuid_flags=corei7_cpuid_flags
- -Dcpuid_tune=corei7_cpuid_tune
- -DdbIsValid=corei7_dbIsValid
- -DdoAccel128=corei7_doAccel128
- -DdoAccel256=corei7_doAccel256
- -DdoAccel32=corei7_doAccel32
- -DdoAccel384=corei7_doAccel384
- -DdoAccel512=corei7_doAccel512
- -DdoAccel64=corei7_doAccel64
- -Dexpand_stream=corei7_expand_stream
- -DfdrExec=corei7_fdrExec
- -DfdrExecStreaming=corei7_fdrExecStreaming
- -Dfdr_exec_fat_teddy_msks1=corei7_fdr_exec_fat_teddy_msks1
- -Dfdr_exec_fat_teddy_msks1_pck=corei7_fdr_exec_fat_teddy_msks1_pck
- -Dfdr_exec_fat_teddy_msks2=corei7_fdr_exec_fat_teddy_msks2
- -Dfdr_exec_fat_teddy_msks2_pck=corei7_fdr_exec_fat_teddy_msks2_pck
- -Dfdr_exec_fat_teddy_msks3=corei7_fdr_exec_fat_teddy_msks3
- -Dfdr_exec_fat_teddy_msks3_pck=corei7_fdr_exec_fat_teddy_msks3_pck
- -Dfdr_exec_fat_teddy_msks4=corei7_fdr_exec_fat_teddy_msks4
- -Dfdr_exec_fat_teddy_msks4_pck=corei7_fdr_exec_fat_teddy_msks4_pck
- -Dfdr_exec_teddy_msks1=corei7_fdr_exec_teddy_msks1
- -Dfdr_exec_teddy_msks1_pck=corei7_fdr_exec_teddy_msks1_pck
- -Dfdr_exec_teddy_msks2=corei7_fdr_exec_teddy_msks2
- -Dfdr_exec_teddy_msks2_pck=corei7_fdr_exec_teddy_msks2_pck
- -Dfdr_exec_teddy_msks3=corei7_fdr_exec_teddy_msks3
- -Dfdr_exec_teddy_msks3_pck=corei7_fdr_exec_teddy_msks3_pck
- -Dfdr_exec_teddy_msks4=corei7_fdr_exec_teddy_msks4
- -Dfdr_exec_teddy_msks4_pck=corei7_fdr_exec_teddy_msks4_pck
- -DflushQueuedLiterals_i=corei7_flushQueuedLiterals_i
- -DflushStoredSomMatches_i=corei7_flushStoredSomMatches_i
- -DhandleSomExternal=corei7_handleSomExternal
- -DhandleSomInternal=corei7_handleSomInternal
- -Dhs_alloc_scratch=corei7_hs_alloc_scratch
- -Dhs_clone_scratch=corei7_hs_clone_scratch
- -Dhs_close_stream=corei7_hs_close_stream
- -Dhs_compress_stream=corei7_hs_compress_stream
- -Dhs_copy_stream=corei7_hs_copy_stream
- -Dhs_database_alloc=corei7_hs_database_alloc
- -Dhs_database_free=corei7_hs_database_free
- -Dhs_database_info=corei7_hs_database_info
- -Dhs_database_size=corei7_hs_database_size
- -Dhs_deserialize_database=corei7_hs_deserialize_database
- -Dhs_deserialize_database_at=corei7_hs_deserialize_database_at
- -Dhs_expand_stream=corei7_hs_expand_stream
- -Dhs_free_database=corei7_hs_free_database
- -Dhs_free_scratch=corei7_hs_free_scratch
- -Dhs_misc_alloc=corei7_hs_misc_alloc
- -Dhs_misc_free=corei7_hs_misc_free
- -Dhs_open_stream=corei7_hs_open_stream
- -Dhs_reset_and_copy_stream=corei7_hs_reset_and_copy_stream
- -Dhs_reset_and_expand_stream=corei7_hs_reset_and_expand_stream
- -Dhs_reset_stream=corei7_hs_reset_stream
- -Dhs_scan=corei7_hs_scan
- -Dhs_scan_stream=corei7_hs_scan_stream
- -Dhs_scan_vector=corei7_hs_scan_vector
- -Dhs_scratch_alloc=corei7_hs_scratch_alloc
- -Dhs_scratch_free=corei7_hs_scratch_free
- -Dhs_scratch_size=corei7_hs_scratch_size
- -Dhs_serialize_database=corei7_hs_serialize_database
- -Dhs_serialized_database_info=corei7_hs_serialized_database_info
- -Dhs_serialized_database_size=corei7_hs_serialized_database_size
- -Dhs_set_allocator=corei7_hs_set_allocator
- -Dhs_set_database_allocator=corei7_hs_set_database_allocator
- -Dhs_set_misc_allocator=corei7_hs_set_misc_allocator
- -Dhs_set_scratch_allocator=corei7_hs_set_scratch_allocator
- -Dhs_set_stream_allocator=corei7_hs_set_stream_allocator
- -Dhs_stream_alloc=corei7_hs_stream_alloc
- -Dhs_stream_free=corei7_hs_stream_free
- -Dhs_stream_size=corei7_hs_stream_size
- -Dhs_valid_platform=corei7_hs_valid_platform
- -Dhs_version=corei7_hs_version
- -DhwlmExec=corei7_hwlmExec
- -DhwlmExecStreaming=corei7_hwlmExecStreaming
- -DloadSomFromStream=corei7_loadSomFromStream
- -Dloadcompressed128=corei7_loadcompressed128
- -Dloadcompressed256=corei7_loadcompressed256
- -Dloadcompressed32=corei7_loadcompressed32
- -Dloadcompressed384=corei7_loadcompressed384
- -Dloadcompressed512=corei7_loadcompressed512
- -Dloadcompressed64=corei7_loadcompressed64
- -Dmcsheng_pext_mask=corei7_mcsheng_pext_mask
- -Dmm_mask_mask=corei7_mm_mask_mask
- -Dmm_shuffle_end=corei7_mm_shuffle_end
- -Dmmbit_keyshift_lut=corei7_mmbit_keyshift_lut
- -Dmmbit_maxlevel_direct_lut=corei7_mmbit_maxlevel_direct_lut
- -Dmmbit_maxlevel_from_keyshift_lut=corei7_mmbit_maxlevel_from_keyshift_lut
- -Dmmbit_root_offset_from_level=corei7_mmbit_root_offset_from_level
- -Dmmbit_zero_to_lut=corei7_mmbit_zero_to_lut
- -DnfaBlockExecReverse=corei7_nfaBlockExecReverse
- -DnfaCheckFinalState=corei7_nfaCheckFinalState
- -DnfaExecCastle_Q=corei7_nfaExecCastle_Q
- -DnfaExecCastle_Q2=corei7_nfaExecCastle_Q2
- -DnfaExecCastle_QR=corei7_nfaExecCastle_QR
- -DnfaExecCastle_expandState=corei7_nfaExecCastle_expandState
- -DnfaExecCastle_inAccept=corei7_nfaExecCastle_inAccept
- -DnfaExecCastle_inAnyAccept=corei7_nfaExecCastle_inAnyAccept
- -DnfaExecCastle_initCompressedState=corei7_nfaExecCastle_initCompressedState
- -DnfaExecCastle_queueCompressState=corei7_nfaExecCastle_queueCompressState
- -DnfaExecCastle_queueInitState=corei7_nfaExecCastle_queueInitState
- -DnfaExecCastle_reportCurrent=corei7_nfaExecCastle_reportCurrent
- -DnfaExecGough16_Q=corei7_nfaExecGough16_Q
- -DnfaExecGough16_Q2=corei7_nfaExecGough16_Q2
- -DnfaExecGough16_QR=corei7_nfaExecGough16_QR
- -DnfaExecGough16_expandState=corei7_nfaExecGough16_expandState
- -DnfaExecGough16_inAccept=corei7_nfaExecGough16_inAccept
- -DnfaExecGough16_inAnyAccept=corei7_nfaExecGough16_inAnyAccept
- -DnfaExecGough16_initCompressedState=corei7_nfaExecGough16_initCompressedState
- -DnfaExecGough16_queueCompressState=corei7_nfaExecGough16_queueCompressState
- -DnfaExecGough16_queueInitState=corei7_nfaExecGough16_queueInitState
- -DnfaExecGough16_reportCurrent=corei7_nfaExecGough16_reportCurrent
- -DnfaExecGough16_testEOD=corei7_nfaExecGough16_testEOD
- -DnfaExecGough8_Q=corei7_nfaExecGough8_Q
- -DnfaExecGough8_Q2=corei7_nfaExecGough8_Q2
- -DnfaExecGough8_QR=corei7_nfaExecGough8_QR
- -DnfaExecGough8_expandState=corei7_nfaExecGough8_expandState
- -DnfaExecGough8_inAccept=corei7_nfaExecGough8_inAccept
- -DnfaExecGough8_inAnyAccept=corei7_nfaExecGough8_inAnyAccept
- -DnfaExecGough8_initCompressedState=corei7_nfaExecGough8_initCompressedState
- -DnfaExecGough8_queueCompressState=corei7_nfaExecGough8_queueCompressState
- -DnfaExecGough8_queueInitState=corei7_nfaExecGough8_queueInitState
- -DnfaExecGough8_reportCurrent=corei7_nfaExecGough8_reportCurrent
- -DnfaExecGough8_testEOD=corei7_nfaExecGough8_testEOD
- -DnfaExecLbrDot_Q=corei7_nfaExecLbrDot_Q
- -DnfaExecLbrDot_Q2=corei7_nfaExecLbrDot_Q2
- -DnfaExecLbrDot_QR=corei7_nfaExecLbrDot_QR
- -DnfaExecLbrDot_expandState=corei7_nfaExecLbrDot_expandState
- -DnfaExecLbrDot_inAccept=corei7_nfaExecLbrDot_inAccept
- -DnfaExecLbrDot_inAnyAccept=corei7_nfaExecLbrDot_inAnyAccept
- -DnfaExecLbrDot_initCompressedState=corei7_nfaExecLbrDot_initCompressedState
- -DnfaExecLbrDot_queueCompressState=corei7_nfaExecLbrDot_queueCompressState
- -DnfaExecLbrDot_queueInitState=corei7_nfaExecLbrDot_queueInitState
- -DnfaExecLbrDot_reportCurrent=corei7_nfaExecLbrDot_reportCurrent
- -DnfaExecLbrNVerm_Q=corei7_nfaExecLbrNVerm_Q
- -DnfaExecLbrNVerm_Q2=corei7_nfaExecLbrNVerm_Q2
- -DnfaExecLbrNVerm_QR=corei7_nfaExecLbrNVerm_QR
- -DnfaExecLbrNVerm_expandState=corei7_nfaExecLbrNVerm_expandState
- -DnfaExecLbrNVerm_inAccept=corei7_nfaExecLbrNVerm_inAccept
- -DnfaExecLbrNVerm_inAnyAccept=corei7_nfaExecLbrNVerm_inAnyAccept
- -DnfaExecLbrNVerm_initCompressedState=corei7_nfaExecLbrNVerm_initCompressedState
- -DnfaExecLbrNVerm_queueCompressState=corei7_nfaExecLbrNVerm_queueCompressState
- -DnfaExecLbrNVerm_queueInitState=corei7_nfaExecLbrNVerm_queueInitState
- -DnfaExecLbrNVerm_reportCurrent=corei7_nfaExecLbrNVerm_reportCurrent
- -DnfaExecLbrShuf_Q=corei7_nfaExecLbrShuf_Q
- -DnfaExecLbrShuf_Q2=corei7_nfaExecLbrShuf_Q2
- -DnfaExecLbrShuf_QR=corei7_nfaExecLbrShuf_QR
- -DnfaExecLbrShuf_expandState=corei7_nfaExecLbrShuf_expandState
- -DnfaExecLbrShuf_inAccept=corei7_nfaExecLbrShuf_inAccept
- -DnfaExecLbrShuf_inAnyAccept=corei7_nfaExecLbrShuf_inAnyAccept
- -DnfaExecLbrShuf_initCompressedState=corei7_nfaExecLbrShuf_initCompressedState
- -DnfaExecLbrShuf_queueCompressState=corei7_nfaExecLbrShuf_queueCompressState
- -DnfaExecLbrShuf_queueInitState=corei7_nfaExecLbrShuf_queueInitState
- -DnfaExecLbrShuf_reportCurrent=corei7_nfaExecLbrShuf_reportCurrent
- -DnfaExecLbrTruf_Q=corei7_nfaExecLbrTruf_Q
- -DnfaExecLbrTruf_Q2=corei7_nfaExecLbrTruf_Q2
- -DnfaExecLbrTruf_QR=corei7_nfaExecLbrTruf_QR
- -DnfaExecLbrTruf_expandState=corei7_nfaExecLbrTruf_expandState
- -DnfaExecLbrTruf_inAccept=corei7_nfaExecLbrTruf_inAccept
- -DnfaExecLbrTruf_inAnyAccept=corei7_nfaExecLbrTruf_inAnyAccept
- -DnfaExecLbrTruf_initCompressedState=corei7_nfaExecLbrTruf_initCompressedState
- -DnfaExecLbrTruf_queueCompressState=corei7_nfaExecLbrTruf_queueCompressState
- -DnfaExecLbrTruf_queueInitState=corei7_nfaExecLbrTruf_queueInitState
- -DnfaExecLbrTruf_reportCurrent=corei7_nfaExecLbrTruf_reportCurrent
- -DnfaExecLbrVerm_Q=corei7_nfaExecLbrVerm_Q
- -DnfaExecLbrVerm_Q2=corei7_nfaExecLbrVerm_Q2
- -DnfaExecLbrVerm_QR=corei7_nfaExecLbrVerm_QR
- -DnfaExecLbrVerm_expandState=corei7_nfaExecLbrVerm_expandState
- -DnfaExecLbrVerm_inAccept=corei7_nfaExecLbrVerm_inAccept
- -DnfaExecLbrVerm_inAnyAccept=corei7_nfaExecLbrVerm_inAnyAccept
- -DnfaExecLbrVerm_initCompressedState=corei7_nfaExecLbrVerm_initCompressedState
- -DnfaExecLbrVerm_queueCompressState=corei7_nfaExecLbrVerm_queueCompressState
- -DnfaExecLbrVerm_queueInitState=corei7_nfaExecLbrVerm_queueInitState
- -DnfaExecLbrVerm_reportCurrent=corei7_nfaExecLbrVerm_reportCurrent
- -DnfaExecLimEx128_B_Reverse=corei7_nfaExecLimEx128_B_Reverse
- -DnfaExecLimEx128_Q=corei7_nfaExecLimEx128_Q
- -DnfaExecLimEx128_Q2=corei7_nfaExecLimEx128_Q2
- -DnfaExecLimEx128_QR=corei7_nfaExecLimEx128_QR
- -DnfaExecLimEx128_expandState=corei7_nfaExecLimEx128_expandState
- -DnfaExecLimEx128_inAccept=corei7_nfaExecLimEx128_inAccept
- -DnfaExecLimEx128_inAnyAccept=corei7_nfaExecLimEx128_inAnyAccept
- -DnfaExecLimEx128_initCompressedState=corei7_nfaExecLimEx128_initCompressedState
- -DnfaExecLimEx128_queueCompressState=corei7_nfaExecLimEx128_queueCompressState
- -DnfaExecLimEx128_queueInitState=corei7_nfaExecLimEx128_queueInitState
- -DnfaExecLimEx128_reportCurrent=corei7_nfaExecLimEx128_reportCurrent
- -DnfaExecLimEx128_testEOD=corei7_nfaExecLimEx128_testEOD
- -DnfaExecLimEx128_zombie_status=corei7_nfaExecLimEx128_zombie_status
- -DnfaExecLimEx256_B_Reverse=corei7_nfaExecLimEx256_B_Reverse
- -DnfaExecLimEx256_Q=corei7_nfaExecLimEx256_Q
- -DnfaExecLimEx256_Q2=corei7_nfaExecLimEx256_Q2
- -DnfaExecLimEx256_QR=corei7_nfaExecLimEx256_QR
- -DnfaExecLimEx256_expandState=corei7_nfaExecLimEx256_expandState
- -DnfaExecLimEx256_inAccept=corei7_nfaExecLimEx256_inAccept
- -DnfaExecLimEx256_inAnyAccept=corei7_nfaExecLimEx256_inAnyAccept
- -DnfaExecLimEx256_initCompressedState=corei7_nfaExecLimEx256_initCompressedState
- -DnfaExecLimEx256_queueCompressState=corei7_nfaExecLimEx256_queueCompressState
- -DnfaExecLimEx256_queueInitState=corei7_nfaExecLimEx256_queueInitState
- -DnfaExecLimEx256_reportCurrent=corei7_nfaExecLimEx256_reportCurrent
- -DnfaExecLimEx256_testEOD=corei7_nfaExecLimEx256_testEOD
- -DnfaExecLimEx256_zombie_status=corei7_nfaExecLimEx256_zombie_status
- -DnfaExecLimEx32_B_Reverse=corei7_nfaExecLimEx32_B_Reverse
- -DnfaExecLimEx32_Q=corei7_nfaExecLimEx32_Q
- -DnfaExecLimEx32_Q2=corei7_nfaExecLimEx32_Q2
- -DnfaExecLimEx32_QR=corei7_nfaExecLimEx32_QR
- -DnfaExecLimEx32_expandState=corei7_nfaExecLimEx32_expandState
- -DnfaExecLimEx32_inAccept=corei7_nfaExecLimEx32_inAccept
- -DnfaExecLimEx32_inAnyAccept=corei7_nfaExecLimEx32_inAnyAccept
- -DnfaExecLimEx32_initCompressedState=corei7_nfaExecLimEx32_initCompressedState
- -DnfaExecLimEx32_queueCompressState=corei7_nfaExecLimEx32_queueCompressState
- -DnfaExecLimEx32_queueInitState=corei7_nfaExecLimEx32_queueInitState
- -DnfaExecLimEx32_reportCurrent=corei7_nfaExecLimEx32_reportCurrent
- -DnfaExecLimEx32_testEOD=corei7_nfaExecLimEx32_testEOD
- -DnfaExecLimEx32_zombie_status=corei7_nfaExecLimEx32_zombie_status
- -DnfaExecLimEx384_B_Reverse=corei7_nfaExecLimEx384_B_Reverse
- -DnfaExecLimEx384_Q=corei7_nfaExecLimEx384_Q
- -DnfaExecLimEx384_Q2=corei7_nfaExecLimEx384_Q2
- -DnfaExecLimEx384_QR=corei7_nfaExecLimEx384_QR
- -DnfaExecLimEx384_expandState=corei7_nfaExecLimEx384_expandState
- -DnfaExecLimEx384_inAccept=corei7_nfaExecLimEx384_inAccept
- -DnfaExecLimEx384_inAnyAccept=corei7_nfaExecLimEx384_inAnyAccept
- -DnfaExecLimEx384_initCompressedState=corei7_nfaExecLimEx384_initCompressedState
- -DnfaExecLimEx384_queueCompressState=corei7_nfaExecLimEx384_queueCompressState
- -DnfaExecLimEx384_queueInitState=corei7_nfaExecLimEx384_queueInitState
- -DnfaExecLimEx384_reportCurrent=corei7_nfaExecLimEx384_reportCurrent
- -DnfaExecLimEx384_testEOD=corei7_nfaExecLimEx384_testEOD
- -DnfaExecLimEx384_zombie_status=corei7_nfaExecLimEx384_zombie_status
- -DnfaExecLimEx512_B_Reverse=corei7_nfaExecLimEx512_B_Reverse
- -DnfaExecLimEx512_Q=corei7_nfaExecLimEx512_Q
- -DnfaExecLimEx512_Q2=corei7_nfaExecLimEx512_Q2
- -DnfaExecLimEx512_QR=corei7_nfaExecLimEx512_QR
- -DnfaExecLimEx512_expandState=corei7_nfaExecLimEx512_expandState
- -DnfaExecLimEx512_inAccept=corei7_nfaExecLimEx512_inAccept
- -DnfaExecLimEx512_inAnyAccept=corei7_nfaExecLimEx512_inAnyAccept
- -DnfaExecLimEx512_initCompressedState=corei7_nfaExecLimEx512_initCompressedState
- -DnfaExecLimEx512_queueCompressState=corei7_nfaExecLimEx512_queueCompressState
- -DnfaExecLimEx512_queueInitState=corei7_nfaExecLimEx512_queueInitState
- -DnfaExecLimEx512_reportCurrent=corei7_nfaExecLimEx512_reportCurrent
- -DnfaExecLimEx512_testEOD=corei7_nfaExecLimEx512_testEOD
- -DnfaExecLimEx512_zombie_status=corei7_nfaExecLimEx512_zombie_status
- -DnfaExecLimEx64_B_Reverse=corei7_nfaExecLimEx64_B_Reverse
- -DnfaExecLimEx64_Q=corei7_nfaExecLimEx64_Q
- -DnfaExecLimEx64_Q2=corei7_nfaExecLimEx64_Q2
- -DnfaExecLimEx64_QR=corei7_nfaExecLimEx64_QR
- -DnfaExecLimEx64_expandState=corei7_nfaExecLimEx64_expandState
- -DnfaExecLimEx64_inAccept=corei7_nfaExecLimEx64_inAccept
- -DnfaExecLimEx64_inAnyAccept=corei7_nfaExecLimEx64_inAnyAccept
- -DnfaExecLimEx64_initCompressedState=corei7_nfaExecLimEx64_initCompressedState
- -DnfaExecLimEx64_queueCompressState=corei7_nfaExecLimEx64_queueCompressState
- -DnfaExecLimEx64_queueInitState=corei7_nfaExecLimEx64_queueInitState
- -DnfaExecLimEx64_reportCurrent=corei7_nfaExecLimEx64_reportCurrent
- -DnfaExecLimEx64_testEOD=corei7_nfaExecLimEx64_testEOD
- -DnfaExecLimEx64_zombie_status=corei7_nfaExecLimEx64_zombie_status
- -DnfaExecMcClellan16_B=corei7_nfaExecMcClellan16_B
- -DnfaExecMcClellan16_Q=corei7_nfaExecMcClellan16_Q
- -DnfaExecMcClellan16_Q2=corei7_nfaExecMcClellan16_Q2
- -DnfaExecMcClellan16_QR=corei7_nfaExecMcClellan16_QR
- -DnfaExecMcClellan16_SimpStream=corei7_nfaExecMcClellan16_SimpStream
- -DnfaExecMcClellan16_expandState=corei7_nfaExecMcClellan16_expandState
- -DnfaExecMcClellan16_inAccept=corei7_nfaExecMcClellan16_inAccept
- -DnfaExecMcClellan16_inAnyAccept=corei7_nfaExecMcClellan16_inAnyAccept
- -DnfaExecMcClellan16_initCompressedState=corei7_nfaExecMcClellan16_initCompressedState
- -DnfaExecMcClellan16_queueCompressState=corei7_nfaExecMcClellan16_queueCompressState
- -DnfaExecMcClellan16_queueInitState=corei7_nfaExecMcClellan16_queueInitState
- -DnfaExecMcClellan16_reportCurrent=corei7_nfaExecMcClellan16_reportCurrent
- -DnfaExecMcClellan16_testEOD=corei7_nfaExecMcClellan16_testEOD
- -DnfaExecMcClellan8_B=corei7_nfaExecMcClellan8_B
- -DnfaExecMcClellan8_Q=corei7_nfaExecMcClellan8_Q
- -DnfaExecMcClellan8_Q2=corei7_nfaExecMcClellan8_Q2
- -DnfaExecMcClellan8_QR=corei7_nfaExecMcClellan8_QR
- -DnfaExecMcClellan8_SimpStream=corei7_nfaExecMcClellan8_SimpStream
- -DnfaExecMcClellan8_expandState=corei7_nfaExecMcClellan8_expandState
- -DnfaExecMcClellan8_inAccept=corei7_nfaExecMcClellan8_inAccept
- -DnfaExecMcClellan8_inAnyAccept=corei7_nfaExecMcClellan8_inAnyAccept
- -DnfaExecMcClellan8_initCompressedState=corei7_nfaExecMcClellan8_initCompressedState
- -DnfaExecMcClellan8_queueCompressState=corei7_nfaExecMcClellan8_queueCompressState
- -DnfaExecMcClellan8_queueInitState=corei7_nfaExecMcClellan8_queueInitState
- -DnfaExecMcClellan8_reportCurrent=corei7_nfaExecMcClellan8_reportCurrent
- -DnfaExecMcClellan8_testEOD=corei7_nfaExecMcClellan8_testEOD
- -DnfaExecMcSheng16_Q=corei7_nfaExecMcSheng16_Q
- -DnfaExecMcSheng16_Q2=corei7_nfaExecMcSheng16_Q2
- -DnfaExecMcSheng16_QR=corei7_nfaExecMcSheng16_QR
- -DnfaExecMcSheng16_expandState=corei7_nfaExecMcSheng16_expandState
- -DnfaExecMcSheng16_inAccept=corei7_nfaExecMcSheng16_inAccept
- -DnfaExecMcSheng16_inAnyAccept=corei7_nfaExecMcSheng16_inAnyAccept
- -DnfaExecMcSheng16_initCompressedState=corei7_nfaExecMcSheng16_initCompressedState
- -DnfaExecMcSheng16_queueCompressState=corei7_nfaExecMcSheng16_queueCompressState
- -DnfaExecMcSheng16_queueInitState=corei7_nfaExecMcSheng16_queueInitState
- -DnfaExecMcSheng16_reportCurrent=corei7_nfaExecMcSheng16_reportCurrent
- -DnfaExecMcSheng16_testEOD=corei7_nfaExecMcSheng16_testEOD
- -DnfaExecMcSheng8_Q=corei7_nfaExecMcSheng8_Q
- -DnfaExecMcSheng8_Q2=corei7_nfaExecMcSheng8_Q2
- -DnfaExecMcSheng8_QR=corei7_nfaExecMcSheng8_QR
- -DnfaExecMcSheng8_expandState=corei7_nfaExecMcSheng8_expandState
- -DnfaExecMcSheng8_inAccept=corei7_nfaExecMcSheng8_inAccept
- -DnfaExecMcSheng8_inAnyAccept=corei7_nfaExecMcSheng8_inAnyAccept
- -DnfaExecMcSheng8_initCompressedState=corei7_nfaExecMcSheng8_initCompressedState
- -DnfaExecMcSheng8_queueCompressState=corei7_nfaExecMcSheng8_queueCompressState
- -DnfaExecMcSheng8_queueInitState=corei7_nfaExecMcSheng8_queueInitState
- -DnfaExecMcSheng8_reportCurrent=corei7_nfaExecMcSheng8_reportCurrent
- -DnfaExecMcSheng8_testEOD=corei7_nfaExecMcSheng8_testEOD
- -DnfaExecMpv_Q=corei7_nfaExecMpv_Q
- -DnfaExecMpv_QueueExecRaw=corei7_nfaExecMpv_QueueExecRaw
- -DnfaExecMpv_expandState=corei7_nfaExecMpv_expandState
- -DnfaExecMpv_initCompressedState=corei7_nfaExecMpv_initCompressedState
- -DnfaExecMpv_queueCompressState=corei7_nfaExecMpv_queueCompressState
- -DnfaExecMpv_queueInitState=corei7_nfaExecMpv_queueInitState
- -DnfaExecMpv_reportCurrent=corei7_nfaExecMpv_reportCurrent
- -DnfaExecSheng_B=corei7_nfaExecSheng_B
- -DnfaExecSheng_Q=corei7_nfaExecSheng_Q
- -DnfaExecSheng_Q2=corei7_nfaExecSheng_Q2
- -DnfaExecSheng_QR=corei7_nfaExecSheng_QR
- -DnfaExecSheng_expandState=corei7_nfaExecSheng_expandState
- -DnfaExecSheng_inAccept=corei7_nfaExecSheng_inAccept
- -DnfaExecSheng_inAnyAccept=corei7_nfaExecSheng_inAnyAccept
- -DnfaExecSheng_initCompressedState=corei7_nfaExecSheng_initCompressedState
- -DnfaExecSheng_queueCompressState=corei7_nfaExecSheng_queueCompressState
- -DnfaExecSheng_queueInitState=corei7_nfaExecSheng_queueInitState
- -DnfaExecSheng_reportCurrent=corei7_nfaExecSheng_reportCurrent
- -DnfaExecSheng_testEOD=corei7_nfaExecSheng_testEOD
- -DnfaExecTamarama_Q=corei7_nfaExecTamarama_Q
- -DnfaExecTamarama_Q2=corei7_nfaExecTamarama_Q2
- -DnfaExecTamarama_QR=corei7_nfaExecTamarama_QR
- -DnfaExecTamarama_expandState=corei7_nfaExecTamarama_expandState
- -DnfaExecTamarama_inAccept=corei7_nfaExecTamarama_inAccept
- -DnfaExecTamarama_inAnyAccept=corei7_nfaExecTamarama_inAnyAccept
- -DnfaExecTamarama_queueCompressState=corei7_nfaExecTamarama_queueCompressState
- -DnfaExecTamarama_queueInitState=corei7_nfaExecTamarama_queueInitState
- -DnfaExecTamarama_reportCurrent=corei7_nfaExecTamarama_reportCurrent
- -DnfaExecTamarama_testEOD=corei7_nfaExecTamarama_testEOD
- -DnfaExecTamarama_zombie_status=corei7_nfaExecTamarama_zombie_status
- -DnfaExpandState=corei7_nfaExpandState
- -DnfaGetZombieStatus=corei7_nfaGetZombieStatus
- -DnfaInAcceptState=corei7_nfaInAcceptState
- -DnfaInAnyAcceptState=corei7_nfaInAnyAcceptState
- -DnfaInitCompressedState=corei7_nfaInitCompressedState
- -DnfaQueueCompressState=corei7_nfaQueueCompressState
- -DnfaQueueExec=corei7_nfaQueueExec
- -DnfaQueueExec2_raw=corei7_nfaQueueExec2_raw
- -DnfaQueueExecRose=corei7_nfaQueueExecRose
- -DnfaQueueExecToMatch=corei7_nfaQueueExecToMatch
- -DnfaQueueExec_raw=corei7_nfaQueueExec_raw
- -DnfaQueueInitState=corei7_nfaQueueInitState
- -DnfaReportCurrentMatches=corei7_nfaReportCurrentMatches
- -DnoodExec=corei7_noodExec
- -DnoodExecStreaming=corei7_noodExecStreaming
- -Dp_mask_arr=corei7_p_mask_arr
- -Dp_mask_arr256=corei7_p_mask_arr256
- -DrepeatHasMatchBitmap=corei7_repeatHasMatchBitmap
- -DrepeatHasMatchRange=corei7_repeatHasMatchRange
- -DrepeatHasMatchRing=corei7_repeatHasMatchRing
- -DrepeatHasMatchSparseOptimalP=corei7_repeatHasMatchSparseOptimalP
- -DrepeatHasMatchTrailer=corei7_repeatHasMatchTrailer
- -DrepeatLastTopBitmap=corei7_repeatLastTopBitmap
- -DrepeatLastTopRange=corei7_repeatLastTopRange
- -DrepeatLastTopRing=corei7_repeatLastTopRing
- -DrepeatLastTopSparseOptimalP=corei7_repeatLastTopSparseOptimalP
- -DrepeatLastTopTrailer=corei7_repeatLastTopTrailer
- -DrepeatNextMatchBitmap=corei7_repeatNextMatchBitmap
- -DrepeatNextMatchRange=corei7_repeatNextMatchRange
- -DrepeatNextMatchRing=corei7_repeatNextMatchRing
- -DrepeatNextMatchSparseOptimalP=corei7_repeatNextMatchSparseOptimalP
- -DrepeatNextMatchTrailer=corei7_repeatNextMatchTrailer
- -DrepeatPack=corei7_repeatPack
- -DrepeatStoreBitmap=corei7_repeatStoreBitmap
- -DrepeatStoreRange=corei7_repeatStoreRange
- -DrepeatStoreRing=corei7_repeatStoreRing
- -DrepeatStoreSparseOptimalP=corei7_repeatStoreSparseOptimalP
- -DrepeatStoreTrailer=corei7_repeatStoreTrailer
- -DrepeatUnpack=corei7_repeatUnpack
- -DroseAnchoredCallback=corei7_roseAnchoredCallback
- -DroseBlockExec=corei7_roseBlockExec
- -DroseCallback=corei7_roseCallback
- -DroseCatchUpAll=corei7_roseCatchUpAll
- -DroseCatchUpMPV_i=corei7_roseCatchUpMPV_i
- -DroseCatchUpSuf=corei7_roseCatchUpSuf
- -DroseDelayRebuildCallback=corei7_roseDelayRebuildCallback
- -DroseFloatingCallback=corei7_roseFloatingCallback
- -DroseHandleChainMatch=corei7_roseHandleChainMatch
- -DroseInitState=corei7_roseInitState
- -DroseNfaAdaptor=corei7_roseNfaAdaptor
- -DroseNfaEarliestSom=corei7_roseNfaEarliestSom
- -DroseReportAdaptor=corei7_roseReportAdaptor
- -DroseRunBoundaryProgram=corei7_roseRunBoundaryProgram
- -DroseRunFlushCombProgram=corei7_roseRunFlushCombProgram
- -DroseRunLastFlushCombProgram=corei7_roseRunLastFlushCombProgram
- -DroseRunProgram=corei7_roseRunProgram
- -DroseRunProgram_l=corei7_roseRunProgram_l
- -DroseStreamEodExec=corei7_roseStreamEodExec
- -DroseStreamExec=corei7_roseStreamExec
- -DrshuftiExec=corei7_rshuftiExec
- -DrtruffleExec=corei7_rtruffleExec
- -Drun_accel=corei7_run_accel
- -DsetSomFromSomAware=corei7_setSomFromSomAware
- -DshuftiDoubleExec=corei7_shuftiDoubleExec
- -DshuftiExec=corei7_shuftiExec
- -Dsimd_onebit_masks=corei7_simd_onebit_masks
- -Dsize_compress_stream=corei7_size_compress_stream
- -DstoreSomToStream=corei7_storeSomToStream
- -Dstorecompressed128=corei7_storecompressed128
- -Dstorecompressed256=corei7_storecompressed256
- -Dstorecompressed32=corei7_storecompressed32
- -Dstorecompressed384=corei7_storecompressed384
- -Dstorecompressed512=corei7_storecompressed512
- -Dstorecompressed64=corei7_storecompressed64
- -DstreamInitSufPQ=corei7_streamInitSufPQ
- -DtruffleExec=corei7_truffleExec
- -Dvbs_mask_data=corei7_vbs_mask_data
-)
-
-SRCDIR(contrib/libs/hyperscan)
-
-SRCS(
- src/alloc.c
- src/crc32.c
- src/database.c
- src/fdr/fdr.c
- src/fdr/teddy.c
- src/fdr/teddy_avx2.c
- src/hs_valid_platform.c
- src/hs_version.c
- src/hwlm/hwlm.c
- src/hwlm/noodle_engine.c
- src/nfa/accel.c
- src/nfa/castle.c
- src/nfa/gough.c
- src/nfa/lbr.c
- src/nfa/limex_64.c
- src/nfa/limex_accel.c
- src/nfa/limex_native.c
- src/nfa/limex_simd128.c
- src/nfa/limex_simd256.c
- src/nfa/limex_simd384.c
- src/nfa/limex_simd512.c
- src/nfa/mcclellan.c
- src/nfa/mcsheng.c
- src/nfa/mcsheng_data.c
- src/nfa/mpv.c
- src/nfa/nfa_api_dispatch.c
- src/nfa/repeat.c
- src/nfa/sheng.c
- src/nfa/shufti.c
- src/nfa/tamarama.c
- src/nfa/truffle.c
- src/rose/block.c
- src/rose/catchup.c
- src/rose/init.c
- src/rose/match.c
- src/rose/program_runtime.c
- src/rose/stream.c
- src/runtime.c
- src/scratch.c
- src/som/som_runtime.c
- src/som/som_stream.c
- src/stream_compress.c
- src/util/cpuid_flags.c
- src/util/masked_move.c
- src/util/multibit.c
- src/util/simd_utils.c
- src/util/state_compress.c
-)
-
-END()
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+OWNER(
+ galtsev
+ g:antiinfra
+ g:cpp-contrib
+ g:yql
+)
+
+LICENSE(BSD-3-Clause)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+ADDINCL(
+ contrib/libs/hyperscan
+ contrib/libs/hyperscan/src
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ ${SSE41_CFLAGS}
+ -DHAVE_SSE41
+ ${SSE42_CFLAGS}
+ -DHAVE_SSE42
+ ${POPCNT_CFLAGS}
+ -DHAVE_POPCOUNT_INSTR
+ -DCrc32c_ComputeBuf=corei7_Crc32c_ComputeBuf
+ -DblockInitSufPQ=corei7_blockInitSufPQ
+ -Dcompress_stream=corei7_compress_stream
+ -Dcpuid_flags=corei7_cpuid_flags
+ -Dcpuid_tune=corei7_cpuid_tune
+ -DdbIsValid=corei7_dbIsValid
+ -DdoAccel128=corei7_doAccel128
+ -DdoAccel256=corei7_doAccel256
+ -DdoAccel32=corei7_doAccel32
+ -DdoAccel384=corei7_doAccel384
+ -DdoAccel512=corei7_doAccel512
+ -DdoAccel64=corei7_doAccel64
+ -Dexpand_stream=corei7_expand_stream
+ -DfdrExec=corei7_fdrExec
+ -DfdrExecStreaming=corei7_fdrExecStreaming
+ -Dfdr_exec_fat_teddy_msks1=corei7_fdr_exec_fat_teddy_msks1
+ -Dfdr_exec_fat_teddy_msks1_pck=corei7_fdr_exec_fat_teddy_msks1_pck
+ -Dfdr_exec_fat_teddy_msks2=corei7_fdr_exec_fat_teddy_msks2
+ -Dfdr_exec_fat_teddy_msks2_pck=corei7_fdr_exec_fat_teddy_msks2_pck
+ -Dfdr_exec_fat_teddy_msks3=corei7_fdr_exec_fat_teddy_msks3
+ -Dfdr_exec_fat_teddy_msks3_pck=corei7_fdr_exec_fat_teddy_msks3_pck
+ -Dfdr_exec_fat_teddy_msks4=corei7_fdr_exec_fat_teddy_msks4
+ -Dfdr_exec_fat_teddy_msks4_pck=corei7_fdr_exec_fat_teddy_msks4_pck
+ -Dfdr_exec_teddy_msks1=corei7_fdr_exec_teddy_msks1
+ -Dfdr_exec_teddy_msks1_pck=corei7_fdr_exec_teddy_msks1_pck
+ -Dfdr_exec_teddy_msks2=corei7_fdr_exec_teddy_msks2
+ -Dfdr_exec_teddy_msks2_pck=corei7_fdr_exec_teddy_msks2_pck
+ -Dfdr_exec_teddy_msks3=corei7_fdr_exec_teddy_msks3
+ -Dfdr_exec_teddy_msks3_pck=corei7_fdr_exec_teddy_msks3_pck
+ -Dfdr_exec_teddy_msks4=corei7_fdr_exec_teddy_msks4
+ -Dfdr_exec_teddy_msks4_pck=corei7_fdr_exec_teddy_msks4_pck
+ -DflushQueuedLiterals_i=corei7_flushQueuedLiterals_i
+ -DflushStoredSomMatches_i=corei7_flushStoredSomMatches_i
+ -DhandleSomExternal=corei7_handleSomExternal
+ -DhandleSomInternal=corei7_handleSomInternal
+ -Dhs_alloc_scratch=corei7_hs_alloc_scratch
+ -Dhs_clone_scratch=corei7_hs_clone_scratch
+ -Dhs_close_stream=corei7_hs_close_stream
+ -Dhs_compress_stream=corei7_hs_compress_stream
+ -Dhs_copy_stream=corei7_hs_copy_stream
+ -Dhs_database_alloc=corei7_hs_database_alloc
+ -Dhs_database_free=corei7_hs_database_free
+ -Dhs_database_info=corei7_hs_database_info
+ -Dhs_database_size=corei7_hs_database_size
+ -Dhs_deserialize_database=corei7_hs_deserialize_database
+ -Dhs_deserialize_database_at=corei7_hs_deserialize_database_at
+ -Dhs_expand_stream=corei7_hs_expand_stream
+ -Dhs_free_database=corei7_hs_free_database
+ -Dhs_free_scratch=corei7_hs_free_scratch
+ -Dhs_misc_alloc=corei7_hs_misc_alloc
+ -Dhs_misc_free=corei7_hs_misc_free
+ -Dhs_open_stream=corei7_hs_open_stream
+ -Dhs_reset_and_copy_stream=corei7_hs_reset_and_copy_stream
+ -Dhs_reset_and_expand_stream=corei7_hs_reset_and_expand_stream
+ -Dhs_reset_stream=corei7_hs_reset_stream
+ -Dhs_scan=corei7_hs_scan
+ -Dhs_scan_stream=corei7_hs_scan_stream
+ -Dhs_scan_vector=corei7_hs_scan_vector
+ -Dhs_scratch_alloc=corei7_hs_scratch_alloc
+ -Dhs_scratch_free=corei7_hs_scratch_free
+ -Dhs_scratch_size=corei7_hs_scratch_size
+ -Dhs_serialize_database=corei7_hs_serialize_database
+ -Dhs_serialized_database_info=corei7_hs_serialized_database_info
+ -Dhs_serialized_database_size=corei7_hs_serialized_database_size
+ -Dhs_set_allocator=corei7_hs_set_allocator
+ -Dhs_set_database_allocator=corei7_hs_set_database_allocator
+ -Dhs_set_misc_allocator=corei7_hs_set_misc_allocator
+ -Dhs_set_scratch_allocator=corei7_hs_set_scratch_allocator
+ -Dhs_set_stream_allocator=corei7_hs_set_stream_allocator
+ -Dhs_stream_alloc=corei7_hs_stream_alloc
+ -Dhs_stream_free=corei7_hs_stream_free
+ -Dhs_stream_size=corei7_hs_stream_size
+ -Dhs_valid_platform=corei7_hs_valid_platform
+ -Dhs_version=corei7_hs_version
+ -DhwlmExec=corei7_hwlmExec
+ -DhwlmExecStreaming=corei7_hwlmExecStreaming
+ -DloadSomFromStream=corei7_loadSomFromStream
+ -Dloadcompressed128=corei7_loadcompressed128
+ -Dloadcompressed256=corei7_loadcompressed256
+ -Dloadcompressed32=corei7_loadcompressed32
+ -Dloadcompressed384=corei7_loadcompressed384
+ -Dloadcompressed512=corei7_loadcompressed512
+ -Dloadcompressed64=corei7_loadcompressed64
+ -Dmcsheng_pext_mask=corei7_mcsheng_pext_mask
+ -Dmm_mask_mask=corei7_mm_mask_mask
+ -Dmm_shuffle_end=corei7_mm_shuffle_end
+ -Dmmbit_keyshift_lut=corei7_mmbit_keyshift_lut
+ -Dmmbit_maxlevel_direct_lut=corei7_mmbit_maxlevel_direct_lut
+ -Dmmbit_maxlevel_from_keyshift_lut=corei7_mmbit_maxlevel_from_keyshift_lut
+ -Dmmbit_root_offset_from_level=corei7_mmbit_root_offset_from_level
+ -Dmmbit_zero_to_lut=corei7_mmbit_zero_to_lut
+ -DnfaBlockExecReverse=corei7_nfaBlockExecReverse
+ -DnfaCheckFinalState=corei7_nfaCheckFinalState
+ -DnfaExecCastle_Q=corei7_nfaExecCastle_Q
+ -DnfaExecCastle_Q2=corei7_nfaExecCastle_Q2
+ -DnfaExecCastle_QR=corei7_nfaExecCastle_QR
+ -DnfaExecCastle_expandState=corei7_nfaExecCastle_expandState
+ -DnfaExecCastle_inAccept=corei7_nfaExecCastle_inAccept
+ -DnfaExecCastle_inAnyAccept=corei7_nfaExecCastle_inAnyAccept
+ -DnfaExecCastle_initCompressedState=corei7_nfaExecCastle_initCompressedState
+ -DnfaExecCastle_queueCompressState=corei7_nfaExecCastle_queueCompressState
+ -DnfaExecCastle_queueInitState=corei7_nfaExecCastle_queueInitState
+ -DnfaExecCastle_reportCurrent=corei7_nfaExecCastle_reportCurrent
+ -DnfaExecGough16_Q=corei7_nfaExecGough16_Q
+ -DnfaExecGough16_Q2=corei7_nfaExecGough16_Q2
+ -DnfaExecGough16_QR=corei7_nfaExecGough16_QR
+ -DnfaExecGough16_expandState=corei7_nfaExecGough16_expandState
+ -DnfaExecGough16_inAccept=corei7_nfaExecGough16_inAccept
+ -DnfaExecGough16_inAnyAccept=corei7_nfaExecGough16_inAnyAccept
+ -DnfaExecGough16_initCompressedState=corei7_nfaExecGough16_initCompressedState
+ -DnfaExecGough16_queueCompressState=corei7_nfaExecGough16_queueCompressState
+ -DnfaExecGough16_queueInitState=corei7_nfaExecGough16_queueInitState
+ -DnfaExecGough16_reportCurrent=corei7_nfaExecGough16_reportCurrent
+ -DnfaExecGough16_testEOD=corei7_nfaExecGough16_testEOD
+ -DnfaExecGough8_Q=corei7_nfaExecGough8_Q
+ -DnfaExecGough8_Q2=corei7_nfaExecGough8_Q2
+ -DnfaExecGough8_QR=corei7_nfaExecGough8_QR
+ -DnfaExecGough8_expandState=corei7_nfaExecGough8_expandState
+ -DnfaExecGough8_inAccept=corei7_nfaExecGough8_inAccept
+ -DnfaExecGough8_inAnyAccept=corei7_nfaExecGough8_inAnyAccept
+ -DnfaExecGough8_initCompressedState=corei7_nfaExecGough8_initCompressedState
+ -DnfaExecGough8_queueCompressState=corei7_nfaExecGough8_queueCompressState
+ -DnfaExecGough8_queueInitState=corei7_nfaExecGough8_queueInitState
+ -DnfaExecGough8_reportCurrent=corei7_nfaExecGough8_reportCurrent
+ -DnfaExecGough8_testEOD=corei7_nfaExecGough8_testEOD
+ -DnfaExecLbrDot_Q=corei7_nfaExecLbrDot_Q
+ -DnfaExecLbrDot_Q2=corei7_nfaExecLbrDot_Q2
+ -DnfaExecLbrDot_QR=corei7_nfaExecLbrDot_QR
+ -DnfaExecLbrDot_expandState=corei7_nfaExecLbrDot_expandState
+ -DnfaExecLbrDot_inAccept=corei7_nfaExecLbrDot_inAccept
+ -DnfaExecLbrDot_inAnyAccept=corei7_nfaExecLbrDot_inAnyAccept
+ -DnfaExecLbrDot_initCompressedState=corei7_nfaExecLbrDot_initCompressedState
+ -DnfaExecLbrDot_queueCompressState=corei7_nfaExecLbrDot_queueCompressState
+ -DnfaExecLbrDot_queueInitState=corei7_nfaExecLbrDot_queueInitState
+ -DnfaExecLbrDot_reportCurrent=corei7_nfaExecLbrDot_reportCurrent
+ -DnfaExecLbrNVerm_Q=corei7_nfaExecLbrNVerm_Q
+ -DnfaExecLbrNVerm_Q2=corei7_nfaExecLbrNVerm_Q2
+ -DnfaExecLbrNVerm_QR=corei7_nfaExecLbrNVerm_QR
+ -DnfaExecLbrNVerm_expandState=corei7_nfaExecLbrNVerm_expandState
+ -DnfaExecLbrNVerm_inAccept=corei7_nfaExecLbrNVerm_inAccept
+ -DnfaExecLbrNVerm_inAnyAccept=corei7_nfaExecLbrNVerm_inAnyAccept
+ -DnfaExecLbrNVerm_initCompressedState=corei7_nfaExecLbrNVerm_initCompressedState
+ -DnfaExecLbrNVerm_queueCompressState=corei7_nfaExecLbrNVerm_queueCompressState
+ -DnfaExecLbrNVerm_queueInitState=corei7_nfaExecLbrNVerm_queueInitState
+ -DnfaExecLbrNVerm_reportCurrent=corei7_nfaExecLbrNVerm_reportCurrent
+ -DnfaExecLbrShuf_Q=corei7_nfaExecLbrShuf_Q
+ -DnfaExecLbrShuf_Q2=corei7_nfaExecLbrShuf_Q2
+ -DnfaExecLbrShuf_QR=corei7_nfaExecLbrShuf_QR
+ -DnfaExecLbrShuf_expandState=corei7_nfaExecLbrShuf_expandState
+ -DnfaExecLbrShuf_inAccept=corei7_nfaExecLbrShuf_inAccept
+ -DnfaExecLbrShuf_inAnyAccept=corei7_nfaExecLbrShuf_inAnyAccept
+ -DnfaExecLbrShuf_initCompressedState=corei7_nfaExecLbrShuf_initCompressedState
+ -DnfaExecLbrShuf_queueCompressState=corei7_nfaExecLbrShuf_queueCompressState
+ -DnfaExecLbrShuf_queueInitState=corei7_nfaExecLbrShuf_queueInitState
+ -DnfaExecLbrShuf_reportCurrent=corei7_nfaExecLbrShuf_reportCurrent
+ -DnfaExecLbrTruf_Q=corei7_nfaExecLbrTruf_Q
+ -DnfaExecLbrTruf_Q2=corei7_nfaExecLbrTruf_Q2
+ -DnfaExecLbrTruf_QR=corei7_nfaExecLbrTruf_QR
+ -DnfaExecLbrTruf_expandState=corei7_nfaExecLbrTruf_expandState
+ -DnfaExecLbrTruf_inAccept=corei7_nfaExecLbrTruf_inAccept
+ -DnfaExecLbrTruf_inAnyAccept=corei7_nfaExecLbrTruf_inAnyAccept
+ -DnfaExecLbrTruf_initCompressedState=corei7_nfaExecLbrTruf_initCompressedState
+ -DnfaExecLbrTruf_queueCompressState=corei7_nfaExecLbrTruf_queueCompressState
+ -DnfaExecLbrTruf_queueInitState=corei7_nfaExecLbrTruf_queueInitState
+ -DnfaExecLbrTruf_reportCurrent=corei7_nfaExecLbrTruf_reportCurrent
+ -DnfaExecLbrVerm_Q=corei7_nfaExecLbrVerm_Q
+ -DnfaExecLbrVerm_Q2=corei7_nfaExecLbrVerm_Q2
+ -DnfaExecLbrVerm_QR=corei7_nfaExecLbrVerm_QR
+ -DnfaExecLbrVerm_expandState=corei7_nfaExecLbrVerm_expandState
+ -DnfaExecLbrVerm_inAccept=corei7_nfaExecLbrVerm_inAccept
+ -DnfaExecLbrVerm_inAnyAccept=corei7_nfaExecLbrVerm_inAnyAccept
+ -DnfaExecLbrVerm_initCompressedState=corei7_nfaExecLbrVerm_initCompressedState
+ -DnfaExecLbrVerm_queueCompressState=corei7_nfaExecLbrVerm_queueCompressState
+ -DnfaExecLbrVerm_queueInitState=corei7_nfaExecLbrVerm_queueInitState
+ -DnfaExecLbrVerm_reportCurrent=corei7_nfaExecLbrVerm_reportCurrent
+ -DnfaExecLimEx128_B_Reverse=corei7_nfaExecLimEx128_B_Reverse
+ -DnfaExecLimEx128_Q=corei7_nfaExecLimEx128_Q
+ -DnfaExecLimEx128_Q2=corei7_nfaExecLimEx128_Q2
+ -DnfaExecLimEx128_QR=corei7_nfaExecLimEx128_QR
+ -DnfaExecLimEx128_expandState=corei7_nfaExecLimEx128_expandState
+ -DnfaExecLimEx128_inAccept=corei7_nfaExecLimEx128_inAccept
+ -DnfaExecLimEx128_inAnyAccept=corei7_nfaExecLimEx128_inAnyAccept
+ -DnfaExecLimEx128_initCompressedState=corei7_nfaExecLimEx128_initCompressedState
+ -DnfaExecLimEx128_queueCompressState=corei7_nfaExecLimEx128_queueCompressState
+ -DnfaExecLimEx128_queueInitState=corei7_nfaExecLimEx128_queueInitState
+ -DnfaExecLimEx128_reportCurrent=corei7_nfaExecLimEx128_reportCurrent
+ -DnfaExecLimEx128_testEOD=corei7_nfaExecLimEx128_testEOD
+ -DnfaExecLimEx128_zombie_status=corei7_nfaExecLimEx128_zombie_status
+ -DnfaExecLimEx256_B_Reverse=corei7_nfaExecLimEx256_B_Reverse
+ -DnfaExecLimEx256_Q=corei7_nfaExecLimEx256_Q
+ -DnfaExecLimEx256_Q2=corei7_nfaExecLimEx256_Q2
+ -DnfaExecLimEx256_QR=corei7_nfaExecLimEx256_QR
+ -DnfaExecLimEx256_expandState=corei7_nfaExecLimEx256_expandState
+ -DnfaExecLimEx256_inAccept=corei7_nfaExecLimEx256_inAccept
+ -DnfaExecLimEx256_inAnyAccept=corei7_nfaExecLimEx256_inAnyAccept
+ -DnfaExecLimEx256_initCompressedState=corei7_nfaExecLimEx256_initCompressedState
+ -DnfaExecLimEx256_queueCompressState=corei7_nfaExecLimEx256_queueCompressState
+ -DnfaExecLimEx256_queueInitState=corei7_nfaExecLimEx256_queueInitState
+ -DnfaExecLimEx256_reportCurrent=corei7_nfaExecLimEx256_reportCurrent
+ -DnfaExecLimEx256_testEOD=corei7_nfaExecLimEx256_testEOD
+ -DnfaExecLimEx256_zombie_status=corei7_nfaExecLimEx256_zombie_status
+ -DnfaExecLimEx32_B_Reverse=corei7_nfaExecLimEx32_B_Reverse
+ -DnfaExecLimEx32_Q=corei7_nfaExecLimEx32_Q
+ -DnfaExecLimEx32_Q2=corei7_nfaExecLimEx32_Q2
+ -DnfaExecLimEx32_QR=corei7_nfaExecLimEx32_QR
+ -DnfaExecLimEx32_expandState=corei7_nfaExecLimEx32_expandState
+ -DnfaExecLimEx32_inAccept=corei7_nfaExecLimEx32_inAccept
+ -DnfaExecLimEx32_inAnyAccept=corei7_nfaExecLimEx32_inAnyAccept
+ -DnfaExecLimEx32_initCompressedState=corei7_nfaExecLimEx32_initCompressedState
+ -DnfaExecLimEx32_queueCompressState=corei7_nfaExecLimEx32_queueCompressState
+ -DnfaExecLimEx32_queueInitState=corei7_nfaExecLimEx32_queueInitState
+ -DnfaExecLimEx32_reportCurrent=corei7_nfaExecLimEx32_reportCurrent
+ -DnfaExecLimEx32_testEOD=corei7_nfaExecLimEx32_testEOD
+ -DnfaExecLimEx32_zombie_status=corei7_nfaExecLimEx32_zombie_status
+ -DnfaExecLimEx384_B_Reverse=corei7_nfaExecLimEx384_B_Reverse
+ -DnfaExecLimEx384_Q=corei7_nfaExecLimEx384_Q
+ -DnfaExecLimEx384_Q2=corei7_nfaExecLimEx384_Q2
+ -DnfaExecLimEx384_QR=corei7_nfaExecLimEx384_QR
+ -DnfaExecLimEx384_expandState=corei7_nfaExecLimEx384_expandState
+ -DnfaExecLimEx384_inAccept=corei7_nfaExecLimEx384_inAccept
+ -DnfaExecLimEx384_inAnyAccept=corei7_nfaExecLimEx384_inAnyAccept
+ -DnfaExecLimEx384_initCompressedState=corei7_nfaExecLimEx384_initCompressedState
+ -DnfaExecLimEx384_queueCompressState=corei7_nfaExecLimEx384_queueCompressState
+ -DnfaExecLimEx384_queueInitState=corei7_nfaExecLimEx384_queueInitState
+ -DnfaExecLimEx384_reportCurrent=corei7_nfaExecLimEx384_reportCurrent
+ -DnfaExecLimEx384_testEOD=corei7_nfaExecLimEx384_testEOD
+ -DnfaExecLimEx384_zombie_status=corei7_nfaExecLimEx384_zombie_status
+ -DnfaExecLimEx512_B_Reverse=corei7_nfaExecLimEx512_B_Reverse
+ -DnfaExecLimEx512_Q=corei7_nfaExecLimEx512_Q
+ -DnfaExecLimEx512_Q2=corei7_nfaExecLimEx512_Q2
+ -DnfaExecLimEx512_QR=corei7_nfaExecLimEx512_QR
+ -DnfaExecLimEx512_expandState=corei7_nfaExecLimEx512_expandState
+ -DnfaExecLimEx512_inAccept=corei7_nfaExecLimEx512_inAccept
+ -DnfaExecLimEx512_inAnyAccept=corei7_nfaExecLimEx512_inAnyAccept
+ -DnfaExecLimEx512_initCompressedState=corei7_nfaExecLimEx512_initCompressedState
+ -DnfaExecLimEx512_queueCompressState=corei7_nfaExecLimEx512_queueCompressState
+ -DnfaExecLimEx512_queueInitState=corei7_nfaExecLimEx512_queueInitState
+ -DnfaExecLimEx512_reportCurrent=corei7_nfaExecLimEx512_reportCurrent
+ -DnfaExecLimEx512_testEOD=corei7_nfaExecLimEx512_testEOD
+ -DnfaExecLimEx512_zombie_status=corei7_nfaExecLimEx512_zombie_status
+ -DnfaExecLimEx64_B_Reverse=corei7_nfaExecLimEx64_B_Reverse
+ -DnfaExecLimEx64_Q=corei7_nfaExecLimEx64_Q
+ -DnfaExecLimEx64_Q2=corei7_nfaExecLimEx64_Q2
+ -DnfaExecLimEx64_QR=corei7_nfaExecLimEx64_QR
+ -DnfaExecLimEx64_expandState=corei7_nfaExecLimEx64_expandState
+ -DnfaExecLimEx64_inAccept=corei7_nfaExecLimEx64_inAccept
+ -DnfaExecLimEx64_inAnyAccept=corei7_nfaExecLimEx64_inAnyAccept
+ -DnfaExecLimEx64_initCompressedState=corei7_nfaExecLimEx64_initCompressedState
+ -DnfaExecLimEx64_queueCompressState=corei7_nfaExecLimEx64_queueCompressState
+ -DnfaExecLimEx64_queueInitState=corei7_nfaExecLimEx64_queueInitState
+ -DnfaExecLimEx64_reportCurrent=corei7_nfaExecLimEx64_reportCurrent
+ -DnfaExecLimEx64_testEOD=corei7_nfaExecLimEx64_testEOD
+ -DnfaExecLimEx64_zombie_status=corei7_nfaExecLimEx64_zombie_status
+ -DnfaExecMcClellan16_B=corei7_nfaExecMcClellan16_B
+ -DnfaExecMcClellan16_Q=corei7_nfaExecMcClellan16_Q
+ -DnfaExecMcClellan16_Q2=corei7_nfaExecMcClellan16_Q2
+ -DnfaExecMcClellan16_QR=corei7_nfaExecMcClellan16_QR
+ -DnfaExecMcClellan16_SimpStream=corei7_nfaExecMcClellan16_SimpStream
+ -DnfaExecMcClellan16_expandState=corei7_nfaExecMcClellan16_expandState
+ -DnfaExecMcClellan16_inAccept=corei7_nfaExecMcClellan16_inAccept
+ -DnfaExecMcClellan16_inAnyAccept=corei7_nfaExecMcClellan16_inAnyAccept
+ -DnfaExecMcClellan16_initCompressedState=corei7_nfaExecMcClellan16_initCompressedState
+ -DnfaExecMcClellan16_queueCompressState=corei7_nfaExecMcClellan16_queueCompressState
+ -DnfaExecMcClellan16_queueInitState=corei7_nfaExecMcClellan16_queueInitState
+ -DnfaExecMcClellan16_reportCurrent=corei7_nfaExecMcClellan16_reportCurrent
+ -DnfaExecMcClellan16_testEOD=corei7_nfaExecMcClellan16_testEOD
+ -DnfaExecMcClellan8_B=corei7_nfaExecMcClellan8_B
+ -DnfaExecMcClellan8_Q=corei7_nfaExecMcClellan8_Q
+ -DnfaExecMcClellan8_Q2=corei7_nfaExecMcClellan8_Q2
+ -DnfaExecMcClellan8_QR=corei7_nfaExecMcClellan8_QR
+ -DnfaExecMcClellan8_SimpStream=corei7_nfaExecMcClellan8_SimpStream
+ -DnfaExecMcClellan8_expandState=corei7_nfaExecMcClellan8_expandState
+ -DnfaExecMcClellan8_inAccept=corei7_nfaExecMcClellan8_inAccept
+ -DnfaExecMcClellan8_inAnyAccept=corei7_nfaExecMcClellan8_inAnyAccept
+ -DnfaExecMcClellan8_initCompressedState=corei7_nfaExecMcClellan8_initCompressedState
+ -DnfaExecMcClellan8_queueCompressState=corei7_nfaExecMcClellan8_queueCompressState
+ -DnfaExecMcClellan8_queueInitState=corei7_nfaExecMcClellan8_queueInitState
+ -DnfaExecMcClellan8_reportCurrent=corei7_nfaExecMcClellan8_reportCurrent
+ -DnfaExecMcClellan8_testEOD=corei7_nfaExecMcClellan8_testEOD
+ -DnfaExecMcSheng16_Q=corei7_nfaExecMcSheng16_Q
+ -DnfaExecMcSheng16_Q2=corei7_nfaExecMcSheng16_Q2
+ -DnfaExecMcSheng16_QR=corei7_nfaExecMcSheng16_QR
+ -DnfaExecMcSheng16_expandState=corei7_nfaExecMcSheng16_expandState
+ -DnfaExecMcSheng16_inAccept=corei7_nfaExecMcSheng16_inAccept
+ -DnfaExecMcSheng16_inAnyAccept=corei7_nfaExecMcSheng16_inAnyAccept
+ -DnfaExecMcSheng16_initCompressedState=corei7_nfaExecMcSheng16_initCompressedState
+ -DnfaExecMcSheng16_queueCompressState=corei7_nfaExecMcSheng16_queueCompressState
+ -DnfaExecMcSheng16_queueInitState=corei7_nfaExecMcSheng16_queueInitState
+ -DnfaExecMcSheng16_reportCurrent=corei7_nfaExecMcSheng16_reportCurrent
+ -DnfaExecMcSheng16_testEOD=corei7_nfaExecMcSheng16_testEOD
+ -DnfaExecMcSheng8_Q=corei7_nfaExecMcSheng8_Q
+ -DnfaExecMcSheng8_Q2=corei7_nfaExecMcSheng8_Q2
+ -DnfaExecMcSheng8_QR=corei7_nfaExecMcSheng8_QR
+ -DnfaExecMcSheng8_expandState=corei7_nfaExecMcSheng8_expandState
+ -DnfaExecMcSheng8_inAccept=corei7_nfaExecMcSheng8_inAccept
+ -DnfaExecMcSheng8_inAnyAccept=corei7_nfaExecMcSheng8_inAnyAccept
+ -DnfaExecMcSheng8_initCompressedState=corei7_nfaExecMcSheng8_initCompressedState
+ -DnfaExecMcSheng8_queueCompressState=corei7_nfaExecMcSheng8_queueCompressState
+ -DnfaExecMcSheng8_queueInitState=corei7_nfaExecMcSheng8_queueInitState
+ -DnfaExecMcSheng8_reportCurrent=corei7_nfaExecMcSheng8_reportCurrent
+ -DnfaExecMcSheng8_testEOD=corei7_nfaExecMcSheng8_testEOD
+ -DnfaExecMpv_Q=corei7_nfaExecMpv_Q
+ -DnfaExecMpv_QueueExecRaw=corei7_nfaExecMpv_QueueExecRaw
+ -DnfaExecMpv_expandState=corei7_nfaExecMpv_expandState
+ -DnfaExecMpv_initCompressedState=corei7_nfaExecMpv_initCompressedState
+ -DnfaExecMpv_queueCompressState=corei7_nfaExecMpv_queueCompressState
+ -DnfaExecMpv_queueInitState=corei7_nfaExecMpv_queueInitState
+ -DnfaExecMpv_reportCurrent=corei7_nfaExecMpv_reportCurrent
+ -DnfaExecSheng_B=corei7_nfaExecSheng_B
+ -DnfaExecSheng_Q=corei7_nfaExecSheng_Q
+ -DnfaExecSheng_Q2=corei7_nfaExecSheng_Q2
+ -DnfaExecSheng_QR=corei7_nfaExecSheng_QR
+ -DnfaExecSheng_expandState=corei7_nfaExecSheng_expandState
+ -DnfaExecSheng_inAccept=corei7_nfaExecSheng_inAccept
+ -DnfaExecSheng_inAnyAccept=corei7_nfaExecSheng_inAnyAccept
+ -DnfaExecSheng_initCompressedState=corei7_nfaExecSheng_initCompressedState
+ -DnfaExecSheng_queueCompressState=corei7_nfaExecSheng_queueCompressState
+ -DnfaExecSheng_queueInitState=corei7_nfaExecSheng_queueInitState
+ -DnfaExecSheng_reportCurrent=corei7_nfaExecSheng_reportCurrent
+ -DnfaExecSheng_testEOD=corei7_nfaExecSheng_testEOD
+ -DnfaExecTamarama_Q=corei7_nfaExecTamarama_Q
+ -DnfaExecTamarama_Q2=corei7_nfaExecTamarama_Q2
+ -DnfaExecTamarama_QR=corei7_nfaExecTamarama_QR
+ -DnfaExecTamarama_expandState=corei7_nfaExecTamarama_expandState
+ -DnfaExecTamarama_inAccept=corei7_nfaExecTamarama_inAccept
+ -DnfaExecTamarama_inAnyAccept=corei7_nfaExecTamarama_inAnyAccept
+ -DnfaExecTamarama_queueCompressState=corei7_nfaExecTamarama_queueCompressState
+ -DnfaExecTamarama_queueInitState=corei7_nfaExecTamarama_queueInitState
+ -DnfaExecTamarama_reportCurrent=corei7_nfaExecTamarama_reportCurrent
+ -DnfaExecTamarama_testEOD=corei7_nfaExecTamarama_testEOD
+ -DnfaExecTamarama_zombie_status=corei7_nfaExecTamarama_zombie_status
+ -DnfaExpandState=corei7_nfaExpandState
+ -DnfaGetZombieStatus=corei7_nfaGetZombieStatus
+ -DnfaInAcceptState=corei7_nfaInAcceptState
+ -DnfaInAnyAcceptState=corei7_nfaInAnyAcceptState
+ -DnfaInitCompressedState=corei7_nfaInitCompressedState
+ -DnfaQueueCompressState=corei7_nfaQueueCompressState
+ -DnfaQueueExec=corei7_nfaQueueExec
+ -DnfaQueueExec2_raw=corei7_nfaQueueExec2_raw
+ -DnfaQueueExecRose=corei7_nfaQueueExecRose
+ -DnfaQueueExecToMatch=corei7_nfaQueueExecToMatch
+ -DnfaQueueExec_raw=corei7_nfaQueueExec_raw
+ -DnfaQueueInitState=corei7_nfaQueueInitState
+ -DnfaReportCurrentMatches=corei7_nfaReportCurrentMatches
+ -DnoodExec=corei7_noodExec
+ -DnoodExecStreaming=corei7_noodExecStreaming
+ -Dp_mask_arr=corei7_p_mask_arr
+ -Dp_mask_arr256=corei7_p_mask_arr256
+ -DrepeatHasMatchBitmap=corei7_repeatHasMatchBitmap
+ -DrepeatHasMatchRange=corei7_repeatHasMatchRange
+ -DrepeatHasMatchRing=corei7_repeatHasMatchRing
+ -DrepeatHasMatchSparseOptimalP=corei7_repeatHasMatchSparseOptimalP
+ -DrepeatHasMatchTrailer=corei7_repeatHasMatchTrailer
+ -DrepeatLastTopBitmap=corei7_repeatLastTopBitmap
+ -DrepeatLastTopRange=corei7_repeatLastTopRange
+ -DrepeatLastTopRing=corei7_repeatLastTopRing
+ -DrepeatLastTopSparseOptimalP=corei7_repeatLastTopSparseOptimalP
+ -DrepeatLastTopTrailer=corei7_repeatLastTopTrailer
+ -DrepeatNextMatchBitmap=corei7_repeatNextMatchBitmap
+ -DrepeatNextMatchRange=corei7_repeatNextMatchRange
+ -DrepeatNextMatchRing=corei7_repeatNextMatchRing
+ -DrepeatNextMatchSparseOptimalP=corei7_repeatNextMatchSparseOptimalP
+ -DrepeatNextMatchTrailer=corei7_repeatNextMatchTrailer
+ -DrepeatPack=corei7_repeatPack
+ -DrepeatStoreBitmap=corei7_repeatStoreBitmap
+ -DrepeatStoreRange=corei7_repeatStoreRange
+ -DrepeatStoreRing=corei7_repeatStoreRing
+ -DrepeatStoreSparseOptimalP=corei7_repeatStoreSparseOptimalP
+ -DrepeatStoreTrailer=corei7_repeatStoreTrailer
+ -DrepeatUnpack=corei7_repeatUnpack
+ -DroseAnchoredCallback=corei7_roseAnchoredCallback
+ -DroseBlockExec=corei7_roseBlockExec
+ -DroseCallback=corei7_roseCallback
+ -DroseCatchUpAll=corei7_roseCatchUpAll
+ -DroseCatchUpMPV_i=corei7_roseCatchUpMPV_i
+ -DroseCatchUpSuf=corei7_roseCatchUpSuf
+ -DroseDelayRebuildCallback=corei7_roseDelayRebuildCallback
+ -DroseFloatingCallback=corei7_roseFloatingCallback
+ -DroseHandleChainMatch=corei7_roseHandleChainMatch
+ -DroseInitState=corei7_roseInitState
+ -DroseNfaAdaptor=corei7_roseNfaAdaptor
+ -DroseNfaEarliestSom=corei7_roseNfaEarliestSom
+ -DroseReportAdaptor=corei7_roseReportAdaptor
+ -DroseRunBoundaryProgram=corei7_roseRunBoundaryProgram
+ -DroseRunFlushCombProgram=corei7_roseRunFlushCombProgram
+ -DroseRunLastFlushCombProgram=corei7_roseRunLastFlushCombProgram
+ -DroseRunProgram=corei7_roseRunProgram
+ -DroseRunProgram_l=corei7_roseRunProgram_l
+ -DroseStreamEodExec=corei7_roseStreamEodExec
+ -DroseStreamExec=corei7_roseStreamExec
+ -DrshuftiExec=corei7_rshuftiExec
+ -DrtruffleExec=corei7_rtruffleExec
+ -Drun_accel=corei7_run_accel
+ -DsetSomFromSomAware=corei7_setSomFromSomAware
+ -DshuftiDoubleExec=corei7_shuftiDoubleExec
+ -DshuftiExec=corei7_shuftiExec
+ -Dsimd_onebit_masks=corei7_simd_onebit_masks
+ -Dsize_compress_stream=corei7_size_compress_stream
+ -DstoreSomToStream=corei7_storeSomToStream
+ -Dstorecompressed128=corei7_storecompressed128
+ -Dstorecompressed256=corei7_storecompressed256
+ -Dstorecompressed32=corei7_storecompressed32
+ -Dstorecompressed384=corei7_storecompressed384
+ -Dstorecompressed512=corei7_storecompressed512
+ -Dstorecompressed64=corei7_storecompressed64
+ -DstreamInitSufPQ=corei7_streamInitSufPQ
+ -DtruffleExec=corei7_truffleExec
+ -Dvbs_mask_data=corei7_vbs_mask_data
+)
+
+SRCDIR(contrib/libs/hyperscan)
+
+SRCS(
+ src/alloc.c
+ src/crc32.c
+ src/database.c
+ src/fdr/fdr.c
+ src/fdr/teddy.c
+ src/fdr/teddy_avx2.c
+ src/hs_valid_platform.c
+ src/hs_version.c
+ src/hwlm/hwlm.c
+ src/hwlm/noodle_engine.c
+ src/nfa/accel.c
+ src/nfa/castle.c
+ src/nfa/gough.c
+ src/nfa/lbr.c
+ src/nfa/limex_64.c
+ src/nfa/limex_accel.c
+ src/nfa/limex_native.c
+ src/nfa/limex_simd128.c
+ src/nfa/limex_simd256.c
+ src/nfa/limex_simd384.c
+ src/nfa/limex_simd512.c
+ src/nfa/mcclellan.c
+ src/nfa/mcsheng.c
+ src/nfa/mcsheng_data.c
+ src/nfa/mpv.c
+ src/nfa/nfa_api_dispatch.c
+ src/nfa/repeat.c
+ src/nfa/sheng.c
+ src/nfa/shufti.c
+ src/nfa/tamarama.c
+ src/nfa/truffle.c
+ src/rose/block.c
+ src/rose/catchup.c
+ src/rose/init.c
+ src/rose/match.c
+ src/rose/program_runtime.c
+ src/rose/stream.c
+ src/runtime.c
+ src/scratch.c
+ src/som/som_runtime.c
+ src/som/som_stream.c
+ src/stream_compress.c
+ src/util/cpuid_flags.c
+ src/util/masked_move.c
+ src/util/multibit.c
+ src/util/simd_utils.c
+ src/util/state_compress.c
+)
+
+END()
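The CFLAGS block above renames every externally visible runtime symbol with a corei7_ prefix at build time. Each ISA-specific runtime (core2, corei7, avx2, avx512) is compiled from the same sources with a different prefix, so all variants can be linked into one binary without symbol collisions, and a thin dispatcher can pick the widest one the host CPU supports. A minimal sketch of that dispatch idea follows; the core2_/corei7_ entry points are the real prefixed symbols produced by these defines, while host_has_sse42() and dispatch_hs_scan() are hypothetical illustration, not part of the library:

    /* Hypothetical dispatcher over the prefixed runtimes. Only the
     * core2_/corei7_ symbols are real; the rest is illustration. */
    #include <hs/hs_runtime.h>

    hs_error_t core2_hs_scan(const hs_database_t *db, const char *data,
                             unsigned len, unsigned flags, hs_scratch_t *scratch,
                             match_event_handler cb, void *ctx);
    hs_error_t corei7_hs_scan(const hs_database_t *db, const char *data,
                              unsigned len, unsigned flags, hs_scratch_t *scratch,
                              match_event_handler cb, void *ctx);

    static int host_has_sse42(void) {      /* hypothetical helper */
        unsigned eax, ebx, ecx, edx;
        __asm__("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                        : "a"(1), "c"(0));
        return (ecx >> 20) & 1;            /* CPUID.1:ECX bit 20 = SSE4.2 */
    }

    hs_error_t dispatch_hs_scan(const hs_database_t *db, const char *data,
                                unsigned len, unsigned flags,
                                hs_scratch_t *scratch, match_event_handler cb,
                                void *ctx) {
        return host_has_sse42()
                   ? corei7_hs_scan(db, data, len, flags, scratch, cb, ctx)
                   : core2_hs_scan(db, data, len, flags, scratch, cb, ctx);
    }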
diff --git a/contrib/libs/hyperscan/src/compiler/compiler.cpp b/contrib/libs/hyperscan/src/compiler/compiler.cpp
index f5f08a4d91..5751bd64f4 100644
--- a/contrib/libs/hyperscan/src/compiler/compiler.cpp
+++ b/contrib/libs/hyperscan/src/compiler/compiler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,7 +45,7 @@
#include "parser/buildstate.h"
#include "parser/dump.h"
#include "parser/Component.h"
-#include "parser/logical_combination.h"
+#include "parser/logical_combination.h"
#include "parser/parse_error.h"
#include "parser/Parser.h" // for flags
#include "parser/position.h"
@@ -56,13 +56,13 @@
#include "parser/unsupported.h"
#include "parser/utf8_validate.h"
#include "rose/rose_build.h"
-#include "rose/rose_internal.h"
+#include "rose/rose_internal.h"
#include "som/slot_manager_dump.h"
#include "util/bytecode_ptr.h"
#include "util/compile_error.h"
#include "util/target_info.h"
#include "util/verify_types.h"
-#include "util/ue2string.h"
+#include "util/ue2string.h"
#include <algorithm>
#include <cassert>
@@ -109,66 +109,66 @@ void validateExt(const hs_expr_ext &ext) {
}
-void ParsedLitExpression::parseLiteral(const char *expression, size_t len,
- bool nocase) {
- const char *c = expression;
- for (size_t i = 0; i < len; i++) {
- lit.push_back(*c, nocase);
- c++;
- }
-}
-
-ParsedLitExpression::ParsedLitExpression(unsigned index_in,
- const char *expression,
- size_t expLength, unsigned flags,
- ReportID report)
- : expr(index_in, false, flags & HS_FLAG_SINGLEMATCH, false, false,
- SOM_NONE, report, 0, MAX_OFFSET, 0, 0, 0, false) {
- // For pure literal expression, below 'HS_FLAG_'s are unuseful:
- // DOTALL/ALLOWEMPTY/UTF8/UCP/PREFILTER/COMBINATION/QUIET/MULTILINE
-
- if (flags & ~HS_FLAG_ALL) {
- DEBUG_PRINTF("Unrecognised flag, flags=%u.\n", flags);
- throw CompileError("Unrecognised flag.");
- }
-
- // FIXME: we disallow highlander + SOM, see UE-1850.
- if ((flags & HS_FLAG_SINGLEMATCH) && (flags & HS_FLAG_SOM_LEFTMOST)) {
- throw CompileError("HS_FLAG_SINGLEMATCH is not supported in "
- "combination with HS_FLAG_SOM_LEFTMOST.");
- }
-
- // Set SOM type.
- if (flags & HS_FLAG_SOM_LEFTMOST) {
- expr.som = SOM_LEFT;
- }
-
- // Transfer expression text into ue2_literal.
- bool nocase = flags & HS_FLAG_CASELESS ? true : false;
- parseLiteral(expression, expLength, nocase);
-
-}
-
+void ParsedLitExpression::parseLiteral(const char *expression, size_t len,
+ bool nocase) {
+ const char *c = expression;
+ for (size_t i = 0; i < len; i++) {
+ lit.push_back(*c, nocase);
+ c++;
+ }
+}
+
+ParsedLitExpression::ParsedLitExpression(unsigned index_in,
+ const char *expression,
+ size_t expLength, unsigned flags,
+ ReportID report)
+ : expr(index_in, false, flags & HS_FLAG_SINGLEMATCH, false, false,
+ SOM_NONE, report, 0, MAX_OFFSET, 0, 0, 0, false) {
+ // For pure literal expression, below 'HS_FLAG_'s are unuseful:
+ // DOTALL/ALLOWEMPTY/UTF8/UCP/PREFILTER/COMBINATION/QUIET/MULTILINE
+
+ if (flags & ~HS_FLAG_ALL) {
+ DEBUG_PRINTF("Unrecognised flag, flags=%u.\n", flags);
+ throw CompileError("Unrecognised flag.");
+ }
+
+ // FIXME: we disallow highlander + SOM, see UE-1850.
+ if ((flags & HS_FLAG_SINGLEMATCH) && (flags & HS_FLAG_SOM_LEFTMOST)) {
+ throw CompileError("HS_FLAG_SINGLEMATCH is not supported in "
+ "combination with HS_FLAG_SOM_LEFTMOST.");
+ }
+
+ // Set SOM type.
+ if (flags & HS_FLAG_SOM_LEFTMOST) {
+ expr.som = SOM_LEFT;
+ }
+
+ // Transfer expression text into ue2_literal.
+ bool nocase = flags & HS_FLAG_CASELESS ? true : false;
+ parseLiteral(expression, expLength, nocase);
+
+}
+
ParsedExpression::ParsedExpression(unsigned index_in, const char *expression,
unsigned flags, ReportID report,
const hs_expr_ext *ext)
: expr(index_in, flags & HS_FLAG_ALLOWEMPTY, flags & HS_FLAG_SINGLEMATCH,
false, flags & HS_FLAG_PREFILTER, SOM_NONE, report, 0, MAX_OFFSET,
- 0, 0, 0, flags & HS_FLAG_QUIET) {
- // We disallow SOM + Quiet.
- if ((flags & HS_FLAG_QUIET) && (flags & HS_FLAG_SOM_LEFTMOST)) {
- throw CompileError("HS_FLAG_QUIET is not supported in "
- "combination with HS_FLAG_SOM_LEFTMOST.");
- }
- flags &= ~HS_FLAG_QUIET;
+ 0, 0, 0, flags & HS_FLAG_QUIET) {
+ // We disallow SOM + Quiet.
+ if ((flags & HS_FLAG_QUIET) && (flags & HS_FLAG_SOM_LEFTMOST)) {
+ throw CompileError("HS_FLAG_QUIET is not supported in "
+ "combination with HS_FLAG_SOM_LEFTMOST.");
+ }
+ flags &= ~HS_FLAG_QUIET;
ParseMode mode(flags);
component = parse(expression, mode);
expr.utf8 = mode.utf8; /* utf8 may be set by parse() */
- const size_t len = strlen(expression);
- if (expr.utf8 && !isValidUtf8(expression, len)) {
+ const size_t len = strlen(expression);
+ if (expr.utf8 && !isValidUtf8(expression, len)) {
throw ParseError("Expression is not valid UTF-8.");
}
@@ -283,45 +283,45 @@ void addExpression(NG &ng, unsigned index, const char *expression,
DEBUG_PRINTF("index=%u, id=%u, flags=%u, expr='%s'\n", index, id, flags,
expression);
- if (flags & HS_FLAG_COMBINATION) {
- if (flags & ~(HS_FLAG_COMBINATION | HS_FLAG_QUIET |
- HS_FLAG_SINGLEMATCH)) {
- throw CompileError("only HS_FLAG_QUIET and HS_FLAG_SINGLEMATCH "
- "are supported in combination "
- "with HS_FLAG_COMBINATION.");
- }
- if (flags & HS_FLAG_QUIET) {
- DEBUG_PRINTF("skip QUIET logical combination expression %u\n", id);
- } else {
- u32 ekey = INVALID_EKEY;
- u64a min_offset = 0;
- u64a max_offset = MAX_OFFSET;
- if (flags & HS_FLAG_SINGLEMATCH) {
- ekey = ng.rm.getExhaustibleKey(id);
- }
- if (ext) {
- validateExt(*ext);
- if (ext->flags & ~(HS_EXT_FLAG_MIN_OFFSET |
- HS_EXT_FLAG_MAX_OFFSET)) {
- throw CompileError("only HS_EXT_FLAG_MIN_OFFSET and "
- "HS_EXT_FLAG_MAX_OFFSET extra flags "
- "are supported in combination "
- "with HS_FLAG_COMBINATION.");
- }
- if (ext->flags & HS_EXT_FLAG_MIN_OFFSET) {
- min_offset = ext->min_offset;
- }
- if (ext->flags & HS_EXT_FLAG_MAX_OFFSET) {
- max_offset = ext->max_offset;
- }
- }
- ng.rm.pl.parseLogicalCombination(id, expression, ekey, min_offset,
- max_offset);
- DEBUG_PRINTF("parsed logical combination expression %u\n", id);
- }
- return;
- }
-
+ if (flags & HS_FLAG_COMBINATION) {
+ if (flags & ~(HS_FLAG_COMBINATION | HS_FLAG_QUIET |
+ HS_FLAG_SINGLEMATCH)) {
+ throw CompileError("only HS_FLAG_QUIET and HS_FLAG_SINGLEMATCH "
+ "are supported in combination "
+ "with HS_FLAG_COMBINATION.");
+ }
+ if (flags & HS_FLAG_QUIET) {
+ DEBUG_PRINTF("skip QUIET logical combination expression %u\n", id);
+ } else {
+ u32 ekey = INVALID_EKEY;
+ u64a min_offset = 0;
+ u64a max_offset = MAX_OFFSET;
+ if (flags & HS_FLAG_SINGLEMATCH) {
+ ekey = ng.rm.getExhaustibleKey(id);
+ }
+ if (ext) {
+ validateExt(*ext);
+ if (ext->flags & ~(HS_EXT_FLAG_MIN_OFFSET |
+ HS_EXT_FLAG_MAX_OFFSET)) {
+ throw CompileError("only HS_EXT_FLAG_MIN_OFFSET and "
+ "HS_EXT_FLAG_MAX_OFFSET extra flags "
+ "are supported in combination "
+ "with HS_FLAG_COMBINATION.");
+ }
+ if (ext->flags & HS_EXT_FLAG_MIN_OFFSET) {
+ min_offset = ext->min_offset;
+ }
+ if (ext->flags & HS_EXT_FLAG_MAX_OFFSET) {
+ max_offset = ext->max_offset;
+ }
+ }
+ ng.rm.pl.parseLogicalCombination(id, expression, ekey, min_offset,
+ max_offset);
+ DEBUG_PRINTF("parsed logical combination expression %u\n", id);
+ }
+ return;
+ }
+
// Ensure that our pattern isn't too long (in characters).
if (strlen(expression) > cc.grey.limitPatternLength) {
throw CompileError("Pattern length exceeds limit.");
@@ -387,48 +387,48 @@ void addExpression(NG &ng, unsigned index, const char *expression,
}
}
-void addLitExpression(NG &ng, unsigned index, const char *expression,
- unsigned flags, const hs_expr_ext *ext, ReportID id,
- size_t expLength) {
- assert(expression);
- const CompileContext &cc = ng.cc;
- DEBUG_PRINTF("index=%u, id=%u, flags=%u, expr='%s', len='%zu'\n", index,
- id, flags, expression, expLength);
-
- // Extended parameters are not supported for pure literal patterns.
- if (ext && ext->flags != 0LLU) {
- throw CompileError("Extended parameters are not supported for pure "
- "literal matching API.");
- }
-
- // Ensure that our pattern isn't too long (in characters).
- if (expLength > cc.grey.limitPatternLength) {
- throw CompileError("Pattern length exceeds limit.");
- }
-
- // filter out flags not supported by pure literal API.
- u64a not_supported = HS_FLAG_DOTALL | HS_FLAG_ALLOWEMPTY | HS_FLAG_UTF8 |
- HS_FLAG_UCP | HS_FLAG_PREFILTER | HS_FLAG_COMBINATION |
- HS_FLAG_QUIET | HS_FLAG_MULTILINE;
-
- if (flags & not_supported) {
- throw CompileError("Only HS_FLAG_CASELESS, HS_FLAG_SINGLEMATCH and "
- "HS_FLAG_SOM_LEFTMOST are supported in literal API.");
- }
-
- // This expression must be a pure literal, we can build ue2_literal
- // directly based on expression text.
- ParsedLitExpression ple(index, expression, expLength, flags, id);
-
- // Feed the ue2_literal into Rose.
- const auto &expr = ple.expr;
- if (ng.addLiteral(ple.lit, expr.index, expr.report, expr.highlander,
- expr.som, expr.quiet)) {
- DEBUG_PRINTF("took pure literal\n");
- return;
- }
-}
-
+void addLitExpression(NG &ng, unsigned index, const char *expression,
+ unsigned flags, const hs_expr_ext *ext, ReportID id,
+ size_t expLength) {
+ assert(expression);
+ const CompileContext &cc = ng.cc;
+ DEBUG_PRINTF("index=%u, id=%u, flags=%u, expr='%s', len='%zu'\n", index,
+ id, flags, expression, expLength);
+
+ // Extended parameters are not supported for pure literal patterns.
+ if (ext && ext->flags != 0LLU) {
+ throw CompileError("Extended parameters are not supported for pure "
+ "literal matching API.");
+ }
+
+ // Ensure that our pattern isn't too long (in characters).
+ if (expLength > cc.grey.limitPatternLength) {
+ throw CompileError("Pattern length exceeds limit.");
+ }
+
+ // filter out flags not supported by pure literal API.
+ u64a not_supported = HS_FLAG_DOTALL | HS_FLAG_ALLOWEMPTY | HS_FLAG_UTF8 |
+ HS_FLAG_UCP | HS_FLAG_PREFILTER | HS_FLAG_COMBINATION |
+ HS_FLAG_QUIET | HS_FLAG_MULTILINE;
+
+ if (flags & not_supported) {
+ throw CompileError("Only HS_FLAG_CASELESS, HS_FLAG_SINGLEMATCH and "
+ "HS_FLAG_SOM_LEFTMOST are supported in literal API.");
+ }
+
+ // This expression must be a pure literal, we can build ue2_literal
+ // directly based on expression text.
+ ParsedLitExpression ple(index, expression, expLength, flags, id);
+
+ // Feed the ue2_literal into Rose.
+ const auto &expr = ple.expr;
+ if (ng.addLiteral(ple.lit, expr.index, expr.report, expr.highlander,
+ expr.som, expr.quiet)) {
+ DEBUG_PRINTF("took pure literal\n");
+ return;
+ }
+}
+
static
bytecode_ptr<RoseEngine> generateRoseEngine(NG &ng) {
const u32 minWidth =
@@ -458,9 +458,9 @@ platform_t target_to_platform(const target_t &target_info) {
if (!target_info.has_avx512()) {
p |= HS_PLATFORM_NOAVX512;
}
- if (!target_info.has_avx512vbmi()) {
- p |= HS_PLATFORM_NOAVX512VBMI;
- }
+ if (!target_info.has_avx512vbmi()) {
+ p |= HS_PLATFORM_NOAVX512VBMI;
+ }
return p;
}
@@ -503,13 +503,13 @@ hs_database_t *dbCreate(const char *in_bytecode, size_t len, u64a platform) {
}
-struct hs_database *build(NG &ng, unsigned int *length, u8 pureFlag) {
+struct hs_database *build(NG &ng, unsigned int *length, u8 pureFlag) {
assert(length);
auto rose = generateRoseEngine(ng);
- struct RoseEngine *roseHead = rose.get();
- roseHead->pureLiteral = pureFlag;
-
+ struct RoseEngine *roseHead = rose.get();
+ roseHead->pureLiteral = pureFlag;
+
if (!rose) {
throw CompileError("Unable to generate bytecode.");
}
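This hunk of compiler.cpp restores the logical-combination front end: with HS_FLAG_COMBINATION the expression text is a boolean formula over other expression IDs, only HS_FLAG_QUIET and HS_FLAG_SINGLEMATCH may accompany it, and only the MIN_OFFSET/MAX_OFFSET extended parameters are honoured. A minimal usage sketch (error handling elided; the pattern strings are illustrative):

    #include <hs/hs.h>

    /* Report only when both sub-patterns match: ids 0 and 1 are QUIET,
     * so the sole visible report comes from combination id 2 ("0 & 1"). */
    const char *exprs[] = { "foo\\d+", "bar[a-z]*", "0 & 1" };
    unsigned flags[]    = { HS_FLAG_QUIET, HS_FLAG_QUIET, HS_FLAG_COMBINATION };
    unsigned ids[]      = { 0, 1, 2 };

    hs_database_t *db = NULL;
    hs_compile_error_t *err = NULL;
    if (hs_compile_multi(exprs, flags, ids, 3, HS_MODE_BLOCK,
                         NULL, &db, &err) != HS_SUCCESS) {
        hs_free_compile_error(err);
    }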
diff --git a/contrib/libs/hyperscan/src/compiler/compiler.h b/contrib/libs/hyperscan/src/compiler/compiler.h
index f6d5200f3b..b42cb1425b 100644
--- a/contrib/libs/hyperscan/src/compiler/compiler.h
+++ b/contrib/libs/hyperscan/src/compiler/compiler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -38,7 +38,7 @@
#include "compiler/expression_info.h"
#include "parser/Component.h"
#include "util/noncopyable.h"
-#include "util/ue2string.h"
+#include "util/ue2string.h"
#include <memory>
@@ -67,22 +67,22 @@ public:
std::unique_ptr<Component> component;
};
-
-/** \brief Class gathering together the pieces of a parsed lit-expression. */
-class ParsedLitExpression : noncopyable {
-public:
- ParsedLitExpression(unsigned index, const char *expression,
- size_t expLength, unsigned flags, ReportID report);
-
- void parseLiteral(const char *expression, size_t len, bool nocase);
-
- /** \brief Expression information (from flags, extparam etc) */
- ExpressionInfo expr;
-
- /** \brief Format the lit-expression text into Hyperscan literal type. */
- ue2_literal lit;
-};
-
+
+/** \brief Class gathering together the pieces of a parsed lit-expression. */
+class ParsedLitExpression : noncopyable {
+public:
+ ParsedLitExpression(unsigned index, const char *expression,
+ size_t expLength, unsigned flags, ReportID report);
+
+ void parseLiteral(const char *expression, size_t len, bool nocase);
+
+ /** \brief Expression information (from flags, extparam etc) */
+ ExpressionInfo expr;
+
+ /** \brief Format the lit-expression text into Hyperscan literal type. */
+ ue2_literal lit;
+};
+
/**
* \brief Class gathering together the pieces of an expression that has been
* built into an NFA graph.
@@ -116,10 +116,10 @@ struct BuiltExpression {
void addExpression(NG &ng, unsigned index, const char *expression,
unsigned flags, const hs_expr_ext *ext, ReportID report);
-void addLitExpression(NG &ng, unsigned index, const char *expression,
- unsigned flags, const hs_expr_ext *ext, ReportID id,
- size_t expLength);
-
+void addLitExpression(NG &ng, unsigned index, const char *expression,
+ unsigned flags, const hs_expr_ext *ext, ReportID id,
+ size_t expLength);
+
/**
* Build a Hyperscan database out of the expressions we've been given. A
* fatal error will result in an exception being thrown.
@@ -128,13 +128,13 @@ void addLitExpression(NG &ng, unsigned index, const char *expression,
* The global NG object.
* @param[out] length
* The number of bytes occupied by the compiled structure.
- * @param pureFlag
- * The flag indicating invocation from literal API or not.
+ * @param pureFlag
+ * The flag indicating invocation from literal API or not.
* @return
* The compiled structure. Should be deallocated with the
* hs_database_free() function.
*/
-struct hs_database *build(NG &ng, unsigned int *length, u8 pureFlag);
+struct hs_database *build(NG &ng, unsigned int *length, u8 pureFlag);
/**
* Constructs an NFA graph from the given expression tree.
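The addLitExpression() declaration and the pureFlag argument to build() restored above are the compiler-side plumbing of the pure-literal API: patterns are raw byte strings (they may contain NUL, hence the explicit length), no regex syntax is interpreted, and per the restored checks only HS_FLAG_CASELESS, HS_FLAG_SINGLEMATCH and HS_FLAG_SOM_LEFTMOST are accepted, with extended parameters rejected. A minimal sketch using hs_compile_lit(), the public entry point this serves:

    #include <hs/hs.h>

    /* Compile the 3-byte literal "a\0b" (embedded NUL) caselessly. */
    hs_database_t *db = NULL;
    hs_compile_error_t *err = NULL;
    hs_error_t rc = hs_compile_lit("a\0b", HS_FLAG_CASELESS, 3,
                                   HS_MODE_BLOCK, NULL, &db, &err);
    if (rc != HS_SUCCESS) {
        hs_free_compile_error(err);   /* err->message holds the reason */
    }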
diff --git a/contrib/libs/hyperscan/src/compiler/expression_info.h b/contrib/libs/hyperscan/src/compiler/expression_info.h
index bc34034712..fefb3b58af 100644
--- a/contrib/libs/hyperscan/src/compiler/expression_info.h
+++ b/contrib/libs/hyperscan/src/compiler/expression_info.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, Intel Corporation
+ * Copyright (c) 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -46,12 +46,12 @@ public:
bool highlander_in, bool utf8_in, bool prefilter_in,
som_type som_in, ReportID report_in, u64a min_offset_in,
u64a max_offset_in, u64a min_length_in, u32 edit_distance_in,
- u32 hamm_distance_in, bool quiet_in)
+ u32 hamm_distance_in, bool quiet_in)
: index(index_in), report(report_in), allow_vacuous(allow_vacuous_in),
highlander(highlander_in), utf8(utf8_in), prefilter(prefilter_in),
som(som_in), min_offset(min_offset_in), max_offset(max_offset_in),
min_length(min_length_in), edit_distance(edit_distance_in),
- hamm_distance(hamm_distance_in), quiet(quiet_in) {}
+ hamm_distance(hamm_distance_in), quiet(quiet_in) {}
/**
* \brief Index of the expression represented by this graph.
@@ -98,9 +98,9 @@ public:
*/
u32 edit_distance;
u32 hamm_distance;
-
- /** \brief Quiet on match. */
- bool quiet;
+
+ /** \brief Quiet on match. */
+ bool quiet;
};
}
diff --git a/contrib/libs/hyperscan/src/database.c b/contrib/libs/hyperscan/src/database.c
index 3842091aa8..6adf1419dd 100644
--- a/contrib/libs/hyperscan/src/database.c
+++ b/contrib/libs/hyperscan/src/database.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -114,9 +114,9 @@ hs_error_t HS_CDECL hs_serialize_database(const hs_database_t *db, char **bytes,
static
hs_error_t db_check_platform(const u64a p) {
if (p != hs_current_platform
- && p != (hs_current_platform | hs_current_platform_no_avx2)
- && p != (hs_current_platform | hs_current_platform_no_avx512)
- && p != (hs_current_platform | hs_current_platform_no_avx512vbmi)) {
+ && p != (hs_current_platform | hs_current_platform_no_avx2)
+ && p != (hs_current_platform | hs_current_platform_no_avx512)
+ && p != (hs_current_platform | hs_current_platform_no_avx512vbmi)) {
return HS_DB_PLATFORM_ERROR;
}
// passed all checks
@@ -371,11 +371,11 @@ hs_error_t print_database_string(char **s, u32 version, const platform_t plat,
u8 minor = (version >> 16) & 0xff;
u8 major = (version >> 24) & 0xff;
- const char *features = (plat & HS_PLATFORM_NOAVX512VBMI)
- ? (plat & HS_PLATFORM_NOAVX512)
- ? (plat & HS_PLATFORM_NOAVX2) ? "" : "AVX2"
- : "AVX512"
- : "AVX512VBMI";
+ const char *features = (plat & HS_PLATFORM_NOAVX512VBMI)
+ ? (plat & HS_PLATFORM_NOAVX512)
+ ? (plat & HS_PLATFORM_NOAVX2) ? "" : "AVX2"
+ : "AVX512"
+ : "AVX512VBMI";
const char *mode = NULL;
diff --git a/contrib/libs/hyperscan/src/database.h b/contrib/libs/hyperscan/src/database.h
index 376bcb5ee5..f122f97be7 100644
--- a/contrib/libs/hyperscan/src/database.h
+++ b/contrib/libs/hyperscan/src/database.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -55,7 +55,7 @@ extern "C"
#define HS_PLATFORM_NOAVX2 (4<<13)
#define HS_PLATFORM_NOAVX512 (8<<13)
-#define HS_PLATFORM_NOAVX512VBMI (0x10<<13)
+#define HS_PLATFORM_NOAVX512VBMI (0x10<<13)
/** \brief Platform features bitmask. */
typedef u64a platform_t;
@@ -68,9 +68,9 @@ const platform_t hs_current_platform = {
#if !defined(HAVE_AVX512)
HS_PLATFORM_NOAVX512 |
#endif
-#if !defined(HAVE_AVX512VBMI)
- HS_PLATFORM_NOAVX512VBMI |
-#endif
+#if !defined(HAVE_AVX512VBMI)
+ HS_PLATFORM_NOAVX512VBMI |
+#endif
0,
};
@@ -78,23 +78,23 @@ static UNUSED
const platform_t hs_current_platform_no_avx2 = {
HS_PLATFORM_NOAVX2 |
HS_PLATFORM_NOAVX512 |
- HS_PLATFORM_NOAVX512VBMI |
+ HS_PLATFORM_NOAVX512VBMI |
0,
};
static UNUSED
const platform_t hs_current_platform_no_avx512 = {
HS_PLATFORM_NOAVX512 |
- HS_PLATFORM_NOAVX512VBMI |
+ HS_PLATFORM_NOAVX512VBMI |
+ 0,
+};
+
+static UNUSED
+const platform_t hs_current_platform_no_avx512vbmi = {
+ HS_PLATFORM_NOAVX512VBMI |
0,
};
-static UNUSED
-const platform_t hs_current_platform_no_avx512vbmi = {
- HS_PLATFORM_NOAVX512VBMI |
- 0,
-};
-
/*
* a header to enclose the actual bytecode - useful for keeping info about the
* compiled data.
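database.h gains the HS_PLATFORM_NOAVX512VBMI bit and a matching hs_current_platform_no_avx512vbmi constant, and db_check_platform() in database.c now accepts any database whose required features are a subset of the host's. In practice that means a database stays loadable on weaker CPUs if it is compiled for a reduced target. A sketch, assuming the standard platform API (hs_populate_platform() and the HS_CPU_FEATURES_* bits):

    #include <hs/hs.h>

    /* Compile for this host, but mask out AVX512VBMI so the database
     * also deserialises on CPUs without that extension. */
    hs_platform_info_t plat;
    hs_populate_platform(&plat);
    plat.cpu_features &= ~HS_CPU_FEATURES_AVX512VBMI;

    hs_database_t *db = NULL;
    hs_compile_error_t *err = NULL;
    hs_compile("foo(bar)?", 0, HS_MODE_BLOCK, &plat, &db, &err);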
diff --git a/contrib/libs/hyperscan/src/fdr/fdr_compile.cpp b/contrib/libs/hyperscan/src/fdr/fdr_compile.cpp
index be633c5ecd..fcfc08638e 100644
--- a/contrib/libs/hyperscan/src/fdr/fdr_compile.cpp
+++ b/contrib/libs/hyperscan/src/fdr/fdr_compile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -282,8 +282,8 @@ const array<double, 100> Scorer::count_lut{{
}};
const array<double, 9> Scorer::len_lut{{
- 0, pow(1, -3.0), pow(2, -3.0), pow(3, -3.0), pow(4, -3.0),
- pow(5, -3.0), pow(6, -3.0), pow(7, -3.0), pow(8, -3.0)}};
+ 0, pow(1, -3.0), pow(2, -3.0), pow(3, -3.0), pow(4, -3.0),
+ pow(5, -3.0), pow(6, -3.0), pow(7, -3.0), pow(8, -3.0)}};
/**
* Returns true if the two given literals should be placed in the same chunk as
diff --git a/contrib/libs/hyperscan/src/fdr/fdr_confirm.h b/contrib/libs/hyperscan/src/fdr/fdr_confirm.h
index 375213805b..a23082cc6d 100644
--- a/contrib/libs/hyperscan/src/fdr/fdr_confirm.h
+++ b/contrib/libs/hyperscan/src/fdr/fdr_confirm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/fdr/fdr_confirm_compile.cpp b/contrib/libs/hyperscan/src/fdr/fdr_confirm_compile.cpp
index d02a2463a8..8e3690895e 100644
--- a/contrib/libs/hyperscan/src/fdr/fdr_confirm_compile.cpp
+++ b/contrib/libs/hyperscan/src/fdr/fdr_confirm_compile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/fdr/fdr_confirm_runtime.h b/contrib/libs/hyperscan/src/fdr/fdr_confirm_runtime.h
index e9f804879d..5a2164952c 100644
--- a/contrib/libs/hyperscan/src/fdr/fdr_confirm_runtime.h
+++ b/contrib/libs/hyperscan/src/fdr/fdr_confirm_runtime.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
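The teddy.c hunks that follow restore the AVX512VBMI "strong teddy" literal-matcher path. Its CONF_CHUNK_64/CONF_CHUNK_32 macros share one pattern: a chunk of the match mask is all-ones when nothing hit, so after inverting it each set bit marks a candidate that still needs exact confirmation against the real literals. A hypothetical sketch of that bit walk, with illustrative names (the library does this inside its conf_fn callbacks):

    #include <stdint.h>

    typedef void (*confirm_fn)(uint32_t bit, void *ctx);

    /* Visit every candidate bit, lowest-first, clearing as we go. */
    static void walk_candidates(uint64_t chunk, confirm_fn confirm, void *ctx) {
        chunk = ~chunk;                           /* set bits = candidates */
        while (chunk) {
            uint32_t bit = (uint32_t)__builtin_ctzll(chunk);
            chunk &= chunk - 1;                   /* clear lowest set bit  */
            confirm(bit, ctx);                    /* exact verification    */
        }
    }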
diff --git a/contrib/libs/hyperscan/src/fdr/teddy.c b/contrib/libs/hyperscan/src/fdr/teddy.c
index 28fb5c9668..e6f5476198 100644
--- a/contrib/libs/hyperscan/src/fdr/teddy.c
+++ b/contrib/libs/hyperscan/src/fdr/teddy.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -74,30 +74,30 @@ const u8 ALIGN_DIRECTIVE p_mask_arr[17][32] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
};
-#if defined(HAVE_AVX512VBMI) // VBMI strong teddy
-
-#define CONF_CHUNK_64(chunk, bucket, off, reason, pt, conf_fn) \
-do { \
- if (unlikely(chunk != ones_u64a)) { \
- chunk = ~chunk; \
- conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
- &control, &last_match); \
- CHECK_HWLM_TERMINATE_MATCHING; \
- } \
-} while(0)
-
-#define CONF_CHUNK_32(chunk, bucket, off, reason, pt, conf_fn) \
-do { \
- if (unlikely(chunk != ones_u32)) { \
- chunk = ~chunk; \
- conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
- &control, &last_match); \
- CHECK_HWLM_TERMINATE_MATCHING; \
- } \
-} while(0)
-
-#else
-
+#if defined(HAVE_AVX512VBMI) // VBMI strong teddy
+
+#define CONF_CHUNK_64(chunk, bucket, off, reason, pt, conf_fn) \
+do { \
+ if (unlikely(chunk != ones_u64a)) { \
+ chunk = ~chunk; \
+ conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
+ &control, &last_match); \
+ CHECK_HWLM_TERMINATE_MATCHING; \
+ } \
+} while(0)
+
+#define CONF_CHUNK_32(chunk, bucket, off, reason, pt, conf_fn) \
+do { \
+ if (unlikely(chunk != ones_u32)) { \
+ chunk = ~chunk; \
+ conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
+ &control, &last_match); \
+ CHECK_HWLM_TERMINATE_MATCHING; \
+ } \
+} while(0)
+
+#else
+
#define CONF_CHUNK_64(chunk, bucket, off, reason, conf_fn) \
do { \
if (unlikely(chunk != ones_u64a)) { \
@@ -118,278 +118,278 @@ do { \
} \
} while(0)
-#endif
+#endif
+
+#if defined(HAVE_AVX512VBMI) // VBMI strong teddy
+
+#ifdef ARCH_64_BIT
+#define CONFIRM_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
+do { \
+ if (unlikely(diff512(var, ones512()))) { \
+ m128 p128_0 = extract128from512(var, 0); \
+ m128 p128_1 = extract128from512(var, 1); \
+ m128 p128_2 = extract128from512(var, 2); \
+ m128 p128_3 = extract128from512(var, 3); \
+ u64a part1 = movq(p128_0); \
+ u64a part2 = movq(rshiftbyte_m128(p128_0, 8)); \
+ u64a part3 = movq(p128_1); \
+ u64a part4 = movq(rshiftbyte_m128(p128_1, 8)); \
+ u64a part5 = movq(p128_2); \
+ u64a part6 = movq(rshiftbyte_m128(p128_2, 8)); \
+ u64a part7 = movq(p128_3); \
+ u64a part8 = movq(rshiftbyte_m128(p128_3, 8)); \
+ CONF_CHUNK_64(part1, bucket, offset, reason, pt, conf_fn); \
+ CONF_CHUNK_64(part2, bucket, offset + 8, reason, pt, conf_fn); \
+ CONF_CHUNK_64(part3, bucket, offset + 16, reason, pt, conf_fn); \
+ CONF_CHUNK_64(part4, bucket, offset + 24, reason, pt, conf_fn); \
+ CONF_CHUNK_64(part5, bucket, offset + 32, reason, pt, conf_fn); \
+ CONF_CHUNK_64(part6, bucket, offset + 40, reason, pt, conf_fn); \
+ CONF_CHUNK_64(part7, bucket, offset + 48, reason, pt, conf_fn); \
+ CONF_CHUNK_64(part8, bucket, offset + 56, reason, pt, conf_fn); \
+ } \
+} while(0)
+#else
+#define CONFIRM_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
+do { \
+ if (unlikely(diff512(var, ones512()))) { \
+ m128 p128_0 = extract128from512(var, 0); \
+ m128 p128_1 = extract128from512(var, 1); \
+ m128 p128_2 = extract128from512(var, 2); \
+ m128 p128_3 = extract128from512(var, 3); \
+ u32 part1 = movd(p128_0); \
+ u32 part2 = movd(rshiftbyte_m128(p128_0, 4)); \
+ u32 part3 = movd(rshiftbyte_m128(p128_0, 8)); \
+ u32 part4 = movd(rshiftbyte_m128(p128_0, 12)); \
+ u32 part5 = movd(p128_1); \
+ u32 part6 = movd(rshiftbyte_m128(p128_1, 4)); \
+ u32 part7 = movd(rshiftbyte_m128(p128_1, 8)); \
+ u32 part8 = movd(rshiftbyte_m128(p128_1, 12)); \
+ u32 part9 = movd(p128_2); \
+ u32 part10 = movd(rshiftbyte_m128(p128_2, 4)); \
+ u32 part11 = movd(rshiftbyte_m128(p128_2, 8)); \
+ u32 part12 = movd(rshiftbyte_m128(p128_2, 12)); \
+ u32 part13 = movd(p128_3); \
+ u32 part14 = movd(rshiftbyte_m128(p128_3, 4)); \
+ u32 part15 = movd(rshiftbyte_m128(p128_3, 8)); \
+ u32 part16 = movd(rshiftbyte_m128(p128_3, 12)); \
+ CONF_CHUNK_32(part1, bucket, offset, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part2, bucket, offset + 4, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part3, bucket, offset + 8, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part4, bucket, offset + 12, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part5, bucket, offset + 16, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part6, bucket, offset + 20, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part7, bucket, offset + 24, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part8, bucket, offset + 28, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part9, bucket, offset + 32, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part10, bucket, offset + 36, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part11, bucket, offset + 40, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part12, bucket, offset + 44, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part13, bucket, offset + 48, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part14, bucket, offset + 52, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part15, bucket, offset + 56, reason, pt, conf_fn); \
+ CONF_CHUNK_32(part16, bucket, offset + 60, reason, pt, conf_fn); \
+ } \
+} while(0)
+#endif
+
+#define PREP_SHUF_MASK \
+ m512 lo = and512(val, *lo_mask); \
+ m512 hi = and512(rshift64_m512(val, 4), *lo_mask)
+
+#define TEDDY_VBMI_PSHUFB_OR_M1 \
+ m512 shuf_or_b0 = or512(pshufb_m512(dup_mask[0], lo), \
+ pshufb_m512(dup_mask[1], hi));
+
+#define TEDDY_VBMI_PSHUFB_OR_M2 \
+ TEDDY_VBMI_PSHUFB_OR_M1 \
+ m512 shuf_or_b1 = or512(pshufb_m512(dup_mask[2], lo), \
+ pshufb_m512(dup_mask[3], hi));
+
+#define TEDDY_VBMI_PSHUFB_OR_M3 \
+ TEDDY_VBMI_PSHUFB_OR_M2 \
+ m512 shuf_or_b2 = or512(pshufb_m512(dup_mask[4], lo), \
+ pshufb_m512(dup_mask[5], hi));
+
+#define TEDDY_VBMI_PSHUFB_OR_M4 \
+ TEDDY_VBMI_PSHUFB_OR_M3 \
+ m512 shuf_or_b3 = or512(pshufb_m512(dup_mask[6], lo), \
+ pshufb_m512(dup_mask[7], hi));
+
+#define TEDDY_VBMI_SL1_MASK 0xfffffffffffffffeULL
+#define TEDDY_VBMI_SL2_MASK 0xfffffffffffffffcULL
+#define TEDDY_VBMI_SL3_MASK 0xfffffffffffffff8ULL
+
+#define TEDDY_VBMI_SHIFT_M1
+
+#define TEDDY_VBMI_SHIFT_M2 \
+ TEDDY_VBMI_SHIFT_M1 \
+ m512 sl1 = maskz_vpermb512(TEDDY_VBMI_SL1_MASK, sl_msk[0], shuf_or_b1);
+
+#define TEDDY_VBMI_SHIFT_M3 \
+ TEDDY_VBMI_SHIFT_M2 \
+ m512 sl2 = maskz_vpermb512(TEDDY_VBMI_SL2_MASK, sl_msk[1], shuf_or_b2);
+
+#define TEDDY_VBMI_SHIFT_M4 \
+ TEDDY_VBMI_SHIFT_M3 \
+ m512 sl3 = maskz_vpermb512(TEDDY_VBMI_SL3_MASK, sl_msk[2], shuf_or_b3);
+
+#define SHIFT_OR_M1 \
+ shuf_or_b0
+
+#define SHIFT_OR_M2 \
+ or512(sl1, SHIFT_OR_M1)
+
+#define SHIFT_OR_M3 \
+ or512(sl2, SHIFT_OR_M2)
+
+#define SHIFT_OR_M4 \
+ or512(sl3, SHIFT_OR_M3)
+
+static really_inline
+m512 prep_conf_teddy_m1(const m512 *lo_mask, const m512 *dup_mask,
+ UNUSED const m512 *sl_msk, const m512 val) {
+ PREP_SHUF_MASK;
+ TEDDY_VBMI_PSHUFB_OR_M1;
+ TEDDY_VBMI_SHIFT_M1;
+ return SHIFT_OR_M1;
+}
+
+static really_inline
+m512 prep_conf_teddy_m2(const m512 *lo_mask, const m512 *dup_mask,
+ const m512 *sl_msk, const m512 val) {
+ PREP_SHUF_MASK;
+ TEDDY_VBMI_PSHUFB_OR_M2;
+ TEDDY_VBMI_SHIFT_M2;
+ return SHIFT_OR_M2;
+}
+
+static really_inline
+m512 prep_conf_teddy_m3(const m512 *lo_mask, const m512 *dup_mask,
+ const m512 *sl_msk, const m512 val) {
+ PREP_SHUF_MASK;
+ TEDDY_VBMI_PSHUFB_OR_M3;
+ TEDDY_VBMI_SHIFT_M3;
+ return SHIFT_OR_M3;
+}
+
+static really_inline
+m512 prep_conf_teddy_m4(const m512 *lo_mask, const m512 *dup_mask,
+ const m512 *sl_msk, const m512 val) {
+ PREP_SHUF_MASK;
+ TEDDY_VBMI_PSHUFB_OR_M4;
+ TEDDY_VBMI_SHIFT_M4;
+ return SHIFT_OR_M4;
+}
+
+#define PREP_CONF_FN(val, n) \
+ prep_conf_teddy_m##n(&lo_mask, dup_mask, sl_msk, val)
+
+#define TEDDY_VBMI_SL1_POS 15
+#define TEDDY_VBMI_SL2_POS 14
+#define TEDDY_VBMI_SL3_POS 13
+
+#define TEDDY_VBMI_LOAD_SHIFT_MASK_M1
+
+#define TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
+ TEDDY_VBMI_LOAD_SHIFT_MASK_M1 \
+ sl_msk[0] = loadu512(p_sh_mask_arr + TEDDY_VBMI_SL1_POS);
+
+#define TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
+ TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
+ sl_msk[1] = loadu512(p_sh_mask_arr + TEDDY_VBMI_SL2_POS);
+
+#define TEDDY_VBMI_LOAD_SHIFT_MASK_M4 \
+ TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
+ sl_msk[2] = loadu512(p_sh_mask_arr + TEDDY_VBMI_SL3_POS);
+
+#define PREPARE_MASKS_1 \
+ dup_mask[0] = set4x128(maskBase[0]); \
+ dup_mask[1] = set4x128(maskBase[1]);
+
+#define PREPARE_MASKS_2 \
+ PREPARE_MASKS_1 \
+ dup_mask[2] = set4x128(maskBase[2]); \
+ dup_mask[3] = set4x128(maskBase[3]);
+
+#define PREPARE_MASKS_3 \
+ PREPARE_MASKS_2 \
+ dup_mask[4] = set4x128(maskBase[4]); \
+ dup_mask[5] = set4x128(maskBase[5]);
+
+#define PREPARE_MASKS_4 \
+ PREPARE_MASKS_3 \
+ dup_mask[6] = set4x128(maskBase[6]); \
+ dup_mask[7] = set4x128(maskBase[7]);
+
+#define PREPARE_MASKS(n) \
+ m512 lo_mask = set64x8(0xf); \
+ m512 dup_mask[n * 2]; \
+ m512 sl_msk[n - 1]; \
+ PREPARE_MASKS_##n \
+ TEDDY_VBMI_LOAD_SHIFT_MASK_M##n
+
+#define TEDDY_VBMI_CONF_MASK_HEAD (0xffffffffffffffffULL >> n_sh)
+#define TEDDY_VBMI_CONF_MASK_FULL (0xffffffffffffffffULL << n_sh)
+#define TEDDY_VBMI_CONF_MASK_VAR(n) (0xffffffffffffffffULL >> (64 - n) << overlap)
+#define TEDDY_VBMI_LOAD_MASK_PATCH (0xffffffffffffffffULL >> (64 - n_sh))
+
+#define FDR_EXEC_TEDDY(fdr, a, control, n_msk, conf_fn) \
+do { \
+ const u8 *buf_end = a->buf + a->len; \
+ const u8 *ptr = a->buf + a->start_offset; \
+ u32 floodBackoff = FLOOD_BACKOFF_START; \
+ const u8 *tryFloodDetect = a->firstFloodDetect; \
+ u32 last_match = ones_u32; \
+ const struct Teddy *teddy = (const struct Teddy *)fdr; \
+ const size_t iterBytes = 64; \
+ u32 n_sh = n_msk - 1; \
+ const size_t loopBytes = 64 - n_sh; \
+ DEBUG_PRINTF("params: buf %p len %zu start_offset %zu\n", \
+ a->buf, a->len, a->start_offset); \
+ \
+ const m128 *maskBase = getMaskBase(teddy); \
+ PREPARE_MASKS(n_msk); \
+ const u32 *confBase = getConfBase(teddy); \
+ \
+ u64a k = TEDDY_VBMI_CONF_MASK_FULL; \
+ m512 p_mask = set_mask_m512(~k); \
+ u32 overlap = 0; \
+ u64a patch = 0; \
+ if (likely(ptr + loopBytes <= buf_end)) { \
+ m512 p_mask0 = set_mask_m512(~TEDDY_VBMI_CONF_MASK_HEAD); \
+ m512 r_0 = PREP_CONF_FN(loadu512(ptr), n_msk); \
+ r_0 = or512(r_0, p_mask0); \
+ CONFIRM_TEDDY(r_0, 8, 0, VECTORING, ptr, conf_fn); \
+ ptr += loopBytes; \
+ overlap = n_sh; \
+ patch = TEDDY_VBMI_LOAD_MASK_PATCH; \
+ } \
+ \
+ for (; ptr + loopBytes <= buf_end; ptr += loopBytes) { \
+ __builtin_prefetch(ptr - n_sh + (64 * 2)); \
+ CHECK_FLOOD; \
+ m512 r_0 = PREP_CONF_FN(loadu512(ptr - n_sh), n_msk); \
+ r_0 = or512(r_0, p_mask); \
+ CONFIRM_TEDDY(r_0, 8, 0, NOT_CAUTIOUS, ptr - n_sh, conf_fn); \
+ } \
+ \
+ assert(ptr + loopBytes > buf_end); \
+ if (ptr < buf_end) { \
+ u32 left = (u32)(buf_end - ptr); \
+ u64a k1 = TEDDY_VBMI_CONF_MASK_VAR(left); \
+ m512 p_mask1 = set_mask_m512(~k1); \
+ m512 val_0 = loadu_maskz_m512(k1 | patch, ptr - overlap); \
+ m512 r_0 = PREP_CONF_FN(val_0, n_msk); \
+ r_0 = or512(r_0, p_mask1); \
+ CONFIRM_TEDDY(r_0, 8, 0, VECTORING, ptr - overlap, conf_fn); \
+ } \
+ \
+ return HWLM_SUCCESS; \
+} while(0)
+
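A minimal standalone sketch (not part of the patch) of the confirm-mask scheme defined above, assuming n_msk = 3: the HEAD mask covers the first block, which has no history; the FULL mask skips the n_sh overlap bytes re-read on every loop iteration; the VAR mask covers a short tail of `left` bytes (values here are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        unsigned n_msk = 3, n_sh = n_msk - 1;           /* n_sh bytes of overlap */
        unsigned left = 10, overlap = n_sh;             /* hypothetical short tail */
        uint64_t head = 0xffffffffffffffffULL >> n_sh;
        uint64_t full = 0xffffffffffffffffULL << n_sh;
        uint64_t var  = 0xffffffffffffffffULL >> (64 - left) << overlap;
        printf("head %016llx full %016llx var %016llx\n",
               (unsigned long long)head, (unsigned long long)full,
               (unsigned long long)var);
        return 0;
    }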
+#elif defined(HAVE_AVX512) // AVX512 reinforced teddy
-#if defined(HAVE_AVX512VBMI) // VBMI strong teddy
-
#ifdef ARCH_64_BIT
-#define CONFIRM_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
-do { \
- if (unlikely(diff512(var, ones512()))) { \
- m128 p128_0 = extract128from512(var, 0); \
- m128 p128_1 = extract128from512(var, 1); \
- m128 p128_2 = extract128from512(var, 2); \
- m128 p128_3 = extract128from512(var, 3); \
- u64a part1 = movq(p128_0); \
- u64a part2 = movq(rshiftbyte_m128(p128_0, 8)); \
- u64a part3 = movq(p128_1); \
- u64a part4 = movq(rshiftbyte_m128(p128_1, 8)); \
- u64a part5 = movq(p128_2); \
- u64a part6 = movq(rshiftbyte_m128(p128_2, 8)); \
- u64a part7 = movq(p128_3); \
- u64a part8 = movq(rshiftbyte_m128(p128_3, 8)); \
- CONF_CHUNK_64(part1, bucket, offset, reason, pt, conf_fn); \
- CONF_CHUNK_64(part2, bucket, offset + 8, reason, pt, conf_fn); \
- CONF_CHUNK_64(part3, bucket, offset + 16, reason, pt, conf_fn); \
- CONF_CHUNK_64(part4, bucket, offset + 24, reason, pt, conf_fn); \
- CONF_CHUNK_64(part5, bucket, offset + 32, reason, pt, conf_fn); \
- CONF_CHUNK_64(part6, bucket, offset + 40, reason, pt, conf_fn); \
- CONF_CHUNK_64(part7, bucket, offset + 48, reason, pt, conf_fn); \
- CONF_CHUNK_64(part8, bucket, offset + 56, reason, pt, conf_fn); \
- } \
-} while(0)
-#else
-#define CONFIRM_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
-do { \
- if (unlikely(diff512(var, ones512()))) { \
- m128 p128_0 = extract128from512(var, 0); \
- m128 p128_1 = extract128from512(var, 1); \
- m128 p128_2 = extract128from512(var, 2); \
- m128 p128_3 = extract128from512(var, 3); \
- u32 part1 = movd(p128_0); \
- u32 part2 = movd(rshiftbyte_m128(p128_0, 4)); \
- u32 part3 = movd(rshiftbyte_m128(p128_0, 8)); \
- u32 part4 = movd(rshiftbyte_m128(p128_0, 12)); \
- u32 part5 = movd(p128_1); \
- u32 part6 = movd(rshiftbyte_m128(p128_1, 4)); \
- u32 part7 = movd(rshiftbyte_m128(p128_1, 8)); \
- u32 part8 = movd(rshiftbyte_m128(p128_1, 12)); \
- u32 part9 = movd(p128_2); \
- u32 part10 = movd(rshiftbyte_m128(p128_2, 4)); \
- u32 part11 = movd(rshiftbyte_m128(p128_2, 8)); \
- u32 part12 = movd(rshiftbyte_m128(p128_2, 12)); \
- u32 part13 = movd(p128_3); \
- u32 part14 = movd(rshiftbyte_m128(p128_3, 4)); \
- u32 part15 = movd(rshiftbyte_m128(p128_3, 8)); \
- u32 part16 = movd(rshiftbyte_m128(p128_3, 12)); \
- CONF_CHUNK_32(part1, bucket, offset, reason, pt, conf_fn); \
- CONF_CHUNK_32(part2, bucket, offset + 4, reason, pt, conf_fn); \
- CONF_CHUNK_32(part3, bucket, offset + 8, reason, pt, conf_fn); \
- CONF_CHUNK_32(part4, bucket, offset + 12, reason, pt, conf_fn); \
- CONF_CHUNK_32(part5, bucket, offset + 16, reason, pt, conf_fn); \
- CONF_CHUNK_32(part6, bucket, offset + 20, reason, pt, conf_fn); \
- CONF_CHUNK_32(part7, bucket, offset + 24, reason, pt, conf_fn); \
- CONF_CHUNK_32(part8, bucket, offset + 28, reason, pt, conf_fn); \
- CONF_CHUNK_32(part9, bucket, offset + 32, reason, pt, conf_fn); \
- CONF_CHUNK_32(part10, bucket, offset + 36, reason, pt, conf_fn); \
- CONF_CHUNK_32(part11, bucket, offset + 40, reason, pt, conf_fn); \
- CONF_CHUNK_32(part12, bucket, offset + 44, reason, pt, conf_fn); \
- CONF_CHUNK_32(part13, bucket, offset + 48, reason, pt, conf_fn); \
- CONF_CHUNK_32(part14, bucket, offset + 52, reason, pt, conf_fn); \
- CONF_CHUNK_32(part15, bucket, offset + 56, reason, pt, conf_fn); \
- CONF_CHUNK_32(part16, bucket, offset + 60, reason, pt, conf_fn); \
- } \
-} while(0)
-#endif
-
-#define PREP_SHUF_MASK \
- m512 lo = and512(val, *lo_mask); \
- m512 hi = and512(rshift64_m512(val, 4), *lo_mask)
-
-#define TEDDY_VBMI_PSHUFB_OR_M1 \
- m512 shuf_or_b0 = or512(pshufb_m512(dup_mask[0], lo), \
- pshufb_m512(dup_mask[1], hi));
-
-#define TEDDY_VBMI_PSHUFB_OR_M2 \
- TEDDY_VBMI_PSHUFB_OR_M1 \
- m512 shuf_or_b1 = or512(pshufb_m512(dup_mask[2], lo), \
- pshufb_m512(dup_mask[3], hi));
-
-#define TEDDY_VBMI_PSHUFB_OR_M3 \
- TEDDY_VBMI_PSHUFB_OR_M2 \
- m512 shuf_or_b2 = or512(pshufb_m512(dup_mask[4], lo), \
- pshufb_m512(dup_mask[5], hi));
-
-#define TEDDY_VBMI_PSHUFB_OR_M4 \
- TEDDY_VBMI_PSHUFB_OR_M3 \
- m512 shuf_or_b3 = or512(pshufb_m512(dup_mask[6], lo), \
- pshufb_m512(dup_mask[7], hi));
-
-#define TEDDY_VBMI_SL1_MASK 0xfffffffffffffffeULL
-#define TEDDY_VBMI_SL2_MASK 0xfffffffffffffffcULL
-#define TEDDY_VBMI_SL3_MASK 0xfffffffffffffff8ULL
-
-#define TEDDY_VBMI_SHIFT_M1
-
-#define TEDDY_VBMI_SHIFT_M2 \
- TEDDY_VBMI_SHIFT_M1 \
- m512 sl1 = maskz_vpermb512(TEDDY_VBMI_SL1_MASK, sl_msk[0], shuf_or_b1);
-
-#define TEDDY_VBMI_SHIFT_M3 \
- TEDDY_VBMI_SHIFT_M2 \
- m512 sl2 = maskz_vpermb512(TEDDY_VBMI_SL2_MASK, sl_msk[1], shuf_or_b2);
-
-#define TEDDY_VBMI_SHIFT_M4 \
- TEDDY_VBMI_SHIFT_M3 \
- m512 sl3 = maskz_vpermb512(TEDDY_VBMI_SL3_MASK, sl_msk[2], shuf_or_b3);
-
-#define SHIFT_OR_M1 \
- shuf_or_b0
-
-#define SHIFT_OR_M2 \
- or512(sl1, SHIFT_OR_M1)
-
-#define SHIFT_OR_M3 \
- or512(sl2, SHIFT_OR_M2)
-
-#define SHIFT_OR_M4 \
- or512(sl3, SHIFT_OR_M3)
-
-static really_inline
-m512 prep_conf_teddy_m1(const m512 *lo_mask, const m512 *dup_mask,
- UNUSED const m512 *sl_msk, const m512 val) {
- PREP_SHUF_MASK;
- TEDDY_VBMI_PSHUFB_OR_M1;
- TEDDY_VBMI_SHIFT_M1;
- return SHIFT_OR_M1;
-}
-
-static really_inline
-m512 prep_conf_teddy_m2(const m512 *lo_mask, const m512 *dup_mask,
- const m512 *sl_msk, const m512 val) {
- PREP_SHUF_MASK;
- TEDDY_VBMI_PSHUFB_OR_M2;
- TEDDY_VBMI_SHIFT_M2;
- return SHIFT_OR_M2;
-}
-
-static really_inline
-m512 prep_conf_teddy_m3(const m512 *lo_mask, const m512 *dup_mask,
- const m512 *sl_msk, const m512 val) {
- PREP_SHUF_MASK;
- TEDDY_VBMI_PSHUFB_OR_M3;
- TEDDY_VBMI_SHIFT_M3;
- return SHIFT_OR_M3;
-}
-
-static really_inline
-m512 prep_conf_teddy_m4(const m512 *lo_mask, const m512 *dup_mask,
- const m512 *sl_msk, const m512 val) {
- PREP_SHUF_MASK;
- TEDDY_VBMI_PSHUFB_OR_M4;
- TEDDY_VBMI_SHIFT_M4;
- return SHIFT_OR_M4;
-}
-
-#define PREP_CONF_FN(val, n) \
- prep_conf_teddy_m##n(&lo_mask, dup_mask, sl_msk, val)
-
-#define TEDDY_VBMI_SL1_POS 15
-#define TEDDY_VBMI_SL2_POS 14
-#define TEDDY_VBMI_SL3_POS 13
-
-#define TEDDY_VBMI_LOAD_SHIFT_MASK_M1
-
-#define TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
- TEDDY_VBMI_LOAD_SHIFT_MASK_M1 \
- sl_msk[0] = loadu512(p_sh_mask_arr + TEDDY_VBMI_SL1_POS);
-
-#define TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
- TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
- sl_msk[1] = loadu512(p_sh_mask_arr + TEDDY_VBMI_SL2_POS);
-
-#define TEDDY_VBMI_LOAD_SHIFT_MASK_M4 \
- TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
- sl_msk[2] = loadu512(p_sh_mask_arr + TEDDY_VBMI_SL3_POS);
-
-#define PREPARE_MASKS_1 \
- dup_mask[0] = set4x128(maskBase[0]); \
- dup_mask[1] = set4x128(maskBase[1]);
-
-#define PREPARE_MASKS_2 \
- PREPARE_MASKS_1 \
- dup_mask[2] = set4x128(maskBase[2]); \
- dup_mask[3] = set4x128(maskBase[3]);
-
-#define PREPARE_MASKS_3 \
- PREPARE_MASKS_2 \
- dup_mask[4] = set4x128(maskBase[4]); \
- dup_mask[5] = set4x128(maskBase[5]);
-
-#define PREPARE_MASKS_4 \
- PREPARE_MASKS_3 \
- dup_mask[6] = set4x128(maskBase[6]); \
- dup_mask[7] = set4x128(maskBase[7]);
-
-#define PREPARE_MASKS(n) \
- m512 lo_mask = set64x8(0xf); \
- m512 dup_mask[n * 2]; \
- m512 sl_msk[n - 1]; \
- PREPARE_MASKS_##n \
- TEDDY_VBMI_LOAD_SHIFT_MASK_M##n
-
-#define TEDDY_VBMI_CONF_MASK_HEAD (0xffffffffffffffffULL >> n_sh)
-#define TEDDY_VBMI_CONF_MASK_FULL (0xffffffffffffffffULL << n_sh)
-#define TEDDY_VBMI_CONF_MASK_VAR(n) (0xffffffffffffffffULL >> (64 - n) << overlap)
-#define TEDDY_VBMI_LOAD_MASK_PATCH (0xffffffffffffffffULL >> (64 - n_sh))
-
-#define FDR_EXEC_TEDDY(fdr, a, control, n_msk, conf_fn) \
-do { \
- const u8 *buf_end = a->buf + a->len; \
- const u8 *ptr = a->buf + a->start_offset; \
- u32 floodBackoff = FLOOD_BACKOFF_START; \
- const u8 *tryFloodDetect = a->firstFloodDetect; \
- u32 last_match = ones_u32; \
- const struct Teddy *teddy = (const struct Teddy *)fdr; \
- const size_t iterBytes = 64; \
- u32 n_sh = n_msk - 1; \
- const size_t loopBytes = 64 - n_sh; \
- DEBUG_PRINTF("params: buf %p len %zu start_offset %zu\n", \
- a->buf, a->len, a->start_offset); \
- \
- const m128 *maskBase = getMaskBase(teddy); \
- PREPARE_MASKS(n_msk); \
- const u32 *confBase = getConfBase(teddy); \
- \
- u64a k = TEDDY_VBMI_CONF_MASK_FULL; \
- m512 p_mask = set_mask_m512(~k); \
- u32 overlap = 0; \
- u64a patch = 0; \
- if (likely(ptr + loopBytes <= buf_end)) { \
- m512 p_mask0 = set_mask_m512(~TEDDY_VBMI_CONF_MASK_HEAD); \
- m512 r_0 = PREP_CONF_FN(loadu512(ptr), n_msk); \
- r_0 = or512(r_0, p_mask0); \
- CONFIRM_TEDDY(r_0, 8, 0, VECTORING, ptr, conf_fn); \
- ptr += loopBytes; \
- overlap = n_sh; \
- patch = TEDDY_VBMI_LOAD_MASK_PATCH; \
- } \
- \
- for (; ptr + loopBytes <= buf_end; ptr += loopBytes) { \
- __builtin_prefetch(ptr - n_sh + (64 * 2)); \
- CHECK_FLOOD; \
- m512 r_0 = PREP_CONF_FN(loadu512(ptr - n_sh), n_msk); \
- r_0 = or512(r_0, p_mask); \
- CONFIRM_TEDDY(r_0, 8, 0, NOT_CAUTIOUS, ptr - n_sh, conf_fn); \
- } \
- \
- assert(ptr + loopBytes > buf_end); \
- if (ptr < buf_end) { \
- u32 left = (u32)(buf_end - ptr); \
- u64a k1 = TEDDY_VBMI_CONF_MASK_VAR(left); \
- m512 p_mask1 = set_mask_m512(~k1); \
- m512 val_0 = loadu_maskz_m512(k1 | patch, ptr - overlap); \
- m512 r_0 = PREP_CONF_FN(val_0, n_msk); \
- r_0 = or512(r_0, p_mask1); \
- CONFIRM_TEDDY(r_0, 8, 0, VECTORING, ptr - overlap, conf_fn); \
- } \
- \
- return HWLM_SUCCESS; \
-} while(0)
-
-#elif defined(HAVE_AVX512) // AVX512 reinforced teddy
-
-#ifdef ARCH_64_BIT
#define CONFIRM_TEDDY(var, bucket, offset, reason, conf_fn) \
do { \
if (unlikely(diff512(var, ones512()))) { \
diff --git a/contrib/libs/hyperscan/src/fdr/teddy_avx2.c b/contrib/libs/hyperscan/src/fdr/teddy_avx2.c
index 34c0b2e171..6a6b27a5f2 100644
--- a/contrib/libs/hyperscan/src/fdr/teddy_avx2.c
+++ b/contrib/libs/hyperscan/src/fdr/teddy_avx2.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -109,36 +109,36 @@ const u8 ALIGN_AVX_DIRECTIVE p_mask_arr256[33][64] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
};
-#if defined(HAVE_AVX512VBMI) // VBMI strong fat teddy
-
-#define CONF_FAT_CHUNK_64(chunk, bucket, off, reason, pt, conf_fn) \
-do { \
- if (unlikely(chunk != ones_u64a)) { \
- chunk = ~chunk; \
- conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
- &control, &last_match); \
- CHECK_HWLM_TERMINATE_MATCHING; \
- } \
-} while(0)
-
-#define CONF_FAT_CHUNK_32(chunk, bucket, off, reason, pt, conf_fn) \
-do { \
- if (unlikely(chunk != ones_u32)) { \
- chunk = ~chunk; \
- conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
- &control, &last_match); \
- CHECK_HWLM_TERMINATE_MATCHING; \
- } \
-} while(0)
-
-static really_inline
-const m512 *getDupMaskBase(const struct Teddy *teddy, u8 numMask) {
- return (const m512 *)((const u8 *)teddy + ROUNDUP_CL(sizeof(struct Teddy))
- + ROUNDUP_CL(2 * numMask * sizeof(m256)));
-}
-
-#else
-
+#if defined(HAVE_AVX512VBMI) // VBMI strong fat teddy
+
+#define CONF_FAT_CHUNK_64(chunk, bucket, off, reason, pt, conf_fn) \
+do { \
+ if (unlikely(chunk != ones_u64a)) { \
+ chunk = ~chunk; \
+ conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
+ &control, &last_match); \
+ CHECK_HWLM_TERMINATE_MATCHING; \
+ } \
+} while(0)
+
+#define CONF_FAT_CHUNK_32(chunk, bucket, off, reason, pt, conf_fn) \
+do { \
+ if (unlikely(chunk != ones_u32)) { \
+ chunk = ~chunk; \
+ conf_fn(&chunk, bucket, off, confBase, reason, a, pt, \
+ &control, &last_match); \
+ CHECK_HWLM_TERMINATE_MATCHING; \
+ } \
+} while(0)
+
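A brief sketch (illustration only) of the polarity convention behind these chunk macros: Teddy result bits are inverted, so a set bit means "no match". A chunk is therefore only worth confirming when it differs from all-ones, and it is flipped before the confirm function consumes it:

    #include <stdint.h>

    static inline int fat_chunk_has_candidates(uint64_t chunk) {
        return chunk != ~(uint64_t)0;       /* all-ones means nothing to confirm */
    }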
+static really_inline
+const m512 *getDupMaskBase(const struct Teddy *teddy, u8 numMask) {
+ return (const m512 *)((const u8 *)teddy + ROUNDUP_CL(sizeof(struct Teddy))
+ + ROUNDUP_CL(2 * numMask * sizeof(m256)));
+}
+
+#else
+
#define CONF_FAT_CHUNK_64(chunk, bucket, off, reason, conf_fn) \
do { \
if (unlikely(chunk != ones_u64a)) { \
@@ -164,201 +164,201 @@ const m256 *getMaskBase_fat(const struct Teddy *teddy) {
return (const m256 *)((const u8 *)teddy + ROUNDUP_CL(sizeof(struct Teddy)));
}
-#endif
+#endif
+
+#if defined(HAVE_AVX512VBMI) // VBMI strong fat teddy
-#if defined(HAVE_AVX512VBMI) // VBMI strong fat teddy
+const u8 ALIGN_AVX_DIRECTIVE p_mask_interleave[64] = {
+ 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
+ 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
+ 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55,
+ 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63
+};
-const u8 ALIGN_AVX_DIRECTIVE p_mask_interleave[64] = {
- 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
- 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
- 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55,
- 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63
-};
-
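The table above, used as vpermb512 indices, interleaves byte k of the low 256-bit lane with byte k of the high lane, so that after the permute the two per-position result bytes of fat Teddy sit adjacently. A scalar model of the permute (illustration only, not the patch's code):

    static void interleave_bytes(unsigned char out[64],
                                 const unsigned char in[64],
                                 const unsigned char idx[64]) {
        for (int i = 0; i < 64; i++) {
            out[i] = in[idx[i]];            /* out = permute(in) by idx */
        }
    }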
#ifdef ARCH_64_BIT
-#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
+#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
do { \
if (unlikely(diff512(var, ones512()))) { \
- m512 msk_interleave = load512(p_mask_interleave); \
- m512 r = vpermb512(msk_interleave, var); \
+ m512 msk_interleave = load512(p_mask_interleave); \
+ m512 r = vpermb512(msk_interleave, var); \
m128 r0 = extract128from512(r, 0); \
m128 r1 = extract128from512(r, 1); \
- m128 r2 = extract128from512(r, 2); \
- m128 r3 = extract128from512(r, 3); \
+ m128 r2 = extract128from512(r, 2); \
+ m128 r3 = extract128from512(r, 3); \
u64a part1 = movq(r0); \
u64a part2 = extract64from128(r0, 1); \
- u64a part3 = movq(r1); \
- u64a part4 = extract64from128(r1, 1); \
- u64a part5 = movq(r2); \
- u64a part6 = extract64from128(r2, 1); \
- u64a part7 = movq(r3); \
- u64a part8 = extract64from128(r3, 1); \
- CONF_FAT_CHUNK_64(part1, bucket, offset, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_64(part2, bucket, offset + 4, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_64(part3, bucket, offset + 8, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_64(part4, bucket, offset + 12, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_64(part5, bucket, offset + 16, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_64(part6, bucket, offset + 20, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_64(part7, bucket, offset + 24, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_64(part8, bucket, offset + 28, reason, pt, conf_fn); \
+ u64a part3 = movq(r1); \
+ u64a part4 = extract64from128(r1, 1); \
+ u64a part5 = movq(r2); \
+ u64a part6 = extract64from128(r2, 1); \
+ u64a part7 = movq(r3); \
+ u64a part8 = extract64from128(r3, 1); \
+ CONF_FAT_CHUNK_64(part1, bucket, offset, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_64(part2, bucket, offset + 4, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_64(part3, bucket, offset + 8, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_64(part4, bucket, offset + 12, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_64(part5, bucket, offset + 16, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_64(part6, bucket, offset + 20, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_64(part7, bucket, offset + 24, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_64(part8, bucket, offset + 28, reason, pt, conf_fn); \
} \
} while(0)
#else
-#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
+#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, pt, conf_fn) \
do { \
if (unlikely(diff512(var, ones512()))) { \
- m512 msk_interleave = load512(p_mask_interleave); \
- m512 r = vpermb512(msk_interleave, var); \
+ m512 msk_interleave = load512(p_mask_interleave); \
+ m512 r = vpermb512(msk_interleave, var); \
m128 r0 = extract128from512(r, 0); \
m128 r1 = extract128from512(r, 1); \
- m128 r2 = extract128from512(r, 2); \
- m128 r3 = extract128from512(r, 3); \
+ m128 r2 = extract128from512(r, 2); \
+ m128 r3 = extract128from512(r, 3); \
u32 part1 = movd(r0); \
u32 part2 = extract32from128(r0, 1); \
u32 part3 = extract32from128(r0, 2); \
u32 part4 = extract32from128(r0, 3); \
- u32 part5 = movd(r1); \
- u32 part6 = extract32from128(r1, 1); \
- u32 part7 = extract32from128(r1, 2); \
- u32 part8 = extract32from128(r1, 3); \
- u32 part9 = movd(r2); \
- u32 part10 = extract32from128(r2, 1); \
- u32 part11 = extract32from128(r2, 2); \
- u32 part12 = extract32from128(r2, 3); \
- u32 part13 = movd(r3); \
- u32 part14 = extract32from128(r3, 1); \
- u32 part15 = extract32from128(r3, 2); \
- u32 part16 = extract32from128(r3, 3); \
- CONF_FAT_CHUNK_32(part1, bucket, offset, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_32(part2, bucket, offset + 2, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_32(part3, bucket, offset + 4, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_32(part4, bucket, offset + 6, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_32(part5, bucket, offset + 8, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_32(part6, bucket, offset + 10, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_32(part7, bucket, offset + 12, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_32(part8, bucket, offset + 14, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_32(part9, bucket, offset + 16, reason, pt, conf_fn); \
- CONF_FAT_CHUNK_32(part10, bucket, offset + 18, reason, pt, conf_fn);\
- CONF_FAT_CHUNK_32(part11, bucket, offset + 20, reason, pt, conf_fn);\
- CONF_FAT_CHUNK_32(part12, bucket, offset + 22, reason, pt, conf_fn);\
- CONF_FAT_CHUNK_32(part13, bucket, offset + 24, reason, pt, conf_fn);\
- CONF_FAT_CHUNK_32(part14, bucket, offset + 26, reason, pt, conf_fn);\
- CONF_FAT_CHUNK_32(part15, bucket, offset + 28, reason, pt, conf_fn);\
- CONF_FAT_CHUNK_32(part16, bucket, offset + 30, reason, pt, conf_fn);\
+ u32 part5 = movd(r1); \
+ u32 part6 = extract32from128(r1, 1); \
+ u32 part7 = extract32from128(r1, 2); \
+ u32 part8 = extract32from128(r1, 3); \
+ u32 part9 = movd(r2); \
+ u32 part10 = extract32from128(r2, 1); \
+ u32 part11 = extract32from128(r2, 2); \
+ u32 part12 = extract32from128(r2, 3); \
+ u32 part13 = movd(r3); \
+ u32 part14 = extract32from128(r3, 1); \
+ u32 part15 = extract32from128(r3, 2); \
+ u32 part16 = extract32from128(r3, 3); \
+ CONF_FAT_CHUNK_32(part1, bucket, offset, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_32(part2, bucket, offset + 2, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_32(part3, bucket, offset + 4, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_32(part4, bucket, offset + 6, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_32(part5, bucket, offset + 8, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_32(part6, bucket, offset + 10, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_32(part7, bucket, offset + 12, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_32(part8, bucket, offset + 14, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_32(part9, bucket, offset + 16, reason, pt, conf_fn); \
+ CONF_FAT_CHUNK_32(part10, bucket, offset + 18, reason, pt, conf_fn);\
+ CONF_FAT_CHUNK_32(part11, bucket, offset + 20, reason, pt, conf_fn);\
+ CONF_FAT_CHUNK_32(part12, bucket, offset + 22, reason, pt, conf_fn);\
+ CONF_FAT_CHUNK_32(part13, bucket, offset + 24, reason, pt, conf_fn);\
+ CONF_FAT_CHUNK_32(part14, bucket, offset + 26, reason, pt, conf_fn);\
+ CONF_FAT_CHUNK_32(part15, bucket, offset + 28, reason, pt, conf_fn);\
+ CONF_FAT_CHUNK_32(part16, bucket, offset + 30, reason, pt, conf_fn);\
} \
} while(0)
#endif
-#define PREP_FAT_SHUF_MASK \
+#define PREP_FAT_SHUF_MASK \
m512 lo = and512(val, *lo_mask); \
m512 hi = and512(rshift64_m512(val, 4), *lo_mask)
-#define FAT_TEDDY_VBMI_PSHUFB_OR_M1 \
- m512 shuf_or_b0 = or512(pshufb_m512(dup_mask[0], lo), \
- pshufb_m512(dup_mask[1], hi));
-
-#define FAT_TEDDY_VBMI_PSHUFB_OR_M2 \
- FAT_TEDDY_VBMI_PSHUFB_OR_M1 \
- m512 shuf_or_b1 = or512(pshufb_m512(dup_mask[2], lo), \
- pshufb_m512(dup_mask[3], hi));
-
-#define FAT_TEDDY_VBMI_PSHUFB_OR_M3 \
- FAT_TEDDY_VBMI_PSHUFB_OR_M2 \
- m512 shuf_or_b2 = or512(pshufb_m512(dup_mask[4], lo), \
- pshufb_m512(dup_mask[5], hi));
-
-#define FAT_TEDDY_VBMI_PSHUFB_OR_M4 \
- FAT_TEDDY_VBMI_PSHUFB_OR_M3 \
- m512 shuf_or_b3 = or512(pshufb_m512(dup_mask[6], lo), \
- pshufb_m512(dup_mask[7], hi));
-
-#define FAT_TEDDY_VBMI_SL1_MASK 0xfffffffefffffffeULL
-#define FAT_TEDDY_VBMI_SL2_MASK 0xfffffffcfffffffcULL
-#define FAT_TEDDY_VBMI_SL3_MASK 0xfffffff8fffffff8ULL
-
-#define FAT_TEDDY_VBMI_SHIFT_M1
-
-#define FAT_TEDDY_VBMI_SHIFT_M2 \
- FAT_TEDDY_VBMI_SHIFT_M1 \
- m512 sl1 = maskz_vpermb512(FAT_TEDDY_VBMI_SL1_MASK, sl_msk[0], shuf_or_b1);
-
-#define FAT_TEDDY_VBMI_SHIFT_M3 \
- FAT_TEDDY_VBMI_SHIFT_M2 \
- m512 sl2 = maskz_vpermb512(FAT_TEDDY_VBMI_SL2_MASK, sl_msk[1], shuf_or_b2);
-
-#define FAT_TEDDY_VBMI_SHIFT_M4 \
- FAT_TEDDY_VBMI_SHIFT_M3 \
- m512 sl3 = maskz_vpermb512(FAT_TEDDY_VBMI_SL3_MASK, sl_msk[2], shuf_or_b3);
-
-#define FAT_SHIFT_OR_M1 \
- shuf_or_b0
-
-#define FAT_SHIFT_OR_M2 \
- or512(sl1, FAT_SHIFT_OR_M1)
-
-#define FAT_SHIFT_OR_M3 \
- or512(sl2, FAT_SHIFT_OR_M2)
-
-#define FAT_SHIFT_OR_M4 \
- or512(sl3, FAT_SHIFT_OR_M3)
-
+#define FAT_TEDDY_VBMI_PSHUFB_OR_M1 \
+ m512 shuf_or_b0 = or512(pshufb_m512(dup_mask[0], lo), \
+ pshufb_m512(dup_mask[1], hi));
+
+#define FAT_TEDDY_VBMI_PSHUFB_OR_M2 \
+ FAT_TEDDY_VBMI_PSHUFB_OR_M1 \
+ m512 shuf_or_b1 = or512(pshufb_m512(dup_mask[2], lo), \
+ pshufb_m512(dup_mask[3], hi));
+
+#define FAT_TEDDY_VBMI_PSHUFB_OR_M3 \
+ FAT_TEDDY_VBMI_PSHUFB_OR_M2 \
+ m512 shuf_or_b2 = or512(pshufb_m512(dup_mask[4], lo), \
+ pshufb_m512(dup_mask[5], hi));
+
+#define FAT_TEDDY_VBMI_PSHUFB_OR_M4 \
+ FAT_TEDDY_VBMI_PSHUFB_OR_M3 \
+ m512 shuf_or_b3 = or512(pshufb_m512(dup_mask[6], lo), \
+ pshufb_m512(dup_mask[7], hi));
+
+#define FAT_TEDDY_VBMI_SL1_MASK 0xfffffffefffffffeULL
+#define FAT_TEDDY_VBMI_SL2_MASK 0xfffffffcfffffffcULL
+#define FAT_TEDDY_VBMI_SL3_MASK 0xfffffff8fffffff8ULL
+
+#define FAT_TEDDY_VBMI_SHIFT_M1
+
+#define FAT_TEDDY_VBMI_SHIFT_M2 \
+ FAT_TEDDY_VBMI_SHIFT_M1 \
+ m512 sl1 = maskz_vpermb512(FAT_TEDDY_VBMI_SL1_MASK, sl_msk[0], shuf_or_b1);
+
+#define FAT_TEDDY_VBMI_SHIFT_M3 \
+ FAT_TEDDY_VBMI_SHIFT_M2 \
+ m512 sl2 = maskz_vpermb512(FAT_TEDDY_VBMI_SL2_MASK, sl_msk[1], shuf_or_b2);
+
+#define FAT_TEDDY_VBMI_SHIFT_M4 \
+ FAT_TEDDY_VBMI_SHIFT_M3 \
+ m512 sl3 = maskz_vpermb512(FAT_TEDDY_VBMI_SL3_MASK, sl_msk[2], shuf_or_b3);
+
+#define FAT_SHIFT_OR_M1 \
+ shuf_or_b0
+
+#define FAT_SHIFT_OR_M2 \
+ or512(sl1, FAT_SHIFT_OR_M1)
+
+#define FAT_SHIFT_OR_M3 \
+ or512(sl2, FAT_SHIFT_OR_M2)
+
+#define FAT_SHIFT_OR_M4 \
+ or512(sl3, FAT_SHIFT_OR_M3)
+
static really_inline
m512 prep_conf_fat_teddy_m1(const m512 *lo_mask, const m512 *dup_mask,
- UNUSED const m512 *sl_msk, const m512 val) {
+ UNUSED const m512 *sl_msk, const m512 val) {
PREP_FAT_SHUF_MASK;
- FAT_TEDDY_VBMI_PSHUFB_OR_M1;
- FAT_TEDDY_VBMI_SHIFT_M1;
- return FAT_SHIFT_OR_M1;
+ FAT_TEDDY_VBMI_PSHUFB_OR_M1;
+ FAT_TEDDY_VBMI_SHIFT_M1;
+ return FAT_SHIFT_OR_M1;
}
static really_inline
m512 prep_conf_fat_teddy_m2(const m512 *lo_mask, const m512 *dup_mask,
- const m512 *sl_msk, const m512 val) {
+ const m512 *sl_msk, const m512 val) {
PREP_FAT_SHUF_MASK;
- FAT_TEDDY_VBMI_PSHUFB_OR_M2;
- FAT_TEDDY_VBMI_SHIFT_M2;
- return FAT_SHIFT_OR_M2;
+ FAT_TEDDY_VBMI_PSHUFB_OR_M2;
+ FAT_TEDDY_VBMI_SHIFT_M2;
+ return FAT_SHIFT_OR_M2;
}
static really_inline
m512 prep_conf_fat_teddy_m3(const m512 *lo_mask, const m512 *dup_mask,
- const m512 *sl_msk, const m512 val) {
+ const m512 *sl_msk, const m512 val) {
PREP_FAT_SHUF_MASK;
- FAT_TEDDY_VBMI_PSHUFB_OR_M3;
- FAT_TEDDY_VBMI_SHIFT_M3;
- return FAT_SHIFT_OR_M3;
+ FAT_TEDDY_VBMI_PSHUFB_OR_M3;
+ FAT_TEDDY_VBMI_SHIFT_M3;
+ return FAT_SHIFT_OR_M3;
}
static really_inline
m512 prep_conf_fat_teddy_m4(const m512 *lo_mask, const m512 *dup_mask,
- const m512 *sl_msk, const m512 val) {
+ const m512 *sl_msk, const m512 val) {
PREP_FAT_SHUF_MASK;
- FAT_TEDDY_VBMI_PSHUFB_OR_M4;
- FAT_TEDDY_VBMI_SHIFT_M4;
- return FAT_SHIFT_OR_M4;
+ FAT_TEDDY_VBMI_PSHUFB_OR_M4;
+ FAT_TEDDY_VBMI_SHIFT_M4;
+ return FAT_SHIFT_OR_M4;
}
-#define PREP_CONF_FAT_FN(val, n) \
- prep_conf_fat_teddy_m##n(&lo_mask, dup_mask, sl_msk, val)
-
-#define FAT_TEDDY_VBMI_SL1_POS 15
-#define FAT_TEDDY_VBMI_SL2_POS 14
-#define FAT_TEDDY_VBMI_SL3_POS 13
-
-#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M1
-
-#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
- FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M1 \
- sl_msk[0] = loadu512(p_sh_mask_arr + FAT_TEDDY_VBMI_SL1_POS);
-
-#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
- FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
- sl_msk[1] = loadu512(p_sh_mask_arr + FAT_TEDDY_VBMI_SL2_POS);
-
-#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M4 \
- FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
- sl_msk[2] = loadu512(p_sh_mask_arr + FAT_TEDDY_VBMI_SL3_POS);
-
+#define PREP_CONF_FAT_FN(val, n) \
+ prep_conf_fat_teddy_m##n(&lo_mask, dup_mask, sl_msk, val)
+
+#define FAT_TEDDY_VBMI_SL1_POS 15
+#define FAT_TEDDY_VBMI_SL2_POS 14
+#define FAT_TEDDY_VBMI_SL3_POS 13
+
+#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M1
+
+#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
+ FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M1 \
+ sl_msk[0] = loadu512(p_sh_mask_arr + FAT_TEDDY_VBMI_SL1_POS);
+
+#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
+ FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M2 \
+ sl_msk[1] = loadu512(p_sh_mask_arr + FAT_TEDDY_VBMI_SL2_POS);
+
+#define FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M4 \
+ FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M3 \
+ sl_msk[2] = loadu512(p_sh_mask_arr + FAT_TEDDY_VBMI_SL3_POS);
+
/*
 * In FAT teddy, 2 bytes are needed to represent the result of each position,
 * so each nibble's (for example, the lo nibble of the last byte) FAT teddy mask
@@ -384,14 +384,14 @@ m512 prep_conf_fat_teddy_m4(const m512 *lo_mask, const m512 *dup_mask,
#define PREPARE_FAT_MASKS(n) \
m512 lo_mask = set64x8(0xf); \
- m512 sl_msk[n - 1]; \
- FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M##n
-
-#define FAT_TEDDY_VBMI_CONF_MASK_HEAD (0xffffffffULL >> n_sh)
-#define FAT_TEDDY_VBMI_CONF_MASK_FULL ((0xffffffffULL << n_sh) & 0xffffffffULL)
-#define FAT_TEDDY_VBMI_CONF_MASK_VAR(n) (0xffffffffULL >> (32 - n) << overlap)
-#define FAT_TEDDY_VBMI_LOAD_MASK_PATCH (0xffffffffULL >> (32 - n_sh))
-
+ m512 sl_msk[n - 1]; \
+ FAT_TEDDY_VBMI_LOAD_SHIFT_MASK_M##n
+
+#define FAT_TEDDY_VBMI_CONF_MASK_HEAD (0xffffffffULL >> n_sh)
+#define FAT_TEDDY_VBMI_CONF_MASK_FULL ((0xffffffffULL << n_sh) & 0xffffffffULL)
+#define FAT_TEDDY_VBMI_CONF_MASK_VAR(n) (0xffffffffULL >> (32 - n) << overlap)
+#define FAT_TEDDY_VBMI_LOAD_MASK_PATCH (0xffffffffULL >> (32 - n_sh))
+
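Since fat Teddy duplicates the same 32-byte window into both 256-bit lanes of a 512-bit vector, each 32-bit confirm mask above is replicated into both halves of the 64-bit mask register before use, as in this sketch (helper name is hypothetical):

    #include <stdint.h>

    static inline uint64_t fat_dup_conf_mask(uint32_t k) {
        return ((uint64_t)k << 32) | k;     /* same mask for both lanes */
    }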
#define FDR_EXEC_FAT_TEDDY(fdr, a, control, n_msk, conf_fn) \
do { \
const u8 *buf_end = a->buf + a->len; \
@@ -400,53 +400,53 @@ do { \
const u8 *tryFloodDetect = a->firstFloodDetect; \
u32 last_match = ones_u32; \
const struct Teddy *teddy = (const struct Teddy *)fdr; \
- const size_t iterBytes = 32; \
- u32 n_sh = n_msk - 1; \
- const size_t loopBytes = 32 - n_sh; \
+ const size_t iterBytes = 32; \
+ u32 n_sh = n_msk - 1; \
+ const size_t loopBytes = 32 - n_sh; \
DEBUG_PRINTF("params: buf %p len %zu start_offset %zu\n", \
a->buf, a->len, a->start_offset); \
\
- const m512 *dup_mask = getDupMaskBase(teddy, n_msk); \
+ const m512 *dup_mask = getDupMaskBase(teddy, n_msk); \
PREPARE_FAT_MASKS(n_msk); \
const u32 *confBase = getConfBase(teddy); \
\
- u64a k = FAT_TEDDY_VBMI_CONF_MASK_FULL; \
- m512 p_mask = set_mask_m512(~((k << 32) | k)); \
- u32 overlap = 0; \
- u64a patch = 0; \
- if (likely(ptr + loopBytes <= buf_end)) { \
- u64a k0 = FAT_TEDDY_VBMI_CONF_MASK_HEAD; \
- m512 p_mask0 = set_mask_m512(~((k0 << 32) | k0)); \
- m512 r_0 = PREP_CONF_FAT_FN(set2x256(loadu256(ptr)), n_msk); \
- r_0 = or512(r_0, p_mask0); \
- CONFIRM_FAT_TEDDY(r_0, 16, 0, VECTORING, ptr, conf_fn); \
- ptr += loopBytes; \
- overlap = n_sh; \
- patch = FAT_TEDDY_VBMI_LOAD_MASK_PATCH; \
+ u64a k = FAT_TEDDY_VBMI_CONF_MASK_FULL; \
+ m512 p_mask = set_mask_m512(~((k << 32) | k)); \
+ u32 overlap = 0; \
+ u64a patch = 0; \
+ if (likely(ptr + loopBytes <= buf_end)) { \
+ u64a k0 = FAT_TEDDY_VBMI_CONF_MASK_HEAD; \
+ m512 p_mask0 = set_mask_m512(~((k0 << 32) | k0)); \
+ m512 r_0 = PREP_CONF_FAT_FN(set2x256(loadu256(ptr)), n_msk); \
+ r_0 = or512(r_0, p_mask0); \
+ CONFIRM_FAT_TEDDY(r_0, 16, 0, VECTORING, ptr, conf_fn); \
+ ptr += loopBytes; \
+ overlap = n_sh; \
+ patch = FAT_TEDDY_VBMI_LOAD_MASK_PATCH; \
} \
\
- for (; ptr + loopBytes <= buf_end; ptr += loopBytes) { \
+ for (; ptr + loopBytes <= buf_end; ptr += loopBytes) { \
CHECK_FLOOD; \
- m512 r_0 = PREP_CONF_FAT_FN(set2x256(loadu256(ptr - n_sh)), n_msk); \
- r_0 = or512(r_0, p_mask); \
- CONFIRM_FAT_TEDDY(r_0, 16, 0, NOT_CAUTIOUS, ptr - n_sh, conf_fn); \
+ m512 r_0 = PREP_CONF_FAT_FN(set2x256(loadu256(ptr - n_sh)), n_msk); \
+ r_0 = or512(r_0, p_mask); \
+ CONFIRM_FAT_TEDDY(r_0, 16, 0, NOT_CAUTIOUS, ptr - n_sh, conf_fn); \
} \
\
- assert(ptr + loopBytes > buf_end); \
+ assert(ptr + loopBytes > buf_end); \
if (ptr < buf_end) { \
- u32 left = (u32)(buf_end - ptr); \
- u64a k1 = FAT_TEDDY_VBMI_CONF_MASK_VAR(left); \
- m512 p_mask1 = set_mask_m512(~((k1 << 32) | k1)); \
- m512 val_0 = set2x256(loadu_maskz_m256(k1 | patch, ptr - overlap)); \
- m512 r_0 = PREP_CONF_FAT_FN(val_0, n_msk); \
- r_0 = or512(r_0, p_mask1); \
- CONFIRM_FAT_TEDDY(r_0, 16, 0, VECTORING, ptr - overlap, conf_fn); \
+ u32 left = (u32)(buf_end - ptr); \
+ u64a k1 = FAT_TEDDY_VBMI_CONF_MASK_VAR(left); \
+ m512 p_mask1 = set_mask_m512(~((k1 << 32) | k1)); \
+ m512 val_0 = set2x256(loadu_maskz_m256(k1 | patch, ptr - overlap)); \
+ m512 r_0 = PREP_CONF_FAT_FN(val_0, n_msk); \
+ r_0 = or512(r_0, p_mask1); \
+ CONFIRM_FAT_TEDDY(r_0, 16, 0, VECTORING, ptr - overlap, conf_fn); \
} \
\
return HWLM_SUCCESS; \
} while(0)
-#else // !HAVE_AVX512VBMI, AVX2 normal fat teddy
+#else // !HAVE_AVX512VBMI, AVX2 normal fat teddy
#ifdef ARCH_64_BIT
#define CONFIRM_FAT_TEDDY(var, bucket, offset, reason, conf_fn) \
@@ -656,7 +656,7 @@ do { \
return HWLM_SUCCESS; \
} while(0)
-#endif // HAVE_AVX512VBMI
+#endif // HAVE_AVX512VBMI
hwlm_error_t fdr_exec_fat_teddy_msks1(const struct FDR *fdr,
const struct FDR_Runtime_Args *a,
diff --git a/contrib/libs/hyperscan/src/fdr/teddy_compile.cpp b/contrib/libs/hyperscan/src/fdr/teddy_compile.cpp
index 82b478666d..eae9c2c136 100644
--- a/contrib/libs/hyperscan/src/fdr/teddy_compile.cpp
+++ b/contrib/libs/hyperscan/src/fdr/teddy_compile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -354,89 +354,89 @@ void fillReinforcedMsk(u8 *rmsk, u16 c, u32 j, u8 bmsk) {
}
static
-void fillDupNibbleMasks(const map<BucketIndex,
- vector<LiteralIndex>> &bucketToLits,
- const vector<hwlmLiteral> &lits,
- u32 numMasks, size_t maskLen,
- u8 *baseMsk) {
- u32 maskWidth = 2;
- memset(baseMsk, 0xff, maskLen);
-
- for (const auto &b2l : bucketToLits) {
- const u32 &bucket_id = b2l.first;
- const vector<LiteralIndex> &ids = b2l.second;
- const u8 bmsk = 1U << (bucket_id % 8);
-
- for (const LiteralIndex &lit_id : ids) {
- const hwlmLiteral &l = lits[lit_id];
- DEBUG_PRINTF("putting lit %u into bucket %u\n", lit_id, bucket_id);
- const u32 sz = verify_u32(l.s.size());
-
- // fill in masks
- for (u32 j = 0; j < numMasks; j++) {
- const u32 msk_id_lo = j * 2 * maskWidth + (bucket_id / 8);
- const u32 msk_id_hi = (j * 2 + 1) * maskWidth + (bucket_id / 8);
- const u32 lo_base0 = msk_id_lo * 32;
- const u32 lo_base1 = msk_id_lo * 32 + 16;
- const u32 hi_base0 = msk_id_hi * 32;
- const u32 hi_base1 = msk_id_hi * 32 + 16;
-
-                // if we don't have a char at this position, fill in all
-                // 16 locations in these masks with '1'
- if (j >= sz) {
- for (u32 n = 0; n < 16; n++) {
- baseMsk[lo_base0 + n] &= ~bmsk;
- baseMsk[lo_base1 + n] &= ~bmsk;
- baseMsk[hi_base0 + n] &= ~bmsk;
- baseMsk[hi_base1 + n] &= ~bmsk;
- }
- } else {
- u8 c = l.s[sz - 1 - j];
- // if we do have a char at this position
- const u32 hiShift = 4;
- u32 n_hi = (c >> hiShift) & 0xf;
- u32 n_lo = c & 0xf;
-
- if (j < l.msk.size() && l.msk[l.msk.size() - 1 - j]) {
- u8 m = l.msk[l.msk.size() - 1 - j];
- u8 m_hi = (m >> hiShift) & 0xf;
- u8 m_lo = m & 0xf;
- u8 cmp = l.cmp[l.msk.size() - 1 - j];
- u8 cmp_lo = cmp & 0xf;
- u8 cmp_hi = (cmp >> hiShift) & 0xf;
-
- for (u8 cm = 0; cm < 0x10; cm++) {
- if ((cm & m_lo) == (cmp_lo & m_lo)) {
- baseMsk[lo_base0 + cm] &= ~bmsk;
- baseMsk[lo_base1 + cm] &= ~bmsk;
- }
- if ((cm & m_hi) == (cmp_hi & m_hi)) {
- baseMsk[hi_base0 + cm] &= ~bmsk;
- baseMsk[hi_base1 + cm] &= ~bmsk;
- }
- }
- } else {
- if (l.nocase && ourisalpha(c)) {
- u32 cmHalfClear = (0xdf >> hiShift) & 0xf;
- u32 cmHalfSet = (0x20 >> hiShift) & 0xf;
- baseMsk[hi_base0 + (n_hi & cmHalfClear)] &= ~bmsk;
- baseMsk[hi_base1 + (n_hi & cmHalfClear)] &= ~bmsk;
- baseMsk[hi_base0 + (n_hi | cmHalfSet)] &= ~bmsk;
- baseMsk[hi_base1 + (n_hi | cmHalfSet)] &= ~bmsk;
- } else {
- baseMsk[hi_base0 + n_hi] &= ~bmsk;
- baseMsk[hi_base1 + n_hi] &= ~bmsk;
- }
- baseMsk[lo_base0 + n_lo] &= ~bmsk;
- baseMsk[lo_base1 + n_lo] &= ~bmsk;
- }
- }
- }
- }
- }
-}
-
-static
+void fillDupNibbleMasks(const map<BucketIndex,
+ vector<LiteralIndex>> &bucketToLits,
+ const vector<hwlmLiteral> &lits,
+ u32 numMasks, size_t maskLen,
+ u8 *baseMsk) {
+ u32 maskWidth = 2;
+ memset(baseMsk, 0xff, maskLen);
+
+ for (const auto &b2l : bucketToLits) {
+ const u32 &bucket_id = b2l.first;
+ const vector<LiteralIndex> &ids = b2l.second;
+ const u8 bmsk = 1U << (bucket_id % 8);
+
+ for (const LiteralIndex &lit_id : ids) {
+ const hwlmLiteral &l = lits[lit_id];
+ DEBUG_PRINTF("putting lit %u into bucket %u\n", lit_id, bucket_id);
+ const u32 sz = verify_u32(l.s.size());
+
+ // fill in masks
+ for (u32 j = 0; j < numMasks; j++) {
+ const u32 msk_id_lo = j * 2 * maskWidth + (bucket_id / 8);
+ const u32 msk_id_hi = (j * 2 + 1) * maskWidth + (bucket_id / 8);
+ const u32 lo_base0 = msk_id_lo * 32;
+ const u32 lo_base1 = msk_id_lo * 32 + 16;
+ const u32 hi_base0 = msk_id_hi * 32;
+ const u32 hi_base1 = msk_id_hi * 32 + 16;
+
+                // if we don't have a char at this position, fill in all
+                // 16 locations in these masks with '1'
+ if (j >= sz) {
+ for (u32 n = 0; n < 16; n++) {
+ baseMsk[lo_base0 + n] &= ~bmsk;
+ baseMsk[lo_base1 + n] &= ~bmsk;
+ baseMsk[hi_base0 + n] &= ~bmsk;
+ baseMsk[hi_base1 + n] &= ~bmsk;
+ }
+ } else {
+ u8 c = l.s[sz - 1 - j];
+ // if we do have a char at this position
+ const u32 hiShift = 4;
+ u32 n_hi = (c >> hiShift) & 0xf;
+ u32 n_lo = c & 0xf;
+
+ if (j < l.msk.size() && l.msk[l.msk.size() - 1 - j]) {
+ u8 m = l.msk[l.msk.size() - 1 - j];
+ u8 m_hi = (m >> hiShift) & 0xf;
+ u8 m_lo = m & 0xf;
+ u8 cmp = l.cmp[l.msk.size() - 1 - j];
+ u8 cmp_lo = cmp & 0xf;
+ u8 cmp_hi = (cmp >> hiShift) & 0xf;
+
+ for (u8 cm = 0; cm < 0x10; cm++) {
+ if ((cm & m_lo) == (cmp_lo & m_lo)) {
+ baseMsk[lo_base0 + cm] &= ~bmsk;
+ baseMsk[lo_base1 + cm] &= ~bmsk;
+ }
+ if ((cm & m_hi) == (cmp_hi & m_hi)) {
+ baseMsk[hi_base0 + cm] &= ~bmsk;
+ baseMsk[hi_base1 + cm] &= ~bmsk;
+ }
+ }
+ } else {
+ if (l.nocase && ourisalpha(c)) {
+ u32 cmHalfClear = (0xdf >> hiShift) & 0xf;
+ u32 cmHalfSet = (0x20 >> hiShift) & 0xf;
+ baseMsk[hi_base0 + (n_hi & cmHalfClear)] &= ~bmsk;
+ baseMsk[hi_base1 + (n_hi & cmHalfClear)] &= ~bmsk;
+ baseMsk[hi_base0 + (n_hi | cmHalfSet)] &= ~bmsk;
+ baseMsk[hi_base1 + (n_hi | cmHalfSet)] &= ~bmsk;
+ } else {
+ baseMsk[hi_base0 + n_hi] &= ~bmsk;
+ baseMsk[hi_base1 + n_hi] &= ~bmsk;
+ }
+ baseMsk[lo_base0 + n_lo] &= ~bmsk;
+ baseMsk[lo_base1 + n_lo] &= ~bmsk;
+ }
+ }
+ }
+ }
+ }
+}
+
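A sketch of the index arithmetic used by fillDupNibbleMasks() above: each 16-entry nibble mask is written twice, at a 32-byte-aligned base and again at base + 16, so one 32-byte load can feed both 128-bit lanes of a fat shuffle (the helper below is hypothetical, mirroring the lo-mask case):

    #include <stdint.h>

    static inline uint32_t dup_lo_base(uint32_t j, uint32_t maskWidth,
                                       uint32_t bucket_id) {
        uint32_t msk_id_lo = j * 2 * maskWidth + (bucket_id / 8);
        return msk_id_lo * 32;              /* duplicate copy lives at +16 */
    }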
+static
void fillNibbleMasks(const map<BucketIndex,
vector<LiteralIndex>> &bucketToLits,
const vector<hwlmLiteral> &lits,
@@ -562,17 +562,17 @@ bytecode_ptr<FDR> TeddyCompiler::build() {
size_t headerSize = sizeof(Teddy);
size_t maskLen = eng.numMasks * 16 * 2 * maskWidth;
- size_t reinforcedDupMaskLen = RTABLE_SIZE * maskWidth;
- if (maskWidth == 2) { // dup nibble mask table in Fat Teddy
- reinforcedDupMaskLen = maskLen * 2;
- }
+ size_t reinforcedDupMaskLen = RTABLE_SIZE * maskWidth;
+ if (maskWidth == 2) { // dup nibble mask table in Fat Teddy
+ reinforcedDupMaskLen = maskLen * 2;
+ }
auto floodTable = setupFDRFloodControl(lits, eng, grey);
auto confirmTable = setupFullConfs(lits, eng, bucketToLits, make_small);
// Note: we place each major structure here on a cacheline boundary.
size_t size = ROUNDUP_CL(headerSize) + ROUNDUP_CL(maskLen) +
- ROUNDUP_CL(reinforcedDupMaskLen) +
+ ROUNDUP_CL(reinforcedDupMaskLen) +
ROUNDUP_CL(confirmTable.size()) + floodTable.size();
auto fdr = make_zeroed_bytecode_ptr<FDR>(size, 64);
@@ -588,7 +588,7 @@ bytecode_ptr<FDR> TeddyCompiler::build() {
// Write confirm structures.
u8 *ptr = teddy_base + ROUNDUP_CL(headerSize) + ROUNDUP_CL(maskLen) +
- ROUNDUP_CL(reinforcedDupMaskLen);
+ ROUNDUP_CL(reinforcedDupMaskLen);
assert(ISALIGNED_CL(ptr));
teddy->confOffset = verify_u32(ptr - teddy_base);
memcpy(ptr, confirmTable.get(), confirmTable.size());
@@ -605,16 +605,16 @@ bytecode_ptr<FDR> TeddyCompiler::build() {
fillNibbleMasks(bucketToLits, lits, eng.numMasks, maskWidth, maskLen,
baseMsk);
- if (maskWidth == 1) { // reinforcement table in Teddy
- // Write reinforcement masks.
- u8 *reinforcedMsk = baseMsk + ROUNDUP_CL(maskLen);
- fillReinforcedTable(bucketToLits, lits, reinforcedMsk, maskWidth);
- } else { // dup nibble mask table in Fat Teddy
- assert(maskWidth == 2);
- u8 *dupMsk = baseMsk + ROUNDUP_CL(maskLen);
- fillDupNibbleMasks(bucketToLits, lits, eng.numMasks,
- reinforcedDupMaskLen, dupMsk);
- }
+ if (maskWidth == 1) { // reinforcement table in Teddy
+ // Write reinforcement masks.
+ u8 *reinforcedMsk = baseMsk + ROUNDUP_CL(maskLen);
+ fillReinforcedTable(bucketToLits, lits, reinforcedMsk, maskWidth);
+ } else { // dup nibble mask table in Fat Teddy
+ assert(maskWidth == 2);
+ u8 *dupMsk = baseMsk + ROUNDUP_CL(maskLen);
+ fillDupNibbleMasks(bucketToLits, lits, eng.numMasks,
+ reinforcedDupMaskLen, dupMsk);
+ }
return fdr;
}
diff --git a/contrib/libs/hyperscan/src/fdr/teddy_runtime_common.h b/contrib/libs/hyperscan/src/fdr/teddy_runtime_common.h
index 223a6b8c81..b76800eb04 100644
--- a/contrib/libs/hyperscan/src/fdr/teddy_runtime_common.h
+++ b/contrib/libs/hyperscan/src/fdr/teddy_runtime_common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,16 +45,16 @@ extern const u8 ALIGN_DIRECTIVE p_mask_arr[17][32];
extern const u8 ALIGN_AVX_DIRECTIVE p_mask_arr256[33][64];
#endif
-#if defined(HAVE_AVX512VBMI)
-static const u8 ALIGN_DIRECTIVE p_sh_mask_arr[80] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
- 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
-};
-#endif
-
+#if defined(HAVE_AVX512VBMI)
+static const u8 ALIGN_DIRECTIVE p_sh_mask_arr[80] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
+};
+#endif
+
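Loading 64 bytes of p_sh_mask_arr at offset 16 - n yields vpermb indices that shift the whole vector left by n bytes: index i reads entry (16 - n) + i, which is i - n once past the leading zeros (the maskz k-mask clears lanes 0..n-1). A sketch of the index computation for n = 1:

    #include <stdio.h>

    int main(void) {
        unsigned n = 1;                     /* TEDDY_VBMI_SL1_POS == 16 - 1 */
        for (unsigned i = 0; i < 8; i++) {
            unsigned j = (16 - n) + i;      /* offset into p_sh_mask_arr */
            unsigned src = j < 16 ? 0 : j - 16;
            printf("lane %u <- byte %u\n", i, src);
        }
        return 0;
    }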
#ifdef ARCH_64_BIT
#define TEDDY_CONF_TYPE u64a
#define TEDDY_FIND_AND_CLEAR_LSB(conf) findAndClearLSB_64(conf)
@@ -393,16 +393,16 @@ m512 vectoredLoad512(m512 *p_mask, const u8 *ptr, const size_t start_offset,
static really_inline
u64a getConfVal(const struct FDR_Runtime_Args *a, const u8 *ptr, u32 byte,
- UNUSED CautionReason reason) {
+ UNUSED CautionReason reason) {
u64a confVal = 0;
const u8 *buf = a->buf;
size_t len = a->len;
const u8 *confirm_loc = ptr + byte - 7;
-#if defined(HAVE_AVX512VBMI)
- if (likely(confirm_loc >= buf)) {
-#else
+#if defined(HAVE_AVX512VBMI)
+ if (likely(confirm_loc >= buf)) {
+#else
if (likely(reason == NOT_CAUTIOUS || confirm_loc >= buf)) {
-#endif
+#endif
confVal = lv_u64a(confirm_loc, buf, buf + len);
} else { // r == VECTORING, confirm_loc < buf
u64a histBytes = a->histBytes;
diff --git a/contrib/libs/hyperscan/src/grey.cpp b/contrib/libs/hyperscan/src/grey.cpp
index 5637a363dd..86a93d25aa 100644
--- a/contrib/libs/hyperscan/src/grey.cpp
+++ b/contrib/libs/hyperscan/src/grey.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -82,7 +82,7 @@ Grey::Grey(void) :
onlyOneOutfix(false),
allowShermanStates(true),
allowMcClellan8(true),
- allowWideStates(true), // enable wide state for McClellan8
+ allowWideStates(true), // enable wide state for McClellan8
highlanderPruneDFA(true),
minimizeDFA(true),
accelerateDFA(true),
@@ -198,15 +198,15 @@ void applyGreyOverrides(Grey *g, const string &s) {
string::const_iterator ve = find(ke, pe, ';');
- unsigned int value = 0;
- try {
- value = lexical_cast<unsigned int>(string(ke + 1, ve));
- } catch (boost::bad_lexical_cast &e) {
- printf("Invalid grey override key %s:%s\n", key.c_str(),
- string(ke + 1, ve).c_str());
- invalid_key_seen = true;
- break;
- }
+ unsigned int value = 0;
+ try {
+ value = lexical_cast<unsigned int>(string(ke + 1, ve));
+ } catch (boost::bad_lexical_cast &e) {
+ printf("Invalid grey override key %s:%s\n", key.c_str(),
+ string(ke + 1, ve).c_str());
+ invalid_key_seen = true;
+ break;
+ }
bool done = false;
/* surely there exists a nice template to go with this macro to make
@@ -260,7 +260,7 @@ void applyGreyOverrides(Grey *g, const string &s) {
G_UPDATE(onlyOneOutfix);
G_UPDATE(allowShermanStates);
G_UPDATE(allowMcClellan8);
- G_UPDATE(allowWideStates);
+ G_UPDATE(allowWideStates);
G_UPDATE(highlanderPruneDFA);
G_UPDATE(minimizeDFA);
G_UPDATE(accelerateDFA);
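For reference, the override string parsed by applyGreyOverrides() above is a ';'-separated list of key:value pairs whose values must parse as unsigned ints; a non-numeric value takes the bad_lexical_cast error path shown. A hypothetical example string, using keys from the table above:

    /* e.g. disable wide McClellan8 states, keep DFA acceleration on */
    static const char grey_overrides_example[] =
        "allowWideStates:0;accelerateDFA:1;";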
diff --git a/contrib/libs/hyperscan/src/grey.h b/contrib/libs/hyperscan/src/grey.h
index 0e6ce2dc24..ed2f845a4b 100644
--- a/contrib/libs/hyperscan/src/grey.h
+++ b/contrib/libs/hyperscan/src/grey.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -87,7 +87,7 @@ struct Grey {
bool allowShermanStates;
bool allowMcClellan8;
- bool allowWideStates; // enable wide state for McClellan8
+ bool allowWideStates; // enable wide state for McClellan8
bool highlanderPruneDFA;
bool minimizeDFA;
diff --git a/contrib/libs/hyperscan/src/hs.cpp b/contrib/libs/hyperscan/src/hs.cpp
index 81e004b11c..eac588891c 100644
--- a/contrib/libs/hyperscan/src/hs.cpp
+++ b/contrib/libs/hyperscan/src/hs.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -120,10 +120,10 @@ bool checkMode(unsigned int mode, hs_compile_error **comp_error) {
static
bool checkPlatform(const hs_platform_info *p, hs_compile_error **comp_error) {
- static constexpr u32 HS_TUNE_LAST = HS_TUNE_FAMILY_ICX;
+ static constexpr u32 HS_TUNE_LAST = HS_TUNE_FAMILY_ICX;
static constexpr u32 HS_CPU_FEATURES_ALL =
- HS_CPU_FEATURES_AVX2 | HS_CPU_FEATURES_AVX512 |
- HS_CPU_FEATURES_AVX512VBMI;
+ HS_CPU_FEATURES_AVX2 | HS_CPU_FEATURES_AVX512 |
+ HS_CPU_FEATURES_AVX512VBMI;
if (!p) {
return true;
@@ -246,13 +246,13 @@ hs_compile_multi_int(const char *const *expressions, const unsigned *flags,
}
}
- // Check sub-expression ids
- ng.rm.pl.validateSubIDs(ids, expressions, flags, elements);
- // Renumber and assign lkey to reports
- ng.rm.logicalKeyRenumber();
-
+ // Check sub-expression ids
+ ng.rm.pl.validateSubIDs(ids, expressions, flags, elements);
+ // Renumber and assign lkey to reports
+ ng.rm.logicalKeyRenumber();
+
unsigned length = 0;
- struct hs_database *out = build(ng, &length, 0);
+ struct hs_database *out = build(ng, &length, 0);
assert(out); // should have thrown exception on error
assert(length);
@@ -282,130 +282,130 @@ hs_compile_multi_int(const char *const *expressions, const unsigned *flags,
}
}
-hs_error_t
-hs_compile_lit_multi_int(const char *const *expressions, const unsigned *flags,
- const unsigned *ids, const hs_expr_ext *const *ext,
- const size_t *lens, unsigned elements, unsigned mode,
- const hs_platform_info_t *platform, hs_database_t **db,
- hs_compile_error_t **comp_error, const Grey &g) {
- // Check the args: note that it's OK for flags, ids or ext to be null.
- if (!comp_error) {
- if (db) {
- *db = nullptr;
- }
- // nowhere to write the string, but we can still report an error code
- return HS_COMPILER_ERROR;
- }
- if (!db) {
- *comp_error = generateCompileError("Invalid parameter: db is NULL", -1);
- return HS_COMPILER_ERROR;
- }
- if (!expressions) {
- *db = nullptr;
- *comp_error
- = generateCompileError("Invalid parameter: expressions is NULL",
- -1);
- return HS_COMPILER_ERROR;
- }
- if (!lens) {
- *db = nullptr;
- *comp_error = generateCompileError("Invalid parameter: len is NULL", -1);
- return HS_COMPILER_ERROR;
- }
- if (elements == 0) {
- *db = nullptr;
- *comp_error = generateCompileError("Invalid parameter: elements is zero", -1);
- return HS_COMPILER_ERROR;
- }
-
-#if defined(FAT_RUNTIME)
- if (!check_ssse3()) {
- *db = nullptr;
- *comp_error = generateCompileError("Unsupported architecture", -1);
- return HS_ARCH_ERROR;
- }
-#endif
-
- if (!checkMode(mode, comp_error)) {
- *db = nullptr;
- assert(*comp_error); // set by checkMode.
- return HS_COMPILER_ERROR;
- }
-
- if (!checkPlatform(platform, comp_error)) {
- *db = nullptr;
-        assert(*comp_error); // set by checkPlatform.
- return HS_COMPILER_ERROR;
- }
-
- if (elements > g.limitPatternCount) {
- *db = nullptr;
- *comp_error = generateCompileError("Number of patterns too large", -1);
- return HS_COMPILER_ERROR;
- }
-
- // This function is simply a wrapper around both the parser and compiler
- bool isStreaming = mode & (HS_MODE_STREAM | HS_MODE_VECTORED);
- bool isVectored = mode & HS_MODE_VECTORED;
- unsigned somPrecision = getSomPrecision(mode);
-
- target_t target_info = platform ? target_t(*platform)
- : get_current_target();
-
- try {
- CompileContext cc(isStreaming, isVectored, target_info, g);
- NG ng(cc, elements, somPrecision);
-
- for (unsigned int i = 0; i < elements; i++) {
- // Add this expression to the compiler
- try {
- addLitExpression(ng, i, expressions[i], flags ? flags[i] : 0,
- ext ? ext[i] : nullptr, ids ? ids[i] : 0,
- lens[i]);
- } catch (CompileError &e) {
- /* Caught a parse error;
- * throw it upstream as a CompileError with a specific index */
- e.setExpressionIndex(i);
- throw; /* do not slice */
- }
- }
-
- // Check sub-expression ids
- ng.rm.pl.validateSubIDs(ids, expressions, flags, elements);
- // Renumber and assign lkey to reports
- ng.rm.logicalKeyRenumber();
-
- unsigned length = 0;
- struct hs_database *out = build(ng, &length, 1);
-
-        assert(out); // should have thrown exception on error
- assert(length);
-
- *db = out;
- *comp_error = nullptr;
-
- return HS_SUCCESS;
- }
- catch (const CompileError &e) {
- // Compiler error occurred
- *db = nullptr;
- *comp_error = generateCompileError(e.reason,
- e.hasIndex ? (int)e.index : -1);
- return HS_COMPILER_ERROR;
- }
- catch (const std::bad_alloc &) {
- *db = nullptr;
- *comp_error = const_cast<hs_compile_error_t *>(&hs_enomem);
- return HS_COMPILER_ERROR;
- }
- catch (...) {
-        assert(!"Internal error, unexpected exception");
- *db = nullptr;
- *comp_error = const_cast<hs_compile_error_t *>(&hs_einternal);
- return HS_COMPILER_ERROR;
- }
-}
-
+hs_error_t
+hs_compile_lit_multi_int(const char *const *expressions, const unsigned *flags,
+ const unsigned *ids, const hs_expr_ext *const *ext,
+ const size_t *lens, unsigned elements, unsigned mode,
+ const hs_platform_info_t *platform, hs_database_t **db,
+ hs_compile_error_t **comp_error, const Grey &g) {
+ // Check the args: note that it's OK for flags, ids or ext to be null.
+ if (!comp_error) {
+ if (db) {
+ *db = nullptr;
+ }
+ // nowhere to write the string, but we can still report an error code
+ return HS_COMPILER_ERROR;
+ }
+ if (!db) {
+ *comp_error = generateCompileError("Invalid parameter: db is NULL", -1);
+ return HS_COMPILER_ERROR;
+ }
+ if (!expressions) {
+ *db = nullptr;
+ *comp_error
+ = generateCompileError("Invalid parameter: expressions is NULL",
+ -1);
+ return HS_COMPILER_ERROR;
+ }
+ if (!lens) {
+ *db = nullptr;
+ *comp_error = generateCompileError("Invalid parameter: len is NULL", -1);
+ return HS_COMPILER_ERROR;
+ }
+ if (elements == 0) {
+ *db = nullptr;
+ *comp_error = generateCompileError("Invalid parameter: elements is zero", -1);
+ return HS_COMPILER_ERROR;
+ }
+
+#if defined(FAT_RUNTIME)
+ if (!check_ssse3()) {
+ *db = nullptr;
+ *comp_error = generateCompileError("Unsupported architecture", -1);
+ return HS_ARCH_ERROR;
+ }
+#endif
+
+ if (!checkMode(mode, comp_error)) {
+ *db = nullptr;
+ assert(*comp_error); // set by checkMode.
+ return HS_COMPILER_ERROR;
+ }
+
+ if (!checkPlatform(platform, comp_error)) {
+ *db = nullptr;
+        assert(*comp_error); // set by checkPlatform.
+ return HS_COMPILER_ERROR;
+ }
+
+ if (elements > g.limitPatternCount) {
+ *db = nullptr;
+ *comp_error = generateCompileError("Number of patterns too large", -1);
+ return HS_COMPILER_ERROR;
+ }
+
+ // This function is simply a wrapper around both the parser and compiler
+ bool isStreaming = mode & (HS_MODE_STREAM | HS_MODE_VECTORED);
+ bool isVectored = mode & HS_MODE_VECTORED;
+ unsigned somPrecision = getSomPrecision(mode);
+
+ target_t target_info = platform ? target_t(*platform)
+ : get_current_target();
+
+ try {
+ CompileContext cc(isStreaming, isVectored, target_info, g);
+ NG ng(cc, elements, somPrecision);
+
+ for (unsigned int i = 0; i < elements; i++) {
+ // Add this expression to the compiler
+ try {
+ addLitExpression(ng, i, expressions[i], flags ? flags[i] : 0,
+ ext ? ext[i] : nullptr, ids ? ids[i] : 0,
+ lens[i]);
+ } catch (CompileError &e) {
+ /* Caught a parse error;
+ * throw it upstream as a CompileError with a specific index */
+ e.setExpressionIndex(i);
+ throw; /* do not slice */
+ }
+ }
+
+ // Check sub-expression ids
+ ng.rm.pl.validateSubIDs(ids, expressions, flags, elements);
+ // Renumber and assign lkey to reports
+ ng.rm.logicalKeyRenumber();
+
+ unsigned length = 0;
+ struct hs_database *out = build(ng, &length, 1);
+
+        assert(out); // should have thrown exception on error
+ assert(length);
+
+ *db = out;
+ *comp_error = nullptr;
+
+ return HS_SUCCESS;
+ }
+ catch (const CompileError &e) {
+ // Compiler error occurred
+ *db = nullptr;
+ *comp_error = generateCompileError(e.reason,
+ e.hasIndex ? (int)e.index : -1);
+ return HS_COMPILER_ERROR;
+ }
+ catch (const std::bad_alloc &) {
+ *db = nullptr;
+ *comp_error = const_cast<hs_compile_error_t *>(&hs_enomem);
+ return HS_COMPILER_ERROR;
+ }
+ catch (...) {
+        assert(!"Internal error, unexpected exception");
+ *db = nullptr;
+ *comp_error = const_cast<hs_compile_error_t *>(&hs_einternal);
+ return HS_COMPILER_ERROR;
+ }
+}
+
} // namespace ue2
extern "C" HS_PUBLIC_API
@@ -451,41 +451,41 @@ hs_error_t HS_CDECL hs_compile_ext_multi(const char * const *expressions,
platform, db, error, Grey());
}
-extern "C" HS_PUBLIC_API
-hs_error_t HS_CDECL hs_compile_lit(const char *expression, unsigned flags,
- const size_t len, unsigned mode,
- const hs_platform_info_t *platform,
- hs_database_t **db,
- hs_compile_error_t **error) {
- if (expression == nullptr) {
- *db = nullptr;
- *error = generateCompileError("Invalid parameter: expression is NULL",
- -1);
- return HS_COMPILER_ERROR;
- }
-
- unsigned id = 0; // single expressions get zero as an ID
- const hs_expr_ext * const *ext = nullptr; // unused for this call.
-
- return hs_compile_lit_multi_int(&expression, &flags, &id, ext, &len, 1,
- mode, platform, db, error, Grey());
-}
-
-extern "C" HS_PUBLIC_API
-hs_error_t HS_CDECL hs_compile_lit_multi(const char * const *expressions,
- const unsigned *flags,
- const unsigned *ids,
- const size_t *lens,
- unsigned elements, unsigned mode,
- const hs_platform_info_t *platform,
- hs_database_t **db,
- hs_compile_error_t **error) {
- const hs_expr_ext * const *ext = nullptr; // unused for this call.
- return hs_compile_lit_multi_int(expressions, flags, ids, ext, lens,
- elements, mode, platform, db, error,
- Grey());
-}
-
+extern "C" HS_PUBLIC_API
+hs_error_t HS_CDECL hs_compile_lit(const char *expression, unsigned flags,
+ const size_t len, unsigned mode,
+ const hs_platform_info_t *platform,
+ hs_database_t **db,
+ hs_compile_error_t **error) {
+ if (expression == nullptr) {
+ *db = nullptr;
+ *error = generateCompileError("Invalid parameter: expression is NULL",
+ -1);
+ return HS_COMPILER_ERROR;
+ }
+
+ unsigned id = 0; // single expressions get zero as an ID
+ const hs_expr_ext * const *ext = nullptr; // unused for this call.
+
+ return hs_compile_lit_multi_int(&expression, &flags, &id, ext, &len, 1,
+ mode, platform, db, error, Grey());
+}
+
+extern "C" HS_PUBLIC_API
+hs_error_t HS_CDECL hs_compile_lit_multi(const char * const *expressions,
+ const unsigned *flags,
+ const unsigned *ids,
+ const size_t *lens,
+ unsigned elements, unsigned mode,
+ const hs_platform_info_t *platform,
+ hs_database_t **db,
+ hs_compile_error_t **error) {
+ const hs_expr_ext * const *ext = nullptr; // unused for this call.
+ return hs_compile_lit_multi_int(expressions, flags, ids, ext, lens,
+ elements, mode, platform, db, error,
+ Grey());
+}
+
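A minimal usage sketch for the pure-literal compiler restored above; the explicit length parameter is what allows literals containing embedded NUL bytes (error handling kept deliberately small):

    #include <hs/hs.h>
    #include <stdio.h>

    int main(void) {
        hs_database_t *db = NULL;
        hs_compile_error_t *err = NULL;
        const char lit[] = "abc\0def";      /* 7 bytes, with embedded NUL */
        if (hs_compile_lit(lit, 0, sizeof(lit) - 1, HS_MODE_BLOCK,
                           NULL, &db, &err) != HS_SUCCESS) {
            fprintf(stderr, "compile failed: %s\n", err->message);
            hs_free_compile_error(err);
            return 1;
        }
        hs_free_database(db);
        return 0;
    }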
static
hs_error_t hs_expression_info_int(const char *expression, unsigned int flags,
const hs_expr_ext_t *ext, unsigned int mode,
diff --git a/contrib/libs/hyperscan/src/hs.h b/contrib/libs/hyperscan/src/hs.h
index fa2fbbe00b..2fe5d248b7 100644
--- a/contrib/libs/hyperscan/src/hs.h
+++ b/contrib/libs/hyperscan/src/hs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -39,12 +39,12 @@
* the individual component headers for documentation.
*/
-/* The current Hyperscan version information. */
-
-#define HS_MAJOR 5
-#define HS_MINOR 4
-#define HS_PATCH 0
-
+/* The current Hyperscan version information. */
+
+#define HS_MAJOR 5
+#define HS_MINOR 4
+#define HS_PATCH 0
+
#include "hs_compile.h"
#include "hs_runtime.h"
diff --git a/contrib/libs/hyperscan/src/hs_common.h b/contrib/libs/hyperscan/src/hs_common.h
index afc436da51..93dc1fe8a1 100644
--- a/contrib/libs/hyperscan/src/hs_common.h
+++ b/contrib/libs/hyperscan/src/hs_common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -577,16 +577,16 @@ hs_error_t HS_CDECL hs_valid_platform(void);
*/
#define HS_INSUFFICIENT_SPACE (-12)
-/**
- * Unexpected internal error.
- *
- * This error indicates unexpected matching behavior. It may be caused by
- * invalid usage of stream or scratch space, or by invalid memory
- * operations by users.
- *
- */
-#define HS_UNKNOWN_ERROR (-13)
-
+/**
+ * Unexpected internal error.
+ *
+ * This error indicates unexpected matching behavior. It may be caused by
+ * invalid usage of stream or scratch space, or by invalid memory
+ * operations by users.
+ *
+ */
+#define HS_UNKNOWN_ERROR (-13)
+
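A sketch of mapping this code (alongside its neighbours) to text on the caller side; hs_strerror_lite() is illustrative, not part of the API:

    static const char *hs_strerror_lite(hs_error_t e) {
        switch (e) {
        case HS_SUCCESS:            return "success";
        case HS_SCAN_TERMINATED:    return "scan terminated by callback";
        case HS_INSUFFICIENT_SPACE: return "output buffer too small";
        case HS_UNKNOWN_ERROR:      return "unexpected internal error";
        default:                    return "other hs_error_t value";
        }
    }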
/** @} */
#ifdef __cplusplus
diff --git a/contrib/libs/hyperscan/src/hs_compile.h b/contrib/libs/hyperscan/src/hs_compile.h
index 86ea5ee291..b318c29db1 100644
--- a/contrib/libs/hyperscan/src/hs_compile.h
+++ b/contrib/libs/hyperscan/src/hs_compile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -323,10 +323,10 @@ typedef struct hs_expr_ext {
* - HS_FLAG_PREFILTER - Compile pattern in prefiltering mode.
* - HS_FLAG_SOM_LEFTMOST - Report the leftmost start of match offset
* when a match is found.
- * - HS_FLAG_COMBINATION - Parse the expression in logical combination
- * syntax.
- * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
- * the sub-expressions in logical combinations.
+ * - HS_FLAG_COMBINATION - Parse the expression in logical combination
+ * syntax.
+ * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
+ * the sub-expressions in logical combinations.
*
* @param mode
* Compiler mode flags that affect the database as a whole. One of @ref
@@ -396,10 +396,10 @@ hs_error_t HS_CDECL hs_compile(const char *expression, unsigned int flags,
* - HS_FLAG_PREFILTER - Compile pattern in prefiltering mode.
* - HS_FLAG_SOM_LEFTMOST - Report the leftmost start of match offset
* when a match is found.
- * - HS_FLAG_COMBINATION - Parse the expression in logical combination
- * syntax.
- * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
- * the sub-expressions in logical combinations.
+ * - HS_FLAG_COMBINATION - Parse the expression in logical combination
+ * syntax.
+ * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
+ * the sub-expressions in logical combinations.
*
* @param ids
* An array of integers specifying the ID number to be associated with the
@@ -480,10 +480,10 @@ hs_error_t HS_CDECL hs_compile_multi(const char *const *expressions,
* - HS_FLAG_PREFILTER - Compile pattern in prefiltering mode.
* - HS_FLAG_SOM_LEFTMOST - Report the leftmost start of match offset
* when a match is found.
- * - HS_FLAG_COMBINATION - Parse the expression in logical combination
- * syntax.
- * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
- * the sub-expressions in logical combinations.
+ * - HS_FLAG_COMBINATION - Parse the expression in logical combination
+ * syntax.
+ * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
+ * the sub-expressions in logical combinations.
*
* @param ids
* An array of integers specifying the ID number to be associated with the
@@ -540,163 +540,163 @@ hs_error_t HS_CDECL hs_compile_ext_multi(const char *const *expressions,
hs_database_t **db, hs_compile_error_t **error);
/**
- * The basic pure literal expression compiler.
- *
- * This is the function call with which a pure literal expression (not a
- * common regular expression) is compiled into a Hyperscan database which
- * can be passed to the runtime functions (such as @ref hs_scan(),
- * @ref hs_open_stream(), etc.)
- *
- * @param expression
- * The NULL-terminated expression to parse. Note that this string must
- * represent ONLY the pattern to be matched, with no delimiters or flags;
- * any global flags should be specified with the @p flags argument. For
- * example, the expression `/abc?def/i` should be compiled by providing
- * `abc?def` as the @p expression, and @ref HS_FLAG_CASELESS as the @a
- * flags. Meanwhile, the string content shall be fully parsed in a literal
- * sense without any regular grammars. For example, the @p expression
- * `abc?` simply means a char sequence of `a`, `b`, `c`, and `?`. The `?`
- * here doesn't mean 0 or 1 quantifier under regular semantics.
- *
- * @param flags
- * Flags which modify the behaviour of the expression. Multiple flags may
- * be used by ORing them together. Compared to @ref hs_compile(), fewer
- * valid values are provided:
- * - HS_FLAG_CASELESS - Matching will be performed case-insensitively.
- * - HS_FLAG_SINGLEMATCH - Only one match will be generated for the
- * expression per stream.
- * - HS_FLAG_SOM_LEFTMOST - Report the leftmost start of match offset
- * when a match is found.
- *
- * @param len
- * The length of the text content of the pure literal expression. As the
- * text content indicated by @p expression is treated as single character
- * one by one, the special terminating character `\0` should be allowed
- * to appear in expression, and not treated as a terminator for a string.
- * Thus, the end of a pure literal expression cannot be indicated by
- * identifying `\0`, but by counting to the expression length.
- *
- * @param mode
- * Compiler mode flags that affect the database as a whole. One of @ref
- * HS_MODE_STREAM or @ref HS_MODE_BLOCK or @ref HS_MODE_VECTORED must be
- * supplied, to select between the generation of a streaming, block or
- * vectored database. In addition, other flags (beginning with HS_MODE_)
- * may be supplied to enable specific features. See @ref HS_MODE_FLAG for
- * more details.
- *
- * @param platform
- * If not NULL, the platform structure is used to determine the target
- * platform for the database. If NULL, a database suitable for running
- * on the current host platform is produced.
- *
- * @param db
- * On success, a pointer to the generated database will be returned in
- * this parameter, or NULL on failure. The caller is responsible for
- * deallocating the buffer using the @ref hs_free_database() function.
- *
- * @param error
- * If the compile fails, a pointer to a @ref hs_compile_error_t will be
- * returned, providing details of the error condition. The caller is
- * responsible for deallocating the buffer using the @ref
- * hs_free_compile_error() function.
- *
- * @return
- * @ref HS_SUCCESS is returned on successful compilation; @ref
- * HS_COMPILER_ERROR on failure, with details provided in the error
- * parameter.
- */
-hs_error_t HS_CDECL hs_compile_lit(const char *expression, unsigned flags,
- const size_t len, unsigned mode,
- const hs_platform_info_t *platform,
- hs_database_t **db,
- hs_compile_error_t **error);
-/**
- * The multiple pure literal expression compiler.
- *
- * This is the function call with which a set of pure literal expressions is
- * compiled into a database which can be passed to the runtime functions (such
- * as @ref hs_scan(), @ref hs_open_stream(), etc.) Each expression can be
- * labelled with a unique integer which is passed into the match callback to
- * identify the pattern that has matched.
- *
- * @param expressions
- * The NULL-terminated expression to parse. Note that this string must
- * represent ONLY the pattern to be matched, with no delimiters or flags;
- * any global flags should be specified with the @p flags argument. For
- * example, the expression `/abc?def/i` should be compiled by providing
- * `abc?def` as the @p expression, and @ref HS_FLAG_CASELESS as the @a
- * flags. Meanwhile, the string content shall be fully parsed in a literal
- * sense without any regular grammars. For example, the @p expression
- * `abc?` simply means a char sequence of `a`, `b`, `c`, and `?`. The `?`
- * here doesn't mean 0 or 1 quantifier under regular semantics.
- *
- * @param flags
- * Array of flags which modify the behaviour of each expression. Multiple
- * flags may be used by ORing them together. Specifying the NULL pointer
- * in place of an array will set the flags value for all patterns to zero.
- * Compared to @ref hs_compile_multi(), fewer valid values are provided:
- * - HS_FLAG_CASELESS - Matching will be performed case-insensitively.
- * - HS_FLAG_SINGLEMATCH - Only one match will be generated for the
- * expression per stream.
- * - HS_FLAG_SOM_LEFTMOST - Report the leftmost start of match offset
- * when a match is found.
- *
- * @param ids
- * An array of integers specifying the ID number to be associated with the
- * corresponding pattern in the expressions array. Specifying the NULL
- * pointer in place of an array will set the ID value for all patterns to
- * zero.
- *
- * @param lens
- * Array of lengths of the text content of each pure literal expression.
- * As the text content indicated by @p expression is treated as single
- * character one by one, the special terminating character `\0` should be
- * allowed to appear in expression, and not treated as a terminator for a
- * string. Thus, the end of a pure literal expression cannot be indicated
- * by identifying `\0`, but by counting to the expression length.
- *
- * @param elements
- * The number of elements in the input arrays.
- *
- * @param mode
- * Compiler mode flags that affect the database as a whole. One of @ref
- * HS_MODE_STREAM or @ref HS_MODE_BLOCK or @ref HS_MODE_VECTORED must be
- * supplied, to select between the generation of a streaming, block or
- * vectored database. In addition, other flags (beginning with HS_MODE_)
- * may be supplied to enable specific features. See @ref HS_MODE_FLAG for
- * more details.
- *
- * @param platform
- * If not NULL, the platform structure is used to determine the target
- * platform for the database. If NULL, a database suitable for running
- * on the current host platform is produced.
- *
- * @param db
- * On success, a pointer to the generated database will be returned in
- * this parameter, or NULL on failure. The caller is responsible for
- * deallocating the buffer using the @ref hs_free_database() function.
- *
- * @param error
- * If the compile fails, a pointer to a @ref hs_compile_error_t will be
- * returned, providing details of the error condition. The caller is
- * responsible for deallocating the buffer using the @ref
- * hs_free_compile_error() function.
- *
- * @return
- * @ref HS_SUCCESS is returned on successful compilation; @ref
- * HS_COMPILER_ERROR on failure, with details provided in the error
- * parameter.
- */
-hs_error_t HS_CDECL hs_compile_lit_multi(const char * const *expressions,
- const unsigned *flags,
- const unsigned *ids,
- const size_t *lens,
- unsigned elements, unsigned mode,
- const hs_platform_info_t *platform,
- hs_database_t **db,
- hs_compile_error_t **error);
-
-/**
+ * The basic pure literal expression compiler.
+ *
+ * This is the function call with which a pure literal expression (as
+ * opposed to a common regular expression) is compiled into a Hyperscan
+ * database which can be passed to the runtime functions (such as
+ * @ref hs_scan(), @ref hs_open_stream(), etc.)
+ *
+ * @param expression
+ * The expression to parse. Note that this string must represent ONLY
+ * the pattern to be matched, with no delimiters or flags; any global
+ * flags should be specified with the @p flags argument. For example,
+ * the expression `/abc?def/i` should be compiled by providing
+ * `abc?def` as the @p expression, and @ref HS_FLAG_CASELESS as the @p
+ * flags. The string content is parsed purely literally, with no
+ * regular-expression grammar: the @p expression `abc?` simply means
+ * the character sequence `a`, `b`, `c`, `?`, where `?` is not a
+ * zero-or-one quantifier as it would be under regular semantics.
+ *
+ * @param flags
+ * Flags which modify the behaviour of the expression. Multiple flags may
+ * be used by ORing them together. Compared to @ref hs_compile(), fewer
+ * valid values are provided:
+ * - HS_FLAG_CASELESS - Matching will be performed case-insensitively.
+ * - HS_FLAG_SINGLEMATCH - Only one match will be generated for the
+ * expression per stream.
+ * - HS_FLAG_SOM_LEFTMOST - Report the leftmost start of match offset
+ * when a match is found.
+ *
+ * @param len
+ * The length of the text content of the pure literal expression. As the
+ * content of @p expression is treated character by character, the special
+ * character `\0` may appear within the expression and is not treated as a
+ * string terminator. Thus, the end of a pure literal expression is
+ * determined by this length, not by locating `\0`.
+ *
+ * @param mode
+ * Compiler mode flags that affect the database as a whole. One of @ref
+ * HS_MODE_STREAM or @ref HS_MODE_BLOCK or @ref HS_MODE_VECTORED must be
+ * supplied, to select between the generation of a streaming, block or
+ * vectored database. In addition, other flags (beginning with HS_MODE_)
+ * may be supplied to enable specific features. See @ref HS_MODE_FLAG for
+ * more details.
+ *
+ * @param platform
+ * If not NULL, the platform structure is used to determine the target
+ * platform for the database. If NULL, a database suitable for running
+ * on the current host platform is produced.
+ *
+ * @param db
+ * On success, a pointer to the generated database will be returned in
+ * this parameter, or NULL on failure. The caller is responsible for
+ * deallocating the buffer using the @ref hs_free_database() function.
+ *
+ * @param error
+ * If the compile fails, a pointer to a @ref hs_compile_error_t will be
+ * returned, providing details of the error condition. The caller is
+ * responsible for deallocating the buffer using the @ref
+ * hs_free_compile_error() function.
+ *
+ * @return
+ * @ref HS_SUCCESS is returned on successful compilation; @ref
+ * HS_COMPILER_ERROR on failure, with details provided in the error
+ * parameter.
+ */
+hs_error_t HS_CDECL hs_compile_lit(const char *expression, unsigned flags,
+ const size_t len, unsigned mode,
+ const hs_platform_info_t *platform,
+ hs_database_t **db,
+ hs_compile_error_t **error);
+/**
+ * The multiple pure literal expression compiler.
+ *
+ * This is the function call with which a set of pure literal expressions is
+ * compiled into a database which can be passed to the runtime functions (such
+ * as @ref hs_scan(), @ref hs_open_stream(), etc.) Each expression can be
+ * labelled with a unique integer which is passed into the match callback to
+ * identify the pattern that has matched.
+ *
+ * @param expressions
+ * An array of expressions to parse. Note that each string must represent
+ * ONLY the pattern to be matched, with no delimiters or flags; any global
+ * flags should be specified with the @p flags argument. For example, the
+ * expression `/abc?def/i` should be compiled by providing `abc?def` as an
+ * expression, and @ref HS_FLAG_CASELESS in the corresponding @p flags
+ * entry. The string content is parsed purely literally, with no
+ * regular-expression grammar: the expression `abc?` simply means the
+ * character sequence `a`, `b`, `c`, `?`, where `?` is not a zero-or-one
+ * quantifier as it would be under regular semantics.
+ *
+ * @param flags
+ * Array of flags which modify the behaviour of each expression. Multiple
+ * flags may be used by ORing them together. Specifying the NULL pointer
+ * in place of an array will set the flags value for all patterns to zero.
+ * Compared to @ref hs_compile_multi(), fewer valid values are provided:
+ * - HS_FLAG_CASELESS - Matching will be performed case-insensitively.
+ * - HS_FLAG_SINGLEMATCH - Only one match will be generated for the
+ * expression per stream.
+ * - HS_FLAG_SOM_LEFTMOST - Report the leftmost start of match offset
+ * when a match is found.
+ *
+ * @param ids
+ * An array of integers specifying the ID number to be associated with the
+ * corresponding pattern in the expressions array. Specifying the NULL
+ * pointer in place of an array will set the ID value for all patterns to
+ * zero.
+ *
+ * @param lens
+ * Array of lengths of the text content of each pure literal expression.
+ * As each expression is treated character by character, the special
+ * character `\0` may appear within an expression and is not treated as a
+ * string terminator. Thus, the end of each pure literal expression is
+ * determined by the corresponding length entry, not by locating `\0`.
+ *
+ * @param elements
+ * The number of elements in the input arrays.
+ *
+ * @param mode
+ * Compiler mode flags that affect the database as a whole. One of @ref
+ * HS_MODE_STREAM or @ref HS_MODE_BLOCK or @ref HS_MODE_VECTORED must be
+ * supplied, to select between the generation of a streaming, block or
+ * vectored database. In addition, other flags (beginning with HS_MODE_)
+ * may be supplied to enable specific features. See @ref HS_MODE_FLAG for
+ * more details.
+ *
+ * @param platform
+ * If not NULL, the platform structure is used to determine the target
+ * platform for the database. If NULL, a database suitable for running
+ * on the current host platform is produced.
+ *
+ * @param db
+ * On success, a pointer to the generated database will be returned in
+ * this parameter, or NULL on failure. The caller is responsible for
+ * deallocating the buffer using the @ref hs_free_database() function.
+ *
+ * @param error
+ * If the compile fails, a pointer to a @ref hs_compile_error_t will be
+ * returned, providing details of the error condition. The caller is
+ * responsible for deallocating the buffer using the @ref
+ * hs_free_compile_error() function.
+ *
+ * @return
+ * @ref HS_SUCCESS is returned on successful compilation; @ref
+ * HS_COMPILER_ERROR on failure, with details provided in the error
+ * parameter.
+ */
+hs_error_t HS_CDECL hs_compile_lit_multi(const char * const *expressions,
+ const unsigned *flags,
+ const unsigned *ids,
+ const size_t *lens,
+ unsigned elements, unsigned mode,
+ const hs_platform_info_t *platform,
+ hs_database_t **db,
+ hs_compile_error_t **error);
+
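Since @p len, not a NUL terminator, bounds each literal, a pattern may legitimately contain `\0`; a minimal sketch using the single-pattern entry point declared above (compile_with_nul is an illustrative name):

    static hs_database_t *compile_with_nul(void) {
        /* the 5-byte literal 'a' 'b' '\0' 'c' 'd' */
        static const char pat[] = {'a', 'b', '\0', 'c', 'd'};
        hs_database_t *db = NULL;
        hs_compile_error_t *err = NULL;
        if (hs_compile_lit(pat, 0, sizeof(pat), HS_MODE_BLOCK,
                           NULL, &db, &err) != HS_SUCCESS) {
            hs_free_compile_error(err);
            return NULL;
        }
        return db;
    }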
+/**
* Free an error structure generated by @ref hs_compile(), @ref
* hs_compile_multi() or @ref hs_compile_ext_multi().
*
@@ -748,10 +748,10 @@ hs_error_t HS_CDECL hs_free_compile_error(hs_compile_error_t *error);
* - HS_FLAG_PREFILTER - Compile pattern in prefiltering mode.
* - HS_FLAG_SOM_LEFTMOST - Report the leftmost start of match offset
* when a match is found.
- * - HS_FLAG_COMBINATION - Parse the expression in logical combination
- * syntax.
- * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
- * the sub-expressions in logical combinations.
+ * - HS_FLAG_COMBINATION - Parse the expression in logical combination
+ * syntax.
+ * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
+ * the sub-expressions in logical combinations.
*
* @param info
* On success, a pointer to the pattern information will be returned in
@@ -814,10 +814,10 @@ hs_error_t HS_CDECL hs_expression_info(const char *expression,
* - HS_FLAG_PREFILTER - Compile pattern in prefiltering mode.
* - HS_FLAG_SOM_LEFTMOST - Report the leftmost start of match offset
* when a match is found.
- * - HS_FLAG_COMBINATION - Parse the expression in logical combination
- * syntax.
- * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
- * the sub-expressions in logical combinations.
+ * - HS_FLAG_COMBINATION - Parse the expression in logical combination
+ * syntax.
+ * - HS_FLAG_QUIET - Ignore match reporting for this expression. Used for
+ * the sub-expressions in logical combinations.
*
* @param ext
* A pointer to a filled @ref hs_expr_ext_t structure that defines
@@ -983,33 +983,33 @@ hs_error_t HS_CDECL hs_populate_platform(hs_platform_info_t *platform);
* offset when a match is reported for this expression. (By default, no start
* of match is returned.)
*
- * For all the 3 modes, enabling this behaviour may reduce performance. And
- * particularly, it may increase stream state requirements in streaming mode.
+ * For all three modes, enabling this behaviour may reduce performance. In
+ * particular, it may increase stream state requirements in streaming mode.
*/
#define HS_FLAG_SOM_LEFTMOST 256
-/**
- * Compile flag: Logical combination.
- *
- * This flag instructs Hyperscan to parse this expression as logical
- * combination syntax.
- * Logical constraints consist of operands, operators and parentheses.
- * The operands are expression indices, and operators can be
- * '!'(NOT), '&'(AND) or '|'(OR).
- * For example:
- * (101&102&103)|(104&!105)
- * ((301|302)&303)&(304|305)
- */
-#define HS_FLAG_COMBINATION 512
-
-/**
- * Compile flag: Don't do any match reporting.
- *
- * This flag instructs Hyperscan to ignore match reporting for this expression.
- * It is designed to be used on the sub-expressions in logical combinations.
- */
-#define HS_FLAG_QUIET 1024
-
+/**
+ * Compile flag: Logical combination.
+ *
+ * This flag instructs Hyperscan to parse this expression in logical
+ * combination syntax.
+ * Logical constraints consist of operands, operators and parentheses.
+ * The operands are expression indices, and operators can be
+ * '!'(NOT), '&'(AND) or '|'(OR).
+ * For example:
+ * (101&102&103)|(104&!105)
+ * ((301|302)&303)&(304|305)
+ */
+#define HS_FLAG_COMBINATION 512
+
+/**
+ * Compile flag: Don't do any match reporting.
+ *
+ * This flag instructs Hyperscan to ignore match reporting for this expression.
+ * It is designed to be used on the sub-expressions in logical combinations.
+ */
+#define HS_FLAG_QUIET 1024
+
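Taken together, the two flags are typically used as below: the sub-expressions are compiled quiet, and only the combination reports. A fragment assumed to run inside a function; patterns and IDs are arbitrary:

    const char *exprs[] = {"abc.*def", "ghi[0-9]+", "101&102"};
    const unsigned flags[] = {HS_FLAG_QUIET, HS_FLAG_QUIET,
                              HS_FLAG_COMBINATION};
    const unsigned ids[] = {101, 102, 1000};
    hs_database_t *db = NULL;
    hs_compile_error_t *err = NULL;
    /* Only ID 1000 reports, and only when both 101 and 102 have matched. */
    hs_error_t rc = hs_compile_multi(exprs, flags, ids, 3, HS_MODE_BLOCK,
                                     NULL, &db, &err);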
/** @} */
/**
@@ -1034,15 +1034,15 @@ hs_error_t HS_CDECL hs_populate_platform(hs_platform_info_t *platform);
*/
#define HS_CPU_FEATURES_AVX512 (1ULL << 3)
-/**
- * CPU features flag - Intel(R) Advanced Vector Extensions 512
- * Vector Byte Manipulation Instructions (Intel(R) AVX512VBMI)
- *
- * Setting this flag indicates that the target platform supports AVX512VBMI
- * instructions. Using AVX512VBMI implies the use of AVX512.
- */
-#define HS_CPU_FEATURES_AVX512VBMI (1ULL << 4)
-
+/**
+ * CPU features flag - Intel(R) Advanced Vector Extensions 512
+ * Vector Byte Manipulation Instructions (Intel(R) AVX512VBMI)
+ *
+ * Setting this flag indicates that the target platform supports AVX512VBMI
+ * instructions. Using AVX512VBMI implies the use of AVX512.
+ */
+#define HS_CPU_FEATURES_AVX512VBMI (1ULL << 4)
+
/** @} */
/**
@@ -1123,22 +1123,22 @@ hs_error_t HS_CDECL hs_populate_platform(hs_platform_info_t *platform);
*/
#define HS_TUNE_FAMILY_GLM 8
-/**
- * Tuning Parameter - Intel(R) microarchitecture code name Icelake
- *
- * This indicates that the compiled database should be tuned for the
- * Icelake microarchitecture.
- */
-#define HS_TUNE_FAMILY_ICL 9
-
-/**
- * Tuning Parameter - Intel(R) microarchitecture code name Icelake Server
- *
- * This indicates that the compiled database should be tuned for the
- * Icelake Server microarchitecture.
- */
-#define HS_TUNE_FAMILY_ICX 10
-
+/**
+ * Tuning Parameter - Intel(R) microarchitecture code name Icelake
+ *
+ * This indicates that the compiled database should be tuned for the
+ * Icelake microarchitecture.
+ */
+#define HS_TUNE_FAMILY_ICL 9
+
+/**
+ * Tuning Parameter - Intel(R) microarchitecture code name Icelake Server
+ *
+ * This indicates that the compiled database should be tuned for the
+ * Icelake Server microarchitecture.
+ */
+#define HS_TUNE_FAMILY_ICX 10
+
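A fragment showing how a tune value and CPU feature flags combine in @ref hs_platform_info (a sketch: cross-compiling for an Icelake Server target from any host):

    hs_platform_info_t plat;
    plat.tune = HS_TUNE_FAMILY_ICX;
    plat.cpu_features = HS_CPU_FEATURES_AVX512 | HS_CPU_FEATURES_AVX512VBMI;
    plat.reserved1 = 0;
    plat.reserved2 = 0;
    /* pass &plat as the platform argument to any hs_compile*() call */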
/** @} */
/**
diff --git a/contrib/libs/hyperscan/src/hs_internal.h b/contrib/libs/hyperscan/src/hs_internal.h
index 7e23220011..adf07b22cf 100644
--- a/contrib/libs/hyperscan/src/hs_internal.h
+++ b/contrib/libs/hyperscan/src/hs_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Intel Corporation
+ * Copyright (c) 2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -52,17 +52,17 @@ hs_error_t hs_compile_multi_int(const char *const *expressions,
hs_database_t **db,
hs_compile_error_t **comp_error, const Grey &g);
-/** \brief Internal use only: takes a Grey argument so that we can use it in
- * tools. */
-hs_error_t hs_compile_lit_multi_int(const char *const *expressions,
- const unsigned *flags, const unsigned *ids,
- const hs_expr_ext *const *ext,
- const size_t *lens, unsigned elements,
- unsigned mode,
- const hs_platform_info_t *platform,
- hs_database_t **db,
- hs_compile_error_t **comp_error,
- const Grey &g);
+/** \brief Internal use only: takes a Grey argument so that we can use it in
+ * tools. */
+hs_error_t hs_compile_lit_multi_int(const char *const *expressions,
+ const unsigned *flags, const unsigned *ids,
+ const hs_expr_ext *const *ext,
+ const size_t *lens, unsigned elements,
+ unsigned mode,
+ const hs_platform_info_t *platform,
+ hs_database_t **db,
+ hs_compile_error_t **comp_error,
+ const Grey &g);
} // namespace ue2
extern "C"
diff --git a/contrib/libs/hyperscan/src/hs_runtime.h b/contrib/libs/hyperscan/src/hs_runtime.h
index b443812dba..6d34b6c484 100644
--- a/contrib/libs/hyperscan/src/hs_runtime.h
+++ b/contrib/libs/hyperscan/src/hs_runtime.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -122,11 +122,11 @@ typedef struct hs_scratch hs_scratch_t;
* subsequent calls to @ref hs_scan_stream() for that stream will
* immediately return with @ref HS_SCAN_TERMINATED.
*/
-typedef int (HS_CDECL *match_event_handler)(unsigned int id,
- unsigned long long from,
- unsigned long long to,
- unsigned int flags,
- void *context);
+typedef int (HS_CDECL *match_event_handler)(unsigned int id,
+ unsigned long long from,
+ unsigned long long to,
+ unsigned int flags,
+ void *context);
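A minimal handler matching the restored typedef (a sketch: on_match is an illustrative name; it counts matches in the user context and terminates after a cap):

    static int HS_CDECL on_match(unsigned int id, unsigned long long from,
                                 unsigned long long to, unsigned int flags,
                                 void *context) {
        unsigned long long *count = (unsigned long long *)context;
        (void)id; (void)from; (void)to; (void)flags;
        /* a non-zero return tells the runtime to stop scanning */
        return ++*count >= 100 ? 1 : 0;
    }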
/**
* Open and initialise a stream.
diff --git a/contrib/libs/hyperscan/src/hwlm/hwlm_literal.cpp b/contrib/libs/hyperscan/src/hwlm/hwlm_literal.cpp
index 53c5808f69..692f7c6c0e 100644
--- a/contrib/libs/hyperscan/src/hwlm/hwlm_literal.cpp
+++ b/contrib/libs/hyperscan/src/hwlm/hwlm_literal.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/hwlm/hwlm_literal.h b/contrib/libs/hyperscan/src/hwlm/hwlm_literal.h
index f8f7588b48..598de81471 100644
--- a/contrib/libs/hyperscan/src/hwlm/hwlm_literal.h
+++ b/contrib/libs/hyperscan/src/hwlm/hwlm_literal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -121,9 +121,9 @@ struct hwlmLiteral {
u32 id_in, hwlm_group_t groups_in,
const std::vector<u8> &msk_in, const std::vector<u8> &cmp_in);
- /** \brief Simple constructor: no group information, no msk/cmp.
- *
- * This constructor is only used in internal unit test. */
+ /** \brief Simple constructor: no group information, no msk/cmp.
+ *
+ * This constructor is only used in internal unit tests. */
hwlmLiteral(const std::string &s_in, bool nocase_in, u32 id_in)
: hwlmLiteral(s_in, nocase_in, false, id_in, HWLM_ALL_GROUPS, {}, {}) {}
};
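For reference, the delegating constructor in use (hwlmLiteral is internal to ue2 and not installed; a sketch from a hypothetical unit test):

    #include "hwlm/hwlm_literal.h"  // internal header, not installed

    // "abc", case-insensitive, id 42; noruns is passed as false, groups
    // default to HWLM_ALL_GROUPS, and msk/cmp stay empty via the delegation.
    ue2::hwlmLiteral lit("abc", true, 42);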
diff --git a/contrib/libs/hyperscan/src/hwlm/noodle_engine_avx2.c b/contrib/libs/hyperscan/src/hwlm/noodle_engine_avx2.c
index ab9150f46e..5edc646af1 100644
--- a/contrib/libs/hyperscan/src/hwlm/noodle_engine_avx2.c
+++ b/contrib/libs/hyperscan/src/hwlm/noodle_engine_avx2.c
@@ -210,7 +210,7 @@ hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
const u8 *d = buf + start, *e = buf + end;
DEBUG_PRINTF("start %zu end %zu \n", start, end);
assert(d < e);
- u32 lastz0 = 0;
+ u32 lastz0 = 0;
for (; d < e; d += 32) {
m256 v = noCase ? and256(load256(d), caseMask) : load256(d);
diff --git a/contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.cpp b/contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.cpp
index 842665f1cc..ae71e141a2 100644
--- a/contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.cpp
+++ b/contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.cpp
@@ -214,7 +214,7 @@ static
bool double_byte_ok(const AccelScheme &info) {
return !info.double_byte.empty() &&
info.double_cr.count() < info.double_byte.size() &&
- info.double_cr.count() <= 2;
+ info.double_cr.count() <= 2;
}
static
diff --git a/contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.h b/contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.h
index cb47d38cc5..53a6f35b3d 100644
--- a/contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.h
+++ b/contrib/libs/hyperscan/src/nfa/accel_dfa_build_strat.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -40,11 +40,11 @@ namespace ue2 {
class ReportManager;
struct Grey;
-enum DfaType {
- McClellan,
- Sheng,
- Gough
-};
+enum DfaType {
+ McClellan,
+ Sheng,
+ Gough
+};
class accel_dfa_build_strat : public dfa_build_strat {
public:
@@ -58,8 +58,8 @@ public:
virtual void buildAccel(dstate_id_t this_idx, const AccelScheme &info,
void *accel_out);
virtual std::map<dstate_id_t, AccelScheme> getAccelInfo(const Grey &grey);
- virtual DfaType getType() const = 0;
-
+ virtual DfaType getType() const = 0;
+
private:
bool only_accel_init;
};
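The restored getType() lets callers branch on the concrete strategy; a hypothetical sketch (strat_name is illustrative, not part of the sources):

    static const char *strat_name(const ue2::accel_dfa_build_strat &s) {
        switch (s.getType()) {
        case ue2::McClellan: return "McClellan";
        case ue2::Sheng:     return "Sheng";
        case ue2::Gough:     return "Gough";
        }
        return "unknown";
    }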
diff --git a/contrib/libs/hyperscan/src/nfa/goughcompile.cpp b/contrib/libs/hyperscan/src/nfa/goughcompile.cpp
index cb7f0eafc9..d41c6f4235 100644
--- a/contrib/libs/hyperscan/src/nfa/goughcompile.cpp
+++ b/contrib/libs/hyperscan/src/nfa/goughcompile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -91,7 +91,7 @@ public:
void buildAccel(dstate_id_t this_idx, const AccelScheme &info,
void *accel_out) override;
u32 max_allowed_offset_accel() const override { return 0; }
- DfaType getType() const override { return Gough; }
+ DfaType getType() const override { return Gough; }
raw_som_dfa &rdfa;
const GoughGraph &gg;
@@ -375,7 +375,7 @@ unique_ptr<GoughGraph> makeCFG(const raw_som_dfa &raw) {
}
u16 top_sym = raw.alpha_remap[TOP];
- DEBUG_PRINTF("top: %hu, kind %s\n", top_sym, to_string(raw.kind).c_str());
+ DEBUG_PRINTF("top: %hu, kind %s\n", top_sym, to_string(raw.kind).c_str());
/* create edges, JOIN variables (on edge targets) */
map<dstate_id_t, GoughEdge> seen;
diff --git a/contrib/libs/hyperscan/src/nfa/limex_compile.cpp b/contrib/libs/hyperscan/src/nfa/limex_compile.cpp
index bad7434d79..9233ae515e 100644
--- a/contrib/libs/hyperscan/src/nfa/limex_compile.cpp
+++ b/contrib/libs/hyperscan/src/nfa/limex_compile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -85,18 +85,18 @@ namespace ue2 {
*/
static constexpr u32 NO_STATE = ~0;
-/* Maximum number of states taken as a small NFA */
-static constexpr u32 MAX_SMALL_NFA_STATES = 64;
-
-/* Maximum bounded repeat upper bound to consider as a fast NFA */
-static constexpr u64a MAX_REPEAT_SIZE = 200;
-
-/* Maximum bounded repeat char reach size to consider as a fast NFA */
-static constexpr u32 MAX_REPEAT_CHAR_REACH = 26;
-
-/* Minimum bounded repeat trigger distance to consider as a fast NFA */
-static constexpr u8 MIN_REPEAT_TRIGGER_DISTANCE = 6;
-
+/* Maximum number of states taken as a small NFA */
+static constexpr u32 MAX_SMALL_NFA_STATES = 64;
+
+/* Maximum bounded repeat upper bound to consider as a fast NFA */
+static constexpr u64a MAX_REPEAT_SIZE = 200;
+
+/* Maximum bounded repeat char reach size to consider as a fast NFA */
+static constexpr u32 MAX_REPEAT_CHAR_REACH = 26;
+
+/* Minimum bounded repeat trigger distance to consider as a fast NFA */
+static constexpr u8 MIN_REPEAT_TRIGGER_DISTANCE = 6;
+
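A condensed sketch of how these four thresholds interact in the isFast() classification further down (a simplified, hypothetical helper as if in this translation unit; the real code walks vertices within MIN_REPEAT_TRIGGER_DISTANCE of the tops):

    // Not the real control flow: an NFA counts as fast only if it is small,
    // and any bounded repeat is either short or has narrow character reach.
    static bool looks_fast(u32 num_states, u64a repeat_max, u32 repeat_reach) {
        if (num_states > MAX_SMALL_NFA_STATES) {
            return false;                  // too many states to be "small"
        }
        if (repeat_max <= MAX_REPEAT_SIZE) {
            return true;                   // short bounded repeat is fine
        }
        return repeat_reach <= MAX_REPEAT_CHAR_REACH;
    }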
namespace {
struct precalcAccel {
@@ -992,7 +992,7 @@ u32 addSquashMask(const build_info &args, const NFAVertex &v,
// see if we've already seen it, otherwise add a new one.
auto it = find(squash.begin(), squash.end(), sit->second);
if (it != squash.end()) {
- return verify_u32(std::distance(squash.begin(), it));
+ return verify_u32(std::distance(squash.begin(), it));
}
u32 idx = verify_u32(squash.size());
squash.push_back(sit->second);
@@ -1019,7 +1019,7 @@ u32 addReports(const flat_set<ReportID> &r, vector<ReportID> &reports,
auto it = search(begin(reports), end(reports), begin(my_reports),
end(my_reports));
if (it != end(reports)) {
- u32 offset = verify_u32(std::distance(begin(reports), it));
+ u32 offset = verify_u32(std::distance(begin(reports), it));
DEBUG_PRINTF("reusing found report list at %u\n", offset);
return offset;
}
@@ -1922,8 +1922,8 @@ struct Factory {
}
static
- void writeExceptions(const build_info &args,
- const map<ExceptionProto, vector<u32>> &exceptionMap,
+ void writeExceptions(const build_info &args,
+ const map<ExceptionProto, vector<u32>> &exceptionMap,
const vector<u32> &repeatOffsets, implNFA_t *limex,
const u32 exceptionsOffset,
const u32 reportListOffset) {
@@ -1975,59 +1975,59 @@ struct Factory {
limex->exceptionOffset = exceptionsOffset;
limex->exceptionCount = ecount;
-
- if (args.num_states > 64 && args.cc.target_info.has_avx512vbmi()) {
- const u8 *exceptionMask = (const u8 *)(&limex->exceptionMask);
- u8 *shufMask = (u8 *)&limex->exceptionShufMask;
- u8 *bitMask = (u8 *)&limex->exceptionBitMask;
- u8 *andMask = (u8 *)&limex->exceptionAndMask;
-
- u32 tot_cnt = 0;
- u32 pos = 0;
- bool valid = true;
- size_t tot = sizeof(limex->exceptionMask);
- size_t base = 0;
-
- // We normally have up to 64 exceptions to handle,
- // but treat 384 state Limex differently to simplify operations
- size_t limit = 64;
- if (args.num_states > 256 && args.num_states <= 384) {
- limit = 48;
- }
-
- for (size_t i = 0; i < tot; i++) {
- if (!exceptionMask[i]) {
- continue;
- }
- u32 bit_cnt = popcount32(exceptionMask[i]);
-
- tot_cnt += bit_cnt;
- if (tot_cnt > limit) {
- valid = false;
- break;
- }
-
- u32 emsk = exceptionMask[i];
- while (emsk) {
- u32 t = findAndClearLSB_32(&emsk);
- bitMask[pos] = 1U << t;
- andMask[pos] = 1U << t;
- shufMask[pos++] = i + base;
-
- if (pos == 32 &&
- (args.num_states > 128 && args.num_states <= 256)) {
- base += 32;
- }
- }
- }
- // Avoid matching unused bytes
- for (u32 i = pos; i < 64; i++) {
- bitMask[i] = 0xff;
- }
- if (valid) {
- setLimexFlag(limex, LIMEX_FLAG_EXTRACT_EXP);
- }
- }
+
+ if (args.num_states > 64 && args.cc.target_info.has_avx512vbmi()) {
+ const u8 *exceptionMask = (const u8 *)(&limex->exceptionMask);
+ u8 *shufMask = (u8 *)&limex->exceptionShufMask;
+ u8 *bitMask = (u8 *)&limex->exceptionBitMask;
+ u8 *andMask = (u8 *)&limex->exceptionAndMask;
+
+ u32 tot_cnt = 0;
+ u32 pos = 0;
+ bool valid = true;
+ size_t tot = sizeof(limex->exceptionMask);
+ size_t base = 0;
+
+ // We normally have up to 64 exceptions to handle,
+ // but treat 384 state Limex differently to simplify operations
+ size_t limit = 64;
+ if (args.num_states > 256 && args.num_states <= 384) {
+ limit = 48;
+ }
+
+ for (size_t i = 0; i < tot; i++) {
+ if (!exceptionMask[i]) {
+ continue;
+ }
+ u32 bit_cnt = popcount32(exceptionMask[i]);
+
+ tot_cnt += bit_cnt;
+ if (tot_cnt > limit) {
+ valid = false;
+ break;
+ }
+
+ u32 emsk = exceptionMask[i];
+ while (emsk) {
+ u32 t = findAndClearLSB_32(&emsk);
+ bitMask[pos] = 1U << t;
+ andMask[pos] = 1U << t;
+ shufMask[pos++] = i + base;
+
+ if (pos == 32 &&
+ (args.num_states > 128 && args.num_states <= 256)) {
+ base += 32;
+ }
+ }
+ }
+ // Avoid matching unused bytes
+ for (u32 i = pos; i < 64; i++) {
+ bitMask[i] = 0xff;
+ }
+ if (valid) {
+ setLimexFlag(limex, LIMEX_FLAG_EXTRACT_EXP);
+ }
+ }
}
static
@@ -2353,7 +2353,7 @@ struct Factory {
writeRepeats(repeats, repeatOffsets, limex, repeatOffsetsOffset,
repeatsOffset);
- writeExceptions(args, exceptionMap, repeatOffsets, limex, exceptionsOffset,
+ writeExceptions(args, exceptionMap, repeatOffsets, limex, exceptionsOffset,
reportListOffset);
writeLimexMasks(args, limex);
@@ -2489,68 +2489,68 @@ bool isSane(const NGHolder &h, const map<u32, set<NFAVertex>> &tops,
#endif // NDEBUG
static
-bool isFast(const build_info &args) {
- const NGHolder &h = args.h;
- const u32 num_states = args.num_states;
-
- if (num_states > MAX_SMALL_NFA_STATES) {
- return false;
- }
-
- unordered_map<NFAVertex, bool> pos_trigger;
- for (u32 i = 0; i < args.repeats.size(); i++) {
- const BoundedRepeatData &br = args.repeats[i];
- assert(!contains(pos_trigger, br.pos_trigger));
- pos_trigger[br.pos_trigger] = br.repeatMax <= MAX_REPEAT_SIZE;
- }
-
- // Small NFA without bounded repeat should be fast.
- if (pos_trigger.empty()) {
- return true;
- }
-
- vector<NFAVertex> cur;
- unordered_set<NFAVertex> visited;
- for (const auto &m : args.tops) {
- for (NFAVertex v : m.second) {
- cur.push_back(v);
- visited.insert(v);
- }
- }
-
- u8 pos_dist = 0;
- while (!cur.empty()) {
- vector<NFAVertex> next;
- for (const auto &v : cur) {
- if (contains(pos_trigger, v)) {
- const CharReach &cr = h[v].char_reach;
- if (!pos_trigger[v] && cr.count() > MAX_REPEAT_CHAR_REACH) {
- return false;
- }
- }
- for (const auto &w : adjacent_vertices_range(v, h)) {
- if (w == v) {
- continue;
- }
- u32 j = args.state_ids.at(w);
- if (j == NO_STATE) {
- continue;
- }
- if (!contains(visited, w)) {
- next.push_back(w);
- visited.insert(w);
- }
- }
- }
- if (++pos_dist >= MIN_REPEAT_TRIGGER_DISTANCE) {
- break;
- }
- swap(cur, next);
- }
- return true;
-}
-
-static
+bool isFast(const build_info &args) {
+ const NGHolder &h = args.h;
+ const u32 num_states = args.num_states;
+
+ if (num_states > MAX_SMALL_NFA_STATES) {
+ return false;
+ }
+
+ unordered_map<NFAVertex, bool> pos_trigger;
+ for (u32 i = 0; i < args.repeats.size(); i++) {
+ const BoundedRepeatData &br = args.repeats[i];
+ assert(!contains(pos_trigger, br.pos_trigger));
+ pos_trigger[br.pos_trigger] = br.repeatMax <= MAX_REPEAT_SIZE;
+ }
+
+ // Small NFA without bounded repeat should be fast.
+ if (pos_trigger.empty()) {
+ return true;
+ }
+
+ vector<NFAVertex> cur;
+ unordered_set<NFAVertex> visited;
+ for (const auto &m : args.tops) {
+ for (NFAVertex v : m.second) {
+ cur.push_back(v);
+ visited.insert(v);
+ }
+ }
+
+ u8 pos_dist = 0;
+ while (!cur.empty()) {
+ vector<NFAVertex> next;
+ for (const auto &v : cur) {
+ if (contains(pos_trigger, v)) {
+ const CharReach &cr = h[v].char_reach;
+ if (!pos_trigger[v] && cr.count() > MAX_REPEAT_CHAR_REACH) {
+ return false;
+ }
+ }
+ for (const auto &w : adjacent_vertices_range(v, h)) {
+ if (w == v) {
+ continue;
+ }
+ u32 j = args.state_ids.at(w);
+ if (j == NO_STATE) {
+ continue;
+ }
+ if (!contains(visited, w)) {
+ next.push_back(w);
+ visited.insert(w);
+ }
+ }
+ }
+ if (++pos_dist >= MIN_REPEAT_TRIGGER_DISTANCE) {
+ break;
+ }
+ swap(cur, next);
+ }
+ return true;
+}
+
+static
u32 max_state(const unordered_map<NFAVertex, u32> &state_ids) {
u32 rv = 0;
for (const auto &m : state_ids) {
@@ -2570,7 +2570,7 @@ bytecode_ptr<NFA> generate(NGHolder &h,
const unordered_map<NFAVertex, NFAStateSet> &squashMap,
const map<u32, set<NFAVertex>> &tops,
const set<NFAVertex> &zombies, bool do_accel,
- bool stateCompression, bool &fast, u32 hint,
+ bool stateCompression, bool &fast, u32 hint,
const CompileContext &cc) {
const u32 num_states = max_state(states) + 1;
DEBUG_PRINTF("total states: %u\n", num_states);
@@ -2625,7 +2625,7 @@ bytecode_ptr<NFA> generate(NGHolder &h,
if (nfa) {
DEBUG_PRINTF("successful build with NFA engine: %s\n",
nfa_type_name(limex_model));
- fast = isFast(arg);
+ fast = isFast(arg);
return nfa;
}
}
diff --git a/contrib/libs/hyperscan/src/nfa/limex_compile.h b/contrib/libs/hyperscan/src/nfa/limex_compile.h
index 2562727d68..4afdcdb3e4 100644
--- a/contrib/libs/hyperscan/src/nfa/limex_compile.h
+++ b/contrib/libs/hyperscan/src/nfa/limex_compile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -78,7 +78,7 @@ bytecode_ptr<NFA> generate(NGHolder &g,
const std::set<NFAVertex> &zombies,
bool do_accel,
bool stateCompression,
- bool &fast,
+ bool &fast,
u32 hint,
const CompileContext &cc);
diff --git a/contrib/libs/hyperscan/src/nfa/limex_exceptional.h b/contrib/libs/hyperscan/src/nfa/limex_exceptional.h
index 65bc9d97cd..6c7335f1b9 100644
--- a/contrib/libs/hyperscan/src/nfa/limex_exceptional.h
+++ b/contrib/libs/hyperscan/src/nfa/limex_exceptional.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -47,8 +47,8 @@
#define AND_STATE JOIN(and_, STATE_T)
#define EQ_STATE(a, b) (!JOIN(noteq_, STATE_T)((a), (b)))
#define OR_STATE JOIN(or_, STATE_T)
-#define EXPAND_STATE JOIN(expand_, STATE_T)
-#define SHUFFLE_BYTE_STATE JOIN(shuffle_byte_, STATE_T)
+#define EXPAND_STATE JOIN(expand_, STATE_T)
+#define SHUFFLE_BYTE_STATE JOIN(shuffle_byte_, STATE_T)
#define TESTBIT_STATE JOIN(testbit_, STATE_T)
#define EXCEPTION_T JOIN(struct NFAException, SIZE)
#define CONTEXT_T JOIN(NFAContext, SIZE)
@@ -210,7 +210,7 @@ int RUN_EXCEPTION_FN(const EXCEPTION_T *e, STATE_ARG,
/** \brief Process all of the exceptions associated with the states in the \a
* estate. */
static really_inline
-int PE_FN(STATE_ARG, ESTATE_ARG, UNUSED u32 diffmask, STATE_T *succ,
+int PE_FN(STATE_ARG, ESTATE_ARG, UNUSED u32 diffmask, STATE_T *succ,
const struct IMPL_NFA_T *limex, const EXCEPTION_T *exceptions,
u64a offset, struct CONTEXT_T *ctx, char in_rev, char flags) {
assert(diffmask > 0); // guaranteed by caller macro
@@ -235,72 +235,72 @@ int PE_FN(STATE_ARG, ESTATE_ARG, UNUSED u32 diffmask, STATE_T *succ,
ctx->local_succ = ZERO_STATE;
#endif
- struct proto_cache new_cache = {0, NULL};
- enum CacheResult cacheable = CACHE_RESULT;
-
-#if defined(HAVE_AVX512VBMI) && SIZE > 64
- if (likely(limex->flags & LIMEX_FLAG_EXTRACT_EXP)) {
- m512 emask = EXPAND_STATE(*STATE_ARG_P);
- emask = SHUFFLE_BYTE_STATE(load_m512(&limex->exceptionShufMask), emask);
- emask = and512(emask, load_m512(&limex->exceptionAndMask));
- u64a word = eq512mask(emask, load_m512(&limex->exceptionBitMask));
-
- do {
- u32 bit = FIND_AND_CLEAR_FN(&word);
- const EXCEPTION_T *e = &exceptions[bit];
-
- if (!RUN_EXCEPTION_FN(e, STATE_ARG_NAME, succ,
-#ifndef BIG_MODEL
- &local_succ,
-#endif
- limex, offset, ctx, &new_cache, &cacheable,
- in_rev, flags)) {
- return PE_RV_HALT;
- }
- } while (word);
- } else {
- // A copy of the estate as an array of GPR-sized chunks.
- CHUNK_T chunks[sizeof(STATE_T) / sizeof(CHUNK_T)];
- CHUNK_T emask_chunks[sizeof(STATE_T) / sizeof(CHUNK_T)];
-#ifdef ESTATE_ON_STACK
- memcpy(chunks, &estate, sizeof(STATE_T));
-#else
- memcpy(chunks, estatep, sizeof(STATE_T));
-#endif
- memcpy(emask_chunks, &limex->exceptionMask, sizeof(STATE_T));
-
- u32 base_index[sizeof(STATE_T) / sizeof(CHUNK_T)];
- base_index[0] = 0;
- for (s32 i = 0; i < (s32)ARRAY_LENGTH(base_index) - 1; i++) {
- base_index[i + 1] = base_index[i] + POPCOUNT_FN(emask_chunks[i]);
- }
-
- do {
- u32 t = findAndClearLSB_32(&diffmask);
-#ifdef ARCH_64_BIT
- t >>= 1; // Due to diffmask64, which leaves holes in the bitmask.
-#endif
- assert(t < ARRAY_LENGTH(chunks));
- CHUNK_T word = chunks[t];
- assert(word != 0);
- do {
- u32 bit = FIND_AND_CLEAR_FN(&word);
- u32 local_index = RANK_IN_MASK_FN(emask_chunks[t], bit);
- u32 idx = local_index + base_index[t];
- const EXCEPTION_T *e = &exceptions[idx];
-
- if (!RUN_EXCEPTION_FN(e, STATE_ARG_NAME, succ,
-#ifndef BIG_MODEL
- &local_succ,
-#endif
- limex, offset, ctx, &new_cache, &cacheable,
- in_rev, flags)) {
- return PE_RV_HALT;
- }
- } while (word);
- } while (diffmask);
- }
-#else
+ struct proto_cache new_cache = {0, NULL};
+ enum CacheResult cacheable = CACHE_RESULT;
+
+#if defined(HAVE_AVX512VBMI) && SIZE > 64
+ if (likely(limex->flags & LIMEX_FLAG_EXTRACT_EXP)) {
+ m512 emask = EXPAND_STATE(*STATE_ARG_P);
+ emask = SHUFFLE_BYTE_STATE(load_m512(&limex->exceptionShufMask), emask);
+ emask = and512(emask, load_m512(&limex->exceptionAndMask));
+ u64a word = eq512mask(emask, load_m512(&limex->exceptionBitMask));
+
+ do {
+ u32 bit = FIND_AND_CLEAR_FN(&word);
+ const EXCEPTION_T *e = &exceptions[bit];
+
+ if (!RUN_EXCEPTION_FN(e, STATE_ARG_NAME, succ,
+#ifndef BIG_MODEL
+ &local_succ,
+#endif
+ limex, offset, ctx, &new_cache, &cacheable,
+ in_rev, flags)) {
+ return PE_RV_HALT;
+ }
+ } while (word);
+ } else {
+ // A copy of the estate as an array of GPR-sized chunks.
+ CHUNK_T chunks[sizeof(STATE_T) / sizeof(CHUNK_T)];
+ CHUNK_T emask_chunks[sizeof(STATE_T) / sizeof(CHUNK_T)];
+#ifdef ESTATE_ON_STACK
+ memcpy(chunks, &estate, sizeof(STATE_T));
+#else
+ memcpy(chunks, estatep, sizeof(STATE_T));
+#endif
+ memcpy(emask_chunks, &limex->exceptionMask, sizeof(STATE_T));
+
+ u32 base_index[sizeof(STATE_T) / sizeof(CHUNK_T)];
+ base_index[0] = 0;
+ for (s32 i = 0; i < (s32)ARRAY_LENGTH(base_index) - 1; i++) {
+ base_index[i + 1] = base_index[i] + POPCOUNT_FN(emask_chunks[i]);
+ }
+
+ do {
+ u32 t = findAndClearLSB_32(&diffmask);
+#ifdef ARCH_64_BIT
+ t >>= 1; // Due to diffmask64, which leaves holes in the bitmask.
+#endif
+ assert(t < ARRAY_LENGTH(chunks));
+ CHUNK_T word = chunks[t];
+ assert(word != 0);
+ do {
+ u32 bit = FIND_AND_CLEAR_FN(&word);
+ u32 local_index = RANK_IN_MASK_FN(emask_chunks[t], bit);
+ u32 idx = local_index + base_index[t];
+ const EXCEPTION_T *e = &exceptions[idx];
+
+ if (!RUN_EXCEPTION_FN(e, STATE_ARG_NAME, succ,
+#ifndef BIG_MODEL
+ &local_succ,
+#endif
+ limex, offset, ctx, &new_cache, &cacheable,
+ in_rev, flags)) {
+ return PE_RV_HALT;
+ }
+ } while (word);
+ } while (diffmask);
+ }
+#else
// A copy of the estate as an array of GPR-sized chunks.
CHUNK_T chunks[sizeof(STATE_T) / sizeof(CHUNK_T)];
CHUNK_T emask_chunks[sizeof(STATE_T) / sizeof(CHUNK_T)];
@@ -341,7 +341,7 @@ int PE_FN(STATE_ARG, ESTATE_ARG, UNUSED u32 diffmask, STATE_T *succ,
}
} while (word);
} while (diffmask);
-#endif
+#endif
#ifndef BIG_MODEL
*succ = OR_STATE(*succ, local_succ);
@@ -373,8 +373,8 @@ int PE_FN(STATE_ARG, ESTATE_ARG, UNUSED u32 diffmask, STATE_T *succ,
#undef AND_STATE
#undef EQ_STATE
#undef OR_STATE
-#undef EXPAND_STATE
-#undef SHUFFLE_BYTE_STATE
+#undef EXPAND_STATE
+#undef SHUFFLE_BYTE_STATE
#undef TESTBIT_STATE
#undef PE_FN
#undef RUN_EXCEPTION_FN
diff --git a/contrib/libs/hyperscan/src/nfa/limex_internal.h b/contrib/libs/hyperscan/src/nfa/limex_internal.h
index 59795a65b7..23b1bd9707 100644
--- a/contrib/libs/hyperscan/src/nfa/limex_internal.h
+++ b/contrib/libs/hyperscan/src/nfa/limex_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -86,7 +86,7 @@
#define LIMEX_FLAG_COMPRESS_STATE 1 /**< pack state into stream state */
#define LIMEX_FLAG_COMPRESS_MASKED 2 /**< use reach mask-based compression */
#define LIMEX_FLAG_CANNOT_DIE 4 /**< limex cannot have no states on */
-#define LIMEX_FLAG_EXTRACT_EXP 8 /**< use limex exception bit extraction */
+#define LIMEX_FLAG_EXTRACT_EXP 8 /**< use limex exception bit extraction */
enum LimExTrigger {
LIMEX_TRIGGER_NONE = 0,
@@ -158,9 +158,9 @@ struct LimExNFA##size { \
u_##size shift[MAX_SHIFT_COUNT]; \
u32 shiftCount; /**< number of shift masks used */ \
u8 shiftAmount[MAX_SHIFT_COUNT]; /**< shift amount for each mask */ \
- m512 exceptionShufMask; /**< exception byte shuffle mask */ \
- m512 exceptionBitMask; /**< exception bit mask */ \
- m512 exceptionAndMask; /**< exception and mask */ \
+ m512 exceptionShufMask; /**< exception byte shuffle mask */ \
+ m512 exceptionBitMask; /**< exception bit mask */ \
+ m512 exceptionAndMask; /**< exception and mask */ \
};
CREATE_NFA_LIMEX(32)
diff --git a/contrib/libs/hyperscan/src/nfa/mcclellan.c b/contrib/libs/hyperscan/src/nfa/mcclellan.c
index fe21700632..71f71e3275 100644
--- a/contrib/libs/hyperscan/src/nfa/mcclellan.c
+++ b/contrib/libs/hyperscan/src/nfa/mcclellan.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -167,68 +167,68 @@ u32 doNormal16(const struct mcclellan *m, const u8 **c_inout, const u8 *end,
}
static really_inline
-u32 doNormalWide16(const struct mcclellan *m, const u8 **c_inout,
- const u8 *end, u32 s, char *qstate, u16 *offset,
- char do_accel, enum MatchMode mode) {
- const u8 *c = *c_inout;
-
- u32 wide_limit = m->wide_limit;
- const char *wide_base
- = (const char *)m - sizeof(struct NFA) + m->wide_offset;
-
- const u16 *succ_table
- = (const u16 *)((const char *)m + sizeof(struct mcclellan));
- assert(ISALIGNED_N(succ_table, 2));
- u32 sherman_base = m->sherman_limit;
- const char *sherman_base_offset
- = (const char *)m - sizeof(struct NFA) + m->sherman_offset;
- u32 as = m->alphaShift;
-
- s &= STATE_MASK;
-
- while (c < end && s) {
- u8 cprime = m->remap[*c];
- DEBUG_PRINTF("c: %02hhx '%c' cp:%02hhx (s=%u) &c: %p\n", *c,
- ourisprint(*c) ? *c : '?', cprime, s, c);
-
- if (unlikely(s >= wide_limit)) {
- const char *wide_entry
- = findWideEntry16(m, wide_base, wide_limit, s);
- DEBUG_PRINTF("doing wide head (%u)\n", s);
- s = doWide16(wide_entry, &c, end, m->remap, (u16 *)&s, qstate,
- offset);
- } else if (s >= sherman_base) {
- const char *sherman_state
- = findShermanState(m, sherman_base_offset, sherman_base, s);
- DEBUG_PRINTF("doing sherman (%u)\n", s);
- s = doSherman16(sherman_state, cprime, succ_table, as);
- } else {
- DEBUG_PRINTF("doing normal\n");
- s = succ_table[(s << as) + cprime];
- }
-
- DEBUG_PRINTF("s: %u (%u)\n", s, s & STATE_MASK);
- c++;
-
- if (do_accel && (s & ACCEL_FLAG)) {
- break;
- }
- if (mode != NO_MATCHES && (s & ACCEPT_FLAG)) {
- break;
- }
-
- s &= STATE_MASK;
- }
-
- *c_inout = c;
- return s;
-}
-
-static really_inline
-char mcclellanExec16_i(const struct mcclellan *m, u32 *state, char *qstate,
- const u8 *buf, size_t len, u64a offAdj, NfaCallback cb,
- void *ctxt, char single, const u8 **c_final,
- enum MatchMode mode) {
+u32 doNormalWide16(const struct mcclellan *m, const u8 **c_inout,
+ const u8 *end, u32 s, char *qstate, u16 *offset,
+ char do_accel, enum MatchMode mode) {
+ const u8 *c = *c_inout;
+
+ u32 wide_limit = m->wide_limit;
+ const char *wide_base
+ = (const char *)m - sizeof(struct NFA) + m->wide_offset;
+
+ const u16 *succ_table
+ = (const u16 *)((const char *)m + sizeof(struct mcclellan));
+ assert(ISALIGNED_N(succ_table, 2));
+ u32 sherman_base = m->sherman_limit;
+ const char *sherman_base_offset
+ = (const char *)m - sizeof(struct NFA) + m->sherman_offset;
+ u32 as = m->alphaShift;
+
+ s &= STATE_MASK;
+
+ while (c < end && s) {
+ u8 cprime = m->remap[*c];
+ DEBUG_PRINTF("c: %02hhx '%c' cp:%02hhx (s=%u) &c: %p\n", *c,
+ ourisprint(*c) ? *c : '?', cprime, s, c);
+
+ if (unlikely(s >= wide_limit)) {
+ const char *wide_entry
+ = findWideEntry16(m, wide_base, wide_limit, s);
+ DEBUG_PRINTF("doing wide head (%u)\n", s);
+ s = doWide16(wide_entry, &c, end, m->remap, (u16 *)&s, qstate,
+ offset);
+ } else if (s >= sherman_base) {
+ const char *sherman_state
+ = findShermanState(m, sherman_base_offset, sherman_base, s);
+ DEBUG_PRINTF("doing sherman (%u)\n", s);
+ s = doSherman16(sherman_state, cprime, succ_table, as);
+ } else {
+ DEBUG_PRINTF("doing normal\n");
+ s = succ_table[(s << as) + cprime];
+ }
+
+ DEBUG_PRINTF("s: %u (%u)\n", s, s & STATE_MASK);
+ c++;
+
+ if (do_accel && (s & ACCEL_FLAG)) {
+ break;
+ }
+ if (mode != NO_MATCHES && (s & ACCEPT_FLAG)) {
+ break;
+ }
+
+ s &= STATE_MASK;
+ }
+
+ *c_inout = c;
+ return s;
+}
+
+static really_inline
+char mcclellanExec16_i(const struct mcclellan *m, u32 *state, char *qstate,
+ const u8 *buf, size_t len, u64a offAdj, NfaCallback cb,
+ void *ctxt, char single, const u8 **c_final,
+ enum MatchMode mode) {
assert(ISALIGNED_N(state, 2));
if (!len) {
if (mode == STOP_AT_MATCH) {
@@ -238,7 +238,7 @@ char mcclellanExec16_i(const struct mcclellan *m, u32 *state, char *qstate,
}
u32 s = *state;
- u16 offset = 0;
+ u16 offset = 0;
const u8 *c = buf;
const u8 *c_end = buf + len;
const struct mstate_aux *aux
@@ -267,12 +267,12 @@ without_accel:
goto exit;
}
- if (unlikely(m->has_wide)) {
- s = doNormalWide16(m, &c, min_accel_offset, s, qstate, &offset, 0,
- mode);
- } else {
- s = doNormal16(m, &c, min_accel_offset, s, 0, mode);
- }
+ if (unlikely(m->has_wide)) {
+ s = doNormalWide16(m, &c, min_accel_offset, s, qstate, &offset, 0,
+ mode);
+ } else {
+ s = doNormal16(m, &c, min_accel_offset, s, 0, mode);
+ }
if (mode != NO_MATCHES && (s & ACCEPT_FLAG)) {
if (mode == STOP_AT_MATCH) {
@@ -324,11 +324,11 @@ with_accel:
}
}
- if (unlikely(m->has_wide)) {
- s = doNormalWide16(m, &c, c_end, s, qstate, &offset, 1, mode);
- } else {
- s = doNormal16(m, &c, c_end, s, 1, mode);
- }
+ if (unlikely(m->has_wide)) {
+ s = doNormalWide16(m, &c, c_end, s, qstate, &offset, 1, mode);
+ } else {
+ s = doNormal16(m, &c, c_end, s, 1, mode);
+ }
if (mode != NO_MATCHES && (s & ACCEPT_FLAG)) {
if (mode == STOP_AT_MATCH) {
@@ -366,47 +366,47 @@ exit:
}
static never_inline
-char mcclellanExec16_i_cb(const struct mcclellan *m, u32 *state, char *qstate,
- const u8 *buf, size_t len, u64a offAdj,
- NfaCallback cb, void *ctxt, char single,
- const u8 **final_point) {
- return mcclellanExec16_i(m, state, qstate, buf, len, offAdj, cb, ctxt,
- single, final_point, CALLBACK_OUTPUT);
+char mcclellanExec16_i_cb(const struct mcclellan *m, u32 *state, char *qstate,
+ const u8 *buf, size_t len, u64a offAdj,
+ NfaCallback cb, void *ctxt, char single,
+ const u8 **final_point) {
+ return mcclellanExec16_i(m, state, qstate, buf, len, offAdj, cb, ctxt,
+ single, final_point, CALLBACK_OUTPUT);
}
static never_inline
-char mcclellanExec16_i_sam(const struct mcclellan *m, u32 *state, char *qstate,
- const u8 *buf, size_t len, u64a offAdj,
- NfaCallback cb, void *ctxt, char single,
- const u8 **final_point) {
- return mcclellanExec16_i(m, state, qstate, buf, len, offAdj, cb, ctxt,
- single, final_point, STOP_AT_MATCH);
+char mcclellanExec16_i_sam(const struct mcclellan *m, u32 *state, char *qstate,
+ const u8 *buf, size_t len, u64a offAdj,
+ NfaCallback cb, void *ctxt, char single,
+ const u8 **final_point) {
+ return mcclellanExec16_i(m, state, qstate, buf, len, offAdj, cb, ctxt,
+ single, final_point, STOP_AT_MATCH);
}
static never_inline
-char mcclellanExec16_i_nm(const struct mcclellan *m, u32 *state, char *qstate,
- const u8 *buf, size_t len, u64a offAdj,
- NfaCallback cb, void *ctxt, char single,
- const u8 **final_point) {
- return mcclellanExec16_i(m, state, qstate, buf, len, offAdj, cb, ctxt,
- single, final_point, NO_MATCHES);
+char mcclellanExec16_i_nm(const struct mcclellan *m, u32 *state, char *qstate,
+ const u8 *buf, size_t len, u64a offAdj,
+ NfaCallback cb, void *ctxt, char single,
+ const u8 **final_point) {
+ return mcclellanExec16_i(m, state, qstate, buf, len, offAdj, cb, ctxt,
+ single, final_point, NO_MATCHES);
}
static really_inline
-char mcclellanExec16_i_ni(const struct mcclellan *m, u32 *state, char *qstate,
- const u8 *buf, size_t len, u64a offAdj,
- NfaCallback cb, void *ctxt, char single,
- const u8 **final_point, enum MatchMode mode) {
+char mcclellanExec16_i_ni(const struct mcclellan *m, u32 *state, char *qstate,
+ const u8 *buf, size_t len, u64a offAdj,
+ NfaCallback cb, void *ctxt, char single,
+ const u8 **final_point, enum MatchMode mode) {
if (mode == CALLBACK_OUTPUT) {
- return mcclellanExec16_i_cb(m, state, qstate, buf, len, offAdj, cb,
- ctxt, single, final_point);
+ return mcclellanExec16_i_cb(m, state, qstate, buf, len, offAdj, cb,
+ ctxt, single, final_point);
} else if (mode == STOP_AT_MATCH) {
- return mcclellanExec16_i_sam(m, state, qstate, buf, len, offAdj, cb,
- ctxt, single, final_point);
+ return mcclellanExec16_i_sam(m, state, qstate, buf, len, offAdj, cb,
+ ctxt, single, final_point);
} else {
assert(mode == NO_MATCHES);
- return mcclellanExec16_i_nm(m, state, qstate, buf, len, offAdj, cb,
- ctxt, single, final_point);
+ return mcclellanExec16_i_nm(m, state, qstate, buf, len, offAdj, cb,
+ ctxt, single, final_point);
}
}
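The three wrappers above exist so the compiler specializes the really_inline core once per MatchMode: each never_inline entry point passes a compile-time-constant mode, letting the mode branches fold away while keeping only three out-of-line copies of the loop. A minimal sketch of the same pattern (hypothetical names, not the Hyperscan sources; noinline spelled as a GCC/Clang attribute):

    #include <stddef.h>

    enum MatchMode { CALLBACK_OUTPUT, STOP_AT_MATCH, NO_MATCHES };

    /* Core loop: every caller passes a constant mode, so the checks on
     * 'mode' are resolved at compile time after inlining. */
    static inline int scan_core(const unsigned char *buf, size_t len,
                                enum MatchMode mode) {
        int matches = 0;
        for (size_t i = 0; i < len; i++) {
            if (buf[i] == 'x') {               /* stand-in for an accept */
                if (mode == NO_MATCHES) {
                    continue;                  /* folded out per variant */
                }
                matches++;
                if (mode == STOP_AT_MATCH) {
                    return matches;
                }
            }
        }
        return matches;
    }

    /* One out-of-line wrapper per mode bounds code size while keeping
     * each copy of the core fully specialized. */
    __attribute__((noinline))
    static int scan_cb(const unsigned char *b, size_t n) {
        return scan_core(b, n, CALLBACK_OUTPUT);
    }

    __attribute__((noinline))
    static int scan_sam(const unsigned char *b, size_t n) {
        return scan_core(b, n, STOP_AT_MATCH);
    }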
@@ -612,10 +612,10 @@ char mcclellanCheckEOD(const struct NFA *nfa, u32 s, u64a offset,
const struct mcclellan *m = getImplNfa(nfa);
const struct mstate_aux *aux = get_aux(m, s);
- if (m->has_wide == 1 && s >= m->wide_limit) {
- return MO_CONTINUE_MATCHING;
- }
-
+ if (m->has_wide == 1 && s >= m->wide_limit) {
+ return MO_CONTINUE_MATCHING;
+ }
+
if (!aux->accept_eod) {
return MO_CONTINUE_MATCHING;
}
@@ -688,9 +688,9 @@ char nfaExecMcClellan16_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
/* do main buffer region */
const u8 *final_look;
- char rv = mcclellanExec16_i_ni(m, &s, q->state, cur_buf + sp,
- local_ep - sp, offset + sp, cb, context,
- single, &final_look, mode);
+ char rv = mcclellanExec16_i_ni(m, &s, q->state, cur_buf + sp,
+ local_ep - sp, offset + sp, cb, context,
+ single, &final_look, mode);
if (rv == MO_DEAD) {
*(u16 *)q->state = 0;
return MO_DEAD;
@@ -760,16 +760,16 @@ char nfaExecMcClellan16_Bi(const struct NFA *n, u64a offset, const u8 *buffer,
const struct mcclellan *m = getImplNfa(n);
u32 s = m->start_anchored;
- if (mcclellanExec16_i(m, &s, NULL, buffer, length, offset, cb, context,
- single, NULL, CALLBACK_OUTPUT)
+ if (mcclellanExec16_i(m, &s, NULL, buffer, length, offset, cb, context,
+ single, NULL, CALLBACK_OUTPUT)
== MO_DEAD) {
return s ? MO_ALIVE : MO_DEAD;
}
- if (m->has_wide == 1 && s >= m->wide_limit) {
- return MO_ALIVE;
- }
-
+ if (m->has_wide == 1 && s >= m->wide_limit) {
+ return MO_ALIVE;
+ }
+
const struct mstate_aux *aux = get_aux(m, s);
if (aux->accept_eod) {
@@ -848,7 +848,7 @@ char nfaExecMcClellan8_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
char rv = mcclellanExec8_i_ni(m, &s, cur_buf + sp, local_ep - sp,
offset + sp, cb, context, single,
&final_look, mode);
-
+
if (rv == MO_HALT_MATCHING) {
*(u8 *)q->state = 0;
return MO_DEAD;
@@ -1097,8 +1097,8 @@ char nfaExecMcClellan16_inAccept(const struct NFA *n, ReportID report,
u16 s = *(u16 *)q->state;
DEBUG_PRINTF("checking accepts for %hu\n", s);
- return (m->has_wide == 1 && s >= m->wide_limit) ?
- 0 : mcclellanHasAccept(m, get_aux(m, s), report);
+ return (m->has_wide == 1 && s >= m->wide_limit) ?
+ 0 : mcclellanHasAccept(m, get_aux(m, s), report);
}
char nfaExecMcClellan16_inAnyAccept(const struct NFA *n, struct mq *q) {
@@ -1108,8 +1108,8 @@ char nfaExecMcClellan16_inAnyAccept(const struct NFA *n, struct mq *q) {
u16 s = *(u16 *)q->state;
DEBUG_PRINTF("checking accepts for %hu\n", s);
- return (m->has_wide == 1 && s >= m->wide_limit) ?
- 0 : !!get_aux(m, s)->accept;
+ return (m->has_wide == 1 && s >= m->wide_limit) ?
+ 0 : !!get_aux(m, s)->accept;
}
char nfaExecMcClellan8_Q2(const struct NFA *n, struct mq *q, s64a end) {
@@ -1194,12 +1194,12 @@ char nfaExecMcClellan16_initCompressedState(const struct NFA *nfa, u64a offset,
void *state, UNUSED u8 key) {
const struct mcclellan *m = getImplNfa(nfa);
u16 s = offset ? m->start_floating : m->start_anchored;
-
- // new byte
- if (m->has_wide) {
- unaligned_store_u16((u16 *)state + 1, 0);
- }
-
+
+    // clear the extra wide-state offset slot
+ if (m->has_wide) {
+ unaligned_store_u16((u16 *)state + 1, 0);
+ }
+
if (s) {
unaligned_store_u16(state, s);
return 1;
@@ -1229,24 +1229,24 @@ void nfaExecMcClellan16_SimpStream(const struct NFA *nfa, char *state,
const u8 *buf, char top, size_t start_off,
size_t len, NfaCallback cb, void *ctxt) {
const struct mcclellan *m = getImplNfa(nfa);
- u32 s;
-
- if (top) {
- s = m->start_anchored;
-
- // new byte
- if (m->has_wide) {
- unaligned_store_u16((u16 *)state + 1, 0);
- }
- } else {
- s = unaligned_load_u16(state);
- }
-
+ u32 s;
+
+ if (top) {
+ s = m->start_anchored;
+
+        // clear the extra wide-state offset slot
+ if (m->has_wide) {
+ unaligned_store_u16((u16 *)state + 1, 0);
+ }
+ } else {
+ s = unaligned_load_u16(state);
+ }
+
if (m->flags & MCCLELLAN_FLAG_SINGLE) {
- mcclellanExec16_i(m, &s, state, buf + start_off, len - start_off,
+ mcclellanExec16_i(m, &s, state, buf + start_off, len - start_off,
start_off, cb, ctxt, 1, NULL, CALLBACK_OUTPUT);
} else {
- mcclellanExec16_i(m, &s, state, buf + start_off, len - start_off,
+ mcclellanExec16_i(m, &s, state, buf + start_off, len - start_off,
start_off, cb, ctxt, 0, NULL, CALLBACK_OUTPUT);
}
@@ -1277,16 +1277,16 @@ char nfaExecMcClellan8_queueInitState(UNUSED const struct NFA *nfa,
char nfaExecMcClellan16_queueInitState(UNUSED const struct NFA *nfa,
struct mq *q) {
- const struct mcclellan *m = getImplNfa(nfa);
- assert(m->has_wide == 1 ? nfa->scratchStateSize == 4
- : nfa->scratchStateSize == 2);
+ const struct mcclellan *m = getImplNfa(nfa);
+ assert(m->has_wide == 1 ? nfa->scratchStateSize == 4
+ : nfa->scratchStateSize == 2);
assert(ISALIGNED_N(q->state, 2));
*(u16 *)q->state = 0;
-
- // new byte
- if (m->has_wide) {
- unaligned_store_u16((u16 *)q->state + 1, 0);
- }
+
+    // clear the extra wide-state offset slot
+ if (m->has_wide) {
+ unaligned_store_u16((u16 *)q->state + 1, 0);
+ }
return 0;
}
@@ -1312,39 +1312,39 @@ char nfaExecMcClellan8_expandState(UNUSED const struct NFA *nfa, void *dest,
char nfaExecMcClellan16_queueCompressState(UNUSED const struct NFA *nfa,
const struct mq *q,
UNUSED s64a loc) {
- const struct mcclellan *m = getImplNfa(nfa);
+ const struct mcclellan *m = getImplNfa(nfa);
void *dest = q->streamState;
const void *src = q->state;
- assert(m->has_wide == 1 ? nfa->scratchStateSize == 4
- : nfa->scratchStateSize == 2);
- assert(m->has_wide == 1 ? nfa->streamStateSize == 4
- : nfa->streamStateSize == 2);
-
+ assert(m->has_wide == 1 ? nfa->scratchStateSize == 4
+ : nfa->scratchStateSize == 2);
+ assert(m->has_wide == 1 ? nfa->streamStateSize == 4
+ : nfa->streamStateSize == 2);
+
assert(ISALIGNED_N(src, 2));
unaligned_store_u16(dest, *(const u16 *)(src));
-
- // new byte
- if (m->has_wide) {
- unaligned_store_u16((u16 *)dest + 1, *((const u16 *)src + 1));
- }
+
+    // copy the extra wide-state offset slot
+ if (m->has_wide) {
+ unaligned_store_u16((u16 *)dest + 1, *((const u16 *)src + 1));
+ }
return 0;
}
char nfaExecMcClellan16_expandState(UNUSED const struct NFA *nfa, void *dest,
const void *src, UNUSED u64a offset,
UNUSED u8 key) {
- const struct mcclellan *m = getImplNfa(nfa);
- assert(m->has_wide == 1 ? nfa->scratchStateSize == 4
- : nfa->scratchStateSize == 2);
- assert(m->has_wide == 1 ? nfa->streamStateSize == 4
- : nfa->streamStateSize == 2);
-
+ const struct mcclellan *m = getImplNfa(nfa);
+ assert(m->has_wide == 1 ? nfa->scratchStateSize == 4
+ : nfa->scratchStateSize == 2);
+ assert(m->has_wide == 1 ? nfa->streamStateSize == 4
+ : nfa->streamStateSize == 2);
+
assert(ISALIGNED_N(dest, 2));
*(u16 *)dest = unaligned_load_u16(src);
-
- // new byte
- if (m->has_wide) {
- *((u16 *)dest + 1) = unaligned_load_u16((const u16 *)src + 1);
- }
+
+    // restore the extra wide-state offset slot
+ if (m->has_wide) {
+ *((u16 *)dest + 1) = unaligned_load_u16((const u16 *)src + 1);
+ }
return 0;
}
diff --git a/contrib/libs/hyperscan/src/nfa/mcclellan_common_impl.h b/contrib/libs/hyperscan/src/nfa/mcclellan_common_impl.h
index 431b554693..7b0e7f48cd 100644
--- a/contrib/libs/hyperscan/src/nfa/mcclellan_common_impl.h
+++ b/contrib/libs/hyperscan/src/nfa/mcclellan_common_impl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -82,108 +82,108 @@ u32 doSherman16(const char *sherman_state, u8 cprime, const u16 *succ_table,
u32 daddy = *(const u16 *)(sherman_state + SHERMAN_DADDY_OFFSET);
return succ_table[(daddy << as) + cprime];
}
-
-static really_inline
-u16 doWide16(const char *wide_entry, const u8 **c_inout, const u8 *end,
- const u8 *remap, const u16 *s, char *qstate, u16 *offset) {
- // Internal relative offset after the last visit of the wide state.
- if (qstate != NULL) { // stream mode
- *offset = unaligned_load_u16((const u16 *)(qstate + 2));
- }
-
- u8 successful = 0;
- const u8 *c = *c_inout;
- u32 len_c = end - c;
-
- u16 width = *(const u16 *)(wide_entry + WIDE_WIDTH_OFFSET);
- assert(width >= 8);
- const u8 *symbols = (const u8 *)(wide_entry + WIDE_SYMBOL_OFFSET16);
- const u16 *trans = (const u16 *)(wide_entry +
- WIDE_TRANSITION_OFFSET16(width));
-
- assert(*offset < width);
- u16 len_w = width - *offset;
- const u8 *sym = symbols + *offset;
-
- char tmp[16];
- u16 pos = 0;
-
- if (*offset == 0 && remap[*c] != *sym) {
- goto normal;
- }
-
- // both in (16, +oo).
- while (len_w >= 16 && len_c >= 16) {
- m128 str_w = loadu128(sym);
- for (size_t i = 0; i < 16; i++) {
- tmp[i] = remap[*(c + i)];
- }
- m128 str_c = loadu128(tmp);
-
- u32 z = movemask128(eq128(str_w, str_c));
- pos = ctz32(~z);
- assert(pos <= 16);
-
- if (pos < 16) {
- goto normal;
- }
-
- sym += 16;
- c += 16;
- len_w -= 16;
- len_c -= 16;
- }
-
- pos = 0;
- // at least one in (0, 16).
- u32 loadLength_w = MIN(len_w, 16);
- u32 loadLength_c = MIN(len_c, 16);
- m128 str_w = loadbytes128(sym, loadLength_w);
- for (size_t i = 0; i < loadLength_c; i++) {
- tmp[i] = remap[*(c + i)];
- }
- m128 str_c = loadbytes128(tmp, loadLength_c);
-
- u32 z = movemask128(eq128(str_w, str_c));
- pos = ctz32(~z);
-
- pos = MIN(pos, MIN(loadLength_w, loadLength_c));
-
- if (loadLength_w <= loadLength_c) {
- assert(pos <= loadLength_w);
- // successful matching.
- if (pos == loadLength_w) {
- c -= 1;
- successful = 1;
- }
- // failure, do nothing.
- } else {
- assert(pos <= loadLength_c);
- // successful partial matching.
- if (pos == loadLength_c) {
- c -= 1;
- goto partial;
- }
- // failure, do nothing.
- }
-
-normal:
- *offset = 0;
- if (qstate != NULL) {
- // Internal relative offset.
- unaligned_store_u16(qstate + 2, *offset);
- }
- c += pos;
- *c_inout = c;
- return successful ? *trans : *(trans + 1 + remap[*c]);
-
-partial:
- *offset = sym - symbols + pos;
- if (qstate != NULL) {
- // Internal relative offset.
- unaligned_store_u16(qstate + 2, *offset);
- }
- c += pos;
- *c_inout = c;
- return *s;
-}
+
+static really_inline
+u16 doWide16(const char *wide_entry, const u8 **c_inout, const u8 *end,
+ const u8 *remap, const u16 *s, char *qstate, u16 *offset) {
+ // Internal relative offset after the last visit of the wide state.
+ if (qstate != NULL) { // stream mode
+ *offset = unaligned_load_u16((const u16 *)(qstate + 2));
+ }
+
+ u8 successful = 0;
+ const u8 *c = *c_inout;
+ u32 len_c = end - c;
+
+ u16 width = *(const u16 *)(wide_entry + WIDE_WIDTH_OFFSET);
+ assert(width >= 8);
+ const u8 *symbols = (const u8 *)(wide_entry + WIDE_SYMBOL_OFFSET16);
+ const u16 *trans = (const u16 *)(wide_entry +
+ WIDE_TRANSITION_OFFSET16(width));
+
+ assert(*offset < width);
+ u16 len_w = width - *offset;
+ const u8 *sym = symbols + *offset;
+
+ char tmp[16];
+ u16 pos = 0;
+
+ if (*offset == 0 && remap[*c] != *sym) {
+ goto normal;
+ }
+
+ // both in (16, +oo).
+ while (len_w >= 16 && len_c >= 16) {
+ m128 str_w = loadu128(sym);
+ for (size_t i = 0; i < 16; i++) {
+ tmp[i] = remap[*(c + i)];
+ }
+ m128 str_c = loadu128(tmp);
+
+ u32 z = movemask128(eq128(str_w, str_c));
+ pos = ctz32(~z);
+ assert(pos <= 16);
+
+ if (pos < 16) {
+ goto normal;
+ }
+
+ sym += 16;
+ c += 16;
+ len_w -= 16;
+ len_c -= 16;
+ }
+
+ pos = 0;
+ // at least one in (0, 16).
+ u32 loadLength_w = MIN(len_w, 16);
+ u32 loadLength_c = MIN(len_c, 16);
+ m128 str_w = loadbytes128(sym, loadLength_w);
+ for (size_t i = 0; i < loadLength_c; i++) {
+ tmp[i] = remap[*(c + i)];
+ }
+ m128 str_c = loadbytes128(tmp, loadLength_c);
+
+ u32 z = movemask128(eq128(str_w, str_c));
+ pos = ctz32(~z);
+
+ pos = MIN(pos, MIN(loadLength_w, loadLength_c));
+
+ if (loadLength_w <= loadLength_c) {
+ assert(pos <= loadLength_w);
+ // successful matching.
+ if (pos == loadLength_w) {
+ c -= 1;
+ successful = 1;
+ }
+ // failure, do nothing.
+ } else {
+ assert(pos <= loadLength_c);
+ // successful partial matching.
+ if (pos == loadLength_c) {
+ c -= 1;
+ goto partial;
+ }
+ // failure, do nothing.
+ }
+
+normal:
+ *offset = 0;
+ if (qstate != NULL) {
+ // Internal relative offset.
+ unaligned_store_u16(qstate + 2, *offset);
+ }
+ c += pos;
+ *c_inout = c;
+ return successful ? *trans : *(trans + 1 + remap[*c]);
+
+partial:
+ *offset = sym - symbols + pos;
+ if (qstate != NULL) {
+ // Internal relative offset.
+ unaligned_store_u16(qstate + 2, *offset);
+ }
+ c += pos;
+ *c_inout = c;
+ return *s;
+}
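doWide16 above matches the remapped input against a wide state's symbol chain 16 bytes at a time: a vector byte-equality produces a 16-bit mask, and counting trailing ones (ctz of the complement) yields the first mismatch position. A self-contained sketch of that comparison step, written with raw SSE2 intrinsics instead of the library's loadu128/eq128/movemask128 wrappers (assumed equivalent here):

    #include <emmintrin.h>   /* SSE2 */
    #include <stdint.h>

    /* Index of the first differing byte in two 16-byte blocks, or 16 if
     * they are equal. Mirrors the z / ctz32(~z) logic in doWide16. */
    static inline unsigned first_mismatch16(const uint8_t *a, const uint8_t *b) {
        __m128i va = _mm_loadu_si128((const __m128i *)a);
        __m128i vb = _mm_loadu_si128((const __m128i *)b);
        unsigned eq = (unsigned)_mm_movemask_epi8(_mm_cmpeq_epi8(va, vb));
        unsigned ne = ~eq & 0xffffu;         /* bit i set iff a[i] != b[i] */
        return ne ? (unsigned)__builtin_ctz(ne) : 16;
    }

If all 16 bytes match, the scan advances a whole block; otherwise the mismatch position decides between the success transition and the per-symbol failure transitions.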
diff --git a/contrib/libs/hyperscan/src/nfa/mcclellan_internal.h b/contrib/libs/hyperscan/src/nfa/mcclellan_internal.h
index 60b3cf028e..482fdb1bc9 100644
--- a/contrib/libs/hyperscan/src/nfa/mcclellan_internal.h
+++ b/contrib/libs/hyperscan/src/nfa/mcclellan_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -50,16 +50,16 @@ extern "C"
#define SHERMAN_CHARS_OFFSET 4
#define SHERMAN_STATES_OFFSET(sso_len) (4 + (sso_len))
-#define WIDE_STATE 2
-#define WIDE_ENTRY_OFFSET8(weo_pos) (2 + (weo_pos))
-#define WIDE_ENTRY_OFFSET16(weo_pos) (4 + (weo_pos))
-
-#define WIDE_WIDTH_OFFSET 0
-#define WIDE_SYMBOL_OFFSET8 1
-#define WIDE_TRANSITION_OFFSET8(wto_width) (1 + (wto_width))
-#define WIDE_SYMBOL_OFFSET16 2
-#define WIDE_TRANSITION_OFFSET16(wto_width) (2 + ROUNDUP_N(wto_width, 2))
-
+#define WIDE_STATE 2
+#define WIDE_ENTRY_OFFSET8(weo_pos) (2 + (weo_pos))
+#define WIDE_ENTRY_OFFSET16(weo_pos) (4 + (weo_pos))
+
+#define WIDE_WIDTH_OFFSET 0
+#define WIDE_SYMBOL_OFFSET8 1
+#define WIDE_TRANSITION_OFFSET8(wto_width) (1 + (wto_width))
+#define WIDE_SYMBOL_OFFSET16 2
+#define WIDE_TRANSITION_OFFSET16(wto_width) (2 + ROUNDUP_N(wto_width, 2))
+
struct report_list {
u32 count;
ReportID report[];
@@ -89,17 +89,17 @@ struct mcclellan {
u16 accel_limit_8; /**< 8 bit, lowest accelerable state */
u16 accept_limit_8; /**< 8 bit, lowest accept state */
u16 sherman_limit; /**< lowest sherman state */
- u16 wide_limit; /**< 8/16 bit, lowest wide head state */
+ u16 wide_limit; /**< 8/16 bit, lowest wide head state */
u8 alphaShift;
u8 flags;
u8 has_accel; /**< 1 iff there are any accel plans */
- u8 has_wide; /**< 1 iff there exists any wide state */
+ u8 has_wide; /**< 1 iff there exists any wide state */
u8 remap[256]; /**< remaps characters to a smaller alphabet */
ReportID arb_report; /**< one of the accepts that this dfa may raise */
- u32 accel_offset; /**< offset of accel structures from start of McClellan */
+ u32 accel_offset; /**< offset of accel structures from start of McClellan */
u32 haig_offset; /**< reserved for use by Haig, relative to start of NFA */
- u32 wide_offset; /**< offset of the wide state entries to the start of the
- * nfa structure */
+ u32 wide_offset; /**< offset of the wide state entries to the start of the
+ * nfa structure */
};
static really_inline
@@ -120,43 +120,43 @@ char *findMutableShermanState(char *sherman_base_offset, u16 sherman_base,
return sherman_base_offset + SHERMAN_FIXED_SIZE * (s - sherman_base);
}
-static really_inline
-const char *findWideEntry8(UNUSED const struct mcclellan *m,
- const char *wide_base, u32 wide_limit, u32 s) {
- UNUSED u8 type = *(const u8 *)wide_base;
- assert(type == WIDE_STATE);
- const u32 entry_offset
- = *(const u32 *)(wide_base
- + WIDE_ENTRY_OFFSET8((s - wide_limit) * sizeof(u32)));
-
- const char *rv = wide_base + entry_offset;
- assert(rv < (const char *)m + m->length - sizeof(struct NFA));
- return rv;
-}
-
-static really_inline
-const char *findWideEntry16(UNUSED const struct mcclellan *m,
- const char *wide_base, u32 wide_limit, u32 s) {
- UNUSED u8 type = *(const u8 *)wide_base;
- assert(type == WIDE_STATE);
- const u32 entry_offset
- = *(const u32 *)(wide_base
- + WIDE_ENTRY_OFFSET16((s - wide_limit) * sizeof(u32)));
-
- const char *rv = wide_base + entry_offset;
- assert(rv < (const char *)m + m->length - sizeof(struct NFA));
- return rv;
-}
-
-static really_inline
-char *findMutableWideEntry16(char *wide_base, u32 wide_limit, u32 s) {
- u32 entry_offset
- = *(const u32 *)(wide_base
- + WIDE_ENTRY_OFFSET16((s - wide_limit) * sizeof(u32)));
-
- return wide_base + entry_offset;
-}
-
+static really_inline
+const char *findWideEntry8(UNUSED const struct mcclellan *m,
+ const char *wide_base, u32 wide_limit, u32 s) {
+ UNUSED u8 type = *(const u8 *)wide_base;
+ assert(type == WIDE_STATE);
+ const u32 entry_offset
+ = *(const u32 *)(wide_base
+ + WIDE_ENTRY_OFFSET8((s - wide_limit) * sizeof(u32)));
+
+ const char *rv = wide_base + entry_offset;
+ assert(rv < (const char *)m + m->length - sizeof(struct NFA));
+ return rv;
+}
+
+static really_inline
+const char *findWideEntry16(UNUSED const struct mcclellan *m,
+ const char *wide_base, u32 wide_limit, u32 s) {
+ UNUSED u8 type = *(const u8 *)wide_base;
+ assert(type == WIDE_STATE);
+ const u32 entry_offset
+ = *(const u32 *)(wide_base
+ + WIDE_ENTRY_OFFSET16((s - wide_limit) * sizeof(u32)));
+
+ const char *rv = wide_base + entry_offset;
+ assert(rv < (const char *)m + m->length - sizeof(struct NFA));
+ return rv;
+}
+
+static really_inline
+char *findMutableWideEntry16(char *wide_base, u32 wide_limit, u32 s) {
+ u32 entry_offset
+ = *(const u32 *)(wide_base
+ + WIDE_ENTRY_OFFSET16((s - wide_limit) * sizeof(u32)));
+
+ return wide_base + entry_offset;
+}
+
#ifdef __cplusplus
}
#endif
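The offsets above fix the byte layout of each 16-bit wide entry: a u16 width, then the width-byte symbol chain padded to an even length, then a u16 transition table (one success transition followed by one failure transition per alphabet symbol). A layout sketch using the same arithmetic (standalone; names hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    #define ROUNDUP_N(x, n) (((x) + (n) - 1) & ~((size_t)(n) - 1))

    /* One 16-bit wide entry:
     *   [0..2)                  u16 width
     *   [2..2+width)            u8 symbol chain, padded to an even size
     *   [2+roundup(width,2)..)  u16 transitions: [0] success,
     *                           [1..alpha] failure per symbol */
    static inline const uint16_t *
    wide_transitions16(const char *entry, uint16_t width) {
        return (const uint16_t *)(entry + 2 + ROUNDUP_N(width, 2));
    }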
diff --git a/contrib/libs/hyperscan/src/nfa/mcclellancompile.cpp b/contrib/libs/hyperscan/src/nfa/mcclellancompile.cpp
index 3b73488581..27ec1716e9 100644
--- a/contrib/libs/hyperscan/src/nfa/mcclellancompile.cpp
+++ b/contrib/libs/hyperscan/src/nfa/mcclellancompile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -56,19 +56,19 @@
#include <cstring>
#include <map>
#include <memory>
-#include <queue>
+#include <queue>
#include <set>
#include <vector>
#include <boost/range/adaptor/map.hpp>
-#include "mcclellandump.h"
-#include "util/dump_util.h"
-#include "util/dump_charclass.h"
-
+#include "mcclellandump.h"
+#include "util/dump_util.h"
+#include "util/dump_charclass.h"
+
using namespace std;
using boost::adaptors::map_keys;
-using boost::dynamic_bitset;
+using boost::dynamic_bitset;
#define ACCEL_DFA_MAX_OFFSET_DEPTH 4
@@ -88,8 +88,8 @@ namespace /* anon */ {
struct dstate_extra {
u16 daddytaken = 0;
bool shermanState = false;
- bool wideState = false;
- bool wideHead = false;
+ bool wideState = false;
+ bool wideHead = false;
};
struct dfa_info {
@@ -97,8 +97,8 @@ struct dfa_info {
raw_dfa &raw;
vector<dstate> &states;
vector<dstate_extra> extra;
- vector<vector<dstate_id_t>> wide_state_chain;
- vector<vector<symbol_t>> wide_symbol_chain;
+ vector<vector<dstate_id_t>> wide_state_chain;
+ vector<vector<symbol_t>> wide_symbol_chain;
const u16 alpha_size; /* including special symbols */
const array<u16, ALPHABET_SIZE> &alpha_remap;
const u16 impl_alpha_size;
@@ -122,14 +122,14 @@ struct dfa_info {
return extra[raw_id].shermanState;
}
- bool is_widestate(dstate_id_t raw_id) const {
- return extra[raw_id].wideState;
- }
-
- bool is_widehead(dstate_id_t raw_id) const {
- return extra[raw_id].wideHead;
- }
-
+ bool is_widestate(dstate_id_t raw_id) const {
+ return extra[raw_id].wideState;
+ }
+
+ bool is_widehead(dstate_id_t raw_id) const {
+ return extra[raw_id].wideHead;
+ }
+
size_t size(void) const { return states.size(); }
};
@@ -142,35 +142,35 @@ u8 dfa_info::getAlphaShift() const {
}
}
-struct state_prev_info {
- vector<vector<dstate_id_t>> prev_vec;
- explicit state_prev_info(size_t alpha_size) : prev_vec(alpha_size) {}
-};
-
-struct DfaPrevInfo {
- u16 impl_alpha_size;
- u16 state_num;
- vector<state_prev_info> states;
- set<dstate_id_t> accepts;
-
- explicit DfaPrevInfo(raw_dfa &rdfa);
-};
-
-DfaPrevInfo::DfaPrevInfo(raw_dfa &rdfa)
- : impl_alpha_size(rdfa.getImplAlphaSize()), state_num(rdfa.states.size()),
- states(state_num, state_prev_info(impl_alpha_size)){
- for (size_t i = 0; i < states.size(); i++) {
- for (symbol_t sym = 0; sym < impl_alpha_size; sym++) {
- dstate_id_t curr = rdfa.states[i].next[sym];
- states[curr].prev_vec[sym].push_back(i);
- }
- if (!rdfa.states[i].reports.empty()
- || !rdfa.states[i].reports_eod.empty()) {
- DEBUG_PRINTF("accept raw state: %ld\n", i);
- accepts.insert(i);
- }
- }
-}
+struct state_prev_info {
+ vector<vector<dstate_id_t>> prev_vec;
+ explicit state_prev_info(size_t alpha_size) : prev_vec(alpha_size) {}
+};
+
+struct DfaPrevInfo {
+ u16 impl_alpha_size;
+ u16 state_num;
+ vector<state_prev_info> states;
+ set<dstate_id_t> accepts;
+
+ explicit DfaPrevInfo(raw_dfa &rdfa);
+};
+
+DfaPrevInfo::DfaPrevInfo(raw_dfa &rdfa)
+ : impl_alpha_size(rdfa.getImplAlphaSize()), state_num(rdfa.states.size()),
+      states(state_num, state_prev_info(impl_alpha_size)) {
+ for (size_t i = 0; i < states.size(); i++) {
+ for (symbol_t sym = 0; sym < impl_alpha_size; sym++) {
+ dstate_id_t curr = rdfa.states[i].next[sym];
+ states[curr].prev_vec[sym].push_back(i);
+ }
+ if (!rdfa.states[i].reports.empty()
+ || !rdfa.states[i].reports_eod.empty()) {
+            DEBUG_PRINTF("accept raw state: %zu\n", i);
+ accepts.insert(i);
+ }
+ }
+}
} // namespace
static
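DfaPrevInfo above inverts the DFA's transition function: for every state, prev_vec[sym] lists each predecessor reaching it on sym, which lets the wide-state discovery pass that follows walk backwards from accept states. The same inversion over a plain transition table, as a minimal standalone sketch:

    #include <vector>

    // next[s][c] is the successor of state s on symbol c; the result
    // prev[t][c] lists every s with next[s][c] == t.
    static std::vector<std::vector<std::vector<unsigned>>>
    invert_transitions(const std::vector<std::vector<unsigned>> &next,
                       unsigned alpha_size) {
        std::vector<std::vector<std::vector<unsigned>>> prev(
            next.size(), std::vector<std::vector<unsigned>>(alpha_size));
        for (unsigned s = 0; s < (unsigned)next.size(); s++) {
            for (unsigned c = 0; c < alpha_size; c++) {
                prev[next[s][c]][c].push_back(s);
            }
        }
        return prev;
    }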
@@ -198,11 +198,11 @@ void markEdges(NFA *n, u16 *succ_table, const dfa_info &info) {
for (size_t j = 0; j < alphaSize; j++) {
size_t c_prime = (i << alphaShift) + j;
- // wide state has no aux structure.
- if (m->has_wide && succ_table[c_prime] >= m->wide_limit) {
- continue;
- }
-
+ // wide state has no aux structure.
+ if (m->has_wide && succ_table[c_prime] >= m->wide_limit) {
+ continue;
+ }
+
mstate_aux *aux = getAux(n, succ_table[c_prime]);
if (aux->accept) {
@@ -217,8 +217,8 @@ void markEdges(NFA *n, u16 *succ_table, const dfa_info &info) {
/* handle the sherman states */
char *sherman_base_offset = (char *)n + m->sherman_offset;
- u16 sherman_ceil = m->has_wide == 1 ? m->wide_limit : m->state_count;
- for (u16 j = m->sherman_limit; j < sherman_ceil; j++) {
+ u16 sherman_ceil = m->has_wide == 1 ? m->wide_limit : m->state_count;
+ for (u16 j = m->sherman_limit; j < sherman_ceil; j++) {
char *sherman_cur
= findMutableShermanState(sherman_base_offset, m->sherman_limit, j);
assert(*(sherman_cur + SHERMAN_TYPE_OFFSET) == SHERMAN_STATE);
@@ -227,11 +227,11 @@ void markEdges(NFA *n, u16 *succ_table, const dfa_info &info) {
for (u8 i = 0; i < len; i++) {
u16 succ_i = unaligned_load_u16((u8 *)&succs[i]);
- // wide state has no aux structure.
- if (m->has_wide && succ_i >= m->wide_limit) {
- continue;
- }
-
+ // wide state has no aux structure.
+ if (m->has_wide && succ_i >= m->wide_limit) {
+ continue;
+ }
+
mstate_aux *aux = getAux(n, succ_i);
if (aux->accept) {
@@ -245,51 +245,51 @@ void markEdges(NFA *n, u16 *succ_table, const dfa_info &info) {
unaligned_store_u16((u8 *)&succs[i], succ_i);
}
}
-
- /* handle the wide states */
- if (m->has_wide) {
- u32 wide_limit = m->wide_limit;
- char *wide_base = (char *)n + m->wide_offset;
- assert(*wide_base == WIDE_STATE);
- u16 wide_number = verify_u16(info.wide_symbol_chain.size());
- // traverse over wide head states.
- for (u16 j = wide_limit; j < wide_limit + wide_number; j++) {
- char *wide_cur
- = findMutableWideEntry16(wide_base, wide_limit, j);
- u16 width = *(const u16 *)(wide_cur + WIDE_WIDTH_OFFSET);
- u16 *trans = (u16 *)(wide_cur + WIDE_TRANSITION_OFFSET16(width));
-
- // check successful transition
- u16 next = unaligned_load_u16((u8 *)trans);
- if (next < wide_limit) {
- mstate_aux *aux = getAux(n, next);
- if (aux->accept) {
- next |= ACCEPT_FLAG;
- }
- if (aux->accel_offset) {
- next |= ACCEL_FLAG;
- }
- unaligned_store_u16((u8 *)trans, next);
- }
- trans++;
-
- // check failure transition
- for (symbol_t k = 0; k < alphaSize; k++) {
- u16 next_k = unaligned_load_u16((u8 *)&trans[k]);
- if (next_k >= wide_limit) {
- continue;
- }
- mstate_aux *aux_k = getAux(n, next_k);
- if (aux_k->accept) {
- next_k |= ACCEPT_FLAG;
- }
- if (aux_k->accel_offset) {
- next_k |= ACCEL_FLAG;
- }
- unaligned_store_u16((u8 *)&trans[k], next_k);
- }
- }
- }
+
+ /* handle the wide states */
+ if (m->has_wide) {
+ u32 wide_limit = m->wide_limit;
+ char *wide_base = (char *)n + m->wide_offset;
+ assert(*wide_base == WIDE_STATE);
+ u16 wide_number = verify_u16(info.wide_symbol_chain.size());
+ // traverse over wide head states.
+ for (u16 j = wide_limit; j < wide_limit + wide_number; j++) {
+ char *wide_cur
+ = findMutableWideEntry16(wide_base, wide_limit, j);
+ u16 width = *(const u16 *)(wide_cur + WIDE_WIDTH_OFFSET);
+ u16 *trans = (u16 *)(wide_cur + WIDE_TRANSITION_OFFSET16(width));
+
+ // check successful transition
+ u16 next = unaligned_load_u16((u8 *)trans);
+ if (next < wide_limit) {
+ mstate_aux *aux = getAux(n, next);
+ if (aux->accept) {
+ next |= ACCEPT_FLAG;
+ }
+ if (aux->accel_offset) {
+ next |= ACCEL_FLAG;
+ }
+ unaligned_store_u16((u8 *)trans, next);
+ }
+ trans++;
+
+ // check failure transition
+ for (symbol_t k = 0; k < alphaSize; k++) {
+ u16 next_k = unaligned_load_u16((u8 *)&trans[k]);
+ if (next_k >= wide_limit) {
+ continue;
+ }
+ mstate_aux *aux_k = getAux(n, next_k);
+ if (aux_k->accept) {
+ next_k |= ACCEPT_FLAG;
+ }
+ if (aux_k->accel_offset) {
+ next_k |= ACCEL_FLAG;
+ }
+ unaligned_store_u16((u8 *)&trans[k], next_k);
+ }
+ }
+ }
}
u32 mcclellan_build_strat::max_allowed_offset_accel() const {
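markEdges above folds per-state accept/accel information into the successor entries themselves: each 16-bit successor gets ACCEPT_FLAG and/or ACCEL_FLAG OR'd into its top bits, so the runtime learns both facts from the transition load alone; wide states are skipped because they carry no mstate_aux. A pack/unpack sketch (flag values are an assumption, chosen to mirror the engine's top-two-bits convention):

    #include <stdint.h>

    #define SKETCH_ACCEPT_FLAG 0x8000u   /* assumed value */
    #define SKETCH_ACCEL_FLAG  0x4000u   /* assumed value */
    #define SKETCH_STATE_MASK  0x3fffu

    static inline uint16_t tag_succ(uint16_t s, int accept, int accel) {
        if (accept) s |= SKETCH_ACCEPT_FLAG;
        if (accel)  s |= SKETCH_ACCEL_FLAG;
        return s;
    }

    static inline uint16_t untag_succ(uint16_t s) {
        return s & SKETCH_STATE_MASK;
    }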
@@ -335,20 +335,20 @@ void populateBasicInfo(size_t state_size, const dfa_info &info,
m->start_anchored = info.implId(info.raw.start_anchored);
m->start_floating = info.implId(info.raw.start_floating);
m->has_accel = accel_count ? 1 : 0;
- m->has_wide = info.wide_state_chain.size() > 0 ? 1 : 0;
-
- if (state_size == sizeof(u8) && m->has_wide == 1) {
- // allocate 1 more byte for wide state use.
- nfa->scratchStateSize += sizeof(u8);
- nfa->streamStateSize += sizeof(u8);
- }
-
- if (state_size == sizeof(u16) && m->has_wide == 1) {
- // allocate 2 more bytes for wide state use.
- nfa->scratchStateSize += sizeof(u16);
- nfa->streamStateSize += sizeof(u16);
- }
-
+ m->has_wide = info.wide_state_chain.size() > 0 ? 1 : 0;
+
+ if (state_size == sizeof(u8) && m->has_wide == 1) {
+ // allocate 1 more byte for wide state use.
+ nfa->scratchStateSize += sizeof(u8);
+ nfa->streamStateSize += sizeof(u8);
+ }
+
+ if (state_size == sizeof(u16) && m->has_wide == 1) {
+ // allocate 2 more bytes for wide state use.
+ nfa->scratchStateSize += sizeof(u16);
+ nfa->streamStateSize += sizeof(u16);
+ }
+
if (single) {
m->flags |= MCCLELLAN_FLAG_SINGLE;
}
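populateBasicInfo above grows the scratch and stream state by one extra u8 (8-bit DFAs) or u16 (16-bit DFAs) whenever wide states exist; the extra slot persists the current position inside a wide state's symbol chain across stream writes. A sketch of the resulting 16-bit stream-state layout, matching the unaligned loads and stores used in the runtime (helper names hypothetical; memcpy stands in for the unaligned accessors):

    #include <stdint.h>
    #include <string.h>

    /* 16-bit McClellan stream state with wide states enabled:
     *   bytes [0..2): current state id
     *   bytes [2..4): offset into the active wide chain, 0 if none */
    static inline void save_state16(char *stream, uint16_t s,
                                    uint16_t wide_off) {
        memcpy(stream, &s, 2);
        memcpy(stream + 2, &wide_off, 2);
    }

    static inline uint16_t load_state16(const char *stream,
                                        uint16_t *wide_off) {
        uint16_t s;
        memcpy(&s, stream, 2);
        memcpy(wide_off, stream + 2, 2);
        return s;
    }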
@@ -521,24 +521,24 @@ size_t calcShermanRegionSize(const dfa_info &info) {
}
static
-size_t calcWideRegionSize(const dfa_info &info) {
- if (info.wide_state_chain.empty()) {
- return 0;
- }
-
- // wide info header
- size_t rv = info.wide_symbol_chain.size() * sizeof(u32) + 4;
-
- // wide info body
- for (const auto &chain : info.wide_symbol_chain) {
- rv += ROUNDUP_N(chain.size(), 2) +
- (info.impl_alpha_size + 1) * sizeof(u16) + 2;
- }
-
- return ROUNDUP_16(rv);
-}
-
-static
+size_t calcWideRegionSize(const dfa_info &info) {
+ if (info.wide_state_chain.empty()) {
+ return 0;
+ }
+
+ // wide info header
+ size_t rv = info.wide_symbol_chain.size() * sizeof(u32) + 4;
+
+ // wide info body
+ for (const auto &chain : info.wide_symbol_chain) {
+ rv += ROUNDUP_N(chain.size(), 2) +
+ (info.impl_alpha_size + 1) * sizeof(u16) + 2;
+ }
+
+ return ROUNDUP_16(rv);
+}
+
+static
void fillInAux(mstate_aux *aux, dstate_id_t i, const dfa_info &info,
const vector<u32> &reports, const vector<u32> &reports_eod,
vector<u32> &reportOffsets) {
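calcWideRegionSize above sums a 4-byte header plus one u32 offset per chain, then for each chain its padded symbol bytes, the (alpha + 1)-entry u16 transition table, and the u16 width field, rounding the total up to 16. A standalone re-derivation with a worked value (inputs hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    static size_t round_up(size_t x, size_t n) { return (x + n - 1) / n * n; }

    static size_t wide_region_size(const std::vector<size_t> &chain_widths,
                                   size_t impl_alpha_size) {
        size_t rv = chain_widths.size() * sizeof(uint32_t) + 4;   // header
        for (size_t w : chain_widths) {
            rv += round_up(w, 2)                                  // symbols
                + (impl_alpha_size + 1) * sizeof(uint16_t)        // transitions
                + 2;                                              // width field
        }
        return round_up(rv, 16);
    }

    // Example: one 9-symbol chain with a 32-symbol alphabet gives
    // header 8, body 10 + 66 + 2 = 78, total 86 -> rounded to 96.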
@@ -552,60 +552,60 @@ void fillInAux(mstate_aux *aux, dstate_id_t i, const dfa_info &info,
/* returns false on error */
static
-bool allocateFSN16(dfa_info &info, dstate_id_t *sherman_base,
- dstate_id_t *wide_limit) {
+bool allocateFSN16(dfa_info &info, dstate_id_t *sherman_base,
+ dstate_id_t *wide_limit) {
info.states[0].impl_id = 0; /* dead is always 0 */
vector<dstate_id_t> norm;
vector<dstate_id_t> sherm;
- vector<dstate_id_t> wideHead;
- vector<dstate_id_t> wideState;
+ vector<dstate_id_t> wideHead;
+ vector<dstate_id_t> wideState;
if (info.size() > (1 << 16)) {
DEBUG_PRINTF("too many states\n");
- *wide_limit = 0;
+ *wide_limit = 0;
return false;
}
for (u32 i = 1; i < info.size(); i++) {
- if (info.is_widehead(i)) {
- wideHead.push_back(i);
- } else if (info.is_widestate(i)) {
- wideState.push_back(i);
- } else if (info.is_sherman(i)) {
+ if (info.is_widehead(i)) {
+ wideHead.push_back(i);
+ } else if (info.is_widestate(i)) {
+ wideState.push_back(i);
+ } else if (info.is_sherman(i)) {
sherm.push_back(i);
} else {
norm.push_back(i);
}
}
- dstate_id_t next = 1;
+ dstate_id_t next = 1;
for (const dstate_id_t &s : norm) {
- DEBUG_PRINTF("[norm] mapping state %u to %u\n", s, next);
- info.states[s].impl_id = next++;
+ DEBUG_PRINTF("[norm] mapping state %u to %u\n", s, next);
+ info.states[s].impl_id = next++;
}
- *sherman_base = next;
+ *sherman_base = next;
for (const dstate_id_t &s : sherm) {
- DEBUG_PRINTF("[sherm] mapping state %u to %u\n", s, next);
- info.states[s].impl_id = next++;
- }
-
- *wide_limit = next;
- for (const dstate_id_t &s : wideHead) {
- DEBUG_PRINTF("[widehead] mapping state %u to %u\n", s, next);
- info.states[s].impl_id = next++;
- }
-
- for (const dstate_id_t &s : wideState) {
- DEBUG_PRINTF("[wide] mapping state %u to %u\n", s, next);
- info.states[s].impl_id = next++;
- }
-
+ DEBUG_PRINTF("[sherm] mapping state %u to %u\n", s, next);
+ info.states[s].impl_id = next++;
+ }
+
+ *wide_limit = next;
+ for (const dstate_id_t &s : wideHead) {
+ DEBUG_PRINTF("[widehead] mapping state %u to %u\n", s, next);
+ info.states[s].impl_id = next++;
+ }
+
+ for (const dstate_id_t &s : wideState) {
+ DEBUG_PRINTF("[wide] mapping state %u to %u\n", s, next);
+ info.states[s].impl_id = next++;
+ }
+
/* Check to see if we haven't over allocated our states */
- DEBUG_PRINTF("next sherman %u masked %u\n", next,
- (dstate_id_t)(next & STATE_MASK));
- return (next - 1) == ((next - 1) & STATE_MASK);
+ DEBUG_PRINTF("next sherman %u masked %u\n", next,
+ (dstate_id_t)(next & STATE_MASK));
+ return (next - 1) == ((next - 1) & STATE_MASK);
}
static
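allocateFSN16 above assigns impl ids in bands: normal states from 1 upward, then Sherman states (starting at sherman_base), then wide heads followed by wide bodies (starting at wide_limit). The runtime can therefore classify any state with two compares, which is exactly the s >= m->wide_limit tests seen throughout the execution paths. A sketch, with band-boundary names assumed from the function's parameters:

    #include <cstdint>

    enum StateKind { NORMAL_STATE, SHERMAN_STATE, WIDE_STATE_KIND };

    static inline StateKind classify(uint16_t s, uint16_t sherman_base,
                                     uint16_t wide_limit) {
        if (s >= wide_limit)   return WIDE_STATE_KIND;
        if (s >= sherman_base) return SHERMAN_STATE;
        return NORMAL_STATE;
    }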
@@ -622,16 +622,16 @@ bytecode_ptr<NFA> mcclellanCompile16(dfa_info &info, const CompileContext &cc,
assert(alphaShift <= 8);
u16 count_real_states;
- u16 wide_limit;
- if (!allocateFSN16(info, &count_real_states, &wide_limit)) {
+ u16 wide_limit;
+ if (!allocateFSN16(info, &count_real_states, &wide_limit)) {
DEBUG_PRINTF("failed to allocate state numbers, %zu states total\n",
info.size());
return nullptr;
}
- DEBUG_PRINTF("count_real_states: %d\n", count_real_states);
- DEBUG_PRINTF("non_wide_states: %d\n", wide_limit);
-
+ DEBUG_PRINTF("count_real_states: %d\n", count_real_states);
+ DEBUG_PRINTF("non_wide_states: %d\n", wide_limit);
+
auto ri = info.strat.gatherReports(reports, reports_eod, &single, &arb);
map<dstate_id_t, AccelScheme> accel_escape_info
= info.strat.getAccelInfo(cc.grey);
@@ -639,7 +639,7 @@ bytecode_ptr<NFA> mcclellanCompile16(dfa_info &info, const CompileContext &cc,
size_t tran_size = (1 << info.getAlphaShift())
* sizeof(u16) * count_real_states;
- size_t aux_size = sizeof(mstate_aux) * wide_limit;
+ size_t aux_size = sizeof(mstate_aux) * wide_limit;
size_t aux_offset = ROUNDUP_16(sizeof(NFA) + sizeof(mcclellan) + tran_size);
size_t accel_size = info.strat.accelSize() * accel_escape_info.size();
@@ -647,24 +647,24 @@ bytecode_ptr<NFA> mcclellanCompile16(dfa_info &info, const CompileContext &cc,
+ ri->getReportListSize(), 32);
size_t sherman_offset = ROUNDUP_16(accel_offset + accel_size);
size_t sherman_size = calcShermanRegionSize(info);
- size_t wide_offset = ROUNDUP_16(sherman_offset + sherman_size);
- size_t wide_size = calcWideRegionSize(info);
- size_t total_size = wide_offset + wide_size;
+ size_t wide_offset = ROUNDUP_16(sherman_offset + sherman_size);
+ size_t wide_size = calcWideRegionSize(info);
+ size_t total_size = wide_offset + wide_size;
accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
- DEBUG_PRINTF("aux_offset %zu\n", aux_offset);
- DEBUG_PRINTF("aux_size %zu\n", aux_size);
- DEBUG_PRINTF("rl size %u\n", ri->getReportListSize());
- DEBUG_PRINTF("accel_offset %zu\n", accel_offset + sizeof(NFA));
- DEBUG_PRINTF("accel_size %zu\n", accel_size);
- DEBUG_PRINTF("sherman_offset %zu\n", sherman_offset);
- DEBUG_PRINTF("sherman_size %zu\n", sherman_size);
- DEBUG_PRINTF("wide_offset %zu\n", wide_offset);
- DEBUG_PRINTF("wide_size %zu\n", wide_size);
- DEBUG_PRINTF("total_size %zu\n", total_size);
-
+ DEBUG_PRINTF("aux_offset %zu\n", aux_offset);
+ DEBUG_PRINTF("aux_size %zu\n", aux_size);
+ DEBUG_PRINTF("rl size %u\n", ri->getReportListSize());
+ DEBUG_PRINTF("accel_offset %zu\n", accel_offset + sizeof(NFA));
+ DEBUG_PRINTF("accel_size %zu\n", accel_size);
+ DEBUG_PRINTF("sherman_offset %zu\n", sherman_offset);
+ DEBUG_PRINTF("sherman_size %zu\n", sherman_size);
+ DEBUG_PRINTF("wide_offset %zu\n", wide_offset);
+ DEBUG_PRINTF("wide_size %zu\n", wide_size);
+ DEBUG_PRINTF("total_size %zu\n", total_size);
+
auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
char *nfa_base = (char *)nfa.get();
@@ -679,9 +679,9 @@ bytecode_ptr<NFA> mcclellanCompile16(dfa_info &info, const CompileContext &cc,
mstate_aux *aux = (mstate_aux *)(nfa_base + aux_offset);
mcclellan *m = (mcclellan *)getMutableImplNfa(nfa.get());
- m->wide_limit = wide_limit;
- m->wide_offset = wide_offset;
-
+ m->wide_limit = wide_limit;
+ m->wide_offset = wide_offset;
+
/* copy in the mc header information */
m->sherman_offset = sherman_offset;
m->sherman_end = total_size;
@@ -689,7 +689,7 @@ bytecode_ptr<NFA> mcclellanCompile16(dfa_info &info, const CompileContext &cc,
/* do normal states */
for (size_t i = 0; i < info.size(); i++) {
- if (info.is_sherman(i) || info.is_widestate(i)) {
+ if (info.is_sherman(i) || info.is_widestate(i)) {
continue;
}
@@ -727,7 +727,7 @@ bytecode_ptr<NFA> mcclellanCompile16(dfa_info &info, const CompileContext &cc,
mstate_aux *this_aux = getAux(nfa.get(), fs);
assert(fs >= count_real_states);
- assert(fs < wide_limit);
+ assert(fs < wide_limit);
char *curr_sherman_entry
= sherman_table + (fs - m->sherman_limit) * SHERMAN_FIXED_SIZE;
@@ -771,71 +771,71 @@ bytecode_ptr<NFA> mcclellanCompile16(dfa_info &info, const CompileContext &cc,
}
}
- if (!info.wide_state_chain.empty()) {
- /* do wide states using info */
- u16 wide_number = verify_u16(info.wide_symbol_chain.size());
- char *wide_base = nfa_base + m->wide_offset;
- assert(ISALIGNED_16(wide_base));
-
- char *wide_top = wide_base;
- *(u8 *)(wide_top++) = WIDE_STATE;
- wide_top = ROUNDUP_PTR(wide_top, 2);
- *(u16 *)(wide_top) = wide_number;
- wide_top += 2;
-
- char *curr_wide_entry = wide_top + wide_number * sizeof(u32);
- u32 *wide_offset_list = (u32 *)wide_top;
-
- /* get the order of writing wide states */
- vector<size_t> order(wide_number);
- for (size_t i = 0; i < wide_number; i++) {
- dstate_id_t head = info.wide_state_chain[i].front();
- size_t pos = info.implId(head) - m->wide_limit;
- order[pos] = i;
- }
-
- for (size_t i : order) {
- vector<dstate_id_t> &state_chain = info.wide_state_chain[i];
- vector<symbol_t> &symbol_chain = info.wide_symbol_chain[i];
-
- u16 width = verify_u16(symbol_chain.size());
- *(u16 *)(curr_wide_entry + WIDE_WIDTH_OFFSET) = width;
- u8 *chars = (u8 *)(curr_wide_entry + WIDE_SYMBOL_OFFSET16);
-
- // store wide state symbol chain
- for (size_t j = 0; j < width; j++) {
- *(chars++) = verify_u8(symbol_chain[j]);
- }
-
- // store wide state transition table
- u16 *trans = (u16 *)(curr_wide_entry
- + WIDE_TRANSITION_OFFSET16(width));
- dstate_id_t tail = state_chain[width - 1];
- symbol_t last = symbol_chain[width -1];
- dstate_id_t tran = info.states[tail].next[last];
- // 1. successful transition
- *trans++ = info.implId(tran);
- // 2. failure transition
- for (size_t j = 0; verify_u16(j) < width - 1; j++) {
- if (symbol_chain[j] != last) {
- tran = info.states[state_chain[j]].next[last];
- }
- }
- for (symbol_t sym = 0; sym < info.impl_alpha_size; sym++) {
- if (sym != last) {
- *trans++ = info.implId(info.states[tail].next[sym]);
- }
- else {
- *trans++ = info.implId(tran);
- }
- }
-
- *wide_offset_list++ = verify_u32(curr_wide_entry - wide_base);
-
- curr_wide_entry = (char *)trans;
- }
- }
-
+ if (!info.wide_state_chain.empty()) {
+ /* do wide states using info */
+ u16 wide_number = verify_u16(info.wide_symbol_chain.size());
+ char *wide_base = nfa_base + m->wide_offset;
+ assert(ISALIGNED_16(wide_base));
+
+ char *wide_top = wide_base;
+ *(u8 *)(wide_top++) = WIDE_STATE;
+ wide_top = ROUNDUP_PTR(wide_top, 2);
+ *(u16 *)(wide_top) = wide_number;
+ wide_top += 2;
+
+ char *curr_wide_entry = wide_top + wide_number * sizeof(u32);
+ u32 *wide_offset_list = (u32 *)wide_top;
+
+ /* get the order of writing wide states */
+ vector<size_t> order(wide_number);
+ for (size_t i = 0; i < wide_number; i++) {
+ dstate_id_t head = info.wide_state_chain[i].front();
+ size_t pos = info.implId(head) - m->wide_limit;
+ order[pos] = i;
+ }
+
+ for (size_t i : order) {
+ vector<dstate_id_t> &state_chain = info.wide_state_chain[i];
+ vector<symbol_t> &symbol_chain = info.wide_symbol_chain[i];
+
+ u16 width = verify_u16(symbol_chain.size());
+ *(u16 *)(curr_wide_entry + WIDE_WIDTH_OFFSET) = width;
+ u8 *chars = (u8 *)(curr_wide_entry + WIDE_SYMBOL_OFFSET16);
+
+ // store wide state symbol chain
+ for (size_t j = 0; j < width; j++) {
+ *(chars++) = verify_u8(symbol_chain[j]);
+ }
+
+ // store wide state transition table
+ u16 *trans = (u16 *)(curr_wide_entry
+ + WIDE_TRANSITION_OFFSET16(width));
+ dstate_id_t tail = state_chain[width - 1];
+            symbol_t last = symbol_chain[width - 1];
+ dstate_id_t tran = info.states[tail].next[last];
+ // 1. successful transition
+ *trans++ = info.implId(tran);
+ // 2. failure transition
+ for (size_t j = 0; verify_u16(j) < width - 1; j++) {
+ if (symbol_chain[j] != last) {
+ tran = info.states[state_chain[j]].next[last];
+ }
+ }
+ for (symbol_t sym = 0; sym < info.impl_alpha_size; sym++) {
+ if (sym != last) {
+ *trans++ = info.implId(info.states[tail].next[sym]);
+ }
+ else {
+ *trans++ = info.implId(tran);
+ }
+ }
+
+ *wide_offset_list++ = verify_u32(curr_wide_entry - wide_base);
+
+ curr_wide_entry = (char *)trans;
+ }
+ }
+
markEdges(nfa.get(), succ_table, info);
if (accel_states && nfa) {
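The writer above lays the wide region out as a type byte, one pad byte, a u16 entry count, a u32 offset per chain (ordered by the head state's impl id, so entry i belongs to impl state wide_limit + i), and then the entries themselves. Looking an entry up at runtime is a single offset fetch, mirroring findWideEntry16 (sketch; the unaligned read is done via memcpy):

    #include <cstdint>
    #include <cstring>

    // base points at the wide region; the u32 offset list starts 4 bytes in.
    static inline const char *wide_entry16(const char *base,
                                           uint32_t wide_limit, uint32_t s) {
        uint32_t off;
        std::memcpy(&off, base + 4 + (s - wide_limit) * sizeof(uint32_t),
                    sizeof(off));
        return base + off;
    }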
@@ -997,7 +997,7 @@ bytecode_ptr<NFA> mcclellanCompile8(dfa_info &info, const CompileContext &cc,
return nfa;
}
-#define MAX_SHERMAN_LIST_LEN 9
+#define MAX_SHERMAN_LIST_LEN 9
static
void addIfEarlier(flat_set<dstate_id_t> &dest, dstate_id_t candidate,
@@ -1081,16 +1081,16 @@ void find_better_daddy(dfa_info &info, dstate_id_t curr_id, bool using8bit,
if (trust_daddy_states) {
// Use the daddy already set for this state so long as it isn't already
// a Sherman state.
- dstate_id_t daddy = currState.daddy;
- if (!info.is_sherman(daddy) && !info.is_widestate(daddy)) {
+ dstate_id_t daddy = currState.daddy;
+ if (!info.is_sherman(daddy) && !info.is_widestate(daddy)) {
hinted.insert(currState.daddy);
} else {
// Fall back to granddaddy, which has already been processed (due
// to BFS ordering) and cannot be a Sherman state.
dstate_id_t granddaddy = info.states[currState.daddy].daddy;
- if (info.is_widestate(granddaddy)) {
- return;
- }
+ if (info.is_widestate(granddaddy)) {
+ return;
+ }
assert(!info.is_sherman(granddaddy));
hinted.insert(granddaddy);
}
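The fallback above prefers the precomputed daddy; only when that daddy is itself a Sherman or wide state does it try the granddaddy, which BFS ordering guarantees is not a Sherman state, and a wide granddaddy ends the search. The decision in isolation, sketched with predicates standing in for info.is_sherman and info.is_widestate:

    #include <functional>

    // Returns true and sets *out when a usable daddy hint exists.
    static bool pick_daddy_hint(unsigned daddy, unsigned granddaddy,
                                const std::function<bool(unsigned)> &is_sherman,
                                const std::function<bool(unsigned)> &is_wide,
                                unsigned *out) {
        if (!is_sherman(daddy) && !is_wide(daddy)) {
            *out = daddy;
            return true;
        }
        if (is_wide(granddaddy)) {
            return false;              // no usable hint, give up
        }
        *out = granddaddy;             // BFS order: cannot be a Sherman state
        return true;
    }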
@@ -1102,7 +1102,7 @@ void find_better_daddy(dfa_info &info, dstate_id_t curr_id, bool using8bit,
assert(donor < curr_id);
u32 score = 0;
- if (info.is_sherman(donor) || info.is_widestate(donor)) {
+ if (info.is_sherman(donor) || info.is_widestate(donor)) {
continue;
}
@@ -1175,290 +1175,290 @@ bool is_cyclic_near(const raw_dfa &raw, dstate_id_t root) {
return false;
}
-/* \brief Test for only-one-predecessor property. */
-static
-bool check_property1(const DfaPrevInfo &info, const u16 impl_alpha_size,
- const dstate_id_t curr_id, dstate_id_t &prev_id,
- symbol_t &prev_sym) {
- u32 num_prev = 0;
- bool test_p1 = false;
-
- for (symbol_t sym = 0; sym < impl_alpha_size; sym++) {
- num_prev += info.states[curr_id].prev_vec[sym].size();
- DEBUG_PRINTF("Check symbol: %u, with its vector size: %lu\n", sym,
- info.states[curr_id].prev_vec[sym].size());
- if (num_prev == 1 && !test_p1) {
- test_p1 = true;
- prev_id = info.states[curr_id].prev_vec[sym].front(); //[0] for sure???
- prev_sym = sym;
- }
- }
-
- return num_prev == 1;
-}
-
-/* \brief Test for same-failure-action property. */
-static
-bool check_property2(const raw_dfa &rdfa, const u16 impl_alpha_size,
- const dstate_id_t curr_id, const dstate_id_t prev_id,
- const symbol_t curr_sym, const symbol_t prev_sym) {
- const dstate &prevState = rdfa.states[prev_id];
- const dstate &currState = rdfa.states[curr_id];
-
- // Compare transition tables between currState and prevState.
- u16 score = 0;
- for (symbol_t sym = 0; sym < impl_alpha_size; sym++) {
- if (currState.next[sym] == prevState.next[sym]
- && sym != curr_sym && sym != prev_sym) {
- score++;
- }
- }
- DEBUG_PRINTF("(Score: %u/%u)\n", score, impl_alpha_size);
-
- // 2 cases.
- if (curr_sym != prev_sym && score >= impl_alpha_size - 2
- && currState.next[prev_sym] == prevState.next[curr_sym]) {
- return true;
- } else if (curr_sym == prev_sym && score == impl_alpha_size - 1) {
- return true;
- }
- return false;
-}
-
-/* \brief Check whether adding current prev_id will generate a circle.*/
-static
-bool check_circle(const DfaPrevInfo &info, const u16 impl_alpha_size,
- const vector<dstate_id_t> &chain, const dstate_id_t id) {
- const vector<vector<dstate_id_t>> &prev_vec = info.states[id].prev_vec;
- const dstate_id_t tail = chain.front();
- for (symbol_t sym = 0; sym < impl_alpha_size; sym++) {
- auto iter = find(prev_vec[sym].begin(), prev_vec[sym].end(), tail);
- if (iter != prev_vec[sym].end()) {
- // Tail is one of id's predecessors, forming a circle.
- return true;
- }
- }
- return false;
-}
-
-/* \brief Returns a chain of state ids and symbols. */
-static
-dstate_id_t find_chain_candidate(const raw_dfa &rdfa, const DfaPrevInfo &info,
- const dstate_id_t curr_id,
- const symbol_t curr_sym,
- vector<dstate_id_t> &temp_chain) {
- //Record current id first.
- temp_chain.push_back(curr_id);
-
- const u16 size = info.impl_alpha_size;
-
- // Stop when entering root cloud.
- if (rdfa.start_anchored != DEAD_STATE
- && is_cyclic_near(rdfa, rdfa.start_anchored)
- && curr_id < size) {
- return curr_id;
- }
- if (rdfa.start_floating != DEAD_STATE
- && curr_id >= rdfa.start_floating
- && curr_id < rdfa.start_floating + size * 3) {
- return curr_id;
- }
-
- // Stop when reaching anchored or floating.
- if (curr_id == rdfa.start_anchored || curr_id == rdfa.start_floating) {
- return curr_id;
- }
-
- dstate_id_t prev_id = 0;
- symbol_t prev_sym = ALPHABET_SIZE;
-
- // Check the only-one-predecessor property.
- if (!check_property1(info, size, curr_id, prev_id, prev_sym)) {
- return curr_id;
- }
- assert(prev_id != 0 && prev_sym != ALPHABET_SIZE);
- DEBUG_PRINTF("(P1 test passed.)\n");
-
- // Circle testing for the prev_id that passes the P1 test.
- if (check_circle(info, size, temp_chain, prev_id)) {
- DEBUG_PRINTF("(A circle is found.)\n");
- return curr_id;
- }
-
- // Check the same-failure-action property.
- if (!check_property2(rdfa, size, curr_id, prev_id, curr_sym, prev_sym)) {
- return curr_id;
- }
- DEBUG_PRINTF("(P2 test passed.)\n");
-
- if (!rdfa.states[prev_id].reports.empty()
- || !rdfa.states[prev_id].reports_eod.empty()) {
- return curr_id;
- } else {
- return find_chain_candidate(rdfa, info, prev_id, prev_sym, temp_chain);
- }
-}
-
-/* \brief Always store the non-extensible chains found till now. */
-static
-bool store_chain_longest(vector<vector<dstate_id_t>> &candidate_chain,
- vector<dstate_id_t> &temp_chain,
- dynamic_bitset<> &added, bool head_is_new) {
- dstate_id_t head = temp_chain.front();
- u16 length = temp_chain.size();
-
- if (head_is_new) {
- DEBUG_PRINTF("This is a new chain!\n");
-
- // Add this new chain and get it marked.
- candidate_chain.push_back(temp_chain);
-
- for (auto &id : temp_chain) {
- DEBUG_PRINTF("(Marking s%u ...)\n", id);
- added.set(id);
- }
-
- return true;
- }
-
- DEBUG_PRINTF("This is a longer chain!\n");
- assert(!candidate_chain.empty());
-
- auto chain = find_if(candidate_chain.begin(), candidate_chain.end(),
- [&](const vector<dstate_id_t> &it) {
- return it.front() == head;
- });
-
- // Not a valid head, just do nothing and return.
- if (chain == candidate_chain.end()) {
- return false;
- }
-
- u16 len = chain->size();
-
- if (length > len) {
- // Find out the branch node first.
- size_t piv = 0;
- for (; piv < length; piv++) {
- if ((*chain)[piv] != temp_chain[piv]) {
- break;
- }
- }
-
- for (size_t j = piv + 1; j < length; j++) {
- DEBUG_PRINTF("(Marking s%u (new branch) ...)\n", temp_chain[j]);
- added.set(temp_chain[j]);
- }
-
- // Unmark old unuseful nodes.
- // (Except the tail node, which is in working queue)
- for (size_t j = piv + 1; j < verify_u16(len - 1); j++) {
- DEBUG_PRINTF("(UnMarking s%u (old branch)...)\n", (*chain)[j]);
- added.reset((*chain)[j]);
- }
-
- chain->assign(temp_chain.begin(), temp_chain.end());
- }
-
- return false;
-}
-
-/* \brief Generate wide_symbol_chain from wide_state_chain. */
-static
-void generate_symbol_chain(dfa_info &info, vector<symbol_t> &chain_tail) {
- raw_dfa &rdfa = info.raw;
- assert(chain_tail.size() == info.wide_state_chain.size());
-
- for (size_t i = 0; i < info.wide_state_chain.size(); i++) {
- vector<dstate_id_t> &state_chain = info.wide_state_chain[i];
- vector<symbol_t> symbol_chain;
-
- info.extra[state_chain[0]].wideHead = true;
- size_t width = state_chain.size() - 1;
-
- for (size_t j = 0; j < width; j++) {
- dstate_id_t curr_id = state_chain[j];
- dstate_id_t next_id = state_chain[j + 1];
-
- // The last state of the chain doesn't belong to a wide state.
- info.extra[curr_id].wideState = true;
-
- // The tail symbol comes from vector chain_tail;
- if (j == width - 1) {
- symbol_chain.push_back(chain_tail[i]);
- } else {
- for (symbol_t sym = 0; sym < info.impl_alpha_size; sym++) {
- if (rdfa.states[curr_id].next[sym] == next_id) {
- symbol_chain.push_back(sym);
- break;
- }
- }
- }
- }
-
- info.wide_symbol_chain.push_back(symbol_chain);
- }
-}
-
-/* \brief Find potential regions of states to be packed into wide states. */
-static
-void find_wide_state(dfa_info &info) {
- DfaPrevInfo dinfo(info.raw);
- queue<dstate_id_t> work_queue;
-
- dynamic_bitset<> added(info.raw.states.size());
- for (auto it : dinfo.accepts) {
- work_queue.push(it);
- added.set(it);
- }
-
- vector<symbol_t> chain_tail;
- while (!work_queue.empty()) {
- dstate_id_t curr_id = work_queue.front();
- work_queue.pop();
- DEBUG_PRINTF("Newly popped state: s%u\n", curr_id);
-
- for (symbol_t sym = 0; sym < dinfo.impl_alpha_size; sym++) {
- for (auto info_it : dinfo.states[curr_id].prev_vec[sym]) {
- if (added.test(info_it)) {
- DEBUG_PRINTF("(s%u already marked.)\n", info_it);
- continue;
- }
-
- vector<dstate_id_t> temp_chain;
- // Head is a state failing the test of the chain.
- dstate_id_t head = find_chain_candidate(info.raw, dinfo,
- info_it, sym,
- temp_chain);
-
- // A candidate chain should contain 8 substates at least.
- if (temp_chain.size() < 8) {
- DEBUG_PRINTF("(Not enough substates, continue.)\n");
- continue;
- }
-
- bool head_is_new = !added.test(head);
- if (head_is_new) {
- added.set(head);
- work_queue.push(head);
- DEBUG_PRINTF("Newly pushed state: s%u\n", head);
- }
-
- reverse(temp_chain.begin(), temp_chain.end());
- temp_chain.push_back(curr_id);
-
- assert(head > 0 && head == temp_chain.front());
- if (store_chain_longest(info.wide_state_chain, temp_chain,
- added, head_is_new)) {
- chain_tail.push_back(sym);
- }
- }
- }
- }
-
- generate_symbol_chain(info, chain_tail);
-}
-
+/* \brief Test for only-one-predecessor property. */
+static
+bool check_property1(const DfaPrevInfo &info, const u16 impl_alpha_size,
+ const dstate_id_t curr_id, dstate_id_t &prev_id,
+ symbol_t &prev_sym) {
+ u32 num_prev = 0;
+ bool test_p1 = false;
+
+ for (symbol_t sym = 0; sym < impl_alpha_size; sym++) {
+ num_prev += info.states[curr_id].prev_vec[sym].size();
+        DEBUG_PRINTF("Check symbol: %u, with its vector size: %zu\n", sym,
+ info.states[curr_id].prev_vec[sym].size());
+ if (num_prev == 1 && !test_p1) {
+ test_p1 = true;
+            prev_id = info.states[curr_id].prev_vec[sym].front(); // sole entry
+ prev_sym = sym;
+ }
+ }
+
+ return num_prev == 1;
+}
+
+/* \brief Test for same-failure-action property. */
+static
+bool check_property2(const raw_dfa &rdfa, const u16 impl_alpha_size,
+ const dstate_id_t curr_id, const dstate_id_t prev_id,
+ const symbol_t curr_sym, const symbol_t prev_sym) {
+ const dstate &prevState = rdfa.states[prev_id];
+ const dstate &currState = rdfa.states[curr_id];
+
+ // Compare transition tables between currState and prevState.
+ u16 score = 0;
+ for (symbol_t sym = 0; sym < impl_alpha_size; sym++) {
+ if (currState.next[sym] == prevState.next[sym]
+ && sym != curr_sym && sym != prev_sym) {
+ score++;
+ }
+ }
+ DEBUG_PRINTF("(Score: %u/%u)\n", score, impl_alpha_size);
+
+ // 2 cases.
+ if (curr_sym != prev_sym && score >= impl_alpha_size - 2
+ && currState.next[prev_sym] == prevState.next[curr_sym]) {
+ return true;
+ } else if (curr_sym == prev_sym && score == impl_alpha_size - 1) {
+ return true;
+ }
+ return false;
+}
+
+/* \brief Check whether adding the current prev_id would create a cycle. */
+static
+bool check_circle(const DfaPrevInfo &info, const u16 impl_alpha_size,
+ const vector<dstate_id_t> &chain, const dstate_id_t id) {
+ const vector<vector<dstate_id_t>> &prev_vec = info.states[id].prev_vec;
+ const dstate_id_t tail = chain.front();
+ for (symbol_t sym = 0; sym < impl_alpha_size; sym++) {
+ auto iter = find(prev_vec[sym].begin(), prev_vec[sym].end(), tail);
+ if (iter != prev_vec[sym].end()) {
+            // Tail is one of id's predecessors, forming a cycle.
+ return true;
+ }
+ }
+ return false;
+}
+
+/* \brief Returns a chain of state ids and symbols. */
+static
+dstate_id_t find_chain_candidate(const raw_dfa &rdfa, const DfaPrevInfo &info,
+ const dstate_id_t curr_id,
+ const symbol_t curr_sym,
+ vector<dstate_id_t> &temp_chain) {
+    // Record the current id first.
+ temp_chain.push_back(curr_id);
+
+ const u16 size = info.impl_alpha_size;
+
+ // Stop when entering root cloud.
+ if (rdfa.start_anchored != DEAD_STATE
+ && is_cyclic_near(rdfa, rdfa.start_anchored)
+ && curr_id < size) {
+ return curr_id;
+ }
+ if (rdfa.start_floating != DEAD_STATE
+ && curr_id >= rdfa.start_floating
+ && curr_id < rdfa.start_floating + size * 3) {
+ return curr_id;
+ }
+
+ // Stop when reaching anchored or floating.
+ if (curr_id == rdfa.start_anchored || curr_id == rdfa.start_floating) {
+ return curr_id;
+ }
+
+ dstate_id_t prev_id = 0;
+ symbol_t prev_sym = ALPHABET_SIZE;
+
+ // Check the only-one-predecessor property.
+ if (!check_property1(info, size, curr_id, prev_id, prev_sym)) {
+ return curr_id;
+ }
+ assert(prev_id != 0 && prev_sym != ALPHABET_SIZE);
+ DEBUG_PRINTF("(P1 test passed.)\n");
+
+    // Cycle check for the prev_id that passed the P1 test.
+ if (check_circle(info, size, temp_chain, prev_id)) {
+        DEBUG_PRINTF("(A cycle was found.)\n");
+ return curr_id;
+ }
+
+ // Check the same-failure-action property.
+ if (!check_property2(rdfa, size, curr_id, prev_id, curr_sym, prev_sym)) {
+ return curr_id;
+ }
+ DEBUG_PRINTF("(P2 test passed.)\n");
+
+ if (!rdfa.states[prev_id].reports.empty()
+ || !rdfa.states[prev_id].reports_eod.empty()) {
+ return curr_id;
+ } else {
+ return find_chain_candidate(rdfa, info, prev_id, prev_sym, temp_chain);
+ }
+}
+
+/* \brief Always store the non-extensible chains found so far. */
+static
+bool store_chain_longest(vector<vector<dstate_id_t>> &candidate_chain,
+ vector<dstate_id_t> &temp_chain,
+ dynamic_bitset<> &added, bool head_is_new) {
+ dstate_id_t head = temp_chain.front();
+ u16 length = temp_chain.size();
+
+ if (head_is_new) {
+ DEBUG_PRINTF("This is a new chain!\n");
+
+ // Add this new chain and get it marked.
+ candidate_chain.push_back(temp_chain);
+
+ for (auto &id : temp_chain) {
+ DEBUG_PRINTF("(Marking s%u ...)\n", id);
+ added.set(id);
+ }
+
+ return true;
+ }
+
+ DEBUG_PRINTF("This is a longer chain!\n");
+ assert(!candidate_chain.empty());
+
+ auto chain = find_if(candidate_chain.begin(), candidate_chain.end(),
+ [&](const vector<dstate_id_t> &it) {
+ return it.front() == head;
+ });
+
+ // Not a valid head, just do nothing and return.
+ if (chain == candidate_chain.end()) {
+ return false;
+ }
+
+ u16 len = chain->size();
+
+ if (length > len) {
+ // Find out the branch node first.
+ size_t piv = 0;
+ for (; piv < length; piv++) {
+ if ((*chain)[piv] != temp_chain[piv]) {
+ break;
+ }
+ }
+
+ for (size_t j = piv + 1; j < length; j++) {
+ DEBUG_PRINTF("(Marking s%u (new branch) ...)\n", temp_chain[j]);
+ added.set(temp_chain[j]);
+ }
+
+        // Unmark nodes on the old branch that are no longer useful
+        // (except the tail node, which is still in the work queue).
+ for (size_t j = piv + 1; j < verify_u16(len - 1); j++) {
+ DEBUG_PRINTF("(UnMarking s%u (old branch)...)\n", (*chain)[j]);
+ added.reset((*chain)[j]);
+ }
+
+ chain->assign(temp_chain.begin(), temp_chain.end());
+ }
+
+ return false;
+}
+
+/* \brief Generate wide_symbol_chain from wide_state_chain. */
+static
+void generate_symbol_chain(dfa_info &info, vector<symbol_t> &chain_tail) {
+ raw_dfa &rdfa = info.raw;
+ assert(chain_tail.size() == info.wide_state_chain.size());
+
+ for (size_t i = 0; i < info.wide_state_chain.size(); i++) {
+ vector<dstate_id_t> &state_chain = info.wide_state_chain[i];
+ vector<symbol_t> symbol_chain;
+
+ info.extra[state_chain[0]].wideHead = true;
+ size_t width = state_chain.size() - 1;
+
+ for (size_t j = 0; j < width; j++) {
+ dstate_id_t curr_id = state_chain[j];
+ dstate_id_t next_id = state_chain[j + 1];
+
+ // The last state of the chain doesn't belong to a wide state.
+ info.extra[curr_id].wideState = true;
+
+            // The tail symbol comes from the chain_tail vector.
+ if (j == width - 1) {
+ symbol_chain.push_back(chain_tail[i]);
+ } else {
+ for (symbol_t sym = 0; sym < info.impl_alpha_size; sym++) {
+ if (rdfa.states[curr_id].next[sym] == next_id) {
+ symbol_chain.push_back(sym);
+ break;
+ }
+ }
+ }
+ }
+
+ info.wide_symbol_chain.push_back(symbol_chain);
+ }
+}
+
+/* \brief Find potential regions of states to be packed into wide states. */
+static
+void find_wide_state(dfa_info &info) {
+ DfaPrevInfo dinfo(info.raw);
+ queue<dstate_id_t> work_queue;
+
+ dynamic_bitset<> added(info.raw.states.size());
+ for (auto it : dinfo.accepts) {
+ work_queue.push(it);
+ added.set(it);
+ }
+
+ vector<symbol_t> chain_tail;
+ while (!work_queue.empty()) {
+ dstate_id_t curr_id = work_queue.front();
+ work_queue.pop();
+ DEBUG_PRINTF("Newly popped state: s%u\n", curr_id);
+
+ for (symbol_t sym = 0; sym < dinfo.impl_alpha_size; sym++) {
+ for (auto info_it : dinfo.states[curr_id].prev_vec[sym]) {
+ if (added.test(info_it)) {
+ DEBUG_PRINTF("(s%u already marked.)\n", info_it);
+ continue;
+ }
+
+ vector<dstate_id_t> temp_chain;
+                // Head is the state at which chain extension stopped.
+ dstate_id_t head = find_chain_candidate(info.raw, dinfo,
+ info_it, sym,
+ temp_chain);
+
+                // A candidate chain must contain at least 8 substates.
+ if (temp_chain.size() < 8) {
+ DEBUG_PRINTF("(Not enough substates, continue.)\n");
+ continue;
+ }
+
+ bool head_is_new = !added.test(head);
+ if (head_is_new) {
+ added.set(head);
+ work_queue.push(head);
+ DEBUG_PRINTF("Newly pushed state: s%u\n", head);
+ }
+
+ reverse(temp_chain.begin(), temp_chain.end());
+ temp_chain.push_back(curr_id);
+
+ assert(head > 0 && head == temp_chain.front());
+ if (store_chain_longest(info.wide_state_chain, temp_chain,
+ added, head_is_new)) {
+ chain_tail.push_back(sym);
+ }
+ }
+ }
+ }
+
+ generate_symbol_chain(info, chain_tail);
+}
+
bytecode_ptr<NFA> mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &strat,
const CompileContext &cc,
bool trust_daddy_states,
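find_wide_state above walks backwards from every accept state, extending a chain through a predecessor only while the only-one-predecessor (P1) and same-failure-action (P2) tests hold, rejecting cycles, and keeping only chains of at least 8 states. The P1 test in isolation, over the inverted transition table sketched earlier (a simplification of check_property1, not the library code):

    #include <vector>

    // True iff 'cur' has exactly one incoming edge across the alphabet;
    // on success, *pred and *pred_sym identify that edge.
    static bool only_one_predecessor(
            const std::vector<std::vector<std::vector<unsigned>>> &prev,
            unsigned cur, unsigned alpha_size,
            unsigned *pred, unsigned *pred_sym) {
        unsigned n = 0;
        for (unsigned c = 0; c < alpha_size; c++) {
            for (unsigned s : prev[cur][c]) {
                if (++n > 1) {
                    return false;
                }
                *pred = s;
                *pred_sym = c;
            }
        }
        return n == 1;
    }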
@@ -1477,31 +1477,31 @@ bytecode_ptr<NFA> mcclellanCompile_i(raw_dfa &raw, accel_dfa_build_strat &strat,
bytecode_ptr<NFA> nfa;
if (!using8bit) {
- // Wide state optimization
- if (cc.grey.allowWideStates && strat.getType() == McClellan
- && !is_triggered(raw.kind)) {
- find_wide_state(info);
- }
-
+ // Wide state optimization
+ if (cc.grey.allowWideStates && strat.getType() == McClellan
+ && !is_triggered(raw.kind)) {
+ find_wide_state(info);
+ }
+
u16 total_daddy = 0;
bool any_cyclic_near_anchored_state
= is_cyclic_near(raw, raw.start_anchored);
- // Sherman optimization
- if (info.impl_alpha_size > 16) {
- for (u32 i = 0; i < info.size(); i++) {
- if (info.is_widestate(i)) {
- continue;
- }
- find_better_daddy(info, i, using8bit,
- any_cyclic_near_anchored_state,
- trust_daddy_states, cc.grey);
- total_daddy += info.extra[i].daddytaken;
- }
-
- DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
- info.size() * info.impl_alpha_size, info.size(),
- info.impl_alpha_size);
+ // Sherman optimization
+ if (info.impl_alpha_size > 16) {
+ for (u32 i = 0; i < info.size(); i++) {
+ if (info.is_widestate(i)) {
+ continue;
+ }
+ find_better_daddy(info, i, using8bit,
+ any_cyclic_near_anchored_state,
+ trust_daddy_states, cc.grey);
+ total_daddy += info.extra[i].daddytaken;
+ }
+
+ DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
+ info.size() * info.impl_alpha_size, info.size(),
+ info.impl_alpha_size);
}
nfa = mcclellanCompile16(info, cc, accel_states);
diff --git a/contrib/libs/hyperscan/src/nfa/mcclellancompile.h b/contrib/libs/hyperscan/src/nfa/mcclellancompile.h
index a56016018b..73cb9fd775 100644
--- a/contrib/libs/hyperscan/src/nfa/mcclellancompile.h
+++ b/contrib/libs/hyperscan/src/nfa/mcclellancompile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -60,7 +60,7 @@ public:
u32 max_allowed_offset_accel() const override;
u32 max_stop_char() const override;
u32 max_floating_stop_char() const override;
- DfaType getType() const override { return McClellan; }
+ DfaType getType() const override { return McClellan; }
private:
raw_dfa &rdfa;
diff --git a/contrib/libs/hyperscan/src/nfa/mcclellandump.h b/contrib/libs/hyperscan/src/nfa/mcclellandump.h
index a4cd81c031..5b63a20634 100644
--- a/contrib/libs/hyperscan/src/nfa/mcclellandump.h
+++ b/contrib/libs/hyperscan/src/nfa/mcclellandump.h
@@ -1,62 +1,62 @@
-/*
- * Copyright (c) 2015-2016, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef MCCLELLAN_DUMP_H
-#define MCCLELLAN_DUMP_H
-
-#ifdef DUMP_SUPPORT
-
-#include "rdfa.h"
-
-#include <cstdio>
-#include <string>
-
-struct mcclellan;
-struct mstate_aux;
-struct NFA;
-union AccelAux;
-
-namespace ue2 {
-
-void nfaExecMcClellan8_dump(const struct NFA *nfa, const std::string &base);
-void nfaExecMcClellan16_dump(const struct NFA *nfa, const std::string &base);
-
-/* These functions are shared with the Gough dump code. */
-
-const mstate_aux *getAux(const NFA *n, dstate_id_t i);
-void describeEdge(FILE *f, const u16 *t, u16 i);
-void dumpAccelText(FILE *f, const union AccelAux *accel);
-void dumpAccelDot(FILE *f, u16 i, const union AccelAux *accel);
-void describeAlphabet(FILE *f, const mcclellan *m);
-void dumpDotPreambleDfa(FILE *f);
-
-} // namespace ue2
-
-#endif // DUMP_SUPPORT
-
-#endif // MCCLELLAN_DUMP_H
+/*
+ * Copyright (c) 2015-2016, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MCCLELLAN_DUMP_H
+#define MCCLELLAN_DUMP_H
+
+#ifdef DUMP_SUPPORT
+
+#include "rdfa.h"
+
+#include <cstdio>
+#include <string>
+
+struct mcclellan;
+struct mstate_aux;
+struct NFA;
+union AccelAux;
+
+namespace ue2 {
+
+void nfaExecMcClellan8_dump(const struct NFA *nfa, const std::string &base);
+void nfaExecMcClellan16_dump(const struct NFA *nfa, const std::string &base);
+
+/* These functions are shared with the Gough dump code. */
+
+const mstate_aux *getAux(const NFA *n, dstate_id_t i);
+void describeEdge(FILE *f, const u16 *t, u16 i);
+void dumpAccelText(FILE *f, const union AccelAux *accel);
+void dumpAccelDot(FILE *f, u16 i, const union AccelAux *accel);
+void describeAlphabet(FILE *f, const mcclellan *m);
+void dumpDotPreambleDfa(FILE *f);
+
+} // namespace ue2
+
+#endif // DUMP_SUPPORT
+
+#endif // MCCLELLAN_DUMP_H
diff --git a/contrib/libs/hyperscan/src/nfa/mcsheng.c b/contrib/libs/hyperscan/src/nfa/mcsheng.c
index d285793483..22cac119fb 100644
--- a/contrib/libs/hyperscan/src/nfa/mcsheng.c
+++ b/contrib/libs/hyperscan/src/nfa/mcsheng.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -173,7 +173,7 @@ u32 doSheng(const struct mcsheng *m, const u8 **c_inout, const u8 *soft_c_end,
u32 sheng_limit_x4 = sheng_limit * 0x01010101;
m128 simd_stop_limit = set4x32(sheng_stop_limit_x4);
m128 accel_delta = set16x8(sheng_limit - sheng_stop_limit);
- DEBUG_PRINTF("end %hhu, accel %hu --> limit %hhu\n", sheng_limit,
+ DEBUG_PRINTF("end %hhu, accel %hu --> limit %hhu\n", sheng_limit,
m->sheng_accel_limit, sheng_stop_limit);
#endif
@@ -181,7 +181,7 @@ u32 doSheng(const struct mcsheng *m, const u8 **c_inout, const u8 *soft_c_end,
m128 shuffle_mask = masks[*(c++)]; \
s = pshufb_m128(shuffle_mask, s); \
u32 s_gpr_x4 = movd(s); /* convert to u8 */ \
- DEBUG_PRINTF("c %hhu (%c) --> s %u\n", c[-1], c[-1], s_gpr_x4); \
+ DEBUG_PRINTF("c %hhu (%c) --> s %u\n", c[-1], c[-1], s_gpr_x4); \
if (s_gpr_x4 >= sheng_stop_limit_x4) { \
s_gpr = s_gpr_x4; \
goto exit; \
@@ -191,7 +191,7 @@ u32 doSheng(const struct mcsheng *m, const u8 **c_inout, const u8 *soft_c_end,
u8 s_gpr;
while (c < c_end) {
#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
- /* This version uses pext for efficiently bitbashing out scaled
+ /* This version uses pext for efficiently bitbashing out scaled
* versions of the bytes to process from a u64a */
u64a data_bytes = unaligned_load_u64a(c);
@@ -201,7 +201,7 @@ u32 doSheng(const struct mcsheng *m, const u8 **c_inout, const u8 *soft_c_end,
s = pshufb_m128(shuffle_mask0, s);
m128 s_max = s;
m128 s_max0 = s_max;
- DEBUG_PRINTF("c %02llx --> s %u\n", cc0 >> 4, movd(s));
+ DEBUG_PRINTF("c %02llx --> s %u\n", cc0 >> 4, movd(s));
#define SHENG_SINGLE_UNROLL_ITER(iter) \
assert(iter); \
@@ -217,7 +217,7 @@ u32 doSheng(const struct mcsheng *m, const u8 **c_inout, const u8 *soft_c_end,
s_max = max_u8_m128(s_max, s); \
} \
m128 s_max##iter = s_max; \
- DEBUG_PRINTF("c %02llx --> s %u max %u\n", cc##iter >> 4, \
+ DEBUG_PRINTF("c %02llx --> s %u max %u\n", cc##iter >> 4, \
movd(s), movd(s_max));
SHENG_SINGLE_UNROLL_ITER(1);
@@ -1184,7 +1184,7 @@ char nfaExecMcSheng16_reportCurrent(const struct NFA *n, struct mq *q) {
static
char mcshengHasAccept(const struct mcsheng *m, const struct mstate_aux *aux,
- ReportID report) {
+ ReportID report) {
assert(m && aux);
if (!aux->accept) {
@@ -1405,1332 +1405,1332 @@ char nfaExecMcSheng16_expandState(UNUSED const struct NFA *nfa, void *dest,
*(u16 *)dest = unaligned_load_u16(src);
return 0;
}
-
-#if defined(HAVE_AVX512VBMI)
-static really_inline
-const struct mstate_aux *get_aux64(const struct mcsheng64 *m, u32 s) {
- const char *nfa = (const char *)m - sizeof(struct NFA);
- const struct mstate_aux *aux
- = s + (const struct mstate_aux *)(nfa + m->aux_offset);
-
- assert(ISALIGNED(aux));
- return aux;
-}
-
-static really_inline
-u32 mcshengEnableStarts64(const struct mcsheng64 *m, u32 s) {
- const struct mstate_aux *aux = get_aux64(m, s);
-
- DEBUG_PRINTF("enabling starts %u->%hu\n", s, aux->top);
- return aux->top;
-}
-
-static really_inline
-char doComplexReport64(NfaCallback cb, void *ctxt, const struct mcsheng64 *m,
- u32 s, u64a loc, char eod, u32 *cached_accept_state,
- u32 *cached_accept_id) {
- DEBUG_PRINTF("reporting state = %u, loc=%llu, eod %hhu\n",
- s & STATE_MASK, loc, eod);
-
- if (!eod && s == *cached_accept_state) {
- if (cb(0, loc, *cached_accept_id, ctxt) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING; /* termination requested */
- }
-
- return MO_CONTINUE_MATCHING; /* continue execution */
- }
-
- const struct mstate_aux *aux = get_aux64(m, s);
- size_t offset = eod ? aux->accept_eod : aux->accept;
-
- assert(offset);
- const struct report_list *rl
- = (const void *)((const char *)m + offset - sizeof(struct NFA));
- assert(ISALIGNED(rl));
-
- DEBUG_PRINTF("report list size %u\n", rl->count);
- u32 count = rl->count;
-
- if (!eod && count == 1) {
- *cached_accept_state = s;
- *cached_accept_id = rl->report[0];
-
- DEBUG_PRINTF("reporting %u\n", rl->report[0]);
- if (cb(0, loc, rl->report[0], ctxt) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING; /* termination requested */
- }
-
- return MO_CONTINUE_MATCHING; /* continue execution */
- }
-
- for (u32 i = 0; i < count; i++) {
- DEBUG_PRINTF("reporting %u\n", rl->report[i]);
- if (cb(0, loc, rl->report[i], ctxt) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING; /* termination requested */
- }
- }
-
- return MO_CONTINUE_MATCHING; /* continue execution */
-}
-
-static really_inline
-u32 doSheng64(const struct mcsheng64 *m, const u8 **c_inout, const u8 *soft_c_end,
- const u8 *hard_c_end, u32 s_in, char do_accel) {
- assert(s_in < m->sheng_end);
- assert(s_in); /* should not already be dead */
- assert(soft_c_end <= hard_c_end);
- DEBUG_PRINTF("s_in = %u (adjusted %u)\n", s_in, s_in - 1);
- m512 s = set64x8(s_in - 1);
- const u8 *c = *c_inout;
- const u8 *c_end = hard_c_end - SHENG_CHUNK + 1;
- if (!do_accel) {
- c_end = MIN(soft_c_end, hard_c_end - SHENG_CHUNK + 1);
- }
-
- const m512 *masks = m->sheng_succ_masks;
- u8 sheng_limit = m->sheng_end - 1; /* - 1: no dead state */
- u8 sheng_stop_limit = do_accel ? m->sheng_accel_limit : sheng_limit;
-
- /* When we use movd to get a u32 containing our state, it will have 4 lanes
- * all duplicating the state. We can create versions of our limits with 4
- * copies to directly compare against, this prevents us generating code to
- * extract a single copy of the state from the u32 for checking. */
- u32 sheng_stop_limit_x4 = sheng_stop_limit * 0x01010101;
-
-#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
- u32 sheng_limit_x4 = sheng_limit * 0x01010101;
- m512 simd_stop_limit = set16x32(sheng_stop_limit_x4);
- m512 accel_delta = set64x8(sheng_limit - sheng_stop_limit);
- DEBUG_PRINTF("end %hhu, accel %hu --> limit %hhu\n", sheng_limit,
- m->sheng_accel_limit, sheng_stop_limit);
-#endif
-
-#define SHENG64_SINGLE_ITER do { \
- m512 succ_mask = masks[*(c++)]; \
- s = vpermb512(s, succ_mask); \
- u32 s_gpr_x4 = movd512(s); /* convert to u8 */ \
- DEBUG_PRINTF("c %hhu (%c) --> s %u\n", c[-1], c[-1], s_gpr_x4); \
- if (s_gpr_x4 >= sheng_stop_limit_x4) { \
- s_gpr = s_gpr_x4; \
- goto exit; \
- } \
- } while (0)
-
- u8 s_gpr;
- while (c < c_end) {
-#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
- /* This version uses pext for efficiently bitbashing out scaled
- * versions of the bytes to process from a u64a */
-
- u64a data_bytes = unaligned_load_u64a(c);
- u64a cc0 = pdep64(data_bytes, 0x3fc0); /* extract scaled low byte */
- data_bytes &= ~0xffULL; /* clear low bits for scale space */
-
- m512 succ_mask0 = load512((const char *)masks + cc0);
- s = vpermb512(s, succ_mask0);
- m512 s_max = s;
- m512 s_max0 = s_max;
- DEBUG_PRINTF("c %02llx --> s %u\n", cc0 >> 6, movd512(s));
-
-#define SHENG64_SINGLE_UNROLL_ITER(iter) \
- assert(iter); \
- u64a cc##iter = pext64(data_bytes, mcsheng64_pext_mask[iter]); \
- assert(cc##iter == (u64a)c[iter] << 6); \
- m512 succ_mask##iter = load512((const char *)masks + cc##iter); \
- s = vpermb512(s, succ_mask##iter); \
- if (do_accel && iter == 7) { \
- /* in the final iteration we also have to check against accel */ \
- m512 s_temp = sadd_u8_m512(s, accel_delta); \
- s_max = max_u8_m512(s_max, s_temp); \
- } else { \
- s_max = max_u8_m512(s_max, s); \
- } \
- m512 s_max##iter = s_max; \
- DEBUG_PRINTF("c %02llx --> s %u max %u\n", cc##iter >> 6, \
- movd512(s), movd512(s_max));
-
- SHENG64_SINGLE_UNROLL_ITER(1);
- SHENG64_SINGLE_UNROLL_ITER(2);
- SHENG64_SINGLE_UNROLL_ITER(3);
- SHENG64_SINGLE_UNROLL_ITER(4);
- SHENG64_SINGLE_UNROLL_ITER(5);
- SHENG64_SINGLE_UNROLL_ITER(6);
- SHENG64_SINGLE_UNROLL_ITER(7);
-
- if (movd512(s_max7) >= sheng_limit_x4) {
- DEBUG_PRINTF("exit found\n");
-
- /* Explicitly check the last byte as it is more likely as it also
- * checks for acceleration. */
- if (movd512(s_max6) < sheng_limit_x4) {
- c += SHENG_CHUNK;
- s_gpr = movq512(s);
- assert(s_gpr >= sheng_stop_limit);
- goto exit;
- }
-
- /* use shift-xor to create a register containing all of the max
- * values */
- m512 blended = rshift64_m512(s_max0, 56);
- blended = xor512(blended, rshift64_m512(s_max1, 48));
- blended = xor512(blended, rshift64_m512(s_max2, 40));
- blended = xor512(blended, rshift64_m512(s_max3, 32));
- blended = xor512(blended, rshift64_m512(s_max4, 24));
- blended = xor512(blended, rshift64_m512(s_max5, 16));
- blended = xor512(blended, rshift64_m512(s_max6, 8));
- blended = xor512(blended, s);
- blended = xor512(blended, rshift64_m512(blended, 8));
- DEBUG_PRINTF("blended %016llx\n", movq512(blended));
-
- m512 final = min_u8_m512(blended, simd_stop_limit);
- m512 cmp = sub_u8_m512(final, simd_stop_limit);
- m128 tmp = cast512to128(cmp);
- u64a stops = ~movemask128(tmp);
- assert(stops);
- u32 earliest = ctz32(stops);
- DEBUG_PRINTF("stops %02llx, earliest %u\n", stops, earliest);
- assert(earliest < 8);
- c += earliest + 1;
- s_gpr = movq512(blended) >> (earliest * 8);
- assert(s_gpr >= sheng_stop_limit);
- goto exit;
- } else {
- c += SHENG_CHUNK;
- }
-#else
- SHENG64_SINGLE_ITER;
- SHENG64_SINGLE_ITER;
- SHENG64_SINGLE_ITER;
- SHENG64_SINGLE_ITER;
-
- SHENG64_SINGLE_ITER;
- SHENG64_SINGLE_ITER;
- SHENG64_SINGLE_ITER;
- SHENG64_SINGLE_ITER;
-#endif
- }
-
- assert(c_end - c < SHENG_CHUNK);
- if (c < soft_c_end) {
- assert(soft_c_end - c < SHENG_CHUNK);
- switch (soft_c_end - c) {
- case 7:
- SHENG64_SINGLE_ITER; // fallthrough
- case 6:
- SHENG64_SINGLE_ITER; // fallthrough
- case 5:
- SHENG64_SINGLE_ITER; // fallthrough
- case 4:
- SHENG64_SINGLE_ITER; // fallthrough
- case 3:
- SHENG64_SINGLE_ITER; // fallthrough
- case 2:
- SHENG64_SINGLE_ITER; // fallthrough
- case 1:
- SHENG64_SINGLE_ITER; // fallthrough
- }
- }
-
- assert(c >= soft_c_end);
-
- s_gpr = movq512(s);
-exit:
- assert(c <= hard_c_end);
- DEBUG_PRINTF("%zu from end; s %hhu\n", c_end - c, s_gpr);
- assert(c >= soft_c_end || s_gpr >= sheng_stop_limit);
- /* undo state adjustment to match mcclellan view */
- if (s_gpr == sheng_limit) {
- s_gpr = 0;
- } else if (s_gpr < sheng_limit) {
- s_gpr++;
- }
-
- *c_inout = c;
- return s_gpr;
-}
-
-static really_inline
-const char *findShermanState64(UNUSED const struct mcsheng64 *m,
- const char *sherman_base_offset,
- u32 sherman_base, u32 s) {
- const char *rv
- = sherman_base_offset + SHERMAN_FIXED_SIZE * (s - sherman_base);
- assert(rv < (const char *)m + m->length - sizeof(struct NFA));
- UNUSED u8 type = *(const u8 *)(rv + SHERMAN_TYPE_OFFSET);
- assert(type == SHERMAN_STATE);
- return rv;
-}
-
-static really_inline
-const u8 *run_mcsheng_accel64(const struct mcsheng64 *m,
- const struct mstate_aux *aux, u32 s,
- const u8 **min_accel_offset,
- const u8 *c, const u8 *c_end) {
- DEBUG_PRINTF("skipping\n");
- u32 accel_offset = aux[s].accel_offset;
-
- assert(aux[s].accel_offset);
- assert(accel_offset >= m->aux_offset);
- assert(!m->sherman_offset || accel_offset < m->sherman_offset);
-
- const union AccelAux *aaux = (const void *)((const char *)m + accel_offset);
- const u8 *c2 = run_accel(aaux, c, c_end);
-
- if (c2 < *min_accel_offset + BAD_ACCEL_DIST) {
- *min_accel_offset = c2 + BIG_ACCEL_PENALTY;
- } else {
- *min_accel_offset = c2 + SMALL_ACCEL_PENALTY;
- }
-
- if (*min_accel_offset >= c_end - ACCEL_MIN_LEN) {
- *min_accel_offset = c_end;
- }
-
- DEBUG_PRINTF("advanced %zd, next accel chance in %zd/%zd\n",
- c2 - c, *min_accel_offset - c2, c_end - c2);
-
- return c2;
-}
-
-static really_inline
-u32 doNormal64_16(const struct mcsheng64 *m, const u8 **c_inout, const u8 *end,
- u32 s, char do_accel, enum MatchMode mode) {
- const u8 *c = *c_inout;
- const u16 *succ_table
- = (const u16 *)((const char *)m + sizeof(struct mcsheng64));
- assert(ISALIGNED_N(succ_table, 2));
- u32 sheng_end = m->sheng_end;
- u32 sherman_base = m->sherman_limit;
- const char *sherman_base_offset
- = (const char *)m - sizeof(struct NFA) + m->sherman_offset;
- u32 as = m->alphaShift;
-
- /* Adjust start of succ table so we can index into using state id (rather
- * than adjust to normal id). As we will not be processing states with low
- * state ids, we will not be accessing data before the succ table. Note: due
- * to the size of the sheng tables, the succ_table pointer will still be
- * inside the engine.*/
- succ_table -= sheng_end << as;
- s &= STATE_MASK;
- while (c < end && s >= sheng_end) {
- u8 cprime = m->remap[*c];
- DEBUG_PRINTF("c: %02hhx '%c' cp:%02hhx (s=%u)\n", *c,
- ourisprint(*c) ? *c : '?', cprime, s);
- if (s < sherman_base) {
- DEBUG_PRINTF("doing normal\n");
- assert(s < m->state_count);
- s = succ_table[(s << as) + cprime];
- } else {
- const char *sherman_state
- = findShermanState64(m, sherman_base_offset, sherman_base, s);
- DEBUG_PRINTF("doing sherman (%u)\n", s);
- s = doSherman16(sherman_state, cprime, succ_table, as);
- }
-
- DEBUG_PRINTF("s: %u (%u)\n", s, s & STATE_MASK);
- c++;
-
- if (do_accel && (s & ACCEL_FLAG)) {
- break;
- }
- if (mode != NO_MATCHES && (s & ACCEPT_FLAG)) {
- break;
- }
-
- s &= STATE_MASK;
- }
-
- *c_inout = c;
- return s;
-}
-
-static really_inline
-char mcsheng64Exec16_i(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **c_final, enum MatchMode mode) {
- assert(ISALIGNED_N(state, 2));
- if (!len) {
- if (mode == STOP_AT_MATCH) {
- *c_final = buf;
- }
- return MO_ALIVE;
- }
-
- u32 s = *state;
- const u8 *c = buf;
- const u8 *c_end = buf + len;
- const u8 sheng_end = m->sheng_end;
- const struct mstate_aux *aux
- = (const struct mstate_aux *)((const char *)m + m->aux_offset
- - sizeof(struct NFA));
-
- s &= STATE_MASK;
-
- u32 cached_accept_id = 0;
- u32 cached_accept_state = 0;
-
- DEBUG_PRINTF("s: %u, len %zu\n", s, len);
-
- const u8 *min_accel_offset = c;
- if (!m->has_accel || len < ACCEL_MIN_LEN) {
- min_accel_offset = c_end;
- goto without_accel;
- }
-
- goto with_accel;
-
-without_accel:
- do {
- assert(c < min_accel_offset);
- int do_accept;
- if (!s) {
- goto exit;
- } else if (s < sheng_end) {
- s = doSheng64(m, &c, min_accel_offset, c_end, s, 0);
- do_accept = mode != NO_MATCHES && get_aux64(m, s)->accept;
- } else {
- s = doNormal64_16(m, &c, min_accel_offset, s, 0, mode);
-
- do_accept = mode != NO_MATCHES && (s & ACCEPT_FLAG);
- }
-
- if (do_accept) {
- if (mode == STOP_AT_MATCH) {
- *state = s & STATE_MASK;
- *c_final = c - 1;
- return MO_MATCHES_PENDING;
- }
-
- u64a loc = (c - 1) - buf + offAdj + 1;
-
- if (single) {
- DEBUG_PRINTF("reporting %u\n", m->arb_report);
- if (cb(0, loc, m->arb_report, ctxt) == MO_HALT_MATCHING) {
- return MO_DEAD; /* termination requested */
- }
- } else if (doComplexReport64(cb, ctxt, m, s & STATE_MASK, loc, 0,
- &cached_accept_state,
- &cached_accept_id)
- == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- }
-
- assert(c <= c_end); /* sheng is fuzzy for min_accel_offset */
- } while (c < min_accel_offset);
-
- if (c == c_end) {
- goto exit;
- }
-
-with_accel:
- do {
- assert(c < c_end);
- int do_accept;
-
- if (!s) {
- goto exit;
- } else if (s < sheng_end) {
- if (s > m->sheng_accel_limit) {
- c = run_mcsheng_accel64(m, aux, s, &min_accel_offset, c, c_end);
- if (c == c_end) {
- goto exit;
- } else {
- goto without_accel;
- }
- }
- s = doSheng64(m, &c, c_end, c_end, s, 1);
- do_accept = mode != NO_MATCHES && get_aux64(m, s)->accept;
- } else {
- if (s & ACCEL_FLAG) {
- DEBUG_PRINTF("skipping\n");
- s &= STATE_MASK;
- c = run_mcsheng_accel64(m, aux, s, &min_accel_offset, c, c_end);
- if (c == c_end) {
- goto exit;
- } else {
- goto without_accel;
- }
- }
-
- s = doNormal64_16(m, &c, c_end, s, 1, mode);
- do_accept = mode != NO_MATCHES && (s & ACCEPT_FLAG);
- }
-
- if (do_accept) {
- if (mode == STOP_AT_MATCH) {
- *state = s & STATE_MASK;
- *c_final = c - 1;
- return MO_MATCHES_PENDING;
- }
-
- u64a loc = (c - 1) - buf + offAdj + 1;
-
- if (single) {
- DEBUG_PRINTF("reporting %u\n", m->arb_report);
- if (cb(0, loc, m->arb_report, ctxt) == MO_HALT_MATCHING) {
- return MO_DEAD; /* termination requested */
- }
- } else if (doComplexReport64(cb, ctxt, m, s & STATE_MASK, loc, 0,
- &cached_accept_state,
- &cached_accept_id)
- == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- }
-
- assert(c <= c_end);
- } while (c < c_end);
-
-exit:
- s &= STATE_MASK;
-
- if (mode == STOP_AT_MATCH) {
- *c_final = c_end;
- }
- *state = s;
-
- return MO_ALIVE;
-}
-
-static never_inline
-char mcsheng64Exec16_i_cb(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **final_point) {
- return mcsheng64Exec16_i(m, state, buf, len, offAdj, cb, ctxt, single,
- final_point, CALLBACK_OUTPUT);
-}
-
-static never_inline
-char mcsheng64Exec16_i_sam(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **final_point) {
- return mcsheng64Exec16_i(m, state, buf, len, offAdj, cb, ctxt, single,
- final_point, STOP_AT_MATCH);
-}
-
-static never_inline
-char mcsheng64Exec16_i_nm(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **final_point) {
- return mcsheng64Exec16_i(m, state, buf, len, offAdj, cb, ctxt, single,
- final_point, NO_MATCHES);
-}
-
-static really_inline
-char mcsheng64Exec16_i_ni(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **final_point,
- enum MatchMode mode) {
- if (mode == CALLBACK_OUTPUT) {
- return mcsheng64Exec16_i_cb(m, state, buf, len, offAdj, cb, ctxt,
- single, final_point);
- } else if (mode == STOP_AT_MATCH) {
- return mcsheng64Exec16_i_sam(m, state, buf, len, offAdj, cb, ctxt,
- single, final_point);
- } else {
- assert (mode == NO_MATCHES);
- return mcsheng64Exec16_i_nm(m, state, buf, len, offAdj, cb, ctxt,
- single, final_point);
- }
-}
-
-static really_inline
-u32 doNormal64_8(const struct mcsheng64 *m, const u8 **c_inout, const u8 *end, u32 s,
- char do_accel, enum MatchMode mode) {
- const u8 *c = *c_inout;
- u32 sheng_end = m->sheng_end;
- u32 accel_limit = m->accel_limit_8;
- u32 accept_limit = m->accept_limit_8;
-
- const u32 as = m->alphaShift;
- const u8 *succ_table = (const u8 *)((const char *)m
- + sizeof(struct mcsheng64));
- /* Adjust start of succ table so we can index into using state id (rather
- * than adjust to normal id). As we will not be processing states with low
- * state ids, we will not be accessing data before the succ table. Note: due
- * to the size of the sheng tables, the succ_table pointer will still be
- * inside the engine.*/
- succ_table -= sheng_end << as;
-
- assert(s >= sheng_end);
- while (c < end && s >= sheng_end) {
- u8 cprime = m->remap[*c];
- DEBUG_PRINTF("c: %02hhx '%c' cp:%02hhx\n", *c,
- ourisprint(*c) ? *c : '?', cprime);
- s = succ_table[(s << as) + cprime];
-
- DEBUG_PRINTF("s: %u\n", s);
- c++;
- if (do_accel) {
- if (s >= accel_limit) {
- break;
- }
- } else {
- if (mode != NO_MATCHES && s >= accept_limit) {
- break;
- }
- }
- }
- *c_inout = c;
- return s;
-}
-
-static really_inline
-char mcsheng64Exec8_i(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **c_final, enum MatchMode mode) {
- if (!len) {
- *c_final = buf;
- return MO_ALIVE;
- }
- u32 s = *state;
- const u8 *c = buf;
- const u8 *c_end = buf + len;
- const u8 sheng_end = m->sheng_end;
-
- const struct mstate_aux *aux
- = (const struct mstate_aux *)((const char *)m + m->aux_offset
- - sizeof(struct NFA));
- u32 accept_limit = m->accept_limit_8;
-
- u32 cached_accept_id = 0;
- u32 cached_accept_state = 0;
-
- DEBUG_PRINTF("accel %hu, accept %u\n", m->accel_limit_8, accept_limit);
-
- DEBUG_PRINTF("s: %u, len %zu\n", s, len);
-
- const u8 *min_accel_offset = c;
- if (!m->has_accel || len < ACCEL_MIN_LEN) {
- min_accel_offset = c_end;
- goto without_accel;
- }
-
- goto with_accel;
-
-without_accel:
- do {
- assert(c < min_accel_offset);
- if (!s) {
- goto exit;
- } else if (s < sheng_end) {
- s = doSheng64(m, &c, min_accel_offset, c_end, s, 0);
- } else {
- s = doNormal64_8(m, &c, min_accel_offset, s, 0, mode);
- assert(c <= min_accel_offset);
- }
-
- if (mode != NO_MATCHES && s >= accept_limit) {
- if (mode == STOP_AT_MATCH) {
- DEBUG_PRINTF("match - pausing\n");
- *state = s;
- *c_final = c - 1;
- return MO_MATCHES_PENDING;
- }
-
- u64a loc = (c - 1) - buf + offAdj + 1;
- if (single) {
- DEBUG_PRINTF("reporting %u\n", m->arb_report);
- if (cb(0, loc, m->arb_report, ctxt) == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- } else if (doComplexReport64(cb, ctxt, m, s, loc, 0,
- &cached_accept_state,
- &cached_accept_id)
- == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- }
-
- assert(c <= c_end); /* sheng is fuzzy for min_accel_offset */
- } while (c < min_accel_offset);
-
- if (c == c_end) {
- goto exit;
- }
-
-with_accel:
- do {
- u32 accel_limit = m->accel_limit_8;
-
- assert(c < c_end);
- if (!s) {
- goto exit;
- } else if (s < sheng_end) {
- if (s > m->sheng_accel_limit) {
- c = run_mcsheng_accel64(m, aux, s, &min_accel_offset, c, c_end);
- if (c == c_end) {
- goto exit;
- } else {
- goto without_accel;
- }
- }
- s = doSheng64(m, &c, c_end, c_end, s, 1);
- } else {
- if (s >= accel_limit && aux[s].accel_offset) {
- c = run_mcsheng_accel64(m, aux, s, &min_accel_offset, c, c_end);
- if (c == c_end) {
- goto exit;
- } else {
- goto without_accel;
- }
- }
- s = doNormal64_8(m, &c, c_end, s, 1, mode);
- }
-
- if (mode != NO_MATCHES && s >= accept_limit) {
- if (mode == STOP_AT_MATCH) {
- DEBUG_PRINTF("match - pausing\n");
- *state = s;
- *c_final = c - 1;
- return MO_MATCHES_PENDING;
- }
-
- u64a loc = (c - 1) - buf + offAdj + 1;
- if (single) {
- DEBUG_PRINTF("reporting %u\n", m->arb_report);
- if (cb(0, loc, m->arb_report, ctxt) == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- } else if (doComplexReport64(cb, ctxt, m, s, loc, 0,
- &cached_accept_state,
- &cached_accept_id)
- == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- }
-
- assert(c <= c_end);
- } while (c < c_end);
-
-exit:
- *state = s;
- if (mode == STOP_AT_MATCH) {
- *c_final = c_end;
- }
- return MO_ALIVE;
-}
-
-static never_inline
-char mcsheng64Exec8_i_cb(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **final_point) {
- return mcsheng64Exec8_i(m, state, buf, len, offAdj, cb, ctxt, single,
- final_point, CALLBACK_OUTPUT);
-}
-
-static never_inline
-char mcsheng64Exec8_i_sam(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **final_point) {
- return mcsheng64Exec8_i(m, state, buf, len, offAdj, cb, ctxt, single,
- final_point, STOP_AT_MATCH);
-}
-
-static never_inline
-char mcsheng64Exec8_i_nm(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **final_point) {
- return mcsheng64Exec8_i(m, state, buf, len, offAdj, cb, ctxt, single,
- final_point, NO_MATCHES);
-}
-
-static really_inline
-char mcsheng64Exec8_i_ni(const struct mcsheng64 *m, u32 *state, const u8 *buf,
- size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
- char single, const u8 **final_point,
- enum MatchMode mode) {
- if (mode == CALLBACK_OUTPUT) {
- return mcsheng64Exec8_i_cb(m, state, buf, len, offAdj, cb, ctxt, single,
- final_point);
- } else if (mode == STOP_AT_MATCH) {
- return mcsheng64Exec8_i_sam(m, state, buf, len, offAdj, cb, ctxt,
- single, final_point);
- } else {
- assert(mode == NO_MATCHES);
- return mcsheng64Exec8_i_nm(m, state, buf, len, offAdj, cb, ctxt, single,
- final_point);
- }
-}
-
-static really_inline
-char mcshengCheckEOD64(const struct NFA *nfa, u32 s, u64a offset,
- NfaCallback cb, void *ctxt) {
- const struct mcsheng64 *m = getImplNfa(nfa);
- const struct mstate_aux *aux = get_aux64(m, s);
-
- if (!aux->accept_eod) {
- return MO_CONTINUE_MATCHING;
- }
- return doComplexReport64(cb, ctxt, m, s, offset, 1, NULL, NULL);
-}
-
-static really_inline
-char nfaExecMcSheng64_16_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
- const u8 *hend, NfaCallback cb, void *context,
- struct mq *q, char single, s64a end,
- enum MatchMode mode) {
- assert(n->type == MCSHENG_64_NFA_16);
- const struct mcsheng64 *m = getImplNfa(n);
- s64a sp;
-
- assert(ISALIGNED_N(q->state, 2));
- u32 s = *(u16 *)q->state;
-
- if (q->report_current) {
- assert(s);
- assert(get_aux64(m, s)->accept);
-
- int rv;
- if (single) {
- DEBUG_PRINTF("reporting %u\n", m->arb_report);
- rv = cb(0, q_cur_offset(q), m->arb_report, context);
- } else {
- u32 cached_accept_id = 0;
- u32 cached_accept_state = 0;
-
- rv = doComplexReport64(cb, context, m, s, q_cur_offset(q), 0,
- &cached_accept_state, &cached_accept_id);
- }
-
- q->report_current = 0;
-
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- }
-
- sp = q_cur_loc(q);
- q->cur++;
-
- const u8 *cur_buf = sp < 0 ? hend : buffer;
-
- assert(q->cur);
- if (mode != NO_MATCHES && q->items[q->cur - 1].location > end) {
- DEBUG_PRINTF("this is as far as we go\n");
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location = end;
- *(u16 *)q->state = s;
- return MO_ALIVE;
- }
-
- while (1) {
- assert(q->cur < q->end);
- s64a ep = q->items[q->cur].location;
- if (mode != NO_MATCHES) {
- ep = MIN(ep, end);
- }
-
- assert(ep >= sp);
-
- s64a local_ep = ep;
- if (sp < 0) {
- local_ep = MIN(0, ep);
- }
-
- /* do main buffer region */
- const u8 *final_look;
- char rv = mcsheng64Exec16_i_ni(m, &s, cur_buf + sp, local_ep - sp,
- offset + sp, cb, context, single,
- &final_look, mode);
- if (rv == MO_DEAD) {
- *(u16 *)q->state = 0;
- return MO_DEAD;
- }
- if (mode == STOP_AT_MATCH && rv == MO_MATCHES_PENDING) {
- DEBUG_PRINTF("this is as far as we go\n");
- DEBUG_PRINTF("state %u final_look %zd\n", s, final_look - cur_buf);
-
- assert(q->cur);
- assert(final_look != cur_buf + local_ep);
-
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location = final_look - cur_buf + 1; /* due to
- * early -1 */
- *(u16 *)q->state = s;
- return MO_MATCHES_PENDING;
- }
-
- assert(rv == MO_ALIVE);
- assert(q->cur);
- if (mode != NO_MATCHES && q->items[q->cur].location > end) {
- DEBUG_PRINTF("this is as far as we go\n");
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location = end;
- *(u16 *)q->state = s;
- return MO_ALIVE;
- }
-
- sp = local_ep;
-
- if (sp == 0) {
- cur_buf = buffer;
- }
-
- if (sp != ep) {
- continue;
- }
-
- switch (q->items[q->cur].type) {
- case MQE_TOP:
- assert(sp + offset || !s);
- if (sp + offset == 0) {
- s = m->start_anchored;
- break;
- }
- s = mcshengEnableStarts64(m, s);
- break;
- case MQE_END:
- *(u16 *)q->state = s;
- q->cur++;
- return s ? MO_ALIVE : MO_DEAD;
- default:
- assert(!"invalid queue event");
- }
-
- q->cur++;
- }
-}
-
-static really_inline
-char nfaExecMcSheng64_8_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
- const u8 *hend, NfaCallback cb, void *context,
- struct mq *q, char single, s64a end,
- enum MatchMode mode) {
- assert(n->type == MCSHENG_64_NFA_8);
- const struct mcsheng64 *m = getImplNfa(n);
- s64a sp;
-
- u32 s = *(u8 *)q->state;
-
- if (q->report_current) {
- assert(s);
- assert(s >= m->accept_limit_8);
-
- int rv;
- if (single) {
- DEBUG_PRINTF("reporting %u\n", m->arb_report);
-
- rv = cb(0, q_cur_offset(q), m->arb_report, context);
- } else {
- u32 cached_accept_id = 0;
- u32 cached_accept_state = 0;
-
- rv = doComplexReport64(cb, context, m, s, q_cur_offset(q), 0,
- &cached_accept_state, &cached_accept_id);
- }
-
- q->report_current = 0;
-
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- }
-
- sp = q_cur_loc(q);
- q->cur++;
-
- const u8 *cur_buf = sp < 0 ? hend : buffer;
-
- if (mode != NO_MATCHES && q->items[q->cur - 1].location > end) {
- DEBUG_PRINTF("this is as far as we go\n");
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location = end;
- *(u8 *)q->state = s;
- return MO_ALIVE;
- }
-
- while (1) {
- DEBUG_PRINTF("%s @ %llu\n", q->items[q->cur].type == MQE_TOP ? "TOP" :
- q->items[q->cur].type == MQE_END ? "END" : "???",
- q->items[q->cur].location + offset);
- assert(q->cur < q->end);
- s64a ep = q->items[q->cur].location;
- if (mode != NO_MATCHES) {
- ep = MIN(ep, end);
- }
-
- assert(ep >= sp);
-
- s64a local_ep = ep;
- if (sp < 0) {
- local_ep = MIN(0, ep);
- }
-
- const u8 *final_look;
- char rv = mcsheng64Exec8_i_ni(m, &s, cur_buf + sp, local_ep - sp,
- offset + sp, cb, context, single,
- &final_look, mode);
- if (rv == MO_HALT_MATCHING) {
- *(u8 *)q->state = 0;
- return MO_DEAD;
- }
- if (mode == STOP_AT_MATCH && rv == MO_MATCHES_PENDING) {
- DEBUG_PRINTF("this is as far as we go\n");
- DEBUG_PRINTF("state %u final_look %zd\n", s, final_look - cur_buf);
-
- assert(q->cur);
- assert(final_look != cur_buf + local_ep);
-
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location = final_look - cur_buf + 1; /* due to
- * early -1 */
- *(u8 *)q->state = s;
- return MO_MATCHES_PENDING;
- }
-
- assert(rv == MO_ALIVE);
- assert(q->cur);
- if (mode != NO_MATCHES && q->items[q->cur].location > end) {
- DEBUG_PRINTF("this is as far as we go\n");
- assert(q->cur);
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location = end;
- *(u8 *)q->state = s;
- return MO_ALIVE;
- }
-
- sp = local_ep;
-
- if (sp == 0) {
- cur_buf = buffer;
- }
-
- if (sp != ep) {
- continue;
- }
-
- switch (q->items[q->cur].type) {
- case MQE_TOP:
- assert(sp + offset || !s);
- if (sp + offset == 0) {
- s = (u8)m->start_anchored;
- break;
- }
- s = mcshengEnableStarts64(m, s);
- break;
- case MQE_END:
- *(u8 *)q->state = s;
- q->cur++;
- return s ? MO_ALIVE : MO_DEAD;
- default:
- assert(!"invalid queue event");
- }
-
- q->cur++;
- }
-}
-
-char nfaExecMcSheng64_8_Q(const struct NFA *n, struct mq *q, s64a end) {
- u64a offset = q->offset;
- const u8 *buffer = q->buffer;
- NfaCallback cb = q->cb;
- void *context = q->context;
- assert(n->type == MCSHENG_64_NFA_8);
- const struct mcsheng64 *m = getImplNfa(n);
- const u8 *hend = q->history + q->hlength;
-
- return nfaExecMcSheng64_8_Q2i(n, offset, buffer, hend, cb, context, q,
- m->flags & MCSHENG_FLAG_SINGLE, end,
- CALLBACK_OUTPUT);
-}
-
-char nfaExecMcSheng64_16_Q(const struct NFA *n, struct mq *q, s64a end) {
- u64a offset = q->offset;
- const u8 *buffer = q->buffer;
- NfaCallback cb = q->cb;
- void *context = q->context;
- assert(n->type == MCSHENG_64_NFA_16);
- const struct mcsheng64 *m = getImplNfa(n);
- const u8 *hend = q->history + q->hlength;
-
- return nfaExecMcSheng64_16_Q2i(n, offset, buffer, hend, cb, context, q,
- m->flags & MCSHENG_FLAG_SINGLE, end,
- CALLBACK_OUTPUT);
-}
-
-char nfaExecMcSheng64_8_reportCurrent(const struct NFA *n, struct mq *q) {
- const struct mcsheng64 *m = getImplNfa(n);
- NfaCallback cb = q->cb;
- void *ctxt = q->context;
- u32 s = *(u8 *)q->state;
- u8 single = m->flags & MCSHENG_FLAG_SINGLE;
- u64a offset = q_cur_offset(q);
- assert(q_cur_type(q) == MQE_START);
- assert(s);
-
- if (s >= m->accept_limit_8) {
- if (single) {
- DEBUG_PRINTF("reporting %u\n", m->arb_report);
- cb(0, offset, m->arb_report, ctxt);
- } else {
- u32 cached_accept_id = 0;
- u32 cached_accept_state = 0;
-
- doComplexReport64(cb, ctxt, m, s, offset, 0, &cached_accept_state,
- &cached_accept_id);
- }
- }
-
- return 0;
-}
-
-char nfaExecMcSheng64_16_reportCurrent(const struct NFA *n, struct mq *q) {
- const struct mcsheng64 *m = getImplNfa(n);
- NfaCallback cb = q->cb;
- void *ctxt = q->context;
- u32 s = *(u16 *)q->state;
- const struct mstate_aux *aux = get_aux64(m, s);
- u8 single = m->flags & MCSHENG_FLAG_SINGLE;
- u64a offset = q_cur_offset(q);
- assert(q_cur_type(q) == MQE_START);
- DEBUG_PRINTF("state %u\n", s);
- assert(s);
-
- if (aux->accept) {
- if (single) {
- DEBUG_PRINTF("reporting %u\n", m->arb_report);
- cb(0, offset, m->arb_report, ctxt);
- } else {
- u32 cached_accept_id = 0;
- u32 cached_accept_state = 0;
-
- doComplexReport64(cb, ctxt, m, s, offset, 0, &cached_accept_state,
- &cached_accept_id);
- }
- }
-
- return 0;
-}
-
-static
-char mcshengHasAccept64(const struct mcsheng64 *m, const struct mstate_aux *aux,
- ReportID report) {
- assert(m && aux);
-
- if (!aux->accept) {
- return 0;
- }
-
- const struct report_list *rl = (const struct report_list *)
- ((const char *)m + aux->accept - sizeof(struct NFA));
- assert(ISALIGNED_N(rl, 4));
-
- DEBUG_PRINTF("report list has %u entries\n", rl->count);
-
- for (u32 i = 0; i < rl->count; i++) {
- if (rl->report[i] == report) {
- return 1;
- }
- }
-
- return 0;
-}
-
-char nfaExecMcSheng64_8_inAccept(const struct NFA *n, ReportID report,
- struct mq *q) {
- assert(n && q);
-
- const struct mcsheng64 *m = getImplNfa(n);
- u8 s = *(u8 *)q->state;
- DEBUG_PRINTF("checking accepts for %hhu\n", s);
-
- return mcshengHasAccept64(m, get_aux64(m, s), report);
-}
-
-char nfaExecMcSheng64_8_inAnyAccept(const struct NFA *n, struct mq *q) {
- assert(n && q);
-
- const struct mcsheng64 *m = getImplNfa(n);
- u8 s = *(u8 *)q->state;
- DEBUG_PRINTF("checking accepts for %hhu\n", s);
-
- return !!get_aux64(m, s)->accept;
-}
-
-char nfaExecMcSheng64_16_inAccept(const struct NFA *n, ReportID report,
- struct mq *q) {
- assert(n && q);
-
- const struct mcsheng64 *m = getImplNfa(n);
- u16 s = *(u16 *)q->state;
- DEBUG_PRINTF("checking accepts for %hu\n", s);
-
- return mcshengHasAccept64(m, get_aux64(m, s), report);
-}
-
-char nfaExecMcSheng64_16_inAnyAccept(const struct NFA *n, struct mq *q) {
- assert(n && q);
-
- const struct mcsheng64 *m = getImplNfa(n);
- u16 s = *(u16 *)q->state;
- DEBUG_PRINTF("checking accepts for %hu\n", s);
-
- return !!get_aux64(m, s)->accept;
-}
-
-char nfaExecMcSheng64_8_Q2(const struct NFA *n, struct mq *q, s64a end) {
- u64a offset = q->offset;
- const u8 *buffer = q->buffer;
- NfaCallback cb = q->cb;
- void *context = q->context;
- assert(n->type == MCSHENG_64_NFA_8);
- const struct mcsheng64 *m = getImplNfa(n);
- const u8 *hend = q->history + q->hlength;
-
- return nfaExecMcSheng64_8_Q2i(n, offset, buffer, hend, cb, context, q,
- m->flags & MCSHENG_FLAG_SINGLE, end,
- STOP_AT_MATCH);
-}
-
-char nfaExecMcSheng64_16_Q2(const struct NFA *n, struct mq *q, s64a end) {
- u64a offset = q->offset;
- const u8 *buffer = q->buffer;
- NfaCallback cb = q->cb;
- void *context = q->context;
- assert(n->type == MCSHENG_64_NFA_16);
- const struct mcsheng64 *m = getImplNfa(n);
- const u8 *hend = q->history + q->hlength;
-
- return nfaExecMcSheng64_16_Q2i(n, offset, buffer, hend, cb, context, q,
- m->flags & MCSHENG_FLAG_SINGLE, end,
- STOP_AT_MATCH);
-}
-
-char nfaExecMcSheng64_8_QR(const struct NFA *n, struct mq *q, ReportID report) {
- u64a offset = q->offset;
- const u8 *buffer = q->buffer;
- NfaCallback cb = q->cb;
- void *context = q->context;
- assert(n->type == MCSHENG_64_NFA_8);
- const struct mcsheng64 *m = getImplNfa(n);
- const u8 *hend = q->history + q->hlength;
-
- char rv = nfaExecMcSheng64_8_Q2i(n, offset, buffer, hend, cb, context, q,
- m->flags & MCSHENG_FLAG_SINGLE,
- 0 /* end */, NO_MATCHES);
- if (rv && nfaExecMcSheng64_8_inAccept(n, report, q)) {
- return MO_MATCHES_PENDING;
- } else {
- return rv;
- }
-}
-
-char nfaExecMcSheng64_16_QR(const struct NFA *n, struct mq *q, ReportID report) {
- u64a offset = q->offset;
- const u8 *buffer = q->buffer;
- NfaCallback cb = q->cb;
- void *context = q->context;
- assert(n->type == MCSHENG_64_NFA_16);
- const struct mcsheng64 *m = getImplNfa(n);
- const u8 *hend = q->history + q->hlength;
-
- char rv = nfaExecMcSheng64_16_Q2i(n, offset, buffer, hend, cb, context, q,
- m->flags & MCSHENG_FLAG_SINGLE,
- 0 /* end */, NO_MATCHES);
-
- if (rv && nfaExecMcSheng64_16_inAccept(n, report, q)) {
- return MO_MATCHES_PENDING;
- } else {
- return rv;
- }
-}
-
-char nfaExecMcSheng64_8_initCompressedState(const struct NFA *nfa, u64a offset,
- void *state, UNUSED u8 key) {
- const struct mcsheng64 *m = getImplNfa(nfa);
- u8 s = offset ? m->start_floating : m->start_anchored;
- if (s) {
- *(u8 *)state = s;
- return 1;
- }
- return 0;
-}
-
-char nfaExecMcSheng64_16_initCompressedState(const struct NFA *nfa, u64a offset,
- void *state, UNUSED u8 key) {
- const struct mcsheng64 *m = getImplNfa(nfa);
- u16 s = offset ? m->start_floating : m->start_anchored;
- if (s) {
- unaligned_store_u16(state, s);
- return 1;
- }
- return 0;
-}
-
-char nfaExecMcSheng64_8_testEOD(const struct NFA *nfa, const char *state,
- UNUSED const char *streamState, u64a offset,
- NfaCallback callback, void *context) {
- return mcshengCheckEOD64(nfa, *(const u8 *)state, offset, callback,
- context);
-}
-
-char nfaExecMcSheng64_16_testEOD(const struct NFA *nfa, const char *state,
- UNUSED const char *streamState, u64a offset,
- NfaCallback callback, void *context) {
- assert(ISALIGNED_N(state, 2));
- return mcshengCheckEOD64(nfa, *(const u16 *)state, offset, callback,
- context);
-}
-
-char nfaExecMcSheng64_8_queueInitState(UNUSED const struct NFA *nfa, struct mq *q) {
- assert(nfa->scratchStateSize == 1);
- *(u8 *)q->state = 0;
- return 0;
-}
-
-char nfaExecMcSheng64_16_queueInitState(UNUSED const struct NFA *nfa, struct mq *q) {
- assert(nfa->scratchStateSize == 2);
- assert(ISALIGNED_N(q->state, 2));
- *(u16 *)q->state = 0;
- return 0;
-}
-
-char nfaExecMcSheng64_8_queueCompressState(UNUSED const struct NFA *nfa,
- const struct mq *q, UNUSED s64a loc) {
- void *dest = q->streamState;
- const void *src = q->state;
- assert(nfa->scratchStateSize == 1);
- assert(nfa->streamStateSize == 1);
- *(u8 *)dest = *(const u8 *)src;
- return 0;
-}
-
-char nfaExecMcSheng64_8_expandState(UNUSED const struct NFA *nfa, void *dest,
- const void *src, UNUSED u64a offset,
- UNUSED u8 key) {
- assert(nfa->scratchStateSize == 1);
- assert(nfa->streamStateSize == 1);
- *(u8 *)dest = *(const u8 *)src;
- return 0;
-}
-
-char nfaExecMcSheng64_16_queueCompressState(UNUSED const struct NFA *nfa,
- const struct mq *q,
- UNUSED s64a loc) {
- void *dest = q->streamState;
- const void *src = q->state;
- assert(nfa->scratchStateSize == 2);
- assert(nfa->streamStateSize == 2);
- assert(ISALIGNED_N(src, 2));
- unaligned_store_u16(dest, *(const u16 *)(src));
- return 0;
-}
-
-char nfaExecMcSheng64_16_expandState(UNUSED const struct NFA *nfa, void *dest,
- const void *src, UNUSED u64a offset,
- UNUSED u8 key) {
- assert(nfa->scratchStateSize == 2);
- assert(nfa->streamStateSize == 2);
- assert(ISALIGNED_N(dest, 2));
- *(u16 *)dest = unaligned_load_u16(src);
- return 0;
-}
-#endif
+
+#if defined(HAVE_AVX512VBMI)
+static really_inline
+const struct mstate_aux *get_aux64(const struct mcsheng64 *m, u32 s) {
+ const char *nfa = (const char *)m - sizeof(struct NFA);
+ const struct mstate_aux *aux
+ = s + (const struct mstate_aux *)(nfa + m->aux_offset);
+
+ assert(ISALIGNED(aux));
+ return aux;
+}
+
+static really_inline
+u32 mcshengEnableStarts64(const struct mcsheng64 *m, u32 s) {
+ const struct mstate_aux *aux = get_aux64(m, s);
+
+ DEBUG_PRINTF("enabling starts %u->%hu\n", s, aux->top);
+ return aux->top;
+}
+
+static really_inline
+char doComplexReport64(NfaCallback cb, void *ctxt, const struct mcsheng64 *m,
+ u32 s, u64a loc, char eod, u32 *cached_accept_state,
+ u32 *cached_accept_id) {
+ DEBUG_PRINTF("reporting state = %u, loc=%llu, eod %hhu\n",
+ s & STATE_MASK, loc, eod);
+
+ if (!eod && s == *cached_accept_state) {
+ if (cb(0, loc, *cached_accept_id, ctxt) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING; /* termination requested */
+ }
+
+ return MO_CONTINUE_MATCHING; /* continue execution */
+ }
+
+ const struct mstate_aux *aux = get_aux64(m, s);
+ size_t offset = eod ? aux->accept_eod : aux->accept;
+
+ assert(offset);
+ const struct report_list *rl
+ = (const void *)((const char *)m + offset - sizeof(struct NFA));
+ assert(ISALIGNED(rl));
+
+ DEBUG_PRINTF("report list size %u\n", rl->count);
+ u32 count = rl->count;
+
+ if (!eod && count == 1) {
+ *cached_accept_state = s;
+ *cached_accept_id = rl->report[0];
+
+ DEBUG_PRINTF("reporting %u\n", rl->report[0]);
+ if (cb(0, loc, rl->report[0], ctxt) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING; /* termination requested */
+ }
+
+ return MO_CONTINUE_MATCHING; /* continue execution */
+ }
+
+ for (u32 i = 0; i < count; i++) {
+ DEBUG_PRINTF("reporting %u\n", rl->report[i]);
+ if (cb(0, loc, rl->report[i], ctxt) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING; /* termination requested */
+ }
+ }
+
+ return MO_CONTINUE_MATCHING; /* continue execution */
+}
+
+static really_inline
+u32 doSheng64(const struct mcsheng64 *m, const u8 **c_inout, const u8 *soft_c_end,
+ const u8 *hard_c_end, u32 s_in, char do_accel) {
+ assert(s_in < m->sheng_end);
+ assert(s_in); /* should not already be dead */
+ assert(soft_c_end <= hard_c_end);
+ DEBUG_PRINTF("s_in = %u (adjusted %u)\n", s_in, s_in - 1);
+ m512 s = set64x8(s_in - 1);
+ const u8 *c = *c_inout;
+ const u8 *c_end = hard_c_end - SHENG_CHUNK + 1;
+ if (!do_accel) {
+ c_end = MIN(soft_c_end, hard_c_end - SHENG_CHUNK + 1);
+ }
+
+ const m512 *masks = m->sheng_succ_masks;
+ u8 sheng_limit = m->sheng_end - 1; /* - 1: no dead state */
+ u8 sheng_stop_limit = do_accel ? m->sheng_accel_limit : sheng_limit;
+
+ /* When we use movd to get a u32 containing our state, it will have 4 lanes
+ * all duplicating the state. We can create versions of our limits with 4
+ * copies to directly compare against; this prevents us from generating code
+ * to extract a single copy of the state from the u32 for checking. */
+ u32 sheng_stop_limit_x4 = sheng_stop_limit * 0x01010101;
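+ /* e.g. a stop limit of 0x21 becomes 0x21212121 here, matching the four
+ * duplicated state lanes produced by movd512. */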
+
+#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
+ u32 sheng_limit_x4 = sheng_limit * 0x01010101;
+ m512 simd_stop_limit = set16x32(sheng_stop_limit_x4);
+ m512 accel_delta = set64x8(sheng_limit - sheng_stop_limit);
+ DEBUG_PRINTF("end %hhu, accel %hu --> limit %hhu\n", sheng_limit,
+ m->sheng_accel_limit, sheng_stop_limit);
+#endif
+
+#define SHENG64_SINGLE_ITER do { \
+ m512 succ_mask = masks[*(c++)]; \
+ s = vpermb512(s, succ_mask); \
+ u32 s_gpr_x4 = movd512(s); /* convert to u8 */ \
+ DEBUG_PRINTF("c %hhu (%c) --> s %u\n", c[-1], c[-1], s_gpr_x4); \
+ if (s_gpr_x4 >= sheng_stop_limit_x4) { \
+ s_gpr = s_gpr_x4; \
+ goto exit; \
+ } \
+ } while (0)
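+
+/* Each SHENG64_SINGLE_ITER consumes one input byte: the byte selects a
+ * 64-byte successor mask, vpermb512 permutes all 64 duplicated state
+ * bytes at once, and movd512 extracts four duplicated copies for the
+ * x4 stop-limit comparison. */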
+
+ u8 s_gpr;
+ while (c < c_end) {
+#if defined(HAVE_BMI2) && defined(ARCH_64_BIT)
+ /* This version uses pext for efficiently bitbashing out scaled
+ * versions of the bytes to process from a u64a */
+
+ u64a data_bytes = unaligned_load_u64a(c);
+ u64a cc0 = pdep64(data_bytes, 0x3fc0); /* extract scaled low byte */
+ data_bytes &= ~0xffULL; /* clear low bits for scale space */
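+ /* pdep64 deposited the low byte into bit positions 6..13, so cc0 equals
+ * (u64a)c[0] << 6, i.e. the byte pre-scaled by the 64-byte stride of the
+ * m512 mask table. */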
+
+ m512 succ_mask0 = load512((const char *)masks + cc0);
+ s = vpermb512(s, succ_mask0);
+ m512 s_max = s;
+ m512 s_max0 = s_max;
+ DEBUG_PRINTF("c %02llx --> s %u\n", cc0 >> 6, movd512(s));
+
+#define SHENG64_SINGLE_UNROLL_ITER(iter) \
+ assert(iter); \
+ u64a cc##iter = pext64(data_bytes, mcsheng64_pext_mask[iter]); \
+ assert(cc##iter == (u64a)c[iter] << 6); \
+ m512 succ_mask##iter = load512((const char *)masks + cc##iter); \
+ s = vpermb512(s, succ_mask##iter); \
+ if (do_accel && iter == 7) { \
+ /* in the final iteration we also have to check against accel */ \
+ m512 s_temp = sadd_u8_m512(s, accel_delta); \
+ s_max = max_u8_m512(s_max, s_temp); \
+ } else { \
+ s_max = max_u8_m512(s_max, s); \
+ } \
+ m512 s_max##iter = s_max; \
+ DEBUG_PRINTF("c %02llx --> s %u max %u\n", cc##iter >> 6, \
+ movd512(s), movd512(s_max));
+
+ SHENG64_SINGLE_UNROLL_ITER(1);
+ SHENG64_SINGLE_UNROLL_ITER(2);
+ SHENG64_SINGLE_UNROLL_ITER(3);
+ SHENG64_SINGLE_UNROLL_ITER(4);
+ SHENG64_SINGLE_UNROLL_ITER(5);
+ SHENG64_SINGLE_UNROLL_ITER(6);
+ SHENG64_SINGLE_UNROLL_ITER(7);
+
+ if (movd512(s_max7) >= sheng_limit_x4) {
+ DEBUG_PRINTF("exit found\n");
+
+ /* Explicitly check the last byte, as it is the most likely to have
+ * exceeded the limit: it is the only iteration that also checks for
+ * acceleration. */
+ if (movd512(s_max6) < sheng_limit_x4) {
+ c += SHENG_CHUNK;
+ s_gpr = movq512(s);
+ assert(s_gpr >= sheng_stop_limit);
+ goto exit;
+ }
+
+ /* use shift-xor to create a register containing all of the max
+ * values */
+ m512 blended = rshift64_m512(s_max0, 56);
+ blended = xor512(blended, rshift64_m512(s_max1, 48));
+ blended = xor512(blended, rshift64_m512(s_max2, 40));
+ blended = xor512(blended, rshift64_m512(s_max3, 32));
+ blended = xor512(blended, rshift64_m512(s_max4, 24));
+ blended = xor512(blended, rshift64_m512(s_max5, 16));
+ blended = xor512(blended, rshift64_m512(s_max6, 8));
+ blended = xor512(blended, s);
+ blended = xor512(blended, rshift64_m512(blended, 8));
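+ /* After this final shift-xor, byte k of the low 64-bit lane holds the
+ * running max from iteration k (byte 7 holds the final state), so the
+ * compare below can locate the earliest byte that reached the stop
+ * limit. */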
+ DEBUG_PRINTF("blended %016llx\n", movq512(blended));
+
+ m512 final = min_u8_m512(blended, simd_stop_limit);
+ m512 cmp = sub_u8_m512(final, simd_stop_limit);
+ m128 tmp = cast512to128(cmp);
+ u64a stops = ~movemask128(tmp);
+ assert(stops);
+ u32 earliest = ctz32(stops);
+ DEBUG_PRINTF("stops %02llx, earliest %u\n", stops, earliest);
+ assert(earliest < 8);
+ c += earliest + 1;
+ s_gpr = movq512(blended) >> (earliest * 8);
+ assert(s_gpr >= sheng_stop_limit);
+ goto exit;
+ } else {
+ c += SHENG_CHUNK;
+ }
+#else
+ SHENG64_SINGLE_ITER;
+ SHENG64_SINGLE_ITER;
+ SHENG64_SINGLE_ITER;
+ SHENG64_SINGLE_ITER;
+
+ SHENG64_SINGLE_ITER;
+ SHENG64_SINGLE_ITER;
+ SHENG64_SINGLE_ITER;
+ SHENG64_SINGLE_ITER;
+#endif
+ }
+
+ assert(c_end - c < SHENG_CHUNK);
+ if (c < soft_c_end) {
+ assert(soft_c_end - c < SHENG_CHUNK);
+ switch (soft_c_end - c) {
+ case 7:
+ SHENG64_SINGLE_ITER; // fallthrough
+ case 6:
+ SHENG64_SINGLE_ITER; // fallthrough
+ case 5:
+ SHENG64_SINGLE_ITER; // fallthrough
+ case 4:
+ SHENG64_SINGLE_ITER; // fallthrough
+ case 3:
+ SHENG64_SINGLE_ITER; // fallthrough
+ case 2:
+ SHENG64_SINGLE_ITER; // fallthrough
+ case 1:
+ SHENG64_SINGLE_ITER; // fallthrough
+ }
+ }
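+ /* The switch above acts as a Duff's-device-style remainder: every case
+ * falls through, consuming the leftover soft bytes one at a time. */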
+
+ assert(c >= soft_c_end);
+
+ s_gpr = movq512(s);
+exit:
+ assert(c <= hard_c_end);
+ DEBUG_PRINTF("%zu from end; s %hhu\n", c_end - c, s_gpr);
+ assert(c >= soft_c_end || s_gpr >= sheng_stop_limit);
+ /* undo state adjustment to match mcclellan view */
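+ /* (sheng numbers live states from 0 with sheng_limit as its dead state;
+ * mcclellan reserves 0 for dead, hence the +1 shift, inverting the
+ * set64x8(s_in - 1) adjustment made on entry) */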
+ if (s_gpr == sheng_limit) {
+ s_gpr = 0;
+ } else if (s_gpr < sheng_limit) {
+ s_gpr++;
+ }
+
+ *c_inout = c;
+ return s_gpr;
+}
+
+static really_inline
+const char *findShermanState64(UNUSED const struct mcsheng64 *m,
+ const char *sherman_base_offset,
+ u32 sherman_base, u32 s) {
+ const char *rv
+ = sherman_base_offset + SHERMAN_FIXED_SIZE * (s - sherman_base);
+ assert(rv < (const char *)m + m->length - sizeof(struct NFA));
+ UNUSED u8 type = *(const u8 *)(rv + SHERMAN_TYPE_OFFSET);
+ assert(type == SHERMAN_STATE);
+ return rv;
+}
+
+static really_inline
+const u8 *run_mcsheng_accel64(const struct mcsheng64 *m,
+ const struct mstate_aux *aux, u32 s,
+ const u8 **min_accel_offset,
+ const u8 *c, const u8 *c_end) {
+ DEBUG_PRINTF("skipping\n");
+ u32 accel_offset = aux[s].accel_offset;
+
+ assert(aux[s].accel_offset);
+ assert(accel_offset >= m->aux_offset);
+ assert(!m->sherman_offset || accel_offset < m->sherman_offset);
+
+ const union AccelAux *aaux = (const void *)((const char *)m + accel_offset);
+ const u8 *c2 = run_accel(aaux, c, c_end);
+
+ if (c2 < *min_accel_offset + BAD_ACCEL_DIST) {
+ *min_accel_offset = c2 + BIG_ACCEL_PENALTY;
+ } else {
+ *min_accel_offset = c2 + SMALL_ACCEL_PENALTY;
+ }
+
+ if (*min_accel_offset >= c_end - ACCEL_MIN_LEN) {
+ *min_accel_offset = c_end;
+ }
+
+ DEBUG_PRINTF("advanced %zd, next accel chance in %zd/%zd\n",
+ c2 - c, *min_accel_offset - c2, c_end - c2);
+
+ return c2;
+}
+
+static really_inline
+u32 doNormal64_16(const struct mcsheng64 *m, const u8 **c_inout, const u8 *end,
+ u32 s, char do_accel, enum MatchMode mode) {
+ const u8 *c = *c_inout;
+ const u16 *succ_table
+ = (const u16 *)((const char *)m + sizeof(struct mcsheng64));
+ assert(ISALIGNED_N(succ_table, 2));
+ u32 sheng_end = m->sheng_end;
+ u32 sherman_base = m->sherman_limit;
+ const char *sherman_base_offset
+ = (const char *)m - sizeof(struct NFA) + m->sherman_offset;
+ u32 as = m->alphaShift;
+
+ /* Adjust the start of the succ table so we can index into it using the
+ * state id (rather than adjusting to a normal id). As we will not be
+ * processing states with low state ids, we will not be accessing data
+ * before the succ table. Note: due to the size of the sheng tables, the
+ * succ_table pointer will still be inside the engine. */
+ succ_table -= sheng_end << as;
+ s &= STATE_MASK;
+ while (c < end && s >= sheng_end) {
+ u8 cprime = m->remap[*c];
+ DEBUG_PRINTF("c: %02hhx '%c' cp:%02hhx (s=%u)\n", *c,
+ ourisprint(*c) ? *c : '?', cprime, s);
+ if (s < sherman_base) {
+ DEBUG_PRINTF("doing normal\n");
+ assert(s < m->state_count);
+ s = succ_table[(s << as) + cprime];
+ } else {
+ const char *sherman_state
+ = findShermanState64(m, sherman_base_offset, sherman_base, s);
+ DEBUG_PRINTF("doing sherman (%u)\n", s);
+ s = doSherman16(sherman_state, cprime, succ_table, as);
+ }
+
+ DEBUG_PRINTF("s: %u (%u)\n", s, s & STATE_MASK);
+ c++;
+
+ if (do_accel && (s & ACCEL_FLAG)) {
+ break;
+ }
+ if (mode != NO_MATCHES && (s & ACCEPT_FLAG)) {
+ break;
+ }
+
+ s &= STATE_MASK;
+ }
+
+ *c_inout = c;
+ return s;
+}
+
+static really_inline
+char mcsheng64Exec16_i(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **c_final, enum MatchMode mode) {
+ assert(ISALIGNED_N(state, 2));
+ if (!len) {
+ if (mode == STOP_AT_MATCH) {
+ *c_final = buf;
+ }
+ return MO_ALIVE;
+ }
+
+ u32 s = *state;
+ const u8 *c = buf;
+ const u8 *c_end = buf + len;
+ const u8 sheng_end = m->sheng_end;
+ const struct mstate_aux *aux
+ = (const struct mstate_aux *)((const char *)m + m->aux_offset
+ - sizeof(struct NFA));
+
+ s &= STATE_MASK;
+
+ u32 cached_accept_id = 0;
+ u32 cached_accept_state = 0;
+
+ DEBUG_PRINTF("s: %u, len %zu\n", s, len);
+
+ const u8 *min_accel_offset = c;
+ if (!m->has_accel || len < ACCEL_MIN_LEN) {
+ min_accel_offset = c_end;
+ goto without_accel;
+ }
+
+ goto with_accel;
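+
+/* The scan below alternates between two copies of the loop: with_accel
+ * may skip ahead via run_mcsheng_accel64, whose BIG/SMALL_ACCEL_PENALTY
+ * pushes min_accel_offset forward so we drop back to the plain
+ * without_accel loop when skipping stops paying off. */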
+
+without_accel:
+ do {
+ assert(c < min_accel_offset);
+ int do_accept;
+ if (!s) {
+ goto exit;
+ } else if (s < sheng_end) {
+ s = doSheng64(m, &c, min_accel_offset, c_end, s, 0);
+ do_accept = mode != NO_MATCHES && get_aux64(m, s)->accept;
+ } else {
+ s = doNormal64_16(m, &c, min_accel_offset, s, 0, mode);
+
+ do_accept = mode != NO_MATCHES && (s & ACCEPT_FLAG);
+ }
+
+ if (do_accept) {
+ if (mode == STOP_AT_MATCH) {
+ *state = s & STATE_MASK;
+ *c_final = c - 1;
+ return MO_MATCHES_PENDING;
+ }
+
+ u64a loc = (c - 1) - buf + offAdj + 1;
+
+ if (single) {
+ DEBUG_PRINTF("reporting %u\n", m->arb_report);
+ if (cb(0, loc, m->arb_report, ctxt) == MO_HALT_MATCHING) {
+ return MO_DEAD; /* termination requested */
+ }
+ } else if (doComplexReport64(cb, ctxt, m, s & STATE_MASK, loc, 0,
+ &cached_accept_state,
+ &cached_accept_id)
+ == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ }
+
+ assert(c <= c_end); /* sheng is fuzzy for min_accel_offset */
+ } while (c < min_accel_offset);
+
+ if (c == c_end) {
+ goto exit;
+ }
+
+with_accel:
+ do {
+ assert(c < c_end);
+ int do_accept;
+
+ if (!s) {
+ goto exit;
+ } else if (s < sheng_end) {
+ if (s > m->sheng_accel_limit) {
+ c = run_mcsheng_accel64(m, aux, s, &min_accel_offset, c, c_end);
+ if (c == c_end) {
+ goto exit;
+ } else {
+ goto without_accel;
+ }
+ }
+ s = doSheng64(m, &c, c_end, c_end, s, 1);
+ do_accept = mode != NO_MATCHES && get_aux64(m, s)->accept;
+ } else {
+ if (s & ACCEL_FLAG) {
+ DEBUG_PRINTF("skipping\n");
+ s &= STATE_MASK;
+ c = run_mcsheng_accel64(m, aux, s, &min_accel_offset, c, c_end);
+ if (c == c_end) {
+ goto exit;
+ } else {
+ goto without_accel;
+ }
+ }
+
+ s = doNormal64_16(m, &c, c_end, s, 1, mode);
+ do_accept = mode != NO_MATCHES && (s & ACCEPT_FLAG);
+ }
+
+ if (do_accept) {
+ if (mode == STOP_AT_MATCH) {
+ *state = s & STATE_MASK;
+ *c_final = c - 1;
+ return MO_MATCHES_PENDING;
+ }
+
+ u64a loc = (c - 1) - buf + offAdj + 1;
+
+ if (single) {
+ DEBUG_PRINTF("reporting %u\n", m->arb_report);
+ if (cb(0, loc, m->arb_report, ctxt) == MO_HALT_MATCHING) {
+ return MO_DEAD; /* termination requested */
+ }
+ } else if (doComplexReport64(cb, ctxt, m, s & STATE_MASK, loc, 0,
+ &cached_accept_state,
+ &cached_accept_id)
+ == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ }
+
+ assert(c <= c_end);
+ } while (c < c_end);
+
+exit:
+ s &= STATE_MASK;
+
+ if (mode == STOP_AT_MATCH) {
+ *c_final = c_end;
+ }
+ *state = s;
+
+ return MO_ALIVE;
+}
+
+static never_inline
+char mcsheng64Exec16_i_cb(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **final_point) {
+ return mcsheng64Exec16_i(m, state, buf, len, offAdj, cb, ctxt, single,
+ final_point, CALLBACK_OUTPUT);
+}
+
+static never_inline
+char mcsheng64Exec16_i_sam(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **final_point) {
+ return mcsheng64Exec16_i(m, state, buf, len, offAdj, cb, ctxt, single,
+ final_point, STOP_AT_MATCH);
+}
+
+static never_inline
+char mcsheng64Exec16_i_nm(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **final_point) {
+ return mcsheng64Exec16_i(m, state, buf, len, offAdj, cb, ctxt, single,
+ final_point, NO_MATCHES);
+}
+
+static really_inline
+char mcsheng64Exec16_i_ni(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **final_point,
+ enum MatchMode mode) {
+ if (mode == CALLBACK_OUTPUT) {
+ return mcsheng64Exec16_i_cb(m, state, buf, len, offAdj, cb, ctxt,
+ single, final_point);
+ } else if (mode == STOP_AT_MATCH) {
+ return mcsheng64Exec16_i_sam(m, state, buf, len, offAdj, cb, ctxt,
+ single, final_point);
+ } else {
+        assert(mode == NO_MATCHES);
+ return mcsheng64Exec16_i_nm(m, state, buf, len, offAdj, cb, ctxt,
+ single, final_point);
+ }
+}
+
+static really_inline
+u32 doNormal64_8(const struct mcsheng64 *m, const u8 **c_inout, const u8 *end, u32 s,
+ char do_accel, enum MatchMode mode) {
+ const u8 *c = *c_inout;
+ u32 sheng_end = m->sheng_end;
+ u32 accel_limit = m->accel_limit_8;
+ u32 accept_limit = m->accept_limit_8;
+
+ const u32 as = m->alphaShift;
+ const u8 *succ_table = (const u8 *)((const char *)m
+ + sizeof(struct mcsheng64));
+    /* Adjust the start of the succ table so it can be indexed directly by
+     * state id (rather than converting to a normal id first). As states with
+     * low ids are never processed here, no data before the succ table is
+     * ever read. Note: due to the size of the sheng tables, the succ_table
+     * pointer will still point inside the engine. */
+ succ_table -= sheng_end << as;
+
+ assert(s >= sheng_end);
+ while (c < end && s >= sheng_end) {
+ u8 cprime = m->remap[*c];
+ DEBUG_PRINTF("c: %02hhx '%c' cp:%02hhx\n", *c,
+ ourisprint(*c) ? *c : '?', cprime);
+ s = succ_table[(s << as) + cprime];
+
+ DEBUG_PRINTF("s: %u\n", s);
+ c++;
+ if (do_accel) {
+ if (s >= accel_limit) {
+ break;
+ }
+ } else {
+ if (mode != NO_MATCHES && s >= accept_limit) {
+ break;
+ }
+ }
+ }
+ *c_inout = c;
+ return s;
+}
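
doNormal64_8 and doNormal64_16 differ in how state properties are encoded. The 16-bit table has spare bits, so ACCEPT_FLAG and ACCEL_FLAG travel inside each transition entry and the id is recovered with STATE_MASK; the 8-bit table has none, so ids are grouped by property (ordinary states first, accelerable states from accel_limit, accepting states from accept_limit, with accept_limit >= accel_limit) and compared against the limits. A sketch of the two decodings; the flag values are assumptions for illustration:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed bit layout for the sketch; not hyperscan's actual constants. */
    #define ACCEPT_FLAG 0x8000u
    #define ACCEL_FLAG  0x4000u
    #define STATE_MASK  0x3fffu

    /* 16-bit scheme: properties ride along as flag bits in the entry. */
    static bool accepts16(uint16_t entry)   { return entry & ACCEPT_FLAG; }
    static uint16_t state16(uint16_t entry) { return entry & STATE_MASK; }

    /* 8-bit scheme: no spare bits, so the numeric id encodes the property
     * via its position relative to the accel/accept limits. */
    static bool accepts8(uint8_t s, uint8_t accept_limit) {
        return s >= accept_limit;
    }

    int main(void) {
        assert(accepts16(ACCEPT_FLAG | 7) && state16(ACCEPT_FLAG | 7) == 7);
        assert(accepts8(200, 180) && !accepts8(100, 180));
        return 0;
    }
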
+
+static really_inline
+char mcsheng64Exec8_i(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **c_final, enum MatchMode mode) {
+ if (!len) {
+ *c_final = buf;
+ return MO_ALIVE;
+ }
+ u32 s = *state;
+ const u8 *c = buf;
+ const u8 *c_end = buf + len;
+ const u8 sheng_end = m->sheng_end;
+
+ const struct mstate_aux *aux
+ = (const struct mstate_aux *)((const char *)m + m->aux_offset
+ - sizeof(struct NFA));
+ u32 accept_limit = m->accept_limit_8;
+
+ u32 cached_accept_id = 0;
+ u32 cached_accept_state = 0;
+
+ DEBUG_PRINTF("accel %hu, accept %u\n", m->accel_limit_8, accept_limit);
+
+ DEBUG_PRINTF("s: %u, len %zu\n", s, len);
+
+ const u8 *min_accel_offset = c;
+ if (!m->has_accel || len < ACCEL_MIN_LEN) {
+ min_accel_offset = c_end;
+ goto without_accel;
+ }
+
+ goto with_accel;
+
+without_accel:
+ do {
+ assert(c < min_accel_offset);
+ if (!s) {
+ goto exit;
+ } else if (s < sheng_end) {
+ s = doSheng64(m, &c, min_accel_offset, c_end, s, 0);
+ } else {
+ s = doNormal64_8(m, &c, min_accel_offset, s, 0, mode);
+ assert(c <= min_accel_offset);
+ }
+
+ if (mode != NO_MATCHES && s >= accept_limit) {
+ if (mode == STOP_AT_MATCH) {
+ DEBUG_PRINTF("match - pausing\n");
+ *state = s;
+ *c_final = c - 1;
+ return MO_MATCHES_PENDING;
+ }
+
+ u64a loc = (c - 1) - buf + offAdj + 1;
+ if (single) {
+ DEBUG_PRINTF("reporting %u\n", m->arb_report);
+ if (cb(0, loc, m->arb_report, ctxt) == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ } else if (doComplexReport64(cb, ctxt, m, s, loc, 0,
+ &cached_accept_state,
+ &cached_accept_id)
+ == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ }
+
+ assert(c <= c_end); /* sheng is fuzzy for min_accel_offset */
+ } while (c < min_accel_offset);
+
+ if (c == c_end) {
+ goto exit;
+ }
+
+with_accel:
+ do {
+ u32 accel_limit = m->accel_limit_8;
+
+ assert(c < c_end);
+ if (!s) {
+ goto exit;
+ } else if (s < sheng_end) {
+ if (s > m->sheng_accel_limit) {
+ c = run_mcsheng_accel64(m, aux, s, &min_accel_offset, c, c_end);
+ if (c == c_end) {
+ goto exit;
+ } else {
+ goto without_accel;
+ }
+ }
+ s = doSheng64(m, &c, c_end, c_end, s, 1);
+ } else {
+ if (s >= accel_limit && aux[s].accel_offset) {
+ c = run_mcsheng_accel64(m, aux, s, &min_accel_offset, c, c_end);
+ if (c == c_end) {
+ goto exit;
+ } else {
+ goto without_accel;
+ }
+ }
+ s = doNormal64_8(m, &c, c_end, s, 1, mode);
+ }
+
+ if (mode != NO_MATCHES && s >= accept_limit) {
+ if (mode == STOP_AT_MATCH) {
+ DEBUG_PRINTF("match - pausing\n");
+ *state = s;
+ *c_final = c - 1;
+ return MO_MATCHES_PENDING;
+ }
+
+ u64a loc = (c - 1) - buf + offAdj + 1;
+ if (single) {
+ DEBUG_PRINTF("reporting %u\n", m->arb_report);
+ if (cb(0, loc, m->arb_report, ctxt) == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ } else if (doComplexReport64(cb, ctxt, m, s, loc, 0,
+ &cached_accept_state,
+ &cached_accept_id)
+ == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ }
+
+ assert(c <= c_end);
+ } while (c < c_end);
+
+exit:
+ *state = s;
+ if (mode == STOP_AT_MATCH) {
+ *c_final = c_end;
+ }
+ return MO_ALIVE;
+}
+
+static never_inline
+char mcsheng64Exec8_i_cb(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **final_point) {
+ return mcsheng64Exec8_i(m, state, buf, len, offAdj, cb, ctxt, single,
+ final_point, CALLBACK_OUTPUT);
+}
+
+static never_inline
+char mcsheng64Exec8_i_sam(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **final_point) {
+ return mcsheng64Exec8_i(m, state, buf, len, offAdj, cb, ctxt, single,
+ final_point, STOP_AT_MATCH);
+}
+
+static never_inline
+char mcsheng64Exec8_i_nm(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **final_point) {
+ return mcsheng64Exec8_i(m, state, buf, len, offAdj, cb, ctxt, single,
+ final_point, NO_MATCHES);
+}
+
+static really_inline
+char mcsheng64Exec8_i_ni(const struct mcsheng64 *m, u32 *state, const u8 *buf,
+ size_t len, u64a offAdj, NfaCallback cb, void *ctxt,
+ char single, const u8 **final_point,
+ enum MatchMode mode) {
+ if (mode == CALLBACK_OUTPUT) {
+ return mcsheng64Exec8_i_cb(m, state, buf, len, offAdj, cb, ctxt, single,
+ final_point);
+ } else if (mode == STOP_AT_MATCH) {
+ return mcsheng64Exec8_i_sam(m, state, buf, len, offAdj, cb, ctxt,
+ single, final_point);
+ } else {
+ assert(mode == NO_MATCHES);
+ return mcsheng64Exec8_i_nm(m, state, buf, len, offAdj, cb, ctxt, single,
+ final_point);
+ }
+}
+
+static really_inline
+char mcshengCheckEOD64(const struct NFA *nfa, u32 s, u64a offset,
+ NfaCallback cb, void *ctxt) {
+ const struct mcsheng64 *m = getImplNfa(nfa);
+ const struct mstate_aux *aux = get_aux64(m, s);
+
+ if (!aux->accept_eod) {
+ return MO_CONTINUE_MATCHING;
+ }
+ return doComplexReport64(cb, ctxt, m, s, offset, 1, NULL, NULL);
+}
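
Throughout these functions the callback's return value steers the engine: MO_HALT_MATCHING tells it to stop (surfacing to the caller as MO_DEAD), anything else lets the scan continue. A minimal callback in the shape of the cb(0, loc, report, ctxt) calls above might look like the following sketch; the context type is an assumption for illustration.

    #include <stdint.h>

    #define MO_CONTINUE_MATCHING 1
    #define MO_HALT_MATCHING     0

    struct match_limit {
        unsigned seen;
        unsigned max;
    };

    /* Stop the engine once a caller-chosen number of matches is reported. */
    static int count_matches(uint64_t from, uint64_t to, unsigned report,
                             void *ctxt) {
        (void)from; (void)to; (void)report;
        struct match_limit *lim = ctxt;
        return ++lim->seen >= lim->max ? MO_HALT_MATCHING
                                       : MO_CONTINUE_MATCHING;
    }

    int main(void) {
        struct match_limit lim = { 0, 2 };
        count_matches(0, 5, 1, &lim);        /* first match: continue */
        return count_matches(0, 9, 1, &lim); /* second: halt (returns 0) */
    }
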
+
+static really_inline
+char nfaExecMcSheng64_16_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
+ const u8 *hend, NfaCallback cb, void *context,
+ struct mq *q, char single, s64a end,
+ enum MatchMode mode) {
+ assert(n->type == MCSHENG_64_NFA_16);
+ const struct mcsheng64 *m = getImplNfa(n);
+ s64a sp;
+
+ assert(ISALIGNED_N(q->state, 2));
+ u32 s = *(u16 *)q->state;
+
+ if (q->report_current) {
+ assert(s);
+ assert(get_aux64(m, s)->accept);
+
+ int rv;
+ if (single) {
+ DEBUG_PRINTF("reporting %u\n", m->arb_report);
+ rv = cb(0, q_cur_offset(q), m->arb_report, context);
+ } else {
+ u32 cached_accept_id = 0;
+ u32 cached_accept_state = 0;
+
+ rv = doComplexReport64(cb, context, m, s, q_cur_offset(q), 0,
+ &cached_accept_state, &cached_accept_id);
+ }
+
+ q->report_current = 0;
+
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ }
+
+ sp = q_cur_loc(q);
+ q->cur++;
+
+ const u8 *cur_buf = sp < 0 ? hend : buffer;
+
+ assert(q->cur);
+ if (mode != NO_MATCHES && q->items[q->cur - 1].location > end) {
+ DEBUG_PRINTF("this is as far as we go\n");
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location = end;
+ *(u16 *)q->state = s;
+ return MO_ALIVE;
+ }
+
+ while (1) {
+ assert(q->cur < q->end);
+ s64a ep = q->items[q->cur].location;
+ if (mode != NO_MATCHES) {
+ ep = MIN(ep, end);
+ }
+
+ assert(ep >= sp);
+
+ s64a local_ep = ep;
+ if (sp < 0) {
+ local_ep = MIN(0, ep);
+ }
+
+ /* do main buffer region */
+ const u8 *final_look;
+ char rv = mcsheng64Exec16_i_ni(m, &s, cur_buf + sp, local_ep - sp,
+ offset + sp, cb, context, single,
+ &final_look, mode);
+ if (rv == MO_DEAD) {
+ *(u16 *)q->state = 0;
+ return MO_DEAD;
+ }
+ if (mode == STOP_AT_MATCH && rv == MO_MATCHES_PENDING) {
+ DEBUG_PRINTF("this is as far as we go\n");
+ DEBUG_PRINTF("state %u final_look %zd\n", s, final_look - cur_buf);
+
+ assert(q->cur);
+ assert(final_look != cur_buf + local_ep);
+
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location = final_look - cur_buf + 1; /* due to
+ * early -1 */
+ *(u16 *)q->state = s;
+ return MO_MATCHES_PENDING;
+ }
+
+ assert(rv == MO_ALIVE);
+ assert(q->cur);
+ if (mode != NO_MATCHES && q->items[q->cur].location > end) {
+ DEBUG_PRINTF("this is as far as we go\n");
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location = end;
+ *(u16 *)q->state = s;
+ return MO_ALIVE;
+ }
+
+ sp = local_ep;
+
+ if (sp == 0) {
+ cur_buf = buffer;
+ }
+
+ if (sp != ep) {
+ continue;
+ }
+
+ switch (q->items[q->cur].type) {
+ case MQE_TOP:
+ assert(sp + offset || !s);
+ if (sp + offset == 0) {
+ s = m->start_anchored;
+ break;
+ }
+ s = mcshengEnableStarts64(m, s);
+ break;
+ case MQE_END:
+ *(u16 *)q->state = s;
+ q->cur++;
+ return s ? MO_ALIVE : MO_DEAD;
+ default:
+ assert(!"invalid queue event");
+ }
+
+ q->cur++;
+ }
+}
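
The queue loop above consumes struct mq items whose locations are relative to the start of the scan buffer; negative locations address the history buffer (through hend), and the engine switches to the main buffer once the span start reaches zero. A schematic of that convention, with simplified stand-in types:

    #include <stdint.h>

    enum mqe_type { MQE_START, MQE_TOP, MQE_END };

    struct mq_item {
        enum mqe_type type;
        int64_t location; /* relative to buffer start; < 0 means history */
    };

    /* Mirrors the cur_buf selection in the loop: spans that begin before
     * the buffer read from the end of the history instead. */
    static const uint8_t *span_buf(int64_t sp, const uint8_t *buffer,
                                   const uint8_t *hend) {
        return sp < 0 ? hend : buffer;
    }

    /* e.g. resume 3 bytes back in history, take a TOP at offset 0, then
     * scan to offset 16:
     *     { MQE_START, -3 }, { MQE_TOP, 0 }, { MQE_END, 16 } */
    int main(void) {
        uint8_t history[8] = { 0 }, buffer[8] = { 0 };
        const uint8_t *hend = history + sizeof(history);
        return span_buf(-3, buffer, hend) == hend ? 0 : 1;
    }
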
+
+static really_inline
+char nfaExecMcSheng64_8_Q2i(const struct NFA *n, u64a offset, const u8 *buffer,
+ const u8 *hend, NfaCallback cb, void *context,
+ struct mq *q, char single, s64a end,
+ enum MatchMode mode) {
+ assert(n->type == MCSHENG_64_NFA_8);
+ const struct mcsheng64 *m = getImplNfa(n);
+ s64a sp;
+
+ u32 s = *(u8 *)q->state;
+
+ if (q->report_current) {
+ assert(s);
+ assert(s >= m->accept_limit_8);
+
+ int rv;
+ if (single) {
+ DEBUG_PRINTF("reporting %u\n", m->arb_report);
+
+ rv = cb(0, q_cur_offset(q), m->arb_report, context);
+ } else {
+ u32 cached_accept_id = 0;
+ u32 cached_accept_state = 0;
+
+ rv = doComplexReport64(cb, context, m, s, q_cur_offset(q), 0,
+ &cached_accept_state, &cached_accept_id);
+ }
+
+ q->report_current = 0;
+
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ }
+
+ sp = q_cur_loc(q);
+ q->cur++;
+
+ const u8 *cur_buf = sp < 0 ? hend : buffer;
+
+ if (mode != NO_MATCHES && q->items[q->cur - 1].location > end) {
+ DEBUG_PRINTF("this is as far as we go\n");
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location = end;
+ *(u8 *)q->state = s;
+ return MO_ALIVE;
+ }
+
+ while (1) {
+ DEBUG_PRINTF("%s @ %llu\n", q->items[q->cur].type == MQE_TOP ? "TOP" :
+ q->items[q->cur].type == MQE_END ? "END" : "???",
+ q->items[q->cur].location + offset);
+ assert(q->cur < q->end);
+ s64a ep = q->items[q->cur].location;
+ if (mode != NO_MATCHES) {
+ ep = MIN(ep, end);
+ }
+
+ assert(ep >= sp);
+
+ s64a local_ep = ep;
+ if (sp < 0) {
+ local_ep = MIN(0, ep);
+ }
+
+ const u8 *final_look;
+ char rv = mcsheng64Exec8_i_ni(m, &s, cur_buf + sp, local_ep - sp,
+ offset + sp, cb, context, single,
+ &final_look, mode);
+        if (rv == MO_DEAD) {
+ *(u8 *)q->state = 0;
+ return MO_DEAD;
+ }
+ if (mode == STOP_AT_MATCH && rv == MO_MATCHES_PENDING) {
+ DEBUG_PRINTF("this is as far as we go\n");
+ DEBUG_PRINTF("state %u final_look %zd\n", s, final_look - cur_buf);
+
+ assert(q->cur);
+ assert(final_look != cur_buf + local_ep);
+
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location = final_look - cur_buf + 1; /* due to
+ * early -1 */
+ *(u8 *)q->state = s;
+ return MO_MATCHES_PENDING;
+ }
+
+ assert(rv == MO_ALIVE);
+ assert(q->cur);
+ if (mode != NO_MATCHES && q->items[q->cur].location > end) {
+ DEBUG_PRINTF("this is as far as we go\n");
+ assert(q->cur);
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location = end;
+ *(u8 *)q->state = s;
+ return MO_ALIVE;
+ }
+
+ sp = local_ep;
+
+ if (sp == 0) {
+ cur_buf = buffer;
+ }
+
+ if (sp != ep) {
+ continue;
+ }
+
+ switch (q->items[q->cur].type) {
+ case MQE_TOP:
+ assert(sp + offset || !s);
+ if (sp + offset == 0) {
+ s = (u8)m->start_anchored;
+ break;
+ }
+ s = mcshengEnableStarts64(m, s);
+ break;
+ case MQE_END:
+ *(u8 *)q->state = s;
+ q->cur++;
+ return s ? MO_ALIVE : MO_DEAD;
+ default:
+ assert(!"invalid queue event");
+ }
+
+ q->cur++;
+ }
+}
+
+char nfaExecMcSheng64_8_Q(const struct NFA *n, struct mq *q, s64a end) {
+ u64a offset = q->offset;
+ const u8 *buffer = q->buffer;
+ NfaCallback cb = q->cb;
+ void *context = q->context;
+ assert(n->type == MCSHENG_64_NFA_8);
+ const struct mcsheng64 *m = getImplNfa(n);
+ const u8 *hend = q->history + q->hlength;
+
+ return nfaExecMcSheng64_8_Q2i(n, offset, buffer, hend, cb, context, q,
+ m->flags & MCSHENG_FLAG_SINGLE, end,
+ CALLBACK_OUTPUT);
+}
+
+char nfaExecMcSheng64_16_Q(const struct NFA *n, struct mq *q, s64a end) {
+ u64a offset = q->offset;
+ const u8 *buffer = q->buffer;
+ NfaCallback cb = q->cb;
+ void *context = q->context;
+ assert(n->type == MCSHENG_64_NFA_16);
+ const struct mcsheng64 *m = getImplNfa(n);
+ const u8 *hend = q->history + q->hlength;
+
+ return nfaExecMcSheng64_16_Q2i(n, offset, buffer, hend, cb, context, q,
+ m->flags & MCSHENG_FLAG_SINGLE, end,
+ CALLBACK_OUTPUT);
+}
+
+char nfaExecMcSheng64_8_reportCurrent(const struct NFA *n, struct mq *q) {
+ const struct mcsheng64 *m = getImplNfa(n);
+ NfaCallback cb = q->cb;
+ void *ctxt = q->context;
+ u32 s = *(u8 *)q->state;
+ u8 single = m->flags & MCSHENG_FLAG_SINGLE;
+ u64a offset = q_cur_offset(q);
+ assert(q_cur_type(q) == MQE_START);
+ assert(s);
+
+ if (s >= m->accept_limit_8) {
+ if (single) {
+ DEBUG_PRINTF("reporting %u\n", m->arb_report);
+ cb(0, offset, m->arb_report, ctxt);
+ } else {
+ u32 cached_accept_id = 0;
+ u32 cached_accept_state = 0;
+
+ doComplexReport64(cb, ctxt, m, s, offset, 0, &cached_accept_state,
+ &cached_accept_id);
+ }
+ }
+
+ return 0;
+}
+
+char nfaExecMcSheng64_16_reportCurrent(const struct NFA *n, struct mq *q) {
+ const struct mcsheng64 *m = getImplNfa(n);
+ NfaCallback cb = q->cb;
+ void *ctxt = q->context;
+ u32 s = *(u16 *)q->state;
+ const struct mstate_aux *aux = get_aux64(m, s);
+ u8 single = m->flags & MCSHENG_FLAG_SINGLE;
+ u64a offset = q_cur_offset(q);
+ assert(q_cur_type(q) == MQE_START);
+ DEBUG_PRINTF("state %u\n", s);
+ assert(s);
+
+ if (aux->accept) {
+ if (single) {
+ DEBUG_PRINTF("reporting %u\n", m->arb_report);
+ cb(0, offset, m->arb_report, ctxt);
+ } else {
+ u32 cached_accept_id = 0;
+ u32 cached_accept_state = 0;
+
+ doComplexReport64(cb, ctxt, m, s, offset, 0, &cached_accept_state,
+ &cached_accept_id);
+ }
+ }
+
+ return 0;
+}
+
+static
+char mcshengHasAccept64(const struct mcsheng64 *m, const struct mstate_aux *aux,
+ ReportID report) {
+ assert(m && aux);
+
+ if (!aux->accept) {
+ return 0;
+ }
+
+ const struct report_list *rl = (const struct report_list *)
+ ((const char *)m + aux->accept - sizeof(struct NFA));
+ assert(ISALIGNED_N(rl, 4));
+
+ DEBUG_PRINTF("report list has %u entries\n", rl->count);
+
+ for (u32 i = 0; i < rl->count; i++) {
+ if (rl->report[i] == report) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
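
The report list walked in mcshengHasAccept64 is a count-prefixed array reached via aux->accept, offset back by sizeof(struct NFA) because the mcsheng64 header follows the NFA header. A freestanding sketch of the same scan, with a fixed-size array standing in for the real variable-length layout:

    #include <stdbool.h>
    #include <stdint.h>

    struct report_list_sketch {
        uint32_t count;
        uint32_t report[4]; /* the real list is variable-length */
    };

    static bool has_report(const struct report_list_sketch *rl,
                           uint32_t report) {
        for (uint32_t i = 0; i < rl->count; i++) {
            if (rl->report[i] == report) {
                return true;
            }
        }
        return false;
    }

    int main(void) {
        struct report_list_sketch rl = { 2, { 10, 42, 0, 0 } };
        return has_report(&rl, 42) ? 0 : 1;
    }
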
+
+char nfaExecMcSheng64_8_inAccept(const struct NFA *n, ReportID report,
+ struct mq *q) {
+ assert(n && q);
+
+ const struct mcsheng64 *m = getImplNfa(n);
+ u8 s = *(u8 *)q->state;
+ DEBUG_PRINTF("checking accepts for %hhu\n", s);
+
+ return mcshengHasAccept64(m, get_aux64(m, s), report);
+}
+
+char nfaExecMcSheng64_8_inAnyAccept(const struct NFA *n, struct mq *q) {
+ assert(n && q);
+
+ const struct mcsheng64 *m = getImplNfa(n);
+ u8 s = *(u8 *)q->state;
+ DEBUG_PRINTF("checking accepts for %hhu\n", s);
+
+ return !!get_aux64(m, s)->accept;
+}
+
+char nfaExecMcSheng64_16_inAccept(const struct NFA *n, ReportID report,
+ struct mq *q) {
+ assert(n && q);
+
+ const struct mcsheng64 *m = getImplNfa(n);
+ u16 s = *(u16 *)q->state;
+ DEBUG_PRINTF("checking accepts for %hu\n", s);
+
+ return mcshengHasAccept64(m, get_aux64(m, s), report);
+}
+
+char nfaExecMcSheng64_16_inAnyAccept(const struct NFA *n, struct mq *q) {
+ assert(n && q);
+
+ const struct mcsheng64 *m = getImplNfa(n);
+ u16 s = *(u16 *)q->state;
+ DEBUG_PRINTF("checking accepts for %hu\n", s);
+
+ return !!get_aux64(m, s)->accept;
+}
+
+char nfaExecMcSheng64_8_Q2(const struct NFA *n, struct mq *q, s64a end) {
+ u64a offset = q->offset;
+ const u8 *buffer = q->buffer;
+ NfaCallback cb = q->cb;
+ void *context = q->context;
+ assert(n->type == MCSHENG_64_NFA_8);
+ const struct mcsheng64 *m = getImplNfa(n);
+ const u8 *hend = q->history + q->hlength;
+
+ return nfaExecMcSheng64_8_Q2i(n, offset, buffer, hend, cb, context, q,
+ m->flags & MCSHENG_FLAG_SINGLE, end,
+ STOP_AT_MATCH);
+}
+
+char nfaExecMcSheng64_16_Q2(const struct NFA *n, struct mq *q, s64a end) {
+ u64a offset = q->offset;
+ const u8 *buffer = q->buffer;
+ NfaCallback cb = q->cb;
+ void *context = q->context;
+ assert(n->type == MCSHENG_64_NFA_16);
+ const struct mcsheng64 *m = getImplNfa(n);
+ const u8 *hend = q->history + q->hlength;
+
+ return nfaExecMcSheng64_16_Q2i(n, offset, buffer, hend, cb, context, q,
+ m->flags & MCSHENG_FLAG_SINGLE, end,
+ STOP_AT_MATCH);
+}
+
+char nfaExecMcSheng64_8_QR(const struct NFA *n, struct mq *q, ReportID report) {
+ u64a offset = q->offset;
+ const u8 *buffer = q->buffer;
+ NfaCallback cb = q->cb;
+ void *context = q->context;
+ assert(n->type == MCSHENG_64_NFA_8);
+ const struct mcsheng64 *m = getImplNfa(n);
+ const u8 *hend = q->history + q->hlength;
+
+ char rv = nfaExecMcSheng64_8_Q2i(n, offset, buffer, hend, cb, context, q,
+ m->flags & MCSHENG_FLAG_SINGLE,
+ 0 /* end */, NO_MATCHES);
+ if (rv && nfaExecMcSheng64_8_inAccept(n, report, q)) {
+ return MO_MATCHES_PENDING;
+ } else {
+ return rv;
+ }
+}
+
+char nfaExecMcSheng64_16_QR(const struct NFA *n, struct mq *q, ReportID report) {
+ u64a offset = q->offset;
+ const u8 *buffer = q->buffer;
+ NfaCallback cb = q->cb;
+ void *context = q->context;
+ assert(n->type == MCSHENG_64_NFA_16);
+ const struct mcsheng64 *m = getImplNfa(n);
+ const u8 *hend = q->history + q->hlength;
+
+ char rv = nfaExecMcSheng64_16_Q2i(n, offset, buffer, hend, cb, context, q,
+ m->flags & MCSHENG_FLAG_SINGLE,
+ 0 /* end */, NO_MATCHES);
+
+ if (rv && nfaExecMcSheng64_16_inAccept(n, report, q)) {
+ return MO_MATCHES_PENDING;
+ } else {
+ return rv;
+ }
+}
+
+char nfaExecMcSheng64_8_initCompressedState(const struct NFA *nfa, u64a offset,
+ void *state, UNUSED u8 key) {
+ const struct mcsheng64 *m = getImplNfa(nfa);
+ u8 s = offset ? m->start_floating : m->start_anchored;
+ if (s) {
+ *(u8 *)state = s;
+ return 1;
+ }
+ return 0;
+}
+
+char nfaExecMcSheng64_16_initCompressedState(const struct NFA *nfa, u64a offset,
+ void *state, UNUSED u8 key) {
+ const struct mcsheng64 *m = getImplNfa(nfa);
+ u16 s = offset ? m->start_floating : m->start_anchored;
+ if (s) {
+ unaligned_store_u16(state, s);
+ return 1;
+ }
+ return 0;
+}
+
+char nfaExecMcSheng64_8_testEOD(const struct NFA *nfa, const char *state,
+ UNUSED const char *streamState, u64a offset,
+ NfaCallback callback, void *context) {
+ return mcshengCheckEOD64(nfa, *(const u8 *)state, offset, callback,
+ context);
+}
+
+char nfaExecMcSheng64_16_testEOD(const struct NFA *nfa, const char *state,
+ UNUSED const char *streamState, u64a offset,
+ NfaCallback callback, void *context) {
+ assert(ISALIGNED_N(state, 2));
+ return mcshengCheckEOD64(nfa, *(const u16 *)state, offset, callback,
+ context);
+}
+
+char nfaExecMcSheng64_8_queueInitState(UNUSED const struct NFA *nfa, struct mq *q) {
+ assert(nfa->scratchStateSize == 1);
+ *(u8 *)q->state = 0;
+ return 0;
+}
+
+char nfaExecMcSheng64_16_queueInitState(UNUSED const struct NFA *nfa, struct mq *q) {
+ assert(nfa->scratchStateSize == 2);
+ assert(ISALIGNED_N(q->state, 2));
+ *(u16 *)q->state = 0;
+ return 0;
+}
+
+char nfaExecMcSheng64_8_queueCompressState(UNUSED const struct NFA *nfa,
+ const struct mq *q, UNUSED s64a loc) {
+ void *dest = q->streamState;
+ const void *src = q->state;
+ assert(nfa->scratchStateSize == 1);
+ assert(nfa->streamStateSize == 1);
+ *(u8 *)dest = *(const u8 *)src;
+ return 0;
+}
+
+char nfaExecMcSheng64_8_expandState(UNUSED const struct NFA *nfa, void *dest,
+ const void *src, UNUSED u64a offset,
+ UNUSED u8 key) {
+ assert(nfa->scratchStateSize == 1);
+ assert(nfa->streamStateSize == 1);
+ *(u8 *)dest = *(const u8 *)src;
+ return 0;
+}
+
+char nfaExecMcSheng64_16_queueCompressState(UNUSED const struct NFA *nfa,
+ const struct mq *q,
+ UNUSED s64a loc) {
+ void *dest = q->streamState;
+ const void *src = q->state;
+ assert(nfa->scratchStateSize == 2);
+ assert(nfa->streamStateSize == 2);
+ assert(ISALIGNED_N(src, 2));
+ unaligned_store_u16(dest, *(const u16 *)(src));
+ return 0;
+}
+
+char nfaExecMcSheng64_16_expandState(UNUSED const struct NFA *nfa, void *dest,
+ const void *src, UNUSED u64a offset,
+ UNUSED u8 key) {
+ assert(nfa->scratchStateSize == 2);
+ assert(nfa->streamStateSize == 2);
+ assert(ISALIGNED_N(dest, 2));
+ *(u16 *)dest = unaligned_load_u16(src);
+ return 0;
+}
+#endif
diff --git a/contrib/libs/hyperscan/src/nfa/mcsheng.h b/contrib/libs/hyperscan/src/nfa/mcsheng.h
index 91872779cd..0329e12128 100644
--- a/contrib/libs/hyperscan/src/nfa/mcsheng.h
+++ b/contrib/libs/hyperscan/src/nfa/mcsheng.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -80,78 +80,78 @@ char nfaExecMcSheng16_expandState(const struct NFA *nfa, void *dest,
#define nfaExecMcSheng16_B_Reverse NFA_API_NO_IMPL
#define nfaExecMcSheng16_zombie_status NFA_API_ZOMBIE_NO_IMPL
-#if defined(HAVE_AVX512VBMI)
-/* 64-8 bit Sheng-McClellan hybrid */
-char nfaExecMcSheng64_8_testEOD(const struct NFA *nfa, const char *state,
- const char *streamState, u64a offset,
- NfaCallback callback, void *context);
-char nfaExecMcSheng64_8_Q(const struct NFA *n, struct mq *q, s64a end);
-char nfaExecMcSheng64_8_Q2(const struct NFA *n, struct mq *q, s64a end);
-char nfaExecMcSheng64_8_QR(const struct NFA *n, struct mq *q, ReportID report);
-char nfaExecMcSheng64_8_reportCurrent(const struct NFA *n, struct mq *q);
-char nfaExecMcSheng64_8_inAccept(const struct NFA *n, ReportID report,
- struct mq *q);
-char nfaExecMcSheng64_8_inAnyAccept(const struct NFA *n, struct mq *q);
-char nfaExecMcSheng64_8_queueInitState(const struct NFA *n, struct mq *q);
-char nfaExecMcSheng64_8_initCompressedState(const struct NFA *n, u64a offset,
- void *state, u8 key);
-char nfaExecMcSheng64_8_queueCompressState(const struct NFA *nfa,
- const struct mq *q, s64a loc);
-char nfaExecMcSheng64_8_expandState(const struct NFA *nfa, void *dest,
- const void *src, u64a offset, u8 key);
+#if defined(HAVE_AVX512VBMI)
+/* 64-8 bit Sheng-McClellan hybrid */
+char nfaExecMcSheng64_8_testEOD(const struct NFA *nfa, const char *state,
+ const char *streamState, u64a offset,
+ NfaCallback callback, void *context);
+char nfaExecMcSheng64_8_Q(const struct NFA *n, struct mq *q, s64a end);
+char nfaExecMcSheng64_8_Q2(const struct NFA *n, struct mq *q, s64a end);
+char nfaExecMcSheng64_8_QR(const struct NFA *n, struct mq *q, ReportID report);
+char nfaExecMcSheng64_8_reportCurrent(const struct NFA *n, struct mq *q);
+char nfaExecMcSheng64_8_inAccept(const struct NFA *n, ReportID report,
+ struct mq *q);
+char nfaExecMcSheng64_8_inAnyAccept(const struct NFA *n, struct mq *q);
+char nfaExecMcSheng64_8_queueInitState(const struct NFA *n, struct mq *q);
+char nfaExecMcSheng64_8_initCompressedState(const struct NFA *n, u64a offset,
+ void *state, u8 key);
+char nfaExecMcSheng64_8_queueCompressState(const struct NFA *nfa,
+ const struct mq *q, s64a loc);
+char nfaExecMcSheng64_8_expandState(const struct NFA *nfa, void *dest,
+ const void *src, u64a offset, u8 key);
+
+#define nfaExecMcSheng64_8_B_Reverse NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_zombie_status NFA_API_ZOMBIE_NO_IMPL
+
+/* 64-16 bit Sheng-McClellan hybrid */
+char nfaExecMcSheng64_16_testEOD(const struct NFA *nfa, const char *state,
+ const char *streamState, u64a offset,
+ NfaCallback callback, void *context);
+char nfaExecMcSheng64_16_Q(const struct NFA *n, struct mq *q, s64a end);
+char nfaExecMcSheng64_16_Q2(const struct NFA *n, struct mq *q, s64a end);
+char nfaExecMcSheng64_16_QR(const struct NFA *n, struct mq *q, ReportID report);
+char nfaExecMcSheng64_16_reportCurrent(const struct NFA *n, struct mq *q);
+char nfaExecMcSheng64_16_inAccept(const struct NFA *n, ReportID report,
+ struct mq *q);
+char nfaExecMcSheng64_16_inAnyAccept(const struct NFA *n, struct mq *q);
+char nfaExecMcSheng64_16_queueInitState(const struct NFA *n, struct mq *q);
+char nfaExecMcSheng64_16_initCompressedState(const struct NFA *n, u64a offset,
+ void *state, u8 key);
+char nfaExecMcSheng64_16_queueCompressState(const struct NFA *nfa,
+ const struct mq *q, s64a loc);
+char nfaExecMcSheng64_16_expandState(const struct NFA *nfa, void *dest,
+ const void *src, u64a offset, u8 key);
+#define nfaExecMcSheng64_16_B_Reverse NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_zombie_status NFA_API_ZOMBIE_NO_IMPL
+#else // !HAVE_AVX512VBMI
+#define nfaExecMcSheng64_8_B_Reverse NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_zombie_status NFA_API_ZOMBIE_NO_IMPL
+#define nfaExecMcSheng64_8_Q NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_Q2 NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_QR NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_inAccept NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_inAnyAccept NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_queueInitState NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_queueCompressState NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_expandState NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_initCompressedState NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_testEOD NFA_API_NO_IMPL
+#define nfaExecMcSheng64_8_reportCurrent NFA_API_NO_IMPL
+
+#define nfaExecMcSheng64_16_B_Reverse NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_zombie_status NFA_API_ZOMBIE_NO_IMPL
+#define nfaExecMcSheng64_16_Q NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_Q2 NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_QR NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_inAccept NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_inAnyAccept NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_queueInitState NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_queueCompressState NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_expandState NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_initCompressedState NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_testEOD NFA_API_NO_IMPL
+#define nfaExecMcSheng64_16_reportCurrent NFA_API_NO_IMPL
+
+#endif //end of HAVE_AVX512VBMI
-#define nfaExecMcSheng64_8_B_Reverse NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_zombie_status NFA_API_ZOMBIE_NO_IMPL
-
-/* 64-16 bit Sheng-McClellan hybrid */
-char nfaExecMcSheng64_16_testEOD(const struct NFA *nfa, const char *state,
- const char *streamState, u64a offset,
- NfaCallback callback, void *context);
-char nfaExecMcSheng64_16_Q(const struct NFA *n, struct mq *q, s64a end);
-char nfaExecMcSheng64_16_Q2(const struct NFA *n, struct mq *q, s64a end);
-char nfaExecMcSheng64_16_QR(const struct NFA *n, struct mq *q, ReportID report);
-char nfaExecMcSheng64_16_reportCurrent(const struct NFA *n, struct mq *q);
-char nfaExecMcSheng64_16_inAccept(const struct NFA *n, ReportID report,
- struct mq *q);
-char nfaExecMcSheng64_16_inAnyAccept(const struct NFA *n, struct mq *q);
-char nfaExecMcSheng64_16_queueInitState(const struct NFA *n, struct mq *q);
-char nfaExecMcSheng64_16_initCompressedState(const struct NFA *n, u64a offset,
- void *state, u8 key);
-char nfaExecMcSheng64_16_queueCompressState(const struct NFA *nfa,
- const struct mq *q, s64a loc);
-char nfaExecMcSheng64_16_expandState(const struct NFA *nfa, void *dest,
- const void *src, u64a offset, u8 key);
-#define nfaExecMcSheng64_16_B_Reverse NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_zombie_status NFA_API_ZOMBIE_NO_IMPL
-#else // !HAVE_AVX512VBMI
-#define nfaExecMcSheng64_8_B_Reverse NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_zombie_status NFA_API_ZOMBIE_NO_IMPL
-#define nfaExecMcSheng64_8_Q NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_Q2 NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_QR NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_inAccept NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_inAnyAccept NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_queueInitState NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_queueCompressState NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_expandState NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_initCompressedState NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_testEOD NFA_API_NO_IMPL
-#define nfaExecMcSheng64_8_reportCurrent NFA_API_NO_IMPL
-
-#define nfaExecMcSheng64_16_B_Reverse NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_zombie_status NFA_API_ZOMBIE_NO_IMPL
-#define nfaExecMcSheng64_16_Q NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_Q2 NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_QR NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_inAccept NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_inAnyAccept NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_queueInitState NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_queueCompressState NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_expandState NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_initCompressedState NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_testEOD NFA_API_NO_IMPL
-#define nfaExecMcSheng64_16_reportCurrent NFA_API_NO_IMPL
-
-#endif //end of HAVE_AVX512VBMI
-
#endif
diff --git a/contrib/libs/hyperscan/src/nfa/mcsheng_compile.cpp b/contrib/libs/hyperscan/src/nfa/mcsheng_compile.cpp
index ffe630c554..fb75e49a35 100644
--- a/contrib/libs/hyperscan/src/nfa/mcsheng_compile.cpp
+++ b/contrib/libs/hyperscan/src/nfa/mcsheng_compile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -244,106 +244,106 @@ void populateBasicInfo(size_t state_size, const dfa_info &info,
}
static
-mstate_aux *getAux64(NFA *n, dstate_id_t i) {
- mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(n);
- mstate_aux *aux_base = (mstate_aux *)((char *)n + m->aux_offset);
-
- mstate_aux *aux = aux_base + i;
- assert((const char *)aux < (const char *)n + m->length);
- return aux;
-}
-
-static
-void createShuffleMasks64(mcsheng64 *m, const dfa_info &info,
- dstate_id_t sheng_end,
- const map<dstate_id_t, AccelScheme> &accel_escape_info) {
- DEBUG_PRINTF("using first %hu states for a sheng\n", sheng_end);
- assert(sheng_end > DEAD_STATE + 1);
- assert(sheng_end <= sizeof(m512) + 1);
- vector<array<u8, sizeof(m512)>> masks;
- masks.resize(info.alpha_size);
- /* -1 to avoid wasting a slot as we do not include dead state */
- vector<dstate_id_t> raw_ids;
- raw_ids.resize(sheng_end - 1);
- for (dstate_id_t s = DEAD_STATE + 1; s < info.states.size(); s++) {
- assert(info.implId(s)); /* should not map to DEAD_STATE */
- if (info.is_sheng(s)) {
- raw_ids[info.extra[s].sheng_id] = s;
- }
- }
- for (u32 i = 0; i < info.alpha_size; i++) {
- if (i == info.alpha_remap[TOP]) {
- continue;
- }
- auto &mask = masks[i];
- assert(sizeof(mask) == sizeof(m512));
- mask.fill(0);
-
- for (dstate_id_t sheng_id = 0; sheng_id < sheng_end - 1; sheng_id++) {
- dstate_id_t raw_id = raw_ids[sheng_id];
- dstate_id_t next_id = info.implId(info.states[raw_id].next[i]);
- if (next_id == DEAD_STATE) {
- next_id = sheng_end - 1;
- } else if (next_id < sheng_end) {
- next_id--;
- }
- DEBUG_PRINTF("%hu: %u->next %hu\n", sheng_id, i, next_id);
- mask[sheng_id] = verify_u8(next_id);
- }
- }
- for (u32 i = 0; i < N_CHARS; i++) {
- assert(info.alpha_remap[i] != info.alpha_remap[TOP]);
- memcpy((u8 *)&m->sheng_succ_masks[i],
- (u8 *)masks[info.alpha_remap[i]].data(), sizeof(m512));
- }
- m->sheng_end = sheng_end;
- m->sheng_accel_limit = sheng_end - 1;
-
- for (dstate_id_t s : raw_ids) {
- if (contains(accel_escape_info, s)) {
- LIMIT_TO_AT_MOST(&m->sheng_accel_limit, info.extra[s].sheng_id);
- }
- }
-}
-
-static
-void populateBasicInfo64(size_t state_size, const dfa_info &info,
- u32 total_size, u32 aux_offset, u32 accel_offset,
- u32 accel_count, ReportID arb, bool single, NFA *nfa) {
- assert(state_size == sizeof(u16) || state_size == sizeof(u8));
-
- nfa->length = total_size;
- nfa->nPositions = info.states.size();
-
- nfa->scratchStateSize = verify_u32(state_size);
- nfa->streamStateSize = verify_u32(state_size);
-
- if (state_size == sizeof(u8)) {
- nfa->type = MCSHENG_64_NFA_8;
- } else {
- nfa->type = MCSHENG_64_NFA_16;
- }
-
- mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa);
- for (u32 i = 0; i < 256; i++) {
- m->remap[i] = verify_u8(info.alpha_remap[i]);
- }
- m->alphaShift = info.getAlphaShift();
- m->length = total_size;
- m->aux_offset = aux_offset;
- m->accel_offset = accel_offset;
- m->arb_report = arb;
- m->state_count = verify_u16(info.size());
- m->start_anchored = info.implId(info.raw.start_anchored);
- m->start_floating = info.implId(info.raw.start_floating);
- m->has_accel = accel_count ? 1 : 0;
-
- if (single) {
- m->flags |= MCSHENG_FLAG_SINGLE;
- }
-}
-
-static
+mstate_aux *getAux64(NFA *n, dstate_id_t i) {
+ mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(n);
+ mstate_aux *aux_base = (mstate_aux *)((char *)n + m->aux_offset);
+
+ mstate_aux *aux = aux_base + i;
+ assert((const char *)aux < (const char *)n + m->length);
+ return aux;
+}
+
+static
+void createShuffleMasks64(mcsheng64 *m, const dfa_info &info,
+ dstate_id_t sheng_end,
+ const map<dstate_id_t, AccelScheme> &accel_escape_info) {
+ DEBUG_PRINTF("using first %hu states for a sheng\n", sheng_end);
+ assert(sheng_end > DEAD_STATE + 1);
+ assert(sheng_end <= sizeof(m512) + 1);
+ vector<array<u8, sizeof(m512)>> masks;
+ masks.resize(info.alpha_size);
+ /* -1 to avoid wasting a slot as we do not include dead state */
+ vector<dstate_id_t> raw_ids;
+ raw_ids.resize(sheng_end - 1);
+ for (dstate_id_t s = DEAD_STATE + 1; s < info.states.size(); s++) {
+ assert(info.implId(s)); /* should not map to DEAD_STATE */
+ if (info.is_sheng(s)) {
+ raw_ids[info.extra[s].sheng_id] = s;
+ }
+ }
+ for (u32 i = 0; i < info.alpha_size; i++) {
+ if (i == info.alpha_remap[TOP]) {
+ continue;
+ }
+ auto &mask = masks[i];
+ assert(sizeof(mask) == sizeof(m512));
+ mask.fill(0);
+
+ for (dstate_id_t sheng_id = 0; sheng_id < sheng_end - 1; sheng_id++) {
+ dstate_id_t raw_id = raw_ids[sheng_id];
+ dstate_id_t next_id = info.implId(info.states[raw_id].next[i]);
+ if (next_id == DEAD_STATE) {
+ next_id = sheng_end - 1;
+ } else if (next_id < sheng_end) {
+ next_id--;
+ }
+ DEBUG_PRINTF("%hu: %u->next %hu\n", sheng_id, i, next_id);
+ mask[sheng_id] = verify_u8(next_id);
+ }
+ }
+ for (u32 i = 0; i < N_CHARS; i++) {
+ assert(info.alpha_remap[i] != info.alpha_remap[TOP]);
+ memcpy((u8 *)&m->sheng_succ_masks[i],
+ (u8 *)masks[info.alpha_remap[i]].data(), sizeof(m512));
+ }
+ m->sheng_end = sheng_end;
+ m->sheng_accel_limit = sheng_end - 1;
+
+ for (dstate_id_t s : raw_ids) {
+ if (contains(accel_escape_info, s)) {
+ LIMIT_TO_AT_MOST(&m->sheng_accel_limit, info.extra[s].sheng_id);
+ }
+ }
+}
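
createShuffleMasks64 emits, for every input character, a 64-byte vector whose lane s holds the successor of sheng state s; at scan time a single byte permute (VPERMB, hence the HAVE_AVX512VBMI guard) advances the state per input byte. A scalar model of what those masks encode, with illustrative sizes:

    #include <stddef.h>
    #include <stdint.h>

    enum { SHENG_STATES = 64, ALPHA = 256 };

    /* masks[c][s] is the next sheng state after reading byte c in state s;
     * the vectorized engine computes this as a permute of masks[c]. */
    static uint8_t sheng_step(const uint8_t masks[ALPHA][SHENG_STATES],
                              uint8_t state, uint8_t byte) {
        return masks[byte][state];
    }

    static uint8_t sheng_run(const uint8_t masks[ALPHA][SHENG_STATES],
                             uint8_t state, const uint8_t *buf, size_t len) {
        for (size_t i = 0; i < len; i++) {
            state = sheng_step(masks, state, buf[i]);
        }
        return state;
    }

    int main(void) {
        static uint8_t masks[ALPHA][SHENG_STATES]; /* zeroed: all -> 0 */
        masks['a'][0] = 1;                         /* 'a' moves 0 -> 1 */
        const uint8_t in[] = "a";
        return sheng_run(masks, 0, in, 1) == 1 ? 0 : 1;
    }
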
+
+static
+void populateBasicInfo64(size_t state_size, const dfa_info &info,
+ u32 total_size, u32 aux_offset, u32 accel_offset,
+ u32 accel_count, ReportID arb, bool single, NFA *nfa) {
+ assert(state_size == sizeof(u16) || state_size == sizeof(u8));
+
+ nfa->length = total_size;
+ nfa->nPositions = info.states.size();
+
+ nfa->scratchStateSize = verify_u32(state_size);
+ nfa->streamStateSize = verify_u32(state_size);
+
+ if (state_size == sizeof(u8)) {
+ nfa->type = MCSHENG_64_NFA_8;
+ } else {
+ nfa->type = MCSHENG_64_NFA_16;
+ }
+
+ mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa);
+ for (u32 i = 0; i < 256; i++) {
+ m->remap[i] = verify_u8(info.alpha_remap[i]);
+ }
+ m->alphaShift = info.getAlphaShift();
+ m->length = total_size;
+ m->aux_offset = aux_offset;
+ m->accel_offset = accel_offset;
+ m->arb_report = arb;
+ m->state_count = verify_u16(info.size());
+ m->start_anchored = info.implId(info.raw.start_anchored);
+ m->start_floating = info.implId(info.raw.start_floating);
+ m->has_accel = accel_count ? 1 : 0;
+
+ if (single) {
+ m->flags |= MCSHENG_FLAG_SINGLE;
+ }
+}
+
+static
size_t calcShermanRegionSize(const dfa_info &info) {
size_t rv = 0;
@@ -371,7 +371,7 @@ void fillInAux(mstate_aux *aux, dstate_id_t i, const dfa_info &info,
/* returns false on error */
static
bool allocateImplId16(dfa_info &info, dstate_id_t sheng_end,
- dstate_id_t *sherman_base) {
+ dstate_id_t *sherman_base) {
info.states[0].impl_id = 0; /* dead is always 0 */
vector<dstate_id_t> norm;
@@ -481,7 +481,7 @@ CharReach get_edge_reach(dstate_id_t u, dstate_id_t v, const dfa_info &info) {
}
#define MAX_SHENG_STATES 16
-#define MAX_SHENG64_STATES 64
+#define MAX_SHENG64_STATES 64
#define MAX_SHENG_LEAKINESS 0.05
using LeakinessCache = ue2_unordered_map<pair<RdfaVertex, u32>, double>;
@@ -535,8 +535,8 @@ double leakiness(const RdfaGraph &g, dfa_info &info,
static
dstate_id_t find_sheng_states(dfa_info &info,
- map<dstate_id_t, AccelScheme> &accel_escape_info,
- size_t max_sheng_states) {
+ map<dstate_id_t, AccelScheme> &accel_escape_info,
+ size_t max_sheng_states) {
RdfaGraph g(info.raw);
auto cyclics = find_vertices_in_cycles(g);
@@ -571,7 +571,7 @@ dstate_id_t find_sheng_states(dfa_info &info,
flat_set<dstate_id_t> considered = { DEAD_STATE };
bool seen_back_edge = false;
while (!to_consider.empty()
- && sheng_states.size() < max_sheng_states) {
+ && sheng_states.size() < max_sheng_states) {
auto v = to_consider.front();
to_consider.pop_front();
if (!considered.insert(g[v].index).second) {
@@ -717,80 +717,80 @@ void fill_in_succ_table_16(NFA *nfa, const dfa_info &info,
}
}
-static
-void fill_in_aux_info64(NFA *nfa, const dfa_info &info,
- const map<dstate_id_t, AccelScheme> &accel_escape_info,
- u32 accel_offset, UNUSED u32 accel_end_offset,
- const vector<u32> &reports,
- const vector<u32> &reports_eod,
- u32 report_base_offset,
- const raw_report_info &ri) {
- mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa);
-
- vector<u32> reportOffsets;
-
- ri.fillReportLists(nfa, report_base_offset, reportOffsets);
-
- for (u32 i = 0; i < info.size(); i++) {
- u16 impl_id = info.implId(i);
- mstate_aux *this_aux = getAux64(nfa, impl_id);
-
- fillInAux(this_aux, i, info, reports, reports_eod, reportOffsets);
- if (contains(accel_escape_info, i)) {
- this_aux->accel_offset = accel_offset;
- accel_offset += info.strat.accelSize();
- assert(accel_offset <= accel_end_offset);
- assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
- info.strat.buildAccel(i, accel_escape_info.at(i),
- (void *)((char *)m + this_aux->accel_offset));
- }
- }
-}
-
-static
-u16 get_edge_flags64(NFA *nfa, dstate_id_t target_impl_id) {
- mstate_aux *aux = getAux64(nfa, target_impl_id);
- u16 flags = 0;
-
- if (aux->accept) {
- flags |= ACCEPT_FLAG;
- }
-
- if (aux->accel_offset) {
- flags |= ACCEL_FLAG;
- }
-
- return flags;
-}
-
-static
-void fill_in_succ_table_64_16(NFA *nfa, const dfa_info &info,
- dstate_id_t sheng_end,
- UNUSED dstate_id_t sherman_base) {
- u16 *succ_table = (u16 *)((char *)nfa + sizeof(NFA) + sizeof(mcsheng64));
-
- u8 alphaShift = info.getAlphaShift();
- assert(alphaShift <= 8);
-
- for (size_t i = 0; i < info.size(); i++) {
- if (!info.is_normal(i)) {
- assert(info.implId(i) < sheng_end || info.is_sherman(i));
- continue;
- }
-
- assert(info.implId(i) < sherman_base);
- u16 normal_id = verify_u16(info.implId(i) - sheng_end);
-
- for (size_t s = 0; s < info.impl_alpha_size; s++) {
- dstate_id_t raw_succ = info.states[i].next[s];
- u16 &entry = succ_table[((size_t)normal_id << alphaShift) + s];
-
- entry = info.implId(raw_succ);
- entry |= get_edge_flags64(nfa, entry);
- }
- }
-}
-
+static
+void fill_in_aux_info64(NFA *nfa, const dfa_info &info,
+ const map<dstate_id_t, AccelScheme> &accel_escape_info,
+ u32 accel_offset, UNUSED u32 accel_end_offset,
+ const vector<u32> &reports,
+ const vector<u32> &reports_eod,
+ u32 report_base_offset,
+ const raw_report_info &ri) {
+ mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa);
+
+ vector<u32> reportOffsets;
+
+ ri.fillReportLists(nfa, report_base_offset, reportOffsets);
+
+ for (u32 i = 0; i < info.size(); i++) {
+ u16 impl_id = info.implId(i);
+ mstate_aux *this_aux = getAux64(nfa, impl_id);
+
+ fillInAux(this_aux, i, info, reports, reports_eod, reportOffsets);
+ if (contains(accel_escape_info, i)) {
+ this_aux->accel_offset = accel_offset;
+ accel_offset += info.strat.accelSize();
+ assert(accel_offset <= accel_end_offset);
+ assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
+ info.strat.buildAccel(i, accel_escape_info.at(i),
+ (void *)((char *)m + this_aux->accel_offset));
+ }
+ }
+}
+
+static
+u16 get_edge_flags64(NFA *nfa, dstate_id_t target_impl_id) {
+ mstate_aux *aux = getAux64(nfa, target_impl_id);
+ u16 flags = 0;
+
+ if (aux->accept) {
+ flags |= ACCEPT_FLAG;
+ }
+
+ if (aux->accel_offset) {
+ flags |= ACCEL_FLAG;
+ }
+
+ return flags;
+}
+
+static
+void fill_in_succ_table_64_16(NFA *nfa, const dfa_info &info,
+ dstate_id_t sheng_end,
+ UNUSED dstate_id_t sherman_base) {
+ u16 *succ_table = (u16 *)((char *)nfa + sizeof(NFA) + sizeof(mcsheng64));
+
+ u8 alphaShift = info.getAlphaShift();
+ assert(alphaShift <= 8);
+
+ for (size_t i = 0; i < info.size(); i++) {
+ if (!info.is_normal(i)) {
+ assert(info.implId(i) < sheng_end || info.is_sherman(i));
+ continue;
+ }
+
+ assert(info.implId(i) < sherman_base);
+ u16 normal_id = verify_u16(info.implId(i) - sheng_end);
+
+ for (size_t s = 0; s < info.impl_alpha_size; s++) {
+ dstate_id_t raw_succ = info.states[i].next[s];
+ u16 &entry = succ_table[((size_t)normal_id << alphaShift) + s];
+
+ entry = info.implId(raw_succ);
+ entry |= get_edge_flags64(nfa, entry);
+ }
+ }
+}
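
fill_in_succ_table_64_16 bakes each successor's properties into the transition entry itself (via get_edge_flags64), so the scan loop in doNormal64_16 tests bits instead of consulting the aux table. The build-time packing, sketched with the same assumed flag values as the earlier decoding sketch:

    #include <stdint.h>

    #define ACCEPT_FLAG 0x8000u /* assumed layout, as in the earlier sketch */
    #define ACCEL_FLAG  0x4000u

    static uint16_t pack_entry(uint16_t impl_id, int accepts,
                               int accelerable) {
        uint16_t entry = impl_id;
        if (accepts) {
            entry |= ACCEPT_FLAG;
        }
        if (accelerable) {
            entry |= ACCEL_FLAG;
        }
        return entry;
    }

    int main(void) {
        return pack_entry(7, 1, 0) == (7 | ACCEPT_FLAG) ? 0 : 1;
    }
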
+
#define MAX_SHERMAN_LIST_LEN 8
static
@@ -1017,19 +1017,19 @@ bytecode_ptr<NFA> mcshengCompile16(dfa_info &info, dstate_id_t sheng_end,
assert(info.getAlphaShift() <= 8);
- // Sherman optimization
- if (info.impl_alpha_size > 16) {
- u16 total_daddy = 0;
- for (u32 i = 0; i < info.size(); i++) {
- find_better_daddy(info, i,
- is_cyclic_near(info.raw, info.raw.start_anchored),
- grey);
- total_daddy += info.extra[i].daddytaken;
- }
-
- DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
- info.size() * info.impl_alpha_size, info.size(),
- info.impl_alpha_size);
+ // Sherman optimization
+ if (info.impl_alpha_size > 16) {
+ u16 total_daddy = 0;
+ for (u32 i = 0; i < info.size(); i++) {
+ find_better_daddy(info, i,
+ is_cyclic_near(info.raw, info.raw.start_anchored),
+ grey);
+ total_daddy += info.extra[i].daddytaken;
+ }
+
+ DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
+ info.size() * info.impl_alpha_size, info.size(),
+ info.impl_alpha_size);
}
u16 sherman_limit;
@@ -1110,160 +1110,160 @@ void fill_in_succ_table_8(NFA *nfa, const dfa_info &info,
}
static
-void fill_in_sherman64(NFA *nfa, dfa_info &info, UNUSED u16 sherman_limit) {
- char *nfa_base = (char *)nfa;
- mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa);
- char *sherman_table = nfa_base + m->sherman_offset;
-
- assert(ISALIGNED_16(sherman_table));
- for (size_t i = 0; i < info.size(); i++) {
- if (!info.is_sherman(i)) {
- continue;
- }
- u16 fs = verify_u16(info.implId(i));
- DEBUG_PRINTF("building sherman %zu impl %hu\n", i, fs);
-
- assert(fs >= sherman_limit);
-
- char *curr_sherman_entry
- = sherman_table + (fs - m->sherman_limit) * SHERMAN_FIXED_SIZE;
- assert(curr_sherman_entry <= nfa_base + m->length);
-
- u8 len = verify_u8(info.impl_alpha_size - info.extra[i].daddytaken);
- assert(len <= 9);
- dstate_id_t d = info.states[i].daddy;
-
- *(u8 *)(curr_sherman_entry + SHERMAN_TYPE_OFFSET) = SHERMAN_STATE;
- *(u8 *)(curr_sherman_entry + SHERMAN_LEN_OFFSET) = len;
- *(u16 *)(curr_sherman_entry + SHERMAN_DADDY_OFFSET) = info.implId(d);
- u8 *chars = (u8 *)(curr_sherman_entry + SHERMAN_CHARS_OFFSET);
-
- for (u16 s = 0; s < info.impl_alpha_size; s++) {
- if (info.states[i].next[s] != info.states[d].next[s]) {
- *(chars++) = (u8)s;
- }
- }
-
- u16 *states = (u16 *)(curr_sherman_entry + SHERMAN_STATES_OFFSET(len));
- for (u16 s = 0; s < info.impl_alpha_size; s++) {
- if (info.states[i].next[s] != info.states[d].next[s]) {
- DEBUG_PRINTF("s overrider %hu dad %hu char next %hu\n", fs,
- info.implId(d),
- info.implId(info.states[i].next[s]));
- u16 entry_val = info.implId(info.states[i].next[s]);
- entry_val |= get_edge_flags64(nfa, entry_val);
- unaligned_store_u16((u8 *)states++, entry_val);
- }
- }
- }
-}
-
-static
-bytecode_ptr<NFA> mcsheng64Compile16(dfa_info &info, dstate_id_t sheng_end,
-                 const map<dstate_id_t, AccelScheme> &accel_escape_info,
- const Grey &grey) {
- DEBUG_PRINTF("building mcsheng 64-16\n");
-
- vector<u32> reports; /* index in ri for the appropriate report list */
- vector<u32> reports_eod; /* as above */
- ReportID arb;
- u8 single;
-
- assert(info.getAlphaShift() <= 8);
-
- // Sherman optimization
- if (info.impl_alpha_size > 16) {
- u16 total_daddy = 0;
- for (u32 i = 0; i < info.size(); i++) {
- find_better_daddy(info, i,
- is_cyclic_near(info.raw, info.raw.start_anchored),
- grey);
- total_daddy += info.extra[i].daddytaken;
- }
-
- DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
- info.size() * info.impl_alpha_size, info.size(),
- info.impl_alpha_size);
- }
-
- u16 sherman_limit;
- if (!allocateImplId16(info, sheng_end, &sherman_limit)) {
- DEBUG_PRINTF("failed to allocate state numbers, %zu states total\n",
- info.size());
- return nullptr;
- }
- u16 count_real_states = sherman_limit - sheng_end;
-
- auto ri = info.strat.gatherReports(reports, reports_eod, &single, &arb);
-
- size_t tran_size = (1 << info.getAlphaShift()) * sizeof(u16)
- * count_real_states;
-
- size_t aux_size = sizeof(mstate_aux) * info.size();
-
- size_t aux_offset = ROUNDUP_16(sizeof(NFA) + sizeof(mcsheng64) + tran_size);
- size_t accel_size = info.strat.accelSize() * accel_escape_info.size();
- size_t accel_offset = ROUNDUP_N(aux_offset + aux_size
- + ri->getReportListSize(), 32);
- size_t sherman_offset = ROUNDUP_16(accel_offset + accel_size);
- size_t sherman_size = calcShermanRegionSize(info);
-
- size_t total_size = sherman_offset + sherman_size;
-
- accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
- assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
-
- auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
- mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa.get());
-
- populateBasicInfo64(sizeof(u16), info, total_size, aux_offset, accel_offset,
- accel_escape_info.size(), arb, single, nfa.get());
- createShuffleMasks64(m, info, sheng_end, accel_escape_info);
-
- /* copy in the mc header information */
- m->sherman_offset = sherman_offset;
- m->sherman_end = total_size;
- m->sherman_limit = sherman_limit;
-
- DEBUG_PRINTF("%hu sheng, %hu norm, %zu total\n", sheng_end,
- count_real_states, info.size());
-
- fill_in_aux_info64(nfa.get(), info, accel_escape_info, accel_offset,
- sherman_offset - sizeof(NFA), reports, reports_eod,
- aux_offset + aux_size, *ri);
-
- fill_in_succ_table_64_16(nfa.get(), info, sheng_end, sherman_limit);
-
- fill_in_sherman64(nfa.get(), info, sherman_limit);
-
- return nfa;
-}
-
-static
-void fill_in_succ_table_64_8(NFA *nfa, const dfa_info &info,
- dstate_id_t sheng_end) {
- u8 *succ_table = (u8 *)nfa + sizeof(NFA) + sizeof(mcsheng64);
-
- u8 alphaShift = info.getAlphaShift();
- assert(alphaShift <= 8);
-
- for (size_t i = 0; i < info.size(); i++) {
- assert(!info.is_sherman(i));
- if (!info.is_normal(i)) {
- assert(info.implId(i) < sheng_end);
- continue;
- }
- u8 normal_id = verify_u8(info.implId(i) - sheng_end);
-
- for (size_t s = 0; s < info.impl_alpha_size; s++) {
- dstate_id_t raw_succ = info.states[i].next[s];
- succ_table[((size_t)normal_id << alphaShift) + s]
- = info.implId(raw_succ);
- }
- }
-}
-
-static
+void fill_in_sherman64(NFA *nfa, dfa_info &info, UNUSED u16 sherman_limit) {
+ char *nfa_base = (char *)nfa;
+ mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa);
+ char *sherman_table = nfa_base + m->sherman_offset;
+
+ assert(ISALIGNED_16(sherman_table));
+ for (size_t i = 0; i < info.size(); i++) {
+ if (!info.is_sherman(i)) {
+ continue;
+ }
+ u16 fs = verify_u16(info.implId(i));
+ DEBUG_PRINTF("building sherman %zu impl %hu\n", i, fs);
+
+ assert(fs >= sherman_limit);
+
+ char *curr_sherman_entry
+ = sherman_table + (fs - m->sherman_limit) * SHERMAN_FIXED_SIZE;
+ assert(curr_sherman_entry <= nfa_base + m->length);
+
+ u8 len = verify_u8(info.impl_alpha_size - info.extra[i].daddytaken);
+ assert(len <= 9);
+ dstate_id_t d = info.states[i].daddy;
+
+ *(u8 *)(curr_sherman_entry + SHERMAN_TYPE_OFFSET) = SHERMAN_STATE;
+ *(u8 *)(curr_sherman_entry + SHERMAN_LEN_OFFSET) = len;
+ *(u16 *)(curr_sherman_entry + SHERMAN_DADDY_OFFSET) = info.implId(d);
+ u8 *chars = (u8 *)(curr_sherman_entry + SHERMAN_CHARS_OFFSET);
+
+ for (u16 s = 0; s < info.impl_alpha_size; s++) {
+ if (info.states[i].next[s] != info.states[d].next[s]) {
+ *(chars++) = (u8)s;
+ }
+ }
+
+ u16 *states = (u16 *)(curr_sherman_entry + SHERMAN_STATES_OFFSET(len));
+ for (u16 s = 0; s < info.impl_alpha_size; s++) {
+ if (info.states[i].next[s] != info.states[d].next[s]) {
+ DEBUG_PRINTF("s overrider %hu dad %hu char next %hu\n", fs,
+ info.implId(d),
+ info.implId(info.states[i].next[s]));
+ u16 entry_val = info.implId(info.states[i].next[s]);
+ entry_val |= get_edge_flags64(nfa, entry_val);
+ unaligned_store_u16((u8 *)states++, entry_val);
+ }
+ }
+ }
+}
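
A sherman record written by fill_in_sherman64 compresses a state down to its differences from a "daddy" state: a marker byte, the override count, the daddy id, the overridden symbols, then the corresponding successors. Schematically (the SHERMAN_*_OFFSET constants locate these fields in the real byte layout; this struct is a conceptual sketch, not the engine's representation):

    #include <stdint.h>

    struct sherman_sketch {
        uint8_t  type;     /* SHERMAN_STATE marker */
        uint8_t  len;      /* number of overridden symbols, at most 9 */
        uint16_t daddy;    /* impl id of the fallback state */
        uint8_t  chars[9]; /* symbols whose successor differs from daddy's */
        /* len unaligned u16 successor entries follow at
         * SHERMAN_STATES_OFFSET(len) */
    };

At run time, doSherman16 scans chars for the current symbol; on a hit it takes the paired successor entry, otherwise it falls through to the daddy's row in the normal succ table.
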
+
+static
+bytecode_ptr<NFA> mcsheng64Compile16(dfa_info &info, dstate_id_t sheng_end,
+                 const map<dstate_id_t, AccelScheme> &accel_escape_info,
+ const Grey &grey) {
+ DEBUG_PRINTF("building mcsheng 64-16\n");
+
+ vector<u32> reports; /* index in ri for the appropriate report list */
+ vector<u32> reports_eod; /* as above */
+ ReportID arb;
+ u8 single;
+
+ assert(info.getAlphaShift() <= 8);
+
+ // Sherman optimization
+ if (info.impl_alpha_size > 16) {
+ u16 total_daddy = 0;
+ for (u32 i = 0; i < info.size(); i++) {
+ find_better_daddy(info, i,
+ is_cyclic_near(info.raw, info.raw.start_anchored),
+ grey);
+ total_daddy += info.extra[i].daddytaken;
+ }
+
+ DEBUG_PRINTF("daddy %hu/%zu states=%zu alpha=%hu\n", total_daddy,
+ info.size() * info.impl_alpha_size, info.size(),
+ info.impl_alpha_size);
+ }
+
+ u16 sherman_limit;
+ if (!allocateImplId16(info, sheng_end, &sherman_limit)) {
+ DEBUG_PRINTF("failed to allocate state numbers, %zu states total\n",
+ info.size());
+ return nullptr;
+ }
+ u16 count_real_states = sherman_limit - sheng_end;
+
+ auto ri = info.strat.gatherReports(reports, reports_eod, &single, &arb);
+
+ size_t tran_size = (1 << info.getAlphaShift()) * sizeof(u16)
+ * count_real_states;
+
+ size_t aux_size = sizeof(mstate_aux) * info.size();
+
+ size_t aux_offset = ROUNDUP_16(sizeof(NFA) + sizeof(mcsheng64) + tran_size);
+ size_t accel_size = info.strat.accelSize() * accel_escape_info.size();
+ size_t accel_offset = ROUNDUP_N(aux_offset + aux_size
+ + ri->getReportListSize(), 32);
+ size_t sherman_offset = ROUNDUP_16(accel_offset + accel_size);
+ size_t sherman_size = calcShermanRegionSize(info);
+
+ size_t total_size = sherman_offset + sherman_size;
+
+ accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
+ assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
+
+ auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
+ mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa.get());
+
+ populateBasicInfo64(sizeof(u16), info, total_size, aux_offset, accel_offset,
+ accel_escape_info.size(), arb, single, nfa.get());
+ createShuffleMasks64(m, info, sheng_end, accel_escape_info);
+
+ /* copy in the mc header information */
+ m->sherman_offset = sherman_offset;
+ m->sherman_end = total_size;
+ m->sherman_limit = sherman_limit;
+
+ DEBUG_PRINTF("%hu sheng, %hu norm, %zu total\n", sheng_end,
+ count_real_states, info.size());
+
+ fill_in_aux_info64(nfa.get(), info, accel_escape_info, accel_offset,
+ sherman_offset - sizeof(NFA), reports, reports_eod,
+ aux_offset + aux_size, *ri);
+
+ fill_in_succ_table_64_16(nfa.get(), info, sheng_end, sherman_limit);
+
+ fill_in_sherman64(nfa.get(), info, sherman_limit);
+
+ return nfa;
+}
+
+static
+void fill_in_succ_table_64_8(NFA *nfa, const dfa_info &info,
+ dstate_id_t sheng_end) {
+ u8 *succ_table = (u8 *)nfa + sizeof(NFA) + sizeof(mcsheng64);
+
+ u8 alphaShift = info.getAlphaShift();
+ assert(alphaShift <= 8);
+
+ for (size_t i = 0; i < info.size(); i++) {
+ assert(!info.is_sherman(i));
+ if (!info.is_normal(i)) {
+ assert(info.implId(i) < sheng_end);
+ continue;
+ }
+ u8 normal_id = verify_u8(info.implId(i) - sheng_end);
+
+ for (size_t s = 0; s < info.impl_alpha_size; s++) {
+ dstate_id_t raw_succ = info.states[i].next[s];
+ succ_table[((size_t)normal_id << alphaShift) + s]
+ = info.implId(raw_succ);
+ }
+ }
+}
+
+static
void allocateImplId8(dfa_info &info, dstate_id_t sheng_end,
const map<dstate_id_t, AccelScheme> &accel_escape_info,
u16 *accel_limit, u16 *accept_limit) {
@@ -1360,58 +1360,58 @@ bytecode_ptr<NFA> mcshengCompile8(dfa_info &info, dstate_id_t sheng_end,
return nfa;
}
-static
-bytecode_ptr<NFA> mcsheng64Compile8(dfa_info &info, dstate_id_t sheng_end,
- const map<dstate_id_t, AccelScheme> &accel_escape_info) {
- DEBUG_PRINTF("building mcsheng 64-8\n");
-
- vector<u32> reports;
- vector<u32> reports_eod;
- ReportID arb;
- u8 single;
-
- auto ri = info.strat.gatherReports(reports, reports_eod, &single, &arb);
-
- size_t normal_count = info.size() - sheng_end;
-
- size_t tran_size = sizeof(u8) * (1 << info.getAlphaShift()) * normal_count;
- size_t aux_size = sizeof(mstate_aux) * info.size();
- size_t aux_offset = ROUNDUP_16(sizeof(NFA) + sizeof(mcsheng64) + tran_size);
- size_t accel_size = info.strat.accelSize() * accel_escape_info.size();
- size_t accel_offset = ROUNDUP_N(aux_offset + aux_size
- + ri->getReportListSize(), 32);
- size_t total_size = accel_offset + accel_size;
-
- DEBUG_PRINTF("aux_size %zu\n", aux_size);
- DEBUG_PRINTF("aux_offset %zu\n", aux_offset);
- DEBUG_PRINTF("rl size %u\n", ri->getReportListSize());
- DEBUG_PRINTF("accel_size %zu\n", accel_size);
- DEBUG_PRINTF("accel_offset %zu\n", accel_offset);
- DEBUG_PRINTF("total_size %zu\n", total_size);
-
- accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
- assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
-
- auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
- mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa.get());
-
- allocateImplId8(info, sheng_end, accel_escape_info, &m->accel_limit_8,
- &m->accept_limit_8);
-
- populateBasicInfo64(sizeof(u8), info, total_size, aux_offset, accel_offset,
- accel_escape_info.size(), arb, single, nfa.get());
- createShuffleMasks64(m, info, sheng_end, accel_escape_info);
-
- fill_in_aux_info64(nfa.get(), info, accel_escape_info, accel_offset,
- total_size - sizeof(NFA), reports, reports_eod,
- aux_offset + aux_size, *ri);
-
- fill_in_succ_table_64_8(nfa.get(), info, sheng_end);
- DEBUG_PRINTF("rl size %zu\n", ri->size());
-
- return nfa;
-}
-
+static
+bytecode_ptr<NFA> mcsheng64Compile8(dfa_info &info, dstate_id_t sheng_end,
+ const map<dstate_id_t, AccelScheme> &accel_escape_info) {
+ DEBUG_PRINTF("building mcsheng 64-8\n");
+
+ vector<u32> reports;
+ vector<u32> reports_eod;
+ ReportID arb;
+ u8 single;
+
+ auto ri = info.strat.gatherReports(reports, reports_eod, &single, &arb);
+
+ size_t normal_count = info.size() - sheng_end;
+
+ size_t tran_size = sizeof(u8) * (1 << info.getAlphaShift()) * normal_count;
+ size_t aux_size = sizeof(mstate_aux) * info.size();
+ size_t aux_offset = ROUNDUP_16(sizeof(NFA) + sizeof(mcsheng64) + tran_size);
+ size_t accel_size = info.strat.accelSize() * accel_escape_info.size();
+ size_t accel_offset = ROUNDUP_N(aux_offset + aux_size
+ + ri->getReportListSize(), 32);
+ size_t total_size = accel_offset + accel_size;
+
+ DEBUG_PRINTF("aux_size %zu\n", aux_size);
+ DEBUG_PRINTF("aux_offset %zu\n", aux_offset);
+ DEBUG_PRINTF("rl size %u\n", ri->getReportListSize());
+ DEBUG_PRINTF("accel_size %zu\n", accel_size);
+ DEBUG_PRINTF("accel_offset %zu\n", accel_offset);
+ DEBUG_PRINTF("total_size %zu\n", total_size);
+
+ accel_offset -= sizeof(NFA); /* adj accel offset to be relative to m */
+ assert(ISALIGNED_N(accel_offset, alignof(union AccelAux)));
+
+ auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
+ mcsheng64 *m = (mcsheng64 *)getMutableImplNfa(nfa.get());
+
+ allocateImplId8(info, sheng_end, accel_escape_info, &m->accel_limit_8,
+ &m->accept_limit_8);
+
+ populateBasicInfo64(sizeof(u8), info, total_size, aux_offset, accel_offset,
+ accel_escape_info.size(), arb, single, nfa.get());
+ createShuffleMasks64(m, info, sheng_end, accel_escape_info);
+
+ fill_in_aux_info64(nfa.get(), info, accel_escape_info, accel_offset,
+ total_size - sizeof(NFA), reports, reports_eod,
+ aux_offset + aux_size, *ri);
+
+ fill_in_succ_table_64_8(nfa.get(), info, sheng_end);
+ DEBUG_PRINTF("rl size %zu\n", ri->size());
+
+ return nfa;
+}
+
bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm) {
if (!cc.grey.allowMcSheng) {
@@ -1431,16 +1431,16 @@ bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
map<dstate_id_t, AccelScheme> accel_escape_info
= info.strat.getAccelInfo(cc.grey);
- auto old_states = info.states;
- dstate_id_t sheng_end = find_sheng_states(info, accel_escape_info, MAX_SHENG_STATES);
+ auto old_states = info.states;
+ dstate_id_t sheng_end = find_sheng_states(info, accel_escape_info, MAX_SHENG_STATES);
if (sheng_end <= DEAD_STATE + 1) {
- info.states = old_states;
+ info.states = old_states;
return nullptr;
}
bytecode_ptr<NFA> nfa;
-
+
if (!using8bit) {
nfa = mcshengCompile16(info, sheng_end, accel_escape_info, cc.grey);
} else {
@@ -1448,7 +1448,67 @@ bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
}
if (!nfa) {
- info.states = old_states;
+ info.states = old_states;
+ return nfa;
+ }
+
+ if (has_eod_reports) {
+ nfa->flags |= NFA_ACCEPTS_EOD;
+ }
+
+ DEBUG_PRINTF("compile done\n");
+ return nfa;
+}
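
A nullptr from mcshengCompile is a soft failure: the DFA simply is not a good fit for the hybrid engine, and the caller falls back to another construction. A hypothetical driver illustrating that contract (mcclellanCompile's real signature carries extra arguments; it is simplified here):

    bytecode_ptr<NFA> buildDfaEngine(raw_dfa &raw, const CompileContext &cc,
                                     const ReportManager &rm) {
        if (auto nfa = mcshengCompile(raw, cc, rm)) {
            return nfa;                       /* hybrid sheng+mcclellan */
        }
        return mcclellanCompile(raw, cc, rm); /* pure McClellan fallback */
    }
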
+
+bytecode_ptr<NFA> mcshengCompile64(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm) {
+ if (!cc.grey.allowMcSheng) {
+ return nullptr;
+ }
+
+ if (!cc.target_info.has_avx512vbmi()) {
+ DEBUG_PRINTF("McSheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
+ return nullptr;
+ }
+
+ mcclellan_build_strat mbs(raw, rm, false);
+ dfa_info info(mbs);
+ bool using8bit = cc.grey.allowMcClellan8 && info.size() <= 256;
+
+ if (!cc.streaming) { /* TODO: work out if we can do the strip in streaming
+ * mode with our semantics */
+ raw.stripExtraEodReports();
+ }
+
+ bool has_eod_reports = raw.hasEodReports();
+
+ map<dstate_id_t, AccelScheme> accel_escape_info
+ = info.strat.getAccelInfo(cc.grey);
+ bool using64state = false; /* default flag */
+ dstate_id_t sheng_end64 =
+ find_sheng_states(info, accel_escape_info, MAX_SHENG64_STATES);
+
+ if (sheng_end64 <= DEAD_STATE + 1) {
+ return nullptr;
+ } else {
+ using64state = true;
+ }
+
+ bytecode_ptr<NFA> nfa;
+
+ if (using64state) {
+ assert((sheng_end64 > 17) && (sheng_end64 <= 65));
+ if (!using8bit) {
+ nfa = mcsheng64Compile16(info, sheng_end64, accel_escape_info, cc.grey);
+ } else {
+ assert(using8bit);
+ nfa = mcsheng64Compile8(info, sheng_end64, accel_escape_info);
+ assert(nfa);
+ assert(nfa->type == MCSHENG_64_NFA_8);
+ }
+ }
+
+ if (!nfa) {
return nfa;
}
@@ -1460,66 +1520,6 @@ bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
return nfa;
}
-bytecode_ptr<NFA> mcshengCompile64(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm) {
- if (!cc.grey.allowMcSheng) {
- return nullptr;
- }
-
- if (!cc.target_info.has_avx512vbmi()) {
- DEBUG_PRINTF("McSheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
- return nullptr;
- }
-
- mcclellan_build_strat mbs(raw, rm, false);
- dfa_info info(mbs);
- bool using8bit = cc.grey.allowMcClellan8 && info.size() <= 256;
-
- if (!cc.streaming) { /* TODO: work out if we can do the strip in streaming
- * mode with our semantics */
- raw.stripExtraEodReports();
- }
-
- bool has_eod_reports = raw.hasEodReports();
-
- map<dstate_id_t, AccelScheme> accel_escape_info
- = info.strat.getAccelInfo(cc.grey);
- bool using64state = false; /* default flag */
- dstate_id_t sheng_end64 =
- find_sheng_states(info, accel_escape_info, MAX_SHENG64_STATES);
-
- if (sheng_end64 <= DEAD_STATE + 1) {
- return nullptr;
- } else {
- using64state = true;
- }
-
- bytecode_ptr<NFA> nfa;
-
- if (using64state) {
- assert((sheng_end64 > 17) && (sheng_end64 <= 65));
- if (!using8bit) {
- nfa = mcsheng64Compile16(info, sheng_end64, accel_escape_info, cc.grey);
- } else {
- assert(using8bit);
- nfa = mcsheng64Compile8(info, sheng_end64, accel_escape_info);
- assert(nfa);
- assert(nfa->type == MCSHENG_64_NFA_8);
- }
- }
-
- if (!nfa) {
- return nfa;
- }
-
- if (has_eod_reports) {
- nfa->flags |= NFA_ACCEPTS_EOD;
- }
-
- DEBUG_PRINTF("compile done\n");
- return nfa;
-}
-
bool has_accel_mcsheng(const NFA *) {
return true; /* consider the sheng region as accelerated */
}
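
mcshengCompile64 is only attempted when the compile target advertises AVX512VBMI, and it returns nullptr when no worthwhile sheng region is found. A sketch of how a build pipeline might stack the two entry points (the driver itself is hypothetical; has_avx512vbmi() is the check used above):

    bytecode_ptr<NFA> tryMcSheng(raw_dfa &raw, const CompileContext &cc,
                                 const ReportManager &rm) {
        if (cc.target_info.has_avx512vbmi()) {
            if (auto nfa = mcshengCompile64(raw, cc, rm)) {
                return nfa;                 /* up to 64 sheng states */
            }
        }
        return mcshengCompile(raw, cc, rm); /* classic <=16 sheng states */
    }
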
diff --git a/contrib/libs/hyperscan/src/nfa/mcsheng_compile.h b/contrib/libs/hyperscan/src/nfa/mcsheng_compile.h
index 7de7c14568..3a79b46a23 100644
--- a/contrib/libs/hyperscan/src/nfa/mcsheng_compile.h
+++ b/contrib/libs/hyperscan/src/nfa/mcsheng_compile.h
@@ -42,8 +42,8 @@ struct raw_dfa;
bytecode_ptr<NFA> mcshengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm);
-bytecode_ptr<NFA> mcshengCompile64(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm);
+bytecode_ptr<NFA> mcshengCompile64(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm);
bool has_accel_mcsheng(const NFA *nfa);
} // namespace ue2
diff --git a/contrib/libs/hyperscan/src/nfa/mcsheng_data.c b/contrib/libs/hyperscan/src/nfa/mcsheng_data.c
index 304e383736..0701b4b313 100644
--- a/contrib/libs/hyperscan/src/nfa/mcsheng_data.c
+++ b/contrib/libs/hyperscan/src/nfa/mcsheng_data.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,15 +41,15 @@ const u64a mcsheng_pext_mask[8] = {
0x00ff00000000000f,
0xff0000000000000f,
};
-#if defined(HAVE_AVX512VBMI)
-const u64a mcsheng64_pext_mask[8] = {
- 0, /* dummy */
- 0x000000000000ff3f,
- 0x0000000000ff003f,
- 0x00000000ff00003f,
- 0x000000ff0000003f,
- 0x0000ff000000003f,
- 0x00ff00000000003f,
- 0xff0000000000003f,
-};
-#endif
+#if defined(HAVE_AVX512VBMI)
+const u64a mcsheng64_pext_mask[8] = {
+ 0, /* dummy */
+ 0x000000000000ff3f,
+ 0x0000000000ff003f,
+ 0x00000000ff00003f,
+ 0x000000ff0000003f,
+ 0x0000ff000000003f,
+ 0x00ff00000000003f,
+ 0xff0000000000003f,
+};
+#endif
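
The mask tables encode how a PEXT (BMI2) instruction packs a state id and one successor byte into a dense index: the classic mcsheng masks keep four state bits (0x0f, up to 16 sheng states), while the mcsheng64 masks keep six (0x3f, up to 64), plus one full byte whose position steps through the word. A hedged illustration of what such a mask extracts (how mcsheng consumes the result is in mcsheng.c):

    #include <immintrin.h> /* requires BMI2 */
    /* mask 0x000000000000ff3f gathers bits 0-5 and 8-15 contiguously:
     * word 0x2a15 -> (0x2a << 6) | 0x15 */
    static inline u64a pext_index(u64a word, u64a mask) {
        return _pext_u64(word, mask);
    }
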
diff --git a/contrib/libs/hyperscan/src/nfa/mcsheng_internal.h b/contrib/libs/hyperscan/src/nfa/mcsheng_internal.h
index 646229709d..d985574624 100644
--- a/contrib/libs/hyperscan/src/nfa/mcsheng_internal.h
+++ b/contrib/libs/hyperscan/src/nfa/mcsheng_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -84,7 +84,7 @@ struct mcsheng {
u8 has_accel; /**< 1 iff there are any accel plans */
u8 remap[256]; /**< remaps characters to a smaller alphabet */
ReportID arb_report; /**< one of the accepts that this dfa may raise */
- u32 accel_offset; /**< offset of accel structures from start of McClellan */
+ u32 accel_offset; /**< offset of accel structures from start of McClellan */
m128 sheng_masks[N_CHARS];
};
@@ -92,33 +92,33 @@ struct mcsheng {
* representing the data from a u64a. */
extern const u64a mcsheng_pext_mask[8];
-struct mcsheng64 {
- u16 state_count; /**< total number of states */
- u32 length; /**< length of dfa in bytes */
- u16 start_anchored; /**< anchored start state */
- u16 start_floating; /**< floating start state */
- u32 aux_offset; /**< offset of the aux structures relative to the start of
- * the nfa structure */
- u32 sherman_offset; /**< offset of the array of sherman state offsets (the
- * state_info structures), relative to the start
- * of the nfa structure */
- u32 sherman_end; /**< offset of the end of the state_info structures
- * relative to the start of the nfa structure */
- u16 sheng_end; /**< first non-sheng state */
- u16 sheng_accel_limit; /**< first sheng accel state. state given in terms of
- * internal sheng ids */
- u16 accel_limit_8; /**< 8 bit, lowest accelerable state */
- u16 accept_limit_8; /**< 8 bit, lowest accept state */
- u16 sherman_limit; /**< lowest sherman state */
- u8 alphaShift;
- u8 flags;
- u8 has_accel; /**< 1 iff there are any accel plans */
- u8 remap[256]; /**< remaps characters to a smaller alphabet */
- ReportID arb_report; /**< one of the accepts that this dfa may raise */
- u32 accel_offset; /**< offset of accel structures from start of McClellan */
- m512 sheng_succ_masks[N_CHARS];
-};
-
-extern const u64a mcsheng64_pext_mask[8];
-
+struct mcsheng64 {
+ u16 state_count; /**< total number of states */
+ u32 length; /**< length of dfa in bytes */
+ u16 start_anchored; /**< anchored start state */
+ u16 start_floating; /**< floating start state */
+ u32 aux_offset; /**< offset of the aux structures relative to the start of
+ * the nfa structure */
+ u32 sherman_offset; /**< offset of the array of sherman state offsets (the
+ * state_info structures), relative to the start
+ * of the nfa structure */
+ u32 sherman_end; /**< offset of the end of the state_info structures
+ * relative to the start of the nfa structure */
+ u16 sheng_end; /**< first non-sheng state */
+ u16 sheng_accel_limit; /**< first sheng accel state. state given in terms of
+ * internal sheng ids */
+ u16 accel_limit_8; /**< 8 bit, lowest accelerable state */
+ u16 accept_limit_8; /**< 8 bit, lowest accept state */
+ u16 sherman_limit; /**< lowest sherman state */
+ u8 alphaShift;
+ u8 flags;
+ u8 has_accel; /**< 1 iff there are any accel plans */
+ u8 remap[256]; /**< remaps characters to a smaller alphabet */
+ ReportID arb_report; /**< one of the accepts that this dfa may raise */
+ u32 accel_offset; /**< offset of accel structures from start of McClellan */
+ m512 sheng_succ_masks[N_CHARS];
+};
+
+extern const u64a mcsheng64_pext_mask[8];
+
#endif
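
The m512 sheng_succ_masks array is the heart of the 64-state variant: one 64-byte shuffle mask per input character, so a single byte-permute maps every lane's current state to its successor. A conceptual step, assuming AVX512VBMI (lane and argument order here are illustrative, not lifted from the generated kernels):

    #include <immintrin.h>
    static inline __m512i sheng64_step(__m512i cur_states,
                                       const __m512i *succ_masks,
                                       unsigned char c) {
        /* result[i] = succ_masks[c][cur_states[i] & 63] */
        return _mm512_permutexvar_epi8(cur_states, succ_masks[c]);
    }
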
diff --git a/contrib/libs/hyperscan/src/nfa/nfa_api_dispatch.c b/contrib/libs/hyperscan/src/nfa/nfa_api_dispatch.c
index 7de11f3e97..75cac4b481 100644
--- a/contrib/libs/hyperscan/src/nfa/nfa_api_dispatch.c
+++ b/contrib/libs/hyperscan/src/nfa/nfa_api_dispatch.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -76,10 +76,10 @@
DISPATCH_CASE(TAMARAMA_NFA, Tamarama, dbnt_func); \
DISPATCH_CASE(MCSHENG_NFA_8, McSheng8, dbnt_func); \
DISPATCH_CASE(MCSHENG_NFA_16, McSheng16, dbnt_func); \
- DISPATCH_CASE(SHENG_NFA_32, Sheng32, dbnt_func); \
- DISPATCH_CASE(SHENG_NFA_64, Sheng64, dbnt_func); \
- DISPATCH_CASE(MCSHENG_64_NFA_8, McSheng64_8, dbnt_func); \
- DISPATCH_CASE(MCSHENG_64_NFA_16, McSheng64_16, dbnt_func); \
+ DISPATCH_CASE(SHENG_NFA_32, Sheng32, dbnt_func); \
+ DISPATCH_CASE(SHENG_NFA_64, Sheng64, dbnt_func); \
+ DISPATCH_CASE(MCSHENG_64_NFA_8, McSheng64_8, dbnt_func); \
+ DISPATCH_CASE(MCSHENG_64_NFA_16, McSheng64_16, dbnt_func); \
default: \
assert(0); \
}
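
DISPATCH_BY_NFA_TYPE expands to a switch over NFA::type, so routing the 64-state engines is purely a matter of new cases. The simplified shape it generates (handler names are assembled by token-pasting from the DISPATCH_CASE arguments, so the ones below are assumed):

    switch (nfa->type) {
        /* ... existing engines ... */
        case MCSHENG_64_NFA_8:  return nfaExecMcSheng64_8_Q(nfa, q, end);
        case MCSHENG_64_NFA_16: return nfaExecMcSheng64_16_Q(nfa, q, end);
        default:                assert(0); return 0;
    }
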
diff --git a/contrib/libs/hyperscan/src/nfa/nfa_build_util.cpp b/contrib/libs/hyperscan/src/nfa/nfa_build_util.cpp
index 2645cdefab..47153163e9 100644
--- a/contrib/libs/hyperscan/src/nfa/nfa_build_util.cpp
+++ b/contrib/libs/hyperscan/src/nfa/nfa_build_util.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -430,65 +430,65 @@ const nfa_dispatch_fn NFATraits<MCSHENG_NFA_16>::has_repeats_other_than_firsts =
const char *NFATraits<MCSHENG_NFA_16>::name = "Shengy McShengFace 16";
#endif
-template<> struct NFATraits<SHENG_NFA_32> {
- UNUSED static const char *name;
- static const NFACategory category = NFA_OTHER;
- static const u32 stateAlign = 1;
- static const nfa_dispatch_fn has_accel;
- static const nfa_dispatch_fn has_repeats;
- static const nfa_dispatch_fn has_repeats_other_than_firsts;
-};
-const nfa_dispatch_fn NFATraits<SHENG_NFA_32>::has_accel = has_accel_sheng;
-const nfa_dispatch_fn NFATraits<SHENG_NFA_32>::has_repeats = dispatch_false;
-const nfa_dispatch_fn NFATraits<SHENG_NFA_32>::has_repeats_other_than_firsts = dispatch_false;
-#if defined(DUMP_SUPPORT)
-const char *NFATraits<SHENG_NFA_32>::name = "Sheng 32";
-#endif
-
-template<> struct NFATraits<SHENG_NFA_64> {
- UNUSED static const char *name;
- static const NFACategory category = NFA_OTHER;
- static const u32 stateAlign = 1;
- static const nfa_dispatch_fn has_accel;
- static const nfa_dispatch_fn has_repeats;
- static const nfa_dispatch_fn has_repeats_other_than_firsts;
-};
-const nfa_dispatch_fn NFATraits<SHENG_NFA_64>::has_accel = has_accel_sheng;
-const nfa_dispatch_fn NFATraits<SHENG_NFA_64>::has_repeats = dispatch_false;
-const nfa_dispatch_fn NFATraits<SHENG_NFA_64>::has_repeats_other_than_firsts = dispatch_false;
-#if defined(DUMP_SUPPORT)
-const char *NFATraits<SHENG_NFA_64>::name = "Sheng 64";
-#endif
-
-template<> struct NFATraits<MCSHENG_64_NFA_8> {
- UNUSED static const char *name;
- static const NFACategory category = NFA_OTHER;
- static const u32 stateAlign = 1;
- static const nfa_dispatch_fn has_accel;
- static const nfa_dispatch_fn has_repeats;
- static const nfa_dispatch_fn has_repeats_other_than_firsts;
-};
-const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_8>::has_accel = has_accel_mcsheng;
-const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_8>::has_repeats = dispatch_false;
-const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_8>::has_repeats_other_than_firsts = dispatch_false;
-#if defined(DUMP_SUPPORT)
-const char *NFATraits<MCSHENG_64_NFA_8>::name = "Shengy64 McShengFace 8";
-#endif
-
-template<> struct NFATraits<MCSHENG_64_NFA_16> {
- UNUSED static const char *name;
- static const NFACategory category = NFA_OTHER;
- static const u32 stateAlign = 2;
- static const nfa_dispatch_fn has_accel;
- static const nfa_dispatch_fn has_repeats;
- static const nfa_dispatch_fn has_repeats_other_than_firsts;
-};
-const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_16>::has_accel = has_accel_mcsheng;
-const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_16>::has_repeats = dispatch_false;
-const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_16>::has_repeats_other_than_firsts = dispatch_false;
-#if defined(DUMP_SUPPORT)
-const char *NFATraits<MCSHENG_64_NFA_16>::name = "Shengy64 McShengFace 16";
-#endif
+template<> struct NFATraits<SHENG_NFA_32> {
+ UNUSED static const char *name;
+ static const NFACategory category = NFA_OTHER;
+ static const u32 stateAlign = 1;
+ static const nfa_dispatch_fn has_accel;
+ static const nfa_dispatch_fn has_repeats;
+ static const nfa_dispatch_fn has_repeats_other_than_firsts;
+};
+const nfa_dispatch_fn NFATraits<SHENG_NFA_32>::has_accel = has_accel_sheng;
+const nfa_dispatch_fn NFATraits<SHENG_NFA_32>::has_repeats = dispatch_false;
+const nfa_dispatch_fn NFATraits<SHENG_NFA_32>::has_repeats_other_than_firsts = dispatch_false;
+#if defined(DUMP_SUPPORT)
+const char *NFATraits<SHENG_NFA_32>::name = "Sheng 32";
+#endif
+
+template<> struct NFATraits<SHENG_NFA_64> {
+ UNUSED static const char *name;
+ static const NFACategory category = NFA_OTHER;
+ static const u32 stateAlign = 1;
+ static const nfa_dispatch_fn has_accel;
+ static const nfa_dispatch_fn has_repeats;
+ static const nfa_dispatch_fn has_repeats_other_than_firsts;
+};
+const nfa_dispatch_fn NFATraits<SHENG_NFA_64>::has_accel = has_accel_sheng;
+const nfa_dispatch_fn NFATraits<SHENG_NFA_64>::has_repeats = dispatch_false;
+const nfa_dispatch_fn NFATraits<SHENG_NFA_64>::has_repeats_other_than_firsts = dispatch_false;
+#if defined(DUMP_SUPPORT)
+const char *NFATraits<SHENG_NFA_64>::name = "Sheng 64";
+#endif
+
+template<> struct NFATraits<MCSHENG_64_NFA_8> {
+ UNUSED static const char *name;
+ static const NFACategory category = NFA_OTHER;
+ static const u32 stateAlign = 1;
+ static const nfa_dispatch_fn has_accel;
+ static const nfa_dispatch_fn has_repeats;
+ static const nfa_dispatch_fn has_repeats_other_than_firsts;
+};
+const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_8>::has_accel = has_accel_mcsheng;
+const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_8>::has_repeats = dispatch_false;
+const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_8>::has_repeats_other_than_firsts = dispatch_false;
+#if defined(DUMP_SUPPORT)
+const char *NFATraits<MCSHENG_64_NFA_8>::name = "Shengy64 McShengFace 8";
+#endif
+
+template<> struct NFATraits<MCSHENG_64_NFA_16> {
+ UNUSED static const char *name;
+ static const NFACategory category = NFA_OTHER;
+ static const u32 stateAlign = 2;
+ static const nfa_dispatch_fn has_accel;
+ static const nfa_dispatch_fn has_repeats;
+ static const nfa_dispatch_fn has_repeats_other_than_firsts;
+};
+const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_16>::has_accel = has_accel_mcsheng;
+const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_16>::has_repeats = dispatch_false;
+const nfa_dispatch_fn NFATraits<MCSHENG_64_NFA_16>::has_repeats_other_than_firsts = dispatch_false;
+#if defined(DUMP_SUPPORT)
+const char *NFATraits<MCSHENG_64_NFA_16>::name = "Shengy64 McShengFace 16";
+#endif
} // namespace
#if defined(DUMP_SUPPORT)
diff --git a/contrib/libs/hyperscan/src/nfa/nfa_build_util.h b/contrib/libs/hyperscan/src/nfa/nfa_build_util.h
index 15a30becc9..ee7a309494 100644
--- a/contrib/libs/hyperscan/src/nfa/nfa_build_util.h
+++ b/contrib/libs/hyperscan/src/nfa/nfa_build_util.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/nfa/nfa_internal.h b/contrib/libs/hyperscan/src/nfa/nfa_internal.h
index 46dbbecacc..ad27e28b14 100644
--- a/contrib/libs/hyperscan/src/nfa/nfa_internal.h
+++ b/contrib/libs/hyperscan/src/nfa/nfa_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -72,10 +72,10 @@ enum NFAEngineType {
TAMARAMA_NFA, /**< magic nfa container */
MCSHENG_NFA_8, /**< magic pseudo nfa */
MCSHENG_NFA_16, /**< magic pseudo nfa */
- SHENG_NFA_32, /**< magic pseudo nfa */
- SHENG_NFA_64, /**< magic pseudo nfa */
- MCSHENG_64_NFA_8, /**< magic pseudo nfa */
- MCSHENG_64_NFA_16, /**< magic pseudo nfa */
+ SHENG_NFA_32, /**< magic pseudo nfa */
+ SHENG_NFA_64, /**< magic pseudo nfa */
+ MCSHENG_64_NFA_8, /**< magic pseudo nfa */
+ MCSHENG_64_NFA_16, /**< magic pseudo nfa */
/** \brief bogus NFA - not used */
INVALID_NFA
};
@@ -152,8 +152,8 @@ static really_inline int isMcClellanType(u8 t) {
/** \brief True if the given type (from NFA::type) is a Sheng-McClellan hybrid
* DFA. */
static really_inline int isShengMcClellanType(u8 t) {
- return t == MCSHENG_NFA_8 || t == MCSHENG_NFA_16 ||
- t == MCSHENG_64_NFA_8 || t == MCSHENG_64_NFA_16;
+ return t == MCSHENG_NFA_8 || t == MCSHENG_NFA_16 ||
+ t == MCSHENG_64_NFA_8 || t == MCSHENG_64_NFA_16;
}
/** \brief True if the given type (from NFA::type) is a Gough DFA. */
@@ -162,25 +162,25 @@ static really_inline int isGoughType(u8 t) {
}
/** \brief True if the given type (from NFA::type) is a Sheng DFA. */
-static really_inline int isSheng16Type(u8 t) {
+static really_inline int isSheng16Type(u8 t) {
return t == SHENG_NFA;
}
-/** \brief True if the given type (from NFA::type) is a Sheng32 DFA. */
-static really_inline int isSheng32Type(u8 t) {
- return t == SHENG_NFA_32;
-}
-
-/** \brief True if the given type (from NFA::type) is a Sheng64 DFA. */
-static really_inline int isSheng64Type(u8 t) {
- return t == SHENG_NFA_64;
-}
-
-/** \brief True if the given type (from NFA::type) is a Sheng16/32/64 DFA. */
-static really_inline int isShengType(u8 t) {
- return t == SHENG_NFA || t == SHENG_NFA_32 || t == SHENG_NFA_64;
-}
-
+/** \brief True if the given type (from NFA::type) is a Sheng32 DFA. */
+static really_inline int isSheng32Type(u8 t) {
+ return t == SHENG_NFA_32;
+}
+
+/** \brief True if the given type (from NFA::type) is a Sheng64 DFA. */
+static really_inline int isSheng64Type(u8 t) {
+ return t == SHENG_NFA_64;
+}
+
+/** \brief True if the given type (from NFA::type) is a Sheng16/32/64 DFA. */
+static really_inline int isShengType(u8 t) {
+ return t == SHENG_NFA || t == SHENG_NFA_32 || t == SHENG_NFA_64;
+}
+
/**
* \brief True if the given type (from NFA::type) is a McClellan, Gough or
* Sheng DFA.
diff --git a/contrib/libs/hyperscan/src/nfa/sheng.c b/contrib/libs/hyperscan/src/nfa/sheng.c
index 7673131501..3f36e21891 100644
--- a/contrib/libs/hyperscan/src/nfa/sheng.c
+++ b/contrib/libs/hyperscan/src/nfa/sheng.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -154,205 +154,205 @@ char fireReports(const struct sheng *sh, NfaCallback cb, void *ctxt,
return MO_CONTINUE_MATCHING; /* continue execution */
}
-#if defined(HAVE_AVX512VBMI)
-// Sheng32
-static really_inline
-const struct sheng32 *get_sheng32(const struct NFA *n) {
- return (const struct sheng32 *)getImplNfa(n);
-}
-
-static really_inline
-const struct sstate_aux *get_aux32(const struct sheng32 *sh, u8 id) {
- u32 offset = sh->aux_offset - sizeof(struct NFA) +
- (id & SHENG32_STATE_MASK) * sizeof(struct sstate_aux);
- DEBUG_PRINTF("Getting aux for state %u at offset %llu\n",
- id & SHENG32_STATE_MASK, (u64a)offset + sizeof(struct NFA));
- return (const struct sstate_aux *)((const char *) sh + offset);
-}
-
-static really_inline
-const union AccelAux *get_accel32(const struct sheng32 *sh, u8 id) {
- const struct sstate_aux *saux = get_aux32(sh, id);
- DEBUG_PRINTF("Getting accel aux at offset %u\n", saux->accel);
- const union AccelAux *aux = (const union AccelAux *)
- ((const char *)sh + saux->accel - sizeof(struct NFA));
- return aux;
-}
-
-static really_inline
-const struct report_list *get_rl32(const struct sheng32 *sh,
- const struct sstate_aux *aux) {
- DEBUG_PRINTF("Getting report list at offset %u\n", aux->accept);
- return (const struct report_list *)
- ((const char *)sh + aux->accept - sizeof(struct NFA));
-}
-
-static really_inline
-const struct report_list *get_eod_rl32(const struct sheng32 *sh,
- const struct sstate_aux *aux) {
- DEBUG_PRINTF("Getting EOD report list at offset %u\n", aux->accept);
- return (const struct report_list *)
- ((const char *)sh + aux->accept_eod - sizeof(struct NFA));
-}
-
-static really_inline
-char sheng32HasAccept(const struct sheng32 *sh, const struct sstate_aux *aux,
- ReportID report) {
- assert(sh && aux);
-
- const struct report_list *rl = get_rl32(sh, aux);
- assert(ISALIGNED_N(rl, 4));
-
- DEBUG_PRINTF("report list has %u entries\n", rl->count);
-
- for (u32 i = 0; i < rl->count; i++) {
- if (rl->report[i] == report) {
- DEBUG_PRINTF("reporting %u\n", rl->report[i]);
- return 1;
- }
- }
-
- return 0;
-}
-
-static really_inline
-char fireReports32(const struct sheng32 *sh, NfaCallback cb, void *ctxt,
- const u8 state, u64a loc, u8 *const cached_accept_state,
- ReportID *const cached_accept_id, char eod) {
- DEBUG_PRINTF("reporting matches @ %llu\n", loc);
-
- if (!eod && state == *cached_accept_state) {
- DEBUG_PRINTF("reporting %u\n", *cached_accept_id);
- if (cb(0, loc, *cached_accept_id, ctxt) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING; /* termination requested */
- }
-
- return MO_CONTINUE_MATCHING; /* continue execution */
- }
- const struct sstate_aux *aux = get_aux32(sh, state);
- const struct report_list *rl = eod ? get_eod_rl32(sh, aux) :
- get_rl32(sh, aux);
- assert(ISALIGNED(rl));
-
- DEBUG_PRINTF("report list has %u entries\n", rl->count);
- u32 count = rl->count;
-
- if (!eod && count == 1) {
- *cached_accept_state = state;
- *cached_accept_id = rl->report[0];
-
- DEBUG_PRINTF("reporting %u\n", rl->report[0]);
- if (cb(0, loc, rl->report[0], ctxt) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING; /* termination requested */
- }
-
- return MO_CONTINUE_MATCHING; /* continue execution */
- }
-
- for (u32 i = 0; i < count; i++) {
- DEBUG_PRINTF("reporting %u\n", rl->report[i]);
- if (cb(0, loc, rl->report[i], ctxt) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING; /* termination requested */
- }
- }
- return MO_CONTINUE_MATCHING; /* continue execution */
-}
-
-// Sheng64
-static really_inline
-const struct sheng64 *get_sheng64(const struct NFA *n) {
- return (const struct sheng64 *)getImplNfa(n);
-}
-
-static really_inline
-const struct sstate_aux *get_aux64(const struct sheng64 *sh, u8 id) {
- u32 offset = sh->aux_offset - sizeof(struct NFA) +
- (id & SHENG64_STATE_MASK) * sizeof(struct sstate_aux);
- DEBUG_PRINTF("Getting aux for state %u at offset %llu\n",
- id & SHENG64_STATE_MASK, (u64a)offset + sizeof(struct NFA));
- return (const struct sstate_aux *)((const char *) sh + offset);
-}
-
-static really_inline
-const struct report_list *get_rl64(const struct sheng64 *sh,
- const struct sstate_aux *aux) {
- DEBUG_PRINTF("Getting report list at offset %u\n", aux->accept);
- return (const struct report_list *)
- ((const char *)sh + aux->accept - sizeof(struct NFA));
-}
-
-static really_inline
-const struct report_list *get_eod_rl64(const struct sheng64 *sh,
- const struct sstate_aux *aux) {
- DEBUG_PRINTF("Getting EOD report list at offset %u\n", aux->accept);
- return (const struct report_list *)
- ((const char *)sh + aux->accept_eod - sizeof(struct NFA));
-}
-
-static really_inline
-char sheng64HasAccept(const struct sheng64 *sh, const struct sstate_aux *aux,
- ReportID report) {
- assert(sh && aux);
-
- const struct report_list *rl = get_rl64(sh, aux);
- assert(ISALIGNED_N(rl, 4));
-
- DEBUG_PRINTF("report list has %u entries\n", rl->count);
-
- for (u32 i = 0; i < rl->count; i++) {
- if (rl->report[i] == report) {
- DEBUG_PRINTF("reporting %u\n", rl->report[i]);
- return 1;
- }
- }
-
- return 0;
-}
-
-static really_inline
-char fireReports64(const struct sheng64 *sh, NfaCallback cb, void *ctxt,
- const u8 state, u64a loc, u8 *const cached_accept_state,
- ReportID *const cached_accept_id, char eod) {
- DEBUG_PRINTF("reporting matches @ %llu\n", loc);
-
- if (!eod && state == *cached_accept_state) {
- DEBUG_PRINTF("reporting %u\n", *cached_accept_id);
- if (cb(0, loc, *cached_accept_id, ctxt) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING; /* termination requested */
- }
-
- return MO_CONTINUE_MATCHING; /* continue execution */
- }
- const struct sstate_aux *aux = get_aux64(sh, state);
- const struct report_list *rl = eod ? get_eod_rl64(sh, aux) :
- get_rl64(sh, aux);
- assert(ISALIGNED(rl));
-
- DEBUG_PRINTF("report list has %u entries\n", rl->count);
- u32 count = rl->count;
-
- if (!eod && count == 1) {
- *cached_accept_state = state;
- *cached_accept_id = rl->report[0];
-
- DEBUG_PRINTF("reporting %u\n", rl->report[0]);
- if (cb(0, loc, rl->report[0], ctxt) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING; /* termination requested */
- }
-
- return MO_CONTINUE_MATCHING; /* continue execution */
- }
-
- for (u32 i = 0; i < count; i++) {
- DEBUG_PRINTF("reporting %u\n", rl->report[i]);
- if (cb(0, loc, rl->report[i], ctxt) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING; /* termination requested */
- }
- }
- return MO_CONTINUE_MATCHING; /* continue execution */
-}
-#endif // end of HAVE_AVX512VBMI
-
+#if defined(HAVE_AVX512VBMI)
+// Sheng32
+static really_inline
+const struct sheng32 *get_sheng32(const struct NFA *n) {
+ return (const struct sheng32 *)getImplNfa(n);
+}
+
+static really_inline
+const struct sstate_aux *get_aux32(const struct sheng32 *sh, u8 id) {
+ u32 offset = sh->aux_offset - sizeof(struct NFA) +
+ (id & SHENG32_STATE_MASK) * sizeof(struct sstate_aux);
+ DEBUG_PRINTF("Getting aux for state %u at offset %llu\n",
+ id & SHENG32_STATE_MASK, (u64a)offset + sizeof(struct NFA));
+ return (const struct sstate_aux *)((const char *) sh + offset);
+}
+
+static really_inline
+const union AccelAux *get_accel32(const struct sheng32 *sh, u8 id) {
+ const struct sstate_aux *saux = get_aux32(sh, id);
+ DEBUG_PRINTF("Getting accel aux at offset %u\n", saux->accel);
+ const union AccelAux *aux = (const union AccelAux *)
+ ((const char *)sh + saux->accel - sizeof(struct NFA));
+ return aux;
+}
+
+static really_inline
+const struct report_list *get_rl32(const struct sheng32 *sh,
+ const struct sstate_aux *aux) {
+ DEBUG_PRINTF("Getting report list at offset %u\n", aux->accept);
+ return (const struct report_list *)
+ ((const char *)sh + aux->accept - sizeof(struct NFA));
+}
+
+static really_inline
+const struct report_list *get_eod_rl32(const struct sheng32 *sh,
+ const struct sstate_aux *aux) {
+ DEBUG_PRINTF("Getting EOD report list at offset %u\n", aux->accept);
+ return (const struct report_list *)
+ ((const char *)sh + aux->accept_eod - sizeof(struct NFA));
+}
+
+static really_inline
+char sheng32HasAccept(const struct sheng32 *sh, const struct sstate_aux *aux,
+ ReportID report) {
+ assert(sh && aux);
+
+ const struct report_list *rl = get_rl32(sh, aux);
+ assert(ISALIGNED_N(rl, 4));
+
+ DEBUG_PRINTF("report list has %u entries\n", rl->count);
+
+ for (u32 i = 0; i < rl->count; i++) {
+ if (rl->report[i] == report) {
+ DEBUG_PRINTF("reporting %u\n", rl->report[i]);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static really_inline
+char fireReports32(const struct sheng32 *sh, NfaCallback cb, void *ctxt,
+ const u8 state, u64a loc, u8 *const cached_accept_state,
+ ReportID *const cached_accept_id, char eod) {
+ DEBUG_PRINTF("reporting matches @ %llu\n", loc);
+
+ if (!eod && state == *cached_accept_state) {
+ DEBUG_PRINTF("reporting %u\n", *cached_accept_id);
+ if (cb(0, loc, *cached_accept_id, ctxt) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING; /* termination requested */
+ }
+
+ return MO_CONTINUE_MATCHING; /* continue execution */
+ }
+ const struct sstate_aux *aux = get_aux32(sh, state);
+ const struct report_list *rl = eod ? get_eod_rl32(sh, aux) :
+ get_rl32(sh, aux);
+ assert(ISALIGNED(rl));
+
+ DEBUG_PRINTF("report list has %u entries\n", rl->count);
+ u32 count = rl->count;
+
+ if (!eod && count == 1) {
+ *cached_accept_state = state;
+ *cached_accept_id = rl->report[0];
+
+ DEBUG_PRINTF("reporting %u\n", rl->report[0]);
+ if (cb(0, loc, rl->report[0], ctxt) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING; /* termination requested */
+ }
+
+ return MO_CONTINUE_MATCHING; /* continue execution */
+ }
+
+ for (u32 i = 0; i < count; i++) {
+ DEBUG_PRINTF("reporting %u\n", rl->report[i]);
+ if (cb(0, loc, rl->report[i], ctxt) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING; /* termination requested */
+ }
+ }
+ return MO_CONTINUE_MATCHING; /* continue execution */
+}
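
fireReports32 primes the (state, report) cache only when a non-EOD state carries exactly one report; subsequent matches in that state then skip the report-list walk entirely. Usage follows this pattern (sh, cb, ctxt, s and the locations are assumed to be in scope):

    u8 cached_accept_state = 0;
    ReportID cached_accept_id = 0;
    /* first match in state s walks the list and primes the cache */
    fireReports32(sh, cb, ctxt, s, loc0, &cached_accept_state,
                  &cached_accept_id, 0);
    /* a later match in the same state takes the cached fast path */
    fireReports32(sh, cb, ctxt, s, loc1, &cached_accept_state,
                  &cached_accept_id, 0);
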
+
+// Sheng64
+static really_inline
+const struct sheng64 *get_sheng64(const struct NFA *n) {
+ return (const struct sheng64 *)getImplNfa(n);
+}
+
+static really_inline
+const struct sstate_aux *get_aux64(const struct sheng64 *sh, u8 id) {
+ u32 offset = sh->aux_offset - sizeof(struct NFA) +
+ (id & SHENG64_STATE_MASK) * sizeof(struct sstate_aux);
+ DEBUG_PRINTF("Getting aux for state %u at offset %llu\n",
+ id & SHENG64_STATE_MASK, (u64a)offset + sizeof(struct NFA));
+ return (const struct sstate_aux *)((const char *) sh + offset);
+}
+
+static really_inline
+const struct report_list *get_rl64(const struct sheng64 *sh,
+ const struct sstate_aux *aux) {
+ DEBUG_PRINTF("Getting report list at offset %u\n", aux->accept);
+ return (const struct report_list *)
+ ((const char *)sh + aux->accept - sizeof(struct NFA));
+}
+
+static really_inline
+const struct report_list *get_eod_rl64(const struct sheng64 *sh,
+ const struct sstate_aux *aux) {
+ DEBUG_PRINTF("Getting EOD report list at offset %u\n", aux->accept);
+ return (const struct report_list *)
+ ((const char *)sh + aux->accept_eod - sizeof(struct NFA));
+}
+
+static really_inline
+char sheng64HasAccept(const struct sheng64 *sh, const struct sstate_aux *aux,
+ ReportID report) {
+ assert(sh && aux);
+
+ const struct report_list *rl = get_rl64(sh, aux);
+ assert(ISALIGNED_N(rl, 4));
+
+ DEBUG_PRINTF("report list has %u entries\n", rl->count);
+
+ for (u32 i = 0; i < rl->count; i++) {
+ if (rl->report[i] == report) {
+ DEBUG_PRINTF("reporting %u\n", rl->report[i]);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static really_inline
+char fireReports64(const struct sheng64 *sh, NfaCallback cb, void *ctxt,
+ const u8 state, u64a loc, u8 *const cached_accept_state,
+ ReportID *const cached_accept_id, char eod) {
+ DEBUG_PRINTF("reporting matches @ %llu\n", loc);
+
+ if (!eod && state == *cached_accept_state) {
+ DEBUG_PRINTF("reporting %u\n", *cached_accept_id);
+ if (cb(0, loc, *cached_accept_id, ctxt) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING; /* termination requested */
+ }
+
+ return MO_CONTINUE_MATCHING; /* continue execution */
+ }
+ const struct sstate_aux *aux = get_aux64(sh, state);
+ const struct report_list *rl = eod ? get_eod_rl64(sh, aux) :
+ get_rl64(sh, aux);
+ assert(ISALIGNED(rl));
+
+ DEBUG_PRINTF("report list has %u entries\n", rl->count);
+ u32 count = rl->count;
+
+ if (!eod && count == 1) {
+ *cached_accept_state = state;
+ *cached_accept_id = rl->report[0];
+
+ DEBUG_PRINTF("reporting %u\n", rl->report[0]);
+ if (cb(0, loc, rl->report[0], ctxt) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING; /* termination requested */
+ }
+
+ return MO_CONTINUE_MATCHING; /* continue execution */
+ }
+
+ for (u32 i = 0; i < count; i++) {
+ DEBUG_PRINTF("reporting %u\n", rl->report[i]);
+ if (cb(0, loc, rl->report[i], ctxt) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING; /* termination requested */
+ }
+ }
+ return MO_CONTINUE_MATCHING; /* continue execution */
+}
+#endif // end of HAVE_AVX512VBMI
+
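
Everything inside this HAVE_AVX512VBMI block is a build-time gate; at run time the library additionally refuses to load a database whose engines need the feature on lesser hardware. From the API side the capability is visible via the platform query (a sketch against the public Hyperscan API; HS_CPU_FEATURES_AVX512VBMI is the flag named in the debug message above):

    #include <hs/hs.h>

    hs_platform_info_t plat;
    if (hs_populate_platform(&plat) == HS_SUCCESS &&
        (plat.cpu_features & HS_CPU_FEATURES_AVX512VBMI)) {
        /* this host can run databases containing sheng32/sheng64 engines */
    }
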
/* include Sheng function definitions */
#include "sheng_defs.h"
@@ -827,7 +827,7 @@ char nfaExecSheng_reportCurrent(const struct NFA *n, struct mq *q) {
fireSingleReport(cb, ctxt, sh->report, offset);
} else {
fireReports(sh, cb, ctxt, s, offset, &cached_state_id,
- &cached_report_id, 0);
+ &cached_report_id, 0);
}
}
@@ -870,1008 +870,1008 @@ char nfaExecSheng_expandState(UNUSED const struct NFA *nfa, void *dest,
*(u8 *)dest = *(const u8 *)src;
return 0;
}
-
-#if defined(HAVE_AVX512VBMI)
-// Sheng32
-static really_inline
-char runSheng32Cb(const struct sheng32 *sh, NfaCallback cb, void *ctxt,
- u64a offset, u8 *const cached_accept_state,
- ReportID *const cached_accept_id, const u8 *cur_buf,
- const u8 *start, const u8 *end, u8 can_die,
- u8 has_accel, u8 single, const u8 **scanned, u8 *state) {
- DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in callback mode\n",
- (u64a)(end - start), offset);
- DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
- (s64a)(end - cur_buf));
- DEBUG_PRINTF("can die: %u has accel: %u single: %u\n", !!can_die,
- !!has_accel, !!single);
- int rv;
- /* scan and report all matches */
- if (can_die) {
- if (has_accel) {
- rv = sheng32_4_coda(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- } else {
- rv = sheng32_4_cod(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- }
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- rv = sheng32_cod(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- *scanned, end, scanned);
- } else {
- if (has_accel) {
- rv = sheng32_4_coa(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- } else {
- rv = sheng32_4_co(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- }
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- rv = sheng32_co(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- *scanned, end, scanned);
- }
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- return MO_ALIVE;
-}
-
-static really_inline
-void runSheng32Nm(const struct sheng32 *sh, NfaCallback cb, void *ctxt,
- u64a offset, u8 *const cached_accept_state,
- ReportID *const cached_accept_id, const u8 *cur_buf,
- const u8 *start, const u8 *end, u8 can_die, u8 has_accel,
- u8 single, const u8 **scanned, u8 *state) {
- DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in nomatch mode\n",
- (u64a)(end - start), offset);
- DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
- (s64a)(end - cur_buf));
- DEBUG_PRINTF("can die: %u has accel: %u single: %u\n", !!can_die,
- !!has_accel, !!single);
- /* just scan the buffer */
- if (can_die) {
- if (has_accel) {
- sheng32_4_nmda(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- } else {
- sheng32_4_nmd(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- }
- sheng32_nmd(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
- single, offset, cur_buf, *scanned, end, scanned);
- } else {
- sheng32_4_nm(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
- single, offset, cur_buf, start, end, scanned);
- sheng32_nm(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
- single, offset, cur_buf, *scanned, end, scanned);
- }
-}
-
-static really_inline
-char runSheng32Sam(const struct sheng32 *sh, NfaCallback cb, void *ctxt,
- u64a offset, u8 *const cached_accept_state,
- ReportID *const cached_accept_id, const u8 *cur_buf,
- const u8 *start, const u8 *end, u8 can_die, u8 has_accel,
- u8 single, const u8 **scanned, u8 *state) {
- DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in stop at match mode\n",
- (u64a)(end - start), offset);
- DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
- (s64a)(end - cur_buf));
- DEBUG_PRINTF("can die: %u has accel: %u single: %u\n", !!can_die,
- !!has_accel, !!single);
- int rv;
- /* scan until first match */
- if (can_die) {
- if (has_accel) {
- rv = sheng32_4_samda(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- } else {
- rv = sheng32_4_samd(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- }
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- /* if we stopped before we expected, we found a match */
- if (rv == MO_MATCHES_PENDING) {
- return MO_MATCHES_PENDING;
- }
-
- rv = sheng32_samd(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- *scanned, end, scanned);
- } else {
- if (has_accel) {
- rv = sheng32_4_sama(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- } else {
- rv = sheng32_4_sam(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- }
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- /* if we stopped before we expected, we found a match */
- if (rv == MO_MATCHES_PENDING) {
- return MO_MATCHES_PENDING;
- }
-
- rv = sheng32_sam(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- *scanned, end, scanned);
- }
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- /* if we stopped before we expected, we found a match */
- if (rv == MO_MATCHES_PENDING) {
- return MO_MATCHES_PENDING;
- }
- return MO_ALIVE;
-}
-
-static never_inline
-char runSheng32(const struct sheng32 *sh, struct mq *q, s64a b_end,
- enum MatchMode mode) {
- u8 state = *(u8 *)q->state;
- u8 can_die = sh->flags & SHENG_FLAG_CAN_DIE;
- u8 has_accel = sh->flags & SHENG_FLAG_HAS_ACCEL;
- u8 single = sh->flags & SHENG_FLAG_SINGLE_REPORT;
-
- u8 cached_accept_state = 0;
- ReportID cached_accept_id = 0;
-
- DEBUG_PRINTF("starting Sheng32 execution in state %u\n",
- state & SHENG32_STATE_MASK);
-
- if (q->report_current) {
- DEBUG_PRINTF("reporting current pending matches\n");
- assert(sh);
-
- q->report_current = 0;
-
- int rv;
- if (single) {
- rv = fireSingleReport(q->cb, q->context, sh->report,
- q_cur_offset(q));
- } else {
- rv = fireReports32(sh, q->cb, q->context, state, q_cur_offset(q),
- &cached_accept_state, &cached_accept_id, 0);
- }
- if (rv == MO_HALT_MATCHING) {
- DEBUG_PRINTF("exiting in state %u\n", state & SHENG32_STATE_MASK);
- return MO_DEAD;
- }
-
- DEBUG_PRINTF("proceeding with matching\n");
- }
-
- assert(q_cur_type(q) == MQE_START);
- s64a start = q_cur_loc(q);
-
- DEBUG_PRINTF("offset: %lli, location: %lli, mode: %s\n", q->offset, start,
- mode == CALLBACK_OUTPUT ? "CALLBACK OUTPUT" :
- mode == NO_MATCHES ? "NO MATCHES" :
- mode == STOP_AT_MATCH ? "STOP AT MATCH" : "???");
-
- DEBUG_PRINTF("processing event @ %lli: %s\n", q->offset + q_cur_loc(q),
- q_cur_type(q) == MQE_START ? "START" :
- q_cur_type(q) == MQE_TOP ? "TOP" :
- q_cur_type(q) == MQE_END ? "END" : "???");
-
- const u8* cur_buf;
- if (start < 0) {
- DEBUG_PRINTF("negative location, scanning history\n");
- DEBUG_PRINTF("min location: %zd\n", -q->hlength);
- cur_buf = q->history + q->hlength;
- } else {
- DEBUG_PRINTF("positive location, scanning buffer\n");
- DEBUG_PRINTF("max location: %lli\n", b_end);
- cur_buf = q->buffer;
- }
-
- /* if our queue event is past our end */
- if (mode != NO_MATCHES && q_cur_loc(q) > b_end) {
- DEBUG_PRINTF("current location past buffer end\n");
- DEBUG_PRINTF("setting q location to %llu\n", b_end);
- DEBUG_PRINTF("exiting in state %u\n", state & SHENG32_STATE_MASK);
- q->items[q->cur].location = b_end;
- return MO_ALIVE;
- }
-
- q->cur++;
-
- s64a cur_start = start;
-
- while (1) {
- DEBUG_PRINTF("processing event @ %lli: %s\n", q->offset + q_cur_loc(q),
- q_cur_type(q) == MQE_START ? "START" :
- q_cur_type(q) == MQE_TOP ? "TOP" :
- q_cur_type(q) == MQE_END ? "END" : "???");
- s64a end = q_cur_loc(q);
- if (mode != NO_MATCHES) {
- end = MIN(end, b_end);
- }
- assert(end <= (s64a) q->length);
- s64a cur_end = end;
-
- /* we may cross the border between history and current buffer */
- if (cur_start < 0) {
- cur_end = MIN(0, cur_end);
- }
-
- DEBUG_PRINTF("start: %lli end: %lli\n", start, end);
-
- /* don't scan zero length buffer */
- if (cur_start != cur_end) {
- const u8 * scanned = cur_buf;
- char rv;
-
- if (mode == NO_MATCHES) {
- runSheng32Nm(sh, q->cb, q->context, q->offset,
- &cached_accept_state, &cached_accept_id, cur_buf,
- cur_buf + cur_start, cur_buf + cur_end, can_die,
- has_accel, single, &scanned, &state);
- } else if (mode == CALLBACK_OUTPUT) {
- rv = runSheng32Cb(sh, q->cb, q->context, q->offset,
- &cached_accept_state, &cached_accept_id,
- cur_buf, cur_buf + cur_start, cur_buf + cur_end,
- can_die, has_accel, single, &scanned, &state);
- if (rv == MO_DEAD) {
- DEBUG_PRINTF("exiting in state %u\n",
- state & SHENG32_STATE_MASK);
- return MO_DEAD;
- }
- } else if (mode == STOP_AT_MATCH) {
- rv = runSheng32Sam(sh, q->cb, q->context, q->offset,
- &cached_accept_state, &cached_accept_id,
- cur_buf, cur_buf + cur_start,
- cur_buf + cur_end, can_die, has_accel, single,
- &scanned, &state);
- if (rv == MO_DEAD) {
- DEBUG_PRINTF("exiting in state %u\n",
- state & SHENG32_STATE_MASK);
- return rv;
- } else if (rv == MO_MATCHES_PENDING) {
- assert(q->cur);
- DEBUG_PRINTF("found a match, setting q location to %zd\n",
- scanned - cur_buf + 1);
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location =
- scanned - cur_buf + 1; /* due to exiting early */
- *(u8 *)q->state = state;
- DEBUG_PRINTF("exiting in state %u\n",
- state & SHENG32_STATE_MASK);
- return rv;
- }
- } else {
- assert(!"invalid scanning mode!");
- }
- assert(scanned == cur_buf + cur_end);
-
- cur_start = cur_end;
- }
-
- /* if our queue event is past our end */
- if (mode != NO_MATCHES && q_cur_loc(q) > b_end) {
- DEBUG_PRINTF("current location past buffer end\n");
- DEBUG_PRINTF("setting q location to %llu\n", b_end);
- DEBUG_PRINTF("exiting in state %u\n", state & SHENG32_STATE_MASK);
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location = b_end;
- *(u8 *)q->state = state;
- return MO_ALIVE;
- }
-
- /* crossing over into actual buffer */
- if (cur_start == 0) {
- DEBUG_PRINTF("positive location, scanning buffer\n");
- DEBUG_PRINTF("max offset: %lli\n", b_end);
- cur_buf = q->buffer;
- }
-
- /* continue scanning the same buffer */
- if (end != cur_end) {
- continue;
- }
-
- switch (q_cur_type(q)) {
- case MQE_END:
- *(u8 *)q->state = state;
- q->cur++;
- DEBUG_PRINTF("exiting in state %u\n", state & SHENG32_STATE_MASK);
- if (can_die) {
- return (state & SHENG32_STATE_DEAD) ? MO_DEAD : MO_ALIVE;
- }
- return MO_ALIVE;
- case MQE_TOP:
- if (q->offset + cur_start == 0) {
- DEBUG_PRINTF("Anchored start, going to state %u\n",
- sh->anchored);
- state = sh->anchored;
- } else {
- u8 new_state = get_aux32(sh, state)->top;
- DEBUG_PRINTF("Top event %u->%u\n", state & SHENG32_STATE_MASK,
- new_state & SHENG32_STATE_MASK);
- state = new_state;
- }
- break;
- default:
- assert(!"invalid queue event");
- break;
- }
- q->cur++;
- }
-}
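
runSheng32 is driven by the same three scan modes as the other DFA runtimes, visible in the branches above. Their intent, with the definition sketched from usage (ordering and the exact home of the enum are assumed):

    enum MatchMode {
        CALLBACK_OUTPUT, /* report every match through the callback      */
        STOP_AT_MATCH,   /* return MO_MATCHES_PENDING at the first match */
        NO_MATCHES,      /* advance state only; reporting is suppressed  */
    };
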
-
-char nfaExecSheng32_B(const struct NFA *n, u64a offset, const u8 *buffer,
- size_t length, NfaCallback cb, void *context) {
- DEBUG_PRINTF("smallwrite Sheng32\n");
- assert(n->type == SHENG_NFA_32);
- const struct sheng32 *sh = getImplNfa(n);
- u8 state = sh->anchored;
- u8 can_die = sh->flags & SHENG_FLAG_CAN_DIE;
- u8 has_accel = sh->flags & SHENG_FLAG_HAS_ACCEL;
- u8 single = sh->flags & SHENG_FLAG_SINGLE_REPORT;
- u8 cached_accept_state = 0;
- ReportID cached_accept_id = 0;
-
- /* scan and report all matches */
- int rv;
- s64a end = length;
- const u8 *scanned;
-
- rv = runSheng32Cb(sh, cb, context, offset, &cached_accept_state,
- &cached_accept_id, buffer, buffer, buffer + end, can_die,
- has_accel, single, &scanned, &state);
- if (rv == MO_DEAD) {
- DEBUG_PRINTF("exiting in state %u\n",
- state & SHENG32_STATE_MASK);
- return MO_DEAD;
- }
-
- DEBUG_PRINTF("%u\n", state & SHENG32_STATE_MASK);
-
- const struct sstate_aux *aux = get_aux32(sh, state);
-
- if (aux->accept_eod) {
- DEBUG_PRINTF("Reporting EOD matches\n");
- fireReports32(sh, cb, context, state, end + offset,
- &cached_accept_state, &cached_accept_id, 1);
- }
-
- return state & SHENG32_STATE_DEAD ? MO_DEAD : MO_ALIVE;
-}
-
-char nfaExecSheng32_Q(const struct NFA *n, struct mq *q, s64a end) {
- const struct sheng32 *sh = get_sheng32(n);
- char rv = runSheng32(sh, q, end, CALLBACK_OUTPUT);
- return rv;
-}
-
-char nfaExecSheng32_Q2(const struct NFA *n, struct mq *q, s64a end) {
- const struct sheng32 *sh = get_sheng32(n);
- char rv = runSheng32(sh, q, end, STOP_AT_MATCH);
- return rv;
-}
-
-char nfaExecSheng32_QR(const struct NFA *n, struct mq *q, ReportID report) {
- assert(q_cur_type(q) == MQE_START);
-
- const struct sheng32 *sh = get_sheng32(n);
- char rv = runSheng32(sh, q, 0 /* end */, NO_MATCHES);
-
- if (rv && nfaExecSheng32_inAccept(n, report, q)) {
- return MO_MATCHES_PENDING;
- }
- return rv;
-}
-
-char nfaExecSheng32_inAccept(const struct NFA *n, ReportID report,
- struct mq *q) {
- assert(n && q);
-
- const struct sheng32 *sh = get_sheng32(n);
- u8 s = *(const u8 *)q->state;
- DEBUG_PRINTF("checking accepts for %u\n", (u8)(s & SHENG32_STATE_MASK));
-
- const struct sstate_aux *aux = get_aux32(sh, s);
-
- if (!aux->accept) {
- return 0;
- }
-
- return sheng32HasAccept(sh, aux, report);
-}
-
-char nfaExecSheng32_inAnyAccept(const struct NFA *n, struct mq *q) {
- assert(n && q);
-
- const struct sheng32 *sh = get_sheng32(n);
- u8 s = *(const u8 *)q->state;
- DEBUG_PRINTF("checking accepts for %u\n", (u8)(s & SHENG32_STATE_MASK));
-
- const struct sstate_aux *aux = get_aux32(sh, s);
- return !!aux->accept;
-}
-
-char nfaExecSheng32_testEOD(const struct NFA *nfa, const char *state,
- UNUSED const char *streamState, u64a offset,
- NfaCallback cb, void *ctxt) {
- assert(nfa);
-
- const struct sheng32 *sh = get_sheng32(nfa);
- u8 s = *(const u8 *)state;
- DEBUG_PRINTF("checking EOD accepts for %u\n", (u8)(s & SHENG32_STATE_MASK));
-
- const struct sstate_aux *aux = get_aux32(sh, s);
-
- if (!aux->accept_eod) {
- return MO_CONTINUE_MATCHING;
- }
-
- return fireReports32(sh, cb, ctxt, s, offset, NULL, NULL, 1);
-}
-
-char nfaExecSheng32_reportCurrent(const struct NFA *n, struct mq *q) {
- const struct sheng32 *sh = (const struct sheng32 *)getImplNfa(n);
- NfaCallback cb = q->cb;
- void *ctxt = q->context;
- u8 s = *(u8 *)q->state;
- const struct sstate_aux *aux = get_aux32(sh, s);
- u64a offset = q_cur_offset(q);
- u8 cached_state_id = 0;
- ReportID cached_report_id = 0;
- assert(q_cur_type(q) == MQE_START);
-
- if (aux->accept) {
- if (sh->flags & SHENG_FLAG_SINGLE_REPORT) {
- fireSingleReport(cb, ctxt, sh->report, offset);
- } else {
- fireReports32(sh, cb, ctxt, s, offset, &cached_state_id,
- &cached_report_id, 0);
- }
- }
-
- return 0;
-}
-
-char nfaExecSheng32_initCompressedState(const struct NFA *nfa, u64a offset,
- void *state, UNUSED u8 key) {
- const struct sheng32 *sh = get_sheng32(nfa);
- u8 *s = (u8 *)state;
- *s = offset ? sh->floating: sh->anchored;
- return !(*s & SHENG32_STATE_DEAD);
-}
-
-char nfaExecSheng32_queueInitState(const struct NFA *nfa, struct mq *q) {
- assert(nfa->scratchStateSize == 1);
-
- /* starting in floating state */
- const struct sheng32 *sh = get_sheng32(nfa);
- *(u8 *)q->state = sh->floating;
- DEBUG_PRINTF("starting in floating state\n");
- return 0;
-}
-
-char nfaExecSheng32_queueCompressState(UNUSED const struct NFA *nfa,
- const struct mq *q, UNUSED s64a loc) {
- void *dest = q->streamState;
- const void *src = q->state;
- assert(nfa->scratchStateSize == 1);
- assert(nfa->streamStateSize == 1);
- *(u8 *)dest = *(const u8 *)src;
- return 0;
-}
-
-char nfaExecSheng32_expandState(UNUSED const struct NFA *nfa, void *dest,
- const void *src, UNUSED u64a offset,
- UNUSED u8 key) {
- assert(nfa->scratchStateSize == 1);
- assert(nfa->streamStateSize == 1);
- *(u8 *)dest = *(const u8 *)src;
- return 0;
-}
-
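
Because scratchStateSize and streamStateSize are both 1 for sheng engines, compress and expand degenerate to single-byte copies, which is all the two functions above do. In effect:

    u8 scratch;              /* expanded scratch state  */
    char stream;             /* compressed stream state */
    stream  = (char)scratch; /* queueCompressState      */
    scratch = (u8)stream;    /* expandState             */
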
-// Sheng64
-static really_inline
-char runSheng64Cb(const struct sheng64 *sh, NfaCallback cb, void *ctxt,
- u64a offset, u8 *const cached_accept_state,
- ReportID *const cached_accept_id, const u8 *cur_buf,
- const u8 *start, const u8 *end, u8 can_die,
- u8 single, const u8 **scanned, u8 *state) {
- DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in callback mode\n",
- (u64a)(end - start), offset);
- DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
- (s64a)(end - cur_buf));
- DEBUG_PRINTF("can die: %u single: %u\n", !!can_die, !!single);
- int rv;
- /* scan and report all matches */
- if (can_die) {
- rv = sheng64_4_cod(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- rv = sheng64_cod(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- *scanned, end, scanned);
- } else {
- rv = sheng64_4_co(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- rv = sheng64_co(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- *scanned, end, scanned);
- }
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- return MO_ALIVE;
-}
-
-static really_inline
-void runSheng64Nm(const struct sheng64 *sh, NfaCallback cb, void *ctxt,
- u64a offset, u8 *const cached_accept_state,
- ReportID *const cached_accept_id, const u8 *cur_buf,
- const u8 *start, const u8 *end, u8 can_die,
- u8 single, const u8 **scanned, u8 *state) {
- DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in nomatch mode\n",
- (u64a)(end - start), offset);
- DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
- (s64a)(end - cur_buf));
- DEBUG_PRINTF("can die: %u single: %u\n", !!can_die, !!single);
- /* just scan the buffer */
- if (can_die) {
- sheng64_4_nmd(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- sheng64_nmd(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
- single, offset, cur_buf, *scanned, end, scanned);
- } else {
- sheng64_4_nm(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
- single, offset, cur_buf, start, end, scanned);
- sheng64_nm(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
- single, offset, cur_buf, *scanned, end, scanned);
- }
-}
-
-static really_inline
-char runSheng64Sam(const struct sheng64 *sh, NfaCallback cb, void *ctxt,
- u64a offset, u8 *const cached_accept_state,
- ReportID *const cached_accept_id, const u8 *cur_buf,
- const u8 *start, const u8 *end, u8 can_die,
- u8 single, const u8 **scanned, u8 *state) {
- DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in stop at match mode\n",
- (u64a)(end - start), offset);
- DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
- (s64a)(end - cur_buf));
- DEBUG_PRINTF("can die: %u single: %u\n", !!can_die, !!single);
- int rv;
- /* scan until first match */
- if (can_die) {
- rv = sheng64_4_samd(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- /* if we stopped before we expected, we found a match */
- if (rv == MO_MATCHES_PENDING) {
- return MO_MATCHES_PENDING;
- }
-
- rv = sheng64_samd(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- *scanned, end, scanned);
- } else {
- rv = sheng64_4_sam(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- start, end, scanned);
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- /* if we stopped before we expected, we found a match */
- if (rv == MO_MATCHES_PENDING) {
- return MO_MATCHES_PENDING;
- }
-
- rv = sheng64_sam(state, cb, ctxt, sh, cached_accept_state,
- cached_accept_id, single, offset, cur_buf,
- *scanned, end, scanned);
- }
- if (rv == MO_HALT_MATCHING) {
- return MO_DEAD;
- }
- /* if we stopped before we expected, we found a match */
- if (rv == MO_MATCHES_PENDING) {
- return MO_MATCHES_PENDING;
- }
- return MO_ALIVE;
-}
-
-static never_inline
-char runSheng64(const struct sheng64 *sh, struct mq *q, s64a b_end,
- enum MatchMode mode) {
- u8 state = *(u8 *)q->state;
- u8 can_die = sh->flags & SHENG_FLAG_CAN_DIE;
- u8 single = sh->flags & SHENG_FLAG_SINGLE_REPORT;
-
- u8 cached_accept_state = 0;
- ReportID cached_accept_id = 0;
-
- DEBUG_PRINTF("starting Sheng64 execution in state %u\n",
- state & SHENG64_STATE_MASK);
-
- if (q->report_current) {
- DEBUG_PRINTF("reporting current pending matches\n");
- assert(sh);
-
- q->report_current = 0;
-
- int rv;
- if (single) {
- rv = fireSingleReport(q->cb, q->context, sh->report,
- q_cur_offset(q));
- } else {
- rv = fireReports64(sh, q->cb, q->context, state, q_cur_offset(q),
- &cached_accept_state, &cached_accept_id, 0);
- }
- if (rv == MO_HALT_MATCHING) {
- DEBUG_PRINTF("exiting in state %u\n", state & SHENG64_STATE_MASK);
- return MO_DEAD;
- }
-
- DEBUG_PRINTF("proceeding with matching\n");
- }
-
- assert(q_cur_type(q) == MQE_START);
- s64a start = q_cur_loc(q);
-
- DEBUG_PRINTF("offset: %lli, location: %lli, mode: %s\n", q->offset, start,
- mode == CALLBACK_OUTPUT ? "CALLBACK OUTPUT" :
- mode == NO_MATCHES ? "NO MATCHES" :
- mode == STOP_AT_MATCH ? "STOP AT MATCH" : "???");
-
- DEBUG_PRINTF("processing event @ %lli: %s\n", q->offset + q_cur_loc(q),
- q_cur_type(q) == MQE_START ? "START" :
- q_cur_type(q) == MQE_TOP ? "TOP" :
- q_cur_type(q) == MQE_END ? "END" : "???");
-
-    const u8 *cur_buf;
- if (start < 0) {
- DEBUG_PRINTF("negative location, scanning history\n");
- DEBUG_PRINTF("min location: %zd\n", -q->hlength);
- cur_buf = q->history + q->hlength;
- } else {
- DEBUG_PRINTF("positive location, scanning buffer\n");
- DEBUG_PRINTF("max location: %lli\n", b_end);
- cur_buf = q->buffer;
- }
-
-    /* if our queue event is past our end */
-    if (mode != NO_MATCHES && q_cur_loc(q) > b_end) {
-        DEBUG_PRINTF("current location past buffer end\n");
-        DEBUG_PRINTF("setting q location to %lli\n", b_end);
- DEBUG_PRINTF("exiting in state %u\n", state & SHENG64_STATE_MASK);
- q->items[q->cur].location = b_end;
- return MO_ALIVE;
- }
-
- q->cur++;
-
- s64a cur_start = start;
-
- while (1) {
- DEBUG_PRINTF("processing event @ %lli: %s\n", q->offset + q_cur_loc(q),
- q_cur_type(q) == MQE_START ? "START" :
- q_cur_type(q) == MQE_TOP ? "TOP" :
- q_cur_type(q) == MQE_END ? "END" : "???");
- s64a end = q_cur_loc(q);
- if (mode != NO_MATCHES) {
- end = MIN(end, b_end);
- }
- assert(end <= (s64a) q->length);
- s64a cur_end = end;
-
- /* we may cross the border between history and current buffer */
- if (cur_start < 0) {
- cur_end = MIN(0, cur_end);
- }
-
- DEBUG_PRINTF("start: %lli end: %lli\n", start, end);
-
- /* don't scan zero length buffer */
- if (cur_start != cur_end) {
-            const u8 *scanned = cur_buf;
- char rv;
-
- if (mode == NO_MATCHES) {
- runSheng64Nm(sh, q->cb, q->context, q->offset,
- &cached_accept_state, &cached_accept_id, cur_buf,
- cur_buf + cur_start, cur_buf + cur_end, can_die,
- single, &scanned, &state);
- } else if (mode == CALLBACK_OUTPUT) {
- rv = runSheng64Cb(sh, q->cb, q->context, q->offset,
- &cached_accept_state, &cached_accept_id,
- cur_buf, cur_buf + cur_start, cur_buf + cur_end,
- can_die, single, &scanned, &state);
- if (rv == MO_DEAD) {
- DEBUG_PRINTF("exiting in state %u\n",
- state & SHENG64_STATE_MASK);
- return MO_DEAD;
- }
- } else if (mode == STOP_AT_MATCH) {
- rv = runSheng64Sam(sh, q->cb, q->context, q->offset,
- &cached_accept_state, &cached_accept_id,
- cur_buf, cur_buf + cur_start,
- cur_buf + cur_end, can_die, single,
- &scanned, &state);
- if (rv == MO_DEAD) {
- DEBUG_PRINTF("exiting in state %u\n",
- state & SHENG64_STATE_MASK);
- return rv;
- } else if (rv == MO_MATCHES_PENDING) {
- assert(q->cur);
- DEBUG_PRINTF("found a match, setting q location to %zd\n",
- scanned - cur_buf + 1);
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location =
- scanned - cur_buf + 1; /* due to exiting early */
- *(u8 *)q->state = state;
- DEBUG_PRINTF("exiting in state %u\n",
- state & SHENG64_STATE_MASK);
- return rv;
- }
- } else {
- assert(!"invalid scanning mode!");
- }
- assert(scanned == cur_buf + cur_end);
-
- cur_start = cur_end;
- }
-
-        /* if our queue event is past our end */
-        if (mode != NO_MATCHES && q_cur_loc(q) > b_end) {
-            DEBUG_PRINTF("current location past buffer end\n");
-            DEBUG_PRINTF("setting q location to %lli\n", b_end);
- DEBUG_PRINTF("exiting in state %u\n", state & SHENG64_STATE_MASK);
- q->cur--;
- q->items[q->cur].type = MQE_START;
- q->items[q->cur].location = b_end;
- *(u8 *)q->state = state;
- return MO_ALIVE;
- }
-
- /* crossing over into actual buffer */
- if (cur_start == 0) {
- DEBUG_PRINTF("positive location, scanning buffer\n");
- DEBUG_PRINTF("max offset: %lli\n", b_end);
- cur_buf = q->buffer;
- }
-
- /* continue scanning the same buffer */
- if (end != cur_end) {
- continue;
- }
-
- switch (q_cur_type(q)) {
- case MQE_END:
- *(u8 *)q->state = state;
- q->cur++;
- DEBUG_PRINTF("exiting in state %u\n", state & SHENG64_STATE_MASK);
- if (can_die) {
- return (state & SHENG64_STATE_DEAD) ? MO_DEAD : MO_ALIVE;
- }
- return MO_ALIVE;
- case MQE_TOP:
- if (q->offset + cur_start == 0) {
- DEBUG_PRINTF("Anchored start, going to state %u\n",
- sh->anchored);
- state = sh->anchored;
- } else {
- u8 new_state = get_aux64(sh, state)->top;
- DEBUG_PRINTF("Top event %u->%u\n", state & SHENG64_STATE_MASK,
- new_state & SHENG64_STATE_MASK);
- state = new_state;
- }
- break;
- default:
- assert(!"invalid queue event");
- break;
- }
- q->cur++;
- }
-}
-
-char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer,
- size_t length, NfaCallback cb, void *context) {
- DEBUG_PRINTF("smallwrite Sheng64\n");
- assert(n->type == SHENG_NFA_64);
- const struct sheng64 *sh = getImplNfa(n);
- u8 state = sh->anchored;
- u8 can_die = sh->flags & SHENG_FLAG_CAN_DIE;
- u8 single = sh->flags & SHENG_FLAG_SINGLE_REPORT;
- u8 cached_accept_state = 0;
- ReportID cached_accept_id = 0;
-
- /* scan and report all matches */
- int rv;
- s64a end = length;
- const u8 *scanned;
-
- rv = runSheng64Cb(sh, cb, context, offset, &cached_accept_state,
- &cached_accept_id, buffer, buffer, buffer + end, can_die,
- single, &scanned, &state);
- if (rv == MO_DEAD) {
- DEBUG_PRINTF("exiting in state %u\n",
- state & SHENG64_STATE_MASK);
- return MO_DEAD;
- }
-
- DEBUG_PRINTF("%u\n", state & SHENG64_STATE_MASK);
-
- const struct sstate_aux *aux = get_aux64(sh, state);
-
- if (aux->accept_eod) {
- DEBUG_PRINTF("Reporting EOD matches\n");
- fireReports64(sh, cb, context, state, end + offset,
- &cached_accept_state, &cached_accept_id, 1);
- }
-
- return state & SHENG64_STATE_DEAD ? MO_DEAD : MO_ALIVE;
-}
-
-char nfaExecSheng64_Q(const struct NFA *n, struct mq *q, s64a end) {
- const struct sheng64 *sh = get_sheng64(n);
- char rv = runSheng64(sh, q, end, CALLBACK_OUTPUT);
- return rv;
-}
-
-char nfaExecSheng64_Q2(const struct NFA *n, struct mq *q, s64a end) {
- const struct sheng64 *sh = get_sheng64(n);
- char rv = runSheng64(sh, q, end, STOP_AT_MATCH);
- return rv;
-}
-
-char nfaExecSheng64_QR(const struct NFA *n, struct mq *q, ReportID report) {
- assert(q_cur_type(q) == MQE_START);
-
- const struct sheng64 *sh = get_sheng64(n);
- char rv = runSheng64(sh, q, 0 /* end */, NO_MATCHES);
-
- if (rv && nfaExecSheng64_inAccept(n, report, q)) {
- return MO_MATCHES_PENDING;
- }
- return rv;
-}
-
-char nfaExecSheng64_inAccept(const struct NFA *n, ReportID report,
- struct mq *q) {
- assert(n && q);
-
- const struct sheng64 *sh = get_sheng64(n);
- u8 s = *(const u8 *)q->state;
- DEBUG_PRINTF("checking accepts for %u\n", (u8)(s & SHENG64_STATE_MASK));
-
- const struct sstate_aux *aux = get_aux64(sh, s);
-
- if (!aux->accept) {
- return 0;
- }
-
- return sheng64HasAccept(sh, aux, report);
-}
-
-char nfaExecSheng64_inAnyAccept(const struct NFA *n, struct mq *q) {
- assert(n && q);
-
- const struct sheng64 *sh = get_sheng64(n);
- u8 s = *(const u8 *)q->state;
- DEBUG_PRINTF("checking accepts for %u\n", (u8)(s & SHENG64_STATE_MASK));
-
- const struct sstate_aux *aux = get_aux64(sh, s);
- return !!aux->accept;
-}
-
-char nfaExecSheng64_testEOD(const struct NFA *nfa, const char *state,
- UNUSED const char *streamState, u64a offset,
- NfaCallback cb, void *ctxt) {
- assert(nfa);
-
- const struct sheng64 *sh = get_sheng64(nfa);
- u8 s = *(const u8 *)state;
- DEBUG_PRINTF("checking EOD accepts for %u\n", (u8)(s & SHENG64_STATE_MASK));
-
- const struct sstate_aux *aux = get_aux64(sh, s);
-
- if (!aux->accept_eod) {
- return MO_CONTINUE_MATCHING;
- }
-
- return fireReports64(sh, cb, ctxt, s, offset, NULL, NULL, 1);
-}
-
-char nfaExecSheng64_reportCurrent(const struct NFA *n, struct mq *q) {
- const struct sheng64 *sh = (const struct sheng64 *)getImplNfa(n);
- NfaCallback cb = q->cb;
- void *ctxt = q->context;
- u8 s = *(u8 *)q->state;
- const struct sstate_aux *aux = get_aux64(sh, s);
- u64a offset = q_cur_offset(q);
- u8 cached_state_id = 0;
- ReportID cached_report_id = 0;
- assert(q_cur_type(q) == MQE_START);
-
- if (aux->accept) {
- if (sh->flags & SHENG_FLAG_SINGLE_REPORT) {
- fireSingleReport(cb, ctxt, sh->report, offset);
- } else {
- fireReports64(sh, cb, ctxt, s, offset, &cached_state_id,
- &cached_report_id, 0);
- }
- }
-
- return 0;
-}
-
-char nfaExecSheng64_initCompressedState(const struct NFA *nfa, u64a offset,
- void *state, UNUSED u8 key) {
- const struct sheng64 *sh = get_sheng64(nfa);
- u8 *s = (u8 *)state;
-    *s = offset ? sh->floating : sh->anchored;
- return !(*s & SHENG64_STATE_DEAD);
-}
-
-char nfaExecSheng64_queueInitState(const struct NFA *nfa, struct mq *q) {
- assert(nfa->scratchStateSize == 1);
-
- /* starting in floating state */
- const struct sheng64 *sh = get_sheng64(nfa);
- *(u8 *)q->state = sh->floating;
- DEBUG_PRINTF("starting in floating state\n");
- return 0;
-}
-
-char nfaExecSheng64_queueCompressState(UNUSED const struct NFA *nfa,
- const struct mq *q, UNUSED s64a loc) {
- void *dest = q->streamState;
- const void *src = q->state;
- assert(nfa->scratchStateSize == 1);
- assert(nfa->streamStateSize == 1);
- *(u8 *)dest = *(const u8 *)src;
- return 0;
-}
-
-char nfaExecSheng64_expandState(UNUSED const struct NFA *nfa, void *dest,
- const void *src, UNUSED u64a offset,
- UNUSED u8 key) {
- assert(nfa->scratchStateSize == 1);
- assert(nfa->streamStateSize == 1);
- *(u8 *)dest = *(const u8 *)src;
- return 0;
-}
-#endif // end of HAVE_AVX512VBMI
+
+#if defined(HAVE_AVX512VBMI)
+// Sheng32
+static really_inline
+char runSheng32Cb(const struct sheng32 *sh, NfaCallback cb, void *ctxt,
+ u64a offset, u8 *const cached_accept_state,
+ ReportID *const cached_accept_id, const u8 *cur_buf,
+ const u8 *start, const u8 *end, u8 can_die,
+ u8 has_accel, u8 single, const u8 **scanned, u8 *state) {
+ DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in callback mode\n",
+ (u64a)(end - start), offset);
+ DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
+ (s64a)(end - cur_buf));
+ DEBUG_PRINTF("can die: %u has accel: %u single: %u\n", !!can_die,
+ !!has_accel, !!single);
+ int rv;
+ /* scan and report all matches */
+ if (can_die) {
+ if (has_accel) {
+ rv = sheng32_4_coda(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ } else {
+ rv = sheng32_4_cod(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ rv = sheng32_cod(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ *scanned, end, scanned);
+ } else {
+ if (has_accel) {
+ rv = sheng32_4_coa(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ } else {
+ rv = sheng32_4_co(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ rv = sheng32_co(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ *scanned, end, scanned);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ return MO_ALIVE;
+}
+
+static really_inline
+void runSheng32Nm(const struct sheng32 *sh, NfaCallback cb, void *ctxt,
+ u64a offset, u8 *const cached_accept_state,
+ ReportID *const cached_accept_id, const u8 *cur_buf,
+ const u8 *start, const u8 *end, u8 can_die, u8 has_accel,
+ u8 single, const u8 **scanned, u8 *state) {
+ DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in nomatch mode\n",
+ (u64a)(end - start), offset);
+ DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
+ (s64a)(end - cur_buf));
+ DEBUG_PRINTF("can die: %u has accel: %u single: %u\n", !!can_die,
+ !!has_accel, !!single);
+ /* just scan the buffer */
+ if (can_die) {
+ if (has_accel) {
+ sheng32_4_nmda(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ } else {
+ sheng32_4_nmd(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ }
+ sheng32_nmd(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
+ single, offset, cur_buf, *scanned, end, scanned);
+ } else {
+ sheng32_4_nm(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
+ single, offset, cur_buf, start, end, scanned);
+ sheng32_nm(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
+ single, offset, cur_buf, *scanned, end, scanned);
+ }
+}
+
+static really_inline
+char runSheng32Sam(const struct sheng32 *sh, NfaCallback cb, void *ctxt,
+ u64a offset, u8 *const cached_accept_state,
+ ReportID *const cached_accept_id, const u8 *cur_buf,
+ const u8 *start, const u8 *end, u8 can_die, u8 has_accel,
+ u8 single, const u8 **scanned, u8 *state) {
+ DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in stop at match mode\n",
+ (u64a)(end - start), offset);
+ DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
+ (s64a)(end - cur_buf));
+ DEBUG_PRINTF("can die: %u has accel: %u single: %u\n", !!can_die,
+ !!has_accel, !!single);
+ int rv;
+ /* scan until first match */
+ if (can_die) {
+ if (has_accel) {
+ rv = sheng32_4_samda(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ } else {
+ rv = sheng32_4_samd(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ /* if we stopped before we expected, we found a match */
+ if (rv == MO_MATCHES_PENDING) {
+ return MO_MATCHES_PENDING;
+ }
+
+ rv = sheng32_samd(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ *scanned, end, scanned);
+ } else {
+ if (has_accel) {
+ rv = sheng32_4_sama(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ } else {
+ rv = sheng32_4_sam(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ /* if we stopped before we expected, we found a match */
+ if (rv == MO_MATCHES_PENDING) {
+ return MO_MATCHES_PENDING;
+ }
+
+ rv = sheng32_sam(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ *scanned, end, scanned);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ /* if we stopped before we expected, we found a match */
+ if (rv == MO_MATCHES_PENDING) {
+ return MO_MATCHES_PENDING;
+ }
+ return MO_ALIVE;
+}
+
+static never_inline
+char runSheng32(const struct sheng32 *sh, struct mq *q, s64a b_end,
+ enum MatchMode mode) {
+ u8 state = *(u8 *)q->state;
+ u8 can_die = sh->flags & SHENG_FLAG_CAN_DIE;
+ u8 has_accel = sh->flags & SHENG_FLAG_HAS_ACCEL;
+ u8 single = sh->flags & SHENG_FLAG_SINGLE_REPORT;
+
+ u8 cached_accept_state = 0;
+ ReportID cached_accept_id = 0;
+
+ DEBUG_PRINTF("starting Sheng32 execution in state %u\n",
+ state & SHENG32_STATE_MASK);
+
+ if (q->report_current) {
+ DEBUG_PRINTF("reporting current pending matches\n");
+ assert(sh);
+
+ q->report_current = 0;
+
+ int rv;
+ if (single) {
+ rv = fireSingleReport(q->cb, q->context, sh->report,
+ q_cur_offset(q));
+ } else {
+ rv = fireReports32(sh, q->cb, q->context, state, q_cur_offset(q),
+ &cached_accept_state, &cached_accept_id, 0);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ DEBUG_PRINTF("exiting in state %u\n", state & SHENG32_STATE_MASK);
+ return MO_DEAD;
+ }
+
+ DEBUG_PRINTF("proceeding with matching\n");
+ }
+
+ assert(q_cur_type(q) == MQE_START);
+ s64a start = q_cur_loc(q);
+
+ DEBUG_PRINTF("offset: %lli, location: %lli, mode: %s\n", q->offset, start,
+ mode == CALLBACK_OUTPUT ? "CALLBACK OUTPUT" :
+ mode == NO_MATCHES ? "NO MATCHES" :
+ mode == STOP_AT_MATCH ? "STOP AT MATCH" : "???");
+
+ DEBUG_PRINTF("processing event @ %lli: %s\n", q->offset + q_cur_loc(q),
+ q_cur_type(q) == MQE_START ? "START" :
+ q_cur_type(q) == MQE_TOP ? "TOP" :
+ q_cur_type(q) == MQE_END ? "END" : "???");
+
+    const u8 *cur_buf;
+ if (start < 0) {
+ DEBUG_PRINTF("negative location, scanning history\n");
+ DEBUG_PRINTF("min location: %zd\n", -q->hlength);
+ cur_buf = q->history + q->hlength;
+ } else {
+ DEBUG_PRINTF("positive location, scanning buffer\n");
+ DEBUG_PRINTF("max location: %lli\n", b_end);
+ cur_buf = q->buffer;
+ }
+
+    /* if our queue event is past our end */
+    if (mode != NO_MATCHES && q_cur_loc(q) > b_end) {
+        DEBUG_PRINTF("current location past buffer end\n");
+        DEBUG_PRINTF("setting q location to %lli\n", b_end);
+ DEBUG_PRINTF("exiting in state %u\n", state & SHENG32_STATE_MASK);
+ q->items[q->cur].location = b_end;
+ return MO_ALIVE;
+ }
+
+ q->cur++;
+
+ s64a cur_start = start;
+
+ while (1) {
+ DEBUG_PRINTF("processing event @ %lli: %s\n", q->offset + q_cur_loc(q),
+ q_cur_type(q) == MQE_START ? "START" :
+ q_cur_type(q) == MQE_TOP ? "TOP" :
+ q_cur_type(q) == MQE_END ? "END" : "???");
+ s64a end = q_cur_loc(q);
+ if (mode != NO_MATCHES) {
+ end = MIN(end, b_end);
+ }
+ assert(end <= (s64a) q->length);
+ s64a cur_end = end;
+
+ /* we may cross the border between history and current buffer */
+ if (cur_start < 0) {
+ cur_end = MIN(0, cur_end);
+ }
+
+ DEBUG_PRINTF("start: %lli end: %lli\n", start, end);
+
+ /* don't scan zero length buffer */
+ if (cur_start != cur_end) {
+            const u8 *scanned = cur_buf;
+ char rv;
+
+ if (mode == NO_MATCHES) {
+ runSheng32Nm(sh, q->cb, q->context, q->offset,
+ &cached_accept_state, &cached_accept_id, cur_buf,
+ cur_buf + cur_start, cur_buf + cur_end, can_die,
+ has_accel, single, &scanned, &state);
+ } else if (mode == CALLBACK_OUTPUT) {
+ rv = runSheng32Cb(sh, q->cb, q->context, q->offset,
+ &cached_accept_state, &cached_accept_id,
+ cur_buf, cur_buf + cur_start, cur_buf + cur_end,
+ can_die, has_accel, single, &scanned, &state);
+ if (rv == MO_DEAD) {
+ DEBUG_PRINTF("exiting in state %u\n",
+ state & SHENG32_STATE_MASK);
+ return MO_DEAD;
+ }
+ } else if (mode == STOP_AT_MATCH) {
+ rv = runSheng32Sam(sh, q->cb, q->context, q->offset,
+ &cached_accept_state, &cached_accept_id,
+ cur_buf, cur_buf + cur_start,
+ cur_buf + cur_end, can_die, has_accel, single,
+ &scanned, &state);
+ if (rv == MO_DEAD) {
+ DEBUG_PRINTF("exiting in state %u\n",
+ state & SHENG32_STATE_MASK);
+ return rv;
+ } else if (rv == MO_MATCHES_PENDING) {
+ assert(q->cur);
+ DEBUG_PRINTF("found a match, setting q location to %zd\n",
+ scanned - cur_buf + 1);
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location =
+ scanned - cur_buf + 1; /* due to exiting early */
+ *(u8 *)q->state = state;
+ DEBUG_PRINTF("exiting in state %u\n",
+ state & SHENG32_STATE_MASK);
+ return rv;
+ }
+ } else {
+ assert(!"invalid scanning mode!");
+ }
+ assert(scanned == cur_buf + cur_end);
+
+ cur_start = cur_end;
+ }
+
+        /* if our queue event is past our end */
+        if (mode != NO_MATCHES && q_cur_loc(q) > b_end) {
+            DEBUG_PRINTF("current location past buffer end\n");
+            DEBUG_PRINTF("setting q location to %lli\n", b_end);
+ DEBUG_PRINTF("exiting in state %u\n", state & SHENG32_STATE_MASK);
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location = b_end;
+ *(u8 *)q->state = state;
+ return MO_ALIVE;
+ }
+
+ /* crossing over into actual buffer */
+ if (cur_start == 0) {
+ DEBUG_PRINTF("positive location, scanning buffer\n");
+ DEBUG_PRINTF("max offset: %lli\n", b_end);
+ cur_buf = q->buffer;
+ }
+
+ /* continue scanning the same buffer */
+ if (end != cur_end) {
+ continue;
+ }
+
+ switch (q_cur_type(q)) {
+ case MQE_END:
+ *(u8 *)q->state = state;
+ q->cur++;
+ DEBUG_PRINTF("exiting in state %u\n", state & SHENG32_STATE_MASK);
+ if (can_die) {
+ return (state & SHENG32_STATE_DEAD) ? MO_DEAD : MO_ALIVE;
+ }
+ return MO_ALIVE;
+ case MQE_TOP:
+ if (q->offset + cur_start == 0) {
+ DEBUG_PRINTF("Anchored start, going to state %u\n",
+ sh->anchored);
+ state = sh->anchored;
+ } else {
+ u8 new_state = get_aux32(sh, state)->top;
+ DEBUG_PRINTF("Top event %u->%u\n", state & SHENG32_STATE_MASK,
+ new_state & SHENG32_STATE_MASK);
+ state = new_state;
+ }
+ break;
+ default:
+ assert(!"invalid queue event");
+ break;
+ }
+ q->cur++;
+ }
+}
+
+char nfaExecSheng32_B(const struct NFA *n, u64a offset, const u8 *buffer,
+ size_t length, NfaCallback cb, void *context) {
+ DEBUG_PRINTF("smallwrite Sheng32\n");
+ assert(n->type == SHENG_NFA_32);
+ const struct sheng32 *sh = getImplNfa(n);
+ u8 state = sh->anchored;
+ u8 can_die = sh->flags & SHENG_FLAG_CAN_DIE;
+ u8 has_accel = sh->flags & SHENG_FLAG_HAS_ACCEL;
+ u8 single = sh->flags & SHENG_FLAG_SINGLE_REPORT;
+ u8 cached_accept_state = 0;
+ ReportID cached_accept_id = 0;
+
+ /* scan and report all matches */
+ int rv;
+ s64a end = length;
+ const u8 *scanned;
+
+ rv = runSheng32Cb(sh, cb, context, offset, &cached_accept_state,
+ &cached_accept_id, buffer, buffer, buffer + end, can_die,
+ has_accel, single, &scanned, &state);
+ if (rv == MO_DEAD) {
+ DEBUG_PRINTF("exiting in state %u\n",
+ state & SHENG32_STATE_MASK);
+ return MO_DEAD;
+ }
+
+ DEBUG_PRINTF("%u\n", state & SHENG32_STATE_MASK);
+
+ const struct sstate_aux *aux = get_aux32(sh, state);
+
+ if (aux->accept_eod) {
+ DEBUG_PRINTF("Reporting EOD matches\n");
+ fireReports32(sh, cb, context, state, end + offset,
+ &cached_accept_state, &cached_accept_id, 1);
+ }
+
+ return state & SHENG32_STATE_DEAD ? MO_DEAD : MO_ALIVE;
+}
+
+char nfaExecSheng32_Q(const struct NFA *n, struct mq *q, s64a end) {
+ const struct sheng32 *sh = get_sheng32(n);
+ char rv = runSheng32(sh, q, end, CALLBACK_OUTPUT);
+ return rv;
+}
+
+char nfaExecSheng32_Q2(const struct NFA *n, struct mq *q, s64a end) {
+ const struct sheng32 *sh = get_sheng32(n);
+ char rv = runSheng32(sh, q, end, STOP_AT_MATCH);
+ return rv;
+}
+
+char nfaExecSheng32_QR(const struct NFA *n, struct mq *q, ReportID report) {
+ assert(q_cur_type(q) == MQE_START);
+
+ const struct sheng32 *sh = get_sheng32(n);
+ char rv = runSheng32(sh, q, 0 /* end */, NO_MATCHES);
+
+ if (rv && nfaExecSheng32_inAccept(n, report, q)) {
+ return MO_MATCHES_PENDING;
+ }
+ return rv;
+}
+
+char nfaExecSheng32_inAccept(const struct NFA *n, ReportID report,
+ struct mq *q) {
+ assert(n && q);
+
+ const struct sheng32 *sh = get_sheng32(n);
+ u8 s = *(const u8 *)q->state;
+ DEBUG_PRINTF("checking accepts for %u\n", (u8)(s & SHENG32_STATE_MASK));
+
+ const struct sstate_aux *aux = get_aux32(sh, s);
+
+ if (!aux->accept) {
+ return 0;
+ }
+
+ return sheng32HasAccept(sh, aux, report);
+}
+
+char nfaExecSheng32_inAnyAccept(const struct NFA *n, struct mq *q) {
+ assert(n && q);
+
+ const struct sheng32 *sh = get_sheng32(n);
+ u8 s = *(const u8 *)q->state;
+ DEBUG_PRINTF("checking accepts for %u\n", (u8)(s & SHENG32_STATE_MASK));
+
+ const struct sstate_aux *aux = get_aux32(sh, s);
+ return !!aux->accept;
+}
+
+char nfaExecSheng32_testEOD(const struct NFA *nfa, const char *state,
+ UNUSED const char *streamState, u64a offset,
+ NfaCallback cb, void *ctxt) {
+ assert(nfa);
+
+ const struct sheng32 *sh = get_sheng32(nfa);
+ u8 s = *(const u8 *)state;
+ DEBUG_PRINTF("checking EOD accepts for %u\n", (u8)(s & SHENG32_STATE_MASK));
+
+ const struct sstate_aux *aux = get_aux32(sh, s);
+
+ if (!aux->accept_eod) {
+ return MO_CONTINUE_MATCHING;
+ }
+
+ return fireReports32(sh, cb, ctxt, s, offset, NULL, NULL, 1);
+}
+
+char nfaExecSheng32_reportCurrent(const struct NFA *n, struct mq *q) {
+ const struct sheng32 *sh = (const struct sheng32 *)getImplNfa(n);
+ NfaCallback cb = q->cb;
+ void *ctxt = q->context;
+ u8 s = *(u8 *)q->state;
+ const struct sstate_aux *aux = get_aux32(sh, s);
+ u64a offset = q_cur_offset(q);
+ u8 cached_state_id = 0;
+ ReportID cached_report_id = 0;
+ assert(q_cur_type(q) == MQE_START);
+
+ if (aux->accept) {
+ if (sh->flags & SHENG_FLAG_SINGLE_REPORT) {
+ fireSingleReport(cb, ctxt, sh->report, offset);
+ } else {
+ fireReports32(sh, cb, ctxt, s, offset, &cached_state_id,
+ &cached_report_id, 0);
+ }
+ }
+
+ return 0;
+}
+
+char nfaExecSheng32_initCompressedState(const struct NFA *nfa, u64a offset,
+ void *state, UNUSED u8 key) {
+ const struct sheng32 *sh = get_sheng32(nfa);
+ u8 *s = (u8 *)state;
+    *s = offset ? sh->floating : sh->anchored;
+ return !(*s & SHENG32_STATE_DEAD);
+}
+
+char nfaExecSheng32_queueInitState(const struct NFA *nfa, struct mq *q) {
+ assert(nfa->scratchStateSize == 1);
+
+ /* starting in floating state */
+ const struct sheng32 *sh = get_sheng32(nfa);
+ *(u8 *)q->state = sh->floating;
+ DEBUG_PRINTF("starting in floating state\n");
+ return 0;
+}
+
+char nfaExecSheng32_queueCompressState(UNUSED const struct NFA *nfa,
+ const struct mq *q, UNUSED s64a loc) {
+ void *dest = q->streamState;
+ const void *src = q->state;
+ assert(nfa->scratchStateSize == 1);
+ assert(nfa->streamStateSize == 1);
+ *(u8 *)dest = *(const u8 *)src;
+ return 0;
+}
+
+char nfaExecSheng32_expandState(UNUSED const struct NFA *nfa, void *dest,
+ const void *src, UNUSED u64a offset,
+ UNUSED u8 key) {
+ assert(nfa->scratchStateSize == 1);
+ assert(nfa->streamStateSize == 1);
+ *(u8 *)dest = *(const u8 *)src;
+ return 0;
+}
+
+// Sheng64
+static really_inline
+char runSheng64Cb(const struct sheng64 *sh, NfaCallback cb, void *ctxt,
+ u64a offset, u8 *const cached_accept_state,
+ ReportID *const cached_accept_id, const u8 *cur_buf,
+ const u8 *start, const u8 *end, u8 can_die,
+ u8 single, const u8 **scanned, u8 *state) {
+ DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in callback mode\n",
+ (u64a)(end - start), offset);
+ DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
+ (s64a)(end - cur_buf));
+ DEBUG_PRINTF("can die: %u single: %u\n", !!can_die, !!single);
+ int rv;
+ /* scan and report all matches */
+ if (can_die) {
+ rv = sheng64_4_cod(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ rv = sheng64_cod(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ *scanned, end, scanned);
+ } else {
+ rv = sheng64_4_co(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ rv = sheng64_co(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ *scanned, end, scanned);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ return MO_ALIVE;
+}
+
+static really_inline
+void runSheng64Nm(const struct sheng64 *sh, NfaCallback cb, void *ctxt,
+ u64a offset, u8 *const cached_accept_state,
+ ReportID *const cached_accept_id, const u8 *cur_buf,
+ const u8 *start, const u8 *end, u8 can_die,
+ u8 single, const u8 **scanned, u8 *state) {
+ DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in nomatch mode\n",
+ (u64a)(end - start), offset);
+ DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
+ (s64a)(end - cur_buf));
+ DEBUG_PRINTF("can die: %u single: %u\n", !!can_die, !!single);
+ /* just scan the buffer */
+ if (can_die) {
+ sheng64_4_nmd(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ sheng64_nmd(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
+ single, offset, cur_buf, *scanned, end, scanned);
+ } else {
+ sheng64_4_nm(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
+ single, offset, cur_buf, start, end, scanned);
+ sheng64_nm(state, cb, ctxt, sh, cached_accept_state, cached_accept_id,
+ single, offset, cur_buf, *scanned, end, scanned);
+ }
+}
+
+static really_inline
+char runSheng64Sam(const struct sheng64 *sh, NfaCallback cb, void *ctxt,
+ u64a offset, u8 *const cached_accept_state,
+ ReportID *const cached_accept_id, const u8 *cur_buf,
+ const u8 *start, const u8 *end, u8 can_die,
+ u8 single, const u8 **scanned, u8 *state) {
+ DEBUG_PRINTF("Scanning %llu bytes (offset %llu) in stop at match mode\n",
+ (u64a)(end - start), offset);
+ DEBUG_PRINTF("start: %lli end: %lli\n", (s64a)(start - cur_buf),
+ (s64a)(end - cur_buf));
+ DEBUG_PRINTF("can die: %u single: %u\n", !!can_die, !!single);
+ int rv;
+ /* scan until first match */
+ if (can_die) {
+ rv = sheng64_4_samd(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ /* if we stopped before we expected, we found a match */
+ if (rv == MO_MATCHES_PENDING) {
+ return MO_MATCHES_PENDING;
+ }
+
+ rv = sheng64_samd(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ *scanned, end, scanned);
+ } else {
+ rv = sheng64_4_sam(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ start, end, scanned);
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ /* if we stopped before we expected, we found a match */
+ if (rv == MO_MATCHES_PENDING) {
+ return MO_MATCHES_PENDING;
+ }
+
+ rv = sheng64_sam(state, cb, ctxt, sh, cached_accept_state,
+ cached_accept_id, single, offset, cur_buf,
+ *scanned, end, scanned);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ return MO_DEAD;
+ }
+ /* if we stopped before we expected, we found a match */
+ if (rv == MO_MATCHES_PENDING) {
+ return MO_MATCHES_PENDING;
+ }
+ return MO_ALIVE;
+}
+
+static never_inline
+char runSheng64(const struct sheng64 *sh, struct mq *q, s64a b_end,
+ enum MatchMode mode) {
+ u8 state = *(u8 *)q->state;
+ u8 can_die = sh->flags & SHENG_FLAG_CAN_DIE;
+ u8 single = sh->flags & SHENG_FLAG_SINGLE_REPORT;
+
+ u8 cached_accept_state = 0;
+ ReportID cached_accept_id = 0;
+
+ DEBUG_PRINTF("starting Sheng64 execution in state %u\n",
+ state & SHENG64_STATE_MASK);
+
+ if (q->report_current) {
+ DEBUG_PRINTF("reporting current pending matches\n");
+ assert(sh);
+
+ q->report_current = 0;
+
+ int rv;
+ if (single) {
+ rv = fireSingleReport(q->cb, q->context, sh->report,
+ q_cur_offset(q));
+ } else {
+ rv = fireReports64(sh, q->cb, q->context, state, q_cur_offset(q),
+ &cached_accept_state, &cached_accept_id, 0);
+ }
+ if (rv == MO_HALT_MATCHING) {
+ DEBUG_PRINTF("exiting in state %u\n", state & SHENG64_STATE_MASK);
+ return MO_DEAD;
+ }
+
+ DEBUG_PRINTF("proceeding with matching\n");
+ }
+
+ assert(q_cur_type(q) == MQE_START);
+ s64a start = q_cur_loc(q);
+
+ DEBUG_PRINTF("offset: %lli, location: %lli, mode: %s\n", q->offset, start,
+ mode == CALLBACK_OUTPUT ? "CALLBACK OUTPUT" :
+ mode == NO_MATCHES ? "NO MATCHES" :
+ mode == STOP_AT_MATCH ? "STOP AT MATCH" : "???");
+
+ DEBUG_PRINTF("processing event @ %lli: %s\n", q->offset + q_cur_loc(q),
+ q_cur_type(q) == MQE_START ? "START" :
+ q_cur_type(q) == MQE_TOP ? "TOP" :
+ q_cur_type(q) == MQE_END ? "END" : "???");
+
+    const u8 *cur_buf;
+ if (start < 0) {
+ DEBUG_PRINTF("negative location, scanning history\n");
+ DEBUG_PRINTF("min location: %zd\n", -q->hlength);
+ cur_buf = q->history + q->hlength;
+ } else {
+ DEBUG_PRINTF("positive location, scanning buffer\n");
+ DEBUG_PRINTF("max location: %lli\n", b_end);
+ cur_buf = q->buffer;
+ }
+
+    /* if our queue event is past our end */
+    if (mode != NO_MATCHES && q_cur_loc(q) > b_end) {
+        DEBUG_PRINTF("current location past buffer end\n");
+        DEBUG_PRINTF("setting q location to %lli\n", b_end);
+ DEBUG_PRINTF("exiting in state %u\n", state & SHENG64_STATE_MASK);
+ q->items[q->cur].location = b_end;
+ return MO_ALIVE;
+ }
+
+ q->cur++;
+
+ s64a cur_start = start;
+
+ while (1) {
+ DEBUG_PRINTF("processing event @ %lli: %s\n", q->offset + q_cur_loc(q),
+ q_cur_type(q) == MQE_START ? "START" :
+ q_cur_type(q) == MQE_TOP ? "TOP" :
+ q_cur_type(q) == MQE_END ? "END" : "???");
+ s64a end = q_cur_loc(q);
+ if (mode != NO_MATCHES) {
+ end = MIN(end, b_end);
+ }
+ assert(end <= (s64a) q->length);
+ s64a cur_end = end;
+
+ /* we may cross the border between history and current buffer */
+ if (cur_start < 0) {
+ cur_end = MIN(0, cur_end);
+ }
+
+ DEBUG_PRINTF("start: %lli end: %lli\n", start, end);
+
+ /* don't scan zero length buffer */
+ if (cur_start != cur_end) {
+            const u8 *scanned = cur_buf;
+ char rv;
+
+ if (mode == NO_MATCHES) {
+ runSheng64Nm(sh, q->cb, q->context, q->offset,
+ &cached_accept_state, &cached_accept_id, cur_buf,
+ cur_buf + cur_start, cur_buf + cur_end, can_die,
+ single, &scanned, &state);
+ } else if (mode == CALLBACK_OUTPUT) {
+ rv = runSheng64Cb(sh, q->cb, q->context, q->offset,
+ &cached_accept_state, &cached_accept_id,
+ cur_buf, cur_buf + cur_start, cur_buf + cur_end,
+ can_die, single, &scanned, &state);
+ if (rv == MO_DEAD) {
+ DEBUG_PRINTF("exiting in state %u\n",
+ state & SHENG64_STATE_MASK);
+ return MO_DEAD;
+ }
+ } else if (mode == STOP_AT_MATCH) {
+ rv = runSheng64Sam(sh, q->cb, q->context, q->offset,
+ &cached_accept_state, &cached_accept_id,
+ cur_buf, cur_buf + cur_start,
+ cur_buf + cur_end, can_die, single,
+ &scanned, &state);
+ if (rv == MO_DEAD) {
+ DEBUG_PRINTF("exiting in state %u\n",
+ state & SHENG64_STATE_MASK);
+ return rv;
+ } else if (rv == MO_MATCHES_PENDING) {
+ assert(q->cur);
+ DEBUG_PRINTF("found a match, setting q location to %zd\n",
+ scanned - cur_buf + 1);
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location =
+ scanned - cur_buf + 1; /* due to exiting early */
+ *(u8 *)q->state = state;
+ DEBUG_PRINTF("exiting in state %u\n",
+ state & SHENG64_STATE_MASK);
+ return rv;
+ }
+ } else {
+ assert(!"invalid scanning mode!");
+ }
+ assert(scanned == cur_buf + cur_end);
+
+ cur_start = cur_end;
+ }
+
+        /* if our queue event is past our end */
+        if (mode != NO_MATCHES && q_cur_loc(q) > b_end) {
+            DEBUG_PRINTF("current location past buffer end\n");
+            DEBUG_PRINTF("setting q location to %lli\n", b_end);
+ DEBUG_PRINTF("exiting in state %u\n", state & SHENG64_STATE_MASK);
+ q->cur--;
+ q->items[q->cur].type = MQE_START;
+ q->items[q->cur].location = b_end;
+ *(u8 *)q->state = state;
+ return MO_ALIVE;
+ }
+
+ /* crossing over into actual buffer */
+ if (cur_start == 0) {
+ DEBUG_PRINTF("positive location, scanning buffer\n");
+ DEBUG_PRINTF("max offset: %lli\n", b_end);
+ cur_buf = q->buffer;
+ }
+
+ /* continue scanning the same buffer */
+ if (end != cur_end) {
+ continue;
+ }
+
+ switch (q_cur_type(q)) {
+ case MQE_END:
+ *(u8 *)q->state = state;
+ q->cur++;
+ DEBUG_PRINTF("exiting in state %u\n", state & SHENG64_STATE_MASK);
+ if (can_die) {
+ return (state & SHENG64_STATE_DEAD) ? MO_DEAD : MO_ALIVE;
+ }
+ return MO_ALIVE;
+ case MQE_TOP:
+ if (q->offset + cur_start == 0) {
+ DEBUG_PRINTF("Anchored start, going to state %u\n",
+ sh->anchored);
+ state = sh->anchored;
+ } else {
+ u8 new_state = get_aux64(sh, state)->top;
+ DEBUG_PRINTF("Top event %u->%u\n", state & SHENG64_STATE_MASK,
+ new_state & SHENG64_STATE_MASK);
+ state = new_state;
+ }
+ break;
+ default:
+ assert(!"invalid queue event");
+ break;
+ }
+ q->cur++;
+ }
+}
+
+char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer,
+ size_t length, NfaCallback cb, void *context) {
+ DEBUG_PRINTF("smallwrite Sheng64\n");
+ assert(n->type == SHENG_NFA_64);
+ const struct sheng64 *sh = getImplNfa(n);
+ u8 state = sh->anchored;
+ u8 can_die = sh->flags & SHENG_FLAG_CAN_DIE;
+ u8 single = sh->flags & SHENG_FLAG_SINGLE_REPORT;
+ u8 cached_accept_state = 0;
+ ReportID cached_accept_id = 0;
+
+ /* scan and report all matches */
+ int rv;
+ s64a end = length;
+ const u8 *scanned;
+
+ rv = runSheng64Cb(sh, cb, context, offset, &cached_accept_state,
+ &cached_accept_id, buffer, buffer, buffer + end, can_die,
+ single, &scanned, &state);
+ if (rv == MO_DEAD) {
+ DEBUG_PRINTF("exiting in state %u\n",
+ state & SHENG64_STATE_MASK);
+ return MO_DEAD;
+ }
+
+ DEBUG_PRINTF("%u\n", state & SHENG64_STATE_MASK);
+
+ const struct sstate_aux *aux = get_aux64(sh, state);
+
+ if (aux->accept_eod) {
+ DEBUG_PRINTF("Reporting EOD matches\n");
+ fireReports64(sh, cb, context, state, end + offset,
+ &cached_accept_state, &cached_accept_id, 1);
+ }
+
+ return state & SHENG64_STATE_DEAD ? MO_DEAD : MO_ALIVE;
+}
+
+char nfaExecSheng64_Q(const struct NFA *n, struct mq *q, s64a end) {
+ const struct sheng64 *sh = get_sheng64(n);
+ char rv = runSheng64(sh, q, end, CALLBACK_OUTPUT);
+ return rv;
+}
+
+char nfaExecSheng64_Q2(const struct NFA *n, struct mq *q, s64a end) {
+ const struct sheng64 *sh = get_sheng64(n);
+ char rv = runSheng64(sh, q, end, STOP_AT_MATCH);
+ return rv;
+}
+
+char nfaExecSheng64_QR(const struct NFA *n, struct mq *q, ReportID report) {
+ assert(q_cur_type(q) == MQE_START);
+
+ const struct sheng64 *sh = get_sheng64(n);
+ char rv = runSheng64(sh, q, 0 /* end */, NO_MATCHES);
+
+ if (rv && nfaExecSheng64_inAccept(n, report, q)) {
+ return MO_MATCHES_PENDING;
+ }
+ return rv;
+}
+
+char nfaExecSheng64_inAccept(const struct NFA *n, ReportID report,
+ struct mq *q) {
+ assert(n && q);
+
+ const struct sheng64 *sh = get_sheng64(n);
+ u8 s = *(const u8 *)q->state;
+ DEBUG_PRINTF("checking accepts for %u\n", (u8)(s & SHENG64_STATE_MASK));
+
+ const struct sstate_aux *aux = get_aux64(sh, s);
+
+ if (!aux->accept) {
+ return 0;
+ }
+
+ return sheng64HasAccept(sh, aux, report);
+}
+
+char nfaExecSheng64_inAnyAccept(const struct NFA *n, struct mq *q) {
+ assert(n && q);
+
+ const struct sheng64 *sh = get_sheng64(n);
+ u8 s = *(const u8 *)q->state;
+ DEBUG_PRINTF("checking accepts for %u\n", (u8)(s & SHENG64_STATE_MASK));
+
+ const struct sstate_aux *aux = get_aux64(sh, s);
+ return !!aux->accept;
+}
+
+char nfaExecSheng64_testEOD(const struct NFA *nfa, const char *state,
+ UNUSED const char *streamState, u64a offset,
+ NfaCallback cb, void *ctxt) {
+ assert(nfa);
+
+ const struct sheng64 *sh = get_sheng64(nfa);
+ u8 s = *(const u8 *)state;
+ DEBUG_PRINTF("checking EOD accepts for %u\n", (u8)(s & SHENG64_STATE_MASK));
+
+ const struct sstate_aux *aux = get_aux64(sh, s);
+
+ if (!aux->accept_eod) {
+ return MO_CONTINUE_MATCHING;
+ }
+
+ return fireReports64(sh, cb, ctxt, s, offset, NULL, NULL, 1);
+}
+
+char nfaExecSheng64_reportCurrent(const struct NFA *n, struct mq *q) {
+ const struct sheng64 *sh = (const struct sheng64 *)getImplNfa(n);
+ NfaCallback cb = q->cb;
+ void *ctxt = q->context;
+ u8 s = *(u8 *)q->state;
+ const struct sstate_aux *aux = get_aux64(sh, s);
+ u64a offset = q_cur_offset(q);
+ u8 cached_state_id = 0;
+ ReportID cached_report_id = 0;
+ assert(q_cur_type(q) == MQE_START);
+
+ if (aux->accept) {
+ if (sh->flags & SHENG_FLAG_SINGLE_REPORT) {
+ fireSingleReport(cb, ctxt, sh->report, offset);
+ } else {
+ fireReports64(sh, cb, ctxt, s, offset, &cached_state_id,
+ &cached_report_id, 0);
+ }
+ }
+
+ return 0;
+}
+
+char nfaExecSheng64_initCompressedState(const struct NFA *nfa, u64a offset,
+ void *state, UNUSED u8 key) {
+ const struct sheng64 *sh = get_sheng64(nfa);
+ u8 *s = (u8 *)state;
+    *s = offset ? sh->floating : sh->anchored;
+ return !(*s & SHENG64_STATE_DEAD);
+}
+
+char nfaExecSheng64_queueInitState(const struct NFA *nfa, struct mq *q) {
+ assert(nfa->scratchStateSize == 1);
+
+ /* starting in floating state */
+ const struct sheng64 *sh = get_sheng64(nfa);
+ *(u8 *)q->state = sh->floating;
+ DEBUG_PRINTF("starting in floating state\n");
+ return 0;
+}
+
+char nfaExecSheng64_queueCompressState(UNUSED const struct NFA *nfa,
+ const struct mq *q, UNUSED s64a loc) {
+ void *dest = q->streamState;
+ const void *src = q->state;
+ assert(nfa->scratchStateSize == 1);
+ assert(nfa->streamStateSize == 1);
+ *(u8 *)dest = *(const u8 *)src;
+ return 0;
+}
+
+char nfaExecSheng64_expandState(UNUSED const struct NFA *nfa, void *dest,
+ const void *src, UNUSED u64a offset,
+ UNUSED u8 key) {
+ assert(nfa->scratchStateSize == 1);
+ assert(nfa->streamStateSize == 1);
+ *(u8 *)dest = *(const u8 *)src;
+ return 0;
+}
+#endif // end of HAVE_AVX512VBMI
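
The restored sheng.c code above picks a specialized inner loop once per buffer: runSheng32Cb and its Nm/Sam/Sheng64 siblings branch on the can_die and has_accel flags and then hand the whole range to a loop compiled for exactly that combination, so the per-byte hot path carries no flag tests. A minimal sketch of that dispatch shape, with illustrative names (scan_accel_die, scan_plain_die, scan_accel and scan_plain are placeholders, not Hyperscan symbols):

/* Illustrative only: the scan_* functions stand in for loops compiled
 * for one flag combination each, like sheng32_4_coda vs sheng32_4_cod. */
#include <stdio.h>

typedef unsigned char u8;

enum { FLAG_CAN_DIE = 1, FLAG_HAS_ACCEL = 2 };

static int scan_accel_die(const u8 *s, const u8 *e) { (void)s; (void)e; return 1; }
static int scan_plain_die(const u8 *s, const u8 *e) { (void)s; (void)e; return 2; }
static int scan_accel(const u8 *s, const u8 *e)     { (void)s; (void)e; return 3; }
static int scan_plain(const u8 *s, const u8 *e)     { (void)s; (void)e; return 4; }

/* Same shape as runSheng32Cb: test the flags once per buffer, then run
 * the loop that was built for exactly that combination. */
static int scan(u8 flags, const u8 *start, const u8 *end) {
    if (flags & FLAG_CAN_DIE) {
        return (flags & FLAG_HAS_ACCEL) ? scan_accel_die(start, end)
                                        : scan_plain_die(start, end);
    }
    return (flags & FLAG_HAS_ACCEL) ? scan_accel(start, end)
                                    : scan_plain(start, end);
}

int main(void) {
    const u8 buf[] = "abc";
    printf("dispatched to loop %d\n",
           scan(FLAG_CAN_DIE | FLAG_HAS_ACCEL, buf, buf + 3));
    return 0;
}

Paying the flag test at buffer granularity instead of per byte is the point of the arrangement; the loop variants themselves are generated from a single template, as the sheng_defs.h hunks further down show.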
diff --git a/contrib/libs/hyperscan/src/nfa/sheng.h b/contrib/libs/hyperscan/src/nfa/sheng.h
index 6111c6dec5..7b90e3034f 100644
--- a/contrib/libs/hyperscan/src/nfa/sheng.h
+++ b/contrib/libs/hyperscan/src/nfa/sheng.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -58,86 +58,86 @@ char nfaExecSheng_reportCurrent(const struct NFA *n, struct mq *q);
char nfaExecSheng_B(const struct NFA *n, u64a offset, const u8 *buffer,
size_t length, NfaCallback cb, void *context);
-#if defined(HAVE_AVX512VBMI)
-#define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL
-#define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL
-
-char nfaExecSheng32_Q(const struct NFA *n, struct mq *q, s64a end);
-char nfaExecSheng32_Q2(const struct NFA *n, struct mq *q, s64a end);
-char nfaExecSheng32_QR(const struct NFA *n, struct mq *q, ReportID report);
-char nfaExecSheng32_inAccept(const struct NFA *n, ReportID report,
- struct mq *q);
-char nfaExecSheng32_inAnyAccept(const struct NFA *n, struct mq *q);
-char nfaExecSheng32_queueInitState(const struct NFA *nfa, struct mq *q);
-char nfaExecSheng32_queueCompressState(const struct NFA *nfa,
- const struct mq *q, s64a loc);
-char nfaExecSheng32_expandState(const struct NFA *nfa, void *dest,
- const void *src, u64a offset, u8 key);
-char nfaExecSheng32_initCompressedState(const struct NFA *nfa, u64a offset,
- void *state, u8 key);
-char nfaExecSheng32_testEOD(const struct NFA *nfa, const char *state,
- const char *streamState, u64a offset,
- NfaCallback callback, void *context);
-char nfaExecSheng32_reportCurrent(const struct NFA *n, struct mq *q);
-
-char nfaExecSheng32_B(const struct NFA *n, u64a offset, const u8 *buffer,
- size_t length, NfaCallback cb, void *context);
-
-#define nfaExecSheng64_B_Reverse NFA_API_NO_IMPL
-#define nfaExecSheng64_zombie_status NFA_API_ZOMBIE_NO_IMPL
-
-char nfaExecSheng64_Q(const struct NFA *n, struct mq *q, s64a end);
-char nfaExecSheng64_Q2(const struct NFA *n, struct mq *q, s64a end);
-char nfaExecSheng64_QR(const struct NFA *n, struct mq *q, ReportID report);
-char nfaExecSheng64_inAccept(const struct NFA *n, ReportID report,
- struct mq *q);
-char nfaExecSheng64_inAnyAccept(const struct NFA *n, struct mq *q);
-char nfaExecSheng64_queueInitState(const struct NFA *nfa, struct mq *q);
-char nfaExecSheng64_queueCompressState(const struct NFA *nfa,
- const struct mq *q, s64a loc);
-char nfaExecSheng64_expandState(const struct NFA *nfa, void *dest,
- const void *src, u64a offset, u8 key);
-char nfaExecSheng64_initCompressedState(const struct NFA *nfa, u64a offset,
- void *state, u8 key);
-char nfaExecSheng64_testEOD(const struct NFA *nfa, const char *state,
- const char *streamState, u64a offset,
- NfaCallback callback, void *context);
-char nfaExecSheng64_reportCurrent(const struct NFA *n, struct mq *q);
-
-char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer,
- size_t length, NfaCallback cb, void *context);
-
-#else // !HAVE_AVX512VBMI
-
-#define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL
-#define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL
-#define nfaExecSheng32_Q NFA_API_NO_IMPL
-#define nfaExecSheng32_Q2 NFA_API_NO_IMPL
-#define nfaExecSheng32_QR NFA_API_NO_IMPL
-#define nfaExecSheng32_inAccept NFA_API_NO_IMPL
-#define nfaExecSheng32_inAnyAccept NFA_API_NO_IMPL
-#define nfaExecSheng32_queueInitState NFA_API_NO_IMPL
-#define nfaExecSheng32_queueCompressState NFA_API_NO_IMPL
-#define nfaExecSheng32_expandState NFA_API_NO_IMPL
-#define nfaExecSheng32_initCompressedState NFA_API_NO_IMPL
-#define nfaExecSheng32_testEOD NFA_API_NO_IMPL
-#define nfaExecSheng32_reportCurrent NFA_API_NO_IMPL
-#define nfaExecSheng32_B NFA_API_NO_IMPL
-
-#define nfaExecSheng64_B_Reverse NFA_API_NO_IMPL
-#define nfaExecSheng64_zombie_status NFA_API_ZOMBIE_NO_IMPL
-#define nfaExecSheng64_Q NFA_API_NO_IMPL
-#define nfaExecSheng64_Q2 NFA_API_NO_IMPL
-#define nfaExecSheng64_QR NFA_API_NO_IMPL
-#define nfaExecSheng64_inAccept NFA_API_NO_IMPL
-#define nfaExecSheng64_inAnyAccept NFA_API_NO_IMPL
-#define nfaExecSheng64_queueInitState NFA_API_NO_IMPL
-#define nfaExecSheng64_queueCompressState NFA_API_NO_IMPL
-#define nfaExecSheng64_expandState NFA_API_NO_IMPL
-#define nfaExecSheng64_initCompressedState NFA_API_NO_IMPL
-#define nfaExecSheng64_testEOD NFA_API_NO_IMPL
-#define nfaExecSheng64_reportCurrent NFA_API_NO_IMPL
-#define nfaExecSheng64_B NFA_API_NO_IMPL
-#endif // end of HAVE_AVX512VBMI
-
+#if defined(HAVE_AVX512VBMI)
+#define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL
+#define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL
+
+char nfaExecSheng32_Q(const struct NFA *n, struct mq *q, s64a end);
+char nfaExecSheng32_Q2(const struct NFA *n, struct mq *q, s64a end);
+char nfaExecSheng32_QR(const struct NFA *n, struct mq *q, ReportID report);
+char nfaExecSheng32_inAccept(const struct NFA *n, ReportID report,
+ struct mq *q);
+char nfaExecSheng32_inAnyAccept(const struct NFA *n, struct mq *q);
+char nfaExecSheng32_queueInitState(const struct NFA *nfa, struct mq *q);
+char nfaExecSheng32_queueCompressState(const struct NFA *nfa,
+ const struct mq *q, s64a loc);
+char nfaExecSheng32_expandState(const struct NFA *nfa, void *dest,
+ const void *src, u64a offset, u8 key);
+char nfaExecSheng32_initCompressedState(const struct NFA *nfa, u64a offset,
+ void *state, u8 key);
+char nfaExecSheng32_testEOD(const struct NFA *nfa, const char *state,
+ const char *streamState, u64a offset,
+ NfaCallback callback, void *context);
+char nfaExecSheng32_reportCurrent(const struct NFA *n, struct mq *q);
+
+char nfaExecSheng32_B(const struct NFA *n, u64a offset, const u8 *buffer,
+ size_t length, NfaCallback cb, void *context);
+
+#define nfaExecSheng64_B_Reverse NFA_API_NO_IMPL
+#define nfaExecSheng64_zombie_status NFA_API_ZOMBIE_NO_IMPL
+
+char nfaExecSheng64_Q(const struct NFA *n, struct mq *q, s64a end);
+char nfaExecSheng64_Q2(const struct NFA *n, struct mq *q, s64a end);
+char nfaExecSheng64_QR(const struct NFA *n, struct mq *q, ReportID report);
+char nfaExecSheng64_inAccept(const struct NFA *n, ReportID report,
+ struct mq *q);
+char nfaExecSheng64_inAnyAccept(const struct NFA *n, struct mq *q);
+char nfaExecSheng64_queueInitState(const struct NFA *nfa, struct mq *q);
+char nfaExecSheng64_queueCompressState(const struct NFA *nfa,
+ const struct mq *q, s64a loc);
+char nfaExecSheng64_expandState(const struct NFA *nfa, void *dest,
+ const void *src, u64a offset, u8 key);
+char nfaExecSheng64_initCompressedState(const struct NFA *nfa, u64a offset,
+ void *state, u8 key);
+char nfaExecSheng64_testEOD(const struct NFA *nfa, const char *state,
+ const char *streamState, u64a offset,
+ NfaCallback callback, void *context);
+char nfaExecSheng64_reportCurrent(const struct NFA *n, struct mq *q);
+
+char nfaExecSheng64_B(const struct NFA *n, u64a offset, const u8 *buffer,
+ size_t length, NfaCallback cb, void *context);
+
+#else // !HAVE_AVX512VBMI
+
+#define nfaExecSheng32_B_Reverse NFA_API_NO_IMPL
+#define nfaExecSheng32_zombie_status NFA_API_ZOMBIE_NO_IMPL
+#define nfaExecSheng32_Q NFA_API_NO_IMPL
+#define nfaExecSheng32_Q2 NFA_API_NO_IMPL
+#define nfaExecSheng32_QR NFA_API_NO_IMPL
+#define nfaExecSheng32_inAccept NFA_API_NO_IMPL
+#define nfaExecSheng32_inAnyAccept NFA_API_NO_IMPL
+#define nfaExecSheng32_queueInitState NFA_API_NO_IMPL
+#define nfaExecSheng32_queueCompressState NFA_API_NO_IMPL
+#define nfaExecSheng32_expandState NFA_API_NO_IMPL
+#define nfaExecSheng32_initCompressedState NFA_API_NO_IMPL
+#define nfaExecSheng32_testEOD NFA_API_NO_IMPL
+#define nfaExecSheng32_reportCurrent NFA_API_NO_IMPL
+#define nfaExecSheng32_B NFA_API_NO_IMPL
+
+#define nfaExecSheng64_B_Reverse NFA_API_NO_IMPL
+#define nfaExecSheng64_zombie_status NFA_API_ZOMBIE_NO_IMPL
+#define nfaExecSheng64_Q NFA_API_NO_IMPL
+#define nfaExecSheng64_Q2 NFA_API_NO_IMPL
+#define nfaExecSheng64_QR NFA_API_NO_IMPL
+#define nfaExecSheng64_inAccept NFA_API_NO_IMPL
+#define nfaExecSheng64_inAnyAccept NFA_API_NO_IMPL
+#define nfaExecSheng64_queueInitState NFA_API_NO_IMPL
+#define nfaExecSheng64_queueCompressState NFA_API_NO_IMPL
+#define nfaExecSheng64_expandState NFA_API_NO_IMPL
+#define nfaExecSheng64_initCompressedState NFA_API_NO_IMPL
+#define nfaExecSheng64_testEOD NFA_API_NO_IMPL
+#define nfaExecSheng64_reportCurrent NFA_API_NO_IMPL
+#define nfaExecSheng64_B NFA_API_NO_IMPL
+#endif // end of HAVE_AVX512VBMI
+
#endif /* SHENG_H_ */
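
sheng.h above keeps a single call surface for the wide engines: when HAVE_AVX512VBMI is compiled out, every Sheng32/Sheng64 entry point is #define'd to the NFA_API_NO_IMPL stub, so the NFA dispatch tables build identically on every target. A toy sketch of the same fallback pattern, under hypothetical names (HAVE_WIDE_ENGINE, engine_exec and engine_no_impl are not Hyperscan identifiers):

/* Illustrative only: mirrors the HAVE_AVX512VBMI / NFA_API_NO_IMPL
 * arrangement with made-up names. */
#include <stdio.h>

#define HAVE_WIDE_ENGINE 0 /* flip to 1 to compile the real entry point */

#if HAVE_WIDE_ENGINE
static int engine_exec(const char *buf) { return buf[0] != '\0'; }
#else
/* stub with a compatible signature, so call sites compile unchanged */
static int engine_no_impl(const char *buf) { (void)buf; return -1; }
#define engine_exec engine_no_impl
#endif

int main(void) {
    printf("engine_exec -> %d\n", engine_exec("abc"));
    return 0;
}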
diff --git a/contrib/libs/hyperscan/src/nfa/sheng_defs.h b/contrib/libs/hyperscan/src/nfa/sheng_defs.h
index d14018829c..390af75221 100644
--- a/contrib/libs/hyperscan/src/nfa/sheng_defs.h
+++ b/contrib/libs/hyperscan/src/nfa/sheng_defs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -52,43 +52,43 @@ u8 hasInterestingStates(const u8 a, const u8 b, const u8 c, const u8 d) {
return (a | b | c | d) & (SHENG_STATE_FLAG_MASK);
}
-#if defined(HAVE_AVX512VBMI)
-static really_inline
-u8 isDeadState32(const u8 a) {
- return a & SHENG32_STATE_DEAD;
-}
-
-static really_inline
-u8 isAcceptState32(const u8 a) {
- return a & SHENG32_STATE_ACCEPT;
-}
-
-static really_inline
-u8 isAccelState32(const u8 a) {
- return a & SHENG32_STATE_ACCEL;
-}
-
-static really_inline
-u8 hasInterestingStates32(const u8 a, const u8 b, const u8 c, const u8 d) {
- return (a | b | c | d) & (SHENG32_STATE_FLAG_MASK);
-}
-
-static really_inline
-u8 isDeadState64(const u8 a) {
- return a & SHENG64_STATE_DEAD;
-}
-
-static really_inline
-u8 isAcceptState64(const u8 a) {
- return a & SHENG64_STATE_ACCEPT;
-}
-
-static really_inline
-u8 hasInterestingStates64(const u8 a, const u8 b, const u8 c, const u8 d) {
- return (a | b | c | d) & (SHENG64_STATE_FLAG_MASK);
-}
-#endif
-
+#if defined(HAVE_AVX512VBMI)
+static really_inline
+u8 isDeadState32(const u8 a) {
+ return a & SHENG32_STATE_DEAD;
+}
+
+static really_inline
+u8 isAcceptState32(const u8 a) {
+ return a & SHENG32_STATE_ACCEPT;
+}
+
+static really_inline
+u8 isAccelState32(const u8 a) {
+ return a & SHENG32_STATE_ACCEL;
+}
+
+static really_inline
+u8 hasInterestingStates32(const u8 a, const u8 b, const u8 c, const u8 d) {
+ return (a | b | c | d) & (SHENG32_STATE_FLAG_MASK);
+}
+
+static really_inline
+u8 isDeadState64(const u8 a) {
+ return a & SHENG64_STATE_DEAD;
+}
+
+static really_inline
+u8 isAcceptState64(const u8 a) {
+ return a & SHENG64_STATE_ACCEPT;
+}
+
+static really_inline
+u8 hasInterestingStates64(const u8 a, const u8 b, const u8 c, const u8 d) {
+ return (a | b | c | d) & (SHENG64_STATE_FLAG_MASK);
+}
+#endif
+
/* these functions should be optimized out, used by NO_MATCHES mode */
static really_inline
u8 dummyFunc4(UNUSED const u8 a, UNUSED const u8 b, UNUSED const u8 c,
@@ -108,162 +108,162 @@ u8 dummyFunc(UNUSED const u8 a) {
#define SHENG_IMPL sheng_cod
#define DEAD_FUNC isDeadState
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_cod
-#define DEAD_FUNC32 isDeadState32
-#define ACCEPT_FUNC32 isAcceptState32
-#define SHENG64_IMPL sheng64_cod
-#define DEAD_FUNC64 isDeadState64
-#define ACCEPT_FUNC64 isAcceptState64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_cod
+#define DEAD_FUNC32 isDeadState32
+#define ACCEPT_FUNC32 isAcceptState32
+#define SHENG64_IMPL sheng64_cod
+#define DEAD_FUNC64 isDeadState64
+#define ACCEPT_FUNC64 isAcceptState64
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl.h"
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef DEAD_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef DEAD_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* callback output, can't die */
#define SHENG_IMPL sheng_co
#define DEAD_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_co
-#define DEAD_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define SHENG64_IMPL sheng64_co
-#define DEAD_FUNC64 dummyFunc
-#define ACCEPT_FUNC64 isAcceptState64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_co
+#define DEAD_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define SHENG64_IMPL sheng64_co
+#define DEAD_FUNC64 dummyFunc
+#define ACCEPT_FUNC64 isAcceptState64
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl.h"
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef DEAD_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef DEAD_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* stop at match, can die */
#define SHENG_IMPL sheng_samd
#define DEAD_FUNC isDeadState
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_samd
-#define DEAD_FUNC32 isDeadState32
-#define ACCEPT_FUNC32 isAcceptState32
-#define SHENG64_IMPL sheng64_samd
-#define DEAD_FUNC64 isDeadState64
-#define ACCEPT_FUNC64 isAcceptState64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_samd
+#define DEAD_FUNC32 isDeadState32
+#define ACCEPT_FUNC32 isAcceptState32
+#define SHENG64_IMPL sheng64_samd
+#define DEAD_FUNC64 isDeadState64
+#define ACCEPT_FUNC64 isAcceptState64
+#endif
#define STOP_AT_MATCH 1
#include "sheng_impl.h"
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef DEAD_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef DEAD_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* stop at match, can't die */
#define SHENG_IMPL sheng_sam
#define DEAD_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_sam
-#define DEAD_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define SHENG64_IMPL sheng64_sam
-#define DEAD_FUNC64 dummyFunc
-#define ACCEPT_FUNC64 isAcceptState64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_sam
+#define DEAD_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define SHENG64_IMPL sheng64_sam
+#define DEAD_FUNC64 dummyFunc
+#define ACCEPT_FUNC64 isAcceptState64
+#endif
#define STOP_AT_MATCH 1
#include "sheng_impl.h"
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef DEAD_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef DEAD_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* no match, can die */
#define SHENG_IMPL sheng_nmd
#define DEAD_FUNC isDeadState
#define ACCEPT_FUNC dummyFunc
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_nmd
-#define DEAD_FUNC32 isDeadState32
-#define ACCEPT_FUNC32 dummyFunc
-#define SHENG64_IMPL sheng64_nmd
-#define DEAD_FUNC64 isDeadState64
-#define ACCEPT_FUNC64 dummyFunc
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_nmd
+#define DEAD_FUNC32 isDeadState32
+#define ACCEPT_FUNC32 dummyFunc
+#define SHENG64_IMPL sheng64_nmd
+#define DEAD_FUNC64 isDeadState64
+#define ACCEPT_FUNC64 dummyFunc
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl.h"
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef DEAD_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef DEAD_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* no match, can't die */
#define SHENG_IMPL sheng_nm
#define DEAD_FUNC dummyFunc
#define ACCEPT_FUNC dummyFunc
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_nm
-#define DEAD_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 dummyFunc
-#define SHENG64_IMPL sheng64_nm
-#define DEAD_FUNC64 dummyFunc
-#define ACCEPT_FUNC64 dummyFunc
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_nm
+#define DEAD_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 dummyFunc
+#define SHENG64_IMPL sheng64_nm
+#define DEAD_FUNC64 dummyFunc
+#define ACCEPT_FUNC64 dummyFunc
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl.h"
#undef SHENG_IMPL
#undef DEAD_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef DEAD_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef DEAD_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/*
@@ -277,16 +277,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC isAccelState
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_coda
-#define INTERESTING_FUNC32 hasInterestingStates32
-#define INNER_DEAD_FUNC32 isDeadState32
-#define OUTER_DEAD_FUNC32 dummyFunc
-#define INNER_ACCEL_FUNC32 isAccelState32
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_coda
+#define INTERESTING_FUNC32 hasInterestingStates32
+#define INNER_DEAD_FUNC32 isDeadState32
+#define OUTER_DEAD_FUNC32 dummyFunc
+#define INNER_ACCEL_FUNC32 isAccelState32
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define NO_SHENG64_IMPL
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -296,16 +296,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef NO_SHENG64_IMPL
+#endif
#undef STOP_AT_MATCH
/* callback output, can die, not accelerated */
@@ -316,20 +316,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_cod
-#define INTERESTING_FUNC32 hasInterestingStates32
-#define INNER_DEAD_FUNC32 isDeadState32
-#define OUTER_DEAD_FUNC32 dummyFunc
-#define INNER_ACCEL_FUNC32 dummyFunc
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define SHENG64_IMPL sheng64_4_cod
-#define INTERESTING_FUNC64 hasInterestingStates64
-#define INNER_DEAD_FUNC64 isDeadState64
-#define OUTER_DEAD_FUNC64 dummyFunc
-#define ACCEPT_FUNC64 isAcceptState64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_cod
+#define INTERESTING_FUNC32 hasInterestingStates32
+#define INNER_DEAD_FUNC32 isDeadState32
+#define OUTER_DEAD_FUNC32 dummyFunc
+#define INNER_ACCEL_FUNC32 dummyFunc
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define SHENG64_IMPL sheng64_4_cod
+#define INTERESTING_FUNC64 hasInterestingStates64
+#define INNER_DEAD_FUNC64 isDeadState64
+#define OUTER_DEAD_FUNC64 dummyFunc
+#define ACCEPT_FUNC64 isAcceptState64
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -339,20 +339,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef INTERESTING_FUNC64
-#undef INNER_DEAD_FUNC64
-#undef OUTER_DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef INTERESTING_FUNC64
+#undef INNER_DEAD_FUNC64
+#undef OUTER_DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* callback output, can't die, accelerated */
@@ -363,16 +363,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC isAccelState
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_coa
-#define INTERESTING_FUNC32 hasInterestingStates32
-#define INNER_DEAD_FUNC32 dummyFunc
-#define OUTER_DEAD_FUNC32 dummyFunc
-#define INNER_ACCEL_FUNC32 isAccelState32
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_coa
+#define INTERESTING_FUNC32 hasInterestingStates32
+#define INNER_DEAD_FUNC32 dummyFunc
+#define OUTER_DEAD_FUNC32 dummyFunc
+#define INNER_ACCEL_FUNC32 isAccelState32
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define NO_SHENG64_IMPL
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -382,16 +382,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef NO_SHENG64_IMPL
+#endif
#undef STOP_AT_MATCH
/* callback output, can't die, not accelerated */
@@ -402,20 +402,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_co
-#define INTERESTING_FUNC32 hasInterestingStates32
-#define INNER_DEAD_FUNC32 dummyFunc
-#define OUTER_DEAD_FUNC32 dummyFunc
-#define INNER_ACCEL_FUNC32 dummyFunc
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define SHENG64_IMPL sheng64_4_co
-#define INTERESTING_FUNC64 hasInterestingStates64
-#define INNER_DEAD_FUNC64 dummyFunc
-#define OUTER_DEAD_FUNC64 dummyFunc
-#define ACCEPT_FUNC64 isAcceptState64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_co
+#define INTERESTING_FUNC32 hasInterestingStates32
+#define INNER_DEAD_FUNC32 dummyFunc
+#define OUTER_DEAD_FUNC32 dummyFunc
+#define INNER_ACCEL_FUNC32 dummyFunc
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define SHENG64_IMPL sheng64_4_co
+#define INTERESTING_FUNC64 hasInterestingStates64
+#define INNER_DEAD_FUNC64 dummyFunc
+#define OUTER_DEAD_FUNC64 dummyFunc
+#define ACCEPT_FUNC64 isAcceptState64
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -425,20 +425,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef INTERESTING_FUNC64
-#undef INNER_DEAD_FUNC64
-#undef OUTER_DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef INTERESTING_FUNC64
+#undef INNER_DEAD_FUNC64
+#undef OUTER_DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* stop at match, can die, accelerated */
@@ -449,16 +449,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC isAccelState
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_samda
-#define INTERESTING_FUNC32 hasInterestingStates32
-#define INNER_DEAD_FUNC32 isDeadState32
-#define OUTER_DEAD_FUNC32 dummyFunc
-#define INNER_ACCEL_FUNC32 isAccelState32
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_samda
+#define INTERESTING_FUNC32 hasInterestingStates32
+#define INNER_DEAD_FUNC32 isDeadState32
+#define OUTER_DEAD_FUNC32 dummyFunc
+#define INNER_ACCEL_FUNC32 isAccelState32
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define NO_SHENG64_IMPL
+#endif
#define STOP_AT_MATCH 1
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -468,16 +468,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef NO_SHENG64_IMPL
+#endif
#undef STOP_AT_MATCH
/* stop at match, can die, not accelerated */
@@ -488,20 +488,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_samd
-#define INTERESTING_FUNC32 hasInterestingStates32
-#define INNER_DEAD_FUNC32 isDeadState32
-#define OUTER_DEAD_FUNC32 dummyFunc
-#define INNER_ACCEL_FUNC32 dummyFunc
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define SHENG64_IMPL sheng64_4_samd
-#define INTERESTING_FUNC64 hasInterestingStates64
-#define INNER_DEAD_FUNC64 isDeadState64
-#define OUTER_DEAD_FUNC64 dummyFunc
-#define ACCEPT_FUNC64 isAcceptState64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_samd
+#define INTERESTING_FUNC32 hasInterestingStates32
+#define INNER_DEAD_FUNC32 isDeadState32
+#define OUTER_DEAD_FUNC32 dummyFunc
+#define INNER_ACCEL_FUNC32 dummyFunc
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define SHENG64_IMPL sheng64_4_samd
+#define INTERESTING_FUNC64 hasInterestingStates64
+#define INNER_DEAD_FUNC64 isDeadState64
+#define OUTER_DEAD_FUNC64 dummyFunc
+#define ACCEPT_FUNC64 isAcceptState64
+#endif
#define STOP_AT_MATCH 1
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -511,20 +511,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef INTERESTING_FUNC64
-#undef INNER_DEAD_FUNC64
-#undef OUTER_DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef INTERESTING_FUNC64
+#undef INNER_DEAD_FUNC64
+#undef OUTER_DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* stop at match, can't die, accelerated */
@@ -535,16 +535,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC isAccelState
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_sama
-#define INTERESTING_FUNC32 hasInterestingStates32
-#define INNER_DEAD_FUNC32 dummyFunc
-#define OUTER_DEAD_FUNC32 dummyFunc
-#define INNER_ACCEL_FUNC32 isAccelState32
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_sama
+#define INTERESTING_FUNC32 hasInterestingStates32
+#define INNER_DEAD_FUNC32 dummyFunc
+#define OUTER_DEAD_FUNC32 dummyFunc
+#define INNER_ACCEL_FUNC32 isAccelState32
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define NO_SHENG64_IMPL
+#endif
#define STOP_AT_MATCH 1
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -554,16 +554,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef NO_SHENG64_IMPL
+#endif
#undef STOP_AT_MATCH
/* stop at match, can't die, not accelerated */
@@ -574,20 +574,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC isAcceptState
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_sam
-#define INTERESTING_FUNC32 hasInterestingStates32
-#define INNER_DEAD_FUNC32 dummyFunc
-#define OUTER_DEAD_FUNC32 dummyFunc
-#define INNER_ACCEL_FUNC32 dummyFunc
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 isAcceptState32
-#define SHENG64_IMPL sheng64_4_sam
-#define INTERESTING_FUNC64 hasInterestingStates64
-#define INNER_DEAD_FUNC64 dummyFunc
-#define OUTER_DEAD_FUNC64 dummyFunc
-#define ACCEPT_FUNC64 isAcceptState64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_sam
+#define INTERESTING_FUNC32 hasInterestingStates32
+#define INNER_DEAD_FUNC32 dummyFunc
+#define OUTER_DEAD_FUNC32 dummyFunc
+#define INNER_ACCEL_FUNC32 dummyFunc
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 isAcceptState32
+#define SHENG64_IMPL sheng64_4_sam
+#define INTERESTING_FUNC64 hasInterestingStates64
+#define INNER_DEAD_FUNC64 dummyFunc
+#define OUTER_DEAD_FUNC64 dummyFunc
+#define ACCEPT_FUNC64 isAcceptState64
+#endif
#define STOP_AT_MATCH 1
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -597,20 +597,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef INTERESTING_FUNC64
-#undef INNER_DEAD_FUNC64
-#undef OUTER_DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef INTERESTING_FUNC64
+#undef INNER_DEAD_FUNC64
+#undef OUTER_DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* no-match have interesting func as dummy, and die/accel checks are outer */
@@ -623,16 +623,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC isAccelState
#define ACCEPT_FUNC dummyFunc
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_nmda
-#define INTERESTING_FUNC32 dummyFunc4
-#define INNER_DEAD_FUNC32 dummyFunc
-#define OUTER_DEAD_FUNC32 isDeadState32
-#define INNER_ACCEL_FUNC32 dummyFunc
-#define OUTER_ACCEL_FUNC32 isAccelState32
-#define ACCEPT_FUNC32 dummyFunc
-#define NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_nmda
+#define INTERESTING_FUNC32 dummyFunc4
+#define INNER_DEAD_FUNC32 dummyFunc
+#define OUTER_DEAD_FUNC32 isDeadState32
+#define INNER_ACCEL_FUNC32 dummyFunc
+#define OUTER_ACCEL_FUNC32 isAccelState32
+#define ACCEPT_FUNC32 dummyFunc
+#define NO_SHENG64_IMPL
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -642,16 +642,16 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef NO_SHENG64_IMPL
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef NO_SHENG64_IMPL
+#endif
#undef STOP_AT_MATCH
/* no match, can die, not accelerated */
@@ -662,20 +662,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC dummyFunc
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_nmd
-#define INTERESTING_FUNC32 dummyFunc4
-#define INNER_DEAD_FUNC32 dummyFunc
-#define OUTER_DEAD_FUNC32 isDeadState32
-#define INNER_ACCEL_FUNC32 dummyFunc
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 dummyFunc
-#define SHENG64_IMPL sheng64_4_nmd
-#define INTERESTING_FUNC64 dummyFunc4
-#define INNER_DEAD_FUNC64 dummyFunc
-#define OUTER_DEAD_FUNC64 isDeadState64
-#define ACCEPT_FUNC64 dummyFunc
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_nmd
+#define INTERESTING_FUNC32 dummyFunc4
+#define INNER_DEAD_FUNC32 dummyFunc
+#define OUTER_DEAD_FUNC32 isDeadState32
+#define INNER_ACCEL_FUNC32 dummyFunc
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 dummyFunc
+#define SHENG64_IMPL sheng64_4_nmd
+#define INTERESTING_FUNC64 dummyFunc4
+#define INNER_DEAD_FUNC64 dummyFunc
+#define OUTER_DEAD_FUNC64 isDeadState64
+#define ACCEPT_FUNC64 dummyFunc
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -685,20 +685,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef INTERESTING_FUNC64
-#undef INNER_DEAD_FUNC64
-#undef OUTER_DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef INTERESTING_FUNC64
+#undef INNER_DEAD_FUNC64
+#undef OUTER_DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
/* there is no performance benefit in accelerating a no-match case that can't
@@ -712,20 +712,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#define INNER_ACCEL_FUNC dummyFunc
#define OUTER_ACCEL_FUNC dummyFunc
#define ACCEPT_FUNC dummyFunc
-#if defined(HAVE_AVX512VBMI)
-#define SHENG32_IMPL sheng32_4_nm
-#define INTERESTING_FUNC32 dummyFunc4
-#define INNER_DEAD_FUNC32 dummyFunc
-#define OUTER_DEAD_FUNC32 dummyFunc
-#define INNER_ACCEL_FUNC32 dummyFunc
-#define OUTER_ACCEL_FUNC32 dummyFunc
-#define ACCEPT_FUNC32 dummyFunc
-#define SHENG64_IMPL sheng64_4_nm
-#define INTERESTING_FUNC64 dummyFunc4
-#define INNER_DEAD_FUNC64 dummyFunc
-#define OUTER_DEAD_FUNC64 dummyFunc
-#define ACCEPT_FUNC64 dummyFunc
-#endif
+#if defined(HAVE_AVX512VBMI)
+#define SHENG32_IMPL sheng32_4_nm
+#define INTERESTING_FUNC32 dummyFunc4
+#define INNER_DEAD_FUNC32 dummyFunc
+#define OUTER_DEAD_FUNC32 dummyFunc
+#define INNER_ACCEL_FUNC32 dummyFunc
+#define OUTER_ACCEL_FUNC32 dummyFunc
+#define ACCEPT_FUNC32 dummyFunc
+#define SHENG64_IMPL sheng64_4_nm
+#define INTERESTING_FUNC64 dummyFunc4
+#define INNER_DEAD_FUNC64 dummyFunc
+#define OUTER_DEAD_FUNC64 dummyFunc
+#define ACCEPT_FUNC64 dummyFunc
+#endif
#define STOP_AT_MATCH 0
#include "sheng_impl4.h"
#undef SHENG_IMPL
@@ -735,20 +735,20 @@ u8 dummyFunc(UNUSED const u8 a) {
#undef INNER_ACCEL_FUNC
#undef OUTER_ACCEL_FUNC
#undef ACCEPT_FUNC
-#if defined(HAVE_AVX512VBMI)
-#undef SHENG32_IMPL
-#undef INTERESTING_FUNC32
-#undef INNER_DEAD_FUNC32
-#undef OUTER_DEAD_FUNC32
-#undef INNER_ACCEL_FUNC32
-#undef OUTER_ACCEL_FUNC32
-#undef ACCEPT_FUNC32
-#undef SHENG64_IMPL
-#undef INTERESTING_FUNC64
-#undef INNER_DEAD_FUNC64
-#undef OUTER_DEAD_FUNC64
-#undef ACCEPT_FUNC64
-#endif
+#if defined(HAVE_AVX512VBMI)
+#undef SHENG32_IMPL
+#undef INTERESTING_FUNC32
+#undef INNER_DEAD_FUNC32
+#undef OUTER_DEAD_FUNC32
+#undef INNER_ACCEL_FUNC32
+#undef OUTER_ACCEL_FUNC32
+#undef ACCEPT_FUNC32
+#undef SHENG64_IMPL
+#undef INTERESTING_FUNC64
+#undef INNER_DEAD_FUNC64
+#undef OUTER_DEAD_FUNC64
+#undef ACCEPT_FUNC64
+#endif
#undef STOP_AT_MATCH
#endif // SHENG_DEFS_H
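
The sheng_defs.h churn above is all instances of one preprocessor-template pattern: bind SHENG_IMPL, DEAD_FUNC, ACCEPT_FUNC and STOP_AT_MATCH, include the template header to stamp out a specialized scan function, then undef everything and repeat for the next variant. A minimal sketch of the same technique follows, under a toy template; step_impl.h, STEP_IMPL and IS_STOP are illustrative names, not Hyperscan's.

/* step_impl.h -- template body; instantiated once per macro binding */
#include <stddef.h>

static int STEP_IMPL(const unsigned char *buf, size_t len) {
    for (size_t i = 0; i < len; i++) {
        if (IS_STOP(buf[i])) {
            return (int)i;   /* index of the first stopping byte */
        }
    }
    return -1;               /* no stopping byte in this buffer */
}

/* consumer -- bind, include, undef; each include emits a new function */
#define STEP_IMPL step_until_nul
#define IS_STOP(c) ((c) == '\0')
#include "step_impl.h"
#undef STEP_IMPL
#undef IS_STOP

#define STEP_IMPL step_until_newline
#define IS_STOP(c) ((c) == '\n')
#include "step_impl.h"
#undef STEP_IMPL
#undef IS_STOP

Each (DEAD_FUNC, ACCEPT_FUNC, STOP_AT_MATCH) binding in sheng_defs.h plays the role of IS_STOP here, which is why the file is almost entirely define/include/undef blocks; the evident intent is that dummyFunc folds away under inlining, so every variant pays only for the checks it actually needs.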
diff --git a/contrib/libs/hyperscan/src/nfa/sheng_impl.h b/contrib/libs/hyperscan/src/nfa/sheng_impl.h
index 924296699f..fb8ee16834 100644
--- a/contrib/libs/hyperscan/src/nfa/sheng_impl.h
+++ b/contrib/libs/hyperscan/src/nfa/sheng_impl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -95,127 +95,127 @@ char SHENG_IMPL(u8 *state, NfaCallback cb, void *ctxt, const struct sheng *s,
*scan_end = cur_buf;
return MO_CONTINUE_MATCHING;
}
-
-#if defined(HAVE_AVX512VBMI)
-static really_inline
-char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
- const struct sheng32 *s,
- u8 *const cached_accept_state,
- ReportID *const cached_accept_id,
- u8 single, u64a base_offset, const u8 *buf, const u8 *start,
- const u8 *end, const u8 **scan_end) {
- DEBUG_PRINTF("Starting DFA execution in state %u\n",
- *state & SHENG32_STATE_MASK);
- const u8 *cur_buf = start;
- if (DEAD_FUNC32(*state)) {
- DEBUG_PRINTF("Dead on arrival\n");
- *scan_end = end;
- return MO_CONTINUE_MATCHING;
- }
- DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
-
- m512 cur_state = set64x8(*state);
- const m512 *masks = s->succ_masks;
-
- while (likely(cur_buf != end)) {
- const u8 c = *cur_buf;
- const m512 succ_mask = masks[c];
- cur_state = vpermb512(cur_state, succ_mask);
- const u8 tmp = movd512(cur_state);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? c : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG32_STATE_MASK,
- tmp & SHENG32_STATE_FLAG_MASK);
-
- if (unlikely(ACCEPT_FUNC32(tmp))) {
- DEBUG_PRINTF("Accept state %u reached\n", tmp & SHENG32_STATE_MASK);
- u64a match_offset = base_offset + (cur_buf - buf) + 1;
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (u64a)(cur_buf - start));
- *state = tmp;
- *scan_end = cur_buf;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports32(s, cb, ctxt, tmp, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- cur_buf++;
- }
- *state = movd512(cur_state);
- *scan_end = cur_buf;
- return MO_CONTINUE_MATCHING;
-}
-
-static really_inline
-char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
- const struct sheng64 *s,
- u8 *const cached_accept_state,
- ReportID *const cached_accept_id,
- u8 single, u64a base_offset, const u8 *buf, const u8 *start,
- const u8 *end, const u8 **scan_end) {
- DEBUG_PRINTF("Starting DFA execution in state %u\n",
- *state & SHENG64_STATE_MASK);
- const u8 *cur_buf = start;
- if (DEAD_FUNC64(*state)) {
- DEBUG_PRINTF("Dead on arrival\n");
- *scan_end = end;
- return MO_CONTINUE_MATCHING;
- }
- DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
-
- m512 cur_state = set64x8(*state);
- const m512 *masks = s->succ_masks;
-
- while (likely(cur_buf != end)) {
- const u8 c = *cur_buf;
- const m512 succ_mask = masks[c];
- cur_state = vpermb512(cur_state, succ_mask);
- const u8 tmp = movd512(cur_state);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? c : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG64_STATE_MASK,
- tmp & SHENG64_STATE_FLAG_MASK);
-
- if (unlikely(ACCEPT_FUNC64(tmp))) {
- DEBUG_PRINTF("Accept state %u reached\n", tmp & SHENG64_STATE_MASK);
- u64a match_offset = base_offset + (cur_buf - buf) + 1;
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (u64a)(cur_buf - start));
- *state = tmp;
- *scan_end = cur_buf;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports64(s, cb, ctxt, tmp, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- cur_buf++;
- }
- *state = movd512(cur_state);
- *scan_end = cur_buf;
- return MO_CONTINUE_MATCHING;
-}
-#endif
+
+#if defined(HAVE_AVX512VBMI)
+static really_inline
+char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
+ const struct sheng32 *s,
+ u8 *const cached_accept_state,
+ ReportID *const cached_accept_id,
+ u8 single, u64a base_offset, const u8 *buf, const u8 *start,
+ const u8 *end, const u8 **scan_end) {
+ DEBUG_PRINTF("Starting DFA execution in state %u\n",
+ *state & SHENG32_STATE_MASK);
+ const u8 *cur_buf = start;
+ if (DEAD_FUNC32(*state)) {
+ DEBUG_PRINTF("Dead on arrival\n");
+ *scan_end = end;
+ return MO_CONTINUE_MATCHING;
+ }
+ DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
+
+ m512 cur_state = set64x8(*state);
+ const m512 *masks = s->succ_masks;
+
+ while (likely(cur_buf != end)) {
+ const u8 c = *cur_buf;
+ const m512 succ_mask = masks[c];
+ cur_state = vpermb512(cur_state, succ_mask);
+ const u8 tmp = movd512(cur_state);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? c : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG32_STATE_MASK,
+ tmp & SHENG32_STATE_FLAG_MASK);
+
+ if (unlikely(ACCEPT_FUNC32(tmp))) {
+ DEBUG_PRINTF("Accept state %u reached\n", tmp & SHENG32_STATE_MASK);
+ u64a match_offset = base_offset + (cur_buf - buf) + 1;
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (u64a)(cur_buf - start));
+ *state = tmp;
+ *scan_end = cur_buf;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports32(s, cb, ctxt, tmp, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ cur_buf++;
+ }
+ *state = movd512(cur_state);
+ *scan_end = cur_buf;
+ return MO_CONTINUE_MATCHING;
+}
+
+static really_inline
+char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
+ const struct sheng64 *s,
+ u8 *const cached_accept_state,
+ ReportID *const cached_accept_id,
+ u8 single, u64a base_offset, const u8 *buf, const u8 *start,
+ const u8 *end, const u8 **scan_end) {
+ DEBUG_PRINTF("Starting DFA execution in state %u\n",
+ *state & SHENG64_STATE_MASK);
+ const u8 *cur_buf = start;
+ if (DEAD_FUNC64(*state)) {
+ DEBUG_PRINTF("Dead on arrival\n");
+ *scan_end = end;
+ return MO_CONTINUE_MATCHING;
+ }
+ DEBUG_PRINTF("Scanning %lli bytes\n", (s64a)(end - start));
+
+ m512 cur_state = set64x8(*state);
+ const m512 *masks = s->succ_masks;
+
+ while (likely(cur_buf != end)) {
+ const u8 c = *cur_buf;
+ const m512 succ_mask = masks[c];
+ cur_state = vpermb512(cur_state, succ_mask);
+ const u8 tmp = movd512(cur_state);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c, ourisprint(c) ? c : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", tmp & SHENG64_STATE_MASK,
+ tmp & SHENG64_STATE_FLAG_MASK);
+
+ if (unlikely(ACCEPT_FUNC64(tmp))) {
+ DEBUG_PRINTF("Accept state %u reached\n", tmp & SHENG64_STATE_MASK);
+ u64a match_offset = base_offset + (cur_buf - buf) + 1;
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (u64a)(cur_buf - start));
+ *state = tmp;
+ *scan_end = cur_buf;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports64(s, cb, ctxt, tmp, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ cur_buf++;
+ }
+ *state = movd512(cur_state);
+ *scan_end = cur_buf;
+ return MO_CONTINUE_MATCHING;
+}
+#endif
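
Context for the SHENG32/SHENG64 bodies restored above: the whole transition function is one byte shuffle. The 512-bit register holds the current state broadcast to every lane, masks[c] is the successor table for input byte c, and vpermb maps state to successor in a single instruction. Below is a minimal sketch of one such step using raw AVX-512 VBMI intrinsics, assuming at most 64 states; dfa_step and succ are illustrative names, and it is an assumption here that Hyperscan's set64x8/vpermb512/movd512 wrappers correspond to the intrinsics shown.

#include <immintrin.h>
#include <stdint.h>

/* One DFA transition: look up succ[c][state] via a 64-byte permute. */
static inline uint8_t dfa_step(uint8_t state, uint8_t c,
                               const __m512i succ[256]) {
    __m512i cur = _mm512_set1_epi8((char)state);   /* broadcast state */
    /* vpermb: result byte i = byte cur[i] of succ[c]; every lane computes
     * the same successor, so lane 0 carries the scalar answer. */
    cur = _mm512_permutexvar_epi8(cur, succ[c]);
    return (uint8_t)_mm512_cvtsi512_si32(cur);     /* extract lane 0 */
}

Keeping the state broadcast across all lanes is what lets the x4 variants further down chain four permutes back to back without ever leaving the vector unit.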
diff --git a/contrib/libs/hyperscan/src/nfa/sheng_impl4.h b/contrib/libs/hyperscan/src/nfa/sheng_impl4.h
index e033cdadf0..440e7396e2 100644
--- a/contrib/libs/hyperscan/src/nfa/sheng_impl4.h
+++ b/contrib/libs/hyperscan/src/nfa/sheng_impl4.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -282,430 +282,430 @@ char SHENG_IMPL(u8 *state, NfaCallback cb, void *ctxt, const struct sheng *s,
*scan_end = cur_buf;
return MO_CONTINUE_MATCHING;
}
-
-#if defined(HAVE_AVX512VBMI)
-static really_inline
-char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
- const struct sheng32 *s,
- u8 *const cached_accept_state,
- ReportID *const cached_accept_id,
- u8 single, u64a base_offset, const u8 *buf, const u8 *start,
- const u8 *end, const u8 **scan_end) {
- DEBUG_PRINTF("Starting DFAx4 execution in state %u\n",
- *state & SHENG32_STATE_MASK);
- const u8 *cur_buf = start;
- const u8 *min_accel_dist = start;
- base_offset++;
- DEBUG_PRINTF("Scanning %llu bytes\n", (u64a)(end - start));
-
- if (INNER_ACCEL_FUNC32(*state) || OUTER_ACCEL_FUNC32(*state)) {
- DEBUG_PRINTF("Accel state reached @ 0\n");
- const union AccelAux *aaux =
- get_accel32(s, *state & SHENG32_STATE_MASK);
- const u8 *new_offset = run_accel(aaux, cur_buf, end);
- if (new_offset < cur_buf + BAD_ACCEL_DIST) {
- min_accel_dist = new_offset + BIG_ACCEL_PENALTY;
- } else {
- min_accel_dist = new_offset + SMALL_ACCEL_PENALTY;
- }
- DEBUG_PRINTF("Next accel chance: %llu\n",
- (u64a)(min_accel_dist - start));
- DEBUG_PRINTF("Accel scanned %zu bytes\n", new_offset - cur_buf);
- cur_buf = new_offset;
- DEBUG_PRINTF("New offset: %lli\n", (s64a)(cur_buf - start));
- }
- if (INNER_DEAD_FUNC32(*state) || OUTER_DEAD_FUNC32(*state)) {
- DEBUG_PRINTF("Dead on arrival\n");
- *scan_end = end;
- return MO_CONTINUE_MATCHING;
- }
-
- m512 cur_state = set64x8(*state);
- const m512 *masks = s->succ_masks;
-
- while (likely(end - cur_buf >= 4)) {
- const u8 *b1 = cur_buf;
- const u8 *b2 = cur_buf + 1;
- const u8 *b3 = cur_buf + 2;
- const u8 *b4 = cur_buf + 3;
- const u8 c1 = *b1;
- const u8 c2 = *b2;
- const u8 c3 = *b3;
- const u8 c4 = *b4;
-
- const m512 succ_mask1 = masks[c1];
- cur_state = vpermb512(cur_state, succ_mask1);
- const u8 a1 = movd512(cur_state);
-
- const m512 succ_mask2 = masks[c2];
- cur_state = vpermb512(cur_state, succ_mask2);
- const u8 a2 = movd512(cur_state);
-
- const m512 succ_mask3 = masks[c3];
- cur_state = vpermb512(cur_state, succ_mask3);
- const u8 a3 = movd512(cur_state);
-
- const m512 succ_mask4 = masks[c4];
- cur_state = vpermb512(cur_state, succ_mask4);
- const u8 a4 = movd512(cur_state);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? c1 : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG32_STATE_MASK,
- a1 & SHENG32_STATE_FLAG_MASK);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c2, ourisprint(c2) ? c2 : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", a2 & SHENG32_STATE_MASK,
- a2 & SHENG32_STATE_FLAG_MASK);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c3, ourisprint(c3) ? c3 : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", a3 & SHENG32_STATE_MASK,
- a3 & SHENG32_STATE_FLAG_MASK);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c4, ourisprint(c4) ? c4 : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", a4 & SHENG32_STATE_MASK,
- a4 & SHENG32_STATE_FLAG_MASK);
-
- if (unlikely(INTERESTING_FUNC32(a1, a2, a3, a4))) {
- if (ACCEPT_FUNC32(a1)) {
- u64a match_offset = base_offset + b1 - buf;
- DEBUG_PRINTF("Accept state %u reached\n",
- a1 & SHENG32_STATE_MASK);
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (s64a)(b1 - start));
- *scan_end = b1;
- *state = a1;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports32(s, cb, ctxt, a1, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- if (ACCEPT_FUNC32(a2)) {
- u64a match_offset = base_offset + b2 - buf;
- DEBUG_PRINTF("Accept state %u reached\n",
- a2 & SHENG32_STATE_MASK);
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (s64a)(b2 - start));
- *scan_end = b2;
- *state = a2;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports32(s, cb, ctxt, a2, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- if (ACCEPT_FUNC32(a3)) {
- u64a match_offset = base_offset + b3 - buf;
- DEBUG_PRINTF("Accept state %u reached\n",
- a3 & SHENG32_STATE_MASK);
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (s64a)(b3 - start));
- *scan_end = b3;
- *state = a3;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports32(s, cb, ctxt, a3, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- if (ACCEPT_FUNC32(a4)) {
- u64a match_offset = base_offset + b4 - buf;
- DEBUG_PRINTF("Accept state %u reached\n",
- a4 & SHENG32_STATE_MASK);
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (s64a)(b4 - start));
- *scan_end = b4;
- *state = a4;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports32(s, cb, ctxt, a4, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- if (INNER_DEAD_FUNC32(a4)) {
- DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(b4 - buf));
- *scan_end = end;
- *state = a4;
- return MO_CONTINUE_MATCHING;
- }
- if (cur_buf > min_accel_dist && INNER_ACCEL_FUNC32(a4)) {
- DEBUG_PRINTF("Accel state reached @ %lli\n", (s64a)(b4 - buf));
- const union AccelAux *aaux =
- get_accel32(s, a4 & SHENG32_STATE_MASK);
- const u8 *new_offset = run_accel(aaux, cur_buf + 4, end);
- if (new_offset < cur_buf + 4 + BAD_ACCEL_DIST) {
- min_accel_dist = new_offset + BIG_ACCEL_PENALTY;
- } else {
- min_accel_dist = new_offset + SMALL_ACCEL_PENALTY;
- }
- DEBUG_PRINTF("Next accel chance: %llu\n",
- (u64a)(min_accel_dist - start));
- DEBUG_PRINTF("Accel scanned %llu bytes\n",
- (u64a)(new_offset - cur_buf - 4));
- cur_buf = new_offset;
- DEBUG_PRINTF("New offset: %llu\n", (u64a)(cur_buf - buf));
- continue;
- }
- }
- if (OUTER_DEAD_FUNC32(a4)) {
- DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(cur_buf - buf));
- *scan_end = end;
- *state = a4;
- return MO_CONTINUE_MATCHING;
- };
- if (cur_buf > min_accel_dist && OUTER_ACCEL_FUNC32(a4)) {
- DEBUG_PRINTF("Accel state reached @ %lli\n", (s64a)(b4 - buf));
- const union AccelAux *aaux =
- get_accel32(s, a4 & SHENG32_STATE_MASK);
- const u8 *new_offset = run_accel(aaux, cur_buf + 4, end);
- if (new_offset < cur_buf + 4 + BAD_ACCEL_DIST) {
- min_accel_dist = new_offset + BIG_ACCEL_PENALTY;
- } else {
- min_accel_dist = new_offset + SMALL_ACCEL_PENALTY;
- }
- DEBUG_PRINTF("Next accel chance: %llu\n",
- (u64a)(min_accel_dist - start));
- DEBUG_PRINTF("Accel scanned %llu bytes\n",
- (u64a)(new_offset - cur_buf - 4));
- cur_buf = new_offset;
- DEBUG_PRINTF("New offset: %llu\n", (u64a)(cur_buf - buf));
- continue;
- };
- cur_buf += 4;
- }
- *state = movd512(cur_state);
- *scan_end = cur_buf;
- return MO_CONTINUE_MATCHING;
-}
-
-#ifndef NO_SHENG64_IMPL
-static really_inline
-char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
- const struct sheng64 *s,
- u8 *const cached_accept_state,
- ReportID *const cached_accept_id,
- u8 single, u64a base_offset, const u8 *buf, const u8 *start,
- const u8 *end, const u8 **scan_end) {
- DEBUG_PRINTF("Starting DFAx4 execution in state %u\n",
- *state & SHENG64_STATE_MASK);
- const u8 *cur_buf = start;
- base_offset++;
- DEBUG_PRINTF("Scanning %llu bytes\n", (u64a)(end - start));
-
- if (INNER_DEAD_FUNC64(*state) || OUTER_DEAD_FUNC64(*state)) {
- DEBUG_PRINTF("Dead on arrival\n");
- *scan_end = end;
- return MO_CONTINUE_MATCHING;
- }
-
- m512 cur_state = set64x8(*state);
- const m512 *masks = s->succ_masks;
-
- while (likely(end - cur_buf >= 4)) {
- const u8 *b1 = cur_buf;
- const u8 *b2 = cur_buf + 1;
- const u8 *b3 = cur_buf + 2;
- const u8 *b4 = cur_buf + 3;
- const u8 c1 = *b1;
- const u8 c2 = *b2;
- const u8 c3 = *b3;
- const u8 c4 = *b4;
-
- const m512 succ_mask1 = masks[c1];
- cur_state = vpermb512(cur_state, succ_mask1);
- const u8 a1 = movd512(cur_state);
-
- const m512 succ_mask2 = masks[c2];
- cur_state = vpermb512(cur_state, succ_mask2);
- const u8 a2 = movd512(cur_state);
-
- const m512 succ_mask3 = masks[c3];
- cur_state = vpermb512(cur_state, succ_mask3);
- const u8 a3 = movd512(cur_state);
-
- const m512 succ_mask4 = masks[c4];
- cur_state = vpermb512(cur_state, succ_mask4);
- const u8 a4 = movd512(cur_state);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? c1 : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG64_STATE_MASK,
- a1 & SHENG64_STATE_FLAG_MASK);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c2, ourisprint(c2) ? c2 : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", a2 & SHENG64_STATE_MASK,
- a2 & SHENG64_STATE_FLAG_MASK);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c3, ourisprint(c3) ? c3 : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", a3 & SHENG64_STATE_MASK,
- a3 & SHENG64_STATE_FLAG_MASK);
-
- DEBUG_PRINTF("c: %02hhx '%c'\n", c4, ourisprint(c4) ? c4 : '?');
- DEBUG_PRINTF("s: %u (flag: %u)\n", a4 & SHENG64_STATE_MASK,
- a4 & SHENG64_STATE_FLAG_MASK);
-
- if (unlikely(INTERESTING_FUNC64(a1, a2, a3, a4))) {
- if (ACCEPT_FUNC64(a1)) {
- u64a match_offset = base_offset + b1 - buf;
- DEBUG_PRINTF("Accept state %u reached\n",
- a1 & SHENG64_STATE_MASK);
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (s64a)(b1 - start));
- *scan_end = b1;
- *state = a1;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports64(s, cb, ctxt, a1, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- if (ACCEPT_FUNC64(a2)) {
- u64a match_offset = base_offset + b2 - buf;
- DEBUG_PRINTF("Accept state %u reached\n",
- a2 & SHENG64_STATE_MASK);
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (s64a)(b2 - start));
- *scan_end = b2;
- *state = a2;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports64(s, cb, ctxt, a2, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- if (ACCEPT_FUNC64(a3)) {
- u64a match_offset = base_offset + b3 - buf;
- DEBUG_PRINTF("Accept state %u reached\n",
- a3 & SHENG64_STATE_MASK);
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (s64a)(b3 - start));
- *scan_end = b3;
- *state = a3;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports64(s, cb, ctxt, a3, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- if (ACCEPT_FUNC64(a4)) {
- u64a match_offset = base_offset + b4 - buf;
- DEBUG_PRINTF("Accept state %u reached\n",
- a4 & SHENG64_STATE_MASK);
- DEBUG_PRINTF("Match @ %llu\n", match_offset);
- if (STOP_AT_MATCH) {
- DEBUG_PRINTF("Stopping at match @ %lli\n",
- (s64a)(b4 - start));
- *scan_end = b4;
- *state = a4;
- return MO_MATCHES_PENDING;
- }
- if (single) {
- if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
- MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- } else {
- if (fireReports64(s, cb, ctxt, a4, match_offset,
- cached_accept_state, cached_accept_id,
- 0) == MO_HALT_MATCHING) {
- return MO_HALT_MATCHING;
- }
- }
- }
- if (INNER_DEAD_FUNC64(a4)) {
- DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(b4 - buf));
- *scan_end = end;
- *state = a4;
- return MO_CONTINUE_MATCHING;
- }
- }
- if (OUTER_DEAD_FUNC64(a4)) {
- DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(cur_buf - buf));
- *scan_end = end;
- *state = a4;
- return MO_CONTINUE_MATCHING;
- }
- cur_buf += 4;
- }
- *state = movd512(cur_state);
- *scan_end = cur_buf;
- return MO_CONTINUE_MATCHING;
-}
-#endif // !NO_SHENG64_IMPL
-#endif
+
+#if defined(HAVE_AVX512VBMI)
+static really_inline
+char SHENG32_IMPL(u8 *state, NfaCallback cb, void *ctxt,
+ const struct sheng32 *s,
+ u8 *const cached_accept_state,
+ ReportID *const cached_accept_id,
+ u8 single, u64a base_offset, const u8 *buf, const u8 *start,
+ const u8 *end, const u8 **scan_end) {
+ DEBUG_PRINTF("Starting DFAx4 execution in state %u\n",
+ *state & SHENG32_STATE_MASK);
+ const u8 *cur_buf = start;
+ const u8 *min_accel_dist = start;
+ base_offset++;
+ DEBUG_PRINTF("Scanning %llu bytes\n", (u64a)(end - start));
+
+ if (INNER_ACCEL_FUNC32(*state) || OUTER_ACCEL_FUNC32(*state)) {
+ DEBUG_PRINTF("Accel state reached @ 0\n");
+ const union AccelAux *aaux =
+ get_accel32(s, *state & SHENG32_STATE_MASK);
+ const u8 *new_offset = run_accel(aaux, cur_buf, end);
+ if (new_offset < cur_buf + BAD_ACCEL_DIST) {
+ min_accel_dist = new_offset + BIG_ACCEL_PENALTY;
+ } else {
+ min_accel_dist = new_offset + SMALL_ACCEL_PENALTY;
+ }
+ DEBUG_PRINTF("Next accel chance: %llu\n",
+ (u64a)(min_accel_dist - start));
+ DEBUG_PRINTF("Accel scanned %zu bytes\n", new_offset - cur_buf);
+ cur_buf = new_offset;
+ DEBUG_PRINTF("New offset: %lli\n", (s64a)(cur_buf - start));
+ }
+ if (INNER_DEAD_FUNC32(*state) || OUTER_DEAD_FUNC32(*state)) {
+ DEBUG_PRINTF("Dead on arrival\n");
+ *scan_end = end;
+ return MO_CONTINUE_MATCHING;
+ }
+
+ m512 cur_state = set64x8(*state);
+ const m512 *masks = s->succ_masks;
+
+ while (likely(end - cur_buf >= 4)) {
+ const u8 *b1 = cur_buf;
+ const u8 *b2 = cur_buf + 1;
+ const u8 *b3 = cur_buf + 2;
+ const u8 *b4 = cur_buf + 3;
+ const u8 c1 = *b1;
+ const u8 c2 = *b2;
+ const u8 c3 = *b3;
+ const u8 c4 = *b4;
+
+ const m512 succ_mask1 = masks[c1];
+ cur_state = vpermb512(cur_state, succ_mask1);
+ const u8 a1 = movd512(cur_state);
+
+ const m512 succ_mask2 = masks[c2];
+ cur_state = vpermb512(cur_state, succ_mask2);
+ const u8 a2 = movd512(cur_state);
+
+ const m512 succ_mask3 = masks[c3];
+ cur_state = vpermb512(cur_state, succ_mask3);
+ const u8 a3 = movd512(cur_state);
+
+ const m512 succ_mask4 = masks[c4];
+ cur_state = vpermb512(cur_state, succ_mask4);
+ const u8 a4 = movd512(cur_state);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? c1 : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG32_STATE_MASK,
+ a1 & SHENG32_STATE_FLAG_MASK);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c2, ourisprint(c2) ? c2 : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", a2 & SHENG32_STATE_MASK,
+ a2 & SHENG32_STATE_FLAG_MASK);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c3, ourisprint(c3) ? c3 : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", a3 & SHENG32_STATE_MASK,
+ a3 & SHENG32_STATE_FLAG_MASK);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c4, ourisprint(c4) ? c4 : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", a4 & SHENG32_STATE_MASK,
+ a4 & SHENG32_STATE_FLAG_MASK);
+
+ if (unlikely(INTERESTING_FUNC32(a1, a2, a3, a4))) {
+ if (ACCEPT_FUNC32(a1)) {
+ u64a match_offset = base_offset + b1 - buf;
+ DEBUG_PRINTF("Accept state %u reached\n",
+ a1 & SHENG32_STATE_MASK);
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (s64a)(b1 - start));
+ *scan_end = b1;
+ *state = a1;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports32(s, cb, ctxt, a1, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ if (ACCEPT_FUNC32(a2)) {
+ u64a match_offset = base_offset + b2 - buf;
+ DEBUG_PRINTF("Accept state %u reached\n",
+ a2 & SHENG32_STATE_MASK);
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (s64a)(b2 - start));
+ *scan_end = b2;
+ *state = a2;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports32(s, cb, ctxt, a2, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ if (ACCEPT_FUNC32(a3)) {
+ u64a match_offset = base_offset + b3 - buf;
+ DEBUG_PRINTF("Accept state %u reached\n",
+ a3 & SHENG32_STATE_MASK);
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (s64a)(b3 - start));
+ *scan_end = b3;
+ *state = a3;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports32(s, cb, ctxt, a3, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ if (ACCEPT_FUNC32(a4)) {
+ u64a match_offset = base_offset + b4 - buf;
+ DEBUG_PRINTF("Accept state %u reached\n",
+ a4 & SHENG32_STATE_MASK);
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (s64a)(b4 - start));
+ *scan_end = b4;
+ *state = a4;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports32(s, cb, ctxt, a4, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ if (INNER_DEAD_FUNC32(a4)) {
+ DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(b4 - buf));
+ *scan_end = end;
+ *state = a4;
+ return MO_CONTINUE_MATCHING;
+ }
+ if (cur_buf > min_accel_dist && INNER_ACCEL_FUNC32(a4)) {
+ DEBUG_PRINTF("Accel state reached @ %lli\n", (s64a)(b4 - buf));
+ const union AccelAux *aaux =
+ get_accel32(s, a4 & SHENG32_STATE_MASK);
+ const u8 *new_offset = run_accel(aaux, cur_buf + 4, end);
+ if (new_offset < cur_buf + 4 + BAD_ACCEL_DIST) {
+ min_accel_dist = new_offset + BIG_ACCEL_PENALTY;
+ } else {
+ min_accel_dist = new_offset + SMALL_ACCEL_PENALTY;
+ }
+ DEBUG_PRINTF("Next accel chance: %llu\n",
+ (u64a)(min_accel_dist - start));
+ DEBUG_PRINTF("Accel scanned %llu bytes\n",
+ (u64a)(new_offset - cur_buf - 4));
+ cur_buf = new_offset;
+ DEBUG_PRINTF("New offset: %llu\n", (u64a)(cur_buf - buf));
+ continue;
+ }
+ }
+ if (OUTER_DEAD_FUNC32(a4)) {
+ DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(cur_buf - buf));
+ *scan_end = end;
+ *state = a4;
+ return MO_CONTINUE_MATCHING;
+ };
+ if (cur_buf > min_accel_dist && OUTER_ACCEL_FUNC32(a4)) {
+ DEBUG_PRINTF("Accel state reached @ %lli\n", (s64a)(b4 - buf));
+ const union AccelAux *aaux =
+ get_accel32(s, a4 & SHENG32_STATE_MASK);
+ const u8 *new_offset = run_accel(aaux, cur_buf + 4, end);
+ if (new_offset < cur_buf + 4 + BAD_ACCEL_DIST) {
+ min_accel_dist = new_offset + BIG_ACCEL_PENALTY;
+ } else {
+ min_accel_dist = new_offset + SMALL_ACCEL_PENALTY;
+ }
+ DEBUG_PRINTF("Next accel chance: %llu\n",
+ (u64a)(min_accel_dist - start));
+ DEBUG_PRINTF("Accel scanned %llu bytes\n",
+ (u64a)(new_offset - cur_buf - 4));
+ cur_buf = new_offset;
+ DEBUG_PRINTF("New offset: %llu\n", (u64a)(cur_buf - buf));
+ continue;
+ };
+ cur_buf += 4;
+ }
+ *state = movd512(cur_state);
+ *scan_end = cur_buf;
+ return MO_CONTINUE_MATCHING;
+}
+
+#ifndef NO_SHENG64_IMPL
+static really_inline
+char SHENG64_IMPL(u8 *state, NfaCallback cb, void *ctxt,
+ const struct sheng64 *s,
+ u8 *const cached_accept_state,
+ ReportID *const cached_accept_id,
+ u8 single, u64a base_offset, const u8 *buf, const u8 *start,
+ const u8 *end, const u8 **scan_end) {
+ DEBUG_PRINTF("Starting DFAx4 execution in state %u\n",
+ *state & SHENG64_STATE_MASK);
+ const u8 *cur_buf = start;
+ base_offset++;
+ DEBUG_PRINTF("Scanning %llu bytes\n", (u64a)(end - start));
+
+ if (INNER_DEAD_FUNC64(*state) || OUTER_DEAD_FUNC64(*state)) {
+ DEBUG_PRINTF("Dead on arrival\n");
+ *scan_end = end;
+ return MO_CONTINUE_MATCHING;
+ }
+
+ m512 cur_state = set64x8(*state);
+ const m512 *masks = s->succ_masks;
+
+ while (likely(end - cur_buf >= 4)) {
+ const u8 *b1 = cur_buf;
+ const u8 *b2 = cur_buf + 1;
+ const u8 *b3 = cur_buf + 2;
+ const u8 *b4 = cur_buf + 3;
+ const u8 c1 = *b1;
+ const u8 c2 = *b2;
+ const u8 c3 = *b3;
+ const u8 c4 = *b4;
+
+ const m512 succ_mask1 = masks[c1];
+ cur_state = vpermb512(cur_state, succ_mask1);
+ const u8 a1 = movd512(cur_state);
+
+ const m512 succ_mask2 = masks[c2];
+ cur_state = vpermb512(cur_state, succ_mask2);
+ const u8 a2 = movd512(cur_state);
+
+ const m512 succ_mask3 = masks[c3];
+ cur_state = vpermb512(cur_state, succ_mask3);
+ const u8 a3 = movd512(cur_state);
+
+ const m512 succ_mask4 = masks[c4];
+ cur_state = vpermb512(cur_state, succ_mask4);
+ const u8 a4 = movd512(cur_state);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c1, ourisprint(c1) ? c1 : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", a1 & SHENG64_STATE_MASK,
+ a1 & SHENG64_STATE_FLAG_MASK);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c2, ourisprint(c2) ? c2 : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", a2 & SHENG64_STATE_MASK,
+ a2 & SHENG64_STATE_FLAG_MASK);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c3, ourisprint(c3) ? c3 : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", a3 & SHENG64_STATE_MASK,
+ a3 & SHENG64_STATE_FLAG_MASK);
+
+ DEBUG_PRINTF("c: %02hhx '%c'\n", c4, ourisprint(c4) ? c4 : '?');
+ DEBUG_PRINTF("s: %u (flag: %u)\n", a4 & SHENG64_STATE_MASK,
+ a4 & SHENG64_STATE_FLAG_MASK);
+
+ if (unlikely(INTERESTING_FUNC64(a1, a2, a3, a4))) {
+ if (ACCEPT_FUNC64(a1)) {
+ u64a match_offset = base_offset + b1 - buf;
+ DEBUG_PRINTF("Accept state %u reached\n",
+ a1 & SHENG64_STATE_MASK);
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (s64a)(b1 - start));
+ *scan_end = b1;
+ *state = a1;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports64(s, cb, ctxt, a1, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ if (ACCEPT_FUNC64(a2)) {
+ u64a match_offset = base_offset + b2 - buf;
+ DEBUG_PRINTF("Accept state %u reached\n",
+ a2 & SHENG64_STATE_MASK);
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (s64a)(b2 - start));
+ *scan_end = b2;
+ *state = a2;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports64(s, cb, ctxt, a2, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ if (ACCEPT_FUNC64(a3)) {
+ u64a match_offset = base_offset + b3 - buf;
+ DEBUG_PRINTF("Accept state %u reached\n",
+ a3 & SHENG64_STATE_MASK);
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (s64a)(b3 - start));
+ *scan_end = b3;
+ *state = a3;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports64(s, cb, ctxt, a3, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ if (ACCEPT_FUNC64(a4)) {
+ u64a match_offset = base_offset + b4 - buf;
+ DEBUG_PRINTF("Accept state %u reached\n",
+ a4 & SHENG64_STATE_MASK);
+ DEBUG_PRINTF("Match @ %llu\n", match_offset);
+ if (STOP_AT_MATCH) {
+ DEBUG_PRINTF("Stopping at match @ %lli\n",
+ (s64a)(b4 - start));
+ *scan_end = b4;
+ *state = a4;
+ return MO_MATCHES_PENDING;
+ }
+ if (single) {
+ if (fireSingleReport(cb, ctxt, s->report, match_offset) ==
+ MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ } else {
+ if (fireReports64(s, cb, ctxt, a4, match_offset,
+ cached_accept_state, cached_accept_id,
+ 0) == MO_HALT_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ }
+ }
+ if (INNER_DEAD_FUNC64(a4)) {
+ DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(b4 - buf));
+ *scan_end = end;
+ *state = a4;
+ return MO_CONTINUE_MATCHING;
+ }
+ }
+ if (OUTER_DEAD_FUNC64(a4)) {
+ DEBUG_PRINTF("Dead state reached @ %lli\n", (s64a)(cur_buf - buf));
+ *scan_end = end;
+ *state = a4;
+ return MO_CONTINUE_MATCHING;
+ }
+ cur_buf += 4;
+ }
+ *state = movd512(cur_state);
+ *scan_end = cur_buf;
+ return MO_CONTINUE_MATCHING;
+}
+#endif // !NO_SHENG64_IMPL
+#endif
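
The _4 variants restored above unroll the scan to four bytes per iteration and hide all per-state checks behind one combined test (INTERESTING_FUNC32/64), replaying the individual accept/dead/accel checks only when it fires. Below is a sketch of the likely shape of that combined test, given the flag encoding in sheng_internal.h that follows; hasInteresting is an illustrative stand-in for hasInterestingStates32/64, whose definitions are not part of this diff.

#include <stdint.h>

/* OR the four post-transition state bytes, then test the flag bits once.
 * Nonzero means at least one of the four states in this block carries an
 * accept/dead/accel flag and the slow path must inspect them one by one. */
static inline int hasInteresting(uint8_t a1, uint8_t a2, uint8_t a3,
                                 uint8_t a4, uint8_t flag_mask) {
    return ((a1 | a2 | a3 | a4) & flag_mask) != 0;
}

On the common path this branch predicts not-taken, so the hot loop is just four permutes, four extracts, and one masked OR.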
diff --git a/contrib/libs/hyperscan/src/nfa/sheng_internal.h b/contrib/libs/hyperscan/src/nfa/sheng_internal.h
index e133d32f5b..98536886c5 100644
--- a/contrib/libs/hyperscan/src/nfa/sheng_internal.h
+++ b/contrib/libs/hyperscan/src/nfa/sheng_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -38,17 +38,17 @@
#define SHENG_STATE_MASK 0xF
#define SHENG_STATE_FLAG_MASK 0x70
-#define SHENG32_STATE_ACCEPT 0x20
-#define SHENG32_STATE_DEAD 0x40
-#define SHENG32_STATE_ACCEL 0x80
-#define SHENG32_STATE_MASK 0x1F
-#define SHENG32_STATE_FLAG_MASK 0xE0
-
-#define SHENG64_STATE_ACCEPT 0x40
-#define SHENG64_STATE_DEAD 0x80
-#define SHENG64_STATE_MASK 0x3F
-#define SHENG64_STATE_FLAG_MASK 0xC0
-
+#define SHENG32_STATE_ACCEPT 0x20
+#define SHENG32_STATE_DEAD 0x40
+#define SHENG32_STATE_ACCEL 0x80
+#define SHENG32_STATE_MASK 0x1F
+#define SHENG32_STATE_FLAG_MASK 0xE0
+
+#define SHENG64_STATE_ACCEPT 0x40
+#define SHENG64_STATE_DEAD 0x80
+#define SHENG64_STATE_MASK 0x3F
+#define SHENG64_STATE_FLAG_MASK 0xC0
+
#define SHENG_FLAG_SINGLE_REPORT 0x1
#define SHENG_FLAG_CAN_DIE 0x2
#define SHENG_FLAG_HAS_ACCEL 0x4
@@ -78,30 +78,30 @@ struct sheng {
ReportID report;
};
-struct sheng32 {
- m512 succ_masks[256];
- u32 length;
- u32 aux_offset;
- u32 report_offset;
- u32 accel_offset;
- u8 n_states;
- u8 anchored;
- u8 floating;
- u8 flags;
- ReportID report;
-};
-
-struct sheng64 {
- m512 succ_masks[256];
- u32 length;
- u32 aux_offset;
- u32 report_offset;
- u32 accel_offset;
- u8 n_states;
- u8 anchored;
- u8 floating;
- u8 flags;
- ReportID report;
-};
-
+struct sheng32 {
+ m512 succ_masks[256];
+ u32 length;
+ u32 aux_offset;
+ u32 report_offset;
+ u32 accel_offset;
+ u8 n_states;
+ u8 anchored;
+ u8 floating;
+ u8 flags;
+ ReportID report;
+};
+
+struct sheng64 {
+ m512 succ_masks[256];
+ u32 length;
+ u32 aux_offset;
+ u32 report_offset;
+ u32 accel_offset;
+ u8 n_states;
+ u8 anchored;
+ u8 floating;
+ u8 flags;
+ ReportID report;
+};
+
#endif /* SHENG_INTERNAL_H_ */
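The masks restored above pack a state id and its flags into one byte: sheng32 keeps the id in the low five bits (0x1F) with accept/dead/accel in the top three, while sheng64 spends six bits (0x3F) on the id and drops the accel flag to make room for 64 states. A small illustrative decoder (the helper name is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the SHENG32_* constants above. */
enum { S32_MASK = 0x1F, S32_ACCEPT = 0x20, S32_DEAD = 0x40, S32_ACCEL = 0x80 };

static void decode_sheng32_state(uint8_t s) {
    printf("state %u%s%s%s\n", (unsigned)(s & S32_MASK),
           (s & S32_ACCEPT) ? " [accept]" : "",
           (s & S32_DEAD)   ? " [dead]"   : "",
           (s & S32_ACCEL)  ? " [accel]"  : "");
}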
diff --git a/contrib/libs/hyperscan/src/nfa/shengcompile.cpp b/contrib/libs/hyperscan/src/nfa/shengcompile.cpp
index f4ab79ce70..aa3faeb09d 100644
--- a/contrib/libs/hyperscan/src/nfa/shengcompile.cpp
+++ b/contrib/libs/hyperscan/src/nfa/shengcompile.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -301,28 +301,28 @@ void dumpShuffleMask(const u8 chr, const u8 *buf, unsigned sz) {
}
DEBUG_PRINTF("chr %3u: %s\n", chr, o.str().c_str());
}
-
-static really_inline
-void dumpShuffleMask32(const u8 chr, const u8 *buf, unsigned sz) {
- stringstream o;
-
- for (unsigned i = 0; i < sz; i++) {
- o.width(2);
- o << (buf[i] & SHENG32_STATE_MASK) << " ";
- }
- DEBUG_PRINTF("chr %3u: %s\n", chr, o.str().c_str());
-}
-
-static really_inline
-void dumpShuffleMask64(const u8 chr, const u8 *buf, unsigned sz) {
- stringstream o;
-
- for (unsigned i = 0; i < sz; i++) {
- o.width(2);
- o << (buf[i] & SHENG64_STATE_MASK) << " ";
- }
- DEBUG_PRINTF("chr %3u: %s\n", chr, o.str().c_str());
-}
+
+static really_inline
+void dumpShuffleMask32(const u8 chr, const u8 *buf, unsigned sz) {
+ stringstream o;
+
+ for (unsigned i = 0; i < sz; i++) {
+ o.width(2);
+ o << (buf[i] & SHENG32_STATE_MASK) << " ";
+ }
+ DEBUG_PRINTF("chr %3u: %s\n", chr, o.str().c_str());
+}
+
+static really_inline
+void dumpShuffleMask64(const u8 chr, const u8 *buf, unsigned sz) {
+ stringstream o;
+
+ for (unsigned i = 0; i < sz; i++) {
+ o.width(2);
+ o << (buf[i] & SHENG64_STATE_MASK) << " ";
+ }
+ DEBUG_PRINTF("chr %3u: %s\n", chr, o.str().c_str());
+}
#endif
static
@@ -333,16 +333,16 @@ void fillAccelOut(const map<dstate_id_t, AccelScheme> &accel_escape_info,
}
}
-template <typename T>
+template <typename T>
static
-u8 getShengState(UNUSED dstate &state, UNUSED dfa_info &info,
- UNUSED map<dstate_id_t, AccelScheme> &accelInfo) {
- return 0;
-}
-
-template <>
-u8 getShengState<sheng>(dstate &state, dfa_info &info,
- map<dstate_id_t, AccelScheme> &accelInfo) {
+u8 getShengState(UNUSED dstate &state, UNUSED dfa_info &info,
+ UNUSED map<dstate_id_t, AccelScheme> &accelInfo) {
+ return 0;
+}
+
+template <>
+u8 getShengState<sheng>(dstate &state, dfa_info &info,
+ map<dstate_id_t, AccelScheme> &accelInfo) {
u8 s = state.impl_id;
if (!state.reports.empty()) {
s |= SHENG_STATE_ACCEPT;
@@ -356,41 +356,41 @@ u8 getShengState<sheng>(dstate &state, dfa_info &info,
return s;
}
-template <>
-u8 getShengState<sheng32>(dstate &state, dfa_info &info,
- map<dstate_id_t, AccelScheme> &accelInfo) {
- u8 s = state.impl_id;
- if (!state.reports.empty()) {
- s |= SHENG32_STATE_ACCEPT;
- }
- if (info.isDead(state)) {
- s |= SHENG32_STATE_DEAD;
- }
- if (accelInfo.find(info.raw_id(state.impl_id)) != accelInfo.end()) {
- s |= SHENG32_STATE_ACCEL;
- }
- return s;
-}
-
-template <>
-u8 getShengState<sheng64>(dstate &state, dfa_info &info,
- UNUSED map<dstate_id_t, AccelScheme> &accelInfo) {
- u8 s = state.impl_id;
- if (!state.reports.empty()) {
- s |= SHENG64_STATE_ACCEPT;
- }
- if (info.isDead(state)) {
- s |= SHENG64_STATE_DEAD;
- }
- return s;
-}
-
-template <typename T>
+template <>
+u8 getShengState<sheng32>(dstate &state, dfa_info &info,
+ map<dstate_id_t, AccelScheme> &accelInfo) {
+ u8 s = state.impl_id;
+ if (!state.reports.empty()) {
+ s |= SHENG32_STATE_ACCEPT;
+ }
+ if (info.isDead(state)) {
+ s |= SHENG32_STATE_DEAD;
+ }
+ if (accelInfo.find(info.raw_id(state.impl_id)) != accelInfo.end()) {
+ s |= SHENG32_STATE_ACCEL;
+ }
+ return s;
+}
+
+template <>
+u8 getShengState<sheng64>(dstate &state, dfa_info &info,
+ UNUSED map<dstate_id_t, AccelScheme> &accelInfo) {
+ u8 s = state.impl_id;
+ if (!state.reports.empty()) {
+ s |= SHENG64_STATE_ACCEPT;
+ }
+ if (info.isDead(state)) {
+ s |= SHENG64_STATE_DEAD;
+ }
+ return s;
+}
+
+template <typename T>
static
void fillAccelAux(struct NFA *n, dfa_info &info,
map<dstate_id_t, AccelScheme> &accelInfo) {
DEBUG_PRINTF("Filling accel aux structures\n");
- T *s = (T *)getMutableImplNfa(n);
+ T *s = (T *)getMutableImplNfa(n);
u32 offset = s->accel_offset;
for (dstate_id_t i = 0; i < info.size(); i++) {
@@ -408,21 +408,21 @@ void fillAccelAux(struct NFA *n, dfa_info &info,
}
}
-template <typename T>
+template <typename T>
static
-void populateBasicInfo(UNUSED struct NFA *n, UNUSED dfa_info &info,
- UNUSED map<dstate_id_t, AccelScheme> &accelInfo,
- UNUSED u32 aux_offset, UNUSED u32 report_offset,
- UNUSED u32 accel_offset, UNUSED u32 total_size,
- UNUSED u32 dfa_size) {
-}
-
-template <>
-void populateBasicInfo<sheng>(struct NFA *n, dfa_info &info,
- map<dstate_id_t, AccelScheme> &accelInfo,
- u32 aux_offset, u32 report_offset,
- u32 accel_offset, u32 total_size,
- u32 dfa_size) {
+void populateBasicInfo(UNUSED struct NFA *n, UNUSED dfa_info &info,
+ UNUSED map<dstate_id_t, AccelScheme> &accelInfo,
+ UNUSED u32 aux_offset, UNUSED u32 report_offset,
+ UNUSED u32 accel_offset, UNUSED u32 total_size,
+ UNUSED u32 dfa_size) {
+}
+
+template <>
+void populateBasicInfo<sheng>(struct NFA *n, dfa_info &info,
+ map<dstate_id_t, AccelScheme> &accelInfo,
+ u32 aux_offset, u32 report_offset,
+ u32 accel_offset, u32 total_size,
+ u32 dfa_size) {
n->length = total_size;
n->scratchStateSize = 1;
n->streamStateSize = 1;
@@ -438,65 +438,65 @@ void populateBasicInfo<sheng>(struct NFA *n, dfa_info &info,
s->length = dfa_size;
s->flags |= info.can_die ? SHENG_FLAG_CAN_DIE : 0;
- s->anchored = getShengState<sheng>(info.anchored, info, accelInfo);
- s->floating = getShengState<sheng>(info.floating, info, accelInfo);
-}
-
-template <>
-void populateBasicInfo<sheng32>(struct NFA *n, dfa_info &info,
- map<dstate_id_t, AccelScheme> &accelInfo,
- u32 aux_offset, u32 report_offset,
- u32 accel_offset, u32 total_size,
- u32 dfa_size) {
- n->length = total_size;
- n->scratchStateSize = 1;
- n->streamStateSize = 1;
- n->nPositions = info.size();
- n->type = SHENG_NFA_32;
- n->flags |= info.raw.hasEodReports() ? NFA_ACCEPTS_EOD : 0;
-
- sheng32 *s = (sheng32 *)getMutableImplNfa(n);
- s->aux_offset = aux_offset;
- s->report_offset = report_offset;
- s->accel_offset = accel_offset;
- s->n_states = info.size();
- s->length = dfa_size;
- s->flags |= info.can_die ? SHENG_FLAG_CAN_DIE : 0;
-
- s->anchored = getShengState<sheng32>(info.anchored, info, accelInfo);
- s->floating = getShengState<sheng32>(info.floating, info, accelInfo);
-}
-
-template <>
-void populateBasicInfo<sheng64>(struct NFA *n, dfa_info &info,
- map<dstate_id_t, AccelScheme> &accelInfo,
- u32 aux_offset, u32 report_offset,
- u32 accel_offset, u32 total_size,
- u32 dfa_size) {
- n->length = total_size;
- n->scratchStateSize = 1;
- n->streamStateSize = 1;
- n->nPositions = info.size();
- n->type = SHENG_NFA_64;
- n->flags |= info.raw.hasEodReports() ? NFA_ACCEPTS_EOD : 0;
-
- sheng64 *s = (sheng64 *)getMutableImplNfa(n);
- s->aux_offset = aux_offset;
- s->report_offset = report_offset;
- s->accel_offset = accel_offset;
- s->n_states = info.size();
- s->length = dfa_size;
- s->flags |= info.can_die ? SHENG_FLAG_CAN_DIE : 0;
-
- s->anchored = getShengState<sheng64>(info.anchored, info, accelInfo);
- s->floating = getShengState<sheng64>(info.floating, info, accelInfo);
-}
-
-template <typename T>
+ s->anchored = getShengState<sheng>(info.anchored, info, accelInfo);
+ s->floating = getShengState<sheng>(info.floating, info, accelInfo);
+}
+
+template <>
+void populateBasicInfo<sheng32>(struct NFA *n, dfa_info &info,
+ map<dstate_id_t, AccelScheme> &accelInfo,
+ u32 aux_offset, u32 report_offset,
+ u32 accel_offset, u32 total_size,
+ u32 dfa_size) {
+ n->length = total_size;
+ n->scratchStateSize = 1;
+ n->streamStateSize = 1;
+ n->nPositions = info.size();
+ n->type = SHENG_NFA_32;
+ n->flags |= info.raw.hasEodReports() ? NFA_ACCEPTS_EOD : 0;
+
+ sheng32 *s = (sheng32 *)getMutableImplNfa(n);
+ s->aux_offset = aux_offset;
+ s->report_offset = report_offset;
+ s->accel_offset = accel_offset;
+ s->n_states = info.size();
+ s->length = dfa_size;
+ s->flags |= info.can_die ? SHENG_FLAG_CAN_DIE : 0;
+
+ s->anchored = getShengState<sheng32>(info.anchored, info, accelInfo);
+ s->floating = getShengState<sheng32>(info.floating, info, accelInfo);
+}
+
+template <>
+void populateBasicInfo<sheng64>(struct NFA *n, dfa_info &info,
+ map<dstate_id_t, AccelScheme> &accelInfo,
+ u32 aux_offset, u32 report_offset,
+ u32 accel_offset, u32 total_size,
+ u32 dfa_size) {
+ n->length = total_size;
+ n->scratchStateSize = 1;
+ n->streamStateSize = 1;
+ n->nPositions = info.size();
+ n->type = SHENG_NFA_64;
+ n->flags |= info.raw.hasEodReports() ? NFA_ACCEPTS_EOD : 0;
+
+ sheng64 *s = (sheng64 *)getMutableImplNfa(n);
+ s->aux_offset = aux_offset;
+ s->report_offset = report_offset;
+ s->accel_offset = accel_offset;
+ s->n_states = info.size();
+ s->length = dfa_size;
+ s->flags |= info.can_die ? SHENG_FLAG_CAN_DIE : 0;
+
+ s->anchored = getShengState<sheng64>(info.anchored, info, accelInfo);
+ s->floating = getShengState<sheng64>(info.floating, info, accelInfo);
+}
+
+template <typename T>
static
void fillTops(NFA *n, dfa_info &info, dstate_id_t id,
map<dstate_id_t, AccelScheme> &accelInfo) {
- T *s = (T *)getMutableImplNfa(n);
+ T *s = (T *)getMutableImplNfa(n);
u32 aux_base = s->aux_offset;
DEBUG_PRINTF("Filling tops for state %u\n", id);
@@ -513,14 +513,14 @@ void fillTops(NFA *n, dfa_info &info, dstate_id_t id,
DEBUG_PRINTF("Top transition for state %u: %u\n", id, top_state.impl_id);
- aux->top = getShengState<T>(top_state, info, accelInfo);
+ aux->top = getShengState<T>(top_state, info, accelInfo);
}
-template <typename T>
+template <typename T>
static
void fillAux(NFA *n, dfa_info &info, dstate_id_t id, vector<u32> &reports,
vector<u32> &reports_eod, vector<u32> &report_offsets) {
- T *s = (T *)getMutableImplNfa(n);
+ T *s = (T *)getMutableImplNfa(n);
u32 aux_base = s->aux_offset;
auto raw_id = info.raw_id(id);
@@ -540,97 +540,97 @@ void fillAux(NFA *n, dfa_info &info, dstate_id_t id, vector<u32> &reports,
DEBUG_PRINTF("EOD report list offset: %u\n", aux->accept_eod);
}
-template <typename T>
+template <typename T>
static
void fillSingleReport(NFA *n, ReportID r_id) {
- T *s = (T *)getMutableImplNfa(n);
+ T *s = (T *)getMutableImplNfa(n);
DEBUG_PRINTF("Single report ID: %u\n", r_id);
s->report = r_id;
s->flags |= SHENG_FLAG_SINGLE_REPORT;
}
-template <typename T>
+template <typename T>
static
-bool createShuffleMasks(UNUSED T *s, UNUSED dfa_info &info,
- UNUSED map<dstate_id_t, AccelScheme> &accelInfo) {
- return true;
-}
-
-template <>
-bool createShuffleMasks<sheng>(sheng *s, dfa_info &info,
- map<dstate_id_t, AccelScheme> &accelInfo) {
+bool createShuffleMasks(UNUSED T *s, UNUSED dfa_info &info,
+ UNUSED map<dstate_id_t, AccelScheme> &accelInfo) {
+ return true;
+}
+
+template <>
+bool createShuffleMasks<sheng>(sheng *s, dfa_info &info,
+ map<dstate_id_t, AccelScheme> &accelInfo) {
for (u16 chr = 0; chr < 256; chr++) {
u8 buf[16] = {0};
for (dstate_id_t idx = 0; idx < info.size(); idx++) {
auto &succ_state = info.next(idx, chr);
- buf[idx] = getShengState<sheng>(succ_state, info, accelInfo);
+ buf[idx] = getShengState<sheng>(succ_state, info, accelInfo);
}
#ifdef DEBUG
dumpShuffleMask(chr, buf, sizeof(buf));
#endif
memcpy(&s->shuffle_masks[chr], buf, sizeof(m128));
}
- return true;
-}
-
-template <>
-bool createShuffleMasks<sheng32>(sheng32 *s, dfa_info &info,
- map<dstate_id_t, AccelScheme> &accelInfo) {
- for (u16 chr = 0; chr < 256; chr++) {
- u8 buf[64] = {0};
-
- assert(info.size() <= 32);
- for (dstate_id_t idx = 0; idx < info.size(); idx++) {
- auto &succ_state = info.next(idx, chr);
-
- buf[idx] = getShengState<sheng32>(succ_state, info, accelInfo);
- buf[32 + idx] = buf[idx];
- }
-#ifdef DEBUG
- dumpShuffleMask32(chr, buf, sizeof(buf));
-#endif
- memcpy(&s->succ_masks[chr], buf, sizeof(m512));
- }
- return true;
-}
-
-template <>
-bool createShuffleMasks<sheng64>(sheng64 *s, dfa_info &info,
- map<dstate_id_t, AccelScheme> &accelInfo) {
- for (u16 chr = 0; chr < 256; chr++) {
- u8 buf[64] = {0};
-
- assert(info.size() <= 64);
- for (dstate_id_t idx = 0; idx < info.size(); idx++) {
- auto &succ_state = info.next(idx, chr);
-
- if (accelInfo.find(info.raw_id(succ_state.impl_id))
- != accelInfo.end()) {
- return false;
- }
- buf[idx] = getShengState<sheng64>(succ_state, info, accelInfo);
- }
-#ifdef DEBUG
- dumpShuffleMask64(chr, buf, sizeof(buf));
-#endif
- memcpy(&s->succ_masks[chr], buf, sizeof(m512));
- }
- return true;
-}
-
-bool has_accel_sheng(const NFA *) {
- return true; /* consider the sheng region as accelerated */
-}
-
-template <typename T>
-static
-bytecode_ptr<NFA> shengCompile_int(raw_dfa &raw, const CompileContext &cc,
- set<dstate_id_t> *accel_states,
- sheng_build_strat &strat,
- dfa_info &info) {
+ return true;
+}
+
+template <>
+bool createShuffleMasks<sheng32>(sheng32 *s, dfa_info &info,
+ map<dstate_id_t, AccelScheme> &accelInfo) {
+ for (u16 chr = 0; chr < 256; chr++) {
+ u8 buf[64] = {0};
+
+ assert(info.size() <= 32);
+ for (dstate_id_t idx = 0; idx < info.size(); idx++) {
+ auto &succ_state = info.next(idx, chr);
+
+ buf[idx] = getShengState<sheng32>(succ_state, info, accelInfo);
+ buf[32 + idx] = buf[idx];
+ }
+#ifdef DEBUG
+ dumpShuffleMask32(chr, buf, sizeof(buf));
+#endif
+ memcpy(&s->succ_masks[chr], buf, sizeof(m512));
+ }
+ return true;
+}
+
+template <>
+bool createShuffleMasks<sheng64>(sheng64 *s, dfa_info &info,
+ map<dstate_id_t, AccelScheme> &accelInfo) {
+ for (u16 chr = 0; chr < 256; chr++) {
+ u8 buf[64] = {0};
+
+ assert(info.size() <= 64);
+ for (dstate_id_t idx = 0; idx < info.size(); idx++) {
+ auto &succ_state = info.next(idx, chr);
+
+ if (accelInfo.find(info.raw_id(succ_state.impl_id))
+ != accelInfo.end()) {
+ return false;
+ }
+ buf[idx] = getShengState<sheng64>(succ_state, info, accelInfo);
+ }
+#ifdef DEBUG
+ dumpShuffleMask64(chr, buf, sizeof(buf));
+#endif
+ memcpy(&s->succ_masks[chr], buf, sizeof(m512));
+ }
+ return true;
+}
+
+bool has_accel_sheng(const NFA *) {
+ return true; /* consider the sheng region as accelerated */
+}
+
+template <typename T>
+static
+bytecode_ptr<NFA> shengCompile_int(raw_dfa &raw, const CompileContext &cc,
+ set<dstate_id_t> *accel_states,
+ sheng_build_strat &strat,
+ dfa_info &info) {
if (!cc.streaming) { /* TODO: work out if we can do the strip in streaming
* mode with our semantics */
raw.stripExtraEodReports();
@@ -645,7 +645,7 @@ bytecode_ptr<NFA> shengCompile_int(raw_dfa &raw, const CompileContext &cc,
DEBUG_PRINTF("Anchored start state: %u, floating start state: %u\n",
info.anchored.impl_id, info.floating.impl_id);
- u32 nfa_size = ROUNDUP_16(sizeof(NFA) + sizeof(T));
+ u32 nfa_size = ROUNDUP_16(sizeof(NFA) + sizeof(T));
vector<u32> reports, eod_reports, report_offsets;
u8 isSingle = 0;
ReportID single_report = 0;
@@ -667,129 +667,129 @@ bytecode_ptr<NFA> shengCompile_int(raw_dfa &raw, const CompileContext &cc,
auto nfa = make_zeroed_bytecode_ptr<NFA>(total_size);
- populateBasicInfo<T>(nfa.get(), info, accelInfo, nfa_size,
- reports_offset, accel_offset, total_size,
- total_size - sizeof(NFA));
+ populateBasicInfo<T>(nfa.get(), info, accelInfo, nfa_size,
+ reports_offset, accel_offset, total_size,
+ total_size - sizeof(NFA));
DEBUG_PRINTF("Setting up aux and report structures\n");
ri->fillReportLists(nfa.get(), reports_offset, report_offsets);
for (dstate_id_t idx = 0; idx < info.size(); idx++) {
- fillTops<T>(nfa.get(), info, idx, accelInfo);
- fillAux<T>(nfa.get(), info, idx, reports, eod_reports,
- report_offsets);
+ fillTops<T>(nfa.get(), info, idx, accelInfo);
+ fillAux<T>(nfa.get(), info, idx, reports, eod_reports,
+ report_offsets);
}
if (isSingle) {
- fillSingleReport<T>(nfa.get(), single_report);
+ fillSingleReport<T>(nfa.get(), single_report);
}
- fillAccelAux<T>(nfa.get(), info, accelInfo);
+ fillAccelAux<T>(nfa.get(), info, accelInfo);
if (accel_states) {
fillAccelOut(accelInfo, accel_states);
}
- if (!createShuffleMasks<T>((T *)getMutableImplNfa(nfa.get()), info, accelInfo)) {
- return nullptr;
- }
+ if (!createShuffleMasks<T>((T *)getMutableImplNfa(nfa.get()), info, accelInfo)) {
+ return nullptr;
+ }
+
+ return nfa;
+}
+
+bytecode_ptr<NFA> shengCompile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm, bool only_accel_init,
+ set<dstate_id_t> *accel_states) {
+ if (!cc.grey.allowSheng) {
+ DEBUG_PRINTF("Sheng is not allowed!\n");
+ return nullptr;
+ }
+
+ sheng_build_strat strat(raw, rm, only_accel_init);
+ dfa_info info(strat);
+
+ DEBUG_PRINTF("Trying to compile a %zu state Sheng\n", raw.states.size());
+
+ DEBUG_PRINTF("Anchored start state id: %u, floating start state id: %u\n",
+ raw.start_anchored, raw.start_floating);
+
+ DEBUG_PRINTF("This DFA %s die so effective number of states is %zu\n",
+ info.can_die ? "can" : "cannot", info.size());
+ if (info.size() > 16) {
+ DEBUG_PRINTF("Too many states\n");
+ return nullptr;
+ }
+
+ return shengCompile_int<sheng>(raw, cc, accel_states, strat, info);
+}
+
+bytecode_ptr<NFA> sheng32Compile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm, bool only_accel_init,
+ set<dstate_id_t> *accel_states) {
+ if (!cc.grey.allowSheng) {
+ DEBUG_PRINTF("Sheng is not allowed!\n");
+ return nullptr;
+ }
+
+ if (!cc.target_info.has_avx512vbmi()) {
+ DEBUG_PRINTF("Sheng32 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
+ return nullptr;
+ }
+ sheng_build_strat strat(raw, rm, only_accel_init);
+ dfa_info info(strat);
+
+ DEBUG_PRINTF("Trying to compile a %zu state Sheng\n", raw.states.size());
+
+ DEBUG_PRINTF("Anchored start state id: %u, floating start state id: %u\n",
+ raw.start_anchored, raw.start_floating);
+
+ DEBUG_PRINTF("This DFA %s die so effective number of states is %zu\n",
+ info.can_die ? "can" : "cannot", info.size());
+ assert(info.size() > 16);
+ if (info.size() > 32) {
+ DEBUG_PRINTF("Too many states\n");
+ return nullptr;
+ }
+
+ return shengCompile_int<sheng32>(raw, cc, accel_states, strat, info);
+}
+
+bytecode_ptr<NFA> sheng64Compile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm, bool only_accel_init,
+ set<dstate_id_t> *accel_states) {
+ if (!cc.grey.allowSheng) {
+ DEBUG_PRINTF("Sheng is not allowed!\n");
+ return nullptr;
+ }
+
+ if (!cc.target_info.has_avx512vbmi()) {
+ DEBUG_PRINTF("Sheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
+ return nullptr;
+ }
+
+ sheng_build_strat strat(raw, rm, only_accel_init);
+ dfa_info info(strat);
+
+ DEBUG_PRINTF("Trying to compile a %zu state Sheng\n", raw.states.size());
+
+ DEBUG_PRINTF("Anchored start state id: %u, floating start state id: %u\n",
+ raw.start_anchored, raw.start_floating);
+
+ DEBUG_PRINTF("This DFA %s die so effective number of states is %zu\n",
+ info.can_die ? "can" : "cannot", info.size());
+ assert(info.size() > 32);
+ if (info.size() > 64) {
+ DEBUG_PRINTF("Too many states\n");
+ return nullptr;
+ }
+ vector<dstate> old_states;
+ old_states = info.states;
+ auto nfa = shengCompile_int<sheng64>(raw, cc, accel_states, strat, info);
+ if (!nfa) {
+ info.states = old_states;
+ }
return nfa;
}
-bytecode_ptr<NFA> shengCompile(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm, bool only_accel_init,
- set<dstate_id_t> *accel_states) {
- if (!cc.grey.allowSheng) {
- DEBUG_PRINTF("Sheng is not allowed!\n");
- return nullptr;
- }
-
- sheng_build_strat strat(raw, rm, only_accel_init);
- dfa_info info(strat);
-
- DEBUG_PRINTF("Trying to compile a %zu state Sheng\n", raw.states.size());
-
- DEBUG_PRINTF("Anchored start state id: %u, floating start state id: %u\n",
- raw.start_anchored, raw.start_floating);
-
- DEBUG_PRINTF("This DFA %s die so effective number of states is %zu\n",
- info.can_die ? "can" : "cannot", info.size());
- if (info.size() > 16) {
- DEBUG_PRINTF("Too many states\n");
- return nullptr;
- }
-
- return shengCompile_int<sheng>(raw, cc, accel_states, strat, info);
-}
-
-bytecode_ptr<NFA> sheng32Compile(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm, bool only_accel_init,
- set<dstate_id_t> *accel_states) {
- if (!cc.grey.allowSheng) {
- DEBUG_PRINTF("Sheng is not allowed!\n");
- return nullptr;
- }
-
- if (!cc.target_info.has_avx512vbmi()) {
- DEBUG_PRINTF("Sheng32 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
- return nullptr;
- }
-
- sheng_build_strat strat(raw, rm, only_accel_init);
- dfa_info info(strat);
-
- DEBUG_PRINTF("Trying to compile a %zu state Sheng\n", raw.states.size());
-
- DEBUG_PRINTF("Anchored start state id: %u, floating start state id: %u\n",
- raw.start_anchored, raw.start_floating);
-
- DEBUG_PRINTF("This DFA %s die so effective number of states is %zu\n",
- info.can_die ? "can" : "cannot", info.size());
- assert(info.size() > 16);
- if (info.size() > 32) {
- DEBUG_PRINTF("Too many states\n");
- return nullptr;
- }
-
- return shengCompile_int<sheng32>(raw, cc, accel_states, strat, info);
-}
-
-bytecode_ptr<NFA> sheng64Compile(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm, bool only_accel_init,
- set<dstate_id_t> *accel_states) {
- if (!cc.grey.allowSheng) {
- DEBUG_PRINTF("Sheng is not allowed!\n");
- return nullptr;
- }
-
- if (!cc.target_info.has_avx512vbmi()) {
- DEBUG_PRINTF("Sheng64 failed, no HS_CPU_FEATURES_AVX512VBMI!\n");
- return nullptr;
- }
-
- sheng_build_strat strat(raw, rm, only_accel_init);
- dfa_info info(strat);
-
- DEBUG_PRINTF("Trying to compile a %zu state Sheng\n", raw.states.size());
-
- DEBUG_PRINTF("Anchored start state id: %u, floating start state id: %u\n",
- raw.start_anchored, raw.start_floating);
-
- DEBUG_PRINTF("This DFA %s die so effective number of states is %zu\n",
- info.can_die ? "can" : "cannot", info.size());
- assert(info.size() > 32);
- if (info.size() > 64) {
- DEBUG_PRINTF("Too many states\n");
- return nullptr;
- }
- vector<dstate> old_states;
- old_states = info.states;
- auto nfa = shengCompile_int<sheng64>(raw, cc, accel_states, strat, info);
- if (!nfa) {
- info.states = old_states;
- }
- return nfa;
-}
-
} // namespace ue2
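The three entry points above partition DFAs by effective state count: shengCompile handles up to 16 states, sheng32Compile 17 to 32, and sheng64Compile 33 to 64, with the wider variants also requiring AVX512VBMI on the target and sheng64 additionally bailing out (restoring info.states) when createShuffleMasks<sheng64> meets an accelerated successor. A hedged sketch of that selection, with a hypothetical chooser that omits the grey-box and bailout details:

typedef enum { ENGINE_NONE, ENGINE_SHENG, ENGINE_SHENG32, ENGINE_SHENG64 } engine_t;

/* Illustrative only: the real guards also check cc.grey.allowSheng and use
 * cc.target_info.has_avx512vbmi(). */
static engine_t pick_sheng(size_t n_states, int has_avx512vbmi) {
    if (n_states <= 16) return ENGINE_SHENG;
    if (!has_avx512vbmi) return ENGINE_NONE;
    if (n_states <= 32) return ENGINE_SHENG32;
    if (n_states <= 64) return ENGINE_SHENG64;
    return ENGINE_NONE;
}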
diff --git a/contrib/libs/hyperscan/src/nfa/shengcompile.h b/contrib/libs/hyperscan/src/nfa/shengcompile.h
index 175bf6a86f..256f4a4e50 100644
--- a/contrib/libs/hyperscan/src/nfa/shengcompile.h
+++ b/contrib/libs/hyperscan/src/nfa/shengcompile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -61,7 +61,7 @@ public:
u32 max_allowed_offset_accel() const override;
u32 max_stop_char() const override;
u32 max_floating_stop_char() const override;
- DfaType getType() const override { return Sheng; }
+ DfaType getType() const override { return Sheng; }
private:
raw_dfa &rdfa;
@@ -71,14 +71,14 @@ bytecode_ptr<NFA> shengCompile(raw_dfa &raw, const CompileContext &cc,
const ReportManager &rm, bool only_accel_init,
std::set<dstate_id_t> *accel_states = nullptr);
-bytecode_ptr<NFA> sheng32Compile(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm, bool only_accel_init,
- std::set<dstate_id_t> *accel_states = nullptr);
-
-bytecode_ptr<NFA> sheng64Compile(raw_dfa &raw, const CompileContext &cc,
- const ReportManager &rm, bool only_accel_init,
- std::set<dstate_id_t> *accel_states = nullptr);
-
+bytecode_ptr<NFA> sheng32Compile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm, bool only_accel_init,
+ std::set<dstate_id_t> *accel_states = nullptr);
+
+bytecode_ptr<NFA> sheng64Compile(raw_dfa &raw, const CompileContext &cc,
+ const ReportManager &rm, bool only_accel_init,
+ std::set<dstate_id_t> *accel_states = nullptr);
+
struct sheng_escape_info {
CharReach outs;
CharReach outs2_single;
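Given the declarations above, a caller would try the narrowest engine first and fall back; a usage sketch under the assumption that shengcompile.h and the ue2 compile types are in scope (buildSheng itself is hypothetical):

ue2::bytecode_ptr<NFA> buildSheng(ue2::raw_dfa &raw,
                                  const ue2::CompileContext &cc,
                                  const ue2::ReportManager &rm) {
    // Each compiler returns nullptr when the DFA does not fit its bounds.
    if (auto nfa = ue2::shengCompile(raw, cc, rm, false)) return nfa;
    if (auto nfa = ue2::sheng32Compile(raw, cc, rm, false)) return nfa;
    return ue2::sheng64Compile(raw, cc, rm, false);
}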
diff --git a/contrib/libs/hyperscan/src/nfa/vermicelli.h b/contrib/libs/hyperscan/src/nfa/vermicelli.h
index 7b35deb8d6..ed797d83f9 100644
--- a/contrib/libs/hyperscan/src/nfa/vermicelli.h
+++ b/contrib/libs/hyperscan/src/nfa/vermicelli.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -46,20 +46,20 @@ const u8 *vermicelliExec(char c, char nocase, const u8 *buf,
nocase ? "nocase " : "", c, (size_t)(buf_end - buf));
assert(buf < buf_end);
- VERM_TYPE chars = VERM_SET_FN(c); /* nocase already uppercase */
-
+ VERM_TYPE chars = VERM_SET_FN(c); /* nocase already uppercase */
+
// Handle small scans.
-#ifdef HAVE_AVX512
- if (buf_end - buf <= VERM_BOUNDARY) {
- const u8 *ptr = nocase
- ? vermMiniNocase(chars, buf, buf_end, 0)
- : vermMini(chars, buf, buf_end, 0);
- if (ptr) {
- return ptr;
- }
- return buf_end;
- }
-#else
+#ifdef HAVE_AVX512
+ if (buf_end - buf <= VERM_BOUNDARY) {
+ const u8 *ptr = nocase
+ ? vermMiniNocase(chars, buf, buf_end, 0)
+ : vermMini(chars, buf, buf_end, 0);
+ if (ptr) {
+ return ptr;
+ }
+ return buf_end;
+ }
+#else
if (buf_end - buf < VERM_BOUNDARY) {
for (; buf < buf_end; buf++) {
char cur = (char)*buf;
@@ -72,7 +72,7 @@ const u8 *vermicelliExec(char c, char nocase, const u8 *buf,
}
return buf;
}
-#endif
+#endif
uintptr_t min = (uintptr_t)buf % VERM_BOUNDARY;
if (min) {
@@ -112,20 +112,20 @@ const u8 *nvermicelliExec(char c, char nocase, const u8 *buf,
nocase ? "nocase " : "", c, (size_t)(buf_end - buf));
assert(buf < buf_end);
- VERM_TYPE chars = VERM_SET_FN(c); /* nocase already uppercase */
-
+ VERM_TYPE chars = VERM_SET_FN(c); /* nocase already uppercase */
+
// Handle small scans.
-#ifdef HAVE_AVX512
- if (buf_end - buf <= VERM_BOUNDARY) {
- const u8 *ptr = nocase
- ? vermMiniNocase(chars, buf, buf_end, 1)
- : vermMini(chars, buf, buf_end, 1);
- if (ptr) {
- return ptr;
- }
- return buf_end;
- }
-#else
+#ifdef HAVE_AVX512
+ if (buf_end - buf <= VERM_BOUNDARY) {
+ const u8 *ptr = nocase
+ ? vermMiniNocase(chars, buf, buf_end, 1)
+ : vermMini(chars, buf, buf_end, 1);
+ if (ptr) {
+ return ptr;
+ }
+ return buf_end;
+ }
+#else
if (buf_end - buf < VERM_BOUNDARY) {
for (; buf < buf_end; buf++) {
char cur = (char)*buf;
@@ -138,7 +138,7 @@ const u8 *nvermicelliExec(char c, char nocase, const u8 *buf,
}
return buf;
}
-#endif
+#endif
size_t min = (size_t)buf % VERM_BOUNDARY;
if (min) {
@@ -179,28 +179,28 @@ const u8 *vermicelliDoubleExec(char c1, char c2, char nocase, const u8 *buf,
VERM_TYPE chars1 = VERM_SET_FN(c1); /* nocase already uppercase */
VERM_TYPE chars2 = VERM_SET_FN(c2); /* nocase already uppercase */
-#ifdef HAVE_AVX512
- if (buf_end - buf <= VERM_BOUNDARY) {
- const u8 *ptr = nocase
- ? dvermMiniNocase(chars1, chars2, buf, buf_end)
- : dvermMini(chars1, chars2, buf, buf_end);
- if (ptr) {
- return ptr;
- }
-
- /* check for partial match at end */
- u8 mask = nocase ? CASE_CLEAR : 0xff;
- if ((buf_end[-1] & mask) == (u8)c1) {
- DEBUG_PRINTF("partial!!!\n");
- return buf_end - 1;
- }
-
- return buf_end;
- }
-#endif
-
- assert((buf_end - buf) >= VERM_BOUNDARY);
- uintptr_t min = (uintptr_t)buf % VERM_BOUNDARY;
+#ifdef HAVE_AVX512
+ if (buf_end - buf <= VERM_BOUNDARY) {
+ const u8 *ptr = nocase
+ ? dvermMiniNocase(chars1, chars2, buf, buf_end)
+ : dvermMini(chars1, chars2, buf, buf_end);
+ if (ptr) {
+ return ptr;
+ }
+
+ /* check for partial match at end */
+ u8 mask = nocase ? CASE_CLEAR : 0xff;
+ if ((buf_end[-1] & mask) == (u8)c1) {
+ DEBUG_PRINTF("partial!!!\n");
+ return buf_end - 1;
+ }
+
+ return buf_end;
+ }
+#endif
+
+ assert((buf_end - buf) >= VERM_BOUNDARY);
+ uintptr_t min = (uintptr_t)buf % VERM_BOUNDARY;
if (min) {
// Input isn't aligned, so we need to run one iteration with an
// unaligned load, then skip buf forward to the next aligned address.
@@ -257,26 +257,26 @@ const u8 *vermicelliDoubleMaskedExec(char c1, char c2, char m1, char m2,
VERM_TYPE mask1 = VERM_SET_FN(m1);
VERM_TYPE mask2 = VERM_SET_FN(m2);
-#ifdef HAVE_AVX512
- if (buf_end - buf <= VERM_BOUNDARY) {
- const u8 *ptr = dvermMiniMasked(chars1, chars2, mask1, mask2, buf,
- buf_end);
- if (ptr) {
- return ptr;
- }
-
- /* check for partial match at end */
- if ((buf_end[-1] & m1) == (u8)c1) {
- DEBUG_PRINTF("partial!!!\n");
- return buf_end - 1;
- }
-
- return buf_end;
- }
-#endif
-
- assert((buf_end - buf) >= VERM_BOUNDARY);
- uintptr_t min = (uintptr_t)buf % VERM_BOUNDARY;
+#ifdef HAVE_AVX512
+ if (buf_end - buf <= VERM_BOUNDARY) {
+ const u8 *ptr = dvermMiniMasked(chars1, chars2, mask1, mask2, buf,
+ buf_end);
+ if (ptr) {
+ return ptr;
+ }
+
+ /* check for partial match at end */
+ if ((buf_end[-1] & m1) == (u8)c1) {
+ DEBUG_PRINTF("partial!!!\n");
+ return buf_end - 1;
+ }
+
+ return buf_end;
+ }
+#endif
+
+ assert((buf_end - buf) >= VERM_BOUNDARY);
+ uintptr_t min = (uintptr_t)buf % VERM_BOUNDARY;
if (min) {
// Input isn't aligned, so we need to run one iteration with an
// unaligned load, then skip buf forward to the next aligned address.
@@ -308,7 +308,7 @@ const u8 *vermicelliDoubleMaskedExec(char c1, char c2, char m1, char m2,
/* check for partial match at end */
if ((buf_end[-1] & m1) == (u8)c1) {
- DEBUG_PRINTF("partial!!!\n");
+ DEBUG_PRINTF("partial!!!\n");
return buf_end - 1;
}
@@ -324,20 +324,20 @@ const u8 *rvermicelliExec(char c, char nocase, const u8 *buf,
nocase ? "nocase " : "", c, (size_t)(buf_end - buf));
assert(buf < buf_end);
- VERM_TYPE chars = VERM_SET_FN(c); /* nocase already uppercase */
-
+ VERM_TYPE chars = VERM_SET_FN(c); /* nocase already uppercase */
+
// Handle small scans.
-#ifdef HAVE_AVX512
- if (buf_end - buf <= VERM_BOUNDARY) {
- const u8 *ptr = nocase
- ? rvermMiniNocase(chars, buf, buf_end, 0)
- : rvermMini(chars, buf, buf_end, 0);
- if (ptr) {
- return ptr;
- }
- return buf - 1;
- }
-#else
+#ifdef HAVE_AVX512
+ if (buf_end - buf <= VERM_BOUNDARY) {
+ const u8 *ptr = nocase
+ ? rvermMiniNocase(chars, buf, buf_end, 0)
+ : rvermMini(chars, buf, buf_end, 0);
+ if (ptr) {
+ return ptr;
+ }
+ return buf - 1;
+ }
+#else
if (buf_end - buf < VERM_BOUNDARY) {
for (buf_end--; buf_end >= buf; buf_end--) {
char cur = (char)*buf_end;
@@ -350,7 +350,7 @@ const u8 *rvermicelliExec(char c, char nocase, const u8 *buf,
}
return buf_end;
}
-#endif
+#endif
size_t min = (size_t)buf_end % VERM_BOUNDARY;
if (min) {
@@ -358,14 +358,14 @@ const u8 *rvermicelliExec(char c, char nocase, const u8 *buf,
// unaligned load, then skip buf backward to the next aligned address.
// There's some small overlap here, but we don't mind scanning it twice
// if we can do it quickly, do we?
- const u8 *ptr = nocase ? rvermUnalignNocase(chars,
- buf_end - VERM_BOUNDARY,
- 0)
- : rvermUnalign(chars, buf_end - VERM_BOUNDARY,
- 0);
-
- if (ptr) {
- return ptr;
+ const u8 *ptr = nocase ? rvermUnalignNocase(chars,
+ buf_end - VERM_BOUNDARY,
+ 0)
+ : rvermUnalign(chars, buf_end - VERM_BOUNDARY,
+ 0);
+
+ if (ptr) {
+ return ptr;
}
buf_end -= min;
@@ -396,20 +396,20 @@ const u8 *rnvermicelliExec(char c, char nocase, const u8 *buf,
nocase ? "nocase " : "", c, (size_t)(buf_end - buf));
assert(buf < buf_end);
- VERM_TYPE chars = VERM_SET_FN(c); /* nocase already uppercase */
-
+ VERM_TYPE chars = VERM_SET_FN(c); /* nocase already uppercase */
+
// Handle small scans.
-#ifdef HAVE_AVX512
- if (buf_end - buf <= VERM_BOUNDARY) {
- const u8 *ptr = nocase
- ? rvermMiniNocase(chars, buf, buf_end, 1)
- : rvermMini(chars, buf, buf_end, 1);
- if (ptr) {
- return ptr;
- }
- return buf - 1;
- }
-#else
+#ifdef HAVE_AVX512
+ if (buf_end - buf <= VERM_BOUNDARY) {
+ const u8 *ptr = nocase
+ ? rvermMiniNocase(chars, buf, buf_end, 1)
+ : rvermMini(chars, buf, buf_end, 1);
+ if (ptr) {
+ return ptr;
+ }
+ return buf - 1;
+ }
+#else
if (buf_end - buf < VERM_BOUNDARY) {
for (buf_end--; buf_end >= buf; buf_end--) {
char cur = (char)*buf_end;
@@ -422,7 +422,7 @@ const u8 *rnvermicelliExec(char c, char nocase, const u8 *buf,
}
return buf_end;
}
-#endif
+#endif
size_t min = (size_t)buf_end % VERM_BOUNDARY;
if (min) {
@@ -430,14 +430,14 @@ const u8 *rnvermicelliExec(char c, char nocase, const u8 *buf,
// unaligned load, then skip buf backward to the next aligned address.
// There's some small overlap here, but we don't mind scanning it twice
// if we can do it quickly, do we?
- const u8 *ptr = nocase ? rvermUnalignNocase(chars,
- buf_end - VERM_BOUNDARY,
- 1)
- : rvermUnalign(chars, buf_end - VERM_BOUNDARY,
- 1);
-
- if (ptr) {
- return ptr;
+ const u8 *ptr = nocase ? rvermUnalignNocase(chars,
+ buf_end - VERM_BOUNDARY,
+ 1)
+ : rvermUnalign(chars, buf_end - VERM_BOUNDARY,
+ 1);
+
+ if (ptr) {
+ return ptr;
}
buf_end -= min;
@@ -470,32 +470,32 @@ const u8 *rvermicelliDoubleExec(char c1, char c2, char nocase, const u8 *buf,
VERM_TYPE chars1 = VERM_SET_FN(c1); /* nocase already uppercase */
VERM_TYPE chars2 = VERM_SET_FN(c2); /* nocase already uppercase */
-#ifdef HAVE_AVX512
- if (buf_end - buf <= VERM_BOUNDARY) {
- const u8 *ptr = nocase
- ? rdvermMiniNocase(chars1, chars2, buf, buf_end)
- : rdvermMini(chars1, chars2, buf, buf_end);
-
- if (ptr) {
- return ptr;
- }
-
- // check for partial match at end ???
- return buf - 1;
- }
-#endif
-
- assert((buf_end - buf) >= VERM_BOUNDARY);
- size_t min = (size_t)buf_end % VERM_BOUNDARY;
+#ifdef HAVE_AVX512
+ if (buf_end - buf <= VERM_BOUNDARY) {
+ const u8 *ptr = nocase
+ ? rdvermMiniNocase(chars1, chars2, buf, buf_end)
+ : rdvermMini(chars1, chars2, buf, buf_end);
+
+ if (ptr) {
+ return ptr;
+ }
+
+ // check for partial match at end ???
+ return buf - 1;
+ }
+#endif
+
+ assert((buf_end - buf) >= VERM_BOUNDARY);
+ size_t min = (size_t)buf_end % VERM_BOUNDARY;
if (min) {
// input not aligned, so we need to run one iteration with an unaligned
// load, then skip buf forward to the next aligned address. There's
// some small overlap here, but we don't mind scanning it twice if we
// can do it quickly, do we?
- const u8 *ptr = nocase ? rdvermPreconditionNocase(chars1, chars2,
- buf_end - VERM_BOUNDARY)
- : rdvermPrecondition(chars1, chars2,
- buf_end - VERM_BOUNDARY);
+ const u8 *ptr = nocase ? rdvermPreconditionNocase(chars1, chars2,
+ buf_end - VERM_BOUNDARY)
+ : rdvermPrecondition(chars1, chars2,
+ buf_end - VERM_BOUNDARY);
if (ptr) {
return ptr;
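For context on this file: vermicelli is the single/double character SIMD scanner. It broadcasts the target byte across a vector, compares a whole block at once, and uses the resulting bitmask to locate the first hit, with an unaligned head iteration followed by aligned loads (and, on AVX512, a masked-load path for short buffers). A self-contained sketch of the core idea using plain SSE2 intrinsics rather than the VERM_* wrappers (the function name is illustrative; __builtin_ctz assumes GCC/Clang):

#include <emmintrin.h>
#include <stdint.h>

static const uint8_t *verm_sketch(char c, const uint8_t *buf,
                                  const uint8_t *buf_end) {
    __m128i chars = _mm_set1_epi8(c);                 /* broadcast target byte */
    while (buf + 16 <= buf_end) {
        __m128i data = _mm_loadu_si128((const __m128i *)buf);
        int z = _mm_movemask_epi8(_mm_cmpeq_epi8(chars, data));
        if (z) {
            return buf + __builtin_ctz((unsigned)z);  /* first matching lane */
        }
        buf += 16;
    }
    for (; buf < buf_end; buf++) {                    /* scalar tail */
        if (*buf == (uint8_t)c) {
            return buf;
        }
    }
    return buf_end;  /* mirrors vermicelliExec's not-found convention */
}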
diff --git a/contrib/libs/hyperscan/src/nfa/vermicelli_sse.h b/contrib/libs/hyperscan/src/nfa/vermicelli_sse.h
index 3ec28dbf77..3307486cff 100644
--- a/contrib/libs/hyperscan/src/nfa/vermicelli_sse.h
+++ b/contrib/libs/hyperscan/src/nfa/vermicelli_sse.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,8 +32,8 @@
* (users should include vermicelli.h)
*/
-#if !defined(HAVE_AVX512)
-
+#if !defined(HAVE_AVX512)
+
#define VERM_BOUNDARY 16
#define VERM_TYPE m128
#define VERM_SET_FN set16x8
@@ -393,497 +393,497 @@ const u8 *rdvermPreconditionNocase(m128 chars1, m128 chars2, const u8 *buf) {
return NULL;
}
-
-#else // HAVE_AVX512
-
-#define VERM_BOUNDARY 64
-#define VERM_TYPE m512
-#define VERM_SET_FN set64x8
-
-static really_inline
-const u8 *vermMini(m512 chars, const u8 *buf, const u8 *buf_end, char negate) {
- uintptr_t len = buf_end - buf;
- __mmask64 mask = (~0ULL) >> (64 - len);
- m512 data = loadu_maskz_m512(mask, buf);
-
- u64a z = eq512mask(chars, data);
-
- if (negate) {
- z = ~z & mask;
- }
- z &= mask;
- if (unlikely(z)) {
- return buf + ctz64(z);
- }
- return NULL;
-}
-
-static really_inline
-const u8 *vermMiniNocase(m512 chars, const u8 *buf, const u8 *buf_end,
- char negate) {
- uintptr_t len = buf_end - buf;
- __mmask64 mask = (~0ULL) >> (64 - len);
- m512 data = loadu_maskz_m512(mask, buf);
- m512 casemask = set64x8(CASE_CLEAR);
- m512 v = and512(casemask, data);
-
- u64a z = eq512mask(chars, v);
-
- if (negate) {
- z = ~z & mask;
- }
- z &= mask;
- if (unlikely(z)) {
- return buf + ctz64(z);
- }
- return NULL;
-}
-
-static really_inline
-const u8 *vermSearchAligned(m512 chars, const u8 *buf, const u8 *buf_end,
- char negate) {
- assert((size_t)buf % 64 == 0);
- for (; buf + 63 < buf_end; buf += 64) {
- m512 data = load512(buf);
- u64a z = eq512mask(chars, data);
- if (negate) {
- z = ~z & ~0ULL;
- }
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- }
- return NULL;
-}
-
-static really_inline
-const u8 *vermSearchAlignedNocase(m512 chars, const u8 *buf,
- const u8 *buf_end, char negate) {
- assert((size_t)buf % 64 == 0);
- m512 casemask = set64x8(CASE_CLEAR);
-
- for (; buf + 63 < buf_end; buf += 64) {
- m512 data = load512(buf);
- u64a z = eq512mask(chars, and512(casemask, data));
- if (negate) {
- z = ~z & ~0ULL;
- }
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- }
- return NULL;
-}
-
-// returns NULL if not found
-static really_inline
-const u8 *vermUnalign(m512 chars, const u8 *buf, char negate) {
- m512 data = loadu512(buf); // unaligned
- u64a z = eq512mask(chars, data);
- if (negate) {
- z = ~z & ~0ULL;
- }
- if (unlikely(z)) {
- return buf + ctz64(z);
- }
- return NULL;
-}
-
-// returns NULL if not found
-static really_inline
-const u8 *vermUnalignNocase(m512 chars, const u8 *buf, char negate) {
- m512 casemask = set64x8(CASE_CLEAR);
- m512 data = loadu512(buf); // unaligned
- u64a z = eq512mask(chars, and512(casemask, data));
- if (negate) {
- z = ~z & ~0ULL;
- }
- if (unlikely(z)) {
- return buf + ctz64(z);
- }
- return NULL;
-}
-
-static really_inline
-const u8 *dvermMini(m512 chars1, m512 chars2, const u8 *buf,
- const u8 *buf_end) {
- uintptr_t len = buf_end - buf;
- __mmask64 mask = (~0ULL) >> (64 - len);
- m512 data = loadu_maskz_m512(mask, buf);
-
- u64a z = eq512mask(chars1, data) & (eq512mask(chars2, data) >> 1);
-
- z &= mask;
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- return NULL;
-}
-
-static really_inline
-const u8 *dvermMiniNocase(m512 chars1, m512 chars2, const u8 *buf,
- const u8 *buf_end) {
- uintptr_t len = buf_end - buf;
- __mmask64 mask = (~0ULL) >> (64 - len);
- m512 data = loadu_maskz_m512(mask, buf);
- m512 casemask = set64x8(CASE_CLEAR);
- m512 v = and512(casemask, data);
-
- u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
-
- z &= mask;
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- return NULL;
-}
-
-static really_inline
-const u8 *dvermMiniMasked(m512 chars1, m512 chars2, m512 mask1, m512 mask2,
- const u8 *buf, const u8 *buf_end) {
- uintptr_t len = buf_end - buf;
- __mmask64 mask = (~0ULL) >> (64 - len);
- m512 data = loadu_maskz_m512(mask, buf);
- m512 v1 = and512(data, mask1);
- m512 v2 = and512(data, mask2);
-
- u64a z = eq512mask(chars1, v1) & (eq512mask(chars2, v2) >> 1);
-
- z &= mask;
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- return NULL;
-}
-
-static really_inline
-const u8 *dvermSearchAligned(m512 chars1, m512 chars2, u8 c1, u8 c2,
- const u8 *buf, const u8 *buf_end) {
- for (; buf + 64 < buf_end; buf += 64) {
- m512 data = load512(buf);
- u64a z = eq512mask(chars1, data) & (eq512mask(chars2, data) >> 1);
- if (buf[63] == c1 && buf[64] == c2) {
- z |= (1ULL << 63);
- }
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- }
-
- return NULL;
-}
-
-static really_inline
-const u8 *dvermSearchAlignedNocase(m512 chars1, m512 chars2, u8 c1, u8 c2,
- const u8 *buf, const u8 *buf_end) {
- assert((size_t)buf % 64 == 0);
- m512 casemask = set64x8(CASE_CLEAR);
-
- for (; buf + 64 < buf_end; buf += 64) {
- m512 data = load512(buf);
- m512 v = and512(casemask, data);
- u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
- if ((buf[63] & CASE_CLEAR) == c1 && (buf[64] & CASE_CLEAR) == c2) {
- z |= (1ULL << 63);
- }
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- }
-
- return NULL;
-}
-
-static really_inline
-const u8 *dvermSearchAlignedMasked(m512 chars1, m512 chars2,
- m512 mask1, m512 mask2, u8 c1, u8 c2, u8 m1,
- u8 m2, const u8 *buf, const u8 *buf_end) {
- assert((size_t)buf % 64 == 0);
-
- for (; buf + 64 < buf_end; buf += 64) {
- m512 data = load512(buf);
- m512 v1 = and512(data, mask1);
- m512 v2 = and512(data, mask2);
- u64a z = eq512mask(chars1, v1) & (eq512mask(chars2, v2) >> 1);
-
- if ((buf[63] & m1) == c1 && (buf[64] & m2) == c2) {
- z |= (1ULL << 63);
- }
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- }
-
- return NULL;
-}
-
-// returns NULL if not found
-static really_inline
-const u8 *dvermPrecondition(m512 chars1, m512 chars2, const u8 *buf) {
- m512 data = loadu512(buf); // unaligned
- u64a z = eq512mask(chars1, data) & (eq512mask(chars2, data) >> 1);
-
- /* no fixup of the boundary required - the aligned run will pick it up */
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- return NULL;
-}
-
-// returns NULL if not found
-static really_inline
-const u8 *dvermPreconditionNocase(m512 chars1, m512 chars2, const u8 *buf) {
- /* due to laziness, nonalphas and nocase having interesting behaviour */
- m512 casemask = set64x8(CASE_CLEAR);
- m512 data = loadu512(buf); // unaligned
- m512 v = and512(casemask, data);
- u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
-
- /* no fixup of the boundary required - the aligned run will pick it up */
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- return NULL;
-}
-
-// returns NULL if not found
-static really_inline
-const u8 *dvermPreconditionMasked(m512 chars1, m512 chars2,
- m512 mask1, m512 mask2, const u8 *buf) {
- m512 data = loadu512(buf); // unaligned
- m512 v1 = and512(data, mask1);
- m512 v2 = and512(data, mask2);
- u64a z = eq512mask(chars1, v1) & (eq512mask(chars2, v2) >> 1);
-
- /* no fixup of the boundary required - the aligned run will pick it up */
- if (unlikely(z)) {
- u64a pos = ctz64(z);
- return buf + pos;
- }
- return NULL;
-}
-
-static really_inline
-const u8 *lastMatchOffset(const u8 *buf_end, u64a z) {
- assert(z);
- return buf_end - 64 + 63 - clz64(z);
-}
-
-static really_inline
-const u8 *rvermMini(m512 chars, const u8 *buf, const u8 *buf_end, char negate) {
- uintptr_t len = buf_end - buf;
- __mmask64 mask = (~0ULL) >> (64 - len);
- m512 data = loadu_maskz_m512(mask, buf);
-
- u64a z = eq512mask(chars, data);
-
- if (negate) {
- z = ~z & mask;
- }
- z &= mask;
- if (unlikely(z)) {
- return lastMatchOffset(buf + 64, z);
- }
- return NULL;
-}
-
-static really_inline
-const u8 *rvermMiniNocase(m512 chars, const u8 *buf, const u8 *buf_end,
- char negate) {
- uintptr_t len = buf_end - buf;
- __mmask64 mask = (~0ULL) >> (64 - len);
- m512 data = loadu_maskz_m512(mask, buf);
- m512 casemask = set64x8(CASE_CLEAR);
- m512 v = and512(casemask, data);
-
- u64a z = eq512mask(chars, v);
-
- if (negate) {
- z = ~z & mask;
- }
- z &= mask;
- if (unlikely(z)) {
- return lastMatchOffset(buf + 64, z);
- }
- return NULL;
-}
-
-static really_inline
-const u8 *rvermSearchAligned(m512 chars, const u8 *buf, const u8 *buf_end,
- char negate) {
- assert((size_t)buf_end % 64 == 0);
- for (; buf + 63 < buf_end; buf_end -= 64) {
- m512 data = load512(buf_end - 64);
- u64a z = eq512mask(chars, data);
- if (negate) {
- z = ~z & ~0ULL;
- }
- if (unlikely(z)) {
- return lastMatchOffset(buf_end, z);
- }
- }
- return NULL;
-}
-
-static really_inline
-const u8 *rvermSearchAlignedNocase(m512 chars, const u8 *buf,
- const u8 *buf_end, char negate) {
- assert((size_t)buf_end % 64 == 0);
- m512 casemask = set64x8(CASE_CLEAR);
-
- for (; buf + 63 < buf_end; buf_end -= 64) {
- m512 data = load512(buf_end - 64);
- u64a z = eq512mask(chars, and512(casemask, data));
- if (negate) {
- z = ~z & ~0ULL;
- }
- if (unlikely(z)) {
- return lastMatchOffset(buf_end, z);
- }
- }
- return NULL;
-}
-
-// returns NULL if not found
-static really_inline
-const u8 *rvermUnalign(m512 chars, const u8 *buf, char negate) {
- m512 data = loadu512(buf); // unaligned
- u64a z = eq512mask(chars, data);
- if (negate) {
- z = ~z & ~0ULL;
- }
- if (unlikely(z)) {
- return lastMatchOffset(buf + 64, z);
- }
- return NULL;
-}
-
-// returns NULL if not found
-static really_inline
-const u8 *rvermUnalignNocase(m512 chars, const u8 *buf, char negate) {
- m512 casemask = set64x8(CASE_CLEAR);
- m512 data = loadu512(buf); // unaligned
- u64a z = eq512mask(chars, and512(casemask, data));
- if (negate) {
- z = ~z & ~0ULL;
- }
- if (unlikely(z)) {
- return lastMatchOffset(buf + 64, z);
- }
- return NULL;
-}
-
-static really_inline
-const u8 *rdvermMini(m512 chars1, m512 chars2, const u8 *buf,
- const u8 *buf_end) {
- uintptr_t len = buf_end - buf;
- __mmask64 mask = (~0ULL) >> (64 - len);
- m512 data = loadu_maskz_m512(mask, buf);
-
- u64a z = eq512mask(chars2, data) & (eq512mask(chars1, data) << 1);
-
- z &= mask;
- if (unlikely(z)) {
- return lastMatchOffset(buf + 64, z);
- }
- return NULL;
-}
-
-static really_inline
-const u8 *rdvermMiniNocase(m512 chars1, m512 chars2, const u8 *buf,
- const u8 *buf_end) {
- uintptr_t len = buf_end - buf;
- __mmask64 mask = (~0ULL) >> (64 - len);
- m512 data = loadu_maskz_m512(mask, buf);
- m512 casemask = set64x8(CASE_CLEAR);
- m512 v = and512(casemask, data);
-
- u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
-
- z &= mask;
- if (unlikely(z)) {
- return lastMatchOffset(buf + 64, z);
- }
- return NULL;
-}
-
-static really_inline
-const u8 *rdvermSearchAligned(m512 chars1, m512 chars2, u8 c1, u8 c2,
- const u8 *buf, const u8 *buf_end) {
- assert((size_t)buf_end % 64 == 0);
-
- for (; buf + 64 < buf_end; buf_end -= 64) {
- m512 data = load512(buf_end - 64);
- u64a z = eq512mask(chars2, data) & (eq512mask(chars1, data) << 1);
- if (buf_end[-65] == c1 && buf_end[-64] == c2) {
- z |= 1;
- }
- if (unlikely(z)) {
- return lastMatchOffset(buf_end, z);
- }
- }
- return buf_end;
-}
-
-static really_inline
-const u8 *rdvermSearchAlignedNocase(m512 chars1, m512 chars2, u8 c1, u8 c2,
- const u8 *buf, const u8 *buf_end) {
- assert((size_t)buf_end % 64 == 0);
- m512 casemask = set64x8(CASE_CLEAR);
-
- for (; buf + 64 < buf_end; buf_end -= 64) {
- m512 data = load512(buf_end - 64);
- m512 v = and512(casemask, data);
- u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
- if ((buf_end[-65] & CASE_CLEAR) == c1
- && (buf_end[-64] & CASE_CLEAR) == c2) {
- z |= 1;
- }
- if (unlikely(z)) {
- return lastMatchOffset(buf_end, z);
- }
- }
- return buf_end;
-}
-
-// returns NULL if not found
-static really_inline
-const u8 *rdvermPrecondition(m512 chars1, m512 chars2, const u8 *buf) {
- m512 data = loadu512(buf);
- u64a z = eq512mask(chars2, data) & (eq512mask(chars1, data) << 1);
-
- // no fixup of the boundary required - the aligned run will pick it up
- if (unlikely(z)) {
- return lastMatchOffset(buf + 64, z);
- }
-
- return NULL;
-}
-
-// returns NULL if not found
-static really_inline
-const u8 *rdvermPreconditionNocase(m512 chars1, m512 chars2, const u8 *buf) {
- // due to laziness, nonalphas and nocase having interesting behaviour
- m512 casemask = set64x8(CASE_CLEAR);
- m512 data = loadu512(buf);
- m512 v = and512(casemask, data);
- u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
- // no fixup of the boundary required - the aligned run will pick it up
- if (unlikely(z)) {
- return lastMatchOffset(buf + 64, z);
- }
-
- return NULL;
-}
-
-#endif // HAVE_AVX512
+
+#else // HAVE_AVX512
+
+#define VERM_BOUNDARY 64
+#define VERM_TYPE m512
+#define VERM_SET_FN set64x8
+
+static really_inline
+const u8 *vermMini(m512 chars, const u8 *buf, const u8 *buf_end, char negate) {
+ uintptr_t len = buf_end - buf;
+ __mmask64 mask = (~0ULL) >> (64 - len);
+ m512 data = loadu_maskz_m512(mask, buf);
+
+ u64a z = eq512mask(chars, data);
+
+ if (negate) {
+ z = ~z & mask;
+ }
+ z &= mask;
+ if (unlikely(z)) {
+ return buf + ctz64(z);
+ }
+ return NULL;
+}
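The mask computation in vermMini above is the short-buffer trick used throughout this AVX512 block: (~0ULL) >> (64 - len) sets exactly the low len bits, loadu_maskz_m512 zero-fills the rest of the vector, and z &= mask discards any lanes past buf_end. A scalar model of the same logic (the model function is illustrative; assumes 0 < len <= 64 and GCC/Clang's __builtin_ctzll):

#include <stddef.h>
#include <stdint.h>

static int64_t verm_mini_model(uint8_t c, const uint8_t *buf, size_t len) {
    uint64_t mask = (~0ULL) >> (64 - len);  /* low len bits set */
    uint64_t z = 0;
    for (size_t i = 0; i < len; i++) {
        if (buf[i] == c) {
            z |= 1ULL << i;                 /* models eq512mask */
        }
    }
    z &= mask;                              /* drop out-of-range lanes */
    return z ? (int64_t)__builtin_ctzll(z) : -1;
}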
+
+static really_inline
+const u8 *vermMiniNocase(m512 chars, const u8 *buf, const u8 *buf_end,
+ char negate) {
+ uintptr_t len = buf_end - buf;
+ __mmask64 mask = (~0ULL) >> (64 - len);
+ m512 data = loadu_maskz_m512(mask, buf);
+ m512 casemask = set64x8(CASE_CLEAR);
+ m512 v = and512(casemask, data);
+
+ u64a z = eq512mask(chars, v);
+
+ if (negate) {
+ z = ~z & mask;
+ }
+ z &= mask;
+ if (unlikely(z)) {
+ return buf + ctz64(z);
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *vermSearchAligned(m512 chars, const u8 *buf, const u8 *buf_end,
+ char negate) {
+ assert((size_t)buf % 64 == 0);
+ for (; buf + 63 < buf_end; buf += 64) {
+ m512 data = load512(buf);
+ u64a z = eq512mask(chars, data);
+ if (negate) {
+ z = ~z & ~0ULL;
+ }
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *vermSearchAlignedNocase(m512 chars, const u8 *buf,
+ const u8 *buf_end, char negate) {
+ assert((size_t)buf % 64 == 0);
+ m512 casemask = set64x8(CASE_CLEAR);
+
+ for (; buf + 63 < buf_end; buf += 64) {
+ m512 data = load512(buf);
+ u64a z = eq512mask(chars, and512(casemask, data));
+ if (negate) {
+ z = ~z & ~0ULL;
+ }
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ }
+ return NULL;
+}
+
+// returns NULL if not found
+static really_inline
+const u8 *vermUnalign(m512 chars, const u8 *buf, char negate) {
+ m512 data = loadu512(buf); // unaligned
+ u64a z = eq512mask(chars, data);
+ if (negate) {
+ z = ~z & ~0ULL;
+ }
+ if (unlikely(z)) {
+ return buf + ctz64(z);
+ }
+ return NULL;
+}
+
+// returns NULL if not found
+static really_inline
+const u8 *vermUnalignNocase(m512 chars, const u8 *buf, char negate) {
+ m512 casemask = set64x8(CASE_CLEAR);
+ m512 data = loadu512(buf); // unaligned
+ u64a z = eq512mask(chars, and512(casemask, data));
+ if (negate) {
+ z = ~z & ~0ULL;
+ }
+ if (unlikely(z)) {
+ return buf + ctz64(z);
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *dvermMini(m512 chars1, m512 chars2, const u8 *buf,
+ const u8 *buf_end) {
+ uintptr_t len = buf_end - buf;
+ __mmask64 mask = (~0ULL) >> (64 - len);
+ m512 data = loadu_maskz_m512(mask, buf);
+
+ u64a z = eq512mask(chars1, data) & (eq512mask(chars2, data) >> 1);
+
+ z &= mask;
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ return NULL;
+}
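dvermMini above relies on the pair-shift trick shared by all the double-vermicelli routines: with bit i of z1 marking buf[i] == c1 and bit i of z2 marking buf[i] == c2, shifting z2 right by one aligns each c2 hit onto the preceding position, so z1 & (z2 >> 1) marks every i where c1 is immediately followed by c2. A scalar model (the name is illustrative):

#include <stddef.h>
#include <stdint.h>

static uint64_t dverm_model(const uint8_t *buf, size_t len,
                            uint8_t c1, uint8_t c2) {
    uint64_t z1 = 0, z2 = 0;
    for (size_t i = 0; i < len && i < 64; i++) {
        if (buf[i] == c1) z1 |= 1ULL << i;
        if (buf[i] == c2) z2 |= 1ULL << i;
    }
    return z1 & (z2 >> 1);  /* bit i set: c1 at i, c2 at i + 1 */
}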
+
+static really_inline
+const u8 *dvermMiniNocase(m512 chars1, m512 chars2, const u8 *buf,
+ const u8 *buf_end) {
+ uintptr_t len = buf_end - buf;
+ __mmask64 mask = (~0ULL) >> (64 - len);
+ m512 data = loadu_maskz_m512(mask, buf);
+ m512 casemask = set64x8(CASE_CLEAR);
+ m512 v = and512(casemask, data);
+
+ u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
+
+ z &= mask;
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *dvermMiniMasked(m512 chars1, m512 chars2, m512 mask1, m512 mask2,
+ const u8 *buf, const u8 *buf_end) {
+ uintptr_t len = buf_end - buf;
+ __mmask64 mask = (~0ULL) >> (64 - len);
+ m512 data = loadu_maskz_m512(mask, buf);
+ m512 v1 = and512(data, mask1);
+ m512 v2 = and512(data, mask2);
+
+ u64a z = eq512mask(chars1, v1) & (eq512mask(chars2, v2) >> 1);
+
+ z &= mask;
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *dvermSearchAligned(m512 chars1, m512 chars2, u8 c1, u8 c2,
+ const u8 *buf, const u8 *buf_end) {
+ for (; buf + 64 < buf_end; buf += 64) {
+ m512 data = load512(buf);
+ u64a z = eq512mask(chars1, data) & (eq512mask(chars2, data) >> 1);
+ if (buf[63] == c1 && buf[64] == c2) {
+ z |= (1ULL << 63);
+ }
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ }
+
+ return NULL;
+}
+
+static really_inline
+const u8 *dvermSearchAlignedNocase(m512 chars1, m512 chars2, u8 c1, u8 c2,
+ const u8 *buf, const u8 *buf_end) {
+ assert((size_t)buf % 64 == 0);
+ m512 casemask = set64x8(CASE_CLEAR);
+
+ for (; buf + 64 < buf_end; buf += 64) {
+ m512 data = load512(buf);
+ m512 v = and512(casemask, data);
+ u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
+ if ((buf[63] & CASE_CLEAR) == c1 && (buf[64] & CASE_CLEAR) == c2) {
+ z |= (1ULL << 63);
+ }
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ }
+
+ return NULL;
+}
+
+static really_inline
+const u8 *dvermSearchAlignedMasked(m512 chars1, m512 chars2,
+ m512 mask1, m512 mask2, u8 c1, u8 c2, u8 m1,
+ u8 m2, const u8 *buf, const u8 *buf_end) {
+ assert((size_t)buf % 64 == 0);
+
+ for (; buf + 64 < buf_end; buf += 64) {
+ m512 data = load512(buf);
+ m512 v1 = and512(data, mask1);
+ m512 v2 = and512(data, mask2);
+ u64a z = eq512mask(chars1, v1) & (eq512mask(chars2, v2) >> 1);
+
+ if ((buf[63] & m1) == c1 && (buf[64] & m2) == c2) {
+ z |= (1ULL << 63);
+ }
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ }
+
+ return NULL;
+}
+
+// returns NULL if not found
+static really_inline
+const u8 *dvermPrecondition(m512 chars1, m512 chars2, const u8 *buf) {
+ m512 data = loadu512(buf); // unaligned
+ u64a z = eq512mask(chars1, data) & (eq512mask(chars2, data) >> 1);
+
+ /* no fixup of the boundary required - the aligned run will pick it up */
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ return NULL;
+}
+
+// returns NULL if not found
+static really_inline
+const u8 *dvermPreconditionNocase(m512 chars1, m512 chars2, const u8 *buf) {
+ /* due to laziness, nonalphas and nocase having interesting behaviour */
+ m512 casemask = set64x8(CASE_CLEAR);
+ m512 data = loadu512(buf); // unaligned
+ m512 v = and512(casemask, data);
+ u64a z = eq512mask(chars1, v) & (eq512mask(chars2, v) >> 1);
+
+ /* no fixup of the boundary required - the aligned run will pick it up */
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ return NULL;
+}
+
+// returns NULL if not found
+static really_inline
+const u8 *dvermPreconditionMasked(m512 chars1, m512 chars2,
+ m512 mask1, m512 mask2, const u8 *buf) {
+ m512 data = loadu512(buf); // unaligned
+ m512 v1 = and512(data, mask1);
+ m512 v2 = and512(data, mask2);
+ u64a z = eq512mask(chars1, v1) & (eq512mask(chars2, v2) >> 1);
+
+ /* no fixup of the boundary required - the aligned run will pick it up */
+ if (unlikely(z)) {
+ u64a pos = ctz64(z);
+ return buf + pos;
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *lastMatchOffset(const u8 *buf_end, u64a z) {
+ assert(z);
+ return buf_end - 64 + 63 - clz64(z);
+}
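lastMatchOffset above is the reverse-scan counterpart of the forward ctz-based lookups: the reverse routines want the latest match in a 64-byte block, which is the highest set bit of z, hence 63 - clz64(z) from the block start. A model under the assumption of GCC/Clang's __builtin_clzll:

#include <assert.h>
#include <stdint.h>

static const uint8_t *last_match_model(const uint8_t *buf_end, uint64_t z) {
    assert(z);  /* caller guarantees at least one match bit */
    /* block spans [buf_end - 64, buf_end); highest bit = latest match */
    return buf_end - 64 + (63 - __builtin_clzll(z));
}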
+
+static really_inline
+const u8 *rvermMini(m512 chars, const u8 *buf, const u8 *buf_end, char negate) {
+ uintptr_t len = buf_end - buf;
+ __mmask64 mask = (~0ULL) >> (64 - len);
+ m512 data = loadu_maskz_m512(mask, buf);
+
+ u64a z = eq512mask(chars, data);
+
+ if (negate) {
+ z = ~z & mask;
+ }
+ z &= mask;
+ if (unlikely(z)) {
+ return lastMatchOffset(buf + 64, z);
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *rvermMiniNocase(m512 chars, const u8 *buf, const u8 *buf_end,
+ char negate) {
+ uintptr_t len = buf_end - buf;
+ __mmask64 mask = (~0ULL) >> (64 - len);
+ m512 data = loadu_maskz_m512(mask, buf);
+ m512 casemask = set64x8(CASE_CLEAR);
+ m512 v = and512(casemask, data);
+
+ u64a z = eq512mask(chars, v);
+
+ if (negate) {
+ z = ~z & mask;
+ }
+ z &= mask;
+ if (unlikely(z)) {
+ return lastMatchOffset(buf + 64, z);
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *rvermSearchAligned(m512 chars, const u8 *buf, const u8 *buf_end,
+ char negate) {
+ assert((size_t)buf_end % 64 == 0);
+ for (; buf + 63 < buf_end; buf_end -= 64) {
+ m512 data = load512(buf_end - 64);
+ u64a z = eq512mask(chars, data);
+ if (negate) {
+ z = ~z & ~0ULL;
+ }
+ if (unlikely(z)) {
+ return lastMatchOffset(buf_end, z);
+ }
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *rvermSearchAlignedNocase(m512 chars, const u8 *buf,
+ const u8 *buf_end, char negate) {
+ assert((size_t)buf_end % 64 == 0);
+ m512 casemask = set64x8(CASE_CLEAR);
+
+ for (; buf + 63 < buf_end; buf_end -= 64) {
+ m512 data = load512(buf_end - 64);
+ u64a z = eq512mask(chars, and512(casemask, data));
+ if (negate) {
+ z = ~z & ~0ULL;
+ }
+ if (unlikely(z)) {
+ return lastMatchOffset(buf_end, z);
+ }
+ }
+ return NULL;
+}
+
+// returns NULL if not found
+static really_inline
+const u8 *rvermUnalign(m512 chars, const u8 *buf, char negate) {
+ m512 data = loadu512(buf); // unaligned
+ u64a z = eq512mask(chars, data);
+ if (negate) {
+ z = ~z & ~0ULL;
+ }
+ if (unlikely(z)) {
+ return lastMatchOffset(buf + 64, z);
+ }
+ return NULL;
+}
+
+// returns NULL if not found
+static really_inline
+const u8 *rvermUnalignNocase(m512 chars, const u8 *buf, char negate) {
+ m512 casemask = set64x8(CASE_CLEAR);
+ m512 data = loadu512(buf); // unaligned
+ u64a z = eq512mask(chars, and512(casemask, data));
+ if (negate) {
+ z = ~z & ~0ULL;
+ }
+ if (unlikely(z)) {
+ return lastMatchOffset(buf + 64, z);
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *rdvermMini(m512 chars1, m512 chars2, const u8 *buf,
+ const u8 *buf_end) {
+ uintptr_t len = buf_end - buf;
+ __mmask64 mask = (~0ULL) >> (64 - len);
+ m512 data = loadu_maskz_m512(mask, buf);
+
+ u64a z = eq512mask(chars2, data) & (eq512mask(chars1, data) << 1);
+
+ z &= mask;
+ if (unlikely(z)) {
+ return lastMatchOffset(buf + 64, z);
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *rdvermMiniNocase(m512 chars1, m512 chars2, const u8 *buf,
+ const u8 *buf_end) {
+ uintptr_t len = buf_end - buf;
+ __mmask64 mask = (~0ULL) >> (64 - len);
+ m512 data = loadu_maskz_m512(mask, buf);
+ m512 casemask = set64x8(CASE_CLEAR);
+ m512 v = and512(casemask, data);
+
+ u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
+
+ z &= mask;
+ if (unlikely(z)) {
+ return lastMatchOffset(buf + 64, z);
+ }
+ return NULL;
+}
+
+static really_inline
+const u8 *rdvermSearchAligned(m512 chars1, m512 chars2, u8 c1, u8 c2,
+ const u8 *buf, const u8 *buf_end) {
+ assert((size_t)buf_end % 64 == 0);
+
+ for (; buf + 64 < buf_end; buf_end -= 64) {
+ m512 data = load512(buf_end - 64);
+ u64a z = eq512mask(chars2, data) & (eq512mask(chars1, data) << 1);
+ if (buf_end[-65] == c1 && buf_end[-64] == c2) {
+ z |= 1;
+ }
+ if (unlikely(z)) {
+ return lastMatchOffset(buf_end, z);
+ }
+ }
+ return buf_end;
+}
+
+static really_inline
+const u8 *rdvermSearchAlignedNocase(m512 chars1, m512 chars2, u8 c1, u8 c2,
+ const u8 *buf, const u8 *buf_end) {
+ assert((size_t)buf_end % 64 == 0);
+ m512 casemask = set64x8(CASE_CLEAR);
+
+ for (; buf + 64 < buf_end; buf_end -= 64) {
+ m512 data = load512(buf_end - 64);
+ m512 v = and512(casemask, data);
+ u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
+ if ((buf_end[-65] & CASE_CLEAR) == c1
+ && (buf_end[-64] & CASE_CLEAR) == c2) {
+ z |= 1;
+ }
+ if (unlikely(z)) {
+ return lastMatchOffset(buf_end, z);
+ }
+ }
+ return buf_end;
+}
+
+// returns NULL if not found
+static really_inline
+const u8 *rdvermPrecondition(m512 chars1, m512 chars2, const u8 *buf) {
+ m512 data = loadu512(buf);
+ u64a z = eq512mask(chars2, data) & (eq512mask(chars1, data) << 1);
+
+ // no fixup of the boundary required - the aligned run will pick it up
+ if (unlikely(z)) {
+ return lastMatchOffset(buf + 64, z);
+ }
+
+ return NULL;
+}
+
+// returns NULL if not found
+static really_inline
+const u8 *rdvermPreconditionNocase(m512 chars1, m512 chars2, const u8 *buf) {
+ // due to laziness, nonalphas and nocase have interesting behaviour here
+ m512 casemask = set64x8(CASE_CLEAR);
+ m512 data = loadu512(buf);
+ m512 v = and512(casemask, data);
+ u64a z = eq512mask(chars2, v) & (eq512mask(chars1, v) << 1);
+ // no fixup of the boundary required - the aligned run will pick it up
+ if (unlikely(z)) {
+ return lastMatchOffset(buf + 64, z);
+ }
+
+ return NULL;
+}
+
+#endif // HAVE_AVX512
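All of the kernels above reduce a 64-byte block to a 64-bit mask: bit i of z is set when position i matches, a (c1, c2) pair is detected by ANDing the c1 mask with the c2 mask shifted right by one, and the reverse scans turn the highest set bit back into a pointer. A minimal scalar sketch of that arithmetic, assuming GCC/Clang's __builtin_clzll; the names are illustrative, not part of the library:

#include <assert.h>
#include <stdint.h>

/* scalar model of the pair mask built by the dverm* kernels above */
static uint64_t pair_mask(const uint8_t *block, uint8_t c1, uint8_t c2) {
    uint64_t z1 = 0, z2 = 0;
    for (int i = 0; i < 64; i++) {
        z1 |= (uint64_t)(block[i] == c1) << i;
        z2 |= (uint64_t)(block[i] == c2) << i;
    }
    /* bit 63 cannot see block[64], which is why the aligned loops above
     * patch the boundary with an explicit buf[63]/buf[64] check */
    return z1 & (z2 >> 1); /* bit i set: c1 at i, c2 at i + 1 */
}

/* scalar twin of lastMatchOffset(): the highest set bit of z names the
 * last matching position in the 64-byte block ending at buf_end */
static const uint8_t *last_match(const uint8_t *buf_end, uint64_t z) {
    assert(z);
    return buf_end - 64 + (63 - __builtin_clzll(z));
}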
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng.cpp b/contrib/libs/hyperscan/src/nfagraph/ng.cpp
index 45af26cfa4..8dccf9863d 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -577,8 +577,8 @@ bool NG::addHolder(NGHolder &g) {
}
bool NG::addLiteral(const ue2_literal &literal, u32 expr_index,
- u32 external_report, bool highlander, som_type som,
- bool quiet) {
+ u32 external_report, bool highlander, som_type som,
+ bool quiet) {
assert(!literal.empty());
if (!cc.grey.shortcutLiterals) {
@@ -606,7 +606,7 @@ bool NG::addLiteral(const ue2_literal &literal, u32 expr_index,
} else {
u32 ekey = highlander ? rm.getExhaustibleKey(external_report)
: INVALID_EKEY;
- Report r = makeECallback(external_report, 0, ekey, quiet);
+ Report r = makeECallback(external_report, 0, ekey, quiet);
id = rm.getInternalId(r);
}
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng.h b/contrib/libs/hyperscan/src/nfagraph/ng.h
index 46e147c3d6..a5a9077d4f 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng.h
+++ b/contrib/libs/hyperscan/src/nfagraph/ng.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -77,7 +77,7 @@ public:
/** \brief Adds a literal to Rose, used by literal shortcut passes (instead
* of using \ref addGraph) */
bool addLiteral(const ue2_literal &lit, u32 expr_index, u32 external_report,
- bool highlander, som_type som, bool quiet);
+ bool highlander, som_type som, bool quiet);
/** \brief Maximum history in bytes available for use by SOM reverse NFAs,
* a hack for pattern support (see UE-1903). This is always set to the max
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_calc_components.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_calc_components.cpp
index cbe17e3eb7..3e9454eeed 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_calc_components.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_calc_components.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -57,7 +57,7 @@
#include "grey.h"
#include "ue2common.h"
#include "util/graph_range.h"
-#include "util/graph_undirected.h"
+#include "util/graph_undirected.h"
#include "util/make_unique.h"
#include <map>
@@ -310,19 +310,19 @@ void splitIntoComponents(unique_ptr<NGHolder> g,
return;
}
- auto ug = make_undirected_graph(*g);
+ auto ug = make_undirected_graph(*g);
- // Filter specials and shell vertices from undirected graph.
- unordered_set<NFAVertex> bad_vertices(
- {g->start, g->startDs, g->accept, g->acceptEod});
- bad_vertices.insert(head_shell.begin(), head_shell.end());
- bad_vertices.insert(tail_shell.begin(), tail_shell.end());
+ // Filter specials and shell vertices from undirected graph.
+ unordered_set<NFAVertex> bad_vertices(
+ {g->start, g->startDs, g->accept, g->acceptEod});
+ bad_vertices.insert(head_shell.begin(), head_shell.end());
+ bad_vertices.insert(tail_shell.begin(), tail_shell.end());
auto filtered_ug = boost::make_filtered_graph(
- ug, boost::keep_all(), make_bad_vertex_filter(&bad_vertices));
+ ug, boost::keep_all(), make_bad_vertex_filter(&bad_vertices));
// Actually run the connected components algorithm.
- map<NFAVertex, u32> split_components;
+ map<NFAVertex, u32> split_components;
const u32 num = connected_components(
filtered_ug, boost::make_assoc_property_map(split_components));
@@ -339,7 +339,7 @@ void splitIntoComponents(unique_ptr<NGHolder> g,
// Collect vertex lists per component.
for (const auto &m : split_components) {
- NFAVertex v = m.first;
+ NFAVertex v = m.first;
u32 c = m.second;
verts[c].push_back(v);
DEBUG_PRINTF("vertex %zu is in comp %u\n", (*g)[v].index, c);
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_cyclic_redundancy.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_cyclic_redundancy.cpp
index b655c15ed3..0b24bf07a8 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_cyclic_redundancy.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_cyclic_redundancy.cpp
@@ -205,7 +205,7 @@ bool removeCyclicPathRedundancy(Graph &g, typename Graph::vertex_descriptor v,
DEBUG_PRINTF(" - checking w %zu\n", g[w].index);
- if (!searchForward(g, reach, colours, succ_v, w)) {
+ if (!searchForward(g, reach, colours, succ_v, w)) {
continue;
}
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_equivalence.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_equivalence.cpp
index 1d482b5145..fba8ce7b74 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_equivalence.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_equivalence.cpp
@@ -269,7 +269,7 @@ vector<unique_ptr<VertexInfo>> getVertexInfos(const NGHolder &g) {
vertex_map.resize(num_verts);
for (auto v : vertices_range(g)) {
- infos.push_back(std::make_unique<VertexInfo>(v, g));
+ infos.push_back(std::make_unique<VertexInfo>(v, g));
vertex_map[g[v].index] = infos.back().get();
}
@@ -516,7 +516,7 @@ void mergeClass(vector<unique_ptr<VertexInfo>> &infos, NGHolder &g,
g[new_v].reports.clear(); /* populated as we pull in succs */
// store this vertex in our global vertex list
- infos.push_back(std::make_unique<VertexInfo>(new_v, g));
+ infos.push_back(std::make_unique<VertexInfo>(new_v, g));
VertexInfo *new_vertex_info = infos.back().get();
NFAVertex new_v_eod = NGHolder::null_vertex();
@@ -525,7 +525,7 @@ void mergeClass(vector<unique_ptr<VertexInfo>> &infos, NGHolder &g,
if (require_separate_eod_vertex(cur_class_vertices, g)) {
new_v_eod = clone_vertex(g, old_v);
g[new_v_eod].reports.clear();
- infos.push_back(std::make_unique<VertexInfo>(new_v_eod, g));
+ infos.push_back(std::make_unique<VertexInfo>(new_v_eod, g));
new_vertex_info_eod = infos.back().get();
}
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_haig.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_haig.cpp
index 8fdb1acf12..8054544772 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_haig.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_haig.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -73,7 +73,7 @@ static
void populateInit(const NGHolder &g, const flat_set<NFAVertex> &unused,
stateset *init, stateset *initDS,
vector<NFAVertex> *v_by_index) {
- DEBUG_PRINTF("graph kind: %s\n", to_string(g.kind).c_str());
+ DEBUG_PRINTF("graph kind: %s\n", to_string(g.kind).c_str());
for (auto v : vertices_range(g)) {
if (contains(unused, v)) {
continue;
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_is_equal.h b/contrib/libs/hyperscan/src/nfagraph/ng_is_equal.h
index dea770c540..d8046270ff 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_is_equal.h
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_is_equal.h
@@ -49,16 +49,16 @@ bool is_equal(const NGHolder &a, ReportID a_r, const NGHolder &b, ReportID b_r);
u64a hash_holder(const NGHolder &g);
-// Util Functors
-struct NGHolderHasher {
- size_t operator()(const std::shared_ptr<const NGHolder> &h) const {
- return hash_holder(*h);
- }
- size_t operator()(const std::shared_ptr<NGHolder> &h) const {
- return hash_holder(*h);
- }
-};
-
+// Util Functors
+struct NGHolderHasher {
+ size_t operator()(const std::shared_ptr<const NGHolder> &h) const {
+ return hash_holder(*h);
+ }
+ size_t operator()(const std::shared_ptr<NGHolder> &h) const {
+ return hash_holder(*h);
+ }
+};
+
struct NGHolderEqual {
bool operator()(const std::shared_ptr<const NGHolder> &a,
const std::shared_ptr<const NGHolder> &b) const {
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_limex.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_limex.cpp
index 49c59e7c11..2f0a55eab9 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_limex.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_limex.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -632,8 +632,8 @@ bytecode_ptr<NFA>
constructNFA(const NGHolder &h_in, const ReportManager *rm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
- bool compress_state, bool do_accel, bool impl_test_only,
- bool &fast, u32 hint, const CompileContext &cc) {
+ bool compress_state, bool do_accel, bool impl_test_only,
+ bool &fast, u32 hint, const CompileContext &cc) {
if (!has_managed_reports(h_in)) {
rm = nullptr;
} else {
@@ -684,19 +684,19 @@ constructNFA(const NGHolder &h_in, const ReportManager *rm,
}
return generate(*h, state_ids, repeats, reportSquashMap, squashMap, tops,
- zombies, do_accel, compress_state, fast, hint, cc);
+ zombies, do_accel, compress_state, fast, hint, cc);
}
bytecode_ptr<NFA>
constructNFA(const NGHolder &h_in, const ReportManager *rm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
- bool compress_state, bool &fast, const CompileContext &cc) {
+ bool compress_state, bool &fast, const CompileContext &cc) {
const u32 hint = INVALID_NFA;
const bool do_accel = cc.grey.accelerateNFA;
const bool impl_test_only = false;
return constructNFA(h_in, rm, fixed_depth_tops, triggers, compress_state,
- do_accel, impl_test_only, fast, hint, cc);
+ do_accel, impl_test_only, fast, hint, cc);
}
#ifndef RELEASE_BUILD
@@ -705,11 +705,11 @@ bytecode_ptr<NFA>
constructNFA(const NGHolder &h_in, const ReportManager *rm,
const map<u32, u32> &fixed_depth_tops,
const map<u32, vector<vector<CharReach>>> &triggers,
- bool compress_state, bool &fast, u32 hint, const CompileContext &cc) {
+ bool compress_state, bool &fast, u32 hint, const CompileContext &cc) {
const bool do_accel = cc.grey.accelerateNFA;
const bool impl_test_only = false;
- return constructNFA(h_in, rm, fixed_depth_tops, triggers, compress_state,
- do_accel, impl_test_only, fast, hint, cc);
+ return constructNFA(h_in, rm, fixed_depth_tops, triggers, compress_state,
+ do_accel, impl_test_only, fast, hint, cc);
}
#endif // RELEASE_BUILD
@@ -739,10 +739,10 @@ bytecode_ptr<NFA> constructReversedNFA_i(const NGHolder &h_in, u32 hint,
vector<BoundedRepeatData> repeats;
unordered_map<NFAVertex, NFAStateSet> reportSquashMap;
unordered_map<NFAVertex, NFAStateSet> squashMap;
- UNUSED bool fast = false;
+ UNUSED bool fast = false;
return generate(h, state_ids, repeats, reportSquashMap, squashMap, tops,
- zombies, false, false, fast, hint, cc);
+ zombies, false, false, fast, hint, cc);
}
bytecode_ptr<NFA> constructReversedNFA(const NGHolder &h_in,
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_limex.h b/contrib/libs/hyperscan/src/nfagraph/ng_limex.h
index 1e478cf8f8..7eba2eff06 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_limex.h
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_limex.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -100,7 +100,7 @@ bytecode_ptr<NFA>
constructNFA(const NGHolder &g, const ReportManager *rm,
const std::map<u32, u32> &fixed_depth_tops,
const std::map<u32, std::vector<std::vector<CharReach>>> &triggers,
- bool compress_state, bool &fast, const CompileContext &cc);
+ bool compress_state, bool &fast, const CompileContext &cc);
/**
* \brief Build a reverse NFA from the graph given, which should have already
@@ -129,7 +129,7 @@ bytecode_ptr<NFA>
constructNFA(const NGHolder &g, const ReportManager *rm,
const std::map<u32, u32> &fixed_depth_tops,
const std::map<u32, std::vector<std::vector<CharReach>>> &triggers,
- bool compress_state, bool &fast, u32 hint, const CompileContext &cc);
+ bool compress_state, bool &fast, u32 hint, const CompileContext &cc);
/**
* \brief Build a reverse NFA (with model type hint) from the graph given,
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_limex_accel.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_limex_accel.cpp
index c016c44240..f1f829f2c1 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_limex_accel.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_limex_accel.cpp
@@ -170,7 +170,7 @@ void findPaths(const NGHolder &g, NFAVertex v,
/* path has looped back to one of the active+boring acceleration
* states. We can ignore this path if we have sufficient back-
* off. */
- paths->push_back({cr});
+ paths->push_back({cr});
continue;
}
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_literal_analysis.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_literal_analysis.cpp
index 3d18c8f2c4..d25ac43e87 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_literal_analysis.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_literal_analysis.cpp
@@ -69,14 +69,14 @@ struct LitGraphVertexProps {
LitGraphVertexProps() = default;
explicit LitGraphVertexProps(ue2_literal::elem c_in) : c(move(c_in)) {}
ue2_literal::elem c; // string element (char + bool)
- size_t index = 0; // managed by ue2_graph
+ size_t index = 0; // managed by ue2_graph
};
struct LitGraphEdgeProps {
LitGraphEdgeProps() = default;
explicit LitGraphEdgeProps(u64a score_in) : score(score_in) {}
u64a score = NO_LITERAL_AT_EDGE_SCORE;
- size_t index = 0; // managed by ue2_graph
+ size_t index = 0; // managed by ue2_graph
};
struct LitGraph
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_mcclellan.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_mcclellan.cpp
index 4a3b58f97c..4ce5dc153b 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_mcclellan.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_mcclellan.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -542,8 +542,8 @@ unique_ptr<raw_dfa> buildMcClellan(const NGHolder &graph,
return nullptr;
}
- DEBUG_PRINTF("attempting to build %s mcclellan\n",
- to_string(graph.kind).c_str());
+ DEBUG_PRINTF("attempting to build %s mcclellan\n",
+ to_string(graph.kind).c_str());
assert(allMatchStatesHaveReports(graph));
bool prunable = grey.highlanderPruneDFA && has_managed_reports(graph);
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_repeat.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_repeat.cpp
index 5808d87440..1f63ad3c6f 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_repeat.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_repeat.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -47,7 +47,7 @@
#include "util/dump_charclass.h"
#include "util/graph_range.h"
#include "util/graph_small_color_map.h"
-#include "util/graph_undirected.h"
+#include "util/graph_undirected.h"
#include "util/report_manager.h"
#include "util/unordered.h"
@@ -73,31 +73,31 @@ namespace ue2 {
namespace {
-/**
- * \brief Filter that retains only edges between vertices with the same
- * reachability. Special vertices are dropped.
- */
+/**
+ * \brief Filter that retains only edges between vertices with the same
+ * reachability. Special vertices are dropped.
+ */
template<class Graph>
struct ReachFilter {
- ReachFilter() = default;
+ ReachFilter() = default;
explicit ReachFilter(const Graph *g_in) : g(g_in) {}
// Convenience typedefs.
- using Traits = typename boost::graph_traits<Graph>;
- using VertexDescriptor = typename Traits::vertex_descriptor;
- using EdgeDescriptor = typename Traits::edge_descriptor;
+ using Traits = typename boost::graph_traits<Graph>;
+ using VertexDescriptor = typename Traits::vertex_descriptor;
+ using EdgeDescriptor = typename Traits::edge_descriptor;
- bool operator()(const VertexDescriptor &v) const {
+ bool operator()(const VertexDescriptor &v) const {
assert(g);
// Disallow special vertices, as otherwise we will try to remove them
// later.
- return !is_special(v, *g);
- }
+ return !is_special(v, *g);
+ }
- bool operator()(const EdgeDescriptor &e) const {
- assert(g);
+ bool operator()(const EdgeDescriptor &e) const {
+ assert(g);
// Vertices must have the same reach.
- auto u = source(e, *g), v = target(e, *g);
+ auto u = source(e, *g), v = target(e, *g);
const CharReach &cr_u = (*g)[u].char_reach;
const CharReach &cr_v = (*g)[v].char_reach;
return cr_u == cr_v;
@@ -106,8 +106,8 @@ struct ReachFilter {
const Graph *g = nullptr;
};
-using RepeatGraph = boost::filtered_graph<NGHolder, ReachFilter<NGHolder>,
- ReachFilter<NGHolder>>;
+using RepeatGraph = boost::filtered_graph<NGHolder, ReachFilter<NGHolder>,
+ ReachFilter<NGHolder>>;
struct ReachSubgraph {
vector<NFAVertex> vertices;
@@ -301,9 +301,9 @@ void splitSubgraph(const NGHolder &g, const deque<NFAVertex> &verts,
unordered_map<NFAVertex, NFAVertex> verts_map; // in g -> in verts_g
fillHolder(&verts_g, g, verts, &verts_map);
- const auto ug = make_undirected_graph(verts_g);
+ const auto ug = make_undirected_graph(verts_g);
- unordered_map<NFAVertex, u32> repeatMap;
+ unordered_map<NFAVertex, u32> repeatMap;
size_t num = connected_components(ug, make_assoc_property_map(repeatMap));
DEBUG_PRINTF("found %zu connected repeat components\n", num);
@@ -312,8 +312,8 @@ void splitSubgraph(const NGHolder &g, const deque<NFAVertex> &verts,
vector<ReachSubgraph> rs(num);
for (auto v : verts) {
- assert(!is_special(v, g));
- auto vu = verts_map.at(v);
+ assert(!is_special(v, g));
+ auto vu = verts_map.at(v);
auto rit = repeatMap.find(vu);
if (rit == repeatMap.end()) {
continue; /* not part of a repeat */
@@ -324,13 +324,13 @@ void splitSubgraph(const NGHolder &g, const deque<NFAVertex> &verts,
}
for (const auto &rsi : rs) {
- if (rsi.vertices.empty()) {
- // Empty elements can happen when connected_components finds a
- // subgraph consisting entirely of specials (which aren't added to
- // ReachSubgraph in the loop above). There's nothing we can do with
- // these, so we skip them.
- continue;
- }
+ if (rsi.vertices.empty()) {
+ // Empty elements can happen when connected_components finds a
+ // subgraph consisting entirely of specials (which aren't added to
+ // ReachSubgraph in the loop above). There's nothing we can do with
+ // these, so we skip them.
+ continue;
+ }
DEBUG_PRINTF("repeat with %zu vertices\n", rsi.vertices.size());
if (rsi.vertices.size() >= minNumVertices) {
DEBUG_PRINTF("enqueuing\n");
@@ -1030,16 +1030,16 @@ static
void buildReachSubgraphs(const NGHolder &g, vector<ReachSubgraph> &rs,
const u32 minNumVertices) {
const ReachFilter<NGHolder> fil(&g);
- const RepeatGraph rg(g, fil, fil);
+ const RepeatGraph rg(g, fil, fil);
if (!isCompBigEnough(rg, minNumVertices)) {
DEBUG_PRINTF("component not big enough, bailing\n");
return;
}
- const auto ug = make_undirected_graph(rg);
+ const auto ug = make_undirected_graph(rg);
- unordered_map<NFAVertex, u32> repeatMap;
+ unordered_map<NFAVertex, u32> repeatMap;
unsigned int num;
num = connected_components(ug, make_assoc_property_map(repeatMap));
@@ -1051,7 +1051,7 @@ void buildReachSubgraphs(const NGHolder &g, vector<ReachSubgraph> &rs,
rs.resize(num);
for (auto v : topoOrder) {
- auto rit = repeatMap.find(v);
+ auto rit = repeatMap.find(v);
if (rit == repeatMap.end()) {
continue; /* not part of a repeat */
}
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_stop.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_stop.cpp
index 4b0e865adc..5e627bb593 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_stop.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_stop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -87,11 +87,11 @@ private:
/** Find the set of characters that are not present in the reachability of
* graph \p g after a certain depth (currently 8). If a character in this set
* is encountered, it means that the NFA is either dead or has not progressed
- * more than 8 characters from its start states.
- *
- * This is only used to guide merging heuristics, use
- * findLeftOffsetStopAlphabet for real uses.
- */
+ * more than 8 characters from its start states.
+ *
+ * This is only used to guide merging heuristics, use
+ * findLeftOffsetStopAlphabet for real uses.
+ */
CharReach findStopAlphabet(const NGHolder &g, som_type som) {
const depth max_depth(MAX_STOP_DEPTH);
const InitDepths depths(g);
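For context, the stop alphabet described in the comment above boils down to a union over per-depth reachability followed by a complement. A rough sketch with std::bitset<256> standing in for CharReach; the depth-indexed input is hypothetical:

#include <bitset>
#include <cstddef>
#include <vector>

using CharSet = std::bitset<256>;

// union the bytes reachable at each depth up to the cutoff (8 here);
// any byte outside that union leaves the NFA dead or stalled near its start
CharSet stop_alphabet(const std::vector<CharSet> &reach_at_depth) {
    CharSet live;
    for (std::size_t d = 0; d < reach_at_depth.size() && d <= 8; d++) {
        live |= reach_at_depth[d];
    }
    return ~live;
}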
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_stop.h b/contrib/libs/hyperscan/src/nfagraph/ng_stop.h
index c6f64d78ab..4a889dca09 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_stop.h
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_stop.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -47,11 +47,11 @@ class NGHolder;
/** Find the set of characters that are not present in the reachability of
* graph \p g after a certain depth (currently 8). If a character in this set
* is encountered, it means that the NFA is either dead or has not progressed
- * more than 8 characters from its start states.
- *
- * This is only used to guide merging heuristics, use
- * findLeftOffsetStopAlphabet for real uses.
- */
+ * more than 8 characters from its start states.
+ *
+ * This is only used to guide merging heuristics, use
+ * findLeftOffsetStopAlphabet for real uses.
+ */
CharReach findStopAlphabet(const NGHolder &g, som_type som);
/** Calculate the stop alphabet for each depth from 0 to MAX_STOP_DEPTH. Then
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_violet.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_violet.cpp
index 62c51f731a..685d452150 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_violet.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_violet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, Intel Corporation
+ * Copyright (c) 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -60,7 +60,7 @@
#include "util/flat_containers.h"
#include "util/graph.h"
#include "util/graph_range.h"
-#include "util/graph_small_color_map.h"
+#include "util/graph_small_color_map.h"
#include "util/insertion_ordered.h"
#include "util/make_unique.h"
#include "util/order_check.h"
@@ -134,21 +134,21 @@ bool createsTransientLHS(const NGHolder &g, const vector<NFAVertex> &vv,
return true;
}
-/**
- * Counts the number of vertices that are reachable from the set of sources
- * given.
- */
+/**
+ * Counts the number of vertices that are reachable from the set of sources
+ * given.
+ */
static
-size_t count_reachable(const NGHolder &g, const vector<NFAVertex> &sources,
- small_color_map<decltype(get(vertex_index, g))> &color_map) {
- auto null_visitor = boost::make_dfs_visitor(boost::null_visitor());
- color_map.fill(small_color::white);
+size_t count_reachable(const NGHolder &g, const vector<NFAVertex> &sources,
+ small_color_map<decltype(get(vertex_index, g))> &color_map) {
+ auto null_visitor = boost::make_dfs_visitor(boost::null_visitor());
+ color_map.fill(small_color::white);
+
+ for (auto v : sources) {
+ boost::depth_first_visit(g, v, null_visitor, color_map);
+ }
- for (auto v : sources) {
- boost::depth_first_visit(g, v, null_visitor, color_map);
- }
-
- return color_map.count(small_color::black);
+ return color_map.count(small_color::black);
}
static
@@ -695,12 +695,12 @@ unique_ptr<VertLitInfo> findBestSplit(const NGHolder &g,
}
if (last_chance) {
- const size_t num_verts = num_vertices(g);
- auto color_map = make_small_color_map(g);
+ const size_t num_verts = num_vertices(g);
+ auto color_map = make_small_color_map(g);
for (auto &a : lits) {
- size_t num_reachable = count_reachable(g, a->vv, color_map);
- double ratio = (double)num_reachable / (double)num_verts;
- a->split_ratio = ratio > 0.5 ? 1 - ratio : ratio;
+ size_t num_reachable = count_reachable(g, a->vv, color_map);
+ double ratio = (double)num_reachable / (double)num_verts;
+ a->split_ratio = ratio > 0.5 ? 1 - ratio : ratio;
}
}
@@ -2040,7 +2040,7 @@ bool improvePrefix(NGHolder &h, RoseInGraph &vg, const vector<RoseInEdge> &ee,
if (ee.size() > 1) {
DEBUG_PRINTF("split the prefix apart based on succ literals\n");
unordered_map<shared_ptr<NGHolder>, vector<pair<RoseInEdge, u32> >,
- NGHolderHasher, NGHolderEqual> trimmed;
+ NGHolderHasher, NGHolderEqual> trimmed;
for (const auto &e : ee) {
shared_ptr<NGHolder> hh = cloneHolder(h);
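count_reachable() above is a DFS fill over a reusable color map followed by a count of black (finished) vertices, and the split_ratio derived from it steers findBestSplit() toward balanced cuts. A runnable sketch with a plain std::vector color map in place of ue2's small_color_map:

#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/depth_first_search.hpp>
#include <algorithm>
#include <iostream>
#include <vector>

int main() {
    using Graph = boost::adjacency_list<boost::vecS, boost::vecS,
                                        boost::directedS>;
    Graph g(5);
    boost::add_edge(0, 1, g);
    boost::add_edge(1, 2, g); // vertices 3 and 4 stay unreachable
    std::vector<boost::default_color_type> color(boost::num_vertices(g),
                                                 boost::white_color);
    auto cmap = boost::make_iterator_property_map(
        color.begin(), boost::get(boost::vertex_index, g));
    boost::depth_first_visit(g, 0, boost::default_dfs_visitor(), cmap);
    std::size_t n = std::count(color.begin(), color.end(),
                               boost::black_color);
    std::cout << n << " reachable\n"; // prints "3 reachable"
}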
diff --git a/contrib/libs/hyperscan/src/nfagraph/ng_width.cpp b/contrib/libs/hyperscan/src/nfagraph/ng_width.cpp
index 55b01ea973..219241ca55 100644
--- a/contrib/libs/hyperscan/src/nfagraph/ng_width.cpp
+++ b/contrib/libs/hyperscan/src/nfagraph/ng_width.cpp
@@ -176,7 +176,7 @@ depth findMaxWidth(const NGHolder &h, const SpecialEdgeFilter &filter,
}
if (d.is_unreachable()) {
- assert(findMinWidth(h, filter, src).is_unreachable());
+ assert(findMinWidth(h, filter, src).is_unreachable());
return d;
}
diff --git a/contrib/libs/hyperscan/src/parser/logical_combination.cpp b/contrib/libs/hyperscan/src/parser/logical_combination.cpp
index feacedc263..de017a1108 100644
--- a/contrib/libs/hyperscan/src/parser/logical_combination.cpp
+++ b/contrib/libs/hyperscan/src/parser/logical_combination.cpp
@@ -1,336 +1,336 @@
-/*
- * Copyright (c) 2018-2020, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Parse and build ParsedLogical::logicalTree and combInfoMap.
- */
-#include "logical_combination.h"
-#include "parser/parse_error.h"
-#include "util/container.h"
-#include "hs_compile.h"
-#include "allocator.h"
-
-#include <vector>
-
-using namespace std;
-
-namespace ue2 {
-
-u32 ParsedLogical::getLogicalKey(u32 a) {
- auto it = toLogicalKeyMap.find(a);
- if (it == toLogicalKeyMap.end()) {
- // get size before assigning to avoid wacky LHS shenanigans
- u32 size = toLogicalKeyMap.size();
- bool inserted;
- tie(it, inserted) = toLogicalKeyMap.emplace(a, size);
- assert(inserted);
- }
- DEBUG_PRINTF("%u -> lkey %u\n", it->first, it->second);
- return it->second;
-}
-
-u32 ParsedLogical::getCombKey(u32 a) {
- auto it = toCombKeyMap.find(a);
- if (it == toCombKeyMap.end()) {
- u32 size = toCombKeyMap.size();
- bool inserted;
- tie(it, inserted) = toCombKeyMap.emplace(a, size);
- assert(inserted);
- }
- DEBUG_PRINTF("%u -> ckey %u\n", it->first, it->second);
- return it->second;
-}
-
-void ParsedLogical::addRelateCKey(u32 lkey, u32 ckey) {
- auto it = lkey2ckeys.find(lkey);
- if (it == lkey2ckeys.end()) {
- bool inserted;
- tie(it, inserted) = lkey2ckeys.emplace(lkey, set<u32>());
- assert(inserted);
- }
- it->second.insert(ckey);
- DEBUG_PRINTF("lkey %u belongs to combination key %u\n",
- it->first, ckey);
-}
-
-#define TRY_RENUM_OP(ckey) \
-do { \
- if (ckey & LOGICAL_OP_BIT) { \
- ckey = (ckey & ~LOGICAL_OP_BIT) + toLogicalKeyMap.size(); \
- } \
-} while(0)
-
-u32 ParsedLogical::logicalTreeAdd(u32 op, u32 left, u32 right) {
- LogicalOp lop;
- assert((LOGICAL_OP_BIT & (u32)logicalTree.size()) == 0);
- lop.id = LOGICAL_OP_BIT | (u32)logicalTree.size();
- lop.op = op;
- lop.lo = left;
- lop.ro = right;
- logicalTree.push_back(lop);
- return lop.id;
-}
-
-void ParsedLogical::combinationInfoAdd(UNUSED u32 ckey, u32 id, u32 ekey,
- u32 lkey_start, u32 lkey_result,
- u64a min_offset, u64a max_offset) {
- assert(ckey == combInfoMap.size());
- CombInfo ci;
- ci.id = id;
- ci.ekey = ekey;
- ci.start = lkey_start;
- ci.result = lkey_result;
- ci.min_offset = min_offset;
- ci.max_offset = max_offset;
- combInfoMap.push_back(ci);
-
- DEBUG_PRINTF("ckey %u (id %u) -> lkey %u..%u, ekey=0x%x\n", ckey, ci.id,
- ci.start, ci.result, ci.ekey);
-}
-
-void ParsedLogical::validateSubIDs(const unsigned *ids,
- const char *const *expressions,
- const unsigned *flags,
- unsigned elements) {
- for (const auto &it : toLogicalKeyMap) {
- bool unknown = true;
- u32 i = 0;
- for (i = 0; i < elements; i++) {
- if ((ids ? ids[i] : 0) == it.first) {
- unknown = false;
- break;
- }
- }
- if (unknown) {
- throw CompileError("Unknown sub-expression id.");
- }
- if (contains(toCombKeyMap, it.first)) {
- throw CompileError("Have combination of combination.");
- }
- if (flags && (flags[i] & HS_FLAG_SOM_LEFTMOST)) {
- throw CompileError("Have SOM flag in sub-expression.");
- }
- if (flags && (flags[i] & HS_FLAG_PREFILTER)) {
- throw CompileError("Have PREFILTER flag in sub-expression.");
- }
- hs_compile_error_t *compile_err = NULL;
- hs_expr_info_t *info = NULL;
- hs_error_t err = hs_expression_info(expressions[i], flags[i], &info,
- &compile_err);
- if (err != HS_SUCCESS) {
- hs_free_compile_error(compile_err);
- throw CompileError("Run hs_expression_info() failed.");
- }
- if (!info) {
- throw CompileError("Get hs_expr_info_t failed.");
- } else {
- if (info->unordered_matches) {
- throw CompileError("Have unordered match in sub-expressions.");
- }
- hs_misc_free(info);
- }
- }
-}
-
-void ParsedLogical::logicalKeyRenumber() {
- // renumber operation lkey in op vector
- for (auto &op : logicalTree) {
- TRY_RENUM_OP(op.id);
- TRY_RENUM_OP(op.lo);
- TRY_RENUM_OP(op.ro);
- }
- // renumber operation lkey in info map
- for (auto &ci : combInfoMap) {
- TRY_RENUM_OP(ci.start);
- TRY_RENUM_OP(ci.result);
- }
-}
-
-struct LogicalOperator {
- LogicalOperator(u32 op_in, u32 paren_in)
- : op(op_in), paren(paren_in) {}
- u32 op;
- u32 paren;
-};
-
-static
-u32 toOperator(char c) {
- u32 op = UNKNOWN_OP;
- switch (c) {
- case '!' :
- op = LOGICAL_OP_NOT;
- break;
- case '&' :
- op = LOGICAL_OP_AND;
- break;
- case '|' :
- op = LOGICAL_OP_OR;
- break;
- default:
- break;
- };
- return op;
-}
-
-static
-bool cmpOperator(const LogicalOperator &op1, const LogicalOperator &op2) {
- if (op1.paren < op2.paren) {
- return false;
- }
- if (op1.paren > op2.paren) {
- return true;
- }
- assert(op1.paren == op2.paren);
- if (op1.op > op2.op) {
- return false;
- }
- if (op1.op < op2.op) {
- return true;
- }
- return true;
-}
-
-static
-u32 fetchSubID(const char *logical, u32 &digit, u32 end) {
- if (digit == (u32)-1) { // no digit parsing in progress
- return (u32)-1;
- }
- assert(end > digit);
- if (end - digit > 9) {
- throw LocatedParseError("Expression id too large");
- }
- u32 mult = 1;
- u32 sum = 0;
- for (u32 j = end - 1; (j >= digit) && (j != (u32)-1) ; j--) {
- assert(isdigit(logical[j]));
- sum += (logical[j] - '0') * mult;
- mult *= 10;
- }
- digit = (u32)-1;
- return sum;
-}
-
-static
-void popOperator(vector<LogicalOperator> &op_stack, vector<u32> &subid_stack,
- ParsedLogical &pl) {
- if (subid_stack.empty()) {
- throw LocatedParseError("Not enough operand");
- }
- u32 right = subid_stack.back();
- subid_stack.pop_back();
- u32 left = 0;
- if (op_stack.back().op != LOGICAL_OP_NOT) {
- if (subid_stack.empty()) {
- throw LocatedParseError("Not enough operand");
- }
- left = subid_stack.back();
- subid_stack.pop_back();
- }
- subid_stack.push_back(pl.logicalTreeAdd(op_stack.back().op, left, right));
- op_stack.pop_back();
-}
-
-void ParsedLogical::parseLogicalCombination(unsigned id, const char *logical,
- u32 ekey, u64a min_offset,
- u64a max_offset) {
- u32 ckey = getCombKey(id);
- vector<LogicalOperator> op_stack;
- vector<u32> subid_stack;
- u32 lkey_start = INVALID_LKEY; // logical operation's lkey
- u32 paren = 0; // parentheses
- u32 digit = (u32)-1; // digit start offset, invalid offset is -1
- u32 subid = (u32)-1;
- u32 i;
- try {
- for (i = 0; logical[i]; i++) {
- if (isdigit(logical[i])) {
- if (digit == (u32)-1) { // new digit start
- digit = i;
- }
- } else {
- if ((subid = fetchSubID(logical, digit, i)) != (u32)-1) {
- subid_stack.push_back(getLogicalKey(subid));
- addRelateCKey(subid_stack.back(), ckey);
- }
- if (logical[i] == ' ') { // skip whitespace
- continue;
- }
- if (logical[i] == '(') {
- paren += 1;
- } else if (logical[i] == ')') {
- if (paren <= 0) {
- throw LocatedParseError("Not enough left parentheses");
- }
- paren -= 1;
- } else {
- u32 prio = toOperator(logical[i]);
- if (prio != UNKNOWN_OP) {
- LogicalOperator op(prio, paren);
- while (!op_stack.empty()
- && cmpOperator(op_stack.back(), op)) {
- popOperator(op_stack, subid_stack, *this);
- if (lkey_start == INVALID_LKEY) {
- lkey_start = subid_stack.back();
- }
- }
- op_stack.push_back(op);
- } else {
- throw LocatedParseError("Unknown character");
- }
- }
- }
- }
- if (paren != 0) {
- throw LocatedParseError("Not enough right parentheses");
- }
- if ((subid = fetchSubID(logical, digit, i)) != (u32)-1) {
- subid_stack.push_back(getLogicalKey(subid));
- addRelateCKey(subid_stack.back(), ckey);
- }
- while (!op_stack.empty()) {
- popOperator(op_stack, subid_stack, *this);
- if (lkey_start == INVALID_LKEY) {
- lkey_start = subid_stack.back();
- }
- }
- if (subid_stack.size() != 1) {
- throw LocatedParseError("Not enough operator");
- }
- } catch (LocatedParseError &error) {
- error.locate(i);
- throw;
- }
- u32 lkey_result = subid_stack.back(); // logical operation's lkey
- if (lkey_start == INVALID_LKEY) {
- throw CompileError("No logical operation.");
- }
- combinationInfoAdd(ckey, id, ekey, lkey_start, lkey_result,
- min_offset, max_offset);
-}
-
-} // namespace ue2
+/*
+ * Copyright (c) 2018-2020, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Parse and build ParsedLogical::logicalTree and combInfoMap.
+ */
+#include "logical_combination.h"
+#include "parser/parse_error.h"
+#include "util/container.h"
+#include "hs_compile.h"
+#include "allocator.h"
+
+#include <vector>
+
+using namespace std;
+
+namespace ue2 {
+
+u32 ParsedLogical::getLogicalKey(u32 a) {
+ auto it = toLogicalKeyMap.find(a);
+ if (it == toLogicalKeyMap.end()) {
+ // get size before emplace; map[a] = map.size() has unspecified evaluation order
+ u32 size = toLogicalKeyMap.size();
+ bool inserted;
+ tie(it, inserted) = toLogicalKeyMap.emplace(a, size);
+ assert(inserted);
+ }
+ DEBUG_PRINTF("%u -> lkey %u\n", it->first, it->second);
+ return it->second;
+}
+
+u32 ParsedLogical::getCombKey(u32 a) {
+ auto it = toCombKeyMap.find(a);
+ if (it == toCombKeyMap.end()) {
+ u32 size = toCombKeyMap.size();
+ bool inserted;
+ tie(it, inserted) = toCombKeyMap.emplace(a, size);
+ assert(inserted);
+ }
+ DEBUG_PRINTF("%u -> ckey %u\n", it->first, it->second);
+ return it->second;
+}
+
+void ParsedLogical::addRelateCKey(u32 lkey, u32 ckey) {
+ auto it = lkey2ckeys.find(lkey);
+ if (it == lkey2ckeys.end()) {
+ bool inserted;
+ tie(it, inserted) = lkey2ckeys.emplace(lkey, set<u32>());
+ assert(inserted);
+ }
+ it->second.insert(ckey);
+ DEBUG_PRINTF("lkey %u belongs to combination key %u\n",
+ it->first, ckey);
+}
+
+#define TRY_RENUM_OP(ckey) \
+do { \
+ if (ckey & LOGICAL_OP_BIT) { \
+ ckey = (ckey & ~LOGICAL_OP_BIT) + toLogicalKeyMap.size(); \
+ } \
+} while(0)
+
+u32 ParsedLogical::logicalTreeAdd(u32 op, u32 left, u32 right) {
+ LogicalOp lop;
+ assert((LOGICAL_OP_BIT & (u32)logicalTree.size()) == 0);
+ lop.id = LOGICAL_OP_BIT | (u32)logicalTree.size();
+ lop.op = op;
+ lop.lo = left;
+ lop.ro = right;
+ logicalTree.push_back(lop);
+ return lop.id;
+}
+
+void ParsedLogical::combinationInfoAdd(UNUSED u32 ckey, u32 id, u32 ekey,
+ u32 lkey_start, u32 lkey_result,
+ u64a min_offset, u64a max_offset) {
+ assert(ckey == combInfoMap.size());
+ CombInfo ci;
+ ci.id = id;
+ ci.ekey = ekey;
+ ci.start = lkey_start;
+ ci.result = lkey_result;
+ ci.min_offset = min_offset;
+ ci.max_offset = max_offset;
+ combInfoMap.push_back(ci);
+
+ DEBUG_PRINTF("ckey %u (id %u) -> lkey %u..%u, ekey=0x%x\n", ckey, ci.id,
+ ci.start, ci.result, ci.ekey);
+}
+
+void ParsedLogical::validateSubIDs(const unsigned *ids,
+ const char *const *expressions,
+ const unsigned *flags,
+ unsigned elements) {
+ for (const auto &it : toLogicalKeyMap) {
+ bool unknown = true;
+ u32 i = 0;
+ for (i = 0; i < elements; i++) {
+ if ((ids ? ids[i] : 0) == it.first) {
+ unknown = false;
+ break;
+ }
+ }
+ if (unknown) {
+ throw CompileError("Unknown sub-expression id.");
+ }
+ if (contains(toCombKeyMap, it.first)) {
+ throw CompileError("Have combination of combination.");
+ }
+ if (flags && (flags[i] & HS_FLAG_SOM_LEFTMOST)) {
+ throw CompileError("Have SOM flag in sub-expression.");
+ }
+ if (flags && (flags[i] & HS_FLAG_PREFILTER)) {
+ throw CompileError("Have PREFILTER flag in sub-expression.");
+ }
+ hs_compile_error_t *compile_err = NULL;
+ hs_expr_info_t *info = NULL;
+ hs_error_t err = hs_expression_info(expressions[i], flags[i], &info,
+ &compile_err);
+ if (err != HS_SUCCESS) {
+ hs_free_compile_error(compile_err);
+ throw CompileError("Run hs_expression_info() failed.");
+ }
+ if (!info) {
+ throw CompileError("Get hs_expr_info_t failed.");
+ } else {
+ if (info->unordered_matches) {
+ throw CompileError("Have unordered match in sub-expressions.");
+ }
+ hs_misc_free(info);
+ }
+ }
+}
+
+void ParsedLogical::logicalKeyRenumber() {
+ // renumber operation lkey in op vector
+ for (auto &op : logicalTree) {
+ TRY_RENUM_OP(op.id);
+ TRY_RENUM_OP(op.lo);
+ TRY_RENUM_OP(op.ro);
+ }
+ // renumber operation lkey in info map
+ for (auto &ci : combInfoMap) {
+ TRY_RENUM_OP(ci.start);
+ TRY_RENUM_OP(ci.result);
+ }
+}
+
+struct LogicalOperator {
+ LogicalOperator(u32 op_in, u32 paren_in)
+ : op(op_in), paren(paren_in) {}
+ u32 op;
+ u32 paren;
+};
+
+static
+u32 toOperator(char c) {
+ u32 op = UNKNOWN_OP;
+ switch (c) {
+ case '!' :
+ op = LOGICAL_OP_NOT;
+ break;
+ case '&' :
+ op = LOGICAL_OP_AND;
+ break;
+ case '|' :
+ op = LOGICAL_OP_OR;
+ break;
+ default:
+ break;
+ };
+ return op;
+}
+
+static
+bool cmpOperator(const LogicalOperator &op1, const LogicalOperator &op2) {
+ if (op1.paren < op2.paren) {
+ return false;
+ }
+ if (op1.paren > op2.paren) {
+ return true;
+ }
+ assert(op1.paren == op2.paren);
+ if (op1.op > op2.op) {
+ return false;
+ }
+ if (op1.op < op2.op) {
+ return true;
+ }
+ return true;
+}
+
+static
+u32 fetchSubID(const char *logical, u32 &digit, u32 end) {
+ if (digit == (u32)-1) { // no digit parsing in progress
+ return (u32)-1;
+ }
+ assert(end > digit);
+ if (end - digit > 9) {
+ throw LocatedParseError("Expression id too large");
+ }
+ u32 mult = 1;
+ u32 sum = 0;
+ for (u32 j = end - 1; (j >= digit) && (j != (u32)-1) ; j--) {
+ assert(isdigit(logical[j]));
+ sum += (logical[j] - '0') * mult;
+ mult *= 10;
+ }
+ digit = (u32)-1;
+ return sum;
+}
+
+static
+void popOperator(vector<LogicalOperator> &op_stack, vector<u32> &subid_stack,
+ ParsedLogical &pl) {
+ if (subid_stack.empty()) {
+ throw LocatedParseError("Not enough operand");
+ }
+ u32 right = subid_stack.back();
+ subid_stack.pop_back();
+ u32 left = 0;
+ if (op_stack.back().op != LOGICAL_OP_NOT) {
+ if (subid_stack.empty()) {
+ throw LocatedParseError("Not enough operand");
+ }
+ left = subid_stack.back();
+ subid_stack.pop_back();
+ }
+ subid_stack.push_back(pl.logicalTreeAdd(op_stack.back().op, left, right));
+ op_stack.pop_back();
+}
+
+void ParsedLogical::parseLogicalCombination(unsigned id, const char *logical,
+ u32 ekey, u64a min_offset,
+ u64a max_offset) {
+ u32 ckey = getCombKey(id);
+ vector<LogicalOperator> op_stack;
+ vector<u32> subid_stack;
+ u32 lkey_start = INVALID_LKEY; // logical operation's lkey
+ u32 paren = 0; // parentheses
+ u32 digit = (u32)-1; // digit start offset, invalid offset is -1
+ u32 subid = (u32)-1;
+ u32 i;
+ try {
+ for (i = 0; logical[i]; i++) {
+ if (isdigit(logical[i])) {
+ if (digit == (u32)-1) { // new digit start
+ digit = i;
+ }
+ } else {
+ if ((subid = fetchSubID(logical, digit, i)) != (u32)-1) {
+ subid_stack.push_back(getLogicalKey(subid));
+ addRelateCKey(subid_stack.back(), ckey);
+ }
+ if (logical[i] == ' ') { // skip whitespace
+ continue;
+ }
+ if (logical[i] == '(') {
+ paren += 1;
+ } else if (logical[i] == ')') {
+ if (paren <= 0) {
+ throw LocatedParseError("Not enough left parentheses");
+ }
+ paren -= 1;
+ } else {
+ u32 prio = toOperator(logical[i]);
+ if (prio != UNKNOWN_OP) {
+ LogicalOperator op(prio, paren);
+ while (!op_stack.empty()
+ && cmpOperator(op_stack.back(), op)) {
+ popOperator(op_stack, subid_stack, *this);
+ if (lkey_start == INVALID_LKEY) {
+ lkey_start = subid_stack.back();
+ }
+ }
+ op_stack.push_back(op);
+ } else {
+ throw LocatedParseError("Unknown character");
+ }
+ }
+ }
+ }
+ if (paren != 0) {
+ throw LocatedParseError("Not enough right parentheses");
+ }
+ if ((subid = fetchSubID(logical, digit, i)) != (u32)-1) {
+ subid_stack.push_back(getLogicalKey(subid));
+ addRelateCKey(subid_stack.back(), ckey);
+ }
+ while (!op_stack.empty()) {
+ popOperator(op_stack, subid_stack, *this);
+ if (lkey_start == INVALID_LKEY) {
+ lkey_start = subid_stack.back();
+ }
+ }
+ if (subid_stack.size() != 1) {
+ throw LocatedParseError("Not enough operator");
+ }
+ } catch (LocatedParseError &error) {
+ error.locate(i);
+ throw;
+ }
+ u32 lkey_result = subid_stack.back(); // logical operation's lkey
+ if (lkey_start == INVALID_LKEY) {
+ throw CompileError("No logical operation.");
+ }
+ combinationInfoAdd(ckey, id, ekey, lkey_start, lkey_result,
+ min_offset, max_offset);
+}
+
+} // namespace ue2
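parseLogicalCombination() is a compact shunting-yard parser: digit runs become lkeys on subid_stack, operators and parentheses resolve through op_stack by precedence, and each pop emits one postfix node into logicalTree. At the API level such a combination is compiled next to its sub-expressions; a minimal sketch, assuming a Hyperscan build with logical-combination support (5.0 or later):

#include <hs.h>
#include <stdio.h>

int main(void) {
    /* ids 0 and 1 are ordinary sub-expressions; id 2 is the combination
     * string that the parser above consumes */
    const char *exprs[] = { "foo", "bar[0-9]+", "0 & !1" };
    unsigned flags[] = { HS_FLAG_QUIET, HS_FLAG_QUIET, HS_FLAG_COMBINATION };
    unsigned ids[] = { 0, 1, 2 };
    hs_database_t *db = NULL;
    hs_compile_error_t *err = NULL;
    if (hs_compile_multi(exprs, flags, ids, 3, HS_MODE_BLOCK, NULL,
                         &db, &err) != HS_SUCCESS) {
        fprintf(stderr, "compile failed: %s\n", err->message);
        hs_free_compile_error(err);
        return 1;
    }
    hs_free_database(db);
    return 0;
}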
diff --git a/contrib/libs/hyperscan/src/parser/logical_combination.h b/contrib/libs/hyperscan/src/parser/logical_combination.h
index b4b994a437..7c8eb36ef7 100644
--- a/contrib/libs/hyperscan/src/parser/logical_combination.h
+++ b/contrib/libs/hyperscan/src/parser/logical_combination.h
@@ -1,112 +1,112 @@
-/*
- * Copyright (c) 2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Parse and build ParsedLogical::logicalTree and combInfoMap.
- */
-
-#ifndef LOGICAL_COMBINATION_H
-#define LOGICAL_COMBINATION_H
-
-#include "util/logical.h"
-
-#include <map>
-#include <set>
-#include <vector>
-
-namespace ue2 {
-
-class ParsedLogical {
- friend class ReportManager;
-public:
- /** \brief Parse 1 logical expression \a logical, assign temporary ckey. */
- void parseLogicalCombination(unsigned id, const char *logical, u32 ekey,
- u64a min_offset, u64a max_offset);
-
- /** \brief Check if all sub-expression id in combinations are valid. */
- void validateSubIDs(const unsigned *ids, const char *const *expressions,
- const unsigned *flags, unsigned elements);
-
- /** \brief Renumber and assign final lkey for each logical operation
- * after parsed all logical expressions. */
- void logicalKeyRenumber();
-
- /** \brief Fetch the lkey associated with the given expression id,
- * assigning one if necessary. */
- u32 getLogicalKey(u32 expressionId);
-
- /** \brief Fetch the ckey associated with the given expression id,
- * assigning one if necessary. */
- u32 getCombKey(u32 expressionId);
-
- /** \brief Add lkey's corresponding combination id. */
- void addRelateCKey(u32 lkey, u32 ckey);
-
- /** \brief Add one Logical Operation. */
- u32 logicalTreeAdd(u32 op, u32 left, u32 right);
-
- /** \brief Assign the combination info associated with the given ckey. */
- void combinationInfoAdd(u32 ckey, u32 id, u32 ekey, u32 lkey_start,
- u32 lkey_result, u64a min_offset, u64a max_offset);
-
- const std::map<u32, u32> &getLkeyMap() const {
- return toLogicalKeyMap;
- }
-
- const std::vector<LogicalOp> &getLogicalTree() const {
- return logicalTree;
- }
-
- CombInfo getCombInfoById(u32 id) const {
- u32 ckey = toCombKeyMap.at(id);
- assert(ckey < combInfoMap.size());
- return combInfoMap.at(ckey);
- }
-
-private:
- /** \brief Mapping from ckey to combination info. */
- std::vector<CombInfo> combInfoMap;
-
- /** \brief Mapping from combination expression id to combination key,
- * combination key is used in combination bit-vector cache. */
- std::map<u32, u32> toCombKeyMap;
-
- /** \brief Mapping from expression id to logical key, logical key is used
- * as index in LogicalOp array. */
- std::map<u32, u32> toLogicalKeyMap;
-
- /** \brief Mapping from logical key to related combination keys. */
- std::map<u32, std::set<u32>> lkey2ckeys;
-
- /** \brief Logical constraints, each operation from postfix notation. */
- std::vector<LogicalOp> logicalTree;
-};
-
-} // namespace ue2
-
-#endif
+/*
+ * Copyright (c) 2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Parse and build ParsedLogical::logicalTree and combInfoMap.
+ */
+
+#ifndef LOGICAL_COMBINATION_H
+#define LOGICAL_COMBINATION_H
+
+#include "util/logical.h"
+
+#include <map>
+#include <set>
+#include <vector>
+
+namespace ue2 {
+
+class ParsedLogical {
+ friend class ReportManager;
+public:
+ /** \brief Parse one logical expression \a logical and assign a temporary ckey. */
+ void parseLogicalCombination(unsigned id, const char *logical, u32 ekey,
+ u64a min_offset, u64a max_offset);
+
+ /** \brief Check that all sub-expression ids in combinations are valid. */
+ void validateSubIDs(const unsigned *ids, const char *const *expressions,
+ const unsigned *flags, unsigned elements);
+
+ /** \brief Renumber and assign final lkey for each logical operation
+ * after all logical expressions have been parsed. */
+ void logicalKeyRenumber();
+
+ /** \brief Fetch the lkey associated with the given expression id,
+ * assigning one if necessary. */
+ u32 getLogicalKey(u32 expressionId);
+
+ /** \brief Fetch the ckey associated with the given expression id,
+ * assigning one if necessary. */
+ u32 getCombKey(u32 expressionId);
+
+ /** \brief Add lkey's corresponding combination id. */
+ void addRelateCKey(u32 lkey, u32 ckey);
+
+ /** \brief Add one Logical Operation. */
+ u32 logicalTreeAdd(u32 op, u32 left, u32 right);
+
+ /** \brief Assign the combination info associated with the given ckey. */
+ void combinationInfoAdd(u32 ckey, u32 id, u32 ekey, u32 lkey_start,
+ u32 lkey_result, u64a min_offset, u64a max_offset);
+
+ const std::map<u32, u32> &getLkeyMap() const {
+ return toLogicalKeyMap;
+ }
+
+ const std::vector<LogicalOp> &getLogicalTree() const {
+ return logicalTree;
+ }
+
+ CombInfo getCombInfoById(u32 id) const {
+ u32 ckey = toCombKeyMap.at(id);
+ assert(ckey < combInfoMap.size());
+ return combInfoMap.at(ckey);
+ }
+
+private:
+ /** \brief Mapping from ckey to combination info. */
+ std::vector<CombInfo> combInfoMap;
+
+ /** \brief Mapping from combination expression id to combination key;
+ * the combination key indexes the combination bit-vector cache. */
+ std::map<u32, u32> toCombKeyMap;
+
+ /** \brief Mapping from expression id to logical key; the logical key is
+ * used as an index into the LogicalOp array. */
+ std::map<u32, u32> toLogicalKeyMap;
+
+ /** \brief Mapping from logical key to related combination keys. */
+ std::map<u32, std::set<u32>> lkey2ckeys;
+
+ /** \brief Logical constraints, stored as operations in postfix notation. */
+ std::vector<LogicalOp> logicalTree;
+};
+
+} // namespace ue2
+
+#endif
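The class above is the compiler-side half of Hyperscan's logical combination support. For reference, a minimal sketch of the public API that feeds it, assuming the standard hs.h interface; the patterns, ids, and helper name here are illustrative, not part of this diff:

    #include <hs.h>

    /* Illustrative sketch: compile two quiet sub-expressions plus a logical
     * combination over their ids. The combination string "101 & 102" is
     * exactly what ParsedLogical::parseLogicalCombination parses at compile
     * time. */
    static hs_database_t *build_combo_db(void) {
        const char *exprs[] = {"foo", "bar[0-9]+", "101 & 102"};
        unsigned flags[] = {HS_FLAG_QUIET, HS_FLAG_QUIET, HS_FLAG_COMBINATION};
        unsigned ids[] = {101, 102, 201};
        hs_database_t *db = NULL;
        hs_compile_error_t *err = NULL;
        if (hs_compile_multi(exprs, flags, ids, 3, HS_MODE_BLOCK, NULL,
                             &db, &err) != HS_SUCCESS) {
            hs_free_compile_error(err);
            return NULL;
        }
        return db;
    }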
diff --git a/contrib/libs/hyperscan/src/parser/shortcut_literal.cpp b/contrib/libs/hyperscan/src/parser/shortcut_literal.cpp
index d82f25e2a3..a5d67f30d8 100644
--- a/contrib/libs/hyperscan/src/parser/shortcut_literal.cpp
+++ b/contrib/libs/hyperscan/src/parser/shortcut_literal.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -199,7 +199,7 @@ bool shortcutLiteral(NG &ng, const ParsedExpression &pe) {
DEBUG_PRINTF("constructed literal %s\n", dumpString(lit).c_str());
return ng.addLiteral(lit, expr.index, expr.report, expr.highlander,
- expr.som, expr.quiet);
+ expr.som, expr.quiet);
}
} // namespace ue2
diff --git a/contrib/libs/hyperscan/src/parser/utf8_validate.cpp b/contrib/libs/hyperscan/src/parser/utf8_validate.cpp
index db3ae8403f..50aa06d8e7 100644
--- a/contrib/libs/hyperscan/src/parser/utf8_validate.cpp
+++ b/contrib/libs/hyperscan/src/parser/utf8_validate.cpp
@@ -60,7 +60,7 @@ bool isAllowedCodepoint(u32 val) {
return true;
}
-bool isValidUtf8(const char *expression, const size_t len) {
+bool isValidUtf8(const char *expression, const size_t len) {
if (!expression) {
return true;
}
diff --git a/contrib/libs/hyperscan/src/parser/utf8_validate.h b/contrib/libs/hyperscan/src/parser/utf8_validate.h
index 8095c7a4c2..6389a0859f 100644
--- a/contrib/libs/hyperscan/src/parser/utf8_validate.h
+++ b/contrib/libs/hyperscan/src/parser/utf8_validate.h
@@ -29,12 +29,12 @@
#ifndef PARSER_UTF8_VALIDATE_H
#define PARSER_UTF8_VALIDATE_H
-#include <cstddef> // size_t
-
+#include <cstddef> // size_t
+
namespace ue2 {
/** \brief Validate that the given expression is well-formed UTF-8. */
-bool isValidUtf8(const char *expression, const size_t len);
+bool isValidUtf8(const char *expression, const size_t len);
} // namespace ue2
diff --git a/contrib/libs/hyperscan/src/report.h b/contrib/libs/hyperscan/src/report.h
index 6e3d0b7565..b35f4c052d 100644
--- a/contrib/libs/hyperscan/src/report.h
+++ b/contrib/libs/hyperscan/src/report.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, Intel Corporation
+ * Copyright (c) 2016-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -42,7 +42,7 @@
#include "rose/runtime.h"
#include "som/som_runtime.h"
#include "util/exhaust.h"
-#include "util/logical.h"
+#include "util/logical.h"
#include "util/fatbit.h"
enum DedupeResult {
@@ -152,145 +152,145 @@ void clearEvec(const struct RoseEngine *rose, char *evec) {
mmbit_clear((u8 *)evec, rose->ekeyCount);
}
-/** \brief Test whether the given key (\a lkey) is set in the logical vector
- * \a lvec. */
-static really_inline
-char getLogicalVal(const struct RoseEngine *rose, const char *lvec, u32 lkey) {
- DEBUG_PRINTF("checking lkey matching %p %u\n", lvec, lkey);
- assert(lkey != INVALID_LKEY);
- assert(lkey < rose->lkeyCount + rose->lopCount);
- return mmbit_isset((const u8 *)lvec, rose->lkeyCount + rose->lopCount,
- lkey);
-}
-
-/** \brief Set key \a lkey in the logical vector to \a val. */
-static really_inline
-void setLogicalVal(const struct RoseEngine *rose, char *lvec, u32 lkey,
- char val) {
- DEBUG_PRINTF("marking as matched logical key %u\n", lkey);
- assert(lkey != INVALID_LKEY);
- assert(lkey < rose->lkeyCount + rose->lopCount);
- switch (val) {
- case 0:
- mmbit_unset((u8 *)lvec, rose->lkeyCount + rose->lopCount, lkey);
- break;
- default:
- mmbit_set((u8 *)lvec, rose->lkeyCount + rose->lopCount, lkey);
- break;
- }
-}
-
-/** \brief Mark key \a ckey as active in the combination vector. */
-static really_inline
-void setCombinationActive(const struct RoseEngine *rose, char *cvec, u32 ckey) {
- DEBUG_PRINTF("marking as active combination key %u\n", ckey);
- assert(ckey != INVALID_CKEY);
- assert(ckey < rose->ckeyCount);
- mmbit_set((u8 *)cvec, rose->ckeyCount, ckey);
-}
-
-/** \brief Returns 1 if the logical combination is satisfied. */
-static really_inline
-char isLogicalCombination(const struct RoseEngine *rose, char *lvec,
- u32 start, u32 result) {
- const struct LogicalOp *logicalTree = (const struct LogicalOp *)
- ((const char *)rose + rose->logicalTreeOffset);
- assert(start >= rose->lkeyCount);
- assert(start <= result);
- assert(result < rose->lkeyCount + rose->lopCount);
- for (u32 i = start; i <= result; i++) {
- const struct LogicalOp *op = logicalTree + (i - rose->lkeyCount);
- assert(i == op->id);
- assert(op->op <= LAST_LOGICAL_OP);
- switch ((enum LogicalOpType)op->op) {
- case LOGICAL_OP_NOT:
- setLogicalVal(rose, lvec, op->id,
- !getLogicalVal(rose, lvec, op->ro));
- break;
- case LOGICAL_OP_AND:
- setLogicalVal(rose, lvec, op->id,
- getLogicalVal(rose, lvec, op->lo) &
- getLogicalVal(rose, lvec, op->ro)); // &&
- break;
- case LOGICAL_OP_OR:
- setLogicalVal(rose, lvec, op->id,
- getLogicalVal(rose, lvec, op->lo) |
- getLogicalVal(rose, lvec, op->ro)); // ||
- break;
- }
- }
- return getLogicalVal(rose, lvec, result);
-}
-
-/** \brief Returns 1 if the combination matches even when no sub-expression has matched. */
-static really_inline
-char isPurelyNegativeMatch(const struct RoseEngine *rose, char *lvec,
- u32 start, u32 result) {
- const struct LogicalOp *logicalTree = (const struct LogicalOp *)
- ((const char *)rose + rose->logicalTreeOffset);
- assert(start >= rose->lkeyCount);
- assert(start <= result);
- assert(result < rose->lkeyCount + rose->lopCount);
- for (u32 i = start; i <= result; i++) {
- const struct LogicalOp *op = logicalTree + (i - rose->lkeyCount);
- assert(i == op->id);
- assert(op->op <= LAST_LOGICAL_OP);
- switch ((enum LogicalOpType)op->op) {
- case LOGICAL_OP_NOT:
- if ((op->ro < rose->lkeyCount) &&
- getLogicalVal(rose, lvec, op->ro)) {
- // sub-expression not negative
- return 0;
- }
- setLogicalVal(rose, lvec, op->id,
- !getLogicalVal(rose, lvec, op->ro));
- break;
- case LOGICAL_OP_AND:
- if (((op->lo < rose->lkeyCount) &&
- getLogicalVal(rose, lvec, op->lo)) ||
- ((op->ro < rose->lkeyCount) &&
- getLogicalVal(rose, lvec, op->ro))) {
- // sub-expression not negative
- return 0;
- }
- setLogicalVal(rose, lvec, op->id,
- getLogicalVal(rose, lvec, op->lo) &
- getLogicalVal(rose, lvec, op->ro)); // &&
- break;
- case LOGICAL_OP_OR:
- if (((op->lo < rose->lkeyCount) &&
- getLogicalVal(rose, lvec, op->lo)) ||
- ((op->ro < rose->lkeyCount) &&
- getLogicalVal(rose, lvec, op->ro))) {
- // sub-expression not negative
- return 0;
- }
- setLogicalVal(rose, lvec, op->id,
- getLogicalVal(rose, lvec, op->lo) |
- getLogicalVal(rose, lvec, op->ro)); // ||
- break;
- }
- }
- return getLogicalVal(rose, lvec, result);
-}
-
-/** \brief Clear all keys in the logical vector. */
-static really_inline
-void clearLvec(const struct RoseEngine *rose, char *lvec, char *cvec) {
- DEBUG_PRINTF("clearing lvec %p %u\n", lvec,
- rose->lkeyCount + rose->lopCount);
- DEBUG_PRINTF("clearing cvec %p %u\n", cvec, rose->ckeyCount);
- mmbit_clear((u8 *)lvec, rose->lkeyCount + rose->lopCount);
- mmbit_clear((u8 *)cvec, rose->ckeyCount);
-}
-
-/** \brief Clear all keys in the combination vector. */
-static really_inline
-void clearCvec(const struct RoseEngine *rose, char *cvec) {
- DEBUG_PRINTF("clearing cvec %p %u\n", cvec, rose->ckeyCount);
- mmbit_clear((u8 *)cvec, rose->ckeyCount);
-}
-
+/** \brief Test whether the given key (\a lkey) is set in the logical vector
+ * \a lvec. */
+static really_inline
+char getLogicalVal(const struct RoseEngine *rose, const char *lvec, u32 lkey) {
+ DEBUG_PRINTF("checking lkey matching %p %u\n", lvec, lkey);
+ assert(lkey != INVALID_LKEY);
+ assert(lkey < rose->lkeyCount + rose->lopCount);
+ return mmbit_isset((const u8 *)lvec, rose->lkeyCount + rose->lopCount,
+ lkey);
+}
+
+/** \brief Set key \a lkey in the logical vector to \a val. */
+static really_inline
+void setLogicalVal(const struct RoseEngine *rose, char *lvec, u32 lkey,
+ char val) {
+ DEBUG_PRINTF("marking as matched logical key %u\n", lkey);
+ assert(lkey != INVALID_LKEY);
+ assert(lkey < rose->lkeyCount + rose->lopCount);
+ switch (val) {
+ case 0:
+ mmbit_unset((u8 *)lvec, rose->lkeyCount + rose->lopCount, lkey);
+ break;
+ default:
+ mmbit_set((u8 *)lvec, rose->lkeyCount + rose->lopCount, lkey);
+ break;
+ }
+}
+
+/** \brief Mark key \a ckey as active in the combination vector. */
+static really_inline
+void setCombinationActive(const struct RoseEngine *rose, char *cvec, u32 ckey) {
+ DEBUG_PRINTF("marking as active combination key %u\n", ckey);
+ assert(ckey != INVALID_CKEY);
+ assert(ckey < rose->ckeyCount);
+ mmbit_set((u8 *)cvec, rose->ckeyCount, ckey);
+}
+
+/** \brief Returns 1 if the logical combination is satisfied. */
+static really_inline
+char isLogicalCombination(const struct RoseEngine *rose, char *lvec,
+ u32 start, u32 result) {
+ const struct LogicalOp *logicalTree = (const struct LogicalOp *)
+ ((const char *)rose + rose->logicalTreeOffset);
+ assert(start >= rose->lkeyCount);
+ assert(start <= result);
+ assert(result < rose->lkeyCount + rose->lopCount);
+ for (u32 i = start; i <= result; i++) {
+ const struct LogicalOp *op = logicalTree + (i - rose->lkeyCount);
+ assert(i == op->id);
+ assert(op->op <= LAST_LOGICAL_OP);
+ switch ((enum LogicalOpType)op->op) {
+ case LOGICAL_OP_NOT:
+ setLogicalVal(rose, lvec, op->id,
+ !getLogicalVal(rose, lvec, op->ro));
+ break;
+ case LOGICAL_OP_AND:
+ setLogicalVal(rose, lvec, op->id,
+ getLogicalVal(rose, lvec, op->lo) &
+ getLogicalVal(rose, lvec, op->ro)); // &&
+ break;
+ case LOGICAL_OP_OR:
+ setLogicalVal(rose, lvec, op->id,
+ getLogicalVal(rose, lvec, op->lo) |
+ getLogicalVal(rose, lvec, op->ro)); // ||
+ break;
+ }
+ }
+ return getLogicalVal(rose, lvec, result);
+}
+
+/** \brief Returns 1 if the combination matches even when no sub-expression has matched. */
+static really_inline
+char isPurelyNegativeMatch(const struct RoseEngine *rose, char *lvec,
+ u32 start, u32 result) {
+ const struct LogicalOp *logicalTree = (const struct LogicalOp *)
+ ((const char *)rose + rose->logicalTreeOffset);
+ assert(start >= rose->lkeyCount);
+ assert(start <= result);
+ assert(result < rose->lkeyCount + rose->lopCount);
+ for (u32 i = start; i <= result; i++) {
+ const struct LogicalOp *op = logicalTree + (i - rose->lkeyCount);
+ assert(i == op->id);
+ assert(op->op <= LAST_LOGICAL_OP);
+ switch ((enum LogicalOpType)op->op) {
+ case LOGICAL_OP_NOT:
+ if ((op->ro < rose->lkeyCount) &&
+ getLogicalVal(rose, lvec, op->ro)) {
+ // sub-expression not negative
+ return 0;
+ }
+ setLogicalVal(rose, lvec, op->id,
+ !getLogicalVal(rose, lvec, op->ro));
+ break;
+ case LOGICAL_OP_AND:
+ if (((op->lo < rose->lkeyCount) &&
+ getLogicalVal(rose, lvec, op->lo)) ||
+ ((op->ro < rose->lkeyCount) &&
+ getLogicalVal(rose, lvec, op->ro))) {
+ // sub-expression not negative
+ return 0;
+ }
+ setLogicalVal(rose, lvec, op->id,
+ getLogicalVal(rose, lvec, op->lo) &
+ getLogicalVal(rose, lvec, op->ro)); // &&
+ break;
+ case LOGICAL_OP_OR:
+ if (((op->lo < rose->lkeyCount) &&
+ getLogicalVal(rose, lvec, op->lo)) ||
+ ((op->ro < rose->lkeyCount) &&
+ getLogicalVal(rose, lvec, op->ro))) {
+ // sub-expression not negative
+ return 0;
+ }
+ setLogicalVal(rose, lvec, op->id,
+ getLogicalVal(rose, lvec, op->lo) |
+ getLogicalVal(rose, lvec, op->ro)); // ||
+ break;
+ }
+ }
+ return getLogicalVal(rose, lvec, result);
+}
+
+/** \brief Clear all keys in the logical vector. */
+static really_inline
+void clearLvec(const struct RoseEngine *rose, char *lvec, char *cvec) {
+ DEBUG_PRINTF("clearing lvec %p %u\n", lvec,
+ rose->lkeyCount + rose->lopCount);
+ DEBUG_PRINTF("clearing cvec %p %u\n", cvec, rose->ckeyCount);
+ mmbit_clear((u8 *)lvec, rose->lkeyCount + rose->lopCount);
+ mmbit_clear((u8 *)cvec, rose->ckeyCount);
+}
+
+/** \brief Clear all keys in the combination vector. */
+static really_inline
+void clearCvec(const struct RoseEngine *rose, char *cvec) {
+ DEBUG_PRINTF("clearing cvec %p %u\n", cvec, rose->ckeyCount);
+ mmbit_clear((u8 *)cvec, rose->ckeyCount);
+}
+
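The two evaluators restored above walk the LogicalOp array in index order; because the compiler lays the operations out in postfix order, every operand (a sub-expression lkey or an earlier operation's result slot) is already settled when an operation is reached. A simplified, self-contained sketch of that loop follows; the types and layout are hypothetical, not the engine's:

    enum { OP_NOT, OP_AND, OP_OR };
    struct Op { int op, id, lo, ro; }; /* result slot, left/right operands */

    /* vals[] holds one char per key: sub-expression results first, then one
     * slot per operation. Evaluating in array order realises the postfix
     * tree, just as isLogicalCombination does over the mmbit vector. */
    static int evalCombination(char *vals, const struct Op *ops, int n_ops) {
        for (int i = 0; i < n_ops; i++) {
            const struct Op *o = &ops[i];
            switch (o->op) {
            case OP_NOT: vals[o->id] = !vals[o->ro]; break;
            case OP_AND: vals[o->id] = vals[o->lo] & vals[o->ro]; break;
            case OP_OR:  vals[o->id] = vals[o->lo] | vals[o->ro]; break;
            }
        }
        return vals[ops[n_ops - 1].id]; /* final op's slot is the verdict */
    }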
/**
* \brief Deliver the given report to the user callback.
*
diff --git a/contrib/libs/hyperscan/src/rose/block.c b/contrib/libs/hyperscan/src/rose/block.c
index 7aadf024ca..b3f424cb73 100644
--- a/contrib/libs/hyperscan/src/rose/block.c
+++ b/contrib/libs/hyperscan/src/rose/block.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -145,7 +145,7 @@ void init_for_block(const struct RoseEngine *t, struct hs_scratch *scratch,
tctxt->lastEndOffset = 0;
tctxt->filledDelayedSlots = 0;
tctxt->lastMatchOffset = 0;
- tctxt->lastCombMatchOffset = 0;
+ tctxt->lastCombMatchOffset = 0;
tctxt->minMatchOffset = 0;
tctxt->minNonMpvMatchOffset = 0;
tctxt->next_mpv_offset = 0;
diff --git a/contrib/libs/hyperscan/src/rose/catchup.c b/contrib/libs/hyperscan/src/rose/catchup.c
index d54982046f..7a6648da98 100644
--- a/contrib/libs/hyperscan/src/rose/catchup.c
+++ b/contrib/libs/hyperscan/src/rose/catchup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -424,12 +424,12 @@ hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, s64a loc,
}
done:
- if (t->flushCombProgramOffset) {
- if (roseRunFlushCombProgram(t, scratch, mpv_exec_end)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
+ if (t->flushCombProgramOffset) {
+ if (roseRunFlushCombProgram(t, scratch, mpv_exec_end)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
updateMinMatchOffsetFromMpv(&scratch->tctxt, mpv_exec_end);
scratch->tctxt.next_mpv_offset
= MAX(next_pos_match_loc + scratch->core_info.buf_offset,
diff --git a/contrib/libs/hyperscan/src/rose/catchup.h b/contrib/libs/hyperscan/src/rose/catchup.h
index 89adbbb179..8188d5af01 100644
--- a/contrib/libs/hyperscan/src/rose/catchup.h
+++ b/contrib/libs/hyperscan/src/rose/catchup.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -51,7 +51,7 @@
#include "hwlm/hwlm.h"
#include "runtime.h"
#include "scratch.h"
-#include "rose.h"
+#include "rose.h"
#include "rose_common.h"
#include "rose_internal.h"
#include "ue2common.h"
@@ -106,12 +106,12 @@ hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, s64a loc,
assert(!can_stop_matching(scratch));
if (canSkipCatchUpMPV(t, scratch, cur_offset)) {
- if (t->flushCombProgramOffset) {
- if (roseRunFlushCombProgram(t, scratch, cur_offset)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
+ if (t->flushCombProgramOffset) {
+ if (roseRunFlushCombProgram(t, scratch, cur_offset)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
updateMinMatchOffsetFromMpv(&scratch->tctxt, cur_offset);
return HWLM_CONTINUE_MATCHING;
}
@@ -146,12 +146,12 @@ hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t,
hwlmcb_rv_t rv;
if (!t->activeArrayCount
|| !mmbit_any(getActiveLeafArray(t, state), t->activeArrayCount)) {
- if (t->flushCombProgramOffset) {
- if (roseRunFlushCombProgram(t, scratch, end)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
+ if (t->flushCombProgramOffset) {
+ if (roseRunFlushCombProgram(t, scratch, end)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
updateMinMatchOffset(&scratch->tctxt, end);
rv = HWLM_CONTINUE_MATCHING;
} else {
diff --git a/contrib/libs/hyperscan/src/rose/match.c b/contrib/libs/hyperscan/src/rose/match.c
index 00183e64fa..84d3b1fdc2 100644
--- a/contrib/libs/hyperscan/src/rose/match.c
+++ b/contrib/libs/hyperscan/src/rose/match.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -238,11 +238,11 @@ hwlmcb_rv_t roseProcessMatchInline(const struct RoseEngine *t,
assert(id && id < t->size); // id is an offset into bytecode
const u64a som = 0;
const u8 flags = 0;
- if (t->pureLiteral) {
- return roseRunProgram_l(t, scratch, id, som, end, flags);
- } else {
- return roseRunProgram(t, scratch, id, som, end, flags);
- }
+ if (t->pureLiteral) {
+ return roseRunProgram_l(t, scratch, id, som, end, flags);
+ } else {
+ return roseRunProgram(t, scratch, id, som, end, flags);
+ }
}
static rose_inline
@@ -575,39 +575,39 @@ int roseRunBoundaryProgram(const struct RoseEngine *rose, u32 program,
return MO_CONTINUE_MATCHING;
}
-/**
- * \brief Execute a flush combination program.
- *
- * Returns MO_HALT_MATCHING if the stream is exhausted or the user has
- * instructed us to halt, or MO_CONTINUE_MATCHING otherwise.
- */
-int roseRunFlushCombProgram(const struct RoseEngine *rose,
- struct hs_scratch *scratch, u64a end) {
- hwlmcb_rv_t rv = roseRunProgram(rose, scratch, rose->flushCombProgramOffset,
- 0, end, 0);
- if (rv == HWLM_TERMINATE_MATCHING) {
- return MO_HALT_MATCHING;
- }
- return MO_CONTINUE_MATCHING;
-}
-
-/**
- * \brief Execute last flush combination program.
- *
- * Returns MO_HALT_MATCHING if the stream is exhausted or the user has
- * instructed us to halt, or MO_CONTINUE_MATCHING otherwise.
- */
-int roseRunLastFlushCombProgram(const struct RoseEngine *rose,
- struct hs_scratch *scratch, u64a end) {
- hwlmcb_rv_t rv = roseRunProgram(rose, scratch,
- rose->lastFlushCombProgramOffset,
- 0, end, 0);
- if (rv == HWLM_TERMINATE_MATCHING) {
- return MO_HALT_MATCHING;
- }
- return MO_CONTINUE_MATCHING;
-}
-
+/**
+ * \brief Execute a flush combination program.
+ *
+ * Returns MO_HALT_MATCHING if the stream is exhausted or the user has
+ * instructed us to halt, or MO_CONTINUE_MATCHING otherwise.
+ */
+int roseRunFlushCombProgram(const struct RoseEngine *rose,
+ struct hs_scratch *scratch, u64a end) {
+ hwlmcb_rv_t rv = roseRunProgram(rose, scratch, rose->flushCombProgramOffset,
+ 0, end, 0);
+ if (rv == HWLM_TERMINATE_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ return MO_CONTINUE_MATCHING;
+}
+
+/**
+ * \brief Execute last flush combination program.
+ *
+ * Returns MO_HALT_MATCHING if the stream is exhausted or the user has
+ * instructed us to halt, or MO_CONTINUE_MATCHING otherwise.
+ */
+int roseRunLastFlushCombProgram(const struct RoseEngine *rose,
+ struct hs_scratch *scratch, u64a end) {
+ hwlmcb_rv_t rv = roseRunProgram(rose, scratch,
+ rose->lastFlushCombProgramOffset,
+ 0, end, 0);
+ if (rv == HWLM_TERMINATE_MATCHING) {
+ return MO_HALT_MATCHING;
+ }
+ return MO_CONTINUE_MATCHING;
+}
+
int roseReportAdaptor(u64a start, u64a end, ReportID id, void *context) {
struct hs_scratch *scratch = context;
assert(scratch && scratch->magic == SCRATCH_MAGIC);
@@ -619,12 +619,12 @@ int roseReportAdaptor(u64a start, u64a end, ReportID id, void *context) {
// Our match ID is the program offset.
const u32 program = id;
const u8 flags = ROSE_PROG_FLAG_SKIP_MPV_CATCHUP;
- hwlmcb_rv_t rv;
- if (rose->pureLiteral) {
- rv = roseRunProgram_l(rose, scratch, program, start, end, flags);
- } else {
- rv = roseRunProgram(rose, scratch, program, start, end, flags);
- }
+ hwlmcb_rv_t rv;
+ if (rose->pureLiteral) {
+ rv = roseRunProgram_l(rose, scratch, program, start, end, flags);
+ } else {
+ rv = roseRunProgram(rose, scratch, program, start, end, flags);
+ }
if (rv == HWLM_TERMINATE_MATCHING) {
return MO_HALT_MATCHING;
}
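For reference, the user-facing end of this reporting path is the hs_scan() match callback; the rose programs run above are what ultimately invoke it. A minimal sketch, with db and scratch assumed to have been created elsewhere:

    #include <stdio.h>
    #include <hs.h>

    static int on_match(unsigned int id, unsigned long long from,
                        unsigned long long to, unsigned int flags, void *ctx) {
        (void)from; (void)flags; (void)ctx;
        printf("match for id %u ending at %llu\n", id, to);
        return 0; /* non-zero here halts the scan */
    }

    /* usage: hs_scan(db, data, len, 0, scratch, on_match, NULL); */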
diff --git a/contrib/libs/hyperscan/src/rose/match.h b/contrib/libs/hyperscan/src/rose/match.h
index 1d5ccd6b64..c03b1ebbae 100644
--- a/contrib/libs/hyperscan/src/rose/match.h
+++ b/contrib/libs/hyperscan/src/rose/match.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -66,7 +66,7 @@ hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t,
u64a top_squash_distance, u64a end,
char in_catchup);
-/** \brief Initialize the queue for a suffix/outfix engine. */
+/** \brief Initialize the queue for a suffix/outfix engine. */
static really_inline
void initQueue(struct mq *q, u32 qi, const struct RoseEngine *t,
struct hs_scratch *scratch) {
@@ -91,7 +91,7 @@ void initQueue(struct mq *q, u32 qi, const struct RoseEngine *t,
info->stateOffset, *(u32 *)q->state);
}
-/** \brief Initialize the queue for a leftfix (prefix/infix) engine. */
+/** \brief Initialize the queue for a leftfix (prefix/infix) engine. */
static really_inline
void initRoseQueue(const struct RoseEngine *t, u32 qi,
const struct LeftNfaInfo *left,
diff --git a/contrib/libs/hyperscan/src/rose/program_runtime.c b/contrib/libs/hyperscan/src/rose/program_runtime.c
index 0f21fdea03..ff5a5099c9 100644
--- a/contrib/libs/hyperscan/src/rose/program_runtime.c
+++ b/contrib/libs/hyperscan/src/rose/program_runtime.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,1662 +33,1662 @@
#include "program_runtime.h"
-#include "catchup.h"
-#include "counting_miracle.h"
-#include "infix.h"
-#include "match.h"
-#include "miracle.h"
-#include "report.h"
-#include "rose_common.h"
-#include "rose_internal.h"
-#include "rose_program.h"
-#include "rose_types.h"
-#include "validate_mask.h"
-#include "validate_shufti.h"
-#include "runtime.h"
-#include "util/compare.h"
-#include "util/copybytes.h"
-#include "util/fatbit.h"
-#include "util/multibit.h"
-
-/* Inline implementation follows. */
-
-static rose_inline
-void rosePushDelayedMatch(const struct RoseEngine *t,
- struct hs_scratch *scratch, u32 delay,
- u32 delay_index, u64a offset) {
- assert(delay);
-
- const u32 src_slot_index = delay;
- u32 slot_index = (src_slot_index + offset) & DELAY_MASK;
-
- struct RoseContext *tctxt = &scratch->tctxt;
- if (offset + src_slot_index <= tctxt->delayLastEndOffset) {
- DEBUG_PRINTF("skip too late\n");
- return;
- }
-
- const u32 delay_count = t->delay_count;
- struct fatbit **delaySlots = getDelaySlots(scratch);
- struct fatbit *slot = delaySlots[slot_index];
-
- DEBUG_PRINTF("pushing tab %u into slot %u\n", delay_index, slot_index);
- if (!(tctxt->filledDelayedSlots & (1U << slot_index))) {
- tctxt->filledDelayedSlots |= 1U << slot_index;
- fatbit_clear(slot);
- }
-
- fatbit_set(slot, delay_count, delay_index);
-}
-
-static rose_inline
-void recordAnchoredLiteralMatch(const struct RoseEngine *t,
- struct hs_scratch *scratch, u32 anch_id,
- u64a end) {
- assert(end);
-
- if (end <= t->floatingMinLiteralMatchOffset) {
- return;
- }
-
- struct fatbit **anchoredLiteralRows = getAnchoredLiteralLog(scratch);
-
- DEBUG_PRINTF("record %u (of %u) @ %llu\n", anch_id, t->anchored_count, end);
-
- if (!bf64_set(&scratch->al_log_sum, end - 1)) {
- // first time, clear row
- DEBUG_PRINTF("clearing %llu/%u\n", end - 1, t->anchored_count);
- fatbit_clear(anchoredLiteralRows[end - 1]);
- }
-
- assert(anch_id < t->anchored_count);
- fatbit_set(anchoredLiteralRows[end - 1], t->anchored_count, anch_id);
-}
-
-static rose_inline
-char roseLeftfixCheckMiracles(const struct RoseEngine *t,
- const struct LeftNfaInfo *left,
- struct core_info *ci, struct mq *q, u64a end,
- const char is_infix) {
- if (!is_infix && left->transient) {
- // Miracles won't help us with transient leftfix engines; they only
- // scan for a limited time anyway.
- return 1;
- }
-
- if (!left->stopTable) {
- return 1;
- }
-
- DEBUG_PRINTF("looking for miracle on queue %u\n", q->nfa->queueIndex);
-
- const s64a begin_loc = q_cur_loc(q);
- const s64a end_loc = end - ci->buf_offset;
-
- s64a miracle_loc;
- if (roseMiracleOccurs(t, left, ci, begin_loc, end_loc, &miracle_loc)) {
- goto found_miracle;
- }
-
- if (roseCountingMiracleOccurs(t, left, ci, begin_loc, end_loc,
- &miracle_loc)) {
- goto found_miracle;
- }
-
- return 1;
-
-found_miracle:
- DEBUG_PRINTF("miracle at %lld\n", miracle_loc);
- assert(miracle_loc >= begin_loc);
-
- // If we're a prefix, then a miracle effectively results in us needing to
- // re-init our state and start fresh.
- if (!is_infix) {
- if (miracle_loc != begin_loc) {
- DEBUG_PRINTF("re-init prefix state\n");
- q->cur = q->end = 0;
- pushQueueAt(q, 0, MQE_START, miracle_loc);
- pushQueueAt(q, 1, MQE_TOP, miracle_loc);
- nfaQueueInitState(q->nfa, q);
- }
- return 1;
- }
-
- // Otherwise, we're an infix. Remove tops before the miracle from the queue
- // and re-init at that location.
-
- q_skip_forward_to(q, miracle_loc);
-
- if (q_last_type(q) == MQE_START) {
- DEBUG_PRINTF("miracle caused infix to die\n");
- return 0;
- }
-
- DEBUG_PRINTF("re-init infix state\n");
- assert(q->items[q->cur].type == MQE_START);
- q->items[q->cur].location = miracle_loc;
- nfaQueueInitState(q->nfa, q);
-
- return 1;
-}
-
-static rose_inline
-hwlmcb_rv_t roseTriggerSuffix(const struct RoseEngine *t,
- struct hs_scratch *scratch, u32 qi, u32 top,
- u64a som, u64a end) {
- DEBUG_PRINTF("suffix qi=%u, top event=%u\n", qi, top);
-
- struct core_info *ci = &scratch->core_info;
- u8 *aa = getActiveLeafArray(t, ci->state);
- const u32 aaCount = t->activeArrayCount;
- const u32 qCount = t->queueCount;
- struct mq *q = &scratch->queues[qi];
- const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
- const struct NFA *nfa = getNfaByInfo(t, info);
-
- s64a loc = (s64a)end - ci->buf_offset;
- assert(loc <= (s64a)ci->len && loc >= -(s64a)ci->hlen);
-
- if (!mmbit_set(aa, aaCount, qi)) {
- initQueue(q, qi, t, scratch);
- nfaQueueInitState(nfa, q);
- pushQueueAt(q, 0, MQE_START, loc);
- fatbit_set(scratch->aqa, qCount, qi);
- } else if (info->no_retrigger) {
- DEBUG_PRINTF("yawn\n");
- /* nfa only needs one top; we can go home now */
- return HWLM_CONTINUE_MATCHING;
- } else if (!fatbit_set(scratch->aqa, qCount, qi)) {
- initQueue(q, qi, t, scratch);
- loadStreamState(nfa, q, 0);
- pushQueueAt(q, 0, MQE_START, 0);
- } else if (isQueueFull(q)) {
- DEBUG_PRINTF("queue %u full -> catching up nfas\n", qi);
- if (info->eod) {
- /* can catch up suffix independently, no pq needed */
- q->context = NULL;
- pushQueueNoMerge(q, MQE_END, loc);
- nfaQueueExecRose(q->nfa, q, MO_INVALID_IDX);
- q->cur = q->end = 0;
- pushQueueAt(q, 0, MQE_START, loc);
- } else if (ensureQueueFlushed(t, scratch, qi, loc)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
-
- assert(top == MQE_TOP || (top >= MQE_TOP_FIRST && top < MQE_INVALID));
- pushQueueSom(q, top, loc, som);
-
- if (q_cur_loc(q) == (s64a)ci->len && !info->eod) {
- /* we may not run the nfa; need to ensure state is fine */
- DEBUG_PRINTF("empty run\n");
- pushQueueNoMerge(q, MQE_END, loc);
- char alive = nfaQueueExec(nfa, q, loc);
- if (alive) {
- q->cur = q->end = 0;
- pushQueueAt(q, 0, MQE_START, loc);
- } else {
- mmbit_unset(aa, aaCount, qi);
- fatbit_unset(scratch->aqa, qCount, qi);
- }
- }
-
- return HWLM_CONTINUE_MATCHING;
-}
-
-static really_inline
-char roseTestLeftfix(const struct RoseEngine *t, struct hs_scratch *scratch,
- u32 qi, u32 leftfixLag, ReportID leftfixReport, u64a end,
- const char is_infix) {
- struct core_info *ci = &scratch->core_info;
-
- u32 ri = queueToLeftIndex(t, qi);
- const struct LeftNfaInfo *left = getLeftTable(t) + ri;
-
- DEBUG_PRINTF("testing %s %s %u/%u with lag %u (maxLag=%u)\n",
- (left->transient ? "transient" : "active"),
- (is_infix ? "infix" : "prefix"),
- ri, qi, leftfixLag, left->maxLag);
-
- assert(leftfixLag <= left->maxLag);
- assert(left->infix == is_infix);
- assert(!is_infix || !left->transient); // Only prefixes can be transient.
-
- struct mq *q = scratch->queues + qi;
- char *state = scratch->core_info.state;
- u8 *activeLeftArray = getActiveLeftArray(t, state);
- u32 qCount = t->queueCount;
- u32 arCount = t->activeLeftCount;
-
- if (!mmbit_isset(activeLeftArray, arCount, ri)) {
- DEBUG_PRINTF("engine is dead nothing to see here\n");
- return 0;
- }
-
- if (unlikely(end < leftfixLag)) {
- assert(0); /* lag is the literal length */
- return 0;
- }
-
- if (nfaSupportsZombie(getNfaByQueue(t, qi)) && ci->buf_offset
- && !fatbit_isset(scratch->aqa, qCount, qi)
- && isZombie(t, state, left)) {
- DEBUG_PRINTF("zombie\n");
- return 1;
- }
-
- if (!fatbit_set(scratch->aqa, qCount, qi)) {
- DEBUG_PRINTF("initing q %u\n", qi);
- initRoseQueue(t, qi, left, scratch);
- if (ci->buf_offset) { // there have been writes before us!
- s32 sp;
- if (!is_infix && left->transient) {
- sp = -(s32)ci->hlen;
- } else {
- sp = -(s32)loadRoseDelay(t, state, left);
- }
-
- /* transient nfas are always started fresh -> state not maintained
- * at stream boundary */
-
- pushQueueAt(q, 0, MQE_START, sp);
- if (is_infix || (ci->buf_offset + sp > 0 && !left->transient)) {
- loadStreamState(q->nfa, q, sp);
- } else {
- pushQueueAt(q, 1, MQE_TOP, sp);
- nfaQueueInitState(q->nfa, q);
- }
- } else { // first write ever
- pushQueueAt(q, 0, MQE_START, 0);
- pushQueueAt(q, 1, MQE_TOP, 0);
- nfaQueueInitState(q->nfa, q);
- }
- }
-
- s64a loc = (s64a)end - ci->buf_offset - leftfixLag;
- assert(loc >= q_cur_loc(q) || left->eager);
- assert(leftfixReport != MO_INVALID_IDX);
-
- if (!is_infix && left->transient) {
- s64a start_loc = loc - left->transient;
- if (q_cur_loc(q) < start_loc) {
- q->cur = q->end = 0;
- pushQueueAt(q, 0, MQE_START, start_loc);
- pushQueueAt(q, 1, MQE_TOP, start_loc);
- nfaQueueInitState(q->nfa, q);
- }
- }
-
- if (q_cur_loc(q) < loc || q_last_type(q) != MQE_START) {
- if (is_infix) {
- if (infixTooOld(q, loc)) {
- DEBUG_PRINTF("infix %u died of old age\n", ri);
- goto nfa_dead;
- }
-
- reduceInfixQueue(q, loc, left->maxQueueLen, q->nfa->maxWidth);
- }
-
- if (!roseLeftfixCheckMiracles(t, left, ci, q, end, is_infix)) {
- DEBUG_PRINTF("leftfix %u died due to miracle\n", ri);
- goto nfa_dead;
- }
-
-#ifdef DEBUG
- debugQueue(q);
-#endif
-
- pushQueueNoMerge(q, MQE_END, loc);
-
- char rv = nfaQueueExecRose(q->nfa, q, leftfixReport);
- if (!rv) { /* nfa is dead */
- DEBUG_PRINTF("leftfix %u died while trying to catch up\n", ri);
- goto nfa_dead;
- }
-
- // Queue must have next start loc before we call nfaInAcceptState.
- q->cur = q->end = 0;
- pushQueueAt(q, 0, MQE_START, loc);
-
- DEBUG_PRINTF("checking for report %u\n", leftfixReport);
- DEBUG_PRINTF("leftfix done %hhd\n", (signed char)rv);
- return rv == MO_MATCHES_PENDING;
- } else if (q_cur_loc(q) > loc) {
- /* an eager leftfix may have already progressed past loc if there is no
- * match at loc. */
- assert(left->eager);
- return 0;
- } else {
- assert(q_cur_loc(q) == loc);
- DEBUG_PRINTF("checking for report %u\n", leftfixReport);
- char rv = nfaInAcceptState(q->nfa, leftfixReport, q);
- DEBUG_PRINTF("leftfix done %hhd\n", (signed char)rv);
- return rv;
- }
-
-nfa_dead:
- mmbit_unset(activeLeftArray, arCount, ri);
- scratch->tctxt.groups &= left->squash_mask;
- return 0;
-}
-
-static rose_inline
-char roseTestPrefix(const struct RoseEngine *t, struct hs_scratch *scratch,
- u32 qi, u32 leftfixLag, ReportID leftfixReport, u64a end) {
- return roseTestLeftfix(t, scratch, qi, leftfixLag, leftfixReport, end, 0);
-}
-
-static rose_inline
-char roseTestInfix(const struct RoseEngine *t, struct hs_scratch *scratch,
- u32 qi, u32 leftfixLag, ReportID leftfixReport, u64a end) {
- return roseTestLeftfix(t, scratch, qi, leftfixLag, leftfixReport, end, 1);
-}
-
-static rose_inline
-void roseTriggerInfix(const struct RoseEngine *t, struct hs_scratch *scratch,
- u64a start, u64a end, u32 qi, u32 topEvent, u8 cancel) {
- struct core_info *ci = &scratch->core_info;
- s64a loc = (s64a)end - ci->buf_offset;
-
- u32 ri = queueToLeftIndex(t, qi);
- assert(topEvent < MQE_INVALID);
-
- const struct LeftNfaInfo *left = getLeftInfoByQueue(t, qi);
- assert(!left->transient);
-
- DEBUG_PRINTF("rose %u (qi=%u) event %u\n", ri, qi, topEvent);
-
- struct mq *q = scratch->queues + qi;
- const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
-
- char *state = ci->state;
- u8 *activeLeftArray = getActiveLeftArray(t, state);
- const u32 arCount = t->activeLeftCount;
- char alive = mmbit_set(activeLeftArray, arCount, ri);
-
- if (alive && info->no_retrigger) {
- DEBUG_PRINTF("yawn\n");
- return;
- }
-
- struct fatbit *aqa = scratch->aqa;
- const u32 qCount = t->queueCount;
-
- if (alive && nfaSupportsZombie(getNfaByInfo(t, info)) && ci->buf_offset &&
- !fatbit_isset(aqa, qCount, qi) && isZombie(t, state, left)) {
- DEBUG_PRINTF("yawn - zombie\n");
- return;
- }
-
- if (cancel) {
- DEBUG_PRINTF("dominating top: (re)init\n");
- fatbit_set(aqa, qCount, qi);
- initRoseQueue(t, qi, left, scratch);
- pushQueueAt(q, 0, MQE_START, loc);
- nfaQueueInitState(q->nfa, q);
- } else if (!fatbit_set(aqa, qCount, qi)) {
- DEBUG_PRINTF("initing %u\n", qi);
- initRoseQueue(t, qi, left, scratch);
- if (alive) {
- s32 sp = -(s32)loadRoseDelay(t, state, left);
- pushQueueAt(q, 0, MQE_START, sp);
- loadStreamState(q->nfa, q, sp);
- } else {
- pushQueueAt(q, 0, MQE_START, loc);
- nfaQueueInitState(q->nfa, q);
- }
- } else if (!alive) {
- q->cur = q->end = 0;
- pushQueueAt(q, 0, MQE_START, loc);
- nfaQueueInitState(q->nfa, q);
- } else if (isQueueFull(q)) {
- reduceInfixQueue(q, loc, left->maxQueueLen, q->nfa->maxWidth);
-
- if (isQueueFull(q)) {
- /* still full - reduceInfixQueue did nothing */
- DEBUG_PRINTF("queue %u full (%u items) -> catching up nfa\n", qi,
- q->end - q->cur);
- pushQueueNoMerge(q, MQE_END, loc);
- nfaQueueExecRose(q->nfa, q, MO_INVALID_IDX);
-
- q->cur = q->end = 0;
- pushQueueAt(q, 0, MQE_START, loc);
- }
- }
-
- pushQueueSom(q, topEvent, loc, start);
-}
-
-static rose_inline
-hwlmcb_rv_t roseReport(const struct RoseEngine *t, struct hs_scratch *scratch,
- u64a end, ReportID onmatch, s32 offset_adjust,
- u32 ekey) {
- DEBUG_PRINTF("firing callback onmatch=%u, end=%llu\n", onmatch, end);
- updateLastMatchOffset(&scratch->tctxt, end);
-
- int cb_rv = roseDeliverReport(end, onmatch, offset_adjust, scratch, ekey);
- if (cb_rv == MO_HALT_MATCHING) {
- DEBUG_PRINTF("termination requested\n");
- return HWLM_TERMINATE_MATCHING;
- }
-
- if (ekey == INVALID_EKEY || cb_rv == ROSE_CONTINUE_MATCHING_NO_EXHAUST) {
- return HWLM_CONTINUE_MATCHING;
- }
-
- return roseHaltIfExhausted(t, scratch);
-}
-
-static rose_inline
-hwlmcb_rv_t roseReportComb(const struct RoseEngine *t,
- struct hs_scratch *scratch, u64a end,
- ReportID onmatch, s32 offset_adjust, u32 ekey) {
- DEBUG_PRINTF("firing callback onmatch=%u, end=%llu\n", onmatch, end);
-
- int cb_rv = roseDeliverReport(end, onmatch, offset_adjust, scratch, ekey);
- if (cb_rv == MO_HALT_MATCHING) {
- DEBUG_PRINTF("termination requested\n");
- return HWLM_TERMINATE_MATCHING;
- }
-
- if (ekey == INVALID_EKEY || cb_rv == ROSE_CONTINUE_MATCHING_NO_EXHAUST) {
- return HWLM_CONTINUE_MATCHING;
- }
-
- return roseHaltIfExhausted(t, scratch);
-}
-
-/* catches up engines enough to ensure any earlier mpv triggers are enqueued
- * and then adds the trigger to the mpv queue. */
-static rose_inline
-hwlmcb_rv_t roseCatchUpAndHandleChainMatch(const struct RoseEngine *t,
- struct hs_scratch *scratch,
- u32 event, u64a top_squash_distance,
- u64a end, const char in_catchup) {
- if (!in_catchup &&
- roseCatchUpMpvFeeders(t, scratch, end) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- return roseHandleChainMatch(t, scratch, event, top_squash_distance, end,
- in_catchup);
-}
-
-static rose_inline
-void roseHandleSom(struct hs_scratch *scratch, const struct som_operation *sr,
- u64a end) {
- DEBUG_PRINTF("end=%llu, minMatchOffset=%llu\n", end,
- scratch->tctxt.minMatchOffset);
-
- updateLastMatchOffset(&scratch->tctxt, end);
- handleSomInternal(scratch, sr, end);
-}
-
-static rose_inline
-hwlmcb_rv_t roseReportSom(const struct RoseEngine *t,
- struct hs_scratch *scratch, u64a start, u64a end,
- ReportID onmatch, s32 offset_adjust, u32 ekey) {
- DEBUG_PRINTF("firing som callback onmatch=%u, start=%llu, end=%llu\n",
- onmatch, start, end);
- updateLastMatchOffset(&scratch->tctxt, end);
-
- int cb_rv = roseDeliverSomReport(start, end, onmatch, offset_adjust,
- scratch, ekey);
- if (cb_rv == MO_HALT_MATCHING) {
- DEBUG_PRINTF("termination requested\n");
- return HWLM_TERMINATE_MATCHING;
- }
-
- if (ekey == INVALID_EKEY || cb_rv == ROSE_CONTINUE_MATCHING_NO_EXHAUST) {
- return HWLM_CONTINUE_MATCHING;
- }
-
- return roseHaltIfExhausted(t, scratch);
-}
-
-static rose_inline
-void roseHandleSomSom(struct hs_scratch *scratch,
- const struct som_operation *sr, u64a start, u64a end) {
- DEBUG_PRINTF("start=%llu, end=%llu, minMatchOffset=%llu\n", start, end,
- scratch->tctxt.minMatchOffset);
-
- updateLastMatchOffset(&scratch->tctxt, end);
- setSomFromSomAware(scratch, sr, start, end);
-}
-
-static rose_inline
-hwlmcb_rv_t roseSetExhaust(const struct RoseEngine *t,
- struct hs_scratch *scratch, u32 ekey) {
- assert(scratch);
- assert(scratch->magic == SCRATCH_MAGIC);
-
- struct core_info *ci = &scratch->core_info;
-
- assert(!can_stop_matching(scratch));
- assert(!isExhausted(ci->rose, ci->exhaustionVector, ekey));
-
- markAsMatched(ci->rose, ci->exhaustionVector, ekey);
-
- return roseHaltIfExhausted(t, scratch);
-}
-
-static really_inline
-int reachHasBit(const u8 *reach, u8 c) {
- return !!(reach[c / 8U] & (u8)1U << (c % 8U));
-}
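/* Worked example (illustrative): the reach table is a 256-bit per-byte-value
 * bitmap. For c = 'b' (0x62 = 98), reachHasBit tests bit 98 % 8 = 2 of
 * reach[98 / 8] = reach[12], i.e. reach[12] & 0x04. */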
-
-/*
- * Generate an 8-byte valid_mask with 'high' bytes of 0 at the high end,
- * 'low' bytes of 0 at the low end, and (8 - high - low) bytes of 0xff
- * in the middle.
- */
-static rose_inline
-u64a generateValidMask(const s32 high, const s32 low) {
- assert(high + low < 8);
- DEBUG_PRINTF("high %d low %d\n", high, low);
- const u64a ones = ~0ull;
- return (ones << ((high + low) * 8)) >> (high * 8);
-}
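/* Worked example (illustrative): generateValidMask(2, 1) computes
 * ~0ull << 24 = 0xffffffffff000000, then >> 16 = 0x0000ffffffffff00:
 * two zero bytes at the high end, one at the low end, and five 0xff
 * bytes in the middle. */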
-
-/*
- * Do the single-byte check when only one lookaround entry exists and it is
- * a single mask.
- * Returns success if the byte is in the future or before the history
- * (i.e. the offset is beyond the relevant buffer's length).
- */
-static rose_inline
-int roseCheckByte(const struct core_info *ci, u8 and_mask, u8 cmp_mask,
- u8 negation, s32 checkOffset, u64a end) {
- DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
- ci->buf_offset, ci->buf_offset + ci->len);
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- const s64a base_offset = end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("checkOffset=%d offset=%lld\n", checkOffset, offset);
- u8 c;
- if (offset >= 0) {
- if (offset >= (s64a)ci->len) {
- DEBUG_PRINTF("in the future\n");
- return 1;
- } else {
- assert(offset < (s64a)ci->len);
- DEBUG_PRINTF("check byte in buffer\n");
- c = ci->buf[offset];
- }
- } else {
- if (offset >= -(s64a) ci->hlen) {
- DEBUG_PRINTF("check byte in history\n");
- c = ci->hbuf[ci->hlen + offset];
- } else {
- DEBUG_PRINTF("before history and return\n");
- return 1;
- }
- }
-
- if (((and_mask & c) != cmp_mask) ^ negation) {
- DEBUG_PRINTF("char 0x%02x at offset %lld failed byte check\n",
- c, offset);
- return 0;
- }
-
- DEBUG_PRINTF("real offset=%lld char=%02x\n", offset, c);
- DEBUG_PRINTF("OK :)\n");
- return 1;
-}
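/* Worked example (illustrative): a caseless check for 'A'/'a' can use
 * and_mask = 0xdf, cmp_mask = 0x41, negation = 0. Both 'A' (0x41) and
 * 'a' (0x61) satisfy (0xdf & c) == 0x41, so the check passes; any other
 * byte fails. Setting negation = 1 inverts the verdict. */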
-
-static rose_inline
-int roseCheckMask(const struct core_info *ci, u64a and_mask, u64a cmp_mask,
- u64a neg_mask, s32 checkOffset, u64a end) {
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("rel offset %lld\n",base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- u64a data = 0;
- u64a valid_data_mask = ~0ULL; // mask for the validate check.
- // A 0xff byte means that this byte is in the buffer.
- s32 shift_l = 0; // number of bytes in the future.
- s32 shift_r = 0; // number of bytes before the history.
- s32 h_len = 0; // number of bytes in the history buffer.
- s32 c_len = 8; // number of bytes in the current buffer.
- if (offset < 0) {
- // in or before history buffer.
- if (offset + 8 <= -(s64a)ci->hlen) {
- DEBUG_PRINTF("before history and return\n");
- return 1;
- }
- const u8 *h_start = ci->hbuf; // start pointer in history buffer.
- if (offset < -(s64a)ci->hlen) {
- // some bytes are before history.
- shift_r = -(offset + (s64a)ci->hlen);
- DEBUG_PRINTF("shift_r %d", shift_r);
- } else {
- h_start += ci->hlen + offset;
- }
- if (offset + 7 < 0) {
- DEBUG_PRINTF("all in history buffer\n");
- data = partial_load_u64a(h_start, 8 - shift_r);
- } else {
- // history part
- c_len = offset + 8;
- h_len = -offset - shift_r;
- DEBUG_PRINTF("%d bytes in history\n", h_len);
- s64a data_h = 0;
- data_h = partial_load_u64a(h_start, h_len);
- // current part
- if (c_len > (s64a)ci->len) {
- shift_l = c_len - ci->len;
- c_len = ci->len;
- }
- data = partial_load_u64a(ci->buf, c_len);
- data <<= h_len << 3;
- data |= data_h;
- }
- if (shift_r) {
- data <<= shift_r << 3;
- }
- } else {
- // current buffer.
- if (offset + c_len > (s64a)ci->len) {
- if (offset >= (s64a)ci->len) {
- DEBUG_PRINTF("all in the future\n");
- return 1;
- }
- // some bytes in the future.
- shift_l = offset + c_len - ci->len;
- c_len = ci->len - offset;
- data = partial_load_u64a(ci->buf + offset, c_len);
- } else {
- data = unaligned_load_u64a(ci->buf + offset);
- }
- }
-
- if (shift_l || shift_r) {
- valid_data_mask = generateValidMask(shift_l, shift_r);
- }
- DEBUG_PRINTF("valid_data_mask %llx\n", valid_data_mask);
-
- if (validateMask(data, valid_data_mask,
- and_mask, cmp_mask, neg_mask)) {
- DEBUG_PRINTF("check mask successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
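/* Worked example (illustrative): with hlen = 4, len = 8 and offset = -3,
 * the 8-byte window straddles both buffers: h_len = 3 bytes come from the
 * end of the history buffer (the low bytes of data), and c_len = 5 bytes
 * from the start of the current buffer, shifted up by h_len * 8 bits.
 * Nothing falls outside either buffer, so shift_l = shift_r = 0 and
 * valid_data_mask remains ~0ULL. */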
-
-static rose_inline
-int roseCheckMask32(const struct core_info *ci, const u8 *and_mask,
- const u8 *cmp_mask, const u32 neg_mask,
- s32 checkOffset, u64a end) {
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- m256 data = zeroes256(); // assembled from the four regions below.
- s32 c_shift = 0; // blank bytes after current.
- s32 h_shift = 0; // blank bytes before history.
- s32 h_len = 32; // number of bytes from history buffer.
- s32 c_len = 0; // number of bytes from current buffer.
- /* h_shift + h_len + c_len + c_shift == 32 must hold. */
-
- if (offset < 0) {
- s32 h_offset = 0; // the start offset in history buffer.
- if (offset < -(s64a)ci->hlen) {
- if (offset + 32 <= -(s64a)ci->hlen) {
- DEBUG_PRINTF("all before history\n");
- return 1;
- }
- h_shift = -(offset + (s64a)ci->hlen);
- h_len = 32 - h_shift;
- } else {
- h_offset = ci->hlen + offset;
- }
- if (offset + 32 > 0) {
- // part in current buffer.
- c_len = offset + 32;
- h_len = -(offset + h_shift);
- if (c_len > (s64a)ci->len) {
- // out of current buffer.
- c_shift = c_len - ci->len;
- c_len = ci->len;
- }
- copy_upto_64_bytes((u8 *)&data - offset, ci->buf, c_len);
- }
- assert(h_shift + h_len + c_len + c_shift == 32);
- copy_upto_64_bytes((u8 *)&data + h_shift, ci->hbuf + h_offset, h_len);
- } else {
- if (offset + 32 > (s64a)ci->len) {
- if (offset >= (s64a)ci->len) {
- DEBUG_PRINTF("all in the future.\n");
- return 1;
- }
- c_len = ci->len - offset;
- c_shift = 32 - c_len;
- copy_upto_64_bytes((u8 *)&data, ci->buf + offset, c_len);
- } else {
- data = loadu256(ci->buf + offset);
- }
- }
- DEBUG_PRINTF("h_shift %d c_shift %d\n", h_shift, c_shift);
- DEBUG_PRINTF("h_len %d c_len %d\n", h_len, c_len);
- // we use valid_data_mask to mask off bytes before history / in the future.
- u32 valid_data_mask;
- valid_data_mask = (~0u) << (h_shift + c_shift) >> (c_shift);
-
- m256 and_mask_m256 = loadu256(and_mask);
- m256 cmp_mask_m256 = loadu256(cmp_mask);
- if (validateMask32(data, valid_data_mask, and_mask_m256,
- cmp_mask_m256, neg_mask)) {
- DEBUG_PRINTF("Mask32 passed\n");
- return 1;
- }
- return 0;
-}
-
-#ifdef HAVE_AVX512
-static rose_inline
-int roseCheckMask64(const struct core_info *ci, const u8 *and_mask,
- const u8 *cmp_mask, const u64a neg_mask,
- s32 checkOffset, u64a end) {
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- m512 data = zeroes512(); // assembled from the four regions below.
- s32 c_shift = 0; // blank bytes after current.
- s32 h_shift = 0; // blank bytes before history.
- s32 h_len = 64; // number of bytes from history buffer.
- s32 c_len = 0; // number of bytes from current buffer.
- /* h_shift + h_len + c_len + c_shift == 64 must hold. */
-
- if (offset < 0) {
- s32 h_offset = 0; // the start offset in history buffer.
- if (offset < -(s64a)ci->hlen) {
- if (offset + 64 <= -(s64a)ci->hlen) {
- DEBUG_PRINTF("all before history\n");
- return 1;
- }
- h_shift = -(offset + (s64a)ci->hlen);
- h_len = 64 - h_shift;
- } else {
- h_offset = ci->hlen + offset;
- }
- if (offset + 64 > 0) {
- // part in current buffer.
- c_len = offset + 64;
- h_len = -(offset + h_shift);
- if (c_len > (s64a)ci->len) {
- // out of current buffer.
- c_shift = c_len - ci->len;
- c_len = ci->len;
- }
- copy_upto_64_bytes((u8 *)&data - offset, ci->buf, c_len);
- }
- assert(h_shift + h_len + c_len + c_shift == 64);
- copy_upto_64_bytes((u8 *)&data + h_shift, ci->hbuf + h_offset, h_len);
- } else {
- if (offset + 64 > (s64a)ci->len) {
- if (offset >= (s64a)ci->len) {
- DEBUG_PRINTF("all in the future.\n");
- return 1;
- }
- c_len = ci->len - offset;
- c_shift = 64 - c_len;
- copy_upto_64_bytes((u8 *)&data, ci->buf + offset, c_len);
- } else {
- data = loadu512(ci->buf + offset);
- }
- }
- DEBUG_PRINTF("h_shift %d c_shift %d\n", h_shift, c_shift);
- DEBUG_PRINTF("h_len %d c_len %d\n", h_len, c_len);
- // we use valid_data_mask to mask off bytes before history / in the future.
- u64a valid_data_mask;
- valid_data_mask = (~0ULL) << (h_shift + c_shift) >> (c_shift);
-
- m512 and_mask_m512 = loadu512(and_mask);
- m512 cmp_mask_m512 = loadu512(cmp_mask);
-
- if (validateMask64(data, valid_data_mask, and_mask_m512,
- cmp_mask_m512, neg_mask)) {
- DEBUG_PRINTF("Mask64 passed\n");
- return 1;
- }
- return 0;
-}
-#endif
-
-// Get 128/256/512 bits of data spanning the history and current buffers.
-// Writes the bytes into *data and returns the valid_data_mask.
-static rose_inline
-u64a getBufferDataComplex(const struct core_info *ci, const s64a loc,
- u8 *data, const u32 data_len) {
- assert(data_len == 16 || data_len == 32 || data_len == 64);
- s32 c_shift = 0; // blank bytes after current.
- s32 h_shift = 0; // blank bytes before history.
- s32 h_len = data_len; // number of bytes from history buffer.
- s32 c_len = 0; // number of bytes from current buffer.
- if (loc < 0) {
- s32 h_offset = 0; // the start offset in history buffer.
- if (loc < -(s64a)ci->hlen) {
- if (loc + data_len <= -(s64a)ci->hlen) {
- DEBUG_PRINTF("all before history\n");
- return 0;
- }
- h_shift = -(loc + (s64a)ci->hlen);
- h_len = data_len - h_shift;
- } else {
- h_offset = ci->hlen + loc;
- }
- if (loc + data_len > 0) {
- // part in current buffer.
- c_len = loc + data_len;
- h_len = -(loc + h_shift);
- if (c_len > (s64a)ci->len) {
- // out of current buffer.
- c_shift = c_len - ci->len;
- c_len = ci->len;
- }
- copy_upto_64_bytes(data - loc, ci->buf, c_len);
- }
- assert(h_shift + h_len + c_len + c_shift == (s32)data_len);
- copy_upto_64_bytes(data + h_shift, ci->hbuf + h_offset, h_len);
- } else {
- if (loc + data_len > (s64a)ci->len) {
- if (loc >= (s64a)ci->len) {
- DEBUG_PRINTF("all in the future.\n");
- return 0;
- }
- c_len = ci->len - loc;
- c_shift = data_len - c_len;
- copy_upto_64_bytes(data, ci->buf + loc, c_len);
- } else {
-#ifdef HAVE_AVX512
- if (data_len == 64) {
- storeu512(data, loadu512(ci->buf + loc));
- return ~0ULL;
- }
-#endif
- if (data_len == 16) {
- storeu128(data, loadu128(ci->buf + loc));
- return 0xffff;
- } else {
- storeu256(data, loadu256(ci->buf + loc));
- return 0xffffffff;
- }
- }
- }
- DEBUG_PRINTF("h_shift %d c_shift %d\n", h_shift, c_shift);
- DEBUG_PRINTF("h_len %d c_len %d\n", h_len, c_len);
-
-#ifdef HAVE_AVX512
- if (data_len == 64) {
- return (~0ULL) << (h_shift + c_shift) >> c_shift;
- }
-#endif
- if (data_len == 16) {
- return (u16)(0xffff << (h_shift + c_shift)) >> c_shift;
- } else {
- return (~0u) << (h_shift + c_shift) >> c_shift;
- }
-}
-
-static rose_inline
-m128 getData128(const struct core_info *ci, s64a offset, u32 *valid_data_mask) {
- if (offset > 0 && offset + sizeof(m128) <= ci->len) {
- *valid_data_mask = 0xffff;
- return loadu128(ci->buf + offset);
- }
- ALIGN_DIRECTIVE u8 data[sizeof(m128)];
- *valid_data_mask = getBufferDataComplex(ci, offset, data, 16);
- return *(m128 *)data;
-}
-
-static rose_inline
-m256 getData256(const struct core_info *ci, s64a offset, u32 *valid_data_mask) {
- if (offset > 0 && offset + sizeof(m256) <= ci->len) {
- *valid_data_mask = ~0u;
- return loadu256(ci->buf + offset);
- }
- ALIGN_AVX_DIRECTIVE u8 data[sizeof(m256)];
- *valid_data_mask = getBufferDataComplex(ci, offset, data, 32);
- return *(m256 *)data;
-}
-
-#ifdef HAVE_AVX512
-static rose_inline
-m512 getData512(const struct core_info *ci, s64a offset, u64a *valid_data_mask) {
- if (offset > 0 && offset + sizeof(m512) <= ci->len) {
- *valid_data_mask = ~0ULL;
- return loadu512(ci->buf + offset);
- }
- ALIGN_CL_DIRECTIVE u8 data[sizeof(m512)];
- *valid_data_mask = getBufferDataComplex(ci, offset, data, 64);
- return *(m512 *)data;
-}
-#endif
-
-static rose_inline
-int roseCheckShufti16x8(const struct core_info *ci, const u8 *nib_mask,
- const u8 *bucket_select_mask, u32 neg_mask,
- s32 checkOffset, u64a end) {
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- u32 valid_data_mask = 0;
- m128 data = getData128(ci, offset, &valid_data_mask);
- if (unlikely(!valid_data_mask)) {
- return 1;
- }
-
- m256 nib_mask_m256 = loadu256(nib_mask);
- m128 bucket_select_mask_m128 = loadu128(bucket_select_mask);
- if (validateShuftiMask16x8(data, nib_mask_m256,
- bucket_select_mask_m128,
- neg_mask, valid_data_mask)) {
- DEBUG_PRINTF("check shufti 16x8 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-
-static rose_inline
-int roseCheckShufti16x16(const struct core_info *ci, const u8 *hi_mask,
- const u8 *lo_mask, const u8 *bucket_select_mask,
- u32 neg_mask, s32 checkOffset, u64a end) {
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- u32 valid_data_mask = 0;
- m128 data = getData128(ci, offset, &valid_data_mask);
- if (unlikely(!valid_data_mask)) {
- return 1;
- }
-
- m256 data_m256 = set2x128(data);
- m256 hi_mask_m256 = loadu256(hi_mask);
- m256 lo_mask_m256 = loadu256(lo_mask);
- m256 bucket_select_mask_m256 = loadu256(bucket_select_mask);
- if (validateShuftiMask16x16(data_m256, hi_mask_m256, lo_mask_m256,
- bucket_select_mask_m256,
- neg_mask, valid_data_mask)) {
- DEBUG_PRINTF("check shufti 16x16 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-
-static rose_inline
-int roseCheckShufti32x8(const struct core_info *ci, const u8 *hi_mask,
- const u8 *lo_mask, const u8 *bucket_select_mask,
- u32 neg_mask, s32 checkOffset, u64a end) {
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- u32 valid_data_mask = 0;
- m256 data = getData256(ci, offset, &valid_data_mask);
- if (unlikely(!valid_data_mask)) {
- return 1;
- }
-
- m128 hi_mask_m128 = loadu128(hi_mask);
- m128 lo_mask_m128 = loadu128(lo_mask);
- m256 hi_mask_m256 = set2x128(hi_mask_m128);
- m256 lo_mask_m256 = set2x128(lo_mask_m128);
- m256 bucket_select_mask_m256 = loadu256(bucket_select_mask);
- if (validateShuftiMask32x8(data, hi_mask_m256, lo_mask_m256,
- bucket_select_mask_m256,
- neg_mask, valid_data_mask)) {
- DEBUG_PRINTF("check shufti 32x8 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-
-static rose_inline
-int roseCheckShufti32x16(const struct core_info *ci, const u8 *hi_mask,
- const u8 *lo_mask, const u8 *bucket_select_mask_hi,
- const u8 *bucket_select_mask_lo, u32 neg_mask,
- s32 checkOffset, u64a end) {
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- u32 valid_data_mask = 0;
- m256 data = getData256(ci, offset, &valid_data_mask);
- if (unlikely(!valid_data_mask)) {
- return 1;
- }
-
- m256 hi_mask_1 = loadu2x128(hi_mask);
- m256 hi_mask_2 = loadu2x128(hi_mask + 16);
- m256 lo_mask_1 = loadu2x128(lo_mask);
- m256 lo_mask_2 = loadu2x128(lo_mask + 16);
-
- m256 bucket_mask_hi = loadu256(bucket_select_mask_hi);
- m256 bucket_mask_lo = loadu256(bucket_select_mask_lo);
- if (validateShuftiMask32x16(data, hi_mask_1, hi_mask_2,
- lo_mask_1, lo_mask_2, bucket_mask_hi,
- bucket_mask_lo, neg_mask, valid_data_mask)) {
- DEBUG_PRINTF("check shufti 32x16 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-
-#ifdef HAVE_AVX512
-static rose_inline
-int roseCheckShufti64x8(const struct core_info *ci, const u8 *hi_mask,
- const u8 *lo_mask, const u8 *bucket_select_mask,
- u64a neg_mask, s32 checkOffset, u64a end) {
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- u64a valid_data_mask = 0;
- m512 data = getData512(ci, offset, &valid_data_mask);
-
- if (unlikely(!valid_data_mask)) {
- return 1;
- }
-
- m512 hi_mask_m512 = loadu512(hi_mask);
- m512 lo_mask_m512 = loadu512(lo_mask);
- m512 bucket_select_mask_m512 = loadu512(bucket_select_mask);
- if (validateShuftiMask64x8(data, hi_mask_m512, lo_mask_m512,
- bucket_select_mask_m512,
- neg_mask, valid_data_mask)) {
- DEBUG_PRINTF("check shufti 64x8 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-
-static rose_inline
-int roseCheckShufti64x16(const struct core_info *ci, const u8 *hi_mask_1,
- const u8 *hi_mask_2, const u8 *lo_mask_1,
- const u8 *lo_mask_2, const u8 *bucket_select_mask_hi,
- const u8 *bucket_select_mask_lo, u64a neg_mask,
- s32 checkOffset, u64a end) {
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- u64a valid_data_mask = 0;
- m512 data = getData512(ci, offset, &valid_data_mask);
- if (unlikely(!valid_data_mask)) {
- return 1;
- }
-
- m512 hi_mask_1_m512 = loadu512(hi_mask_1);
- m512 hi_mask_2_m512 = loadu512(hi_mask_2);
- m512 lo_mask_1_m512 = loadu512(lo_mask_1);
- m512 lo_mask_2_m512 = loadu512(lo_mask_2);
-
- m512 bucket_select_mask_hi_m512 = loadu512(bucket_select_mask_hi);
- m512 bucket_select_mask_lo_m512 = loadu512(bucket_select_mask_lo);
- if (validateShuftiMask64x16(data, hi_mask_1_m512, hi_mask_2_m512,
- lo_mask_1_m512, lo_mask_2_m512,
- bucket_select_mask_hi_m512,
- bucket_select_mask_lo_m512,
- neg_mask, valid_data_mask)) {
- DEBUG_PRINTF("check shufti 64x16 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-#endif
-
-static rose_inline
-int roseCheckSingleLookaround(const struct RoseEngine *t,
- const struct hs_scratch *scratch,
- s8 checkOffset, u32 lookaroundReachIndex,
- u64a end) {
- assert(lookaroundReachIndex != MO_INVALID_IDX);
- const struct core_info *ci = &scratch->core_info;
- DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
- ci->buf_offset, ci->buf_offset + ci->len);
-
- const s64a base_offset = end - ci->buf_offset;
- const s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("base_offset=%lld\n", base_offset);
- DEBUG_PRINTF("checkOffset=%d offset=%lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- const u8 *reach = getByOffset(t, lookaroundReachIndex);
-
- u8 c;
- if (offset >= 0 && offset < (s64a)ci->len) {
- c = ci->buf[offset];
- } else if (offset < 0 && offset >= -(s64a)ci->hlen) {
- c = ci->hbuf[ci->hlen + offset];
- } else {
- return 1;
- }
-
- if (!reachHasBit(reach, c)) {
- DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
- return 0;
- }
-
- DEBUG_PRINTF("OK :)\n");
- return 1;
-}
-
-/**
- * \brief Scan around a literal, checking that the "lookaround" reach masks
- * are satisfied.
- */
-static rose_inline
-int roseCheckLookaround(const struct RoseEngine *t,
- const struct hs_scratch *scratch,
- u32 lookaroundLookIndex, u32 lookaroundReachIndex,
- u32 lookaroundCount, u64a end) {
- assert(lookaroundLookIndex != MO_INVALID_IDX);
- assert(lookaroundReachIndex != MO_INVALID_IDX);
- assert(lookaroundCount > 0);
-
- const struct core_info *ci = &scratch->core_info;
- DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
- ci->buf_offset, ci->buf_offset + ci->len);
-
- const s8 *look = getByOffset(t, lookaroundLookIndex);
- const s8 *look_end = look + lookaroundCount;
- assert(look < look_end);
-
- const u8 *reach = getByOffset(t, lookaroundReachIndex);
-
- // The following code assumes that the lookaround structures are ordered by
- // increasing offset.
-
- const s64a base_offset = end - ci->buf_offset;
- DEBUG_PRINTF("base_offset=%lld\n", base_offset);
- DEBUG_PRINTF("first look has offset %d\n", *look);
-
- // If our first check tells us we need to look at an offset before the
- // start of the stream, this role cannot match.
- if (unlikely(*look < 0 && (u64a)(0 - *look) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- // Skip over offsets that are before the history buffer.
- do {
- s64a offset = base_offset + *look;
- if (offset >= -(s64a)ci->hlen) {
- goto in_history;
- }
- DEBUG_PRINTF("look=%d before history\n", *look);
- look++;
- reach += REACH_BITVECTOR_LEN;
- } while (look < look_end);
-
- // History buffer.
- DEBUG_PRINTF("scan history (%zu looks left)\n", look_end - look);
- for (; look < look_end; ++look, reach += REACH_BITVECTOR_LEN) {
- in_history:
- ;
- s64a offset = base_offset + *look;
- DEBUG_PRINTF("reach=%p, rel offset=%lld\n", reach, offset);
-
- if (offset >= 0) {
- DEBUG_PRINTF("in buffer\n");
- goto in_buffer;
- }
-
- assert(offset >= -(s64a)ci->hlen && offset < 0);
- u8 c = ci->hbuf[ci->hlen + offset];
- if (!reachHasBit(reach, c)) {
- DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
- return 0;
- }
- }
- // Current buffer.
- DEBUG_PRINTF("scan buffer (%zu looks left)\n", look_end - look);
- for (; look < look_end; ++look, reach += REACH_BITVECTOR_LEN) {
- in_buffer:
- ;
- s64a offset = base_offset + *look;
- DEBUG_PRINTF("reach=%p, rel offset=%lld\n", reach, offset);
-
- if (offset >= (s64a)ci->len) {
- DEBUG_PRINTF("in the future\n");
- break;
- }
-
- assert(offset >= 0 && offset < (s64a)ci->len);
- u8 c = ci->buf[offset];
- if (!reachHasBit(reach, c)) {
- DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
- return 0;
- }
- }
-
- DEBUG_PRINTF("OK :)\n");
- return 1;
-}
-
-/**
- * \brief Try to find a matching path using the corresponding path mask at
- * each lookaround location.
- */
-static rose_inline
-int roseMultipathLookaround(const struct RoseEngine *t,
- const struct hs_scratch *scratch,
- u32 multipathLookaroundLookIndex,
- u32 multipathLookaroundReachIndex,
- u32 multipathLookaroundCount,
- s32 last_start, const u8 *start_mask,
- u64a end) {
- assert(multipathLookaroundCount > 0);
-
- const struct core_info *ci = &scratch->core_info;
- DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
- ci->buf_offset, ci->buf_offset + ci->len);
-
- const s8 *look = getByOffset(t, multipathLookaroundLookIndex);
- const s8 *look_end = look + multipathLookaroundCount;
- assert(look < look_end);
-
- const u8 *reach = getByOffset(t, multipathLookaroundReachIndex);
-
- const s64a base_offset = (s64a)end - ci->buf_offset;
- DEBUG_PRINTF("base_offset=%lld\n", base_offset);
-
- u8 path = 0xff;
-
- assert(last_start < 0);
-
- if (unlikely((u64a)(0 - last_start) > end)) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
-
- s8 base_look_offset = *look;
- do {
- s64a offset = base_offset + *look;
- u32 start_offset = (u32)(*look - base_look_offset);
- DEBUG_PRINTF("start_mask[%u] = %x\n", start_offset,
- start_mask[start_offset]);
- path = start_mask[start_offset];
- if (offset >= -(s64a)ci->hlen) {
- break;
- }
- DEBUG_PRINTF("look=%d before history\n", *look);
- look++;
- reach += MULTI_REACH_BITVECTOR_LEN;
- } while (look < look_end);
-
- DEBUG_PRINTF("scan history (%zu looks left)\n", look_end - look);
- for (; look < look_end; ++look, reach += MULTI_REACH_BITVECTOR_LEN) {
- s64a offset = base_offset + *look;
- DEBUG_PRINTF("reach=%p, rel offset=%lld\n", reach, offset);
-
- if (offset >= 0) {
- DEBUG_PRINTF("in buffer\n");
- break;
- }
-
- assert(offset >= -(s64a)ci->hlen && offset < 0);
- u8 c = ci->hbuf[ci->hlen + offset];
- path &= reach[c];
- DEBUG_PRINTF("reach[%x] = %02x path = %0xx\n", c, reach[c], path);
- if (!path) {
- DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
- return 0;
- }
- }
-
- DEBUG_PRINTF("scan buffer (%zu looks left)\n", look_end - look);
- for (; look < look_end; ++look, reach += MULTI_REACH_BITVECTOR_LEN) {
- s64a offset = base_offset + *look;
- DEBUG_PRINTF("reach=%p, rel offset=%lld\n", reach, offset);
-
- if (offset >= (s64a)ci->len) {
- DEBUG_PRINTF("in the future\n");
- break;
- }
-
- assert(offset >= 0 && offset < (s64a)ci->len);
- u8 c = ci->buf[offset];
- path &= reach[c];
- DEBUG_PRINTF("reach[%x] = %02x path = %0xx\n", c, reach[c], path);
- if (!path) {
- DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
- return 0;
- }
- }
-
- DEBUG_PRINTF("OK :)\n");
- return 1;
-}
-
-static never_inline
-int roseCheckMultipathShufti16x8(const struct hs_scratch *scratch,
- const struct ROSE_STRUCT_CHECK_MULTIPATH_SHUFTI_16x8 *ri,
- u64a end) {
- const struct core_info *ci = &scratch->core_info;
- s32 checkOffset = ri->base_offset;
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- assert(ri->last_start <= 0);
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- if ((u64a)(0 - ri->last_start) > end) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
- }
-
- u32 valid_data_mask;
- m128 data_init = getData128(ci, offset, &valid_data_mask);
- m128 data_select_mask = loadu128(ri->data_select_mask);
-
- u32 valid_path_mask = 0;
- if (unlikely(!(valid_data_mask & 1))) {
- DEBUG_PRINTF("lose part of backward data\n");
- DEBUG_PRINTF("valid_data_mask %x\n", valid_data_mask);
-
- m128 expand_valid;
- u64a expand_mask = 0x8080808080808080ULL;
- u64a valid_lo = expand64(valid_data_mask & 0xff, expand_mask);
- u64a valid_hi = expand64(valid_data_mask >> 8, expand_mask);
- DEBUG_PRINTF("expand_hi %llx\n", valid_hi);
- DEBUG_PRINTF("expand_lo %llx\n", valid_lo);
- expand_valid = set64x2(valid_hi, valid_lo);
- valid_path_mask = ~movemask128(pshufb_m128(expand_valid,
- data_select_mask));
- }
-
- m128 data = pshufb_m128(data_init, data_select_mask);
- m256 nib_mask = loadu256(ri->nib_mask);
- m128 bucket_select_mask = loadu128(ri->bucket_select_mask);
-
- u32 hi_bits_mask = ri->hi_bits_mask;
- u32 lo_bits_mask = ri->lo_bits_mask;
- u32 neg_mask = ri->neg_mask;
-
- if (validateMultipathShuftiMask16x8(data, nib_mask,
- bucket_select_mask,
- hi_bits_mask, lo_bits_mask,
- neg_mask, valid_path_mask)) {
- DEBUG_PRINTF("check multi-path shufti-16x8 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-
-static never_inline
-int roseCheckMultipathShufti32x8(const struct hs_scratch *scratch,
- const struct ROSE_STRUCT_CHECK_MULTIPATH_SHUFTI_32x8 *ri,
- u64a end) {
- const struct core_info *ci = &scratch->core_info;
- s32 checkOffset = ri->base_offset;
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- assert(ri->last_start <= 0);
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- if ((u64a)(0 - ri->last_start) > end) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
- }
-
- u32 valid_data_mask;
- m128 data_m128 = getData128(ci, offset, &valid_data_mask);
- m256 data_double = set2x128(data_m128);
- m256 data_select_mask = loadu256(ri->data_select_mask);
-
- u32 valid_path_mask = 0;
- m256 expand_valid;
- if (unlikely(!(valid_data_mask & 1))) {
- DEBUG_PRINTF("lose part of backward data\n");
- DEBUG_PRINTF("valid_data_mask %x\n", valid_data_mask);
-
- u64a expand_mask = 0x8080808080808080ULL;
- u64a valid_lo = expand64(valid_data_mask & 0xff, expand_mask);
- u64a valid_hi = expand64(valid_data_mask >> 8, expand_mask);
- DEBUG_PRINTF("expand_hi %llx\n", valid_hi);
- DEBUG_PRINTF("expand_lo %llx\n", valid_lo);
- expand_valid = set64x4(valid_hi, valid_lo, valid_hi,
- valid_lo);
- valid_path_mask = ~movemask256(pshufb_m256(expand_valid,
- data_select_mask));
- }
-
- m256 data = pshufb_m256(data_double, data_select_mask);
- m256 hi_mask = loadu2x128(ri->hi_mask);
- m256 lo_mask = loadu2x128(ri->lo_mask);
- m256 bucket_select_mask = loadu256(ri->bucket_select_mask);
-
- u32 hi_bits_mask = ri->hi_bits_mask;
- u32 lo_bits_mask = ri->lo_bits_mask;
- u32 neg_mask = ri->neg_mask;
-
- if (validateMultipathShuftiMask32x8(data, hi_mask, lo_mask,
- bucket_select_mask,
- hi_bits_mask, lo_bits_mask,
- neg_mask, valid_path_mask)) {
- DEBUG_PRINTF("check multi-path shufti-32x8 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-
-static never_inline
-int roseCheckMultipathShufti32x16(const struct hs_scratch *scratch,
- const struct ROSE_STRUCT_CHECK_MULTIPATH_SHUFTI_32x16 *ri,
- u64a end) {
- const struct core_info *ci = &scratch->core_info;
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s32 checkOffset = ri->base_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- assert(ri->last_start <= 0);
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- if ((u64a)(0 - ri->last_start) > end) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
- }
-
- u32 valid_data_mask;
- m128 data_m128 = getData128(ci, offset, &valid_data_mask);
- m256 data_double = set2x128(data_m128);
- m256 data_select_mask = loadu256(ri->data_select_mask);
-
- u32 valid_path_mask = 0;
- m256 expand_valid;
- if (unlikely(!(valid_data_mask & 1))) {
- DEBUG_PRINTF("lose part of backward data\n");
- DEBUG_PRINTF("valid_data_mask %x\n", valid_data_mask);
-
- u64a expand_mask = 0x8080808080808080ULL;
- u64a valid_lo = expand64(valid_data_mask & 0xff, expand_mask);
- u64a valid_hi = expand64(valid_data_mask >> 8, expand_mask);
- DEBUG_PRINTF("expand_hi %llx\n", valid_hi);
- DEBUG_PRINTF("expand_lo %llx\n", valid_lo);
- expand_valid = set64x4(valid_hi, valid_lo, valid_hi,
- valid_lo);
- valid_path_mask = ~movemask256(pshufb_m256(expand_valid,
- data_select_mask));
- }
-
- m256 data = pshufb_m256(data_double, data_select_mask);
-
- m256 hi_mask_1 = loadu2x128(ri->hi_mask);
- m256 hi_mask_2 = loadu2x128(ri->hi_mask + 16);
- m256 lo_mask_1 = loadu2x128(ri->lo_mask);
- m256 lo_mask_2 = loadu2x128(ri->lo_mask + 16);
-
- m256 bucket_select_mask_hi = loadu256(ri->bucket_select_mask_hi);
- m256 bucket_select_mask_lo = loadu256(ri->bucket_select_mask_lo);
-
- u32 hi_bits_mask = ri->hi_bits_mask;
- u32 lo_bits_mask = ri->lo_bits_mask;
- u32 neg_mask = ri->neg_mask;
-
- if (validateMultipathShuftiMask32x16(data, hi_mask_1, hi_mask_2,
- lo_mask_1, lo_mask_2,
- bucket_select_mask_hi,
- bucket_select_mask_lo,
- hi_bits_mask, lo_bits_mask,
- neg_mask, valid_path_mask)) {
- DEBUG_PRINTF("check multi-path shufti-32x16 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-
-static never_inline
-int roseCheckMultipathShufti64(const struct hs_scratch *scratch,
- const struct ROSE_STRUCT_CHECK_MULTIPATH_SHUFTI_64 *ri,
- u64a end) {
- const struct core_info *ci = &scratch->core_info;
- const s64a base_offset = (s64a)end - ci->buf_offset;
- s32 checkOffset = ri->base_offset;
- s64a offset = base_offset + checkOffset;
- DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
- DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
-
- if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
- if ((u64a)(0 - ri->last_start) > end) {
- DEBUG_PRINTF("too early, fail\n");
- return 0;
- }
- }
-
- u32 valid_data_mask;
- m128 data_m128 = getData128(ci, offset, &valid_data_mask);
- m256 data_m256 = set2x128(data_m128);
- m256 data_select_mask_1 = loadu256(ri->data_select_mask);
- m256 data_select_mask_2 = loadu256(ri->data_select_mask + 32);
-
- u64a valid_path_mask = 0;
- m256 expand_valid;
- if (unlikely(!(valid_data_mask & 1))) {
- DEBUG_PRINTF("lose part of backward data\n");
- DEBUG_PRINTF("valid_data_mask %x\n", valid_data_mask);
-
- u64a expand_mask = 0x8080808080808080ULL;
- u64a valid_lo = expand64(valid_data_mask & 0xff, expand_mask);
- u64a valid_hi = expand64(valid_data_mask >> 8, expand_mask);
- DEBUG_PRINTF("expand_hi %llx\n", valid_hi);
- DEBUG_PRINTF("expand_lo %llx\n", valid_lo);
- expand_valid = set64x4(valid_hi, valid_lo, valid_hi,
- valid_lo);
- u32 valid_path_1 = movemask256(pshufb_m256(expand_valid,
- data_select_mask_1));
- u32 valid_path_2 = movemask256(pshufb_m256(expand_valid,
- data_select_mask_2));
- valid_path_mask = ~((u64a)valid_path_1 | (u64a)valid_path_2 << 32);
- }
-
- m256 data_1 = pshufb_m256(data_m256, data_select_mask_1);
- m256 data_2 = pshufb_m256(data_m256, data_select_mask_2);
-
- m256 hi_mask = loadu2x128(ri->hi_mask);
- m256 lo_mask = loadu2x128(ri->lo_mask);
-
- m256 bucket_select_mask_1 = loadu256(ri->bucket_select_mask);
- m256 bucket_select_mask_2 = loadu256(ri->bucket_select_mask + 32);
-
- u64a hi_bits_mask = ri->hi_bits_mask;
- u64a lo_bits_mask = ri->lo_bits_mask;
- u64a neg_mask = ri->neg_mask;
-
- if (validateMultipathShuftiMask64(data_1, data_2, hi_mask, lo_mask,
- bucket_select_mask_1,
- bucket_select_mask_2, hi_bits_mask,
- lo_bits_mask, neg_mask,
- valid_path_mask)) {
- DEBUG_PRINTF("check multi-path shufti-64 successfully\n");
- return 1;
- } else {
- return 0;
- }
-}
-
-static rose_inline
+#include "catchup.h"
+#include "counting_miracle.h"
+#include "infix.h"
+#include "match.h"
+#include "miracle.h"
+#include "report.h"
+#include "rose_common.h"
+#include "rose_internal.h"
+#include "rose_program.h"
+#include "rose_types.h"
+#include "validate_mask.h"
+#include "validate_shufti.h"
+#include "runtime.h"
+#include "util/compare.h"
+#include "util/copybytes.h"
+#include "util/fatbit.h"
+#include "util/multibit.h"
+
+/* Inline implementation follows. */
+
+static rose_inline
+void rosePushDelayedMatch(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u32 delay,
+ u32 delay_index, u64a offset) {
+ assert(delay);
+
+ const u32 src_slot_index = delay;
+ u32 slot_index = (src_slot_index + offset) & DELAY_MASK;
+
+ struct RoseContext *tctxt = &scratch->tctxt;
+ if (offset + src_slot_index <= tctxt->delayLastEndOffset) {
+ DEBUG_PRINTF("skip too late\n");
+ return;
+ }
+
+ const u32 delay_count = t->delay_count;
+ struct fatbit **delaySlots = getDelaySlots(scratch);
+ struct fatbit *slot = delaySlots[slot_index];
+
+ DEBUG_PRINTF("pushing tab %u into slot %u\n", delay_index, slot_index);
+ if (!(tctxt->filledDelayedSlots & (1U << slot_index))) {
+ tctxt->filledDelayedSlots |= 1U << slot_index;
+ fatbit_clear(slot);
+ }
+
+ fatbit_set(slot, delay_count, delay_index);
+}
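+/*
+ * Editorial sketch (hedged, not engine code): delayed literals sit in a
+ * ring of fatbit slots indexed modulo the ring size via DELAY_MASK.
+ * Assuming a hypothetical 32-slot ring (DELAY_MASK == 31):
+ *
+ *   slot_index = (delay + offset) & DELAY_MASK;  // (5 + 60) & 31 == 1
+ *
+ * so a literal matched at offset 60 with delay 5 is replayed when slot 1
+ * is drained at offset 65.
+ */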
+
+static rose_inline
+void recordAnchoredLiteralMatch(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u32 anch_id,
+ u64a end) {
+ assert(end);
+
+ if (end <= t->floatingMinLiteralMatchOffset) {
+ return;
+ }
+
+ struct fatbit **anchoredLiteralRows = getAnchoredLiteralLog(scratch);
+
+ DEBUG_PRINTF("record %u (of %u) @ %llu\n", anch_id, t->anchored_count, end);
+
+ if (!bf64_set(&scratch->al_log_sum, end - 1)) {
+ // first time, clear row
+ DEBUG_PRINTF("clearing %llu/%u\n", end - 1, t->anchored_count);
+ fatbit_clear(anchoredLiteralRows[end - 1]);
+ }
+
+ assert(anch_id < t->anchored_count);
+ fatbit_set(anchoredLiteralRows[end - 1], t->anchored_count, anch_id);
+}
+
+static rose_inline
+char roseLeftfixCheckMiracles(const struct RoseEngine *t,
+ const struct LeftNfaInfo *left,
+ struct core_info *ci, struct mq *q, u64a end,
+ const char is_infix) {
+ if (!is_infix && left->transient) {
+ // Miracles won't help us with transient leftfix engines; they only
+ // scan for a limited time anyway.
+ return 1;
+ }
+
+ if (!left->stopTable) {
+ return 1;
+ }
+
+ DEBUG_PRINTF("looking for miracle on queue %u\n", q->nfa->queueIndex);
+
+ const s64a begin_loc = q_cur_loc(q);
+ const s64a end_loc = end - ci->buf_offset;
+
+ s64a miracle_loc;
+ if (roseMiracleOccurs(t, left, ci, begin_loc, end_loc, &miracle_loc)) {
+ goto found_miracle;
+ }
+
+ if (roseCountingMiracleOccurs(t, left, ci, begin_loc, end_loc,
+ &miracle_loc)) {
+ goto found_miracle;
+ }
+
+ return 1;
+
+found_miracle:
+ DEBUG_PRINTF("miracle at %lld\n", miracle_loc);
+ assert(miracle_loc >= begin_loc);
+
+ // If we're a prefix, then a miracle effectively results in us needing to
+ // re-init our state and start fresh.
+ if (!is_infix) {
+ if (miracle_loc != begin_loc) {
+ DEBUG_PRINTF("re-init prefix state\n");
+ q->cur = q->end = 0;
+ pushQueueAt(q, 0, MQE_START, miracle_loc);
+ pushQueueAt(q, 1, MQE_TOP, miracle_loc);
+ nfaQueueInitState(q->nfa, q);
+ }
+ return 1;
+ }
+
+ // Otherwise, we're an infix. Remove tops before the miracle from the queue
+ // and re-init at that location.
+
+ q_skip_forward_to(q, miracle_loc);
+
+ if (q_last_type(q) == MQE_START) {
+ DEBUG_PRINTF("miracle caused infix to die\n");
+ return 0;
+ }
+
+ DEBUG_PRINTF("re-init infix state\n");
+ assert(q->items[q->cur].type == MQE_START);
+ q->items[q->cur].location = miracle_loc;
+ nfaQueueInitState(q->nfa, q);
+
+ return 1;
+}
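+/*
+ * Editorial note (hedged): a "miracle" is a location past which the
+ * leftfix is known to hold no surviving state -- e.g. a byte that its
+ * stop table says no state survives -- so the queue can be re-initialised
+ * at that location instead of replaying everything before it.
+ */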
+
+static rose_inline
+hwlmcb_rv_t roseTriggerSuffix(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u32 qi, u32 top,
+ u64a som, u64a end) {
+ DEBUG_PRINTF("suffix qi=%u, top event=%u\n", qi, top);
+
+ struct core_info *ci = &scratch->core_info;
+ u8 *aa = getActiveLeafArray(t, ci->state);
+ const u32 aaCount = t->activeArrayCount;
+ const u32 qCount = t->queueCount;
+ struct mq *q = &scratch->queues[qi];
+ const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
+ const struct NFA *nfa = getNfaByInfo(t, info);
+
+ s64a loc = (s64a)end - ci->buf_offset;
+ assert(loc <= (s64a)ci->len && loc >= -(s64a)ci->hlen);
+
+ if (!mmbit_set(aa, aaCount, qi)) {
+ initQueue(q, qi, t, scratch);
+ nfaQueueInitState(nfa, q);
+ pushQueueAt(q, 0, MQE_START, loc);
+ fatbit_set(scratch->aqa, qCount, qi);
+ } else if (info->no_retrigger) {
+ DEBUG_PRINTF("yawn\n");
+ /* nfa only needs one top; we can go home now */
+ return HWLM_CONTINUE_MATCHING;
+ } else if (!fatbit_set(scratch->aqa, qCount, qi)) {
+ initQueue(q, qi, t, scratch);
+ loadStreamState(nfa, q, 0);
+ pushQueueAt(q, 0, MQE_START, 0);
+ } else if (isQueueFull(q)) {
+ DEBUG_PRINTF("queue %u full -> catching up nfas\n", qi);
+ if (info->eod) {
+ /* can catch up the suffix independently; no priority queue needed */
+ q->context = NULL;
+ pushQueueNoMerge(q, MQE_END, loc);
+ nfaQueueExecRose(q->nfa, q, MO_INVALID_IDX);
+ q->cur = q->end = 0;
+ pushQueueAt(q, 0, MQE_START, loc);
+ } else if (ensureQueueFlushed(t, scratch, qi, loc)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+
+ assert(top == MQE_TOP || (top >= MQE_TOP_FIRST && top < MQE_INVALID));
+ pushQueueSom(q, top, loc, som);
+
+ if (q_cur_loc(q) == (s64a)ci->len && !info->eod) {
+ /* we may not run the nfa; need to ensure state is fine */
+ DEBUG_PRINTF("empty run\n");
+ pushQueueNoMerge(q, MQE_END, loc);
+ char alive = nfaQueueExec(nfa, q, loc);
+ if (alive) {
+ q->cur = q->end = 0;
+ pushQueueAt(q, 0, MQE_START, loc);
+ } else {
+ mmbit_unset(aa, aaCount, qi);
+ fatbit_unset(scratch->aqa, qCount, qi);
+ }
+ }
+
+ return HWLM_CONTINUE_MATCHING;
+}
+
+static really_inline
+char roseTestLeftfix(const struct RoseEngine *t, struct hs_scratch *scratch,
+ u32 qi, u32 leftfixLag, ReportID leftfixReport, u64a end,
+ const char is_infix) {
+ struct core_info *ci = &scratch->core_info;
+
+ u32 ri = queueToLeftIndex(t, qi);
+ const struct LeftNfaInfo *left = getLeftTable(t) + ri;
+
+ DEBUG_PRINTF("testing %s %s %u/%u with lag %u (maxLag=%u)\n",
+ (left->transient ? "transient" : "active"),
+ (is_infix ? "infix" : "prefix"),
+ ri, qi, leftfixLag, left->maxLag);
+
+ assert(leftfixLag <= left->maxLag);
+ assert(left->infix == is_infix);
+ assert(!is_infix || !left->transient); // Only prefixes can be transient.
+
+ struct mq *q = scratch->queues + qi;
+ char *state = scratch->core_info.state;
+ u8 *activeLeftArray = getActiveLeftArray(t, state);
+ u32 qCount = t->queueCount;
+ u32 arCount = t->activeLeftCount;
+
+ if (!mmbit_isset(activeLeftArray, arCount, ri)) {
+ DEBUG_PRINTF("engine is dead nothing to see here\n");
+ return 0;
+ }
+
+ if (unlikely(end < leftfixLag)) {
+ assert(0); /* lag is the literal length */
+ return 0;
+ }
+
+ if (nfaSupportsZombie(getNfaByQueue(t, qi)) && ci->buf_offset
+ && !fatbit_isset(scratch->aqa, qCount, qi)
+ && isZombie(t, state, left)) {
+ DEBUG_PRINTF("zombie\n");
+ return 1;
+ }
+
+ if (!fatbit_set(scratch->aqa, qCount, qi)) {
+ DEBUG_PRINTF("initing q %u\n", qi);
+ initRoseQueue(t, qi, left, scratch);
+ if (ci->buf_offset) { // there have been writes before us!
+ s32 sp;
+ if (!is_infix && left->transient) {
+ sp = -(s32)ci->hlen;
+ } else {
+ sp = -(s32)loadRoseDelay(t, state, left);
+ }
+
+ /* transient nfas are always started fresh -> state not maintained
+ * at stream boundary */
+
+ pushQueueAt(q, 0, MQE_START, sp);
+ if (is_infix || (ci->buf_offset + sp > 0 && !left->transient)) {
+ loadStreamState(q->nfa, q, sp);
+ } else {
+ pushQueueAt(q, 1, MQE_TOP, sp);
+ nfaQueueInitState(q->nfa, q);
+ }
+ } else { // first write ever
+ pushQueueAt(q, 0, MQE_START, 0);
+ pushQueueAt(q, 1, MQE_TOP, 0);
+ nfaQueueInitState(q->nfa, q);
+ }
+ }
+
+ s64a loc = (s64a)end - ci->buf_offset - leftfixLag;
+ assert(loc >= q_cur_loc(q) || left->eager);
+ assert(leftfixReport != MO_INVALID_IDX);
+
+ if (!is_infix && left->transient) {
+ s64a start_loc = loc - left->transient;
+ if (q_cur_loc(q) < start_loc) {
+ q->cur = q->end = 0;
+ pushQueueAt(q, 0, MQE_START, start_loc);
+ pushQueueAt(q, 1, MQE_TOP, start_loc);
+ nfaQueueInitState(q->nfa, q);
+ }
+ }
+
+ if (q_cur_loc(q) < loc || q_last_type(q) != MQE_START) {
+ if (is_infix) {
+ if (infixTooOld(q, loc)) {
+ DEBUG_PRINTF("infix %u died of old age\n", ri);
+ goto nfa_dead;
+ }
+
+ reduceInfixQueue(q, loc, left->maxQueueLen, q->nfa->maxWidth);
+ }
+
+ if (!roseLeftfixCheckMiracles(t, left, ci, q, end, is_infix)) {
+ DEBUG_PRINTF("leftfix %u died due to miracle\n", ri);
+ goto nfa_dead;
+ }
+
+#ifdef DEBUG
+ debugQueue(q);
+#endif
+
+ pushQueueNoMerge(q, MQE_END, loc);
+
+ char rv = nfaQueueExecRose(q->nfa, q, leftfixReport);
+ if (!rv) { /* nfa is dead */
+ DEBUG_PRINTF("leftfix %u died while trying to catch up\n", ri);
+ goto nfa_dead;
+ }
+
+ // Queue must have next start loc before we call nfaInAcceptState.
+ q->cur = q->end = 0;
+ pushQueueAt(q, 0, MQE_START, loc);
+
+ DEBUG_PRINTF("checking for report %u\n", leftfixReport);
+ DEBUG_PRINTF("leftfix done %hhd\n", (signed char)rv);
+ return rv == MO_MATCHES_PENDING;
+ } else if (q_cur_loc(q) > loc) {
+ /* an eager leftfix may have already progressed past loc if there is no
+ * match at loc. */
+ assert(left->eager);
+ return 0;
+ } else {
+ assert(q_cur_loc(q) == loc);
+ DEBUG_PRINTF("checking for report %u\n", leftfixReport);
+ char rv = nfaInAcceptState(q->nfa, leftfixReport, q);
+ DEBUG_PRINTF("leftfix done %hhd\n", (signed char)rv);
+ return rv;
+ }
+
+nfa_dead:
+ mmbit_unset(activeLeftArray, arCount, ri);
+ scratch->tctxt.groups &= left->squash_mask;
+ return 0;
+}
+
+static rose_inline
+char roseTestPrefix(const struct RoseEngine *t, struct hs_scratch *scratch,
+ u32 qi, u32 leftfixLag, ReportID leftfixReport, u64a end) {
+ return roseTestLeftfix(t, scratch, qi, leftfixLag, leftfixReport, end, 0);
+}
+
+static rose_inline
+char roseTestInfix(const struct RoseEngine *t, struct hs_scratch *scratch,
+ u32 qi, u32 leftfixLag, ReportID leftfixReport, u64a end) {
+ return roseTestLeftfix(t, scratch, qi, leftfixLag, leftfixReport, end, 1);
+}
+
+static rose_inline
+void roseTriggerInfix(const struct RoseEngine *t, struct hs_scratch *scratch,
+ u64a start, u64a end, u32 qi, u32 topEvent, u8 cancel) {
+ struct core_info *ci = &scratch->core_info;
+ s64a loc = (s64a)end - ci->buf_offset;
+
+ u32 ri = queueToLeftIndex(t, qi);
+ assert(topEvent < MQE_INVALID);
+
+ const struct LeftNfaInfo *left = getLeftInfoByQueue(t, qi);
+ assert(!left->transient);
+
+ DEBUG_PRINTF("rose %u (qi=%u) event %u\n", ri, qi, topEvent);
+
+ struct mq *q = scratch->queues + qi;
+ const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
+
+ char *state = ci->state;
+ u8 *activeLeftArray = getActiveLeftArray(t, state);
+ const u32 arCount = t->activeLeftCount;
+ char alive = mmbit_set(activeLeftArray, arCount, ri);
+
+ if (alive && info->no_retrigger) {
+ DEBUG_PRINTF("yawn\n");
+ return;
+ }
+
+ struct fatbit *aqa = scratch->aqa;
+ const u32 qCount = t->queueCount;
+
+ if (alive && nfaSupportsZombie(getNfaByInfo(t, info)) && ci->buf_offset &&
+ !fatbit_isset(aqa, qCount, qi) && isZombie(t, state, left)) {
+ DEBUG_PRINTF("yawn - zombie\n");
+ return;
+ }
+
+ if (cancel) {
+ DEBUG_PRINTF("dominating top: (re)init\n");
+ fatbit_set(aqa, qCount, qi);
+ initRoseQueue(t, qi, left, scratch);
+ pushQueueAt(q, 0, MQE_START, loc);
+ nfaQueueInitState(q->nfa, q);
+ } else if (!fatbit_set(aqa, qCount, qi)) {
+ DEBUG_PRINTF("initing %u\n", qi);
+ initRoseQueue(t, qi, left, scratch);
+ if (alive) {
+ s32 sp = -(s32)loadRoseDelay(t, state, left);
+ pushQueueAt(q, 0, MQE_START, sp);
+ loadStreamState(q->nfa, q, sp);
+ } else {
+ pushQueueAt(q, 0, MQE_START, loc);
+ nfaQueueInitState(q->nfa, q);
+ }
+ } else if (!alive) {
+ q->cur = q->end = 0;
+ pushQueueAt(q, 0, MQE_START, loc);
+ nfaQueueInitState(q->nfa, q);
+ } else if (isQueueFull(q)) {
+ reduceInfixQueue(q, loc, left->maxQueueLen, q->nfa->maxWidth);
+
+ if (isQueueFull(q)) {
+ /* still full - reduceInfixQueue did nothing */
+ DEBUG_PRINTF("queue %u full (%u items) -> catching up nfa\n", qi,
+ q->end - q->cur);
+ pushQueueNoMerge(q, MQE_END, loc);
+ nfaQueueExecRose(q->nfa, q, MO_INVALID_IDX);
+
+ q->cur = q->end = 0;
+ pushQueueAt(q, 0, MQE_START, loc);
+ }
+ }
+
+ pushQueueSom(q, topEvent, loc, start);
+}
+
+static rose_inline
+hwlmcb_rv_t roseReport(const struct RoseEngine *t, struct hs_scratch *scratch,
+ u64a end, ReportID onmatch, s32 offset_adjust,
+ u32 ekey) {
+ DEBUG_PRINTF("firing callback onmatch=%u, end=%llu\n", onmatch, end);
+ updateLastMatchOffset(&scratch->tctxt, end);
+
+ int cb_rv = roseDeliverReport(end, onmatch, offset_adjust, scratch, ekey);
+ if (cb_rv == MO_HALT_MATCHING) {
+ DEBUG_PRINTF("termination requested\n");
+ return HWLM_TERMINATE_MATCHING;
+ }
+
+ if (ekey == INVALID_EKEY || cb_rv == ROSE_CONTINUE_MATCHING_NO_EXHAUST) {
+ return HWLM_CONTINUE_MATCHING;
+ }
+
+ return roseHaltIfExhausted(t, scratch);
+}
+
+static rose_inline
+hwlmcb_rv_t roseReportComb(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end,
+ ReportID onmatch, s32 offset_adjust, u32 ekey) {
+ DEBUG_PRINTF("firing callback onmatch=%u, end=%llu\n", onmatch, end);
+
+ int cb_rv = roseDeliverReport(end, onmatch, offset_adjust, scratch, ekey);
+ if (cb_rv == MO_HALT_MATCHING) {
+ DEBUG_PRINTF("termination requested\n");
+ return HWLM_TERMINATE_MATCHING;
+ }
+
+ if (ekey == INVALID_EKEY || cb_rv == ROSE_CONTINUE_MATCHING_NO_EXHAUST) {
+ return HWLM_CONTINUE_MATCHING;
+ }
+
+ return roseHaltIfExhausted(t, scratch);
+}
+
+/* catches up engines enough to ensure any earlier mpv triggers are enqueued
+ * and then adds the trigger to the mpv queue. */
+static rose_inline
+hwlmcb_rv_t roseCatchUpAndHandleChainMatch(const struct RoseEngine *t,
+ struct hs_scratch *scratch,
+ u32 event, u64a top_squash_distance,
+ u64a end, const char in_catchup) {
+ if (!in_catchup &&
+ roseCatchUpMpvFeeders(t, scratch, end) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ return roseHandleChainMatch(t, scratch, event, top_squash_distance, end,
+ in_catchup);
+}
+
+static rose_inline
+void roseHandleSom(struct hs_scratch *scratch, const struct som_operation *sr,
+ u64a end) {
+ DEBUG_PRINTF("end=%llu, minMatchOffset=%llu\n", end,
+ scratch->tctxt.minMatchOffset);
+
+ updateLastMatchOffset(&scratch->tctxt, end);
+ handleSomInternal(scratch, sr, end);
+}
+
+static rose_inline
+hwlmcb_rv_t roseReportSom(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a start, u64a end,
+ ReportID onmatch, s32 offset_adjust, u32 ekey) {
+ DEBUG_PRINTF("firing som callback onmatch=%u, start=%llu, end=%llu\n",
+ onmatch, start, end);
+ updateLastMatchOffset(&scratch->tctxt, end);
+
+ int cb_rv = roseDeliverSomReport(start, end, onmatch, offset_adjust,
+ scratch, ekey);
+ if (cb_rv == MO_HALT_MATCHING) {
+ DEBUG_PRINTF("termination requested\n");
+ return HWLM_TERMINATE_MATCHING;
+ }
+
+ if (ekey == INVALID_EKEY || cb_rv == ROSE_CONTINUE_MATCHING_NO_EXHAUST) {
+ return HWLM_CONTINUE_MATCHING;
+ }
+
+ return roseHaltIfExhausted(t, scratch);
+}
+
+static rose_inline
+void roseHandleSomSom(struct hs_scratch *scratch,
+ const struct som_operation *sr, u64a start, u64a end) {
+ DEBUG_PRINTF("start=%llu, end=%llu, minMatchOffset=%llu\n", start, end,
+ scratch->tctxt.minMatchOffset);
+
+ updateLastMatchOffset(&scratch->tctxt, end);
+ setSomFromSomAware(scratch, sr, start, end);
+}
+
+static rose_inline
+hwlmcb_rv_t roseSetExhaust(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u32 ekey) {
+ assert(scratch);
+ assert(scratch->magic == SCRATCH_MAGIC);
+
+ struct core_info *ci = &scratch->core_info;
+
+ assert(!can_stop_matching(scratch));
+ assert(!isExhausted(ci->rose, ci->exhaustionVector, ekey));
+
+ markAsMatched(ci->rose, ci->exhaustionVector, ekey);
+
+ return roseHaltIfExhausted(t, scratch);
+}
+
+static really_inline
+int reachHasBit(const u8 *reach, u8 c) {
+ return !!(reach[c / 8U] & (u8)1U << (c % 8U));
+}
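+/*
+ * Worked example (illustrative): for c == 0x41 ('A'), reachHasBit tests
+ * bit 1 of reach[8], since 0x41 / 8 == 8 and 0x41 % 8 == 1.
+ */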
+
+/*
+ * Generate an 8-byte valid_mask with 'high' zero bytes at the most
+ * significant end, 'low' zero bytes at the least significant end, and
+ * (8 - high - low) bytes of 0xff in the middle.
+ */
+static rose_inline
+u64a generateValidMask(const s32 high, const s32 low) {
+ assert(high + low < 8);
+ DEBUG_PRINTF("high %d low %d\n", high, low);
+ const u64a ones = ~0ull;
+ return (ones << ((high + low) * 8)) >> (high * 8);
+}
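+/*
+ * Worked example (illustrative): generateValidMask(2, 1) computes
+ * (~0ull << 24) >> 16 == 0x0000ffffffffff00 -- the two most significant
+ * bytes and the least significant byte are zero, with five 0xff bytes in
+ * between.
+ */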
+
+/*
+ * Perform the single-byte check used when only one lookaround entry
+ * exists and it is a single mask.
+ * Returns success if the byte lies in the future or before history
+ * (i.e. the offset falls outside the current and history buffers).
+ */
+static rose_inline
+int roseCheckByte(const struct core_info *ci, u8 and_mask, u8 cmp_mask,
+ u8 negation, s32 checkOffset, u64a end) {
+ DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
+ ci->buf_offset, ci->buf_offset + ci->len);
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ const s64a base_offset = end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("checkOffset=%d offset=%lld\n", checkOffset, offset);
+ u8 c;
+ if (offset >= 0) {
+ if (offset >= (s64a)ci->len) {
+ DEBUG_PRINTF("in the future\n");
+ return 1;
+ } else {
+ assert(offset < (s64a)ci->len);
+ DEBUG_PRINTF("check byte in buffer\n");
+ c = ci->buf[offset];
+ }
+ } else {
+ if (offset >= -(s64a) ci->hlen) {
+ DEBUG_PRINTF("check byte in history\n");
+ c = ci->hbuf[ci->hlen + offset];
+ } else {
+ DEBUG_PRINTF("before history and return\n");
+ return 1;
+ }
+ }
+
+ if (((and_mask & c) != cmp_mask) ^ negation) {
+ DEBUG_PRINTF("char 0x%02x at offset %lld failed byte check\n",
+ c, offset);
+ return 0;
+ }
+
+ DEBUG_PRINTF("real offset=%lld char=%02x\n", offset, c);
+ DEBUG_PRINTF("OK :)\n");
+ return 1;
+}
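+/*
+ * Worked example (illustrative): with and_mask == 0xdf (clearing the
+ * ASCII case bit), cmp_mask == 'A' and negation == 0, the check accepts
+ * exactly 'a' and 'A'; flipping negation to 1 accepts every byte except
+ * those two.
+ */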
+
+static rose_inline
+int roseCheckMask(const struct core_info *ci, u64a and_mask, u64a cmp_mask,
+ u64a neg_mask, s32 checkOffset, u64a end) {
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("rel offset %lld\n",base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ u64a data = 0;
+ u64a valid_data_mask = ~0ULL; // mask for the validate check;
+ // a 0xff byte means that byte is in the buffer.
+ s32 shift_l = 0; // number of bytes in the future.
+ s32 shift_r = 0; // number of bytes before the history.
+ s32 h_len = 0; // number of bytes in the history buffer.
+ s32 c_len = 8; // number of bytes in the current buffer.
+ if (offset < 0) {
+ // in or before history buffer.
+ if (offset + 8 <= -(s64a)ci->hlen) {
+ DEBUG_PRINTF("before history and return\n");
+ return 1;
+ }
+ const u8 *h_start = ci->hbuf; // start pointer in history buffer.
+ if (offset < -(s64a)ci->hlen) {
+ // some bytes are before history.
+ shift_r = -(offset + (s64a)ci->hlen);
+ DEBUG_PRINTF("shift_r %d", shift_r);
+ } else {
+ h_start += ci->hlen + offset;
+ }
+ if (offset + 7 < 0) {
+ DEBUG_PRINTF("all in history buffer\n");
+ data = partial_load_u64a(h_start, 8 - shift_r);
+ } else {
+ // history part
+ c_len = offset + 8;
+ h_len = -offset - shift_r;
+ DEBUG_PRINTF("%d bytes in history\n", h_len);
+ s64a data_h = 0;
+ data_h = partial_load_u64a(h_start, h_len);
+ // current part
+ if (c_len > (s64a)ci->len) {
+ shift_l = c_len - ci->len;
+ c_len = ci->len;
+ }
+ data = partial_load_u64a(ci->buf, c_len);
+ data <<= h_len << 3;
+ data |= data_h;
+ }
+ if (shift_r) {
+ data <<= shift_r << 3;
+ }
+ } else {
+ // current buffer.
+ if (offset + c_len > (s64a)ci->len) {
+ if (offset >= (s64a)ci->len) {
+ DEBUG_PRINTF("all in the future\n");
+ return 1;
+ }
+ // some bytes in the future.
+ shift_l = offset + c_len - ci->len;
+ c_len = ci->len - offset;
+ data = partial_load_u64a(ci->buf + offset, c_len);
+ } else {
+ data = unaligned_load_u64a(ci->buf + offset);
+ }
+ }
+
+ if (shift_l || shift_r) {
+ valid_data_mask = generateValidMask(shift_l, shift_r);
+ }
+ DEBUG_PRINTF("valid_data_mask %llx\n", valid_data_mask);
+
+ if (validateMask(data, valid_data_mask,
+ and_mask, cmp_mask, neg_mask)) {
+ DEBUG_PRINTF("check mask successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
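+/*
+ * Worked example (illustrative, assuming hlen >= 3 and len >= 5): for
+ * offset == -3, three history bytes are loaded into the low end of
+ * 'data', five bytes from the start of the current buffer are shifted
+ * left by h_len * 8 == 24 bits and OR-ed on top, and valid_data_mask
+ * stays ~0ULL because no byte falls outside the available data.
+ */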
+
+static rose_inline
+int roseCheckMask32(const struct core_info *ci, const u8 *and_mask,
+ const u8 *cmp_mask, const u32 neg_mask,
+ s32 checkOffset, u64a end) {
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ m256 data = zeroes256(); // consists of the following four parts.
+ s32 c_shift = 0; // blank bytes after current.
+ s32 h_shift = 0; // blank bytes before history.
+ s32 h_len = 32; // number of bytes from history buffer.
+ s32 c_len = 0; // number of bytes from current buffer.
+ /* h_shift + h_len + c_len + c_shift == 32 must hold. */
+
+ if (offset < 0) {
+ s32 h_offset = 0; // the start offset in history buffer.
+ if (offset < -(s64a)ci->hlen) {
+ if (offset + 32 <= -(s64a)ci->hlen) {
+ DEBUG_PRINTF("all before history\n");
+ return 1;
+ }
+ h_shift = -(offset + (s64a)ci->hlen);
+ h_len = 32 - h_shift;
+ } else {
+ h_offset = ci->hlen + offset;
+ }
+ if (offset + 32 > 0) {
+ // part in current buffer.
+ c_len = offset + 32;
+ h_len = -(offset + h_shift);
+ if (c_len > (s64a)ci->len) {
+ // out of current buffer.
+ c_shift = c_len - ci->len;
+ c_len = ci->len;
+ }
+ copy_upto_64_bytes((u8 *)&data - offset, ci->buf, c_len);
+ }
+ assert(h_shift + h_len + c_len + c_shift == 32);
+ copy_upto_64_bytes((u8 *)&data + h_shift, ci->hbuf + h_offset, h_len);
+ } else {
+ if (offset + 32 > (s64a)ci->len) {
+ if (offset >= (s64a)ci->len) {
+ DEBUG_PRINTF("all in the future.\n");
+ return 1;
+ }
+ c_len = ci->len - offset;
+ c_shift = 32 - c_len;
+ copy_upto_64_bytes((u8 *)&data, ci->buf + offset, c_len);
+ } else {
+ data = loadu256(ci->buf + offset);
+ }
+ }
+ DEBUG_PRINTF("h_shift %d c_shift %d\n", h_shift, c_shift);
+ DEBUG_PRINTF("h_len %d c_len %d\n", h_len, c_len);
+ // use valid_data_mask to mask off bytes before history / in the future.
+ u32 valid_data_mask;
+ valid_data_mask = (~0u) << (h_shift + c_shift) >> (c_shift);
+
+ m256 and_mask_m256 = loadu256(and_mask);
+ m256 cmp_mask_m256 = loadu256(cmp_mask);
+ if (validateMask32(data, valid_data_mask, and_mask_m256,
+ cmp_mask_m256, neg_mask)) {
+ DEBUG_PRINTF("Mask32 passed\n");
+ return 1;
+ }
+ return 0;
+}
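+/*
+ * Worked example (illustrative): with h_shift == 2 and c_shift == 3,
+ * valid_data_mask == (~0u << 5) >> 3 == 0x1ffffffc -- bits 0-1 (bytes
+ * before history) and bits 29-31 (bytes in the future) are cleared.
+ */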
+
+#ifdef HAVE_AVX512
+static rose_inline
+int roseCheckMask64(const struct core_info *ci, const u8 *and_mask,
+ const u8 *cmp_mask, const u64a neg_mask,
+ s32 checkOffset, u64a end) {
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ m512 data = zeroes512(); // consists of the following four parts.
+ s32 c_shift = 0; // blank bytes after current.
+ s32 h_shift = 0; // blank bytes before history.
+ s32 h_len = 64; // number of bytes from history buffer.
+ s32 c_len = 0; // number of bytes from current buffer.
+ /* h_shift + h_len + c_len + c_shift == 64 must hold. */
+
+ if (offset < 0) {
+ s32 h_offset = 0; // the start offset in history buffer.
+ if (offset < -(s64a)ci->hlen) {
+ if (offset + 64 <= -(s64a)ci->hlen) {
+ DEBUG_PRINTF("all before history\n");
+ return 1;
+ }
+ h_shift = -(offset + (s64a)ci->hlen);
+ h_len = 64 - h_shift;
+ } else {
+ h_offset = ci->hlen + offset;
+ }
+ if (offset + 64 > 0) {
+ // part in current buffer.
+ c_len = offset + 64;
+ h_len = -(offset + h_shift);
+ if (c_len > (s64a)ci->len) {
+ // out of current buffer.
+ c_shift = c_len - ci->len;
+ c_len = ci->len;
+ }
+ copy_upto_64_bytes((u8 *)&data - offset, ci->buf, c_len);
+ }
+ assert(h_shift + h_len + c_len + c_shift == 64);
+ copy_upto_64_bytes((u8 *)&data + h_shift, ci->hbuf + h_offset, h_len);
+ } else {
+ if (offset + 64 > (s64a)ci->len) {
+ if (offset >= (s64a)ci->len) {
+ DEBUG_PRINTF("all in the future.\n");
+ return 1;
+ }
+ c_len = ci->len - offset;
+ c_shift = 64 - c_len;
+ copy_upto_64_bytes((u8 *)&data, ci->buf + offset, c_len);
+ } else {
+ data = loadu512(ci->buf + offset);
+ }
+ }
+ DEBUG_PRINTF("h_shift %d c_shift %d\n", h_shift, c_shift);
+ DEBUG_PRINTF("h_len %d c_len %d\n", h_len, c_len);
+ // use valid_data_mask to mask off bytes before history / in the future.
+ u64a valid_data_mask;
+ valid_data_mask = (~0ULL) << (h_shift + c_shift) >> (c_shift);
+
+ m512 and_mask_m512 = loadu512(and_mask);
+ m512 cmp_mask_m512 = loadu512(cmp_mask);
+
+ if (validateMask64(data, valid_data_mask, and_mask_m512,
+ cmp_mask_m512, neg_mask)) {
+ DEBUG_PRINTF("Mask64 passed\n");
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+// Fetch 128/256/512 bits of data spanning the history and current buffers.
+// Writes the bytes into 'data' and returns the valid_data_mask.
+static rose_inline
+u64a getBufferDataComplex(const struct core_info *ci, const s64a loc,
+ u8 *data, const u32 data_len) {
+ assert(data_len == 16 || data_len == 32 || data_len == 64);
+ s32 c_shift = 0; // blank bytes after current.
+ s32 h_shift = 0; // blank bytes before history.
+ s32 h_len = data_len; // number of bytes from history buffer.
+ s32 c_len = 0; // number of bytes from current buffer.
+ if (loc < 0) {
+ s32 h_offset = 0; // the start offset in history buffer.
+ if (loc < -(s64a)ci->hlen) {
+ if (loc + data_len <= -(s64a)ci->hlen) {
+ DEBUG_PRINTF("all before history\n");
+ return 0;
+ }
+ h_shift = -(loc + (s64a)ci->hlen);
+ h_len = data_len - h_shift;
+ } else {
+ h_offset = ci->hlen + loc;
+ }
+ if (loc + data_len > 0) {
+ // part in current buffer.
+ c_len = loc + data_len;
+ h_len = -(loc + h_shift);
+ if (c_len > (s64a)ci->len) {
+ // out of current buffer.
+ c_shift = c_len - ci->len;
+ c_len = ci->len;
+ }
+ copy_upto_64_bytes(data - loc, ci->buf, c_len);
+ }
+ assert(h_shift + h_len + c_len + c_shift == (s32)data_len);
+ copy_upto_64_bytes(data + h_shift, ci->hbuf + h_offset, h_len);
+ } else {
+ if (loc + data_len > (s64a)ci->len) {
+ if (loc >= (s64a)ci->len) {
+ DEBUG_PRINTF("all in the future.\n");
+ return 0;
+ }
+ c_len = ci->len - loc;
+ c_shift = data_len - c_len;
+ copy_upto_64_bytes(data, ci->buf + loc, c_len);
+ } else {
+#ifdef HAVE_AVX512
+ if (data_len == 64) {
+ storeu512(data, loadu512(ci->buf + loc));
+ return ~0ULL;
+ }
+#endif
+ if (data_len == 16) {
+ storeu128(data, loadu128(ci->buf + loc));
+ return 0xffff;
+ } else {
+ storeu256(data, loadu256(ci->buf + loc));
+ return 0xffffffff;
+ }
+ }
+ }
+ DEBUG_PRINTF("h_shift %d c_shift %d\n", h_shift, c_shift);
+ DEBUG_PRINTF("h_len %d c_len %d\n", h_len, c_len);
+
+#ifdef HAVE_AVX512
+ if (data_len == 64) {
+ return (~0ULL) << (h_shift + c_shift) >> c_shift;
+ }
+#endif
+ if (data_len == 16) {
+ return (u16)(0xffff << (h_shift + c_shift)) >> c_shift;
+ } else {
+ return (~0u) << (h_shift + c_shift) >> c_shift;
+ }
+}
+
+static rose_inline
+m128 getData128(const struct core_info *ci, s64a offset, u32 *valid_data_mask) {
+ if (offset > 0 && offset + sizeof(m128) <= ci->len) {
+ *valid_data_mask = 0xffff;
+ return loadu128(ci->buf + offset);
+ }
+ ALIGN_DIRECTIVE u8 data[sizeof(m128)];
+ *valid_data_mask = getBufferDataComplex(ci, offset, data, 16);
+ return *(m128 *)data;
+}
+
+static rose_inline
+m256 getData256(const struct core_info *ci, s64a offset, u32 *valid_data_mask) {
+ if (offset > 0 && offset + sizeof(m256) <= ci->len) {
+ *valid_data_mask = ~0u;
+ return loadu256(ci->buf + offset);
+ }
+ ALIGN_AVX_DIRECTIVE u8 data[sizeof(m256)];
+ *valid_data_mask = getBufferDataComplex(ci, offset, data, 32);
+ return *(m256 *)data;
+}
+
+#ifdef HAVE_AVX512
+static rose_inline
+m512 getData512(const struct core_info *ci, s64a offset, u64a *valid_data_mask) {
+ if (offset > 0 && offset + sizeof(m512) <= ci->len) {
+ *valid_data_mask = ~0ULL;
+ return loadu512(ci->buf + offset);
+ }
+ ALIGN_CL_DIRECTIVE u8 data[sizeof(m512)];
+ *valid_data_mask = getBufferDataComplex(ci, offset, data, 64);
+ return *(m512 *)data;
+}
+#endif
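+/*
+ * Editorial note: the getData128/256/512 helpers above share a contract:
+ * bit i of *valid_data_mask is set iff byte i of the returned vector was
+ * actually read from the history or current buffer. Callers treat an
+ * all-zero mask as "nothing left to check" and succeed early.
+ */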
+
+static rose_inline
+int roseCheckShufti16x8(const struct core_info *ci, const u8 *nib_mask,
+ const u8 *bucket_select_mask, u32 neg_mask,
+ s32 checkOffset, u64a end) {
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ u32 valid_data_mask = 0;
+ m128 data = getData128(ci, offset, &valid_data_mask);
+ if (unlikely(!valid_data_mask)) {
+ return 1;
+ }
+
+ m256 nib_mask_m256 = loadu256(nib_mask);
+ m128 bucket_select_mask_m128 = loadu128(bucket_select_mask);
+ if (validateShuftiMask16x8(data, nib_mask_m256,
+ bucket_select_mask_m128,
+ neg_mask, valid_data_mask)) {
+ DEBUG_PRINTF("check shufti 16x8 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
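+/*
+ * Editorial note (hedged): the NxM suffix on these checks appears to read
+ * as "N bytes of input validated against M shufti buckets", so 16x8
+ * checks a 16-byte window with 8 buckets and 32x16 a 32-byte window with
+ * 16 buckets.
+ */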
+
+static rose_inline
+int roseCheckShufti16x16(const struct core_info *ci, const u8 *hi_mask,
+ const u8 *lo_mask, const u8 *bucket_select_mask,
+ u32 neg_mask, s32 checkOffset, u64a end) {
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ u32 valid_data_mask = 0;
+ m128 data = getData128(ci, offset, &valid_data_mask);
+ if (unlikely(!valid_data_mask)) {
+ return 1;
+ }
+
+ m256 data_m256 = set2x128(data);
+ m256 hi_mask_m256 = loadu256(hi_mask);
+ m256 lo_mask_m256 = loadu256(lo_mask);
+ m256 bucket_select_mask_m256 = loadu256(bucket_select_mask);
+ if (validateShuftiMask16x16(data_m256, hi_mask_m256, lo_mask_m256,
+ bucket_select_mask_m256,
+ neg_mask, valid_data_mask)) {
+ DEBUG_PRINTF("check shufti 16x16 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static rose_inline
+int roseCheckShufti32x8(const struct core_info *ci, const u8 *hi_mask,
+ const u8 *lo_mask, const u8 *bucket_select_mask,
+ u32 neg_mask, s32 checkOffset, u64a end) {
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ u32 valid_data_mask = 0;
+ m256 data = getData256(ci, offset, &valid_data_mask);
+ if (unlikely(!valid_data_mask)) {
+ return 1;
+ }
+
+ m128 hi_mask_m128 = loadu128(hi_mask);
+ m128 lo_mask_m128 = loadu128(lo_mask);
+ m256 hi_mask_m256 = set2x128(hi_mask_m128);
+ m256 lo_mask_m256 = set2x128(lo_mask_m128);
+ m256 bucket_select_mask_m256 = loadu256(bucket_select_mask);
+ if (validateShuftiMask32x8(data, hi_mask_m256, lo_mask_m256,
+ bucket_select_mask_m256,
+ neg_mask, valid_data_mask)) {
+ DEBUG_PRINTF("check shufti 32x8 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static rose_inline
+int roseCheckShufti32x16(const struct core_info *ci, const u8 *hi_mask,
+ const u8 *lo_mask, const u8 *bucket_select_mask_hi,
+ const u8 *bucket_select_mask_lo, u32 neg_mask,
+ s32 checkOffset, u64a end) {
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ u32 valid_data_mask = 0;
+ m256 data = getData256(ci, offset, &valid_data_mask);
+ if (unlikely(!valid_data_mask)) {
+ return 1;
+ }
+
+ m256 hi_mask_1 = loadu2x128(hi_mask);
+ m256 hi_mask_2 = loadu2x128(hi_mask + 16);
+ m256 lo_mask_1 = loadu2x128(lo_mask);
+ m256 lo_mask_2 = loadu2x128(lo_mask + 16);
+
+ m256 bucket_mask_hi = loadu256(bucket_select_mask_hi);
+ m256 bucket_mask_lo = loadu256(bucket_select_mask_lo);
+ if (validateShuftiMask32x16(data, hi_mask_1, hi_mask_2,
+ lo_mask_1, lo_mask_2, bucket_mask_hi,
+ bucket_mask_lo, neg_mask, valid_data_mask)) {
+ DEBUG_PRINTF("check shufti 32x16 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+#ifdef HAVE_AVX512
+static rose_inline
+int roseCheckShufti64x8(const struct core_info *ci, const u8 *hi_mask,
+ const u8 *lo_mask, const u8 *bucket_select_mask,
+ u64a neg_mask, s32 checkOffset, u64a end) {
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ u64a valid_data_mask = 0;
+ m512 data = getData512(ci, offset, &valid_data_mask);
+
+ if (unlikely(!valid_data_mask)) {
+ return 1;
+ }
+
+ m512 hi_mask_m512 = loadu512(hi_mask);
+ m512 lo_mask_m512 = loadu512(lo_mask);
+ m512 bucket_select_mask_m512 = loadu512(bucket_select_mask);
+ if (validateShuftiMask64x8(data, hi_mask_m512, lo_mask_m512,
+ bucket_select_mask_m512,
+ neg_mask, valid_data_mask)) {
+ DEBUG_PRINTF("check shufti 64x8 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static rose_inline
+int roseCheckShufti64x16(const struct core_info *ci, const u8 *hi_mask_1,
+ const u8 *hi_mask_2, const u8 *lo_mask_1,
+ const u8 *lo_mask_2, const u8 *bucket_select_mask_hi,
+ const u8 *bucket_select_mask_lo, u64a neg_mask,
+ s32 checkOffset, u64a end) {
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ u64a valid_data_mask = 0;
+ m512 data = getData512(ci, offset, &valid_data_mask);
+ if (unlikely(!valid_data_mask)) {
+ return 1;
+ }
+
+ m512 hi_mask_1_m512 = loadu512(hi_mask_1);
+ m512 hi_mask_2_m512 = loadu512(hi_mask_2);
+ m512 lo_mask_1_m512 = loadu512(lo_mask_1);
+ m512 lo_mask_2_m512 = loadu512(lo_mask_2);
+
+ m512 bucket_select_mask_hi_m512 = loadu512(bucket_select_mask_hi);
+ m512 bucket_select_mask_lo_m512 = loadu512(bucket_select_mask_lo);
+ if (validateShuftiMask64x16(data, hi_mask_1_m512, hi_mask_2_m512,
+ lo_mask_1_m512, lo_mask_2_m512,
+ bucket_select_mask_hi_m512,
+ bucket_select_mask_lo_m512,
+ neg_mask, valid_data_mask)) {
+ DEBUG_PRINTF("check shufti 64x16 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+#endif
+
+static rose_inline
+int roseCheckSingleLookaround(const struct RoseEngine *t,
+ const struct hs_scratch *scratch,
+ s8 checkOffset, u32 lookaroundReachIndex,
+ u64a end) {
+ assert(lookaroundReachIndex != MO_INVALID_IDX);
+ const struct core_info *ci = &scratch->core_info;
+ DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
+ ci->buf_offset, ci->buf_offset + ci->len);
+
+ const s64a base_offset = end - ci->buf_offset;
+ const s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("base_offset=%lld\n", base_offset);
+ DEBUG_PRINTF("checkOffset=%d offset=%lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ const u8 *reach = getByOffset(t, lookaroundReachIndex);
+
+ u8 c;
+ if (offset >= 0 && offset < (s64a)ci->len) {
+ c = ci->buf[offset];
+ } else if (offset < 0 && offset >= -(s64a)ci->hlen) {
+ c = ci->hbuf[ci->hlen + offset];
+ } else {
+ return 1;
+ }
+
+ if (!reachHasBit(reach, c)) {
+ DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
+ return 0;
+ }
+
+ DEBUG_PRINTF("OK :)\n");
+ return 1;
+}
+
+/**
+ * \brief Scan around a literal, checking that the "lookaround" reach masks
+ * are satisfied.
+ */
+static rose_inline
+int roseCheckLookaround(const struct RoseEngine *t,
+ const struct hs_scratch *scratch,
+ u32 lookaroundLookIndex, u32 lookaroundReachIndex,
+ u32 lookaroundCount, u64a end) {
+ assert(lookaroundLookIndex != MO_INVALID_IDX);
+ assert(lookaroundReachIndex != MO_INVALID_IDX);
+ assert(lookaroundCount > 0);
+
+ const struct core_info *ci = &scratch->core_info;
+ DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
+ ci->buf_offset, ci->buf_offset + ci->len);
+
+ const s8 *look = getByOffset(t, lookaroundLookIndex);
+ const s8 *look_end = look + lookaroundCount;
+ assert(look < look_end);
+
+ const u8 *reach = getByOffset(t, lookaroundReachIndex);
+
+ // The following code assumes that the lookaround structures are ordered by
+ // increasing offset.
+
+ const s64a base_offset = end - ci->buf_offset;
+ DEBUG_PRINTF("base_offset=%lld\n", base_offset);
+ DEBUG_PRINTF("first look has offset %d\n", *look);
+
+ // If our first check tells us we need to look at an offset before the
+ // start of the stream, this role cannot match.
+ if (unlikely(*look < 0 && (u64a)(0 - *look) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ // Skip over offsets that are before the history buffer.
+ do {
+ s64a offset = base_offset + *look;
+ if (offset >= -(s64a)ci->hlen) {
+ goto in_history;
+ }
+ DEBUG_PRINTF("look=%d before history\n", *look);
+ look++;
+ reach += REACH_BITVECTOR_LEN;
+ } while (look < look_end);
+
+ // History buffer.
+ DEBUG_PRINTF("scan history (%zu looks left)\n", look_end - look);
+ for (; look < look_end; ++look, reach += REACH_BITVECTOR_LEN) {
+ in_history:
+ ;
+ s64a offset = base_offset + *look;
+ DEBUG_PRINTF("reach=%p, rel offset=%lld\n", reach, offset);
+
+ if (offset >= 0) {
+ DEBUG_PRINTF("in buffer\n");
+ goto in_buffer;
+ }
+
+ assert(offset >= -(s64a)ci->hlen && offset < 0);
+ u8 c = ci->hbuf[ci->hlen + offset];
+ if (!reachHasBit(reach, c)) {
+ DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
+ return 0;
+ }
+ }
+ // Current buffer.
+ DEBUG_PRINTF("scan buffer (%zu looks left)\n", look_end - look);
+ for (; look < look_end; ++look, reach += REACH_BITVECTOR_LEN) {
+ in_buffer:
+ ;
+ s64a offset = base_offset + *look;
+ DEBUG_PRINTF("reach=%p, rel offset=%lld\n", reach, offset);
+
+ if (offset >= (s64a)ci->len) {
+ DEBUG_PRINTF("in the future\n");
+ break;
+ }
+
+ assert(offset >= 0 && offset < (s64a)ci->len);
+ u8 c = ci->buf[offset];
+ if (!reachHasBit(reach, c)) {
+ DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
+ return 0;
+ }
+ }
+
+ DEBUG_PRINTF("OK :)\n");
+ return 1;
+}
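+/*
+ * Editorial note: 'look' is an array of lookaroundCount signed offsets in
+ * increasing order, and 'reach' holds one REACH_BITVECTOR_LEN-byte
+ * bitvector (one bit per byte value) per look; the loops above advance
+ * the two arrays in lockstep.
+ */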
+
+/**
+ * \brief Try to find a matching path using the corresponding path mask at
+ * each lookaround location.
+ */
+static rose_inline
+int roseMultipathLookaround(const struct RoseEngine *t,
+ const struct hs_scratch *scratch,
+ u32 multipathLookaroundLookIndex,
+ u32 multipathLookaroundReachIndex,
+ u32 multipathLookaroundCount,
+ s32 last_start, const u8 *start_mask,
+ u64a end) {
+ assert(multipathLookaroundCount > 0);
+
+ const struct core_info *ci = &scratch->core_info;
+ DEBUG_PRINTF("end=%llu, buf_offset=%llu, buf_end=%llu\n", end,
+ ci->buf_offset, ci->buf_offset + ci->len);
+
+ const s8 *look = getByOffset(t, multipathLookaroundLookIndex);
+ const s8 *look_end = look + multipathLookaroundCount;
+ assert(look < look_end);
+
+ const u8 *reach = getByOffset(t, multipathLookaroundReachIndex);
+
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ DEBUG_PRINTF("base_offset=%lld\n", base_offset);
+
+ u8 path = 0xff;
+
+ assert(last_start < 0);
+
+ if (unlikely((u64a)(0 - last_start) > end)) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+
+ s8 base_look_offset = *look;
+ do {
+ s64a offset = base_offset + *look;
+ u32 start_offset = (u32)(*look - base_look_offset);
+ DEBUG_PRINTF("start_mask[%u] = %x\n", start_offset,
+ start_mask[start_offset]);
+ path = start_mask[start_offset];
+ if (offset >= -(s64a)ci->hlen) {
+ break;
+ }
+ DEBUG_PRINTF("look=%d before history\n", *look);
+ look++;
+ reach += MULTI_REACH_BITVECTOR_LEN;
+ } while (look < look_end);
+
+ DEBUG_PRINTF("scan history (%zu looks left)\n", look_end - look);
+ for (; look < look_end; ++look, reach += MULTI_REACH_BITVECTOR_LEN) {
+ s64a offset = base_offset + *look;
+ DEBUG_PRINTF("reach=%p, rel offset=%lld\n", reach, offset);
+
+ if (offset >= 0) {
+ DEBUG_PRINTF("in buffer\n");
+ break;
+ }
+
+ assert(offset >= -(s64a)ci->hlen && offset < 0);
+ u8 c = ci->hbuf[ci->hlen + offset];
+ path &= reach[c];
+ DEBUG_PRINTF("reach[%x] = %02x path = %0xx\n", c, reach[c], path);
+ if (!path) {
+ DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
+ return 0;
+ }
+ }
+
+ DEBUG_PRINTF("scan buffer (%zu looks left)\n", look_end - look);
+    for (; look < look_end; ++look, reach += MULTI_REACH_BITVECTOR_LEN) {
+ s64a offset = base_offset + *look;
+ DEBUG_PRINTF("reach=%p, rel offset=%lld\n", reach, offset);
+
+ if (offset >= (s64a)ci->len) {
+ DEBUG_PRINTF("in the future\n");
+ break;
+ }
+
+ assert(offset >= 0 && offset < (s64a)ci->len);
+ u8 c = ci->buf[offset];
+ path &= reach[c];
+ DEBUG_PRINTF("reach[%x] = %02x path = %0xx\n", c, reach[c], path);
+ if (!path) {
+ DEBUG_PRINTF("char 0x%02x failed reach check\n", c);
+ return 0;
+ }
+ }
+
+ DEBUG_PRINTF("OK :)\n");
+ return 1;
+}
+
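+/*
+ * Illustrative model of the narrowing step above: each reach entry holds
+ * an 8-bit mask of the paths that accept a given byte, so one location
+ * costs a single AND. The helper below is editorial, not library API:
+ */
+static really_inline
+char multipathStepSketch(u8 *path, const u8 *reach, u8 c) {
+    *path &= reach[c];  /* drop every path that rejects byte c */
+    return *path != 0;  /* zero means no candidate path survived */
+}
+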
+static never_inline
+int roseCheckMultipathShufti16x8(const struct hs_scratch *scratch,
+ const struct ROSE_STRUCT_CHECK_MULTIPATH_SHUFTI_16x8 *ri,
+ u64a end) {
+ const struct core_info *ci = &scratch->core_info;
+ s32 checkOffset = ri->base_offset;
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ assert(ri->last_start <= 0);
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ if ((u64a)(0 - ri->last_start) > end) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+ }
+
+ u32 valid_data_mask;
+ m128 data_init = getData128(ci, offset, &valid_data_mask);
+ m128 data_select_mask = loadu128(ri->data_select_mask);
+
+ u32 valid_path_mask = 0;
+ if (unlikely(!(valid_data_mask & 1))) {
+ DEBUG_PRINTF("lose part of backward data\n");
+ DEBUG_PRINTF("valid_data_mask %x\n", valid_data_mask);
+
+ m128 expand_valid;
+ u64a expand_mask = 0x8080808080808080ULL;
+ u64a valid_lo = expand64(valid_data_mask & 0xff, expand_mask);
+ u64a valid_hi = expand64(valid_data_mask >> 8, expand_mask);
+ DEBUG_PRINTF("expand_hi %llx\n", valid_hi);
+ DEBUG_PRINTF("expand_lo %llx\n", valid_lo);
+ expand_valid = set64x2(valid_hi, valid_lo);
+ valid_path_mask = ~movemask128(pshufb_m128(expand_valid,
+ data_select_mask));
+ }
+
+ m128 data = pshufb_m128(data_init, data_select_mask);
+ m256 nib_mask = loadu256(ri->nib_mask);
+ m128 bucket_select_mask = loadu128(ri->bucket_select_mask);
+
+ u32 hi_bits_mask = ri->hi_bits_mask;
+ u32 lo_bits_mask = ri->lo_bits_mask;
+ u32 neg_mask = ri->neg_mask;
+
+ if (validateMultipathShuftiMask16x8(data, nib_mask,
+ bucket_select_mask,
+ hi_bits_mask, lo_bits_mask,
+ neg_mask, valid_path_mask)) {
+ DEBUG_PRINTF("check multi-path shufti-16x8 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
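+/*
+ * Illustrative model of expand64() as used above: a PDEP-style bit
+ * deposit (BMI2's PDEP does this in one instruction). With the mask
+ * 0x8080808080808080ULL it places the low eight source bits into the
+ * sign bit of each byte, where pshufb/movemask can observe them:
+ */
+static really_inline
+u64a expand64Sketch(u64a x, u64a m) {
+    u64a result = 0;
+    for (u64a bit = 1; m; bit <<= 1) {
+        u64a lowest = m & -m;   /* lowest remaining mask bit */
+        if (x & bit) {
+            result |= lowest;   /* deposit the next source bit there */
+        }
+        m &= m - 1;             /* consume that mask bit */
+    }
+    return result;
+}
+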
+static never_inline
+int roseCheckMultipathShufti32x8(const struct hs_scratch *scratch,
+ const struct ROSE_STRUCT_CHECK_MULTIPATH_SHUFTI_32x8 *ri,
+ u64a end) {
+ const struct core_info *ci = &scratch->core_info;
+ s32 checkOffset = ri->base_offset;
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ assert(ri->last_start <= 0);
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ if ((u64a)(0 - ri->last_start) > end) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+ }
+
+ u32 valid_data_mask;
+ m128 data_m128 = getData128(ci, offset, &valid_data_mask);
+ m256 data_double = set2x128(data_m128);
+ m256 data_select_mask = loadu256(ri->data_select_mask);
+
+ u32 valid_path_mask = 0;
+ m256 expand_valid;
+ if (unlikely(!(valid_data_mask & 1))) {
+ DEBUG_PRINTF("lose part of backward data\n");
+ DEBUG_PRINTF("valid_data_mask %x\n", valid_data_mask);
+
+ u64a expand_mask = 0x8080808080808080ULL;
+ u64a valid_lo = expand64(valid_data_mask & 0xff, expand_mask);
+ u64a valid_hi = expand64(valid_data_mask >> 8, expand_mask);
+ DEBUG_PRINTF("expand_hi %llx\n", valid_hi);
+ DEBUG_PRINTF("expand_lo %llx\n", valid_lo);
+ expand_valid = set64x4(valid_hi, valid_lo, valid_hi,
+ valid_lo);
+ valid_path_mask = ~movemask256(pshufb_m256(expand_valid,
+ data_select_mask));
+ }
+
+ m256 data = pshufb_m256(data_double, data_select_mask);
+ m256 hi_mask = loadu2x128(ri->hi_mask);
+ m256 lo_mask = loadu2x128(ri->lo_mask);
+ m256 bucket_select_mask = loadu256(ri->bucket_select_mask);
+
+ u32 hi_bits_mask = ri->hi_bits_mask;
+ u32 lo_bits_mask = ri->lo_bits_mask;
+ u32 neg_mask = ri->neg_mask;
+
+ if (validateMultipathShuftiMask32x8(data, hi_mask, lo_mask,
+ bucket_select_mask,
+ hi_bits_mask, lo_bits_mask,
+ neg_mask, valid_path_mask)) {
+ DEBUG_PRINTF("check multi-path shufti-32x8 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
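+/*
+ * The shufti validations here share one primitive: per-byte bucket
+ * membership looked up in two 16-entry nibble tables and combined with
+ * an AND. A scalar model of what each PSHUFB lane computes (the helper
+ * name is editorial):
+ */
+static really_inline
+u8 shuftiBucketsSketch(const u8 *lo_tbl, const u8 *hi_tbl, u8 c) {
+    /* One bit per bucket whose reach includes byte c. */
+    return lo_tbl[c & 0xf] & hi_tbl[c >> 4];
+}
+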
+static never_inline
+int roseCheckMultipathShufti32x16(const struct hs_scratch *scratch,
+ const struct ROSE_STRUCT_CHECK_MULTIPATH_SHUFTI_32x16 *ri,
+ u64a end) {
+ const struct core_info *ci = &scratch->core_info;
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s32 checkOffset = ri->base_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ assert(ri->last_start <= 0);
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ if ((u64a)(0 - ri->last_start) > end) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+ }
+
+ u32 valid_data_mask;
+ m128 data_m128 = getData128(ci, offset, &valid_data_mask);
+ m256 data_double = set2x128(data_m128);
+ m256 data_select_mask = loadu256(ri->data_select_mask);
+
+ u32 valid_path_mask = 0;
+ m256 expand_valid;
+ if (unlikely(!(valid_data_mask & 1))) {
+ DEBUG_PRINTF("lose part of backward data\n");
+ DEBUG_PRINTF("valid_data_mask %x\n", valid_data_mask);
+
+ u64a expand_mask = 0x8080808080808080ULL;
+ u64a valid_lo = expand64(valid_data_mask & 0xff, expand_mask);
+ u64a valid_hi = expand64(valid_data_mask >> 8, expand_mask);
+ DEBUG_PRINTF("expand_hi %llx\n", valid_hi);
+ DEBUG_PRINTF("expand_lo %llx\n", valid_lo);
+ expand_valid = set64x4(valid_hi, valid_lo, valid_hi,
+ valid_lo);
+ valid_path_mask = ~movemask256(pshufb_m256(expand_valid,
+ data_select_mask));
+ }
+
+ m256 data = pshufb_m256(data_double, data_select_mask);
+
+ m256 hi_mask_1 = loadu2x128(ri->hi_mask);
+ m256 hi_mask_2 = loadu2x128(ri->hi_mask + 16);
+ m256 lo_mask_1 = loadu2x128(ri->lo_mask);
+ m256 lo_mask_2 = loadu2x128(ri->lo_mask + 16);
+
+ m256 bucket_select_mask_hi = loadu256(ri->bucket_select_mask_hi);
+ m256 bucket_select_mask_lo = loadu256(ri->bucket_select_mask_lo);
+
+ u32 hi_bits_mask = ri->hi_bits_mask;
+ u32 lo_bits_mask = ri->lo_bits_mask;
+ u32 neg_mask = ri->neg_mask;
+
+ if (validateMultipathShuftiMask32x16(data, hi_mask_1, hi_mask_2,
+ lo_mask_1, lo_mask_2,
+ bucket_select_mask_hi,
+ bucket_select_mask_lo,
+ hi_bits_mask, lo_bits_mask,
+ neg_mask, valid_path_mask)) {
+ DEBUG_PRINTF("check multi-path shufti-32x16 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
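+/*
+ * In the 32x16 variant above, 16 buckets need 16 bits of membership per
+ * byte, hence the two nibble-table pairs. A scalar sketch of the
+ * composition, assuming the pairs cover buckets 0-7 and 8-15 (editorial
+ * helper, not library API):
+ */
+static really_inline
+u16 buckets16Sketch(const u8 *lo1, const u8 *hi1,
+                    const u8 *lo2, const u8 *hi2, u8 c) {
+    u16 lo_half = lo1[c & 0xf] & hi1[c >> 4];   /* buckets 0-7  */
+    u16 hi_half = lo2[c & 0xf] & hi2[c >> 4];   /* buckets 8-15 */
+    return (u16)(lo_half | (u16)(hi_half << 8));
+}
+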
+static never_inline
+int roseCheckMultipathShufti64(const struct hs_scratch *scratch,
+ const struct ROSE_STRUCT_CHECK_MULTIPATH_SHUFTI_64 *ri,
+ u64a end) {
+ const struct core_info *ci = &scratch->core_info;
+ const s64a base_offset = (s64a)end - ci->buf_offset;
+ s32 checkOffset = ri->base_offset;
+ s64a offset = base_offset + checkOffset;
+ DEBUG_PRINTF("end %lld base_offset %lld\n", end, base_offset);
+ DEBUG_PRINTF("checkOffset %d offset %lld\n", checkOffset, offset);
+
+ if (unlikely(checkOffset < 0 && (u64a)(0 - checkOffset) > end)) {
+ if ((u64a)(0 - ri->last_start) > end) {
+ DEBUG_PRINTF("too early, fail\n");
+ return 0;
+ }
+ }
+
+ u32 valid_data_mask;
+ m128 data_m128 = getData128(ci, offset, &valid_data_mask);
+ m256 data_m256 = set2x128(data_m128);
+ m256 data_select_mask_1 = loadu256(ri->data_select_mask);
+ m256 data_select_mask_2 = loadu256(ri->data_select_mask + 32);
+
+ u64a valid_path_mask = 0;
+ m256 expand_valid;
+ if (unlikely(!(valid_data_mask & 1))) {
+ DEBUG_PRINTF("lose part of backward data\n");
+ DEBUG_PRINTF("valid_data_mask %x\n", valid_data_mask);
+
+ u64a expand_mask = 0x8080808080808080ULL;
+ u64a valid_lo = expand64(valid_data_mask & 0xff, expand_mask);
+ u64a valid_hi = expand64(valid_data_mask >> 8, expand_mask);
+ DEBUG_PRINTF("expand_hi %llx\n", valid_hi);
+ DEBUG_PRINTF("expand_lo %llx\n", valid_lo);
+ expand_valid = set64x4(valid_hi, valid_lo, valid_hi,
+ valid_lo);
+ u32 valid_path_1 = movemask256(pshufb_m256(expand_valid,
+ data_select_mask_1));
+ u32 valid_path_2 = movemask256(pshufb_m256(expand_valid,
+ data_select_mask_2));
+ valid_path_mask = ~((u64a)valid_path_1 | (u64a)valid_path_2 << 32);
+ }
+
+ m256 data_1 = pshufb_m256(data_m256, data_select_mask_1);
+ m256 data_2 = pshufb_m256(data_m256, data_select_mask_2);
+
+ m256 hi_mask = loadu2x128(ri->hi_mask);
+ m256 lo_mask = loadu2x128(ri->lo_mask);
+
+ m256 bucket_select_mask_1 = loadu256(ri->bucket_select_mask);
+ m256 bucket_select_mask_2 = loadu256(ri->bucket_select_mask + 32);
+
+ u64a hi_bits_mask = ri->hi_bits_mask;
+ u64a lo_bits_mask = ri->lo_bits_mask;
+ u64a neg_mask = ri->neg_mask;
+
+ if (validateMultipathShuftiMask64(data_1, data_2, hi_mask, lo_mask,
+ bucket_select_mask_1,
+ bucket_select_mask_2, hi_bits_mask,
+ lo_bits_mask, neg_mask,
+ valid_path_mask)) {
+ DEBUG_PRINTF("check multi-path shufti-64 successfully\n");
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
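+/*
+ * Worked note on the 64-lane composition above: valid_path_1 covers the
+ * 32 bytes selected by data_select_mask_1 and valid_path_2 the next 32,
+ * so (valid_path_1 | valid_path_2 << 32) spans all 64 lanes, and its
+ * complement leaves set bits only on lanes whose selected source byte
+ * fell before the start of the stream.
+ */
+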
+static rose_inline
int roseNfaEarliestSom(u64a start, UNUSED u64a end, UNUSED ReportID id,
void *context) {
assert(context);
@@ -1697,1813 +1697,1813 @@ int roseNfaEarliestSom(u64a start, UNUSED u64a end, UNUSED ReportID id,
return MO_CONTINUE_MATCHING;
}
-static rose_inline
-u64a roseGetHaigSom(const struct RoseEngine *t, struct hs_scratch *scratch,
- const u32 qi, UNUSED const u32 leftfixLag) {
- u32 ri = queueToLeftIndex(t, qi);
-
- UNUSED const struct LeftNfaInfo *left = getLeftTable(t) + ri;
-
- DEBUG_PRINTF("testing %s prefix %u/%u with lag %u (maxLag=%u)\n",
- left->transient ? "transient" : "active", ri, qi,
- leftfixLag, left->maxLag);
-
- assert(leftfixLag <= left->maxLag);
-
- struct mq *q = scratch->queues + qi;
-
- u64a start = ~0ULL;
-
- /* switch the callback + context for a fun one */
- q->cb = roseNfaEarliestSom;
- q->context = &start;
-
- nfaReportCurrentMatches(q->nfa, q);
-
- /* restore the old callback + context */
- q->cb = roseNfaAdaptor;
- q->context = NULL;
- DEBUG_PRINTF("earliest som is %llu\n", start);
- return start;
-}
-
-static rose_inline
-char roseCheckBounds(u64a end, u64a min_bound, u64a max_bound) {
- DEBUG_PRINTF("check offset=%llu against bounds [%llu,%llu]\n", end,
- min_bound, max_bound);
- assert(min_bound <= max_bound);
- return end >= min_bound && end <= max_bound;
-}
-
-static rose_inline
-hwlmcb_rv_t roseEnginesEod(const struct RoseEngine *rose,
- struct hs_scratch *scratch, u64a offset,
- u32 iter_offset) {
- const char is_streaming = rose->mode != HS_MODE_BLOCK;
-
-    /* data/len are used for state decompression; they should cover the
-     * full available data. */
- u8 key = 0;
- if (is_streaming) {
- const u8 *eod_data = scratch->core_info.hbuf;
- size_t eod_len = scratch->core_info.hlen;
- key = eod_len ? eod_data[eod_len - 1] : 0;
- }
-
- const u8 *aa = getActiveLeafArray(rose, scratch->core_info.state);
- const u32 aaCount = rose->activeArrayCount;
- const u32 qCount = rose->queueCount;
- struct fatbit *aqa = scratch->aqa;
-
- const struct mmbit_sparse_iter *it = getByOffset(rose, iter_offset);
- assert(ISALIGNED(it));
-
- u32 idx = 0;
- struct mmbit_sparse_state si_state[MAX_SPARSE_ITER_STATES];
-
- for (u32 qi = mmbit_sparse_iter_begin(aa, aaCount, &idx, it, si_state);
- qi != MMB_INVALID;
- qi = mmbit_sparse_iter_next(aa, aaCount, qi, &idx, it, si_state)) {
- DEBUG_PRINTF("checking nfa %u\n", qi);
- struct mq *q = scratch->queues + qi;
- if (!fatbit_set(aqa, qCount, qi)) {
- initQueue(q, qi, rose, scratch);
- }
-
- assert(q->nfa == getNfaByQueue(rose, qi));
- assert(nfaAcceptsEod(q->nfa));
-
- if (is_streaming) {
- // Decompress stream state.
- nfaExpandState(q->nfa, q->state, q->streamState, offset, key);
- }
-
- if (nfaCheckFinalState(q->nfa, q->state, q->streamState, offset,
- roseReportAdaptor,
- scratch) == MO_HALT_MATCHING) {
- DEBUG_PRINTF("user instructed us to stop\n");
- return HWLM_TERMINATE_MATCHING;
- }
- }
-
- return HWLM_CONTINUE_MATCHING;
-}
-
-static rose_inline
-hwlmcb_rv_t roseSuffixesEod(const struct RoseEngine *rose,
- struct hs_scratch *scratch, u64a offset) {
- const u8 *aa = getActiveLeafArray(rose, scratch->core_info.state);
- const u32 aaCount = rose->activeArrayCount;
-
- for (u32 qi = mmbit_iterate(aa, aaCount, MMB_INVALID); qi != MMB_INVALID;
- qi = mmbit_iterate(aa, aaCount, qi)) {
- DEBUG_PRINTF("checking nfa %u\n", qi);
- struct mq *q = scratch->queues + qi;
- assert(q->nfa == getNfaByQueue(rose, qi));
- assert(nfaAcceptsEod(q->nfa));
-
- /* We have just been triggered. */
- assert(fatbit_isset(scratch->aqa, rose->queueCount, qi));
-
- pushQueueNoMerge(q, MQE_END, scratch->core_info.len);
- q->context = NULL;
-
-        /* rose exec is used as we don't want to (and can't) raise matches
-         * in the history buffer. */
- if (!nfaQueueExecRose(q->nfa, q, MO_INVALID_IDX)) {
- DEBUG_PRINTF("nfa is dead\n");
- continue;
- }
- if (nfaCheckFinalState(q->nfa, q->state, q->streamState, offset,
- roseReportAdaptor,
- scratch) == MO_HALT_MATCHING) {
- DEBUG_PRINTF("user instructed us to stop\n");
- return HWLM_TERMINATE_MATCHING;
- }
- }
- return HWLM_CONTINUE_MATCHING;
-}
-
-static rose_inline
-hwlmcb_rv_t roseMatcherEod(const struct RoseEngine *rose,
- struct hs_scratch *scratch, u64a offset) {
- assert(rose->ematcherOffset);
- assert(rose->ematcherRegionSize);
-
- // Clear role state and active engines, since we have already handled all
- // outstanding work there.
- DEBUG_PRINTF("clear role state and active leaf array\n");
- char *state = scratch->core_info.state;
- mmbit_clear(getRoleState(state), rose->rolesWithStateCount);
- mmbit_clear(getActiveLeafArray(rose, state), rose->activeArrayCount);
-
- const char is_streaming = rose->mode != HS_MODE_BLOCK;
-
- size_t eod_len;
- const u8 *eod_data;
- if (!is_streaming) { /* Block */
- eod_data = scratch->core_info.buf;
- eod_len = scratch->core_info.len;
- } else { /* Streaming */
- eod_len = scratch->core_info.hlen;
- eod_data = scratch->core_info.hbuf;
- }
-
- assert(eod_data);
- assert(eod_len);
-
- DEBUG_PRINTF("%zu bytes of eod data to scan at offset %llu\n", eod_len,
- offset);
-
- // If we don't have enough bytes to produce a match from an EOD table scan,
- // there's no point scanning.
- if (eod_len < rose->eodmatcherMinWidth) {
- DEBUG_PRINTF("too short for min width %u\n", rose->eodmatcherMinWidth);
- return HWLM_CONTINUE_MATCHING;
- }
-
-    // Ensure that we only need to scan the last N bytes, where N is the
-    // length of the eod-anchored matcher region.
- size_t adj = eod_len - MIN(eod_len, rose->ematcherRegionSize);
-
- const struct HWLM *etable = getByOffset(rose, rose->ematcherOffset);
- hwlmExec(etable, eod_data, eod_len, adj, roseCallback, scratch,
- scratch->tctxt.groups);
-
- // We may need to fire delayed matches.
- if (cleanUpDelayed(rose, scratch, 0, offset) == HWLM_TERMINATE_MATCHING) {
- DEBUG_PRINTF("user instructed us to stop\n");
- return HWLM_TERMINATE_MATCHING;
- }
-
- roseFlushLastByteHistory(rose, scratch, offset);
- return HWLM_CONTINUE_MATCHING;
-}
-
-static rose_inline
-int roseCheckLongLiteral(const struct RoseEngine *t,
- const struct hs_scratch *scratch, u64a end,
- u32 lit_offset, u32 lit_length, char nocase) {
- const struct core_info *ci = &scratch->core_info;
- const u8 *lit = getByOffset(t, lit_offset);
-
- DEBUG_PRINTF("check lit at %llu, length %u\n", end, lit_length);
- DEBUG_PRINTF("base buf_offset=%llu\n", ci->buf_offset);
-
- if (end < lit_length) {
- DEBUG_PRINTF("too short!\n");
- return 0;
- }
-
- // If any portion of the literal matched in the current buffer, check it.
- if (end > ci->buf_offset) {
- u32 scan_len = MIN(end - ci->buf_offset, lit_length);
- u64a scan_start = end - ci->buf_offset - scan_len;
- DEBUG_PRINTF("checking suffix (%u bytes) in buf[%llu:%llu]\n", scan_len,
- scan_start, end);
- if (cmpForward(ci->buf + scan_start, lit + lit_length - scan_len,
- scan_len, nocase)) {
- DEBUG_PRINTF("cmp of suffix failed\n");
- return 0;
- }
- }
-
- // If the entirety of the literal was in the current block, we are done.
- if (end - lit_length >= ci->buf_offset) {
- DEBUG_PRINTF("literal confirmed in current block\n");
- return 1;
- }
-
- // We still have a prefix which we must test against the buffer prepared by
- // the long literal table. This is only done in streaming mode.
-
- assert(t->mode != HS_MODE_BLOCK);
-
- const u8 *ll_buf;
- size_t ll_len;
- if (nocase) {
- ll_buf = scratch->tctxt.ll_buf_nocase;
- ll_len = scratch->tctxt.ll_len_nocase;
- } else {
- ll_buf = scratch->tctxt.ll_buf;
- ll_len = scratch->tctxt.ll_len;
- }
-
- assert(ll_buf);
-
- u64a lit_start_offset = end - lit_length;
- u32 prefix_len = MIN(lit_length, ci->buf_offset - lit_start_offset);
- u32 hist_rewind = ci->buf_offset - lit_start_offset;
- DEBUG_PRINTF("ll_len=%zu, hist_rewind=%u\n", ll_len, hist_rewind);
- if (hist_rewind > ll_len) {
- DEBUG_PRINTF("not enough history\n");
- return 0;
- }
-
- DEBUG_PRINTF("check prefix len=%u from hist (len %zu, rewind %u)\n",
- prefix_len, ll_len, hist_rewind);
- assert(hist_rewind <= ll_len);
- if (cmpForward(ll_buf + ll_len - hist_rewind, lit, prefix_len, nocase)) {
- DEBUG_PRINTF("cmp of prefix failed\n");
- return 0;
- }
-
- DEBUG_PRINTF("cmp succeeded\n");
- return 1;
-}
-
-static rose_inline
-int roseCheckMediumLiteral(const struct RoseEngine *t,
- const struct hs_scratch *scratch, u64a end,
- u32 lit_offset, u32 lit_length, char nocase) {
- const struct core_info *ci = &scratch->core_info;
- const u8 *lit = getByOffset(t, lit_offset);
-
- DEBUG_PRINTF("check lit at %llu, length %u\n", end, lit_length);
- DEBUG_PRINTF("base buf_offset=%llu\n", ci->buf_offset);
-
- if (end < lit_length) {
- DEBUG_PRINTF("too short!\n");
- return 0;
- }
-
- // If any portion of the literal matched in the current buffer, check it.
- if (end > ci->buf_offset) {
- u32 scan_len = MIN(end - ci->buf_offset, lit_length);
- u64a scan_start = end - ci->buf_offset - scan_len;
- DEBUG_PRINTF("checking suffix (%u bytes) in buf[%llu:%llu]\n", scan_len,
- scan_start, end);
- if (cmpForward(ci->buf + scan_start, lit + lit_length - scan_len,
- scan_len, nocase)) {
- DEBUG_PRINTF("cmp of suffix failed\n");
- return 0;
- }
- }
-
- // If the entirety of the literal was in the current block, we are done.
- if (end - lit_length >= ci->buf_offset) {
- DEBUG_PRINTF("literal confirmed in current block\n");
- return 1;
- }
-
- // We still have a prefix which we must test against the history buffer.
- assert(t->mode != HS_MODE_BLOCK);
-
- u64a lit_start_offset = end - lit_length;
- u32 prefix_len = MIN(lit_length, ci->buf_offset - lit_start_offset);
- u32 hist_rewind = ci->buf_offset - lit_start_offset;
- DEBUG_PRINTF("hlen=%zu, hist_rewind=%u\n", ci->hlen, hist_rewind);
-
- // History length check required for confirm in the EOD and delayed
- // rebuild paths.
- if (hist_rewind > ci->hlen) {
- DEBUG_PRINTF("not enough history\n");
- return 0;
- }
-
- DEBUG_PRINTF("check prefix len=%u from hist (len %zu, rewind %u)\n",
- prefix_len, ci->hlen, hist_rewind);
- assert(hist_rewind <= ci->hlen);
- if (cmpForward(ci->hbuf + ci->hlen - hist_rewind, lit, prefix_len,
- nocase)) {
- DEBUG_PRINTF("cmp of prefix failed\n");
- return 0;
- }
-
- DEBUG_PRINTF("cmp succeeded\n");
- return 1;
-}
-
-static
-void updateSeqPoint(struct RoseContext *tctxt, u64a offset,
- const char from_mpv) {
- if (from_mpv) {
- updateMinMatchOffsetFromMpv(tctxt, offset);
- } else {
- updateMinMatchOffset(tctxt, offset);
- }
-}
-
-static rose_inline
-hwlmcb_rv_t flushActiveCombinations(const struct RoseEngine *t,
- struct hs_scratch *scratch) {
- u8 *cvec = (u8 *)scratch->core_info.combVector;
- if (!mmbit_any(cvec, t->ckeyCount)) {
- return HWLM_CONTINUE_MATCHING;
- }
- u64a end = scratch->tctxt.lastCombMatchOffset;
- for (u32 i = mmbit_iterate(cvec, t->ckeyCount, MMB_INVALID);
- i != MMB_INVALID; i = mmbit_iterate(cvec, t->ckeyCount, i)) {
- const struct CombInfo *combInfoMap = (const struct CombInfo *)
- ((const char *)t + t->combInfoMapOffset);
- const struct CombInfo *ci = combInfoMap + i;
- if ((ci->min_offset != 0) && (end < ci->min_offset)) {
- DEBUG_PRINTF("halt: before min_offset=%llu\n", ci->min_offset);
- continue;
- }
- if ((ci->max_offset != MAX_OFFSET) && (end > ci->max_offset)) {
- DEBUG_PRINTF("halt: after max_offset=%llu\n", ci->max_offset);
- continue;
- }
-
- DEBUG_PRINTF("check ekey %u\n", ci->ekey);
- if (ci->ekey != INVALID_EKEY) {
- assert(ci->ekey < t->ekeyCount);
- const char *evec = scratch->core_info.exhaustionVector;
- if (isExhausted(t, evec, ci->ekey)) {
- DEBUG_PRINTF("ekey %u already set, match is exhausted\n",
- ci->ekey);
- continue;
- }
- }
-
- DEBUG_PRINTF("check ckey %u\n", i);
- char *lvec = scratch->core_info.logicalVector;
- if (!isLogicalCombination(t, lvec, ci->start, ci->result)) {
- DEBUG_PRINTF("Logical Combination Failed!\n");
- continue;
- }
-
- DEBUG_PRINTF("Logical Combination Passed!\n");
- if (roseReportComb(t, scratch, end, ci->id, 0,
- ci->ekey) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- clearCvec(t, (char *)cvec);
- return HWLM_CONTINUE_MATCHING;
-}
-
-static rose_inline
-hwlmcb_rv_t checkPurelyNegatives(const struct RoseEngine *t,
- struct hs_scratch *scratch, u64a end) {
- for (u32 i = 0; i < t->ckeyCount; i++) {
- const struct CombInfo *combInfoMap = (const struct CombInfo *)
- ((const char *)t + t->combInfoMapOffset);
- const struct CombInfo *ci = combInfoMap + i;
- if ((ci->min_offset != 0) && (end < ci->min_offset)) {
- DEBUG_PRINTF("halt: before min_offset=%llu\n", ci->min_offset);
- continue;
- }
- if ((ci->max_offset != MAX_OFFSET) && (end > ci->max_offset)) {
- DEBUG_PRINTF("halt: after max_offset=%llu\n", ci->max_offset);
- continue;
- }
-
- DEBUG_PRINTF("check ekey %u\n", ci->ekey);
- if (ci->ekey != INVALID_EKEY) {
- assert(ci->ekey < t->ekeyCount);
- const char *evec = scratch->core_info.exhaustionVector;
- if (isExhausted(t, evec, ci->ekey)) {
- DEBUG_PRINTF("ekey %u already set, match is exhausted\n",
- ci->ekey);
- continue;
- }
- }
-
- DEBUG_PRINTF("check ckey %u purely negative\n", i);
- char *lvec = scratch->core_info.logicalVector;
- if (!isPurelyNegativeMatch(t, lvec, ci->start, ci->result)) {
- DEBUG_PRINTF("Logical Combination from purely negative Failed!\n");
- continue;
- }
-
- DEBUG_PRINTF("Logical Combination from purely negative Passed!\n");
- if (roseReportComb(t, scratch, end, ci->id, 0,
- ci->ekey) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- return HWLM_CONTINUE_MATCHING;
-}
-
-#if !defined(_WIN32)
-#define PROGRAM_CASE(name) \
- case ROSE_INSTR_##name: { \
- LABEL_ROSE_INSTR_##name: \
- DEBUG_PRINTF("instruction: " #name " (pc=%u)\n", \
- programOffset + (u32)(pc - pc_base)); \
- const struct ROSE_STRUCT_##name *ri = \
- (const struct ROSE_STRUCT_##name *)pc;
-
-#define PROGRAM_NEXT_INSTRUCTION \
- pc += ROUNDUP_N(sizeof(*ri), ROSE_INSTR_MIN_ALIGN); \
- goto *(next_instr[*(const u8 *)pc]); \
- }
-
-#define PROGRAM_NEXT_INSTRUCTION_JUMP \
- goto *(next_instr[*(const u8 *)pc]);
-#else
-#define PROGRAM_CASE(name) \
- case ROSE_INSTR_##name: { \
- DEBUG_PRINTF("instruction: " #name " (pc=%u)\n", \
- programOffset + (u32)(pc - pc_base)); \
- const struct ROSE_STRUCT_##name *ri = \
- (const struct ROSE_STRUCT_##name *)pc;
-
-#define PROGRAM_NEXT_INSTRUCTION \
- pc += ROUNDUP_N(sizeof(*ri), ROSE_INSTR_MIN_ALIGN); \
- break; \
- }
-
-#define PROGRAM_NEXT_INSTRUCTION_JUMP continue;
-#endif
-
+static rose_inline
+u64a roseGetHaigSom(const struct RoseEngine *t, struct hs_scratch *scratch,
+ const u32 qi, UNUSED const u32 leftfixLag) {
+ u32 ri = queueToLeftIndex(t, qi);
+
+ UNUSED const struct LeftNfaInfo *left = getLeftTable(t) + ri;
+
+ DEBUG_PRINTF("testing %s prefix %u/%u with lag %u (maxLag=%u)\n",
+ left->transient ? "transient" : "active", ri, qi,
+ leftfixLag, left->maxLag);
+
+ assert(leftfixLag <= left->maxLag);
+
+ struct mq *q = scratch->queues + qi;
+
+ u64a start = ~0ULL;
+
+ /* switch the callback + context for a fun one */
+ q->cb = roseNfaEarliestSom;
+ q->context = &start;
+
+ nfaReportCurrentMatches(q->nfa, q);
+
+ /* restore the old callback + context */
+ q->cb = roseNfaAdaptor;
+ q->context = NULL;
+ DEBUG_PRINTF("earliest som is %llu\n", start);
+ return start;
+}
+
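+/*
+ * The function above relies on a save/swap/restore pattern around the
+ * queue callback. A generic sketch of the same pattern, restoring the
+ * saved fields instead of resetting them to roseNfaAdaptor:
+ */
+static really_inline
+u64a earliestSomSketch(struct mq *q) {
+    u64a start = ~0ULL;
+    NfaCallback saved_cb = q->cb;
+    void *saved_ctx = q->context;
+    q->cb = roseNfaEarliestSom;  /* reducer keeping the minimum SOM */
+    q->context = &start;
+    nfaReportCurrentMatches(q->nfa, q);
+    q->cb = saved_cb;            /* put the original callback back */
+    q->context = saved_ctx;
+    return start;
+}
+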
+static rose_inline
+char roseCheckBounds(u64a end, u64a min_bound, u64a max_bound) {
+ DEBUG_PRINTF("check offset=%llu against bounds [%llu,%llu]\n", end,
+ min_bound, max_bound);
+ assert(min_bound <= max_bound);
+ return end >= min_bound && end <= max_bound;
+}
+
+static rose_inline
+hwlmcb_rv_t roseEnginesEod(const struct RoseEngine *rose,
+ struct hs_scratch *scratch, u64a offset,
+ u32 iter_offset) {
+ const char is_streaming = rose->mode != HS_MODE_BLOCK;
+
+    /* data/len are used for state decompression; they should cover the
+     * full available data. */
+ u8 key = 0;
+ if (is_streaming) {
+ const u8 *eod_data = scratch->core_info.hbuf;
+ size_t eod_len = scratch->core_info.hlen;
+ key = eod_len ? eod_data[eod_len - 1] : 0;
+ }
+
+ const u8 *aa = getActiveLeafArray(rose, scratch->core_info.state);
+ const u32 aaCount = rose->activeArrayCount;
+ const u32 qCount = rose->queueCount;
+ struct fatbit *aqa = scratch->aqa;
+
+ const struct mmbit_sparse_iter *it = getByOffset(rose, iter_offset);
+ assert(ISALIGNED(it));
+
+ u32 idx = 0;
+ struct mmbit_sparse_state si_state[MAX_SPARSE_ITER_STATES];
+
+ for (u32 qi = mmbit_sparse_iter_begin(aa, aaCount, &idx, it, si_state);
+ qi != MMB_INVALID;
+ qi = mmbit_sparse_iter_next(aa, aaCount, qi, &idx, it, si_state)) {
+ DEBUG_PRINTF("checking nfa %u\n", qi);
+ struct mq *q = scratch->queues + qi;
+ if (!fatbit_set(aqa, qCount, qi)) {
+ initQueue(q, qi, rose, scratch);
+ }
+
+ assert(q->nfa == getNfaByQueue(rose, qi));
+ assert(nfaAcceptsEod(q->nfa));
+
+ if (is_streaming) {
+ // Decompress stream state.
+ nfaExpandState(q->nfa, q->state, q->streamState, offset, key);
+ }
+
+ if (nfaCheckFinalState(q->nfa, q->state, q->streamState, offset,
+ roseReportAdaptor,
+ scratch) == MO_HALT_MATCHING) {
+ DEBUG_PRINTF("user instructed us to stop\n");
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+
+ return HWLM_CONTINUE_MATCHING;
+}
+
+static rose_inline
+hwlmcb_rv_t roseSuffixesEod(const struct RoseEngine *rose,
+ struct hs_scratch *scratch, u64a offset) {
+ const u8 *aa = getActiveLeafArray(rose, scratch->core_info.state);
+ const u32 aaCount = rose->activeArrayCount;
+
+ for (u32 qi = mmbit_iterate(aa, aaCount, MMB_INVALID); qi != MMB_INVALID;
+ qi = mmbit_iterate(aa, aaCount, qi)) {
+ DEBUG_PRINTF("checking nfa %u\n", qi);
+ struct mq *q = scratch->queues + qi;
+ assert(q->nfa == getNfaByQueue(rose, qi));
+ assert(nfaAcceptsEod(q->nfa));
+
+ /* We have just been triggered. */
+ assert(fatbit_isset(scratch->aqa, rose->queueCount, qi));
+
+ pushQueueNoMerge(q, MQE_END, scratch->core_info.len);
+ q->context = NULL;
+
+        /* rose exec is used as we don't want to (and can't) raise matches
+         * in the history buffer. */
+ if (!nfaQueueExecRose(q->nfa, q, MO_INVALID_IDX)) {
+ DEBUG_PRINTF("nfa is dead\n");
+ continue;
+ }
+ if (nfaCheckFinalState(q->nfa, q->state, q->streamState, offset,
+ roseReportAdaptor,
+ scratch) == MO_HALT_MATCHING) {
+ DEBUG_PRINTF("user instructed us to stop\n");
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ return HWLM_CONTINUE_MATCHING;
+}
+
+static rose_inline
+hwlmcb_rv_t roseMatcherEod(const struct RoseEngine *rose,
+ struct hs_scratch *scratch, u64a offset) {
+ assert(rose->ematcherOffset);
+ assert(rose->ematcherRegionSize);
+
+ // Clear role state and active engines, since we have already handled all
+ // outstanding work there.
+ DEBUG_PRINTF("clear role state and active leaf array\n");
+ char *state = scratch->core_info.state;
+ mmbit_clear(getRoleState(state), rose->rolesWithStateCount);
+ mmbit_clear(getActiveLeafArray(rose, state), rose->activeArrayCount);
+
+ const char is_streaming = rose->mode != HS_MODE_BLOCK;
+
+ size_t eod_len;
+ const u8 *eod_data;
+ if (!is_streaming) { /* Block */
+ eod_data = scratch->core_info.buf;
+ eod_len = scratch->core_info.len;
+ } else { /* Streaming */
+ eod_len = scratch->core_info.hlen;
+ eod_data = scratch->core_info.hbuf;
+ }
+
+ assert(eod_data);
+ assert(eod_len);
+
+ DEBUG_PRINTF("%zu bytes of eod data to scan at offset %llu\n", eod_len,
+ offset);
+
+ // If we don't have enough bytes to produce a match from an EOD table scan,
+ // there's no point scanning.
+ if (eod_len < rose->eodmatcherMinWidth) {
+ DEBUG_PRINTF("too short for min width %u\n", rose->eodmatcherMinWidth);
+ return HWLM_CONTINUE_MATCHING;
+ }
+
+    // Ensure that we only need to scan the last N bytes, where N is the
+    // length of the eod-anchored matcher region.
+ size_t adj = eod_len - MIN(eod_len, rose->ematcherRegionSize);
+
+ const struct HWLM *etable = getByOffset(rose, rose->ematcherOffset);
+ hwlmExec(etable, eod_data, eod_len, adj, roseCallback, scratch,
+ scratch->tctxt.groups);
+
+ // We may need to fire delayed matches.
+ if (cleanUpDelayed(rose, scratch, 0, offset) == HWLM_TERMINATE_MATCHING) {
+ DEBUG_PRINTF("user instructed us to stop\n");
+ return HWLM_TERMINATE_MATCHING;
+ }
+
+ roseFlushLastByteHistory(rose, scratch, offset);
+ return HWLM_CONTINUE_MATCHING;
+}
+
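+/*
+ * Worked example of the start adjustment above: with eod_len = 100 and
+ * ematcherRegionSize = 16, adj = 100 - MIN(100, 16) = 84, so hwlmExec()
+ * begins matching at offset 84 and only the final 16 bytes are examined,
+ * the only span in which an eod-anchored literal can end.
+ */
+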
+static rose_inline
+int roseCheckLongLiteral(const struct RoseEngine *t,
+ const struct hs_scratch *scratch, u64a end,
+ u32 lit_offset, u32 lit_length, char nocase) {
+ const struct core_info *ci = &scratch->core_info;
+ const u8 *lit = getByOffset(t, lit_offset);
+
+ DEBUG_PRINTF("check lit at %llu, length %u\n", end, lit_length);
+ DEBUG_PRINTF("base buf_offset=%llu\n", ci->buf_offset);
+
+ if (end < lit_length) {
+ DEBUG_PRINTF("too short!\n");
+ return 0;
+ }
+
+ // If any portion of the literal matched in the current buffer, check it.
+ if (end > ci->buf_offset) {
+ u32 scan_len = MIN(end - ci->buf_offset, lit_length);
+ u64a scan_start = end - ci->buf_offset - scan_len;
+ DEBUG_PRINTF("checking suffix (%u bytes) in buf[%llu:%llu]\n", scan_len,
+ scan_start, end);
+ if (cmpForward(ci->buf + scan_start, lit + lit_length - scan_len,
+ scan_len, nocase)) {
+ DEBUG_PRINTF("cmp of suffix failed\n");
+ return 0;
+ }
+ }
+
+ // If the entirety of the literal was in the current block, we are done.
+ if (end - lit_length >= ci->buf_offset) {
+ DEBUG_PRINTF("literal confirmed in current block\n");
+ return 1;
+ }
+
+ // We still have a prefix which we must test against the buffer prepared by
+ // the long literal table. This is only done in streaming mode.
+
+ assert(t->mode != HS_MODE_BLOCK);
+
+ const u8 *ll_buf;
+ size_t ll_len;
+ if (nocase) {
+ ll_buf = scratch->tctxt.ll_buf_nocase;
+ ll_len = scratch->tctxt.ll_len_nocase;
+ } else {
+ ll_buf = scratch->tctxt.ll_buf;
+ ll_len = scratch->tctxt.ll_len;
+ }
+
+ assert(ll_buf);
+
+ u64a lit_start_offset = end - lit_length;
+ u32 prefix_len = MIN(lit_length, ci->buf_offset - lit_start_offset);
+ u32 hist_rewind = ci->buf_offset - lit_start_offset;
+ DEBUG_PRINTF("ll_len=%zu, hist_rewind=%u\n", ll_len, hist_rewind);
+ if (hist_rewind > ll_len) {
+ DEBUG_PRINTF("not enough history\n");
+ return 0;
+ }
+
+ DEBUG_PRINTF("check prefix len=%u from hist (len %zu, rewind %u)\n",
+ prefix_len, ll_len, hist_rewind);
+ assert(hist_rewind <= ll_len);
+ if (cmpForward(ll_buf + ll_len - hist_rewind, lit, prefix_len, nocase)) {
+ DEBUG_PRINTF("cmp of prefix failed\n");
+ return 0;
+ }
+
+ DEBUG_PRINTF("cmp succeeded\n");
+ return 1;
+}
+
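+/*
+ * Note the inverted sense used above: cmpForward() returns nonzero on
+ * mismatch and zero when every byte matches. An illustrative scalar
+ * model, assuming nocase means ASCII case-insensitivity (needs
+ * <ctype.h>):
+ */
+static really_inline
+int cmpForwardSketch(const u8 *a, const u8 *b, size_t len, char nocase) {
+    for (size_t i = 0; i < len; i++) {
+        u8 x = a[i], y = b[i];
+        if (nocase) {
+            x = (u8)tolower(x);
+            y = (u8)tolower(y);
+        }
+        if (x != y) {
+            return 1;  /* mismatch */
+        }
+    }
+    return 0;  /* match */
+}
+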
+static rose_inline
+int roseCheckMediumLiteral(const struct RoseEngine *t,
+ const struct hs_scratch *scratch, u64a end,
+ u32 lit_offset, u32 lit_length, char nocase) {
+ const struct core_info *ci = &scratch->core_info;
+ const u8 *lit = getByOffset(t, lit_offset);
+
+ DEBUG_PRINTF("check lit at %llu, length %u\n", end, lit_length);
+ DEBUG_PRINTF("base buf_offset=%llu\n", ci->buf_offset);
+
+ if (end < lit_length) {
+ DEBUG_PRINTF("too short!\n");
+ return 0;
+ }
+
+ // If any portion of the literal matched in the current buffer, check it.
+ if (end > ci->buf_offset) {
+ u32 scan_len = MIN(end - ci->buf_offset, lit_length);
+ u64a scan_start = end - ci->buf_offset - scan_len;
+ DEBUG_PRINTF("checking suffix (%u bytes) in buf[%llu:%llu]\n", scan_len,
+ scan_start, end);
+ if (cmpForward(ci->buf + scan_start, lit + lit_length - scan_len,
+ scan_len, nocase)) {
+ DEBUG_PRINTF("cmp of suffix failed\n");
+ return 0;
+ }
+ }
+
+ // If the entirety of the literal was in the current block, we are done.
+ if (end - lit_length >= ci->buf_offset) {
+ DEBUG_PRINTF("literal confirmed in current block\n");
+ return 1;
+ }
+
+ // We still have a prefix which we must test against the history buffer.
+ assert(t->mode != HS_MODE_BLOCK);
+
+ u64a lit_start_offset = end - lit_length;
+ u32 prefix_len = MIN(lit_length, ci->buf_offset - lit_start_offset);
+ u32 hist_rewind = ci->buf_offset - lit_start_offset;
+ DEBUG_PRINTF("hlen=%zu, hist_rewind=%u\n", ci->hlen, hist_rewind);
+
+ // History length check required for confirm in the EOD and delayed
+ // rebuild paths.
+ if (hist_rewind > ci->hlen) {
+ DEBUG_PRINTF("not enough history\n");
+ return 0;
+ }
+
+ DEBUG_PRINTF("check prefix len=%u from hist (len %zu, rewind %u)\n",
+ prefix_len, ci->hlen, hist_rewind);
+ assert(hist_rewind <= ci->hlen);
+ if (cmpForward(ci->hbuf + ci->hlen - hist_rewind, lit, prefix_len,
+ nocase)) {
+ DEBUG_PRINTF("cmp of prefix failed\n");
+ return 0;
+ }
+
+ DEBUG_PRINTF("cmp succeeded\n");
+ return 1;
+}
+
+static
+void updateSeqPoint(struct RoseContext *tctxt, u64a offset,
+ const char from_mpv) {
+ if (from_mpv) {
+ updateMinMatchOffsetFromMpv(tctxt, offset);
+ } else {
+ updateMinMatchOffset(tctxt, offset);
+ }
+}
+
+static rose_inline
+hwlmcb_rv_t flushActiveCombinations(const struct RoseEngine *t,
+ struct hs_scratch *scratch) {
+ u8 *cvec = (u8 *)scratch->core_info.combVector;
+ if (!mmbit_any(cvec, t->ckeyCount)) {
+ return HWLM_CONTINUE_MATCHING;
+ }
+ u64a end = scratch->tctxt.lastCombMatchOffset;
+ for (u32 i = mmbit_iterate(cvec, t->ckeyCount, MMB_INVALID);
+ i != MMB_INVALID; i = mmbit_iterate(cvec, t->ckeyCount, i)) {
+ const struct CombInfo *combInfoMap = (const struct CombInfo *)
+ ((const char *)t + t->combInfoMapOffset);
+ const struct CombInfo *ci = combInfoMap + i;
+ if ((ci->min_offset != 0) && (end < ci->min_offset)) {
+ DEBUG_PRINTF("halt: before min_offset=%llu\n", ci->min_offset);
+ continue;
+ }
+ if ((ci->max_offset != MAX_OFFSET) && (end > ci->max_offset)) {
+ DEBUG_PRINTF("halt: after max_offset=%llu\n", ci->max_offset);
+ continue;
+ }
+
+ DEBUG_PRINTF("check ekey %u\n", ci->ekey);
+ if (ci->ekey != INVALID_EKEY) {
+ assert(ci->ekey < t->ekeyCount);
+ const char *evec = scratch->core_info.exhaustionVector;
+ if (isExhausted(t, evec, ci->ekey)) {
+ DEBUG_PRINTF("ekey %u already set, match is exhausted\n",
+ ci->ekey);
+ continue;
+ }
+ }
+
+ DEBUG_PRINTF("check ckey %u\n", i);
+ char *lvec = scratch->core_info.logicalVector;
+ if (!isLogicalCombination(t, lvec, ci->start, ci->result)) {
+ DEBUG_PRINTF("Logical Combination Failed!\n");
+ continue;
+ }
+
+ DEBUG_PRINTF("Logical Combination Passed!\n");
+ if (roseReportComb(t, scratch, end, ci->id, 0,
+ ci->ekey) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ clearCvec(t, (char *)cvec);
+ return HWLM_CONTINUE_MATCHING;
+}
+
+static rose_inline
+hwlmcb_rv_t checkPurelyNegatives(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end) {
+ for (u32 i = 0; i < t->ckeyCount; i++) {
+ const struct CombInfo *combInfoMap = (const struct CombInfo *)
+ ((const char *)t + t->combInfoMapOffset);
+ const struct CombInfo *ci = combInfoMap + i;
+ if ((ci->min_offset != 0) && (end < ci->min_offset)) {
+ DEBUG_PRINTF("halt: before min_offset=%llu\n", ci->min_offset);
+ continue;
+ }
+ if ((ci->max_offset != MAX_OFFSET) && (end > ci->max_offset)) {
+ DEBUG_PRINTF("halt: after max_offset=%llu\n", ci->max_offset);
+ continue;
+ }
+
+ DEBUG_PRINTF("check ekey %u\n", ci->ekey);
+ if (ci->ekey != INVALID_EKEY) {
+ assert(ci->ekey < t->ekeyCount);
+ const char *evec = scratch->core_info.exhaustionVector;
+ if (isExhausted(t, evec, ci->ekey)) {
+ DEBUG_PRINTF("ekey %u already set, match is exhausted\n",
+ ci->ekey);
+ continue;
+ }
+ }
+
+ DEBUG_PRINTF("check ckey %u purely negative\n", i);
+ char *lvec = scratch->core_info.logicalVector;
+ if (!isPurelyNegativeMatch(t, lvec, ci->start, ci->result)) {
+ DEBUG_PRINTF("Logical Combination from purely negative Failed!\n");
+ continue;
+ }
+
+ DEBUG_PRINTF("Logical Combination from purely negative Passed!\n");
+ if (roseReportComb(t, scratch, end, ci->id, 0,
+ ci->ekey) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ return HWLM_CONTINUE_MATCHING;
+}
+
+#if !defined(_WIN32)
+#define PROGRAM_CASE(name) \
+ case ROSE_INSTR_##name: { \
+ LABEL_ROSE_INSTR_##name: \
+ DEBUG_PRINTF("instruction: " #name " (pc=%u)\n", \
+ programOffset + (u32)(pc - pc_base)); \
+ const struct ROSE_STRUCT_##name *ri = \
+ (const struct ROSE_STRUCT_##name *)pc;
+
+#define PROGRAM_NEXT_INSTRUCTION \
+ pc += ROUNDUP_N(sizeof(*ri), ROSE_INSTR_MIN_ALIGN); \
+ goto *(next_instr[*(const u8 *)pc]); \
+ }
+
+#define PROGRAM_NEXT_INSTRUCTION_JUMP \
+ goto *(next_instr[*(const u8 *)pc]);
+#else
+#define PROGRAM_CASE(name) \
+ case ROSE_INSTR_##name: { \
+ DEBUG_PRINTF("instruction: " #name " (pc=%u)\n", \
+ programOffset + (u32)(pc - pc_base)); \
+ const struct ROSE_STRUCT_##name *ri = \
+ (const struct ROSE_STRUCT_##name *)pc;
+
+#define PROGRAM_NEXT_INSTRUCTION \
+ pc += ROUNDUP_N(sizeof(*ri), ROSE_INSTR_MIN_ALIGN); \
+ break; \
+ }
+
+#define PROGRAM_NEXT_INSTRUCTION_JUMP continue;
+#endif
+
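+/*
+ * The two macro variants above express the same dispatch loop in two
+ * ways: a conventional switch in a loop on Windows, and GCC/Clang
+ * computed goto ("labels as values") elsewhere, which jumps directly
+ * from one handler to the next. A minimal standalone model of the
+ * computed-goto form, using toy opcodes rather than Rose instructions:
+ */
+#if !defined(_WIN32)
+static UNUSED
+int dispatchSketch(const u8 *prog) {
+    static const void *tbl[] = { &&do_halt, &&do_nop };
+    const u8 *pc = prog;        /* prog must end with opcode 0 (halt) */
+    goto *tbl[*pc];
+do_nop:
+    pc++;                       /* consume the NOP... */
+    goto *tbl[*pc];             /* ...and dispatch the next opcode */
+do_halt:
+    return 0;
+}
+#endif
+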
hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t,
struct hs_scratch *scratch, u32 programOffset,
u64a som, u64a end, u8 prog_flags) {
- DEBUG_PRINTF("program=%u, offsets [%llu,%llu], flags=%u\n", programOffset,
- som, end, prog_flags);
-
- assert(programOffset != ROSE_INVALID_PROG_OFFSET);
- assert(programOffset >= sizeof(struct RoseEngine));
- assert(programOffset < t->size);
-
- const char in_anchored = prog_flags & ROSE_PROG_FLAG_IN_ANCHORED;
- const char in_catchup = prog_flags & ROSE_PROG_FLAG_IN_CATCHUP;
- const char from_mpv = prog_flags & ROSE_PROG_FLAG_FROM_MPV;
- const char skip_mpv_catchup = prog_flags & ROSE_PROG_FLAG_SKIP_MPV_CATCHUP;
-
- const char *pc_base = getByOffset(t, programOffset);
- const char *pc = pc_base;
-
- // Local sparse iterator state for programs that use the SPARSE_ITER_BEGIN
- // and SPARSE_ITER_NEXT instructions.
- struct mmbit_sparse_state si_state[MAX_SPARSE_ITER_STATES];
-
- // If this program has an effect, work_done will be set to one (which may
- // allow the program to squash groups).
- int work_done = 0;
-
- struct RoseContext *tctxt = &scratch->tctxt;
-
- assert(*(const u8 *)pc != ROSE_INSTR_END);
-
-#if !defined(_WIN32)
- static const void *next_instr[] = {
- &&LABEL_ROSE_INSTR_END, //!< End of program.
- &&LABEL_ROSE_INSTR_ANCHORED_DELAY, //!< Delay until after anchored matcher.
- &&LABEL_ROSE_INSTR_CHECK_LIT_EARLY, //!< Skip matches before floating min offset.
- &&LABEL_ROSE_INSTR_CHECK_GROUPS, //!< Check that literal groups are on.
- &&LABEL_ROSE_INSTR_CHECK_ONLY_EOD, //!< Role matches only at EOD.
- &&LABEL_ROSE_INSTR_CHECK_BOUNDS, //!< Bounds on distance from offset 0.
- &&LABEL_ROSE_INSTR_CHECK_NOT_HANDLED, //!< Test & set role in "handled".
- &&LABEL_ROSE_INSTR_CHECK_SINGLE_LOOKAROUND, //!< Single lookaround check.
- &&LABEL_ROSE_INSTR_CHECK_LOOKAROUND, //!< Lookaround check.
- &&LABEL_ROSE_INSTR_CHECK_MASK, //!< 8-bytes mask check.
- &&LABEL_ROSE_INSTR_CHECK_MASK_32, //!< 32-bytes and/cmp/neg mask check.
- &&LABEL_ROSE_INSTR_CHECK_BYTE, //!< Single Byte check.
- &&LABEL_ROSE_INSTR_CHECK_SHUFTI_16x8, //!< Check 16-byte data by 8-bucket shufti.
- &&LABEL_ROSE_INSTR_CHECK_SHUFTI_32x8, //!< Check 32-byte data by 8-bucket shufti.
- &&LABEL_ROSE_INSTR_CHECK_SHUFTI_16x16, //!< Check 16-byte data by 16-bucket shufti.
- &&LABEL_ROSE_INSTR_CHECK_SHUFTI_32x16, //!< Check 32-byte data by 16-bucket shufti.
- &&LABEL_ROSE_INSTR_CHECK_INFIX, //!< Infix engine must be in accept state.
- &&LABEL_ROSE_INSTR_CHECK_PREFIX, //!< Prefix engine must be in accept state.
- &&LABEL_ROSE_INSTR_PUSH_DELAYED, //!< Push delayed literal matches.
-        &&LABEL_ROSE_INSTR_DUMMY_NOP, //!< NOP. Should not exist in built programs.
- &&LABEL_ROSE_INSTR_CATCH_UP, //!< Catch up engines, anchored matches.
- &&LABEL_ROSE_INSTR_CATCH_UP_MPV, //!< Catch up the MPV.
- &&LABEL_ROSE_INSTR_SOM_ADJUST, //!< Set SOM from a distance to EOM.
- &&LABEL_ROSE_INSTR_SOM_LEFTFIX, //!< Acquire SOM from a leftfix engine.
- &&LABEL_ROSE_INSTR_SOM_FROM_REPORT, //!< Acquire SOM from a som_operation.
- &&LABEL_ROSE_INSTR_SOM_ZERO, //!< Set SOM to zero.
- &&LABEL_ROSE_INSTR_TRIGGER_INFIX, //!< Trigger an infix engine.
- &&LABEL_ROSE_INSTR_TRIGGER_SUFFIX, //!< Trigger a suffix engine.
- &&LABEL_ROSE_INSTR_DEDUPE, //!< Run deduplication for report.
- &&LABEL_ROSE_INSTR_DEDUPE_SOM, //!< Run deduplication for SOM report.
- &&LABEL_ROSE_INSTR_REPORT_CHAIN, //!< Fire a chained report (MPV).
- &&LABEL_ROSE_INSTR_REPORT_SOM_INT, //!< Manipulate SOM only.
- &&LABEL_ROSE_INSTR_REPORT_SOM_AWARE, //!< Manipulate SOM from SOM-aware source.
- &&LABEL_ROSE_INSTR_REPORT,
- &&LABEL_ROSE_INSTR_REPORT_EXHAUST,
- &&LABEL_ROSE_INSTR_REPORT_SOM,
- &&LABEL_ROSE_INSTR_REPORT_SOM_EXHAUST,
- &&LABEL_ROSE_INSTR_DEDUPE_AND_REPORT,
- &&LABEL_ROSE_INSTR_FINAL_REPORT,
- &&LABEL_ROSE_INSTR_CHECK_EXHAUSTED, //!< Check if an ekey has already been set.
- &&LABEL_ROSE_INSTR_CHECK_MIN_LENGTH, //!< Check (EOM - SOM) against min length.
- &&LABEL_ROSE_INSTR_SET_STATE, //!< Switch a state index on.
- &&LABEL_ROSE_INSTR_SET_GROUPS, //!< Set some literal group bits.
- &&LABEL_ROSE_INSTR_SQUASH_GROUPS, //!< Conditionally turn off some groups.
- &&LABEL_ROSE_INSTR_CHECK_STATE, //!< Test a single bit in the state multibit.
- &&LABEL_ROSE_INSTR_SPARSE_ITER_BEGIN, //!< Begin running a sparse iter over states.
- &&LABEL_ROSE_INSTR_SPARSE_ITER_NEXT, //!< Continue running sparse iter over states.
- &&LABEL_ROSE_INSTR_SPARSE_ITER_ANY, //!< Test for any bit in the sparse iterator.
- &&LABEL_ROSE_INSTR_ENGINES_EOD,
- &&LABEL_ROSE_INSTR_SUFFIXES_EOD,
- &&LABEL_ROSE_INSTR_MATCHER_EOD,
- &&LABEL_ROSE_INSTR_CHECK_LONG_LIT,
- &&LABEL_ROSE_INSTR_CHECK_LONG_LIT_NOCASE,
- &&LABEL_ROSE_INSTR_CHECK_MED_LIT,
- &&LABEL_ROSE_INSTR_CHECK_MED_LIT_NOCASE,
- &&LABEL_ROSE_INSTR_CLEAR_WORK_DONE,
- &&LABEL_ROSE_INSTR_MULTIPATH_LOOKAROUND,
- &&LABEL_ROSE_INSTR_CHECK_MULTIPATH_SHUFTI_16x8,
- &&LABEL_ROSE_INSTR_CHECK_MULTIPATH_SHUFTI_32x8,
- &&LABEL_ROSE_INSTR_CHECK_MULTIPATH_SHUFTI_32x16,
- &&LABEL_ROSE_INSTR_CHECK_MULTIPATH_SHUFTI_64,
- &&LABEL_ROSE_INSTR_INCLUDED_JUMP,
- &&LABEL_ROSE_INSTR_SET_LOGICAL,
- &&LABEL_ROSE_INSTR_SET_COMBINATION,
- &&LABEL_ROSE_INSTR_FLUSH_COMBINATION,
- &&LABEL_ROSE_INSTR_SET_EXHAUST,
- &&LABEL_ROSE_INSTR_LAST_FLUSH_COMBINATION
-#ifdef HAVE_AVX512
- ,
- &&LABEL_ROSE_INSTR_CHECK_SHUFTI_64x8, //!< Check 64-byte data by 8-bucket shufti.
- &&LABEL_ROSE_INSTR_CHECK_SHUFTI_64x16, //!< Check 64-byte data by 16-bucket shufti.
- &&LABEL_ROSE_INSTR_CHECK_MASK_64 //!< 64-bytes and/cmp/neg mask check.
-#endif
- };
-#endif
-
- for (;;) {
- assert(ISALIGNED_N(pc, ROSE_INSTR_MIN_ALIGN));
- assert(pc >= pc_base);
- assert((size_t)(pc - pc_base) < t->size);
- const u8 code = *(const u8 *)pc;
- assert(code <= LAST_ROSE_INSTRUCTION);
-
- switch ((enum RoseInstructionCode)code) {
- PROGRAM_CASE(END) {
- DEBUG_PRINTF("finished\n");
- return HWLM_CONTINUE_MATCHING;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(ANCHORED_DELAY) {
- if (in_anchored && end > t->floatingMinLiteralMatchOffset) {
- DEBUG_PRINTF("delay until playback\n");
- tctxt->groups |= ri->groups;
- work_done = 1;
- recordAnchoredLiteralMatch(t, scratch, ri->anch_id, end);
-
- assert(ri->done_jump); // must progress
- pc += ri->done_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_LIT_EARLY) {
- if (end < ri->min_offset) {
- DEBUG_PRINTF("halt: before min_offset=%u\n",
- ri->min_offset);
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_GROUPS) {
- DEBUG_PRINTF("groups=0x%llx, checking instr groups=0x%llx\n",
- tctxt->groups, ri->groups);
- if (!(ri->groups & tctxt->groups)) {
- DEBUG_PRINTF("halt: no groups are set\n");
- return HWLM_CONTINUE_MATCHING;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_ONLY_EOD) {
- struct core_info *ci = &scratch->core_info;
- if (end != ci->buf_offset + ci->len) {
- DEBUG_PRINTF("should only match at end of data\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_BOUNDS) {
- if (!roseCheckBounds(end, ri->min_bound, ri->max_bound)) {
- DEBUG_PRINTF("failed bounds check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_NOT_HANDLED) {
- struct fatbit *handled = scratch->handled_roles;
- if (fatbit_set(handled, t->handledKeyCount, ri->key)) {
- DEBUG_PRINTF("key %u already set\n", ri->key);
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_SINGLE_LOOKAROUND) {
- if (!roseCheckSingleLookaround(t, scratch, ri->offset,
- ri->reach_index, end)) {
- DEBUG_PRINTF("failed lookaround check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_LOOKAROUND) {
- if (!roseCheckLookaround(t, scratch, ri->look_index,
- ri->reach_index, ri->count, end)) {
- DEBUG_PRINTF("failed lookaround check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_MASK) {
- struct core_info *ci = &scratch->core_info;
- if (!roseCheckMask(ci, ri->and_mask, ri->cmp_mask,
- ri->neg_mask, ri->offset, end)) {
- DEBUG_PRINTF("failed mask check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_MASK_32) {
- struct core_info *ci = &scratch->core_info;
- if (!roseCheckMask32(ci, ri->and_mask, ri->cmp_mask,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_BYTE) {
- const struct core_info *ci = &scratch->core_info;
- if (!roseCheckByte(ci, ri->and_mask, ri->cmp_mask,
- ri->negation, ri->offset, end)) {
- DEBUG_PRINTF("failed byte check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_SHUFTI_16x8) {
- const struct core_info *ci = &scratch->core_info;
- if (!roseCheckShufti16x8(ci, ri->nib_mask,
- ri->bucket_select_mask,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
-                pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_SHUFTI_32x8) {
- const struct core_info *ci = &scratch->core_info;
- if (!roseCheckShufti32x8(ci, ri->hi_mask, ri->lo_mask,
- ri->bucket_select_mask,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
-                pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_SHUFTI_16x16) {
- const struct core_info *ci = &scratch->core_info;
- if (!roseCheckShufti16x16(ci, ri->hi_mask, ri->lo_mask,
- ri->bucket_select_mask,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
-                pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_SHUFTI_32x16) {
- const struct core_info *ci = &scratch->core_info;
- if (!roseCheckShufti32x16(ci, ri->hi_mask, ri->lo_mask,
- ri->bucket_select_mask_hi,
- ri->bucket_select_mask_lo,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
-                pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
-#ifdef HAVE_AVX512
- PROGRAM_CASE(CHECK_MASK_64) {
- struct core_info *ci = &scratch->core_info;
- if (!roseCheckMask64(ci, ri->and_mask, ri->cmp_mask,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_SHUFTI_64x8) {
- const struct core_info *ci = &scratch->core_info;
- if (!roseCheckShufti64x8(ci, ri->hi_mask, ri->lo_mask,
- ri->bucket_select_mask,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_SHUFTI_64x16) {
- const struct core_info *ci = &scratch->core_info;
- if (!roseCheckShufti64x16(ci, ri->hi_mask_1, ri->hi_mask_2,
- ri->lo_mask_1, ri->lo_mask_2,
- ri->bucket_select_mask_hi,
- ri->bucket_select_mask_lo,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-#endif
-
- PROGRAM_CASE(CHECK_INFIX) {
- if (!roseTestInfix(t, scratch, ri->queue, ri->lag, ri->report,
- end)) {
- DEBUG_PRINTF("failed infix check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_PREFIX) {
- if (!roseTestPrefix(t, scratch, ri->queue, ri->lag, ri->report,
- end)) {
- DEBUG_PRINTF("failed prefix check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(PUSH_DELAYED) {
- rosePushDelayedMatch(t, scratch, ri->delay, ri->index, end);
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(DUMMY_NOP) {
- assert(0);
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CATCH_UP) {
- if (roseCatchUpTo(t, scratch, end) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CATCH_UP_MPV) {
- if (from_mpv || skip_mpv_catchup) {
- DEBUG_PRINTF("skipping mpv catchup\n");
- } else if (roseCatchUpMPV(t,
- end - scratch->core_info.buf_offset,
- scratch) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SOM_ADJUST) {
- assert(ri->distance <= end);
- som = end - ri->distance;
- DEBUG_PRINTF("som is (end - %u) = %llu\n", ri->distance, som);
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SOM_LEFTFIX) {
- som = roseGetHaigSom(t, scratch, ri->queue, ri->lag);
- DEBUG_PRINTF("som from leftfix is %llu\n", som);
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SOM_FROM_REPORT) {
- som = handleSomExternal(scratch, &ri->som, end);
- DEBUG_PRINTF("som from report %u is %llu\n", ri->som.onmatch,
- som);
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SOM_ZERO) {
- DEBUG_PRINTF("setting SOM to zero\n");
- som = 0;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(TRIGGER_INFIX) {
- roseTriggerInfix(t, scratch, som, end, ri->queue, ri->event,
- ri->cancel);
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(TRIGGER_SUFFIX) {
- if (roseTriggerSuffix(t, scratch, ri->queue, ri->event, som,
- end) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(DEDUPE) {
- updateSeqPoint(tctxt, end, from_mpv);
- const char do_som = t->hasSom; // TODO: constant propagate
- const char is_external_report = 1;
- enum DedupeResult rv =
- dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
- ri->dkey, ri->offset_adjust,
- is_external_report, ri->quash_som, do_som);
- switch (rv) {
- case DEDUPE_HALT:
- return HWLM_TERMINATE_MATCHING;
- case DEDUPE_SKIP:
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- case DEDUPE_CONTINUE:
- break;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(DEDUPE_SOM) {
- updateSeqPoint(tctxt, end, from_mpv);
- const char is_external_report = 0;
- const char do_som = 1;
- enum DedupeResult rv =
- dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
- ri->dkey, ri->offset_adjust,
- is_external_report, ri->quash_som, do_som);
- switch (rv) {
- case DEDUPE_HALT:
- return HWLM_TERMINATE_MATCHING;
- case DEDUPE_SKIP:
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- case DEDUPE_CONTINUE:
- break;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(REPORT_CHAIN) {
- // Note: sequence points updated inside this function.
- if (roseCatchUpAndHandleChainMatch(
- t, scratch, ri->event, ri->top_squash_distance, end,
- in_catchup) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(REPORT_SOM_INT) {
- updateSeqPoint(tctxt, end, from_mpv);
- roseHandleSom(scratch, &ri->som, end);
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(REPORT_SOM_AWARE) {
- updateSeqPoint(tctxt, end, from_mpv);
- roseHandleSomSom(scratch, &ri->som, som, end);
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(REPORT) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
- INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(REPORT_EXHAUST) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
- ri->ekey) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(REPORT_SOM) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseReportSom(t, scratch, som, end, ri->onmatch,
- ri->offset_adjust,
- INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(REPORT_SOM_EXHAUST) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseReportSom(t, scratch, som, end, ri->onmatch,
- ri->offset_adjust,
- ri->ekey) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(DEDUPE_AND_REPORT) {
- updateSeqPoint(tctxt, end, from_mpv);
- const char do_som = t->hasSom; // TODO: constant propagate
- const char is_external_report = 1;
- enum DedupeResult rv =
- dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
- ri->dkey, ri->offset_adjust,
- is_external_report, ri->quash_som, do_som);
- switch (rv) {
- case DEDUPE_HALT:
- return HWLM_TERMINATE_MATCHING;
- case DEDUPE_SKIP:
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- case DEDUPE_CONTINUE:
- break;
- }
-
- const u32 ekey = INVALID_EKEY;
- if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
- ekey) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(FINAL_REPORT) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
- INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- /* One-shot specialisation: this instruction always terminates
- * execution of the program. */
- return HWLM_CONTINUE_MATCHING;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_EXHAUSTED) {
- DEBUG_PRINTF("check ekey %u\n", ri->ekey);
- assert(ri->ekey != INVALID_EKEY);
- assert(ri->ekey < t->ekeyCount);
- const char *evec = scratch->core_info.exhaustionVector;
- if (isExhausted(t, evec, ri->ekey)) {
- DEBUG_PRINTF("ekey %u already set, match is exhausted\n",
- ri->ekey);
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_MIN_LENGTH) {
- DEBUG_PRINTF("check min length %llu (adj %d)\n", ri->min_length,
- ri->end_adj);
- assert(ri->min_length > 0);
- assert(ri->end_adj == 0 || ri->end_adj == -1);
- assert(som == HS_OFFSET_PAST_HORIZON || som <= end);
- if (som != HS_OFFSET_PAST_HORIZON &&
- ((end + ri->end_adj) - som < ri->min_length)) {
- DEBUG_PRINTF("failed check, match len %llu\n",
- (u64a)((end + ri->end_adj) - som));
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SET_STATE) {
- DEBUG_PRINTF("set state index %u\n", ri->index);
- mmbit_set(getRoleState(scratch->core_info.state),
- t->rolesWithStateCount, ri->index);
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SET_GROUPS) {
- tctxt->groups |= ri->groups;
- DEBUG_PRINTF("set groups 0x%llx -> 0x%llx\n", ri->groups,
- tctxt->groups);
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SQUASH_GROUPS) {
- assert(popcount64(ri->groups) == 63); // Squash only one group.
- if (work_done) {
- tctxt->groups &= ri->groups;
- DEBUG_PRINTF("squash groups 0x%llx -> 0x%llx\n", ri->groups,
- tctxt->groups);
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_STATE) {
- DEBUG_PRINTF("check state %u\n", ri->index);
- const u8 *roles = getRoleState(scratch->core_info.state);
- if (!mmbit_isset(roles, t->rolesWithStateCount, ri->index)) {
- DEBUG_PRINTF("state not on\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SPARSE_ITER_BEGIN) {
- DEBUG_PRINTF("iter_offset=%u\n", ri->iter_offset);
- const struct mmbit_sparse_iter *it =
- getByOffset(t, ri->iter_offset);
- assert(ISALIGNED(it));
-
- const u8 *roles = getRoleState(scratch->core_info.state);
-
- u32 idx = 0;
- u32 i = mmbit_sparse_iter_begin(roles, t->rolesWithStateCount,
- &idx, it, si_state);
- if (i == MMB_INVALID) {
- DEBUG_PRINTF("no states in sparse iter are on\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
-
- fatbit_clear(scratch->handled_roles);
-
- const u32 *jumps = getByOffset(t, ri->jump_table);
- DEBUG_PRINTF("state %u (idx=%u) is on, jump to %u\n", i, idx,
- jumps[idx]);
- pc = pc_base + jumps[idx];
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SPARSE_ITER_NEXT) {
- DEBUG_PRINTF("iter_offset=%u, state=%u\n", ri->iter_offset,
- ri->state);
- const struct mmbit_sparse_iter *it =
- getByOffset(t, ri->iter_offset);
- assert(ISALIGNED(it));
-
- const u8 *roles = getRoleState(scratch->core_info.state);
-
- u32 idx = 0;
- u32 i = mmbit_sparse_iter_next(roles, t->rolesWithStateCount,
- ri->state, &idx, it, si_state);
- if (i == MMB_INVALID) {
- DEBUG_PRINTF("no more states in sparse iter are on\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
-
- const u32 *jumps = getByOffset(t, ri->jump_table);
- DEBUG_PRINTF("state %u (idx=%u) is on, jump to %u\n", i, idx,
- jumps[idx]);
- pc = pc_base + jumps[idx];
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SPARSE_ITER_ANY) {
- DEBUG_PRINTF("iter_offset=%u\n", ri->iter_offset);
- const struct mmbit_sparse_iter *it =
- getByOffset(t, ri->iter_offset);
- assert(ISALIGNED(it));
-
- const u8 *roles = getRoleState(scratch->core_info.state);
-
- u32 idx = 0;
- u32 i = mmbit_sparse_iter_begin(roles, t->rolesWithStateCount,
- &idx, it, si_state);
- if (i == MMB_INVALID) {
- DEBUG_PRINTF("no states in sparse iter are on\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- DEBUG_PRINTF("state %u (idx=%u) is on\n", i, idx);
- fatbit_clear(scratch->handled_roles);
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(ENGINES_EOD) {
- if (roseEnginesEod(t, scratch, end, ri->iter_offset) ==
- HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SUFFIXES_EOD) {
- if (roseSuffixesEod(t, scratch, end) ==
- HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(MATCHER_EOD) {
- if (roseMatcherEod(t, scratch, end) ==
- HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_LONG_LIT) {
- const char nocase = 0;
- if (!roseCheckLongLiteral(t, scratch, end, ri->lit_offset,
- ri->lit_length, nocase)) {
- DEBUG_PRINTF("failed long lit check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_LONG_LIT_NOCASE) {
- const char nocase = 1;
- if (!roseCheckLongLiteral(t, scratch, end, ri->lit_offset,
- ri->lit_length, nocase)) {
- DEBUG_PRINTF("failed nocase long lit check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_MED_LIT) {
- const char nocase = 0;
- if (!roseCheckMediumLiteral(t, scratch, end, ri->lit_offset,
- ri->lit_length, nocase)) {
-                DEBUG_PRINTF("failed medium lit check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_MED_LIT_NOCASE) {
- const char nocase = 1;
- if (!roseCheckMediumLiteral(t, scratch, end, ri->lit_offset,
- ri->lit_length, nocase)) {
-                DEBUG_PRINTF("failed nocase medium lit check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CLEAR_WORK_DONE) {
- DEBUG_PRINTF("clear work_done flag\n");
- work_done = 0;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(MULTIPATH_LOOKAROUND) {
- if (!roseMultipathLookaround(t, scratch, ri->look_index,
- ri->reach_index, ri->count,
- ri->last_start, ri->start_mask,
- end)) {
- DEBUG_PRINTF("failed multi-path lookaround check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_MULTIPATH_SHUFTI_16x8) {
- if (!roseCheckMultipathShufti16x8(scratch, ri, end)) {
- DEBUG_PRINTF("failed multi-path shufti 16x8 check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_MULTIPATH_SHUFTI_32x8) {
- if (!roseCheckMultipathShufti32x8(scratch, ri, end)) {
- DEBUG_PRINTF("failed multi-path shufti 32x8 check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_MULTIPATH_SHUFTI_32x16) {
- if (!roseCheckMultipathShufti32x16(scratch, ri, end)) {
- DEBUG_PRINTF("failed multi-path shufti 32x16 check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(CHECK_MULTIPATH_SHUFTI_64) {
- if (!roseCheckMultipathShufti64(scratch, ri, end)) {
- DEBUG_PRINTF("failed multi-path shufti 64 check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(INCLUDED_JUMP) {
- if (scratch->fdr_conf) {
-                // squash the bucket of the included literal
- u8 shift = scratch->fdr_conf_offset & ~7U;
- u64a mask = ((~(u64a)ri->squash) << shift);
- *(scratch->fdr_conf) &= mask;
-
- pc = getByOffset(t, ri->child_offset);
- pc_base = pc;
-                programOffset = (const u8 *)pc_base - (const u8 *)t;
- DEBUG_PRINTF("pc_base %p pc %p child_offset %u squash %u\n",
- pc_base, pc, ri->child_offset, ri->squash);
- work_done = 0;
- PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SET_LOGICAL) {
- DEBUG_PRINTF("set logical value of lkey %u, offset_adjust=%d\n",
- ri->lkey, ri->offset_adjust);
- assert(ri->lkey != INVALID_LKEY);
- assert(ri->lkey < t->lkeyCount);
- char *lvec = scratch->core_info.logicalVector;
- setLogicalVal(t, lvec, ri->lkey, 1);
- updateLastCombMatchOffset(tctxt, end + ri->offset_adjust);
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SET_COMBINATION) {
- DEBUG_PRINTF("set ckey %u as active\n", ri->ckey);
- assert(ri->ckey != INVALID_CKEY);
- assert(ri->ckey < t->ckeyCount);
- char *cvec = scratch->core_info.combVector;
- setCombinationActive(t, cvec, ri->ckey);
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(FLUSH_COMBINATION) {
- assert(end >= tctxt->lastCombMatchOffset);
- if (end > tctxt->lastCombMatchOffset) {
- if (flushActiveCombinations(t, scratch)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(SET_EXHAUST) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseSetExhaust(t, scratch, ri->ekey)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- PROGRAM_NEXT_INSTRUCTION
-
- PROGRAM_CASE(LAST_FLUSH_COMBINATION) {
- assert(end >= tctxt->lastCombMatchOffset);
- if (flushActiveCombinations(t, scratch)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- if (checkPurelyNegatives(t, scratch, end)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- PROGRAM_NEXT_INSTRUCTION
-
- default: {
- assert(0); // unreachable
- scratch->core_info.status |= STATUS_ERROR;
- return HWLM_TERMINATE_MATCHING;
- }
- }
- }
-
- assert(0); // unreachable
- return HWLM_CONTINUE_MATCHING;
+ DEBUG_PRINTF("program=%u, offsets [%llu,%llu], flags=%u\n", programOffset,
+ som, end, prog_flags);
+
+ assert(programOffset != ROSE_INVALID_PROG_OFFSET);
+ assert(programOffset >= sizeof(struct RoseEngine));
+ assert(programOffset < t->size);
+
+ const char in_anchored = prog_flags & ROSE_PROG_FLAG_IN_ANCHORED;
+ const char in_catchup = prog_flags & ROSE_PROG_FLAG_IN_CATCHUP;
+ const char from_mpv = prog_flags & ROSE_PROG_FLAG_FROM_MPV;
+ const char skip_mpv_catchup = prog_flags & ROSE_PROG_FLAG_SKIP_MPV_CATCHUP;
+
+ const char *pc_base = getByOffset(t, programOffset);
+ const char *pc = pc_base;
+
+ // Local sparse iterator state for programs that use the SPARSE_ITER_BEGIN
+ // and SPARSE_ITER_NEXT instructions.
+ struct mmbit_sparse_state si_state[MAX_SPARSE_ITER_STATES];
+
+ // If this program has an effect, work_done will be set to one (which may
+ // allow the program to squash groups).
+ int work_done = 0;
+
+ struct RoseContext *tctxt = &scratch->tctxt;
+
+ assert(*(const u8 *)pc != ROSE_INSTR_END);
+
+#if !defined(_WIN32)
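+    /* A note on dispatch (a reading of the code, not upstream documentation):
+     * on non-Windows builds this table presumably backs a computed-goto
+     * (labels-as-values) dispatcher, mapping each RoseInstructionCode to its
+     * handler label; entries must stay in enum order. */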
+ static const void *next_instr[] = {
+ &&LABEL_ROSE_INSTR_END, //!< End of program.
+ &&LABEL_ROSE_INSTR_ANCHORED_DELAY, //!< Delay until after anchored matcher.
+ &&LABEL_ROSE_INSTR_CHECK_LIT_EARLY, //!< Skip matches before floating min offset.
+ &&LABEL_ROSE_INSTR_CHECK_GROUPS, //!< Check that literal groups are on.
+ &&LABEL_ROSE_INSTR_CHECK_ONLY_EOD, //!< Role matches only at EOD.
+ &&LABEL_ROSE_INSTR_CHECK_BOUNDS, //!< Bounds on distance from offset 0.
+ &&LABEL_ROSE_INSTR_CHECK_NOT_HANDLED, //!< Test & set role in "handled".
+ &&LABEL_ROSE_INSTR_CHECK_SINGLE_LOOKAROUND, //!< Single lookaround check.
+ &&LABEL_ROSE_INSTR_CHECK_LOOKAROUND, //!< Lookaround check.
+        &&LABEL_ROSE_INSTR_CHECK_MASK, //!< 8-byte mask check.
+        &&LABEL_ROSE_INSTR_CHECK_MASK_32, //!< 32-byte and/cmp/neg mask check.
+        &&LABEL_ROSE_INSTR_CHECK_BYTE, //!< Single byte check.
+ &&LABEL_ROSE_INSTR_CHECK_SHUFTI_16x8, //!< Check 16-byte data by 8-bucket shufti.
+ &&LABEL_ROSE_INSTR_CHECK_SHUFTI_32x8, //!< Check 32-byte data by 8-bucket shufti.
+ &&LABEL_ROSE_INSTR_CHECK_SHUFTI_16x16, //!< Check 16-byte data by 16-bucket shufti.
+ &&LABEL_ROSE_INSTR_CHECK_SHUFTI_32x16, //!< Check 32-byte data by 16-bucket shufti.
+ &&LABEL_ROSE_INSTR_CHECK_INFIX, //!< Infix engine must be in accept state.
+ &&LABEL_ROSE_INSTR_CHECK_PREFIX, //!< Prefix engine must be in accept state.
+ &&LABEL_ROSE_INSTR_PUSH_DELAYED, //!< Push delayed literal matches.
+        &&LABEL_ROSE_INSTR_DUMMY_NOP, //!< NOP. Should not exist in built programs.
+ &&LABEL_ROSE_INSTR_CATCH_UP, //!< Catch up engines, anchored matches.
+ &&LABEL_ROSE_INSTR_CATCH_UP_MPV, //!< Catch up the MPV.
+ &&LABEL_ROSE_INSTR_SOM_ADJUST, //!< Set SOM from a distance to EOM.
+ &&LABEL_ROSE_INSTR_SOM_LEFTFIX, //!< Acquire SOM from a leftfix engine.
+ &&LABEL_ROSE_INSTR_SOM_FROM_REPORT, //!< Acquire SOM from a som_operation.
+ &&LABEL_ROSE_INSTR_SOM_ZERO, //!< Set SOM to zero.
+ &&LABEL_ROSE_INSTR_TRIGGER_INFIX, //!< Trigger an infix engine.
+ &&LABEL_ROSE_INSTR_TRIGGER_SUFFIX, //!< Trigger a suffix engine.
+ &&LABEL_ROSE_INSTR_DEDUPE, //!< Run deduplication for report.
+ &&LABEL_ROSE_INSTR_DEDUPE_SOM, //!< Run deduplication for SOM report.
+ &&LABEL_ROSE_INSTR_REPORT_CHAIN, //!< Fire a chained report (MPV).
+ &&LABEL_ROSE_INSTR_REPORT_SOM_INT, //!< Manipulate SOM only.
+ &&LABEL_ROSE_INSTR_REPORT_SOM_AWARE, //!< Manipulate SOM from SOM-aware source.
+ &&LABEL_ROSE_INSTR_REPORT,
+ &&LABEL_ROSE_INSTR_REPORT_EXHAUST,
+ &&LABEL_ROSE_INSTR_REPORT_SOM,
+ &&LABEL_ROSE_INSTR_REPORT_SOM_EXHAUST,
+ &&LABEL_ROSE_INSTR_DEDUPE_AND_REPORT,
+ &&LABEL_ROSE_INSTR_FINAL_REPORT,
+ &&LABEL_ROSE_INSTR_CHECK_EXHAUSTED, //!< Check if an ekey has already been set.
+ &&LABEL_ROSE_INSTR_CHECK_MIN_LENGTH, //!< Check (EOM - SOM) against min length.
+ &&LABEL_ROSE_INSTR_SET_STATE, //!< Switch a state index on.
+ &&LABEL_ROSE_INSTR_SET_GROUPS, //!< Set some literal group bits.
+ &&LABEL_ROSE_INSTR_SQUASH_GROUPS, //!< Conditionally turn off some groups.
+ &&LABEL_ROSE_INSTR_CHECK_STATE, //!< Test a single bit in the state multibit.
+ &&LABEL_ROSE_INSTR_SPARSE_ITER_BEGIN, //!< Begin running a sparse iter over states.
+ &&LABEL_ROSE_INSTR_SPARSE_ITER_NEXT, //!< Continue running sparse iter over states.
+ &&LABEL_ROSE_INSTR_SPARSE_ITER_ANY, //!< Test for any bit in the sparse iterator.
+ &&LABEL_ROSE_INSTR_ENGINES_EOD,
+ &&LABEL_ROSE_INSTR_SUFFIXES_EOD,
+ &&LABEL_ROSE_INSTR_MATCHER_EOD,
+ &&LABEL_ROSE_INSTR_CHECK_LONG_LIT,
+ &&LABEL_ROSE_INSTR_CHECK_LONG_LIT_NOCASE,
+ &&LABEL_ROSE_INSTR_CHECK_MED_LIT,
+ &&LABEL_ROSE_INSTR_CHECK_MED_LIT_NOCASE,
+ &&LABEL_ROSE_INSTR_CLEAR_WORK_DONE,
+ &&LABEL_ROSE_INSTR_MULTIPATH_LOOKAROUND,
+ &&LABEL_ROSE_INSTR_CHECK_MULTIPATH_SHUFTI_16x8,
+ &&LABEL_ROSE_INSTR_CHECK_MULTIPATH_SHUFTI_32x8,
+ &&LABEL_ROSE_INSTR_CHECK_MULTIPATH_SHUFTI_32x16,
+ &&LABEL_ROSE_INSTR_CHECK_MULTIPATH_SHUFTI_64,
+ &&LABEL_ROSE_INSTR_INCLUDED_JUMP,
+ &&LABEL_ROSE_INSTR_SET_LOGICAL,
+ &&LABEL_ROSE_INSTR_SET_COMBINATION,
+ &&LABEL_ROSE_INSTR_FLUSH_COMBINATION,
+ &&LABEL_ROSE_INSTR_SET_EXHAUST,
+ &&LABEL_ROSE_INSTR_LAST_FLUSH_COMBINATION
+#ifdef HAVE_AVX512
+ ,
+ &&LABEL_ROSE_INSTR_CHECK_SHUFTI_64x8, //!< Check 64-byte data by 8-bucket shufti.
+ &&LABEL_ROSE_INSTR_CHECK_SHUFTI_64x16, //!< Check 64-byte data by 16-bucket shufti.
+        &&LABEL_ROSE_INSTR_CHECK_MASK_64 //!< 64-byte and/cmp/neg mask check.
+#endif
+ };
+#endif
+
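+    // Each PROGRAM_CASE body either falls through PROGRAM_NEXT_INSTRUCTION,
+    // which advances pc by the instruction's aligned size, or updates pc
+    // itself and resumes via PROGRAM_NEXT_INSTRUCTION_JUMP.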
+ for (;;) {
+ assert(ISALIGNED_N(pc, ROSE_INSTR_MIN_ALIGN));
+ assert(pc >= pc_base);
+ assert((size_t)(pc - pc_base) < t->size);
+ const u8 code = *(const u8 *)pc;
+ assert(code <= LAST_ROSE_INSTRUCTION);
+
+ switch ((enum RoseInstructionCode)code) {
+ PROGRAM_CASE(END) {
+ DEBUG_PRINTF("finished\n");
+ return HWLM_CONTINUE_MATCHING;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(ANCHORED_DELAY) {
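+            // An anchored-matcher match beyond the floating matcher's
+            // progress is recorded here and replayed ("playback") once the
+            // floating matcher catches up.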
+ if (in_anchored && end > t->floatingMinLiteralMatchOffset) {
+ DEBUG_PRINTF("delay until playback\n");
+ tctxt->groups |= ri->groups;
+ work_done = 1;
+ recordAnchoredLiteralMatch(t, scratch, ri->anch_id, end);
+
+ assert(ri->done_jump); // must progress
+ pc += ri->done_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_LIT_EARLY) {
+ if (end < ri->min_offset) {
+                DEBUG_PRINTF("fail: before min_offset=%u\n",
+ ri->min_offset);
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_GROUPS) {
+ DEBUG_PRINTF("groups=0x%llx, checking instr groups=0x%llx\n",
+ tctxt->groups, ri->groups);
+ if (!(ri->groups & tctxt->groups)) {
+ DEBUG_PRINTF("halt: no groups are set\n");
+ return HWLM_CONTINUE_MATCHING;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_ONLY_EOD) {
+ struct core_info *ci = &scratch->core_info;
+ if (end != ci->buf_offset + ci->len) {
+ DEBUG_PRINTF("should only match at end of data\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_BOUNDS) {
+ if (!roseCheckBounds(end, ri->min_bound, ri->max_bound)) {
+ DEBUG_PRINTF("failed bounds check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_NOT_HANDLED) {
+ struct fatbit *handled = scratch->handled_roles;
+ if (fatbit_set(handled, t->handledKeyCount, ri->key)) {
+ DEBUG_PRINTF("key %u already set\n", ri->key);
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_SINGLE_LOOKAROUND) {
+ if (!roseCheckSingleLookaround(t, scratch, ri->offset,
+ ri->reach_index, end)) {
+ DEBUG_PRINTF("failed lookaround check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_LOOKAROUND) {
+ if (!roseCheckLookaround(t, scratch, ri->look_index,
+ ri->reach_index, ri->count, end)) {
+ DEBUG_PRINTF("failed lookaround check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_MASK) {
+ struct core_info *ci = &scratch->core_info;
+ if (!roseCheckMask(ci, ri->and_mask, ri->cmp_mask,
+ ri->neg_mask, ri->offset, end)) {
+ DEBUG_PRINTF("failed mask check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_MASK_32) {
+ struct core_info *ci = &scratch->core_info;
+ if (!roseCheckMask32(ci, ri->and_mask, ri->cmp_mask,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_BYTE) {
+ const struct core_info *ci = &scratch->core_info;
+ if (!roseCheckByte(ci, ri->and_mask, ri->cmp_mask,
+ ri->negation, ri->offset, end)) {
+ DEBUG_PRINTF("failed byte check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_SHUFTI_16x8) {
+ const struct core_info *ci = &scratch->core_info;
+ if (!roseCheckShufti16x8(ci, ri->nib_mask,
+ ri->bucket_select_mask,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+                pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_SHUFTI_32x8) {
+ const struct core_info *ci = &scratch->core_info;
+ if (!roseCheckShufti32x8(ci, ri->hi_mask, ri->lo_mask,
+ ri->bucket_select_mask,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+                pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_SHUFTI_16x16) {
+ const struct core_info *ci = &scratch->core_info;
+ if (!roseCheckShufti16x16(ci, ri->hi_mask, ri->lo_mask,
+ ri->bucket_select_mask,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+                pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_SHUFTI_32x16) {
+ const struct core_info *ci = &scratch->core_info;
+ if (!roseCheckShufti32x16(ci, ri->hi_mask, ri->lo_mask,
+ ri->bucket_select_mask_hi,
+ ri->bucket_select_mask_lo,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+                pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+#ifdef HAVE_AVX512
+ PROGRAM_CASE(CHECK_MASK_64) {
+ struct core_info *ci = &scratch->core_info;
+ if (!roseCheckMask64(ci, ri->and_mask, ri->cmp_mask,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_SHUFTI_64x8) {
+ const struct core_info *ci = &scratch->core_info;
+ if (!roseCheckShufti64x8(ci, ri->hi_mask, ri->lo_mask,
+ ri->bucket_select_mask,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+ pc += ri->fail_jump;
+                PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_SHUFTI_64x16) {
+ const struct core_info *ci = &scratch->core_info;
+ if (!roseCheckShufti64x16(ci, ri->hi_mask_1, ri->hi_mask_2,
+ ri->lo_mask_1, ri->lo_mask_2,
+ ri->bucket_select_mask_hi,
+ ri->bucket_select_mask_lo,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+ pc += ri->fail_jump;
+                PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+#endif
+
+ PROGRAM_CASE(CHECK_INFIX) {
+ if (!roseTestInfix(t, scratch, ri->queue, ri->lag, ri->report,
+ end)) {
+ DEBUG_PRINTF("failed infix check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_PREFIX) {
+ if (!roseTestPrefix(t, scratch, ri->queue, ri->lag, ri->report,
+ end)) {
+ DEBUG_PRINTF("failed prefix check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(PUSH_DELAYED) {
+ rosePushDelayedMatch(t, scratch, ri->delay, ri->index, end);
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(DUMMY_NOP) {
+ assert(0);
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CATCH_UP) {
+ if (roseCatchUpTo(t, scratch, end) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CATCH_UP_MPV) {
+ if (from_mpv || skip_mpv_catchup) {
+ DEBUG_PRINTF("skipping mpv catchup\n");
+ } else if (roseCatchUpMPV(t,
+ end - scratch->core_info.buf_offset,
+ scratch) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SOM_ADJUST) {
+ assert(ri->distance <= end);
+ som = end - ri->distance;
+ DEBUG_PRINTF("som is (end - %u) = %llu\n", ri->distance, som);
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SOM_LEFTFIX) {
+ som = roseGetHaigSom(t, scratch, ri->queue, ri->lag);
+ DEBUG_PRINTF("som from leftfix is %llu\n", som);
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SOM_FROM_REPORT) {
+ som = handleSomExternal(scratch, &ri->som, end);
+ DEBUG_PRINTF("som from report %u is %llu\n", ri->som.onmatch,
+ som);
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SOM_ZERO) {
+ DEBUG_PRINTF("setting SOM to zero\n");
+ som = 0;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(TRIGGER_INFIX) {
+ roseTriggerInfix(t, scratch, som, end, ri->queue, ri->event,
+ ri->cancel);
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(TRIGGER_SUFFIX) {
+ if (roseTriggerSuffix(t, scratch, ri->queue, ri->event, som,
+ end) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(DEDUPE) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ const char do_som = t->hasSom; // TODO: constant propagate
+ const char is_external_report = 1;
+ enum DedupeResult rv =
+ dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
+ ri->dkey, ri->offset_adjust,
+ is_external_report, ri->quash_som, do_som);
+ switch (rv) {
+ case DEDUPE_HALT:
+ return HWLM_TERMINATE_MATCHING;
+ case DEDUPE_SKIP:
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ case DEDUPE_CONTINUE:
+ break;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(DEDUPE_SOM) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ const char is_external_report = 0;
+ const char do_som = 1;
+ enum DedupeResult rv =
+ dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
+ ri->dkey, ri->offset_adjust,
+ is_external_report, ri->quash_som, do_som);
+ switch (rv) {
+ case DEDUPE_HALT:
+ return HWLM_TERMINATE_MATCHING;
+ case DEDUPE_SKIP:
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ case DEDUPE_CONTINUE:
+ break;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(REPORT_CHAIN) {
+ // Note: sequence points updated inside this function.
+ if (roseCatchUpAndHandleChainMatch(
+ t, scratch, ri->event, ri->top_squash_distance, end,
+ in_catchup) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(REPORT_SOM_INT) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ roseHandleSom(scratch, &ri->som, end);
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(REPORT_SOM_AWARE) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ roseHandleSomSom(scratch, &ri->som, som, end);
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(REPORT) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
+ INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(REPORT_EXHAUST) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
+ ri->ekey) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(REPORT_SOM) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseReportSom(t, scratch, som, end, ri->onmatch,
+ ri->offset_adjust,
+ INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(REPORT_SOM_EXHAUST) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseReportSom(t, scratch, som, end, ri->onmatch,
+ ri->offset_adjust,
+ ri->ekey) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(DEDUPE_AND_REPORT) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ const char do_som = t->hasSom; // TODO: constant propagate
+ const char is_external_report = 1;
+ enum DedupeResult rv =
+ dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
+ ri->dkey, ri->offset_adjust,
+ is_external_report, ri->quash_som, do_som);
+ switch (rv) {
+ case DEDUPE_HALT:
+ return HWLM_TERMINATE_MATCHING;
+ case DEDUPE_SKIP:
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ case DEDUPE_CONTINUE:
+ break;
+ }
+
+ const u32 ekey = INVALID_EKEY;
+ if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
+ ekey) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(FINAL_REPORT) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
+ INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ /* One-shot specialisation: this instruction always terminates
+ * execution of the program. */
+ return HWLM_CONTINUE_MATCHING;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_EXHAUSTED) {
+ DEBUG_PRINTF("check ekey %u\n", ri->ekey);
+ assert(ri->ekey != INVALID_EKEY);
+ assert(ri->ekey < t->ekeyCount);
+ const char *evec = scratch->core_info.exhaustionVector;
+ if (isExhausted(t, evec, ri->ekey)) {
+ DEBUG_PRINTF("ekey %u already set, match is exhausted\n",
+ ri->ekey);
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_MIN_LENGTH) {
+ DEBUG_PRINTF("check min length %llu (adj %d)\n", ri->min_length,
+ ri->end_adj);
+ assert(ri->min_length > 0);
+ assert(ri->end_adj == 0 || ri->end_adj == -1);
+ assert(som == HS_OFFSET_PAST_HORIZON || som <= end);
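+            // A som of HS_OFFSET_PAST_HORIZON means the true start of match
+            // is unknown, so the length check is conservatively allowed to
+            // pass.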
+ if (som != HS_OFFSET_PAST_HORIZON &&
+ ((end + ri->end_adj) - som < ri->min_length)) {
+ DEBUG_PRINTF("failed check, match len %llu\n",
+ (u64a)((end + ri->end_adj) - som));
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SET_STATE) {
+ DEBUG_PRINTF("set state index %u\n", ri->index);
+ mmbit_set(getRoleState(scratch->core_info.state),
+ t->rolesWithStateCount, ri->index);
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SET_GROUPS) {
+ tctxt->groups |= ri->groups;
+ DEBUG_PRINTF("set groups 0x%llx -> 0x%llx\n", ri->groups,
+ tctxt->groups);
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SQUASH_GROUPS) {
+ assert(popcount64(ri->groups) == 63); // Squash only one group.
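+            // ri->groups is all-ones apart from the one group being
+            // squashed, so the AND below clears exactly that group bit.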
+ if (work_done) {
+ tctxt->groups &= ri->groups;
+ DEBUG_PRINTF("squash groups 0x%llx -> 0x%llx\n", ri->groups,
+ tctxt->groups);
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_STATE) {
+ DEBUG_PRINTF("check state %u\n", ri->index);
+ const u8 *roles = getRoleState(scratch->core_info.state);
+ if (!mmbit_isset(roles, t->rolesWithStateCount, ri->index)) {
+ DEBUG_PRINTF("state not on\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SPARSE_ITER_BEGIN) {
+ DEBUG_PRINTF("iter_offset=%u\n", ri->iter_offset);
+ const struct mmbit_sparse_iter *it =
+ getByOffset(t, ri->iter_offset);
+ assert(ISALIGNED(it));
+
+ const u8 *roles = getRoleState(scratch->core_info.state);
+
+ u32 idx = 0;
+ u32 i = mmbit_sparse_iter_begin(roles, t->rolesWithStateCount,
+ &idx, it, si_state);
+ if (i == MMB_INVALID) {
+ DEBUG_PRINTF("no states in sparse iter are on\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+
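+            // Presumably resets per-iteration dedupe state so that
+            // CHECK_NOT_HANDLED sees a clean "handled" set for this sweep.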
+ fatbit_clear(scratch->handled_roles);
+
+ const u32 *jumps = getByOffset(t, ri->jump_table);
+ DEBUG_PRINTF("state %u (idx=%u) is on, jump to %u\n", i, idx,
+ jumps[idx]);
+ pc = pc_base + jumps[idx];
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SPARSE_ITER_NEXT) {
+ DEBUG_PRINTF("iter_offset=%u, state=%u\n", ri->iter_offset,
+ ri->state);
+ const struct mmbit_sparse_iter *it =
+ getByOffset(t, ri->iter_offset);
+ assert(ISALIGNED(it));
+
+ const u8 *roles = getRoleState(scratch->core_info.state);
+
+ u32 idx = 0;
+ u32 i = mmbit_sparse_iter_next(roles, t->rolesWithStateCount,
+ ri->state, &idx, it, si_state);
+ if (i == MMB_INVALID) {
+ DEBUG_PRINTF("no more states in sparse iter are on\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+
+ const u32 *jumps = getByOffset(t, ri->jump_table);
+ DEBUG_PRINTF("state %u (idx=%u) is on, jump to %u\n", i, idx,
+ jumps[idx]);
+ pc = pc_base + jumps[idx];
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SPARSE_ITER_ANY) {
+ DEBUG_PRINTF("iter_offset=%u\n", ri->iter_offset);
+ const struct mmbit_sparse_iter *it =
+ getByOffset(t, ri->iter_offset);
+ assert(ISALIGNED(it));
+
+ const u8 *roles = getRoleState(scratch->core_info.state);
+
+ u32 idx = 0;
+ u32 i = mmbit_sparse_iter_begin(roles, t->rolesWithStateCount,
+ &idx, it, si_state);
+ if (i == MMB_INVALID) {
+ DEBUG_PRINTF("no states in sparse iter are on\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ DEBUG_PRINTF("state %u (idx=%u) is on\n", i, idx);
+ fatbit_clear(scratch->handled_roles);
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(ENGINES_EOD) {
+ if (roseEnginesEod(t, scratch, end, ri->iter_offset) ==
+ HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SUFFIXES_EOD) {
+ if (roseSuffixesEod(t, scratch, end) ==
+ HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(MATCHER_EOD) {
+ if (roseMatcherEod(t, scratch, end) ==
+ HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_LONG_LIT) {
+ const char nocase = 0;
+ if (!roseCheckLongLiteral(t, scratch, end, ri->lit_offset,
+ ri->lit_length, nocase)) {
+ DEBUG_PRINTF("failed long lit check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_LONG_LIT_NOCASE) {
+ const char nocase = 1;
+ if (!roseCheckLongLiteral(t, scratch, end, ri->lit_offset,
+ ri->lit_length, nocase)) {
+ DEBUG_PRINTF("failed nocase long lit check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_MED_LIT) {
+ const char nocase = 0;
+ if (!roseCheckMediumLiteral(t, scratch, end, ri->lit_offset,
+ ri->lit_length, nocase)) {
+                DEBUG_PRINTF("failed medium lit check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_MED_LIT_NOCASE) {
+ const char nocase = 1;
+ if (!roseCheckMediumLiteral(t, scratch, end, ri->lit_offset,
+ ri->lit_length, nocase)) {
+                DEBUG_PRINTF("failed nocase medium lit check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CLEAR_WORK_DONE) {
+ DEBUG_PRINTF("clear work_done flag\n");
+ work_done = 0;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(MULTIPATH_LOOKAROUND) {
+ if (!roseMultipathLookaround(t, scratch, ri->look_index,
+ ri->reach_index, ri->count,
+ ri->last_start, ri->start_mask,
+ end)) {
+ DEBUG_PRINTF("failed multi-path lookaround check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_MULTIPATH_SHUFTI_16x8) {
+ if (!roseCheckMultipathShufti16x8(scratch, ri, end)) {
+ DEBUG_PRINTF("failed multi-path shufti 16x8 check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_MULTIPATH_SHUFTI_32x8) {
+ if (!roseCheckMultipathShufti32x8(scratch, ri, end)) {
+ DEBUG_PRINTF("failed multi-path shufti 32x8 check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_MULTIPATH_SHUFTI_32x16) {
+ if (!roseCheckMultipathShufti32x16(scratch, ri, end)) {
+ DEBUG_PRINTF("failed multi-path shufti 32x16 check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(CHECK_MULTIPATH_SHUFTI_64) {
+ if (!roseCheckMultipathShufti64(scratch, ri, end)) {
+ DEBUG_PRINTF("failed multi-path shufti 64 check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(INCLUDED_JUMP) {
+ if (scratch->fdr_conf) {
+                // squash the bucket of the included literal
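+                // fdr_conf_offset rounded down to a multiple of 8 gives the
+                // shift of the byte holding this literal's confirm bits (a
+                // reading of the code, not a documented contract).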
+ u8 shift = scratch->fdr_conf_offset & ~7U;
+ u64a mask = ((~(u64a)ri->squash) << shift);
+ *(scratch->fdr_conf) &= mask;
+
+ pc = getByOffset(t, ri->child_offset);
+ pc_base = pc;
+                programOffset = (const u8 *)pc_base - (const u8 *)t;
+ DEBUG_PRINTF("pc_base %p pc %p child_offset %u squash %u\n",
+ pc_base, pc, ri->child_offset, ri->squash);
+ work_done = 0;
+ PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SET_LOGICAL) {
+ DEBUG_PRINTF("set logical value of lkey %u, offset_adjust=%d\n",
+ ri->lkey, ri->offset_adjust);
+ assert(ri->lkey != INVALID_LKEY);
+ assert(ri->lkey < t->lkeyCount);
+ char *lvec = scratch->core_info.logicalVector;
+ setLogicalVal(t, lvec, ri->lkey, 1);
+ updateLastCombMatchOffset(tctxt, end + ri->offset_adjust);
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SET_COMBINATION) {
+ DEBUG_PRINTF("set ckey %u as active\n", ri->ckey);
+ assert(ri->ckey != INVALID_CKEY);
+ assert(ri->ckey < t->ckeyCount);
+ char *cvec = scratch->core_info.combVector;
+ setCombinationActive(t, cvec, ri->ckey);
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(FLUSH_COMBINATION) {
+ assert(end >= tctxt->lastCombMatchOffset);
+ if (end > tctxt->lastCombMatchOffset) {
+ if (flushActiveCombinations(t, scratch)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(SET_EXHAUST) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseSetExhaust(t, scratch, ri->ekey)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ PROGRAM_CASE(LAST_FLUSH_COMBINATION) {
+ assert(end >= tctxt->lastCombMatchOffset);
+ if (flushActiveCombinations(t, scratch)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ if (checkPurelyNegatives(t, scratch, end)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ PROGRAM_NEXT_INSTRUCTION
+
+ default: {
+ assert(0); // unreachable
+ scratch->core_info.status |= STATUS_ERROR;
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ }
+
+ assert(0); // unreachable
+ return HWLM_CONTINUE_MATCHING;
}
-
-#define L_PROGRAM_CASE(name) \
- case ROSE_INSTR_##name: { \
- DEBUG_PRINTF("l_instruction: " #name " (pc=%u)\n", \
- programOffset + (u32)(pc - pc_base)); \
- const struct ROSE_STRUCT_##name *ri = \
- (const struct ROSE_STRUCT_##name *)pc;
-
-#define L_PROGRAM_NEXT_INSTRUCTION \
- pc += ROUNDUP_N(sizeof(*ri), ROSE_INSTR_MIN_ALIGN); \
- break; \
- }
-
-#define L_PROGRAM_NEXT_INSTRUCTION_JUMP continue;
-
-hwlmcb_rv_t roseRunProgram_l(const struct RoseEngine *t,
- struct hs_scratch *scratch, u32 programOffset,
- u64a som, u64a end, u8 prog_flags) {
- DEBUG_PRINTF("program=%u, offsets [%llu,%llu], flags=%u\n", programOffset,
- som, end, prog_flags);
-
- assert(programOffset != ROSE_INVALID_PROG_OFFSET);
- assert(programOffset >= sizeof(struct RoseEngine));
- assert(programOffset < t->size);
-
- const char in_catchup = prog_flags & ROSE_PROG_FLAG_IN_CATCHUP;
- const char from_mpv = prog_flags & ROSE_PROG_FLAG_FROM_MPV;
-
- const char *pc_base = getByOffset(t, programOffset);
- const char *pc = pc_base;
-
- // If this program has an effect, work_done will be set to one (which may
- // allow the program to squash groups).
- int work_done = 0;
-
- struct RoseContext *tctxt = &scratch->tctxt;
-
- assert(*(const u8 *)pc != ROSE_INSTR_END);
-
- for (;;) {
- assert(ISALIGNED_N(pc, ROSE_INSTR_MIN_ALIGN));
- assert(pc >= pc_base);
- assert((size_t)(pc - pc_base) < t->size);
- const u8 code = *(const u8 *)pc;
- assert(code <= LAST_ROSE_INSTRUCTION);
-
- switch ((enum RoseInstructionCode)code) {
- L_PROGRAM_CASE(END) {
- DEBUG_PRINTF("finished\n");
- return HWLM_CONTINUE_MATCHING;
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CHECK_GROUPS) {
- DEBUG_PRINTF("groups=0x%llx, checking instr groups=0x%llx\n",
- tctxt->groups, ri->groups);
- if (!(ri->groups & tctxt->groups)) {
- DEBUG_PRINTF("halt: no groups are set\n");
- return HWLM_CONTINUE_MATCHING;
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CHECK_MASK) {
- struct core_info *ci = &scratch->core_info;
- if (!roseCheckMask(ci, ri->and_mask, ri->cmp_mask,
- ri->neg_mask, ri->offset, end)) {
- DEBUG_PRINTF("failed mask check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CHECK_MASK_32) {
- struct core_info *ci = &scratch->core_info;
- if (!roseCheckMask32(ci, ri->and_mask, ri->cmp_mask,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
-#ifdef HAVE_AVX512
- L_PROGRAM_CASE(CHECK_MASK_64) {
- struct core_info *ci = &scratch->core_info;
- if (!roseCheckMask64(ci, ri->and_mask, ri->cmp_mask,
- ri->neg_mask, ri->offset, end)) {
- assert(ri->fail_jump);
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-#endif
-
- L_PROGRAM_CASE(CHECK_BYTE) {
- const struct core_info *ci = &scratch->core_info;
- if (!roseCheckByte(ci, ri->and_mask, ri->cmp_mask,
- ri->negation, ri->offset, end)) {
- DEBUG_PRINTF("failed byte check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(PUSH_DELAYED) {
- rosePushDelayedMatch(t, scratch, ri->delay, ri->index, end);
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CATCH_UP) {
- if (roseCatchUpTo(t, scratch, end) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(SOM_FROM_REPORT) {
- som = handleSomExternal(scratch, &ri->som, end);
- DEBUG_PRINTF("som from report %u is %llu\n", ri->som.onmatch,
- som);
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(DEDUPE) {
- updateSeqPoint(tctxt, end, from_mpv);
- const char do_som = t->hasSom; // TODO: constant propagate
- const char is_external_report = 1;
- enum DedupeResult rv =
- dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
- ri->dkey, ri->offset_adjust,
- is_external_report, ri->quash_som, do_som);
- switch (rv) {
- case DEDUPE_HALT:
- return HWLM_TERMINATE_MATCHING;
- case DEDUPE_SKIP:
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- case DEDUPE_CONTINUE:
- break;
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(DEDUPE_SOM) {
- updateSeqPoint(tctxt, end, from_mpv);
- const char is_external_report = 0;
- const char do_som = 1;
- enum DedupeResult rv =
- dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
- ri->dkey, ri->offset_adjust,
- is_external_report, ri->quash_som, do_som);
- switch (rv) {
- case DEDUPE_HALT:
- return HWLM_TERMINATE_MATCHING;
- case DEDUPE_SKIP:
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- case DEDUPE_CONTINUE:
- break;
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(REPORT_CHAIN) {
- // Note: sequence points updated inside this function.
- if (roseCatchUpAndHandleChainMatch(
- t, scratch, ri->event, ri->top_squash_distance, end,
- in_catchup) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(REPORT) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
- INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(REPORT_EXHAUST) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
- ri->ekey) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(REPORT_SOM) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseReportSom(t, scratch, som, end, ri->onmatch,
- ri->offset_adjust,
- INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(DEDUPE_AND_REPORT) {
- updateSeqPoint(tctxt, end, from_mpv);
- const char do_som = t->hasSom; // TODO: constant propagate
- const char is_external_report = 1;
- enum DedupeResult rv =
- dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
- ri->dkey, ri->offset_adjust,
- is_external_report, ri->quash_som, do_som);
- switch (rv) {
- case DEDUPE_HALT:
- return HWLM_TERMINATE_MATCHING;
- case DEDUPE_SKIP:
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- case DEDUPE_CONTINUE:
- break;
- }
-
- const u32 ekey = INVALID_EKEY;
- if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
- ekey) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(FINAL_REPORT) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
- INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- /* One-shot specialisation: this instruction always terminates
- * execution of the program. */
- return HWLM_CONTINUE_MATCHING;
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CHECK_EXHAUSTED) {
- DEBUG_PRINTF("check ekey %u\n", ri->ekey);
- assert(ri->ekey != INVALID_EKEY);
- assert(ri->ekey < t->ekeyCount);
- const char *evec = scratch->core_info.exhaustionVector;
- if (isExhausted(t, evec, ri->ekey)) {
- DEBUG_PRINTF("ekey %u already set, match is exhausted\n",
- ri->ekey);
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(SQUASH_GROUPS) {
- assert(popcount64(ri->groups) == 63); // Squash only one group.
- if (work_done) {
- tctxt->groups &= ri->groups;
- DEBUG_PRINTF("squash groups 0x%llx -> 0x%llx\n", ri->groups,
- tctxt->groups);
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CHECK_LONG_LIT) {
- const char nocase = 0;
- if (!roseCheckLongLiteral(t, scratch, end, ri->lit_offset,
- ri->lit_length, nocase)) {
- DEBUG_PRINTF("failed long lit check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CHECK_LONG_LIT_NOCASE) {
- const char nocase = 1;
- if (!roseCheckLongLiteral(t, scratch, end, ri->lit_offset,
- ri->lit_length, nocase)) {
- DEBUG_PRINTF("failed nocase long lit check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CHECK_MED_LIT) {
- const char nocase = 0;
- if (!roseCheckMediumLiteral(t, scratch, end, ri->lit_offset,
- ri->lit_length, nocase)) {
-                DEBUG_PRINTF("failed medium lit check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CHECK_MED_LIT_NOCASE) {
- const char nocase = 1;
- if (!roseCheckMediumLiteral(t, scratch, end, ri->lit_offset,
- ri->lit_length, nocase)) {
-                DEBUG_PRINTF("failed nocase medium lit check\n");
- assert(ri->fail_jump); // must progress
- pc += ri->fail_jump;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(CLEAR_WORK_DONE) {
- DEBUG_PRINTF("clear work_done flag\n");
- work_done = 0;
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(INCLUDED_JUMP) {
- if (scratch->fdr_conf) {
-                // squash the bucket of the included literal
- u8 shift = scratch->fdr_conf_offset & ~7U;
- u64a mask = ((~(u64a)ri->squash) << shift);
- *(scratch->fdr_conf) &= mask;
-
- pc = getByOffset(t, ri->child_offset);
- pc_base = pc;
-                programOffset = (const u8 *)pc_base - (const u8 *)t;
- DEBUG_PRINTF("pc_base %p pc %p child_offset %u squash %u\n",
- pc_base, pc, ri->child_offset, ri->squash);
- work_done = 0;
- L_PROGRAM_NEXT_INSTRUCTION_JUMP
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(SET_LOGICAL) {
- DEBUG_PRINTF("set logical value of lkey %u, offset_adjust=%d\n",
- ri->lkey, ri->offset_adjust);
- assert(ri->lkey != INVALID_LKEY);
- assert(ri->lkey < t->lkeyCount);
- char *lvec = scratch->core_info.logicalVector;
- setLogicalVal(t, lvec, ri->lkey, 1);
- updateLastCombMatchOffset(tctxt, end + ri->offset_adjust);
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(SET_COMBINATION) {
- DEBUG_PRINTF("set ckey %u as active\n", ri->ckey);
- assert(ri->ckey != INVALID_CKEY);
- assert(ri->ckey < t->ckeyCount);
- char *cvec = scratch->core_info.combVector;
- setCombinationActive(t, cvec, ri->ckey);
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(FLUSH_COMBINATION) {
- assert(end >= tctxt->lastCombMatchOffset);
- if (end > tctxt->lastCombMatchOffset) {
- if (flushActiveCombinations(t, scratch)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(SET_EXHAUST) {
- updateSeqPoint(tctxt, end, from_mpv);
- if (roseSetExhaust(t, scratch, ri->ekey)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- work_done = 1;
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- L_PROGRAM_CASE(LAST_FLUSH_COMBINATION) {
- assert(end >= tctxt->lastCombMatchOffset);
- if (flushActiveCombinations(t, scratch)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- if (checkPurelyNegatives(t, scratch, end)
- == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATE_MATCHING;
- }
- }
- L_PROGRAM_NEXT_INSTRUCTION
-
- default: {
- assert(0); // unreachable
- scratch->core_info.status |= STATUS_ERROR;
- return HWLM_TERMINATE_MATCHING;
- }
- }
- }
-
- assert(0); // unreachable
- return HWLM_CONTINUE_MATCHING;
-}
-
-#undef L_PROGRAM_CASE
-#undef L_PROGRAM_NEXT_INSTRUCTION
-#undef L_PROGRAM_NEXT_INSTRUCTION_JUMP
-
-#undef PROGRAM_CASE
-#undef PROGRAM_NEXT_INSTRUCTION
-#undef PROGRAM_NEXT_INSTRUCTION_JUMP
+
+#define L_PROGRAM_CASE(name) \
+ case ROSE_INSTR_##name: { \
+ DEBUG_PRINTF("l_instruction: " #name " (pc=%u)\n", \
+ programOffset + (u32)(pc - pc_base)); \
+ const struct ROSE_STRUCT_##name *ri = \
+ (const struct ROSE_STRUCT_##name *)pc;
+
+#define L_PROGRAM_NEXT_INSTRUCTION \
+ pc += ROUNDUP_N(sizeof(*ri), ROSE_INSTR_MIN_ALIGN); \
+ break; \
+ }
+
+#define L_PROGRAM_NEXT_INSTRUCTION_JUMP continue;
+
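+/* roseRunProgram_l appears to be a "lite" interpreter variant covering only
+ * the reduced instruction subset emitted for lightweight programs;
+ * unsupported opcodes land in the default case below. */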
+hwlmcb_rv_t roseRunProgram_l(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u32 programOffset,
+ u64a som, u64a end, u8 prog_flags) {
+ DEBUG_PRINTF("program=%u, offsets [%llu,%llu], flags=%u\n", programOffset,
+ som, end, prog_flags);
+
+ assert(programOffset != ROSE_INVALID_PROG_OFFSET);
+ assert(programOffset >= sizeof(struct RoseEngine));
+ assert(programOffset < t->size);
+
+ const char in_catchup = prog_flags & ROSE_PROG_FLAG_IN_CATCHUP;
+ const char from_mpv = prog_flags & ROSE_PROG_FLAG_FROM_MPV;
+
+ const char *pc_base = getByOffset(t, programOffset);
+ const char *pc = pc_base;
+
+ // If this program has an effect, work_done will be set to one (which may
+ // allow the program to squash groups).
+ int work_done = 0;
+
+ struct RoseContext *tctxt = &scratch->tctxt;
+
+ assert(*(const u8 *)pc != ROSE_INSTR_END);
+
+ for (;;) {
+ assert(ISALIGNED_N(pc, ROSE_INSTR_MIN_ALIGN));
+ assert(pc >= pc_base);
+ assert((size_t)(pc - pc_base) < t->size);
+ const u8 code = *(const u8 *)pc;
+ assert(code <= LAST_ROSE_INSTRUCTION);
+
+ switch ((enum RoseInstructionCode)code) {
+ L_PROGRAM_CASE(END) {
+ DEBUG_PRINTF("finished\n");
+ return HWLM_CONTINUE_MATCHING;
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CHECK_GROUPS) {
+ DEBUG_PRINTF("groups=0x%llx, checking instr groups=0x%llx\n",
+ tctxt->groups, ri->groups);
+ if (!(ri->groups & tctxt->groups)) {
+ DEBUG_PRINTF("halt: no groups are set\n");
+ return HWLM_CONTINUE_MATCHING;
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CHECK_MASK) {
+ struct core_info *ci = &scratch->core_info;
+ if (!roseCheckMask(ci, ri->and_mask, ri->cmp_mask,
+ ri->neg_mask, ri->offset, end)) {
+ DEBUG_PRINTF("failed mask check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CHECK_MASK_32) {
+ struct core_info *ci = &scratch->core_info;
+ if (!roseCheckMask32(ci, ri->and_mask, ri->cmp_mask,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+#ifdef HAVE_AVX512
+ L_PROGRAM_CASE(CHECK_MASK_64) {
+ struct core_info *ci = &scratch->core_info;
+ if (!roseCheckMask64(ci, ri->and_mask, ri->cmp_mask,
+ ri->neg_mask, ri->offset, end)) {
+ assert(ri->fail_jump);
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+#endif
+
+ L_PROGRAM_CASE(CHECK_BYTE) {
+ const struct core_info *ci = &scratch->core_info;
+ if (!roseCheckByte(ci, ri->and_mask, ri->cmp_mask,
+ ri->negation, ri->offset, end)) {
+ DEBUG_PRINTF("failed byte check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(PUSH_DELAYED) {
+ rosePushDelayedMatch(t, scratch, ri->delay, ri->index, end);
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CATCH_UP) {
+ if (roseCatchUpTo(t, scratch, end) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(SOM_FROM_REPORT) {
+ som = handleSomExternal(scratch, &ri->som, end);
+ DEBUG_PRINTF("som from report %u is %llu\n", ri->som.onmatch,
+ som);
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(DEDUPE) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ const char do_som = t->hasSom; // TODO: constant propagate
+ const char is_external_report = 1;
+ enum DedupeResult rv =
+ dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
+ ri->dkey, ri->offset_adjust,
+ is_external_report, ri->quash_som, do_som);
+ switch (rv) {
+ case DEDUPE_HALT:
+ return HWLM_TERMINATE_MATCHING;
+ case DEDUPE_SKIP:
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ case DEDUPE_CONTINUE:
+ break;
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(DEDUPE_SOM) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ const char is_external_report = 0;
+ const char do_som = 1;
+ enum DedupeResult rv =
+ dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
+ ri->dkey, ri->offset_adjust,
+ is_external_report, ri->quash_som, do_som);
+ switch (rv) {
+ case DEDUPE_HALT:
+ return HWLM_TERMINATE_MATCHING;
+ case DEDUPE_SKIP:
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ case DEDUPE_CONTINUE:
+ break;
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(REPORT_CHAIN) {
+ // Note: sequence points updated inside this function.
+ if (roseCatchUpAndHandleChainMatch(
+ t, scratch, ri->event, ri->top_squash_distance, end,
+ in_catchup) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(REPORT) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
+ INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(REPORT_EXHAUST) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
+ ri->ekey) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(REPORT_SOM) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseReportSom(t, scratch, som, end, ri->onmatch,
+ ri->offset_adjust,
+ INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(DEDUPE_AND_REPORT) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ const char do_som = t->hasSom; // TODO: constant propagate
+ const char is_external_report = 1;
+ enum DedupeResult rv =
+ dedupeCatchup(t, scratch, end, som, end + ri->offset_adjust,
+ ri->dkey, ri->offset_adjust,
+ is_external_report, ri->quash_som, do_som);
+ switch (rv) {
+ case DEDUPE_HALT:
+ return HWLM_TERMINATE_MATCHING;
+ case DEDUPE_SKIP:
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ case DEDUPE_CONTINUE:
+ break;
+ }
+
+ const u32 ekey = INVALID_EKEY;
+ if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
+ ekey) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(FINAL_REPORT) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseReport(t, scratch, end, ri->onmatch, ri->offset_adjust,
+ INVALID_EKEY) == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ /* One-shot specialisation: this instruction always terminates
+ * execution of the program. */
+ return HWLM_CONTINUE_MATCHING;
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CHECK_EXHAUSTED) {
+ DEBUG_PRINTF("check ekey %u\n", ri->ekey);
+ assert(ri->ekey != INVALID_EKEY);
+ assert(ri->ekey < t->ekeyCount);
+ const char *evec = scratch->core_info.exhaustionVector;
+ if (isExhausted(t, evec, ri->ekey)) {
+ DEBUG_PRINTF("ekey %u already set, match is exhausted\n",
+ ri->ekey);
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(SQUASH_GROUPS) {
+ assert(popcount64(ri->groups) == 63); // Squash only one group.
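+ // Group squashing only applies once a report has actually fired.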
+ if (work_done) {
+ tctxt->groups &= ri->groups;
+ DEBUG_PRINTF("squash groups 0x%llx -> 0x%llx\n", ri->groups,
+ tctxt->groups);
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CHECK_LONG_LIT) {
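+ // Confirm the full long literal; on failure, skip the dependent
+ // work via fail_jump.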
+ const char nocase = 0;
+ if (!roseCheckLongLiteral(t, scratch, end, ri->lit_offset,
+ ri->lit_length, nocase)) {
+ DEBUG_PRINTF("failed long lit check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CHECK_LONG_LIT_NOCASE) {
+ const char nocase = 1;
+ if (!roseCheckLongLiteral(t, scratch, end, ri->lit_offset,
+ ri->lit_length, nocase)) {
+ DEBUG_PRINTF("failed nocase long lit check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CHECK_MED_LIT) {
+ const char nocase = 0;
+ if (!roseCheckMediumLiteral(t, scratch, end, ri->lit_offset,
+ ri->lit_length, nocase)) {
+ DEBUG_PRINTF("failed lit check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CHECK_MED_LIT_NOCASE) {
+ const char nocase = 1;
+ if (!roseCheckMediumLiteral(t, scratch, end, ri->lit_offset,
+ ri->lit_length, nocase)) {
+ DEBUG_PRINTF("failed long lit check\n");
+ assert(ri->fail_jump); // must progress
+ pc += ri->fail_jump;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(CLEAR_WORK_DONE) {
+ DEBUG_PRINTF("clear work_done flag\n");
+ work_done = 0;
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(INCLUDED_JUMP) {
+ if (scratch->fdr_conf) {
+ // squash the bucket of the included literal
+ u8 shift = scratch->fdr_conf_offset & ~7U;
+ u64a mask = ((~(u64a)ri->squash) << shift);
+ *(scratch->fdr_conf) &= mask;
+
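+ // Tail-call into the included (child) literal's program.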
+ pc = getByOffset(t, ri->child_offset);
+ pc_base = pc;
+ programOffset = (const u8 *)pc_base - (const u8 *)t;
+ DEBUG_PRINTF("pc_base %p pc %p child_offset %u squash %u\n",
+ pc_base, pc, ri->child_offset, ri->squash);
+ work_done = 0;
+ L_PROGRAM_NEXT_INSTRUCTION_JUMP
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(SET_LOGICAL) {
+ DEBUG_PRINTF("set logical value of lkey %u, offset_adjust=%d\n",
+ ri->lkey, ri->offset_adjust);
+ assert(ri->lkey != INVALID_LKEY);
+ assert(ri->lkey < t->lkeyCount);
+ char *lvec = scratch->core_info.logicalVector;
+ setLogicalVal(t, lvec, ri->lkey, 1);
+ updateLastCombMatchOffset(tctxt, end + ri->offset_adjust);
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(SET_COMBINATION) {
+ DEBUG_PRINTF("set ckey %u as active\n", ri->ckey);
+ assert(ri->ckey != INVALID_CKEY);
+ assert(ri->ckey < t->ckeyCount);
+ char *cvec = scratch->core_info.combVector;
+ setCombinationActive(t, cvec, ri->ckey);
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(FLUSH_COMBINATION) {
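+ // Flush active logical combinations, at most once per match offset.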
+ assert(end >= tctxt->lastCombMatchOffset);
+ if (end > tctxt->lastCombMatchOffset) {
+ if (flushActiveCombinations(t, scratch)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(SET_EXHAUST) {
+ updateSeqPoint(tctxt, end, from_mpv);
+ if (roseSetExhaust(t, scratch, ri->ekey)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ work_done = 1;
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ L_PROGRAM_CASE(LAST_FLUSH_COMBINATION) {
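+ // Final flush at the end of the scan; also evaluates combinations
+ // that are purely negative.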
+ assert(end >= tctxt->lastCombMatchOffset);
+ if (flushActiveCombinations(t, scratch)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ if (checkPurelyNegatives(t, scratch, end)
+ == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ L_PROGRAM_NEXT_INSTRUCTION
+
+ default: {
+ assert(0); // unreachable
+ scratch->core_info.status |= STATUS_ERROR;
+ return HWLM_TERMINATE_MATCHING;
+ }
+ }
+ }
+
+ assert(0); // unreachable
+ return HWLM_CONTINUE_MATCHING;
+}
+
+#undef L_PROGRAM_CASE
+#undef L_PROGRAM_NEXT_INSTRUCTION
+#undef L_PROGRAM_NEXT_INSTRUCTION_JUMP
+
+#undef PROGRAM_CASE
+#undef PROGRAM_NEXT_INSTRUCTION
+#undef PROGRAM_NEXT_INSTRUCTION_JUMP
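
For orientation, here is a minimal, self-contained sketch of the case/next-instruction dispatch pattern that the interpreter above is built on. Every name in it (toy_instr, TOY_CASE, TOY_NEXT_INSTRUCTION, TOY_NEXT_INSTRUCTION_JUMP) is invented for illustration; the real L_PROGRAM_* macros additionally handle instruction sizing and alignment.

    #include <assert.h>
    #include <stdio.h>

    enum toy_op { TOY_CHECK, TOY_REPORT, TOY_END };

    struct toy_instr {
        enum toy_op code;
        int arg;       /* TOY_CHECK: expected input; TOY_REPORT: report id */
        int fail_jump; /* TOY_CHECK: relative jump (in instructions) on failure */
    };

    /* Each case body ends by advancing pc to the next instruction; a failed
     * check adjusts pc itself and restarts dispatch via 'continue'. */
    #define TOY_CASE(name) case TOY_##name:
    #define TOY_NEXT_INSTRUCTION pc++; break;
    #define TOY_NEXT_INSTRUCTION_JUMP continue;

    static void toy_run(const struct toy_instr *pc, int input) {
        for (;;) {
            switch (pc->code) {
            TOY_CASE(CHECK) {
                if (input != pc->arg) {
                    assert(pc->fail_jump); /* must make progress */
                    pc += pc->fail_jump;
                    TOY_NEXT_INSTRUCTION_JUMP
                }
            }
            TOY_NEXT_INSTRUCTION

            TOY_CASE(REPORT) {
                printf("report %d for input %d\n", pc->arg, input);
            }
            TOY_NEXT_INSTRUCTION

            TOY_CASE(END)
                return;
            }
        }
    }

    int main(void) {
        const struct toy_instr prog[] = {
            {TOY_CHECK, 42, 2}, /* on mismatch, jump over the report to END */
            {TOY_REPORT, 1, 0},
            {TOY_END, 0, 0},
        };
        toy_run(prog, 42); /* check passes: prints "report 1 for input 42" */
        toy_run(prog, 7);  /* check fails: jumps straight to END */
        return 0;
    }

The key design point visible above is that the jump macro expands to a bare continue, so a case body that has already adjusted pc (for example by fail_jump) can restart dispatch without the usual advance-by-one.
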
diff --git a/contrib/libs/hyperscan/src/rose/program_runtime.h b/contrib/libs/hyperscan/src/rose/program_runtime.h
index 3fca3301c2..50bf202c6f 100644
--- a/contrib/libs/hyperscan/src/rose/program_runtime.h
+++ b/contrib/libs/hyperscan/src/rose/program_runtime.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,7 +34,7 @@
#ifndef PROGRAM_RUNTIME_H
#define PROGRAM_RUNTIME_H
-#include "hwlm/hwlm.h" // for hwlmcb_rv_t
+#include "hwlm/hwlm.h" // for hwlmcb_rv_t
#include "rose.h"
#include "scratch.h"
#include "ue2common.h"
@@ -54,8 +54,8 @@ hwlmcb_rv_t roseRunProgram(const struct RoseEngine *t,
struct hs_scratch *scratch, u32 programOffset,
u64a som, u64a end, u8 prog_flags);
-hwlmcb_rv_t roseRunProgram_l(const struct RoseEngine *t,
+hwlmcb_rv_t roseRunProgram_l(const struct RoseEngine *t,
struct hs_scratch *scratch, u32 programOffset,
- u64a som, u64a end, u8 prog_flags);
+ u64a som, u64a end, u8 prog_flags);
#endif // PROGRAM_RUNTIME_H
diff --git a/contrib/libs/hyperscan/src/rose/rose.h b/contrib/libs/hyperscan/src/rose/rose.h
index 0c2ae08073..409b70028f 100644
--- a/contrib/libs/hyperscan/src/rose/rose.h
+++ b/contrib/libs/hyperscan/src/rose/rose.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -53,10 +53,10 @@ int roseReportAdaptor(u64a start, u64a end, ReportID id, void *context);
int roseRunBoundaryProgram(const struct RoseEngine *rose, u32 program,
u64a stream_offset, struct hs_scratch *scratch);
-int roseRunFlushCombProgram(const struct RoseEngine *rose,
- struct hs_scratch *scratch, u64a end);
-
-int roseRunLastFlushCombProgram(const struct RoseEngine *rose,
- struct hs_scratch *scratch, u64a end);
-
+int roseRunFlushCombProgram(const struct RoseEngine *rose,
+ struct hs_scratch *scratch, u64a end);
+
+int roseRunLastFlushCombProgram(const struct RoseEngine *rose,
+ struct hs_scratch *scratch, u64a end);
+
#endif // ROSE_H
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_add.cpp b/contrib/libs/hyperscan/src/rose/rose_build_add.cpp
index ee748c6a8b..4929c95fce 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_add.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_add.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -453,7 +453,7 @@ RoseVertex tryForAnchoredVertex(RoseBuildImpl *tbi,
<= tbi->cc.grey.maxAnchoredRegion) {
if (ep.maxBound || ep.minBound) {
/* TODO: handle, however these cases are not generated currently by
- ng_violet */
+ ng_violet */
return RoseGraph::null_vertex();
}
max_width = depth(ep.maxBound + iv_info.s.length());
@@ -567,7 +567,7 @@ void doRoseLiteralVertex(RoseBuildImpl *tbi, bool use_eod_table,
assert(iv_info.type == RIV_LITERAL);
assert(!parents.empty()); /* start vertices should not be here */
- // ng_violet should have ensured that mixed-sensitivity literals are no
+ // ng_violet should have ensured that mixed-sensitivity literals are no
// longer than the benefits max width.
assert(iv_info.s.length() <= MAX_MASK2_WIDTH ||
!mixed_sensitivity(iv_info.s));
@@ -1851,10 +1851,10 @@ bool RoseBuildImpl::addChainTail(const raw_puff &rp, u32 *queue_out,
static
bool prepAcceptForAddAnchoredNFA(RoseBuildImpl &tbi, const NGHolder &w,
- NFAVertex u,
+ NFAVertex u,
const vector<DepthMinMax> &vertexDepths,
map<u32, DepthMinMax> &depthMap,
- map<NFAVertex, set<u32>> &reportMap,
+ map<NFAVertex, set<u32>> &reportMap,
map<ReportID, u32> &allocated_reports,
flat_set<u32> &added_lit_ids) {
const depth max_anchored_depth(tbi.cc.grey.maxAnchoredRegion);
@@ -1882,9 +1882,9 @@ bool prepAcceptForAddAnchoredNFA(RoseBuildImpl &tbi, const NGHolder &w,
depthMap[lit_id] = unionDepthMinMax(depthMap[lit_id], d);
}
- if (depthMap[lit_id].max > max_anchored_depth) {
+ if (depthMap[lit_id].max > max_anchored_depth) {
DEBUG_PRINTF("depth=%s exceeds maxAnchoredRegion=%u\n",
- depthMap[lit_id].max.str().c_str(),
+ depthMap[lit_id].max.str().c_str(),
tbi.cc.grey.maxAnchoredRegion);
return false;
}
@@ -1931,7 +1931,7 @@ bool RoseBuildImpl::addAnchoredAcyclic(const NGHolder &h) {
flat_set<u32> added_lit_ids; /* literal ids added for this NFA */
for (auto v : inv_adjacent_vertices_range(h.accept, h)) {
- if (!prepAcceptForAddAnchoredNFA(*this, h, v, vertexDepths, depthMap,
+ if (!prepAcceptForAddAnchoredNFA(*this, h, v, vertexDepths, depthMap,
reportMap, allocated_reports,
added_lit_ids)) {
removeAddedLiterals(*this, added_lit_ids);
@@ -1945,7 +1945,7 @@ bool RoseBuildImpl::addAnchoredAcyclic(const NGHolder &h) {
if (v == h.accept) {
continue;
}
- if (!prepAcceptForAddAnchoredNFA(*this, h, v, vertexDepths, depthMap,
+ if (!prepAcceptForAddAnchoredNFA(*this, h, v, vertexDepths, depthMap,
reportMap, allocated_reports_eod,
added_lit_ids)) {
removeAddedLiterals(*this, added_lit_ids);
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_bytecode.cpp b/contrib/libs/hyperscan/src/rose/rose_build_bytecode.cpp
index da2fc4c2eb..df464c2800 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_bytecode.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_bytecode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -426,17 +426,17 @@ void fillStateOffsets(const RoseBuildImpl &build, u32 rolesWithStateCount,
curr_offset += mmbit_size(build.rm.numEkeys());
so->exhausted_size = mmbit_size(build.rm.numEkeys());
- // Logical multibit.
- so->logicalVec = curr_offset;
- so->logicalVec_size = mmbit_size(build.rm.numLogicalKeys() +
- build.rm.numLogicalOps());
- curr_offset += so->logicalVec_size;
-
- // Combination multibit.
- so->combVec = curr_offset;
- so->combVec_size = mmbit_size(build.rm.numCkeys());
- curr_offset += so->combVec_size;
-
+ // Logical multibit.
+ so->logicalVec = curr_offset;
+ so->logicalVec_size = mmbit_size(build.rm.numLogicalKeys() +
+ build.rm.numLogicalOps());
+ curr_offset += so->logicalVec_size;
+
+ // Combination multibit.
+ so->combVec = curr_offset;
+ so->combVec_size = mmbit_size(build.rm.numCkeys());
+ curr_offset += so->combVec_size;
+
// SOM locations and valid/writeable multibit structures.
if (build.ssm.numSomSlots()) {
const u32 somWidth = build.ssm.somPrecision();
@@ -554,8 +554,8 @@ void findFixedDepthTops(const RoseGraph &g, const set<PredTopPair> &triggers,
*/
static
bytecode_ptr<NFA> pickImpl(bytecode_ptr<NFA> dfa_impl,
- bytecode_ptr<NFA> nfa_impl,
- bool fast_nfa) {
+ bytecode_ptr<NFA> nfa_impl,
+ bool fast_nfa) {
assert(nfa_impl);
assert(dfa_impl);
assert(isDfaType(dfa_impl->type));
@@ -585,7 +585,7 @@ bytecode_ptr<NFA> pickImpl(bytecode_ptr<NFA> dfa_impl,
return nfa_impl;
}
} else {
- if (n_accel && fast_nfa) {
+ if (n_accel && fast_nfa) {
return nfa_impl;
} else {
return dfa_impl;
@@ -634,15 +634,15 @@ bytecode_ptr<NFA> getDfa(raw_dfa &rdfa, bool is_transient,
dfa = mcshengCompile(rdfa, cc, rm);
}
if (!dfa) {
- dfa = sheng32Compile(rdfa, cc, rm, false);
- }
- if (!dfa) {
- dfa = sheng64Compile(rdfa, cc, rm, false);
- }
- if (!dfa && !is_transient) {
- dfa = mcshengCompile64(rdfa, cc, rm);
- }
- if (!dfa) {
+ dfa = sheng32Compile(rdfa, cc, rm, false);
+ }
+ if (!dfa) {
+ dfa = sheng64Compile(rdfa, cc, rm, false);
+ }
+ if (!dfa && !is_transient) {
+ dfa = mcshengCompile64(rdfa, cc, rm);
+ }
+ if (!dfa) {
// Sheng wasn't successful, so unleash McClellan!
dfa = mcclellanCompile(rdfa, cc, rm, false);
}
@@ -688,21 +688,21 @@ buildSuffix(const ReportManager &rm, const SomSlotManager &ssm,
}
}
- bool fast_nfa = false;
+ bool fast_nfa = false;
auto n = constructNFA(holder, &rm, fixed_depth_tops, triggers,
- compress_state, fast_nfa, cc);
+ compress_state, fast_nfa, cc);
assert(n);
if (oneTop && cc.grey.roseMcClellanSuffix) {
if (cc.grey.roseMcClellanSuffix == 2 || n->nPositions > 128 ||
- !has_bounded_repeats_other_than_firsts(*n) || !fast_nfa) {
+ !has_bounded_repeats_other_than_firsts(*n) || !fast_nfa) {
auto rdfa = buildMcClellan(holder, &rm, false, triggers.at(0),
cc.grey);
if (rdfa) {
auto d = getDfa(*rdfa, false, cc, rm);
assert(d);
if (cc.grey.roseMcClellanSuffix != 2) {
- n = pickImpl(move(d), move(n), fast_nfa);
+ n = pickImpl(move(d), move(n), fast_nfa);
} else {
n = move(d);
}
@@ -837,24 +837,24 @@ bytecode_ptr<NFA> makeLeftNfa(const RoseBuildImpl &tbi, left_id &left,
n = constructLBR(*left.graph(), triggers.begin()->second, cc, rm);
}
- bool fast_nfa = false;
+ bool fast_nfa = false;
if (!n && left.graph()) {
map<u32, vector<vector<CharReach>>> triggers;
if (left.graph()->kind == NFA_INFIX) {
findTriggerSequences(tbi, infixTriggers.at(left), &triggers);
}
n = constructNFA(*left.graph(), nullptr, fixed_depth_tops, triggers,
- compress_state, fast_nfa, cc);
+ compress_state, fast_nfa, cc);
}
if (cc.grey.roseMcClellanPrefix == 1 && is_prefix && !left.dfa()
&& left.graph()
- && (!n || !has_bounded_repeats_other_than_firsts(*n) || !fast_nfa)) {
+ && (!n || !has_bounded_repeats_other_than_firsts(*n) || !fast_nfa)) {
auto rdfa = buildMcClellan(*left.graph(), nullptr, cc.grey);
if (rdfa) {
auto d = getDfa(*rdfa, is_transient, cc, rm);
assert(d);
- n = pickImpl(move(d), move(n), fast_nfa);
+ n = pickImpl(move(d), move(n), fast_nfa);
}
}
@@ -1639,18 +1639,18 @@ public:
const map<u32, u32> fixed_depth_tops; /* no tops */
const map<u32, vector<vector<CharReach>>> triggers; /* no tops */
bool compress_state = cc.streaming;
- bool fast_nfa = false;
+ bool fast_nfa = false;
auto n = constructNFA(h, &rm, fixed_depth_tops, triggers,
- compress_state, fast_nfa, cc);
+ compress_state, fast_nfa, cc);
// Try for a DFA upgrade.
if (n && cc.grey.roseMcClellanOutfix &&
- (!has_bounded_repeats_other_than_firsts(*n) || !fast_nfa)) {
+ (!has_bounded_repeats_other_than_firsts(*n) || !fast_nfa)) {
auto rdfa = buildMcClellan(h, &rm, cc.grey);
if (rdfa) {
auto d = getDfa(*rdfa, false, cc, rm);
if (d) {
- n = pickImpl(move(d), move(n), fast_nfa);
+ n = pickImpl(move(d), move(n), fast_nfa);
}
}
}
@@ -2494,18 +2494,18 @@ void writeLeftInfo(RoseEngineBlob &engine_blob, RoseEngine &proto,
}
static
-void writeLogicalInfo(const ReportManager &rm, RoseEngineBlob &engine_blob,
- RoseEngine &proto) {
- const auto &tree = rm.getLogicalTree();
- proto.logicalTreeOffset = engine_blob.add_range(tree);
- const auto &combMap = rm.getCombInfoMap();
- proto.combInfoMapOffset = engine_blob.add_range(combMap);
- proto.lkeyCount = rm.numLogicalKeys();
- proto.lopCount = rm.numLogicalOps();
- proto.ckeyCount = rm.numCkeys();
-}
-
-static
+void writeLogicalInfo(const ReportManager &rm, RoseEngineBlob &engine_blob,
+ RoseEngine &proto) {
+ const auto &tree = rm.getLogicalTree();
+ proto.logicalTreeOffset = engine_blob.add_range(tree);
+ const auto &combMap = rm.getCombInfoMap();
+ proto.combInfoMapOffset = engine_blob.add_range(combMap);
+ proto.lkeyCount = rm.numLogicalKeys();
+ proto.lopCount = rm.numLogicalOps();
+ proto.ckeyCount = rm.numCkeys();
+}
+
+static
void writeNfaInfo(const RoseBuildImpl &build, build_context &bc,
RoseEngine &proto, const set<u32> &no_retrigger_queues) {
const u32 queue_count = build.qif.allocated_count();
@@ -3350,24 +3350,24 @@ RoseProgram makeEodProgram(const RoseBuildImpl &build, build_context &bc,
}
static
-RoseProgram makeFlushCombProgram(const RoseEngine &t) {
- RoseProgram program;
- if (t.ckeyCount) {
- addFlushCombinationProgram(program);
- }
- return program;
-}
-
-static
-RoseProgram makeLastFlushCombProgram(const RoseEngine &t) {
- RoseProgram program;
- if (t.ckeyCount) {
- addLastFlushCombinationProgram(program);
- }
- return program;
-}
-
-static
+RoseProgram makeFlushCombProgram(const RoseEngine &t) {
+ RoseProgram program;
+ if (t.ckeyCount) {
+ addFlushCombinationProgram(program);
+ }
+ return program;
+}
+
+static
+RoseProgram makeLastFlushCombProgram(const RoseEngine &t) {
+ RoseProgram program;
+ if (t.ckeyCount) {
+ addLastFlushCombinationProgram(program);
+ }
+ return program;
+}
+
+static
u32 history_required(const rose_literal_id &key) {
if (key.msk.size() < key.s.length()) {
return key.elength() - 1;
@@ -3732,15 +3732,15 @@ bytecode_ptr<RoseEngine> RoseBuildImpl::buildFinalEngine(u32 minWidth) {
writeDkeyInfo(rm, bc.engine_blob, proto);
writeLeftInfo(bc.engine_blob, proto, leftInfoTable);
- writeLogicalInfo(rm, bc.engine_blob, proto);
-
- auto flushComb_prog = makeFlushCombProgram(proto);
- proto.flushCombProgramOffset = writeProgram(bc, move(flushComb_prog));
-
- auto lastFlushComb_prog = makeLastFlushCombProgram(proto);
- proto.lastFlushCombProgramOffset =
- writeProgram(bc, move(lastFlushComb_prog));
-
+ writeLogicalInfo(rm, bc.engine_blob, proto);
+
+ auto flushComb_prog = makeFlushCombProgram(proto);
+ proto.flushCombProgramOffset = writeProgram(bc, move(flushComb_prog));
+
+ auto lastFlushComb_prog = makeLastFlushCombProgram(proto);
+ proto.lastFlushCombProgramOffset =
+ writeProgram(bc, move(lastFlushComb_prog));
+
// Build anchored matcher.
auto atable = buildAnchoredMatcher(*this, fragments, anchored_dfas);
if (atable) {
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_dedupe.cpp b/contrib/libs/hyperscan/src/rose/rose_build_dedupe.cpp
index 9f4ebce338..d5d002d43b 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_dedupe.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_dedupe.cpp
@@ -29,7 +29,7 @@
#include "rose_build_impl.h"
#include "nfa/castlecompile.h"
#include "nfagraph/ng_repeat.h"
-#include "smallwrite/smallwrite_build.h"
+#include "smallwrite/smallwrite_build.h"
#include "util/compile_context.h"
#include "util/boundary_reports.h"
#include "util/make_unique.h"
@@ -160,10 +160,10 @@ RoseDedupeAuxImpl::RoseDedupeAuxImpl(const RoseBuildImpl &build_in)
}
}
- for (const auto &report_id : build.smwr.all_reports()) {
- live_reports.insert(report_id);
- }
-
+ for (const auto &report_id : build.smwr.all_reports()) {
+ live_reports.insert(report_id);
+ }
+
// Collect live reports from boundary reports.
insert(&live_reports, build.boundary.report_at_0);
insert(&live_reports, build.boundary.report_at_0_eod);
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_groups.cpp b/contrib/libs/hyperscan/src/rose/rose_build_groups.cpp
index 5320be3188..209889e558 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_groups.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_groups.cpp
@@ -96,7 +96,7 @@ bool eligibleForAlwaysOnGroup(const RoseBuildImpl &build, u32 id) {
static
bool requires_group_assignment(const rose_literal_id &lit,
const rose_literal_info &info) {
- if (lit.delay) { /* we will check the shadow's leader */
+ if (lit.delay) { /* we will check the shadow's leader */
return false;
}
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_impl.h b/contrib/libs/hyperscan/src/rose/rose_build_impl.h
index 24f44ea3cb..7780848b1b 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_impl.h
+++ b/contrib/libs/hyperscan/src/rose/rose_build_impl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_instructions.cpp b/contrib/libs/hyperscan/src/rose/rose_build_instructions.cpp
index 42ede9fc0d..f96221b247 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_instructions.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_instructions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, Intel Corporation
+ * Copyright (c) 2017-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -47,8 +47,8 @@ RoseInstrSuffixesEod::~RoseInstrSuffixesEod() = default;
RoseInstrMatcherEod::~RoseInstrMatcherEod() = default;
RoseInstrEnd::~RoseInstrEnd() = default;
RoseInstrClearWorkDone::~RoseInstrClearWorkDone() = default;
-RoseInstrFlushCombination::~RoseInstrFlushCombination() = default;
-RoseInstrLastFlushCombination::~RoseInstrLastFlushCombination() = default;
+RoseInstrFlushCombination::~RoseInstrFlushCombination() = default;
+RoseInstrLastFlushCombination::~RoseInstrLastFlushCombination() = default;
using OffsetMap = RoseInstruction::OffsetMap;
@@ -162,17 +162,17 @@ void RoseInstrCheckMask32::write(void *dest, RoseEngineBlob &blob,
inst->fail_jump = calc_jump(offset_map, this, target);
}
-void RoseInstrCheckMask64::write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const {
- RoseInstrBase::write(dest, blob, offset_map);
- auto *inst = static_cast<impl_type *>(dest);
- copy(begin(and_mask), end(and_mask), inst->and_mask);
- copy(begin(cmp_mask), end(cmp_mask), inst->cmp_mask);
- inst->neg_mask = neg_mask;
- inst->offset = offset;
- inst->fail_jump = calc_jump(offset_map, this, target);
-}
-
+void RoseInstrCheckMask64::write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const {
+ RoseInstrBase::write(dest, blob, offset_map);
+ auto *inst = static_cast<impl_type *>(dest);
+ copy(begin(and_mask), end(and_mask), inst->and_mask);
+ copy(begin(cmp_mask), end(cmp_mask), inst->cmp_mask);
+ inst->neg_mask = neg_mask;
+ inst->offset = offset;
+ inst->fail_jump = calc_jump(offset_map, this, target);
+}
+
void RoseInstrCheckByte::write(void *dest, RoseEngineBlob &blob,
const OffsetMap &offset_map) const {
RoseInstrBase::write(dest, blob, offset_map);
@@ -238,36 +238,36 @@ void RoseInstrCheckShufti32x16::write(void *dest, RoseEngineBlob &blob,
inst->fail_jump = calc_jump(offset_map, this, target);
}
-void RoseInstrCheckShufti64x8::write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const {
- RoseInstrBase::write(dest, blob, offset_map);
- auto *inst = static_cast<impl_type *>(dest);
- copy(begin(hi_mask), end(hi_mask), inst->hi_mask);
- copy(begin(lo_mask), end(lo_mask), inst->lo_mask);
- copy(begin(bucket_select_mask), end(bucket_select_mask),
- inst->bucket_select_mask);
- inst->neg_mask = neg_mask;
- inst->offset = offset;
- inst->fail_jump = calc_jump(offset_map, this, target);
-}
-
-void RoseInstrCheckShufti64x16::write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const {
- RoseInstrBase::write(dest, blob, offset_map);
- auto *inst = static_cast<impl_type *>(dest);
- copy(begin(hi_mask_1), end(hi_mask_1), inst->hi_mask_1);
- copy(begin(hi_mask_2), end(hi_mask_2), inst->hi_mask_2);
- copy(begin(lo_mask_1), end(lo_mask_1), inst->lo_mask_1);
- copy(begin(lo_mask_2), end(lo_mask_2), inst->lo_mask_2);
- copy(begin(bucket_select_mask_hi), end(bucket_select_mask_hi),
- inst->bucket_select_mask_hi);
- copy(begin(bucket_select_mask_lo), end(bucket_select_mask_lo),
- inst->bucket_select_mask_lo);
- inst->neg_mask = neg_mask;
- inst->offset = offset;
- inst->fail_jump = calc_jump(offset_map, this, target);
-}
-
+void RoseInstrCheckShufti64x8::write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const {
+ RoseInstrBase::write(dest, blob, offset_map);
+ auto *inst = static_cast<impl_type *>(dest);
+ copy(begin(hi_mask), end(hi_mask), inst->hi_mask);
+ copy(begin(lo_mask), end(lo_mask), inst->lo_mask);
+ copy(begin(bucket_select_mask), end(bucket_select_mask),
+ inst->bucket_select_mask);
+ inst->neg_mask = neg_mask;
+ inst->offset = offset;
+ inst->fail_jump = calc_jump(offset_map, this, target);
+}
+
+void RoseInstrCheckShufti64x16::write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const {
+ RoseInstrBase::write(dest, blob, offset_map);
+ auto *inst = static_cast<impl_type *>(dest);
+ copy(begin(hi_mask_1), end(hi_mask_1), inst->hi_mask_1);
+ copy(begin(hi_mask_2), end(hi_mask_2), inst->hi_mask_2);
+ copy(begin(lo_mask_1), end(lo_mask_1), inst->lo_mask_1);
+ copy(begin(lo_mask_2), end(lo_mask_2), inst->lo_mask_2);
+ copy(begin(bucket_select_mask_hi), end(bucket_select_mask_hi),
+ inst->bucket_select_mask_hi);
+ copy(begin(bucket_select_mask_lo), end(bucket_select_mask_lo),
+ inst->bucket_select_mask_lo);
+ inst->neg_mask = neg_mask;
+ inst->offset = offset;
+ inst->fail_jump = calc_jump(offset_map, this, target);
+}
+
void RoseInstrCheckInfix::write(void *dest, RoseEngineBlob &blob,
const OffsetMap &offset_map) const {
RoseInstrBase::write(dest, blob, offset_map);
@@ -687,26 +687,26 @@ void RoseInstrIncludedJump::write(void *dest, RoseEngineBlob &blob,
inst->squash = squash;
}
-void RoseInstrSetLogical::write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const {
- RoseInstrBase::write(dest, blob, offset_map);
- auto *inst = static_cast<impl_type *>(dest);
- inst->lkey = lkey;
- inst->offset_adjust = offset_adjust;
-}
-
-void RoseInstrSetCombination::write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const {
- RoseInstrBase::write(dest, blob, offset_map);
- auto *inst = static_cast<impl_type *>(dest);
- inst->ckey = ckey;
-}
-
-void RoseInstrSetExhaust::write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const {
- RoseInstrBase::write(dest, blob, offset_map);
- auto *inst = static_cast<impl_type *>(dest);
- inst->ekey = ekey;
-}
-
-}
+void RoseInstrSetLogical::write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const {
+ RoseInstrBase::write(dest, blob, offset_map);
+ auto *inst = static_cast<impl_type *>(dest);
+ inst->lkey = lkey;
+ inst->offset_adjust = offset_adjust;
+}
+
+void RoseInstrSetCombination::write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const {
+ RoseInstrBase::write(dest, blob, offset_map);
+ auto *inst = static_cast<impl_type *>(dest);
+ inst->ckey = ckey;
+}
+
+void RoseInstrSetExhaust::write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const {
+ RoseInstrBase::write(dest, blob, offset_map);
+ auto *inst = static_cast<impl_type *>(dest);
+ inst->ekey = ekey;
+}
+
+}
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_instructions.h b/contrib/libs/hyperscan/src/rose/rose_build_instructions.h
index 9cddc014a8..f18f4a4715 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_instructions.h
+++ b/contrib/libs/hyperscan/src/rose/rose_build_instructions.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, Intel Corporation
+ * Copyright (c) 2017-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -519,43 +519,43 @@ public:
}
};
-class RoseInstrCheckMask64
- : public RoseInstrBaseOneTarget<ROSE_INSTR_CHECK_MASK_64,
- ROSE_STRUCT_CHECK_MASK_64,
- RoseInstrCheckMask64> {
-public:
- std::array<u8, 64> and_mask;
- std::array<u8, 64> cmp_mask;
- u64a neg_mask;
- s32 offset;
- const RoseInstruction *target;
-
- RoseInstrCheckMask64(std::array<u8, 64> and_mask_in,
- std::array<u8, 64> cmp_mask_in, u64a neg_mask_in,
- s32 offset_in, const RoseInstruction *target_in)
- : and_mask(std::move(and_mask_in)), cmp_mask(std::move(cmp_mask_in)),
- neg_mask(neg_mask_in), offset(offset_in), target(target_in) {}
- bool operator==(const RoseInstrCheckMask64 &ri) const {
- return and_mask == ri.and_mask && cmp_mask == ri.cmp_mask &&
- neg_mask == ri.neg_mask && offset == ri.offset &&
- target == ri.target;
- }
-
- size_t hash() const override {
- return hash_all(opcode, and_mask, cmp_mask, neg_mask, offset);
- }
-
- void write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const override;
-
- bool equiv_to(const RoseInstrCheckMask64 &ri, const OffsetMap &offsets,
- const OffsetMap &other_offsets) const {
- return and_mask == ri.and_mask && cmp_mask == ri.cmp_mask &&
- neg_mask == ri.neg_mask && offset == ri.offset &&
- offsets.at(target) == other_offsets.at(ri.target);
- }
-};
-
+class RoseInstrCheckMask64
+ : public RoseInstrBaseOneTarget<ROSE_INSTR_CHECK_MASK_64,
+ ROSE_STRUCT_CHECK_MASK_64,
+ RoseInstrCheckMask64> {
+public:
+ std::array<u8, 64> and_mask;
+ std::array<u8, 64> cmp_mask;
+ u64a neg_mask;
+ s32 offset;
+ const RoseInstruction *target;
+
+ RoseInstrCheckMask64(std::array<u8, 64> and_mask_in,
+ std::array<u8, 64> cmp_mask_in, u64a neg_mask_in,
+ s32 offset_in, const RoseInstruction *target_in)
+ : and_mask(std::move(and_mask_in)), cmp_mask(std::move(cmp_mask_in)),
+ neg_mask(neg_mask_in), offset(offset_in), target(target_in) {}
+ bool operator==(const RoseInstrCheckMask64 &ri) const {
+ return and_mask == ri.and_mask && cmp_mask == ri.cmp_mask &&
+ neg_mask == ri.neg_mask && offset == ri.offset &&
+ target == ri.target;
+ }
+
+ size_t hash() const override {
+ return hash_all(opcode, and_mask, cmp_mask, neg_mask, offset);
+ }
+
+ void write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const override;
+
+ bool equiv_to(const RoseInstrCheckMask64 &ri, const OffsetMap &offsets,
+ const OffsetMap &other_offsets) const {
+ return and_mask == ri.and_mask && cmp_mask == ri.cmp_mask &&
+ neg_mask == ri.neg_mask && offset == ri.offset &&
+ offsets.at(target) == other_offsets.at(ri.target);
+ }
+};
+
class RoseInstrCheckByte
: public RoseInstrBaseOneTarget<ROSE_INSTR_CHECK_BYTE,
ROSE_STRUCT_CHECK_BYTE,
@@ -775,109 +775,109 @@ public:
}
};
-class RoseInstrCheckShufti64x8
- : public RoseInstrBaseOneTarget<ROSE_INSTR_CHECK_SHUFTI_64x8,
- ROSE_STRUCT_CHECK_SHUFTI_64x8,
- RoseInstrCheckShufti64x8> {
-public:
- std::array<u8, 64> hi_mask;
- std::array<u8, 64> lo_mask;
- std::array<u8, 64> bucket_select_mask;
- u64a neg_mask;
- s32 offset;
- const RoseInstruction *target;
-
- RoseInstrCheckShufti64x8(std::array<u8, 64> hi_mask_in,
- std::array<u8, 64> lo_mask_in,
- std::array<u8, 64> bucket_select_mask_in,
- u64a neg_mask_in, s32 offset_in,
- const RoseInstruction *target_in)
- : hi_mask(std::move(hi_mask_in)), lo_mask(std::move(lo_mask_in)),
- bucket_select_mask(std::move(bucket_select_mask_in)),
- neg_mask(neg_mask_in), offset(offset_in), target(target_in) {}
-
- bool operator==(const RoseInstrCheckShufti64x8 &ri) const {
- return hi_mask == ri.hi_mask && lo_mask == ri.lo_mask &&
- bucket_select_mask == ri.bucket_select_mask &&
- neg_mask == ri.neg_mask && offset == ri.offset &&
- target == ri.target;
- }
-
- size_t hash() const override {
- return hash_all(opcode, hi_mask, lo_mask, bucket_select_mask, neg_mask,
- offset);
- }
-
- void write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const override;
-
- bool equiv_to(const RoseInstrCheckShufti64x8 &ri, const OffsetMap &offsets,
- const OffsetMap &other_offsets) const {
- return hi_mask == ri.hi_mask && lo_mask == ri.lo_mask &&
- bucket_select_mask == ri.bucket_select_mask &&
- neg_mask == ri.neg_mask && offset == ri.offset &&
- offsets.at(target) == other_offsets.at(ri.target);
- }
-};
-
-class RoseInstrCheckShufti64x16
- : public RoseInstrBaseOneTarget<ROSE_INSTR_CHECK_SHUFTI_64x16,
- ROSE_STRUCT_CHECK_SHUFTI_64x16,
- RoseInstrCheckShufti64x16> {
-public:
- std::array<u8, 64> hi_mask_1;
- std::array<u8, 64> hi_mask_2;
- std::array<u8, 64> lo_mask_1;
- std::array<u8, 64> lo_mask_2;
- std::array<u8, 64> bucket_select_mask_hi;
- std::array<u8, 64> bucket_select_mask_lo;
- u64a neg_mask;
- s32 offset;
- const RoseInstruction *target;
-
- RoseInstrCheckShufti64x16(std::array<u8, 64> hi_mask_1_in,
- std::array<u8, 64> hi_mask_2_in,
- std::array<u8, 64> lo_mask_1_in,
- std::array<u8, 64> lo_mask_2_in,
- std::array<u8, 64> bucket_select_mask_hi_in,
- std::array<u8, 64> bucket_select_mask_lo_in,
- u64a neg_mask_in, s32 offset_in,
- const RoseInstruction *target_in)
- : hi_mask_1(std::move(hi_mask_1_in)), hi_mask_2(std::move(hi_mask_2_in)),
- lo_mask_1(std::move(lo_mask_1_in)), lo_mask_2(std::move(lo_mask_2_in)),
- bucket_select_mask_hi(std::move(bucket_select_mask_hi_in)),
- bucket_select_mask_lo(std::move(bucket_select_mask_lo_in)),
- neg_mask(neg_mask_in), offset(offset_in), target(target_in) {}
-
- bool operator==(const RoseInstrCheckShufti64x16 &ri) const {
- return hi_mask_1 == ri.hi_mask_1 && hi_mask_2 == ri.hi_mask_2 &&
- lo_mask_1 == ri.lo_mask_1 && lo_mask_2 == ri.lo_mask_2 &&
- bucket_select_mask_hi == ri.bucket_select_mask_hi &&
- bucket_select_mask_lo == ri.bucket_select_mask_lo &&
- neg_mask == ri.neg_mask && offset == ri.offset &&
- target == ri.target;
- }
-
- size_t hash() const override {
- return hash_all(opcode, hi_mask_1, hi_mask_2, lo_mask_1, lo_mask_2,
- bucket_select_mask_hi, bucket_select_mask_lo, neg_mask,
- offset);
- }
-
- void write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const override;
-
- bool equiv_to(const RoseInstrCheckShufti64x16 &ri, const OffsetMap &offsets,
- const OffsetMap &other_offsets) const {
- return hi_mask_1 == ri.hi_mask_1 && hi_mask_2 == ri.hi_mask_2 &&
- lo_mask_1 == ri.lo_mask_1 && lo_mask_2 == ri.lo_mask_2 &&
- bucket_select_mask_hi == ri.bucket_select_mask_hi &&
- bucket_select_mask_lo == ri.bucket_select_mask_lo &&
- neg_mask == ri.neg_mask && offset == ri.offset &&
- offsets.at(target) == other_offsets.at(ri.target);
- }
-};
-
+class RoseInstrCheckShufti64x8
+ : public RoseInstrBaseOneTarget<ROSE_INSTR_CHECK_SHUFTI_64x8,
+ ROSE_STRUCT_CHECK_SHUFTI_64x8,
+ RoseInstrCheckShufti64x8> {
+public:
+ std::array<u8, 64> hi_mask;
+ std::array<u8, 64> lo_mask;
+ std::array<u8, 64> bucket_select_mask;
+ u64a neg_mask;
+ s32 offset;
+ const RoseInstruction *target;
+
+ RoseInstrCheckShufti64x8(std::array<u8, 64> hi_mask_in,
+ std::array<u8, 64> lo_mask_in,
+ std::array<u8, 64> bucket_select_mask_in,
+ u64a neg_mask_in, s32 offset_in,
+ const RoseInstruction *target_in)
+ : hi_mask(std::move(hi_mask_in)), lo_mask(std::move(lo_mask_in)),
+ bucket_select_mask(std::move(bucket_select_mask_in)),
+ neg_mask(neg_mask_in), offset(offset_in), target(target_in) {}
+
+ bool operator==(const RoseInstrCheckShufti64x8 &ri) const {
+ return hi_mask == ri.hi_mask && lo_mask == ri.lo_mask &&
+ bucket_select_mask == ri.bucket_select_mask &&
+ neg_mask == ri.neg_mask && offset == ri.offset &&
+ target == ri.target;
+ }
+
+ size_t hash() const override {
+ return hash_all(opcode, hi_mask, lo_mask, bucket_select_mask, neg_mask,
+ offset);
+ }
+
+ void write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const override;
+
+ bool equiv_to(const RoseInstrCheckShufti64x8 &ri, const OffsetMap &offsets,
+ const OffsetMap &other_offsets) const {
+ return hi_mask == ri.hi_mask && lo_mask == ri.lo_mask &&
+ bucket_select_mask == ri.bucket_select_mask &&
+ neg_mask == ri.neg_mask && offset == ri.offset &&
+ offsets.at(target) == other_offsets.at(ri.target);
+ }
+};
+
+class RoseInstrCheckShufti64x16
+ : public RoseInstrBaseOneTarget<ROSE_INSTR_CHECK_SHUFTI_64x16,
+ ROSE_STRUCT_CHECK_SHUFTI_64x16,
+ RoseInstrCheckShufti64x16> {
+public:
+ std::array<u8, 64> hi_mask_1;
+ std::array<u8, 64> hi_mask_2;
+ std::array<u8, 64> lo_mask_1;
+ std::array<u8, 64> lo_mask_2;
+ std::array<u8, 64> bucket_select_mask_hi;
+ std::array<u8, 64> bucket_select_mask_lo;
+ u64a neg_mask;
+ s32 offset;
+ const RoseInstruction *target;
+
+ RoseInstrCheckShufti64x16(std::array<u8, 64> hi_mask_1_in,
+ std::array<u8, 64> hi_mask_2_in,
+ std::array<u8, 64> lo_mask_1_in,
+ std::array<u8, 64> lo_mask_2_in,
+ std::array<u8, 64> bucket_select_mask_hi_in,
+ std::array<u8, 64> bucket_select_mask_lo_in,
+ u64a neg_mask_in, s32 offset_in,
+ const RoseInstruction *target_in)
+ : hi_mask_1(std::move(hi_mask_1_in)), hi_mask_2(std::move(hi_mask_2_in)),
+ lo_mask_1(std::move(lo_mask_1_in)), lo_mask_2(std::move(lo_mask_2_in)),
+ bucket_select_mask_hi(std::move(bucket_select_mask_hi_in)),
+ bucket_select_mask_lo(std::move(bucket_select_mask_lo_in)),
+ neg_mask(neg_mask_in), offset(offset_in), target(target_in) {}
+
+ bool operator==(const RoseInstrCheckShufti64x16 &ri) const {
+ return hi_mask_1 == ri.hi_mask_1 && hi_mask_2 == ri.hi_mask_2 &&
+ lo_mask_1 == ri.lo_mask_1 && lo_mask_2 == ri.lo_mask_2 &&
+ bucket_select_mask_hi == ri.bucket_select_mask_hi &&
+ bucket_select_mask_lo == ri.bucket_select_mask_lo &&
+ neg_mask == ri.neg_mask && offset == ri.offset &&
+ target == ri.target;
+ }
+
+ size_t hash() const override {
+ return hash_all(opcode, hi_mask_1, hi_mask_2, lo_mask_1, lo_mask_2,
+ bucket_select_mask_hi, bucket_select_mask_lo, neg_mask,
+ offset);
+ }
+
+ void write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const override;
+
+ bool equiv_to(const RoseInstrCheckShufti64x16 &ri, const OffsetMap &offsets,
+ const OffsetMap &other_offsets) const {
+ return hi_mask_1 == ri.hi_mask_1 && hi_mask_2 == ri.hi_mask_2 &&
+ lo_mask_1 == ri.lo_mask_1 && lo_mask_2 == ri.lo_mask_2 &&
+ bucket_select_mask_hi == ri.bucket_select_mask_hi &&
+ bucket_select_mask_lo == ri.bucket_select_mask_lo &&
+ neg_mask == ri.neg_mask && offset == ri.offset &&
+ offsets.at(target) == other_offsets.at(ri.target);
+ }
+};
+
class RoseInstrCheckInfix
: public RoseInstrBaseOneTarget<ROSE_INSTR_CHECK_INFIX,
ROSE_STRUCT_CHECK_INFIX,
@@ -2284,102 +2284,102 @@ public:
}
};
-class RoseInstrSetLogical
- : public RoseInstrBaseNoTargets<ROSE_INSTR_SET_LOGICAL,
- ROSE_STRUCT_SET_LOGICAL,
- RoseInstrSetLogical> {
-public:
- u32 lkey;
- s32 offset_adjust;
-
- RoseInstrSetLogical(u32 lkey_in, s32 offset_adjust_in)
- : lkey(lkey_in), offset_adjust(offset_adjust_in) {}
-
- bool operator==(const RoseInstrSetLogical &ri) const {
- return lkey == ri.lkey && offset_adjust == ri.offset_adjust;
- }
-
- size_t hash() const override {
- return hash_all(opcode, lkey, offset_adjust);
- }
-
- void write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const override;
-
- bool equiv_to(const RoseInstrSetLogical &ri, const OffsetMap &,
- const OffsetMap &) const {
- return lkey == ri.lkey && offset_adjust == ri.offset_adjust;
- }
-};
-
-class RoseInstrSetCombination
- : public RoseInstrBaseNoTargets<ROSE_INSTR_SET_COMBINATION,
- ROSE_STRUCT_SET_COMBINATION,
- RoseInstrSetCombination> {
-public:
- u32 ckey;
-
- RoseInstrSetCombination(u32 ckey_in) : ckey(ckey_in) {}
-
- bool operator==(const RoseInstrSetCombination &ri) const {
- return ckey == ri.ckey;
- }
-
- size_t hash() const override {
- return hash_all(opcode, ckey);
- }
-
- void write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const override;
-
- bool equiv_to(const RoseInstrSetCombination &ri, const OffsetMap &,
- const OffsetMap &) const {
- return ckey == ri.ckey;
- }
-};
-
-class RoseInstrFlushCombination
- : public RoseInstrBaseTrivial<ROSE_INSTR_FLUSH_COMBINATION,
- ROSE_STRUCT_FLUSH_COMBINATION,
- RoseInstrFlushCombination> {
-public:
- ~RoseInstrFlushCombination() override;
-};
-
-class RoseInstrLastFlushCombination
- : public RoseInstrBaseTrivial<ROSE_INSTR_LAST_FLUSH_COMBINATION,
- ROSE_STRUCT_LAST_FLUSH_COMBINATION,
- RoseInstrLastFlushCombination> {
-public:
- ~RoseInstrLastFlushCombination() override;
-};
-
-class RoseInstrSetExhaust
- : public RoseInstrBaseNoTargets<ROSE_INSTR_SET_EXHAUST,
- ROSE_STRUCT_SET_EXHAUST,
- RoseInstrSetExhaust> {
-public:
- u32 ekey;
-
- RoseInstrSetExhaust(u32 ekey_in) : ekey(ekey_in) {}
-
- bool operator==(const RoseInstrSetExhaust &ri) const {
- return ekey == ri.ekey;
- }
-
- size_t hash() const override {
- return hash_all(opcode, ekey);
- }
-
- void write(void *dest, RoseEngineBlob &blob,
- const OffsetMap &offset_map) const override;
-
- bool equiv_to(const RoseInstrSetExhaust &ri, const OffsetMap &,
- const OffsetMap &) const {
- return ekey == ri.ekey;
- }
-};
-
+class RoseInstrSetLogical
+ : public RoseInstrBaseNoTargets<ROSE_INSTR_SET_LOGICAL,
+ ROSE_STRUCT_SET_LOGICAL,
+ RoseInstrSetLogical> {
+public:
+ u32 lkey;
+ s32 offset_adjust;
+
+ RoseInstrSetLogical(u32 lkey_in, s32 offset_adjust_in)
+ : lkey(lkey_in), offset_adjust(offset_adjust_in) {}
+
+ bool operator==(const RoseInstrSetLogical &ri) const {
+ return lkey == ri.lkey && offset_adjust == ri.offset_adjust;
+ }
+
+ size_t hash() const override {
+ return hash_all(opcode, lkey, offset_adjust);
+ }
+
+ void write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const override;
+
+ bool equiv_to(const RoseInstrSetLogical &ri, const OffsetMap &,
+ const OffsetMap &) const {
+ return lkey == ri.lkey && offset_adjust == ri.offset_adjust;
+ }
+};
+
+class RoseInstrSetCombination
+ : public RoseInstrBaseNoTargets<ROSE_INSTR_SET_COMBINATION,
+ ROSE_STRUCT_SET_COMBINATION,
+ RoseInstrSetCombination> {
+public:
+ u32 ckey;
+
+ RoseInstrSetCombination(u32 ckey_in) : ckey(ckey_in) {}
+
+ bool operator==(const RoseInstrSetCombination &ri) const {
+ return ckey == ri.ckey;
+ }
+
+ size_t hash() const override {
+ return hash_all(opcode, ckey);
+ }
+
+ void write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const override;
+
+ bool equiv_to(const RoseInstrSetCombination &ri, const OffsetMap &,
+ const OffsetMap &) const {
+ return ckey == ri.ckey;
+ }
+};
+
+class RoseInstrFlushCombination
+ : public RoseInstrBaseTrivial<ROSE_INSTR_FLUSH_COMBINATION,
+ ROSE_STRUCT_FLUSH_COMBINATION,
+ RoseInstrFlushCombination> {
+public:
+ ~RoseInstrFlushCombination() override;
+};
+
+class RoseInstrLastFlushCombination
+ : public RoseInstrBaseTrivial<ROSE_INSTR_LAST_FLUSH_COMBINATION,
+ ROSE_STRUCT_LAST_FLUSH_COMBINATION,
+ RoseInstrLastFlushCombination> {
+public:
+ ~RoseInstrLastFlushCombination() override;
+};
+
+class RoseInstrSetExhaust
+ : public RoseInstrBaseNoTargets<ROSE_INSTR_SET_EXHAUST,
+ ROSE_STRUCT_SET_EXHAUST,
+ RoseInstrSetExhaust> {
+public:
+ u32 ekey;
+
+ RoseInstrSetExhaust(u32 ekey_in) : ekey(ekey_in) {}
+
+ bool operator==(const RoseInstrSetExhaust &ri) const {
+ return ekey == ri.ekey;
+ }
+
+ size_t hash() const override {
+ return hash_all(opcode, ekey);
+ }
+
+ void write(void *dest, RoseEngineBlob &blob,
+ const OffsetMap &offset_map) const override;
+
+ bool equiv_to(const RoseInstrSetExhaust &ri, const OffsetMap &,
+ const OffsetMap &) const {
+ return ekey == ri.ekey;
+ }
+};
+
class RoseInstrEnd
: public RoseInstrBaseTrivial<ROSE_INSTR_END, ROSE_STRUCT_END,
RoseInstrEnd> {
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_lookaround.cpp b/contrib/libs/hyperscan/src/rose/rose_build_lookaround.cpp
index f19ad07a86..d0540d79b0 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_lookaround.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_lookaround.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -58,7 +58,7 @@ static const u32 MAX_FWD_LEN = 64;
static const u32 MAX_BACK_LEN = 64;
/** \brief Max lookaround entries for a role. */
-static const u32 MAX_LOOKAROUND_ENTRIES = 32;
+static const u32 MAX_LOOKAROUND_ENTRIES = 32;
/** \brief We would rather have lookarounds with smaller reach than this. */
static const u32 LOOKAROUND_WIDE_REACH = 200;
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_matchers.cpp b/contrib/libs/hyperscan/src/rose/rose_build_matchers.cpp
index 53a0e62f1b..4fde4c4418 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_matchers.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_matchers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, Intel Corporation
+ * Copyright (c) 2016-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_merge.cpp b/contrib/libs/hyperscan/src/rose/rose_build_merge.cpp
index 0fc032e477..5066dbd578 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_merge.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_merge.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -459,7 +459,7 @@ public:
const_iterator end() const { return ordering.end(); }
};
-typedef Bouquet<left_id> LeftfixBouquet;
+typedef Bouquet<left_id> LeftfixBouquet;
typedef Bouquet<suffix_id> SuffixBouquet;
} // namespace
@@ -565,7 +565,7 @@ bool hasSameEngineType(const RoseVertexProps &u_prop,
*
* Parameters are vectors of literals + lag pairs.
*
- * Note: given more constraints on when the leftfixes would be checked
+ * Note: given more constraints on when the leftfixes would be checked
* (mandatory lookarounds passing, offset checks), more merges may be allowed.
*/
static
@@ -599,7 +599,7 @@ bool compatibleLiteralsForMerge(
/* An engine requires that all accesses to it are ordered by offsets. (ie,
we can not check an engine's state at offset Y, if we have already
checked its status at offset X and X > Y). If we can not establish that
- the literals used for triggering will satisfy this property, then it is
+ the literals used for triggering will satisfy this property, then it is
not safe to merge the engine. */
for (const auto &ue : ulits) {
const rose_literal_id &ul = *ue.first;
@@ -1437,19 +1437,19 @@ void mergeLeftfixesVariableLag(RoseBuildImpl &build) {
assert(!parents.empty());
-#ifndef _WIN32
+#ifndef _WIN32
engine_groups[MergeKey(left, parents)].push_back(left);
-#else
- // On Windows, a MergeKey object passed into the map 'engine_groups' is
- // not copied; it is freed along with engine_groups.clear(). If the
- // MergeKey object is constructed on the stack, it is destroyed twice:
- // once at the end of its lifetime and again on engine_groups.clear(),
- // which triggers an is_block_type_valid() assertion failure in the
- // MergeKey destructor.
- MergeKey *mk = new MergeKey(left, parents);
- engine_groups[*mk].push_back(left);
-#endif
+#else
+ // On Windows, a MergeKey object passed into the map 'engine_groups' is
+ // not copied; it is freed along with engine_groups.clear(). If the
+ // MergeKey object is constructed on the stack, it is destroyed twice:
+ // once at the end of its lifetime and again on engine_groups.clear(),
+ // which triggers an is_block_type_valid() assertion failure in the
+ // MergeKey destructor.
+ MergeKey *mk = new MergeKey(left, parents);
+ engine_groups[*mk].push_back(left);
+#endif
}
vector<vector<left_id>> chunks;
@@ -1790,7 +1790,7 @@ u32 estimatedAccelStates(const RoseBuildImpl &tbi, const NGHolder &h) {
}
static
-void mergeNfaLeftfixes(RoseBuildImpl &tbi, LeftfixBouquet &roses) {
+void mergeNfaLeftfixes(RoseBuildImpl &tbi, LeftfixBouquet &roses) {
RoseGraph &g = tbi.g;
DEBUG_PRINTF("%zu nfa rose merge candidates\n", roses.size());
@@ -1906,7 +1906,7 @@ void mergeSmallLeftfixes(RoseBuildImpl &tbi) {
RoseGraph &g = tbi.g;
- LeftfixBouquet nfa_leftfixes;
+ LeftfixBouquet nfa_leftfixes;
for (auto v : vertices_range(g)) {
if (!g[v].left) {
@@ -1951,16 +1951,16 @@ void mergeSmallLeftfixes(RoseBuildImpl &tbi) {
continue;
}
- nfa_leftfixes.insert(left, v);
+ nfa_leftfixes.insert(left, v);
}
- deque<LeftfixBouquet> leftfix_groups;
- chunkBouquets(nfa_leftfixes, leftfix_groups, MERGE_GROUP_SIZE_MAX);
- nfa_leftfixes.clear();
- DEBUG_PRINTF("chunked nfa leftfixes into %zu groups\n",
- leftfix_groups.size());
+ deque<LeftfixBouquet> leftfix_groups;
+ chunkBouquets(nfa_leftfixes, leftfix_groups, MERGE_GROUP_SIZE_MAX);
+ nfa_leftfixes.clear();
+ DEBUG_PRINTF("chunked nfa leftfixes into %zu groups\n",
+ leftfix_groups.size());
- for (auto &group : leftfix_groups) {
+ for (auto &group : leftfix_groups) {
mergeNfaLeftfixes(tbi, group);
}
}
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_misc.cpp b/contrib/libs/hyperscan/src/rose/rose_build_misc.cpp
index 9b619cedf5..0b0e689c99 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_misc.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_misc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -993,19 +993,19 @@ bool canImplementGraphs(const RoseBuildImpl &tbi) {
return true;
}
-/**
- * \brief True if there is an engine with a top that is not triggered by a
- * vertex in the Rose graph. This is a consistency check used in assertions.
- */
+/**
+ * \brief True if there is an engine with a top that is not triggered by a
+ * vertex in the Rose graph. This is a consistency check used in assertions.
+ */
bool hasOrphanedTops(const RoseBuildImpl &build) {
const RoseGraph &g = build.g;
- unordered_map<left_id, set<u32>> leftfixes;
+ unordered_map<left_id, set<u32>> leftfixes;
unordered_map<suffix_id, set<u32>> suffixes;
for (auto v : vertices_range(g)) {
if (g[v].left) {
- set<u32> &tops = leftfixes[g[v].left];
+ set<u32> &tops = leftfixes[g[v].left];
if (!build.isRootSuccessor(v)) {
// Tops for infixes come from the in-edges.
for (const auto &e : in_edges_range(v, g)) {
@@ -1018,7 +1018,7 @@ bool hasOrphanedTops(const RoseBuildImpl &build) {
}
}
- for (const auto &e : leftfixes) {
+ for (const auto &e : leftfixes) {
if (all_tops(e.first) != e.second) {
DEBUG_PRINTF("rose tops (%s) don't match rose graph (%s)\n",
as_string_list(all_tops(e.first)).c_str(),
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_program.cpp b/contrib/libs/hyperscan/src/rose/rose_build_program.cpp
index 7672abe16f..7d1d7ecbb5 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_program.cpp
+++ b/contrib/libs/hyperscan/src/rose/rose_build_program.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -95,7 +95,7 @@ OffsetMap makeOffsetMap(const RoseProgram &program, u32 *total_len) {
}
RoseProgram::RoseProgram() {
- prog.push_back(std::make_unique<RoseInstrEnd>());
+ prog.push_back(std::make_unique<RoseInstrEnd>());
}
RoseProgram::~RoseProgram() = default;
@@ -280,7 +280,7 @@ void stripCheckHandledInstruction(RoseProgram &prog) {
}
-/** Returns true if the program may read the interpreter's work_done flag */
+/** Returns true if the program may read the interpreter's work_done flag */
static
bool reads_work_done_flag(const RoseProgram &prog) {
for (const auto &ri : prog) {
@@ -297,30 +297,30 @@ void addEnginesEodProgram(u32 eodNfaIterOffset, RoseProgram &program) {
}
RoseProgram block;
- block.add_before_end(std::make_unique<RoseInstrEnginesEod>(eodNfaIterOffset));
+ block.add_before_end(std::make_unique<RoseInstrEnginesEod>(eodNfaIterOffset));
program.add_block(move(block));
}
void addSuffixesEodProgram(RoseProgram &program) {
RoseProgram block;
- block.add_before_end(std::make_unique<RoseInstrSuffixesEod>());
+ block.add_before_end(std::make_unique<RoseInstrSuffixesEod>());
program.add_block(move(block));
}
void addMatcherEodProgram(RoseProgram &program) {
RoseProgram block;
- block.add_before_end(std::make_unique<RoseInstrMatcherEod>());
+ block.add_before_end(std::make_unique<RoseInstrMatcherEod>());
program.add_block(move(block));
}
-void addFlushCombinationProgram(RoseProgram &program) {
- program.add_before_end(std::make_unique<RoseInstrFlushCombination>());
-}
-
-void addLastFlushCombinationProgram(RoseProgram &program) {
- program.add_before_end(std::make_unique<RoseInstrLastFlushCombination>());
-}
-
+void addFlushCombinationProgram(RoseProgram &program) {
+ program.add_before_end(std::make_unique<RoseInstrFlushCombination>());
+}
+
+void addLastFlushCombinationProgram(RoseProgram &program) {
+ program.add_before_end(std::make_unique<RoseInstrLastFlushCombination>());
+}
+
static
void makeRoleCheckLeftfix(const RoseBuildImpl &build,
const map<RoseVertex, left_build_info> &leftfix_info,
@@ -428,7 +428,7 @@ void makeCatchup(const ReportManager &rm, bool needs_catchup,
return;
}
- program.add_before_end(std::make_unique<RoseInstrCatchUp>());
+ program.add_before_end(std::make_unique<RoseInstrCatchUp>());
}
static
@@ -505,23 +505,23 @@ void writeSomOperation(const Report &report, som_operation *op) {
}
static
-void addLogicalSetRequired(const Report &report, ReportManager &rm,
- RoseProgram &program) {
- if (report.lkey == INVALID_LKEY) {
- return;
- }
- // set matching status of current lkey
- auto risl = std::make_unique<RoseInstrSetLogical>(report.lkey,
- report.offsetAdjust);
- program.add_before_end(move(risl));
- // set current lkey's corresponding ckeys active, pending evaluation
- for (auto ckey : rm.getRelateCKeys(report.lkey)) {
- auto risc = std::make_unique<RoseInstrSetCombination>(ckey);
- program.add_before_end(move(risc));
- }
-}
-
-static
+void addLogicalSetRequired(const Report &report, ReportManager &rm,
+ RoseProgram &program) {
+ if (report.lkey == INVALID_LKEY) {
+ return;
+ }
+ // set matching status of current lkey
+ auto risl = std::make_unique<RoseInstrSetLogical>(report.lkey,
+ report.offsetAdjust);
+ program.add_before_end(move(risl));
+ // set current lkey's corresponding ckeys active, pending evaluation
+ for (auto ckey : rm.getRelateCKeys(report.lkey)) {
+ auto risc = std::make_unique<RoseInstrSetCombination>(ckey);
+ program.add_before_end(move(risc));
+ }
+}
+
+static
void makeReport(const RoseBuildImpl &build, const ReportID id,
const bool has_som, RoseProgram &program) {
assert(id < build.rm.numReports());
@@ -562,67 +562,67 @@ void makeReport(const RoseBuildImpl &build, const ReportID id,
}
if (report.quashSom) {
- report_block.add_before_end(std::make_unique<RoseInstrSomZero>());
+ report_block.add_before_end(std::make_unique<RoseInstrSomZero>());
}
switch (report.type) {
case EXTERNAL_CALLBACK:
- if (build.rm.numCkeys()) {
- addFlushCombinationProgram(report_block);
- }
+ if (build.rm.numCkeys()) {
+ addFlushCombinationProgram(report_block);
+ }
if (!has_som) {
// Dedupe is only necessary if this report has a dkey, or if there
// are SOM reports to catch up.
bool needs_dedupe = build.rm.getDkey(report) != ~0U || build.hasSom;
if (report.ekey == INVALID_EKEY) {
if (needs_dedupe) {
- if (!report.quiet) {
- report_block.add_before_end(
- std::make_unique<RoseInstrDedupeAndReport>(
- report.quashSom, build.rm.getDkey(report),
- report.onmatch, report.offsetAdjust, end_inst));
- } else {
- makeDedupe(build.rm, report, report_block);
- }
+ if (!report.quiet) {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrDedupeAndReport>(
+ report.quashSom, build.rm.getDkey(report),
+ report.onmatch, report.offsetAdjust, end_inst));
+ } else {
+ makeDedupe(build.rm, report, report_block);
+ }
} else {
- if (!report.quiet) {
- report_block.add_before_end(
- std::make_unique<RoseInstrReport>(
- report.onmatch, report.offsetAdjust));
- }
+ if (!report.quiet) {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrReport>(
+ report.onmatch, report.offsetAdjust));
+ }
}
} else {
if (needs_dedupe) {
makeDedupe(build.rm, report, report_block);
}
- if (!report.quiet) {
- report_block.add_before_end(
- std::make_unique<RoseInstrReportExhaust>(
- report.onmatch, report.offsetAdjust, report.ekey));
- } else {
- report_block.add_before_end(
- std::make_unique<RoseInstrSetExhaust>(report.ekey));
- }
+ if (!report.quiet) {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrReportExhaust>(
+ report.onmatch, report.offsetAdjust, report.ekey));
+ } else {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrSetExhaust>(report.ekey));
+ }
}
} else { // has_som
makeDedupeSom(build.rm, report, report_block);
if (report.ekey == INVALID_EKEY) {
- if (!report.quiet) {
- report_block.add_before_end(std::make_unique<RoseInstrReportSom>(
- report.onmatch, report.offsetAdjust));
- }
+ if (!report.quiet) {
+ report_block.add_before_end(std::make_unique<RoseInstrReportSom>(
+ report.onmatch, report.offsetAdjust));
+ }
} else {
- if (!report.quiet) {
- report_block.add_before_end(
- std::make_unique<RoseInstrReportSomExhaust>(
- report.onmatch, report.offsetAdjust, report.ekey));
- } else {
- report_block.add_before_end(
- std::make_unique<RoseInstrSetExhaust>(report.ekey));
- }
+ if (!report.quiet) {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrReportSomExhaust>(
+ report.onmatch, report.offsetAdjust, report.ekey));
+ } else {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrSetExhaust>(report.ekey));
+ }
}
}
- addLogicalSetRequired(report, build.rm, report_block);
+ addLogicalSetRequired(report, build.rm, report_block);
break;
case INTERNAL_SOM_LOC_SET:
case INTERNAL_SOM_LOC_SET_IF_UNSET:
@@ -635,9 +635,9 @@ void makeReport(const RoseBuildImpl &build, const ReportID id,
case INTERNAL_SOM_LOC_MAKE_WRITABLE:
case INTERNAL_SOM_LOC_SET_FROM:
case INTERNAL_SOM_LOC_SET_FROM_IF_WRITABLE:
- if (build.rm.numCkeys()) {
- addFlushCombinationProgram(report_block);
- }
+ if (build.rm.numCkeys()) {
+ addFlushCombinationProgram(report_block);
+ }
if (has_som) {
auto ri = std::make_unique<RoseInstrReportSomAware>();
writeSomOperation(report, &ri->som);
@@ -649,7 +649,7 @@ void makeReport(const RoseBuildImpl &build, const ReportID id,
}
break;
case INTERNAL_ROSE_CHAIN: {
- report_block.add_before_end(std::make_unique<RoseInstrReportChain>(
+ report_block.add_before_end(std::make_unique<RoseInstrReportChain>(
report.onmatch, report.topSquashDistance));
break;
}
@@ -657,48 +657,48 @@ void makeReport(const RoseBuildImpl &build, const ReportID id,
case EXTERNAL_CALLBACK_SOM_STORED:
case EXTERNAL_CALLBACK_SOM_ABS:
case EXTERNAL_CALLBACK_SOM_REV_NFA:
- if (build.rm.numCkeys()) {
- addFlushCombinationProgram(report_block);
- }
+ if (build.rm.numCkeys()) {
+ addFlushCombinationProgram(report_block);
+ }
makeDedupeSom(build.rm, report, report_block);
if (report.ekey == INVALID_EKEY) {
- if (!report.quiet) {
- report_block.add_before_end(std::make_unique<RoseInstrReportSom>(
- report.onmatch, report.offsetAdjust));
- }
+ if (!report.quiet) {
+ report_block.add_before_end(std::make_unique<RoseInstrReportSom>(
+ report.onmatch, report.offsetAdjust));
+ }
} else {
- if (!report.quiet) {
- report_block.add_before_end(
- std::make_unique<RoseInstrReportSomExhaust>(
- report.onmatch, report.offsetAdjust, report.ekey));
- } else {
- report_block.add_before_end(
- std::make_unique<RoseInstrSetExhaust>(report.ekey));
- }
+ if (!report.quiet) {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrReportSomExhaust>(
+ report.onmatch, report.offsetAdjust, report.ekey));
+ } else {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrSetExhaust>(report.ekey));
+ }
}
- addLogicalSetRequired(report, build.rm, report_block);
+ addLogicalSetRequired(report, build.rm, report_block);
break;
case EXTERNAL_CALLBACK_SOM_PASS:
- if (build.rm.numCkeys()) {
- addFlushCombinationProgram(report_block);
- }
+ if (build.rm.numCkeys()) {
+ addFlushCombinationProgram(report_block);
+ }
makeDedupeSom(build.rm, report, report_block);
if (report.ekey == INVALID_EKEY) {
- if (!report.quiet) {
- report_block.add_before_end(std::make_unique<RoseInstrReportSom>(
- report.onmatch, report.offsetAdjust));
- }
+ if (!report.quiet) {
+ report_block.add_before_end(std::make_unique<RoseInstrReportSom>(
+ report.onmatch, report.offsetAdjust));
+ }
} else {
- if (!report.quiet) {
- report_block.add_before_end(
- std::make_unique<RoseInstrReportSomExhaust>(
- report.onmatch, report.offsetAdjust, report.ekey));
- } else {
- report_block.add_before_end(
- std::make_unique<RoseInstrSetExhaust>(report.ekey));
- }
+ if (!report.quiet) {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrReportSomExhaust>(
+ report.onmatch, report.offsetAdjust, report.ekey));
+ } else {
+ report_block.add_before_end(
+ std::make_unique<RoseInstrSetExhaust>(report.ekey));
+ }
}
- addLogicalSetRequired(report, build.rm, report_block);
+ addLogicalSetRequired(report, build.rm, report_block);
break;
default:
@@ -748,7 +748,7 @@ void makeRoleSetState(const unordered_map<RoseVertex, u32> &roleStateIndices,
if (it == end(roleStateIndices)) {
return;
}
- program.add_before_end(std::make_unique<RoseInstrSetState>(it->second));
+ program.add_before_end(std::make_unique<RoseInstrSetState>(it->second));
}
static
@@ -772,7 +772,7 @@ void makePushDelayedInstructions(const RoseLiteralMap &literals,
});
for (const auto &ri : delay_instructions) {
- program.add_before_end(std::make_unique<RoseInstrPushDelayed>(ri));
+ program.add_before_end(std::make_unique<RoseInstrPushDelayed>(ri));
}
}
@@ -924,7 +924,7 @@ void makeRoleGroups(const RoseGraph &g, ProgramBuild &prog_build,
return;
}
- program.add_before_end(std::make_unique<RoseInstrSetGroups>(groups));
+ program.add_before_end(std::make_unique<RoseInstrSetGroups>(groups));
}
static
@@ -1061,49 +1061,49 @@ bool makeRoleMask32(const vector<LookEntry> &look,
return true;
}
-static
-bool makeRoleMask64(const vector<LookEntry> &look,
- RoseProgram &program, const target_t &target) {
- if (!target.has_avx512()) {
- return false;
- }
-
- if (look.back().offset >= look.front().offset + 64) {
- return false;
- }
- s32 base_offset = verify_s32(look.front().offset);
- array<u8, 64> and_mask, cmp_mask;
- and_mask.fill(0);
- cmp_mask.fill(0);
- u64a neg_mask = 0;
- for (const auto &entry : look) {
- u8 andmask_u8, cmpmask_u8, flip;
- if (!checkReachWithFlip(entry.reach, andmask_u8, cmpmask_u8, flip)) {
- return false;
- }
- u32 shift = entry.offset - base_offset;
- assert(shift < 64);
- and_mask[shift] = andmask_u8;
- cmp_mask[shift] = cmpmask_u8;
- if (flip) {
- neg_mask |= 1ULL << shift;
- }
- }
-
- DEBUG_PRINTF("and_mask %s\n",
- convertMaskstoString(and_mask.data(), 64).c_str());
- DEBUG_PRINTF("cmp_mask %s\n",
- convertMaskstoString(cmp_mask.data(), 64).c_str());
- DEBUG_PRINTF("neg_mask %llx\n", neg_mask);
- DEBUG_PRINTF("base_offset %d\n", base_offset);
-
- const auto *end_inst = program.end_instruction();
- auto ri = std::make_unique<RoseInstrCheckMask64>(and_mask, cmp_mask, neg_mask,
- base_offset, end_inst);
- program.add_before_end(move(ri));
- return true;
-}
-
+static
+bool makeRoleMask64(const vector<LookEntry> &look,
+ RoseProgram &program, const target_t &target) {
+ if (!target.has_avx512()) {
+ return false;
+ }
+
+ if (look.back().offset >= look.front().offset + 64) {
+ return false;
+ }
+ s32 base_offset = verify_s32(look.front().offset);
+ array<u8, 64> and_mask, cmp_mask;
+ and_mask.fill(0);
+ cmp_mask.fill(0);
+ u64a neg_mask = 0;
+ for (const auto &entry : look) {
+ u8 andmask_u8, cmpmask_u8, flip;
+ if (!checkReachWithFlip(entry.reach, andmask_u8, cmpmask_u8, flip)) {
+ return false;
+ }
+ u32 shift = entry.offset - base_offset;
+ assert(shift < 64);
+ and_mask[shift] = andmask_u8;
+ cmp_mask[shift] = cmpmask_u8;
+ if (flip) {
+ neg_mask |= 1ULL << shift;
+ }
+ }
+
+ DEBUG_PRINTF("and_mask %s\n",
+ convertMaskstoString(and_mask.data(), 64).c_str());
+ DEBUG_PRINTF("cmp_mask %s\n",
+ convertMaskstoString(cmp_mask.data(), 64).c_str());
+ DEBUG_PRINTF("neg_mask %llx\n", neg_mask);
+ DEBUG_PRINTF("base_offset %d\n", base_offset);
+
+ const auto *end_inst = program.end_instruction();
+ auto ri = std::make_unique<RoseInstrCheckMask64>(and_mask, cmp_mask, neg_mask,
+ base_offset, end_inst);
+ program.add_before_end(move(ri));
+ return true;
+}
+
// Sorting by the size of every bucket.
// Used in map<u32, vector<s8>, cmpNibble>.
struct cmpNibble {
@@ -1127,7 +1127,7 @@ void getAllBuckets(const vector<LookEntry> &look,
} else {
neg_mask ^= 1ULL << (entry.offset - base_offset);
}
-
+
map <u16, u16> lo2hi;
// We treat Ascii Table as a 16x16 grid.
// Push every row in cr into lo2hi and mark the row number.
@@ -1281,7 +1281,7 @@ makeCheckShufti16x16(u32 offset_range, u8 bucket_idx,
(hi_mask, lo_mask, bucket_select_mask_32,
neg_mask & 0xffff, base_offset, end_inst);
}
-
+
static
unique_ptr<RoseInstruction>
makeCheckShufti32x16(u32 offset_range, u8 bucket_idx,
@@ -1300,83 +1300,83 @@ makeCheckShufti32x16(u32 offset_range, u8 bucket_idx,
}
static
-unique_ptr<RoseInstruction>
-makeCheckShufti64x8(u32 offset_range, u8 bucket_idx,
- const array<u8, 32> &hi_mask, const array<u8, 32> &lo_mask,
- const array<u8, 64> &bucket_select_mask,
- u64a neg_mask, s32 base_offset,
- const RoseInstruction *end_inst) {
- if (offset_range > 64 || bucket_idx > 8) {
- return nullptr;
- }
-
- array<u8, 64> hi_mask_64;
- array<u8, 64> lo_mask_64;
- copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_64.begin());
- copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_64.begin() + 16);
- copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_64.begin() + 32);
- copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_64.begin() + 48);
- copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_64.begin());
- copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_64.begin() + 16);
- copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_64.begin() + 32);
- copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_64.begin() + 48);
-
- return std::make_unique<RoseInstrCheckShufti64x8>
- (hi_mask_64, lo_mask_64, bucket_select_mask,
- neg_mask, base_offset, end_inst);
-}
-
-static
-unique_ptr<RoseInstruction>
-makeCheckShufti64x16(u32 offset_range, u8 bucket_idx,
- const array<u8, 32> &hi_mask, const array<u8, 32> &lo_mask,
- const array<u8, 64> &bucket_select_mask_lo,
- const array<u8, 64> &bucket_select_mask_hi,
- u64a neg_mask, s32 base_offset,
- const RoseInstruction *end_inst) {
- if (offset_range > 64 || bucket_idx > 16) {
- return nullptr;
- }
-
- array<u8, 64> hi_mask_1;
- array<u8, 64> hi_mask_2;
- array<u8, 64> lo_mask_1;
- array<u8, 64> lo_mask_2;
-
- copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_1.begin());
- copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_1.begin() + 16);
- copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_1.begin() + 32);
- copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_1.begin() + 48);
- copy(hi_mask.begin() + 16, hi_mask.begin() + 32, hi_mask_2.begin());
- copy(hi_mask.begin() + 16, hi_mask.begin() + 32, hi_mask_2.begin() + 16);
- copy(hi_mask.begin() + 16, hi_mask.begin() + 32, hi_mask_2.begin() + 32);
- copy(hi_mask.begin() + 16, hi_mask.begin() + 32, hi_mask_2.begin() + 48);
-
- copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_1.begin());
- copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_1.begin() + 16);
- copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_1.begin() + 32);
- copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_1.begin() + 48);
- copy(lo_mask.begin() + 16, lo_mask.begin() + 32, lo_mask_2.begin());
- copy(lo_mask.begin() + 16, lo_mask.begin() + 32, lo_mask_2.begin() + 16);
- copy(lo_mask.begin() + 16, lo_mask.begin() + 32, lo_mask_2.begin() + 32);
- copy(lo_mask.begin() + 16, lo_mask.begin() + 32, lo_mask_2.begin() + 48);
-
- return std::make_unique<RoseInstrCheckShufti64x16>
- (hi_mask_1, hi_mask_2, lo_mask_1, lo_mask_2, bucket_select_mask_hi,
- bucket_select_mask_lo, neg_mask, base_offset, end_inst);
-}
-
-static
-bool makeRoleShufti(const vector<LookEntry> &look, RoseProgram &program,
- const target_t &target) {
- s32 offset_limit;
- if (target.has_avx512()) {
- offset_limit = 64;
- } else {
- offset_limit = 32;
- }
+unique_ptr<RoseInstruction>
+makeCheckShufti64x8(u32 offset_range, u8 bucket_idx,
+ const array<u8, 32> &hi_mask, const array<u8, 32> &lo_mask,
+ const array<u8, 64> &bucket_select_mask,
+ u64a neg_mask, s32 base_offset,
+ const RoseInstruction *end_inst) {
+ if (offset_range > 64 || bucket_idx > 8) {
+ return nullptr;
+ }
+
+ array<u8, 64> hi_mask_64;
+ array<u8, 64> lo_mask_64;
+ copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_64.begin());
+ copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_64.begin() + 16);
+ copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_64.begin() + 32);
+ copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_64.begin() + 48);
+ copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_64.begin());
+ copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_64.begin() + 16);
+ copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_64.begin() + 32);
+ copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_64.begin() + 48);
+
+ return std::make_unique<RoseInstrCheckShufti64x8>
+ (hi_mask_64, lo_mask_64, bucket_select_mask,
+ neg_mask, base_offset, end_inst);
+}
+
+static
+unique_ptr<RoseInstruction>
+makeCheckShufti64x16(u32 offset_range, u8 bucket_idx,
+ const array<u8, 32> &hi_mask, const array<u8, 32> &lo_mask,
+ const array<u8, 64> &bucket_select_mask_lo,
+ const array<u8, 64> &bucket_select_mask_hi,
+ u64a neg_mask, s32 base_offset,
+ const RoseInstruction *end_inst) {
+ if (offset_range > 64 || bucket_idx > 16) {
+ return nullptr;
+ }
+
+ array<u8, 64> hi_mask_1;
+ array<u8, 64> hi_mask_2;
+ array<u8, 64> lo_mask_1;
+ array<u8, 64> lo_mask_2;
+
+ copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_1.begin());
+ copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_1.begin() + 16);
+ copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_1.begin() + 32);
+ copy(hi_mask.begin(), hi_mask.begin() + 16, hi_mask_1.begin() + 48);
+ copy(hi_mask.begin() + 16, hi_mask.begin() + 32, hi_mask_2.begin());
+ copy(hi_mask.begin() + 16, hi_mask.begin() + 32, hi_mask_2.begin() + 16);
+ copy(hi_mask.begin() + 16, hi_mask.begin() + 32, hi_mask_2.begin() + 32);
+ copy(hi_mask.begin() + 16, hi_mask.begin() + 32, hi_mask_2.begin() + 48);
+
+ copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_1.begin());
+ copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_1.begin() + 16);
+ copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_1.begin() + 32);
+ copy(lo_mask.begin(), lo_mask.begin() + 16, lo_mask_1.begin() + 48);
+ copy(lo_mask.begin() + 16, lo_mask.begin() + 32, lo_mask_2.begin());
+ copy(lo_mask.begin() + 16, lo_mask.begin() + 32, lo_mask_2.begin() + 16);
+ copy(lo_mask.begin() + 16, lo_mask.begin() + 32, lo_mask_2.begin() + 32);
+ copy(lo_mask.begin() + 16, lo_mask.begin() + 32, lo_mask_2.begin() + 48);
+
+ return std::make_unique<RoseInstrCheckShufti64x16>
+ (hi_mask_1, hi_mask_2, lo_mask_1, lo_mask_2, bucket_select_mask_hi,
+ bucket_select_mask_lo, neg_mask, base_offset, end_inst);
+}
+
+static
+bool makeRoleShufti(const vector<LookEntry> &look, RoseProgram &program,
+ const target_t &target) {
+ s32 offset_limit;
+ if (target.has_avx512()) {
+ offset_limit = 64;
+ } else {
+ offset_limit = 32;
+ }
s32 base_offset = verify_s32(look.front().offset);
- if (look.back().offset >= base_offset + offset_limit) {
+ if (look.back().offset >= base_offset + offset_limit) {
return false;
}
@@ -1384,40 +1384,40 @@ bool makeRoleShufti(const vector<LookEntry> &look, RoseProgram &program,
u64a neg_mask_64;
array<u8, 32> hi_mask;
array<u8, 32> lo_mask;
- array<u8, 64> bucket_select_hi_64; // for AVX512
- array<u8, 64> bucket_select_lo_64; // for AVX512
+ array<u8, 64> bucket_select_hi_64; // for AVX512
+ array<u8, 64> bucket_select_lo_64; // for AVX512
array<u8, 32> bucket_select_hi;
array<u8, 32> bucket_select_lo;
hi_mask.fill(0);
lo_mask.fill(0);
- bucket_select_hi_64.fill(0);
- bucket_select_lo_64.fill(0);
+ bucket_select_hi_64.fill(0);
+ bucket_select_lo_64.fill(0);
bucket_select_hi.fill(0); // will not be used in 16x8 and 32x8.
bucket_select_lo.fill(0);
- if (target.has_avx512()) {
- if (!getShuftiMasks(look, hi_mask, lo_mask, bucket_select_hi_64.data(),
- bucket_select_lo_64.data(), neg_mask_64, bucket_idx,
- 32)) {
- return false;
- }
- copy(bucket_select_hi_64.begin(), bucket_select_hi_64.begin() + 32,
- bucket_select_hi.begin());
- copy(bucket_select_lo_64.begin(), bucket_select_lo_64.begin() + 32,
- bucket_select_lo.begin());
-
- DEBUG_PRINTF("bucket_select_hi_64 %s\n",
- convertMaskstoString(bucket_select_hi_64.data(), 64).c_str());
- DEBUG_PRINTF("bucket_select_lo_64 %s\n",
- convertMaskstoString(bucket_select_lo_64.data(), 64).c_str());
- } else {
- if (!getShuftiMasks(look, hi_mask, lo_mask, bucket_select_hi.data(),
- bucket_select_lo.data(), neg_mask_64, bucket_idx,
- 32)) {
- return false;
- }
- }
-
+ if (target.has_avx512()) {
+ if (!getShuftiMasks(look, hi_mask, lo_mask, bucket_select_hi_64.data(),
+ bucket_select_lo_64.data(), neg_mask_64, bucket_idx,
+ 32)) {
+ return false;
+ }
+ copy(bucket_select_hi_64.begin(), bucket_select_hi_64.begin() + 32,
+ bucket_select_hi.begin());
+ copy(bucket_select_lo_64.begin(), bucket_select_lo_64.begin() + 32,
+ bucket_select_lo.begin());
+
+ DEBUG_PRINTF("bucket_select_hi_64 %s\n",
+ convertMaskstoString(bucket_select_hi_64.data(), 64).c_str());
+ DEBUG_PRINTF("bucket_select_lo_64 %s\n",
+ convertMaskstoString(bucket_select_lo_64.data(), 64).c_str());
+ } else {
+ if (!getShuftiMasks(look, hi_mask, lo_mask, bucket_select_hi.data(),
+ bucket_select_lo.data(), neg_mask_64, bucket_idx,
+ 32)) {
+ return false;
+ }
+ }
+
u32 neg_mask = (u32)neg_mask_64;
DEBUG_PRINTF("hi_mask %s\n",
@@ -1440,13 +1440,13 @@ bool makeRoleShufti(const vector<LookEntry> &look, RoseProgram &program,
bucket_select_lo, neg_mask, base_offset,
end_inst);
}
- if (target.has_avx512()) {
- if (!ri) {
- ri = makeCheckShufti64x8(offset_range, bucket_idx, hi_mask, lo_mask,
- bucket_select_lo_64, neg_mask_64,
- base_offset, end_inst);
- }
- }
+ if (target.has_avx512()) {
+ if (!ri) {
+ ri = makeCheckShufti64x8(offset_range, bucket_idx, hi_mask, lo_mask,
+ bucket_select_lo_64, neg_mask_64,
+ base_offset, end_inst);
+ }
+ }
if (!ri) {
ri = makeCheckShufti16x16(offset_range, bucket_idx, hi_mask, lo_mask,
bucket_select_lo, bucket_select_hi,
@@ -1457,13 +1457,13 @@ bool makeRoleShufti(const vector<LookEntry> &look, RoseProgram &program,
bucket_select_lo, bucket_select_hi,
neg_mask, base_offset, end_inst);
}
- if (target.has_avx512()) {
- if (!ri) {
- ri = makeCheckShufti64x16(offset_range, bucket_idx, hi_mask, lo_mask,
- bucket_select_lo_64, bucket_select_hi_64,
- neg_mask_64, base_offset, end_inst);
- }
- }
+ if (target.has_avx512()) {
+ if (!ri) {
+ ri = makeCheckShufti64x16(offset_range, bucket_idx, hi_mask, lo_mask,
+ bucket_select_lo_64, bucket_select_hi_64,
+ neg_mask_64, base_offset, end_inst);
+ }
+ }
assert(ri);
program.add_before_end(move(ri));
@@ -1476,7 +1476,7 @@ bool makeRoleShufti(const vector<LookEntry> &look, RoseProgram &program,
*/
static
void makeLookaroundInstruction(const vector<LookEntry> &look,
- RoseProgram &program, const target_t &target) {
+ RoseProgram &program, const target_t &target) {
assert(!look.empty());
if (makeRoleByte(look, program)) {
@@ -1500,14 +1500,14 @@ void makeLookaroundInstruction(const vector<LookEntry> &look,
return;
}
- if (makeRoleMask64(look, program, target)) {
+ if (makeRoleMask64(look, program, target)) {
+ return;
+ }
+
+ if (makeRoleShufti(look, program, target)) {
return;
}
- if (makeRoleShufti(look, program, target)) {
- return;
- }
-
auto ri = std::make_unique<RoseInstrCheckLookaround>(look,
program.end_instruction());
program.add_before_end(move(ri));
@@ -1545,7 +1545,7 @@ void makeCheckLitMaskInstruction(const RoseBuildImpl &build, u32 lit_id,
return; // all caseful chars handled by HWLM mask.
}
- makeLookaroundInstruction(look, program, build.cc.target_info);
+ makeLookaroundInstruction(look, program, build.cc.target_info);
}
static
@@ -1584,7 +1584,7 @@ void makeCheckLitEarlyInstruction(const RoseBuildImpl &build, u32 lit_id,
DEBUG_PRINTF("adding lit early check, min_offset=%u\n", min_offset);
const auto *end = prog.end_instruction();
- prog.add_before_end(std::make_unique<RoseInstrCheckLitEarly>(min_offset, end));
+ prog.add_before_end(std::make_unique<RoseInstrCheckLitEarly>(min_offset, end));
}
static
@@ -1595,7 +1595,7 @@ void makeGroupCheckInstruction(const RoseBuildImpl &build, u32 lit_id,
if (!info.group_mask) {
return;
}
- prog.add_before_end(std::make_unique<RoseInstrCheckGroups>(info.group_mask));
+ prog.add_before_end(std::make_unique<RoseInstrCheckGroups>(info.group_mask));
}
static
@@ -1889,7 +1889,7 @@ void makeRoleLookaround(const RoseBuildImpl &build,
findLookaroundMasks(build, v, look_more);
mergeLookaround(look, look_more);
if (!look.empty()) {
- makeLookaroundInstruction(look, program, build.cc.target_info);
+ makeLookaroundInstruction(look, program, build.cc.target_info);
}
return;
}
@@ -1932,7 +1932,7 @@ void makeRoleSuffix(const RoseBuildImpl &build,
event = MQE_TOP;
}
- prog.add_before_end(std::make_unique<RoseInstrTriggerSuffix>(queue, event));
+ prog.add_before_end(std::make_unique<RoseInstrTriggerSuffix>(queue, event));
}
static
@@ -2039,7 +2039,7 @@ static
void addCheckOnlyEodInstruction(RoseProgram &prog) {
DEBUG_PRINTF("only at eod\n");
const auto *end_inst = prog.end_instruction();
- prog.add_before_end(std::make_unique<RoseInstrCheckOnlyEod>(end_inst));
+ prog.add_before_end(std::make_unique<RoseInstrCheckOnlyEod>(end_inst));
}
static
@@ -2071,7 +2071,7 @@ void makeRoleEagerEodReports(const RoseBuildImpl &build,
program.add_before_end(move(eod_program));
}
-/** Makes a program for a role/vertex given a specific pred/in_edge. */
+/** Makes a program for a role/vertex given a specific pred/in_edge. */
static
RoseProgram makeRoleProgram(const RoseBuildImpl &build,
const map<RoseVertex, left_build_info> &leftfix_info,
@@ -2164,7 +2164,7 @@ void makeGroupSquashInstruction(const RoseBuildImpl &build, u32 lit_id,
DEBUG_PRINTF("squashes 0x%llx\n", info.group_mask);
assert(info.group_mask);
/* Note: group_mask is negated. */
- prog.add_before_end(std::make_unique<RoseInstrSquashGroups>(~info.group_mask));
+ prog.add_before_end(std::make_unique<RoseInstrSquashGroups>(~info.group_mask));
}
namespace {
@@ -2209,7 +2209,7 @@ RoseProgram assembleProgramBlocks(vector<RoseProgram> &&blocks_in) {
* only set if a state has been. */
if (!prog.empty() && reads_work_done_flag(block)) {
RoseProgram clear_block;
- clear_block.add_before_end(std::make_unique<RoseInstrClearWorkDone>());
+ clear_block.add_before_end(std::make_unique<RoseInstrClearWorkDone>());
prog.add_block(move(clear_block));
}
@@ -2279,7 +2279,7 @@ RoseProgram makeLiteralProgram(const RoseBuildImpl &build,
}
if (lit_id == build.eod_event_literal_id) {
- /* Note: does not require the lit initial program */
+ /* Note: does not require the lit initial program */
assert(build.eod_event_literal_id != MO_INVALID_IDX);
return role_programs;
}
@@ -2369,7 +2369,7 @@ void makeCatchupMpv(const ReportManager &rm, bool needs_mpv_catchup,
return;
}
- program.add_before_end(std::make_unique<RoseInstrCatchUpMpv>());
+ program.add_before_end(std::make_unique<RoseInstrCatchUpMpv>());
}
RoseProgram makeReportProgram(const RoseBuildImpl &build,
@@ -2402,7 +2402,7 @@ RoseProgram makeBoundaryProgram(const RoseBuildImpl &build,
void addIncludedJumpProgram(RoseProgram &program, u32 child_offset,
u8 squash) {
RoseProgram block;
- block.add_before_end(std::make_unique<RoseInstrIncludedJump>(child_offset,
+ block.add_before_end(std::make_unique<RoseInstrIncludedJump>(child_offset,
squash));
program.add_block(move(block));
}
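
The hunks above restore the logical-combination plumbing: when a sub-expression reports a match, SET_LOGICAL records its lkey and SET_COMBINATION marks every dependent ckey as pending, so a later FLUSH_COMBINATION can evaluate the combination. A minimal sketch of that bookkeeping follows, with illustrative container types; the engine itself keeps these as multibit vectors in stream state.

    #include <bitset>
    #include <map>
    #include <vector>

    // Sketch of the lkey/ckey bookkeeping emitted by addLogicalSetRequired:
    // a sub-expression match sets its logical key, then every combination
    // key that depends on that lkey is marked pending so the next
    // FLUSH_COMBINATION evaluates it. Containers are illustrative.
    struct ToyCombState {
        std::bitset<64> logical;   // lkey -> sub-expression matched
        std::bitset<16> pending;   // ckey -> combination needs evaluation
        std::map<unsigned, std::vector<unsigned>> lkey_to_ckeys;

        void on_report(unsigned lkey) {      // ~ ROSE_INSTR_SET_LOGICAL
            logical.set(lkey);
            for (unsigned ckey : lkey_to_ckeys[lkey]) {
                pending.set(ckey);           // ~ ROSE_INSTR_SET_COMBINATION
            }
        }
    };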
diff --git a/contrib/libs/hyperscan/src/rose/rose_build_program.h b/contrib/libs/hyperscan/src/rose/rose_build_program.h
index efb9821732..7d781f3191 100644
--- a/contrib/libs/hyperscan/src/rose/rose_build_program.h
+++ b/contrib/libs/hyperscan/src/rose/rose_build_program.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, Intel Corporation
+ * Copyright (c) 2016-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -187,8 +187,8 @@ struct ProgramBuild : noncopyable {
void addEnginesEodProgram(u32 eodNfaIterOffset, RoseProgram &program);
void addSuffixesEodProgram(RoseProgram &program);
void addMatcherEodProgram(RoseProgram &program);
-void addFlushCombinationProgram(RoseProgram &program);
-void addLastFlushCombinationProgram(RoseProgram &program);
+void addFlushCombinationProgram(RoseProgram &program);
+void addLastFlushCombinationProgram(RoseProgram &program);
static constexpr u32 INVALID_QUEUE = ~0U;
diff --git a/contrib/libs/hyperscan/src/rose/rose_graph.h b/contrib/libs/hyperscan/src/rose/rose_graph.h
index 4acb65a38b..b5bf1985d8 100644
--- a/contrib/libs/hyperscan/src/rose/rose_graph.h
+++ b/contrib/libs/hyperscan/src/rose/rose_graph.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/rose/rose_in_graph.h b/contrib/libs/hyperscan/src/rose/rose_in_graph.h
index ebf08cd7b8..da0ea08da1 100644
--- a/contrib/libs/hyperscan/src/rose/rose_in_graph.h
+++ b/contrib/libs/hyperscan/src/rose/rose_in_graph.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -27,7 +27,7 @@
*/
/** \file
- * \brief Rose Input Graph: Used for ng_violet -> rose_build_add communication.
+ * \brief Rose Input Graph: Used for ng_violet -> rose_build_add communication.
*
* The input graph MUST be a DAG.
* There MUST be exactly 1 START or ANCHORED_START vertex.
@@ -127,7 +127,7 @@ public:
flat_set<ReportID> reports; /**< for RIV_ACCEPT/RIV_ACCEPT_EOD */
u32 min_offset; /**< Minimum offset at which this vertex can match. */
u32 max_offset; /**< Maximum offset at which this vertex can match. */
- size_t index = 0; /**< \brief Unique vertex index. */
+ size_t index = 0; /**< \brief Unique vertex index. */
};
struct RoseInEdgeProps {
@@ -176,13 +176,13 @@ struct RoseInEdgeProps {
/** \brief Haig version of graph, if required. */
std::shared_ptr<raw_som_dfa> haig;
- /**
- * \brief Distance behind the match offset for the literal in the target
- * vertex that the leftfix needs to be checked at.
- */
+ /**
+ * \brief Distance behind the match offset for the literal in the target
+ * vertex that the leftfix needs to be checked at.
+ */
u32 graph_lag;
-
- /** \brief Unique edge index. */
+
+ /** \brief Unique edge index. */
size_t index = 0;
};
diff --git a/contrib/libs/hyperscan/src/rose/rose_internal.h b/contrib/libs/hyperscan/src/rose/rose_internal.h
index 4e0c3aed21..7bd6779c3d 100644
--- a/contrib/libs/hyperscan/src/rose/rose_internal.h
+++ b/contrib/libs/hyperscan/src/rose/rose_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -199,25 +199,25 @@ struct RoseStateOffsets {
* reports with that ekey should not be delivered to the user. */
u32 exhausted;
- /** size in bytes of exhausted multibit */
+ /** size in bytes of exhausted multibit */
u32 exhausted_size;
- /** Logical multibit.
- *
- * entry per logical key (operand/operator) (used by Logical Combination). */
- u32 logicalVec;
-
- /** size in bytes of logical multibit */
- u32 logicalVec_size;
-
- /** Combination multibit.
- *
- * entry per combination key (used by Logical Combination). */
- u32 combVec;
-
- /** size in bytes of combination multibit */
- u32 combVec_size;
-
+ /** Logical multibit.
+ *
+ * entry per logical key (operand/operator) (used by Logical Combination). */
+ u32 logicalVec;
+
+ /** size in bytes of logical multibit */
+ u32 logicalVec_size;
+
+ /** Combination multibit.
+ *
+ * entry per combination key (used by Logical Combination). */
+ u32 combVec;
+
+ /** size in bytes of combination multibit */
+ u32 combVec_size;
+
/** Multibit for active suffix/outfix engines. */
u32 activeLeafArray;
@@ -328,7 +328,7 @@ struct RoseBoundaryReports {
* nfas). Rose nfa info table can distinguish the cases.
*/
struct RoseEngine {
- u8 pureLiteral; /* Indicator of pure literal API */
+ u8 pureLiteral; /* Indicator of pure literal API */
u8 noFloatingRoots; /* only need to run the anchored table if something
* matched in the anchored table */
u8 requiresEodCheck; /* stuff happens at eod time */
@@ -344,11 +344,11 @@ struct RoseEngine {
u32 mode; /**< scanning mode, one of HS_MODE_{BLOCK,STREAM,VECTORED} */
u32 historyRequired; /**< max amount of history required for streaming */
u32 ekeyCount; /**< number of exhaustion keys */
- u32 lkeyCount; /**< number of logical keys */
- u32 lopCount; /**< number of logical ops */
- u32 ckeyCount; /**< number of combination keys */
- u32 logicalTreeOffset; /**< offset to mapping from lkey to LogicalOp */
- u32 combInfoMapOffset; /**< offset to mapping from ckey to combInfo */
+ u32 lkeyCount; /**< number of logical keys */
+ u32 lopCount; /**< number of logical ops */
+ u32 ckeyCount; /**< number of combination keys */
+ u32 logicalTreeOffset; /**< offset to mapping from lkey to LogicalOp */
+ u32 combInfoMapOffset; /**< offset to mapping from ckey to combInfo */
u32 dkeyCount; /**< number of dedupe keys */
u32 dkeyLogSize; /**< size of fatbit for storing dkey log (bytes) */
u32 invDkeyOffset; /**< offset to table mapping from dkeys to the external
@@ -426,9 +426,9 @@ struct RoseEngine {
u32 roseCount;
u32 eodProgramOffset; //!< EOD program, otherwise 0.
- u32 flushCombProgramOffset; /**< FlushCombination program, otherwise 0 */
- u32 lastFlushCombProgramOffset; /**< LastFlushCombination program,
- * otherwise 0 */
+ u32 flushCombProgramOffset; /**< FlushCombination program, otherwise 0 */
+ u32 lastFlushCombProgramOffset; /**< LastFlushCombination program,
+ * otherwise 0 */
u32 lastByteHistoryIterOffset; // if non-zero
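
The restored RoseStateOffsets fields follow the engine's usual state layout: each vector lives at a fixed byte offset from the per-stream state base, and the runtime forms region pointers by plain offset arithmetic (compare "state + rose->stateOffsets.logicalVec" in runtime.c further below). A minimal sketch, mirroring only the field names above; the struct and helpers are illustrative.

    #include <cstdint>

    // Illustrative subset of the offsets struct; regions are located by
    // adding a stored byte offset to the stream-state base pointer.
    struct ToyStateOffsets {
        uint32_t exhausted, exhausted_size;
        uint32_t logicalVec, logicalVec_size;
        uint32_t combVec, combVec_size;
    };

    static uint8_t *logical_vector(uint8_t *state, const ToyStateOffsets &so) {
        return state + so.logicalVec;
    }

    static uint8_t *comb_vector(uint8_t *state, const ToyStateOffsets &so) {
        return state + so.combVec;
    }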
diff --git a/contrib/libs/hyperscan/src/rose/rose_program.h b/contrib/libs/hyperscan/src/rose/rose_program.h
index 34caf240b0..7e21303cb7 100644
--- a/contrib/libs/hyperscan/src/rose/rose_program.h
+++ b/contrib/libs/hyperscan/src/rose/rose_program.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -183,36 +183,36 @@ enum RoseInstructionCode {
*/
ROSE_INSTR_INCLUDED_JUMP,
- /**
- * \brief Set matching status of a sub-expression.
- */
- ROSE_INSTR_SET_LOGICAL,
-
- /**
- * \brief Set combination status pending checking.
- */
- ROSE_INSTR_SET_COMBINATION,
-
- /**
- * \brief Check if compliant with any logical constraints.
- */
- ROSE_INSTR_FLUSH_COMBINATION,
-
- /** \brief Mark as exhausted instead of report while quiet. */
- ROSE_INSTR_SET_EXHAUST,
-
- /**
- * \brief Calculate any combination's logical value if none of its
- * sub-expressions matches until EOD, then check if compliant with any
- * logical constraints.
- */
- ROSE_INSTR_LAST_FLUSH_COMBINATION,
-
- ROSE_INSTR_CHECK_SHUFTI_64x8, //!< Check 64-byte data by 8-bucket shufti.
- ROSE_INSTR_CHECK_SHUFTI_64x16, //!< Check 64-byte data by 16-bucket shufti.
- ROSE_INSTR_CHECK_MASK_64, //!< 64-bytes and/cmp/neg mask check.
-
- LAST_ROSE_INSTRUCTION = ROSE_INSTR_CHECK_MASK_64 //!< Sentinel.
+ /**
+ * \brief Set matching status of a sub-expression.
+ */
+ ROSE_INSTR_SET_LOGICAL,
+
+ /**
+ * \brief Set combination status pending checking.
+ */
+ ROSE_INSTR_SET_COMBINATION,
+
+ /**
+ * \brief Check if compliant with any logical constraints.
+ */
+ ROSE_INSTR_FLUSH_COMBINATION,
+
+ /** \brief Mark as exhausted instead of report while quiet. */
+ ROSE_INSTR_SET_EXHAUST,
+
+ /**
+ * \brief Calculate any combination's logical value if none of its
+ * sub-expressions matches until EOD, then check if compliant with any
+ * logical constraints.
+ */
+ ROSE_INSTR_LAST_FLUSH_COMBINATION,
+
+ ROSE_INSTR_CHECK_SHUFTI_64x8, //!< Check 64-byte data by 8-bucket shufti.
+ ROSE_INSTR_CHECK_SHUFTI_64x16, //!< Check 64-byte data by 16-bucket shufti.
+ ROSE_INSTR_CHECK_MASK_64, //!< 64-bytes and/cmp/neg mask check.
+
+ LAST_ROSE_INSTRUCTION = ROSE_INSTR_CHECK_MASK_64 //!< Sentinel.
};
struct ROSE_STRUCT_END {
@@ -289,15 +289,15 @@ struct ROSE_STRUCT_CHECK_MASK_32 {
u32 fail_jump; //!< Jump forward this many bytes on failure.
};
-struct ROSE_STRUCT_CHECK_MASK_64 {
- u8 code; //!< From enum RoseInstructionCode.
- u8 and_mask[64]; //!< 64-byte and mask.
- u8 cmp_mask[64]; //!< 64-byte cmp mask.
- u64a neg_mask; //!< negation mask with 64 bits.
- s32 offset; //!< Relative offset of the first byte.
- u32 fail_jump; //!< Jump forward this many bytes on failure.
-};
-
+struct ROSE_STRUCT_CHECK_MASK_64 {
+ u8 code; //!< From enum RoseInstructionCode.
+ u8 and_mask[64]; //!< 64-byte and mask.
+ u8 cmp_mask[64]; //!< 64-byte cmp mask.
+ u64a neg_mask; //!< negation mask with 64 bits.
+ s32 offset; //!< Relative offset of the first byte.
+ u32 fail_jump; //!< Jump forward this many bytes on failure.
+};
+
struct ROSE_STRUCT_CHECK_BYTE {
u8 code; //!< From enum RoseInstructionCode.
u8 and_mask; //!< 8-bits and mask.
@@ -349,29 +349,29 @@ struct ROSE_STRUCT_CHECK_SHUFTI_32x16 {
u32 fail_jump; //!< Jump forward this many bytes on failure.
};
-struct ROSE_STRUCT_CHECK_SHUFTI_64x8 {
- u8 code; //!< From enum RoseInstructionCode.
- u8 hi_mask[64]; //!< High nibble mask in shufti.
- u8 lo_mask[64]; //!< Low nibble mask in shufti.
- u8 bucket_select_mask[64]; //!< Mask for bucket assigning.
- u64a neg_mask; //!< 64 bits negation mask.
- s32 offset; //!< Relative offset of the first byte.
- u32 fail_jump; //!< Jump forward this many bytes on failure.
-};
-
-struct ROSE_STRUCT_CHECK_SHUFTI_64x16 {
- u8 code; //!< From enum RoseInstructionCode.
- u8 hi_mask_1[64]; //!< 4 copies of 0-15 High nibble mask.
- u8 hi_mask_2[64]; //!< 4 copies of 16-32 High nibble mask.
- u8 lo_mask_1[64]; //!< 4 copies of 0-15 Low nibble mask.
- u8 lo_mask_2[64]; //!< 4 copies of 16-32 Low nibble mask.
- u8 bucket_select_mask_hi[64]; //!< Bucket mask for high 8 buckets.
- u8 bucket_select_mask_lo[64]; //!< Bucket mask for low 8 buckets.
- u64a neg_mask; //!< 64 bits negation mask.
- s32 offset; //!< Relative offset of the first byte.
- u32 fail_jump; //!< Jump forward this many bytes on failure.
-};
-
+struct ROSE_STRUCT_CHECK_SHUFTI_64x8 {
+ u8 code; //!< From enum RoseInstructionCode.
+ u8 hi_mask[64]; //!< High nibble mask in shufti.
+ u8 lo_mask[64]; //!< Low nibble mask in shufti.
+ u8 bucket_select_mask[64]; //!< Mask for bucket assigning.
+ u64a neg_mask; //!< 64 bits negation mask.
+ s32 offset; //!< Relative offset of the first byte.
+ u32 fail_jump; //!< Jump forward this many bytes on failure.
+};
+
+struct ROSE_STRUCT_CHECK_SHUFTI_64x16 {
+ u8 code; //!< From enum RoseInstructionCode.
+ u8 hi_mask_1[64]; //!< 4 copies of 0-15 High nibble mask.
+ u8 hi_mask_2[64]; //!< 4 copies of 16-32 High nibble mask.
+ u8 lo_mask_1[64]; //!< 4 copies of 0-15 Low nibble mask.
+ u8 lo_mask_2[64]; //!< 4 copies of 16-32 Low nibble mask.
+ u8 bucket_select_mask_hi[64]; //!< Bucket mask for high 8 buckets.
+ u8 bucket_select_mask_lo[64]; //!< Bucket mask for low 8 buckets.
+ u64a neg_mask; //!< 64 bits negation mask.
+ s32 offset; //!< Relative offset of the first byte.
+ u32 fail_jump; //!< Jump forward this many bytes on failure.
+};
+
struct ROSE_STRUCT_CHECK_INFIX {
u8 code; //!< From enum RoseInstructionCode.
u32 queue; //!< Queue of leftfix to check.
@@ -697,28 +697,28 @@ struct ROSE_STRUCT_INCLUDED_JUMP {
u8 squash; //!< FDR confirm squash mask for included literal.
u32 child_offset; //!< Program offset of included literal.
};
-
-struct ROSE_STRUCT_SET_LOGICAL {
- u8 code; //!< From enum RoseInstructionCode.
- u32 lkey; //!< Logical key to set.
- s32 offset_adjust; //!< offsetAdjust from the struct Report that triggers the flush.
-};
-
-struct ROSE_STRUCT_SET_COMBINATION {
- u8 code; //!< From enum RoseInstructionCode.
- u32 ckey; //!< Combination key to set.
-};
-
-struct ROSE_STRUCT_FLUSH_COMBINATION {
- u8 code; //!< From enum RoseInstructionCode.
-};
-
-struct ROSE_STRUCT_SET_EXHAUST {
- u8 code; //!< From enum RoseInstructionCode.
- u32 ekey; //!< Exhaustion key.
-};
-
-struct ROSE_STRUCT_LAST_FLUSH_COMBINATION {
- u8 code; //!< From enum RoseInstructionCode.
-};
+
+struct ROSE_STRUCT_SET_LOGICAL {
+ u8 code; //!< From enum RoseInstructionCode.
+ u32 lkey; //!< Logical key to set.
+ s32 offset_adjust; //!< offsetAdjust from the struct Report that triggers the flush.
+};
+
+struct ROSE_STRUCT_SET_COMBINATION {
+ u8 code; //!< From enum RoseInstructionCode.
+ u32 ckey; //!< Combination key to set.
+};
+
+struct ROSE_STRUCT_FLUSH_COMBINATION {
+ u8 code; //!< From enum RoseInstructionCode.
+};
+
+struct ROSE_STRUCT_SET_EXHAUST {
+ u8 code; //!< From enum RoseInstructionCode.
+ u32 ekey; //!< Exhaustion key.
+};
+
+struct ROSE_STRUCT_LAST_FLUSH_COMBINATION {
+ u8 code; //!< From enum RoseInstructionCode.
+};
#endif // ROSE_ROSE_PROGRAM_H
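
Every ROSE_STRUCT_* above begins with a one-byte code from enum RoseInstructionCode; that leading byte is how the interpreter recovers the concrete instruction, and its size, from the flat program byte stream. A toy, self-contained model of that convention follows, using two invented opcodes rather than the real Rose program format.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    // Toy bytecode: every instruction starts with a one-byte code, and
    // the interpreter switches on it to recover the full struct.
    enum ToyCode : uint8_t { TOY_SET_EXHAUST = 1, TOY_END = 2 };

    struct ToySetExhaust { uint8_t code; uint32_t ekey; };
    struct ToyEnd { uint8_t code; };

    int main() {
        std::vector<uint8_t> prog(sizeof(ToySetExhaust) + sizeof(ToyEnd));
        ToySetExhaust se{TOY_SET_EXHAUST, 42};
        ToyEnd end{TOY_END};
        std::memcpy(prog.data(), &se, sizeof(se));
        std::memcpy(prog.data() + sizeof(se), &end, sizeof(end));

        for (size_t pc = 0; pc < prog.size();) {
            switch (prog[pc]) {      // leading code byte selects the struct
            case TOY_SET_EXHAUST: {
                ToySetExhaust ri;
                std::memcpy(&ri, prog.data() + pc, sizeof(ri));
                std::printf("set exhaust key %u\n", ri.ekey);
                pc += sizeof(ri);
                break;
            }
            case TOY_END:
            default:
                return 0;
            }
        }
        return 0;
    }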
diff --git a/contrib/libs/hyperscan/src/rose/runtime.h b/contrib/libs/hyperscan/src/rose/runtime.h
index 9579ff5e31..5fbb2b7416 100644
--- a/contrib/libs/hyperscan/src/rose/runtime.h
+++ b/contrib/libs/hyperscan/src/rose/runtime.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -128,15 +128,15 @@ void updateLastMatchOffset(struct RoseContext *tctxt, u64a offset) {
}
static really_inline
-void updateLastCombMatchOffset(struct RoseContext *tctxt, u64a offset) {
- DEBUG_PRINTF("match @%llu, last match @%llu\n", offset,
- tctxt->lastCombMatchOffset);
-
- assert(offset >= tctxt->lastCombMatchOffset);
- tctxt->lastCombMatchOffset = offset;
-}
-
-static really_inline
+void updateLastCombMatchOffset(struct RoseContext *tctxt, u64a offset) {
+ DEBUG_PRINTF("match @%llu, last match @%llu\n", offset,
+ tctxt->lastCombMatchOffset);
+
+ assert(offset >= tctxt->lastCombMatchOffset);
+ tctxt->lastCombMatchOffset = offset;
+}
+
+static really_inline
void updateMinMatchOffset(struct RoseContext *tctxt, u64a offset) {
DEBUG_PRINTF("min match now @%llu, was @%llu\n", offset,
tctxt->minMatchOffset);
diff --git a/contrib/libs/hyperscan/src/rose/stream.c b/contrib/libs/hyperscan/src/rose/stream.c
index 9a5452b851..26268dd574 100644
--- a/contrib/libs/hyperscan/src/rose/stream.c
+++ b/contrib/libs/hyperscan/src/rose/stream.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -578,7 +578,7 @@ void roseStreamExec(const struct RoseEngine *t, struct hs_scratch *scratch) {
tctxt->lastEndOffset = offset;
tctxt->filledDelayedSlots = 0;
tctxt->lastMatchOffset = 0;
- tctxt->lastCombMatchOffset = offset;
+ tctxt->lastCombMatchOffset = offset;
tctxt->minMatchOffset = offset;
tctxt->minNonMpvMatchOffset = offset;
tctxt->next_mpv_offset = 0;
@@ -701,7 +701,7 @@ void roseStreamInitEod(const struct RoseEngine *t, u64a offset,
tctxt->lastEndOffset = offset;
tctxt->filledDelayedSlots = 0;
tctxt->lastMatchOffset = 0;
- tctxt->lastCombMatchOffset = offset; /* DO NOT set 0 here! */
+ tctxt->lastCombMatchOffset = offset; /* DO NOT set 0 here! */
tctxt->minMatchOffset = offset;
tctxt->minNonMpvMatchOffset = offset;
tctxt->next_mpv_offset = offset;
diff --git a/contrib/libs/hyperscan/src/rose/stream_long_lit.h b/contrib/libs/hyperscan/src/rose/stream_long_lit.h
index 5eef3b8f63..df9b57f4e2 100644
--- a/contrib/libs/hyperscan/src/rose/stream_long_lit.h
+++ b/contrib/libs/hyperscan/src/rose/stream_long_lit.h
@@ -33,7 +33,7 @@
#include "rose_common.h"
#include "rose_internal.h"
#include "stream_long_lit_hash.h"
-#include "util/compare.h"
+#include "util/compare.h"
#include "util/copybytes.h"
static really_inline
@@ -201,12 +201,12 @@ const u8 *prepScanBuffer(const struct core_info *ci,
} else {
// Copy: first chunk from history buffer.
assert(overhang <= ci->hlen);
- copy_upto_64_bytes(tempbuf, ci->hbuf + ci->hlen - overhang,
+ copy_upto_64_bytes(tempbuf, ci->hbuf + ci->hlen - overhang,
overhang);
// Copy: second chunk from current buffer.
size_t copy_buf_len = LONG_LIT_HASH_LEN - overhang;
assert(copy_buf_len <= ci->len);
- copy_upto_64_bytes(tempbuf + overhang, ci->buf, copy_buf_len);
+ copy_upto_64_bytes(tempbuf + overhang, ci->buf, copy_buf_len);
// Read from our temporary buffer for the hash.
base = tempbuf;
}
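
prepScanBuffer above stitches the long-literal hash window together when it straddles the stream boundary: the overhang comes from the tail of the history buffer and the remainder from the head of the current buffer. A minimal sketch of the same split copy, with plain memcpy standing in for copy_upto_64_bytes and an invented window length; it assumes 0 < overhang <= hist.size() and LEN - overhang <= cur.size().

    #include <cstddef>
    #include <cstring>
    #include <string>

    static const size_t LEN = 8; // stand-in for LONG_LIT_HASH_LEN

    static std::string prep_window(const std::string &hist,
                                   const std::string &cur, size_t overhang) {
        char tmp[LEN];
        // first chunk: the last 'overhang' bytes of the history buffer
        std::memcpy(tmp, hist.data() + hist.size() - overhang, overhang);
        // second chunk: the first LEN - overhang bytes of the current buffer
        std::memcpy(tmp + overhang, cur.data(), LEN - overhang);
        return std::string(tmp, LEN);
    }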
diff --git a/contrib/libs/hyperscan/src/rose/validate_mask.h b/contrib/libs/hyperscan/src/rose/validate_mask.h
index f29d2c76c8..8191db52f8 100644
--- a/contrib/libs/hyperscan/src/rose/validate_mask.h
+++ b/contrib/libs/hyperscan/src/rose/validate_mask.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -41,18 +41,18 @@ void validateMask32Print(const u8 *mask) {
}
printf("\n");
}
-
-#ifdef HAVE_AVX512
-static
-void validateMask64Print(const u8 *mask) {
- int i;
- for (i = 0; i < 64; i++) {
- printf("%02x ", mask[i]);
- }
- printf("\n");
-}
+
+#ifdef HAVE_AVX512
+static
+void validateMask64Print(const u8 *mask) {
+ int i;
+ for (i = 0; i < 64; i++) {
+ printf("%02x ", mask[i]);
+ }
+ printf("\n");
+}
+#endif
#endif
-#endif
// check positive bytes in cmp_result.
// return one if the check passed, zero otherwise.
@@ -126,29 +126,29 @@ int validateMask32(const m256 data, const u32 valid_data_mask,
}
}
-#ifdef HAVE_AVX512
-static really_inline
-int validateMask64(const m512 data, const u64a valid_data_mask,
- const m512 and_mask, const m512 cmp_mask,
- const u64a neg_mask) {
- u64a cmp_result = ~eq512mask(and512(data, and_mask), cmp_mask);
-#ifdef DEBUG
- DEBUG_PRINTF("data\n");
- validateMask64Print((const u8 *)&data);
- DEBUG_PRINTF("cmp_result\n");
- validateMask64Print((const u8 *)&cmp_result);
+#ifdef HAVE_AVX512
+static really_inline
+int validateMask64(const m512 data, const u64a valid_data_mask,
+ const m512 and_mask, const m512 cmp_mask,
+ const u64a neg_mask) {
+ u64a cmp_result = ~eq512mask(and512(data, and_mask), cmp_mask);
+#ifdef DEBUG
+ DEBUG_PRINTF("data\n");
+ validateMask64Print((const u8 *)&data);
+ DEBUG_PRINTF("cmp_result\n");
+ validateMask64Print((const u8 *)&cmp_result);
+#endif
+ DEBUG_PRINTF("cmp_result %016llx neg_mask %016llx\n", cmp_result, neg_mask);
+ DEBUG_PRINTF("valid_data_mask %016llx\n", valid_data_mask);
+
+ if ((cmp_result & valid_data_mask) == (neg_mask & valid_data_mask)) {
+ DEBUG_PRINTF("checkCompareResult64 passed\n");
+ return 1;
+ } else {
+ DEBUG_PRINTF("checkCompareResult64 failed\n");
+ return 0;
+ }
+}
+#endif
+
#endif
- DEBUG_PRINTF("cmp_result %016llx neg_mask %016llx\n", cmp_result, neg_mask);
- DEBUG_PRINTF("valid_data_mask %016llx\n", valid_data_mask);
-
- if ((cmp_result & valid_data_mask) == (neg_mask & valid_data_mask)) {
- DEBUG_PRINTF("checkCompareResult64 passed\n");
- return 1;
- } else {
- DEBUG_PRINTF("checkCompareResult64 failed\n");
- return 0;
- }
-}
-#endif
-
-#endif
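
validateMask64 above condenses the 64-byte check into a single AVX-512 compare: ~eq512mask yields a 64-bit mismatch bitmap, and the check passes when that bitmap, restricted to valid bytes, equals neg_mask. A scalar model of the same test follows, with illustrative names and no intrinsics.

    #include <cstdint>

    // Scalar model of validateMask64: bit i of cmp_result is set when byte
    // i fails (data[i] & and_mask[i]) == cmp_mask[i]; the check passes
    // when the mismatch bitmap over valid bytes equals neg_mask.
    static int validate_mask_64_scalar(const uint8_t data[64],
                                       const uint8_t and_mask[64],
                                       const uint8_t cmp_mask[64],
                                       uint64_t neg_mask,
                                       uint64_t valid_data_mask) {
        uint64_t cmp_result = 0;
        for (int i = 0; i < 64; i++) {
            if ((uint8_t)(data[i] & and_mask[i]) != cmp_mask[i]) {
                cmp_result |= 1ULL << i;
            }
        }
        return (cmp_result & valid_data_mask) == (neg_mask & valid_data_mask);
    }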
diff --git a/contrib/libs/hyperscan/src/rose/validate_shufti.h b/contrib/libs/hyperscan/src/rose/validate_shufti.h
index de300df4a3..351df36a76 100644
--- a/contrib/libs/hyperscan/src/rose/validate_shufti.h
+++ b/contrib/libs/hyperscan/src/rose/validate_shufti.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -175,85 +175,85 @@ int validateShuftiMask32x16(const m256 data,
return !cmp_result;
}
-#ifdef HAVE_AVX512
+#ifdef HAVE_AVX512
+static really_inline
+int validateShuftiMask64x8(const m512 data, const m512 hi_mask,
+ const m512 lo_mask, const m512 and_mask,
+ const u64a neg_mask, const u64a valid_data_mask) {
+ m512 low4bits = set64x8(0xf);
+ m512 c_lo = pshufb_m512(lo_mask, and512(data, low4bits));
+ m512 c_hi = pshufb_m512(hi_mask,
+ rshift64_m512(andnot512(low4bits, data), 4));
+ m512 t = and512(c_lo, c_hi);
+ u64a nresult = eq512mask(and512(t, and_mask), zeroes512());
+#ifdef DEBUG
+ DEBUG_PRINTF("data\n");
+ dumpMask(&data, 64);
+ DEBUG_PRINTF("hi_mask\n");
+ dumpMask(&hi_mask, 64);
+ DEBUG_PRINTF("lo_mask\n");
+ dumpMask(&lo_mask, 64);
+ DEBUG_PRINTF("c_lo\n");
+ dumpMask(&c_lo, 64);
+ DEBUG_PRINTF("c_hi\n");
+ dumpMask(&c_hi, 64);
+ DEBUG_PRINTF("nresult %llx\n", nresult);
+ DEBUG_PRINTF("valid_data_mask %llx\n", valid_data_mask);
+#endif
+ u64a cmp_result = (nresult ^ neg_mask) & valid_data_mask;
+ return !cmp_result;
+}
+
+static really_inline
+int validateShuftiMask64x16(const m512 data,
+ const m512 hi_mask_1, const m512 hi_mask_2,
+ const m512 lo_mask_1, const m512 lo_mask_2,
+ const m512 and_mask_hi, const m512 and_mask_lo,
+ const u64a neg_mask, const u64a valid_data_mask) {
+ m512 low4bits = set64x8(0xf);
+ m512 data_lo = and512(data, low4bits);
+ m512 data_hi = and512(rshift64_m512(data, 4), low4bits);
+ m512 c_lo_1 = pshufb_m512(lo_mask_1, data_lo);
+ m512 c_lo_2 = pshufb_m512(lo_mask_2, data_lo);
+ m512 c_hi_1 = pshufb_m512(hi_mask_1, data_hi);
+ m512 c_hi_2 = pshufb_m512(hi_mask_2, data_hi);
+ m512 t1 = and512(c_lo_1, c_hi_1);
+ m512 t2 = and512(c_lo_2, c_hi_2);
+ m512 result = or512(and512(t1, and_mask_lo), and512(t2, and_mask_hi));
+ u64a nresult = eq512mask(result, zeroes512());
+#ifdef DEBUG
+ DEBUG_PRINTF("data\n");
+ dumpMask(&data, 64);
+ DEBUG_PRINTF("data_lo\n");
+ dumpMask(&data_lo, 64);
+ DEBUG_PRINTF("data_hi\n");
+ dumpMask(&data_hi, 64);
+ DEBUG_PRINTF("hi_mask_1\n");
+ dumpMask(&hi_mask_1, 64);
+ DEBUG_PRINTF("hi_mask_2\n");
+ dumpMask(&hi_mask_2, 64);
+ DEBUG_PRINTF("lo_mask_1\n");
+ dumpMask(&lo_mask_1, 64);
+ DEBUG_PRINTF("lo_mask_2\n");
+ dumpMask(&lo_mask_2, 64);
+ DEBUG_PRINTF("c_lo_1\n");
+ dumpMask(&c_lo_1, 64);
+ DEBUG_PRINTF("c_lo_2\n");
+ dumpMask(&c_lo_2, 64);
+ DEBUG_PRINTF("c_hi_1\n");
+ dumpMask(&c_hi_1, 64);
+ DEBUG_PRINTF("c_hi_2\n");
+ dumpMask(&c_hi_2, 64);
+ DEBUG_PRINTF("result\n");
+ dumpMask(&result, 64);
+ DEBUG_PRINTF("valid_data_mask %llx\n", valid_data_mask);
+#endif
+ u64a cmp_result = (nresult ^ neg_mask) & valid_data_mask;
+ return !cmp_result;
+}
+#endif
+
static really_inline
-int validateShuftiMask64x8(const m512 data, const m512 hi_mask,
- const m512 lo_mask, const m512 and_mask,
- const u64a neg_mask, const u64a valid_data_mask) {
- m512 low4bits = set64x8(0xf);
- m512 c_lo = pshufb_m512(lo_mask, and512(data, low4bits));
- m512 c_hi = pshufb_m512(hi_mask,
- rshift64_m512(andnot512(low4bits, data), 4));
- m512 t = and512(c_lo, c_hi);
- u64a nresult = eq512mask(and512(t, and_mask), zeroes512());
-#ifdef DEBUG
- DEBUG_PRINTF("data\n");
- dumpMask(&data, 64);
- DEBUG_PRINTF("hi_mask\n");
- dumpMask(&hi_mask, 64);
- DEBUG_PRINTF("lo_mask\n");
- dumpMask(&lo_mask, 64);
- DEBUG_PRINTF("c_lo\n");
- dumpMask(&c_lo, 64);
- DEBUG_PRINTF("c_hi\n");
- dumpMask(&c_hi, 64);
- DEBUG_PRINTF("nresult %llx\n", nresult);
- DEBUG_PRINTF("valid_data_mask %llx\n", valid_data_mask);
-#endif
- u64a cmp_result = (nresult ^ neg_mask) & valid_data_mask;
- return !cmp_result;
-}
-
-static really_inline
-int validateShuftiMask64x16(const m512 data,
- const m512 hi_mask_1, const m512 hi_mask_2,
- const m512 lo_mask_1, const m512 lo_mask_2,
- const m512 and_mask_hi, const m512 and_mask_lo,
- const u64a neg_mask, const u64a valid_data_mask) {
- m512 low4bits = set64x8(0xf);
- m512 data_lo = and512(data, low4bits);
- m512 data_hi = and512(rshift64_m512(data, 4), low4bits);
- m512 c_lo_1 = pshufb_m512(lo_mask_1, data_lo);
- m512 c_lo_2 = pshufb_m512(lo_mask_2, data_lo);
- m512 c_hi_1 = pshufb_m512(hi_mask_1, data_hi);
- m512 c_hi_2 = pshufb_m512(hi_mask_2, data_hi);
- m512 t1 = and512(c_lo_1, c_hi_1);
- m512 t2 = and512(c_lo_2, c_hi_2);
- m512 result = or512(and512(t1, and_mask_lo), and512(t2, and_mask_hi));
- u64a nresult = eq512mask(result, zeroes512());
-#ifdef DEBUG
- DEBUG_PRINTF("data\n");
- dumpMask(&data, 64);
- DEBUG_PRINTF("data_lo\n");
- dumpMask(&data_lo, 64);
- DEBUG_PRINTF("data_hi\n");
- dumpMask(&data_hi, 64);
- DEBUG_PRINTF("hi_mask_1\n");
- dumpMask(&hi_mask_1, 64);
- DEBUG_PRINTF("hi_mask_2\n");
- dumpMask(&hi_mask_2, 64);
- DEBUG_PRINTF("lo_mask_1\n");
- dumpMask(&lo_mask_1, 64);
- DEBUG_PRINTF("lo_mask_2\n");
- dumpMask(&lo_mask_2, 64);
- DEBUG_PRINTF("c_lo_1\n");
- dumpMask(&c_lo_1, 64);
- DEBUG_PRINTF("c_lo_2\n");
- dumpMask(&c_lo_2, 64);
- DEBUG_PRINTF("c_hi_1\n");
- dumpMask(&c_hi_1, 64);
- DEBUG_PRINTF("c_hi_2\n");
- dumpMask(&c_hi_2, 64);
- DEBUG_PRINTF("result\n");
- dumpMask(&result, 64);
- DEBUG_PRINTF("valid_data_mask %llx\n", valid_data_mask);
-#endif
- u64a cmp_result = (nresult ^ neg_mask) & valid_data_mask;
- return !cmp_result;
-}
-#endif
-
-static really_inline
int checkMultipath32(u32 data, u32 hi_bits, u32 lo_bits) {
u32 t = ~(data | hi_bits);
t += lo_bits;
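
The validateShuftiMask64x8/64x16 routines above are the classic shufti scheme: each byte's high and low nibbles index two 16-entry tables, the lookups are ANDed to get the byte's bucket membership, and a byte misses when the AND with its bucket-select mask is zero. A scalar model of the 64x8 variant follows; table contents and names are illustrative, and the AVX-512 code performs all 64 lookups with two PSHUFBs.

    #include <cstdint>

    // Scalar model of validateShuftiMask64x8: bit i of nresult is set
    // when byte i matches none of its selected buckets. Pass condition
    // mirrors the code above: ((nresult ^ neg_mask) & valid_data_mask) == 0.
    static int shufti_64x8_scalar(const uint8_t data[64],
                                  const uint8_t hi_tbl[16],
                                  const uint8_t lo_tbl[16],
                                  const uint8_t bucket_select[64],
                                  uint64_t neg_mask,
                                  uint64_t valid_data_mask) {
        uint64_t nresult = 0;
        for (int i = 0; i < 64; i++) {
            uint8_t buckets = hi_tbl[data[i] >> 4] & lo_tbl[data[i] & 0xf];
            if ((buckets & bucket_select[i]) == 0) {
                nresult |= 1ULL << i;
            }
        }
        return ((nresult ^ neg_mask) & valid_data_mask) == 0;
    }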
diff --git a/contrib/libs/hyperscan/src/runtime.c b/contrib/libs/hyperscan/src/runtime.c
index 82ea99c0a2..a3659348c5 100644
--- a/contrib/libs/hyperscan/src/runtime.c
+++ b/contrib/libs/hyperscan/src/runtime.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -67,9 +67,9 @@ void prefetch_data(const char *data, unsigned length) {
/** dummy event handler for use when user does not provide one */
static
-int HS_CDECL null_onEvent(UNUSED unsigned id, UNUSED unsigned long long from,
- UNUSED unsigned long long to, UNUSED unsigned flags,
- UNUSED void *ctxt) {
+int HS_CDECL null_onEvent(UNUSED unsigned id, UNUSED unsigned long long from,
+ UNUSED unsigned long long to, UNUSED unsigned flags,
+ UNUSED void *ctxt) {
return 0;
}
@@ -150,7 +150,7 @@ void populateCoreInfo(struct hs_scratch *s, const struct RoseEngine *rose,
}
#define STATUS_VALID_BITS \
- (STATUS_TERMINATED | STATUS_EXHAUSTED | STATUS_DELAY_DIRTY | STATUS_ERROR)
+ (STATUS_TERMINATED | STATUS_EXHAUSTED | STATUS_DELAY_DIRTY | STATUS_ERROR)
/** \brief Retrieve status bitmask from stream state. */
static really_inline
@@ -356,15 +356,15 @@ hs_error_t HS_CDECL hs_scan(const hs_database_t *db, const char *data,
length, NULL, 0, 0, 0, flags);
clearEvec(rose, scratch->core_info.exhaustionVector);
- if (rose->ckeyCount) {
- scratch->core_info.logicalVector = scratch->bstate +
- rose->stateOffsets.logicalVec;
- scratch->core_info.combVector = scratch->bstate +
- rose->stateOffsets.combVec;
- scratch->tctxt.lastCombMatchOffset = 0;
- clearLvec(rose, scratch->core_info.logicalVector,
- scratch->core_info.combVector);
- }
+ if (rose->ckeyCount) {
+ scratch->core_info.logicalVector = scratch->bstate +
+ rose->stateOffsets.logicalVec;
+ scratch->core_info.combVector = scratch->bstate +
+ rose->stateOffsets.combVec;
+ scratch->tctxt.lastCombMatchOffset = 0;
+ clearLvec(rose, scratch->core_info.logicalVector,
+ scratch->core_info.combVector);
+ }
if (!length) {
if (rose->boundary.reportZeroEodOffset) {
@@ -427,11 +427,11 @@ hs_error_t HS_CDECL hs_scan(const hs_database_t *db, const char *data,
}
done_scan:
- if (unlikely(internal_matching_error(scratch))) {
+ if (unlikely(internal_matching_error(scratch))) {
+ unmarkScratchInUse(scratch);
+ return HS_UNKNOWN_ERROR;
+ } else if (told_to_stop_matching(scratch)) {
unmarkScratchInUse(scratch);
- return HS_UNKNOWN_ERROR;
- } else if (told_to_stop_matching(scratch)) {
- unmarkScratchInUse(scratch);
return HS_SCAN_TERMINATED;
}
@@ -449,23 +449,23 @@ done_scan:
}
set_retval:
- if (unlikely(internal_matching_error(scratch))) {
- unmarkScratchInUse(scratch);
- return HS_UNKNOWN_ERROR;
- }
-
- if (rose->lastFlushCombProgramOffset) {
- if (roseRunLastFlushCombProgram(rose, scratch, length)
- == MO_HALT_MATCHING) {
- if (unlikely(internal_matching_error(scratch))) {
- unmarkScratchInUse(scratch);
- return HS_UNKNOWN_ERROR;
- }
- unmarkScratchInUse(scratch);
- return HS_SCAN_TERMINATED;
- }
- }
-
+ if (unlikely(internal_matching_error(scratch))) {
+ unmarkScratchInUse(scratch);
+ return HS_UNKNOWN_ERROR;
+ }
+
+ if (rose->lastFlushCombProgramOffset) {
+ if (roseRunLastFlushCombProgram(rose, scratch, length)
+ == MO_HALT_MATCHING) {
+ if (unlikely(internal_matching_error(scratch))) {
+ unmarkScratchInUse(scratch);
+ return HS_UNKNOWN_ERROR;
+ }
+ unmarkScratchInUse(scratch);
+ return HS_SCAN_TERMINATED;
+ }
+ }
+
DEBUG_PRINTF("done. told_to_stop_matching=%d\n",
told_to_stop_matching(scratch));
hs_error_t rv = told_to_stop_matching(scratch) ? HS_SCAN_TERMINATED
@@ -529,10 +529,10 @@ void init_stream(struct hs_stream *s, const struct RoseEngine *rose,
roseInitState(rose, state);
clearEvec(rose, state + rose->stateOffsets.exhausted);
- if (rose->ckeyCount) {
- clearLvec(rose, state + rose->stateOffsets.logicalVec,
- state + rose->stateOffsets.combVec);
- }
+ if (rose->ckeyCount) {
+ clearLvec(rose, state + rose->stateOffsets.logicalVec,
+ state + rose->stateOffsets.combVec);
+ }
// SOM state multibit structures.
initSomState(rose, state);
@@ -638,7 +638,7 @@ void report_eod_matches(hs_stream_t *id, hs_scratch_t *scratch,
char *state = getMultiState(id);
u8 status = getStreamStatus(state);
- if (status & (STATUS_TERMINATED | STATUS_EXHAUSTED | STATUS_ERROR)) {
+ if (status & (STATUS_TERMINATED | STATUS_EXHAUSTED | STATUS_ERROR)) {
DEBUG_PRINTF("stream is broken, just freeing storage\n");
return;
}
@@ -647,15 +647,15 @@ void report_eod_matches(hs_stream_t *id, hs_scratch_t *scratch,
getHistory(state, rose, id->offset),
getHistoryAmount(rose, id->offset), id->offset, status, 0);
- if (rose->ckeyCount) {
- scratch->core_info.logicalVector = state +
- rose->stateOffsets.logicalVec;
- scratch->core_info.combVector = state + rose->stateOffsets.combVec;
- if (!id->offset) {
- scratch->tctxt.lastCombMatchOffset = id->offset;
- }
- }
-
+ if (rose->ckeyCount) {
+ scratch->core_info.logicalVector = state +
+ rose->stateOffsets.logicalVec;
+ scratch->core_info.combVector = state + rose->stateOffsets.combVec;
+ if (!id->offset) {
+ scratch->tctxt.lastCombMatchOffset = id->offset;
+ }
+ }
+
if (rose->somLocationCount) {
loadSomFromStream(scratch, id->offset);
}
@@ -699,14 +699,14 @@ void report_eod_matches(hs_stream_t *id, hs_scratch_t *scratch,
scratch->core_info.status |= STATUS_TERMINATED;
}
}
-
- if (rose->lastFlushCombProgramOffset && !told_to_stop_matching(scratch)) {
- if (roseRunLastFlushCombProgram(rose, scratch, id->offset)
- == MO_HALT_MATCHING) {
- DEBUG_PRINTF("told to stop matching\n");
- scratch->core_info.status |= STATUS_TERMINATED;
- }
- }
+
+ if (rose->lastFlushCombProgramOffset && !told_to_stop_matching(scratch)) {
+ if (roseRunLastFlushCombProgram(rose, scratch, id->offset)
+ == MO_HALT_MATCHING) {
+ DEBUG_PRINTF("told to stop matching\n");
+ scratch->core_info.status |= STATUS_TERMINATED;
+ }
+ }
}
HS_PUBLIC_API
@@ -763,10 +763,10 @@ hs_error_t HS_CDECL hs_reset_and_copy_stream(hs_stream_t *to_id,
return HS_SCRATCH_IN_USE;
}
report_eod_matches(to_id, scratch, onEvent, context);
- if (unlikely(internal_matching_error(scratch))) {
- unmarkScratchInUse(scratch);
- return HS_UNKNOWN_ERROR;
- }
+ if (unlikely(internal_matching_error(scratch))) {
+ unmarkScratchInUse(scratch);
+ return HS_UNKNOWN_ERROR;
+ }
unmarkScratchInUse(scratch);
}
@@ -882,11 +882,11 @@ hs_error_t hs_scan_stream_internal(hs_stream_t *id, const char *data,
char *state = getMultiState(id);
u8 status = getStreamStatus(state);
- if (status & (STATUS_TERMINATED | STATUS_EXHAUSTED | STATUS_ERROR)) {
+ if (status & (STATUS_TERMINATED | STATUS_EXHAUSTED | STATUS_ERROR)) {
DEBUG_PRINTF("stream is broken, halting scan\n");
- if (status & STATUS_ERROR) {
- return HS_UNKNOWN_ERROR;
- } else if (status & STATUS_TERMINATED) {
+ if (status & STATUS_ERROR) {
+ return HS_UNKNOWN_ERROR;
+ } else if (status & STATUS_TERMINATED) {
return HS_SCAN_TERMINATED;
} else {
return HS_SUCCESS;
@@ -905,14 +905,14 @@ hs_error_t hs_scan_stream_internal(hs_stream_t *id, const char *data,
populateCoreInfo(scratch, rose, state, onEvent, context, data, length,
getHistory(state, rose, id->offset), historyAmount,
id->offset, status, flags);
- if (rose->ckeyCount) {
- scratch->core_info.logicalVector = state +
- rose->stateOffsets.logicalVec;
- scratch->core_info.combVector = state + rose->stateOffsets.combVec;
- if (!id->offset) {
- scratch->tctxt.lastCombMatchOffset = id->offset;
- }
- }
+ if (rose->ckeyCount) {
+ scratch->core_info.logicalVector = state +
+ rose->stateOffsets.logicalVec;
+ scratch->core_info.combVector = state + rose->stateOffsets.combVec;
+ if (!id->offset) {
+ scratch->tctxt.lastCombMatchOffset = id->offset;
+ }
+ }
assert(scratch->core_info.hlen <= id->offset
&& scratch->core_info.hlen <= rose->historyRequired);
@@ -960,9 +960,9 @@ hs_error_t hs_scan_stream_internal(hs_stream_t *id, const char *data,
setStreamStatus(state, scratch->core_info.status);
- if (unlikely(internal_matching_error(scratch))) {
- return HS_UNKNOWN_ERROR;
- } else if (likely(!can_stop_matching(scratch))) {
+ if (unlikely(internal_matching_error(scratch))) {
+ return HS_UNKNOWN_ERROR;
+ } else if (likely(!can_stop_matching(scratch))) {
maintainHistoryBuffer(rose, state, data, length);
id->offset += length; /* maintain offset */
@@ -1011,10 +1011,10 @@ hs_error_t HS_CDECL hs_close_stream(hs_stream_t *id, hs_scratch_t *scratch,
return HS_SCRATCH_IN_USE;
}
report_eod_matches(id, scratch, onEvent, context);
- if (unlikely(internal_matching_error(scratch))) {
- unmarkScratchInUse(scratch);
- return HS_UNKNOWN_ERROR;
- }
+ if (unlikely(internal_matching_error(scratch))) {
+ unmarkScratchInUse(scratch);
+ return HS_UNKNOWN_ERROR;
+ }
unmarkScratchInUse(scratch);
}
@@ -1040,10 +1040,10 @@ hs_error_t HS_CDECL hs_reset_stream(hs_stream_t *id, UNUSED unsigned int flags,
return HS_SCRATCH_IN_USE;
}
report_eod_matches(id, scratch, onEvent, context);
- if (unlikely(internal_matching_error(scratch))) {
- unmarkScratchInUse(scratch);
- return HS_UNKNOWN_ERROR;
- }
+ if (unlikely(internal_matching_error(scratch))) {
+ unmarkScratchInUse(scratch);
+ return HS_UNKNOWN_ERROR;
+ }
unmarkScratchInUse(scratch);
}
@@ -1158,11 +1158,11 @@ hs_error_t HS_CDECL hs_scan_vector(const hs_database_t *db,
if (onEvent) {
report_eod_matches(id, scratch, onEvent, context);
- if (unlikely(internal_matching_error(scratch))) {
+ if (unlikely(internal_matching_error(scratch))) {
+ unmarkScratchInUse(scratch);
+ return HS_UNKNOWN_ERROR;
+ } else if (told_to_stop_matching(scratch)) {
unmarkScratchInUse(scratch);
- return HS_UNKNOWN_ERROR;
- } else if (told_to_stop_matching(scratch)) {
- unmarkScratchInUse(scratch);
return HS_SCAN_TERMINATED;
}
}
@@ -1259,10 +1259,10 @@ hs_error_t HS_CDECL hs_reset_and_expand_stream(hs_stream_t *to_stream,
return HS_SCRATCH_IN_USE;
}
report_eod_matches(to_stream, scratch, onEvent, context);
- if (unlikely(internal_matching_error(scratch))) {
- unmarkScratchInUse(scratch);
- return HS_UNKNOWN_ERROR;
- }
+ if (unlikely(internal_matching_error(scratch))) {
+ unmarkScratchInUse(scratch);
+ return HS_UNKNOWN_ERROR;
+ }
unmarkScratchInUse(scratch);
}
diff --git a/contrib/libs/hyperscan/src/scratch.c b/contrib/libs/hyperscan/src/scratch.c
index becf0f17b8..25991e2bba 100644
--- a/contrib/libs/hyperscan/src/scratch.c
+++ b/contrib/libs/hyperscan/src/scratch.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -279,9 +279,9 @@ hs_error_t HS_CDECL hs_alloc_scratch(const hs_database_t *db,
hs_error_t proto_ret = hs_check_alloc(proto_tmp);
if (proto_ret != HS_SUCCESS) {
hs_scratch_free(proto_tmp);
- if (*scratch) {
- hs_scratch_free((*scratch)->scratch_alloc);
- }
+ if (*scratch) {
+ hs_scratch_free((*scratch)->scratch_alloc);
+ }
*scratch = NULL;
return proto_ret;
}
diff --git a/contrib/libs/hyperscan/src/scratch.h b/contrib/libs/hyperscan/src/scratch.h
index 168033c818..1256f7aba8 100644
--- a/contrib/libs/hyperscan/src/scratch.h
+++ b/contrib/libs/hyperscan/src/scratch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,7 +36,7 @@
#ifndef SCRATCH_H_DA6D4FC06FF410
#define SCRATCH_H_DA6D4FC06FF410
-#include "hs_common.h"
+#include "hs_common.h"
#include "ue2common.h"
#include "rose/rose_types.h"
@@ -84,23 +84,23 @@ struct catchup_pq {
* history. */
#define STATUS_DELAY_DIRTY (1U << 2)
-/** \brief Status flag: Unexpected Rose program error. */
-#define STATUS_ERROR (1U << 3)
-
+/** \brief Status flag: Unexpected Rose program error. */
+#define STATUS_ERROR (1U << 3)
+
/** \brief Core information about the current scan, used everywhere. */
struct core_info {
void *userContext; /**< user-supplied context */
/** \brief user-supplied match callback */
- int (HS_CDECL *userCallback)(unsigned int id, unsigned long long from,
- unsigned long long to, unsigned int flags,
- void *ctx);
+ int (HS_CDECL *userCallback)(unsigned int id, unsigned long long from,
+ unsigned long long to, unsigned int flags,
+ void *ctx);
const struct RoseEngine *rose;
char *state; /**< full stream state */
char *exhaustionVector; /**< pointer to evec for this stream */
- char *logicalVector; /**< pointer to lvec for this stream */
- char *combVector; /**< pointer to cvec for this stream */
+ char *logicalVector; /**< pointer to lvec for this stream */
+ char *combVector; /**< pointer to cvec for this stream */
const u8 *buf; /**< main scan buffer */
size_t len; /**< length of main scan buffer in bytes */
const u8 *hbuf; /**< history buffer */
@@ -122,7 +122,7 @@ struct RoseContext {
* stream */
u64a lastMatchOffset; /**< last match offset report up out of rose;
* used _only_ for debugging, asserts */
- u64a lastCombMatchOffset; /**< last match offset of active combinations */
+ u64a lastCombMatchOffset; /**< last match offset of active combinations */
u64a minMatchOffset; /**< the earliest offset that we are still allowed to
* report */
u64a minNonMpvMatchOffset; /**< the earliest offset that non-mpv engines are
@@ -231,15 +231,15 @@ char told_to_stop_matching(const struct hs_scratch *scratch) {
static really_inline
char can_stop_matching(const struct hs_scratch *scratch) {
- return scratch->core_info.status &
- (STATUS_TERMINATED | STATUS_EXHAUSTED | STATUS_ERROR);
+ return scratch->core_info.status &
+ (STATUS_TERMINATED | STATUS_EXHAUSTED | STATUS_ERROR);
+}
+
+static really_inline
+char internal_matching_error(const struct hs_scratch *scratch) {
+ return scratch->core_info.status & STATUS_ERROR;
}
-static really_inline
-char internal_matching_error(const struct hs_scratch *scratch) {
- return scratch->core_info.status & STATUS_ERROR;
-}
-
/**
* \brief Mark scratch as in use.
*
diff --git a/contrib/libs/hyperscan/src/smallwrite/smallwrite_build.cpp b/contrib/libs/hyperscan/src/smallwrite/smallwrite_build.cpp
index b6dbfa317e..d993137632 100644
--- a/contrib/libs/hyperscan/src/smallwrite/smallwrite_build.cpp
+++ b/contrib/libs/hyperscan/src/smallwrite/smallwrite_build.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -793,12 +793,12 @@ bytecode_ptr<NFA> getDfa(raw_dfa &rdfa, const CompileContext &cc,
bytecode_ptr<NFA> dfa = nullptr;
if (cc.grey.allowSmallWriteSheng) {
dfa = shengCompile(rdfa, cc, rm, only_accel_init, &accel_states);
- if (!dfa) {
- dfa = sheng32Compile(rdfa, cc, rm, only_accel_init, &accel_states);
- }
- if (!dfa) {
- dfa = sheng64Compile(rdfa, cc, rm, only_accel_init, &accel_states);
- }
+ if (!dfa) {
+ dfa = sheng32Compile(rdfa, cc, rm, only_accel_init, &accel_states);
+ }
+ if (!dfa) {
+ dfa = sheng64Compile(rdfa, cc, rm, only_accel_init, &accel_states);
+ }
}
if (!dfa) {
dfa = mcclellanCompile(rdfa, cc, rm, only_accel_init,
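
getDfa() above tries progressively wider Sheng variants (sheng, then sheng32, then sheng64) and only falls back to McClellan when none of them compile. The control flow is just a null-checked chain; here is a standalone sketch of that pattern with hypothetical builder names standing in for shengCompile and friends:

    #include <functional>
    #include <memory>
    #include <vector>

    struct NFA {};
    using Builder = std::function<std::unique_ptr<NFA>()>;

    // Try each engine variant in order; keep the first one that builds.
    static std::unique_ptr<NFA>
    first_that_builds(const std::vector<Builder> &order) {
        std::unique_ptr<NFA> dfa;
        for (const auto &build : order) {
            if (!dfa) {
                dfa = build(); // next variant is tried only on failure
            }
        }
        return dfa;
    }

    int main() {
        auto fail = []() -> std::unique_ptr<NFA> { return nullptr; };
        auto ok = []() { return std::make_unique<NFA>(); };
        // sheng -> sheng32 -> sheng64, modelled as fail, fail, succeed.
        auto dfa = first_that_builds({fail, fail, ok});
        return dfa ? 0 : 1;
    }
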
diff --git a/contrib/libs/hyperscan/src/som/slot_manager_internal.h b/contrib/libs/hyperscan/src/som/slot_manager_internal.h
index 4a3651c37f..7e1fecc7e6 100644
--- a/contrib/libs/hyperscan/src/som/slot_manager_internal.h
+++ b/contrib/libs/hyperscan/src/som/slot_manager_internal.h
@@ -97,7 +97,7 @@ struct SlotCache {
CacheStore store;
- std::unordered_set<std::shared_ptr<const NGHolder>, NGHolderHasher,
+ std::unordered_set<std::shared_ptr<const NGHolder>, NGHolderHasher,
NGHolderEqual> initial_prefixes;
std::vector<InitialResetInfo> initial_resets;
};
diff --git a/contrib/libs/hyperscan/src/stream_compress_impl.h b/contrib/libs/hyperscan/src/stream_compress_impl.h
index 30c86f5290..d1ccf5e6d0 100644
--- a/contrib/libs/hyperscan/src/stream_compress_impl.h
+++ b/contrib/libs/hyperscan/src/stream_compress_impl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, Intel Corporation
+ * Copyright (c) 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -148,13 +148,13 @@ size_t JOIN(sc_, FN_SUFFIX)(const struct RoseEngine *rose,
/* copy the exhaustion multibit */
COPY_MULTIBIT(stream_body + so->exhausted, rose->ekeyCount);
- /* copy the logical multibit */
- COPY_MULTIBIT(stream_body + so->logicalVec,
- rose->lkeyCount + rose->lopCount);
-
- /* copy the combination multibit */
- COPY_MULTIBIT(stream_body + so->combVec, rose->ckeyCount);
-
+ /* copy the logical multibit */
+ COPY_MULTIBIT(stream_body + so->logicalVec,
+ rose->lkeyCount + rose->lopCount);
+
+ /* copy the combination multibit */
+ COPY_MULTIBIT(stream_body + so->combVec, rose->ckeyCount);
+
/* copy nfa stream state for endfixes */
/* Note: in the expand case the active array has already been copied into
* the stream. */
diff --git a/contrib/libs/hyperscan/src/ue2common.h b/contrib/libs/hyperscan/src/ue2common.h
index 3edd35560f..5705af7be4 100644
--- a/contrib/libs/hyperscan/src/ue2common.h
+++ b/contrib/libs/hyperscan/src/ue2common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -66,13 +66,13 @@ typedef signed int s32;
/* We append the 'a' for aligned, since these aren't common, garden variety
* 64 bit values. The alignment is necessary for structs on some platforms,
* so we don't end up performing accidental unaligned accesses. */
-#if defined(_WIN32) && ! defined(_WIN64)
-typedef unsigned long long ALIGN_ATTR(4) u64a;
-typedef signed long long ALIGN_ATTR(4) s64a;
-#else
+#if defined(_WIN32) && ! defined(_WIN64)
+typedef unsigned long long ALIGN_ATTR(4) u64a;
+typedef signed long long ALIGN_ATTR(4) s64a;
+#else
typedef unsigned long long ALIGN_ATTR(8) u64a;
typedef signed long long ALIGN_ATTR(8) s64a;
-#endif
+#endif
/* get the SIMD types */
#include "util/simd_types.h"
@@ -84,7 +84,7 @@ typedef u32 ReportID;
/* Shorthand for attribute to mark a function as part of our public API.
* Functions without this attribute will be hidden. */
#if !defined(_WIN32)
-#define HS_PUBLIC_API __attribute__((visibility("default")))
+#define HS_PUBLIC_API __attribute__((visibility("default")))
#else
// TODO: dllexport defines for windows
#define HS_PUBLIC_API
@@ -206,7 +206,7 @@ typedef u32 ReportID;
#endif
#endif
-#if defined(DEBUG) && !defined(DEBUG_PRINTF)
+#if defined(DEBUG) && !defined(DEBUG_PRINTF)
#include <string.h>
#include <stdio.h>
#define DEBUG_PRINTF(format, ...) printf("%s:%s:%d:" format, \
diff --git a/contrib/libs/hyperscan/src/util/arch.h b/contrib/libs/hyperscan/src/util/arch.h
index 5bcca08c94..6220f12bc1 100644
--- a/contrib/libs/hyperscan/src/util/arch.h
+++ b/contrib/libs/hyperscan/src/util/arch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, Intel Corporation
+ * Copyright (c) 2017-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,7 +34,7 @@
#define UTIL_ARCH_H_
#define HAVE_SSE2
-
+
/*
* MSVC uses a different form of inline asm
*/
diff --git a/contrib/libs/hyperscan/src/util/bitfield.h b/contrib/libs/hyperscan/src/util/bitfield.h
index 181c1100b6..a580da7b60 100644
--- a/contrib/libs/hyperscan/src/util/bitfield.h
+++ b/contrib/libs/hyperscan/src/util/bitfield.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -305,10 +305,10 @@ public:
}
/// Bitwise OR.
- bitfield operator|(const bitfield &a) const {
- bitfield b = a;
- b |= *this;
- return b;
+ bitfield operator|(const bitfield &a) const {
+ bitfield b = a;
+ b |= *this;
+ return b;
}
/// Bitwise OR-equals.
@@ -326,10 +326,10 @@ public:
}
/// Bitwise AND.
- bitfield operator&(const bitfield &a) const {
- bitfield b = a;
- b &= *this;
- return b;
+ bitfield operator&(const bitfield &a) const {
+ bitfield b = a;
+ b &= *this;
+ return b;
}
/// Bitwise AND-equals.
diff --git a/contrib/libs/hyperscan/src/util/copybytes.h b/contrib/libs/hyperscan/src/util/copybytes.h
index cb703c30f9..7f37d96bc5 100644
--- a/contrib/libs/hyperscan/src/util/copybytes.h
+++ b/contrib/libs/hyperscan/src/util/copybytes.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020, Intel Corporation
+ * Copyright (c) 2016-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,7 +33,7 @@
#include "simd_utils.h"
static really_inline
-void copy_upto_64_bytes(u8 *dst, const u8 *src, unsigned int len) {
+void copy_upto_64_bytes(u8 *dst, const u8 *src, unsigned int len) {
switch (len) {
case 0:
break;
@@ -72,41 +72,41 @@ void copy_upto_64_bytes(u8 *dst, const u8 *src, unsigned int len) {
case 16:
storeu128(dst, loadu128(src));
break;
- case 17:
- case 18:
- case 19:
- case 20:
- case 21:
- case 22:
- case 23:
- case 24:
- case 25:
- case 26:
- case 27:
- case 28:
- case 29:
- case 30:
- case 31:
- storeu128(dst + len - 16, loadu128(src + len - 16));
- storeu128(dst, loadu128(src));
- break;
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 21:
+ case 22:
+ case 23:
+ case 24:
+ case 25:
+ case 26:
+ case 27:
+ case 28:
+ case 29:
+ case 30:
+ case 31:
+ storeu128(dst + len - 16, loadu128(src + len - 16));
+ storeu128(dst, loadu128(src));
+ break;
case 32:
storeu256(dst, loadu256(src));
break;
-#ifdef HAVE_AVX512
- case 64:
- storebytes512(dst, loadu512(src), 64);
- break;
+#ifdef HAVE_AVX512
+ case 64:
+ storebytes512(dst, loadu512(src), 64);
+ break;
default:
- assert(len < 64);
- u64a k = (1ULL << len) - 1;
- storeu_mask_m512(dst, k, loadu_maskz_m512(k, src));
+ assert(len < 64);
+ u64a k = (1ULL << len) - 1;
+ storeu_mask_m512(dst, k, loadu_maskz_m512(k, src));
break;
-#else
- default:
- assert(0);
- break;
-#endif
+#else
+ default:
+ assert(0);
+ break;
+#endif
}
}
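
Two details in the copy_upto_64_bytes() hunk are easy to miss: the 17-31 byte cases are handled with a pair of overlapping unaligned 16-byte loads/stores, and the AVX-512 default case copies a partial block with a byte mask k = (1ULL << len) - 1. A scalar model of the masked tail copy, as a standalone sketch rather than the SIMD intrinsics themselves:

    #include <cassert>
    #include <cstdint>

    // Bit i of k selects byte i, exactly as storeu_mask_m512 /
    // loadu_maskz_m512 interpret the AVX-512 byte mask.
    static void masked_copy_upto_63(uint8_t *dst, const uint8_t *src,
                                    unsigned len) {
        assert(len < 64);
        uint64_t k = (1ULL << len) - 1; // low 'len' bits set
        for (unsigned i = 0; i < 64; i++) {
            if (k & (1ULL << i)) {
                dst[i] = src[i]; // only selected bytes are written
            }
        }
    }

    int main() {
        uint8_t src[64], dst[64];
        for (int i = 0; i < 64; i++) { src[i] = (uint8_t)i; dst[i] = 0xFF; }
        masked_copy_upto_63(dst, src, 37);
        assert(dst[36] == 36 && dst[37] == 0xFF); // bytes past len untouched
        return 0;
    }
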
diff --git a/contrib/libs/hyperscan/src/util/cpuid_flags.c b/contrib/libs/hyperscan/src/util/cpuid_flags.c
index 42d920da29..c00ce58e2d 100644
--- a/contrib/libs/hyperscan/src/util/cpuid_flags.c
+++ b/contrib/libs/hyperscan/src/util/cpuid_flags.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -50,11 +50,11 @@ u64a cpuid_flags(void) {
cap |= HS_CPU_FEATURES_AVX512;
}
- if (check_avx512vbmi()) {
- DEBUG_PRINTF("AVX512VBMI enabled\n");
- cap |= HS_CPU_FEATURES_AVX512VBMI;
- }
-
+ if (check_avx512vbmi()) {
+ DEBUG_PRINTF("AVX512VBMI enabled\n");
+ cap |= HS_CPU_FEATURES_AVX512VBMI;
+ }
+
#if !defined(FAT_RUNTIME) && !defined(HAVE_AVX2)
cap &= ~HS_CPU_FEATURES_AVX2;
#endif
@@ -64,11 +64,11 @@ u64a cpuid_flags(void) {
cap &= ~HS_CPU_FEATURES_AVX512;
#endif
-#if (!defined(FAT_RUNTIME) && !defined(HAVE_AVX512VBMI)) || \
- (defined(FAT_RUNTIME) && !defined(BUILD_AVX512VBMI))
- cap &= ~HS_CPU_FEATURES_AVX512VBMI;
-#endif
-
+#if (!defined(FAT_RUNTIME) && !defined(HAVE_AVX512VBMI)) || \
+ (defined(FAT_RUNTIME) && !defined(BUILD_AVX512VBMI))
+ cap &= ~HS_CPU_FEATURES_AVX512VBMI;
+#endif
+
return cap;
}
@@ -115,11 +115,11 @@ static const struct family_id known_microarch[] = {
{ 0x6, 0x8E, HS_TUNE_FAMILY_SKL }, /* Kabylake Mobile */
{ 0x6, 0x9E, HS_TUNE_FAMILY_SKL }, /* Kabylake desktop */
- { 0x6, 0x7D, HS_TUNE_FAMILY_ICL }, /* Icelake */
- { 0x6, 0x7E, HS_TUNE_FAMILY_ICL }, /* Icelake */
- { 0x6, 0x6A, HS_TUNE_FAMILY_ICX }, /* Icelake Xeon-D */
- { 0x6, 0x6C, HS_TUNE_FAMILY_ICX }, /* Icelake Xeon */
-
+ { 0x6, 0x7D, HS_TUNE_FAMILY_ICL }, /* Icelake */
+ { 0x6, 0x7E, HS_TUNE_FAMILY_ICL }, /* Icelake */
+ { 0x6, 0x6A, HS_TUNE_FAMILY_ICX }, /* Icelake Xeon-D */
+ { 0x6, 0x6C, HS_TUNE_FAMILY_ICX }, /* Icelake Xeon */
+
};
#ifdef DUMP_SUPPORT
@@ -135,8 +135,8 @@ const char *dumpTune(u32 tune) {
T_CASE(HS_TUNE_FAMILY_BDW);
T_CASE(HS_TUNE_FAMILY_SKL);
T_CASE(HS_TUNE_FAMILY_SKX);
- T_CASE(HS_TUNE_FAMILY_ICL);
- T_CASE(HS_TUNE_FAMILY_ICX);
+ T_CASE(HS_TUNE_FAMILY_ICL);
+ T_CASE(HS_TUNE_FAMILY_ICX);
}
#undef T_CASE
return "unknown";
diff --git a/contrib/libs/hyperscan/src/util/cpuid_inline.h b/contrib/libs/hyperscan/src/util/cpuid_inline.h
index 4e4e7f6d6d..b7b4245289 100644
--- a/contrib/libs/hyperscan/src/util/cpuid_inline.h
+++ b/contrib/libs/hyperscan/src/util/cpuid_inline.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, Intel Corporation
+ * Copyright (c) 2017-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -74,9 +74,9 @@ void cpuid(unsigned int op, unsigned int leaf, unsigned int *eax,
#define CPUID_HTT (1 << 28)
// Structured Extended Feature Flags Enumeration Leaf ECX values
-#define CPUID_AVX512VBMI (1 << 1)
-
-// Structured Extended Feature Flags Enumeration Leaf EBX values
+#define CPUID_AVX512VBMI (1 << 1)
+
+// Structured Extended Feature Flags Enumeration Leaf EBX values
#define CPUID_BMI (1 << 3)
#define CPUID_AVX2 (1 << 5)
#define CPUID_BMI2 (1 << 8)
@@ -188,51 +188,51 @@ int check_avx512(void) {
}
static inline
-int check_avx512vbmi(void) {
-#if defined(__INTEL_COMPILER)
- return _may_i_use_cpu_feature(_FEATURE_AVX512VBMI);
-#else
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(1, 0, &eax, &ebx, &ecx, &edx);
-
- /* check XSAVE is enabled by OS */
- if (!(ecx & CPUID_XSAVE)) {
- DEBUG_PRINTF("AVX and XSAVE not supported\n");
- return 0;
- }
-
- /* check that AVX 512 registers are enabled by OS */
- u64a xcr0 = xgetbv(0);
- if ((xcr0 & CPUID_XCR0_AVX512) != CPUID_XCR0_AVX512) {
- DEBUG_PRINTF("AVX512 registers not enabled\n");
- return 0;
- }
-
- /* ECX and EDX contain capability flags */
- ecx = 0;
- cpuid(7, 0, &eax, &ebx, &ecx, &edx);
-
- if (!(ebx & CPUID_AVX512F)) {
- DEBUG_PRINTF("AVX512F (AVX512 Foundation) instructions not enabled\n");
- return 0;
- }
-
- if (!(ebx & CPUID_AVX512BW)) {
- DEBUG_PRINTF("AVX512BW instructions not enabled\n");
- return 0;
- }
-
- if (ecx & CPUID_AVX512VBMI) {
- DEBUG_PRINTF("AVX512VBMI instructions enabled\n");
- return 1;
- }
-
- return 0;
-#endif
-}
-
-static inline
+int check_avx512vbmi(void) {
+#if defined(__INTEL_COMPILER)
+ return _may_i_use_cpu_feature(_FEATURE_AVX512VBMI);
+#else
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(1, 0, &eax, &ebx, &ecx, &edx);
+
+ /* check XSAVE is enabled by OS */
+ if (!(ecx & CPUID_XSAVE)) {
+ DEBUG_PRINTF("AVX and XSAVE not supported\n");
+ return 0;
+ }
+
+ /* check that AVX 512 registers are enabled by OS */
+ u64a xcr0 = xgetbv(0);
+ if ((xcr0 & CPUID_XCR0_AVX512) != CPUID_XCR0_AVX512) {
+ DEBUG_PRINTF("AVX512 registers not enabled\n");
+ return 0;
+ }
+
+ /* ECX and EDX contain capability flags */
+ ecx = 0;
+ cpuid(7, 0, &eax, &ebx, &ecx, &edx);
+
+ if (!(ebx & CPUID_AVX512F)) {
+ DEBUG_PRINTF("AVX512F (AVX512 Foundation) instructions not enabled\n");
+ return 0;
+ }
+
+ if (!(ebx & CPUID_AVX512BW)) {
+ DEBUG_PRINTF("AVX512BW instructions not enabled\n");
+ return 0;
+ }
+
+ if (ecx & CPUID_AVX512VBMI) {
+ DEBUG_PRINTF("AVX512VBMI instructions enabled\n");
+ return 1;
+ }
+
+ return 0;
+#endif
+}
+
+static inline
int check_ssse3(void) {
unsigned int eax, ebx, ecx, edx;
cpuid(1, 0, &eax, &ebx, &ecx, &edx);
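
check_avx512vbmi() above gates on three things: XSAVE support plus OS-enabled AVX-512 register state (via xgetbv), the AVX512F/AVX512BW foundation bits in leaf 7 EBX, and the VBMI bit in leaf 7 ECX. A hedged sketch of the feature-bit half using GCC/Clang's <cpuid.h> rather than the in-tree cpuid() wrapper; the XCR0 OS check is omitted here, and the bit positions follow the Intel SDM:

    #include <cpuid.h>
    #include <cstdio>

    static bool cpu_reports_avx512vbmi() {
        unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
            return false; // CPUID leaf 7 not supported
        }
        bool f    = ebx & (1u << 16); // AVX512F
        bool bw   = ebx & (1u << 30); // AVX512BW
        bool vbmi = ecx & (1u << 1);  // matches CPUID_AVX512VBMI above
        return f && bw && vbmi;
    }

    int main() {
        std::printf("AVX512VBMI (CPU feature bits only): %s\n",
                    cpu_reports_avx512vbmi() ? "yes" : "no");
        return 0;
    }
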
diff --git a/contrib/libs/hyperscan/src/util/dump_util.h b/contrib/libs/hyperscan/src/util/dump_util.h
index 48f0821810..dc352c28ee 100644
--- a/contrib/libs/hyperscan/src/util/dump_util.h
+++ b/contrib/libs/hyperscan/src/util/dump_util.h
@@ -1,63 +1,63 @@
-/*
- * Copyright (c) 2016-2017, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DUMP_UTIL
-#define DUMP_UTIL
-
-#include "noncopyable.h"
-
-#include <cstdio>
-#include <memory>
-#include <string>
-
-namespace ue2 {
-
-/**
- * Same as fopen(), but on error throws an exception rather than returning NULL.
- */
-FILE *fopen_or_throw(const char *path, const char *mode);
-
-/**
- * \brief Helper class: wraps C stdio FILE* handle and takes care of closing
- * the file on destruction.
- */
-class StdioFile : noncopyable {
-public:
- StdioFile(const std::string &filename, const char *mode)
- : handle(fopen_or_throw(filename.c_str(), mode), &fclose) {}
-
- // Implicit conversion to FILE* for use by stdio calls.
- operator FILE *() { return handle.get(); }
-
-private:
- std::unique_ptr<FILE, decltype(&fclose)> handle;
-};
-
-} // namespace ue2
-
-#endif
+/*
+ * Copyright (c) 2016-2017, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DUMP_UTIL
+#define DUMP_UTIL
+
+#include "noncopyable.h"
+
+#include <cstdio>
+#include <memory>
+#include <string>
+
+namespace ue2 {
+
+/**
+ * Same as fopen(), but on error throws an exception rather than returning NULL.
+ */
+FILE *fopen_or_throw(const char *path, const char *mode);
+
+/**
+ * \brief Helper class: wraps C stdio FILE* handle and takes care of closing
+ * the file on destruction.
+ */
+class StdioFile : noncopyable {
+public:
+ StdioFile(const std::string &filename, const char *mode)
+ : handle(fopen_or_throw(filename.c_str(), mode), &fclose) {}
+
+ // Implicit conversion to FILE* for use by stdio calls.
+ operator FILE *() { return handle.get(); }
+
+private:
+ std::unique_ptr<FILE, decltype(&fclose)> handle;
+};
+
+} // namespace ue2
+
+#endif
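
The StdioFile wrapper above is the classic unique_ptr-with-custom-deleter idiom: fopen_or_throw() converts failure into an exception, and &fclose as the deleter guarantees the handle is closed on every exit path. A self-contained restatement (names suffixed _sk to avoid implying the in-tree API):

    #include <cstdio>
    #include <memory>
    #include <stdexcept>
    #include <string>

    static FILE *fopen_or_throw_sk(const char *path, const char *mode) {
        FILE *f = std::fopen(path, mode);
        if (!f) {
            throw std::runtime_error(std::string("failed to open ") + path);
        }
        return f;
    }

    int main() {
        // Same shape as StdioFile's member: the deleter runs on scope exit,
        // including exceptional exits.
        std::unique_ptr<FILE, decltype(&std::fclose)>
            handle(fopen_or_throw_sk("/tmp/dump_sk.txt", "w"), &std::fclose);
        std::fprintf(handle.get(), "hello\n");
        return 0;
    }
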
diff --git a/contrib/libs/hyperscan/src/util/graph.h b/contrib/libs/hyperscan/src/util/graph.h
index 9fd55304fd..3e18dae552 100644
--- a/contrib/libs/hyperscan/src/util/graph.h
+++ b/contrib/libs/hyperscan/src/util/graph.h
@@ -170,7 +170,7 @@ find_vertices_in_cycles(const Graph &g) {
assert(!comp.empty());
if (comp.size() > 1) {
insert(&rv, comp);
- continue;
+ continue;
}
vertex_descriptor v = *comp.begin();
if (hasSelfLoop(v, g)) {
diff --git a/contrib/libs/hyperscan/src/util/graph_small_color_map.h b/contrib/libs/hyperscan/src/util/graph_small_color_map.h
index 9918bc771f..249b71531c 100644
--- a/contrib/libs/hyperscan/src/util/graph_small_color_map.h
+++ b/contrib/libs/hyperscan/src/util/graph_small_color_map.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, Intel Corporation
+ * Copyright (c) 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -114,21 +114,21 @@ public:
std::memset(data->data(), val, data->size());
}
- size_t count(small_color color) const {
- assert(static_cast<u8>(color) < sizeof(fill_lut));
- size_t num = 0;
- for (size_t i = 0; i < n; i++) {
- size_t byte = i / entries_per_byte;
- assert(byte < data->size());
- size_t bit = (i % entries_per_byte) * bit_size;
- u8 val = ((*data)[byte] >> bit) & bit_mask;
- if (static_cast<small_color>(val) == color) {
- num++;
- }
- }
- return num;
- }
-
+ size_t count(small_color color) const {
+ assert(static_cast<u8>(color) < sizeof(fill_lut));
+ size_t num = 0;
+ for (size_t i = 0; i < n; i++) {
+ size_t byte = i / entries_per_byte;
+ assert(byte < data->size());
+ size_t bit = (i % entries_per_byte) * bit_size;
+ u8 val = ((*data)[byte] >> bit) & bit_mask;
+ if (static_cast<small_color>(val) == color) {
+ num++;
+ }
+ }
+ return num;
+ }
+
small_color get_impl(key_type key) const {
auto i = get(index_map, key);
assert(i < n);
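
The count() method added above walks a packed array of 2-bit color values, four entries per byte. A standalone model of the same indexing arithmetic; the color names and helper are hypothetical, not the in-tree class:

    #include <cassert>
    #include <cstdint>
    #include <cstddef>
    #include <vector>

    enum class small_color : uint8_t { white = 0, gray = 1, black = 2 };

    static std::size_t count_color(const std::vector<uint8_t> &data,
                                   std::size_t n, small_color color) {
        const std::size_t entries_per_byte = 4; // 8 bits / 2-bit entries
        std::size_t num = 0;
        for (std::size_t i = 0; i < n; i++) {
            std::size_t byte = i / entries_per_byte;
            std::size_t bit = (i % entries_per_byte) * 2;
            uint8_t val = (data[byte] >> bit) & 0x3;
            if (static_cast<small_color>(val) == color) {
                num++;
            }
        }
        return num;
    }

    int main() {
        std::vector<uint8_t> data(2, 0);     // 8 entries, all white
        data[0] |= uint8_t(2) << 2;          // set entry 1 to black
        assert(count_color(data, 8, small_color::black) == 1);
        assert(count_color(data, 8, small_color::white) == 7);
        return 0;
    }
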
diff --git a/contrib/libs/hyperscan/src/util/graph_undirected.h b/contrib/libs/hyperscan/src/util/graph_undirected.h
index 75b6084c4d..049964ab07 100644
--- a/contrib/libs/hyperscan/src/util/graph_undirected.h
+++ b/contrib/libs/hyperscan/src/util/graph_undirected.h
@@ -1,501 +1,501 @@
-/*
- * Copyright (c) 2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * \file
- * \brief Adaptor that presents an undirected view of a bidirectional BGL graph.
- *
- * Analogous to the reverse_graph adapter. You can construct one of these for
- * bidirectional graph g with:
- *
- * auto ug = make_undirected_graph(g);
- *
- * The vertex descriptor type is the same as that of the underlying graph, but
- * the edge descriptor is different.
- */
-
-#ifndef GRAPH_UNDIRECTED_H
-#define GRAPH_UNDIRECTED_H
-
-#include "util/operators.h"
-
-#include <boost/graph/adjacency_iterator.hpp>
-#include <boost/graph/graph_traits.hpp>
-#include <boost/graph/properties.hpp>
-#include <boost/iterator/iterator_facade.hpp>
-
-#include <type_traits>
-#include <utility>
-
-namespace ue2 {
-
-struct undirected_graph_tag {};
-
-template <class BidirectionalGraph, class GraphRef>
-class undirected_graph;
-
-namespace undirected_detail {
-
-template <typename BidirectionalGraph>
-class undirected_graph_edge_descriptor
- : totally_ordered<undirected_graph_edge_descriptor<BidirectionalGraph>> {
- using base_graph_type = BidirectionalGraph;
- using base_graph_traits = typename boost::graph_traits<base_graph_type>;
- using base_edge_type = typename base_graph_traits::edge_descriptor;
- using base_vertex_type = typename base_graph_traits::vertex_descriptor;
-
- base_edge_type underlying_edge;
- const base_graph_type *g;
- bool reverse; // if true, reverse vertices in source() and target()
-
- inline std::pair<base_vertex_type, base_vertex_type>
- canonical_edge() const {
- auto u = std::min(source(underlying_edge, *g),
- target(underlying_edge, *g));
- auto v = std::max(source(underlying_edge, *g),
- target(underlying_edge, *g));
- return std::make_pair(u, v);
- }
-
- template <class BidiGraph, class GraphRef>
- friend class ::ue2::undirected_graph;
-
-public:
- undirected_graph_edge_descriptor() = default;
-
- undirected_graph_edge_descriptor(base_edge_type edge,
- const base_graph_type &g_in,
- bool reverse_in)
- : underlying_edge(std::move(edge)), g(&g_in), reverse(reverse_in) {}
-
- bool operator==(const undirected_graph_edge_descriptor &other) const {
- return canonical_edge() == other.canonical_edge();
- }
-
- bool operator<(const undirected_graph_edge_descriptor &other) const {
- return canonical_edge() < other.canonical_edge();
- }
-
- base_vertex_type get_source() const {
- return reverse ? target(underlying_edge, *g)
- : source(underlying_edge, *g);
- }
-
- base_vertex_type get_target() const {
- return reverse ? source(underlying_edge, *g)
- : target(underlying_edge, *g);
- }
-};
-
-} // namespace undirected_detail
-
-template <class BidirectionalGraph, class GraphRef = const BidirectionalGraph &>
-class undirected_graph {
-private:
- using Self = undirected_graph<BidirectionalGraph, GraphRef>;
- using Traits = boost::graph_traits<BidirectionalGraph>;
-
-public:
- using base_type = BidirectionalGraph;
- using base_ref_type = GraphRef;
-
- explicit undirected_graph(GraphRef g_in) : g(g_in) {}
-
- // Graph requirements
- using vertex_descriptor = typename Traits::vertex_descriptor;
- using edge_descriptor =
- undirected_detail::undirected_graph_edge_descriptor<base_type>;
- using directed_category = boost::undirected_tag;
- using edge_parallel_category = boost::disallow_parallel_edge_tag;
- using traversal_category = typename Traits::traversal_category;
-
- // IncidenceGraph requirements
-
- /**
- * \brief Templated iterator used for out_edge_iterator and
- * in_edge_iterator, depending on the value of Reverse.
- */
- template <bool Reverse>
- class adj_edge_iterator
- : public boost::iterator_facade<
- adj_edge_iterator<Reverse>, edge_descriptor,
- boost::forward_traversal_tag, edge_descriptor> {
- vertex_descriptor u;
- const base_type *g;
- typename Traits::in_edge_iterator in_it;
- typename Traits::out_edge_iterator out_it;
- bool done_in = false;
- public:
- adj_edge_iterator() = default;
-
- adj_edge_iterator(vertex_descriptor u_in, const base_type &g_in,
- bool end_iter)
- : u(std::move(u_in)), g(&g_in) {
- auto pi = in_edges(u, *g);
- auto po = out_edges(u, *g);
- if (end_iter) {
- in_it = pi.second;
- out_it = po.second;
- done_in = true;
- } else {
- in_it = pi.first;
- out_it = po.first;
- if (in_it == pi.second) {
- done_in = true;
- find_first_valid_out();
- }
- }
- }
-
- private:
- friend class boost::iterator_core_access;
-
- void find_first_valid_out() {
- auto out_end = out_edges(u, *g).second;
- for (; out_it != out_end; ++out_it) {
- auto v = target(*out_it, *g);
- if (!edge(v, u, *g).second) {
- break;
- }
- }
- }
-
- void increment() {
- if (!done_in) {
- auto in_end = in_edges(u, *g).second;
- assert(in_it != in_end);
- ++in_it;
- if (in_it == in_end) {
- done_in = true;
- find_first_valid_out();
- }
- } else {
- ++out_it;
- find_first_valid_out();
- }
- }
- bool equal(const adj_edge_iterator &other) const {
- return in_it == other.in_it && out_it == other.out_it;
- }
- edge_descriptor dereference() const {
- if (done_in) {
- return edge_descriptor(*out_it, *g, Reverse);
- } else {
- return edge_descriptor(*in_it, *g, !Reverse);
- }
- }
- };
-
- using out_edge_iterator = adj_edge_iterator<false>;
- using in_edge_iterator = adj_edge_iterator<true>;
-
- using degree_size_type = typename Traits::degree_size_type;
-
- // AdjacencyGraph requirements
- using adjacency_iterator =
- typename boost::adjacency_iterator_generator<Self, vertex_descriptor,
- out_edge_iterator>::type;
- using inv_adjacency_iterator =
- typename boost::inv_adjacency_iterator_generator<
- Self, vertex_descriptor, in_edge_iterator>::type;
-
- // VertexListGraph requirements
- using vertex_iterator = typename Traits::vertex_iterator;
-
- // EdgeListGraph requirements
- enum {
- is_edge_list = std::is_convertible<traversal_category,
- boost::edge_list_graph_tag>::value
- };
-
- /** \brief Iterator used for edges(). */
- class edge_iterator
- : public boost::iterator_facade<edge_iterator, edge_descriptor,
- boost::forward_traversal_tag,
- edge_descriptor> {
- const base_type *g;
- typename Traits::edge_iterator it;
- public:
- edge_iterator() = default;
-
- edge_iterator(typename Traits::edge_iterator it_in,
- const base_type &g_in)
- : g(&g_in), it(std::move(it_in)) {
- find_first_valid_edge();
- }
-
- private:
- friend class boost::iterator_core_access;
-
- void find_first_valid_edge() {
- const auto end = edges(*g).second;
- for (; it != end; ++it) {
- const auto &u = source(*it, *g);
- const auto &v = target(*it, *g);
- if (!edge(v, u, *g).second) {
- break; // No reverse edge, we must visit this one
- }
- if (u <= v) {
- // We have a reverse edge, but we'll return this one (and
- // skip the other). Note that (u, u) shouldn't be skipped.
- break;
- }
- }
- }
-
- void increment() {
- assert(it != edges(*g).second);
- ++it;
- find_first_valid_edge();
- }
- bool equal(const edge_iterator &other) const {
- return it == other.it;
- }
- edge_descriptor dereference() const {
- return edge_descriptor(*it, *g, false);
- }
- };
-
- using vertices_size_type = typename Traits::vertices_size_type;
- using edges_size_type = typename Traits::edges_size_type;
-
- using graph_tag = undirected_graph_tag;
-
- using vertex_bundle_type =
- typename boost::vertex_bundle_type<base_type>::type;
- using edge_bundle_type = typename boost::edge_bundle_type<base_type>::type;
-
- vertex_bundle_type &operator[](const vertex_descriptor &d) {
- return const_cast<base_type &>(g)[d];
- }
- const vertex_bundle_type &operator[](const vertex_descriptor &d) const {
- return g[d];
- }
-
- edge_bundle_type &operator[](const edge_descriptor &d) {
- return const_cast<base_type &>(g)[d.underlying_edge];
- }
- const edge_bundle_type &operator[](const edge_descriptor &d) const {
- return g[d.underlying_edge];
- }
-
- static vertex_descriptor null_vertex() { return Traits::null_vertex(); }
-
- // Accessor free functions follow
-
- friend std::pair<vertex_iterator, vertex_iterator>
- vertices(const undirected_graph &ug) {
- return vertices(ug.g);
- }
-
- friend std::pair<edge_iterator, edge_iterator>
- edges(const undirected_graph &ug) {
- auto e = edges(ug.g);
- return std::make_pair(edge_iterator(e.first, ug.g),
- edge_iterator(e.second, ug.g));
- }
-
- friend std::pair<out_edge_iterator, out_edge_iterator>
- out_edges(const vertex_descriptor &u, const undirected_graph &ug) {
- return std::make_pair(out_edge_iterator(u, ug.g, false),
- out_edge_iterator(u, ug.g, true));
- }
-
- friend vertices_size_type num_vertices(const undirected_graph &ug) {
- return num_vertices(ug.g);
- }
-
- friend edges_size_type num_edges(const undirected_graph &ug) {
- auto p = edges(ug);
- return std::distance(p.first, p.second);
- }
-
- friend degree_size_type out_degree(const vertex_descriptor &u,
- const undirected_graph &ug) {
- return degree(u, ug);
- }
-
- friend vertex_descriptor vertex(vertices_size_type n,
- const undirected_graph &ug) {
- return vertex(n, ug.g);
- }
-
- friend std::pair<edge_descriptor, bool> edge(const vertex_descriptor &u,
- const vertex_descriptor &v,
- const undirected_graph &ug) {
- auto e = edge(u, v, ug.g);
- if (e.second) {
- return std::make_pair(edge_descriptor(e.first, ug.g, false), true);
- }
- auto e_rev = edge(v, u, ug.g);
- if (e_rev.second) {
- return std::make_pair(edge_descriptor(e_rev.first, ug.g, true),
- true);
- }
- return std::make_pair(edge_descriptor(), false);
- }
-
- friend std::pair<in_edge_iterator, in_edge_iterator>
- in_edges(const vertex_descriptor &v, const undirected_graph &ug) {
- return std::make_pair(in_edge_iterator(v, ug.g, false),
- in_edge_iterator(v, ug.g, true));
- }
-
- friend std::pair<adjacency_iterator, adjacency_iterator>
- adjacent_vertices(const vertex_descriptor &u, const undirected_graph &ug) {
- out_edge_iterator oi, oe;
- std::tie(oi, oe) = out_edges(u, ug);
- return std::make_pair(adjacency_iterator(oi, &ug),
- adjacency_iterator(oe, &ug));
- }
-
- friend std::pair<inv_adjacency_iterator, inv_adjacency_iterator>
- inv_adjacent_vertices(const vertex_descriptor &v,
- const undirected_graph &ug) {
- in_edge_iterator ei, ee;
- std::tie(ei, ee) = in_edges(v, ug);
- return std::make_pair(inv_adjacency_iterator(ei, &ug),
- inv_adjacency_iterator(ee, &ug));
- }
-
- friend degree_size_type in_degree(const vertex_descriptor &v,
- const undirected_graph &ug) {
- return degree(v, ug);
- }
-
- friend vertex_descriptor source(const edge_descriptor &e,
- const undirected_graph &) {
- return e.get_source();
- }
-
- friend vertex_descriptor target(const edge_descriptor &e,
- const undirected_graph &) {
- return e.get_target();
- }
-
- friend degree_size_type degree(const vertex_descriptor &u,
- const undirected_graph &ug) {
- auto p = out_edges(u, ug);
- return std::distance(p.first, p.second);
- }
-
- // Property accessors.
-
- template <typename Property>
- using prop_map = typename boost::property_map<undirected_graph, Property>;
-
- template <typename Property>
- friend typename prop_map<Property>::type
- get(Property p, undirected_graph &ug) {
- return get(p, ug.g);
- }
-
- template <typename Property>
- friend typename prop_map<Property>::const_type
- get(Property p, const undirected_graph &ug) {
- return get(p, ug.g);
- }
-
- template <typename Property, typename Key>
- friend typename boost::property_traits<
- typename prop_map<Property>::const_type>::value_type
- get(Property p, const undirected_graph &ug, const Key &k) {
- return get(p, ug.g, get_underlying_descriptor(k));
- }
-
- template <typename Property, typename Value, typename Key>
- friend void put(Property p, const undirected_graph &ug,
- const Key &k, const Value &val) {
- put(p, const_cast<BidirectionalGraph &>(ug.g),
- get_underlying_descriptor(k), val);
- }
-
-private:
- // Accessors are here because our free friend functions (above) cannot see
- // edge_descriptor's private members.
- static typename base_type::vertex_descriptor
- get_underlying_descriptor(const vertex_descriptor &v) {
- return v;
- }
- static typename base_type::edge_descriptor
- get_underlying_descriptor(const edge_descriptor &e) {
- return e.underlying_edge;
- }
-
- // Reference to underlying bidirectional graph
- GraphRef g;
-};
-
-template <class BidirectionalGraph>
-undirected_graph<BidirectionalGraph>
-make_undirected_graph(const BidirectionalGraph &g) {
- return undirected_graph<BidirectionalGraph>(g);
-}
-
-} // namespace ue2
-
-namespace boost {
-
-/* Derive all the property map specializations from the underlying
- * bidirectional graph. */
-
-template <typename BidirectionalGraph, typename GraphRef, typename Property>
-struct property_map<ue2::undirected_graph<BidirectionalGraph, GraphRef>,
- Property> {
- using base_map_type = property_map<BidirectionalGraph, Property>;
- using type = typename base_map_type::type;
- using const_type = typename base_map_type::const_type;
-};
-
-template <class BidirectionalGraph, class GraphRef>
-struct vertex_property_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
- : vertex_property_type<BidirectionalGraph> {};
-
-template <class BidirectionalGraph, class GraphRef>
-struct edge_property_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
- : edge_property_type<BidirectionalGraph> {};
-
-template <class BidirectionalGraph, class GraphRef>
-struct graph_property_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
- : graph_property_type<BidirectionalGraph> {};
-
-template <typename BidirectionalGraph, typename GraphRef>
-struct vertex_bundle_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
- : vertex_bundle_type<BidirectionalGraph> {};
-
-template <typename BidirectionalGraph, typename GraphRef>
-struct edge_bundle_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
- : edge_bundle_type<BidirectionalGraph> {};
-
-template <typename BidirectionalGraph, typename GraphRef>
-struct graph_bundle_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
- : graph_bundle_type<BidirectionalGraph> {};
-
-} // namespace boost
-
-#endif // GRAPH_UNDIRECTED_H
+/*
+ * Copyright (c) 2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ * \brief Adaptor that presents an undirected view of a bidirectional BGL graph.
+ *
+ * Analogous to the reverse_graph adapter. You can construct one of these for
+ * bidirectional graph g with:
+ *
+ * auto ug = make_undirected_graph(g);
+ *
+ * The vertex descriptor type is the same as that of the underlying graph, but
+ * the edge descriptor is different.
+ */
+
+#ifndef GRAPH_UNDIRECTED_H
+#define GRAPH_UNDIRECTED_H
+
+#include "util/operators.h"
+
+#include <boost/graph/adjacency_iterator.hpp>
+#include <boost/graph/graph_traits.hpp>
+#include <boost/graph/properties.hpp>
+#include <boost/iterator/iterator_facade.hpp>
+
+#include <type_traits>
+#include <utility>
+
+namespace ue2 {
+
+struct undirected_graph_tag {};
+
+template <class BidirectionalGraph, class GraphRef>
+class undirected_graph;
+
+namespace undirected_detail {
+
+template <typename BidirectionalGraph>
+class undirected_graph_edge_descriptor
+ : totally_ordered<undirected_graph_edge_descriptor<BidirectionalGraph>> {
+ using base_graph_type = BidirectionalGraph;
+ using base_graph_traits = typename boost::graph_traits<base_graph_type>;
+ using base_edge_type = typename base_graph_traits::edge_descriptor;
+ using base_vertex_type = typename base_graph_traits::vertex_descriptor;
+
+ base_edge_type underlying_edge;
+ const base_graph_type *g;
+ bool reverse; // if true, reverse vertices in source() and target()
+
+ inline std::pair<base_vertex_type, base_vertex_type>
+ canonical_edge() const {
+ auto u = std::min(source(underlying_edge, *g),
+ target(underlying_edge, *g));
+ auto v = std::max(source(underlying_edge, *g),
+ target(underlying_edge, *g));
+ return std::make_pair(u, v);
+ }
+
+ template <class BidiGraph, class GraphRef>
+ friend class ::ue2::undirected_graph;
+
+public:
+ undirected_graph_edge_descriptor() = default;
+
+ undirected_graph_edge_descriptor(base_edge_type edge,
+ const base_graph_type &g_in,
+ bool reverse_in)
+ : underlying_edge(std::move(edge)), g(&g_in), reverse(reverse_in) {}
+
+ bool operator==(const undirected_graph_edge_descriptor &other) const {
+ return canonical_edge() == other.canonical_edge();
+ }
+
+ bool operator<(const undirected_graph_edge_descriptor &other) const {
+ return canonical_edge() < other.canonical_edge();
+ }
+
+ base_vertex_type get_source() const {
+ return reverse ? target(underlying_edge, *g)
+ : source(underlying_edge, *g);
+ }
+
+ base_vertex_type get_target() const {
+ return reverse ? source(underlying_edge, *g)
+ : target(underlying_edge, *g);
+ }
+};
+
+} // namespace undirected_detail
+
+template <class BidirectionalGraph, class GraphRef = const BidirectionalGraph &>
+class undirected_graph {
+private:
+ using Self = undirected_graph<BidirectionalGraph, GraphRef>;
+ using Traits = boost::graph_traits<BidirectionalGraph>;
+
+public:
+ using base_type = BidirectionalGraph;
+ using base_ref_type = GraphRef;
+
+ explicit undirected_graph(GraphRef g_in) : g(g_in) {}
+
+ // Graph requirements
+ using vertex_descriptor = typename Traits::vertex_descriptor;
+ using edge_descriptor =
+ undirected_detail::undirected_graph_edge_descriptor<base_type>;
+ using directed_category = boost::undirected_tag;
+ using edge_parallel_category = boost::disallow_parallel_edge_tag;
+ using traversal_category = typename Traits::traversal_category;
+
+ // IncidenceGraph requirements
+
+ /**
+ * \brief Templated iterator used for out_edge_iterator and
+ * in_edge_iterator, depending on the value of Reverse.
+ */
+ template <bool Reverse>
+ class adj_edge_iterator
+ : public boost::iterator_facade<
+ adj_edge_iterator<Reverse>, edge_descriptor,
+ boost::forward_traversal_tag, edge_descriptor> {
+ vertex_descriptor u;
+ const base_type *g;
+ typename Traits::in_edge_iterator in_it;
+ typename Traits::out_edge_iterator out_it;
+ bool done_in = false;
+ public:
+ adj_edge_iterator() = default;
+
+ adj_edge_iterator(vertex_descriptor u_in, const base_type &g_in,
+ bool end_iter)
+ : u(std::move(u_in)), g(&g_in) {
+ auto pi = in_edges(u, *g);
+ auto po = out_edges(u, *g);
+ if (end_iter) {
+ in_it = pi.second;
+ out_it = po.second;
+ done_in = true;
+ } else {
+ in_it = pi.first;
+ out_it = po.first;
+ if (in_it == pi.second) {
+ done_in = true;
+ find_first_valid_out();
+ }
+ }
+ }
+
+ private:
+ friend class boost::iterator_core_access;
+
+ void find_first_valid_out() {
+ auto out_end = out_edges(u, *g).second;
+ for (; out_it != out_end; ++out_it) {
+ auto v = target(*out_it, *g);
+ if (!edge(v, u, *g).second) {
+ break;
+ }
+ }
+ }
+
+ void increment() {
+ if (!done_in) {
+ auto in_end = in_edges(u, *g).second;
+ assert(in_it != in_end);
+ ++in_it;
+ if (in_it == in_end) {
+ done_in = true;
+ find_first_valid_out();
+ }
+ } else {
+ ++out_it;
+ find_first_valid_out();
+ }
+ }
+ bool equal(const adj_edge_iterator &other) const {
+ return in_it == other.in_it && out_it == other.out_it;
+ }
+ edge_descriptor dereference() const {
+ if (done_in) {
+ return edge_descriptor(*out_it, *g, Reverse);
+ } else {
+ return edge_descriptor(*in_it, *g, !Reverse);
+ }
+ }
+ };
+
+ using out_edge_iterator = adj_edge_iterator<false>;
+ using in_edge_iterator = adj_edge_iterator<true>;
+
+ using degree_size_type = typename Traits::degree_size_type;
+
+ // AdjacencyGraph requirements
+ using adjacency_iterator =
+ typename boost::adjacency_iterator_generator<Self, vertex_descriptor,
+ out_edge_iterator>::type;
+ using inv_adjacency_iterator =
+ typename boost::inv_adjacency_iterator_generator<
+ Self, vertex_descriptor, in_edge_iterator>::type;
+
+ // VertexListGraph requirements
+ using vertex_iterator = typename Traits::vertex_iterator;
+
+ // EdgeListGraph requirements
+ enum {
+ is_edge_list = std::is_convertible<traversal_category,
+ boost::edge_list_graph_tag>::value
+ };
+
+ /** \brief Iterator used for edges(). */
+ class edge_iterator
+ : public boost::iterator_facade<edge_iterator, edge_descriptor,
+ boost::forward_traversal_tag,
+ edge_descriptor> {
+ const base_type *g;
+ typename Traits::edge_iterator it;
+ public:
+ edge_iterator() = default;
+
+ edge_iterator(typename Traits::edge_iterator it_in,
+ const base_type &g_in)
+ : g(&g_in), it(std::move(it_in)) {
+ find_first_valid_edge();
+ }
+
+ private:
+ friend class boost::iterator_core_access;
+
+ void find_first_valid_edge() {
+ const auto end = edges(*g).second;
+ for (; it != end; ++it) {
+ const auto &u = source(*it, *g);
+ const auto &v = target(*it, *g);
+ if (!edge(v, u, *g).second) {
+ break; // No reverse edge, we must visit this one
+ }
+ if (u <= v) {
+ // We have a reverse edge, but we'll return this one (and
+ // skip the other). Note that (u, u) shouldn't be skipped.
+ break;
+ }
+ }
+ }
+
+ void increment() {
+ assert(it != edges(*g).second);
+ ++it;
+ find_first_valid_edge();
+ }
+ bool equal(const edge_iterator &other) const {
+ return it == other.it;
+ }
+ edge_descriptor dereference() const {
+ return edge_descriptor(*it, *g, false);
+ }
+ };
+
+ using vertices_size_type = typename Traits::vertices_size_type;
+ using edges_size_type = typename Traits::edges_size_type;
+
+ using graph_tag = undirected_graph_tag;
+
+ using vertex_bundle_type =
+ typename boost::vertex_bundle_type<base_type>::type;
+ using edge_bundle_type = typename boost::edge_bundle_type<base_type>::type;
+
+ vertex_bundle_type &operator[](const vertex_descriptor &d) {
+ return const_cast<base_type &>(g)[d];
+ }
+ const vertex_bundle_type &operator[](const vertex_descriptor &d) const {
+ return g[d];
+ }
+
+ edge_bundle_type &operator[](const edge_descriptor &d) {
+ return const_cast<base_type &>(g)[d.underlying_edge];
+ }
+ const edge_bundle_type &operator[](const edge_descriptor &d) const {
+ return g[d.underlying_edge];
+ }
+
+ static vertex_descriptor null_vertex() { return Traits::null_vertex(); }
+
+ // Accessor free functions follow
+
+ friend std::pair<vertex_iterator, vertex_iterator>
+ vertices(const undirected_graph &ug) {
+ return vertices(ug.g);
+ }
+
+ friend std::pair<edge_iterator, edge_iterator>
+ edges(const undirected_graph &ug) {
+ auto e = edges(ug.g);
+ return std::make_pair(edge_iterator(e.first, ug.g),
+ edge_iterator(e.second, ug.g));
+ }
+
+ friend std::pair<out_edge_iterator, out_edge_iterator>
+ out_edges(const vertex_descriptor &u, const undirected_graph &ug) {
+ return std::make_pair(out_edge_iterator(u, ug.g, false),
+ out_edge_iterator(u, ug.g, true));
+ }
+
+ friend vertices_size_type num_vertices(const undirected_graph &ug) {
+ return num_vertices(ug.g);
+ }
+
+ friend edges_size_type num_edges(const undirected_graph &ug) {
+ auto p = edges(ug);
+ return std::distance(p.first, p.second);
+ }
+
+ friend degree_size_type out_degree(const vertex_descriptor &u,
+ const undirected_graph &ug) {
+ return degree(u, ug);
+ }
+
+ friend vertex_descriptor vertex(vertices_size_type n,
+ const undirected_graph &ug) {
+ return vertex(n, ug.g);
+ }
+
+ friend std::pair<edge_descriptor, bool> edge(const vertex_descriptor &u,
+ const vertex_descriptor &v,
+ const undirected_graph &ug) {
+ auto e = edge(u, v, ug.g);
+ if (e.second) {
+ return std::make_pair(edge_descriptor(e.first, ug.g, false), true);
+ }
+ auto e_rev = edge(v, u, ug.g);
+ if (e_rev.second) {
+ return std::make_pair(edge_descriptor(e_rev.first, ug.g, true),
+ true);
+ }
+ return std::make_pair(edge_descriptor(), false);
+ }
+
+ friend std::pair<in_edge_iterator, in_edge_iterator>
+ in_edges(const vertex_descriptor &v, const undirected_graph &ug) {
+ return std::make_pair(in_edge_iterator(v, ug.g, false),
+ in_edge_iterator(v, ug.g, true));
+ }
+
+ friend std::pair<adjacency_iterator, adjacency_iterator>
+ adjacent_vertices(const vertex_descriptor &u, const undirected_graph &ug) {
+ out_edge_iterator oi, oe;
+ std::tie(oi, oe) = out_edges(u, ug);
+ return std::make_pair(adjacency_iterator(oi, &ug),
+ adjacency_iterator(oe, &ug));
+ }
+
+ friend std::pair<inv_adjacency_iterator, inv_adjacency_iterator>
+ inv_adjacent_vertices(const vertex_descriptor &v,
+ const undirected_graph &ug) {
+ in_edge_iterator ei, ee;
+ std::tie(ei, ee) = in_edges(v, ug);
+ return std::make_pair(inv_adjacency_iterator(ei, &ug),
+ inv_adjacency_iterator(ee, &ug));
+ }
+
+ friend degree_size_type in_degree(const vertex_descriptor &v,
+ const undirected_graph &ug) {
+ return degree(v, ug);
+ }
+
+ friend vertex_descriptor source(const edge_descriptor &e,
+ const undirected_graph &) {
+ return e.get_source();
+ }
+
+ friend vertex_descriptor target(const edge_descriptor &e,
+ const undirected_graph &) {
+ return e.get_target();
+ }
+
+ friend degree_size_type degree(const vertex_descriptor &u,
+ const undirected_graph &ug) {
+ auto p = out_edges(u, ug);
+ return std::distance(p.first, p.second);
+ }
+
+ // Property accessors.
+
+ template <typename Property>
+ using prop_map = typename boost::property_map<undirected_graph, Property>;
+
+ template <typename Property>
+ friend typename prop_map<Property>::type
+ get(Property p, undirected_graph &ug) {
+ return get(p, ug.g);
+ }
+
+ template <typename Property>
+ friend typename prop_map<Property>::const_type
+ get(Property p, const undirected_graph &ug) {
+ return get(p, ug.g);
+ }
+
+ template <typename Property, typename Key>
+ friend typename boost::property_traits<
+ typename prop_map<Property>::const_type>::value_type
+ get(Property p, const undirected_graph &ug, const Key &k) {
+ return get(p, ug.g, get_underlying_descriptor(k));
+ }
+
+ template <typename Property, typename Value, typename Key>
+ friend void put(Property p, const undirected_graph &ug,
+ const Key &k, const Value &val) {
+ put(p, const_cast<BidirectionalGraph &>(ug.g),
+ get_underlying_descriptor(k), val);
+ }
+
+private:
+ // Accessors are here because our free friend functions (above) cannot see
+ // edge_descriptor's private members.
+ static typename base_type::vertex_descriptor
+ get_underlying_descriptor(const vertex_descriptor &v) {
+ return v;
+ }
+ static typename base_type::edge_descriptor
+ get_underlying_descriptor(const edge_descriptor &e) {
+ return e.underlying_edge;
+ }
+
+ // Reference to underlying bidirectional graph
+ GraphRef g;
+};
+
+template <class BidirectionalGraph>
+undirected_graph<BidirectionalGraph>
+make_undirected_graph(const BidirectionalGraph &g) {
+ return undirected_graph<BidirectionalGraph>(g);
+}
+
+} // namespace ue2
+
+namespace boost {
+
+/* Derive all the property map specializations from the underlying
+ * bidirectional graph. */
+
+template <typename BidirectionalGraph, typename GraphRef, typename Property>
+struct property_map<ue2::undirected_graph<BidirectionalGraph, GraphRef>,
+ Property> {
+ using base_map_type = property_map<BidirectionalGraph, Property>;
+ using type = typename base_map_type::type;
+ using const_type = typename base_map_type::const_type;
+};
+
+template <class BidirectionalGraph, class GraphRef>
+struct vertex_property_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
+ : vertex_property_type<BidirectionalGraph> {};
+
+template <class BidirectionalGraph, class GraphRef>
+struct edge_property_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
+ : edge_property_type<BidirectionalGraph> {};
+
+template <class BidirectionalGraph, class GraphRef>
+struct graph_property_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
+ : graph_property_type<BidirectionalGraph> {};
+
+template <typename BidirectionalGraph, typename GraphRef>
+struct vertex_bundle_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
+ : vertex_bundle_type<BidirectionalGraph> {};
+
+template <typename BidirectionalGraph, typename GraphRef>
+struct edge_bundle_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
+ : edge_bundle_type<BidirectionalGraph> {};
+
+template <typename BidirectionalGraph, typename GraphRef>
+struct graph_bundle_type<ue2::undirected_graph<BidirectionalGraph, GraphRef>>
+ : graph_bundle_type<BidirectionalGraph> {};
+
+} // namespace boost
+
+#endif // GRAPH_UNDIRECTED_H
diff --git a/contrib/libs/hyperscan/src/util/logical.h b/contrib/libs/hyperscan/src/util/logical.h
index 134c786ccd..0c8b6469aa 100644
--- a/contrib/libs/hyperscan/src/util/logical.h
+++ b/contrib/libs/hyperscan/src/util/logical.h
@@ -1,77 +1,77 @@
-/*
- * Copyright (c) 2018, Intel Corporation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Intel Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/** \file
- * \brief Inline functions for manipulating logical combinations.
- */
-
-#ifndef LOGICAL_H
-#define LOGICAL_H
-
-#include "ue2common.h"
-
-/** Index meaning a given logical key is invalid. */
-#define INVALID_LKEY (~(u32)0)
-#define INVALID_CKEY INVALID_LKEY
-
-/** Logical operation type; priority runs from high to low. */
-enum LogicalOpType {
- LOGICAL_OP_NOT,
- LOGICAL_OP_AND,
- LOGICAL_OP_OR,
- LAST_LOGICAL_OP = LOGICAL_OP_OR //!< Sentinel.
-};
-
-#define UNKNOWN_OP (~(u32)0)
-
-/** A logical operation consists of 4 parts. */
-struct LogicalOp {
- u32 id; //!< logical operator/operation id
- u32 op; //!< LogicalOpType
- u32 lo; //!< left operand
- u32 ro; //!< right operand
-};
-
-/** Each logical combination has its info:
- * it occupies a region in the LogicalOp vector,
- * and it has an exhaustion key for single-match mode. */
-struct CombInfo {
- u32 id;
- u32 ekey; //!< exhaustion key
- u32 start; //!< ckey of logical operation to start calculating
- u32 result; //!< ckey of logical operation to give final result
- u64a min_offset;
- u64a max_offset;
-};
-
-/** Temporarily used to separate operation ids from report lkeys
- * when building the logicalTree in the shunting-yard algorithm;
- * operation ids are finally renumbered to follow report lkeys. */
-#define LOGICAL_OP_BIT 0x80000000UL
-
-#endif
+/*
+ * Copyright (c) 2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** \file
+ * \brief Inline functions for manipulating logical combinations.
+ */
+
+#ifndef LOGICAL_H
+#define LOGICAL_H
+
+#include "ue2common.h"
+
+/** Index meaning a given logical key is invalid. */
+#define INVALID_LKEY (~(u32)0)
+#define INVALID_CKEY INVALID_LKEY
+
+/** Logical operation type; priority runs from high to low. */
+enum LogicalOpType {
+ LOGICAL_OP_NOT,
+ LOGICAL_OP_AND,
+ LOGICAL_OP_OR,
+ LAST_LOGICAL_OP = LOGICAL_OP_OR //!< Sentinel.
+};
+
+#define UNKNOWN_OP (~(u32)0)
+
+/** A logical operation consists of 4 parts. */
+struct LogicalOp {
+ u32 id; //!< logical operator/operation id
+ u32 op; //!< LogicalOpType
+ u32 lo; //!< left operand
+ u32 ro; //!< right operand
+};
+
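+/* Illustrative sketch (assumed encoding, not from the original source):
+ * the combination "(101 & 102) | 103" over report ids could lower to two
+ * LogicalOp entries,
+ *     {id: ckey0, op: LOGICAL_OP_AND, lo: lkey(101), ro: lkey(102)}
+ *     {id: ckey1, op: LOGICAL_OP_OR,  lo: ckey0,     ro: lkey(103)}
+ * where lkey() is a stand-in for the report-id-to-logical-key mapping and
+ * ckey1 would be the CombInfo::result of the combination. */
+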
+/** Each logical combination has its info:
+ * it occupies a region in the LogicalOp vector,
+ * and it has an exhaustion key for single-match mode. */
+struct CombInfo {
+ u32 id;
+ u32 ekey; //!< exhaustion key
+ u32 start; //!< ckey of logical operation to start calculating
+ u32 result; //!< ckey of logical operation to give final result
+ u64a min_offset;
+ u64a max_offset;
+};
+
+/** Temporarily used to separate operation ids from report lkeys
+ * when building the logicalTree in the shunting-yard algorithm;
+ * operation ids are finally renumbered to follow report lkeys. */
+#define LOGICAL_OP_BIT 0x80000000UL
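+
+/* For illustration (assumed usage, inferred from the comment above): while
+ * the shunting-yard pass runs, an operation id is tagged as
+ *     u32 tagged = op_index | LOGICAL_OP_BIT;
+ * so it cannot collide with any report lkey, and is recovered with
+ *     u32 op_index = tagged & ~LOGICAL_OP_BIT;
+ * before the final renumbering. */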
+
+#endif
diff --git a/contrib/libs/hyperscan/src/util/multibit.h b/contrib/libs/hyperscan/src/util/multibit.h
index caa7bd7b20..c3a4ba461a 100644
--- a/contrib/libs/hyperscan/src/util/multibit.h
+++ b/contrib/libs/hyperscan/src/util/multibit.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -1197,11 +1197,11 @@ u32 mmbit_sparse_iter_begin(const u8 *bits, u32 total_bits, u32 *idx,
assert(ISALIGNED_N(it_root, alignof(struct mmbit_sparse_iter)));
// Our state _may_ be on the stack
-#ifndef _WIN32
+#ifndef _WIN32
assert(ISALIGNED_N(s, alignof(struct mmbit_sparse_state)));
-#else
- assert(ISALIGNED_N(s, 4));
-#endif
+#else
+ assert(ISALIGNED_N(s, 4));
+#endif
MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
// iterator should have _something_ at the root level
@@ -1309,11 +1309,11 @@ u32 mmbit_sparse_iter_next(const u8 *bits, u32 total_bits, u32 last_key,
assert(ISALIGNED_N(it_root, alignof(struct mmbit_sparse_iter)));
// Our state _may_ be on the stack
-#ifndef _WIN32
+#ifndef _WIN32
assert(ISALIGNED_N(s, alignof(struct mmbit_sparse_state)));
-#else
- assert(ISALIGNED_N(s, 4));
-#endif
+#else
+ assert(ISALIGNED_N(s, 4));
+#endif
MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
MDEBUG_PRINTF("NEXT (total_bits=%u, last_key=%u)\n", total_bits, last_key);
@@ -1466,11 +1466,11 @@ void mmbit_sparse_iter_unset(u8 *bits, u32 total_bits,
assert(ISALIGNED_N(it, alignof(struct mmbit_sparse_iter)));
// Our state _may_ be on the stack
-#ifndef _WIN32
+#ifndef _WIN32
assert(ISALIGNED_N(s, alignof(struct mmbit_sparse_state)));
-#else
- assert(ISALIGNED_N(s, 4));
-#endif
+#else
+ assert(ISALIGNED_N(s, 4));
+#endif
MDEBUG_PRINTF("%p total_bits %u\n", bits, total_bits);
diff --git a/contrib/libs/hyperscan/src/util/multibit_build.cpp b/contrib/libs/hyperscan/src/util/multibit_build.cpp
index 9cf5799424..67bb9ec702 100644
--- a/contrib/libs/hyperscan/src/util/multibit_build.cpp
+++ b/contrib/libs/hyperscan/src/util/multibit_build.cpp
@@ -192,8 +192,8 @@ vector<mmbit_sparse_iter> mmbBuildSparseIterator(const vector<u32> &bits,
template<typename T>
static
void add_scatter(vector<T> *out, u32 offset, u64a mask) {
- out->emplace_back();
- T &su = out->back();
+ out->emplace_back();
+ T &su = out->back();
memset(&su, 0, sizeof(su));
su.offset = offset;
su.val = mask;
diff --git a/contrib/libs/hyperscan/src/util/multibit_build.h b/contrib/libs/hyperscan/src/util/multibit_build.h
index 52bac41f6a..24f1bb55b0 100644
--- a/contrib/libs/hyperscan/src/util/multibit_build.h
+++ b/contrib/libs/hyperscan/src/util/multibit_build.h
@@ -33,7 +33,7 @@
#ifndef MULTIBIT_BUILD_H
#define MULTIBIT_BUILD_H
-#include "hs_common.h"
+#include "hs_common.h"
#include "multibit_internal.h"
#include "hash.h"
diff --git a/contrib/libs/hyperscan/src/util/report.h b/contrib/libs/hyperscan/src/util/report.h
index 35922bcedb..ee830d0f10 100644
--- a/contrib/libs/hyperscan/src/util/report.h
+++ b/contrib/libs/hyperscan/src/util/report.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -36,7 +36,7 @@
#include "ue2common.h"
#include "util/exhaust.h" // for INVALID_EKEY
-#include "util/logical.h" // for INVALID_LKEY
+#include "util/logical.h" // for INVALID_LKEY
#include "util/hash.h"
#include "util/order_check.h"
@@ -108,16 +108,16 @@ struct Report {
* exhaustible, this will be INVALID_EKEY. */
u32 ekey = INVALID_EKEY;
- /** \brief Logical combination key for the combination this report is in.
- *
- * If the report is part of a logical combination, this is the lkey to
- * check before reporting a match; the lkey will also be set before the
- * check. If not in a logical combination, this will be INVALID_LKEY. */
- u32 lkey = INVALID_LKEY;
-
- /** \brief Quiet flag for expressions in any logical combination. */
- bool quiet = false;
-
+ /** \brief Logical combination key for the combination this report is in.
+ *
+ * If the report is part of a logical combination, this is the lkey to
+ * check before reporting a match; the lkey will also be set before the
+ * check. If not in a logical combination, this will be INVALID_LKEY. */
+ u32 lkey = INVALID_LKEY;
+
+ /** \brief Quiet flag for expressions in any logical combination. */
+ bool quiet = false;
+
/** \brief Adjustment to add to the match offset when we report a match.
*
* This is usually used for reports attached to states that form part of a
@@ -218,17 +218,17 @@ bool operator==(const Report &a, const Report &b) {
}
static inline
-Report makeECallback(u32 report, s32 offsetAdjust, u32 ekey, bool quiet) {
+Report makeECallback(u32 report, s32 offsetAdjust, u32 ekey, bool quiet) {
Report ir(EXTERNAL_CALLBACK, report);
ir.offsetAdjust = offsetAdjust;
ir.ekey = ekey;
- ir.quiet = (u8)quiet;
+ ir.quiet = (u8)quiet;
return ir;
}
static inline
Report makeCallback(u32 report, s32 offsetAdjust) {
- return makeECallback(report, offsetAdjust, INVALID_EKEY, false);
+ return makeECallback(report, offsetAdjust, INVALID_EKEY, false);
}
static inline
diff --git a/contrib/libs/hyperscan/src/util/report_manager.cpp b/contrib/libs/hyperscan/src/util/report_manager.cpp
index bf6208849d..78b9b73dfc 100644
--- a/contrib/libs/hyperscan/src/util/report_manager.cpp
+++ b/contrib/libs/hyperscan/src/util/report_manager.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -95,31 +95,31 @@ u32 ReportManager::getExhaustibleKey(u32 a) {
return it->second;
}
-const set<u32> &ReportManager::getRelateCKeys(u32 lkey) {
- auto it = pl.lkey2ckeys.find(lkey);
- assert(it != pl.lkey2ckeys.end());
- return it->second;
-}
-
-void ReportManager::logicalKeyRenumber() {
- pl.logicalKeyRenumber();
- // assign to corresponding report
- for (u32 i = 0; i < reportIds.size(); i++) {
- Report &ir = reportIds[i];
- if (contains(pl.toLogicalKeyMap, ir.onmatch)) {
- ir.lkey = pl.toLogicalKeyMap.at(ir.onmatch);
- }
- }
-}
-
-const vector<LogicalOp> &ReportManager::getLogicalTree() const {
- return pl.logicalTree;
-}
-
-const vector<CombInfo> &ReportManager::getCombInfoMap() const {
- return pl.combInfoMap;
-}
-
+const set<u32> &ReportManager::getRelateCKeys(u32 lkey) {
+ auto it = pl.lkey2ckeys.find(lkey);
+ assert(it != pl.lkey2ckeys.end());
+ return it->second;
+}
+
+void ReportManager::logicalKeyRenumber() {
+ pl.logicalKeyRenumber();
+ // assign to corresponding report
+ for (u32 i = 0; i < reportIds.size(); i++) {
+ Report &ir = reportIds[i];
+ if (contains(pl.toLogicalKeyMap, ir.onmatch)) {
+ ir.lkey = pl.toLogicalKeyMap.at(ir.onmatch);
+ }
+ }
+}
+
+const vector<LogicalOp> &ReportManager::getLogicalTree() const {
+ return pl.logicalTree;
+}
+
+const vector<CombInfo> &ReportManager::getCombInfoMap() const {
+ return pl.combInfoMap;
+}
+
u32 ReportManager::getUnassociatedExhaustibleKey(void) {
u32 rv = toExhaustibleKeyMap.size();
bool inserted;
@@ -140,18 +140,18 @@ u32 ReportManager::numEkeys() const {
return (u32) toExhaustibleKeyMap.size();
}
-u32 ReportManager::numLogicalKeys() const {
- return (u32) pl.toLogicalKeyMap.size();
-}
-
-u32 ReportManager::numLogicalOps() const {
- return (u32) pl.logicalTree.size();
-}
-
-u32 ReportManager::numCkeys() const {
- return (u32) pl.toCombKeyMap.size();
-}
-
+u32 ReportManager::numLogicalKeys() const {
+ return (u32) pl.toLogicalKeyMap.size();
+}
+
+u32 ReportManager::numLogicalOps() const {
+ return (u32) pl.logicalTree.size();
+}
+
+u32 ReportManager::numCkeys() const {
+ return (u32) pl.toCombKeyMap.size();
+}
+
bool ReportManager::patternSetCanExhaust() const {
return global_exhaust && !toExhaustibleKeyMap.empty();
}
@@ -256,7 +256,7 @@ Report ReportManager::getBasicInternalReport(const ExpressionInfo &expr,
ekey = getExhaustibleKey(expr.report);
}
- return makeECallback(expr.report, adj, ekey, expr.quiet);
+ return makeECallback(expr.report, adj, ekey, expr.quiet);
}
void ReportManager::setProgramOffset(ReportID id, u32 programOffset) {
diff --git a/contrib/libs/hyperscan/src/util/report_manager.h b/contrib/libs/hyperscan/src/util/report_manager.h
index 017545a5d1..015dc9c855 100644
--- a/contrib/libs/hyperscan/src/util/report_manager.h
+++ b/contrib/libs/hyperscan/src/util/report_manager.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, Intel Corporation
+ * Copyright (c) 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -38,7 +38,7 @@
#include "util/compile_error.h"
#include "util/noncopyable.h"
#include "util/report.h"
-#include "parser/logical_combination.h"
+#include "parser/logical_combination.h"
#include <map>
#include <set>
@@ -81,15 +81,15 @@ public:
/** \brief Total number of exhaustion keys. */
u32 numEkeys() const;
- /** \brief Total number of logical keys. */
- u32 numLogicalKeys() const;
-
- /** \brief Total number of logical operators. */
- u32 numLogicalOps() const;
-
- /** \brief Total number of combination keys. */
- u32 numCkeys() const;
-
+ /** \brief Total number of logical keys. */
+ u32 numLogicalKeys() const;
+
+ /** \brief Total number of logical operators. */
+ u32 numLogicalOps() const;
+
+ /** \brief Total number of combination keys. */
+ u32 numCkeys() const;
+
/** \brief True if the pattern set can exhaust (i.e. all patterns are
* highlander). */
bool patternSetCanExhaust() const;
@@ -120,19 +120,19 @@ public:
* assigning one if necessary. */
u32 getExhaustibleKey(u32 expressionIndex);
- /** \brief Get lkey's corresponding ckeys. */
- const std::set<u32> &getRelateCKeys(u32 lkey);
-
- /** \brief Renumber lkeys for logical operations, once all logical
- * expressions have been parsed. */
- void logicalKeyRenumber();
-
- /** \brief Used in Rose for writing bytecode. */
- const std::vector<LogicalOp> &getLogicalTree() const;
-
- /** \brief Used in Rose for writing bytecode. */
- const std::vector<CombInfo> &getCombInfoMap() const;
-
+ /** \brief Get lkey's corresponding ckeys. */
+ const std::set<u32> &getRelateCKeys(u32 lkey);
+
+ /** \brief Renumber lkeys for logical operations, once all logical
+ * expressions have been parsed. */
+ void logicalKeyRenumber();
+
+ /** \brief Used in Rose for writing bytecode. */
+ const std::vector<LogicalOp> &getLogicalTree() const;
+
+ /** \brief Used in Rose for writing bytecode. */
+ const std::vector<CombInfo> &getCombInfoMap() const;
+
/** \brief Fetch the dedupe key associated with the given report. Returns
* ~0U if no dkey is needed. */
u32 getDkey(const Report &r) const;
@@ -145,9 +145,9 @@ public:
* set. */
u32 getProgramOffset(ReportID id) const;
- /** \brief Parsed logical combination structure. */
- ParsedLogical pl;
-
+ /** \brief Parsed logical combination structure. */
+ ParsedLogical pl;
+
private:
/** \brief Grey box ref, for checking resource limits. */
const Grey &grey;
diff --git a/contrib/libs/hyperscan/src/util/simd_utils.h b/contrib/libs/hyperscan/src/util/simd_utils.h
index 03b90e2c7a..d1f060b070 100644
--- a/contrib/libs/hyperscan/src/util/simd_utils.h
+++ b/contrib/libs/hyperscan/src/util/simd_utils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -138,12 +138,12 @@ m128 lshift64_m128(m128 a, unsigned b) {
#define eq128(a, b) _mm_cmpeq_epi8((a), (b))
#define movemask128(a) ((u32)_mm_movemask_epi8((a)))
-#if defined(HAVE_AVX512)
-static really_inline m128 cast512to128(const m512 in) {
- return _mm512_castsi512_si128(in);
-}
-#endif
-
+#if defined(HAVE_AVX512)
+static really_inline m128 cast512to128(const m512 in) {
+ return _mm512_castsi512_si128(in);
+}
+#endif
+
static really_inline m128 set16x8(u8 c) {
return _mm_set1_epi8(c);
}
@@ -156,20 +156,20 @@ static really_inline u32 movd(const m128 in) {
return _mm_cvtsi128_si32(in);
}
-#if defined(HAVE_AVX512)
-static really_inline u32 movd512(const m512 in) {
- // NOTE: it seems gcc doesn't support _mm512_cvtsi512_si32(in),
- // so we use a two-step conversion to work around it.
- return _mm_cvtsi128_si32(_mm512_castsi512_si128(in));
-}
-
-static really_inline u64a movq512(const m512 in) {
- // NOTE: it seems AVX512 doesn't support _mm512_cvtsi512_si64(in),
- // so we use a two-step conversion to work around it.
- return _mm_cvtsi128_si64(_mm512_castsi512_si128(in));
-}
-#endif
-
+#if defined(HAVE_AVX512)
+static really_inline u32 movd512(const m512 in) {
+ // NOTE: it seems gcc doesn't support _mm512_cvtsi512_si32(in),
+ // so we use a two-step conversion to work around it.
+ return _mm_cvtsi128_si32(_mm512_castsi512_si128(in));
+}
+
+static really_inline u64a movq512(const m512 in) {
+ // NOTE: it seems AVX512 doesn't support _mm512_cvtsi512_si64(in),
+ // so we use a two-step conversion to work around it.
+ return _mm_cvtsi128_si64(_mm512_castsi512_si128(in));
+}
+#endif
+
static really_inline u64a movq(const m128 in) {
#if defined(ARCH_X86_64)
return _mm_cvtsi128_si64(in);
@@ -223,24 +223,24 @@ static really_inline m128 or128(m128 a, m128 b) {
return _mm_or_si128(a,b);
}
-#if defined(HAVE_AVX512VBMI)
-static really_inline m512 expand128(m128 a) {
- return _mm512_broadcast_i32x4(a);
-}
-
-static really_inline m512 expand256(m256 a) {
- return _mm512_broadcast_i64x4(a);
-}
-
-static really_inline m512 expand384(m384 a) {
- u64a *lo = (u64a*)&a.lo;
- u64a *mid = (u64a*)&a.mid;
- u64a *hi = (u64a*)&a.hi;
- return _mm512_set_epi64(0ULL, 0ULL, hi[1], hi[0], mid[1], mid[0],
- lo[1], lo[0]);
-}
-#endif
-
+#if defined(HAVE_AVX512VBMI)
+static really_inline m512 expand128(m128 a) {
+ return _mm512_broadcast_i32x4(a);
+}
+
+static really_inline m512 expand256(m256 a) {
+ return _mm512_broadcast_i64x4(a);
+}
+
+static really_inline m512 expand384(m384 a) {
+ u64a *lo = (u64a*)&a.lo;
+ u64a *mid = (u64a*)&a.mid;
+ u64a *hi = (u64a*)&a.hi;
+ return _mm512_set_epi64(0ULL, 0ULL, hi[1], hi[0], mid[1], mid[0],
+ lo[1], lo[0]);
+}
+#endif
+
static really_inline m128 andnot128(m128 a, m128 b) {
return _mm_andnot_si128(a, b);
}
@@ -356,14 +356,14 @@ static really_inline
m512 maskz_pshufb_m512(__mmask64 k, m512 a, m512 b) {
return _mm512_maskz_shuffle_epi8(k, a, b);
}
-
-#if defined(HAVE_AVX512VBMI)
-#define vpermb512(idx, a) _mm512_permutexvar_epi8(idx, a)
-#define maskz_vpermb512(k, idx, a) _mm512_maskz_permutexvar_epi8(k, idx, a)
+
+#if defined(HAVE_AVX512VBMI)
+#define vpermb512(idx, a) _mm512_permutexvar_epi8(idx, a)
+#define maskz_vpermb512(k, idx, a) _mm512_maskz_permutexvar_epi8(k, idx, a)
+#endif
+
#endif
-#endif
-
static really_inline
m128 variable_byte_shift_m128(m128 in, s32 amount) {
assert(amount >= -16 && amount <= 16);
@@ -1031,11 +1031,11 @@ m512 set8x64(u64a a) {
}
static really_inline
-m512 set16x32(u32 a) {
- return _mm512_set1_epi32(a);
-}
-
-static really_inline
+m512 set16x32(u32 a) {
+ return _mm512_set1_epi32(a);
+}
+
+static really_inline
m512 set512_64(u64a hi_3, u64a hi_2, u64a hi_1, u64a hi_0,
u64a lo_3, u64a lo_2, u64a lo_1, u64a lo_0) {
return _mm512_set_epi64(hi_3, hi_2, hi_1, hi_0,
@@ -1052,26 +1052,26 @@ static really_inline
m512 set4x128(m128 a) {
return _mm512_broadcast_i32x4(a);
}
-
-static really_inline
-m512 sadd_u8_m512(m512 a, m512 b) {
- return _mm512_adds_epu8(a, b);
-}
-
-static really_inline
-m512 max_u8_m512(m512 a, m512 b) {
- return _mm512_max_epu8(a, b);
-}
-
-static really_inline
-m512 min_u8_m512(m512 a, m512 b) {
- return _mm512_min_epu8(a, b);
-}
-
-static really_inline
-m512 sub_u8_m512(m512 a, m512 b) {
- return _mm512_sub_epi8(a, b);
-}
+
+static really_inline
+m512 sadd_u8_m512(m512 a, m512 b) {
+ return _mm512_adds_epu8(a, b);
+}
+
+static really_inline
+m512 max_u8_m512(m512 a, m512 b) {
+ return _mm512_max_epu8(a, b);
+}
+
+static really_inline
+m512 min_u8_m512(m512 a, m512 b) {
+ return _mm512_min_epu8(a, b);
+}
+
+static really_inline
+m512 sub_u8_m512(m512 a, m512 b) {
+ return _mm512_sub_epi8(a, b);
+}
#endif
static really_inline
@@ -1259,23 +1259,23 @@ m512 loadu512(const void *ptr) {
#endif
}
-// unaligned store
-static really_inline
-void storeu512(void *ptr, m512 a) {
+// unaligned store
+static really_inline
+void storeu512(void *ptr, m512 a) {
+#if defined(HAVE_AVX512)
+ _mm512_storeu_si512((m512 *)ptr, a);
+#elif defined(HAVE_AVX2)
+ storeu256(ptr, a.lo);
+ storeu256((char *)ptr + 32, a.hi);
+#else
+ storeu128(ptr, a.lo.lo);
+ storeu128((char *)ptr + 16, a.lo.hi);
+ storeu128((char *)ptr + 32, a.hi.lo);
+ storeu128((char *)ptr + 48, a.hi.hi);
+#endif
+}
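+
+/* Usage sketch (illustrative): both unaligned helpers accept arbitrarily
+ * aligned pointers, so a 64-byte block can be copied on any of the three
+ * paths above with:
+ *     m512 v = loadu512(src);
+ *     storeu512(dst, v);
+ */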
+
#if defined(HAVE_AVX512)
- _mm512_storeu_si512((m512 *)ptr, a);
-#elif defined(HAVE_AVX2)
- storeu256(ptr, a.lo);
- storeu256((char *)ptr + 32, a.hi);
-#else
- storeu128(ptr, a.lo.lo);
- storeu128((char *)ptr + 16, a.lo.hi);
- storeu128((char *)ptr + 32, a.hi.lo);
- storeu128((char *)ptr + 48, a.hi.hi);
-#endif
-}
-
-#if defined(HAVE_AVX512)
static really_inline
m512 loadu_maskz_m512(__mmask64 k, const void *ptr) {
return _mm512_maskz_loadu_epi8(k, ptr);
@@ -1287,19 +1287,19 @@ m512 loadu_mask_m512(m512 src, __mmask64 k, const void *ptr) {
}
static really_inline
-void storeu_mask_m512(void *ptr, __mmask64 k, m512 a) {
- _mm512_mask_storeu_epi8(ptr, k, a);
-}
-
-static really_inline
+void storeu_mask_m512(void *ptr, __mmask64 k, m512 a) {
+ _mm512_mask_storeu_epi8(ptr, k, a);
+}
+
+static really_inline
m512 set_mask_m512(__mmask64 k) {
return _mm512_movm_epi8(k);
}
-
-static really_inline
-m256 loadu_maskz_m256(__mmask32 k, const void *ptr) {
- return _mm256_maskz_loadu_epi8(k, ptr);
-}
+
+static really_inline
+m256 loadu_maskz_m256(__mmask32 k, const void *ptr) {
+ return _mm256_maskz_loadu_epi8(k, ptr);
+}
#endif
// packed unaligned store of first N bytes
diff --git a/contrib/libs/hyperscan/src/util/target_info.cpp b/contrib/libs/hyperscan/src/util/target_info.cpp
index 21fbb6d39b..66ba5f5acc 100644
--- a/contrib/libs/hyperscan/src/util/target_info.cpp
+++ b/contrib/libs/hyperscan/src/util/target_info.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -50,10 +50,10 @@ bool target_t::can_run_on_code_built_for(const target_t &code_target) const {
return false;
}
- if (!has_avx512vbmi() && code_target.has_avx512vbmi()) {
- return false;
- }
-
+ if (!has_avx512vbmi() && code_target.has_avx512vbmi()) {
+ return false;
+ }
+
return true;
}
@@ -68,10 +68,10 @@ bool target_t::has_avx512(void) const {
return cpu_features & HS_CPU_FEATURES_AVX512;
}
-bool target_t::has_avx512vbmi(void) const {
- return cpu_features & HS_CPU_FEATURES_AVX512VBMI;
-}
-
+bool target_t::has_avx512vbmi(void) const {
+ return cpu_features & HS_CPU_FEATURES_AVX512VBMI;
+}
+
bool target_t::is_atom_class(void) const {
return tune == HS_TUNE_FAMILY_SLM || tune == HS_TUNE_FAMILY_GLM;
}
diff --git a/contrib/libs/hyperscan/src/util/target_info.h b/contrib/libs/hyperscan/src/util/target_info.h
index 803c002a48..f64573aeda 100644
--- a/contrib/libs/hyperscan/src/util/target_info.h
+++ b/contrib/libs/hyperscan/src/util/target_info.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -42,8 +42,8 @@ struct target_t {
bool has_avx512(void) const;
- bool has_avx512vbmi(void) const;
-
+ bool has_avx512vbmi(void) const;
+
bool is_atom_class(void) const;
// This asks: can this target (the object) run on code that was built for
diff --git a/contrib/libs/hyperscan/src/util/ue2_graph.h b/contrib/libs/hyperscan/src/util/ue2_graph.h
index f99f28f5cd..aa9718d73a 100644
--- a/contrib/libs/hyperscan/src/util/ue2_graph.h
+++ b/contrib/libs/hyperscan/src/util/ue2_graph.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, Intel Corporation
+ * Copyright (c) 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -89,7 +89,7 @@
* (1) Deterministic ordering for vertices and edges
* boost::adjacency_list<> uses pointer ordering for vertex_descriptors. As
* a result, ordering of vertices and edges between runs is
- * non-deterministic unless containers, etc. use custom comparators.
+ * non-deterministic unless containers, etc. use custom comparators.
*
* (2) Proper types for descriptors, etc.
* No more void * for vertex_descriptors and trying to use it for the wrong
@@ -288,7 +288,7 @@ private:
vertex_edge_list<in_edge_hook> in_edge_list;
     /* The outgoing edges are considered owned by the vertex and
- * need to be freed when the graph is being destroyed */
+ * need to be freed when the graph is being destroyed */
vertex_edge_list<out_edge_hook> out_edge_list;
/* The destructor only frees memory owned by the vertex and will leave
@@ -1025,208 +1025,208 @@ public:
}
};
-/** \brief Type trait used to enable overloads when the Graph is a ue2_graph. */
+/** \brief Type trait used to enable overloads when the Graph is a ue2_graph. */
template<typename Graph>
-struct is_ue2_graph
- : public ::std::integral_constant<
- bool, std::is_base_of<graph_detail::graph_base, Graph>::value> {};
-
-template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::vertex_descriptor>::type
+struct is_ue2_graph
+ : public ::std::integral_constant<
+ bool, std::is_base_of<graph_detail::graph_base, Graph>::value> {};
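+
+/* Illustrative (assumption, not in the original): the trait gates the free
+ * function overloads below via SFINAE, so for a ue2_graph-derived type G,
+ *     static_assert(is_ue2_graph<G>::value, "");
+ * holds, while is_ue2_graph<int>::value is false and the overloads drop
+ * out of resolution for non-ue2 graphs. */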
+
+template<typename Graph>
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::vertex_descriptor>::type
add_vertex(Graph &g) {
return g.add_vertex_impl();
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
remove_vertex(typename Graph::vertex_descriptor v, Graph &g) {
g.remove_vertex_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
clear_in_edges(typename Graph::vertex_descriptor v, Graph &g) {
g.clear_in_edges_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
clear_out_edges(typename Graph::vertex_descriptor v, Graph &g) {
g.clear_out_edges_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
clear_vertex(typename Graph::vertex_descriptor v, Graph &g) {
g.clear_in_edges_impl(v);
g.clear_out_edges_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::vertex_descriptor>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::vertex_descriptor>::type
source(typename Graph::edge_descriptor e, const Graph &) {
return Graph::source_impl(e);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::vertex_descriptor>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::vertex_descriptor>::type
target(typename Graph::edge_descriptor e, const Graph &) {
return Graph::target_impl(e);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::degree_size_type>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::degree_size_type>::type
out_degree(typename Graph::vertex_descriptor v, const Graph &) {
return Graph::out_degree_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- std::pair<typename Graph::out_edge_iterator,
- typename Graph::out_edge_iterator>>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ std::pair<typename Graph::out_edge_iterator,
+ typename Graph::out_edge_iterator>>::type
out_edges(typename Graph::vertex_descriptor v, const Graph &) {
return Graph::out_edges_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::degree_size_type>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::degree_size_type>::type
in_degree(typename Graph::vertex_descriptor v, const Graph &) {
return Graph::in_degree_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- std::pair<typename Graph::in_edge_iterator,
- typename Graph::in_edge_iterator>>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ std::pair<typename Graph::in_edge_iterator,
+ typename Graph::in_edge_iterator>>::type
in_edges(typename Graph::vertex_descriptor v, const Graph &) {
return Graph::in_edges_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::degree_size_type>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::degree_size_type>::type
degree(typename Graph::vertex_descriptor v, const Graph &) {
return Graph::degree_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- std::pair<typename Graph::adjacency_iterator,
- typename Graph::adjacency_iterator>>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ std::pair<typename Graph::adjacency_iterator,
+ typename Graph::adjacency_iterator>>::type
adjacent_vertices(typename Graph::vertex_descriptor v, const Graph &) {
return Graph::adjacent_vertices_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- std::pair<typename Graph::edge_descriptor, bool>>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ std::pair<typename Graph::edge_descriptor, bool>>::type
edge(typename Graph::vertex_descriptor u, typename Graph::vertex_descriptor v,
const Graph &g) {
return g.edge_impl(u, v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- std::pair<typename Graph::inv_adjacency_iterator,
- typename Graph::inv_adjacency_iterator>>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ std::pair<typename Graph::inv_adjacency_iterator,
+ typename Graph::inv_adjacency_iterator>>::type
inv_adjacent_vertices(typename Graph::vertex_descriptor v, const Graph &) {
return Graph::inv_adjacent_vertices_impl(v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- std::pair<typename Graph::edge_descriptor, bool>>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ std::pair<typename Graph::edge_descriptor, bool>>::type
add_edge(typename Graph::vertex_descriptor u,
typename Graph::vertex_descriptor v, Graph &g) {
return g.add_edge_impl(u, v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
remove_edge(typename Graph::edge_descriptor e, Graph &g) {
g.remove_edge_impl(e);
}
template<typename Graph, typename Iter>
typename std::enable_if<
- !std::is_convertible<Iter, typename Graph::edge_descriptor>::value &&
- is_ue2_graph<Graph>::value>::type
+ !std::is_convertible<Iter, typename Graph::edge_descriptor>::value &&
+ is_ue2_graph<Graph>::value>::type
remove_edge(Iter it, Graph &g) {
g.remove_edge_impl(*it);
}
template<typename Graph, typename Predicate>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
remove_out_edge_if(typename Graph::vertex_descriptor v, Predicate pred,
Graph &g) {
g.remove_out_edge_if_impl(v, pred);
}
template<typename Graph, typename Predicate>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
remove_in_edge_if(typename Graph::vertex_descriptor v, Predicate pred,
Graph &g) {
g.remove_in_edge_if_impl(v, pred);
}
template<typename Graph, typename Predicate>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
remove_edge_if(Predicate pred, Graph &g) {
g.remove_edge_if_impl(pred);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
remove_edge(const typename Graph::vertex_descriptor &u,
const typename Graph::vertex_descriptor &v, Graph &g) {
g.remove_edge_impl(u, v);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::vertices_size_type>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::vertices_size_type>::type
num_vertices(const Graph &g) {
return g.num_vertices_impl();
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- std::pair<typename Graph::vertex_iterator,
- typename Graph::vertex_iterator>>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ std::pair<typename Graph::vertex_iterator,
+ typename Graph::vertex_iterator>>::type
vertices(const Graph &g) {
return g.vertices_impl();
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::edges_size_type>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::edges_size_type>::type
num_edges(const Graph &g) {
return g.num_edges_impl();
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- std::pair<typename Graph::edge_iterator,
- typename Graph::edge_iterator>>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ std::pair<typename Graph::edge_iterator,
+ typename Graph::edge_iterator>>::type
edges(const Graph &g) {
return g.edges_impl();
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::vertex_descriptor>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::vertex_descriptor>::type
add_vertex(const typename Graph::vertex_property_type &vp, Graph &g) {
return g.add_vertex_impl(vp);
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- std::pair<typename Graph::edge_descriptor, bool>>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ std::pair<typename Graph::edge_descriptor, bool>>::type
add_edge(typename Graph::vertex_descriptor u,
typename Graph::vertex_descriptor v,
const typename Graph::edge_property_type &ep, Graph &g) {
@@ -1234,59 +1234,59 @@ add_edge(typename Graph::vertex_descriptor u,
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
renumber_edges(Graph &g) {
g.renumber_edges_impl();
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value>::type
+typename std::enable_if<is_ue2_graph<Graph>::value>::type
renumber_vertices(Graph &g) {
g.renumber_vertices_impl();
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::vertices_size_type>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::vertices_size_type>::type
vertex_index_upper_bound(const Graph &g) {
return g.vertex_index_upper_bound_impl();
}
template<typename Graph>
-typename std::enable_if<is_ue2_graph<Graph>::value,
- typename Graph::edges_size_type>::type
+typename std::enable_if<is_ue2_graph<Graph>::value,
+ typename Graph::edges_size_type>::type
edge_index_upper_bound(const Graph &g) {
return g.edge_index_upper_bound_impl();
}
-template<typename T> struct pointer_to_member_traits {};
-
-template<typename Return, typename Class>
-struct pointer_to_member_traits<Return(Class::*)> {
- using member_type = Return;
- using class_type = Class;
-};
-
-template<typename Graph, typename Property, typename Enable = void>
-struct is_ue2_vertex_or_edge_property {
- static constexpr bool value = false;
-};
-
-template<typename Graph, typename Property>
-struct is_ue2_vertex_or_edge_property<
- Graph, Property, typename std::enable_if<is_ue2_graph<Graph>::value &&
- std::is_member_object_pointer<
- Property>::value>::type> {
-private:
- using class_type = typename pointer_to_member_traits<Property>::class_type;
- using vertex_type = typename Graph::vertex_property_type;
- using edge_type = typename Graph::edge_property_type;
-public:
- static constexpr bool value =
- std::is_same<class_type, vertex_type>::value ||
- std::is_same<class_type, edge_type>::value;
-};
-
+template<typename T> struct pointer_to_member_traits {};
+
+template<typename Return, typename Class>
+struct pointer_to_member_traits<Return(Class::*)> {
+ using member_type = Return;
+ using class_type = Class;
+};
+
+template<typename Graph, typename Property, typename Enable = void>
+struct is_ue2_vertex_or_edge_property {
+ static constexpr bool value = false;
+};
+
+template<typename Graph, typename Property>
+struct is_ue2_vertex_or_edge_property<
+ Graph, Property, typename std::enable_if<is_ue2_graph<Graph>::value &&
+ std::is_member_object_pointer<
+ Property>::value>::type> {
+private:
+ using class_type = typename pointer_to_member_traits<Property>::class_type;
+ using vertex_type = typename Graph::vertex_property_type;
+ using edge_type = typename Graph::edge_property_type;
+public:
+ static constexpr bool value =
+ std::is_same<class_type, vertex_type>::value ||
+ std::is_same<class_type, edge_type>::value;
+};
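+
+/* Worked example (illustrative, names hypothetical): for a member pointer
+ * type such as `size_t SomeVertexProps::*`, pointer_to_member_traits gives
+ * member_type = size_t and class_type = SomeVertexProps, so the trait above
+ * is true exactly when SomeVertexProps matches the graph's vertex or edge
+ * property type. */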
+
using boost::vertex_index;
using boost::edge_index;
@@ -1298,55 +1298,55 @@ namespace boost {
* adaptors (like filtered_graph) to know the type of the property maps */
template<typename Graph, typename Prop>
struct property_map<Graph, Prop,
- typename std::enable_if<ue2::is_ue2_graph<Graph>::value &&
- ue2::is_ue2_vertex_or_edge_property<
- Graph, Prop>::value>::type> {
-private:
- using prop_traits = ue2::pointer_to_member_traits<Prop>;
- using member_type = typename prop_traits::member_type;
- using class_type = typename prop_traits::class_type;
-public:
- using type = typename Graph::template prop_map<member_type &, class_type>;
- using const_type = typename Graph::template prop_map<const member_type &,
- class_type>;
+ typename std::enable_if<ue2::is_ue2_graph<Graph>::value &&
+ ue2::is_ue2_vertex_or_edge_property<
+ Graph, Prop>::value>::type> {
+private:
+ using prop_traits = ue2::pointer_to_member_traits<Prop>;
+ using member_type = typename prop_traits::member_type;
+ using class_type = typename prop_traits::class_type;
+public:
+ using type = typename Graph::template prop_map<member_type &, class_type>;
+ using const_type = typename Graph::template prop_map<const member_type &,
+ class_type>;
+};
+
+template<typename Graph>
+struct property_map<Graph, vertex_index_t,
+ typename std::enable_if<ue2::is_ue2_graph<Graph>::value>::type> {
+ using v_prop_type = typename Graph::vertex_property_type;
+ using type = typename Graph::template prop_map<size_t &, v_prop_type>;
+ using const_type =
+ typename Graph::template prop_map<const size_t &, v_prop_type>;
+};
+
+template<typename Graph>
+struct property_map<Graph, edge_index_t,
+ typename std::enable_if<ue2::is_ue2_graph<Graph>::value>::type> {
+ using e_prop_type = typename Graph::edge_property_type;
+ using type = typename Graph::template prop_map<size_t &, e_prop_type>;
+ using const_type =
+ typename Graph::template prop_map<const size_t &, e_prop_type>;
+};
+
+template<typename Graph>
+struct property_map<Graph, vertex_all_t,
+ typename std::enable_if<ue2::is_ue2_graph<Graph>::value>::type> {
+ using v_prop_type = typename Graph::vertex_property_type;
+ using type = typename Graph::template prop_map_all<v_prop_type &>;
+ using const_type =
+ typename Graph::template prop_map_all<const v_prop_type &>;
+};
+
+template<typename Graph>
+struct property_map<Graph, edge_all_t,
+ typename std::enable_if<ue2::is_ue2_graph<Graph>::value>::type> {
+ using e_prop_type = typename Graph::edge_property_type;
+ using type = typename Graph::template prop_map_all<e_prop_type &>;
+ using const_type =
+ typename Graph::template prop_map_all<const e_prop_type &>;
};
-template<typename Graph>
-struct property_map<Graph, vertex_index_t,
- typename std::enable_if<ue2::is_ue2_graph<Graph>::value>::type> {
- using v_prop_type = typename Graph::vertex_property_type;
- using type = typename Graph::template prop_map<size_t &, v_prop_type>;
- using const_type =
- typename Graph::template prop_map<const size_t &, v_prop_type>;
-};
-
-template<typename Graph>
-struct property_map<Graph, edge_index_t,
- typename std::enable_if<ue2::is_ue2_graph<Graph>::value>::type> {
- using e_prop_type = typename Graph::edge_property_type;
- using type = typename Graph::template prop_map<size_t &, e_prop_type>;
- using const_type =
- typename Graph::template prop_map<const size_t &, e_prop_type>;
-};
-
-template<typename Graph>
-struct property_map<Graph, vertex_all_t,
- typename std::enable_if<ue2::is_ue2_graph<Graph>::value>::type> {
- using v_prop_type = typename Graph::vertex_property_type;
- using type = typename Graph::template prop_map_all<v_prop_type &>;
- using const_type =
- typename Graph::template prop_map_all<const v_prop_type &>;
-};
-
-template<typename Graph>
-struct property_map<Graph, edge_all_t,
- typename std::enable_if<ue2::is_ue2_graph<Graph>::value>::type> {
- using e_prop_type = typename Graph::edge_property_type;
- using type = typename Graph::template prop_map_all<e_prop_type &>;
- using const_type =
- typename Graph::template prop_map_all<const e_prop_type &>;
-};
-
} // namespace boost
namespace std {
diff --git a/contrib/libs/hyperscan/src/util/ue2string.cpp b/contrib/libs/hyperscan/src/util/ue2string.cpp
index 0d0590024a..50b2bbcc89 100644
--- a/contrib/libs/hyperscan/src/util/ue2string.cpp
+++ b/contrib/libs/hyperscan/src/util/ue2string.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/util/ue2string.h b/contrib/libs/hyperscan/src/util/ue2string.h
index e9d4632a38..0aa846896e 100644
--- a/contrib/libs/hyperscan/src/util/ue2string.h
+++ b/contrib/libs/hyperscan/src/util/ue2string.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2019, Intel Corporation
+ * Copyright (c) 2015-2019, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/contrib/libs/hyperscan/src/util/uniform_ops.h b/contrib/libs/hyperscan/src/util/uniform_ops.h
index 074cc40dce..262104aca2 100644
--- a/contrib/libs/hyperscan/src/util/uniform_ops.h
+++ b/contrib/libs/hyperscan/src/util/uniform_ops.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, Intel Corporation
+ * Copyright (c) 2015-2020, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -101,18 +101,18 @@
#define or_m384(a, b) (or384(a, b))
#define or_m512(a, b) (or512(a, b))
-#if defined(HAVE_AVX512VBMI)
-#define expand_m128(a) (expand128(a))
-#define expand_m256(a) (expand256(a))
-#define expand_m384(a) (expand384(a))
-#define expand_m512(a) (a)
-
-#define shuffle_byte_m128(a, b) (pshufb_m512(b, a))
-#define shuffle_byte_m256(a, b) (vpermb512(a, b))
-#define shuffle_byte_m384(a, b) (vpermb512(a, b))
-#define shuffle_byte_m512(a, b) (vpermb512(a, b))
-#endif
-
+#if defined(HAVE_AVX512VBMI)
+#define expand_m128(a) (expand128(a))
+#define expand_m256(a) (expand256(a))
+#define expand_m384(a) (expand384(a))
+#define expand_m512(a) (a)
+
+#define shuffle_byte_m128(a, b) (pshufb_m512(b, a))
+#define shuffle_byte_m256(a, b) (vpermb512(a, b))
+#define shuffle_byte_m384(a, b) (vpermb512(a, b))
+#define shuffle_byte_m512(a, b) (vpermb512(a, b))
+#endif
+
#define and_u8(a, b) ((a) & (b))
#define and_u32(a, b) ((a) & (b))
#define and_u64a(a, b) ((a) & (b))
diff --git a/contrib/libs/hyperscan/ya.make b/contrib/libs/hyperscan/ya.make
index 0a76c02e0b..7783969e4a 100644
--- a/contrib/libs/hyperscan/ya.make
+++ b/contrib/libs/hyperscan/ya.make
@@ -1,115 +1,115 @@
-# Generated by devtools/yamaker from nixpkgs cc3b147ed182a6cae239348ef094158815da14ae.
+# Generated by devtools/yamaker from nixpkgs cc3b147ed182a6cae239348ef094158815da14ae.
-LIBRARY()
+LIBRARY()
-OWNER(
- galtsev
- g:antiinfra
- g:cpp-contrib
- g:yql
-)
+OWNER(
+ galtsev
+ g:antiinfra
+ g:cpp-contrib
+ g:yql
+)
-VERSION(5.4.0)
+VERSION(5.4.0)
-ORIGINAL_SOURCE(https://github.com/intel/hyperscan/archive/v5.4.0.tar.gz)
+ORIGINAL_SOURCE(https://github.com/intel/hyperscan/archive/v5.4.0.tar.gz)
-LICENSE(
+LICENSE(
BSD-2-Clause AND
BSD-3-Clause AND
- BSL-1.0
-)
+ BSL-1.0
+)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/restricted/boost
+)
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-PEERDIR(
- contrib/restricted/boost
-)
-
ADDINCL(
contrib/libs/hyperscan
- contrib/libs/hyperscan/include
+ contrib/libs/hyperscan/include
contrib/libs/hyperscan/src
)
-NO_COMPILER_WARNINGS()
+NO_COMPILER_WARNINGS()
-NO_UTIL()
+NO_UTIL()
CFLAGS(
-DDISABLE_AVX512VBMI_DISPATCH
)
-
+
SRCS(
src/alloc.c
src/compiler/asserts.cpp
src/compiler/compiler.cpp
src/compiler/error.cpp
- src/crc32.c
- src/database.c
+ src/crc32.c
+ src/database.c
src/fdr/engine_description.cpp
- src/fdr/fdr.c
+ src/fdr/fdr.c
src/fdr/fdr_compile.cpp
src/fdr/fdr_compile_util.cpp
src/fdr/fdr_confirm_compile.cpp
src/fdr/fdr_engine_description.cpp
src/fdr/flood_compile.cpp
- src/fdr/teddy.c
- src/fdr/teddy_avx2.c
+ src/fdr/teddy.c
+ src/fdr/teddy_avx2.c
src/fdr/teddy_compile.cpp
src/fdr/teddy_engine_description.cpp
src/grey.cpp
src/hs.cpp
- src/hs_valid_platform.c
- src/hs_version.c
- src/hwlm/hwlm.c
+ src/hs_valid_platform.c
+ src/hs_version.c
+ src/hwlm/hwlm.c
src/hwlm/hwlm_build.cpp
src/hwlm/hwlm_literal.cpp
src/hwlm/noodle_build.cpp
- src/hwlm/noodle_engine.c
- src/nfa/accel.c
- src/nfa/accel_dfa_build_strat.cpp
+ src/hwlm/noodle_engine.c
+ src/nfa/accel.c
+ src/nfa/accel_dfa_build_strat.cpp
src/nfa/accelcompile.cpp
- src/nfa/castle.c
+ src/nfa/castle.c
src/nfa/castlecompile.cpp
src/nfa/dfa_build_strat.cpp
src/nfa/dfa_min.cpp
- src/nfa/gough.c
- src/nfa/goughcompile.cpp
+ src/nfa/gough.c
+ src/nfa/goughcompile.cpp
src/nfa/goughcompile_accel.cpp
src/nfa/goughcompile_reg.cpp
- src/nfa/lbr.c
- src/nfa/limex_64.c
- src/nfa/limex_accel.c
- src/nfa/limex_compile.cpp
- src/nfa/limex_native.c
- src/nfa/limex_simd128.c
- src/nfa/limex_simd256.c
- src/nfa/limex_simd384.c
- src/nfa/limex_simd512.c
- src/nfa/mcclellan.c
- src/nfa/mcclellancompile.cpp
- src/nfa/mcclellancompile_util.cpp
- src/nfa/mcsheng.c
- src/nfa/mcsheng_compile.cpp
- src/nfa/mcsheng_data.c
- src/nfa/mpv.c
- src/nfa/mpvcompile.cpp
- src/nfa/nfa_api_dispatch.c
- src/nfa/nfa_build_util.cpp
- src/nfa/rdfa.cpp
- src/nfa/rdfa_graph.cpp
- src/nfa/rdfa_merge.cpp
- src/nfa/repeat.c
- src/nfa/repeatcompile.cpp
- src/nfa/sheng.c
- src/nfa/shengcompile.cpp
- src/nfa/shufti.c
- src/nfa/shufticompile.cpp
- src/nfa/tamarama.c
- src/nfa/tamaramacompile.cpp
- src/nfa/truffle.c
- src/nfa/trufflecompile.cpp
- src/nfagraph/ng.cpp
+ src/nfa/lbr.c
+ src/nfa/limex_64.c
+ src/nfa/limex_accel.c
+ src/nfa/limex_compile.cpp
+ src/nfa/limex_native.c
+ src/nfa/limex_simd128.c
+ src/nfa/limex_simd256.c
+ src/nfa/limex_simd384.c
+ src/nfa/limex_simd512.c
+ src/nfa/mcclellan.c
+ src/nfa/mcclellancompile.cpp
+ src/nfa/mcclellancompile_util.cpp
+ src/nfa/mcsheng.c
+ src/nfa/mcsheng_compile.cpp
+ src/nfa/mcsheng_data.c
+ src/nfa/mpv.c
+ src/nfa/mpvcompile.cpp
+ src/nfa/nfa_api_dispatch.c
+ src/nfa/nfa_build_util.cpp
+ src/nfa/rdfa.cpp
+ src/nfa/rdfa_graph.cpp
+ src/nfa/rdfa_merge.cpp
+ src/nfa/repeat.c
+ src/nfa/repeatcompile.cpp
+ src/nfa/sheng.c
+ src/nfa/shengcompile.cpp
+ src/nfa/shufti.c
+ src/nfa/shufticompile.cpp
+ src/nfa/tamarama.c
+ src/nfa/tamaramacompile.cpp
+ src/nfa/truffle.c
+ src/nfa/trufflecompile.cpp
+ src/nfagraph/ng.cpp
src/nfagraph/ng_anchored_acyclic.cpp
src/nfagraph/ng_anchored_dots.cpp
src/nfagraph/ng_asserts.cpp
@@ -129,7 +129,7 @@ SRCS(
src/nfagraph/ng_holder.cpp
src/nfagraph/ng_is_equal.cpp
src/nfagraph/ng_lbr.cpp
- src/nfagraph/ng_limex.cpp
+ src/nfagraph/ng_limex.cpp
src/nfagraph/ng_limex_accel.cpp
src/nfagraph/ng_literal_analysis.cpp
src/nfagraph/ng_literal_component.cpp
@@ -149,7 +149,7 @@ SRCS(
src/nfagraph/ng_revacc.cpp
src/nfagraph/ng_sep.cpp
src/nfagraph/ng_small_literal_set.cpp
- src/nfagraph/ng_som.cpp
+ src/nfagraph/ng_som.cpp
src/nfagraph/ng_som_add_redundancy.cpp
src/nfagraph/ng_som_util.cpp
src/nfagraph/ng_split.cpp
@@ -162,7 +162,7 @@ SRCS(
src/nfagraph/ng_violet.cpp
src/nfagraph/ng_width.cpp
src/parser/AsciiComponentClass.cpp
- src/parser/Component.cpp
+ src/parser/Component.cpp
src/parser/ComponentAlternation.cpp
src/parser/ComponentAssertion.cpp
src/parser/ComponentAtomicGroup.cpp
@@ -171,19 +171,19 @@ SRCS(
src/parser/ComponentByte.cpp
src/parser/ComponentClass.cpp
src/parser/ComponentCondReference.cpp
- src/parser/ComponentEUS.cpp
+ src/parser/ComponentEUS.cpp
src/parser/ComponentEmpty.cpp
src/parser/ComponentRepeat.cpp
src/parser/ComponentSequence.cpp
src/parser/ComponentVisitor.cpp
src/parser/ComponentWordBoundary.cpp
src/parser/ConstComponentVisitor.cpp
- src/parser/Parser.rl6
- src/parser/Utf8ComponentClass.cpp
- src/parser/buildstate.cpp
- src/parser/check_refs.cpp
- src/parser/control_verbs.rl6
- src/parser/logical_combination.cpp
+ src/parser/Parser.rl6
+ src/parser/Utf8ComponentClass.cpp
+ src/parser/buildstate.cpp
+ src/parser/check_refs.cpp
+ src/parser/control_verbs.rl6
+ src/parser/logical_combination.cpp
src/parser/parse_error.cpp
src/parser/parser_util.cpp
src/parser/prefilter.cpp
@@ -191,11 +191,11 @@ SRCS(
src/parser/ucp_table.cpp
src/parser/unsupported.cpp
src/parser/utf8_validate.cpp
- src/rose/block.c
- src/rose/catchup.c
- src/rose/init.c
- src/rose/match.c
- src/rose/program_runtime.c
+ src/rose/block.c
+ src/rose/catchup.c
+ src/rose/init.c
+ src/rose/match.c
+ src/rose/program_runtime.c
src/rose/rose_build_add.cpp
src/rose/rose_build_add_mask.cpp
src/rose/rose_build_anchored.cpp
@@ -220,38 +220,38 @@ SRCS(
src/rose/rose_build_scatter.cpp
src/rose/rose_build_width.cpp
src/rose/rose_in_util.cpp
- src/rose/stream.c
- src/runtime.c
- src/scratch.c
+ src/rose/stream.c
+ src/runtime.c
+ src/scratch.c
src/smallwrite/smallwrite_build.cpp
src/som/slot_manager.cpp
- src/som/som_runtime.c
- src/som/som_stream.c
- src/stream_compress.c
+ src/som/som_runtime.c
+ src/som/som_stream.c
+ src/stream_compress.c
src/util/alloc.cpp
src/util/charreach.cpp
src/util/clique.cpp
src/util/compile_context.cpp
src/util/compile_error.cpp
- src/util/cpuid_flags.c
+ src/util/cpuid_flags.c
src/util/depth.cpp
- src/util/dump_mask.cpp
+ src/util/dump_mask.cpp
src/util/fatbit_build.cpp
- src/util/masked_move.c
- src/util/multibit.c
+ src/util/masked_move.c
+ src/util/multibit.c
src/util/multibit_build.cpp
src/util/report_manager.cpp
- src/util/simd_utils.c
- src/util/state_compress.c
+ src/util/simd_utils.c
+ src/util/state_compress.c
src/util/target_info.cpp
src/util/ue2string.cpp
)
END()
-
-RECURSE(
- runtime_avx2
- runtime_avx512
- runtime_core2
- runtime_corei7
-)
+
+RECURSE(
+ runtime_avx2
+ runtime_avx512
+ runtime_core2
+ runtime_corei7
+)