author     Ivan Blinkov <ivan@blinkov.ru>   2022-02-10 16:47:10 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>   2022-02-10 16:47:10 +0300
commit     1aeb9a455974457866f78722ad98114bafc84e8a (patch)
tree       e4340eaf1668684d83a0a58c36947c5def5350ad  /contrib/libs/hyperscan/src/rose/catchup.h
parent     bd5ef432f5cfb1e18851381329d94665a4c22470 (diff)
download   ydb-1aeb9a455974457866f78722ad98114bafc84e8a.tar.gz
Restoring authorship annotation for Ivan Blinkov <ivan@blinkov.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/hyperscan/src/rose/catchup.h')
-rw-r--r--   contrib/libs/hyperscan/src/rose/catchup.h   156
1 file changed, 78 insertions, 78 deletions
diff --git a/contrib/libs/hyperscan/src/rose/catchup.h b/contrib/libs/hyperscan/src/rose/catchup.h
index 8188d5af01..978a8c7007 100644
--- a/contrib/libs/hyperscan/src/rose/catchup.h
+++ b/contrib/libs/hyperscan/src/rose/catchup.h
@@ -26,25 +26,25 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-/**
- * \file
- * \brief Rose runtime: code for catching up output-exposed engines.
- *
- * Rose has several components which run behind the main (floating table) clock
- * and need to be caught up before we report matches.
- *
- * Currently we have to deal with:
- * 1. Suffix/Outfix NFAs
- * 2. A single MPV NFA (chained), which may also be triggered by (1).
- *
- * The approach is to:
- * - (A) build a priority queue of the suffix/outfixes based on their first
- * match location;
- * - (B) process the matches from the priority queue in order;
- * - (C) As we report matches from (B) we interleave matches from the MPV if it
- * exists.
- */
-
+/**
+ * \file
+ * \brief Rose runtime: code for catching up output-exposed engines.
+ *
+ * Rose has several components which run behind the main (floating table) clock
+ * and need to be caught up before we report matches.
+ *
+ * Currently we have to deal with:
+ * 1. Suffix/Outfix NFAs
+ * 2. A single MPV NFA (chained), which may also be triggered by (1).
+ *
+ * The approach is to:
+ * - (A) build a priority queue of the suffix/outfixes based on their first
+ * match location;
+ * - (B) process the matches from the priority queue in order;
+ * - (C) As we report matches from (B) we interleave matches from the MPV if it
+ * exists.
+ */
+
#ifndef ROSE_CATCHUP_H
#define ROSE_CATCHUP_H
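
The restored file comment above describes the catch-up scheme in three steps: (A) order the suffix/outfix engines by the location of their first pending match, (B) drain them in that order, and (C) interleave MPV matches as they are reported. The toy C program below is a simplified illustration of that ordering only, not code from Hyperscan: the type and helper names are made up, and a linear minimum scan stands in for the real priority queue of suffix/outfix queues.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for an output-exposed engine with a pending match. */
struct toy_engine {
    const char *name;
    size_t next_match;  /* location of this engine's first unreported match */
    int active;
};

/* (A)/(B): pick the active engine with the earliest pending match.
 * The real code keeps a priority queue; a linear scan keeps the toy short. */
static struct toy_engine *earliest(struct toy_engine *e, size_t n) {
    struct toy_engine *best = NULL;
    for (size_t i = 0; i < n; i++) {
        if (e[i].active && (!best || e[i].next_match < best->next_match)) {
            best = &e[i];
        }
    }
    return best;
}

int main(void) {
    struct toy_engine engines[] = {
        { "suffix0", 5, 1 }, { "outfix1", 2, 1 }, { "suffix2", 9, 1 },
    };
    size_t mpv_next = 4;    /* next pending MPV match, if the MPV exists */
    size_t catchup_to = 10; /* offset to be caught up to before reporting */

    struct toy_engine *e;
    while ((e = earliest(engines, 3)) && e->next_match <= catchup_to) {
        /* (C): interleave any MPV matches that fall before this match. */
        while (mpv_next <= e->next_match) {
            printf("MPV match at %zu\n", mpv_next);
            mpv_next += 6; /* pretend the MPV's next match is further along */
        }
        printf("%s match at %zu\n", e->name, e->next_match);
        e->active = 0; /* pretend this engine has no further matches */
    }
    return 0;
}

The point of the ordering is that matches must reach the user in increasing offset order, which is why the real implementation keys its priority queue on each engine's next match location.
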
@@ -59,74 +59,74 @@
hwlmcb_rv_t roseCatchUpAll(s64a loc, struct hs_scratch *scratch);
-/* will only catch mpv up to last reported external match */
+/* will only catch mpv up to last reported external match */
hwlmcb_rv_t roseCatchUpSuf(s64a loc, struct hs_scratch *scratch);
-hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, s64a loc,
+hwlmcb_rv_t roseCatchUpMPV_i(const struct RoseEngine *t, s64a loc,
struct hs_scratch *scratch);
-void blockInitSufPQ(const struct RoseEngine *t, char *state,
+void blockInitSufPQ(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch, char is_small_block);
-void streamInitSufPQ(const struct RoseEngine *t, char *state,
+void streamInitSufPQ(const struct RoseEngine *t, char *state,
struct hs_scratch *scratch);
static really_inline
-int canSkipCatchUpMPV(const struct RoseEngine *t, struct hs_scratch *scratch,
- u64a cur_offset) {
+int canSkipCatchUpMPV(const struct RoseEngine *t, struct hs_scratch *scratch,
+ u64a cur_offset) {
if (!has_chained_nfas(t)) {
- return 1;
+ return 1;
}
/* note: we may have to run at less than tctxt.minMatchOffset as we may
* have a full queue of postponed events that we need to flush */
if (cur_offset < scratch->tctxt.next_mpv_offset) {
- DEBUG_PRINTF("skipping cur_offset %llu min %llu, mpv %llu\n",
+ DEBUG_PRINTF("skipping cur_offset %llu min %llu, mpv %llu\n",
cur_offset, scratch->tctxt.minMatchOffset,
scratch->tctxt.next_mpv_offset);
- return 1;
+ return 1;
}
assert(t->activeArrayCount);
- DEBUG_PRINTF("cur offset offset: %llu\n", cur_offset);
+ DEBUG_PRINTF("cur offset offset: %llu\n", cur_offset);
DEBUG_PRINTF("min match offset %llu\n", scratch->tctxt.minMatchOffset);
assert(t->outfixBeginQueue == 1); /* if it exists mpv is queue 0 */
- const u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
- return !mmbit_isset(aa, t->activeArrayCount, 0);
-}
-
-/** \brief Catches up the MPV. */
-static really_inline
-hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, s64a loc,
- struct hs_scratch *scratch) {
- u64a cur_offset = loc + scratch->core_info.buf_offset;
- assert(cur_offset >= scratch->tctxt.minMatchOffset);
- assert(!can_stop_matching(scratch));
-
- if (canSkipCatchUpMPV(t, scratch, cur_offset)) {
+ const u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
+ return !mmbit_isset(aa, t->activeArrayCount, 0);
+}
+
+/** \brief Catches up the MPV. */
+static really_inline
+hwlmcb_rv_t roseCatchUpMPV(const struct RoseEngine *t, s64a loc,
+ struct hs_scratch *scratch) {
+ u64a cur_offset = loc + scratch->core_info.buf_offset;
+ assert(cur_offset >= scratch->tctxt.minMatchOffset);
+ assert(!can_stop_matching(scratch));
+
+ if (canSkipCatchUpMPV(t, scratch, cur_offset)) {
if (t->flushCombProgramOffset) {
if (roseRunFlushCombProgram(t, scratch, cur_offset)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
- updateMinMatchOffsetFromMpv(&scratch->tctxt, cur_offset);
- return HWLM_CONTINUE_MATCHING;
+ updateMinMatchOffsetFromMpv(&scratch->tctxt, cur_offset);
+ return HWLM_CONTINUE_MATCHING;
}
/* Note: chained tails MUST not participate in the priority queue as
* they may have events pushed on during this process which may be before
* the catch up point */
- return roseCatchUpMPV_i(t, loc, scratch);
+ return roseCatchUpMPV_i(t, loc, scratch);
}
-/** \brief Catches up NFAs and the MPV. */
+/** \brief Catches up NFAs and the MPV. */
static rose_inline
-hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t,
- struct hs_scratch *scratch, u64a end) {
+hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end) {
/* no need to catch up if we are at the same offset as last time */
if (end <= scratch->tctxt.minMatchOffset) {
/* we must already be up to date */
@@ -134,48 +134,48 @@ hwlmcb_rv_t roseCatchUpTo(const struct RoseEngine *t,
return HWLM_CONTINUE_MATCHING;
}
- char *state = scratch->core_info.state;
+ char *state = scratch->core_info.state;
s64a loc = end - scratch->core_info.buf_offset;
if (end <= scratch->tctxt.minNonMpvMatchOffset) {
/* only need to catch up the mpv */
- return roseCatchUpMPV(t, loc, scratch);
+ return roseCatchUpMPV(t, loc, scratch);
}
assert(scratch->tctxt.minMatchOffset >= scratch->core_info.buf_offset);
hwlmcb_rv_t rv;
- if (!t->activeArrayCount
- || !mmbit_any(getActiveLeafArray(t, state), t->activeArrayCount)) {
+ if (!t->activeArrayCount
+ || !mmbit_any(getActiveLeafArray(t, state), t->activeArrayCount)) {
if (t->flushCombProgramOffset) {
if (roseRunFlushCombProgram(t, scratch, end)
== HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATE_MATCHING;
}
}
- updateMinMatchOffset(&scratch->tctxt, end);
- rv = HWLM_CONTINUE_MATCHING;
+ updateMinMatchOffset(&scratch->tctxt, end);
+ rv = HWLM_CONTINUE_MATCHING;
} else {
- rv = roseCatchUpAll(loc, scratch);
+ rv = roseCatchUpAll(loc, scratch);
}
assert(rv != HWLM_CONTINUE_MATCHING
|| scratch->tctxt.minMatchOffset == end);
assert(rv != HWLM_CONTINUE_MATCHING
|| scratch->tctxt.minNonMpvMatchOffset == end);
- assert(!can_stop_matching(scratch) || rv == HWLM_TERMINATE_MATCHING);
+ assert(!can_stop_matching(scratch) || rv == HWLM_TERMINATE_MATCHING);
return rv;
}
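
A hedged sketch of the intended calling pattern, not code from the library: a report path would catch everything up to the match offset before raising the report, and stop if the user callback has already terminated matching. The helper name handleOwnReport and its surrounding context are hypothetical.

/* Hypothetical caller, for illustration only. */
static rose_inline
hwlmcb_rv_t handleOwnReport(const struct RoseEngine *t,
                            struct hs_scratch *scratch, u64a end) {
    /* Bring suffixes/outfixes and the MPV up to 'end' first so that matches
     * reach the user callback in offset order. */
    if (roseCatchUpTo(t, scratch, end) == HWLM_TERMINATE_MATCHING) {
        return HWLM_TERMINATE_MATCHING;
    }
    /* ... now it is safe to raise the report at offset 'end' ... */
    return HWLM_CONTINUE_MATCHING;
}
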
-/**
- * \brief Catches up anything which may add triggers on the MPV (suffixes and
- * outfixes).
- *
- * The MPV will be run only to intersperse matches in the output match stream
- * if external matches are raised.
- */
+/**
+ * \brief Catches up anything which may add triggers on the MPV (suffixes and
+ * outfixes).
+ *
+ * The MPV will be run only to intersperse matches in the output match stream
+ * if external matches are raised.
+ */
static rose_inline
-hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t,
- struct hs_scratch *scratch, u64a end) {
+hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t,
+ struct hs_scratch *scratch, u64a end) {
/* no need to catch up if we are at the same offset as last time */
if (end <= scratch->tctxt.minNonMpvMatchOffset) {
/* we must already be up to date */
@@ -188,20 +188,20 @@ hwlmcb_rv_t roseCatchUpMpvFeeders(const struct RoseEngine *t,
assert(t->activeArrayCount); /* mpv is in active array */
assert(scratch->tctxt.minMatchOffset >= scratch->core_info.buf_offset);
- if (!t->mpvTriggeredByLeaf) {
- /* no need to check as they never put triggers onto the mpv */
- return HWLM_CONTINUE_MATCHING;
- }
-
- /* sadly, this branch rarely gets taken as the mpv itself is usually
- * alive. */
- char *state = scratch->core_info.state;
- if (!mmbit_any(getActiveLeafArray(t, state), t->activeArrayCount)) {
- scratch->tctxt.minNonMpvMatchOffset = end;
- return HWLM_CONTINUE_MATCHING;
+ if (!t->mpvTriggeredByLeaf) {
+ /* no need to check as they never put triggers onto the mpv */
+ return HWLM_CONTINUE_MATCHING;
+ }
+
+ /* sadly, this branch rarely gets taken as the mpv itself is usually
+ * alive. */
+ char *state = scratch->core_info.state;
+ if (!mmbit_any(getActiveLeafArray(t, state), t->activeArrayCount)) {
+ scratch->tctxt.minNonMpvMatchOffset = end;
+ return HWLM_CONTINUE_MATCHING;
}
-
- return roseCatchUpSuf(loc, scratch);
+
+ return roseCatchUpSuf(loc, scratch);
}
#endif
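
One more hedged sketch, again with a hypothetical helper name: roseCatchUpMpvFeeders only brings the suffixes/outfixes that can put triggers onto the MPV up to date, so a caller that also needs the MPV itself flushed would typically follow it with roseCatchUpMPV.

/* Hypothetical caller, for illustration only: catch up the MPV's feeders,
 * then the MPV itself, bailing out if matching has been terminated. */
static rose_inline
hwlmcb_rv_t catchUpMpvAndFeeders(const struct RoseEngine *t,
                                 struct hs_scratch *scratch, u64a end) {
    if (roseCatchUpMpvFeeders(t, scratch, end) == HWLM_TERMINATE_MATCHING) {
        return HWLM_TERMINATE_MATCHING;
    }
    s64a loc = end - scratch->core_info.buf_offset;
    return roseCatchUpMPV(t, loc, scratch);
}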