path: root/contrib/libs/hyperscan/src/hwlm/noodle_engine.c
author    Ivan Blinkov <ivan@blinkov.ru>    2022-02-10 16:47:10 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:47:10 +0300
commit    1aeb9a455974457866f78722ad98114bafc84e8a (patch)
tree      e4340eaf1668684d83a0a58c36947c5def5350ad /contrib/libs/hyperscan/src/hwlm/noodle_engine.c
parent    bd5ef432f5cfb1e18851381329d94665a4c22470 (diff)
download  ydb-1aeb9a455974457866f78722ad98114bafc84e8a.tar.gz
Restoring authorship annotation for Ivan Blinkov <ivan@blinkov.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/hyperscan/src/hwlm/noodle_engine.c')
-rw-r--r--    contrib/libs/hyperscan/src/hwlm/noodle_engine.c    394
1 file changed, 197 insertions(+), 197 deletions(-)
diff --git a/contrib/libs/hyperscan/src/hwlm/noodle_engine.c b/contrib/libs/hyperscan/src/hwlm/noodle_engine.c
index d4f6902a2d..5ecbee679a 100644
--- a/contrib/libs/hyperscan/src/hwlm/noodle_engine.c
+++ b/contrib/libs/hyperscan/src/hwlm/noodle_engine.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,15 +32,15 @@
#include "hwlm.h"
#include "noodle_engine.h"
#include "noodle_internal.h"
-#include "scratch.h"
+#include "scratch.h"
#include "ue2common.h"
-#include "util/arch.h"
+#include "util/arch.h"
#include "util/bitutils.h"
#include "util/compare.h"
-#include "util/intrinsics.h"
-#include "util/join.h"
+#include "util/intrinsics.h"
+#include "util/join.h"
#include "util/masked_move.h"
-#include "util/partial_store.h"
+#include "util/partial_store.h"
#include "util/simd_utils.h"
#include <ctype.h>
@@ -51,28 +51,28 @@
struct cb_info {
HWLMCallback cb; //!< callback function called on match
u32 id; //!< ID to pass to callback on match
- struct hs_scratch *scratch; //!< scratch to pass to callback
+ struct hs_scratch *scratch; //!< scratch to pass to callback
size_t offsetAdj; //!< used in streaming mode
};
-#if defined(HAVE_AVX512)
-#define CHUNKSIZE 64
-#define MASK_TYPE m512
-#define Z_BITS 64
-#define Z_TYPE u64a
-#elif defined(HAVE_AVX2)
-#define CHUNKSIZE 32
-#define MASK_TYPE m256
-#define Z_BITS 32
-#define Z_TYPE u32
-#else
-#define CHUNKSIZE 16
-#define MASK_TYPE m128
-#define Z_BITS 32
-#define Z_TYPE u32
-#endif
-
-
+#if defined(HAVE_AVX512)
+#define CHUNKSIZE 64
+#define MASK_TYPE m512
+#define Z_BITS 64
+#define Z_TYPE u64a
+#elif defined(HAVE_AVX2)
+#define CHUNKSIZE 32
+#define MASK_TYPE m256
+#define Z_BITS 32
+#define Z_TYPE u32
+#else
+#define CHUNKSIZE 16
+#define MASK_TYPE m128
+#define Z_BITS 32
+#define Z_TYPE u32
+#endif
+
+
#define RETURN_IF_TERMINATED(x) \
{ \
if ((x) == HWLM_TERMINATED) { \
@@ -83,10 +83,10 @@ struct cb_info {
#define SINGLE_ZSCAN() \
do { \
while (unlikely(z)) { \
- Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z); \
+ Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z); \
size_t matchPos = d - buf + pos; \
- DEBUG_PRINTF("match pos %zu\n", matchPos); \
- hwlmcb_rv_t rv = final(n, buf, len, 1, cbi, matchPos); \
+ DEBUG_PRINTF("match pos %zu\n", matchPos); \
+ hwlmcb_rv_t rv = final(n, buf, len, 1, cbi, matchPos); \
RETURN_IF_TERMINATED(rv); \
} \
} while (0)
@@ -94,10 +94,10 @@ struct cb_info {
#define DOUBLE_ZSCAN() \
do { \
while (unlikely(z)) { \
- Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z); \
+ Z_TYPE pos = JOIN(findAndClearLSB_, Z_BITS)(&z); \
size_t matchPos = d - buf + pos - 1; \
- DEBUG_PRINTF("match pos %zu\n", matchPos); \
- hwlmcb_rv_t rv = final(n, buf, len, 0, cbi, matchPos); \
+ DEBUG_PRINTF("match pos %zu\n", matchPos); \
+ hwlmcb_rv_t rv = final(n, buf, len, 0, cbi, matchPos); \
RETURN_IF_TERMINATED(rv); \
} \
} while (0)
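
For orientation, SINGLE_ZSCAN and DOUBLE_ZSCAN both consume the per-chunk comparison bitmask z one set bit at a time: each set bit marks a byte whose vector compare hit, and findAndClearLSB_* returns the index of the lowest set bit and clears it. A minimal standalone sketch of that loop, using a GCC/Clang builtin rather than Hyperscan's util/bitutils.h helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stand-in for Hyperscan's findAndClearLSB_32 */
    static unsigned find_and_clear_lsb32(uint32_t *z) {
        unsigned pos = (unsigned)__builtin_ctz(*z); /* index of lowest set bit */
        *z &= *z - 1;                               /* clear it */
        return pos;
    }

    int main(void) {
        /* pretend comparison bitmask for one chunk starting at buffer offset 64 */
        uint32_t z = 0x00080012u;
        size_t chunk_start = 64;
        while (z) {
            size_t matchPos = chunk_start + find_and_clear_lsb32(&z);
            printf("candidate match at offset %zu\n", matchPos);
        }
        return 0;
    }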
@@ -111,37 +111,37 @@ u8 caseClear8(u8 x, bool noCase) {
// is used only for single chars with case insensitivity used correctly,
// so it can go straight to the callback if we get this far.
static really_inline
-hwlm_error_t final(const struct noodTable *n, const u8 *buf, UNUSED size_t len,
- char single, const struct cb_info *cbi, size_t pos) {
- if (single) {
- if (n->msk_len == 1) {
- goto match;
+hwlm_error_t final(const struct noodTable *n, const u8 *buf, UNUSED size_t len,
+ char single, const struct cb_info *cbi, size_t pos) {
+ if (single) {
+ if (n->msk_len == 1) {
+ goto match;
}
}
- assert(len >= n->msk_len);
- u64a v =
- partial_load_u64a(buf + pos + n->key_offset - n->msk_len, n->msk_len);
- DEBUG_PRINTF("v %016llx msk %016llx cmp %016llx\n", v, n->msk, n->cmp);
- if ((v & n->msk) != n->cmp) {
- /* mask didn't match */
- return HWLM_SUCCESS;
- }
-
-match:
- pos -= cbi->offsetAdj;
- DEBUG_PRINTF("match @ %zu\n", pos + n->key_offset);
- hwlmcb_rv_t rv = cbi->cb(pos + n->key_offset - 1, cbi->id, cbi->scratch);
+ assert(len >= n->msk_len);
+ u64a v =
+ partial_load_u64a(buf + pos + n->key_offset - n->msk_len, n->msk_len);
+ DEBUG_PRINTF("v %016llx msk %016llx cmp %016llx\n", v, n->msk, n->cmp);
+ if ((v & n->msk) != n->cmp) {
+ /* mask didn't match */
+ return HWLM_SUCCESS;
+ }
+
+match:
+ pos -= cbi->offsetAdj;
+ DEBUG_PRINTF("match @ %zu\n", pos + n->key_offset);
+ hwlmcb_rv_t rv = cbi->cb(pos + n->key_offset - 1, cbi->id, cbi->scratch);
if (rv == HWLM_TERMINATE_MATCHING) {
return HWLM_TERMINATED;
}
return HWLM_SUCCESS;
}
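
final() confirms a candidate hit by loading up to eight bytes ending at the key offset and comparing them against the literal's msk/cmp pair. A compilable sketch of that check, with a hypothetical little-endian partial-load helper standing in for partial_load_u64a:

    #include <stdint.h>
    #include <stdbool.h>
    #include <string.h>

    /* illustrative helper, not Hyperscan's partial_load_u64a */
    static uint64_t partial_load_le(const uint8_t *p, size_t n) {
        uint64_t v = 0;
        memcpy(&v, p, n);   /* assumes n <= 8 and a little-endian host */
        return v;
    }

    /* same shape as the msk/cmp test in final() */
    bool confirm(const uint8_t *buf, size_t pos, size_t key_offset,
                 size_t msk_len, uint64_t msk, uint64_t cmp) {
        uint64_t v = partial_load_le(buf + pos + key_offset - msk_len, msk_len);
        return (v & msk) == cmp;
    }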
-#if defined(HAVE_AVX512)
-#define CHUNKSIZE 64
-#define MASK_TYPE m512
-#include "noodle_engine_avx512.c"
-#elif defined(HAVE_AVX2)
+#if defined(HAVE_AVX512)
+#define CHUNKSIZE 64
+#define MASK_TYPE m512
+#include "noodle_engine_avx512.c"
+#elif defined(HAVE_AVX2)
#define CHUNKSIZE 32
#define MASK_TYPE m256
#include "noodle_engine_avx2.c"
@@ -152,43 +152,43 @@ match:
#endif
static really_inline
-hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
- size_t len, size_t start, bool noCase,
- const struct cb_info *cbi) {
+hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
+ size_t len, size_t start, bool noCase,
+ const struct cb_info *cbi) {
- const MASK_TYPE mask1 = getMask(n->key0, noCase);
+ const MASK_TYPE mask1 = getMask(n->key0, noCase);
const MASK_TYPE caseMask = getCaseMask();
- size_t offset = start + n->msk_len - 1;
- size_t end = len;
- assert(offset < end);
-
-#if !defined(HAVE_AVX512)
- hwlm_error_t rv;
-
- if (end - offset < CHUNKSIZE) {
- rv = scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, offset,
- end);
+ size_t offset = start + n->msk_len - 1;
+ size_t end = len;
+ assert(offset < end);
+
+#if !defined(HAVE_AVX512)
+ hwlm_error_t rv;
+
+ if (end - offset < CHUNKSIZE) {
+ rv = scanSingleShort(n, buf, len, noCase, caseMask, mask1, cbi, offset,
+ end);
return rv;
}
- if (end - offset == CHUNKSIZE) {
- rv = scanSingleUnaligned(n, buf, len, offset, noCase, caseMask, mask1,
- cbi, offset, end);
+ if (end - offset == CHUNKSIZE) {
+ rv = scanSingleUnaligned(n, buf, len, offset, noCase, caseMask, mask1,
+ cbi, offset, end);
return rv;
}
uintptr_t data = (uintptr_t)buf;
- uintptr_t s2Start = ROUNDUP_N(data + offset, CHUNKSIZE) - data;
+ uintptr_t s2Start = ROUNDUP_N(data + offset, CHUNKSIZE) - data;
uintptr_t last = data + end;
uintptr_t s2End = ROUNDDOWN_N(last, CHUNKSIZE) - data;
- uintptr_t s3Start = end - CHUNKSIZE;
+ uintptr_t s3Start = end - CHUNKSIZE;
- if (offset != s2Start) {
+ if (offset != s2Start) {
// first scan out to the fast scan starting point
DEBUG_PRINTF("stage 1: -> %zu\n", s2Start);
- rv = scanSingleUnaligned(n, buf, len, offset, noCase, caseMask, mask1,
- cbi, offset, s2Start);
+ rv = scanSingleUnaligned(n, buf, len, offset, noCase, caseMask, mask1,
+ cbi, offset, s2Start);
RETURN_IF_TERMINATED(rv);
}
@@ -196,70 +196,70 @@ hwlm_error_t scanSingleMain(const struct noodTable *n, const u8 *buf,
// scan as far as we can, bounded by the last point this key can
// possibly match
DEBUG_PRINTF("fast: ~ %zu -> %zu\n", s2Start, s2End);
- rv = scanSingleFast(n, buf, len, noCase, caseMask, mask1, cbi, s2Start,
- s2End);
+ rv = scanSingleFast(n, buf, len, noCase, caseMask, mask1, cbi, s2Start,
+ s2End);
RETURN_IF_TERMINATED(rv);
}
// if we are done bail out
- if (s2End == len) {
+ if (s2End == len) {
return HWLM_SUCCESS;
}
- DEBUG_PRINTF("stage 3: %zu -> %zu\n", s2End, len);
- rv = scanSingleUnaligned(n, buf, len, s3Start, noCase, caseMask, mask1, cbi,
- s2End, len);
+ DEBUG_PRINTF("stage 3: %zu -> %zu\n", s2End, len);
+ rv = scanSingleUnaligned(n, buf, len, s3Start, noCase, caseMask, mask1, cbi,
+ s2End, len);
return rv;
-#else // HAVE_AVX512
- return scanSingle512(n, buf, len, noCase, caseMask, mask1, cbi, offset,
- end);
-#endif
+#else // HAVE_AVX512
+ return scanSingle512(n, buf, len, noCase, caseMask, mask1, cbi, offset,
+ end);
+#endif
}
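
On the non-AVX512 path, scanSingleMain splits the buffer into three stages: an unaligned head up to the first CHUNKSIZE boundary, an aligned fast loop over whole chunks, and an unaligned tail re-read from end - CHUNKSIZE. A sketch of that boundary arithmetic, with ROUNDUP_N/ROUNDDOWN_N written out by hand and an illustrative chunk size:

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK 32  /* stand-in for CHUNKSIZE; must be a power of two */

    int main(void) {
        const uint8_t buf[1000] = {0};
        size_t offset = 5, end = sizeof(buf);
        uintptr_t data = (uintptr_t)buf;

        /* ROUNDUP_N(data + offset, CHUNK) - data */
        uintptr_t s2Start = ((data + offset + CHUNK - 1) & ~(uintptr_t)(CHUNK - 1)) - data;
        /* ROUNDDOWN_N(data + end, CHUNK) - data */
        uintptr_t s2End   = ((data + end) & ~(uintptr_t)(CHUNK - 1)) - data;
        uintptr_t s3Start = end - CHUNK;

        printf("stage 1 (unaligned head): %zu -> %zu\n", offset, (size_t)s2Start);
        printf("stage 2 (aligned fast):   %zu -> %zu\n", (size_t)s2Start, (size_t)s2End);
        printf("stage 3 (unaligned tail): %zu -> %zu\n", (size_t)s3Start, end);
        return 0;
    }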
static really_inline
-hwlm_error_t scanDoubleMain(const struct noodTable *n, const u8 *buf,
- size_t len, size_t start, bool noCase,
+hwlm_error_t scanDoubleMain(const struct noodTable *n, const u8 *buf,
+ size_t len, size_t start, bool noCase,
const struct cb_info *cbi) {
// we stop scanning for the key-fragment when the rest of the key can't
// possibly fit in the remaining buffer
- size_t end = len - n->key_offset + 2;
-
- // the first place the key can match
- size_t offset = start + n->msk_len - n->key_offset;
+ size_t end = len - n->key_offset + 2;
+ // the first place the key can match
+ size_t offset = start + n->msk_len - n->key_offset;
+
const MASK_TYPE caseMask = getCaseMask();
- const MASK_TYPE mask1 = getMask(n->key0, noCase);
- const MASK_TYPE mask2 = getMask(n->key1, noCase);
-
-#if !defined(HAVE_AVX512)
- hwlm_error_t rv;
-
- if (end - offset < CHUNKSIZE) {
- rv = scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
- offset, end);
+ const MASK_TYPE mask1 = getMask(n->key0, noCase);
+ const MASK_TYPE mask2 = getMask(n->key1, noCase);
+
+#if !defined(HAVE_AVX512)
+ hwlm_error_t rv;
+
+ if (end - offset < CHUNKSIZE) {
+ rv = scanDoubleShort(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
+ offset, end);
return rv;
}
- if (end - offset == CHUNKSIZE) {
- rv = scanDoubleUnaligned(n, buf, len, offset, noCase, caseMask, mask1,
- mask2, cbi, offset, end);
+ if (end - offset == CHUNKSIZE) {
+ rv = scanDoubleUnaligned(n, buf, len, offset, noCase, caseMask, mask1,
+ mask2, cbi, offset, end);
return rv;
}
uintptr_t data = (uintptr_t)buf;
- uintptr_t s2Start = ROUNDUP_N(data + offset, CHUNKSIZE) - data;
+ uintptr_t s2Start = ROUNDUP_N(data + offset, CHUNKSIZE) - data;
uintptr_t s1End = s2Start + 1;
uintptr_t last = data + end;
uintptr_t s2End = ROUNDDOWN_N(last, CHUNKSIZE) - data;
uintptr_t s3Start = end - CHUNKSIZE;
- uintptr_t off = offset;
+ uintptr_t off = offset;
- if (s2Start != off) {
+ if (s2Start != off) {
// first scan out to the fast scan starting point plus one char past to
// catch the key on the overlap
- DEBUG_PRINTF("stage 1: %zu -> %zu\n", off, s2Start);
- rv = scanDoubleUnaligned(n, buf, len, offset, noCase, caseMask, mask1,
- mask2, cbi, off, s1End);
+ DEBUG_PRINTF("stage 1: %zu -> %zu\n", off, s2Start);
+ rv = scanDoubleUnaligned(n, buf, len, offset, noCase, caseMask, mask1,
+ mask2, cbi, off, s1End);
RETURN_IF_TERMINATED(rv);
}
off = s1End;
@@ -273,8 +273,8 @@ hwlm_error_t scanDoubleMain(const struct noodTable *n, const u8 *buf,
// scan as far as we can, bounded by the last point this key can
// possibly match
DEBUG_PRINTF("fast: ~ %zu -> %zu\n", s2Start, s3Start);
- rv = scanDoubleFast(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
- s2Start, s2End);
+ rv = scanDoubleFast(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
+ s2Start, s2End);
RETURN_IF_TERMINATED(rv);
off = s2End;
}
@@ -285,158 +285,158 @@ hwlm_error_t scanDoubleMain(const struct noodTable *n, const u8 *buf,
}
DEBUG_PRINTF("stage 3: %zu -> %zu\n", s3Start, end);
- rv = scanDoubleUnaligned(n, buf, len, s3Start, noCase, caseMask, mask1,
- mask2, cbi, off, end);
+ rv = scanDoubleUnaligned(n, buf, len, s3Start, noCase, caseMask, mask1,
+ mask2, cbi, off, end);
return rv;
-#else // AVX512
- return scanDouble512(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
- offset, end);
-#endif // AVX512
+#else // AVX512
+ return scanDouble512(n, buf, len, noCase, caseMask, mask1, mask2, cbi,
+ offset, end);
+#endif // AVX512
}
static really_inline
-hwlm_error_t scanSingleNoCase(const struct noodTable *n, const u8 *buf,
- size_t len, size_t start,
+hwlm_error_t scanSingleNoCase(const struct noodTable *n, const u8 *buf,
+ size_t len, size_t start,
const struct cb_info *cbi) {
- return scanSingleMain(n, buf, len, start, 1, cbi);
+ return scanSingleMain(n, buf, len, start, 1, cbi);
}
static really_inline
-hwlm_error_t scanSingleCase(const struct noodTable *n, const u8 *buf,
- size_t len, size_t start,
+hwlm_error_t scanSingleCase(const struct noodTable *n, const u8 *buf,
+ size_t len, size_t start,
const struct cb_info *cbi) {
- return scanSingleMain(n, buf, len, start, 0, cbi);
+ return scanSingleMain(n, buf, len, start, 0, cbi);
}
// Single-character specialisation, used when keyLen = 1
static really_inline
-hwlm_error_t scanSingle(const struct noodTable *n, const u8 *buf, size_t len,
- size_t start, bool noCase, const struct cb_info *cbi) {
- if (!ourisalpha(n->key0)) {
+hwlm_error_t scanSingle(const struct noodTable *n, const u8 *buf, size_t len,
+ size_t start, bool noCase, const struct cb_info *cbi) {
+ if (!ourisalpha(n->key0)) {
noCase = 0; // force noCase off if we don't have an alphabetic char
}
// kinda ugly, but this forces constant propagation
if (noCase) {
- return scanSingleNoCase(n, buf, len, start, cbi);
+ return scanSingleNoCase(n, buf, len, start, cbi);
} else {
- return scanSingleCase(n, buf, len, start, cbi);
+ return scanSingleCase(n, buf, len, start, cbi);
}
}
static really_inline
-hwlm_error_t scanDoubleNoCase(const struct noodTable *n, const u8 *buf,
- size_t len, size_t start,
+hwlm_error_t scanDoubleNoCase(const struct noodTable *n, const u8 *buf,
+ size_t len, size_t start,
const struct cb_info *cbi) {
- return scanDoubleMain(n, buf, len, start, 1, cbi);
+ return scanDoubleMain(n, buf, len, start, 1, cbi);
}
static really_inline
-hwlm_error_t scanDoubleCase(const struct noodTable *n, const u8 *buf,
- size_t len, size_t start,
+hwlm_error_t scanDoubleCase(const struct noodTable *n, const u8 *buf,
+ size_t len, size_t start,
const struct cb_info *cbi) {
- return scanDoubleMain(n, buf, len, start, 0, cbi);
+ return scanDoubleMain(n, buf, len, start, 0, cbi);
}
static really_inline
-hwlm_error_t scanDouble(const struct noodTable *n, const u8 *buf, size_t len,
- size_t start, bool noCase, const struct cb_info *cbi) {
+hwlm_error_t scanDouble(const struct noodTable *n, const u8 *buf, size_t len,
+ size_t start, bool noCase, const struct cb_info *cbi) {
// kinda ugly, but this forces constant propagation
if (noCase) {
- return scanDoubleNoCase(n, buf, len, start, cbi);
+ return scanDoubleNoCase(n, buf, len, start, cbi);
} else {
- return scanDoubleCase(n, buf, len, start, cbi);
+ return scanDoubleCase(n, buf, len, start, cbi);
}
}
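
The NoCase/Case wrapper pairs exist only to hand scanSingleMain/scanDoubleMain a literal value for noCase, so that the really_inline bodies specialise and the case handling folds away at compile time. A small sketch of the same idiom; names and the per-byte work are illustrative, not Hyperscan's:

    #include <stdbool.h>
    #include <stddef.h>

    /* the flag-dependent work; inlined with a literal flag, the dead branch disappears */
    static inline int scan_main(const unsigned char *buf, size_t len, bool nocase) {
        int hits = 0;
        for (size_t i = 0; i < len; i++) {
            unsigned char c = nocase ? (unsigned char)(buf[i] | 0x20) : buf[i];
            hits += (c == 'a');
        }
        return hits;
    }

    static inline int scan_nocase(const unsigned char *buf, size_t len) {
        return scan_main(buf, len, true);   /* flag is a compile-time constant here */
    }
    static inline int scan_case(const unsigned char *buf, size_t len) {
        return scan_main(buf, len, false);
    }

    int scan(const unsigned char *buf, size_t len, bool nocase) {
        /* branch once on the runtime flag, then call the specialised wrapper */
        return nocase ? scan_nocase(buf, len) : scan_case(buf, len);
    }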
// main entry point for the scan code
static really_inline
-hwlm_error_t scan(const struct noodTable *n, const u8 *buf, size_t len,
- size_t start, char single, bool noCase,
- const struct cb_info *cbi) {
- if (len - start < n->msk_len) {
+hwlm_error_t scan(const struct noodTable *n, const u8 *buf, size_t len,
+ size_t start, char single, bool noCase,
+ const struct cb_info *cbi) {
+ if (len - start < n->msk_len) {
// can't find string of length keyLen in a shorter buffer
return HWLM_SUCCESS;
}
- if (single) {
- return scanSingle(n, buf, len, start, noCase, cbi);
+ if (single) {
+ return scanSingle(n, buf, len, start, noCase, cbi);
} else {
- return scanDouble(n, buf, len, start, noCase, cbi);
+ return scanDouble(n, buf, len, start, noCase, cbi);
}
}
/** \brief Block-mode scanner. */
hwlm_error_t noodExec(const struct noodTable *n, const u8 *buf, size_t len,
- size_t start, HWLMCallback cb,
- struct hs_scratch *scratch) {
+ size_t start, HWLMCallback cb,
+ struct hs_scratch *scratch) {
assert(n && buf);
- struct cb_info cbi = {cb, n->id, scratch, 0};
- DEBUG_PRINTF("nood scan of %zu bytes for %*s @ %p\n", len, n->msk_len,
- (const char *)&n->cmp, buf);
-
- return scan(n, buf, len, start, n->single, n->nocase, &cbi);
+ struct cb_info cbi = {cb, n->id, scratch, 0};
+ DEBUG_PRINTF("nood scan of %zu bytes for %*s @ %p\n", len, n->msk_len,
+ (const char *)&n->cmp, buf);
+
+ return scan(n, buf, len, start, n->single, n->nocase, &cbi);
}
/** \brief Streaming-mode scanner. */
hwlm_error_t noodExecStreaming(const struct noodTable *n, const u8 *hbuf,
size_t hlen, const u8 *buf, size_t len,
- HWLMCallback cb, struct hs_scratch *scratch) {
+ HWLMCallback cb, struct hs_scratch *scratch) {
assert(n);
- if (len + hlen < n->msk_len) {
- DEBUG_PRINTF("not enough bytes for a match\n");
- return HWLM_SUCCESS;
- }
-
- struct cb_info cbi = {cb, n->id, scratch, 0};
- DEBUG_PRINTF("nood scan of %zu bytes (%zu hlen) for %*s @ %p\n", len, hlen,
- n->msk_len, (const char *)&n->cmp, buf);
-
- if (hlen && n->msk_len > 1) {
- /*
- * we have history, so build up a buffer from enough of the history
- * buffer plus what we've been given to scan. Since this is relatively
- * short, just check against msk+cmp per byte offset for matches.
- */
+ if (len + hlen < n->msk_len) {
+ DEBUG_PRINTF("not enough bytes for a match\n");
+ return HWLM_SUCCESS;
+ }
+
+ struct cb_info cbi = {cb, n->id, scratch, 0};
+ DEBUG_PRINTF("nood scan of %zu bytes (%zu hlen) for %*s @ %p\n", len, hlen,
+ n->msk_len, (const char *)&n->cmp, buf);
+
+ if (hlen && n->msk_len > 1) {
+ /*
+ * we have history, so build up a buffer from enough of the history
+ * buffer plus what we've been given to scan. Since this is relatively
+ * short, just check against msk+cmp per byte offset for matches.
+ */
assert(hbuf);
- u8 ALIGN_DIRECTIVE temp_buf[HWLM_LITERAL_MAX_LEN * 2];
- memset(temp_buf, 0, sizeof(temp_buf));
-
- assert(n->msk_len);
- size_t tl1 = MIN((size_t)n->msk_len - 1, hlen);
- size_t tl2 = MIN((size_t)n->msk_len - 1, len);
-
- assert(tl1 + tl2 <= sizeof(temp_buf));
- assert(tl1 + tl2 >= n->msk_len);
- assert(tl1 <= sizeof(u64a));
- assert(tl2 <= sizeof(u64a));
- DEBUG_PRINTF("using %zu bytes of hist and %zu bytes of buf\n", tl1, tl2);
-
- unaligned_store_u64a(temp_buf,
- partial_load_u64a(hbuf + hlen - tl1, tl1));
- unaligned_store_u64a(temp_buf + tl1, partial_load_u64a(buf, tl2));
-
- for (size_t i = 0; i <= tl1 + tl2 - n->msk_len; i++) {
- u64a v = unaligned_load_u64a(temp_buf + i);
- if ((v & n->msk) == n->cmp) {
- size_t m_end = -tl1 + i + n->msk_len - 1;
- DEBUG_PRINTF("match @ %zu (i %zu)\n", m_end, i);
- hwlmcb_rv_t rv = cb(m_end, n->id, scratch);
- if (rv == HWLM_TERMINATE_MATCHING) {
- return HWLM_TERMINATED;
- }
- }
+ u8 ALIGN_DIRECTIVE temp_buf[HWLM_LITERAL_MAX_LEN * 2];
+ memset(temp_buf, 0, sizeof(temp_buf));
+
+ assert(n->msk_len);
+ size_t tl1 = MIN((size_t)n->msk_len - 1, hlen);
+ size_t tl2 = MIN((size_t)n->msk_len - 1, len);
+
+ assert(tl1 + tl2 <= sizeof(temp_buf));
+ assert(tl1 + tl2 >= n->msk_len);
+ assert(tl1 <= sizeof(u64a));
+ assert(tl2 <= sizeof(u64a));
+ DEBUG_PRINTF("using %zu bytes of hist and %zu bytes of buf\n", tl1, tl2);
+
+ unaligned_store_u64a(temp_buf,
+ partial_load_u64a(hbuf + hlen - tl1, tl1));
+ unaligned_store_u64a(temp_buf + tl1, partial_load_u64a(buf, tl2));
+
+ for (size_t i = 0; i <= tl1 + tl2 - n->msk_len; i++) {
+ u64a v = unaligned_load_u64a(temp_buf + i);
+ if ((v & n->msk) == n->cmp) {
+ size_t m_end = -tl1 + i + n->msk_len - 1;
+ DEBUG_PRINTF("match @ %zu (i %zu)\n", m_end, i);
+ hwlmcb_rv_t rv = cb(m_end, n->id, scratch);
+ if (rv == HWLM_TERMINATE_MATCHING) {
+ return HWLM_TERMINATED;
+ }
+ }
}
}
assert(buf);
cbi.offsetAdj = 0;
- return scan(n, buf, len, 0, n->single, n->nocase, &cbi);
+ return scan(n, buf, len, 0, n->single, n->nocase, &cbi);
}
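
In the streaming path, when there is history and the literal mask is longer than one byte, noodExecStreaming stitches the last msk_len-1 history bytes together with the first msk_len-1 bytes of the new block and probes the literal's msk/cmp at every offset spanning the boundary, before handing the whole new block to the normal scan. A compilable sketch of that stitch, with illustrative helpers rather than Hyperscan's partial_load_u64a/unaligned_store_u64a:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static uint64_t load_le(const uint8_t *p, size_t n) {
        uint64_t v = 0;
        memcpy(&v, p, n);           /* n <= 8, little-endian host assumed */
        return v;
    }

    void scan_boundary(const uint8_t *hist, size_t hlen,
                       const uint8_t *buf, size_t len,
                       size_t msk_len, uint64_t msk, uint64_t cmp) {
        uint8_t tmp[16] = {0};
        size_t tl1 = MIN(msk_len - 1, hlen);   /* trailing history bytes kept */
        size_t tl2 = MIN(msk_len - 1, len);    /* leading new bytes kept */
        memcpy(tmp, hist + hlen - tl1, tl1);
        memcpy(tmp + tl1, buf, tl2);
        for (size_t i = 0; i + msk_len <= tl1 + tl2; i++) {
            uint64_t v = load_le(tmp + i, msk_len);
            if ((v & msk) == cmp) {
                /* match end offset, relative to the start of the new block */
                printf("boundary match ending at %td\n",
                       (ptrdiff_t)(i + msk_len - 1) - (ptrdiff_t)tl1);
            }
        }
    }

    int main(void) {
        const uint8_t hist[] = "xyzab";
        const uint8_t block[] = "cdqq";
        /* 4-byte literal "abcd", full mask, little-endian cmp value */
        scan_boundary(hist, 5, block, 4, 4, 0xffffffffull,
                      (uint64_t)'a' | ((uint64_t)'b' << 8) |
                      ((uint64_t)'c' << 16) | ((uint64_t)'d' << 24));
        return 0;
    }

Run against these inputs, the sketch reports one boundary match ending at offset 1 of the new block, which is where "abcd" straddling the history/block join finishes.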