author | Ivan Blinkov <ivan@blinkov.ru> | 2022-02-10 16:47:10 +0300 |
---|---|---|
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:47:10 +0300 |
commit | 1aeb9a455974457866f78722ad98114bafc84e8a (patch) | |
tree | e4340eaf1668684d83a0a58c36947c5def5350ad /contrib/libs/hyperscan/src/hwlm/noodle_engine_sse.c | |
parent | bd5ef432f5cfb1e18851381329d94665a4c22470 (diff) | |
download | ydb-1aeb9a455974457866f78722ad98114bafc84e8a.tar.gz | |
Restoring authorship annotation for Ivan Blinkov <ivan@blinkov.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/hyperscan/src/hwlm/noodle_engine_sse.c')
-rw-r--r-- | contrib/libs/hyperscan/src/hwlm/noodle_engine_sse.c | 56 |
1 file changed, 28 insertions, 28 deletions
diff --git a/contrib/libs/hyperscan/src/hwlm/noodle_engine_sse.c b/contrib/libs/hyperscan/src/hwlm/noodle_engine_sse.c
index 7cd53d7ced..0fc33bc342 100644
--- a/contrib/libs/hyperscan/src/hwlm/noodle_engine_sse.c
+++ b/contrib/libs/hyperscan/src/hwlm/noodle_engine_sse.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, Intel Corporation
+ * Copyright (c) 2015-2017, Intel Corporation
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -38,8 +38,8 @@ static really_inline m128 getCaseMask(void) {
 }
 
 static really_inline
-hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, bool noCase, m128 caseMask, m128 mask1,
+hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
+                             size_t len, bool noCase, m128 caseMask, m128 mask1,
                              const struct cb_info *cbi, size_t start,
                              size_t end) {
     const u8 *d = buf + start;
@@ -67,11 +67,11 @@ hwlm_error_t scanSingleShort(const struct noodTable *n, const u8 *buf,
 }
 
 static really_inline
-hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset, bool noCase,
-                                 m128 caseMask, m128 mask1,
-                                 const struct cb_info *cbi, size_t start,
-                                 size_t end) {
+hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
+                                 size_t len, size_t offset, bool noCase,
+                                 m128 caseMask, m128 mask1,
+                                 const struct cb_info *cbi, size_t start,
+                                 size_t end) {
     const u8 *d = buf + offset;
     DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
     const size_t l = end - start;
@@ -97,10 +97,10 @@ hwlm_error_t scanSingleUnaligned(const struct noodTable *n, const u8 *buf,
 }
 
 static really_inline
-hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
-                             size_t len, bool noCase, m128 caseMask, m128 mask1,
-                             m128 mask2, const struct cb_info *cbi,
-                             size_t start, size_t end) {
+hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
+                             size_t len, bool noCase, m128 caseMask, m128 mask1,
+                             m128 mask2, const struct cb_info *cbi,
+                             size_t start, size_t end) {
     const u8 *d = buf + start;
     size_t l = end - start;
     if (!l) {
@@ -115,8 +115,8 @@ hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
         v = and128(v, caseMask);
     }
 
-    u32 z = movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
-                               eq128(mask2, v)));
+    u32 z = movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
+                               eq128(mask2, v)));
 
     // mask out where we can't match
     u32 mask = (0xFFFF >> (16 - l));
@@ -128,11 +128,11 @@ hwlm_error_t scanDoubleShort(const struct noodTable *n, const u8 *buf,
 }
 
 static really_inline
-hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
-                                 size_t len, size_t offset, bool noCase,
-                                 m128 caseMask, m128 mask1, m128 mask2,
-                                 const struct cb_info *cbi, size_t start,
-                                 size_t end) {
+hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
+                                 size_t len, size_t offset, bool noCase,
+                                 m128 caseMask, m128 mask1, m128 mask2,
+                                 const struct cb_info *cbi, size_t start,
+                                 size_t end) {
     const u8 *d = buf + offset;
     DEBUG_PRINTF("start %zu end %zu offset %zu\n", start, end, offset);
     size_t l = end - start;
@@ -143,8 +143,8 @@ hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
         v = and128(v, caseMask);
     }
 
-    u32 z = movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
-                               eq128(mask2, v)));
+    u32 z = movemask128(and128(lshiftbyte_m128(eq128(mask1, v), 1),
+                               eq128(mask2, v)));
 
     // mask out where we can't match
     u32 buf_off = start - offset;
@@ -158,8 +158,8 @@ hwlm_error_t scanDoubleUnaligned(const struct noodTable *n, const u8 *buf,
 }
 
 static really_inline
-hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
-                            size_t len, bool noCase, m128 caseMask, m128 mask1,
+hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
+                            size_t len, bool noCase, m128 caseMask, m128 mask1,
                             const struct cb_info *cbi, size_t start,
                             size_t end) {
     const u8 *d = buf + start, *e = buf + end;
@@ -179,9 +179,9 @@ hwlm_error_t scanSingleFast(const struct noodTable *n, const u8 *buf,
 }
 
 static really_inline
-hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
-                            size_t len, bool noCase, m128 caseMask, m128 mask1,
-                            m128 mask2, const struct cb_info *cbi, size_t start,
+hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
+                            size_t len, bool noCase, m128 caseMask, m128 mask1,
+                            m128 mask2, const struct cb_info *cbi, size_t start,
                             size_t end) {
     const u8 *d = buf + start, *e = buf + end;
     assert(d < e);
@@ -191,8 +191,8 @@ hwlm_error_t scanDoubleFast(const struct noodTable *n, const u8 *buf,
         m128 v = noCase ? and128(load128(d), caseMask) : load128(d);
         m128 z1 = eq128(mask1, v);
         m128 z2 = eq128(mask2, v);
-        u32 z = movemask128(and128(palignr(z1, lastz1, 15), z2));
-        lastz1 = z1;
+        u32 z = movemask128(and128(palignr(z1, lastz1, 15), z2));
+        lastz1 = z1;
 
         // On large packet buffers, this prefetch appears to get us about 2%.
         __builtin_prefetch(d + 128);
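
The change itself is whitespace-only (restoring trailing whitespace as part of the authorship-annotation restore), but the touched lines are the core of noodle's double-literal scan: `eq128(mask1, v)` and `eq128(mask2, v)` mark the bytes equal to each literal character, the first result is shifted up one byte (`lshiftbyte_m128`, or `palignr` against the previous block's result in `scanDoubleFast` so matches carry across 16-byte boundaries), and the AND of the two is non-zero only where the two-character literal ends; `movemask128` then compresses that into a 16-bit match mask. The sketch below is not hyperscan code — it re-creates the same trick with plain SSE2 intrinsics (`_mm_cmpeq_epi8`, `_mm_slli_si128`, `_mm_and_si128`, `_mm_movemask_epi8`), which is roughly what the `eq128`/`lshiftbyte_m128`/`and128`/`movemask128` wrappers map onto; the function and variable names are illustrative only.

```c
/* Standalone sketch (not hyperscan code): find where the two-byte literal
 * c1,c2 ends inside a single 16-byte block, using the shift-and-AND trick
 * from scanDoubleShort/scanDoubleUnaligned above. */
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

/* Returns a bitmask with bit i set when block16[i-1] == c1 and block16[i] == c2. */
static uint32_t match_pair_block(const char *block16, char c1, char c2) {
    __m128i v  = _mm_loadu_si128((const __m128i *)block16);
    __m128i m1 = _mm_set1_epi8(c1);
    __m128i m2 = _mm_set1_epi8(c2);

    /* z1 marks bytes equal to c1; shifting it up one byte aligns each c1 hit
     * with the byte that follows it, so ANDing with the c2 hits leaves only
     * the positions where the pair c1,c2 occurs. */
    __m128i z1   = _mm_cmpeq_epi8(m1, v);
    __m128i z2   = _mm_cmpeq_epi8(m2, v);
    __m128i pair = _mm_and_si128(_mm_slli_si128(z1, 1), z2);

    return (uint32_t)_mm_movemask_epi8(pair);
}

int main(void) {
    const char block[] = "xxabxxxxabxxxxxx"; /* "ab" ends at offsets 3 and 9 */
    uint32_t z = match_pair_block(block, 'a', 'b');
    printf("match mask: 0x%04x\n", (unsigned)z); /* prints 0x0208 (bits 3 and 9) */
    return 0;
}
```

On x86-64 (where SSE2 is baseline) this compiles with a plain `gcc`/`clang` invocation and prints `match mask: 0x0208`. The main difference from the real scanners is the block boundary: `scanDoubleFast` uses `palignr(z1, lastz1, 15)` instead of a plain byte shift, so a literal whose first character sits in the last byte of the previous 16-byte block is still detected.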