author    shadchin <shadchin@yandex-team.com>  2023-01-05 21:33:07 +0300
committer shadchin <shadchin@yandex-team.com>  2023-01-05 21:33:07 +0300
commit    97c1b4fc326f4a9435bc743e56681bb148b17c37 (patch)
tree      c7d8dd514d722f747f5c6584343aeeddd5d2f8be /contrib/libs/lzma/liblzma/common/memcmplen.h
parent    d7b4c95518652104d51c915f0fd20fd3e954135c (diff)
Update contrib/libs/lzma to 5.4.0
Diffstat (limited to 'contrib/libs/lzma/liblzma/common/memcmplen.h')
-rw-r--r--  contrib/libs/lzma/liblzma/common/memcmplen.h  |  12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/contrib/libs/lzma/liblzma/common/memcmplen.h b/contrib/libs/lzma/liblzma/common/memcmplen.h
index dcfd8d6f89..5a481a02c9 100644
--- a/contrib/libs/lzma/liblzma/common/memcmplen.h
+++ b/contrib/libs/lzma/liblzma/common/memcmplen.h
@@ -51,10 +51,6 @@ lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
 			|| (defined(__INTEL_COMPILER) && defined(__x86_64__)) \
 			|| (defined(__INTEL_COMPILER) && defined(_M_X64)) \
 			|| (defined(_MSC_VER) && defined(_M_X64)))
-	// NOTE: This will use 64-bit unaligned access which
-	// TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit, but
-	// it's convenient here at least as long as it's x86-64 only.
-	//
 	// I keep this x86-64 only for now since that's where I know this
 	// to be a good method. This may be fine on other 64-bit CPUs too.
 	// On big endian one should use xor instead of subtraction and switch
@@ -80,12 +76,12 @@ lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
 #elif defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
 		&& defined(HAVE__MM_MOVEMASK_EPI8) \
-		&& ((defined(__GNUC__) && defined(__SSE2_MATH__)) \
-			|| (defined(__INTEL_COMPILER) && defined(__SSE2__)) \
+		&& (defined(__SSE2__) \
 			|| (defined(_MSC_VER) && defined(_M_IX86_FP) \
 				&& _M_IX86_FP >= 2))
-	// NOTE: Like above, this will use 128-bit unaligned access which
-	// TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit.
+	// NOTE: This will use 128-bit unaligned access which
+	// TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit,
+	// but it's convenient here since this is x86-only.
 	//
 	// SSE2 version for 32-bit and 64-bit x86. On x86-64 the above
 	// version is sometimes significantly faster and sometimes
 	// slightly slower.
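
For context on the method the first hunk's comments refer to: the x86-64 branch extends a match eight bytes at a time, locating the first mismatching byte with a subtraction followed by a count-trailing-zeros. Below is a minimal little-endian sketch of that idea, not the actual liblzma code: the name my_memcmplen64 is hypothetical, __builtin_ctzll assumes GCC or Clang (upstream selects the builtin or _BitScanForward64 via compiler checks), the unaligned loads are done with memcpy for portability, and limit is assumed to be a multiple of 8 so whole words can always be read.

#include <stdint.h>
#include <string.h>

/* Hypothetical sketch of the 64-bit method (little endian, GCC/Clang).
 * Extends an already-known match of length `len` up to `limit` bytes. */
static size_t
my_memcmplen64(const uint8_t *buf1, const uint8_t *buf2,
		size_t len, size_t limit)
{
	while (len < limit) {
		uint64_t a, b;
		memcpy(&a, buf1 + len, sizeof(a));  /* unaligned 64-bit loads */
		memcpy(&b, buf2 + len, sizeof(b));

		/* The difference is zero iff all eight bytes match; when the
		 * low k bytes match, the low 8k bits of the difference are
		 * zero and the first differing byte stays nonzero. */
		const uint64_t x = a - b;
		if (x != 0) {
			/* ctz / 8 gives the index of the first mismatch. */
			len += (size_t)__builtin_ctzll(x) >> 3;
			return len < limit ? len : limit;
		}

		len += sizeof(uint64_t);
	}

	return limit;
}

On a big-endian target one would xor the two words and use __builtin_clzll instead, exactly as the retained comment says.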
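The second hunk's preprocessor change only widens who gets the SSE2 branch (any compiler defining __SSE2__, rather than GCC with __SSE2_MATH__ or ICC specifically); the technique itself is unchanged: compare 16 bytes per iteration with unaligned loads and find the first mismatch via _mm_movemask_epi8. A sketch under the same caveats as above (hypothetical name, GCC/Clang builtin, limit assumed to permit full 16-byte reads):

#include <emmintrin.h>  /* SSE2 intrinsics */
#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch of the SSE2 method, not the liblzma source. */
static size_t
my_memcmplen_sse2(const uint8_t *buf1, const uint8_t *buf2,
		size_t len, size_t limit)
{
	while (len < limit) {
		/* Unaligned 128-bit loads; this is the access pattern the
		 * NOTE about TUKLIB_FAST_UNALIGNED_ACCESS refers to. */
		const __m128i a = _mm_loadu_si128((const __m128i *)(buf1 + len));
		const __m128i b = _mm_loadu_si128((const __m128i *)(buf2 + len));

		/* cmpeq sets each equal byte to 0xFF; movemask packs the 16
		 * byte sign bits into an int. XOR with 0xFFFF leaves one set
		 * bit per mismatching byte. */
		const uint32_t x = 0xFFFF
				^ (uint32_t)_mm_movemask_epi8(_mm_cmpeq_epi8(a, b));

		if (x != 0) {
			len += (size_t)__builtin_ctz(x);  /* first mismatch */
			return len < limit ? len : limit;
		}

		len += 16;
	}

	return limit;
}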