author    thegeorg <[email protected]>  2023-02-27 12:38:58 +0300
committer thegeorg <[email protected]>  2023-02-27 12:38:58 +0300
commit    e82ffade6959fe8feaf41fe12a57d9f56873be40 (patch)
tree      784bf61d9ca6cab4beabc995186d9b41e762ae60 /contrib/libs/zstd/lib/common/mem.h
parent    e58cceed352de42c1526ab4eecdfeee158b1ede3 (diff)
Update contrib/libs/zstd to 1.5.4
Diffstat (limited to 'contrib/libs/zstd/lib/common/mem.h')
-rw-r--r--  contrib/libs/zstd/lib/common/mem.h | 85
1 file changed, 39 insertions(+), 46 deletions(-)
diff --git a/contrib/libs/zstd/lib/common/mem.h b/contrib/libs/zstd/lib/common/mem.h
index 85581c38478..98dd47a0476 100644
--- a/contrib/libs/zstd/lib/common/mem.h
+++ b/contrib/libs/zstd/lib/common/mem.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -133,21 +133,15 @@ MEM_STATIC size_t MEM_swapST(size_t in);
/*-**************************************************************
* Memory I/O Implementation
*****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS :
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The below switch allow to select different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
- * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory:
+ * Method 0 : always use `memcpy()`. Safe and portable.
+ * Method 1 : Use a compiler extension to allow unaligned access.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets depending on alignment.
- * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
+ * Default : method 1 if supported, else method 0
*/
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
-# if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
+# ifdef __GNUC__
# define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
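
For context, a minimal sketch of the two approaches the comment contrasts, using hypothetical helper names and <stdint.h> types in place of zstd's U32 (method 1 is shown after the next hunk):

#include <stdint.h>
#include <string.h>

/* Method 0: a fixed-size memcpy() is always defined behavior; GCC, Clang
   and MSVC typically compile it to a single load on targets that permit
   unaligned access. */
static uint32_t read32_v0(const void* ptr)
{
    uint32_t val;
    memcpy(&val, ptr, sizeof(val));
    return val;
}

/* Method 2: direct dereference. Undefined behavior in ISO C when ptr is
   not suitably aligned, and may fault on strict-alignment targets. */
static uint32_t read32_v2(const void* ptr)
{
    return *(const uint32_t*)ptr;
}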
@@ -190,30 +184,19 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
- __pragma( pack(push, 1) )
- typedef struct { U16 v; } unalign16;
- typedef struct { U32 v; } unalign32;
- typedef struct { U64 v; } unalign64;
- typedef struct { size_t v; } unalignArch;
- __pragma( pack(pop) )
-#else
- typedef struct { U16 v; } __attribute__((packed)) unalign16;
- typedef struct { U32 v; } __attribute__((packed)) unalign32;
- typedef struct { U64 v; } __attribute__((packed)) unalign64;
- typedef struct { size_t v; } __attribute__((packed)) unalignArch;
-#endif
+typedef __attribute__((aligned(1))) U16 unalign16;
+typedef __attribute__((aligned(1))) U32 unalign32;
+typedef __attribute__((aligned(1))) U64 unalign64;
+typedef __attribute__((aligned(1))) size_t unalignArch;
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
-MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
+MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; }
+MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; }
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
-MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
-MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; }
#else
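
The aligned(1) typedefs above replace the old packed-struct wrappers for method 1. A self-contained sketch of the same GNU-extension technique, with hypothetical names; it requires GCC or Clang, matching the __GNUC__ guard introduced earlier:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Lowering the typedef's alignment to 1 tells the compiler the pointee
   may live at any address, so a plain dereference is emitted as an
   unaligned-safe load (GNU extension honored by GCC and Clang). */
typedef __attribute__((aligned(1))) uint32_t unaligned_u32;

int main(void)
{
    unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };
    /* Offset 1 is misaligned for a plain uint32_t*. */
    uint32_t v = *(const unaligned_u32*)(buf + 1);
    printf("0x%08" PRIx32 "\n", v);  /* 0x12345678 on little-endian */
    return 0;
}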
@@ -257,6 +240,14 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value)
#endif /* MEM_FORCE_MEMORY_ACCESS */
+MEM_STATIC U32 MEM_swap32_fallback(U32 in)
+{
+ return ((in << 24) & 0xff000000 ) |
+ ((in << 8) & 0x00ff0000 ) |
+ ((in >> 8) & 0x0000ff00 ) |
+ ((in >> 24) & 0x000000ff );
+}
+
MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER) /* Visual Studio */
@@ -265,22 +256,13 @@ MEM_STATIC U32 MEM_swap32(U32 in)
|| (defined(__clang__) && __has_builtin(__builtin_bswap32))
return __builtin_bswap32(in);
#else
- return ((in << 24) & 0xff000000 ) |
- ((in << 8) & 0x00ff0000 ) |
- ((in >> 8) & 0x0000ff00 ) |
- ((in >> 24) & 0x000000ff );
+ return MEM_swap32_fallback(in);
#endif
}
-MEM_STATIC U64 MEM_swap64(U64 in)
+MEM_STATIC U64 MEM_swap64_fallback(U64 in)
{
-#if defined(_MSC_VER) /* Visual Studio */
- return _byteswap_uint64(in);
-#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
- || (defined(__clang__) && __has_builtin(__builtin_bswap64))
- return __builtin_bswap64(in);
-#else
- return ((in << 56) & 0xff00000000000000ULL) |
+ return ((in << 56) & 0xff00000000000000ULL) |
((in << 40) & 0x00ff000000000000ULL) |
((in << 24) & 0x0000ff0000000000ULL) |
((in << 8) & 0x000000ff00000000ULL) |
@@ -288,6 +270,17 @@ MEM_STATIC U64 MEM_swap64(U64 in)
((in >> 24) & 0x0000000000ff0000ULL) |
((in >> 40) & 0x000000000000ff00ULL) |
((in >> 56) & 0x00000000000000ffULL);
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER) /* Visual Studio */
+ return _byteswap_uint64(in);
+#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+ || (defined(__clang__) && __has_builtin(__builtin_bswap64))
+ return __builtin_bswap64(in);
+#else
+ return MEM_swap64_fallback(in);
#endif
}
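
The last two hunks split each swap into a portable *_fallback plus a thin wrapper that prefers a compiler builtin. A quick standalone check, with hypothetical names, that the shift-and-mask fallback agrees with the builtin where one exists:

#include <assert.h>
#include <stdint.h>

/* Same shift-and-mask pattern as MEM_swap32_fallback above. */
static uint32_t swap32_fallback(uint32_t in)
{
    return ((in << 24) & 0xff000000u) |
           ((in <<  8) & 0x00ff0000u) |
           ((in >>  8) & 0x0000ff00u) |
           ((in >> 24) & 0x000000ffu);
}

int main(void)
{
    assert(swap32_fallback(0x12345678u) == 0x78563412u);
#if defined(__GNUC__) || defined(__clang__)
    /* Both paths must agree wherever the builtin is available. */
    assert(swap32_fallback(0xA1B2C3D4u) == __builtin_bswap32(0xA1B2C3D4u));
#endif
    return 0;
}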