author    tpashkin <tpashkin@yandex-team.ru>          2022-02-10 16:46:41 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru> 2022-02-10 16:46:41 +0300
commit    5475379a04e37df30085bd1724f1c57e3f40996f (patch)
tree      95d77e29785a3bd5be6260b1c9d226a551376ecf /contrib/libs/lz4
parent    c3d34b9b40eb534dfd2c549342274f3d61844688 (diff)
download  ydb-5475379a04e37df30085bd1724f1c57e3f40996f.tar.gz
Restoring authorship annotation for <tpashkin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/lz4')
-rw-r--r--  contrib/libs/lz4/README.md   |   6
-rw-r--r--  contrib/libs/lz4/lz4.c       | 448
-rw-r--r--  contrib/libs/lz4/lz4.h       | 224
-rw-r--r--  contrib/libs/lz4/lz4frame.c  |  50
-rw-r--r--  contrib/libs/lz4/lz4frame.h  |  18
-rw-r--r--  contrib/libs/lz4/lz4hc.c     | 174
-rw-r--r--  contrib/libs/lz4/lz4hc.h     |   6
-rw-r--r--  contrib/libs/lz4/ya.make     |   2
8 files changed, 464 insertions, 464 deletions
diff --git a/contrib/libs/lz4/README.md b/contrib/libs/lz4/README.md
index e2af868ff4..15dd32829d 100644
--- a/contrib/libs/lz4/README.md
+++ b/contrib/libs/lz4/README.md
@@ -57,8 +57,8 @@ The following build macro can be selected to adjust source code behavior at comp
- `LZ4_DISTANCE_MAX` : control the maximum offset that the compressor will allow.
Set to 65535 by default, which is the maximum value supported by lz4 format.
Reducing maximum distance will reduce opportunities for LZ4 to find matches,
- hence will produce a worse compression ratio.
- However, a smaller max distance can allow compatibility with specific decoders using limited memory budget.
+ hence will produce a worse compression ratio.
+ However, a smaller max distance can allow compatibility with specific decoders using limited memory budget.
This build macro only influences the compressed output of the compressor.
- `LZ4_DISABLE_DEPRECATE_WARNINGS` : invoking a deprecated function will make the compiler generate a warning.
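[Editor's note, not part of this change: a minimal sketch of how `LZ4_DISTANCE_MAX` from the list item above might be pinned at build time, assuming a hypothetical unity-build unit that compiles `lz4.c` directly; the same effect is normally achieved with `-DLZ4_DISTANCE_MAX=4096` on the compiler command line.]
```
/* build_config.c -- hypothetical unity-build unit (illustration only).
 * The macro must be visible before lz4.h/lz4.c are compiled. */
#define LZ4_DISTANCE_MAX 4096   /* must be <= 65535, the format's absolute max */
#include "lz4.c"
```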
@@ -91,7 +91,7 @@ The following build macro can be selected to adjust source code behavior at comp
lz4 source code can be amalgamated into a single file.
One can combine all source code into `lz4_all.c` by using following command:
```
-cat lz4.c lz4hc.c lz4frame.c > lz4_all.c
+cat lz4.c lz4hc.c lz4frame.c > lz4_all.c
```
(`cat` file order is important) then compile `lz4_all.c`.
All `*.h` files present in `/lib` remain necessary to compile `lz4_all.c`.
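[Editor's note: an equivalent single-file unit can also be produced with `#include` directives instead of `cat` -- a sketch, with the same ordering constraint.]
```
/* lz4_all.c -- amalgamation via includes (sketch; order matters, as with cat) */
#include "lz4.c"
#include "lz4hc.c"
#include "lz4frame.c"
```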
diff --git a/contrib/libs/lz4/lz4.c b/contrib/libs/lz4/lz4.c
index c864ba73ba..0c3f0c7e0a 100644
--- a/contrib/libs/lz4/lz4.c
+++ b/contrib/libs/lz4/lz4.c
@@ -113,7 +113,7 @@
#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
#endif
-#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
+#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
#include "lz4.h"
/* see also "memory routines" below */
@@ -209,47 +209,47 @@ void LZ4_free(void* p);
/*-************************************
-* Common Constants
-**************************************/
-#define MINMATCH 4
-
-#define WILDCOPYLENGTH 8
-#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
-#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
-#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
-#define FASTLOOP_SAFE_DISTANCE 64
-static const int LZ4_minLength = (MFLIMIT+1);
-
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
-
-#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
-#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
-# error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
-#endif
-
-#define ML_BITS 4
-#define ML_MASK ((1U<<ML_BITS)-1)
-#define RUN_BITS (8-ML_BITS)
-#define RUN_MASK ((1U<<RUN_BITS)-1)
-
-
-/*-************************************
-* Error detection
-**************************************/
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
-# include <assert.h>
-#else
-# ifndef assert
-# define assert(condition) ((void)0)
-# endif
-#endif
-
-#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
-
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
-# include <stdio.h>
+* Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
+# error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+#endif
+
+#define ML_BITS 4
+#define ML_MASK ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+/*-************************************
+* Error detection
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+# include <assert.h>
+#else
+# ifndef assert
+# define assert(condition) ((void)0)
+# endif
+#endif
+
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
+# include <stdio.h>
static int g_debuglog_enable = 1;
# define DEBUGLOG(l, ...) { \
if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
@@ -257,17 +257,17 @@ static const int LZ4_minLength = (MFLIMIT+1);
fprintf(stderr, __VA_ARGS__); \
fprintf(stderr, " \n"); \
} }
-#else
+#else
# define DEBUGLOG(l, ...) {} /* disabled */
-#endif
-
+#endif
+
static int LZ4_isAligned(const void* ptr, size_t alignment)
{
return ((size_t)ptr & (alignment -1)) == 0;
}
+
-
-/*-************************************
+/*-************************************
* Types
**************************************/
#include <limits.h>
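[Editor's note: the `LZ4_STATIC_ASSERT` macro in the hunk above relies on integer division by zero being rejected inside a constant expression. A standalone sketch of the same trick, separate from the lz4 sources:]
```
/* illustration only: a false condition expands to 1/0 inside an enum
 * initializer, an invalid constant expression => compile-time error */
#define MY_STATIC_ASSERT(c) { enum { my_assert = 1/(int)(!!(c)) }; }

void layout_checks(void)
{
    MY_STATIC_ASSERT(sizeof(int) >= 2);       /* ok: expands to 1/1 */
    /* MY_STATIC_ASSERT(sizeof(int) == 1); */ /* would fail to compile: 1/0 */
}
```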
@@ -423,11 +423,11 @@ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
#ifndef LZ4_FAST_DEC_LOOP
# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
# define LZ4_FAST_DEC_LOOP 1
-# elif defined(__aarch64__) && !defined(__clang__)
- /* On aarch64, we disable this optimization for clang because on certain
+# elif defined(__aarch64__) && !defined(__clang__)
+ /* On aarch64, we disable this optimization for clang because on certain
* mobile chipsets, performance is reduced with clang. For information
* refer to https://github.com/lz4/lz4/pull/707 */
-# define LZ4_FAST_DEC_LOOP 1
+# define LZ4_FAST_DEC_LOOP 1
# else
# define LZ4_FAST_DEC_LOOP 0
# endif
@@ -471,29 +471,29 @@ LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
}
-/* LZ4_memcpy_using_offset() presumes :
- * - dstEnd >= dstPtr + MINMATCH
- * - there is at least 8 bytes available to write after dstEnd */
+/* LZ4_memcpy_using_offset() presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 8 bytes available to write after dstEnd */
LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
BYTE v[8];
-
- assert(dstEnd >= dstPtr + MINMATCH);
-
+
+ assert(dstEnd >= dstPtr + MINMATCH);
+
switch(offset) {
case 1:
MEM_INIT(v, *srcPtr, 8);
- break;
+ break;
case 2:
LZ4_memcpy(v, srcPtr, 2);
LZ4_memcpy(&v[2], srcPtr, 2);
LZ4_memcpy(&v[4], v, 4);
- break;
+ break;
case 4:
LZ4_memcpy(v, srcPtr, 4);
LZ4_memcpy(&v[4], srcPtr, 4);
- break;
+ break;
default:
LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
return;
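[Editor's note: the specialized cases above (offsets 1, 2 and 4) precompute an 8-byte repeating pattern `v`. The reference semantics they must reproduce is a plain overlapping byte copy, sketched here with a hypothetical helper that is not part of lz4:]
```
#include <stddef.h>

/* overlapping match copy: the source trails the destination by `offset`
 * bytes, so small offsets replicate a short pattern
 * (offset==1 produces a run of a single byte) */
static void overlap_copy(unsigned char* dst, size_t offset, size_t len)
{
    const unsigned char* src = dst - offset;
    size_t i;
    for (i = 0; i < len; i++) dst[i] = src[i];
}
```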
@@ -527,7 +527,7 @@ static unsigned LZ4_NbCommonBytes (reg_t val)
# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
!defined(LZ4_FORCE_SW_BITCOUNT)
- return (unsigned)__builtin_ctzll((U64)val) >> 3;
+ return (unsigned)__builtin_ctzll((U64)val) >> 3;
# else
const U64 m = 0x0101010101010101ULL;
val ^= val - 1;
@@ -541,7 +541,7 @@ static unsigned LZ4_NbCommonBytes (reg_t val)
# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
!defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (unsigned)__builtin_ctz((U32)val) >> 3;
+ return (unsigned)__builtin_ctz((U32)val) >> 3;
# else
const U32 m = 0x01010101;
return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
@@ -552,7 +552,7 @@ static unsigned LZ4_NbCommonBytes (reg_t val)
# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
!defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
- return (unsigned)__builtin_clzll((U64)val) >> 3;
+ return (unsigned)__builtin_clzll((U64)val) >> 3;
# else
#if 1
/* this method is probably faster,
@@ -588,7 +588,7 @@ static unsigned LZ4_NbCommonBytes (reg_t val)
# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
!defined(LZ4_FORCE_SW_BITCOUNT)
- return (unsigned)__builtin_clz((U32)val) >> 3;
+ return (unsigned)__builtin_clz((U32)val) >> 3;
# else
val >>= 8;
val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
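[Editor's note: the four hunks above all touch `LZ4_NbCommonBytes()`, which converts a bit-level difference into a byte count. A condensed sketch of the builtin path, assuming GCC/Clang builtins and a little-endian layout:]
```
#include <stdint.h>

/* number of leading equal bytes (in memory order) of two 64-bit loads */
static unsigned nb_common_bytes_le(uint64_t a, uint64_t b)
{
    uint64_t const diff = a ^ b;  /* the first mismatching byte sets the low bits */
    if (diff == 0) return 8;      /* all eight bytes match */
    return (unsigned)__builtin_ctzll(diff) >> 3;  /* bit count -> byte count */
}
```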
@@ -684,11 +684,11 @@ int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
extern "C" {
#endif
-int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
-int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
- int compressedSize, int maxOutputSize,
- const void* dictStart, size_t dictSize);
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize);
#if defined (__cplusplus) && !defined(ONLY_COMPRESS)
}
@@ -724,17 +724,17 @@ LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tab
}
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
-{
- switch (tableType)
- {
- default: /* fallthrough */
- case clearedTable: { /* illegal! */ assert(0); return; }
- case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
- case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
- case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
- }
-}
-
+{
+ switch (tableType)
+ {
+ default: /* fallthrough */
+ case clearedTable: { /* illegal! */ assert(0); return; }
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
+ }
+}
+
LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
{
switch (tableType)
@@ -795,28 +795,28 @@ static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType
{ const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
}
-LZ4_FORCE_INLINE const BYTE*
-LZ4_getPosition(const BYTE* p,
- const void* tableBase, tableType_t tableType,
- const BYTE* srcBase)
+LZ4_FORCE_INLINE const BYTE*
+LZ4_getPosition(const BYTE* p,
+ const void* tableBase, tableType_t tableType,
+ const BYTE* srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
-LZ4_FORCE_INLINE void
-LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
- const int inputSize,
- const tableType_t tableType) {
+LZ4_FORCE_INLINE void
+LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
+ const int inputSize,
+ const tableType_t tableType) {
/* If the table hasn't been used, it's guaranteed to be zeroed out, and is
* therefore safe to use no matter what mode we're in. Otherwise, we figure
* out if it's safe to leave as is or whether it needs to be reset.
*/
if ((tableType_t)cctx->tableType != clearedTable) {
- assert(inputSize >= 0);
+ assert(inputSize >= 0);
if ((tableType_t)cctx->tableType != tableType
- || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
- || ((tableType == byU32) && cctx->currentOffset > 1 GB)
+ || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
+ || ((tableType == byU32) && cctx->currentOffset > 1 GB)
|| tableType == byPtr
|| inputSize >= 4 KB)
{
@@ -901,8 +901,8 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
assert(ip != NULL);
/* If init conditions are not met, we don't have to mark stream
* as having dirty context, since no action was taken yet */
- if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
- if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
+ if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
+ if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
assert(acceleration >= 1);
@@ -930,7 +930,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
for ( ; ; ) {
const BYTE* match;
BYTE* token;
- const BYTE* filledIp;
+ const BYTE* filledIp;
/* Find a match */
if (tableType == byPtr) {
@@ -999,14 +999,14 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
forwardH = LZ4_hashPosition(forwardIp, tableType);
LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
- DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex);
- if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
+ DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex);
+ if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
assert(matchIndex < current);
- if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
- && (matchIndex+LZ4_DISTANCE_MAX < current)) {
- continue;
- } /* too far */
- assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
+ if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
+ && (matchIndex+LZ4_DISTANCE_MAX < current)) {
+ continue;
+ } /* too far */
+ assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
if (LZ4_read32(match) == LZ4_read32(ip)) {
if (maybe_extMem) offset = current - matchIndex;
@@ -1017,16 +1017,16 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
}
/* Catch up */
- filledIp = ip;
+ filledIp = ip;
while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
/* Encode Literals */
{ unsigned const litLength = (unsigned)(ip - anchor);
token = op++;
if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
- (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
+ (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
- }
+ }
if ((outputDirective == fillOutput) &&
(unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
op--;
@@ -1097,26 +1097,26 @@ _next_match:
}
if ((outputDirective) && /* Check output buffer overflow */
- (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
+ (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
if (outputDirective == fillOutput) {
/* Match description too long : reduce it */
- U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+ U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
ip -= matchCode - newMatchCode;
- assert(newMatchCode < matchCode);
+ assert(newMatchCode < matchCode);
matchCode = newMatchCode;
- if (unlikely(ip <= filledIp)) {
- /* We have already filled up to filledIp so if ip ends up less than filledIp
- * we have positions in the hash table beyond the current position. This is
- * a problem if we reuse the hash table. So we have to remove these positions
- * from the hash table.
- */
- const BYTE* ptr;
- DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
- for (ptr = ip; ptr <= filledIp; ++ptr) {
- U32 const h = LZ4_hashPosition(ptr, tableType);
- LZ4_clearHash(h, cctx->hashTable, tableType);
- }
- }
+ if (unlikely(ip <= filledIp)) {
+ /* We have already filled up to filledIp so if ip ends up less than filledIp
+ * we have positions in the hash table beyond the current position. This is
+ * a problem if we reuse the hash table. So we have to remove these positions
+ * from the hash table.
+ */
+ const BYTE* ptr;
+ DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
+ for (ptr = ip; ptr <= filledIp; ++ptr) {
+ U32 const h = LZ4_hashPosition(ptr, tableType);
+ LZ4_clearHash(h, cctx->hashTable, tableType);
+ }
+ }
} else {
assert(outputDirective == limitedOutput);
return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
@@ -1136,8 +1136,8 @@ _next_match:
} else
*token += (BYTE)(matchCode);
}
- /* Ensure we have enough space for the last literals. */
- assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
+ /* Ensure we have enough space for the last literals. */
+ assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
anchor = ip;
@@ -1187,7 +1187,7 @@ _next_match:
LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
assert(matchIndex < current);
if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
- && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
+ && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
&& (LZ4_read32(match) == LZ4_read32(ip)) ) {
token=op++;
*token=0;
@@ -1297,7 +1297,7 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
}
} else {
- if (inputSize < LZ4_64Klimit) {
+ if (inputSize < LZ4_64Klimit) {
return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
} else {
const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
@@ -1447,8 +1447,8 @@ static size_t LZ4_stream_t_alignment(void)
LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
{
DEBUGLOG(5, "LZ4_initStream");
- if (buffer == NULL) { return NULL; }
- if (size < sizeof(LZ4_stream_t)) { return NULL; }
+ if (buffer == NULL) { return NULL; }
+ if (size < sizeof(LZ4_stream_t)) { return NULL; }
if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
return (LZ4_stream_t*)buffer;
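[Editor's note: a usage sketch for `LZ4_initStream()` with caller-provided storage; a declared `LZ4_stream_t` satisfies the size and alignment checks by construction.]
```
#include "lz4.h"

int compress_with_stack_state(const char* src, int srcSize,
                              char* dst, int dstCapacity)
{
    LZ4_stream_t state;   /* correctly sized and aligned by its type */
    LZ4_stream_t* const s = LZ4_initStream(&state, sizeof(state));
    if (s == NULL) return 0;   /* NULL buffer, short size, or misalignment */
    return LZ4_compress_fast_continue(s, src, dst, srcSize, dstCapacity, 1);
}
```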
@@ -1499,14 +1499,14 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
* there are only valid offsets in the window, which allows an optimization
* in LZ4_compress_fast_continue() where it uses noDictIssue even when the
* dictionary isn't a full 64k. */
- dict->currentOffset += 64 KB;
-
- if (dictSize < (int)HASH_UNIT) {
- return 0;
- }
+ dict->currentOffset += 64 KB;
+ if (dictSize < (int)HASH_UNIT) {
+ return 0;
+ }
+
if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
- base = dictEnd - dict->currentOffset;
+ base = dictEnd - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
dict->tableType = (U32)tableType;
@@ -1519,31 +1519,31 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
return (int)dict->dictSize;
}
-void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
- const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
- &(dictionaryStream->internal_donotuse);
-
- DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
- workingStream, dictionaryStream,
- dictCtx != NULL ? dictCtx->dictSize : 0);
-
- if (dictCtx != NULL) {
+void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
+ const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
+ &(dictionaryStream->internal_donotuse);
+
+ DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
+ workingStream, dictionaryStream,
+ dictCtx != NULL ? dictCtx->dictSize : 0);
+
+ if (dictCtx != NULL) {
/* If the current offset is zero, we will never look in the
* external dictionary context, since there is no value a table
* entry can take that indicate a miss. In that case, we need
* to bump the offset to something non-zero.
*/
- if (workingStream->internal_donotuse.currentOffset == 0) {
- workingStream->internal_donotuse.currentOffset = 64 KB;
- }
-
- /* Don't actually attach an empty dictionary.
- */
- if (dictCtx->dictSize == 0) {
- dictCtx = NULL;
+ if (workingStream->internal_donotuse.currentOffset == 0) {
+ workingStream->internal_donotuse.currentOffset = 64 KB;
}
+
+ /* Don't actually attach an empty dictionary.
+ */
+ if (dictCtx->dictSize == 0) {
+ dictCtx = NULL;
+ }
}
- workingStream->internal_donotuse.dictCtx = dictCtx;
+ workingStream->internal_donotuse.dictCtx = dictCtx;
}
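[Editor's note: the intended call pattern, per the comments above and the `LZ4LIB_STATIC_API` declaration in lz4.h (hence the `LZ4_STATIC_LINKING_ONLY` define): load a dictionary stream once, then attach it cheaply to a reset working stream before each compression. A sketch:]
```
#define LZ4_STATIC_LINKING_ONLY   /* LZ4_attach_dictionary */
#include "lz4.h"

/* dictStream is assumed to have been initialized once with LZ4_loadDict() */
int compress_using_dict(LZ4_stream_t* work, const LZ4_stream_t* dictStream,
                        const char* src, int srcSize, char* dst, int dstCap)
{
    LZ4_resetStream_fast(work);              /* cheap reuse of the context */
    LZ4_attach_dictionary(work, dictStream);
    return LZ4_compress_fast_continue(work, src, dst, srcSize, dstCap, 1);
}
```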
@@ -1675,8 +1675,8 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
- if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
- if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
+ if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
+ if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
if (safeBuffer == NULL) assert(dictSize == 0);
if (dictSize > 0)
@@ -1754,7 +1754,7 @@ LZ4_decompress_generic(
const size_t dictSize /* note : = 0 if noDict */
)
{
- if (src == NULL) { return -1; }
+ if (src == NULL) { return -1; }
{ const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
@@ -1783,13 +1783,13 @@ LZ4_decompress_generic(
/* Special cases */
assert(lowPrefix <= op);
- if ((endOnInput) && (unlikely(outputSize==0))) {
- /* Empty output buffer */
- if (partialDecoding) return 0;
- return ((srcSize==1) && (*ip==0)) ? 0 : -1;
- }
- if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
- if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
+ if ((endOnInput) && (unlikely(outputSize==0))) {
+ /* Empty output buffer */
+ if (partialDecoding) return 0;
+ return ((srcSize==1) && (*ip==0)) ? 0 : -1;
+ }
+ if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
+ if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
/* Currently the fast loop shows a regression on qualcomm arm chips. */
#if LZ4_FAST_DEC_LOOP
@@ -1802,7 +1802,7 @@ LZ4_decompress_generic(
while (1) {
/* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
- if (endOnInput) { assert(ip < iend); }
+ if (endOnInput) { assert(ip < iend); }
token = *ip++;
length = token >> ML_BITS; /* literal length */
@@ -1812,18 +1812,18 @@ LZ4_decompress_generic(
if (length == RUN_MASK) {
variable_length_error error = ok;
length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
- if (error == initial_error) { goto _output_error; }
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
- if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+ if (error == initial_error) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
/* copy literals */
cpy = op+length;
LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
if (endOnInput) { /* LZ4_decompress_safe() */
- if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
+ if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
LZ4_wildCopy32(op, ip, cpy);
} else { /* LZ4_decompress_fast() */
- if (cpy>oend-8) { goto safe_literal_copy; }
+ if (cpy>oend-8) { goto safe_literal_copy; }
LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
* it doesn't know input length, and only relies on end-of-block properties */
}
@@ -1833,7 +1833,7 @@ LZ4_decompress_generic(
if (endOnInput) { /* LZ4_decompress_safe() */
DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
/* We don't need to check oend, since we check it once for each loop below */
- if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
+ if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
/* Literals can only be 14, but hope compilers optimize if we copy by a register size */
LZ4_memcpy(op, ip, 16);
} else { /* LZ4_decompress_fast() */
@@ -1848,7 +1848,7 @@ LZ4_decompress_generic(
/* get offset */
offset = LZ4_readLE16(ip); ip+=2;
match = op - offset;
- assert(match <= op);
+ assert(match <= op);
/* get matchlength */
length = token & ML_MASK;
@@ -1858,7 +1858,7 @@ LZ4_decompress_generic(
if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
if (error != ok) { goto _output_error; }
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
length += MINMATCH;
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
goto safe_match_copy;
@@ -1870,12 +1870,12 @@ LZ4_decompress_generic(
}
/* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */
- if ((dict == withPrefix64k) || (match >= lowPrefix)) {
+ if ((dict == withPrefix64k) || (match >= lowPrefix)) {
if (offset >= 8) {
- assert(match >= lowPrefix);
- assert(match <= op);
- assert(op + 18 <= oend);
-
+ assert(match >= lowPrefix);
+ assert(match <= op);
+ assert(op + 18 <= oend);
+
LZ4_memcpy(op, match, 8);
LZ4_memcpy(op+8, match+8, 8);
LZ4_memcpy(op+16, match+16, 2);
@@ -1887,12 +1887,12 @@ LZ4_decompress_generic(
/* match starting within external dictionary */
if ((dict==usingExtDict) && (match < lowPrefix)) {
if (unlikely(op+length > oend-LASTLITERALS)) {
- if (partialDecoding) {
+ if (partialDecoding) {
DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
length = MIN(length, (size_t)(oend-op));
- } else {
- goto _output_error; /* end-of-block condition violated */
- } }
+ } else {
+ goto _output_error; /* end-of-block condition violated */
+ } }
if (length <= (size_t)(lowPrefix-match)) {
/* match fits entirely within external dictionary : just copy */
@@ -1907,7 +1907,7 @@ LZ4_decompress_generic(
if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
BYTE* const endOfMatch = op + restSize;
const BYTE* copyFrom = lowPrefix;
- while (op < endOfMatch) { *op++ = *copyFrom++; }
+ while (op < endOfMatch) { *op++ = *copyFrom++; }
} else {
LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
@@ -1980,11 +1980,11 @@ LZ4_decompress_generic(
/* decode literal length */
if (length == RUN_MASK) {
- variable_length_error error = ok;
+ variable_length_error error = ok;
length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
- if (error == initial_error) { goto _output_error; }
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
- if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+ if (error == initial_error) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
}
/* copy literals */
@@ -1996,43 +1996,43 @@ LZ4_decompress_generic(
if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) )
|| ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
{
- /* We've either hit the input parsing restriction or the output parsing restriction.
+ /* We've either hit the input parsing restriction or the output parsing restriction.
* In the normal scenario, decoding a full block, it must be the last sequence,
* otherwise it's an error (invalid input or dimensions).
* In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
- */
+ */
if (partialDecoding) {
- /* Since we are partial decoding we may be in this block because of the output parsing
- * restriction, which is not valid since the output buffer is allowed to be undersized.
- */
- assert(endOnInput);
+ /* Since we are partial decoding we may be in this block because of the output parsing
+ * restriction, which is not valid since the output buffer is allowed to be undersized.
+ */
+ assert(endOnInput);
DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
/* Finishing in the middle of a literals segment,
* due to lack of input.
- */
+ */
if (ip+length > iend) {
length = (size_t)(iend-ip);
cpy = op + length;
}
/* Finishing in the middle of a literals segment,
* due to lack of output space.
- */
- if (cpy > oend) {
- cpy = oend;
- assert(op<=oend);
- length = (size_t)(oend-op);
- }
+ */
+ if (cpy > oend) {
+ cpy = oend;
+ assert(op<=oend);
+ length = (size_t)(oend-op);
+ }
} else {
- /* We must be on the last sequence because of the parsing limitations so check
- * that we exactly regenerate the original size (must be exact when !endOnInput).
- */
- if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
- /* We must be on the last sequence (or invalid) because of the parsing limitations
- * so check that we exactly consume the input and don't overrun the output buffer.
- */
+ /* We must be on the last sequence because of the parsing limitations so check
+ * that we exactly regenerate the original size (must be exact when !endOnInput).
+ */
+ if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
+ /* We must be on the last sequence (or invalid) because of the parsing limitations
+ * so check that we exactly consume the input and don't overrun the output buffer.
+ */
if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
DEBUGLOG(6, "should have been last run of literals")
DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
@@ -2047,7 +2047,7 @@ LZ4_decompress_generic(
* When partialDecoding, it is EOF if we've either
* filled the output buffer or
* can't proceed with reading an offset for following match.
- */
+ */
if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
break;
}
@@ -2075,7 +2075,7 @@ LZ4_decompress_generic(
#if LZ4_FAST_DEC_LOOP
safe_match_copy:
#endif
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
/* match starting within external dictionary */
if ((dict==usingExtDict) && (match < lowPrefix)) {
if (unlikely(op+length > oend-LASTLITERALS)) {
@@ -2103,7 +2103,7 @@ LZ4_decompress_generic(
} }
continue;
}
- assert(match >= lowPrefix);
+ assert(match >= lowPrefix);
/* copy match within block */
cpy = op + length;
@@ -2115,17 +2115,17 @@ LZ4_decompress_generic(
const BYTE* const matchEnd = match + mlen;
BYTE* const copyEnd = op + mlen;
if (matchEnd > op) { /* overlap copy */
- while (op < copyEnd) { *op++ = *match++; }
+ while (op < copyEnd) { *op++ = *match++; }
} else {
LZ4_memcpy(op, match, mlen);
}
op = copyEnd;
- if (op == oend) { break; }
+ if (op == oend) { break; }
continue;
}
if (unlikely(offset<8)) {
- LZ4_write32(op, 0); /* silence msan warning when offset==0 */
+ LZ4_write32(op, 0); /* silence msan warning when offset==0 */
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
@@ -2141,27 +2141,27 @@ LZ4_decompress_generic(
if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
- if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
+ if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
if (op < oCopyLimit) {
LZ4_wildCopy8(op, match, oCopyLimit);
match += oCopyLimit - op;
op = oCopyLimit;
}
- while (op < cpy) { *op++ = *match++; }
+ while (op < cpy) { *op++ = *match++; }
} else {
LZ4_memcpy(op, match, 8);
- if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
+ if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
}
op = cpy; /* wildcopy correction */
}
/* end of decoding */
- if (endOnInput) {
+ if (endOnInput) {
DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
- } else {
+ } else {
return (int) (((const char*)ip)-src); /* Nb of input bytes read */
- }
+ }
/* Overflow error detected */
_output_error:
@@ -2276,7 +2276,7 @@ LZ4_streamDecode_t* LZ4_createStreamDecode(void)
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
- if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
+ if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
FREEMEM(LZ4_stream);
return 0;
}
@@ -2411,22 +2411,22 @@ int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressed
if (dictSize==0)
return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
if (dictStart+dictSize == dest) {
- if (dictSize >= 64 KB - 1) {
+ if (dictSize >= 64 KB - 1) {
return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
- }
- assert(dictSize >= 0);
- return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
}
- assert(dictSize >= 0);
- return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
}
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
if (dictSize==0 || dictStart+dictSize == dest)
return LZ4_decompress_fast(source, dest, originalSize);
- assert(dictSize >= 0);
- return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
+ assert(dictSize >= 0);
+ return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
}
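[Editor's note: the decoding counterpart as a sketch -- a block compressed against an external dictionary is decoded by passing the same dictionary bytes back in. As the dispatch above shows, placing the dictionary immediately before `dst` selects the faster prefix path.]
```
#include "lz4.h"

/* returns the decompressed size, or a negative value on error */
int decode_with_dict(const char* comp, int compressedSize,
                     char* dst, int dstCapacity,
                     const char* dict, int dictSize)
{
    return LZ4_decompress_safe_usingDict(comp, dst, compressedSize,
                                         dstCapacity, dict, dictSize);
}
```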
@@ -2438,9 +2438,9 @@ int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, in
{
return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
}
-int LZ4_compress(const char* src, char* dest, int srcSize)
+int LZ4_compress(const char* src, char* dest, int srcSize)
{
- return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
+ return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
}
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
{
diff --git a/contrib/libs/lz4/lz4.h b/contrib/libs/lz4/lz4.h
index 66b8547bbc..b3c838c593 100644
--- a/contrib/libs/lz4/lz4.h
+++ b/contrib/libs/lz4/lz4.h
@@ -46,7 +46,7 @@ extern "C" {
/**
Introduction
- LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core,
+ LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core,
scalable with multi-cores CPU. It features an extremely fast decoder, with speed in
multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
@@ -58,19 +58,19 @@ extern "C" {
- unbounded multiple steps (described as Streaming compression)
lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
- Decompressing such a compressed block requires additional metadata.
- Exact metadata depends on exact decompression function.
- For the typical case of LZ4_decompress_safe(),
- metadata includes block's compressed size, and maximum bound of decompressed size.
+ Decompressing such a compressed block requires additional metadata.
+ Exact metadata depends on exact decompression function.
+ For the typical case of LZ4_decompress_safe(),
+ metadata includes block's compressed size, and maximum bound of decompressed size.
Each application is free to encode and pass such metadata in whichever way it wants.
lz4.h only handle blocks, it can not generate Frames.
Blocks are different from Frames (doc/lz4_Frame_format.md).
Frames bundle both blocks and metadata in a specified manner.
- Embedding metadata is required for compressed data to be self-contained and portable.
+ Embedding metadata is required for compressed data to be self-contained and portable.
Frame format is delivered through a companion API, declared in lz4frame.h.
- The `lz4` CLI can only manage frames.
+ The `lz4` CLI can only manage frames.
*/
/*^***************************************************************
@@ -132,35 +132,35 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string;
* Simple Functions
**************************************/
/*! LZ4_compress_default() :
- * Compresses 'srcSize' bytes from buffer 'src'
- * into already allocated 'dst' buffer of size 'dstCapacity'.
- * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
- * It also runs faster, so it's a recommended setting.
- * If the function cannot compress 'src' into a more limited 'dst' budget,
- * compression stops *immediately*, and the function result is zero.
- * In which case, 'dst' content is undefined (invalid).
- * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
- * dstCapacity : size of buffer 'dst' (which must be already allocated)
- * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
- * or 0 if compression fails
- * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
- */
+ * Compresses 'srcSize' bytes from buffer 'src'
+ * into already allocated 'dst' buffer of size 'dstCapacity'.
+ * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'src' into a more limited 'dst' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * In which case, 'dst' content is undefined (invalid).
+ * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
+ * dstCapacity : size of buffer 'dst' (which must be already allocated)
+ * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
+ * or 0 if compression fails
+ * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
+ */
LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);
/*! LZ4_decompress_safe() :
- * compressedSize : is the exact complete size of the compressed block.
- * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
- * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
- * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
- * If the source stream is detected malformed, the function will stop decoding and return a negative result.
- * Note 1 : This function is protected against malicious data packets :
- * it will never writes outside 'dst' buffer, nor read outside 'source' buffer,
- * even if the compressed block is maliciously modified to order the decoder to do these actions.
- * In such case, the decoder stops immediately, and considers the compressed block malformed.
- * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them.
- * The implementation is free to send / store / derive this information in whichever way is most beneficial.
- * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
- */
+ * compressedSize : is the exact complete size of the compressed block.
+ * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
+ * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
+ * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
+ * If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ * Note 1 : This function is protected against malicious data packets :
+ * it will never writes outside 'dst' buffer, nor read outside 'source' buffer,
+ * even if the compressed block is maliciously modified to order the decoder to do these actions.
+ * In such case, the decoder stops immediately, and considers the compressed block malformed.
+ * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them.
+ * The implementation is free to send / store / derive this information in whichever way is most beneficial.
+ * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
+ */
LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);
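[Editor's note: a round-trip sketch for the two functions documented above, using `LZ4_compressBound()` to guarantee the compression step succeeds.]
```
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

/* returns 1 if src survives a compress/decompress round trip, else 0 */
int roundtrip(const char* src, int srcSize)
{
    int ok = 0;
    int const bound = LZ4_compressBound(srcSize);
    char* const comp = (char*)malloc((size_t)bound);
    char* const back = (char*)malloc((size_t)srcSize);
    if (comp && back) {
        int const csize = LZ4_compress_default(src, comp, srcSize, bound);
        if (csize > 0) {  /* 0 would mean compression failed */
            int const dsize = LZ4_decompress_safe(comp, back, csize, srcSize);
            ok = (dsize == srcSize) && (memcmp(src, back, (size_t)srcSize) == 0);
        }
    }
    free(comp); free(back);
    return ok;
}
```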
@@ -419,9 +419,9 @@ LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecod
*/
LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize);
-#endif /* LZ4_H_2983827168210 */
-
+#endif /* LZ4_H_2983827168210 */
+
/*^*************************************
* !!!!!! STATIC LINKING ONLY !!!!!!
***************************************/
@@ -446,11 +446,11 @@ LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int sr
* define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
******************************************************************************/
-#ifdef LZ4_STATIC_LINKING_ONLY
-
-#ifndef LZ4_STATIC_3504398509
-#define LZ4_STATIC_3504398509
-
+#ifdef LZ4_STATIC_LINKING_ONLY
+
+#ifndef LZ4_STATIC_3504398509
+#define LZ4_STATIC_3504398509
+
#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
#define LZ4LIB_STATIC_API LZ4LIB_API
#else
@@ -498,76 +498,76 @@ LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const c
*/
LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream);
-
-/*! In-place compression and decompression
- *
- * It's possible to have input and output sharing the same buffer,
- * for highly contrained memory environments.
- * In both cases, it requires input to lay at the end of the buffer,
- * and decompression to start at beginning of the buffer.
- * Buffer size must feature some margin, hence be larger than final size.
- *
- * |<------------------------buffer--------------------------------->|
- * |<-----------compressed data--------->|
- * |<-----------decompressed size------------------>|
- * |<----margin---->|
- *
- * This technique is more useful for decompression,
- * since decompressed size is typically larger,
- * and margin is short.
- *
- * In-place decompression will work inside any buffer
- * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
- * This presumes that decompressedSize > compressedSize.
- * Otherwise, it means compression actually expanded data,
- * and it would be more efficient to store such data with a flag indicating it's not compressed.
- * This can happen when data is not compressible (already compressed, or encrypted).
- *
- * For in-place compression, margin is larger, as it must be able to cope with both
- * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
- * and data expansion, which can happen when input is not compressible.
- * As a consequence, buffer size requirements are much higher,
- * and memory savings offered by in-place compression are more limited.
- *
- * There are ways to limit this cost for compression :
- * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
- * Note that it is a compile-time constant, so all compressions will apply this limit.
- * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
- * so it's a reasonable trick when inputs are known to be small.
- * - Require the compressor to deliver a "maximum compressed size".
- * This is the `dstCapacity` parameter in `LZ4_compress*()`.
- * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
- * in which case, the return code will be 0 (zero).
- * The caller must be ready for these cases to happen,
- * and typically design a backup scheme to send data uncompressed.
- * The combination of both techniques can significantly reduce
- * the amount of margin required for in-place compression.
- *
- * In-place compression can work in any buffer
- * which size is >= (maxCompressedSize)
- * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
- * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
- * so it's possible to reduce memory requirements by playing with them.
- */
-
-#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
-#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */
-
-#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
-# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+
+/*! In-place compression and decompression
+ *
+ * It's possible to have input and output sharing the same buffer,
+ * for highly contrained memory environments.
+ * In both cases, it requires input to lay at the end of the buffer,
+ * and decompression to start at beginning of the buffer.
+ * Buffer size must feature some margin, hence be larger than final size.
+ *
+ * |<------------------------buffer--------------------------------->|
+ * |<-----------compressed data--------->|
+ * |<-----------decompressed size------------------>|
+ * |<----margin---->|
+ *
+ * This technique is more useful for decompression,
+ * since decompressed size is typically larger,
+ * and margin is short.
+ *
+ * In-place decompression will work inside any buffer
+ * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
+ * This presumes that decompressedSize > compressedSize.
+ * Otherwise, it means compression actually expanded data,
+ * and it would be more efficient to store such data with a flag indicating it's not compressed.
+ * This can happen when data is not compressible (already compressed, or encrypted).
+ *
+ * For in-place compression, margin is larger, as it must be able to cope with both
+ * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
+ * and data expansion, which can happen when input is not compressible.
+ * As a consequence, buffer size requirements are much higher,
+ * and memory savings offered by in-place compression are more limited.
+ *
+ * There are ways to limit this cost for compression :
+ * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
+ * Note that it is a compile-time constant, so all compressions will apply this limit.
+ * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
+ * so it's a reasonable trick when inputs are known to be small.
+ * - Require the compressor to deliver a "maximum compressed size".
+ * This is the `dstCapacity` parameter in `LZ4_compress*()`.
+ * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
+ * in which case, the return code will be 0 (zero).
+ * The caller must be ready for these cases to happen,
+ * and typically design a backup scheme to send data uncompressed.
+ * The combination of both techniques can significantly reduce
+ * the amount of margin required for in-place compression.
+ *
+ * In-place compression can work in any buffer
+ * which size is >= (maxCompressedSize)
+ * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
+ * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
+ * so it's possible to reduce memory requirements by playing with them.
+ */
+
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
+#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
#endif
-#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
-#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */
-
-#endif /* LZ4_STATIC_3504398509 */
-#endif /* LZ4_STATIC_LINKING_ONLY */
-
-
-
-#ifndef LZ4_H_98237428734687
-#define LZ4_H_98237428734687
-
+#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
+#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */
+
+#endif /* LZ4_STATIC_3504398509 */
+#endif /* LZ4_STATIC_LINKING_ONLY */
+
+
+
+#ifndef LZ4_H_98237428734687
+#define LZ4_H_98237428734687
+
/*-************************************************************
* Private Definitions
**************************************************************
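[Editor's note: a sketch of in-place decompression using the macros documented above -- place the compressed data at the end of a buffer sized by `LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE()`, then decode toward the buffer's start; the margin guarantees the writes never overtake the reads.]
```
#define LZ4_STATIC_LINKING_ONLY   /* the in-place macros */
#include <string.h>
#include "lz4.h"

/* buffer must hold >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize)
 * bytes; also presumes decompressedSize > compressedSize, as noted above */
int decompress_in_place(char* buffer, const char* compressed,
                        int compressedSize, int decompressedSize)
{
    size_t const bufSize = LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE((size_t)decompressedSize);
    char* const srcStart = buffer + bufSize - (size_t)compressedSize;
    memcpy(srcStart, compressed, (size_t)compressedSize);  /* input at buffer end */
    return LZ4_decompress_safe(srcStart, buffer, compressedSize, decompressedSize);
}
```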
@@ -660,7 +660,7 @@ union LZ4_streamDecode_u {
} ; /* previously typedef'd to LZ4_streamDecode_t */
-
+
/*-************************************
* Obsolete Functions
**************************************/
@@ -699,8 +699,8 @@ union LZ4_streamDecode_u {
#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
/*! Obsolete compression functions (since v1.7.3) */
-LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
-LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
@@ -771,7 +771,7 @@ LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int or
LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
-#endif /* LZ4_H_98237428734687 */
+#endif /* LZ4_H_98237428734687 */
#if defined (__cplusplus) && !defined(LZ4_NAMESPACE)
diff --git a/contrib/libs/lz4/lz4frame.c b/contrib/libs/lz4/lz4frame.c
index ec02c92f72..8e88af356b 100644
--- a/contrib/libs/lz4/lz4frame.c
+++ b/contrib/libs/lz4/lz4frame.c
@@ -213,8 +213,8 @@ static void LZ4F_writeLE64 (void* dst, U64 value64)
static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */
static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */
-static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
-static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
+static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
+static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
/*-************************************
@@ -327,7 +327,7 @@ static size_t LZ4F_compressBound_internal(size_t srcSize,
{
LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */
- prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */
+ prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */
{ const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
U32 const flush = prefsPtr->autoFlush | (srcSize==0);
LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
@@ -1136,12 +1136,12 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
}
/* control magic number */
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
DEBUGLOG(4, "frame header error : unknown magic number");
return err0r(LZ4F_ERROR_frameType_unknown);
}
-#endif
+#endif
dctx->frameInfo.frameType = LZ4F_frame;
/* Flags */
@@ -1180,12 +1180,12 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
/* check header */
assert(frameHeaderSize > 5);
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
{ BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
if (HC != srcPtr[frameHeaderSize-1])
return err0r(LZ4F_ERROR_headerChecksum_invalid);
}
-#endif
+#endif
/* save */
dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
@@ -1222,10 +1222,10 @@ size_t LZ4F_headerSize(const void* src, size_t srcSize)
return 8;
/* control magic number */
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
return err0r(LZ4F_ERROR_frameType_unknown);
-#endif
+#endif
/* Frame Header Size */
{ BYTE const FLG = ((const BYTE*)src)[4];
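Note: from here `LZ4F_headerSize()` needs only the FLG byte: the minimum 7-byte header grows by 8 bytes when a content size is declared and by 4 when a dictionary ID is present. A hedged reconstruction of that arithmetic:

```c
#include <stddef.h>
#include <stdint.h>

/* Sketch: frame header size from the FLG byte (byte 4 of the frame).
 * Bit 3 = content-size field (+8), bit 0 = dictionary ID (+4).
 * Assumes the magic number was already validated. */
static size_t lz4f_header_size_from_flg(uint8_t flg)
{
    size_t size = 7;                 /* LZ4F_HEADER_SIZE_MIN */
    if (flg & (1u << 3)) size += 8;  /* content size field */
    if (flg & (1u << 0)) size += 4;  /* dictionary ID field */
    return size;
}
```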
@@ -1524,7 +1524,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
/* next block is a compressed block */
dctx->tmpInTarget = nextCBlockSize + crcSize;
dctx->dStage = dstage_getCBlock;
- if (dstPtr==dstEnd || srcPtr==srcEnd) {
+ if (dstPtr==dstEnd || srcPtr==srcEnd) {
nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
doAnotherStage = 0;
}
@@ -1592,17 +1592,17 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
}
{ U32 const readCRC = LZ4F_readLE32(crcSrc);
U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
DEBUGLOG(6, "compare block checksum");
if (readCRC != calcCRC) {
DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
readCRC, calcCRC);
return err0r(LZ4F_ERROR_blockChecksum_invalid);
}
-#else
- (void)readCRC;
- (void)calcCRC;
-#endif
+#else
+ (void)readCRC;
+ (void)calcCRC;
+#endif
} }
dctx->dStage = dstage_getBlockHeader; /* new block */
break;
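Note: block checksums use the streaming XXH32 state (`dctx->blockChecksum`) because a block may arrive split across several `LZ4F_decompress()` calls. A minimal standalone analogue of this verification:

```c
#include <stdint.h>
#include <stddef.h>
#include "xxhash.h"

/* Sketch: verify a block checksum fed incrementally, the way the
 * decoder does when block bytes arrive in several chunks. */
static int verify_block_checksum(const uint8_t* chunks[], const size_t sizes[],
                                 size_t nbChunks, uint32_t expectedCRC)
{
    XXH32_state_t* const state = XXH32_createState();
    uint32_t crc;
    size_t i;
    if (state == NULL) return 0;
    XXH32_reset(state, 0);                 /* seed 0, as in lz4frame */
    for (i = 0; i < nbChunks; i++)
        XXH32_update(state, chunks[i], sizes[i]);
    crc = XXH32_digest(state);
    XXH32_freeState(state);
    return crc == expectedCRC;
}
```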
@@ -1642,13 +1642,13 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
{ U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
if (readBlockCrc != calcBlockCrc)
return err0r(LZ4F_ERROR_blockChecksum_invalid);
-#else
- (void)readBlockCrc;
- (void)calcBlockCrc;
-#endif
+#else
+ (void)readBlockCrc;
+ (void)calcBlockCrc;
+#endif
} }
if ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) {
@@ -1779,13 +1779,13 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
/* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */
{ U32 const readCRC = LZ4F_readLE32(selectedIn);
U32 const resultCRC = XXH32_digest(&(dctx->xxh));
-#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
if (readCRC != resultCRC)
return err0r(LZ4F_ERROR_contentChecksum_invalid);
-#else
- (void)readCRC;
- (void)resultCRC;
-#endif
+#else
+ (void)readCRC;
+ (void)resultCRC;
+#endif
nextSrcSizeHint = 0;
LZ4F_resetDecompressionContext(dctx);
doAnotherStage = 0;
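Note: all three checksum stages above live inside the `LZ4F_decompress()` state machine, which consumes input incrementally and returns a hint for the next source size (0 once the frame is complete). A hedged driver loop, with error handling abbreviated and fixed illustrative buffers:

```c
#include "lz4frame.h"

/* Sketch: feed an in-memory frame through LZ4F_decompress().
 * Assumes dstCapacity is large enough to make progress each call. */
static int decompress_frame(const void* src, size_t srcSize,
                            void* dst, size_t dstCapacity)
{
    LZ4F_dctx* dctx;
    size_t ret = 1;
    if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION)))
        return -1;
    while (ret != 0 && srcSize > 0) {
        size_t dstSize = dstCapacity;
        size_t srcRead = srcSize;
        ret = LZ4F_decompress(dctx, dst, &dstSize, src, &srcRead, NULL);
        if (LZ4F_isError(ret)) break;  /* e.g. a checksum mismatch above */
        src = (const char*)src + srcRead;
        srcSize -= srcRead;
        /* dstSize bytes of decoded data are now available in dst */
    }
    LZ4F_freeDecompressionContext(dctx);
    return LZ4F_isError(ret) ? -1 : 0;
}
```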
diff --git a/contrib/libs/lz4/lz4frame.h b/contrib/libs/lz4/lz4frame.h
index 4573317ef2..60ca3ec9d4 100644
--- a/contrib/libs/lz4/lz4frame.h
+++ b/contrib/libs/lz4/lz4frame.h
@@ -259,15 +259,15 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
#define LZ4F_HEADER_SIZE_MIN 7 /* LZ4 Frame header size can vary, depending on selected parameters */
#define LZ4F_HEADER_SIZE_MAX 19
-/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */
-#define LZ4F_BLOCK_HEADER_SIZE 4
-
-/* Size in bytes of a block checksum footer in little-endian format. */
-#define LZ4F_BLOCK_CHECKSUM_SIZE 4
-
-/* Size in bytes of the content checksum. */
-#define LZ4F_CONTENT_CHECKSUM_SIZE 4
-
+/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */
+#define LZ4F_BLOCK_HEADER_SIZE 4
+
+/* Size in bytes of a block checksum footer in little-endian format. */
+#define LZ4F_BLOCK_CHECKSUM_SIZE 4
+
+/* Size in bytes of the content checksum. */
+#define LZ4F_CONTENT_CHECKSUM_SIZE 4
+
/*! LZ4F_compressBegin() :
* will write the frame header into dstBuffer.
* dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
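Note: these constants let callers do their own frame accounting: each block costs `LZ4F_BLOCK_HEADER_SIZE`, plus `LZ4F_BLOCK_CHECKSUM_SIZE` when block checksums are enabled, and a frame ends with an EndMark plus, optionally, `LZ4F_CONTENT_CHECKSUM_SIZE`. A worst-case sizing sketch built from these macros alone:

```c
#include "lz4frame.h"

/* Sketch: fixed framing overhead for nbBlocks blocks, assuming both
 * checksum options are enabled (the worst case for overhead). */
static size_t frame_overhead(size_t nbBlocks)
{
    size_t const perBlock = LZ4F_BLOCK_HEADER_SIZE + LZ4F_BLOCK_CHECKSUM_SIZE;
    return LZ4F_HEADER_SIZE_MAX           /* frame header, upper bound */
         + nbBlocks * perBlock            /* block headers + checksums */
         + LZ4F_BLOCK_HEADER_SIZE         /* EndMark (zero-size block header) */
         + LZ4F_CONTENT_CHECKSUM_SIZE;    /* frame trailer */
}
```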
diff --git a/contrib/libs/lz4/lz4hc.c b/contrib/libs/lz4/lz4hc.c
index a556d47920..b0b927ed67 100644
--- a/contrib/libs/lz4/lz4hc.c
+++ b/contrib/libs/lz4/lz4hc.c
@@ -152,20 +152,20 @@ int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
return back;
}
-#if defined(_MSC_VER)
-# define LZ4HC_rotl32(x,r) _rotl(x,r)
-#else
-# define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
-#endif
-
-
-static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
-{
- size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
+#if defined(_MSC_VER)
+# define LZ4HC_rotl32(x,r) _rotl(x,r)
+#else
+# define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#endif
+
+
+static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
+{
+ size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
if (bitsToRotate == 0) return pattern;
- return LZ4HC_rotl32(pattern, (int)bitsToRotate);
-}
-
+ return LZ4HC_rotl32(pattern, (int)bitsToRotate);
+}
+
/* LZ4HC_countPattern() :
* pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
static unsigned
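Note: `LZ4HC_rotatePattern()` above re-aligns a repeating 4-byte pattern when a scan crosses a buffer boundary at a non-multiple-of-4 offset: it rotates the 32-bit sample by `(rotate % 4)` bytes, special-casing zero because a 32-bit rotate by 0 would be undefined in the fallback macro. A small illustration of the rotation arithmetic itself:

```c
#include <stdint.h>
#include <stdio.h>

#define ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))

int main(void)
{
    uint32_t const pattern = 0x04030201U;       /* LE bytes 01 02 03 04 */
    size_t rotate;
    for (rotate = 0; rotate < 4; rotate++) {
        size_t const bits = (rotate & 3) << 3;  /* byte offset -> bits */
        uint32_t const r = bits ? ROTL32(pattern, (unsigned)bits) : pattern;
        printf("rotate by %zu byte(s) -> 0x%08X\n", rotate, r);
    }
    return 0;
}
```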
@@ -219,16 +219,16 @@ LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
return (unsigned)(iStart - ip);
}
-/* LZ4HC_protectDictEnd() :
- * Checks if the match is in the last 3 bytes of the dictionary, so reading the
- * 4 byte MINMATCH would overflow.
- * @returns true if the match index is okay.
- */
-static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
-{
- return ((U32)((dictLimit - 1) - matchIndex) >= 3);
-}
-
+/* LZ4HC_protectDictEnd() :
+ * Checks if the match is in the last 3 bytes of the dictionary, so reading the
+ * 4 byte MINMATCH would overflow.
+ * @returns true if the match index is okay.
+ */
+static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
+{
+ return ((U32)((dictLimit - 1) - matchIndex) >= 3);
+}
+
typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
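Note: the unsigned subtraction in `LZ4HC_protectDictEnd()` does double duty: for matches inside the dictionary it requires at least 3 readable bytes before `dictLimit` (so a 4-byte MINMATCH read stays in bounds), and for `matchIndex >= dictLimit` it wraps around to a huge value, so prefix matches pass automatically. Illustrated:

```c
#include <stdint.h>
#include <assert.h>

/* Same predicate as LZ4HC_protectDictEnd(), reproduced for illustration. */
static int protect_dict_end(uint32_t dictLimit, uint32_t matchIndex)
{
    return ((uint32_t)((dictLimit - 1) - matchIndex) >= 3);
}

int main(void)
{
    uint32_t const dictLimit = 100;
    assert( protect_dict_end(dictLimit, 96));   /* 4 bytes readable: ok */
    assert(!protect_dict_end(dictLimit, 97));   /* read would cross dictLimit */
    assert(!protect_dict_end(dictLimit, 99));   /* last dictionary byte */
    assert( protect_dict_end(dictLimit, 150));  /* prefix match: wraps, ok */
    return 0;
}
```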
@@ -254,7 +254,7 @@ LZ4HC_InsertAndGetWiderMatch (
const U32 dictLimit = hc4->dictLimit;
const BYTE* const lowPrefixPtr = base + dictLimit;
const U32 ipIndex = (U32)(ip - base);
- const U32 lowestMatchIndex = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
+ const U32 lowestMatchIndex = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
const BYTE* const dictBase = hc4->dictBase;
int const lookBackLength = (int)(ip-iLowLimit);
int nbAttempts = maxNbAttempts;
@@ -313,21 +313,21 @@ LZ4HC_InsertAndGetWiderMatch (
if (chainSwap && matchLength==longest) { /* better match => select a better chain */
assert(lookBackLength==0); /* search forward only */
if (matchIndex + (U32)longest <= ipIndex) {
- int const kTrigger = 4;
+ int const kTrigger = 4;
U32 distanceToNextMatch = 1;
- int const end = longest - MINMATCH + 1;
- int step = 1;
- int accel = 1 << kTrigger;
+ int const end = longest - MINMATCH + 1;
+ int step = 1;
+ int accel = 1 << kTrigger;
int pos;
- for (pos = 0; pos < end; pos += step) {
+ for (pos = 0; pos < end; pos += step) {
U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
- step = (accel++ >> kTrigger);
+ step = (accel++ >> kTrigger);
if (candidateDist > distanceToNextMatch) {
distanceToNextMatch = candidateDist;
matchChainPos = (U32)pos;
- accel = 1 << kTrigger;
- }
- }
+ accel = 1 << kTrigger;
+ }
+ }
if (distanceToNextMatch > 1) {
if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
matchIndex -= distanceToNextMatch;
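Note: `step = (accel++ >> kTrigger)` in the loop above is a lazy acceleration schedule: the chain is scanned position by position for the first `1 << kTrigger` (16) candidates after the last improvement, then the stride grows, and any improvement resets `accel`. A standalone trace of the stride values this schedule produces:

```c
#include <stdio.h>

int main(void)
{
    int const kTrigger = 4;
    int accel = 1 << kTrigger;   /* 16, as in the loop above */
    int pos, step = 1;
    for (pos = 0; pos < 64; pos += step) {
        step = (accel++ >> kTrigger);  /* 1 sixteen times, then 2, 3, ... */
        printf("pos=%2d step=%d\n", pos, step);
    }
    return 0;
}
```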
@@ -346,61 +346,61 @@ LZ4HC_InsertAndGetWiderMatch (
} else {
repeat = rep_not;
} }
- if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
- && LZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
- const int extDict = matchCandidateIdx < dictLimit;
- const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
+ if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
+ && LZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
+ const int extDict = matchCandidateIdx < dictLimit;
+ const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
if (LZ4_read32(matchPtr) == pattern) { /* good candidate */
- const BYTE* const dictStart = dictBase + hc4->lowLimit;
- const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
- size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
- if (extDict && matchPtr + forwardPatternLength == iLimit) {
- U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
- forwardPatternLength += LZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
- }
- { const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
- size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
- size_t currentSegmentLength;
- if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
- U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
- backLength += LZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
- }
-                    /* Limit backLength not to go further than lowestMatchIndex */
- backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
- assert(matchCandidateIdx - backLength >= lowestMatchIndex);
- currentSegmentLength = backLength + forwardPatternLength;
- /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
- if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
- && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
- U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
- if (LZ4HC_protectDictEnd(dictLimit, newMatchIndex))
- matchIndex = newMatchIndex;
- else {
- /* Can only happen if started in the prefix */
- assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
- matchIndex = dictLimit;
+ const BYTE* const dictStart = dictBase + hc4->lowLimit;
+ const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
+ size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
+ if (extDict && matchPtr + forwardPatternLength == iLimit) {
+ U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
+ forwardPatternLength += LZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
+ }
+ { const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
+ size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
+ size_t currentSegmentLength;
+ if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
+ U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
+ backLength += LZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
+ }
+                    /* Limit backLength not to go further than lowestMatchIndex */
+ backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
+ assert(matchCandidateIdx - backLength >= lowestMatchIndex);
+ currentSegmentLength = backLength + forwardPatternLength;
+ /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
+ if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
+ && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
+ U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
+ if (LZ4HC_protectDictEnd(dictLimit, newMatchIndex))
+ matchIndex = newMatchIndex;
+ else {
+ /* Can only happen if started in the prefix */
+ assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
+ matchIndex = dictLimit;
}
- } else {
- U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
- if (!LZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
- assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
- matchIndex = dictLimit;
- } else {
- matchIndex = newMatchIndex;
- if (lookBackLength==0) { /* no back possible */
- size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
- if ((size_t)longest < maxML) {
+ } else {
+ U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
+ if (!LZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
+ assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
+ matchIndex = dictLimit;
+ } else {
+ matchIndex = newMatchIndex;
+ if (lookBackLength==0) { /* no back possible */
+ size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
+ if ((size_t)longest < maxML) {
assert(base + matchIndex != ip);
if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
- assert(maxML < 2 GB);
- longest = (int)maxML;
- *matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
- *startpos = ip;
- }
- { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
- if (distToNextPattern > matchIndex) break; /* avoid overflow */
- matchIndex -= distToNextPattern;
- } } } } }
+ assert(maxML < 2 GB);
+ longest = (int)maxML;
+ *matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
+ *startpos = ip;
+ }
+ { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
+ if (distToNextPattern > matchIndex) break; /* avoid overflow */
+ matchIndex -= distToNextPattern;
+ } } } } }
continue;
} }
} } /* PA optimization */
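Note: the block above extends a repetitive match in both directions: `LZ4HC_countPattern()` measures how far the 4-byte pattern continues forward, `LZ4HC_reverseCountPattern()` backward, and the sum becomes `currentSegmentLength`, which is then clamped against `lowestMatchIndex` and the dictionary end. A simplified byte-wise forward counter (the original works word-wise and handles both endiannesses):

```c
#include <stdint.h>
#include <stddef.h>

/* Sketch: count bytes from p (up to end) that continue a repeating
 * 4-byte pattern, given as a little-endian 32-bit sample taken at a
 * pattern boundary. Byte-wise, unlike the word-wise original. */
static size_t count_pattern_bytes(const uint8_t* p, const uint8_t* end,
                                  uint32_t pattern)
{
    const uint8_t* const start = p;
    uint8_t pat[4];
    size_t i;
    for (i = 0; i < 4; i++) pat[i] = (uint8_t)(pattern >> (8*i));
    for (i = 0; p < end && *p == pat[i & 3]; i++) p++;
    return (size_t)(p - start);
}
```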
@@ -1097,9 +1097,9 @@ static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBl
ctxPtr->base = newBlock - ctxPtr->dictLimit;
ctxPtr->end = newBlock;
ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */
-
- /* cannot reference an extDict and a dictCtx at the same time */
- ctxPtr->dictCtx = NULL;
+
+ /* cannot reference an extDict and a dictCtx at the same time */
+ ctxPtr->dictCtx = NULL;
}
static int
diff --git a/contrib/libs/lz4/lz4hc.h b/contrib/libs/lz4/lz4hc.h
index 3d441fb6fa..7ab3a49a3e 100644
--- a/contrib/libs/lz4/lz4hc.h
+++ b/contrib/libs/lz4/lz4hc.h
@@ -311,9 +311,9 @@ LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionL
#ifndef LZ4_HC_SLO_098092834
#define LZ4_HC_SLO_098092834
-#define LZ4_STATIC_LINKING_ONLY /* LZ4LIB_STATIC_API */
-#include "lz4.h"
-
+#define LZ4_STATIC_LINKING_ONLY /* LZ4LIB_STATIC_API */
+#include "lz4.h"
+
#if defined (__cplusplus)
extern "C" {
#endif
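Note: this block is the experimental section of lz4hc.h: it defines `LZ4_STATIC_LINKING_ONLY` and re-includes lz4.h so the `LZ4LIB_STATIC_API` declarations become visible. Callers opt in by defining the HC static-linking macro before the first include (and by linking lz4 statically):

```c
/* Opt in to the experimental HC API surface; must appear before
 * the first #include, and requires linking lz4 statically. */
#define LZ4_HC_STATIC_LINKING_ONLY
#include "lz4hc.h"
```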
diff --git a/contrib/libs/lz4/ya.make b/contrib/libs/lz4/ya.make
index 282dfe3920..5a4499b052 100644
--- a/contrib/libs/lz4/ya.make
+++ b/contrib/libs/lz4/ya.make
@@ -26,7 +26,7 @@ ADDINCL(
contrib/libs/xxhash
)
-NO_RUNTIME()
+NO_RUNTIME()
SRCS(
lz4.c