author    robot-contrib <robot-contrib@yandex-team.com>  2024-07-24 12:03:09 +0300
committer robot-contrib <robot-contrib@yandex-team.com>  2024-07-24 12:15:10 +0300
commit    2cda5506bf4b2fb1c7870fd352b1d4f8dd1c76bf (patch)
tree      7976a154a3d209dd8e55a834b02c4b4c3467b897 /contrib
parent    af231441770fd2f0caaa23a8bd4106eaefe80759 (diff)
Update contrib/libs/lz4 to 1.10.0
46c03ca2c4745ff3ce7fdebd47424736854d67e9
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/libs/lz4/README.md   |   26
-rw-r--r--  contrib/libs/lz4/lz4.c       |  305
-rw-r--r--  contrib/libs/lz4/lz4.h       |  180
-rw-r--r--  contrib/libs/lz4/lz4file.c   |  192
-rw-r--r--  contrib/libs/lz4/lz4file.h   |    4
-rw-r--r--  contrib/libs/lz4/lz4frame.c  |  300
-rw-r--r--  contrib/libs/lz4/lz4frame.h  |  265
-rw-r--r--  contrib/libs/lz4/lz4hc.c     | 1309
-rw-r--r--  contrib/libs/lz4/lz4hc.h     |   85
-rw-r--r--  contrib/libs/lz4/ya.make     |    4
10 files changed, 1776 insertions, 894 deletions
diff --git a/contrib/libs/lz4/README.md b/contrib/libs/lz4/README.md
index 08d1cef2bf..9039da35af 100644
--- a/contrib/libs/lz4/README.md
+++ b/contrib/libs/lz4/README.md
@@ -100,7 +100,7 @@ The following build macro can be selected to adjust source code behavior at comp
passed as argument to become a compression state is suitably aligned.
This test can be disabled if it proves flaky, by setting this value to 0.
-- `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to `<stdlib,h>`'s `malloc()`, `calloc()` and `free()`
+- `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to `<stdlib.h>`'s `malloc()`, `calloc()` and `free()`
by user-defined functions, which must be named `LZ4_malloc()`, `LZ4_calloc()` and `LZ4_free()`.
User functions must be available at link time.
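
As a minimal sketch of this macro's contract (only the three names and signatures below are fixed by lz4; the counter is illustrative bookkeeping, not part of the library), a build with `-DLZ4_USER_MEMORY_FUNCTIONS` might supply:

```c
/* Sketch: replacement allocators for a build with -DLZ4_USER_MEMORY_FUNCTIONS.
 * Names and signatures are mandated by lz4.c; the leak counter is a
 * hypothetical addition for illustration. */
#include <stdlib.h>

static size_t g_lz4_live_allocs = 0;   /* hypothetical bookkeeping */

void* LZ4_malloc(size_t s)           { g_lz4_live_allocs++; return malloc(s); }
void* LZ4_calloc(size_t n, size_t s) { g_lz4_live_allocs++; return calloc(n, s); }
void  LZ4_free(void* p)              { if (p != NULL) g_lz4_live_allocs--; free(p); }
```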
@@ -108,6 +108,12 @@ The following build macro can be selected to adjust source code behavior at comp
Remove support of dynamic memory allocation.
For more details, see description of this macro in `lib/lz4.c`.
+- `LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT` : experimental feature aimed at producing the same
+ compressed output on platforms of different endianness (i.e. little-endian and big-endian).
+ Output on little-endian platforms shall remain unchanged, while big-endian platforms will start producing
+ the same output as little-endian ones. This isn't expected to impact backward- and forward-compatibility
+ in any way.
+
- `LZ4_FREESTANDING` : by setting this build macro to 1,
LZ4/HC removes dependencies on the C standard library,
including allocation functions and `memmove()`, `memcpy()`, and `memset()`.
@@ -115,6 +121,24 @@ The following build macro can be selected to adjust source code behavior at comp
(embedded, bootloader, etc).
For more details, see description of this macro in `lib/lz4.h`.
+- `LZ4_HEAPMODE` : Select how stateless compression functions like `LZ4_compress_default()`
+ allocate memory for their hash table,
+ in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+
+- `LZ4HC_HEAPMODE` : Select how stateless HC compression functions like `LZ4_compress_HC()`
+ allocate memory for their workspace:
+ in stack (0), or in heap (1:default).
+ Since workspace is rather large, stack can be inconvenient, hence heap mode is recommended.
+
+- `LZ4F_HEAPMODE` : selects how `LZ4F_compressFrame()` allocates the compression state,
+ either on stack (default, value 0) or using heap memory (value 1).
+
+
+#### Makefile variables
+
+The following `Makefile` variables can be selected to alter the profile of produced binaries :
+- `BUILD_SHARED` : generate `liblz4` dynamic library (enabled by default)
+- `BUILD_STATIC` : generate `liblz4` static library (enabled by default)
#### Amalgamation
diff --git a/contrib/libs/lz4/lz4.c b/contrib/libs/lz4/lz4.c
index 654bfdf32f..a2f7abee19 100644
--- a/contrib/libs/lz4/lz4.c
+++ b/contrib/libs/lz4/lz4.c
@@ -1,6 +1,6 @@
/*
LZ4 - Fast LZ compression algorithm
- Copyright (C) 2011-2020, Yann Collet.
+ Copyright (C) 2011-2023, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -37,7 +37,8 @@
**************************************/
/*
* LZ4_HEAPMODE :
- * Select how default compression functions will allocate memory for their hash table,
+ * Select how stateless compression functions like `LZ4_compress_default()`
+ * allocate memory for their hash table,
* in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
*/
#ifndef LZ4_HEAPMODE
@@ -78,7 +79,7 @@
( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define LZ4_FORCE_MEMORY_ACCESS 2
-# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) || defined(_MSC_VER)
# define LZ4_FORCE_MEMORY_ACCESS 1
# endif
#endif
@@ -105,15 +106,13 @@
# define LZ4_SRC_INCLUDED 1
#endif
-#ifndef LZ4_STATIC_LINKING_ONLY
-#define LZ4_STATIC_LINKING_ONLY
-#endif
-
#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
-#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+# define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
#endif
-#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
+#ifndef LZ4_STATIC_LINKING_ONLY
+# define LZ4_STATIC_LINKING_ONLY
+#endif
#include "lz4.h"
/* see also "memory routines" below */
@@ -125,14 +124,17 @@
# include <intrin.h> /* only present in VS2005+ */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 6237) /* disable: C6237: conditional expression is always 0 */
+# pragma warning(disable : 6239) /* disable: C6239: (<non-zero constant> && <expression>) always evaluates to the result of <expression> */
+# pragma warning(disable : 6240) /* disable: C6240: (<expression> && <non-zero constant>) always evaluates to the result of <expression> */
+# pragma warning(disable : 6326) /* disable: C6326: Potential comparison of a constant with another constant */
#endif /* _MSC_VER */
#ifndef LZ4_FORCE_INLINE
-# ifdef _MSC_VER /* Visual Studio */
+# if defined (_MSC_VER) && !defined (__clang__) /* MSVC */
# define LZ4_FORCE_INLINE static __forceinline
# else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
-# ifdef __GNUC__
+# if defined (__GNUC__) || defined (__clang__)
# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
# else
# define LZ4_FORCE_INLINE static inline
@@ -279,7 +281,7 @@ static const int LZ4_minLength = (MFLIMIT+1);
static int g_debuglog_enable = 1;
# define DEBUGLOG(l, ...) { \
if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
- fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __FILE__ " %i: ", __LINE__); \
fprintf(stderr, __VA_ARGS__); \
fprintf(stderr, " \n"); \
} }
@@ -364,6 +366,11 @@ static unsigned LZ4_isLittleEndian(void)
return one.c[0];
}
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#define LZ4_PACK( __Declaration__ ) __Declaration__ __attribute__((__packed__))
+#elif defined(_MSC_VER)
+#define LZ4_PACK( __Declaration__ ) __pragma( pack(push, 1) ) __Declaration__ __pragma( pack(pop))
+#endif
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
/* lie to the compiler about data alignment; use with caution */
@@ -379,14 +386,16 @@ static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) LZ4_unalign;
+LZ4_PACK(typedef struct { U16 u16; }) LZ4_unalign16;
+LZ4_PACK(typedef struct { U32 u32; }) LZ4_unalign32;
+LZ4_PACK(typedef struct { reg_t uArch; }) LZ4_unalignST;
-static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign*)ptr)->u16; }
-static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign*)ptr)->u32; }
-static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalign*)ptr)->uArch; }
+static U16 LZ4_read16(const void* ptr) { return ((const LZ4_unalign16*)ptr)->u16; }
+static U32 LZ4_read32(const void* ptr) { return ((const LZ4_unalign32*)ptr)->u32; }
+static reg_t LZ4_read_ARCH(const void* ptr) { return ((const LZ4_unalignST*)ptr)->uArch; }
-static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign*)memPtr)->u16 = value; }
-static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign*)memPtr)->u32 = value; }
+static void LZ4_write16(void* memPtr, U16 value) { ((LZ4_unalign16*)memPtr)->u16 = value; }
+static void LZ4_write32(void* memPtr, U32 value) { ((LZ4_unalign32*)memPtr)->u32 = value; }
#else /* safe and portable access using memcpy() */
@@ -424,9 +433,21 @@ static U16 LZ4_readLE16(const void* memPtr)
return LZ4_read16(memPtr);
} else {
const BYTE* p = (const BYTE*)memPtr;
- return (U16)((U16)p[0] + (p[1]<<8));
+ return (U16)((U16)p[0] | (p[1]<<8));
+ }
+}
+
+#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
+static U32 LZ4_readLE32(const void* memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read32(memPtr);
+ } else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U32)p[0] | (p[1]<<8) | (p[2]<<16) | (p[3]<<24);
}
}
+#endif
static void LZ4_writeLE16(void* memPtr, U16 value)
{
@@ -509,7 +530,7 @@ LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
/* LZ4_memcpy_using_offset() presumes :
* - dstEnd >= dstPtr + MINMATCH
- * - there is at least 8 bytes available to write after dstEnd */
+ * - there is at least 12 bytes available to write after dstEnd */
LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
@@ -524,12 +545,12 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si
case 2:
LZ4_memcpy(v, srcPtr, 2);
LZ4_memcpy(&v[2], srcPtr, 2);
-#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
+#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
# pragma warning(push)
# pragma warning(disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
#endif
LZ4_memcpy(&v[4], v, 4);
-#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier */
+#if defined(_MSC_VER) && (_MSC_VER <= 1937) /* MSVC 2022 ver 17.7 or earlier */
# pragma warning(pop)
#endif
break;
@@ -776,7 +797,12 @@ LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+
+#ifdef LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT
+ return LZ4_hash4(LZ4_readLE32(p), tableType);
+#else
return LZ4_hash4(LZ4_read32(p), tableType);
+#endif
}
LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
@@ -803,23 +829,19 @@ LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableT
}
}
+/* LZ4_putPosition*() : only used in byPtr mode */
LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
- void* tableBase, tableType_t const tableType,
- const BYTE* srcBase)
+ void* tableBase, tableType_t const tableType)
{
- switch (tableType)
- {
- case clearedTable: { /* illegal! */ assert(0); return; }
- case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
- case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
- case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
- }
+ const BYTE** const hashTable = (const BYTE**)tableBase;
+ assert(tableType == byPtr); (void)tableType;
+ hashTable[h] = p;
}
-LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType)
{
U32 const h = LZ4_hashPosition(p, tableType);
- LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType);
}
/* LZ4_getIndexOnHash() :
@@ -844,20 +866,18 @@ LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_
assert(0); return 0; /* forbidden case */
}
-static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
+static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType)
{
- if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
- if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
- { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
+ assert(tableType == byPtr); (void)tableType;
+ { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
}
LZ4_FORCE_INLINE const BYTE*
LZ4_getPosition(const BYTE* p,
- const void* tableBase, tableType_t tableType,
- const BYTE* srcBase)
+ const void* tableBase, tableType_t tableType)
{
U32 const h = LZ4_hashPosition(p, tableType);
- return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+ return LZ4_getPositionOnHash(h, tableBase, tableType);
}
LZ4_FORCE_INLINE void
@@ -901,9 +921,9 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
cctx->dictSize = 0;
}
-/** LZ4_compress_generic() :
+/** LZ4_compress_generic_validated() :
* inlined, to ensure branches are decided at compilation time.
- * Presumed already validated at this stage:
+ * The following conditions are presumed already validated:
* - source != NULL
* - inputSize > 0
*/
@@ -921,10 +941,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
const int acceleration)
{
int result;
- const BYTE* ip = (const BYTE*) source;
+ const BYTE* ip = (const BYTE*)source;
U32 const startIndex = cctx->currentOffset;
- const BYTE* base = (const BYTE*) source - startIndex;
+ const BYTE* base = (const BYTE*)source - startIndex;
const BYTE* lowLimit;
const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
@@ -932,7 +952,8 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
const U32 dictSize =
dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
- const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
+ const U32 dictDelta =
+ (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with indexes in current context */
int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
@@ -957,11 +978,11 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
assert(ip != NULL);
+ if (tableType == byU16) assert(inputSize<LZ4_64Klimit); /* Size too large (not within 64K limit) */
+ if (tableType == byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
/* If init conditions are not met, we don't have to mark stream
* as having dirty context, since no action was taken yet */
if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
- if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
- if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
assert(acceleration >= 1);
lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
@@ -981,7 +1002,12 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
/* First Byte */
- LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ { U32 const h = LZ4_hashPosition(ip, tableType);
+ if (tableType == byPtr) {
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, byPtr);
+ } else {
+ LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);
+ } }
ip++; forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
@@ -1004,9 +1030,9 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
assert(ip < mflimitPlusOne);
- match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
+ match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);
forwardH = LZ4_hashPosition(forwardIp, tableType);
- LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);
} while ( (match+LZ4_DISTANCE_MAX < ip)
|| (LZ4_read32(match) != LZ4_read32(ip)) );
@@ -1077,7 +1103,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
/* Catch up */
filledIp = ip;
- while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
+ assert(ip > anchor); /* this is always true as ip has been advanced before entering the main loop */
+ if ((match > lowLimit) && unlikely(ip[-1] == match[-1])) {
+ do { ip--; match--; } while (((ip > anchor) & (match > lowLimit)) && (unlikely(ip[-1] == match[-1])));
+ }
/* Encode Literals */
{ unsigned const litLength = (unsigned)(ip - anchor);
@@ -1092,7 +1121,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
goto _last_literals;
}
if (litLength >= RUN_MASK) {
- int len = (int)(litLength - RUN_MASK);
+ unsigned len = litLength - RUN_MASK;
*token = (RUN_MASK<<ML_BITS);
for(; len >= 255 ; len-=255) *op++ = 255;
*op++ = (BYTE)len;
@@ -1204,13 +1233,19 @@ _next_match:
if (ip >= mflimitPlusOne) break;
/* Fill table */
- LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
+ { U32 const h = LZ4_hashPosition(ip-2, tableType);
+ if (tableType == byPtr) {
+ LZ4_putPositionOnHash(ip-2, h, cctx->hashTable, byPtr);
+ } else {
+ U32 const idx = (U32)((ip-2) - base);
+ LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);
+ } }
/* Test next position */
if (tableType == byPtr) {
- match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
- LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ match = LZ4_getPosition(ip, cctx->hashTable, tableType);
+ LZ4_putPosition(ip, cctx->hashTable, tableType);
if ( (match+LZ4_DISTANCE_MAX >= ip)
&& (LZ4_read32(match) == LZ4_read32(ip)) )
{ token=op++; *token=0; goto _next_match; }
@@ -1224,6 +1259,7 @@ _next_match:
if (dictDirective == usingDictCtx) {
if (matchIndex < startIndex) {
/* there was no match, try the dictionary */
+ assert(tableType == byU32);
matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
match = dictBase + matchIndex;
lowLimit = dictionary; /* required for match length counter */
@@ -1377,9 +1413,10 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
*/
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
- LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
+ LZ4_stream_t_internal* const ctx = &((LZ4_stream_t*)state)->internal_donotuse;
if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+ assert(ctx != NULL);
if (dstCapacity >= LZ4_compressBound(srcSize)) {
if (srcSize < LZ4_64Klimit) {
@@ -1413,17 +1450,17 @@ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst
}
-int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity, int acceleration)
{
int result;
#if (LZ4_HEAPMODE)
- LZ4_stream_t* ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
if (ctxPtr == NULL) return 0;
#else
LZ4_stream_t ctx;
LZ4_stream_t* const ctxPtr = &ctx;
#endif
- result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
+ result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity, acceleration);
#if (LZ4_HEAPMODE)
FREEMEM(ctxPtr);
@@ -1432,43 +1469,51 @@ int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutp
}
-int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
+int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity)
{
- return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
+ return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
}
/* Note!: This function leaves the stream in an unclean/broken state!
* It is not safe to subsequently use the same state with a _fastReset() or
* _continue() call without resetting it. */
-static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+static int LZ4_compress_destSize_extState_internal(LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
{
void* const s = LZ4_initStream(state, sizeof (*state));
assert(s != NULL); (void)s;
if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
- return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
+ return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, acceleration);
} else {
if (*srcSizePtr < LZ4_64Klimit) {
- return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, acceleration);
} else {
tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
- return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, acceleration);
} }
}
+int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration)
+{
+ int const r = LZ4_compress_destSize_extState_internal((LZ4_stream_t*)state, src, dst, srcSizePtr, targetDstSize, acceleration);
+ /* clean the state on exit */
+ LZ4_initStream(state, sizeof (LZ4_stream_t));
+ return r;
+}
+
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
#if (LZ4_HEAPMODE)
- LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
if (ctx == NULL) return 0;
#else
LZ4_stream_t ctxBody;
- LZ4_stream_t* ctx = &ctxBody;
+ LZ4_stream_t* const ctx = &ctxBody;
#endif
- int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
+ int result = LZ4_compress_destSize_extState_internal(ctx, src, dst, srcSizePtr, targetDstSize, 1);
#if (LZ4_HEAPMODE)
FREEMEM(ctx);
@@ -1537,14 +1582,17 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
#endif
+typedef enum { _ld_fast, _ld_slow } LoadDict_mode_e;
#define HASH_UNIT sizeof(reg_t)
-int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+int LZ4_loadDict_internal(LZ4_stream_t* LZ4_dict,
+ const char* dictionary, int dictSize,
+ LoadDict_mode_e _ld)
{
- LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
+ LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
const tableType_t tableType = byU32;
const BYTE* p = (const BYTE*)dictionary;
const BYTE* const dictEnd = p + dictSize;
- const BYTE* base;
+ U32 idx32;
DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
@@ -1567,19 +1615,46 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
}
if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
- base = dictEnd - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
dict->tableType = (U32)tableType;
+ idx32 = dict->currentOffset - dict->dictSize;
while (p <= dictEnd-HASH_UNIT) {
- LZ4_putPosition(p, dict->hashTable, tableType, base);
- p+=3;
+ U32 const h = LZ4_hashPosition(p, tableType);
+ /* Note: overwriting => favors positions end of dictionary */
+ LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
+ p+=3; idx32+=3;
+ }
+
+ if (_ld == _ld_slow) {
+ /* Fill hash table with additional references, to improve compression capability */
+ p = dict->dictionary;
+ idx32 = dict->currentOffset - dict->dictSize;
+ while (p <= dictEnd-HASH_UNIT) {
+ U32 const h = LZ4_hashPosition(p, tableType);
+ U32 const limit = dict->currentOffset - 64 KB;
+ if (LZ4_getIndexOnHash(h, dict->hashTable, tableType) <= limit) {
+ /* Note: not overwriting => favors positions beginning of dictionary */
+ LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
+ }
+ p++; idx32++;
+ }
}
return (int)dict->dictSize;
}
+int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+{
+ return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_fast);
+}
+
+int LZ4_loadDictSlow(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+{
+ return LZ4_loadDict_internal(LZ4_dict, dictionary, dictSize, _ld_slow);
+}
+
void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream)
{
const LZ4_stream_t_internal* dictCtx = (dictionaryStream == NULL) ? NULL :
@@ -1711,7 +1786,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
/* Hidden debug function, to force-test external dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
- LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
+ LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;
int result;
LZ4_renormDictT(streamPtr, srcSize);
@@ -1774,7 +1849,7 @@ typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
* does not know end of input
* presumes input is well formed
* note : will consume at least one byte */
-size_t read_long_length_no_check(const BYTE** pp)
+static size_t read_long_length_no_check(const BYTE** pp)
{
size_t b, l = 0;
do { b = **pp; (*pp)++; l += b; } while (b==255);
@@ -1911,6 +1986,17 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit,
if (initial_check && unlikely((*ip) >= ilimit)) { /* read limit reached */
return rvl_error;
}
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (unlikely((*ip) > ilimit)) { /* read limit reached */
+ return rvl_error;
+ }
+ /* accumulator overflow detection (32-bit mode only) */
+ if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
+ return rvl_error;
+ }
+ if (likely(s != 255)) return length;
do {
s = **ip;
(*ip)++;
@@ -1919,10 +2005,10 @@ read_variable_length(const BYTE** ip, const BYTE* ilimit,
return rvl_error;
}
/* accumulator overflow detection (32-bit mode only) */
- if ((sizeof(length)<8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
+ if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1)/2)) ) {
return rvl_error;
}
- } while (s==255);
+ } while (s == 255);
return length;
}
@@ -1988,63 +2074,73 @@ LZ4_decompress_generic(
* note : fast loop may show a regression for some client arm chips. */
#if LZ4_FAST_DEC_LOOP
if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
- DEBUGLOG(6, "skip fast decode loop");
+ DEBUGLOG(6, "move to safe decode loop");
goto safe_decode;
}
/* Fast loop : decode sequences as long as output < oend-FASTLOOP_SAFE_DISTANCE */
+ DEBUGLOG(6, "using fast decode loop");
while (1) {
/* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
assert(ip < iend);
token = *ip++;
length = token >> ML_BITS; /* literal length */
+ DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
/* decode literal length */
if (length == RUN_MASK) {
size_t const addl = read_variable_length(&ip, iend-RUN_MASK, 1);
- if (addl == rvl_error) { goto _output_error; }
+ if (addl == rvl_error) {
+ DEBUGLOG(6, "error reading long literal length");
+ goto _output_error;
+ }
length += addl;
if (unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
/* copy literals */
- cpy = op+length;
LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
- if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
- LZ4_wildCopy32(op, ip, cpy);
- ip += length; op = cpy;
- } else {
- cpy = op+length;
- DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
+ if ((op+length>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
+ LZ4_wildCopy32(op, ip, op+length);
+ ip += length; op += length;
+ } else if (ip <= iend-(16 + 1/*max lit + offset + nextToken*/)) {
/* We don't need to check oend, since we check it once for each loop below */
- if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
+ DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
/* Literals can only be <= 14, but hope compilers optimize better when copy by a register size */
LZ4_memcpy(op, ip, 16);
- ip += length; op = cpy;
+ ip += length; op += length;
+ } else {
+ goto safe_literal_copy;
}
/* get offset */
offset = LZ4_readLE16(ip); ip+=2;
+ DEBUGLOG(6, "blockPos%6u: offset = %u", (unsigned)(op-(BYTE*)dst), (unsigned)offset);
match = op - offset;
assert(match <= op); /* overflow check */
/* get matchlength */
length = token & ML_MASK;
+ DEBUGLOG(7, " match length token = %u (len==%u)", (unsigned)length, (unsigned)length+MINMATCH);
if (length == ML_MASK) {
size_t const addl = read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
- if (addl == rvl_error) { goto _output_error; }
+ if (addl == rvl_error) {
+ DEBUGLOG(5, "error reading long match length");
+ goto _output_error;
+ }
length += addl;
length += MINMATCH;
+ DEBUGLOG(7, " long match length == %u", (unsigned)length);
if (unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
goto safe_match_copy;
}
} else {
length += MINMATCH;
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+ DEBUGLOG(7, "moving to safe_match_copy (ml==%u)", (unsigned)length);
goto safe_match_copy;
}
@@ -2062,7 +2158,10 @@ LZ4_decompress_generic(
continue;
} } }
- if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ if ( checkOffset && (unlikely(match + dictSize < lowPrefix)) ) {
+ DEBUGLOG(5, "Error : pos=%zi, offset=%zi => outside buffers", op-lowPrefix, op-match);
+ goto _output_error;
+ }
/* match starting within external dictionary */
if ((dict==usingExtDict) && (match < lowPrefix)) {
assert(dictEnd != NULL);
@@ -2071,7 +2170,8 @@ LZ4_decompress_generic(
DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
length = MIN(length, (size_t)(oend-op));
} else {
- goto _output_error; /* end-of-block condition violated */
+ DEBUGLOG(6, "end-of-block condition violated")
+ goto _output_error;
} }
if (length <= (size_t)(lowPrefix-match)) {
@@ -2111,10 +2211,12 @@ LZ4_decompress_generic(
#endif
/* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
+ DEBUGLOG(6, "using safe decode loop");
while (1) {
assert(ip < iend);
token = *ip++;
length = token >> ML_BITS; /* literal length */
+ DEBUGLOG(7, "blockPos%6u: litLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
/* A two-stage shortcut for the most common case:
* 1) If the literal length is 0..14, and there is enough space,
@@ -2135,6 +2237,7 @@ LZ4_decompress_generic(
/* The second stage: prepare for match copying, decode full info.
* If it doesn't work out, the info won't be wasted. */
length = token & ML_MASK; /* match length */
+ DEBUGLOG(7, "blockPos%6u: matchLength token = %u (len=%u)", (unsigned)(op-(BYTE*)dst), (unsigned)length, (unsigned)length + 4);
offset = LZ4_readLE16(ip); ip += 2;
match = op - offset;
assert(match <= op); /* check overflow */
@@ -2166,11 +2269,12 @@ LZ4_decompress_generic(
if (unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
}
- /* copy literals */
- cpy = op+length;
#if LZ4_FAST_DEC_LOOP
safe_literal_copy:
#endif
+ /* copy literals */
+ cpy = op+length;
+
LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
if ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) {
/* We've either hit the input parsing restriction or the output parsing restriction.
@@ -2206,9 +2310,10 @@ LZ4_decompress_generic(
* so check that we exactly consume the input and don't overrun the output buffer.
*/
if ((ip+length != iend) || (cpy > oend)) {
- DEBUGLOG(6, "should have been last run of literals")
- DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
- DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+ DEBUGLOG(5, "should have been last run of literals")
+ DEBUGLOG(5, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
+ DEBUGLOG(5, "or cpy(%p) > (oend-MFLIMIT)(%p)", cpy, oend-MFLIMIT);
+ DEBUGLOG(5, "after writing %u bytes / %i bytes available", (unsigned)(op-(BYTE*)dst), outputSize);
goto _output_error;
}
}
@@ -2234,6 +2339,7 @@ LZ4_decompress_generic(
/* get matchlength */
length = token & ML_MASK;
+ DEBUGLOG(7, "blockPos%6u: matchLength token = %u", (unsigned)(op-(BYTE*)dst), (unsigned)length);
_copy_match:
if (length == ML_MASK) {
@@ -2323,7 +2429,7 @@ LZ4_decompress_generic(
while (op < cpy) { *op++ = *match++; }
} else {
LZ4_memcpy(op, match, 8);
- if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
+ if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
}
op = cpy; /* wildcopy correction */
}
@@ -2418,6 +2524,7 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
int compressedSize, int maxOutputSize,
const void* dictStart, size_t dictSize)
{
+ DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict");
return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
decode_full_block, usingExtDict,
(BYTE*)dest, (const BYTE*)dictStart, dictSize);
diff --git a/contrib/libs/lz4/lz4.h b/contrib/libs/lz4/lz4.h
index 46caa7f6f5..427c1e6e94 100644
--- a/contrib/libs/lz4/lz4.h
+++ b/contrib/libs/lz4/lz4.h
@@ -1,7 +1,7 @@
/*
* LZ4 - Fast LZ compression algorithm
* Header File
- * Copyright (C) 2011-2020, Yann Collet.
+ * Copyright (C) 2011-2023, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
@@ -129,8 +129,8 @@ extern "C" {
/*------ Version ------*/
#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
-#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
-#define LZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */
+#define LZ4_VERSION_MINOR 10 /* for new (non-breaking) interface capabilities */
+#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */
#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
@@ -144,23 +144,25 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string;
/*-************************************
-* Tuning parameter
+* Tuning memory usage
**************************************/
-#define LZ4_MEMORY_USAGE_MIN 10
-#define LZ4_MEMORY_USAGE_DEFAULT 14
-#define LZ4_MEMORY_USAGE_MAX 20
-
/*!
* LZ4_MEMORY_USAGE :
- * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; )
- * Increasing memory usage improves compression ratio, at the cost of speed.
+ * Can be selected at compile time, by setting LZ4_MEMORY_USAGE.
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB)
+ * Increasing memory usage improves compression ratio, generally at the cost of speed.
* Reduced memory usage may improve speed at the cost of ratio, thanks to better cache locality.
- * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+ * Default value is 14, for 16KB, which nicely fits into most L1 caches.
*/
#ifndef LZ4_MEMORY_USAGE
# define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT
#endif
+/* These are absolute limits, they should not be changed by users */
+#define LZ4_MEMORY_USAGE_MIN 10
+#define LZ4_MEMORY_USAGE_DEFAULT 14
+#define LZ4_MEMORY_USAGE_MAX 20
+
#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN)
# error "LZ4_MEMORY_USAGE is too small !"
#endif
@@ -189,8 +191,9 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string;
LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);
/*! LZ4_decompress_safe() :
- * compressedSize : is the exact complete size of the compressed block.
- * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
+ * @compressedSize : is the exact complete size of the compressed block.
+ * @dstCapacity : is the size of destination buffer (which must be already allocated),
+ * presumed an upper bound of decompressed size.
* @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
* If destination buffer is not large enough, decoding will stop and output an error code (negative value).
* If the source stream is detected malformed, the function will stop decoding and return a negative result.
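
For illustration, a minimal round-trip exercising this contract (worst-case buffer sized via `LZ4_COMPRESSBOUND`, negative return treated as error) could look like:

```c
#include <stdio.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "LZ4 round-trip example, with repetition repetition repetition.";
    char compressed[LZ4_COMPRESSBOUND(sizeof src)];  /* worst-case output size */
    char restored[sizeof src];

    int const cSize = LZ4_compress_default(src, compressed,
                                           (int)sizeof src, (int)sizeof compressed);
    if (cSize <= 0) return 1;   /* 0 means compression failed */

    int const dSize = LZ4_decompress_safe(compressed, restored,
                                          cSize, (int)sizeof restored);
    if (dSize < 0) return 1;    /* negative: malformed input or dst too small */

    printf("%d -> %d -> %d bytes, roundtrip %s\n", (int)sizeof src, cSize, dSize,
           (dSize == (int)sizeof src && memcmp(src, restored, (size_t)dSize) == 0)
               ? "ok" : "FAILED");
    return 0;
}
```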
@@ -242,20 +245,20 @@ LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int d
LZ4LIB_API int LZ4_sizeofState(void);
LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-
/*! LZ4_compress_destSize() :
* Reverse the logic : compresses as much data as possible from 'src' buffer
- * into already allocated buffer 'dst', of size >= 'targetDestSize'.
+ * into already allocated buffer 'dst', of size >= 'dstCapacity'.
* This function either compresses the entire 'src' content into 'dst' if it's large enough,
 * or fills 'dst' buffer completely with as much data as possible from 'src'.
* note: acceleration parameter is fixed to "default".
*
- * *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'.
+ * *srcSizePtr : in+out parameter. Initially contains size of input.
+ * Will be modified to indicate how many bytes were read from 'src' to fill 'dst'.
* New value is necessarily <= input value.
- * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
+ * @return : Nb bytes written into 'dst' (necessarily <= dstCapacity)
* or 0 if compression fails.
*
- * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed un v1.9.2+):
+ * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
* the produced compressed content could, in specific circumstances,
* require to be decompressed into a destination buffer larger
* by at least 1 byte than the content to decompress.
@@ -266,8 +269,7 @@ LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* d
* a dstCapacity which is > decompressedSize, by at least 1 byte.
* See https://github.com/lz4/lz4/issues/859 for details
*/
-LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
-
+LZ4LIB_API int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize);
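
A short sketch of the destSize pattern described above, using a hypothetical fixed 512-byte destination block:

```c
/* Hypothetical helper: pack as much of 'src' as fits into a fixed
 * 512-byte block, reporting how much input was consumed. */
#include "lz4.h"

int fill_512_block(const char* src, int srcSize, char dst[512], int* consumedPtr)
{
    *consumedPtr = srcSize;   /* in: bytes available; out: bytes actually read */
    /* returns bytes written into dst (<= 512), or 0 on failure */
    return LZ4_compress_destSize(src, dst, consumedPtr, 512);
}
```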
/*! LZ4_decompress_safe_partial() :
* Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
@@ -311,7 +313,7 @@ LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcS
***********************************************/
typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
-/**
+/*!
Note about RC_INVOKED
- RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is part of MSVC/Visual Studio).
@@ -361,13 +363,58 @@ LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
* LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
* The same dictionary will have to be loaded on decompression side for successful decoding.
 * Dictionaries are useful for better compression of small data (KB range).
- * While LZ4 accept any input as dictionary,
- * results are generally better when using Zstandard's Dictionary Builder.
+ * While LZ4 itself accepts any input as dictionary, dictionary efficiency is also a topic.
+ * When in doubt, employ Zstandard's Dictionary Builder.
* Loading a size of 0 is allowed, and is the same as reset.
- * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
+ * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded)
*/
LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
+/*! LZ4_loadDictSlow() : v1.10.0+
+ * Same as LZ4_loadDict(),
+ * but uses a bit more cpu to reference the dictionary content more thoroughly.
+ * This is expected to slightly improve compression ratio.
+ * The extra-cpu cost is likely worth it if the dictionary is re-used across multiple sessions.
+ * @return : loaded dictionary size, in bytes (note: only the last 64 KB are loaded)
+ */
+LZ4LIB_API int LZ4_loadDictSlow(LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
+
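A sketch of dictionary-primed compression using these entry points (assuming `dict` stays valid and unmodified for the whole session, per the stream contract):

```c
/* Sketch: one-shot compression of a small input against a dictionary. */
#include "lz4.h"

int compress_with_dict(const char* dict, int dictSize,
                       const char* src, int srcSize,
                       char* dst, int dstCapacity)
{
    LZ4_stream_t stream;
    LZ4_initStream(&stream, sizeof stream);
    /* LZ4_loadDictSlow(): extra cpu, worth it if this state were re-used;
     * LZ4_loadDict() is the cheaper one-shot alternative */
    LZ4_loadDictSlow(&stream, dict, dictSize);
    return LZ4_compress_fast_continue(&stream, src, dst, srcSize, dstCapacity, 1);
}
```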
+/*! LZ4_attach_dictionary() : stable since v1.10.0
+ *
+ * This allows efficient re-use of a static dictionary multiple times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
+ * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references @dictionaryStream in-place.
+ *
+ * Several assumptions are made about the state of @dictionaryStream.
+ * Currently, only states which have been prepared by LZ4_loadDict() or
+ * LZ4_loadDictSlow() should be expected to work.
+ *
+ * Alternatively, the provided @dictionaryStream may be NULL,
+ * in which case any existing dictionary stream is unset.
+ *
+ * If a dictionary is provided, it replaces any pre-existing stream history.
+ * The dictionary contents are the only history that can be referenced and
+ * logically immediately precede the data compressed in the first subsequent
+ * compression call.
+ *
+ * The dictionary will only remain attached to the working stream through the
+ * first compression call, at the end of which it is cleared.
+ * @dictionaryStream stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the completion of the compression session.
+ *
+ * Note: there is no equivalent LZ4_attach_*() method on the decompression side
+ * because there is no initialization cost, hence no need to share the cost across multiple sessions.
+ * To decompress LZ4 blocks using dictionary, attached or not,
+ * just employ the regular LZ4_setStreamDecode() for streaming,
+ * or the stateless LZ4_decompress_safe_usingDict() for one-shot decompression.
+ */
+LZ4LIB_API void
+LZ4_attach_dictionary(LZ4_stream_t* workingStream,
+ const LZ4_stream_t* dictionaryStream);
+
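+
+ The intended usage pattern, sketched (the split between a long-lived prepared stream and a per-session working stream is the point of the no-copy setup):
+
```c
/* Sketch: prepare the dictionary once, then attach it (no copy) before
 * each compression session on a working stream. */
#include "lz4.h"

static LZ4_stream_t g_dictStream;   /* long-lived, prepared once */

void prepare_dict_once(const char* dict, int dictSize)
{
    LZ4_initStream(&g_dictStream, sizeof g_dictStream);
    LZ4_loadDictSlow(&g_dictStream, dict, dictSize);
}

/* 'work' must have been initialized once with LZ4_initStream() */
int compress_session(LZ4_stream_t* work, const char* src, int srcSize,
                     char* dst, int dstCapacity)
{
    LZ4_resetStream_fast(work);                  /* cheap re-init */
    LZ4_attach_dictionary(work, &g_dictStream);  /* cleared after this session */
    return LZ4_compress_fast_continue(work, src, dst, srcSize, dstCapacity, 1);
}
```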
/*! LZ4_compress_fast_continue() :
* Compress 'src' content using data from previously compressed blocks, for better compression ratio.
* 'dst' buffer must be already allocated.
@@ -443,11 +490,24 @@ LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const
LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */
-/*! LZ4_decompress_*_continue() :
- * These decoding functions allow decompression of consecutive blocks in "streaming" mode.
- * A block is an unsplittable entity, it must be presented entirely to a decompression function.
- * Decompression functions only accepts one block at a time.
- * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded.
+/*! LZ4_decompress_safe_continue() :
+ * This decoding function allows decompression of consecutive blocks in "streaming" mode.
+ * The difference with the usual independent blocks is that
+ * new blocks are allowed to find references into former blocks.
+ * A block is an unsplittable entity, and must be presented entirely to the decompression function.
+ * LZ4_decompress_safe_continue() only accepts one block at a time.
+ * It's modeled after `LZ4_decompress_safe()` and behaves similarly.
+ *
+ * @LZ4_streamDecode : decompression state, tracking the position in memory of past data
+ * @compressedSize : exact complete size of one compressed block.
+ * @dstCapacity : size of destination buffer (which must be already allocated),
+ * must be an upper bound of decompressed size.
+ * @return : number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
+ * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
+ * If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ *
+ * The last 64KB of previously decoded data *must* remain available and unmodified
+ * at the memory position where they were previously decoded.
* If less than 64KB of data has been decoded, all the data must be present.
*
* Special : if decompression side sets a ring buffer, it must respect one of the following conditions :
@@ -474,10 +534,10 @@ LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode,
int srcSize, int dstCapacity);
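
A condensed streaming-decode sketch that honors the 64KB rule by decoding all dependent blocks back-to-back into one contiguous buffer:

```c
/* Sketch: decode a sequence of dependent blocks; keeping output contiguous
 * trivially leaves the last 64KB available and unmodified. */
#include "lz4.h"

int decode_block_sequence(const char* const blocks[], const int blockSizes[],
                          int nbBlocks, char* dst, int dstCapacity)
{
    LZ4_streamDecode_t sd;
    char* op = dst;
    int i;
    LZ4_setStreamDecode(&sd, NULL, 0);   /* start with no history */
    for (i = 0; i < nbBlocks; i++) {
        int const r = LZ4_decompress_safe_continue(&sd, blocks[i], op,
                                blockSizes[i], (int)(dst + dstCapacity - op));
        if (r < 0) return -1;            /* malformed block */
        op += r;
    }
    return (int)(op - dst);              /* total decompressed size */
}
```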
-/*! LZ4_decompress_*_usingDict() :
- * These decoding functions work the same as
- * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue()
- * They are stand-alone, and don't need an LZ4_streamDecode_t structure.
+/*! LZ4_decompress_safe_usingDict() :
+ * Works the same as
+ * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_safe_continue()
+ * However, it's stateless: it doesn't need any LZ4_streamDecode_t state.
* Dictionary is presumed stable : it must remain accessible and unmodified during decompression.
* Performance tip : Decompression speed can be substantially increased
* when dst == dictStart + dictSize.
@@ -487,6 +547,12 @@ LZ4_decompress_safe_usingDict(const char* src, char* dst,
int srcSize, int dstCapacity,
const char* dictStart, int dictSize);
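
A sketch exploiting the performance tip above, placing the decoded output immediately after the dictionary in a single work buffer (`workBuf` is a hypothetical caller-provided buffer):

```c
/* Sketch: decode with dst == dictStart + dictSize, per the documented tip. */
#include <string.h>
#include "lz4.h"

int decode_after_dict(const char* src, int srcSize,
                      const char* dict, int dictSize,
                      char* workBuf, int workCapacity)   /* holds dict + output */
{
    if (workCapacity <= dictSize) return -1;
    memcpy(workBuf, dict, (size_t)dictSize);             /* history first */
    return LZ4_decompress_safe_usingDict(src, workBuf + dictSize, srcSize,
                                         workCapacity - dictSize,
                                         workBuf, dictSize);
}
```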
+/*! LZ4_decompress_safe_partial_usingDict() :
+ * Behaves the same as LZ4_decompress_safe_partial()
+ * with the added ability to specify a memory segment for past data.
+ * Performance tip : Decompression speed can be substantially increased
+ * when dst == dictStart + dictSize.
+ */
LZ4LIB_API int
LZ4_decompress_safe_partial_usingDict(const char* src, char* dst,
int compressedSize,
@@ -526,9 +592,9 @@ LZ4_decompress_safe_partial_usingDict(const char* src, char* dst,
#define LZ4_STATIC_3504398509
#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
-#define LZ4LIB_STATIC_API LZ4LIB_API
+# define LZ4LIB_STATIC_API LZ4LIB_API
#else
-#define LZ4LIB_STATIC_API
+# define LZ4LIB_STATIC_API
#endif
@@ -544,36 +610,11 @@ LZ4_decompress_safe_partial_usingDict(const char* src, char* dst,
*/
LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-/*! LZ4_attach_dictionary() :
- * This is an experimental API that allows
- * efficient use of a static dictionary many times.
- *
- * Rather than re-loading the dictionary buffer into a working context before
- * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
- * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
- * in which the working stream references the dictionary stream in-place.
- *
- * Several assumptions are made about the state of the dictionary stream.
- * Currently, only streams which have been prepared by LZ4_loadDict() should
- * be expected to work.
- *
- * Alternatively, the provided dictionaryStream may be NULL,
- * in which case any existing dictionary stream is unset.
- *
- * If a dictionary is provided, it replaces any pre-existing stream history.
- * The dictionary contents are the only history that can be referenced and
- * logically immediately precede the data compressed in the first subsequent
- * compression call.
- *
- * The dictionary will only remain attached to the working stream through the
- * first compression call, at the end of which it is cleared. The dictionary
- * stream (and source buffer) must remain in-place / accessible / unchanged
- * through the completion of the first compression call on the stream.
+/*! LZ4_compress_destSize_extState() : introduced in v1.10.0
+ * Same as LZ4_compress_destSize(), but using an externally allocated state.
+ * Also: exposes @acceleration
*/
-LZ4LIB_STATIC_API void
-LZ4_attach_dictionary(LZ4_stream_t* workingStream,
- const LZ4_stream_t* dictionaryStream);
-
+int LZ4_compress_destSize_extState(void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize, int acceleration);
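
Sketch of this extState variant with a caller-owned, heap-allocated state, sized via `LZ4_sizeofState()` (per lz4.c, malloc'd memory is suitably aligned):

```c
/* Sketch: destSize compression with a caller-owned state. */
#include <stdlib.h>
#include "lz4.h"

int destSize_with_own_state(const char* src, int* srcSizePtr,
                            char* dst, int targetDstSize)
{
    int written = 0;
    void* const state = malloc((size_t)LZ4_sizeofState());
    if (state == NULL) return 0;
    written = LZ4_compress_destSize_extState(state, src, dst,
                                             srcSizePtr, targetDstSize,
                                             1 /* default acceleration */);
    free(state);
    return written;
}
```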
/*! In-place compression and decompression
*
@@ -685,7 +726,7 @@ struct LZ4_stream_t_internal {
/* Implicit padding to ensure structure is aligned */
};
-#define LZ4_STREAM_MINSIZE ((1UL << LZ4_MEMORY_USAGE) + 32) /* static size, for inter-version compatibility */
+#define LZ4_STREAM_MINSIZE ((1UL << (LZ4_MEMORY_USAGE)) + 32) /* static size, for inter-version compatibility */
union LZ4_stream_u {
char minStateSize[LZ4_STREAM_MINSIZE];
LZ4_stream_t_internal internal_donotuse;
@@ -706,7 +747,7 @@ union LZ4_stream_u {
* Note2: An LZ4_stream_t structure guarantees correct alignment and size.
* Note3: Before v1.9.0, use LZ4_resetStream() instead
**/
-LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
+LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* stateBuffer, size_t size);
/*! LZ4_streamDecode_t :
@@ -823,11 +864,12 @@ LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4
* But they may happen if input data is invalid (error or intentional tampering).
* As a consequence, use these functions in trusted environments with trusted data **only**.
*/
-LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial() instead")
LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
-LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider migrating towards LZ4_decompress_safe_continue() instead. "
+ "Note that the contract will change (requires block's compressed size, instead of decompressed size)")
LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize);
-LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead")
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_partial_usingDict() instead")
LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize);
/*! LZ4_resetStream() :
diff --git a/contrib/libs/lz4/lz4file.c b/contrib/libs/lz4/lz4file.c
index eaf9b1704d..a4197ea80d 100644
--- a/contrib/libs/lz4/lz4file.c
+++ b/contrib/libs/lz4/lz4file.c
@@ -31,11 +31,21 @@
* - LZ4 homepage : http://www.lz4.org
* - LZ4 source repository : https://github.com/lz4/lz4
*/
-#include <stdlib.h>
+#include <stdlib.h> /* malloc, free */
#include <string.h>
+#include <assert.h>
#include "lz4.h"
#include "lz4file.h"
+static LZ4F_errorCode_t returnErrorCode(LZ4F_errorCodes code)
+{
+ return (LZ4F_errorCode_t)-(ptrdiff_t)code;
+}
+#undef RETURN_ERROR
+#define RETURN_ERROR(e) return returnErrorCode(LZ4F_ERROR_ ## e)
+
+/* ===== read API ===== */
+
struct LZ4_readFile_s {
LZ4F_dctx* dctxPtr;
FILE* fp;
@@ -45,76 +55,80 @@ struct LZ4_readFile_s {
size_t srcBufMaxSize;
};
-struct LZ4_writeFile_s {
- LZ4F_cctx* cctxPtr;
- FILE* fp;
- LZ4_byte* dstBuf;
- size_t maxWriteSize;
- size_t dstBufMaxSize;
- LZ4F_errorCode_t errCode;
-};
+static void LZ4F_freeReadFile(LZ4_readFile_t* lz4fRead)
+{
+ if (lz4fRead==NULL) return;
+ LZ4F_freeDecompressionContext(lz4fRead->dctxPtr);
+ free(lz4fRead->srcBuf);
+ free(lz4fRead);
+}
+
+static void LZ4F_freeAndNullReadFile(LZ4_readFile_t** statePtr)
+{
+ assert(statePtr != NULL);
+ LZ4F_freeReadFile(*statePtr);
+ *statePtr = NULL;
+}
LZ4F_errorCode_t LZ4F_readOpen(LZ4_readFile_t** lz4fRead, FILE* fp)
{
char buf[LZ4F_HEADER_SIZE_MAX];
size_t consumedSize;
LZ4F_errorCode_t ret;
- LZ4F_frameInfo_t info;
if (fp == NULL || lz4fRead == NULL) {
- return -LZ4F_ERROR_GENERIC;
+ RETURN_ERROR(parameter_null);
}
*lz4fRead = (LZ4_readFile_t*)calloc(1, sizeof(LZ4_readFile_t));
if (*lz4fRead == NULL) {
- return -LZ4F_ERROR_allocation_failed;
+ RETURN_ERROR(allocation_failed);
}
- ret = LZ4F_createDecompressionContext(&(*lz4fRead)->dctxPtr, LZ4F_getVersion());
+ ret = LZ4F_createDecompressionContext(&(*lz4fRead)->dctxPtr, LZ4F_VERSION);
if (LZ4F_isError(ret)) {
- free(*lz4fRead);
+ LZ4F_freeAndNullReadFile(lz4fRead);
return ret;
}
(*lz4fRead)->fp = fp;
consumedSize = fread(buf, 1, sizeof(buf), (*lz4fRead)->fp);
if (consumedSize != sizeof(buf)) {
- free(*lz4fRead);
- return -LZ4F_ERROR_GENERIC;
+ LZ4F_freeAndNullReadFile(lz4fRead);
+ RETURN_ERROR(io_read);
}
- ret = LZ4F_getFrameInfo((*lz4fRead)->dctxPtr, &info, buf, &consumedSize);
- if (LZ4F_isError(ret)) {
- LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr);
- free(*lz4fRead);
- return ret;
+ { LZ4F_frameInfo_t info;
+ LZ4F_errorCode_t const r = LZ4F_getFrameInfo((*lz4fRead)->dctxPtr, &info, buf, &consumedSize);
+ if (LZ4F_isError(r)) {
+ LZ4F_freeAndNullReadFile(lz4fRead);
+ return r;
}
- switch (info.blockSizeID) {
- case LZ4F_default :
- case LZ4F_max64KB :
- (*lz4fRead)->srcBufMaxSize = 64 * 1024;
- break;
- case LZ4F_max256KB:
- (*lz4fRead)->srcBufMaxSize = 256 * 1024;
- break;
- case LZ4F_max1MB:
- (*lz4fRead)->srcBufMaxSize = 1 * 1024 * 1024;
- break;
- case LZ4F_max4MB:
- (*lz4fRead)->srcBufMaxSize = 4 * 1024 * 1024;
- break;
- default:
- LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr);
- free(*lz4fRead);
- return -LZ4F_ERROR_maxBlockSize_invalid;
+ switch (info.blockSizeID) {
+ case LZ4F_default :
+ case LZ4F_max64KB :
+ (*lz4fRead)->srcBufMaxSize = 64 * 1024;
+ break;
+ case LZ4F_max256KB:
+ (*lz4fRead)->srcBufMaxSize = 256 * 1024;
+ break;
+ case LZ4F_max1MB:
+ (*lz4fRead)->srcBufMaxSize = 1 * 1024 * 1024;
+ break;
+ case LZ4F_max4MB:
+ (*lz4fRead)->srcBufMaxSize = 4 * 1024 * 1024;
+ break;
+ default:
+ LZ4F_freeAndNullReadFile(lz4fRead);
+ RETURN_ERROR(maxBlockSize_invalid);
+ }
}
(*lz4fRead)->srcBuf = (LZ4_byte*)malloc((*lz4fRead)->srcBufMaxSize);
if ((*lz4fRead)->srcBuf == NULL) {
- LZ4F_freeDecompressionContext((*lz4fRead)->dctxPtr);
- free(lz4fRead);
- return -LZ4F_ERROR_allocation_failed;
+ LZ4F_freeAndNullReadFile(lz4fRead);
+ RETURN_ERROR(allocation_failed);
}
(*lz4fRead)->srcBufSize = sizeof(buf) - consumedSize;
@@ -129,7 +143,7 @@ size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size)
size_t next = 0;
if (lz4fRead == NULL || buf == NULL)
- return -LZ4F_ERROR_GENERIC;
+ RETURN_ERROR(parameter_null);
while (next < size) {
size_t srcsize = lz4fRead->srcBufSize - lz4fRead->srcBufNext;
@@ -142,12 +156,10 @@ size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size)
lz4fRead->srcBufSize = ret;
srcsize = lz4fRead->srcBufSize;
lz4fRead->srcBufNext = 0;
- }
- else if (ret == 0) {
+ } else if (ret == 0) {
break;
- }
- else {
- return -LZ4F_ERROR_GENERIC;
+ } else {
+ RETURN_ERROR(io_read);
}
}
@@ -171,24 +183,48 @@ size_t LZ4F_read(LZ4_readFile_t* lz4fRead, void* buf, size_t size)
LZ4F_errorCode_t LZ4F_readClose(LZ4_readFile_t* lz4fRead)
{
if (lz4fRead == NULL)
- return -LZ4F_ERROR_GENERIC;
- LZ4F_freeDecompressionContext(lz4fRead->dctxPtr);
- free(lz4fRead->srcBuf);
- free(lz4fRead);
+ RETURN_ERROR(parameter_null);
+ LZ4F_freeReadFile(lz4fRead);
return LZ4F_OK_NoError;
}
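
For illustration, a minimal usage sketch of the read API above: open, drain, close. The names `fin`, `outBuf` and `outCapacity` are hypothetical caller-side variables, and error handling is abbreviated; this is a sketch, not part of the patch hunks.

    LZ4_readFile_t* rf = NULL;
    LZ4F_errorCode_t const err = LZ4F_readOpen(&rf, fin);
    if (LZ4F_isError(err)) return err;
    for (;;) {
        size_t const n = LZ4F_read(rf, outBuf, outCapacity);
        if (LZ4F_isError(n)) { (void)LZ4F_readClose(rf); return n; }
        if (n == 0) break;                 /* end of frame reached */
        /* consume n decompressed bytes from outBuf here */
    }
    return LZ4F_readClose(rf);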
+/* ===== write API ===== */
+
+struct LZ4_writeFile_s {
+ LZ4F_cctx* cctxPtr;
+ FILE* fp;
+ LZ4_byte* dstBuf;
+ size_t maxWriteSize;
+ size_t dstBufMaxSize;
+ LZ4F_errorCode_t errCode;
+};
+
+static void LZ4F_freeWriteFile(LZ4_writeFile_t* state)
+{
+ if (state == NULL) return;
+ LZ4F_freeCompressionContext(state->cctxPtr);
+ free(state->dstBuf);
+ free(state);
+}
+
+static void LZ4F_freeAndNullWriteFile(LZ4_writeFile_t** statePtr)
+{
+ assert(statePtr != NULL);
+ LZ4F_freeWriteFile(*statePtr);
+ *statePtr = NULL;
+}
+
LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4F_preferences_t* prefsPtr)
{
LZ4_byte buf[LZ4F_HEADER_SIZE_MAX];
size_t ret;
if (fp == NULL || lz4fWrite == NULL)
- return -LZ4F_ERROR_GENERIC;
+ RETURN_ERROR(parameter_null);
- *lz4fWrite = (LZ4_writeFile_t*)malloc(sizeof(LZ4_writeFile_t));
+ *lz4fWrite = (LZ4_writeFile_t*)calloc(1, sizeof(LZ4_writeFile_t));
if (*lz4fWrite == NULL) {
- return -LZ4F_ERROR_allocation_failed;
+ RETURN_ERROR(allocation_failed);
}
if (prefsPtr != NULL) {
switch (prefsPtr->frameInfo.blockSizeID) {
@@ -206,8 +242,8 @@ LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4
(*lz4fWrite)->maxWriteSize = 4 * 1024 * 1024;
break;
default:
- free(lz4fWrite);
- return -LZ4F_ERROR_maxBlockSize_invalid;
+ LZ4F_freeAndNullWriteFile(lz4fWrite);
+ RETURN_ERROR(maxBlockSize_invalid);
}
} else {
(*lz4fWrite)->maxWriteSize = 64 * 1024;
@@ -216,30 +252,25 @@ LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4
(*lz4fWrite)->dstBufMaxSize = LZ4F_compressBound((*lz4fWrite)->maxWriteSize, prefsPtr);
(*lz4fWrite)->dstBuf = (LZ4_byte*)malloc((*lz4fWrite)->dstBufMaxSize);
if ((*lz4fWrite)->dstBuf == NULL) {
- free(*lz4fWrite);
- return -LZ4F_ERROR_allocation_failed;
+ LZ4F_freeAndNullWriteFile(lz4fWrite);
+ RETURN_ERROR(allocation_failed);
}
- ret = LZ4F_createCompressionContext(&(*lz4fWrite)->cctxPtr, LZ4F_getVersion());
+ ret = LZ4F_createCompressionContext(&(*lz4fWrite)->cctxPtr, LZ4F_VERSION);
if (LZ4F_isError(ret)) {
- free((*lz4fWrite)->dstBuf);
- free(*lz4fWrite);
+ LZ4F_freeAndNullWriteFile(lz4fWrite);
return ret;
}
ret = LZ4F_compressBegin((*lz4fWrite)->cctxPtr, buf, LZ4F_HEADER_SIZE_MAX, prefsPtr);
if (LZ4F_isError(ret)) {
- LZ4F_freeCompressionContext((*lz4fWrite)->cctxPtr);
- free((*lz4fWrite)->dstBuf);
- free(*lz4fWrite);
+ LZ4F_freeAndNullWriteFile(lz4fWrite);
return ret;
}
if (ret != fwrite(buf, 1, ret, fp)) {
- LZ4F_freeCompressionContext((*lz4fWrite)->cctxPtr);
- free((*lz4fWrite)->dstBuf);
- free(*lz4fWrite);
- return -LZ4F_ERROR_GENERIC;
+ LZ4F_freeAndNullWriteFile(lz4fWrite);
+ RETURN_ERROR(io_write);
}
(*lz4fWrite)->fp = fp;
@@ -247,15 +278,15 @@ LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite, FILE* fp, const LZ4
return LZ4F_OK_NoError;
}
-size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, void* buf, size_t size)
+size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, const void* buf, size_t size)
{
- LZ4_byte* p = (LZ4_byte*)buf;
+ const LZ4_byte* p = (const LZ4_byte*)buf;
size_t remain = size;
size_t chunk;
size_t ret;
if (lz4fWrite == NULL || buf == NULL)
- return -LZ4F_ERROR_GENERIC;
+ RETURN_ERROR(parameter_null);
while (remain) {
if (remain > lz4fWrite->maxWriteSize)
chunk = lz4fWrite->maxWriteSize;
@@ -271,9 +302,9 @@ size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, void* buf, size_t size)
return ret;
}
- if(ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) {
- lz4fWrite->errCode = -LZ4F_ERROR_GENERIC;
- return -LZ4F_ERROR_GENERIC;
+ if (ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) {
+ lz4fWrite->errCode = returnErrorCode(LZ4F_ERROR_io_write);
+ RETURN_ERROR(io_write);
}
p += chunk;
@@ -287,8 +318,9 @@ LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite)
{
LZ4F_errorCode_t ret = LZ4F_OK_NoError;
- if (lz4fWrite == NULL)
- return -LZ4F_ERROR_GENERIC;
+ if (lz4fWrite == NULL) {
+ RETURN_ERROR(parameter_null);
+ }
if (lz4fWrite->errCode == LZ4F_OK_NoError) {
ret = LZ4F_compressEnd(lz4fWrite->cctxPtr,
@@ -299,13 +331,11 @@ LZ4F_errorCode_t LZ4F_writeClose(LZ4_writeFile_t* lz4fWrite)
}
if (ret != fwrite(lz4fWrite->dstBuf, 1, ret, lz4fWrite->fp)) {
- ret = -LZ4F_ERROR_GENERIC;
+ ret = returnErrorCode(LZ4F_ERROR_io_write);
}
}
out:
- LZ4F_freeCompressionContext(lz4fWrite->cctxPtr);
- free(lz4fWrite->dstBuf);
- free(lz4fWrite);
+ LZ4F_freeWriteFile(lz4fWrite);
return ret;
}
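
The write side mirrors it. A sketch under the same caveats (`fout`, `data`, `dataSize` are hypothetical); note that it is `LZ4F_writeClose()` which flushes the frame footer:

    LZ4_writeFile_t* wf = NULL;
    LZ4F_errorCode_t const err = LZ4F_writeOpen(&wf, fout, NULL);  /* NULL => default preferences */
    if (LZ4F_isError(err)) return err;
    {   size_t const w = LZ4F_write(wf, data, dataSize);
        if (LZ4F_isError(w)) { (void)LZ4F_writeClose(wf); return w; }
    }
    return LZ4F_writeClose(wf);   /* writes the end mark and optional content checksum */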
diff --git a/contrib/libs/lz4/lz4file.h b/contrib/libs/lz4/lz4file.h
index 5527130720..598ad705ea 100644
--- a/contrib/libs/lz4/lz4file.h
+++ b/contrib/libs/lz4/lz4file.h
@@ -38,7 +38,7 @@ extern "C" {
#ifndef LZ4FILE_H
#define LZ4FILE_H
-#include <stdio.h>
+#include <stdio.h> /* FILE* */
#include "lz4frame_static.h"
typedef struct LZ4_readFile_s LZ4_readFile_t;
@@ -78,7 +78,7 @@ LZ4FLIB_STATIC_API LZ4F_errorCode_t LZ4F_writeOpen(LZ4_writeFile_t** lz4fWrite,
* `buf` write data buffer.
* `size` write data buffer size.
*/
-LZ4FLIB_STATIC_API size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, void* buf, size_t size);
+LZ4FLIB_STATIC_API size_t LZ4F_write(LZ4_writeFile_t* lz4fWrite, const void* buf, size_t size);
/*! LZ4F_writeClose() :
* Close lz4file handle.
diff --git a/contrib/libs/lz4/lz4frame.c b/contrib/libs/lz4/lz4frame.c
index 174f9ae4f2..f89c055799 100644
--- a/contrib/libs/lz4/lz4frame.c
+++ b/contrib/libs/lz4/lz4frame.c
@@ -44,6 +44,7 @@
/*-************************************
* Compiler Options
**************************************/
+#include <limits.h>
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
@@ -54,8 +55,8 @@
**************************************/
/*
* LZ4F_HEAPMODE :
- * Select how default compression functions will allocate memory for their hash table,
- * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ * Control how LZ4F_compressFrame allocates the Compression State,
+ * either on stack (0:default, fastest), or in memory heap (1:requires malloc()).
*/
#ifndef LZ4F_HEAPMODE
# define LZ4F_HEAPMODE 0
@@ -125,8 +126,9 @@ static void* LZ4F_malloc(size_t s, LZ4F_CustomMem cmem)
static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
{
- /* custom malloc defined : use it */
+ if (p == NULL) return;
if (cmem.customFree != NULL) {
+ /* custom allocation defined : use it */
cmem.customFree(cmem.opaqueState, p);
return;
}
@@ -153,7 +155,7 @@ static void LZ4F_free(void* p, LZ4F_CustomMem cmem)
static int g_debuglog_enable = 1;
# define DEBUGLOG(l, ...) { \
if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
- fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __FILE__ " (%i): ", __LINE__ ); \
fprintf(stderr, __VA_ARGS__); \
fprintf(stderr, " \n"); \
} }
@@ -186,9 +188,9 @@ static U32 LZ4F_readLE32 (const void* src)
{
const BYTE* const srcPtr = (const BYTE*)src;
U32 value32 = srcPtr[0];
- value32 += ((U32)srcPtr[1])<< 8;
- value32 += ((U32)srcPtr[2])<<16;
- value32 += ((U32)srcPtr[3])<<24;
+ value32 |= ((U32)srcPtr[1])<< 8;
+ value32 |= ((U32)srcPtr[2])<<16;
+ value32 |= ((U32)srcPtr[3])<<24;
return value32;
}
@@ -205,13 +207,13 @@ static U64 LZ4F_readLE64 (const void* src)
{
const BYTE* const srcPtr = (const BYTE*)src;
U64 value64 = srcPtr[0];
- value64 += ((U64)srcPtr[1]<<8);
- value64 += ((U64)srcPtr[2]<<16);
- value64 += ((U64)srcPtr[3]<<24);
- value64 += ((U64)srcPtr[4]<<32);
- value64 += ((U64)srcPtr[5]<<40);
- value64 += ((U64)srcPtr[6]<<48);
- value64 += ((U64)srcPtr[7]<<56);
+ value64 |= ((U64)srcPtr[1]<<8);
+ value64 |= ((U64)srcPtr[2]<<16);
+ value64 |= ((U64)srcPtr[3]<<24);
+ value64 |= ((U64)srcPtr[4]<<32);
+ value64 |= ((U64)srcPtr[5]<<40);
+ value64 |= ((U64)srcPtr[6]<<48);
+ value64 |= ((U64)srcPtr[7]<<56);
return value64;
}
@@ -257,14 +259,15 @@ static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checks
* Structures and local types
**************************************/
-typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_blockCompression_t;
+typedef enum { LZ4B_COMPRESSED, LZ4B_UNCOMPRESSED} LZ4F_BlockCompressMode_e;
+typedef enum { ctxNone, ctxFast, ctxHC } LZ4F_CtxType_e;
typedef struct LZ4F_cctx_s
{
LZ4F_CustomMem cmem;
LZ4F_preferences_t prefs;
U32 version;
- U32 cStage;
+ U32 cStage; /* 0 : compression uninitialized ; 1 : initialized, can compress */
const LZ4F_CDict* cdict;
size_t maxBlockSize;
size_t maxBufferSize;
@@ -275,8 +278,8 @@ typedef struct LZ4F_cctx_s
XXH32_state_t xxh;
void* lz4CtxPtr;
U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
- U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
- LZ4F_blockCompression_t blockCompression;
+ U16 lz4CtxType; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
+ LZ4F_BlockCompressMode_e blockCompressMode;
} LZ4F_cctx_t;
@@ -314,9 +317,14 @@ static LZ4F_errorCode_t LZ4F_returnErrorCode(LZ4F_errorCodes code)
#define RETURN_ERROR(e) return LZ4F_returnErrorCode(LZ4F_ERROR_ ## e)
-#define RETURN_ERROR_IF(c,e) if (c) RETURN_ERROR(e)
+#define RETURN_ERROR_IF(c,e) do { \
+ if (c) { \
+ DEBUGLOG(3, "Error: " #c); \
+ RETURN_ERROR(e); \
+ } \
+ } while (0)
-#define FORWARD_IF_ERROR(r) if (LZ4F_isError(r)) return (r)
+#define FORWARD_IF_ERROR(r) do { if (LZ4F_isError(r)) return (r); } while (0)
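
The `do { ... } while (0)` wrapper makes these multi-statement macros behave as a single statement, which matters in unbraced `if`/`else` contexts. A hypothetical caller illustrating the hazard the wrapper avoids:

    if (someCondition)
        RETURN_ERROR_IF(srcBuffer == NULL, parameter_null);  /* expands to one statement */
    else
        doSomethingElse();   /* without do/while(0), this `else` would bind
                              * to the `if` hidden inside the macro */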
unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
@@ -429,6 +437,7 @@ size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
BYTE* dstPtr = dstStart;
BYTE* const dstEnd = dstStart + dstCapacity;
+ DEBUGLOG(4, "LZ4F_compressFrame_usingCDict (srcSize=%u)", (unsigned)srcSize);
if (preferencesPtr!=NULL)
prefs = *preferencesPtr;
else
@@ -494,7 +503,7 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
cctxPtr->lz4CtxPtr = &lz4ctx;
cctxPtr->lz4CtxAlloc = 1;
- cctxPtr->lz4CtxState = 1;
+ cctxPtr->lz4CtxType = ctxFast;
}
#endif
DEBUGLOG(4, "LZ4F_compressFrame");
@@ -539,18 +548,19 @@ LZ4F_createCDict_advanced(LZ4F_CustomMem cmem, const void* dictBuffer, size_t di
dictSize = 64 KB;
}
cdict->dictContent = LZ4F_malloc(dictSize, cmem);
+ /* note: using @cmem to allocate => can't use default create */
cdict->fastCtx = (LZ4_stream_t*)LZ4F_malloc(sizeof(LZ4_stream_t), cmem);
- if (cdict->fastCtx)
- LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
cdict->HCCtx = (LZ4_streamHC_t*)LZ4F_malloc(sizeof(LZ4_streamHC_t), cmem);
- if (cdict->HCCtx)
- LZ4_initStream(cdict->HCCtx, sizeof(LZ4_streamHC_t));
if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
LZ4F_freeCDict(cdict);
return NULL;
}
memcpy(cdict->dictContent, dictStart, dictSize);
- LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
+ LZ4_initStream(cdict->fastCtx, sizeof(LZ4_stream_t));
+ LZ4_loadDictSlow(cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
+ LZ4_initStreamHC(cdict->HCCtx, sizeof(LZ4_streamHC_t));
+    /* note: we don't know, at this point, which compression level is going to be used;
+     * as a consequence, HCCtx is created for the more common HC mode */
LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
return cdict;
@@ -616,7 +626,6 @@ LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned v
return LZ4F_OK_NoError;
}
-
LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
{
if (cctxPtr != NULL) { /* support free on NULL */
@@ -641,7 +650,7 @@ static void LZ4F_initStream(void* ctx,
int level,
LZ4F_blockMode_t blockMode) {
if (level < LZ4HC_CLEVEL_MIN) {
- if (cdict != NULL || blockMode == LZ4F_blockLinked) {
+ if (cdict || blockMode == LZ4F_blockLinked) {
/* In these cases, we will call LZ4_compress_fast_continue(),
* which needs an already reset context. Otherwise, we'll call a
* one-shot API. The non-continued APIs internally perform their own
@@ -649,11 +658,18 @@ static void LZ4F_initStream(void* ctx,
* tableType they need the context to be in. So in that case this
* would be misguided / wasted work. */
LZ4_resetStream_fast((LZ4_stream_t*)ctx);
+ if (cdict)
+ LZ4_attach_dictionary((LZ4_stream_t*)ctx, cdict->fastCtx);
}
- LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL);
+ /* In these cases, we'll call a one-shot API.
+ * The non-continued APIs internally perform their own resets
+ * at the beginning of their calls, where they know
+ * which tableType they need the context to be in.
+ * Therefore, a reset here would be wasted work. */
} else {
LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
- LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL);
+ if (cdict)
+ LZ4_attach_HC_dictionary((LZ4_streamHC_t*)ctx, cdict->HCCtx);
}
}
@@ -668,14 +684,12 @@ static int ctxTypeID_to_size(int ctxTypeID) {
}
}
-/*! LZ4F_compressBegin_usingCDict() :
- * init streaming compression AND writes frame header into @dstBuffer.
- * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * @return : number of bytes written into @dstBuffer for the header
- * or an error code (can be tested using LZ4F_isError())
+/* LZ4F_compressBegin_internal()
+ * Note: only accepts @cdict _or_ @dictBuffer as non-NULL, not both.
*/
-size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
+size_t LZ4F_compressBegin_internal(LZ4F_cctx* cctx,
void* dstBuffer, size_t dstCapacity,
+ const void* dictBuffer, size_t dictSize,
const LZ4F_CDict* cdict,
const LZ4F_preferences_t* preferencesPtr)
{
@@ -685,71 +699,85 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
RETURN_ERROR_IF(dstCapacity < maxFHSize, dstMaxSize_tooSmall);
if (preferencesPtr == NULL) preferencesPtr = &prefNull;
- cctxPtr->prefs = *preferencesPtr;
+ cctx->prefs = *preferencesPtr;
/* cctx Management */
- { U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
+ { U16 const ctxTypeID = (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
int requiredSize = ctxTypeID_to_size(ctxTypeID);
- int allocatedSize = ctxTypeID_to_size(cctxPtr->lz4CtxAlloc);
+ int allocatedSize = ctxTypeID_to_size(cctx->lz4CtxAlloc);
if (allocatedSize < requiredSize) {
/* not enough space allocated */
- LZ4F_free(cctxPtr->lz4CtxPtr, cctxPtr->cmem);
- if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
+ LZ4F_free(cctx->lz4CtxPtr, cctx->cmem);
+ if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
/* must take ownership of memory allocation,
* in order to respect custom allocator contract */
- cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctxPtr->cmem);
- if (cctxPtr->lz4CtxPtr)
- LZ4_initStream(cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t));
+ cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_stream_t), cctx->cmem);
+ if (cctx->lz4CtxPtr)
+ LZ4_initStream(cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
} else {
- cctxPtr->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctxPtr->cmem);
- if (cctxPtr->lz4CtxPtr)
- LZ4_initStreamHC(cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
+ cctx->lz4CtxPtr = LZ4F_malloc(sizeof(LZ4_streamHC_t), cctx->cmem);
+ if (cctx->lz4CtxPtr)
+ LZ4_initStreamHC(cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
}
- RETURN_ERROR_IF(cctxPtr->lz4CtxPtr == NULL, allocation_failed);
- cctxPtr->lz4CtxAlloc = ctxTypeID;
- cctxPtr->lz4CtxState = ctxTypeID;
- } else if (cctxPtr->lz4CtxState != ctxTypeID) {
+ RETURN_ERROR_IF(cctx->lz4CtxPtr == NULL, allocation_failed);
+ cctx->lz4CtxAlloc = ctxTypeID;
+ cctx->lz4CtxType = ctxTypeID;
+ } else if (cctx->lz4CtxType != ctxTypeID) {
/* otherwise, a sufficient buffer is already allocated,
* but we need to reset it to the correct context type */
- if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
- LZ4_initStream((LZ4_stream_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_stream_t));
+ if (cctx->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
+ LZ4_initStream((LZ4_stream_t*)cctx->lz4CtxPtr, sizeof(LZ4_stream_t));
} else {
- LZ4_initStreamHC((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
- LZ4_setCompressionLevel((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
+ LZ4_initStreamHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, sizeof(LZ4_streamHC_t));
+ LZ4_setCompressionLevel((LZ4_streamHC_t*)cctx->lz4CtxPtr, cctx->prefs.compressionLevel);
}
- cctxPtr->lz4CtxState = ctxTypeID;
+ cctx->lz4CtxType = ctxTypeID;
} }
/* Buffer Management */
- if (cctxPtr->prefs.frameInfo.blockSizeID == 0)
- cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
- cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);
+ if (cctx->prefs.frameInfo.blockSizeID == 0)
+ cctx->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
+ cctx->maxBlockSize = LZ4F_getBlockSize(cctx->prefs.frameInfo.blockSizeID);
{ size_t const requiredBuffSize = preferencesPtr->autoFlush ?
- ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
- cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
-
- if (cctxPtr->maxBufferSize < requiredBuffSize) {
- cctxPtr->maxBufferSize = 0;
- LZ4F_free(cctxPtr->tmpBuff, cctxPtr->cmem);
- cctxPtr->tmpBuff = (BYTE*)LZ4F_calloc(requiredBuffSize, cctxPtr->cmem);
- RETURN_ERROR_IF(cctxPtr->tmpBuff == NULL, allocation_failed);
- cctxPtr->maxBufferSize = requiredBuffSize;
+ ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
+ cctx->maxBlockSize + ((cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
+
+ if (cctx->maxBufferSize < requiredBuffSize) {
+ cctx->maxBufferSize = 0;
+ LZ4F_free(cctx->tmpBuff, cctx->cmem);
+ cctx->tmpBuff = (BYTE*)LZ4F_malloc(requiredBuffSize, cctx->cmem);
+ RETURN_ERROR_IF(cctx->tmpBuff == NULL, allocation_failed);
+ cctx->maxBufferSize = requiredBuffSize;
} }
- cctxPtr->tmpIn = cctxPtr->tmpBuff;
- cctxPtr->tmpInSize = 0;
- (void)XXH32_reset(&(cctxPtr->xxh), 0);
+ cctx->tmpIn = cctx->tmpBuff;
+ cctx->tmpInSize = 0;
+ (void)XXH32_reset(&(cctx->xxh), 0);
/* context init */
- cctxPtr->cdict = cdict;
- if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
+ cctx->cdict = cdict;
+ if (cctx->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
/* frame init only for blockLinked : blockIndependent will be init at each block */
- LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked);
+ LZ4F_initStream(cctx->lz4CtxPtr, cdict, cctx->prefs.compressionLevel, LZ4F_blockLinked);
}
if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
- LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
+ LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctx->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
+ }
+ if (dictBuffer) {
+ assert(cdict == NULL);
+ RETURN_ERROR_IF(dictSize > INT_MAX, parameter_invalid);
+ if (cctx->lz4CtxType == ctxFast) {
+            /* lz4 fast */
+ LZ4_loadDict((LZ4_stream_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
+ } else {
+ /* lz4hc */
+ assert(cctx->lz4CtxType == ctxHC);
+ LZ4_loadDictHC((LZ4_streamHC_t*)cctx->lz4CtxPtr, (const char*)dictBuffer, (int)dictSize);
+ }
}
+ /* Stage 2 : Write Frame Header */
+
/* Magic Number */
LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
dstPtr += 4;
@@ -757,22 +785,22 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
/* FLG Byte */
*dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
- + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5)
- + ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
- + ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3)
- + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
- + (cctxPtr->prefs.frameInfo.dictID > 0) );
+ + ((cctx->prefs.frameInfo.blockMode & _1BIT ) << 5)
+ + ((cctx->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
+ + ((unsigned)(cctx->prefs.frameInfo.contentSize > 0) << 3)
+ + ((cctx->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
+ + (cctx->prefs.frameInfo.dictID > 0) );
/* BD Byte */
- *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
+ *dstPtr++ = (BYTE)((cctx->prefs.frameInfo.blockSizeID & _3BITS) << 4);
/* Optional Frame content size field */
- if (cctxPtr->prefs.frameInfo.contentSize) {
- LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
+ if (cctx->prefs.frameInfo.contentSize) {
+ LZ4F_writeLE64(dstPtr, cctx->prefs.frameInfo.contentSize);
dstPtr += 8;
- cctxPtr->totalInSize = 0;
+ cctx->totalInSize = 0;
}
/* Optional dictionary ID field */
- if (cctxPtr->prefs.frameInfo.dictID) {
- LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID);
+ if (cctx->prefs.frameInfo.dictID) {
+ LZ4F_writeLE32(dstPtr, cctx->prefs.frameInfo.dictID);
dstPtr += 4;
}
/* Header CRC Byte */
@@ -780,24 +808,54 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
dstPtr++;
}
- cctxPtr->cStage = 1; /* header written, now request input data block */
+ cctx->cStage = 1; /* header written, now request input data block */
return (size_t)(dstPtr - dstStart);
}
+size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
+ NULL, 0,
+ NULL, preferencesPtr);
+}
-/*! LZ4F_compressBegin() :
- * init streaming compression AND writes frame header into @dstBuffer.
- * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * @preferencesPtr can be NULL, in which case default parameters are selected.
- * @return : number of bytes written into dstBuffer for the header
- * or an error code (can be tested using LZ4F_isError())
- */
-size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
+/* LZ4F_compressBegin_usingDictOnce:
+ * Hidden implementation,
+ * employed for multi-threaded compression
+ * when the frame defines linked blocks */
+size_t LZ4F_compressBegin_usingDictOnce(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const void* dict, size_t dictSize,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
+ dict, dictSize,
+ NULL, preferencesPtr);
+}
+
+size_t LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const void* dict, size_t dictSize,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ /* note : incorrect implementation :
+ * this will only use the dictionary once,
+     * instead of once *per* block when the frame defines independent blocks */
+ return LZ4F_compressBegin_usingDictOnce(cctx, dstBuffer, dstCapacity,
+ dict, dictSize,
+ preferencesPtr);
+}
+
+size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
void* dstBuffer, size_t dstCapacity,
+ const LZ4F_CDict* cdict,
const LZ4F_preferences_t* preferencesPtr)
{
- return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity,
- NULL, preferencesPtr);
+ return LZ4F_compressBegin_internal(cctx, dstBuffer, dstCapacity,
+ NULL, 0,
+ cdict, preferencesPtr);
}
@@ -891,9 +949,10 @@ static int LZ4F_doNotCompressBlock(void* ctx, const char* src, char* dst, int sr
return 0;
}
-static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_blockCompression_t compressMode)
+static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level, LZ4F_BlockCompressMode_e compressMode)
{
- if (compressMode == LZ4B_UNCOMPRESSED) return LZ4F_doNotCompressBlock;
+ if (compressMode == LZ4B_UNCOMPRESSED)
+ return LZ4F_doNotCompressBlock;
if (level < LZ4HC_CLEVEL_MIN) {
if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
return LZ4F_compressBlock_continue;
@@ -931,7 +990,7 @@ static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
void* dstBuffer, size_t dstCapacity,
const void* srcBuffer, size_t srcSize,
const LZ4F_compressOptions_t* compressOptionsPtr,
- LZ4F_blockCompression_t blockCompression)
+ LZ4F_BlockCompressMode_e blockCompression)
{
size_t const blockSize = cctxPtr->maxBlockSize;
const BYTE* srcPtr = (const BYTE*)srcBuffer;
@@ -951,10 +1010,10 @@ static size_t LZ4F_compressUpdateImpl(LZ4F_cctx* cctxPtr,
RETURN_ERROR(dstMaxSize_tooSmall);
/* flush currently written block, to continue with new block compression */
- if (cctxPtr->blockCompression != blockCompression) {
+ if (cctxPtr->blockCompressMode != blockCompression) {
bytesWritten = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
dstPtr += bytesWritten;
- cctxPtr->blockCompression = blockCompression;
+ cctxPtr->blockCompressMode = blockCompression;
}
if (compressOptionsPtr == NULL) compressOptionsPtr = &k_cOptionsNull;
@@ -1068,13 +1127,9 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
compressOptionsPtr, LZ4B_COMPRESSED);
}
-/*! LZ4F_compressUpdate() :
- * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
- * When successful, the function always entirely consumes @srcBuffer.
- * src data is either buffered or compressed into @dstBuffer.
- * If previously an uncompressed block was written, buffered data is flushed
- * before appending compressed data is continued.
- * This is only supported when LZ4F_blockIndependent is used
+/*! LZ4F_uncompressedUpdate() :
+ * Same as LZ4F_compressUpdate(), but requests blocks to be sent uncompressed.
+ * This function is only supported when LZ4F_blockIndependent is used.
* @dstCapacity MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
* @compressOptionsPtr is optional : provide NULL to mean "default".
* @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
@@ -1084,8 +1139,8 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
size_t LZ4F_uncompressedUpdate(LZ4F_cctx* cctxPtr,
void* dstBuffer, size_t dstCapacity,
const void* srcBuffer, size_t srcSize,
- const LZ4F_compressOptions_t* compressOptionsPtr) {
- RETURN_ERROR_IF(cctxPtr->prefs.frameInfo.blockMode != LZ4F_blockIndependent, blockMode_invalid);
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+{
return LZ4F_compressUpdateImpl(cctxPtr,
dstBuffer, dstCapacity,
srcBuffer, srcSize,
@@ -1115,7 +1170,7 @@ size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
(void)compressOptionsPtr; /* not useful (yet) */
/* select compression function */
- compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompression);
+ compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel, cctxPtr->blockCompressMode);
/* compress tmp buffer */
dstPtr += LZ4F_makeBlock(dstPtr,
@@ -1170,13 +1225,12 @@ size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
RETURN_ERROR_IF(dstCapacity < 8, dstMaxSize_tooSmall);
- DEBUGLOG(5,"Writing 32-bit content checksum");
+ DEBUGLOG(5,"Writing 32-bit content checksum (0x%0X)", xxh);
LZ4F_writeLE32(dstPtr, xxh);
dstPtr+=4; /* content Checksum */
}
cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */
- cctxPtr->maxBufferSize = 0; /* reuse HC context */
if (cctxPtr->prefs.frameInfo.contentSize) {
if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
@@ -1270,13 +1324,14 @@ LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
/*==--- Streaming Decompression operations ---==*/
-
void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
{
+ DEBUGLOG(5, "LZ4F_resetDecompressionContext");
dctx->dStage = dstage_getFrameHeader;
dctx->dict = NULL;
dctx->dictSize = 0;
dctx->skipChecksum = 0;
+ dctx->frameRemainingSize = 0;
}
@@ -1333,6 +1388,7 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
if (((FLG>>1)&_1BIT) != 0) RETURN_ERROR(reservedFlag_set); /* Reserved bit */
if (version != 1) RETURN_ERROR(headerVersion_wrong); /* Version Number, only supported value */
}
+ DEBUGLOG(6, "contentSizeFlag: %u", contentSizeFlag);
/* Frame Header Size */
frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
@@ -1369,8 +1425,9 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
dctx->maxBlockSize = LZ4F_getBlockSize((LZ4F_blockSizeID_t)blockSizeID);
- if (contentSizeFlag)
+ if (contentSizeFlag) {
dctx->frameRemainingSize = dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
+ }
if (dictIDFlag)
dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
@@ -1570,7 +1627,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
size_t nextSrcSizeHint = 1;
- DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
+ DEBUGLOG(5, "LZ4F_decompress: src[%p](%u) => dst[%p](%u)",
srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
if (dstBuffer == NULL) assert(*dstSizePtr == 0);
MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
@@ -1722,10 +1779,10 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
/* history management (linked blocks only)*/
if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
- } }
-
- srcPtr += sizeToCopy;
- dstPtr += sizeToCopy;
+ }
+ srcPtr += sizeToCopy;
+ dstPtr += sizeToCopy;
+ }
if (sizeToCopy == dctx->tmpInTarget) { /* all done */
if (dctx->frameInfo.blockChecksumFlag) {
dctx->tmpInSize = 0;
@@ -1959,6 +2016,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
if (!dctx->skipChecksum) {
U32 const readCRC = LZ4F_readLE32(selectedIn);
U32 const resultCRC = XXH32_digest(&(dctx->xxh));
+ DEBUGLOG(4, "frame checksum: stored 0x%0X vs 0x%0X processed", readCRC, resultCRC);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
RETURN_ERROR_IF(readCRC != resultCRC, contentChecksum_invalid);
#else
diff --git a/contrib/libs/lz4/lz4frame.h b/contrib/libs/lz4/lz4frame.h
index 1bdf6c4fcb..b8ae322767 100644
--- a/contrib/libs/lz4/lz4frame.h
+++ b/contrib/libs/lz4/lz4frame.h
@@ -173,16 +173,16 @@ typedef LZ4F_contentChecksum_t contentChecksum_t;
* setting all parameters to default.
* It's then possible to update selectively some parameters */
typedef struct {
- LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */
- LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */
- LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */
+ LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default (LZ4F_max64KB) */
+ LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default (LZ4F_blockLinked) */
+ LZ4F_contentChecksum_t contentChecksumFlag; /* 1: add a 32-bit checksum of frame's decompressed data; 0 == default (disabled) */
LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */
unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */
unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */
- LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */
+ LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0 == default (disabled) */
} LZ4F_frameInfo_t;
-#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */
+#define LZ4F_INIT_FRAMEINFO { LZ4F_max64KB, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */
/*! LZ4F_preferences_t :
* makes it possible to supply advanced compression instructions to streaming interface.
@@ -204,7 +204,26 @@ typedef struct {
* Simple compression function
***********************************/
-LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
+/*! LZ4F_compressFrame() :
+ * Compress srcBuffer content into an LZ4-compressed frame.
+ * It's a one-shot operation: all input content is consumed, and all output is generated, in a single invocation.
+ *
+ * Note : it's a stateless operation (no LZ4F_cctx state needed).
+ * In order to reduce load on the allocator, LZ4F_compressFrame(), by default,
+ * uses the stack to allocate space for the compression state and some tables.
+ * If this usage of the stack is too large for your application,
+ * consider compiling `lz4frame.c` with the compile-time macro LZ4F_HEAPMODE set to 1 instead:
+ * all state allocations will then use the heap,
+ * meaning each invocation of LZ4F_compressFrame() will trigger several internal alloc/free operations.
+ *
+ * @dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * @preferencesPtr is optional : one can provide NULL, in which case all preferences are set to default.
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ */
+LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_preferences_t* preferencesPtr);
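
A minimal one-shot sketch of the above (hypothetical `src`/`srcSize`, default preferences; error handling abbreviated):

    size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);
    void* const dst = malloc(bound);
    if (dst != NULL) {
        size_t const cSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
        if (LZ4F_isError(cSize)) { /* inspect LZ4F_getErrorName(cSize) */ }
        /* else: dst holds a complete LZ4 frame of cSize bytes */
    }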
/*! LZ4F_compressFrameBound() :
* Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences.
@@ -214,16 +233,11 @@ LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
*/
LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
-/*! LZ4F_compressFrame() :
- * Compress an entire srcBuffer into a valid LZ4 frame.
- * dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
- * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
- * @return : number of bytes written into dstBuffer.
- * or an error code if it fails (can be tested using LZ4F_isError())
+
+/*! LZ4F_compressionLevel_max() :
+ * @return maximum allowed compression level (currently: 12)
*/
-LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
- const void* srcBuffer, size_t srcSize,
- const LZ4F_preferences_t* preferencesPtr);
+LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
/*-***********************************
@@ -278,7 +292,7 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
/*! LZ4F_compressBegin() :
* will write the frame header into dstBuffer.
* dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default.
+ * `prefsPtr` is optional : NULL can be provided to set all preferences to default.
* @return : number of bytes written into dstBuffer for the header
* or an error code (which can be tested using LZ4F_isError())
*/
@@ -355,8 +369,9 @@ typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */
typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */
typedef struct {
- unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified between invocations.
- * This optimization skips storage operations in tmp buffers. */
+  unsigned stableDst;    /* pledges that the last 64KB of decompressed data is present right before the @dstBuffer pointer.
+                          * This optimization skips internal storage operations.
+                          * Once set, this pledge must remain valid up to the end of the current frame. */
unsigned skipChecksums; /* disable checksum calculation and verification, even when one is present in frame, to save CPU time.
* Setting this option to 1 once disables all checksums for the rest of the frame. */
unsigned reserved1; /* must be set to zero for forward compatibility */
@@ -463,6 +478,11 @@ LZ4F_getFrameInfo(LZ4F_dctx* dctx,
* `dstBuffer` can freely change between each consecutive function invocation.
* `dstBuffer` content will be overwritten.
*
+ * Note: if `LZ4F_getFrameInfo()` is called before `LZ4F_decompress()`, srcBuffer must be updated to reflect
+ * the number of bytes consumed after reading the frame header. Failure to update srcBuffer before calling
+ * `LZ4F_decompress()` will cause decompression failure or, even worse, successful but incorrect decompression.
+ * See the `LZ4F_getFrameInfo()` docs for details.
+ *
* @return : an hint of how many `srcSize` bytes LZ4F_decompress() expects for next call.
* Schematically, it's the size of the current (or remaining) compressed block + header of next block.
* Respecting the hint provides some small speed benefit, because it skips intermediate buffers.
@@ -493,6 +513,109 @@ LZ4F_decompress(LZ4F_dctx* dctx,
LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */
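
For reference, the canonical consumption loop for `LZ4F_decompress()` looks roughly like the sketch below (hypothetical `src`/`srcSize` input and a fixed-size caller-side `outBuf` array; the returned hint is used only as a termination signal here):

    LZ4F_dctx* dctx = NULL;
    if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION))) return 1;
    {   const char* ip = (const char*)src;
        size_t remaining = srcSize;
        size_t hint = 1;
        while (remaining > 0 && hint != 0) {
            size_t dstSize = sizeof(outBuf);
            size_t srcRead = remaining;
            hint = LZ4F_decompress(dctx, outBuf, &dstSize, ip, &srcRead, NULL);
            if (LZ4F_isError(hint)) break;
            ip += srcRead; remaining -= srcRead;
            /* dstSize bytes of decompressed data are now valid in outBuf */
        }
    }
    LZ4F_freeDecompressionContext(dctx);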
+/**********************************
+ * Dictionary compression API
+ *********************************/
+
+/* A Dictionary is useful for the compression of small messages (KB range).
+ * It dramatically improves compression efficiency.
+ *
+ * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful.
+ * Better results are generally achieved by using Zstandard's Dictionary Builder
+ * to generate a high-quality dictionary from a set of samples.
+ *
+ * The same dictionary will have to be used on the decompression side
+ * for decoding to be successful.
+ * To help identify the correct dictionary at decoding stage,
+ * the frame header allows optional embedding of a dictID field.
+ */
+
+/*! LZ4F_compressBegin_usingDict() : stable since v1.10
+ * Inits dictionary compression streaming, and writes the frame header into dstBuffer.
+ * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * @prefsPtr is optional : one may provide NULL as argument,
+ * however, it's the only way to provide dictID in the frame header.
+ * @dictBuffer must outlive the compression session.
+ * @return : number of bytes written into dstBuffer for the header,
+ * or an error code (which can be tested using LZ4F_isError())
+ * NOTE: The LZ4Frame spec allows each independent block to be compressed with the dictionary,
+ * but this entry supports a more limited scenario, where only the first block uses the dictionary.
+ * This is still useful for small data, which only needs one block anyway.
+ * For larger inputs, one may be more interested in LZ4F_compressFrame_usingCDict() below.
+ */
+LZ4FLIB_API size_t
+LZ4F_compressBegin_usingDict(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const void* dictBuffer, size_t dictSize,
+ const LZ4F_preferences_t* prefsPtr);
+
+/*! LZ4F_decompress_usingDict() : stable since v1.10
+ * Same as LZ4F_decompress(), using a predefined dictionary.
+ * Dictionary is used "in place", without any preprocessing.
+** It must remain accessible throughout the entire frame decoding. */
+LZ4FLIB_API size_t
+LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const void* dict, size_t dictSize,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr);
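
A round-trip sketch under stated assumptions: `cctx`/`dctx` come from the usual create functions, `dict`/`dictSize`, `msg`/`msgSize`, `dst`/`dstCapacity` and `roundTrip` are hypothetical, and `dstCapacity >= LZ4F_compressBound(msgSize, NULL)` so the calls below cannot fail for lack of room.

    char* op = (char*)dst;
    size_t n = LZ4F_compressBegin_usingDict(cctx, op, dstCapacity, dict, dictSize, NULL);
    if (LZ4F_isError(n)) return n;
    op += n;
    n = LZ4F_compressUpdate(cctx, op, dstCapacity - (size_t)(op - (char*)dst), msg, msgSize, NULL);
    if (LZ4F_isError(n)) return n;
    op += n;
    n = LZ4F_compressEnd(cctx, op, dstCapacity - (size_t)(op - (char*)dst), NULL);
    if (LZ4F_isError(n)) return n;
    op += n;
    /* decoding must use the same dictionary : */
    {   size_t dstSize = msgSize;
        size_t cSize = (size_t)(op - (char*)dst);
        size_t const r = LZ4F_decompress_usingDict(dctx, roundTrip, &dstSize,
                                                   dst, &cSize,
                                                   dict, dictSize, NULL);
        if (LZ4F_isError(r)) return r;
    }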
+
+/*****************************************
+ * Bulk processing dictionary compression
+ *****************************************/
+
+/* Loading a dictionary has a cost, since it involves construction of tables.
+ * The Bulk processing dictionary API makes it possible to share this cost
+ * over an arbitrary number of compression jobs, even concurrently,
+ * markedly improving compression latency for these cases.
+ *
+ * Note that there is no corresponding bulk API for the decompression side,
+ * because dictionary does not carry any initialization cost for decompression.
+ * Use the regular LZ4F_decompress_usingDict() there.
+ */
+typedef struct LZ4F_CDict_s LZ4F_CDict;
+
+/*! LZ4F_createCDict() : stable since v1.10
+ * When compressing multiple messages / blocks using the same dictionary, it's recommended to initialize it just once.
+ * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ * An LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * @dictBuffer can be released after LZ4F_CDict creation, since its content is copied within the CDict. */
+LZ4FLIB_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
+LZ4FLIB_API void LZ4F_freeCDict(LZ4F_CDict* CDict);
+
+/*! LZ4F_compressFrame_usingCDict() : stable since v1.10
+ * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
+ * @cctx must point to a context created by LZ4F_createCompressionContext().
+ * If @cdict==NULL, compress without a dictionary.
+ * @dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * If this condition is not respected, the function will fail (@return an errorCode).
+ * The LZ4F_preferences_t structure is optional : one may provide NULL as argument,
+ * though note that a non-NULL preferences structure is the only way to provide a @dictID in the frame header.
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ * Note: for larger inputs generating multiple independent blocks,
+ * this entry point uses the dictionary for each block. */
+LZ4FLIB_API size_t
+LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* preferencesPtr);
+
+/*! LZ4F_compressBegin_usingCDict() : stable since v1.10
+ * Inits streaming dictionary compression, and writes the frame header into dstBuffer.
+ * @dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * @prefsPtr is optional : one may provide NULL as argument,
+ * note however that a non-NULL @prefsPtr is the only way to insert a @dictID in the frame header.
+ * @cdict must outlive the compression session.
+ * @return : number of bytes written into dstBuffer for the header,
+ * or an error code, which can be tested using LZ4F_isError(). */
+LZ4FLIB_API size_t
+LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* prefsPtr);
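
A bulk-usage sketch (hypothetical `cctx`, `dst`/`dstCapacity`, `src`/`srcSize`): the CDict is built once, may then serve many frames, possibly from several threads concurrently, and is freed when no longer needed.

    LZ4F_CDict* const cdict = LZ4F_createCDict(dictBuffer, dictSize);
    if (cdict == NULL) return 1;
    {   size_t const cSize = LZ4F_compressFrame_usingCDict(cctx, dst, dstCapacity,
                                                           src, srcSize,
                                                           cdict, NULL /* or &prefs carrying a dictID */);
        if (LZ4F_isError(cSize)) { LZ4F_freeCDict(cdict); return 1; }
    }
    LZ4F_freeCDict(cdict);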
+
#if defined (__cplusplus)
}
@@ -503,11 +626,8 @@ LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always su
#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843)
#define LZ4F_H_STATIC_09782039843
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* These declarations are not stable and may change in the future.
+/* Note :
+ * The below declarations are not stable and may change in the future.
* They are therefore only safe to depend on
* when the caller is statically linked against the library.
* To access their declarations, define LZ4F_STATIC_LINKING_ONLY.
@@ -517,6 +637,11 @@ extern "C" {
* by defining LZ4F_PUBLISH_STATIC_FUNCTIONS.
* Use at your own risk.
*/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
# define LZ4FLIB_STATIC_API LZ4FLIB_API
#else
@@ -530,7 +655,7 @@ extern "C" {
ITEM(ERROR_GENERIC) \
ITEM(ERROR_maxBlockSize_invalid) \
ITEM(ERROR_blockMode_invalid) \
- ITEM(ERROR_contentChecksumFlag_invalid) \
+ ITEM(ERROR_parameter_invalid) \
ITEM(ERROR_compressionLevel_invalid) \
ITEM(ERROR_headerVersion_wrong) \
ITEM(ERROR_blockChecksum_invalid) \
@@ -548,6 +673,8 @@ extern "C" {
ITEM(ERROR_frameDecoding_alreadyStarted) \
ITEM(ERROR_compressionState_uninitialized) \
ITEM(ERROR_parameter_null) \
+ ITEM(ERROR_io_write) \
+ ITEM(ERROR_io_read) \
ITEM(ERROR_maxCode)
#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM,
@@ -558,22 +685,26 @@ typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM)
LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult);
+/**********************************
+ * Advanced compression operations
+ *********************************/
/*! LZ4F_getBlockSize() :
- * Return, in scalar format (size_t),
- * the maximum block size associated with blockSizeID.
+ * @return, in scalar format (size_t),
+ * the maximum block size associated with @blockSizeID,
+ * or an error code (can be tested using LZ4F_isError()) if @blockSizeID is invalid.
**/
LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(LZ4F_blockSizeID_t blockSizeID);
/*! LZ4F_uncompressedUpdate() :
- * LZ4F_uncompressedUpdate() can be called repetitively to add as much data uncompressed data as necessary.
+ * LZ4F_uncompressedUpdate() can be called repetitively to add data stored as uncompressed blocks.
* Important rule: dstCapacity MUST be large enough to store the entire source buffer as
* no compression is done for this operation
* If this condition is not respected, LZ4F_uncompressedUpdate() will fail (result is an errorCode).
 * After an error, the state is left in an undefined state, and must be re-initialized or freed.
- * If previously a compressed block was written, buffered data is flushed
+ * If previously a compressed block was written, buffered data is flushed first,
* before appending uncompressed data is continued.
- * This is only supported when LZ4F_blockIndependent is used
+ * This operation is only supported when LZ4F_blockIndependent is used.
* `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
* @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
* or an error code if it fails (which can be tested using LZ4F_isError())
@@ -585,82 +716,10 @@ LZ4F_uncompressedUpdate(LZ4F_cctx* cctx,
const LZ4F_compressOptions_t* cOptPtr);
/**********************************
- * Bulk processing dictionary API
+ * Custom memory allocation
*********************************/
-/* A Dictionary is useful for the compression of small messages (KB range).
- * It dramatically improves compression efficiency.
- *
- * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful.
- * Best results are generally achieved by using Zstandard's Dictionary Builder
- * to generate a high-quality dictionary from a set of samples.
- *
- * Loading a dictionary has a cost, since it involves construction of tables.
- * The Bulk processing dictionary API makes it possible to share this cost
- * over an arbitrary number of compression jobs, even concurrently,
- * markedly improving compression latency for these cases.
- *
- * The same dictionary will have to be used on the decompression side
- * for decoding to be successful.
- * To help identify the correct dictionary at decoding stage,
- * the frame header allows optional embedding of a dictID field.
- */
-typedef struct LZ4F_CDict_s LZ4F_CDict;
-
-/*! LZ4_createCDict() :
- * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once.
- * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
- * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */
-LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
-LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict);
-
-
-/*! LZ4_compressFrame_usingCDict() :
- * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
- * cctx must point to a context created by LZ4F_createCompressionContext().
- * If cdict==NULL, compress without a dictionary.
- * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
- * If this condition is not respected, function will fail (@return an errorCode).
- * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
- * but it's not recommended, as it's the only way to provide dictID in the frame header.
- * @return : number of bytes written into dstBuffer.
- * or an error code if it fails (can be tested using LZ4F_isError()) */
-LZ4FLIB_STATIC_API size_t
-LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const LZ4F_CDict* cdict,
- const LZ4F_preferences_t* preferencesPtr);
-
-
-/*! LZ4F_compressBegin_usingCDict() :
- * Inits streaming dictionary compression, and writes the frame header into dstBuffer.
- * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * `prefsPtr` is optional : you may provide NULL as argument,
- * however, it's the only way to provide dictID in the frame header.
- * @return : number of bytes written into dstBuffer for the header,
- * or an error code (which can be tested using LZ4F_isError()) */
-LZ4FLIB_STATIC_API size_t
-LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctx,
- void* dstBuffer, size_t dstCapacity,
- const LZ4F_CDict* cdict,
- const LZ4F_preferences_t* prefsPtr);
-
-
-/*! LZ4F_decompress_usingDict() :
- * Same as LZ4F_decompress(), using a predefined dictionary.
- * Dictionary is used "in place", without any preprocessing.
-** It must remain accessible throughout the entire frame decoding. */
-LZ4FLIB_STATIC_API size_t
-LZ4F_decompress_usingDict(LZ4F_dctx* dctxPtr,
- void* dstBuffer, size_t* dstSizePtr,
- const void* srcBuffer, size_t* srcSizePtr,
- const void* dict, size_t dictSize,
- const LZ4F_decompressOptions_t* decompressOptionsPtr);
-
-
-/*! Custom memory allocation :
+/*! Custom memory allocation : v1.9.4+
* These prototypes make it possible to pass custom allocation/free functions.
* LZ4F_customMem is provided at state creation time, using LZ4F_create*_advanced() listed below.
* All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
diff --git a/contrib/libs/lz4/lz4hc.c b/contrib/libs/lz4/lz4hc.c
index b21ad6bb59..4d8c36a697 100644
--- a/contrib/libs/lz4/lz4hc.c
+++ b/contrib/libs/lz4/lz4hc.c
@@ -39,9 +39,10 @@
***************************************/
/*! HEAPMODE :
- * Select how default compression function will allocate workplace memory,
- * in stack (0:fastest), or in heap (1:requires malloc()).
- * Since workplace is rather large, heap mode is recommended.
+ * Select how stateless HC compression functions like `LZ4_compress_HC()`
+ * allocate memory for their workspace:
+ * on the stack (0:fastest), or on the heap (1:default, requires malloc()).
+ * Since the workspace is rather large, heap mode is recommended.
**/
#ifndef LZ4HC_HEAPMODE
# define LZ4HC_HEAPMODE 1
@@ -51,19 +52,19 @@
/*=== Dependency ===*/
#define LZ4_HC_STATIC_LINKING_ONLY
#include "lz4hc.h"
+#include <limits.h>
-/*=== Common definitions ===*/
-#if defined(__GNUC__)
+/*=== Shared lz4.c code ===*/
+#ifndef LZ4_SRC_INCLUDED
+# if defined(__GNUC__)
# pragma GCC diagnostic ignored "-Wunused-function"
-#endif
-#if defined (__clang__)
+# endif
+# if defined (__clang__)
# pragma clang diagnostic ignored "-Wunused-function"
-#endif
-
-#define LZ4_COMMONDEFS_ONLY
-#ifndef LZ4_SRC_INCLUDED
-#include "lz4.c" /* LZ4_count, constants, mem */
+# endif
+# define LZ4_COMMONDEFS_ONLY
+# include "lz4.c" /* LZ4_count, constants, mem */
#endif
@@ -79,17 +80,158 @@ typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
/*=== Macros ===*/
#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
-#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
-#define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */
+
+
+/*=== Levels definition ===*/
+typedef enum { lz4mid, lz4hc, lz4opt } lz4hc_strat_e;
+typedef struct {
+ lz4hc_strat_e strat;
+ int nbSearches;
+ U32 targetLength;
+} cParams_t;
+static const cParams_t k_clTable[LZ4HC_CLEVEL_MAX+1] = {
+ { lz4mid, 2, 16 }, /* 0, unused */
+ { lz4mid, 2, 16 }, /* 1, unused */
+ { lz4mid, 2, 16 }, /* 2 */
+ { lz4hc, 4, 16 }, /* 3 */
+ { lz4hc, 8, 16 }, /* 4 */
+ { lz4hc, 16, 16 }, /* 5 */
+ { lz4hc, 32, 16 }, /* 6 */
+ { lz4hc, 64, 16 }, /* 7 */
+ { lz4hc, 128, 16 }, /* 8 */
+ { lz4hc, 256, 16 }, /* 9 */
+ { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/
+ { lz4opt, 512,128 }, /*11 */
+ { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
+};
+
+static cParams_t LZ4HC_getCLevelParams(int cLevel)
+{
+ /* note : clevel convention is a bit different from lz4frame,
+ * possibly something worth revisiting for consistency */
+ if (cLevel < 1)
+ cLevel = LZ4HC_CLEVEL_DEFAULT;
+ cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
+ return k_clTable[cLevel];
+}
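
Illustrating the mapping, with values read off the table above (`assert` used for exposition only):

    cParams_t const p9 = LZ4HC_getCLevelParams(9);
    assert(p9.strat == lz4hc && p9.nbSearches == 256 && p9.targetLength == 16);
    assert(LZ4HC_getCLevelParams(0).strat  == lz4hc);   /* <1 => LZ4HC_CLEVEL_DEFAULT (9) */
    assert(LZ4HC_getCLevelParams(99).strat == lz4opt);  /* clamped to LZ4HC_CLEVEL_MAX (12) */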
+
+
+/*=== Hashing ===*/
+#define LZ4HC_HASHSIZE 4
+#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
+static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
+/* lie to the compiler about data alignment; use with caution */
+static U64 LZ4_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
+/* __pack instructions are safer, but compiler specific */
+LZ4_PACK(typedef struct { U64 u64; }) LZ4_unalign64;
+static U64 LZ4_read64(const void* ptr) { return ((const LZ4_unalign64*)ptr)->u64; }
+
+#else /* safe and portable access using memcpy() */
+static U64 LZ4_read64(const void* memPtr)
+{
+ U64 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+#define LZ4MID_HASHSIZE 8
+#define LZ4MID_HASHLOG (LZ4HC_HASH_LOG-1)
+#define LZ4MID_HASHTABLESIZE (1 << LZ4MID_HASHLOG)
+
+static U32 LZ4MID_hash4(U32 v) { return (v * 2654435761U) >> (32-LZ4MID_HASHLOG); }
+static U32 LZ4MID_hash4Ptr(const void* ptr) { return LZ4MID_hash4(LZ4_read32(ptr)); }
+/* note: hash7 hashes the lower 56 bits.
+ * It presumes the input was read in little-endian order. */
+static U32 LZ4MID_hash7(U64 v) { return (U32)(((v << (64-56)) * 58295818150454627ULL) >> (64-LZ4MID_HASHLOG)) ; }
+static U64 LZ4_readLE64(const void* memPtr);
+static U32 LZ4MID_hash8Ptr(const void* ptr) { return LZ4MID_hash7(LZ4_readLE64(ptr)); }
+
+static U64 LZ4_readLE64(const void* memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read64(memPtr);
+ } else {
+ const BYTE* p = (const BYTE*)memPtr;
+ /* note: relies on the compiler to simplify this expression */
+ return (U64)p[0] | ((U64)p[1]<<8) | ((U64)p[2]<<16) | ((U64)p[3]<<24)
+ | ((U64)p[4]<<32) | ((U64)p[5]<<40) | ((U64)p[6]<<48) | ((U64)p[7]<<56);
+ }
+}
+
+
+/*=== Count match length ===*/
+LZ4_FORCE_INLINE
+unsigned LZ4HC_NbCommonBytes32(U32 val)
+{
+ assert(val != 0);
+ if (LZ4_isLittleEndian()) {
+# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanReverse(&r, val);
+ return (unsigned)((31 - r) >> 3);
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clz(val) >> 3;
+# else
+ val >>= 8;
+ val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+ (val + 0x00FF0000)) >> 24;
+ return (unsigned)val ^ 3;
+# endif
+ } else {
+# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward(&r, val);
+ return (unsigned)(r >> 3);
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctz(val) >> 3;
+# else
+ const U32 m = 0x01010101;
+ return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
+# endif
+ }
+}
+
+/** LZ4HC_countBack() :
+ * @return : negative value, nb of common bytes before ip/match */
+LZ4_FORCE_INLINE
+int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
+ const BYTE* const iMin, const BYTE* const mMin)
+{
+ int back = 0;
+ int const min = (int)MAX(iMin - ip, mMin - match);
+ assert(min <= 0);
+ assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
+ assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
+
+ while ((back - min) > 3) {
+ U32 const v = LZ4_read32(ip + back - 4) ^ LZ4_read32(match + back - 4);
+ if (v) {
+ return (back - (int)LZ4HC_NbCommonBytes32(v));
+ } else back -= 4; /* 4-byte step */
+ }
+ /* check remainder if any */
+ while ( (back > min)
+ && (ip[back-1] == match[back-1]) )
+ back--;
+ return back;
+}
+
+/*=== Chain table updates ===*/
#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */
/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor
-static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
-
/**************************************
-* HC Compression
+* Init
**************************************/
static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
{
@@ -101,6 +243,7 @@ static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
{
size_t const bufferSize = (size_t)(hc4->end - hc4->prefixStart);
size_t newStartingOffset = bufferSize + hc4->dictLimit;
+ DEBUGLOG(5, "LZ4HC_init_internal");
assert(newStartingOffset >= bufferSize); /* check overflow */
if (newStartingOffset > 1 GB) {
LZ4HC_clearTables(hc4);
@@ -116,6 +259,524 @@ static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
}
+/**************************************
+* Encode
+**************************************/
+/* LZ4HC_encodeSequence() :
+ * @return : 0 if ok,
+ * 1 if buffer issue detected */
+LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
+ const BYTE** _ip,
+ BYTE** _op,
+ const BYTE** _anchor,
+ int matchLength,
+ int offset,
+ limitedOutput_directive limit,
+ BYTE* oend)
+{
+#define ip (*_ip)
+#define op (*_op)
+#define anchor (*_anchor)
+
+ size_t length;
+ BYTE* const token = op++;
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
+ static const BYTE* start = NULL;
+ static U32 totalCost = 0;
+ U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
+ U32 const ll = (U32)(ip - anchor);
+ U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
+ U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
+ U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
+ if (start==NULL) start = anchor; /* only works for single segment */
+ /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
+ DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5i, cost:%4u + %5u",
+ pos,
+ (U32)(ip - anchor), matchLength, offset,
+ cost, totalCost);
+ totalCost += cost;
+#endif
+
+ /* Encode Literal length */
+ length = (size_t)(ip - anchor);
+ LZ4_STATIC_ASSERT(notLimited == 0);
+ /* Check output limit */
+ if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
+ (int)length, (int)(oend - op));
+ return 1;
+ }
+ if (length >= RUN_MASK) {
+ size_t len = length - RUN_MASK;
+ *token = (RUN_MASK << ML_BITS);
+ for(; len >= 255 ; len -= 255) *op++ = 255;
+ *op++ = (BYTE)len;
+ } else {
+ *token = (BYTE)(length << ML_BITS);
+ }
+
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op + length);
+ op += length;
+
+ /* Encode Offset */
+ assert(offset <= LZ4_DISTANCE_MAX );
+ assert(offset > 0);
+ LZ4_writeLE16(op, (U16)(offset)); op += 2;
+
+ /* Encode MatchLength */
+ assert(matchLength >= MINMATCH);
+ length = (size_t)matchLength - MINMATCH;
+ if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
+ DEBUGLOG(6, "Not enough room to write match length");
+ return 1; /* Check output limit */
+ }
+ if (length >= ML_MASK) {
+ *token += ML_MASK;
+ length -= ML_MASK;
+ for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
+ if (length >= 255) { length -= 255; *op++ = 255; }
+ *op++ = (BYTE)length;
+ } else {
+ *token += (BYTE)(length);
+ }
+
+ /* Prepare next loop */
+ ip += matchLength;
+ anchor = ip;
+
+ return 0;
+
+#undef ip
+#undef op
+#undef anchor
+}
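+/* Worked example (illustrative values): literals=20, matchLength=70, offset=300
+ * encode as: token 0xFF (both nibbles saturated at 15), one extra
+ * literal-length byte 5 (20-15), the 20 literal bytes, LE16 offset 0x2C 0x01,
+ * and one extra match-length byte 51 (70 - MINMATCH - 15). */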
+
+
+typedef struct {
+ int off;
+ int len;
+ int back; /* negative value */
+} LZ4HC_match_t;
+
+LZ4HC_match_t LZ4HC_searchExtDict(const BYTE* ip, U32 ipIndex,
+ const BYTE* const iLowLimit, const BYTE* const iHighLimit,
+ const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex,
+ int currentBestML, int nbAttempts)
+{
+ size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
+ U32 lDictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
+ U32 matchIndex = lDictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
+ int offset = 0, sBack = 0;
+ assert(lDictEndIndex <= 1 GB);
+ if (lDictMatchIndex>0)
+ DEBUGLOG(7, "lDictEndIndex = %zu, lDictMatchIndex = %u", lDictEndIndex, lDictMatchIndex);
+ while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
+ const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + lDictMatchIndex;
+
+ if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+ int mlt;
+ int back = 0;
+ const BYTE* vLimit = ip + (lDictEndIndex - lDictMatchIndex);
+ if (vLimit > iHighLimit) vLimit = iHighLimit;
+ mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
+ back = (ip > iLowLimit) ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->prefixStart) : 0;
+ mlt -= back;
+ if (mlt > currentBestML) {
+ currentBestML = mlt;
+ offset = (int)(ipIndex - matchIndex);
+ sBack = back;
+ DEBUGLOG(7, "found match of length %i within extDictCtx", currentBestML);
+ } }
+
+ { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, lDictMatchIndex);
+ lDictMatchIndex -= nextOffset;
+ matchIndex -= nextOffset;
+ } }
+
+ { LZ4HC_match_t md;
+ md.len = currentBestML;
+ md.off = offset;
+ md.back = sBack;
+ return md;
+ }
+}
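+/* Index translation (illustrative numbers): with lDictEndIndex = 1000 and
+ * gDictEndIndex = 5000, a dictionary-local match index of 600 maps to the
+ * global index 600 + 5000 - 1000 = 4600, so ipIndex - matchIndex yields the
+ * real backward offset across the dictionary boundary. */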
+
+typedef LZ4HC_match_t (*LZ4MID_searchIntoDict_f)(const BYTE* ip, U32 ipIndex,
+ const BYTE* const iHighLimit,
+ const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex);
+
+static LZ4HC_match_t LZ4MID_searchHCDict(const BYTE* ip, U32 ipIndex,
+ const BYTE* const iHighLimit,
+ const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex)
+{
+ return LZ4HC_searchExtDict(ip,ipIndex,
+ ip, iHighLimit,
+ dictCtx, gDictEndIndex,
+ MINMATCH-1, 2);
+}
+
+static LZ4HC_match_t LZ4MID_searchExtDict(const BYTE* ip, U32 ipIndex,
+ const BYTE* const iHighLimit,
+ const LZ4HC_CCtx_internal* dictCtx, U32 gDictEndIndex)
+{
+ size_t const lDictEndIndex = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
+ const U32* const hash4Table = dictCtx->hashTable;
+ const U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
+ DEBUGLOG(7, "LZ4MID_searchExtDict (ipIdx=%u)", ipIndex);
+
+ /* search long match first */
+ { U32 l8DictMatchIndex = hash8Table[LZ4MID_hash8Ptr(ip)];
+ U32 m8Index = l8DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
+ assert(lDictEndIndex <= 1 GB);
+ if (ipIndex - m8Index <= LZ4_DISTANCE_MAX) {
+ const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l8DictMatchIndex;
+ const size_t safeLen = MIN(lDictEndIndex - l8DictMatchIndex, (size_t)(iHighLimit - ip));
+ int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
+ if (mlt >= MINMATCH) {
+ LZ4HC_match_t md;
+ DEBUGLOG(7, "Found long ExtDict match of len=%u", mlt);
+ md.len = mlt;
+ md.off = (int)(ipIndex - m8Index);
+ md.back = 0;
+ return md;
+ }
+ }
+ }
+
+ /* search for short match second */
+ { U32 l4DictMatchIndex = hash4Table[LZ4MID_hash4Ptr(ip)];
+ U32 m4Index = l4DictMatchIndex + gDictEndIndex - (U32)lDictEndIndex;
+ if (ipIndex - m4Index <= LZ4_DISTANCE_MAX) {
+ const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + l4DictMatchIndex;
+ const size_t safeLen = MIN(lDictEndIndex - l4DictMatchIndex, (size_t)(iHighLimit - ip));
+ int mlt = (int)LZ4_count(ip, matchPtr, ip + safeLen);
+ if (mlt >= MINMATCH) {
+ LZ4HC_match_t md;
+ DEBUGLOG(7, "Found short ExtDict match of len=%u", mlt);
+ md.len = mlt;
+ md.off = (int)(ipIndex - m4Index);
+ md.back = 0;
+ return md;
+ }
+ }
+ }
+
+ /* nothing found */
+ { LZ4HC_match_t const md = {0, 0, 0 };
+ return md;
+ }
+}
+
+/**************************************
+* Mid Compression (level 2)
+**************************************/
+
+LZ4_FORCE_INLINE void
+LZ4MID_addPosition(U32* hTable, U32 hValue, U32 index)
+{
+ hTable[hValue] = index;
+}
+
+#define ADDPOS8(_p, _idx) LZ4MID_addPosition(hash8Table, LZ4MID_hash8Ptr(_p), _idx)
+#define ADDPOS4(_p, _idx) LZ4MID_addPosition(hash4Table, LZ4MID_hash4Ptr(_p), _idx)
+
+/* Fill hash tables with references into the dictionary.
+ * The resulting tables are only usable by LZ4MID (level 2) */
+static void
+LZ4MID_fillHTable (LZ4HC_CCtx_internal* cctx, const void* dict, size_t size)
+{
+ U32* const hash4Table = cctx->hashTable;
+ U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
+ const BYTE* const prefixPtr = (const BYTE*)dict;
+ U32 const prefixIdx = cctx->dictLimit;
+ U32 const target = prefixIdx + (U32)size - LZ4MID_HASHSIZE;
+ U32 idx = cctx->nextToUpdate;
+ assert(dict == cctx->prefixStart);
+ DEBUGLOG(4, "LZ4MID_fillHTable (size:%zu)", size);
+ if (size <= LZ4MID_HASHSIZE)
+ return;
+
+ for (; idx < target; idx += 3) {
+ ADDPOS4(prefixPtr+idx-prefixIdx, idx);
+ ADDPOS8(prefixPtr+idx+1-prefixIdx, idx+1);
+ }
+
+ idx = (size > 32 KB + LZ4MID_HASHSIZE) ? target - 32 KB : cctx->nextToUpdate;
+ for (; idx < target; idx += 1) {
+ ADDPOS8(prefixPtr+idx-prefixIdx, idx);
+ }
+
+ cctx->nextToUpdate = target;
+}
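+/* Fill-pattern sketch (illustrative): for a 100 KB dictionary, the first loop
+ * indexes positions with a stride of 3 (hash4 at idx, hash8 at idx+1), then
+ * the second loop re-indexes the last 32 KB at every position into the hash8
+ * table, so long-match candidates are densest near the dictionary's end. */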
+
+static LZ4MID_searchIntoDict_f select_searchDict_function(const LZ4HC_CCtx_internal* dictCtx)
+{
+ if (dictCtx == NULL) return NULL;
+ if (LZ4HC_getCLevelParams(dictCtx->compressionLevel).strat == lz4mid)
+ return LZ4MID_searchExtDict;
+ return LZ4MID_searchHCDict;
+}
+
+static int LZ4MID_compress (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* srcSizePtr,
+ int const maxOutputSize,
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
+ )
+{
+ U32* const hash4Table = ctx->hashTable;
+ U32* const hash8Table = hash4Table + LZ4MID_HASHTABLESIZE;
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + *srcSizePtr;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = (iend - LASTLITERALS);
+ const BYTE* const ilimit = (iend - LZ4MID_HASHSIZE);
+ BYTE* op = (BYTE*)dst;
+ BYTE* oend = op + maxOutputSize;
+
+ const BYTE* const prefixPtr = ctx->prefixStart;
+ const U32 prefixIdx = ctx->dictLimit;
+ const U32 ilimitIdx = (U32)(ilimit - prefixPtr) + prefixIdx;
+ const BYTE* const dictStart = ctx->dictStart;
+ const U32 dictIdx = ctx->lowLimit;
+ const U32 gDictEndIndex = ctx->lowLimit;
+ const LZ4MID_searchIntoDict_f searchIntoDict = (dict == usingDictCtxHc) ? select_searchDict_function(ctx->dictCtx) : NULL;
+ unsigned matchLength;
+ unsigned matchDistance;
+
+ /* input sanitization */
+ DEBUGLOG(5, "LZ4MID_compress (%i bytes)", *srcSizePtr);
+ if (dict == usingDictCtxHc) DEBUGLOG(5, "usingDictCtxHc");
+ assert(*srcSizePtr >= 0);
+ if (*srcSizePtr) assert(src != NULL);
+ if (maxOutputSize) assert(dst != NULL);
+ if (*srcSizePtr < 0) return 0; /* invalid */
+ if (maxOutputSize < 0) return 0; /* invalid */
+ if (*srcSizePtr > LZ4_MAX_INPUT_SIZE) {
+ /* forbidden: no input is allowed to be that large */
+ return 0;
+ }
+ if (limit == fillOutput) oend -= LASTLITERALS; /* Hack to support the LZ4 format restriction */
+ if (*srcSizePtr < LZ4_minLength)
+ goto _lz4mid_last_literals; /* Input too small, no compression (all literals) */
+
+ /* main loop */
+ while (ip <= mflimit) {
+ const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
+ /* search long match */
+ { U32 const h8 = LZ4MID_hash8Ptr(ip);
+ U32 const pos8 = hash8Table[h8];
+ assert(h8 < LZ4MID_HASHTABLESIZE);
+ assert(pos8 < ipIndex);
+ LZ4MID_addPosition(hash8Table, h8, ipIndex);
+ if (ipIndex - pos8 <= LZ4_DISTANCE_MAX) {
+ /* match candidate found */
+ if (pos8 >= prefixIdx) {
+ const BYTE* const matchPtr = prefixPtr + pos8 - prefixIdx;
+ assert(matchPtr < ip);
+ matchLength = LZ4_count(ip, matchPtr, matchlimit);
+ if (matchLength >= MINMATCH) {
+ DEBUGLOG(7, "found long match at pos %u (len=%u)", pos8, matchLength);
+ matchDistance = ipIndex - pos8;
+ goto _lz4mid_encode_sequence;
+ }
+ } else {
+ if (pos8 >= dictIdx) {
+ /* extDict match candidate */
+ const BYTE* const matchPtr = dictStart + (pos8 - dictIdx);
+ const size_t safeLen = MIN(prefixIdx - pos8, (size_t)(matchlimit - ip));
+ matchLength = LZ4_count(ip, matchPtr, ip + safeLen);
+ if (matchLength >= MINMATCH) {
+ DEBUGLOG(7, "found long match at ExtDict pos %u (len=%u)", pos8, matchLength);
+ matchDistance = ipIndex - pos8;
+ goto _lz4mid_encode_sequence;
+ }
+ }
+ }
+ } }
+ /* search short match */
+ { U32 const h4 = LZ4MID_hash4Ptr(ip);
+ U32 const pos4 = hash4Table[h4];
+ assert(h4 < LZ4MID_HASHTABLESIZE);
+ assert(pos4 < ipIndex);
+ LZ4MID_addPosition(hash4Table, h4, ipIndex);
+ if (ipIndex - pos4 <= LZ4_DISTANCE_MAX) {
+ /* match candidate found */
+ if (pos4 >= prefixIdx) {
+ /* only search within prefix */
+ const BYTE* const matchPtr = prefixPtr + (pos4 - prefixIdx);
+ assert(matchPtr < ip);
+ assert(matchPtr >= prefixPtr);
+ matchLength = LZ4_count(ip, matchPtr, matchlimit);
+ if (matchLength >= MINMATCH) {
+ /* short match found; check ip+1 for a longer one */
+ U32 const h8 = LZ4MID_hash8Ptr(ip+1);
+ U32 const pos8 = hash8Table[h8];
+ U32 const m2Distance = ipIndex + 1 - pos8;
+ matchDistance = ipIndex - pos4;
+ if ( m2Distance <= LZ4_DISTANCE_MAX
+ && pos8 >= prefixIdx /* only search within prefix */
+ && likely(ip < mflimit)
+ ) {
+ const BYTE* const m2Ptr = prefixPtr + (pos8 - prefixIdx);
+ unsigned ml2 = LZ4_count(ip+1, m2Ptr, matchlimit);
+ if (ml2 > matchLength) {
+ LZ4MID_addPosition(hash8Table, h8, ipIndex+1);
+ ip++;
+ matchLength = ml2;
+ matchDistance = m2Distance;
+ } }
+ goto _lz4mid_encode_sequence;
+ }
+ } else {
+ if (pos4 >= dictIdx) {
+ /* extDict match candidate */
+ const BYTE* const matchPtr = dictStart + (pos4 - dictIdx);
+ const size_t safeLen = MIN(prefixIdx - pos4, (size_t)(matchlimit - ip));
+ matchLength = LZ4_count(ip, matchPtr, ip + safeLen);
+ if (matchLength >= MINMATCH) {
+ DEBUGLOG(7, "found match at ExtDict pos %u (len=%u)", pos4, matchLength);
+ matchDistance = ipIndex - pos4;
+ goto _lz4mid_encode_sequence;
+ }
+ }
+ }
+ } }
+ /* no match found in prefix */
+ if ( (dict == usingDictCtxHc)
+ && (ipIndex - gDictEndIndex < LZ4_DISTANCE_MAX - 8) ) {
+ /* search a match into external dictionary */
+ LZ4HC_match_t dMatch = searchIntoDict(ip, ipIndex,
+ matchlimit,
+ ctx->dictCtx, gDictEndIndex);
+ if (dMatch.len >= MINMATCH) {
+ DEBUGLOG(7, "found Dictionary match (offset=%i)", dMatch.off);
+ assert(dMatch.back == 0);
+ matchLength = (unsigned)dMatch.len;
+ matchDistance = (unsigned)dMatch.off;
+ goto _lz4mid_encode_sequence;
+ }
+ }
+ /* no match found */
+ ip += 1 + ((ip-anchor) >> 9); /* skip faster over incompressible data */
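+ /* e.g. (illustrative): after 512 literal bytes without a match, the step
+ * grows to 2 bytes, after 1024 to 3, bounding scan time on incompressible data */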
+ continue;
+
+_lz4mid_encode_sequence:
+ /* catch back */
+ while (((ip > anchor) & ((U32)(ip-prefixPtr) > matchDistance)) && (unlikely(ip[-1] == ip[-(int)matchDistance-1]))) {
+ ip--; matchLength++;
+ }
+
+ /* fill table with beginning of match */
+ ADDPOS8(ip+1, ipIndex+1);
+ ADDPOS8(ip+2, ipIndex+2);
+ ADDPOS4(ip+1, ipIndex+1);
+
+ /* encode */
+ { BYTE* const saved_op = op;
+ /* LZ4HC_encodeSequence always updates @op; on success, it updates @ip and @anchor */
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ (int)matchLength, (int)matchDistance,
+ limit, oend) ) {
+ op = saved_op; /* restore @op value before failed LZ4HC_encodeSequence */
+ goto _lz4mid_dest_overflow;
+ }
+ }
+
+ /* fill table with end of match */
+ { U32 endMatchIdx = (U32)(ip-prefixPtr) + prefixIdx;
+ U32 pos_m2 = endMatchIdx - 2;
+ if (pos_m2 < ilimitIdx) {
+ if (likely(ip - prefixPtr > 5)) {
+ ADDPOS8(ip-5, endMatchIdx - 5);
+ }
+ ADDPOS8(ip-3, endMatchIdx - 3);
+ ADDPOS8(ip-2, endMatchIdx - 2);
+ ADDPOS4(ip-2, endMatchIdx - 2);
+ ADDPOS4(ip-1, endMatchIdx - 1);
+ }
+ }
+ }
+
+_lz4mid_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
+ if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
+ if (limit && (op + totalSize > oend)) {
+ if (limit == limitedOutput) return 0; /* not enough space in @dst */
+ /* adapt lastRunSize to fill 'dest' */
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = (RUN_MASK << ML_BITS);
+ for(; accumulator >= 255 ; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize << ML_BITS);
+ }
+ assert(lastRunSize <= (size_t)(oend - op));
+ LZ4_memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ DEBUGLOG(5, "compressed %i bytes into %i bytes", *srcSizePtr, (int)((char*)op - dst));
+ assert(ip >= (const BYTE*)src);
+ assert(ip <= iend);
+ *srcSizePtr = (int)(ip - (const BYTE*)src);
+ assert((char*)op >= dst);
+ assert(op <= oend);
+ assert((char*)op - dst < INT_MAX);
+ return (int)((char*)op - dst);
+
+_lz4mid_dest_overflow:
+ if (limit == fillOutput) {
+ /* Assumption : @ip, @anchor, @matchLength and @matchDistance must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence is overflowing : %u literals, %u remaining space",
+ (unsigned)ll, (unsigned)(oend-op));
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX);
+ if ((size_t)matchLength > maxMlSize) matchLength= (unsigned)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + matchLength >= MFLIMIT) {
+ DEBUGLOG(6, "Let's encode a last sequence (ll=%u, ml=%u)", (unsigned)ll, matchLength);
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ (int)matchLength, (int)matchDistance,
+ notLimited, oend);
+ } }
+ DEBUGLOG(6, "Let's finish with a run of literals (%u bytes left)", (unsigned)(oend-op));
+ goto _lz4mid_last_literals;
+ }
+ /* compression failed */
+ return 0;
+}
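+/* Usage sketch (illustrative, not prescribed by this patch): level 2 now
+ * reaches LZ4MID_compress() through the public HC entry point, e.g. :
+ *
+ *   char dst[LZ4_COMPRESSBOUND(1024)];
+ *   int const cSize = LZ4_compress_HC(src, dst, 1024, (int)sizeof(dst), 2);
+ *
+ * cSize > 0 on success; level 2 == LZ4HC_CLEVEL_MIN selects the lz4mid strategy. */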
+
+
+/**************************************
+* HC Compression - Search
+**************************************/
+
/* Update chains up to ip (excluded) */
LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
{
@@ -140,23 +801,6 @@ LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
hc4->nextToUpdate = target;
}
-/** LZ4HC_countBack() :
- * @return : negative value, nb of common bytes before ip/match */
-LZ4_FORCE_INLINE
-int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
- const BYTE* const iMin, const BYTE* const mMin)
-{
- int back = 0;
- int const min = (int)MAX(iMin - ip, mMin - match);
- assert(min <= 0);
- assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
- assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
- while ( (back > min)
- && (ip[back-1] == match[back-1]) )
- back--;
- return back;
-}
-
#if defined(_MSC_VER)
# define LZ4HC_rotl32(x,r) _rotl(x,r)
#else
@@ -236,22 +880,21 @@ static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
-LZ4_FORCE_INLINE int
+
+LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_InsertAndGetWiderMatch (
LZ4HC_CCtx_internal* const hc4,
const BYTE* const ip,
const BYTE* const iLowLimit, const BYTE* const iHighLimit,
int longest,
- const BYTE** matchpos,
- const BYTE** startpos,
const int maxNbAttempts,
const int patternAnalysis, const int chainSwap,
const dictCtx_directive dict,
const HCfavor_e favorDecSpeed)
{
U16* const chainTable = hc4->chainTable;
- U32* const HashTable = hc4->hashTable;
- const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
+ U32* const hashTable = hc4->hashTable;
+ const LZ4HC_CCtx_internal* const dictCtx = hc4->dictCtx;
const BYTE* const prefixPtr = hc4->prefixStart;
const U32 prefixIdx = hc4->dictLimit;
const U32 ipIndex = (U32)(ip - prefixPtr) + prefixIdx;
@@ -267,22 +910,24 @@ LZ4HC_InsertAndGetWiderMatch (
U32 matchIndex;
repeat_state_e repeat = rep_untested;
size_t srcPatternLength = 0;
+ int offset = 0, sBack = 0;
DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
/* First Match */
- LZ4HC_Insert(hc4, ip);
- matchIndex = HashTable[LZ4HC_hashPtr(ip)];
- DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
- matchIndex, lowestMatchIndex);
+ LZ4HC_Insert(hc4, ip); /* insert all prior positions up to ip (excluded) */
+ matchIndex = hashTable[LZ4HC_hashPtr(ip)];
+ DEBUGLOG(7, "First candidate match for pos %u found at index %u / %u (lowestMatchIndex)",
+ ipIndex, matchIndex, lowestMatchIndex);
while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
int matchLength=0;
nbAttempts--;
assert(matchIndex < ipIndex);
if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
- /* do nothing */
+ /* do nothing:
+ * favorDecSpeed intentionally skips matches with offset < 8 */
} else if (matchIndex >= prefixIdx) { /* within current Prefix */
- const BYTE* const matchPtr = prefixPtr + matchIndex - prefixIdx;
+ const BYTE* const matchPtr = prefixPtr + (matchIndex - prefixIdx);
assert(matchPtr < ip);
assert(longest >= 1);
if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
@@ -292,10 +937,11 @@ LZ4HC_InsertAndGetWiderMatch (
matchLength -= back;
if (matchLength > longest) {
longest = matchLength;
- *matchpos = matchPtr + back;
- *startpos = ip + back;
+ offset = (int)(ipIndex - matchIndex);
+ sBack = back;
+ DEBUGLOG(7, "Found match of len=%i within prefix, offset=%i, back=%i", longest, offset, -back);
} } }
- } else { /* lowestMatchIndex <= matchIndex < dictLimit */
+ } else { /* lowestMatchIndex <= matchIndex < dictLimit : within Ext Dict */
const BYTE* const matchPtr = dictStart + (matchIndex - dictIdx);
assert(matchIndex >= dictIdx);
if ( likely(matchIndex <= prefixIdx - 4)
@@ -310,8 +956,9 @@ LZ4HC_InsertAndGetWiderMatch (
matchLength -= back;
if (matchLength > longest) {
longest = matchLength;
- *matchpos = prefixPtr - prefixIdx + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
- *startpos = ip + back;
+ offset = (int)(ipIndex - matchIndex);
+ sBack = back;
+ DEBUGLOG(7, "Found match of len=%i within dict, offset=%i, back=%i", longest, offset, -back);
} } }
if (chainSwap && matchLength==longest) { /* better match => select a better chain */
@@ -344,6 +991,7 @@ LZ4HC_InsertAndGetWiderMatch (
if (repeat == rep_untested) {
if ( ((pattern & 0xFFFF) == (pattern >> 16))
& ((pattern & 0xFF) == (pattern >> 24)) ) {
+ DEBUGLOG(7, "Repeat pattern detected, char %02X", pattern >> 24);
repeat = rep_confirmed;
srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
} else {
@@ -352,7 +1000,7 @@ LZ4HC_InsertAndGetWiderMatch (
if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
&& LZ4HC_protectDictEnd(prefixIdx, matchCandidateIdx) ) {
const int extDict = matchCandidateIdx < prefixIdx;
- const BYTE* const matchPtr = (extDict ? dictStart - dictIdx : prefixPtr - prefixIdx) + matchCandidateIdx;
+ const BYTE* const matchPtr = extDict ? dictStart + (matchCandidateIdx - dictIdx) : prefixPtr + (matchCandidateIdx - prefixIdx);
if (LZ4_read32(matchPtr) == pattern) { /* good candidate */
const BYTE* const iLimit = extDict ? dictEnd : iHighLimit;
size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
@@ -398,8 +1046,9 @@ LZ4HC_InsertAndGetWiderMatch (
if ((size_t)(ip - prefixPtr) + prefixIdx - matchIndex > LZ4_DISTANCE_MAX) break;
assert(maxML < 2 GB);
longest = (int)maxML;
- *matchpos = prefixPtr - prefixIdx + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
- *startpos = ip;
+ offset = (int)(ipIndex - matchIndex);
+ assert(sBack == 0);
+ DEBUGLOG(7, "Found repeat pattern match of len=%i, offset=%i", longest, offset);
}
{ U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
if (distToNextPattern > matchIndex) break; /* avoid overflow */
@@ -416,11 +1065,12 @@ LZ4HC_InsertAndGetWiderMatch (
if ( dict == usingDictCtxHc
&& nbAttempts > 0
- && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
+ && withinStartDistance) {
size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->prefixStart) + dictCtx->dictLimit;
U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
assert(dictEndOffset <= 1 GB);
matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
+ if (dictMatchIndex>0) DEBUGLOG(7, "dictEndOffset = %zu, dictMatchIndex = %u => relative matchIndex = %i", dictEndOffset, dictMatchIndex, (int)dictMatchIndex - (int)dictEndOffset);
while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
const BYTE* const matchPtr = dictCtx->prefixStart - dictCtx->dictLimit + dictMatchIndex;
@@ -434,8 +1084,9 @@ LZ4HC_InsertAndGetWiderMatch (
mlt -= back;
if (mlt > longest) {
longest = mlt;
- *matchpos = prefixPtr - prefixIdx + matchIndex + back;
- *startpos = ip + back;
+ offset = (int)(ipIndex - matchIndex);
+ sBack = back;
+ DEBUGLOG(7, "found match of length %i within extDictCtx", longest);
} }
{ U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
@@ -443,112 +1094,29 @@ LZ4HC_InsertAndGetWiderMatch (
matchIndex -= nextOffset;
} } }
- return longest;
+ { LZ4HC_match_t md;
+ assert(longest >= 0);
+ md.len = longest;
+ md.off = offset;
+ md.back = sBack;
+ return md;
+ }
}
-LZ4_FORCE_INLINE int
+LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
const BYTE* const ip, const BYTE* const iLimit,
- const BYTE** matchpos,
const int maxNbAttempts,
const int patternAnalysis,
const dictCtx_directive dict)
{
- const BYTE* uselessPtr = ip;
+ DEBUGLOG(7, "LZ4HC_InsertAndFindBestMatch");
/* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
* but this won't be the case here, as we define iLowLimit==ip,
* so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
- return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
+ return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
}
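 /* e.g. (illustrative): a returned LZ4HC_match_t with len=8, off=64, back=0
  * describes an 8-byte match starting at ip, located 64 bytes back;
  * callers apply the (<=0) back field to widen a match start leftward. */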
-/* LZ4HC_encodeSequence() :
- * @return : 0 if ok,
- * 1 if buffer issue detected */
-LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
- const BYTE** _ip,
- BYTE** _op,
- const BYTE** _anchor,
- int matchLength,
- const BYTE* const match,
- limitedOutput_directive limit,
- BYTE* oend)
-{
-#define ip (*_ip)
-#define op (*_op)
-#define anchor (*_anchor)
-
- size_t length;
- BYTE* const token = op++;
-
-#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
- static const BYTE* start = NULL;
- static U32 totalCost = 0;
- U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
- U32 const ll = (U32)(ip - anchor);
- U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
- U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
- U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
- if (start==NULL) start = anchor; /* only works for single segment */
- /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
- DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
- pos,
- (U32)(ip - anchor), matchLength, (U32)(ip-match),
- cost, totalCost);
- totalCost += cost;
-#endif
-
- /* Encode Literal length */
- length = (size_t)(ip - anchor);
- LZ4_STATIC_ASSERT(notLimited == 0);
- /* Check output limit */
- if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
- DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
- (int)length, (int)(oend - op));
- return 1;
- }
- if (length >= RUN_MASK) {
- size_t len = length - RUN_MASK;
- *token = (RUN_MASK << ML_BITS);
- for(; len >= 255 ; len -= 255) *op++ = 255;
- *op++ = (BYTE)len;
- } else {
- *token = (BYTE)(length << ML_BITS);
- }
-
- /* Copy Literals */
- LZ4_wildCopy8(op, anchor, op + length);
- op += length;
-
- /* Encode Offset */
- assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
- LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
-
- /* Encode MatchLength */
- assert(matchLength >= MINMATCH);
- length = (size_t)matchLength - MINMATCH;
- if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
- DEBUGLOG(6, "Not enough room to write match length");
- return 1; /* Check output limit */
- }
- if (length >= ML_MASK) {
- *token += ML_MASK;
- length -= ML_MASK;
- for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
- if (length >= 255) { length -= 255; *op++ = 255; }
- *op++ = (BYTE)length;
- } else {
- *token += (BYTE)(length);
- }
-
- /* Prepare next loop */
- ip += matchLength;
- anchor = ip;
-
- return 0;
-}
-#undef ip
-#undef op
-#undef anchor
LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
LZ4HC_CCtx_internal* const ctx,
@@ -574,127 +1142,130 @@ LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
BYTE* op = (BYTE*) dest;
BYTE* oend = op + maxOutputSize;
- int ml0, ml, ml2, ml3;
const BYTE* start0;
- const BYTE* ref0;
- const BYTE* ref = NULL;
const BYTE* start2 = NULL;
- const BYTE* ref2 = NULL;
const BYTE* start3 = NULL;
- const BYTE* ref3 = NULL;
+ LZ4HC_match_t m0, m1, m2, m3;
+ const LZ4HC_match_t nomatch = {0, 0, 0};
/* init */
+ DEBUGLOG(5, "LZ4HC_compress_hashChain (dict?=>%i)", dict);
*srcSizePtr = 0;
 if (limit == fillOutput) oend -= LASTLITERALS; /* Hack to support the LZ4 format restriction */
if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
/* Main Loop */
while (ip <= mflimit) {
- ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict);
- if (ml<MINMATCH) { ip++; continue; }
+ m1 = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, maxNbAttempts, patternAnalysis, dict);
+ if (m1.len<MINMATCH) { ip++; continue; }
/* saved, in case we would skip too much */
- start0 = ip; ref0 = ref; ml0 = ml;
+ start0 = ip; m0 = m1;
_Search2:
- if (ip+ml <= mflimit) {
- ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
- ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2,
+ DEBUGLOG(7, "_Search2 (currently found match of size %i)", m1.len);
+ if (ip+m1.len <= mflimit) {
+ start2 = ip + m1.len - 2;
+ m2 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start2, ip + 0, matchlimit, m1.len,
maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ start2 += m2.back;
} else {
- ml2 = ml;
+ m2 = nomatch; /* do not search further */
}
- if (ml2 == ml) { /* No better match => encode ML1 */
+ if (m2.len <= m1.len) { /* No better match => encode ML1 immediately */
optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ m1.len, m1.off,
+ limit, oend) )
+ goto _dest_overflow;
continue;
}
if (start0 < ip) { /* first match was skipped at least once */
- if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */
- ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */
+ if (start2 < ip + m0.len) { /* squeezing ML1 between ML0(original ML1) and ML2 */
+ ip = start0; m1 = m0; /* restore initial Match1 */
} }
/* Here, start0==ip */
if ((start2 - ip) < 3) { /* First Match too small : removed */
- ml = ml2;
ip = start2;
- ref =ref2;
+ m1 = m2;
goto _Search2;
}
_Search3:
- /* At this stage, we have :
- * ml2 > ml1, and
- * ip1+3 <= ip2 (usually < ip1+ml1) */
if ((start2 - ip) < OPTIMAL_ML) {
int correction;
- int new_ml = ml;
+ int new_ml = m1.len;
if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
- if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ if (ip+new_ml > start2 + m2.len - MINMATCH)
+ new_ml = (int)(start2 - ip) + m2.len - MINMATCH;
correction = new_ml - (int)(start2 - ip);
if (correction > 0) {
start2 += correction;
- ref2 += correction;
- ml2 -= correction;
+ m2.len -= correction;
}
}
- /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
- if (start2 + ml2 <= mflimit) {
- ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
- start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3,
+ if (start2 + m2.len <= mflimit) {
+ start3 = start2 + m2.len - 3;
+ m3 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start3, start2, matchlimit, m2.len,
maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ start3 += m3.back;
} else {
- ml3 = ml2;
+ m3 = nomatch; /* do not search further */
}
- if (ml3 == ml2) { /* No better match => encode ML1 and ML2 */
+ if (m3.len <= m2.len) { /* No better match => encode ML1 and ML2 */
/* ip & ref are known; Now for ml */
- if (start2 < ip+ml) ml = (int)(start2 - ip);
+ if (start2 < ip+m1.len) m1.len = (int)(start2 - ip);
/* Now, encode 2 sequences */
optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ m1.len, m1.off,
+ limit, oend) )
+ goto _dest_overflow;
ip = start2;
optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
- ml = ml2;
- ref = ref2;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ m2.len, m2.off,
+ limit, oend) ) {
+ m1 = m2;
goto _dest_overflow;
}
continue;
}
- if (start3 < ip+ml+3) { /* Not enough space for match 2 : remove it */
- if (start3 >= (ip+ml)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
- if (start2 < ip+ml) {
- int correction = (int)(ip+ml - start2);
+ if (start3 < ip+m1.len+3) { /* Not enough space for match 2 : remove it */
+ if (start3 >= (ip+m1.len)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
+ if (start2 < ip+m1.len) {
+ int correction = (int)(ip+m1.len - start2);
start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- if (ml2 < MINMATCH) {
+ m2.len -= correction;
+ if (m2.len < MINMATCH) {
start2 = start3;
- ref2 = ref3;
- ml2 = ml3;
+ m2 = m3;
}
}
optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ m1.len, m1.off,
+ limit, oend) )
+ goto _dest_overflow;
ip = start3;
- ref = ref3;
- ml = ml3;
+ m1 = m3;
start0 = start2;
- ref0 = ref2;
- ml0 = ml2;
+ m0 = m2;
goto _Search2;
}
start2 = start3;
- ref2 = ref3;
- ml2 = ml3;
+ m2 = m3;
goto _Search3;
}
@@ -703,29 +1274,32 @@ _Search3:
* let's write the first one ML1.
* ip & ref are known; Now decide ml.
*/
- if (start2 < ip+ml) {
+ if (start2 < ip+m1.len) {
if ((start2 - ip) < OPTIMAL_ML) {
int correction;
- if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
- if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
- correction = ml - (int)(start2 - ip);
+ if (m1.len > OPTIMAL_ML) m1.len = OPTIMAL_ML;
+ if (ip + m1.len > start2 + m2.len - MINMATCH)
+ m1.len = (int)(start2 - ip) + m2.len - MINMATCH;
+ correction = m1.len - (int)(start2 - ip);
if (correction > 0) {
start2 += correction;
- ref2 += correction;
- ml2 -= correction;
+ m2.len -= correction;
}
} else {
- ml = (int)(start2 - ip);
+ m1.len = (int)(start2 - ip);
}
}
optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor),
+ m1.len, m1.off,
+ limit, oend) )
+ goto _dest_overflow;
/* ML2 becomes ML1 */
- ip = start2; ref = ref2; ml = ml2;
+ ip = start2; m1 = m2;
/* ML3 becomes ML2 */
- start2 = start3; ref2 = ref3; ml2 = ml3;
+ start2 = start3; m2 = m3;
/* let's find a new ML3 */
goto _Search3;
@@ -765,7 +1339,7 @@ _last_literals:
_dest_overflow:
if (limit == fillOutput) {
- /* Assumption : ip, anchor, ml and ref must be set correctly */
+ /* Assumption : @ip, @anchor, @optr and @m1 must be set correctly */
size_t const ll = (size_t)(ip - anchor);
size_t const ll_addbytes = (ll + 240) / 255;
size_t const ll_totalCost = 1 + ll_addbytes + ll;
@@ -776,10 +1350,10 @@ _dest_overflow:
/* ll validated; now adjust match length */
size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
- assert(maxMlSize < INT_MAX); assert(ml >= 0);
- if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
- if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
- LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
+ assert(maxMlSize < INT_MAX); assert(m1.len >= 0);
+ if ((size_t)m1.len > maxMlSize) m1.len = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + m1.len >= MFLIMIT) {
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), m1.len, m1.off, notLimited, oend);
} }
goto _last_literals;
}
@@ -796,54 +1370,34 @@ static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
const dictCtx_directive dict,
const HCfavor_e favorDecSpeed);
-
-LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
- LZ4HC_CCtx_internal* const ctx,
- const char* const src,
- char* const dst,
- int* const srcSizePtr,
- int const dstCapacity,
- int cLevel,
- const limitedOutput_directive limit,
- const dictCtx_directive dict
- )
+LZ4_FORCE_INLINE int
+LZ4HC_compress_generic_internal (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
+ )
{
- typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
- typedef struct {
- lz4hc_strat_e strat;
- int nbSearches;
- U32 targetLength;
- } cParams_t;
- static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
- { lz4hc, 2, 16 }, /* 0, unused */
- { lz4hc, 2, 16 }, /* 1, unused */
- { lz4hc, 2, 16 }, /* 2, unused */
- { lz4hc, 4, 16 }, /* 3 */
- { lz4hc, 8, 16 }, /* 4 */
- { lz4hc, 16, 16 }, /* 5 */
- { lz4hc, 32, 16 }, /* 6 */
- { lz4hc, 64, 16 }, /* 7 */
- { lz4hc, 128, 16 }, /* 8 */
- { lz4hc, 256, 16 }, /* 9 */
- { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/
- { lz4opt, 512,128 }, /*11 */
- { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
- };
-
- DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
- ctx, src, *srcSizePtr, limit);
+ DEBUGLOG(5, "LZ4HC_compress_generic_internal(src=%p, srcSize=%d)",
+ src, *srcSizePtr);
if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
- if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
ctx->end += *srcSizePtr;
- if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */
- cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
- { cParams_t const cParam = clTable[cLevel];
+ { cParams_t const cParam = LZ4HC_getCLevelParams(cLevel);
HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
int result;
- if (cParam.strat == lz4hc) {
+ if (cParam.strat == lz4mid) {
+ result = LZ4MID_compress(ctx,
+ src, dst, srcSizePtr, dstCapacity,
+ limit, dict);
+ } else if (cParam.strat == lz4hc) {
result = LZ4HC_compress_hashChain(ctx,
src, dst, srcSizePtr, dstCapacity,
cParam.nbSearches, limit, dict);
@@ -852,7 +1406,7 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
result = LZ4HC_compress_optimal(ctx,
src, dst, srcSizePtr, dstCapacity,
cParam.nbSearches, cParam.targetLength, limit,
- cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
+ cLevel >= LZ4HC_CLEVEL_MAX, /* ultra mode */
dict, favor);
}
if (result <= 0) ctx->dirty = 1;
@@ -877,6 +1431,13 @@ LZ4HC_compress_generic_noDictCtx (
return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
}
+static int isStateCompatible(const LZ4HC_CCtx_internal* ctx1, const LZ4HC_CCtx_internal* ctx2)
+{
+ int const isMid1 = LZ4HC_getCLevelParams(ctx1->compressionLevel).strat == lz4mid;
+ int const isMid2 = LZ4HC_getCLevelParams(ctx2->compressionLevel).strat == lz4mid;
+ return !(isMid1 ^ isMid2);
+}
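+/* i.e. returns 1 when both contexts use the same table layout:
+ * (lz4mid, lz4mid) -> 1, (lz4mid, lz4hc) -> 0, (lz4hc, lz4opt) -> 1,
+ * since only lz4mid lays out its hash tables differently. */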
+
static int
LZ4HC_compress_generic_dictCtx (
LZ4HC_CCtx_internal* const ctx,
@@ -893,7 +1454,7 @@ LZ4HC_compress_generic_dictCtx (
if (position >= 64 KB) {
ctx->dictCtx = NULL;
return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
- } else if (position == 0 && *srcSizePtr > 4 KB) {
+ } else if (position == 0 && *srcSizePtr > 4 KB && isStateCompatible(ctx, ctx->dictCtx)) {
LZ4_memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
LZ4HC_setExternalDict(ctx, (const BYTE *)src);
ctx->compressionLevel = (short)cLevel;
@@ -965,6 +1526,7 @@ int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, in
LZ4_streamHC_t state;
LZ4_streamHC_t* const statePtr = &state;
#endif
+ DEBUGLOG(5, "LZ4_compress_HC")
cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
FREEMEM(statePtr);
@@ -1032,18 +1594,16 @@ void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
- DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
- if (LZ4_streamHCPtr->internal_donotuse.dirty) {
+ LZ4HC_CCtx_internal* const s = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(5, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
+ if (s->dirty) {
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
} else {
- /* preserve end - prefixStart : can trigger clearTable's threshold */
- if (LZ4_streamHCPtr->internal_donotuse.end != NULL) {
- LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.prefixStart;
- } else {
- assert(LZ4_streamHCPtr->internal_donotuse.prefixStart == NULL);
- }
- LZ4_streamHCPtr->internal_donotuse.prefixStart = NULL;
- LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
+ assert(s->end >= s->prefixStart);
+ s->dictLimit += (U32)(s->end - s->prefixStart);
+ s->prefixStart = NULL;
+ s->end = NULL;
+ s->dictCtx = NULL;
}
LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
@@ -1067,7 +1627,9 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
const char* dictionary, int dictSize)
{
LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
- DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
+ cParams_t cp;
+ DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d, clevel=%d)", LZ4_streamHCPtr, dictionary, dictSize, ctxPtr->compressionLevel);
+ assert(dictSize >= 0);
assert(LZ4_streamHCPtr != NULL);
if (dictSize > 64 KB) {
dictionary += (size_t)dictSize - 64 KB;
@@ -1077,10 +1639,15 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
{ int const cLevel = ctxPtr->compressionLevel;
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
+ cp = LZ4HC_getCLevelParams(cLevel);
}
LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
ctxPtr->end = (const BYTE*)dictionary + dictSize;
- if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
+ if (cp.strat == lz4mid) {
+ LZ4MID_fillHTable (ctxPtr, dictionary, (size_t)dictSize);
+ } else {
+ if (dictSize >= LZ4HC_HASHSIZE) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
+ }
return dictSize;
}
@@ -1093,8 +1660,10 @@ void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC
static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
- if (ctxPtr->end >= ctxPtr->prefixStart + 4)
- LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */
+ if ( (ctxPtr->end >= ctxPtr->prefixStart + 4)
+ && (LZ4HC_getCLevelParams(ctxPtr->compressionLevel).strat != lz4mid) ) {
+ LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */
+ }
/* Only one memory segment for extDict, so any previous extDict is lost at this stage */
ctxPtr->lowLimit = ctxPtr->dictLimit;
@@ -1119,7 +1688,8 @@ LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
LZ4_streamHCPtr, src, *srcSizePtr, limit);
assert(ctxPtr != NULL);
/* auto-init if forgotten */
- if (ctxPtr->prefixStart == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
+ if (ctxPtr->prefixStart == NULL)
+ LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
/* Check overflow */
if ((size_t)(ctxPtr->end - ctxPtr->prefixStart) + ctxPtr->dictLimit > 2 GB) {
@@ -1140,7 +1710,8 @@ LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
if (sourceEnd > dictEnd) sourceEnd = dictEnd;
ctxPtr->lowLimit += (U32)(sourceEnd - ctxPtr->dictStart);
ctxPtr->dictStart += (U32)(sourceEnd - ctxPtr->dictStart);
- if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) {
+ /* invalidate dictionary if it's too small */
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < LZ4HC_HASHSIZE) {
ctxPtr->lowLimit = ctxPtr->dictLimit;
ctxPtr->dictStart = ctxPtr->prefixStart;
} } }
@@ -1150,6 +1721,7 @@ LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
{
+ DEBUGLOG(5, "LZ4_compress_HC_continue");
if (dstCapacity < LZ4_compressBound(srcSize))
return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
else
@@ -1162,7 +1734,6 @@ int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const ch
}
-
/* LZ4_saveDictHC :
* save history content
* into a user-provided buffer
@@ -1179,10 +1750,10 @@ int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictS
if (dictSize > prefixSize) dictSize = prefixSize;
if (safeBuffer == NULL) assert(dictSize == 0);
if (dictSize > 0)
- LZ4_memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+ LZ4_memmove(safeBuffer, streamPtr->end - dictSize, (size_t)dictSize);
{ U32 const endIndex = (U32)(streamPtr->end - streamPtr->prefixStart) + streamPtr->dictLimit;
- streamPtr->end = (const BYTE*)safeBuffer + dictSize;
- streamPtr->prefixStart = streamPtr->end - dictSize;
+ streamPtr->end = (safeBuffer == NULL) ? NULL : (const BYTE*)safeBuffer + dictSize;
+ streamPtr->prefixStart = (const BYTE*)safeBuffer;
streamPtr->dictLimit = endIndex - (U32)dictSize;
streamPtr->lowLimit = endIndex - (U32)dictSize;
streamPtr->dictStart = streamPtr->prefixStart;
@@ -1193,75 +1764,6 @@ int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictS
}
-/***************************************************
-* Deprecated Functions
-***************************************************/
-
-/* These functions currently generate deprecation warnings */
-
-/* Wrappers for deprecated compression functions */
-int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
-int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
-int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
-int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
-int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
-int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
-int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
-int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
-int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
-int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
-
-
-/* Deprecated streaming functions */
-int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); }
-
-/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
- * @return : 0 on success, !=0 if error */
-int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
-{
- LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
- if (hc4 == NULL) return 1; /* init failed */
- LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
- return 0;
-}
-
-#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
-void* LZ4_createHC (const char* inputBuffer)
-{
- LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
- if (hc4 == NULL) return NULL; /* not enough memory */
- LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
- return hc4;
-}
-
-int LZ4_freeHC (void* LZ4HC_Data)
-{
- if (!LZ4HC_Data) return 0; /* support free on NULL */
- FREEMEM(LZ4HC_Data);
- return 0;
-}
-#endif
-
-int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
-{
- return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
-}
-
-int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
-{
- return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
-}
-
-char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
-{
- LZ4_streamHC_t* const ctx = (LZ4_streamHC_t*)LZ4HC_Data;
- const BYTE* bufferStart = ctx->internal_donotuse.prefixStart - ctx->internal_donotuse.dictLimit + ctx->internal_donotuse.lowLimit;
- LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
- /* avoid const char * -> char * conversion warning :( */
- return (char*)(uptrval)bufferStart;
-}
-
-
/* ================================================
* LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
* ===============================================*/
@@ -1282,7 +1784,6 @@ LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
return price;
}
-
/* requires mlen >= MINMATCH */
LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
{
@@ -1298,12 +1799,6 @@ LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
return price;
}
-
-typedef struct {
- int off;
- int len;
-} LZ4HC_match_t;
-
LZ4_FORCE_INLINE LZ4HC_match_t
LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
const BYTE* ip, const BYTE* const iHighLimit,
@@ -1311,19 +1806,17 @@ LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
const dictCtx_directive dict,
const HCfavor_e favorDecSpeed)
{
- LZ4HC_match_t match = { 0 , 0 };
- const BYTE* matchPtr = NULL;
+ LZ4HC_match_t const match0 = { 0 , 0, 0 };
/* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
* but this won't be the case here, as we define iLowLimit==ip,
- * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
- int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
- if (matchLength <= minLen) return match;
+ ** so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+ LZ4HC_match_t md = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
+ assert(md.back == 0);
+ if (md.len <= minLen) return match0;
if (favorDecSpeed) {
- if ((matchLength>18) & (matchLength<=36)) matchLength=18; /* favor shortcut */
+ if ((md.len>18) & (md.len<=36)) md.len=18; /* favor dec.speed (shortcut) */
}
- match.len = matchLength;
- match.off = (int)(ip-matchPtr);
- return match;
+ return md;
}
@@ -1356,7 +1849,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
BYTE* opSaved = (BYTE*) dst;
BYTE* oend = op + dstCapacity;
int ovml = MINMATCH; /* overflow - last sequence */
- const BYTE* ovref = NULL;
+ int ovoff = 0;
/* init */
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
@@ -1379,11 +1872,10 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
if ((size_t)firstMatch.len > sufficient_len) {
/* good enough solution : immediate encoding */
int const firstML = firstMatch.len;
- const BYTE* const matchPos = ip - firstMatch.off;
opSaved = op;
- if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, firstMatch.off, limit, oend) ) { /* updates ip, op and anchor */
ovml = firstML;
- ovref = matchPos;
+ ovoff = firstMatch.off;
goto _dest_overflow;
}
continue;
@@ -1401,11 +1893,11 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
rPos, cost, opt[rPos].litlen);
} }
/* set prices using initial match */
- { int mlen = MINMATCH;
- int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */
+ { int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */
int const offset = firstMatch.off;
+ int mlen;
assert(matchML < LZ4_OPT_NUM);
- for ( ; mlen <= matchML ; mlen++) {
+ for (mlen = MINMATCH ; mlen <= matchML ; mlen++) {
int const cost = LZ4HC_sequencePrice(llen, mlen);
opt[mlen].mlen = mlen;
opt[mlen].off = offset;
@@ -1557,9 +2049,9 @@ encode: /* cur, last_match_pos, best_mlen, best_off must be set */
assert(ml >= MINMATCH);
assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
opSaved = op;
- if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, offset, limit, oend) ) { /* updates ip, op and anchor */
ovml = ml;
- ovref = ip - offset;
+ ovoff = offset;
goto _dest_overflow;
} } }
} /* while (ip <= mflimit) */
@@ -1618,14 +2110,83 @@ if (limit == fillOutput) {
if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
- LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovoff, notLimited, oend);
DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
} }
goto _last_literals;
}
_return_label:
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
- FREEMEM(opt);
+ if (opt) FREEMEM(opt);
#endif
return retval;
}
+
+
+/***************************************************
+* Deprecated Functions
+***************************************************/
+
+/* These functions currently generate deprecation warnings */
+
+/* Wrappers for deprecated compression functions */
+int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
+int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
+
+
+/* Deprecated streaming functions */
+int LZ4_sizeofStreamStateHC(void) { return sizeof(LZ4_streamHC_t); }
+
+/* state is presumed correctly sized, i.e. >= sizeof(LZ4_streamHC_t)
+ * @return : 0 on success, !=0 if error */
+int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
+{
+ LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
+ if (hc4 == NULL) return 1; /* init failed */
+ LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
+ return 0;
+}
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+void* LZ4_createHC (const char* inputBuffer)
+{
+ LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
+ if (hc4 == NULL) return NULL; /* not enough memory */
+ LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
+ return hc4;
+}
+
+int LZ4_freeHC (void* LZ4HC_Data)
+{
+ if (!LZ4HC_Data) return 0; /* support free on NULL */
+ FREEMEM(LZ4HC_Data);
+ return 0;
+}
+#endif
+
+int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
+{
+ return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
+}
+
+int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
+{
+ return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
+}
+
+char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
+{
+ LZ4HC_CCtx_internal* const s = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
+ const BYTE* const bufferStart = s->prefixStart - s->dictLimit + s->lowLimit;
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t*)LZ4HC_Data, s->compressionLevel);
+ /* ugly conversion trick, required to evade (const char*) -> (char*) cast-qual warning :( */
+ return (char*)(uptrval)bufferStart;
+}
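
The wrappers above reduce each legacy entry point to its modern equivalent, so call sites migrate mechanically: switch to LZ4_compress_HC*() and pass an explicit destination capacity (the legacy functions silently assumed dst held LZ4_compressBound(srcSize) bytes). A minimal before/after sketch, assuming a caller-provided buffer of exactly that size; the helper name is illustrative:

    #include "lz4.h"
    #include "lz4hc.h"

    /* Legacy (deprecated, no capacity argument):
     *     int cSize = LZ4_compressHC2(src, dst, srcSize, level);
     * Modern equivalent, matching the wrapper above.
     * dst must genuinely provide LZ4_compressBound(srcSize) bytes. */
    static int compress_migrated(const char* src, char* dst, int srcSize, int level)
    {
        return LZ4_compress_HC(src, dst, srcSize, LZ4_compressBound(srcSize), level);
    }
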
diff --git a/contrib/libs/lz4/lz4hc.h b/contrib/libs/lz4/lz4hc.h
index e937acfefd..992bc8cdd7 100644
--- a/contrib/libs/lz4/lz4hc.h
+++ b/contrib/libs/lz4/lz4hc.h
@@ -44,7 +44,7 @@ extern "C" {
/* --- Useful constants --- */
-#define LZ4HC_CLEVEL_MIN 3
+#define LZ4HC_CLEVEL_MIN 2
#define LZ4HC_CLEVEL_DEFAULT 9
#define LZ4HC_CLEVEL_OPT_MIN 10
#define LZ4HC_CLEVEL_MAX 12
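
For orientation: levels at or above LZ4HC_CLEVEL_OPT_MIN engage the optimal parser (slower, better ratio); values above LZ4HC_CLEVEL_MAX behave like LZ4HC_CLEVEL_MAX, while values below 1 fall back to the default. A hedged sketch of level selection; the helper name is illustrative:

    #include "lz4hc.h"

    /* Sketch: pick a level inside the documented range. */
    static int compress_at_level(const char* src, char* dst,
                                 int srcSize, int dstCapacity, int wantFast)
    {
        int const level = wantFast ? LZ4HC_CLEVEL_MIN : LZ4HC_CLEVEL_DEFAULT;
        return LZ4_compress_HC(src, dst, srcSize, dstCapacity, level);
    }
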
@@ -126,6 +126,8 @@ LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
After reset, a first "fictional block" can be designated as initial dictionary,
using LZ4_loadDictHC() (Optional).
+ Note: In order for LZ4_loadDictHC() to create the correct data structure,
+ it is essential to set the compression level _before_ loading the dictionary.
Invoke LZ4_compress_HC_continue() to compress each successive block.
The number of blocks is unlimited.
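
Since the level set at load time determines the data structure LZ4_loadDictHC() builds, the ordering above is not optional. A minimal sketch of the documented sequence (error handling elided; the helper name is illustrative):

    #include "lz4hc.h"

    /* Sketch: level first, then dictionary, then stream blocks. */
    static void stream_with_dict(LZ4_streamHC_t* s,
                                 const char* dict, int dictSize,
                                 const char* block, int blockSize,
                                 char* dst, int dstCapacity)
    {
        LZ4_resetStreamHC_fast(s, LZ4HC_CLEVEL_DEFAULT); /* sets the level */
        LZ4_loadDictHC(s, dict, dictSize);               /* optional */
        (void)LZ4_compress_HC_continue(s, block, blockSize, dst, dstCapacity);
    }
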
@@ -135,12 +137,12 @@ LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
It's allowed to update compression level anytime between blocks,
using LZ4_setCompressionLevel() (experimental).
- 'dst' buffer should be sized to handle worst case scenarios
+ @dst buffer should be sized to handle worst case scenarios
(see LZ4_compressBound(), it ensures compression success).
In case of failure, the API does not guarantee recovery,
so the state _must_ be reset.
To ensure compression success
- whenever `dst` buffer size cannot be made >= LZ4_compressBound(),
+ whenever @dst buffer size cannot be made >= LZ4_compressBound(),
consider using LZ4_compress_HC_continue_destSize().
Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
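
Put differently: when dst can be sized to LZ4_compressBound(), success is guaranteed; when it cannot, LZ4_compress_HC_continue_destSize() compresses as much input as fits and updates *srcSizePtr to the number of bytes actually consumed. A hedged sketch of that fallback; the helper name is illustrative:

    #include "lz4hc.h"

    /* Sketch: use the guaranteed path when capacity allows,
     * otherwise fill dst as far as possible. */
    static int compress_block(LZ4_streamHC_t* s,
                              const char* src, int* srcSizePtr,
                              char* dst, int dstCapacity)
    {
        if (dstCapacity >= LZ4_compressBound(*srcSizePtr))
            return LZ4_compress_HC_continue(s, src, dst, *srcSizePtr, dstCapacity);
        return LZ4_compress_HC_continue_destSize(s, src, dst, srcSizePtr, dstCapacity);
    }
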
@@ -176,6 +178,34 @@ LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr
LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
+/*! LZ4_attach_HC_dictionary() : stable since v1.10.0
+ * This API allows for the efficient re-use of a static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
+ * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDictHC() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionary stream pointer may be NULL, in which
+ * case any existing dictionary stream is unset.
+ *
+ * A dictionary should only be attached to a stream without any history (i.e.,
+ * a stream that has just been reset).
+ *
+ * The dictionary will remain attached to the working stream only for the
+ * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
+ * dictionary context association from the working stream. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the lifetime of the stream session.
+ */
+LZ4LIB_API void
+LZ4_attach_HC_dictionary(LZ4_streamHC_t* working_stream,
+ const LZ4_streamHC_t* dictionary_stream);
+
/*^**********************************************
* !!!!!! STATIC LINKING ONLY !!!!!!
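
The intended pattern, then: prepare a dictionary stream once (LZ4_initStreamHC(), LZ4_setCompressionLevel(), LZ4_loadDictHC()), keep it untouched, and attach it to each freshly reset working stream instead of re-loading the dictionary. A minimal sketch under the assumptions stated in the comment above (error handling elided; the helper name is illustrative):

    #include "lz4hc.h"

    /* Sketch: dictStream was prepared once and is never modified;
     * each session attaches it to a fresh working stream by reference. */
    static void compress_with_shared_dict(const LZ4_streamHC_t* dictStream,
                                          LZ4_streamHC_t* work,
                                          const char* src, int srcSize,
                                          char* dst, int dstCapacity)
    {
        LZ4_resetStreamHC_fast(work, LZ4HC_CLEVEL_DEFAULT);
        LZ4_attach_HC_dictionary(work, dictStream);   /* no-copy reference */
        (void)LZ4_compress_HC_continue(work, src, srcSize, dst, dstCapacity);
    }
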
@@ -204,18 +234,18 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
{
- LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
- LZ4_u16 chainTable[LZ4HC_MAXD];
- const LZ4_byte* end; /* next block here to continue on current prefix */
+ LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
+ LZ4_u16 chainTable[LZ4HC_MAXD];
+ const LZ4_byte* end; /* next block here to continue on current prefix */
const LZ4_byte* prefixStart; /* Indexes relative to this position */
const LZ4_byte* dictStart; /* alternate reference for extDict */
- LZ4_u32 dictLimit; /* below that point, need extDict */
- LZ4_u32 lowLimit; /* below that point, no more dict */
- LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
- short compressionLevel;
- LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
- otherwise, favor compression ratio */
- LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
+ LZ4_u32 dictLimit; /* below that point, need extDict */
+ LZ4_u32 lowLimit; /* below that point, no more history */
+ LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
+ short compressionLevel;
+ LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
+ otherwise, favor compression ratio */
+ LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
const LZ4HC_CCtx_internal* dictCtx;
};
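
The index fields above describe a two-segment window: indexes at or above dictLimit live in the current prefix (based at prefixStart), indexes in [lowLimit, dictLimit) live in the external dictionary (based at dictStart), and anything below lowLimit is unreachable. A hedged sketch of the pointer recovery this layout implies; illustrative only, since internal_donotuse fields are not meant to be accessed directly:

    /* Sketch: map an index back to a byte pointer under the
     * two-segment layout documented by the fields above. */
    static const LZ4_byte* indexToPtr(const LZ4HC_CCtx_internal* ctx, LZ4_u32 idx)
    {
        if (idx >= ctx->dictLimit)                      /* current prefix */
            return ctx->prefixStart + (idx - ctx->dictLimit);
        return ctx->dictStart + (idx - ctx->lowLimit);  /* extDict segment */
    }
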
@@ -376,35 +406,6 @@ LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset (
int srcSize, int dstCapacity,
int compressionLevel);
-/*! LZ4_attach_HC_dictionary() :
- * This is an experimental API that allows for the efficient use of a
- * static dictionary many times.
- *
- * Rather than re-loading the dictionary buffer into a working context before
- * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
- * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
- * in which the working stream references the dictionary stream in-place.
- *
- * Several assumptions are made about the state of the dictionary stream.
- * Currently, only streams which have been prepared by LZ4_loadDictHC() should
- * be expected to work.
- *
- * Alternatively, the provided dictionary stream pointer may be NULL, in which
- * case any existing dictionary stream is unset.
- *
- * A dictionary should only be attached to a stream without any history (i.e.,
- * a stream that has just been reset).
- *
- * The dictionary will remain attached to the working stream only for the
- * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
- * dictionary context association from the working stream. The dictionary
- * stream (and source buffer) must remain in-place / accessible / unchanged
- * through the lifetime of the stream session.
- */
-LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
- LZ4_streamHC_t *working_stream,
- const LZ4_streamHC_t *dictionary_stream);
-
#if defined (__cplusplus)
}
#endif
diff --git a/contrib/libs/lz4/ya.make b/contrib/libs/lz4/ya.make
index 78477250e2..8c8428bca6 100644
--- a/contrib/libs/lz4/ya.make
+++ b/contrib/libs/lz4/ya.make
@@ -6,9 +6,9 @@ LICENSE(BSD-2-Clause)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(1.9.4)
+VERSION(1.10.0)
-ORIGINAL_SOURCE(https://github.com/lz4/lz4/archive/v1.9.4.tar.gz)
+ORIGINAL_SOURCE(https://github.com/lz4/lz4/archive/v1.10.0.tar.gz)
PEERDIR(
contrib/libs/xxhash