author     igorsolovyev <igorsolovyev@yandex-team.ru>  2022-02-10 16:48:03 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:48:03 +0300
commit     93dc653cf53bf7a9319b52b85a7c02edfd95463d
tree       b85de7682b5f10d28a798003716a65756425aa15
parent     6ab7e5f5ada0643a48d393717f443bd548706ffc
download   ydb-93dc653cf53bf7a9319b52b85a7c02edfd95463d.tar.gz
Restoring authorship annotation for <igorsolovyev@yandex-team.ru>. Commit 1 of 2.
-rw-r--r--  contrib/libs/zstd/lib/common/bitstream.h  214
-rw-r--r--  contrib/libs/zstd/lib/common/compiler.h  164
-rw-r--r--  contrib/libs/zstd/lib/common/cpu.h  418
-rw-r--r--  contrib/libs/zstd/lib/common/entropy_common.c  24
-rw-r--r--  contrib/libs/zstd/lib/common/error_private.c  32
-rw-r--r--  contrib/libs/zstd/lib/common/error_private.h  14
-rw-r--r--  contrib/libs/zstd/lib/common/fse.h  120
-rw-r--r--  contrib/libs/zstd/lib/common/fse_decompress.c  10
-rw-r--r--  contrib/libs/zstd/lib/common/huf.h  306
-rw-r--r--  contrib/libs/zstd/lib/common/mem.h  62
-rw-r--r--  contrib/libs/zstd/lib/common/pool.c  462
-rw-r--r--  contrib/libs/zstd/lib/common/pool.h  102
-rw-r--r--  contrib/libs/zstd/lib/common/threading.c  138
-rw-r--r--  contrib/libs/zstd/lib/common/threading.h  238
-rw-r--r--  contrib/libs/zstd/lib/common/zstd_common.c  52
-rw-r--r--  contrib/libs/zstd/lib/common/zstd_internal.h  138
-rw-r--r--  contrib/libs/zstd/lib/compress/fse_compress.c  58
-rw-r--r--  contrib/libs/zstd/lib/compress/huf_compress.c  316
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_compress.c  2028
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_compress_internal.h  1140
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_double_fast.c  382
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_double_fast.h  60
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_fast.c  286
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_fast.h  58
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_lazy.c  1026
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_lazy.h  86
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_ldm.c  724
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_ldm.h  198
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_opt.c  1006
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_opt.h  44
-rw-r--r--  contrib/libs/zstd/lib/compress/zstdmt_compress.c  2570
-rw-r--r--  contrib/libs/zstd/lib/compress/zstdmt_compress.h  110
-rw-r--r--  contrib/libs/zstd/lib/decompress/huf_decompress.c  270
-rw-r--r--  contrib/libs/zstd/lib/decompress/zstd_decompress.c  1124
-rw-r--r--  contrib/libs/zstd/lib/dictBuilder/cover.c  1746
-rw-r--r--  contrib/libs/zstd/lib/dictBuilder/zdict.c  384
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_legacy.h  244
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v01.c  64
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v01.h  14
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v02.c  106
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v02.h  14
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v03.c  104
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v03.h  14
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v04.c  124
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v04.h  14
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v05.c  96
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v05.h  10
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v06.c  96
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v06.h  14
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v07.c  116
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v07.h  16
-rw-r--r--  contrib/libs/zstd/lib/zdict.h  246
-rw-r--r--  contrib/libs/zstd/lib/zstd.h  578
-rw-r--r--  contrib/libs/zstd/lib/zstd_errors.h  128
-rw-r--r--  library/cpp/codecs/ut/codecs_ut.cpp  1504
-rw-r--r--  library/cpp/codecs/zstd_dict_codec.cpp  6
56 files changed, 9809 insertions, 9809 deletions
diff --git a/contrib/libs/zstd/lib/common/bitstream.h b/contrib/libs/zstd/lib/common/bitstream.h
index 84b6062ff3..abbde06ca8 100644
--- a/contrib/libs/zstd/lib/common/bitstream.h
+++ b/contrib/libs/zstd/lib/common/bitstream.h
@@ -43,21 +43,21 @@ extern "C" {
# endif
#endif
-#define STREAM_ACCUMULATOR_MIN_32 25
-#define STREAM_ACCUMULATOR_MIN_64 57
-#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
-
+#define STREAM_ACCUMULATOR_MIN_32 25
+#define STREAM_ACCUMULATOR_MIN_64 57
+#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
+
/*-******************************************
* bitStream encoding API (write forward)
********************************************/
/* bitStream can mix input from multiple sources.
- * A critical property of these streams is that they encode and decode in **reverse** direction.
- * So the first bit sequence you add will be the last to be read, like a LIFO stack.
- */
+ * A critical property of these streams is that they encode and decode in **reverse** direction.
+ * So the first bit sequence you add will be the last to be read, like a LIFO stack.
+ */
typedef struct {
size_t bitContainer;
- unsigned bitPos;
+ unsigned bitPos;
char* startPtr;
char* ptr;
char* endPtr;
@@ -94,7 +94,7 @@ typedef struct {
unsigned bitsConsumed;
const char* ptr;
const char* start;
- const char* limitPtr;
+ const char* limitPtr;
} BIT_DStream_t;
typedef enum { BIT_DStream_unfinished = 0,
@@ -137,10 +137,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/*-**************************************************************
* Internal functions
****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
- assert(val != 0);
- {
+ assert(val != 0);
+ {
# if defined(_MSC_VER) /* Visual */
# if STATIC_BMI2 == 1
return _lzcnt_u32(val) ^ 31;
@@ -159,59 +159,59 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
# elif defined(__ICCARM__) /* IAR Intrinsic */
return 31 - __CLZ(val);
# else /* Software version */
- static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
- 11, 14, 16, 18, 22, 25, 3, 30,
- 8, 12, 20, 28, 15, 17, 24, 7,
- 19, 27, 23, 6, 26, 5, 4, 31 };
- U32 v = val;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
+ 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7,
+ 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
# endif
- }
+ }
}
/*===== Local Constants =====*/
-static const unsigned BIT_mask[] = {
- 0, 1, 3, 7, 0xF, 0x1F,
- 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
- 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
- 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
- 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
- 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
-#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
+static const unsigned BIT_mask[] = {
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
+ 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+ 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
+ 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
+#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
/*-**************************************************************
* bitStream encoding
****************************************************************/
/*! BIT_initCStream() :
- * `dstCapacity` must be > sizeof(size_t)
+ * `dstCapacity` must be > sizeof(size_t)
* @return : 0 if success,
- * otherwise an error code (can be tested using ERR_isError()) */
-MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
- void* startPtr, size_t dstCapacity)
+ * otherwise an error code (can be tested using ERR_isError()) */
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
+ void* startPtr, size_t dstCapacity)
{
bitC->bitContainer = 0;
bitC->bitPos = 0;
bitC->startPtr = (char*)startPtr;
bitC->ptr = bitC->startPtr;
- bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
- if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
+ if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
return 0;
}
/*! BIT_addBits() :
- * can add up to 31 bits into `bitC`.
- * Note : does not check for register overflow ! */
-MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
- size_t value, unsigned nbBits)
+ * can add up to 31 bits into `bitC`.
+ * Note : does not check for register overflow ! */
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
{
DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
- assert(nbBits < BIT_MASK_SIZE);
- assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert(nbBits < BIT_MASK_SIZE);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
bitC->bitPos += nbBits;
}
@@ -219,74 +219,74 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
/*! BIT_addBitsFast() :
* works only if `value` is _clean_,
* meaning all high bits above nbBits are 0 */
-MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
- size_t value, unsigned nbBits)
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
{
- assert((value>>nbBits) == 0);
- assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert((value>>nbBits) == 0);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
bitC->bitContainer |= value << bitC->bitPos;
bitC->bitPos += nbBits;
}
/*! BIT_flushBitsFast() :
- * assumption : bitContainer has not overflowed
+ * assumption : bitContainer has not overflowed
* unsafe version; does not check buffer overflow */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
- assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
assert(bitC->ptr <= bitC->endPtr);
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
bitC->bitPos &= 7;
- bitC->bitContainer >>= nbBytes*8;
+ bitC->bitContainer >>= nbBytes*8;
}
/*! BIT_flushBits() :
- * assumption : bitContainer has not overflowed
+ * assumption : bitContainer has not overflowed
* safe version; check for buffer overflow, and prevents it.
- * note : does not signal buffer overflow.
- * overflow will be revealed later on using BIT_closeCStream() */
+ * note : does not signal buffer overflow.
+ * overflow will be revealed later on using BIT_closeCStream() */
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
- assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
assert(bitC->ptr <= bitC->endPtr);
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
bitC->bitPos &= 7;
- bitC->bitContainer >>= nbBytes*8;
+ bitC->bitContainer >>= nbBytes*8;
}
/*! BIT_closeCStream() :
* @return : size of CStream, in bytes,
- * or 0 if it could not fit into dstBuffer */
+ * or 0 if it could not fit into dstBuffer */
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
{
BIT_addBitsFast(bitC, 1, 1); /* endMark */
BIT_flushBits(bitC);
- if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+ if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}
/*-********************************************************
-* bitStream decoding
+* bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
- * Initialize a BIT_DStream_t.
- * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
- * `srcSize` must be the *exact* size of the bitStream, in bytes.
- * @return : size of stream (== srcSize), or an errorCode if a problem is detected
- */
+ * Initialize a BIT_DStream_t.
+ * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
+ * `srcSize` must be the *exact* size of the bitStream, in bytes.
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected
+ */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
- bitD->start = (const char*)srcBuffer;
- bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
-
+ bitD->start = (const char*)srcBuffer;
+ bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
+
if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
@@ -298,30 +298,30 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
- case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
ZSTD_FALLTHROUGH;
-
- case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
ZSTD_FALLTHROUGH;
-
- case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
ZSTD_FALLTHROUGH;
-
- case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
ZSTD_FALLTHROUGH;
-
- case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
ZSTD_FALLTHROUGH;
-
- case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
+
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
ZSTD_FALLTHROUGH;
-
- default: break;
- }
- { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
- bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
- if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
+
+ default: break;
}
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
+ }
bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
}
@@ -337,7 +337,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c
{
U32 const regMask = sizeof(bitContainer)*8 - 1;
/* if start > regMask, bitstream is corrupted, and result is undefined */
- assert(nbBits < BIT_MASK_SIZE);
+ assert(nbBits < BIT_MASK_SIZE);
/* x86 transform & ((1 << nbBits) - 1) to bzhi instruction, it is better
* than accessing memory. When bmi2 instruction is not present, we consider
* such cpus old (pre-Haswell, 2013) and their performance is not of that
@@ -355,7 +355,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 co
#if defined(STATIC_BMI2) && STATIC_BMI2 == 1
return _bzhi_u64(bitContainer, nbBits);
#else
- assert(nbBits < BIT_MASK_SIZE);
+ assert(nbBits < BIT_MASK_SIZE);
return bitContainer & BIT_mask[nbBits];
#endif
}
@@ -365,7 +365,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 co
* local register is not modified.
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
- * @return : value extracted */
+ * @return : value extracted */
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
/* arbitrate between double-shift and shift+mask */
@@ -375,18 +375,18 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U3
return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
#else
/* this code path is slower on my os-x laptop */
- U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
- return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
#endif
}
/*! BIT_lookBitsFast() :
- * unsafe version; only works if nbBits >= 1 */
+ * unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
- U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
- assert(nbBits >= 1);
- return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ assert(nbBits >= 1);
+ return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
}
MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
@@ -397,7 +397,7 @@ MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
/*! BIT_readBits() :
* Read (consume) next n bits from local register and update.
* Pay attention to not read more than nbBits contained into local register.
- * @return : extracted value. */
+ * @return : extracted value. */
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
{
size_t const value = BIT_lookBits(bitD, nbBits);
@@ -406,11 +406,11 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned n
}
/*! BIT_readBitsFast() :
- * unsafe version; only works only if nbBits >= 1 */
+ * unsafe version; only works only if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
{
size_t const value = BIT_lookBitsFast(bitD, nbBits);
- assert(nbBits >= 1);
+ assert(nbBits >= 1);
BIT_skipBits(bitD, nbBits);
return value;
}
@@ -433,23 +433,23 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
}
/*! BIT_reloadDStream() :
- * Refill `bitD` from buffer previously set in BIT_initDStream() .
- * This function is safe, it guarantees it will not read beyond src buffer.
- * @return : status of `BIT_DStream_t` internal register.
- * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
+ * Refill `bitD` from buffer previously set in BIT_initDStream() .
+ * This function is safe, it guarantees it will not read beyond src buffer.
+ * @return : status of `BIT_DStream_t` internal register.
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
- return BIT_DStream_overflow;
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */
+ return BIT_DStream_overflow;
- if (bitD->ptr >= bitD->limitPtr) {
+ if (bitD->ptr >= bitD->limitPtr) {
return BIT_reloadDStreamFast(bitD);
}
if (bitD->ptr == bitD->start) {
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
return BIT_DStream_completed;
}
- /* start < ptr < limitPtr */
+ /* start < ptr < limitPtr */
{ U32 nbBytes = bitD->bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start) {
@@ -458,14 +458,14 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
- bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
return result;
}
}
/*! BIT_endOfDStream() :
- * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
- */
+ * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
+ */
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
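The software fallback for BIT_highbit32 restored in the hunks above relies on a De Bruijn-style multiply-and-lookup to find the index of the highest set bit without compiler intrinsics: smear the top bit downward, multiply by a magic constant, and use the top five bits of the product as a table index. A minimal standalone sketch (hypothetical test harness, not part of this commit) that cross-checks the trick against a naive loop:

```c
/* Standalone check of the De Bruijn log2 trick used by BIT_highbit32
 * when no intrinsic is available. Table and constant are copied from
 * the diff above; the harness itself is hypothetical. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned highbit32_debruijn(uint32_t val)
{
    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
                                              11, 14, 16, 18, 22, 25, 3, 30,
                                              8, 12, 20, 28, 15, 17, 24, 7,
                                              19, 27, 23, 6, 26, 5, 4, 31 };
    uint32_t v = val;
    v |= v >> 1; v |= v >> 2; v |= v >> 4;   /* smear the top bit down: */
    v |= v >> 8; v |= v >> 16;               /* v becomes 2^(r+1) - 1   */
    return DeBruijnClz[(uint32_t)(v * 0x07C4ACDDU) >> 27];
}

static unsigned highbit32_naive(uint32_t val)
{
    unsigned r = 0;
    while (val >>= 1) r++;
    return r;
}

int main(void)
{
    uint32_t v;
    for (v = 1; v < (1u << 20); v++)
        assert(highbit32_debruijn(v) == highbit32_naive(v));
    printf("highbit32(0x12345) = %u\n", highbit32_debruijn(0x12345u));  /* 16 */
    return 0;
}
```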
diff --git a/contrib/libs/zstd/lib/common/compiler.h b/contrib/libs/zstd/lib/common/compiler.h
index 516930c01e..441b3586d2 100644
--- a/contrib/libs/zstd/lib/common/compiler.h
+++ b/contrib/libs/zstd/lib/common/compiler.h
@@ -1,38 +1,38 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_COMPILER_H
-#define ZSTD_COMPILER_H
-
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPILER_H
+#define ZSTD_COMPILER_H
+
#include "portability_macros.h"
-/*-*******************************************************
-* Compiler specifics
-*********************************************************/
-/* force inlining */
+/*-*******************************************************
+* Compiler specifics
+*********************************************************/
+/* force inlining */
#if !defined(ZSTD_NO_INLINE)
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
-# define INLINE_KEYWORD inline
-#else
-# define INLINE_KEYWORD
-#endif
-
+# define INLINE_KEYWORD inline
+#else
+# define INLINE_KEYWORD
+#endif
+
#if defined(__GNUC__) || defined(__ICCARM__)
-# define FORCE_INLINE_ATTR __attribute__((always_inline))
-#elif defined(_MSC_VER)
-# define FORCE_INLINE_ATTR __forceinline
-#else
-# define FORCE_INLINE_ATTR
-#endif
-
+# define FORCE_INLINE_ATTR __attribute__((always_inline))
+#elif defined(_MSC_VER)
+# define FORCE_INLINE_ATTR __forceinline
+#else
+# define FORCE_INLINE_ATTR
+#endif
+
#else
#define INLINE_KEYWORD
@@ -40,7 +40,7 @@
#endif
-/**
+/**
On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
This explicitly marks such functions as __cdecl so that the code will still compile
if a CC other than __cdecl has been made the default.
@@ -52,28 +52,28 @@
#endif
/**
- * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
+ * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
* parameters. They must be inlined for the compiler to eliminate the constant
- * branches.
- */
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
-/**
- * HINT_INLINE is used to help the compiler generate better code. It is *not*
- * used for "templates", so it can be tweaked based on the compilers
- * performance.
- *
- * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
- * always_inline attribute.
- *
- * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
- * attribute.
- */
-#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
-# define HINT_INLINE static INLINE_KEYWORD
-#else
-# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
-#endif
-
+ * branches.
+ */
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+/**
+ * HINT_INLINE is used to help the compiler generate better code. It is *not*
+ * used for "templates", so it can be tweaked based on the compilers
+ * performance.
+ *
+ * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
+ * always_inline attribute.
+ *
+ * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
+ * attribute.
+ */
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
+# define HINT_INLINE static INLINE_KEYWORD
+#else
+# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
+#endif
+
/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
#if defined(__GNUC__)
# define UNUSED_ATTR __attribute__((unused))
@@ -81,37 +81,37 @@
# define UNUSED_ATTR
#endif
-/* force no inlining */
-#ifdef _MSC_VER
-# define FORCE_NOINLINE static __declspec(noinline)
-#else
+/* force no inlining */
+#ifdef _MSC_VER
+# define FORCE_NOINLINE static __declspec(noinline)
+#else
# if defined(__GNUC__) || defined(__ICCARM__)
-# define FORCE_NOINLINE static __attribute__((__noinline__))
-# else
-# define FORCE_NOINLINE static
-# endif
-#endif
-
-
-/* target attribute */
+# define FORCE_NOINLINE static __attribute__((__noinline__))
+# else
+# define FORCE_NOINLINE static
+# endif
+#endif
+
+
+/* target attribute */
#if defined(__GNUC__) || defined(__ICCARM__)
-# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
-#else
-# define TARGET_ATTRIBUTE(target)
-#endif
-
+# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
+#else
+# define TARGET_ATTRIBUTE(target)
+#endif
+
/* Target attribute for BMI2 dynamic dispatch.
* Enable lzcnt, bmi, and bmi2.
* We test for bmi1 & bmi2. lzcnt is included in bmi1.
- */
+ */
#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
-
+
/* prefetch
* can be disabled, by declaring NO_PREFETCH build macro */
#if defined(NO_PREFETCH)
# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
-#else
+#else
# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */
# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
# define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
@@ -127,7 +127,7 @@
# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
# endif
#endif /* NO_PREFETCH */
-
+
#define CACHELINE_SIZE 64
#define PREFETCH_AREA(p, s) { \
@@ -165,16 +165,16 @@
#define UNLIKELY(x) (x)
#endif
-/* disable warnings */
-#ifdef _MSC_VER /* Visual Studio */
-# include <intrin.h> /* For Visual 2005 */
-# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
-# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
-# pragma warning(disable : 4324) /* disable: C4324: padded structure */
-#endif
-
+/* disable warnings */
+#ifdef _MSC_VER /* Visual Studio */
+# include <intrin.h> /* For Visual 2005 */
+# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
+# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
+# pragma warning(disable : 4324) /* disable: C4324: padded structure */
+#endif
+
/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
#ifndef STATIC_BMI2
# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))
@@ -332,4 +332,4 @@ void __asan_poison_memory_region(void const volatile *addr, size_t size);
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif
-#endif /* ZSTD_COMPILER_H */
+#endif /* ZSTD_COMPILER_H */
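The restored comments in compiler.h describe FORCE_INLINE_TEMPLATE as the basis for C "templates": a forced-inline worker takes constant parameters, and thin wrappers pin those constants so the compiler folds away dead branches in each specialization. A minimal sketch of the idiom, assuming a GCC/Clang-style always_inline attribute as compiler.h itself uses; the copy helper and its names are hypothetical:

```c
/* Sketch of the C "template" idiom behind FORCE_INLINE_TEMPLATE.
 * Hypothetical example, not code from this commit. */
#include <stddef.h>
#include <string.h>

#define FORCE_INLINE_TEMPLATE static inline __attribute__((always_inline))

FORCE_INLINE_TEMPLATE size_t copy_generic(void* dst, const void* src,
                                          size_t n, int checked)
{
    if (checked) {   /* constant after inlining: the branch disappears */
        if (dst == NULL || src == NULL) return 0;
    }
    memcpy(dst, src, n);
    return n;
}

/* Each wrapper compiles into a separately specialized body. */
size_t copy_checked  (void* d, const void* s, size_t n) { return copy_generic(d, s, n, 1); }
size_t copy_unchecked(void* d, const void* s, size_t n) { return copy_generic(d, s, n, 0); }
```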
diff --git a/contrib/libs/zstd/lib/common/cpu.h b/contrib/libs/zstd/lib/common/cpu.h
index 8acd33be3c..547cea7efd 100644
--- a/contrib/libs/zstd/lib/common/cpu.h
+++ b/contrib/libs/zstd/lib/common/cpu.h
@@ -1,213 +1,213 @@
-/*
+/*
* Copyright (c) Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_COMMON_CPU_H
-#define ZSTD_COMMON_CPU_H
-
-/**
- * Implementation taken from folly/CpuId.h
- * https://github.com/facebook/folly/blob/master/folly/CpuId.h
- */
-
-#include "mem.h"
-
-#ifdef _MSC_VER
-#include <intrin.h>
-#endif
-
-typedef struct {
- U32 f1c;
- U32 f1d;
- U32 f7b;
- U32 f7c;
-} ZSTD_cpuid_t;
-
-MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
- U32 f1c = 0;
- U32 f1d = 0;
- U32 f7b = 0;
- U32 f7c = 0;
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMMON_CPU_H
+#define ZSTD_COMMON_CPU_H
+
+/**
+ * Implementation taken from folly/CpuId.h
+ * https://github.com/facebook/folly/blob/master/folly/CpuId.h
+ */
+
+#include "mem.h"
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+typedef struct {
+ U32 f1c;
+ U32 f1d;
+ U32 f7b;
+ U32 f7c;
+} ZSTD_cpuid_t;
+
+MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
+ U32 f1c = 0;
+ U32 f1d = 0;
+ U32 f7b = 0;
+ U32 f7c = 0;
#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
- int reg[4];
- __cpuid((int*)reg, 0);
- {
- int const n = reg[0];
- if (n >= 1) {
- __cpuid((int*)reg, 1);
- f1c = (U32)reg[2];
- f1d = (U32)reg[3];
- }
- if (n >= 7) {
- __cpuidex((int*)reg, 7, 0);
- f7b = (U32)reg[1];
- f7c = (U32)reg[2];
- }
- }
-#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
- /* The following block like the normal cpuid branch below, but gcc
- * reserves ebx for use of its pic register so we must specially
- * handle the save and restore to avoid clobbering the register
- */
- U32 n;
- __asm__(
- "pushl %%ebx\n\t"
- "cpuid\n\t"
- "popl %%ebx\n\t"
- : "=a"(n)
- : "a"(0)
- : "ecx", "edx");
- if (n >= 1) {
- U32 f1a;
- __asm__(
- "pushl %%ebx\n\t"
- "cpuid\n\t"
- "popl %%ebx\n\t"
- : "=a"(f1a), "=c"(f1c), "=d"(f1d)
+ int reg[4];
+ __cpuid((int*)reg, 0);
+ {
+ int const n = reg[0];
+ if (n >= 1) {
+ __cpuid((int*)reg, 1);
+ f1c = (U32)reg[2];
+ f1d = (U32)reg[3];
+ }
+ if (n >= 7) {
+ __cpuidex((int*)reg, 7, 0);
+ f7b = (U32)reg[1];
+ f7c = (U32)reg[2];
+ }
+ }
+#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
+ /* The following block like the normal cpuid branch below, but gcc
+ * reserves ebx for use of its pic register so we must specially
+ * handle the save and restore to avoid clobbering the register
+ */
+ U32 n;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(n)
+ : "a"(0)
+ : "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(f1a), "=c"(f1c), "=d"(f1d)
: "a"(1));
- }
- if (n >= 7) {
- __asm__(
- "pushl %%ebx\n\t"
- "cpuid\n\t"
+ }
+ if (n >= 7) {
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
"movl %%ebx, %%eax\n\t"
- "popl %%ebx"
- : "=a"(f7b), "=c"(f7c)
- : "a"(7), "c"(0)
- : "edx");
- }
-#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
- U32 n;
- __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
- if (n >= 1) {
- U32 f1a;
- __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
- }
- if (n >= 7) {
- U32 f7a;
- __asm__("cpuid"
- : "=a"(f7a), "=b"(f7b), "=c"(f7c)
- : "a"(7), "c"(0)
- : "edx");
- }
-#endif
- {
- ZSTD_cpuid_t cpuid;
- cpuid.f1c = f1c;
- cpuid.f1d = f1d;
- cpuid.f7b = f7b;
- cpuid.f7c = f7c;
- return cpuid;
- }
-}
-
-#define X(name, r, bit) \
- MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \
- return ((cpuid.r) & (1U << bit)) != 0; \
- }
-
-/* cpuid(1): Processor Info and Feature Bits. */
-#define C(name, bit) X(name, f1c, bit)
- C(sse3, 0)
- C(pclmuldq, 1)
- C(dtes64, 2)
- C(monitor, 3)
- C(dscpl, 4)
- C(vmx, 5)
- C(smx, 6)
- C(eist, 7)
- C(tm2, 8)
- C(ssse3, 9)
- C(cnxtid, 10)
- C(fma, 12)
- C(cx16, 13)
- C(xtpr, 14)
- C(pdcm, 15)
- C(pcid, 17)
- C(dca, 18)
- C(sse41, 19)
- C(sse42, 20)
- C(x2apic, 21)
- C(movbe, 22)
- C(popcnt, 23)
- C(tscdeadline, 24)
- C(aes, 25)
- C(xsave, 26)
- C(osxsave, 27)
- C(avx, 28)
- C(f16c, 29)
- C(rdrand, 30)
-#undef C
-#define D(name, bit) X(name, f1d, bit)
- D(fpu, 0)
- D(vme, 1)
- D(de, 2)
- D(pse, 3)
- D(tsc, 4)
- D(msr, 5)
- D(pae, 6)
- D(mce, 7)
- D(cx8, 8)
- D(apic, 9)
- D(sep, 11)
- D(mtrr, 12)
- D(pge, 13)
- D(mca, 14)
- D(cmov, 15)
- D(pat, 16)
- D(pse36, 17)
- D(psn, 18)
- D(clfsh, 19)
- D(ds, 21)
- D(acpi, 22)
- D(mmx, 23)
- D(fxsr, 24)
- D(sse, 25)
- D(sse2, 26)
- D(ss, 27)
- D(htt, 28)
- D(tm, 29)
- D(pbe, 31)
-#undef D
-
-/* cpuid(7): Extended Features. */
-#define B(name, bit) X(name, f7b, bit)
- B(bmi1, 3)
- B(hle, 4)
- B(avx2, 5)
- B(smep, 7)
- B(bmi2, 8)
- B(erms, 9)
- B(invpcid, 10)
- B(rtm, 11)
- B(mpx, 14)
- B(avx512f, 16)
- B(avx512dq, 17)
- B(rdseed, 18)
- B(adx, 19)
- B(smap, 20)
- B(avx512ifma, 21)
- B(pcommit, 22)
- B(clflushopt, 23)
- B(clwb, 24)
- B(avx512pf, 26)
- B(avx512er, 27)
- B(avx512cd, 28)
- B(sha, 29)
- B(avx512bw, 30)
- B(avx512vl, 31)
-#undef B
-#define C(name, bit) X(name, f7c, bit)
- C(prefetchwt1, 0)
- C(avx512vbmi, 1)
-#undef C
-
-#undef X
-
-#endif /* ZSTD_COMMON_CPU_H */
+ "popl %%ebx"
+ : "=a"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
+ U32 n;
+ __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
+ }
+ if (n >= 7) {
+ U32 f7a;
+ __asm__("cpuid"
+ : "=a"(f7a), "=b"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#endif
+ {
+ ZSTD_cpuid_t cpuid;
+ cpuid.f1c = f1c;
+ cpuid.f1d = f1d;
+ cpuid.f7b = f7b;
+ cpuid.f7c = f7c;
+ return cpuid;
+ }
+}
+
+#define X(name, r, bit) \
+ MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \
+ return ((cpuid.r) & (1U << bit)) != 0; \
+ }
+
+/* cpuid(1): Processor Info and Feature Bits. */
+#define C(name, bit) X(name, f1c, bit)
+ C(sse3, 0)
+ C(pclmuldq, 1)
+ C(dtes64, 2)
+ C(monitor, 3)
+ C(dscpl, 4)
+ C(vmx, 5)
+ C(smx, 6)
+ C(eist, 7)
+ C(tm2, 8)
+ C(ssse3, 9)
+ C(cnxtid, 10)
+ C(fma, 12)
+ C(cx16, 13)
+ C(xtpr, 14)
+ C(pdcm, 15)
+ C(pcid, 17)
+ C(dca, 18)
+ C(sse41, 19)
+ C(sse42, 20)
+ C(x2apic, 21)
+ C(movbe, 22)
+ C(popcnt, 23)
+ C(tscdeadline, 24)
+ C(aes, 25)
+ C(xsave, 26)
+ C(osxsave, 27)
+ C(avx, 28)
+ C(f16c, 29)
+ C(rdrand, 30)
+#undef C
+#define D(name, bit) X(name, f1d, bit)
+ D(fpu, 0)
+ D(vme, 1)
+ D(de, 2)
+ D(pse, 3)
+ D(tsc, 4)
+ D(msr, 5)
+ D(pae, 6)
+ D(mce, 7)
+ D(cx8, 8)
+ D(apic, 9)
+ D(sep, 11)
+ D(mtrr, 12)
+ D(pge, 13)
+ D(mca, 14)
+ D(cmov, 15)
+ D(pat, 16)
+ D(pse36, 17)
+ D(psn, 18)
+ D(clfsh, 19)
+ D(ds, 21)
+ D(acpi, 22)
+ D(mmx, 23)
+ D(fxsr, 24)
+ D(sse, 25)
+ D(sse2, 26)
+ D(ss, 27)
+ D(htt, 28)
+ D(tm, 29)
+ D(pbe, 31)
+#undef D
+
+/* cpuid(7): Extended Features. */
+#define B(name, bit) X(name, f7b, bit)
+ B(bmi1, 3)
+ B(hle, 4)
+ B(avx2, 5)
+ B(smep, 7)
+ B(bmi2, 8)
+ B(erms, 9)
+ B(invpcid, 10)
+ B(rtm, 11)
+ B(mpx, 14)
+ B(avx512f, 16)
+ B(avx512dq, 17)
+ B(rdseed, 18)
+ B(adx, 19)
+ B(smap, 20)
+ B(avx512ifma, 21)
+ B(pcommit, 22)
+ B(clflushopt, 23)
+ B(clwb, 24)
+ B(avx512pf, 26)
+ B(avx512er, 27)
+ B(avx512cd, 28)
+ B(sha, 29)
+ B(avx512bw, 30)
+ B(avx512vl, 31)
+#undef B
+#define C(name, bit) X(name, f7c, bit)
+ C(prefetchwt1, 0)
+ C(avx512vbmi, 1)
+#undef C
+
+#undef X
+
+#endif /* ZSTD_COMMON_CPU_H */
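cpu.h's X/B/C/D macros expand into one predicate per feature bit (ZSTD_cpuid_bmi2(), ZSTD_cpuid_avx2(), and so on) over a single cached CPUID probe. A sketch of how those generated predicates are consumed, assuming cpu.h is on the include path; the printout and dispatch scaffolding are illustrative:

```c
/* Query CPUID once, then branch on the generated feature predicates.
 * Assumes the cpu.h from the diff above is on the include path. */
#include <stdio.h>
#include "cpu.h"   /* ZSTD_cpuid(), ZSTD_cpuid_bmi1(), ZSTD_cpuid_bmi2(), ... */

int main(void)
{
    ZSTD_cpuid_t const id = ZSTD_cpuid();   /* one probe; callers cache the struct */
    printf("bmi1: %d\n", ZSTD_cpuid_bmi1(id));
    printf("bmi2: %d\n", ZSTD_cpuid_bmi2(id));
    printf("avx2: %d\n", ZSTD_cpuid_avx2(id));

    /* Runtime dispatch reduces to a check like this: select the _bmi2
     * code paths when the predicate is non-zero. */
    int const useBmi2 = ZSTD_cpuid_bmi2(id);
    (void)useBmi2;
    return 0;
}
```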
diff --git a/contrib/libs/zstd/lib/common/entropy_common.c b/contrib/libs/zstd/lib/common/entropy_common.c
index 4229b40c5e..7734a6d40f 100644
--- a/contrib/libs/zstd/lib/common/entropy_common.c
+++ b/contrib/libs/zstd/lib/common/entropy_common.c
@@ -23,12 +23,12 @@
#include "huf.h"
-/*=== Version ===*/
-unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
-
-
-/*=== Error Management ===*/
-unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+/*=== Version ===*/
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
+
+
+/*=== Error Management ===*/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
unsigned HUF_isError(size_t code) { return ERR_isError(code); }
@@ -158,15 +158,15 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne
}
{
int const max = (2*threshold-1) - remaining;
- int count;
+ int count;
if ((bitStream & (threshold-1)) < (U32)max) {
- count = bitStream & (threshold-1);
- bitCount += nbBits-1;
+ count = bitStream & (threshold-1);
+ bitCount += nbBits-1;
} else {
- count = bitStream & (2*threshold-1);
+ count = bitStream & (2*threshold-1);
if (count >= threshold) count -= max;
- bitCount += nbBits;
+ bitCount += nbBits;
}
count--; /* extra accuracy */
@@ -179,7 +179,7 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne
assert(count == -1);
remaining += count;
}
- normalizedCounter[charnum++] = (short)count;
+ normalizedCounter[charnum++] = (short)count;
previous0 = !count;
assert(threshold > 1);
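The count-decoding branch restored in FSE_readNCount_body above is a truncated binary (phase-in) code: when the number of remaining values is not a power of two, the likelier values are read with one bit fewer. A textbook MSB-first sketch of the idea, with the caveat that FSE's actual variant reads bits LSB-first from the stream and maps values differently:

```c
/* Truncated binary coding: for n possible values, 2^(k+1)-n of them take
 * k = floor(log2(n)) bits, the rest take k+1. Textbook sketch, not FSE's
 * exact bit layout. */
#include <assert.h>
#include <stdio.h>

static unsigned floor_log2(unsigned n) { unsigned k = 0; while (n >>= 1) k++; return k; }

/* Encode x in [0,n); returns the number of bits, writes the code. */
static unsigned tb_encode(unsigned x, unsigned n, unsigned* code)
{
    unsigned const k = floor_log2(n);
    unsigned const u = (1u << (k + 1)) - n;   /* count of short codes */
    if (x < u) { *code = x; return k; }
    *code = x + u; return k + 1;
}

/* A real decoder peeks k bits first and reads the extra bit only when
 * that value is >= u; here the code length is passed in for brevity. */
static unsigned tb_decode(unsigned code, unsigned nbBits, unsigned n)
{
    unsigned const k = floor_log2(n);
    unsigned const u = (1u << (k + 1)) - n;
    if (nbBits == k) { assert(code < u); return code; }
    return code - u;
}

int main(void)
{
    unsigned const n = 5;   /* 0,1,2 take 2 bits; 3,4 take 3 bits */
    unsigned x;
    for (x = 0; x < n; x++) {
        unsigned code;
        unsigned const nb = tb_encode(x, n, &code);
        assert(tb_decode(code, nb, n) == x);
        printf("x=%u -> %u bits\n", x, nb);
    }
    return 0;
}
```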
diff --git a/contrib/libs/zstd/lib/common/error_private.c b/contrib/libs/zstd/lib/common/error_private.c
index 6d1135f8c3..cd5eda3191 100644
--- a/contrib/libs/zstd/lib/common/error_private.c
+++ b/contrib/libs/zstd/lib/common/error_private.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/* The purpose of this file is to have a single list of error strings embedded in binary */
@@ -27,26 +27,26 @@ const char* ERR_getErrorString(ERR_enum code)
case PREFIX(version_unsupported): return "Version not supported";
case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
- case PREFIX(corruption_detected): return "Corrupted block detected";
- case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
- case PREFIX(parameter_unsupported): return "Unsupported parameter";
- case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
+ case PREFIX(corruption_detected): return "Corrupted block detected";
+ case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+ case PREFIX(parameter_unsupported): return "Unsupported parameter";
+ case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
case PREFIX(init_missing): return "Context should be init first";
case PREFIX(memory_allocation): return "Allocation error : not enough memory";
- case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
+ case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
case PREFIX(dictionary_wrong): return "Dictionary mismatch";
- case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
- case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
- case PREFIX(srcSize_wrong): return "Src size is incorrect";
+ case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
+ case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
+ case PREFIX(srcSize_wrong): return "Src size is incorrect";
case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
- /* following error codes are not stable and may be removed or changed in a future version */
- case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
- case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
+ /* following error codes are not stable and may be removed or changed in a future version */
+ case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
+ case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
case PREFIX(maxCode):
diff --git a/contrib/libs/zstd/lib/common/error_private.h b/contrib/libs/zstd/lib/common/error_private.h
index 007d81066a..f61fc3b02f 100644
--- a/contrib/libs/zstd/lib/common/error_private.h
+++ b/contrib/libs/zstd/lib/common/error_private.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/* Note : this module is expected to remain private, do not expose it */
@@ -52,8 +52,8 @@ typedef ZSTD_ErrorCode ERR_enum;
* Error codes handling
******************************************/
#undef ERROR /* already defined on Visual Studio */
-#define ERROR(name) ZSTD_ERROR(name)
-#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
+#define ERROR(name) ZSTD_ERROR(name)
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
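The two macros restored here encode error codes as size_t values wrapped to the very top of the range, which is why ERR_isError() is a single comparison against ERROR(maxCode): any plausible byte count is small, any error is near SIZE_MAX. A standalone toy version of the convention; the enum values and compress_stub() are illustrative, not the real zstd_errors.h:

```c
/* Toy version of the ERROR()/ERR_isError() convention from the diff. */
#include <stddef.h>
#include <stdio.h>

enum { ERR_no_error = 0, ERR_dstSize_tooSmall = 70, ERR_maxCode = 120 };

#define ERROR(name) ((size_t)-ERR_##name)   /* e.g. (size_t)-70 == SIZE_MAX-69 */

static unsigned ERR_isError(size_t code) { return code > ERROR(maxCode); }

static size_t compress_stub(size_t dstCapacity, size_t srcSize)
{
    if (dstCapacity < srcSize) return ERROR(dstSize_tooSmall);
    return srcSize;   /* pretend we wrote srcSize bytes */
}

int main(void)
{
    size_t const r = compress_stub(8, 100);
    if (ERR_isError(r)) printf("error code %u\n", (unsigned)(0 - r));  /* 70 */
    else                printf("wrote %u bytes\n", (unsigned)r);
    return 0;
}
```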
diff --git a/contrib/libs/zstd/lib/common/fse.h b/contrib/libs/zstd/lib/common/fse.h
index 714bfd3e7f..c35d3c6e11 100644
--- a/contrib/libs/zstd/lib/common/fse.h
+++ b/contrib/libs/zstd/lib/common/fse.h
@@ -16,42 +16,42 @@
extern "C" {
#endif
-#ifndef FSE_H
-#define FSE_H
-
+#ifndef FSE_H
+#define FSE_H
+
/*-*****************************************
* Dependencies
******************************************/
#include "zstd_deps.h" /* size_t, ptrdiff_t */
-/*-*****************************************
-* FSE_PUBLIC_API : control library symbols visibility
-******************************************/
-#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
-# define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
-#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
-# define FSE_PUBLIC_API __declspec(dllexport)
-#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
-# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
-#else
-# define FSE_PUBLIC_API
-#endif
-
-/*------ Version ------*/
-#define FSE_VERSION_MAJOR 0
-#define FSE_VERSION_MINOR 9
-#define FSE_VERSION_RELEASE 0
-
-#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
-#define FSE_QUOTE(str) #str
-#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
-#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
-
-#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
-FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
-
+/*-*****************************************
+* FSE_PUBLIC_API : control library symbols visibility
+******************************************/
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define FSE_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define FSE_PUBLIC_API
+#endif
+
+/*------ Version ------*/
+#define FSE_VERSION_MAJOR 0
+#define FSE_VERSION_MINOR 9
+#define FSE_VERSION_RELEASE 0
+
+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
+#define FSE_QUOTE(str) #str
+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
+
+#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
+FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
+
/*-****************************************
* FSE simple functions
@@ -64,8 +64,8 @@ FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number;
if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
if FSE_isError(return), compression failed (more details using FSE_getErrorName())
*/
-FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize);
+FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
/*! FSE_decompress():
Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
@@ -77,18 +77,18 @@ FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
Why ? : making this distinction requires a header.
Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
-FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
- const void* cSrc, size_t cSrcSize);
+FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize);
/*-*****************************************
* Tool functions
******************************************/
-FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
+FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
/* Error Management */
-FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
-FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
+FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
/*-*****************************************
@@ -102,7 +102,7 @@ FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error co
if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
if FSE_isError(return), it's an error code.
*/
-FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
/*-*****************************************
@@ -132,7 +132,7 @@ or to save and provide normalized distribution using external method.
dynamically downsize 'tableLog' when conditions are met.
It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
@return : recommended tableLog (necessarily <= 'maxTableLog') */
-FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
/*! FSE_normalizeCount():
normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
@@ -151,7 +151,7 @@ FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tabl
/*! FSE_NCountWriteBound():
Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
Typically useful for allocation purpose. */
-FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
/*! FSE_writeNCount():
Compactly save 'normalizedCounter' into 'buffer'.
@@ -164,20 +164,20 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
/*! Constructor and Destructor of FSE_CTable.
Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
-FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
-FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
/*! FSE_buildCTable():
Builds `ct`, which must be already allocated, using FSE_createCTable().
@return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
/*! FSE_compress_usingCTable():
Compress `src` using `ct` into `dst` which must be already allocated.
@return : size of compressed data (<= `dstCapacity`),
or 0 if compressed data could not fit into `dst`,
or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
+FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
/*!
Tutorial :
@@ -244,20 +244,20 @@ FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
/*! Constructor and Destructor of FSE_DTable.
Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
-FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
-FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
+FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
/*! FSE_buildDTable():
Builds 'dt', which must be already allocated, using FSE_createDTable().
return : 0, or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
/*! FSE_decompress_usingDTable():
Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
into `dst` which must be already allocated.
@return : size of regenerated data (necessarily <= `dstCapacity`),
or an errorCode, which can be tested using FSE_isError() */
-FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
/*!
Tutorial :
@@ -287,10 +287,10 @@ FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/
-#endif /* FSE_H */
+#endif /* FSE_H */
-#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
-#define FSE_H_FSE_STATIC_LINKING_ONLY
+#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
+#define FSE_H_FSE_STATIC_LINKING_ONLY
/* *** Dependency *** */
#include "bitstream.h"
@@ -308,11 +308,11 @@ If there is an error, the function will return an error code, which can be teste
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog)))
-/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
-#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
-#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
-
+/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
+#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
+#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
+
/* *****************************************
* FSE advanced API
***************************************** */
@@ -361,11 +361,11 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
-typedef enum {
- FSE_repeat_none, /**< Cannot use the previous table */
- FSE_repeat_check, /**< Can use the previous table but it must be checked */
+typedef enum {
+ FSE_repeat_none, /**< Cannot use the previous table */
+ FSE_repeat_check, /**< Can use the previous table but it must be checked */
FSE_repeat_valid /**< Can use the previous table and it is assumed to be valid */
- } FSE_repeat;
+ } FSE_repeat;
/* *****************************************
* FSE symbol compression API
@@ -539,9 +539,9 @@ MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U3
MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
{
- FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
const U16* const stateTable = (const U16*)(statePtr->stateTable);
- U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+ U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
BIT_addBits(bitC, statePtr->value, nbBitsOut);
statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}
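fse.h's simple API, whose declarations are restored above, round-trips a buffer in two calls, with 0 and 1 reserved as "not compressible" and "RLE" sentinels on the compression side. A usage sketch, assuming fse.h is on the include path and the library is linked; the buffer sizes and sample data are made up:

```c
/* Round-trip with the simple FSE API declared in the diff above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "fse.h"

int main(void)
{
    char src[1000];
    memset(src,       'a', 500);   /* skewed distribution compresses well */
    memset(src + 500, 'b', 300);
    memset(src + 800, 'c', 200);

    size_t const bound = FSE_compressBound(sizeof(src));
    void* const cBuf = malloc(bound);
    size_t const cSize = FSE_compress(cBuf, bound, src, sizeof(src));
    if (FSE_isError(cSize)) { printf("error: %s\n", FSE_getErrorName(cSize)); return 1; }
    if (cSize == 0) { printf("not compressible: nothing stored in dst\n"); return 0; }
    if (cSize == 1) { printf("single repeated symbol: use RLE instead\n"); return 0; }

    {   char dst[1000];
        size_t const dSize = FSE_decompress(dst, sizeof(dst), cBuf, cSize);
        printf("1000 -> %u -> %u bytes, match=%d\n",
               (unsigned)cSize, (unsigned)dSize,
               memcmp(src, dst, sizeof(src)) == 0);
    }
    free(cBuf);
    return 0;
}
```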
diff --git a/contrib/libs/zstd/lib/common/fse_decompress.c b/contrib/libs/zstd/lib/common/fse_decompress.c
index a5a358015f..f915ad58a2 100644
--- a/contrib/libs/zstd/lib/common/fse_decompress.c
+++ b/contrib/libs/zstd/lib/common/fse_decompress.c
@@ -18,10 +18,10 @@
****************************************************************/
#include "debug.h" /* assert */
#include "bitstream.h"
-#include "compiler.h"
+#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
-#include "error_private.h"
+#include "error_private.h"
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h"
@@ -165,8 +165,8 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo
{ U32 u;
for (u=0; u<tableSize; u++) {
FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
- U32 const nextState = symbolNext[symbol]++;
- tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
} }
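
A worked example of the nbBits/newState arithmetic in this hunk, with illustrative numbers:

    /* Take tableLog = 6 (tableSize = 64) and a symbol with normalized count 4.
     * symbolNext[symbol] starts at 4, so the symbol's four cells see
     * nextState = 4, 5, 6, 7:
     *     BIT_highbit32(4..7) == 2          =>  nbBits = 6 - 2 = 4
     *     newState = (nextState << 4) - 64  =>  0, 16, 32, 48
     * Each of the symbol's cells therefore reloads 4 bits, and the destination
     * states land evenly spread across the table, as FSE requires. */
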
@@ -228,7 +228,7 @@ size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
return 0;
}
-FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt, const unsigned fast)
diff --git a/contrib/libs/zstd/lib/common/huf.h b/contrib/libs/zstd/lib/common/huf.h
index 85518481ec..cc398f64c0 100644
--- a/contrib/libs/zstd/lib/common/huf.h
+++ b/contrib/libs/zstd/lib/common/huf.h
@@ -16,98 +16,98 @@
extern "C" {
#endif
-#ifndef HUF_H_298734234
-#define HUF_H_298734234
+#ifndef HUF_H_298734234
+#define HUF_H_298734234
/* *** Dependencies *** */
#include "zstd_deps.h" /* size_t */
-/* *** library symbols visibility *** */
-/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
- * HUF symbols remain "private" (internal symbols for library only).
- * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
-#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
-# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
-#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
-# define HUF_PUBLIC_API __declspec(dllexport)
-#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
-# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
-#else
-# define HUF_PUBLIC_API
-#endif
-
-
-/* ========================== */
-/* *** simple functions *** */
-/* ========================== */
-
-/** HUF_compress() :
- * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
- * 'dst' buffer must be already allocated.
- * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
- * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
- * @return : size of compressed data (<= `dstCapacity`).
- * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
- * if HUF_isError(return), compression failed (more details using HUF_getErrorName())
- */
-HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize);
-
-/** HUF_decompress() :
- * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
- * into already allocated buffer 'dst', of minimum size 'dstSize'.
- * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
- * Note : in contrast with FSE, HUF_decompress can regenerate
- * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
- * because it knows the size to regenerate (originalSize).
- * @return : size of regenerated data (== originalSize),
- * or an error code, which can be tested using HUF_isError()
- */
-HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
- const void* cSrc, size_t cSrcSize);
-
-
+/* *** library symbols visibility *** */
+/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
+ * HUF symbols remain "private" (internal symbols for library only).
+ * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define HUF_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
+#else
+# define HUF_PUBLIC_API
+#endif
+
+
+/* ========================== */
+/* *** simple functions *** */
+/* ========================== */
+
+/** HUF_compress() :
+ * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
+ * 'dst' buffer must be already allocated.
+ * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
+ * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
+ * @return : size of compressed data (<= `dstCapacity`).
+ * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ * if HUF_isError(return), compression failed (more details using HUF_getErrorName())
+ */
+HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/** HUF_decompress() :
+ * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ * into already allocated buffer 'dst', of minimum size 'dstSize'.
+ * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
+ * Note : in contrast with FSE, HUF_decompress can regenerate
+ * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+ * because it knows the size to regenerate (originalSize).
+ * @return : size of regenerated data (== originalSize),
+ * or an error code, which can be tested using HUF_isError()
+ */
+HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize);
+
+
/* *** Tool functions *** */
-#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
-HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
+#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
+HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
/* Error Management */
-HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
-HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
+HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
+HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
/* *** Advanced function *** */
/** HUF_compress2() :
- * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
- * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
- * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
-HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned tableLog);
+ * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
+ * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
+ * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
+HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog);
/** HUF_compress4X_wksp() :
- * Same as HUF_compress2(), but uses externally allocated `workSpace`.
+ * Same as HUF_compress2(), but uses externally allocated `workSpace`.
* `workspace` must be at least as large as HUF_WORKSPACE_SIZE */
#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
-HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned tableLog,
- void* workSpace, size_t wkspSize);
-
-#endif /* HUF_H_298734234 */
-
-/* ******************************************************************
- * WARNING !!
- * The following section contains advanced and experimental definitions
- * which shall never be used in the context of a dynamic library,
- * because they are not guaranteed to remain stable in the future.
- * Only consider them in association with static linking.
- * *****************************************************************/
-#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
-#define HUF_H_HUF_STATIC_LINKING_ONLY
+HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize);
+
+#endif /* HUF_H_298734234 */
+
+/* ******************************************************************
+ * WARNING !!
+ * The following section contains advanced and experimental definitions
+ * which shall never be used in the context of a dynamic library,
+ * because they are not guaranteed to remain stable in the future.
+ * Only consider them in association with static linking.
+ * *****************************************************************/
+#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
+#define HUF_H_HUF_STATIC_LINKING_ONLY
/* *** Dependencies *** */
#include "mem.h" /* U32 */
@@ -117,9 +117,9 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
/* *** Constants *** */
#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
-#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
-#define HUF_SYMBOLVALUE_MAX 255
-
+#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
+#define HUF_SYMBOLVALUE_MAX 255
+
#define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
# error "HUF_TABLELOG_MAX is too large !"
@@ -131,7 +131,7 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
******************************************/
/* HUF buffer bounds */
#define HUF_CTABLEBOUND 129
-#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible data is pre-filtered with a fast heuristic */
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible data is pre-filtered with a fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* static allocation of HUF's Compression Table */
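
Since HUF_COMPRESSBOUND() is a pure macro, it can size a buffer at compile time; the 4 KB block size below is illustrative:

    #define MY_BLOCK_SIZE 4096   /* illustrative block size */
    static unsigned char cBuffer[HUF_COMPRESSBOUND(MY_BLOCK_SIZE)];
    /* HUF_compress(cBuffer, sizeof(cBuffer), src, MY_BLOCK_SIZE)
     * can then never fail for lack of output space. */
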
@@ -161,7 +161,7 @@ size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS
size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */
size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
#ifndef HUF_FORCE_DECOMPRESS_X1
@@ -171,22 +171,22 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
/* ****************************************
- * HUF detailed API
- * ****************************************/
-
-/*! HUF_compress() does the following:
- * 1. count symbol occurrences from source[] into table count[] using FSE_count() (exposed within "fse.h")
- * 2. (optional) refine tableLog using HUF_optimalTableLog()
- * 3. build Huffman table from count using HUF_buildCTable()
- * 4. save Huffman table to memory buffer using HUF_writeCTable()
- * 5. encode the data stream using HUF_compress4X_usingCTable()
- *
- * The following API allows targeting specific sub-functions for advanced tasks.
- * For example, it's possible to compress several blocks using the same 'CTable',
- * or to save and regenerate 'CTable' using external methods.
- */
+ * HUF detailed API
+ * ****************************************/
+
+/*! HUF_compress() does the following:
+ * 1. count symbol occurrences from source[] into table count[] using FSE_count() (exposed within "fse.h")
+ * 2. (optional) refine tableLog using HUF_optimalTableLog()
+ * 3. build Huffman table from count using HUF_buildCTable()
+ * 4. save Huffman table to memory buffer using HUF_writeCTable()
+ * 5. encode the data stream using HUF_compress4X_usingCTable()
+ *
+ * The following API allows targeting specific sub-functions for advanced tasks.
+ * For example, it's possible to compress several blocks using the same 'CTable',
+ * or to save and regenerate 'CTable' using external methods.
+ */
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap, in which case CTable will overwrite count's content */
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap, in which case CTable will overwrite count's content */
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
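
A hedged sketch of steps 2-5 from the list above, assuming the count[] histogram from step 1 is already available and using the HUF_CREATE_STATIC_CTABLE helper from this header's static-allocation section; the function name is illustrative:

    #define HUF_STATIC_LINKING_ONLY
    #include "huf.h"

    /* Compress one block while keeping control of the CTable (steps 2-5),
     * so the same table could be reused for further blocks. */
    static size_t compressWithOwnTable(void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       const unsigned* count, unsigned maxSymbolValue)
    {
        HUF_CREATE_STATIC_CTABLE(cTable, HUF_SYMBOLVALUE_MAX);
        unsigned const tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT,
                                                      srcSize, maxSymbolValue);   /* step 2 */
        size_t const maxNbBits = HUF_buildCTable(cTable, count,
                                                 maxSymbolValue, tableLog);       /* step 3 */
        size_t hSize, cSize;
        if (HUF_isError(maxNbBits)) return maxNbBits;
        hSize = HUF_writeCTable(dst, dstCapacity, cTable,
                                maxSymbolValue, (unsigned)maxNbBits);             /* step 4 */
        if (HUF_isError(hSize)) return hSize;
        cSize = HUF_compress4X_usingCTable((char*)dst + hSize, dstCapacity - hSize,
                                           src, srcSize, cTable);                 /* step 5 */
        if (HUF_isError(cSize) || cSize == 0) return cSize;
        return hSize + cSize;
    }
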
@@ -194,40 +194,40 @@ size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* sr
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
-typedef enum {
- HUF_repeat_none, /**< Cannot use the previous table */
- HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
+typedef enum {
+ HUF_repeat_none, /**< Cannot use the previous table */
+ HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
- } HUF_repeat;
-/** HUF_compress4X_repeat() :
- * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
- * If it uses hufTable it does not modify hufTable or repeat.
- * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ } HUF_repeat;
+/** HUF_compress4X_repeat() :
+ * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
* If preferRepeat then the old table will always be used if valid.
* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
-size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned tableLog,
- void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-byte boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-byte boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
/** HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
- * `workSpace` must be aligned on 4-byte boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
+ * `workSpace` must be aligned on 4-byte boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
*/
-#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
-#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
+#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
size_t HUF_buildCTable_wksp (HUF_CElt* tree,
const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
void* workSpace, size_t wkspSize);
/*! HUF_readStats() :
- * Read compact Huffman tree, saved by HUF_writeCTable().
- * `huffWeight` is destination buffer.
- * @return : size read from `src`, or an error code.
- * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
-size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
- U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ * Read compact Huffman tree, saved by HUF_writeCTable().
+ * `huffWeight` is destination buffer.
+ * @return : size read from `src`, or an error code.
+ * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize);
/*! HUF_readStats_wksp() :
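
A usage sketch for the repeat mechanism above: across consecutive blocks, the previous CTable is reused whenever HUF_compress4X_repeat() finds it still valid, amortizing table construction. Block layout and names are illustrative; U64 comes from mem.h under HUF_STATIC_LINKING_ONLY:

    static void compressBlocks(unsigned char* dst, size_t dstCapacity,
                               const unsigned char* src, size_t blockSize, size_t nbBlocks)
    {
        HUF_CREATE_STATIC_CTABLE(hufTable, HUF_SYMBOLVALUE_MAX);
        HUF_repeat repeat = HUF_repeat_none;   /* nothing reusable yet */
        U64 wksp[HUF_WORKSPACE_SIZE_U64];      /* satisfies size + alignment requirements */
        size_t i;
        for (i = 0; i < nbBlocks; i++) {
            size_t const cSize = HUF_compress4X_repeat(
                    dst, dstCapacity, src + i * blockSize, blockSize,
                    HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                    wksp, sizeof(wksp),
                    hufTable, &repeat,
                    /* preferRepeat */ 0, /* bmi2 */ 0, /* suspectUncompressible */ 0);
            if (HUF_isError(cSize)) return;
            /* ... emit cSize bytes from dst before reusing it ... */
        }
    }
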
@@ -244,7 +244,7 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
int bmi2);
/** HUF_readCTable() :
- * Loading a CTable saved with HUF_writeCTable() */
+ * Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
/** HUF_getNbBitsFromCTable() :
@@ -253,39 +253,39 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
/*
- * HUF_decompress() does the following:
+ * HUF_decompress() does the following:
* 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
- * 2. build Huffman table from the saved one, using HUF_readDTableX?()
- * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
- */
+ * 2. build Huffman table from the saved one, using HUF_readDTableX?()
+ * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
+ */
/** HUF_selectDecoder() :
- * Tells which decoder is likely to decode faster,
- * based on a set of pre-computed metrics.
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
* @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
- * Assumption : 0 < dstSize <= 128 KB */
+ * Assumption : 0 < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
-/**
- * The minimum workspace size for the `workSpace` used in
+/**
+ * The minimum workspace size for the `workSpace` used in
* HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
- *
- * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
- * HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
- * Buffer overflow errors may potentially occur if code modifications result in
- * a required workspace size greater than that specified in the following
- * macro.
- */
+ *
+ * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
+ * HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
+ * Buffer overflow errors may potentially occur if code modifications result in
+ * a required workspace size greater than that specified in the following
+ * macro.
+ */
#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
-#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
-
+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
+
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
-size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
#endif
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
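
The three-step flow described above is bundled by HUF_decompress4X_hufOnly_wksp(): it runs HUF_selectDecoder(), builds the DTable in the caller's workspace, then decodes the four segments. A hedged sketch, assuming the X2 static-DTable helper (the larger of the two layouts) so either decoder fits:

    #define HUF_STATIC_LINKING_ONLY
    #include "huf.h"

    static size_t decodeHufBlock(void* dst, size_t dstSize,
                                 const void* cSrc, size_t cSrcSize)
    {
        HUF_CREATE_STATIC_DTABLEX2(dTable, HUF_TABLELOG_MAX);
        U32 wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
        /* internally: HUF_selectDecoder() -> HUF_readDTableX?_wksp() -> decode */
        return HUF_decompress4X_hufOnly_wksp(dTable, dst, dstSize,
                                             cSrc, cSrcSize, wksp, sizeof(wksp));
    }
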
@@ -297,24 +297,24 @@ size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* c
#endif
-/* ====================== */
+/* ====================== */
/* single stream variants */
-/* ====================== */
+/* ====================== */
size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
-/** HUF_compress1X_repeat() :
- * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
- * If it uses hufTable it does not modify hufTable or repeat.
- * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+/** HUF_compress1X_repeat() :
+ * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
* If preferRepeat then the old table will always be used if valid.
* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
-size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned tableLog,
- void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-byte boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-byte boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
@@ -323,7 +323,7 @@ size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS
#endif
size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
-size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
+size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
@@ -341,22 +341,22 @@ size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* c
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
#endif
-/* BMI2 variants.
- * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
- */
-size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+/* BMI2 variants.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
#endif
-size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
-size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
#endif
-
+
#endif /* HUF_STATIC_LINKING_ONLY */
#if defined (__cplusplus)
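
The bmi2 parameter threaded through the prototypes above is a plain int, so CPU detection is the caller's responsibility (inside zstd it comes from the cpu.h helper). A hedged sketch of one way to produce the flag, using GCC/Clang's __builtin_cpu_supports(); on other toolchains, 0 simply selects the portable code path:

    static int cpuHasBmi2(void)
    {
    #if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
        return __builtin_cpu_supports("bmi2");
    #else
        return 0;   /* conservative default: generic code path */
    #endif
    }
    /* e.g. HUF_decompress4X_usingDTable_bmi2(dst, cap, cSrc, cSize, dt, cpuHasBmi2()); */
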
diff --git a/contrib/libs/zstd/lib/common/mem.h b/contrib/libs/zstd/lib/common/mem.h
index 85581c3847..b46e47c00e 100644
--- a/contrib/libs/zstd/lib/common/mem.h
+++ b/contrib/libs/zstd/lib/common/mem.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef MEM_H_MODULE
@@ -50,15 +50,15 @@ extern "C" {
# else
# include <stdint.h> /* intptr_t */
# endif
- typedef uint8_t BYTE;
+ typedef uint8_t BYTE;
typedef uint8_t U8;
typedef int8_t S8;
- typedef uint16_t U16;
- typedef int16_t S16;
- typedef uint32_t U32;
- typedef int32_t S32;
- typedef uint64_t U64;
- typedef int64_t S64;
+ typedef uint16_t U16;
+ typedef int16_t S16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
@@ -138,11 +138,11 @@ MEM_STATIC size_t MEM_swapST(size_t in);
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 * It can generate buggy code on targets depending on alignment.
- * In some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6)
+ * In some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6)
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
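
For reference, Method 0 (the default) is just a fixed-size memcpy() that modern compilers collapse into a single load, with no alignment assumption and no strict-aliasing hazard; a sketch of the pattern (zstd's own Method-0 readers follow it via the library's memcpy wrapper):

    #include <string.h>

    /* Safe unaligned 32-bit read; the helper name is illustrative. */
    static unsigned read32_portable(const void* memPtr)
    {
        unsigned val;
        memcpy(&val, memPtr, sizeof(val));
        return val;
    }
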
@@ -182,7 +182,7 @@ Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
-MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
+MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
@@ -193,27 +193,27 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
- __pragma( pack(push, 1) )
- typedef struct { U16 v; } unalign16;
- typedef struct { U32 v; } unalign32;
- typedef struct { U64 v; } unalign64;
- typedef struct { size_t v; } unalignArch;
+ __pragma( pack(push, 1) )
+ typedef struct { U16 v; } unalign16;
+ typedef struct { U32 v; } unalign32;
+ typedef struct { U64 v; } unalign64;
+ typedef struct { size_t v; } unalignArch;
__pragma( pack(pop) )
#else
- typedef struct { U16 v; } __attribute__((packed)) unalign16;
- typedef struct { U32 v; } __attribute__((packed)) unalign32;
- typedef struct { U64 v; } __attribute__((packed)) unalign64;
- typedef struct { size_t v; } __attribute__((packed)) unalignArch;
+ typedef struct { U16 v; } __attribute__((packed)) unalign16;
+ typedef struct { U32 v; } __attribute__((packed)) unalign32;
+ typedef struct { U64 v; } __attribute__((packed)) unalign64;
+ typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif
-MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
-MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
-MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
-MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
+MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
-MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
-MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
#else
diff --git a/contrib/libs/zstd/lib/common/pool.c b/contrib/libs/zstd/lib/common/pool.c
index 2e37cdd73c..7591a5f84c 100644
--- a/contrib/libs/zstd/lib/common/pool.c
+++ b/contrib/libs/zstd/lib/common/pool.c
@@ -1,76 +1,76 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/* ====== Dependencies ======= */
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ====== Dependencies ======= */
#include "zstd_deps.h" /* size_t */
#include "debug.h" /* assert */
#include "zstd_internal.h" /* ZSTD_customMalloc, ZSTD_customFree */
-#include "pool.h"
-
-/* ====== Compiler specifics ====== */
-#if defined(_MSC_VER)
-# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
-#endif
-
-
-#ifdef ZSTD_MULTITHREAD
-
-#include "threading.h" /* pthread adaptation */
-
-/* A job is a function and an opaque argument */
-typedef struct POOL_job_s {
- POOL_function function;
- void *opaque;
-} POOL_job;
-
-struct POOL_ctx_s {
- ZSTD_customMem customMem;
- /* Keep track of the threads */
+#include "pool.h"
+
+/* ====== Compiler specifics ====== */
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+#ifdef ZSTD_MULTITHREAD
+
+#include "threading.h" /* pthread adaptation */
+
+/* A job is a function and an opaque argument */
+typedef struct POOL_job_s {
+ POOL_function function;
+ void *opaque;
+} POOL_job;
+
+struct POOL_ctx_s {
+ ZSTD_customMem customMem;
+ /* Keep track of the threads */
ZSTD_pthread_t* threads;
size_t threadCapacity;
size_t threadLimit;
-
- /* The queue is a circular buffer */
- POOL_job *queue;
- size_t queueHead;
- size_t queueTail;
- size_t queueSize;
-
- /* The number of threads working on jobs */
- size_t numThreadsBusy;
- /* Indicates if the queue is empty */
- int queueEmpty;
-
- /* The mutex protects the queue */
- ZSTD_pthread_mutex_t queueMutex;
- /* Condition variable for pushers to wait on when the queue is full */
- ZSTD_pthread_cond_t queuePushCond;
- /* Condition variables for poppers to wait on when the queue is empty */
- ZSTD_pthread_cond_t queuePopCond;
- /* Indicates if the queue is shutting down */
- int shutdown;
-};
-
-/* POOL_thread() :
+
+ /* The queue is a circular buffer */
+ POOL_job *queue;
+ size_t queueHead;
+ size_t queueTail;
+ size_t queueSize;
+
+ /* The number of threads working on jobs */
+ size_t numThreadsBusy;
+ /* Indicates if the queue is empty */
+ int queueEmpty;
+
+ /* The mutex protects the queue */
+ ZSTD_pthread_mutex_t queueMutex;
+ /* Condition variable for pushers to wait on when the queue is full */
+ ZSTD_pthread_cond_t queuePushCond;
+ /* Condition variables for poppers to wait on when the queue is empty */
+ ZSTD_pthread_cond_t queuePopCond;
+ /* Indicates if the queue is shutting down */
+ int shutdown;
+};
+
+/* POOL_thread() :
* Work thread for the thread pool.
* Waits for jobs and executes them.
* @returns : NULL on failure else non-null.
*/
-static void* POOL_thread(void* opaque) {
- POOL_ctx* const ctx = (POOL_ctx*)opaque;
- if (!ctx) { return NULL; }
- for (;;) {
- /* Lock the mutex and wait for a non-empty queue or until shutdown */
- ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-
+static void* POOL_thread(void* opaque) {
+ POOL_ctx* const ctx = (POOL_ctx*)opaque;
+ if (!ctx) { return NULL; }
+ for (;;) {
+ /* Lock the mutex and wait for a non-empty queue or until shutdown */
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+
while ( ctx->queueEmpty
|| (ctx->numThreadsBusy >= ctx->threadLimit) ) {
if (ctx->shutdown) {
@@ -80,59 +80,59 @@ static void* POOL_thread(void* opaque) {
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
return opaque;
}
- ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
- }
- /* Pop a job off the queue */
- { POOL_job const job = ctx->queue[ctx->queueHead];
- ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
- ctx->numThreadsBusy++;
+ ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
+ }
+ /* Pop a job off the queue */
+ { POOL_job const job = ctx->queue[ctx->queueHead];
+ ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
+ ctx->numThreadsBusy++;
ctx->queueEmpty = (ctx->queueHead == ctx->queueTail);
- /* Unlock the mutex, signal a pusher, and run the job */
+ /* Unlock the mutex, signal a pusher, and run the job */
ZSTD_pthread_cond_signal(&ctx->queuePushCond);
- ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-
- job.function(job.opaque);
-
- /* If the intended queue size was 0, signal after finishing job */
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+
+ job.function(job.opaque);
+
+ /* If the intended queue size was 0, signal after finishing job */
ZSTD_pthread_mutex_lock(&ctx->queueMutex);
ctx->numThreadsBusy--;
- if (ctx->queueSize == 1) {
- ZSTD_pthread_cond_signal(&ctx->queuePushCond);
+ if (ctx->queueSize == 1) {
+ ZSTD_pthread_cond_signal(&ctx->queuePushCond);
}
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}
- } /* for (;;) */
+ } /* for (;;) */
assert(0); /* Unreachable */
-}
-
+}
+
/* ZSTD_createThreadPool() : public access point */
POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
return POOL_create (numThreads, 0);
}
-POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
- return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
-}
-
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
+ return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
ZSTD_customMem customMem)
{
- POOL_ctx* ctx;
+ POOL_ctx* ctx;
/* Check parameters */
- if (!numThreads) { return NULL; }
- /* Allocate the context and zero initialize */
+ if (!numThreads) { return NULL; }
+ /* Allocate the context and zero initialize */
ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
- if (!ctx) { return NULL; }
- /* Initialize the job queue.
+ if (!ctx) { return NULL; }
+ /* Initialize the job queue.
* It needs one extra space since one space is wasted to differentiate
* empty and full queues.
- */
- ctx->queueSize = queueSize + 1;
+ */
+ ctx->queueSize = queueSize + 1;
ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem);
- ctx->queueHead = 0;
- ctx->queueTail = 0;
- ctx->numThreadsBusy = 0;
- ctx->queueEmpty = 1;
+ ctx->queueHead = 0;
+ ctx->queueTail = 0;
+ ctx->numThreadsBusy = 0;
+ ctx->queueEmpty = 1;
{
int error = 0;
error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
@@ -140,67 +140,67 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
if (error) { POOL_free(ctx); return NULL; }
}
- ctx->shutdown = 0;
- /* Allocate space for the thread handles */
+ ctx->shutdown = 0;
+ /* Allocate space for the thread handles */
ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
ctx->threadCapacity = 0;
- ctx->customMem = customMem;
- /* Check for errors */
- if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
- /* Initialize the threads */
- { size_t i;
- for (i = 0; i < numThreads; ++i) {
- if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
+ ctx->customMem = customMem;
+ /* Check for errors */
+ if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
+ /* Initialize the threads */
+ { size_t i;
+ for (i = 0; i < numThreads; ++i) {
+ if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
ctx->threadCapacity = i;
- POOL_free(ctx);
- return NULL;
- } }
+ POOL_free(ctx);
+ return NULL;
+ } }
ctx->threadCapacity = numThreads;
ctx->threadLimit = numThreads;
- }
- return ctx;
-}
-
-/*! POOL_join() :
- Shut down the queue, wake any sleeping threads, and join all of the threads.
-*/
-static void POOL_join(POOL_ctx* ctx) {
- /* Shut down the queue */
- ZSTD_pthread_mutex_lock(&ctx->queueMutex);
- ctx->shutdown = 1;
- ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
- /* Wake up sleeping threads */
- ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
- ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
- /* Join all of the threads */
- { size_t i;
+ }
+ return ctx;
+}
+
+/*! POOL_join() :
+ Shut down the queue, wake any sleeping threads, and join all of the threads.
+*/
+static void POOL_join(POOL_ctx* ctx) {
+ /* Shut down the queue */
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+ ctx->shutdown = 1;
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ /* Wake up sleeping threads */
+ ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
+ ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
+ /* Join all of the threads */
+ { size_t i;
for (i = 0; i < ctx->threadCapacity; ++i) {
ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */
- } }
-}
-
-void POOL_free(POOL_ctx *ctx) {
- if (!ctx) { return; }
- POOL_join(ctx);
- ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
- ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
- ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
+ } }
+}
+
+void POOL_free(POOL_ctx *ctx) {
+ if (!ctx) { return; }
+ POOL_join(ctx);
+ ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
+ ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
+ ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
ZSTD_customFree(ctx->queue, ctx->customMem);
ZSTD_customFree(ctx->threads, ctx->customMem);
ZSTD_customFree(ctx, ctx->customMem);
-}
-
+}
+
void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
POOL_free (pool);
}
size_t POOL_sizeof(const POOL_ctx* ctx) {
- if (ctx==NULL) return 0; /* supports sizeof NULL */
- return sizeof(*ctx)
- + ctx->queueSize * sizeof(POOL_job)
+ if (ctx==NULL) return 0; /* supports sizeof NULL */
+ return sizeof(*ctx)
+ + ctx->queueSize * sizeof(POOL_job)
+ ctx->threadCapacity * sizeof(ZSTD_pthread_t);
-}
-
+}
+
/* @return : 0 on success, 1 on error */
static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
@@ -243,113 +243,113 @@ int POOL_resize(POOL_ctx* ctx, size_t numThreads)
return result;
}
-/**
- * Returns 1 if the queue is full and 0 otherwise.
- *
+/**
+ * Returns 1 if the queue is full and 0 otherwise.
+ *
* When queueSize is 1 (pool was created with an intended queueSize of 0),
* then a queue is empty if there is a thread free _and_ no job is waiting.
- */
-static int isQueueFull(POOL_ctx const* ctx) {
- if (ctx->queueSize > 1) {
- return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
- } else {
+ */
+static int isQueueFull(POOL_ctx const* ctx) {
+ if (ctx->queueSize > 1) {
+ return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
+ } else {
return (ctx->numThreadsBusy == ctx->threadLimit) ||
- !ctx->queueEmpty;
- }
-}
-
-
+ !ctx->queueEmpty;
+ }
+}
+
+
static void
POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
-{
- POOL_job const job = {function, opaque};
- assert(ctx != NULL);
- if (ctx->shutdown) return;
-
- ctx->queueEmpty = 0;
- ctx->queue[ctx->queueTail] = job;
- ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
- ZSTD_pthread_cond_signal(&ctx->queuePopCond);
-}
-
-void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
-{
- assert(ctx != NULL);
- ZSTD_pthread_mutex_lock(&ctx->queueMutex);
- /* Wait until there is space in the queue for the new job */
- while (isQueueFull(ctx) && (!ctx->shutdown)) {
- ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
- }
- POOL_add_internal(ctx, function, opaque);
- ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-}
-
-
-int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
-{
- assert(ctx != NULL);
- ZSTD_pthread_mutex_lock(&ctx->queueMutex);
- if (isQueueFull(ctx)) {
- ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
- return 0;
- }
- POOL_add_internal(ctx, function, opaque);
- ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
- return 1;
-}
-
-
-#else /* ZSTD_MULTITHREAD not defined */
-
-/* ========================== */
-/* No multi-threading support */
-/* ========================== */
-
-
-/* We don't need any data, but if it is empty, malloc() might return NULL. */
-struct POOL_ctx_s {
- int dummy;
-};
+{
+ POOL_job const job = {function, opaque};
+ assert(ctx != NULL);
+ if (ctx->shutdown) return;
+
+ ctx->queueEmpty = 0;
+ ctx->queue[ctx->queueTail] = job;
+ ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
+ ZSTD_pthread_cond_signal(&ctx->queuePopCond);
+}
+
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
+{
+ assert(ctx != NULL);
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+ /* Wait until there is space in the queue for the new job */
+ while (isQueueFull(ctx) && (!ctx->shutdown)) {
+ ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+ }
+ POOL_add_internal(ctx, function, opaque);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+}
+
+
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
+{
+ assert(ctx != NULL);
+ ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+ if (isQueueFull(ctx)) {
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ return 0;
+ }
+ POOL_add_internal(ctx, function, opaque);
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ return 1;
+}
+
+
+#else /* ZSTD_MULTITHREAD not defined */
+
+/* ========================== */
+/* No multi-threading support */
+/* ========================== */
+
+
+/* We don't need any data, but if it is empty, malloc() might return NULL. */
+struct POOL_ctx_s {
+ int dummy;
+};
static POOL_ctx g_poolCtx;
-
-POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
- return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
-}
-
+
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
+ return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
POOL_ctx*
POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
{
- (void)numThreads;
- (void)queueSize;
- (void)customMem;
+ (void)numThreads;
+ (void)queueSize;
+ (void)customMem;
return &g_poolCtx;
-}
-
-void POOL_free(POOL_ctx* ctx) {
+}
+
+void POOL_free(POOL_ctx* ctx) {
assert(!ctx || ctx == &g_poolCtx);
- (void)ctx;
-}
-
+ (void)ctx;
+}
+
int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
(void)ctx; (void)numThreads;
return 0;
}
-void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
- (void)ctx;
- function(opaque);
-}
-
-int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
- (void)ctx;
- function(opaque);
- return 1;
-}
-
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
+ (void)ctx;
+ function(opaque);
+}
+
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
+ (void)ctx;
+ function(opaque);
+ return 1;
+}
+
size_t POOL_sizeof(const POOL_ctx* ctx) {
- if (ctx==NULL) return 0; /* supports sizeof NULL */
+ if (ctx==NULL) return 0; /* supports sizeof NULL */
assert(ctx == &g_poolCtx);
- return sizeof(*ctx);
-}
-
-#endif /* ZSTD_MULTITHREAD */
+ return sizeof(*ctx);
+}
+
+#endif /* ZSTD_MULTITHREAD */
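
The job queue in this file is a circular buffer that allocates one slot more than requested, so that head == tail is unambiguous. A standalone sketch of the same invariants (the helper type and functions are illustrative, not part of pool.c):

    #include <stddef.h>

    /* With a requested capacity N, allocate size = N + 1 slots:
     *   empty  <=>  head == tail
     *   full   <=>  head == (tail + 1) % size
     * so the two states can never be confused. */
    typedef struct { size_t head, tail, size; } RingIdx;

    static int  ringEmpty(const RingIdx* r) { return r->head == r->tail; }
    static int  ringFull (const RingIdx* r) { return r->head == (r->tail + 1) % r->size; }
    static void ringPush (RingIdx* r) { r->tail = (r->tail + 1) % r->size; } /* after writing slot[tail] */
    static void ringPop  (RingIdx* r) { r->head = (r->head + 1) % r->size; } /* after reading slot[head] */
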
diff --git a/contrib/libs/zstd/lib/common/pool.h b/contrib/libs/zstd/lib/common/pool.h
index 0ebde1805d..67feb6da6d 100644
--- a/contrib/libs/zstd/lib/common/pool.h
+++ b/contrib/libs/zstd/lib/common/pool.h
@@ -1,43 +1,43 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef POOL_H
-#define POOL_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef POOL_H
+#define POOL_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
#include "zstd_deps.h"
-#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */
+#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */
#include "../zstd.h"
-
-typedef struct POOL_ctx_s POOL_ctx;
-
-/*! POOL_create() :
- * Create a thread pool with at most `numThreads` threads.
- * `numThreads` must be at least 1.
- * The maximum number of queued jobs before blocking is `queueSize`.
- * @return : POOL_ctx pointer on success, else NULL.
-*/
-POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
-
+
+typedef struct POOL_ctx_s POOL_ctx;
+
+/*! POOL_create() :
+ * Create a thread pool with at most `numThreads` threads.
+ * `numThreads` must be at least 1.
+ * The maximum number of queued jobs before blocking is `queueSize`.
+ * @return : POOL_ctx pointer on success, else NULL.
+*/
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
+
POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
ZSTD_customMem customMem);
-
-/*! POOL_free() :
+
+/*! POOL_free() :
* Free a thread pool returned by POOL_create().
*/
-void POOL_free(POOL_ctx* ctx);
-
+void POOL_free(POOL_ctx* ctx);
+
/*! POOL_resize() :
* Expands or shrinks pool's number of threads.
* This is more efficient than releasing + creating a new context,
@@ -49,36 +49,36 @@ void POOL_free(POOL_ctx* ctx);
*/
int POOL_resize(POOL_ctx* ctx, size_t numThreads);
-/*! POOL_sizeof() :
+/*! POOL_sizeof() :
* @return threadpool memory usage
* note : compatible with NULL (returns 0 in this case)
*/
size_t POOL_sizeof(const POOL_ctx* ctx);
-
-/*! POOL_function :
+
+/*! POOL_function :
* The function type that can be added to a thread pool.
*/
-typedef void (*POOL_function)(void*);
-
-/*! POOL_add() :
+typedef void (*POOL_function)(void*);
+
+/*! POOL_add() :
* Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
* Possibly blocks until there is room in the queue.
* Note : The function may be executed asynchronously,
* therefore, `opaque` must live until function has been completed.
*/
-void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
-
-
-/*! POOL_tryAdd() :
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
+
+
+/*! POOL_tryAdd() :
* Add the job `function(opaque)` to thread pool _if_ a queue slot is available.
* Returns immediately even if not (does not block).
* @return : 1 if successful, 0 if not.
*/
-int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
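
A hedged usage sketch for the pool API above; the payload struct and function names are illustrative. With ZSTD_MULTITHREAD undefined, the same code still works: POOL_add() then simply runs each job synchronously:

    #include "pool.h"

    typedef struct { int id; } Task;   /* illustrative payload */

    static void runTask(void* opaque)
    {
        Task* const t = (Task*)opaque; /* `opaque` must stay alive until the job completes */
        (void)t;                       /* ... do the actual work for t->id here ... */
    }

    static void poolExample(void)
    {
        Task tasks[8];
        POOL_ctx* const pool = POOL_create(4 /* threads */, 8 /* queue slots */);
        int i;
        if (pool == NULL) return;
        for (i = 0; i < 8; i++) {
            tasks[i].id = i;
            POOL_add(pool, runTask, &tasks[i]);  /* may block while the queue is full */
        }
        POOL_free(pool);  /* drains queued jobs, then joins the threads */
    }
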
diff --git a/contrib/libs/zstd/lib/common/threading.c b/contrib/libs/zstd/lib/common/threading.c
index 92cf57c195..c0c2adb937 100644
--- a/contrib/libs/zstd/lib/common/threading.c
+++ b/contrib/libs/zstd/lib/common/threading.c
@@ -1,80 +1,80 @@
-/**
- * Copyright (c) 2016 Tino Reichardt
- * All rights reserved.
- *
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
* You can contact the author at:
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
- */
-
-/**
- * This file holds wrappers for systems which do not support pthreads
- */
-
+ */
+
+/**
+ * This file holds wrappers for systems which do not support pthreads
+ */
+
#include "threading.h"
/* create fake symbol to avoid empty translation unit warning */
int g_ZSTD_threading_useless_symbol;
-
-#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
-
-/**
- * Windows minimalist Pthread Wrapper, based on :
- * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
- */
-
-
-/* === Dependencies === */
-#include <process.h>
-#include <errno.h>
-
-
-/* === Implementation === */
-
-static unsigned __stdcall worker(void *arg)
-{
- ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
- thread->arg = thread->start_routine(thread->arg);
- return 0;
-}
-
-int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
- void* (*start_routine) (void*), void* arg)
-{
- (void)unused;
- thread->arg = arg;
- thread->start_routine = start_routine;
- thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);
-
- if (!thread->handle)
- return errno;
- else
- return 0;
-}
-
-int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
-{
- DWORD result;
-
- if (!thread.handle) return 0;
-
- result = WaitForSingleObject(thread.handle, INFINITE);
- switch (result) {
- case WAIT_OBJECT_0:
- if (value_ptr) *value_ptr = thread.arg;
- return 0;
- case WAIT_ABANDONED:
- return EINVAL;
- default:
- return GetLastError();
- }
-}
-
-#endif /* ZSTD_MULTITHREAD */
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+
+
+/* === Dependencies === */
+#include <process.h>
+#include <errno.h>
+
+
+/* === Implementation === */
+
+static unsigned __stdcall worker(void *arg)
+{
+ ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
+ thread->arg = thread->start_routine(thread->arg);
+ return 0;
+}
+
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
+ void* (*start_routine) (void*), void* arg)
+{
+ (void)unused;
+ thread->arg = arg;
+ thread->start_routine = start_routine;
+ thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);
+
+ if (!thread->handle)
+ return errno;
+ else
+ return 0;
+}
+
+int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
+{
+ DWORD result;
+
+ if (!thread.handle) return 0;
+
+ result = WaitForSingleObject(thread.handle, INFINITE);
+ switch (result) {
+ case WAIT_OBJECT_0:
+ if (value_ptr) *value_ptr = thread.arg;
+ return 0;
+ case WAIT_ABANDONED:
+ return EINVAL;
+ default:
+ return GetLastError();
+ }
+}
+
+#endif /* ZSTD_MULTITHREAD */
#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)
diff --git a/contrib/libs/zstd/lib/common/threading.h b/contrib/libs/zstd/lib/common/threading.h
index fd0060d5aa..c5e303eedc 100644
--- a/contrib/libs/zstd/lib/common/threading.h
+++ b/contrib/libs/zstd/lib/common/threading.h
@@ -1,106 +1,106 @@
-/**
- * Copyright (c) 2016 Tino Reichardt
- * All rights reserved.
- *
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
* You can contact the author at:
* - zstdmt source repository: https://github.com/mcmilk/zstdmt
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef THREADING_H_938743
-#define THREADING_H_938743
-
+ */
+
+#ifndef THREADING_H_938743
+#define THREADING_H_938743
+
#include "debug.h"
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
-
-/**
- * Windows minimalist Pthread Wrapper, based on :
- * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
- */
-#ifdef WINVER
-# undef WINVER
-#endif
-#define WINVER 0x0600
-
-#ifdef _WIN32_WINNT
-# undef _WIN32_WINNT
-#endif
-#define _WIN32_WINNT 0x0600
-
-#ifndef WIN32_LEAN_AND_MEAN
-# define WIN32_LEAN_AND_MEAN
-#endif
-
-#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
-#include <windows.h>
-#undef ERROR
-#define ERROR(name) ZSTD_ERROR(name)
-
-
-/* mutex */
-#define ZSTD_pthread_mutex_t CRITICAL_SECTION
-#define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0)
-#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a))
-#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a))
-#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a))
-
-/* condition variable */
-#define ZSTD_pthread_cond_t CONDITION_VARIABLE
-#define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0)
-#define ZSTD_pthread_cond_destroy(a) ((void)(a))
-#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE)
-#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a))
-#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a))
-
-/* ZSTD_pthread_create() and ZSTD_pthread_join() */
-typedef struct {
- HANDLE handle;
- void* (*start_routine)(void*);
- void* arg;
-} ZSTD_pthread_t;
-
-int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
- void* (*start_routine) (void*), void* arg);
-
-int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
-
-/**
- * add here more wrappers as required
- */
-
-
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+#ifdef WINVER
+# undef WINVER
+#endif
+#define WINVER 0x0600
+
+#ifdef _WIN32_WINNT
+# undef _WIN32_WINNT
+#endif
+#define _WIN32_WINNT 0x0600
+
+#ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+#endif
+
+#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */
+#include <windows.h>
+#undef ERROR
+#define ERROR(name) ZSTD_ERROR(name)
+
+
+/* mutex */
+#define ZSTD_pthread_mutex_t CRITICAL_SECTION
+#define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0)
+#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a))
+#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a))
+#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a))
+
+/* condition variable */
+#define ZSTD_pthread_cond_t CONDITION_VARIABLE
+#define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0)
+#define ZSTD_pthread_cond_destroy(a) ((void)(a))
+#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE)
+#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a))
+#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a))
+
+/* ZSTD_pthread_create() and ZSTD_pthread_join() */
+typedef struct {
+ HANDLE handle;
+ void* (*start_routine)(void*);
+ void* arg;
+} ZSTD_pthread_t;
+
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
+ void* (*start_routine) (void*), void* arg);
+
+int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
+
+/**
+ * add here more wrappers as required
+ */
+
+
#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
-/* === POSIX Systems === */
-# include <pthread.h>
-
+/* === POSIX Systems === */
+# include <pthread.h>
+
#if DEBUGLEVEL < 1
-#define ZSTD_pthread_mutex_t pthread_mutex_t
-#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b))
-#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a))
-#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a))
-#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a))
-
-#define ZSTD_pthread_cond_t pthread_cond_t
-#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b))
-#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a))
-#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b))
-#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a))
-#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a))
-
-#define ZSTD_pthread_t pthread_t
-#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
-
+#define ZSTD_pthread_mutex_t pthread_mutex_t
+#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b))
+#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a))
+#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a))
+#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a))
+
+#define ZSTD_pthread_cond_t pthread_cond_t
+#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b))
+#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a))
+#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b))
+#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a))
+#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a))
+
+#define ZSTD_pthread_t pthread_t
+#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
+#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
+
#else /* DEBUGLEVEL >= 1 */
/* Debug implementation of threading.
@@ -128,28 +128,28 @@ int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
#endif
-#else /* ZSTD_MULTITHREAD not defined */
-/* No multithreading support */
-
-typedef int ZSTD_pthread_mutex_t;
-#define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0)
-#define ZSTD_pthread_mutex_destroy(a) ((void)(a))
-#define ZSTD_pthread_mutex_lock(a) ((void)(a))
-#define ZSTD_pthread_mutex_unlock(a) ((void)(a))
-
-typedef int ZSTD_pthread_cond_t;
-#define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0)
-#define ZSTD_pthread_cond_destroy(a) ((void)(a))
-#define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b))
-#define ZSTD_pthread_cond_signal(a) ((void)(a))
-#define ZSTD_pthread_cond_broadcast(a) ((void)(a))
-
-/* do not use ZSTD_pthread_t */
-
-#endif /* ZSTD_MULTITHREAD */
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* THREADING_H_938743 */
+#else /* ZSTD_MULTITHREAD not defined */
+/* No multithreading support */
+
+typedef int ZSTD_pthread_mutex_t;
+#define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0)
+#define ZSTD_pthread_mutex_destroy(a) ((void)(a))
+#define ZSTD_pthread_mutex_lock(a) ((void)(a))
+#define ZSTD_pthread_mutex_unlock(a) ((void)(a))
+
+typedef int ZSTD_pthread_cond_t;
+#define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0)
+#define ZSTD_pthread_cond_destroy(a) ((void)(a))
+#define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b))
+#define ZSTD_pthread_cond_signal(a) ((void)(a))
+#define ZSTD_pthread_cond_broadcast(a) ((void)(a))
+
+/* do not use ZSTD_pthread_t */
+
+#endif /* ZSTD_MULTITHREAD */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* THREADING_H_938743 */
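The header keeps three interchangeable backends behind one macro surface: the Win32 branch, the POSIX passthrough, and a single-threaded fallback where every primitive expands to a cast-to-void no-op. A sketch of what that buys call sites, again assuming threading.h is reachable; shared_counter_t is an illustrative type, not from zstd:

    #include "threading.h"          /* include path assumed */

    typedef struct {
        ZSTD_pthread_mutex_t lock;
        unsigned hits;
    } shared_counter_t;

    static void counter_init(shared_counter_t* c)
    {
        (void)ZSTD_pthread_mutex_init(&c->lock, NULL);  /* 0 on success in all modes */
        c->hits = 0;
    }

    static void counter_bump(shared_counter_t* c)
    {
        ZSTD_pthread_mutex_lock(&c->lock);    /* no-op when ZSTD_MULTITHREAD is off */
        c->hits++;
        ZSTD_pthread_mutex_unlock(&c->lock);
    }

    static void counter_destroy(shared_counter_t* c)
    {
        ZSTD_pthread_mutex_destroy(&c->lock);
    }

Single-threaded builds therefore pay no locking cost and pull in no pthread dependency, while the calling code stays identical across all three modes.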
diff --git a/contrib/libs/zstd/lib/common/zstd_common.c b/contrib/libs/zstd/lib/common/zstd_common.c
index 3d7e35b309..ea4e288daa 100644
--- a/contrib/libs/zstd/lib/common/zstd_common.c
+++ b/contrib/libs/zstd/lib/common/zstd_common.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -16,17 +16,17 @@
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
#include "error_private.h"
-#include "zstd_internal.h"
+#include "zstd_internal.h"
/*-****************************************
* Version
******************************************/
-unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
-
-const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
+unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
+const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
+
/*-****************************************
* ZSTD Error Management
******************************************/
@@ -37,16 +37,16 @@ const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
/*! ZSTD_getErrorName() :
- * provides error code string from function result (useful for debugging) */
+ * provides error code string from function result (useful for debugging) */
const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
/*! ZSTD_getError() :
- * convert a `size_t` function result into a proper ZSTD_errorCode enum */
+ * convert a `size_t` function result into a proper ZSTD_errorCode enum */
ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
/*! ZSTD_getErrorString() :
- * provides error code string from enum */
-const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
+ * provides error code string from enum */
+const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
@@ -55,29 +55,29 @@ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString
****************************************************************/
void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
{
- if (customMem.customAlloc)
- return customMem.customAlloc(customMem.opaque, size);
+ if (customMem.customAlloc)
+ return customMem.customAlloc(customMem.opaque, size);
return ZSTD_malloc(size);
}
void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
{
- if (customMem.customAlloc) {
- /* calloc implemented as malloc+memset;
- * not as efficient as calloc, but next best guess for custom malloc */
- void* const ptr = customMem.customAlloc(customMem.opaque, size);
+ if (customMem.customAlloc) {
+ /* calloc implemented as malloc+memset;
+ * not as efficient as calloc, but next best guess for custom malloc */
+ void* const ptr = customMem.customAlloc(customMem.opaque, size);
ZSTD_memset(ptr, 0, size);
- return ptr;
- }
+ return ptr;
+ }
return ZSTD_calloc(1, size);
}
void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
{
- if (ptr!=NULL) {
- if (customMem.customFree)
- customMem.customFree(customMem.opaque, ptr);
- else
+ if (ptr!=NULL) {
+ if (customMem.customFree)
+ customMem.customFree(customMem.opaque, ptr);
+ else
ZSTD_free(ptr);
- }
+ }
}
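These three functions are the single dispatch point for all allocation in the library: a user-supplied ZSTD_customMem wins when present, otherwise the ZSTD_malloc/ZSTD_calloc/ZSTD_free defaults apply, and calloc is emulated as alloc+memset because a custom allocator supplies only one entry point. A sketch of plugging in a counting allocator through zstd's static-linking-only API; track_t and the function names are illustrative:

    #define ZSTD_STATIC_LINKING_ONLY    /* exposes ZSTD_customMem and _advanced ctors */
    #include <zstd.h>
    #include <stdlib.h>

    typedef struct { size_t nAllocs; } track_t;

    static void* trackedAlloc(void* opaque, size_t size)
    {
        ((track_t*)opaque)->nAllocs++;
        return malloc(size);
    }

    static void trackedFree(void* opaque, void* address)
    {
        (void)opaque;
        free(address);
    }

    int tracked_cctx_demo(void)
    {
        track_t stats = { 0 };
        ZSTD_customMem const cmem = { trackedAlloc, trackedFree, &stats };
        ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
        if (cctx == NULL) return 1;
        ZSTD_freeCCtx(cctx);
        return 0;       /* stats.nAllocs now counts every internal allocation */
    }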
diff --git a/contrib/libs/zstd/lib/common/zstd_internal.h b/contrib/libs/zstd/lib/common/zstd_internal.h
index 1dee37cdbe..e149b40943 100644
--- a/contrib/libs/zstd/lib/common/zstd_internal.h
+++ b/contrib/libs/zstd/lib/common/zstd_internal.h
@@ -1,38 +1,38 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE
-/* this module contains definitions which must be identical
- * across compression, decompression and dictBuilder.
- * It also contains a few functions useful to at least 2 of them
- * and which benefit from being inlined */
+/* this module contains definitions which must be identical
+ * across compression, decompression and dictBuilder.
+ * It also contains a few functions useful to at least 2 of them
+ * and which benefit from being inlined */
/*-*************************************
* Dependencies
***************************************/
-#include "compiler.h"
+#include "compiler.h"
#include "cpu.h"
#include "mem.h"
#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "../zstd.h"
-#define FSE_STATIC_LINKING_ONLY
-#include "fse.h"
-#define HUF_STATIC_LINKING_ONLY
-#include "huf.h"
-#ifndef XXH_STATIC_LINKING_ONLY
-# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
-#endif
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
+#endif
#include <contrib/libs/xxhash/xxhash.h> /* XXH_reset, update, digest */
#ifndef ZSTD_NO_TRACE
# include "zstd_trace.h"
@@ -40,22 +40,22 @@
# define ZSTD_TRACE 0
#endif
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
#define ZSTD_isError ERR_isError /* for inlining */
#define FSE_isError ERR_isError
#define HUF_isError ERR_isError
-
-
-/*-*************************************
+
+
+/*-*************************************
* shared macros
***************************************/
-#undef MIN
-#undef MAX
+#undef MIN
+#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))
@@ -85,7 +85,7 @@ static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
#define ZSTD_FRAMEIDSIZE 4 /* magic number size */
-
+
 #define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be initialized using another `static const` variable */
static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
@@ -104,15 +104,15 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
#define Litbits 8
#define MaxLit ((1<<Litbits) - 1)
-#define MaxML 52
-#define MaxLL 35
-#define DefaultMaxOff 28
-#define MaxOff 31
+#define MaxML 52
+#define MaxLL 35
+#define DefaultMaxOff 28
+#define MaxOff 31
#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog 9
#define LLFSELog 9
#define OffFSELog 8
-#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
+#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
@@ -281,7 +281,7 @@ typedef enum {
/*-*******************************************
-* Private declarations
+* Private declarations
*********************************************/
typedef struct seqDef_s {
U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
@@ -351,8 +351,8 @@ typedef struct {
unsigned long long decompressedBound;
} ZSTD_frameSizeInfo; /* decompress & legacy */
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
-void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
/* custom memory allocation functions */
void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
@@ -360,10 +360,10 @@ void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
-MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
+MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
{
- assert(val != 0);
- {
+ assert(val != 0);
+ {
# if defined(_MSC_VER) /* Visual */
# if STATIC_BMI2 == 1
return _lzcnt_u32(val)^31;
@@ -382,16 +382,16 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus
# elif defined(__ICCARM__) /* IAR Intrinsic */
return 31 - __CLZ(val);
# else /* Software version */
- static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
- U32 v = val;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
+ static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
# endif
- }
+ }
}
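The software branch of ZSTD_highbit32 above is the classic De Bruijn log2: the OR-shift cascade fills every bit below the highest set bit, and multiplying by 0x07C4ACDD then keeping the top 5 bits yields a distinct table index per leading-bit position. A standalone cross-check against a naive loop, not part of the zstd sources:

    #include <assert.h>

    static unsigned highbit32_debruijn(unsigned val)
    {
        static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
            11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7,
            19, 27, 23, 6, 26, 5, 4, 31 };
        unsigned v = val;
        v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
    }

    static unsigned highbit32_naive(unsigned val)
    {
        unsigned n = 0;
        while (val >>= 1) n++;
        return n;
    }

    int main(void)
    {
        unsigned i;
        for (i = 1; i != 0; i <<= 1)            /* every single-bit input */
            assert(highbit32_debruijn(i) == highbit32_naive(i));
        assert(highbit32_debruijn(0x12345678u) == 28);
        return 0;   /* input 0 is excluded, matching assert(val != 0) above */
    }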
/**
@@ -452,25 +452,25 @@ MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val)
}
-/* ZSTD_invalidateRepCodes() :
- * ensures next compression will not use repcodes from previous block.
- * Note : only works with regular variant;
- * do not use with extDict variant ! */
-void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
-
-
-typedef struct {
- blockType_e blockType;
- U32 lastBlock;
- U32 origSize;
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
+
+
+typedef struct {
+ blockType_e blockType;
+ U32 lastBlock;
+ U32 origSize;
} blockProperties_t; /* declared here for decompress and fullbench */
-
-/*! ZSTD_getcBlockSize() :
- * Provides the size of compressed block from block header `src` */
-/* Used by: decompress, fullbench (does not get its definition from here) */
-size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
- blockProperties_t* bpPtr);
-
+
+/*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr);
+
/*! ZSTD_decodeSeqHeaders() :
* decode sequence header from src */
/* Used by: decompress, fullbench (does not get its definition from here) */
@@ -486,8 +486,8 @@ MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
}
-#if defined (__cplusplus)
-}
-#endif
-
+#if defined (__cplusplus)
+}
+#endif
+
#endif /* ZSTD_CCOMMON_H_MODULE */
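For context on blockProperties_t and ZSTD_getcBlockSize(): per the zstd frame format (RFC 8878), a block header is exactly ZSTD_BLOCKHEADERSIZE = 3 bytes, packing 1 bit Last_Block, 2 bits Block_Type and 21 bits Block_Size, little-endian. A sketch of the extraction, with an illustrative struct standing in for blockProperties_t; note that for bt_rle blocks the real function additionally reports a compressed size of 1, since the payload is a single repeated byte:

    typedef struct {
        unsigned lastBlock;
        unsigned blockType;     /* 0=bt_raw, 1=bt_rle, 2=bt_compressed, 3=bt_reserved */
        unsigned blockSize;
    } block_header_demo_t;

    static block_header_demo_t read_block_header(const unsigned char* src)
    {
        unsigned const h = (unsigned)src[0]
                         | ((unsigned)src[1] << 8)
                         | ((unsigned)src[2] << 16);    /* 24-bit little-endian read */
        block_header_demo_t bp;
        bp.lastBlock = h & 1;
        bp.blockType = (h >> 1) & 3;
        bp.blockSize = h >> 3;
        return bp;
    }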
diff --git a/contrib/libs/zstd/lib/compress/fse_compress.c b/contrib/libs/zstd/lib/compress/fse_compress.c
index 5547b4ac09..95dbec2558 100644
--- a/contrib/libs/zstd/lib/compress/fse_compress.c
+++ b/contrib/libs/zstd/lib/compress/fse_compress.c
@@ -31,7 +31,7 @@
/* **************************************************************
* Error Management
****************************************************************/
-#define FSE_isError ERR_isError
+#define FSE_isError ERR_isError
/* **************************************************************
@@ -289,7 +289,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
} }
{ int count = normalizedCounter[symbol++];
int const max = (2*threshold-1) - remaining;
- remaining -= count < 0 ? -count : count;
+ remaining -= count < 0 ? -count : count;
count++; /* +1 for extra accuracy */
if (count>=threshold)
count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
@@ -297,8 +297,8 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
bitCount += nbBits;
bitCount -= (count<max);
previousIs0 = (count==1);
- if (remaining<1) return ERROR(GENERIC);
- while (remaining<threshold) { nbBits--; threshold>>=1; }
+ if (remaining<1) return ERROR(GENERIC);
+ while (remaining<threshold) { nbBits--; threshold>>=1; }
}
if (bitCount>16) {
if ((!writeIsSafe) && (out > oend - 2))
@@ -328,7 +328,7 @@ FSE_writeNCount_generic (void* header, size_t headerBufferSize,
size_t FSE_writeNCount (void* buffer, size_t bufferSize,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
- if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
@@ -356,21 +356,21 @@ void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
- U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
- U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
- assert(srcSize > 1); /* Not supported, RLE should be used instead */
- return minBits;
+ U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+ U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ return minBits;
}
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
- U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+ U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
U32 tableLog = maxTableLog;
- U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
- assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
- if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
- if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
+ if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
+ if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
return tableLog;
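The clamp above reads more easily on concrete numbers. A self-contained re-derivation, assuming the default build constants (FSE_MIN_TABLELOG 5, FSE_MAX_TABLELOG 12, FSE_DEFAULT_TABLELOG 11, and minus = 2 as passed by FSE_optimalTableLog); the highbit helper is a naive stand-in, not zstd code:

    #include <stddef.h>

    static unsigned highbit(unsigned v) { unsigned n = 0; while (v >>= 1) n++; return n; }

    static unsigned optimal_tablelog_demo(unsigned maxTableLog,
                                          size_t srcSize, unsigned maxSymbolValue)
    {
        unsigned const minus = 2;
        unsigned const maxBitsSrc = highbit((unsigned)(srcSize - 1)) - minus;
        unsigned const minBitsSrc = highbit((unsigned)srcSize) + 1;
        unsigned const minBitsSymbols = highbit(maxSymbolValue) + 2;
        unsigned const minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
        unsigned tableLog = maxTableLog ? maxTableLog : 11;
        if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;  /* accuracy capped by sample size */
        if (minBits > tableLog) tableLog = minBits;        /* floor to represent all symbols */
        if (tableLog < 5)  tableLog = 5;
        if (tableLog > 12) tableLog = 12;
        return tableLog;
    }

    /* e.g. optimal_tablelog_demo(9, 1000, 35) == 7 :
     * maxBitsSrc = highbit(999) - 2 = 7 trims the requested 9, and
     * minBits = MIN(10, 7) = 7 does not push it back up. */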
@@ -386,7 +386,7 @@ unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS
static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
{
- short const NOT_YET_ASSIGNED = -2;
+ short const NOT_YET_ASSIGNED = -2;
U32 s;
U32 distributed = 0;
U32 ToDistribute;
@@ -412,8 +412,8 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
total -= count[s];
continue;
}
-
- norm[s]=NOT_YET_ASSIGNED;
+
+ norm[s]=NOT_YET_ASSIGNED;
}
ToDistribute = (1 << tableLog) - distributed;
@@ -424,7 +424,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
/* risk of rounding to zero */
lowOne = (U32)((total * 3) / (ToDistribute * 2));
for (s=0; s<=maxSymbolValue; s++) {
- if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
+ if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
norm[s] = 1;
distributed++;
total -= count[s];
@@ -439,24 +439,24 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
find max, then give all remaining points to max */
U32 maxV = 0, maxC = 0;
for (s=0; s<=maxSymbolValue; s++)
- if (count[s] > maxC) { maxV=s; maxC=count[s]; }
+ if (count[s] > maxC) { maxV=s; maxC=count[s]; }
norm[maxV] += (short)ToDistribute;
return 0;
}
- if (total == 0) {
- /* all of the symbols were low enough for the lowOne or lowThreshold */
- for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
- if (norm[s] > 0) { ToDistribute--; norm[s]++; }
- return 0;
- }
-
+ if (total == 0) {
+ /* all of the symbols were low enough for the lowOne or lowThreshold */
+ for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
+ if (norm[s] > 0) { ToDistribute--; norm[s]++; }
+ return 0;
+ }
+
{ U64 const vStepLog = 62 - tableLog;
U64 const mid = (1ULL << (vStepLog-1)) - 1;
U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
U64 tmpTotal = mid;
for (s=0; s<=maxSymbolValue; s++) {
- if (norm[s]==NOT_YET_ASSIGNED) {
+ if (norm[s]==NOT_YET_ASSIGNED) {
U64 const end = tmpTotal + (count[s] * rStep);
U32 const sStart = (U32)(tmpTotal >> vStepLog);
U32 const sEnd = (U32)(end >> vStepLog);
@@ -480,7 +480,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
- { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+ { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
short const lowProbCount = useLowProbCount ? -1 : 1;
U64 const scale = 62 - tableLog;
U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
@@ -503,7 +503,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
U64 restToBeat = vStep * rtbTable[proba];
proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
}
- if (proba > largestP) { largestP=proba; largest=s; }
+ if (proba > largestP) { largestP=proba; largest=s; }
normalizedCounter[s] = proba;
stillToDistribute -= proba;
} }
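What FSE_normalizeCount guarantees, independent of the fixed-point machinery above: scaled counts sum to exactly 1<<tableLog and no present symbol is rounded away to zero. A deliberately naive largest-symbol-absorbs-remainder version for illustration, assuming the distribution is not dominated by rare symbols (the real code handles that case via lowProbCount and the FSE_normalizeM2 fallback):

    #include <stddef.h>

    static void normalize_demo(short* norm, unsigned tableLog,
                               const unsigned* count, size_t total, unsigned maxSV)
    {
        int const target = 1 << tableLog;
        int assigned = 0;
        unsigned s, maxS = 0, maxC = 0;
        for (s = 0; s <= maxSV; s++) {
            if (count[s] == 0) { norm[s] = 0; continue; }
            {   int p = (int)(((unsigned long long)count[s] * target) / total);
                if (p == 0) p = 1;          /* present symbols keep >= 1 slot */
                norm[s] = (short)p;
                assigned += p;
            }
            if (count[s] > maxC) { maxC = count[s]; maxS = s; }
        }
        norm[maxS] += (short)(target - assigned);   /* dump rounding error on the top symbol */
    }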
diff --git a/contrib/libs/zstd/lib/compress/huf_compress.c b/contrib/libs/zstd/lib/compress/huf_compress.c
index 2b3d6adc2a..0de33e9408 100644
--- a/contrib/libs/zstd/lib/compress/huf_compress.c
+++ b/contrib/libs/zstd/lib/compress/huf_compress.c
@@ -37,7 +37,7 @@
/* **************************************************************
* Error Management
****************************************************************/
-#define HUF_isError ERR_isError
+#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
@@ -232,7 +232,7 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
/* check result */
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
- if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
+ if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
CTable[0] = tableLog;
@@ -266,7 +266,7 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
{ U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
}
- *maxSymbolValuePtr = nbSymbols - 1;
+ *maxSymbolValuePtr = nbSymbols - 1;
return readSize;
}
@@ -369,8 +369,8 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
} }
/* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
- /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
- while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
+ /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
+ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
nBitsToDecrease++;
assert(rankLast[nBitsToDecrease] != noSymbol);
/* Increase the number of bits to gain back half the rank cost. */
@@ -710,24 +710,24 @@ size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSym
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
HUF_CElt const* ct = CTable + 1;
- size_t nbBits = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ size_t nbBits = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
nbBits += HUF_getNbBits(ct[s]) * count[s];
- }
- return nbBits >> 3;
+ }
+ return nbBits >> 3;
}
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
HUF_CElt const* ct = CTable + 1;
- int bad = 0;
- int s;
- for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ int bad = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
- }
- return !bad;
-}
-
+ }
+ return !bad;
+}
+
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
/** HUF_CStream_t:
@@ -891,11 +891,11 @@ static size_t HUF_closeCStream(HUF_CStream_t* bitC)
}
}
-FORCE_INLINE_TEMPLATE void
+FORCE_INLINE_TEMPLATE void
HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
-{
+{
HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
-}
+}
FORCE_INLINE_TEMPLATE void
HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
@@ -913,7 +913,7 @@ HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
HUF_flushBits(bitC, kFastFlush);
}
assert(n % kUnroll == 0);
-
+
/* Join to 2 * kUnroll */
if (n % (2 * kUnroll)) {
int u;
@@ -962,10 +962,10 @@ static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
}
-FORCE_INLINE_TEMPLATE size_t
-HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable)
+FORCE_INLINE_TEMPLATE size_t
+HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
{
U32 const tableLog = (U32)CTable[0];
HUF_CElt const* ct = CTable + 1;
@@ -1027,63 +1027,63 @@ HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
return HUF_closeCStream(&bitC);
}
-#if DYNAMIC_BMI2
+#if DYNAMIC_BMI2
static BMI2_TARGET_ATTRIBUTE size_t
-HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable)
-{
- return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
-}
-
-static size_t
-HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable)
-{
- return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
-}
-
-static size_t
-HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable, const int bmi2)
-{
- if (bmi2) {
- return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
- }
- return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
-}
-
-#else
-
-static size_t
-HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable, const int bmi2)
-{
- (void)bmi2;
- return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
-}
-
-#endif
-
-size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ if (bmi2) {
+ return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
+ }
+ return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
+}
+
+#else
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ (void)bmi2;
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+#endif
+
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
-}
-
+}
+
size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
{
return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
}
-
-static size_t
-HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const HUF_CElt* CTable, int bmi2)
-{
+
+static size_t
+HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, int bmi2)
+{
size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
@@ -1129,10 +1129,10 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
return (size_t)(op-ostart);
}
-size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
-{
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
-}
+}
size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
{
@@ -1140,25 +1140,25 @@ size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* sr
}
typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
-
-static size_t HUF_compressCTable_internal(
- BYTE* const ostart, BYTE* op, BYTE* const oend,
- const void* src, size_t srcSize,
+
+static size_t HUF_compressCTable_internal(
+ BYTE* const ostart, BYTE* op, BYTE* const oend,
+ const void* src, size_t srcSize,
HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
-{
+{
size_t const cSize = (nbStreams==HUF_singleStream) ?
HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
- if (HUF_isError(cSize)) { return cSize; }
-    if (cSize==0) { return 0; }  /* incompressible */
- op += cSize;
- /* check compressibility */
+ if (HUF_isError(cSize)) { return cSize; }
+    if (cSize==0) { return 0; }  /* incompressible */
+ op += cSize;
+ /* check compressibility */
assert(op >= ostart);
- if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
+ if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
return (size_t)(op-ostart);
-}
-
-typedef struct {
+}
+
+typedef struct {
unsigned count[HUF_SYMBOLVALUE_MAX + 1];
HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
union {
@@ -1166,12 +1166,12 @@ typedef struct {
HUF_WriteCTableWksp writeCTable_wksp;
U32 hist_wksp[HIST_WKSP_SIZE_U32];
} wksps;
-} HUF_compress_tables_t;
-
+} HUF_compress_tables_t;
+
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
-/* HUF_compress_internal() :
+/* HUF_compress_internal() :
 * `workSpace_align4` must be aligned on 4-byte boundaries,
* and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
static size_t
@@ -1192,21 +1192,21 @@ HUF_compress_internal (void* dst, size_t dstSize,
/* checks & inits */
if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
- if (!srcSize) return 0; /* Uncompressed */
- if (!dstSize) return 0; /* cannot fit anything within dst budget */
+ if (!srcSize) return 0; /* Uncompressed */
+ if (!dstSize) return 0; /* cannot fit anything within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
- /* Heuristic : If old table is valid, use it for small inputs */
- if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
- return HUF_compressCTable_internal(ostart, op, oend,
- src, srcSize,
+ /* Heuristic : If old table is valid, use it for small inputs */
+ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
nbStreams, oldHufTable, bmi2);
- }
-
+ }
+
    /* If incompressible data is suspected, do a smaller sampling first */
DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
@@ -1228,19 +1228,19 @@ HUF_compress_internal (void* dst, size_t dstSize,
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
- /* Check validity of previous table */
- if ( repeat
- && *repeat == HUF_repeat_check
- && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
- *repeat = HUF_repeat_none;
- }
- /* Heuristic : use existing table for small inputs */
- if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
- return HUF_compressCTable_internal(ostart, op, oend,
- src, srcSize,
+ /* Check validity of previous table */
+ if ( repeat
+ && *repeat == HUF_repeat_check
+ && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
+ *repeat = HUF_repeat_none;
+ }
+ /* Heuristic : use existing table for small inputs */
+ if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
nbStreams, oldHufTable, bmi2);
- }
-
+ }
+
/* Build Huffman Tree */
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
@@ -1259,25 +1259,25 @@ HUF_compress_internal (void* dst, size_t dstSize,
/* Write table description header */
{ CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
&table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
- /* Check if using previous huffman table is beneficial */
- if (repeat && *repeat != HUF_repeat_none) {
- size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
- size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
- if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
- return HUF_compressCTable_internal(ostart, op, oend,
- src, srcSize,
+ /* Check if using previous huffman table is beneficial */
+ if (repeat && *repeat != HUF_repeat_none) {
+ size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
+ size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
+ if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
nbStreams, oldHufTable, bmi2);
- } }
-
- /* Use the new huffman table */
- if (hSize + 12ul >= srcSize) { return 0; }
+ } }
+
+ /* Use the new huffman table */
+ if (hSize + 12ul >= srcSize) { return 0; }
op += hSize;
- if (repeat) { *repeat = HUF_repeat_none; }
- if (oldHufTable)
+ if (repeat) { *repeat = HUF_repeat_none; }
+ if (oldHufTable)
ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
}
- return HUF_compressCTable_internal(ostart, op, oend,
- src, srcSize,
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
nbStreams, table->CTable, bmi2);
}
@@ -1287,55 +1287,55 @@ size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
- return HUF_compress_internal(dst, dstSize, src, srcSize,
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_singleStream,
- workSpace, wkspSize,
+ workSpace, wkspSize,
NULL, NULL, 0, 0 /*bmi2*/, 0);
}
-size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize,
+size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat,
int bmi2, unsigned suspectUncompressible)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize,
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_singleStream,
- workSpace, wkspSize, hufTable,
+ workSpace, wkspSize, hufTable,
repeat, preferRepeat, bmi2, suspectUncompressible);
-}
-
-/* HUF_compress4X_repeat():
- * compress input using 4 streams.
- * provide workspace to generate compression tables */
+}
+
+/* HUF_compress4X_repeat():
+ * compress input using 4 streams.
+ * provide workspace to generate compression tables */
size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
- return HUF_compress_internal(dst, dstSize, src, srcSize,
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_fourStreams,
- workSpace, wkspSize,
+ workSpace, wkspSize,
NULL, NULL, 0, 0 /*bmi2*/, 0);
}
-/* HUF_compress4X_repeat():
- * compress input using 4 streams.
+/* HUF_compress4X_repeat():
+ * compress input using 4 streams.
 * consider skipping quickly when the data looks incompressible
- * re-use an existing huffman compression table */
-size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog,
- void* workSpace, size_t wkspSize,
+ * re-use an existing huffman compression table */
+size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible)
-{
- return HUF_compress_internal(dst, dstSize, src, srcSize,
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_fourStreams,
- workSpace, wkspSize,
+ workSpace, wkspSize,
hufTable, repeat, preferRepeat, bmi2, suspectUncompressible);
-}
-
+}
+
#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/** HUF_buildCTable() :
* @return : maxNbBits
@@ -1365,6 +1365,6 @@ size_t HUF_compress2 (void* dst, size_t dstSize,
size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
- return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
+ return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
}
#endif
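The core cost model in HUF_compress_internal is worth isolating: HUF_estimateCompressedSize prices a table as sum(count[s] * nbBits[s]) >> 3, and the previous CTable is kept whenever its payload is no larger than the new payload plus the new table description. A sketch of that decision, mirroring the condition in the diff above; the helper is illustrative, not a zstd function:

    #include <stddef.h>

    static int should_reuse_old_table(size_t oldSize,  /* est. payload with old CTable */
                                      size_t newSize,  /* est. payload with new CTable */
                                      size_t hSize,    /* serialized size of new CTable */
                                      size_t srcSize)
    {
        if (oldSize <= hSize + newSize) return 1;  /* header cost eats the gain */
        if (hSize + 12 >= srcSize) return 1;       /* block too small to amortize a header */
        return 0;
    }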
diff --git a/contrib/libs/zstd/lib/compress/zstd_compress.c b/contrib/libs/zstd/lib/compress/zstd_compress.c
index f06456af92..a03dfab6bd 100644
--- a/contrib/libs/zstd/lib/compress/zstd_compress.c
+++ b/contrib/libs/zstd/lib/compress/zstd_compress.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/*-*************************************
@@ -18,14 +18,14 @@
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
-#include "zstd_compress_internal.h"
+#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
-#include "zstd_fast.h"
-#include "zstd_double_fast.h"
-#include "zstd_lazy.h"
-#include "zstd_opt.h"
-#include "zstd_ldm.h"
+#include "zstd_fast.h"
+#include "zstd_double_fast.h"
+#include "zstd_lazy.h"
+#include "zstd_opt.h"
+#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"
/* ***************************************************************
@@ -64,34 +64,34 @@
 * the overhead of headers can make the compressed data larger than the
* return value of ZSTD_compressBound().
*/
-size_t ZSTD_compressBound(size_t srcSize) {
- return ZSTD_COMPRESSBOUND(srcSize);
+size_t ZSTD_compressBound(size_t srcSize) {
+ return ZSTD_COMPRESSBOUND(srcSize);
}
/*-*************************************
* Context memory management
***************************************/
-struct ZSTD_CDict_s {
- const void* dictContent;
- size_t dictContentSize;
+struct ZSTD_CDict_s {
+ const void* dictContent;
+ size_t dictContentSize;
ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
ZSTD_cwksp workspace;
- ZSTD_matchState_t matchState;
- ZSTD_compressedBlockState_t cBlockState;
+ ZSTD_matchState_t matchState;
+ ZSTD_compressedBlockState_t cBlockState;
ZSTD_customMem customMem;
- U32 dictID;
+ U32 dictID;
int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
* row-based matchfinder. Unless the cdict is reloaded, we will use
* the same greedy/lazy matchfinder at compression time.
*/
-}; /* typedef'd to ZSTD_CDict within "zstd.h" */
+}; /* typedef'd to ZSTD_CDict within "zstd.h" */
ZSTD_CCtx* ZSTD_createCCtx(void)
{
- return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
+ return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
}
static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
@@ -108,22 +108,22 @@ static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
- ZSTD_STATIC_ASSERT(zcss_init==0);
- ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
+ ZSTD_STATIC_ASSERT(zcss_init==0);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
- if (!cctx) return NULL;
+ if (!cctx) return NULL;
ZSTD_initCCtx(cctx, customMem);
- return cctx;
- }
-}
+ return cctx;
+ }
+}
ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
-{
+{
ZSTD_cwksp ws;
ZSTD_CCtx* cctx;
- if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
- if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
+ if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
+ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
@@ -131,14 +131,14 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
ZSTD_cwksp_move(&cctx->workspace, &ws);
- cctx->staticSize = workspaceSize;
+ cctx->staticSize = workspaceSize;
- /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+ /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
- cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
return cctx;
}
@@ -166,9 +166,9 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
assert(cctx != NULL);
assert(cctx->staticSize == 0);
ZSTD_clearAllDicts(cctx);
-#ifdef ZSTD_MULTITHREAD
- ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
-#endif
+#ifdef ZSTD_MULTITHREAD
+ ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
+#endif
ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
}
@@ -187,18 +187,18 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
return 0;
}
-
-static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
-{
-#ifdef ZSTD_MULTITHREAD
- return ZSTDMT_sizeof_CCtx(cctx->mtctx);
-#else
+
+static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+ return ZSTDMT_sizeof_CCtx(cctx->mtctx);
+#else
(void)cctx;
- return 0;
-#endif
-}
-
-
+ return 0;
+#endif
+}
+
+
size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support sizeof on NULL */
@@ -206,17 +206,17 @@ size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
+ ZSTD_cwksp_sizeof(&cctx->workspace)
+ ZSTD_sizeof_localDict(cctx->localDict)
- + ZSTD_sizeof_mtctx(cctx);
+ + ZSTD_sizeof_mtctx(cctx);
}
-size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
+size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
{
- return ZSTD_sizeof_CCtx(zcs); /* same object */
+ return ZSTD_sizeof_CCtx(zcs); /* same object */
}
-/* private API call, for dictBuilder only */
-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
-
+/* private API call, for dictBuilder only */
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+
/* Returns true if the strategy supports using a row based matchfinder */
static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
@@ -277,13 +277,13 @@ static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
}
-static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
- ZSTD_compressionParameters cParams)
-{
- ZSTD_CCtx_params cctxParams;
+static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
+ ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params cctxParams;
/* should not matter, as all cParams are presumed properly defined */
ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
- cctxParams.cParams = cParams;
+ cctxParams.cParams = cParams;
/* Adjust advanced params according to cParams */
cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
@@ -294,48 +294,48 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
}
cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
- assert(!ZSTD_checkCParams(cParams));
- return cctxParams;
-}
-
-static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
- ZSTD_customMem customMem)
-{
- ZSTD_CCtx_params* params;
+ assert(!ZSTD_checkCParams(cParams));
+ return cctxParams;
+}
+
+static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params* params;
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
- sizeof(ZSTD_CCtx_params), customMem);
- if (!params) { return NULL; }
+ sizeof(ZSTD_CCtx_params), customMem);
+ if (!params) { return NULL; }
ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
- params->customMem = customMem;
- return params;
-}
-
-ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
-{
- return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
-}
-
-size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
-{
- if (params == NULL) { return 0; }
+ params->customMem = customMem;
+ return params;
+}
+
+ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
+{
+ return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
+{
+ if (params == NULL) { return 0; }
ZSTD_customFree(params, params->customMem);
- return 0;
-}
-
-size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
-{
- return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
-}
-
-size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
+ return 0;
+}
+
+size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
+{
+ return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
+}
+
+size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
- cctxParams->compressionLevel = compressionLevel;
- cctxParams->fParams.contentSizeFlag = 1;
- return 0;
-}
-
+ cctxParams->compressionLevel = compressionLevel;
+ cctxParams->fParams.contentSizeFlag = 1;
+ return 0;
+}
+
#define ZSTD_NO_CLEVEL 0
/**
@@ -359,21 +359,21 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par
cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
}
-size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
-{
+size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
+{
RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
- return 0;
-}
-
+ return 0;
+}
+
/**
* Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
* @param param Validated zstd parameters.
*/
static void ZSTD_CCtxParams_setZstdParams(
ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
-{
+{
assert(!ZSTD_checkCParams(params->cParams));
cctxParams->cParams = params->cParams;
cctxParams->fParams = params->fParams;
@@ -381,19 +381,19 @@ static void ZSTD_CCtxParams_setZstdParams(
* But, set it for tracing anyway.
*/
cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
-}
-
+}
+
ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
{
ZSTD_bounds bounds = { 0, 0, 0 };
-
+
switch(param)
{
case ZSTD_c_compressionLevel:
bounds.lowerBound = ZSTD_minCLevel();
bounds.upperBound = ZSTD_maxCLevel();
return bounds;
-
+
case ZSTD_c_windowLog:
bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
bounds.upperBound = ZSTD_WINDOWLOG_MAX;
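ZSTD_cParam_getBounds() is stable public API, so callers can pre-validate or clamp a value instead of waiting for BOUNDCHECK to fail inside ZSTD_CCtx_setParameter(). A sketch mirroring the ZSTD_cParam_clampBounds helper referenced in the next hunk; clamp_to_bounds is an illustrative name:

    #include <zstd.h>

    static int clamp_to_bounds(ZSTD_cParameter p, int value)
    {
        ZSTD_bounds const b = ZSTD_cParam_getBounds(p);
        if (ZSTD_isError(b.error)) return value;    /* unknown param: pass through */
        if (value < b.lowerBound) return b.lowerBound;
        if (value > b.upperBound) return b.upperBound;
        return value;
    }

    /* usage: ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel,
     *            clamp_to_bounds(ZSTD_c_compressionLevel, 99)); */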
@@ -596,10 +596,10 @@ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
}
-static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
-{
- switch(param)
- {
+static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
+{
+ switch(param)
+ {
case ZSTD_c_compressionLevel:
case ZSTD_c_hashLog:
case ZSTD_c_chainLog:
@@ -607,8 +607,8 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_minMatch:
case ZSTD_c_targetLength:
case ZSTD_c_strategy:
- return 1;
-
+ return 1;
+
case ZSTD_c_format:
case ZSTD_c_windowLog:
case ZSTD_c_contentSizeFlag:
@@ -636,28 +636,28 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_useBlockSplitter:
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
- default:
- return 0;
- }
-}
-
+ default:
+ return 0;
+ }
+}
+
size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
-{
+{
DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
- if (cctx->streamStage != zcss_init) {
- if (ZSTD_isUpdateAuthorized(param)) {
- cctx->cParamsChanged = 1;
- } else {
+ if (cctx->streamStage != zcss_init) {
+ if (ZSTD_isUpdateAuthorized(param)) {
+ cctx->cParamsChanged = 1;
+ } else {
RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
- } }
-
- switch(param)
- {
+ } }
+
+ switch(param)
+ {
case ZSTD_c_nbWorkers:
RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
"MT not compatible with static alloc");
break;
-
+
case ZSTD_c_compressionLevel:
case ZSTD_c_windowLog:
case ZSTD_c_hashLog:
@@ -692,23 +692,23 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
break;
-
+
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
- }
+ }
return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
-}
-
+}
+
size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
ZSTD_cParameter param, int value)
-{
+{
DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
- switch(param)
- {
+ switch(param)
+ {
case ZSTD_c_format :
BOUNDCHECK(ZSTD_c_format, value);
- CCtxParams->format = (ZSTD_format_e)value;
- return (size_t)CCtxParams->format;
-
+ CCtxParams->format = (ZSTD_format_e)value;
+ return (size_t)CCtxParams->format;
+
case ZSTD_c_compressionLevel : {
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
if (value == 0)
@@ -716,70 +716,70 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
else
CCtxParams->compressionLevel = value;
if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
- return 0; /* return type (size_t) cannot represent negative values */
- }
-
+ return 0; /* return type (size_t) cannot represent negative values */
+ }
+
case ZSTD_c_windowLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_windowLog, value);
CCtxParams->cParams.windowLog = (U32)value;
- return CCtxParams->cParams.windowLog;
-
+ return CCtxParams->cParams.windowLog;
+
case ZSTD_c_hashLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_hashLog, value);
CCtxParams->cParams.hashLog = (U32)value;
- return CCtxParams->cParams.hashLog;
-
+ return CCtxParams->cParams.hashLog;
+
case ZSTD_c_chainLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_chainLog, value);
CCtxParams->cParams.chainLog = (U32)value;
- return CCtxParams->cParams.chainLog;
-
+ return CCtxParams->cParams.chainLog;
+
case ZSTD_c_searchLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_searchLog, value);
CCtxParams->cParams.searchLog = (U32)value;
return (size_t)value;
-
+
case ZSTD_c_minMatch :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_minMatch, value);
CCtxParams->cParams.minMatch = value;
return CCtxParams->cParams.minMatch;
-
+
case ZSTD_c_targetLength :
BOUNDCHECK(ZSTD_c_targetLength, value);
- CCtxParams->cParams.targetLength = value;
- return CCtxParams->cParams.targetLength;
-
+ CCtxParams->cParams.targetLength = value;
+ return CCtxParams->cParams.targetLength;
+
case ZSTD_c_strategy :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_strategy, value);
- CCtxParams->cParams.strategy = (ZSTD_strategy)value;
- return (size_t)CCtxParams->cParams.strategy;
-
+ CCtxParams->cParams.strategy = (ZSTD_strategy)value;
+ return (size_t)CCtxParams->cParams.strategy;
+
case ZSTD_c_contentSizeFlag :
- /* Content size written in frame header _when known_ (default:1) */
+ /* Content size written in frame header _when known_ (default:1) */
DEBUGLOG(4, "set content size flag = %u", (value!=0));
CCtxParams->fParams.contentSizeFlag = value != 0;
- return CCtxParams->fParams.contentSizeFlag;
-
+ return CCtxParams->fParams.contentSizeFlag;
+
case ZSTD_c_checksumFlag :
-        /* A 32-bit content checksum will be calculated and written at the end of the frame (default:0) */
+        /* A 32-bit content checksum will be calculated and written at the end of the frame (default:0) */
CCtxParams->fParams.checksumFlag = value != 0;
- return CCtxParams->fParams.checksumFlag;
-
+ return CCtxParams->fParams.checksumFlag;
+
case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
- CCtxParams->fParams.noDictIDFlag = !value;
- return !CCtxParams->fParams.noDictIDFlag;
-
+ CCtxParams->fParams.noDictIDFlag = !value;
+ return !CCtxParams->fParams.noDictIDFlag;
+
case ZSTD_c_forceMaxWindow :
CCtxParams->forceWindow = (value != 0);
- return CCtxParams->forceWindow;
-
+ return CCtxParams->forceWindow;
+
case ZSTD_c_forceAttachDict : {
const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
@@ -795,20 +795,20 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
}
case ZSTD_c_nbWorkers :
-#ifndef ZSTD_MULTITHREAD
+#ifndef ZSTD_MULTITHREAD
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
- return 0;
-#else
+ return 0;
+#else
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
CCtxParams->nbWorkers = value;
return CCtxParams->nbWorkers;
-#endif
-
+#endif
+
case ZSTD_c_jobSize :
-#ifndef ZSTD_MULTITHREAD
+#ifndef ZSTD_MULTITHREAD
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
-#else
+#else
/* Adjust to the minimum non-default value. */
if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
value = ZSTDMT_JOBSIZE_MIN;
@@ -816,18 +816,18 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
assert(value >= 0);
CCtxParams->jobSize = value;
return CCtxParams->jobSize;
-#endif
-
+#endif
+
case ZSTD_c_overlapLog :
-#ifndef ZSTD_MULTITHREAD
+#ifndef ZSTD_MULTITHREAD
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
-#else
+#else
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
CCtxParams->overlapLog = value;
return CCtxParams->overlapLog;
-#endif
-
+#endif
+
case ZSTD_c_rsyncable :
#ifndef ZSTD_MULTITHREAD
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
@@ -844,32 +844,32 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
case ZSTD_c_enableLongDistanceMatching :
CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
- return CCtxParams->ldmParams.enableLdm;
-
+ return CCtxParams->ldmParams.enableLdm;
+
case ZSTD_c_ldmHashLog :
if (value!=0) /* 0 ==> auto */
BOUNDCHECK(ZSTD_c_ldmHashLog, value);
- CCtxParams->ldmParams.hashLog = value;
- return CCtxParams->ldmParams.hashLog;
-
+ CCtxParams->ldmParams.hashLog = value;
+ return CCtxParams->ldmParams.hashLog;
+
case ZSTD_c_ldmMinMatch :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
- CCtxParams->ldmParams.minMatchLength = value;
- return CCtxParams->ldmParams.minMatchLength;
-
+ CCtxParams->ldmParams.minMatchLength = value;
+ return CCtxParams->ldmParams.minMatchLength;
+
case ZSTD_c_ldmBucketSizeLog :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
- CCtxParams->ldmParams.bucketSizeLog = value;
- return CCtxParams->ldmParams.bucketSizeLog;
-
+ CCtxParams->ldmParams.bucketSizeLog = value;
+ return CCtxParams->ldmParams.bucketSizeLog;
+
case ZSTD_c_ldmHashRateLog :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
CCtxParams->ldmParams.hashRateLog = value;
return CCtxParams->ldmParams.hashRateLog;
-
+
case ZSTD_c_targetCBlockSize :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
@@ -918,9 +918,9 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
return CCtxParams->deterministicRefPrefix;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
- }
-}
-
+ }
+}
+
size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
{
return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
@@ -1054,36 +1054,36 @@ size_t ZSTD_CCtxParams_getParameter(
return 0;
}
-/** ZSTD_CCtx_setParametersUsingCCtxParams() :
- * simply stores `params` into `cctx`; no other action is performed.
- * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
- * This is possible even while a compression is ongoing;
- * in that case, the new parameters are applied on the fly, starting with the next compression job.
- */
-size_t ZSTD_CCtx_setParametersUsingCCtxParams(
- ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
-{
+/** ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * simply stores `params` into `cctx`; no other action is performed.
+ * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
+ * This is possible even while a compression is ongoing;
+ * in that case, the new parameters are applied on the fly, starting with the next compression job.
+ */
+size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
+{
DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"The context is in the wrong stage!");
RETURN_ERROR_IF(cctx->cdict, stage_wrong,
"Can't override parameters with cdict attached (some must "
"be inherited from the cdict).");
-
- cctx->requestedParams = *params;
- return 0;
-}
-
+
+ cctx->requestedParams = *params;
+ return 0;
+}
+
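Sketch of preparing a parameter set once and stamping it onto a context (experimental ZSTD_CCtx_params API behind ZSTD_STATIC_LINKING_ONLY; asserts stand in for real error handling):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <assert.h>

    static void apply_params_example(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_params* const p = ZSTD_createCCtxParams();
        assert(p != NULL);
        assert(!ZSTD_isError(ZSTD_CCtxParams_init(p, 3)));   /* level-3 defaults */
        assert(!ZSTD_isError(ZSTD_CCtxParams_setParameter(p, ZSTD_c_checksumFlag, 1)));
        assert(!ZSTD_isError(ZSTD_CCtx_setParametersUsingCCtxParams(cctx, p)));
        ZSTD_freeCCtxParams(p);
    }
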
size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
-{
- DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't set pledgedSrcSize when not in init stage.");
- cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
- return 0;
-}
-
+ cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ return 0;
+}
+
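Caller-side sketch: pledge the total input size before streaming so the frame header can record it (the pledge is verified when the frame is closed; the wrapper name is illustrative):

    #include <zstd.h>

    static size_t pledge_example(ZSTD_CCtx* cctx, unsigned long long totalSrcSize)
    {
        size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
        if (ZSTD_isError(err)) return err;
        return ZSTD_CCtx_setPledgedSrcSize(cctx, totalSrcSize);
    }
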
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
int const compressionLevel,
size_t const dictSize);
@@ -1128,19 +1128,19 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
return 0;
}
-size_t ZSTD_CCtx_loadDictionary_advanced(
- ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
-{
+size_t ZSTD_CCtx_loadDictionary_advanced(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't load a dictionary when ctx is not in init stage.");
- DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
+ DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
ZSTD_clearAllDicts(cctx); /* in case one already exists */
if (dict == NULL || dictSize == 0) /* no dictionary mode */
return 0;
if (dictLoadMethod == ZSTD_dlm_byRef) {
cctx->localDict.dict = dict;
- } else {
+ } else {
void* dictBuffer;
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"no malloc for static CCtx");
@@ -1149,36 +1149,36 @@ size_t ZSTD_CCtx_loadDictionary_advanced(
ZSTD_memcpy(dictBuffer, dict, dictSize);
cctx->localDict.dictBuffer = dictBuffer;
cctx->localDict.dict = dictBuffer;
- }
+ }
cctx->localDict.dictSize = dictSize;
cctx->localDict.dictContentType = dictContentType;
- return 0;
-}
-
+ return 0;
+}
+
size_t ZSTD_CCtx_loadDictionary_byReference(
- ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
-{
- return ZSTD_CCtx_loadDictionary_advanced(
- cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
-}
-
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
-{
- return ZSTD_CCtx_loadDictionary_advanced(
- cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
-}
-
-
-size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
-{
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
+
+
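The two public wrappers above differ only in buffer-lifetime requirements; a sketch (wrapper name illustrative):

    #include <zstd.h>

    /* byCopy duplicates the dictionary into the context, so the buffer may be
     * freed immediately; byReference stores only the pointer, so the buffer
     * must outlive every compression that uses it. */
    static size_t load_dict_example(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize, int byRef)
    {
        return byRef
            ? ZSTD_CCtx_loadDictionary_byReference(cctx, dict, dictSize)
            : ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
    }
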
+size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't ref a dict when ctx not in init stage.");
/* Free the existing local cdict (if any) to save memory. */
ZSTD_clearAllDicts(cctx);
- cctx->cdict = cdict;
- return 0;
-}
-
+ cctx->cdict = cdict;
+ return 0;
+}
+
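Sketch of the intended CDict workflow: digest the dictionary once with ZSTD_createCDict(), then reference it cheaply for every frame (helper name illustrative):

    #include <zstd.h>

    static size_t compress_with_cdict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict,
                                      void* dst, size_t dstCap,
                                      const void* src, size_t srcSize)
    {
        size_t const err = ZSTD_CCtx_refCDict(cctx, cdict);
        if (ZSTD_isError(err)) return err;
        return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    }
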
size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
@@ -1187,14 +1187,14 @@ size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
return 0;
}
-size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
-{
- return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
-}
-
-size_t ZSTD_CCtx_refPrefix_advanced(
- ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
-{
+size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+size_t ZSTD_CCtx_refPrefix_advanced(
+ ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't ref a prefix when ctx not in init stage.");
ZSTD_clearAllDicts(cctx);
@@ -1203,13 +1203,13 @@ size_t ZSTD_CCtx_refPrefix_advanced(
cctx->prefixDict.dictSize = prefixSize;
cctx->prefixDict.dictContentType = dictContentType;
}
- return 0;
-}
-
-/*! ZSTD_CCtx_reset() :
- *  Also discards the loaded dictionary */
+ return 0;
+}
+
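Prefix sketch: a prefix acts as a single-use, by-reference dictionary that is dropped after the next frame; the decompressor must supply the same bytes via ZSTD_DCtx_refPrefix() (helper name illustrative):

    #include <zstd.h>

    static size_t compress_with_prefix(ZSTD_CCtx* cctx,
                                       const void* prefix, size_t prefixSize,
                                       void* dst, size_t dstCap,
                                       const void* src, size_t srcSize)
    {
        size_t const err = ZSTD_CCtx_refPrefix(cctx, prefix, prefixSize);
        if (ZSTD_isError(err)) return err;
        return ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
    }
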
+/*! ZSTD_CCtx_reset() :
+ *  Also discards the loaded dictionary */
size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
-{
+{
if ( (reset == ZSTD_reset_session_only)
|| (reset == ZSTD_reset_session_and_parameters) ) {
cctx->streamStage = zcss_init;
@@ -1223,11 +1223,11 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
return ZSTD_CCtxParams_reset(&cctx->requestedParams);
}
return 0;
-}
-
+}
+
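The three public reset directives handled above, in one sketch (session-only keeps parameters and dictionary; the parameter-resetting directives are only legal between frames):

    #include <zstd.h>

    static void reset_examples(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);            /* abort current frame */
        ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);              /* defaults; drops dicts */
        ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);  /* both */
    }
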
-/** ZSTD_checkCParams() :
-    checks that CParam values remain within the authorized range.
+/** ZSTD_checkCParams() :
+    checks that CParam values remain within the authorized range.
@return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
@@ -1241,17 +1241,17 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
return 0;
}
-/** ZSTD_clampCParams() :
- * make CParam values within valid range.
- * @return : valid CParams */
+/** ZSTD_clampCParams() :
+ * make CParam values within valid range.
+ * @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
-{
+{
# define CLAMP_TYPE(cParam, val, type) { \
ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
- }
+ }
# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
CLAMP(ZSTD_c_windowLog, cParams.windowLog);
CLAMP(ZSTD_c_chainLog, cParams.chainLog);
@@ -1260,8 +1260,8 @@ ZSTD_clampCParams(ZSTD_compressionParameters cParams)
CLAMP(ZSTD_c_minMatch, cParams.minMatch);
CLAMP(ZSTD_c_targetLength,cParams.targetLength);
CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
- return cParams;
-}
+ return cParams;
+}
/** ZSTD_cycleLog() :
* condition for correct operation : hashLog > 1 */
@@ -1305,7 +1305,7 @@ static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
}
}
-/** ZSTD_adjustCParams_internal() :
+/** ZSTD_adjustCParams_internal() :
* optimize `cPar` for a specified input (`srcSize` and `dictSize`).
* mostly downsize to reduce memory consumption and initialization latency.
* `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
@@ -1320,7 +1320,7 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
{
const U64 minSrcSize = 513; /* (1<<9) + 1 */
const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
- assert(ZSTD_checkCParams(cPar)==0);
+ assert(ZSTD_checkCParams(cPar)==0);
switch (mode) {
case ZSTD_cpm_unknown:
@@ -1349,15 +1349,15 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
break;
}
- /* resize windowLog if input is small enough, to use less memory */
- if ( (srcSize < maxWindowResize)
- && (dictSize < maxWindowResize) ) {
- U32 const tSize = (U32)(srcSize + dictSize);
- static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
- U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
- ZSTD_highbit32(tSize-1) + 1;
- if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
- }
+ /* resize windowLog if input is small enough, to use less memory */
+ if ( (srcSize < maxWindowResize)
+ && (dictSize < maxWindowResize) ) {
+ U32 const tSize = (U32)(srcSize + dictSize);
+ static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
+ U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
+ ZSTD_highbit32(tSize-1) + 1;
+ if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+ }
if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
@@ -1366,7 +1366,7 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
cPar.chainLog -= (cycleLog - dictAndWindowLog);
}
- if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
+ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
return cPar;
@@ -1376,12 +1376,12 @@ ZSTD_compressionParameters
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize)
-{
+{
cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
-}
-
+}
+
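Sketch of the public entry point above on a small input: level-19 parameters get their windowLog (and the dependent table logs) shrunk for a 4 KB source (requires ZSTD_STATIC_LINKING_ONLY):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static ZSTD_compressionParameters small_input_cparams(void)
    {
        ZSTD_compressionParameters const base = ZSTD_getCParams(19, 0, 0);
        return ZSTD_adjustCParams(base, 4096, 0);   /* 4 KB source, no dictionary */
    }
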
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
@@ -1400,7 +1400,7 @@ static void ZSTD_overrideCParams(
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
-{
+{
ZSTD_compressionParameters cParams;
if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
srcSizeHint = CCtxParams->srcSizeHint;
@@ -1423,7 +1423,7 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
? ((size_t)1 << cParams->chainLog)
: 0;
- size_t const hSize = ((size_t)1) << cParams->hashLog;
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
/* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
@@ -1442,18 +1442,18 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
: 0;
size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
- ? optPotentialSpace
- : 0;
+ ? optPotentialSpace
+ : 0;
size_t const slackSpace = ZSTD_cwksp_slack_space_required();
/* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
assert(useRowMatchFinder != ZSTD_ps_auto);
- DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
- (U32)chainSize, (U32)hSize, (U32)h3Size);
+ DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
+ (U32)chainSize, (U32)hSize, (U32)h3Size);
return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
-}
+}
static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_compressionParameters* cParams,
@@ -1463,7 +1463,7 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const size_t buffInSize,
const size_t buffOutSize,
const U64 pledgedSrcSize)
-{
+{
size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
U32 const divider = (cParams->minMatch==3) ? 3 : 4;
@@ -1479,11 +1479,11 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
-
+
size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
+ ZSTD_cwksp_alloc_size(buffOutSize);
-
+
size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
size_t const neededSpace =
@@ -1515,8 +1515,8 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
&cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
}
-size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
-{
+size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
+{
ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
/* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
@@ -1530,9 +1530,9 @@ size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
} else {
return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
}
-}
+}
-static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
+static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
{
int tier = 0;
size_t largestSize = 0;
@@ -1545,20 +1545,20 @@ static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
return largestSize;
}
-size_t ZSTD_estimateCCtxSize(int compressionLevel)
-{
- int level;
- size_t memBudget = 0;
+size_t ZSTD_estimateCCtxSize(int compressionLevel)
+{
+ int level;
+ size_t memBudget = 0;
for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
/* Ensure monotonically increasing memory usage as compression level increases */
- size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
- if (newMB > memBudget) memBudget = newMB;
- }
- return memBudget;
-}
-
-size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
-{
+ size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
+
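Typical consumer of this estimate: size a workspace up front and build a static (allocation-free) context in it (requires ZSTD_STATIC_LINKING_ONLY; helper name illustrative):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdlib.h>

    static ZSTD_CCtx* make_static_cctx(void** wkspOut)
    {
        size_t const wkspSize = ZSTD_estimateCCtxSize(3);   /* one-shot, level 3 */
        void* const wksp = malloc(wkspSize);                /* malloc alignment suffices */
        ZSTD_CCtx* const cctx = wksp ? ZSTD_initStaticCCtx(wksp, wkspSize) : NULL;
        *wkspOut = wksp;   /* caller frees once the context is no longer used */
        return cctx;
    }
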
+size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
{ ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
@@ -1570,15 +1570,15 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
? ZSTD_compressBound(blockSize) + 1
: 0;
ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
-
+
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
ZSTD_CONTENTSIZE_UNKNOWN);
- }
-}
-
-size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
-{
+ }
+}
+
+size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
+{
ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
/* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
@@ -1592,50 +1592,50 @@ size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
} else {
return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
}
-}
-
+}
+
static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
{
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
- return ZSTD_estimateCStreamSize_usingCParams(cParams);
-}
-
+ return ZSTD_estimateCStreamSize_usingCParams(cParams);
+}
+
size_t ZSTD_estimateCStreamSize(int compressionLevel)
{
- int level;
- size_t memBudget = 0;
+ int level;
+ size_t memBudget = 0;
for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
- size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
- if (newMB > memBudget) memBudget = newMB;
- }
- return memBudget;
-}
-
-/* ZSTD_getFrameProgression():
- * tells how much data has been consumed (input) and produced (output) for the current frame.
- * It can track progression inside worker threads (non-blocking mode).
- */
-ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
-{
-#ifdef ZSTD_MULTITHREAD
- if (cctx->appliedParams.nbWorkers > 0) {
- return ZSTDMT_getFrameProgression(cctx->mtctx);
- }
-#endif
- { ZSTD_frameProgression fp;
- size_t const buffered = (cctx->inBuff == NULL) ? 0 :
- cctx->inBuffPos - cctx->inToCompress;
- if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
- assert(buffered <= ZSTD_BLOCKSIZE_MAX);
- fp.ingested = cctx->consumedSrcSize + buffered;
- fp.consumed = cctx->consumedSrcSize;
- fp.produced = cctx->producedCSize;
+ size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
+
+/* ZSTD_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for the current frame.
+ * It can track progression inside worker threads (non-blocking mode).
+ */
+ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+ if (cctx->appliedParams.nbWorkers > 0) {
+ return ZSTDMT_getFrameProgression(cctx->mtctx);
+ }
+#endif
+ { ZSTD_frameProgression fp;
+ size_t const buffered = (cctx->inBuff == NULL) ? 0 :
+ cctx->inBuffPos - cctx->inToCompress;
+ if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
+ assert(buffered <= ZSTD_BLOCKSIZE_MAX);
+ fp.ingested = cctx->consumedSrcSize + buffered;
+ fp.consumed = cctx->consumedSrcSize;
+ fp.produced = cctx->producedCSize;
fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
fp.currentJobID = 0;
fp.nbActiveWorkers = 0;
- return fp;
-} }
-
+ return fp;
+} }
+
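Caller-side sketch: poll progression without blocking, which is mainly useful when nbWorkers >= 1 and compression advances in worker threads (requires ZSTD_STATIC_LINKING_ONLY):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdio.h>

    static void report_progress(const ZSTD_CCtx* cctx)
    {
        ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
        printf("ingested=%llu consumed=%llu produced=%llu flushed=%llu\n",
               fp.ingested, fp.consumed, fp.produced, fp.flushed);
    }
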
/*! ZSTD_toFlushNow()
* Only useful for multithreading scenarios currently (nbWorkers >= 1).
*/
@@ -1649,7 +1649,7 @@ size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
(void)cctx;
return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
}
-
+
static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
ZSTD_compressionParameters cParams2)
{
@@ -1665,30 +1665,30 @@ static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
}
void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
-{
- int i;
- for (i = 0; i < ZSTD_REP_NUM; ++i)
- bs->rep[i] = repStartValue[i];
+{
+ int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ bs->rep[i] = repStartValue[i];
bs->entropy.huf.repeatMode = HUF_repeat_none;
bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
-}
-
-/*! ZSTD_invalidateMatchState()
+}
+
+/*! ZSTD_invalidateMatchState()
* Invalidate all the matches in the match finder tables.
* Requires nextSrc and base to be set (can be NULL).
- */
-static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
-{
- ZSTD_window_clear(&ms->window);
-
+ */
+static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
+{
+ ZSTD_window_clear(&ms->window);
+
ms->nextToUpdate = ms->window.dictLimit;
- ms->loadedDictEnd = 0;
- ms->opt.litLengthSum = 0; /* force reset of btopt stats */
+ ms->loadedDictEnd = 0;
+ ms->opt.litLengthSum = 0; /* force reset of btopt stats */
ms->dictMatchState = NULL;
-}
-
+}
+
/**
* Controls, for this matchState reset, whether the tables need to be cleared /
* prepared for the coming compression (ZSTDcrp_makeClean), or whether the
@@ -1731,7 +1731,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
? ((size_t)1 << cParams->chainLog)
: 0;
- size_t const hSize = ((size_t)1) << cParams->hashLog;
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
@@ -1741,11 +1741,11 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ZSTD_window_init(&ms->window);
ZSTD_cwksp_mark_tables_dirty(ws);
}
+
+ ms->hashLog3 = hashLog3;
- ms->hashLog3 = hashLog3;
-
- ZSTD_invalidateMatchState(ms);
-
+ ZSTD_invalidateMatchState(ms);
+
assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
ZSTD_cwksp_clear_tables(ws);
@@ -1764,17 +1764,17 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ZSTD_cwksp_clean_tables(ws);
}
- /* opt parser space */
+ /* opt parser space */
if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
- DEBUGLOG(4, "reserving optimal parser space");
+ DEBUGLOG(4, "reserving optimal parser space");
ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
- }
-
+ }
+
if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
{ /* Row match finder needs an additional table of hashes ("tags") */
size_t const tagTableSize = hSize*sizeof(U16);
@@ -1793,8 +1793,8 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
"failed a workspace allocation in ZSTD_reset_matchState");
return 0;
-}
-
+}
+
/* ZSTD_indexTooCloseToMax() :
* minor optimization : prefer memset() rather than reduceIndex()
* which is measurably slow in some circumstances (reported for Visual Studio).
@@ -1818,26 +1818,26 @@ static int ZSTD_dictTooBig(size_t const loadedDictSize)
return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
}
-/*! ZSTD_resetCCtx_internal() :
+/*! ZSTD_resetCCtx_internal() :
* @param loadedDictSize The size of the dictionary to be loaded
* into the context, if any. If no dictionary is used, or the
* dictionary is being attached / copied, then pass 0.
* note : `params` are assumed fully validated at this stage.
*/
-static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ZSTD_CCtx_params const* params,
U64 const pledgedSrcSize,
size_t const loadedDictSize,
- ZSTD_compResetPolicy_e const crp,
- ZSTD_buffered_policy_e const zbuff)
-{
+ ZSTD_compResetPolicy_e const crp,
+ ZSTD_buffered_policy_e const zbuff)
+{
ZSTD_cwksp* const ws = &zc->workspace;
DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
(U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
-
+
zc->isFirstBlock = 1;
-
+
/* Set applied params early so we can modify them for LDM,
* and point params at the applied params.
*/
@@ -1848,14 +1848,14 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
assert(params->useBlockSplitter != ZSTD_ps_auto);
assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
- /* Adjust long distance matching parameters */
+ /* Adjust long distance matching parameters */
ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
assert(params->ldmParams.hashRateLog < 32);
- }
-
+ }
+
{ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
- size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
U32 const divider = (params->cParams.minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
@@ -1896,7 +1896,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
needsIndexReset = ZSTDirp_reset;
-
+
ZSTD_cwksp_free(ws, zc->customMem);
FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
@@ -1915,23 +1915,23 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ZSTD_cwksp_clear(ws);
- /* init params */
+ /* init params */
zc->blockState.matchState.cParams = params->cParams;
- zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
- zc->consumedSrcSize = 0;
- zc->producedCSize = 0;
- if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
- zc->appliedParams.fParams.contentSizeFlag = 0;
- DEBUGLOG(4, "pledged content size : %u ; flag : %u",
+ zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ zc->consumedSrcSize = 0;
+ zc->producedCSize = 0;
+ if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ zc->appliedParams.fParams.contentSizeFlag = 0;
+ DEBUGLOG(4, "pledged content size : %u ; flag : %u",
(unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
- zc->blockSize = blockSize;
-
+ zc->blockSize = blockSize;
+
XXH64_reset(&zc->xxhState, 0);
- zc->stage = ZSTDcs_init;
- zc->dictID = 0;
+ zc->stage = ZSTDcs_init;
+ zc->dictID = 0;
zc->dictContentSize = 0;
- ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
+ ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
/* ZSTD_wildcopy() is used to copy into the literals buffer,
* so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
@@ -1946,7 +1946,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->outBuffSize = buffOutSize;
zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
- /* ldm bucketOffsets table */
+ /* ldm bucketOffsets table */
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* TODO: avoid memset? */
size_t const numBuckets =
@@ -1954,10 +1954,10 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
params->ldmParams.bucketSizeLog);
zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
- }
+ }
/* sequences storage */
- ZSTD_referenceExternalSequences(zc, NULL, 0);
+ ZSTD_referenceExternalSequences(zc, NULL, 0);
zc->seqStore.maxNbSeq = maxNbSeq;
zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
@@ -1972,7 +1972,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
crp,
needsIndexReset,
ZSTD_resetTarget_CCtx), "");
-
+
/* ldm hash table */
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* TODO: avoid memset? */
@@ -1995,15 +1995,15 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
}
}
-/* ZSTD_invalidateRepCodes() :
- * ensures next compression will not use repcodes from previous block.
- * Note : only works with regular variant;
- * do not use with extDict variant ! */
-void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
- int i;
- for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
- assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
-}
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
+ int i;
+ for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
+ assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
+}
/* These are the approximate sizes for each strategy past which copying the
* dictionary tables into the working context is faster than using them
@@ -2099,10 +2099,10 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
}
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
- const ZSTD_CDict* cdict,
+ const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
- U64 pledgedSrcSize,
- ZSTD_buffered_policy_e zbuff)
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
{
const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
@@ -2112,7 +2112,7 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
{ unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
- /* Copy only compression parameters related to tables. */
+ /* Copy only compression parameters related to tables. */
params.cParams = *cdict_cParams;
params.cParams.windowLog = windowLog;
params.useRowMatchFinder = cdict->useRowMatchFinder;
@@ -2122,12 +2122,12 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
- }
-
+ }
+
ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
assert(params.useRowMatchFinder != ZSTD_ps_auto);
- /* copy tables */
+ /* copy tables */
{ size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
? ((size_t)1 << cdict_cParams->chainLog)
: 0;
@@ -2149,34 +2149,34 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
cdict->matchState.tagTable,
tagTableSize);
}
- }
+ }
- /* Zero the hashTable3, since the cdict never fills it */
+ /* Zero the hashTable3, since the cdict never fills it */
{ int const h3log = cctx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
- assert(cdict->matchState.hashLog3 == 0);
+ assert(cdict->matchState.hashLog3 == 0);
ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
- }
-
+ }
+
ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
- /* copy dictionary offsets */
+ /* copy dictionary offsets */
{ ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
- ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
- dstMatchState->window = srcMatchState->window;
- dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
- dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
- }
+ ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+ }
- cctx->dictID = cdict->dictID;
+ cctx->dictID = cdict->dictID;
cctx->dictContentSize = cdict->dictContentSize;
-
- /* copy block state */
+
+ /* copy block state */
ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
-
- return 0;
-}
-
+
+ return 0;
+}
+
/* We have a choice between copying the dictionary context into the working
* context, or referencing the dictionary context from the working context
* in-place. We decide here which strategy to use. */
@@ -2199,42 +2199,42 @@ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
}
}
-/*! ZSTD_copyCCtx_internal() :
- * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
- * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
- * The "context", in this case, refers to the hash and chain tables,
- * entropy tables, and dictionary references.
- * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
- * @return : 0, or an error code */
-static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
- const ZSTD_CCtx* srcCCtx,
- ZSTD_frameParameters fParams,
- U64 pledgedSrcSize,
- ZSTD_buffered_policy_e zbuff)
-{
+/*! ZSTD_copyCCtx_internal() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * The "context", in this case, refers to the hash and chain tables,
+ * entropy tables, and dictionary references.
+ * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
+ * @return : 0, or an error code */
+static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
+ const ZSTD_CCtx* srcCCtx,
+ ZSTD_frameParameters fParams,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
"Can't copy a ctx that's not in init stage.");
DEBUGLOG(5, "ZSTD_copyCCtx_internal");
ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
- { ZSTD_CCtx_params params = dstCCtx->requestedParams;
- /* Copy only compression parameters related to tables. */
- params.cParams = srcCCtx->appliedParams.cParams;
+ { ZSTD_CCtx_params params = dstCCtx->requestedParams;
+ /* Copy only compression parameters related to tables. */
+ params.cParams = srcCCtx->appliedParams.cParams;
assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
params.ldmParams = srcCCtx->appliedParams.ldmParams;
- params.fParams = fParams;
+ params.fParams = fParams;
ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
/* loadedDictSize */ 0,
ZSTDcrp_leaveDirty, zbuff);
- assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
- assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
- assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
- assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
- assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
- }
+ assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
+ assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
+ assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
+ assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
+ assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
+ }
ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
@@ -2244,7 +2244,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
0 /* forDDSDict */)
? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
: 0;
- size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
+ size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
int const h3log = srcCCtx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
@@ -2262,58 +2262,58 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
/* copy dictionary offsets */
- {
+ {
const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
- ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
- dstMatchState->window = srcMatchState->window;
- dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
- dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+ ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
}
- dstCCtx->dictID = srcCCtx->dictID;
+ dstCCtx->dictID = srcCCtx->dictID;
dstCCtx->dictContentSize = srcCCtx->dictContentSize;
- /* copy block state */
+ /* copy block state */
ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
-
+
return 0;
}
-/*! ZSTD_copyCCtx() :
- * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
- * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
- * pledgedSrcSize==0 means "unknown".
-* @return : 0, or an error code */
-size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
-{
- ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+/*! ZSTD_copyCCtx() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * pledgedSrcSize==0 means "unknown".
+* @return : 0, or an error code */
+size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
+{
+ ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
- ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
- if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
- fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
-
- return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
- fParams, pledgedSrcSize,
- zbuff);
-}
-
-
-#define ZSTD_ROWSIZE 16
+ ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
+ if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
+
+ return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
+ fParams, pledgedSrcSize,
+ zbuff);
+}
+
+
+#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
- * reduce table indexes by `reducerValue`, or squash to zero.
- * PreserveMark preserves "unsorted mark" for btlazy2 strategy.
- * It must be set to a compile-time-constant 0/1 value, so the branch is removed during inlining.
- * Presumes the table size is a multiple of ZSTD_ROWSIZE,
- * to help auto-vectorization */
-FORCE_INLINE_TEMPLATE void
-ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
-{
- int const nbRows = (int)size / ZSTD_ROWSIZE;
- int cellNb = 0;
- int rowNb;
+ * reduce table indexes by `reducerValue`, or squash to zero.
+ * PreserveMark preserves "unsorted mark" for btlazy2 strategy.
+ * It must be set to a compile-time-constant 0/1 value, so the branch is removed during inlining.
+ * Presumes the table size is a multiple of ZSTD_ROWSIZE,
+ * to help auto-vectorization */
+FORCE_INLINE_TEMPLATE void
+ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
+{
+ int const nbRows = (int)size / ZSTD_ROWSIZE;
+ int cellNb = 0;
+ int rowNb;
/* Protect special index values < ZSTD_WINDOW_START_INDEX. */
U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
- assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
-    assert(size < (1U<<31));   /* can be cast to int */
+    assert((size & (ZSTD_ROWSIZE-1)) == 0);   /* multiple of ZSTD_ROWSIZE */
+    assert(size < (1U<<31));   /* can be cast to int */
#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* To validate that the table re-use logic is sound, and that we don't
@@ -2328,9 +2328,9 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa
__msan_unpoison(table, size * sizeof(U32));
#endif
- for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
- int column;
- for (column=0; column<ZSTD_ROWSIZE; column++) {
+ for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
+ int column;
+ for (column=0; column<ZSTD_ROWSIZE; column++) {
U32 newVal;
if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
/* This write is pointless, but is required(?) for the compiler
@@ -2340,42 +2340,42 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa
newVal = 0;
} else {
newVal = table[cellNb] - reducerValue;
- }
+ }
table[cellNb] = newVal;
- cellNb++;
- } }
-}
-
-static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
-{
- ZSTD_reduceTable_internal(table, size, reducerValue, 0);
-}
-
-static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
-{
- ZSTD_reduceTable_internal(table, size, reducerValue, 1);
-}
-
+ cellNb++;
+ } }
+}
+
+static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 0);
+}
+
+static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 1);
+}
+
/*! ZSTD_reduceIndex() :
* rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
{ U32 const hSize = (U32)1 << params->cParams.hashLog;
- ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
- }
+ ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
+ }
if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
U32 const chainSize = (U32)1 << params->cParams.chainLog;
if (params->cParams.strategy == ZSTD_btlazy2)
- ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
- else
- ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
- }
+ ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
+ else
+ ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
+ }
- if (ms->hashLog3) {
- U32 const h3Size = (U32)1 << ms->hashLog3;
- ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
- }
+ if (ms->hashLog3) {
+ U32 const h3Size = (U32)1 << ms->hashLog3;
+ ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
+ }
}
@@ -2397,9 +2397,9 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
for (u=0; u<nbSeq; u++) {
U32 const llv = sequences[u].litLength;
U32 const mlv = sequences[u].mlBase;
- llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
+ llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
- mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
+ mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
}
if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
@@ -2579,54 +2579,54 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
void* entropyWorkspace, size_t entropyWkspSize,
const int bmi2)
{
- const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+ const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
unsigned* count = (unsigned*)entropyWorkspace;
FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
- const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
- const BYTE* const ofCodeTable = seqStorePtr->ofCode;
- const BYTE* const llCodeTable = seqStorePtr->llCode;
- const BYTE* const mlCodeTable = seqStorePtr->mlCode;
- BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = ostart + dstCapacity;
- BYTE* op = ostart;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
size_t lastCountSize;
entropyWorkspace = count + (MaxSeq + 1);
entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
- ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
- /* Compress literals */
- { const BYTE* const literals = seqStorePtr->litStart;
+ /* Compress literals */
+ { const BYTE* const literals = seqStorePtr->litStart;
size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart;
size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart;
/* Base suspicion of uncompressibility on ratio of literals to sequences */
unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
size_t const litSize = (size_t)(seqStorePtr->lit - literals);
- size_t const cSize = ZSTD_compressLiterals(
+ size_t const cSize = ZSTD_compressLiterals(
&prevEntropy->huf, &nextEntropy->huf,
cctxParams->cParams.strategy,
ZSTD_literalsCompressionIsDisabled(cctxParams),
- op, dstCapacity,
- literals, litSize,
+ op, dstCapacity,
+ literals, litSize,
entropyWorkspace, entropyWkspSize,
bmi2, suspectUncompressible);
FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
- assert(cSize <= dstCapacity);
- op += cSize;
- }
+ assert(cSize <= dstCapacity);
+ op += cSize;
+ }
- /* Sequences Header */
+ /* Sequences Header */
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
dstSize_tooSmall, "Can't fit seq hdr in output buf!");
if (nbSeq < 128) {
- *op++ = (BYTE)nbSeq;
+ *op++ = (BYTE)nbSeq;
} else if (nbSeq < LONGNBSEQ) {
op[0] = (BYTE)((nbSeq>>8) + 0x80);
op[1] = (BYTE)nbSeq;
@@ -2637,11 +2637,11 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
op+=3;
}
assert(op <= oend);
- if (nbSeq==0) {
+ if (nbSeq==0) {
/* Copy the old tables over as if we repeated them */
ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
return (size_t)(op - ostart);
- }
+ }
{
ZSTD_symbolEncodingTypeStats_t stats;
BYTE* seqHead = op++;
@@ -2657,15 +2657,15 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
op += stats.size;
}
- { size_t const bitstreamSize = ZSTD_encodeSequences(
+ { size_t const bitstreamSize = ZSTD_encodeSequences(
op, (size_t)(oend - op),
- CTable_MatchLength, mlCodeTable,
- CTable_OffsetBits, ofCodeTable,
- CTable_LitLength, llCodeTable,
- sequences, nbSeq,
- longOffsets, bmi2);
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq,
+ longOffsets, bmi2);
FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
- op += bitstreamSize;
+ op += bitstreamSize;
assert(op <= oend);
/* zstd versions <= 1.3.4 mistakenly report corruption when
* FSE_readNCount() receives a buffer < 4 bytes.
@@ -2682,12 +2682,12 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
"emitting an uncompressed block.");
return 0;
}
- }
-
+ }
+
DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
return (size_t)(op - ostart);
-}
-
+}
+
MEM_STATIC size_t
ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
@@ -2697,34 +2697,34 @@ ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
size_t srcSize,
void* entropyWorkspace, size_t entropyWkspSize,
int bmi2)
-{
+{
size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
seqStorePtr, prevEntropy, nextEntropy, cctxParams,
dst, dstCapacity,
entropyWorkspace, entropyWkspSize, bmi2);
if (cSize == 0) return 0;
- /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
-     * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
-     */
-    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
-        return 0;  /* block not compressed */
+    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
+     * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
+ */
+ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
+ return 0; /* block not compressed */
FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
-
- /* Check compressibility */
+
+ /* Check compressibility */
{ size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
- if (cSize >= maxCSize) return 0; /* block not compressed */
- }
+ if (cSize >= maxCSize) return 0; /* block not compressed */
+ }
DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
- return cSize;
+ return cSize;
}
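Illustrative sketch of the compressibility gate above: a compressed block is kept only if it undercuts the source size by a minimum gain. The margin below is an assumption for illustration; the real ZSTD_minGain() also depends on the strategy:

    #include <stddef.h>

    static int keep_compressed_block(size_t cSize, size_t srcSize)
    {
        size_t const minGain = (srcSize >> 6) + 2;   /* assumed margin */
        return cSize < srcSize - minGain;            /* 0 => emit a raw block */
    }
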
-/* ZSTD_selectBlockCompressor() :
- * Not static, but internal use only (used by long distance matcher)
- * assumption : strat is a valid strategy */
+/* ZSTD_selectBlockCompressor() :
+ * Not static, but internal use only (used by long distance matcher)
+ * assumption : strat is a valid strategy */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
{
static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
- { ZSTD_compressBlock_fast /* default for 0 */,
+ { ZSTD_compressBlock_fast /* default for 0 */,
ZSTD_compressBlock_fast,
ZSTD_compressBlock_doubleFast,
ZSTD_compressBlock_greedy,
@@ -2734,7 +2734,7 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
ZSTD_compressBlock_btopt,
ZSTD_compressBlock_btultra,
ZSTD_compressBlock_btultra2 },
- { ZSTD_compressBlock_fast_extDict /* default for 0 */,
+ { ZSTD_compressBlock_fast_extDict /* default for 0 */,
ZSTD_compressBlock_fast_extDict,
ZSTD_compressBlock_doubleFast_extDict,
ZSTD_compressBlock_greedy_extDict,
@@ -2764,9 +2764,9 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
NULL,
NULL,
NULL }
- };
+ };
ZSTD_blockCompressor selectedCompressor;
- ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
+ ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
@@ -2793,19 +2793,19 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
}
assert(selectedCompressor != NULL);
return selectedCompressor;
-}
+}
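
/* The selector above is a plain 2-D table lookup: one row per dictMode,
 * one column per strategy, so adding a strategy only extends the table and
 * the hot path stays branch-free. A toy analogue with hypothetical
 * function names: */
#include <stddef.h>

typedef size_t (*block_fn_sketch)(const void* src, size_t srcSize);

static size_t fast_sketch(const void* src, size_t n)   { (void)src; return n; }
static size_t greedy_sketch(const void* src, size_t n) { (void)src; return n; }

static block_fn_sketch select_sketch(int dictMode, int strat)
{
    static block_fn_sketch const table[2][2] = {
        { fast_sketch, greedy_sketch },  /* in-window (noDict) mode */
        { fast_sketch, greedy_sketch },  /* external-dictionary mode */
    };
    return table[dictMode][strat];
}
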
-static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
- const BYTE* anchor, size_t lastLLSize)
-{
+static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+ const BYTE* anchor, size_t lastLLSize)
+{
ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
- seqStorePtr->lit += lastLLSize;
+ seqStorePtr->lit += lastLLSize;
}
void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
- ssPtr->lit = ssPtr->litStart;
- ssPtr->sequences = ssPtr->sequencesStart;
+ ssPtr->lit = ssPtr->litStart;
+ ssPtr->sequences = ssPtr->sequencesStart;
ssPtr->longLengthType = ZSTD_llt_none;
}
@@ -2813,20 +2813,20 @@ typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
- ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ ZSTD_matchState_t* const ms = &zc->blockState.matchState;
DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
/* Assert that we have correctly flushed the ctx params into the ms's copy */
ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
- if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
} else {
ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
}
return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
- }
- ZSTD_resetSeqStore(&(zc->seqStore));
+ }
+ ZSTD_resetSeqStore(&(zc->seqStore));
/* required for optimal parser to read stats from dictionary */
ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
/* tell the optimal parser how we expect to compress literals */
@@ -2836,62 +2836,62 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
* and when that stops being the case, the dict must be unset */
assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
- /* limited update after a very long match */
- { const BYTE* const base = ms->window.base;
- const BYTE* const istart = (const BYTE*)src;
+ /* limited update after a very long match */
+ { const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
const U32 curr = (U32)(istart-base);
if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
if (curr > ms->nextToUpdate + 384)
ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
- }
+ }
- /* select and store sequences */
+ /* select and store sequences */
{ ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
- size_t lastLLSize;
- { int i;
- for (i = 0; i < ZSTD_REP_NUM; ++i)
- zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
- }
- if (zc->externSeqStore.pos < zc->externSeqStore.size) {
+ size_t lastLLSize;
+ { int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
+ }
+ if (zc->externSeqStore.pos < zc->externSeqStore.size) {
assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
- /* Updates ldmSeqStore.pos */
- lastLLSize =
- ZSTD_ldm_blockCompress(&zc->externSeqStore,
- ms, &zc->seqStore,
- zc->blockState.nextCBlock->rep,
+ /* Updates ldmSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&zc->externSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
zc->appliedParams.useRowMatchFinder,
src, srcSize);
- assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
+ assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
} else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
-
- ldmSeqStore.seq = zc->ldmSequences;
- ldmSeqStore.capacity = zc->maxNbLdmSequences;
- /* Updates ldmSeqStore.size */
+
+ ldmSeqStore.seq = zc->ldmSequences;
+ ldmSeqStore.capacity = zc->maxNbLdmSequences;
+ /* Updates ldmSeqStore.size */
FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
- &zc->appliedParams.ldmParams,
+ &zc->appliedParams.ldmParams,
src, srcSize), "");
- /* Updates ldmSeqStore.pos */
- lastLLSize =
- ZSTD_ldm_blockCompress(&ldmSeqStore,
- ms, &zc->seqStore,
- zc->blockState.nextCBlock->rep,
+ /* Updates ldmSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&ldmSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
zc->appliedParams.useRowMatchFinder,
src, srcSize);
- assert(ldmSeqStore.pos == ldmSeqStore.size);
- } else { /* not long range mode */
+ assert(ldmSeqStore.pos == ldmSeqStore.size);
+ } else { /* not long range mode */
ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
zc->appliedParams.useRowMatchFinder,
dictMode);
ms->ldmSeqStore = NULL;
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
- }
- { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
- ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
- } }
+ }
+ { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
+ ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
+ } }
return ZSTDbss_compress;
}
-
+
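
/* The "limited update after a very long match" branch above bounds how far
 * table insertion must catch up. A standalone sketch using the 384/192
 * constants taken from the code: */
#include <stdint.h>

static uint32_t clamp_next_to_update(uint32_t curr, uint32_t nextToUpdate)
{
    if (curr > nextToUpdate + 384) {
        uint32_t lag = curr - nextToUpdate - 384;
        if (lag > 192) lag = 192;        /* MIN(192, ...) in the original */
        nextToUpdate = curr - lag;
    }
    return nextToUpdate;
}
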
static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
{
const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
@@ -3787,7 +3787,7 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
return 0;
}
- /* encode sequences and literals */
+ /* encode sequences and literals */
cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
&zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
@@ -3922,7 +3922,7 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
}
}
-/*! ZSTD_compress_frameChunk() :
+/*! ZSTD_compress_frameChunk() :
* Compress a chunk of data into one or multiple blocks.
* All blocks will be terminated, all input will be consumed.
* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
@@ -3939,16 +3939,16 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
const BYTE* ip = (const BYTE*)src;
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
- U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+ U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
- if (cctx->appliedParams.fParams.checksumFlag && srcSize)
+ if (cctx->appliedParams.fParams.checksumFlag && srcSize)
XXH64_update(&cctx->xxhState, src, srcSize);
while (remaining) {
- ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
@@ -3962,7 +3962,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
/* Ensure hash/chain table insertion resumes no sooner than lowlimit */
- if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
+ if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
{ size_t cSize;
if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
@@ -3974,7 +3974,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
- } else {
+ } else {
cSize = ZSTD_compressBlock_internal(cctx,
op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
ip, blockSize, 1 /* frame */);
@@ -3990,19 +3990,19 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
MEM_writeLE24(op, cBlockHeader);
cSize += ZSTD_blockHeaderSize;
}
- }
+ }
- ip += blockSize;
- assert(remaining >= blockSize);
- remaining -= blockSize;
- op += cSize;
- assert(dstCapacity >= cSize);
- dstCapacity -= cSize;
+ ip += blockSize;
+ assert(remaining >= blockSize);
+ remaining -= blockSize;
+ op += cSize;
+ assert(dstCapacity >= cSize);
+ dstCapacity -= cSize;
cctx->isFirstBlock = 0;
- DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
+ DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
(unsigned)cSize);
- } }
+ } }
if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
return (size_t)(op-ostart);
@@ -4012,26 +4012,26 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
{ BYTE* const op = (BYTE*)dst;
- U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
+ U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
U32 const checksumFlag = params->fParams.checksumFlag>0;
U32 const windowSize = (U32)1 << params->cParams.windowLog;
U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
U32 const fcsCode = params->fParams.contentSizeFlag ?
- (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
+ (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
- size_t pos=0;
+ size_t pos=0;
assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
"dst buf is too small to fit worst-case frame header size.");
- DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
+ DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
!params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
if (params->format == ZSTD_f_zstd1) {
- MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
- pos = 4;
- }
+ MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+ pos = 4;
+ }
op[pos++] = frameHeaderDescriptionByte;
if (!singleSegment) op[pos++] = windowLogByte;
switch(dictIDSizeCode)
@@ -4077,37 +4077,37 @@ size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
}
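
/* ZSTD_writeFrameHeader() above packs the frame-header descriptor byte
 * from four fields: dictIDSizeCode in bits 0-1, checksumFlag in bit 2,
 * singleSegment in bit 5, fcsCode in bits 6-7 (bit layout read off the
 * expression in the diff): */
#include <stdint.h>

static uint8_t fhd_byte_sketch(unsigned dictIDSizeCode, unsigned checksumFlag,
                               unsigned singleSegment, unsigned fcsCode)
{
    return (uint8_t)(dictIDSizeCode + (checksumFlag << 2)
                   + (singleSegment << 5) + (fcsCode << 6));
}
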
-/* ZSTD_writeLastEmptyBlock() :
- * output an empty Block with end-of-frame mark to complete a frame
- * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
* or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
- */
-size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
-{
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
+{
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
"dst buf is too small to write frame trailer empty block.");
- { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
- MEM_writeLE24(dst, cBlockHeader24);
- return ZSTD_blockHeaderSize;
- }
-}
-
-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
-{
+ { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
+ MEM_writeLE24(dst, cBlockHeader24);
+ return ZSTD_blockHeaderSize;
+ }
+}
+
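
/* The 3-byte block header written by ZSTD_writeLastEmptyBlock() above:
 * bit 0 = lastBlock, bits 1-2 = block type (bt_raw assumed to be 0 here),
 * remaining bits = size 0, stored little-endian: */
#include <stdint.h>

static void last_empty_block_sketch(uint8_t* dst /* >= 3 bytes */)
{
    uint32_t const header = 1u /* lastBlock */ | (0u /* bt_raw */ << 1);
    dst[0] = (uint8_t)(header);
    dst[1] = (uint8_t)(header >> 8);
    dst[2] = (uint8_t)(header >> 16);
}
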
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+{
RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
"wrong cctx stage");
RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
parameter_unsupported,
"incompatible with ldm");
- cctx->externSeqStore.seq = seq;
- cctx->externSeqStore.size = nbSeq;
- cctx->externSeqStore.capacity = nbSeq;
- cctx->externSeqStore.pos = 0;
+ cctx->externSeqStore.seq = seq;
+ cctx->externSeqStore.size = nbSeq;
+ cctx->externSeqStore.capacity = nbSeq;
+ cctx->externSeqStore.pos = 0;
cctx->externSeqStore.posInSequence = 0;
- return 0;
-}
-
-
+ return 0;
+}
+
+
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -4116,14 +4116,14 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
size_t fhSize = 0;
- DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
+ DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
cctx->stage, (unsigned)srcSize);
RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
"missing init (ZSTD_compressBegin)");
if (frame && (cctx->stage==ZSTDcs_init)) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
- cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
+ cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
assert(fhSize <= dstCapacity);
dstCapacity -= fhSize;
@@ -4131,11 +4131,11 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
cctx->stage = ZSTDcs_ongoing;
}
- if (!srcSize) return fhSize; /* do not generate an empty block if no input */
+ if (!srcSize) return fhSize; /* do not generate an empty block if no input */
if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
ms->forceNonContiguous = 0;
- ms->nextToUpdate = ms->window.dictLimit;
+ ms->nextToUpdate = ms->window.dictLimit;
}
if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
@@ -4150,11 +4150,11 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
{ size_t const cSize = frame ?
- ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
+ ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
- cctx->consumedSrcSize += srcSize;
- cctx->producedCSize += (cSize + fhSize);
+ cctx->consumedSrcSize += srcSize;
+ cctx->producedCSize += (cSize + fhSize);
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
@@ -4164,7 +4164,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
"error : pledgedSrcSize = %u, while realSrcSize >= %u",
(unsigned)cctx->pledgedSrcSizePlusOne-1,
(unsigned)cctx->consumedSrcSize);
- }
+ }
return cSize + fhSize;
}
}
@@ -4174,15 +4174,15 @@ size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
- return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
}
-size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
{
- ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
- assert(!ZSTD_checkCParams(cParams));
- return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
+ ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
+ assert(!ZSTD_checkCParams(cParams));
+ return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
}
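
/* ZSTD_getBlockSize() above caps the block size at the window size.
 * Equivalent standalone logic; 128 KiB matches ZSTD_BLOCKSIZE_MAX: */
#include <stddef.h>

static size_t block_size_sketch(unsigned windowLog)
{
    size_t const windowSize   = (size_t)1 << windowLog;
    size_t const blockSizeMax = 128 * 1024;  /* ZSTD_BLOCKSIZE_MAX */
    return windowSize < blockSizeMax ? windowSize : blockSizeMax;
}
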
size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
@@ -4191,12 +4191,12 @@ size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const
{ size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
- return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
}
-/*! ZSTD_loadDictionaryContent() :
- * @return : 0, or an error code
- */
+/*! ZSTD_loadDictionaryContent() :
+ * @return : 0, or an error code
+ */
static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
ldmState_t* ls,
ZSTD_cwksp* ws,
@@ -4231,7 +4231,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
- ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+ ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
ms->forceNonContiguous = params->deterministicRefPrefix;
if (loadLdmDict) {
@@ -4288,7 +4288,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
assert(0); /* not possible : not a valid strategy id */
}
- ms->nextToUpdate = (U32)(iend - ms->window.base);
+ ms->nextToUpdate = (U32)(iend - ms->window.base);
return 0;
}
@@ -4325,12 +4325,12 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
unsigned hasZeroWeights = 1;
size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
dictEnd-dictPtr, &hasZeroWeights);
-
+
/* We only set the loaded table as valid if it contains all non-zero
* weights. Otherwise, we set it to check */
if (!hasZeroWeights)
bs->entropy.huf.repeatMode = HUF_repeat_valid;
-
+
RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
dictPtr += hufHeaderSize;
@@ -4379,9 +4379,9 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
}
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
- bs->rep[0] = MEM_readLE32(dictPtr+0);
- bs->rep[1] = MEM_readLE32(dictPtr+4);
- bs->rep[2] = MEM_readLE32(dictPtr+8);
+ bs->rep[0] = MEM_readLE32(dictPtr+0);
+ bs->rep[1] = MEM_readLE32(dictPtr+4);
+ bs->rep[2] = MEM_readLE32(dictPtr+8);
dictPtr += 12;
{ size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
@@ -4437,12 +4437,12 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
- }
+ }
return dictID;
}
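
/* The dictionary loaders above read the three repcodes and the dictID with
 * MEM_readLE32. A portable little-endian 32-bit read doing the same job: */
#include <stdint.h>
#include <string.h>

static uint32_t read_le32_sketch(const void* p)
{
    uint8_t b[4];
    memcpy(b, p, 4);
    return (uint32_t)b[0] | ((uint32_t)b[1] << 8)
         | ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}
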
/** ZSTD_compress_insertDictionary() :
-* @return : dictID, or an error code */
+* @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
ZSTD_matchState_t* ms,
@@ -4454,29 +4454,29 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
ZSTD_dictTableLoadMethod_e dtlm,
void* workspace)
{
- DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
+ DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
if ((dict==NULL) || (dictSize<8)) {
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
return 0;
}
- ZSTD_reset_compressedBlockState(bs);
+ ZSTD_reset_compressedBlockState(bs);
- /* dict restricted modes */
- if (dictContentType == ZSTD_dct_rawContent)
+ /* dict restricted modes */
+ if (dictContentType == ZSTD_dct_rawContent)
return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
-
- if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
- if (dictContentType == ZSTD_dct_auto) {
- DEBUGLOG(4, "raw content dictionary detected");
+
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_auto) {
+ DEBUGLOG(4, "raw content dictionary detected");
return ZSTD_loadDictionaryContent(
ms, ls, ws, params, dict, dictSize, dtlm);
- }
+ }
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
- assert(0); /* impossible */
+ assert(0); /* impossible */
}
-
- /* dict as full zstd dictionary */
+
+ /* dict as full zstd dictionary */
return ZSTD_loadZstdDictionary(
bs, ms, ws, params, dict, dictSize, dtlm, workspace);
}
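
/* ZSTD_compress_insertDictionary() above routes on the first 4 bytes: a
 * buffer starting with ZSTD_MAGIC_DICTIONARY (0xEC30A437) is loaded as a
 * full zstd dictionary, anything else is treated as raw content when
 * ZSTD_dct_auto is in effect. Sketch of the detection: */
#include <stdint.h>
#include <string.h>

static int is_full_dict_sketch(const void* dict, size_t dictSize)
{
    uint8_t b[4];
    if (dict == NULL || dictSize < 8) return 0;  /* too small to load at all */
    memcpy(b, dict, 4);
    return ((uint32_t)b[0] | ((uint32_t)b[1] << 8) | ((uint32_t)b[2] << 16)
          | ((uint32_t)b[3] << 24)) == 0xEC30A437u;
}
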
@@ -4485,7 +4485,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
/*! ZSTD_compressBegin_internal() :
- * @return : 0, or an error code */
+ * @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
@@ -4499,9 +4499,9 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
#endif
DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
- /* params are supposed to be fully validated at this point */
+ /* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
- assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if ( (cdict)
&& (cdict->dictContentSize > 0)
&& ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
@@ -4510,8 +4510,8 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
|| cdict->compressionLevel == 0)
&& (params->attachDictPref != ZSTD_dictForceLoad) ) {
return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
- }
-
+ }
+
FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
dictContentSize,
ZSTDcrp_makeClean, zbuff) , "");
@@ -4527,29 +4527,29 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
dictContentType, dtlm, cctx->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= UINT_MAX);
- cctx->dictID = (U32)dictID;
+ cctx->dictID = (U32)dictID;
cctx->dictContentSize = dictContentSize;
- }
- return 0;
+ }
+ return 0;
}
-size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
- const void* dict, size_t dictSize,
- ZSTD_dictContentType_e dictContentType,
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
- const ZSTD_CDict* cdict,
+ const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
- unsigned long long pledgedSrcSize)
-{
+ unsigned long long pledgedSrcSize)
+{
DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
- /* compression parameters verification and optimization */
+ /* compression parameters verification and optimization */
FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
- return ZSTD_compressBegin_internal(cctx,
+ return ZSTD_compressBegin_internal(cctx,
dict, dictSize, dictContentType, dtlm,
- cdict,
- params, pledgedSrcSize,
- ZSTDb_not_buffered);
-}
+ cdict,
+ params, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
/*! ZSTD_compressBegin_advanced() :
* @return : 0, or an error code */
@@ -4559,9 +4559,9 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
{
ZSTD_CCtx_params cctxParams;
ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
- return ZSTD_compressBegin_advanced_internal(cctx,
+ return ZSTD_compressBegin_advanced_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
- NULL /*cdict*/,
+ NULL /*cdict*/,
&cctxParams, pledgedSrcSize);
}
@@ -4577,9 +4577,9 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}
-size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
+size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
- return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
+ return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
}
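
/* The begin/continue/end triple above forms the low-level chunked API.
 * A minimal usage sketch; these entry points live in the static-linking
 * section of zstd.h, and every call can return an error code (checks
 * elided for brevity): */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t chunked_sketch(ZSTD_CCtx* cctx, void* dst, size_t dstCap,
                             const void* a, size_t an,
                             const void* b, size_t bn)
{
    size_t pos = 0;
    ZSTD_compressBegin(cctx, 3 /* compression level */);
    pos += ZSTD_compressContinue(cctx, (char*)dst + pos, dstCap - pos, a, an);
    pos += ZSTD_compressEnd(cctx, (char*)dst + pos, dstCap - pos, b, bn);
    return pos;
}
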
@@ -4592,7 +4592,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
BYTE* op = ostart;
size_t fhSize = 0;
- DEBUGLOG(4, "ZSTD_writeEpilogue");
+ DEBUGLOG(4, "ZSTD_writeEpilogue");
RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
/* special case : empty frame */
@@ -4613,7 +4613,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
dstCapacity -= ZSTD_blockHeaderSize;
}
- if (cctx->appliedParams.fParams.checksumFlag) {
+ if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
@@ -4654,16 +4654,16 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
const void* src, size_t srcSize)
{
size_t endResult;
- size_t const cSize = ZSTD_compressContinue_internal(cctx,
- dst, dstCapacity, src, srcSize,
- 1 /* frame mode */, 1 /* last chunk */);
+ size_t const cSize = ZSTD_compressContinue_internal(cctx,
+ dst, dstCapacity, src, srcSize,
+ 1 /* frame mode */, 1 /* last chunk */);
FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
- DEBUGLOG(4, "end of frame : controlling src size");
+ DEBUGLOG(4, "end of frame : controlling src size");
RETURN_ERROR_IF(
cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
srcSize_wrong,
@@ -4681,7 +4681,7 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
const void* dict,size_t dictSize,
ZSTD_parameters params)
{
- DEBUGLOG(4, "ZSTD_compress_advanced");
+ DEBUGLOG(4, "ZSTD_compress_advanced");
FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
return ZSTD_compress_advanced_internal(cctx,
@@ -4691,19 +4691,19 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
&cctx->simpleApiParams);
}
-/* Internal */
-size_t ZSTD_compress_advanced_internal(
- ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict,size_t dictSize,
+/* Internal */
+size_t ZSTD_compress_advanced_internal(
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
const ZSTD_CCtx_params* params)
{
DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
params, srcSize, ZSTDb_not_buffered) , "");
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
@@ -4725,12 +4725,12 @@ size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel)
-{
+{
DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
assert(cctx != NULL);
- return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
-}
-
+ return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
+}
+
size_t ZSTD_compress(void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel)
@@ -4753,12 +4753,12 @@ size_t ZSTD_compress(void* dst, size_t dstCapacity,
/* ===== Dictionary API ===== */
-/*! ZSTD_estimateCDictSize_advanced() :
- * Estimate the amount of memory needed to create a dictionary with the following arguments */
-size_t ZSTD_estimateCDictSize_advanced(
- size_t dictSize, ZSTD_compressionParameters cParams,
- ZSTD_dictLoadMethod_e dictLoadMethod)
-{
+/*! ZSTD_estimateCDictSize_advanced() :
+ * Estimate the amount of memory needed to create a dictionary with the following arguments */
+size_t ZSTD_estimateCDictSize_advanced(
+ size_t dictSize, ZSTD_compressionParameters cParams,
+ ZSTD_dictLoadMethod_e dictLoadMethod)
+{
DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
@@ -4768,14 +4768,14 @@ size_t ZSTD_estimateCDictSize_advanced(
/* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
-}
+}
-size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
-{
+size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
+{
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
- return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
-}
-
+ return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
+}
+
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support sizeof on NULL */
@@ -4785,33 +4785,33 @@ size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
+ ZSTD_cwksp_sizeof(&cdict->workspace);
}
-static size_t ZSTD_initCDict_internal(
- ZSTD_CDict* cdict,
- const void* dictBuffer, size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_dictContentType_e dictContentType,
+static size_t ZSTD_initCDict_internal(
+ ZSTD_CDict* cdict,
+ const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
ZSTD_CCtx_params params)
{
DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
assert(!ZSTD_checkCParams(params.cParams));
cdict->matchState.cParams = params.cParams;
cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
- if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
- cdict->dictContent = dictBuffer;
- } else {
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
+ cdict->dictContent = dictBuffer;
+ } else {
void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
- cdict->dictContent = internalBuffer;
+ cdict->dictContent = internalBuffer;
ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
- }
- cdict->dictContentSize = dictSize;
+ }
+ cdict->dictContentSize = dictSize;
cdict->dictContentType = dictContentType;
cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
- /* Reset the state to no dictionary */
- ZSTD_reset_compressedBlockState(&cdict->cBlockState);
+ /* Reset the state to no dictionary */
+ ZSTD_reset_compressedBlockState(&cdict->cBlockState);
FORWARD_IF_ERROR(ZSTD_reset_matchState(
&cdict->matchState,
&cdict->workspace,
@@ -4820,33 +4820,33 @@ static size_t ZSTD_initCDict_internal(
ZSTDcrp_makeClean,
ZSTDirp_reset,
ZSTD_resetTarget_CDict), "");
- /* (Maybe) load the dictionary
+ /* (Maybe) load the dictionary
* Skips loading the dictionary if it is < 8 bytes.
- */
+ */
{ params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
- params.fParams.contentSizeFlag = 1;
- { size_t const dictID = ZSTD_compress_insertDictionary(
+ params.fParams.contentSizeFlag = 1;
+ { size_t const dictID = ZSTD_compress_insertDictionary(
&cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
&params, cdict->dictContent, cdict->dictContentSize,
dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
- assert(dictID <= (size_t)(U32)-1);
- cdict->dictID = (U32)dictID;
- }
- }
-
- return 0;
-}
-
+ assert(dictID <= (size_t)(U32)-1);
+ cdict->dictID = (U32)dictID;
+ }
+ }
+
+ return 0;
+}
+
static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_compressionParameters cParams,
ZSTD_paramSwitch_e useRowMatchFinder,
U32 enableDedicatedDictSearch,
ZSTD_customMem customMem)
-{
+{
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
-
+
{ size_t const workspaceSize =
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
@@ -4856,7 +4856,7 @@ static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
ZSTD_cwksp ws;
ZSTD_CDict* cdict;
-
+
if (!workspace) {
ZSTD_customFree(workspace, customMem);
return NULL;
@@ -4867,7 +4867,7 @@ static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
assert(cdict != NULL);
ZSTD_cwksp_move(&cdict->workspace, &ws);
- cdict->customMem = customMem;
+ cdict->customMem = customMem;
cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
cdict->useRowMatchFinder = useRowMatchFinder;
return cdict;
@@ -4952,21 +4952,21 @@ ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionL
return cdict;
}
-ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
-{
+ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
+{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
- ZSTD_dlm_byRef, ZSTD_dct_auto,
- cParams, ZSTD_defaultCMem);
+ ZSTD_dlm_byRef, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
if (cdict)
cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
return cdict;
-}
-
+}
+
size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support free on NULL */
- { ZSTD_customMem const cMem = cdict->customMem;
+ { ZSTD_customMem const cMem = cdict->customMem;
int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
ZSTD_cwksp_free(&cdict->workspace, cMem);
if (!cdictInWorkspace) {
@@ -4976,26 +4976,26 @@ size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
}
}
-/*! ZSTD_initStaticCDict() :
- * Generate a digested dictionary in the provided memory area.
- * workspace: The memory area to emplace the dictionary into.
- * Provided pointer must be 8-byte aligned.
- * It must outlive dictionary usage.
- * workspaceSize: Use ZSTD_estimateCDictSize()
- * to determine how large workspace must be.
- * cParams : use ZSTD_getCParams() to transform a compression level
- * into its relevant cParams.
- * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
- * Note : there is no corresponding "free" function.
- * Since workspace was allocated externally, it must be freed externally.
- */
-const ZSTD_CDict* ZSTD_initStaticCDict(
- void* workspace, size_t workspaceSize,
- const void* dict, size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_dictContentType_e dictContentType,
- ZSTD_compressionParameters cParams)
-{
+/*! ZSTD_initStaticCDict() :
+ * Generate a digested dictionary in the provided memory area.
+ * workspace: The memory area to emplace the dictionary into.
+ * Provided pointer must be 8-byte aligned.
+ * It must outlive dictionary usage.
+ * workspaceSize: Use ZSTD_estimateCDictSize()
+ * to determine how large workspace must be.
+ * cParams : use ZSTD_getCParams() to transform a compression level
+ * into its relevant cParams.
+ * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
+ * Note : there is no corresponding "free" function.
+ * Since workspace was allocated externally, it must be freed externally.
+ */
+const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams)
+{
ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
/* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
@@ -5007,7 +5007,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_CDict* cdict;
ZSTD_CCtx_params params;
- if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
{
ZSTD_cwksp ws;
@@ -5017,27 +5017,27 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_cwksp_move(&cdict->workspace, &ws);
}
- DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
+ DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
(unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
- if (workspaceSize < neededSize) return NULL;
-
+ if (workspaceSize < neededSize) return NULL;
+
ZSTD_CCtxParams_init(&params, 0);
params.cParams = cParams;
params.useRowMatchFinder = useRowMatchFinder;
cdict->useRowMatchFinder = useRowMatchFinder;
- if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
- dict, dictSize,
+ if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
dictLoadMethod, dictContentType,
params) ))
- return NULL;
-
- return cdict;
+ return NULL;
+
+ return cdict;
}
-ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
- assert(cdict != NULL);
+ assert(cdict != NULL);
return cdict->matchState.cParams;
}
@@ -5055,9 +5055,9 @@ unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
* Implementation of various ZSTD_compressBegin_usingCDict* functions.
*/
static size_t ZSTD_compressBegin_usingCDict_internal(
- ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
- ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
-{
+ ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+ ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
ZSTD_CCtx_params cctxParams;
DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal");
RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
@@ -5074,7 +5074,7 @@ static size_t ZSTD_compressBegin_usingCDict_internal(
pledgedSrcSize,
cdict->dictContentSize);
ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
- }
+ }
/* Increase window log to fit the entire dictionary and source if the
* source size is known. Limit the increase to 19, which is the
* window log for compression level 1 with the largest source size.
@@ -5089,8 +5089,8 @@ static size_t ZSTD_compressBegin_usingCDict_internal(
cdict,
&cctxParams, pledgedSrcSize,
ZSTDb_not_buffered);
-}
-
+}
+
/* ZSTD_compressBegin_usingCDict_advanced() :
* This function is DEPRECATED.
@@ -5102,26 +5102,26 @@ size_t ZSTD_compressBegin_usingCDict_advanced(
return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
}
-/* ZSTD_compressBegin_usingCDict() :
+/* ZSTD_compressBegin_usingCDict() :
* cdict must be != NULL */
-size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
-{
- ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
-}
-
+}
+
/*! ZSTD_compress_usingCDict_internal():
* Implementation of various ZSTD_compress_usingCDict* functions.
*/
static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
-{
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
- return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
-}
-
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
/*! ZSTD_compress_usingCDict_advanced():
* This function is DEPRECATED.
*/
@@ -5134,16 +5134,16 @@ size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
}
/*! ZSTD_compress_usingCDict() :
- * Compression using a digested Dictionary.
- * Faster startup than ZSTD_compress_usingDict(); recommended when the same dictionary is used multiple times.
- * Note that compression parameters are decided at CDict creation time
- * while frame parameters are hardcoded */
+ * Compression using a digested Dictionary.
+ * Faster startup than ZSTD_compress_usingDict(); recommended when the same dictionary is used multiple times.
+ * Note that compression parameters are decided at CDict creation time
+ * while frame parameters are hardcoded */
size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict)
{
- ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}
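
/* Usage sketch for the digested-dictionary path above (public stable API;
 * error handling elided): */
#include <zstd.h>

static size_t compress_with_cdict_sketch(void* dst, size_t dstCap,
                                         const void* src, size_t srcSize,
                                         const void* dict, size_t dictSize,
                                         int level)
{
    ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, level);
    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
    size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCap,
                                                  src, srcSize, cdict);
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    return cSize;
}
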
@@ -5155,46 +5155,46 @@ size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
ZSTD_CStream* ZSTD_createCStream(void)
{
- DEBUGLOG(3, "ZSTD_createCStream");
- return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
+ DEBUGLOG(3, "ZSTD_createCStream");
+ return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
}
-ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
+ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
{
- return ZSTD_initStaticCCtx(workspace, workspaceSize);
-}
+ return ZSTD_initStaticCCtx(workspace, workspaceSize);
+}
-ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
-{ /* CStream and CCtx are now same object */
- return ZSTD_createCCtx_advanced(customMem);
+ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+{ /* CStream and CCtx are now same object */
+ return ZSTD_createCCtx_advanced(customMem);
}
size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
{
- return ZSTD_freeCCtx(zcs); /* same object */
+ return ZSTD_freeCCtx(zcs); /* same object */
}
-
+
/*====== Initialization ======*/
-size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
-size_t ZSTD_CStreamOutSize(void)
+size_t ZSTD_CStreamOutSize(void)
{
- return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bit hash */;
-}
+ return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bit hash */;
+}
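
/* The two hints above size one full block of input and the worst-case
 * compressed output for that block. Typical allocation (public API;
 * allocation-failure checks elided): */
#include <stdlib.h>
#include <zstd.h>

static void stream_buffers_sketch(void** inBuf, size_t* inSize,
                                  void** outBuf, size_t* outSize)
{
    *inSize  = ZSTD_CStreamInSize();   /* == ZSTD_BLOCKSIZE_MAX */
    *outSize = ZSTD_CStreamOutSize();  /* bound for one compressed block */
    *inBuf   = malloc(*inSize);
    *outBuf  = malloc(*outSize);
}
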
static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
-{
+{
if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
return ZSTD_cpm_attachDict;
else
return ZSTD_cpm_noAttachDict;
}
-/* ZSTD_resetCStream():
- * pledgedSrcSize == 0 means "unknown" */
+/* ZSTD_resetCStream():
+ * pledgedSrcSize == 0 means "unknown" */
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
{
/* temporary : 0 interpreted as "unknown" during transition period.
@@ -5206,40 +5206,40 @@ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
return 0;
-}
-
-/*! ZSTD_initCStream_internal() :
- * Note : for lib/compress only. Used by zstdmt_compress.c.
- * Assumption 1 : params are valid
- * Assumption 2 : either dict, or cdict, is defined, not both */
-size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
- const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
+}
+
+/*! ZSTD_initCStream_internal() :
+ * Note : for lib/compress only. Used by zstdmt_compress.c.
+ * Assumption 1 : params are valid
+ * Assumption 2 : either dict, or cdict, is defined, not both */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
unsigned long long pledgedSrcSize)
-{
- DEBUGLOG(4, "ZSTD_initCStream_internal");
+{
+ DEBUGLOG(4, "ZSTD_initCStream_internal");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
zcs->requestedParams = *params;
- assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if (dict) {
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
- } else {
+ } else {
/* Dictionary is cleared if !cdict */
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
- }
+ }
return 0;
-}
-
-/* ZSTD_initCStream_usingCDict_advanced() :
- * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
-size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
- const ZSTD_CDict* cdict,
- ZSTD_frameParameters fParams,
- unsigned long long pledgedSrcSize)
-{
- DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
+}
+
+/* ZSTD_initCStream_usingCDict_advanced() :
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
+size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
zcs->requestedParams.fParams = fParams;
@@ -5250,21 +5250,21 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
/* note : cdict must outlive compression session */
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
{
- DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
return 0;
}
-
-/* ZSTD_initCStream_advanced() :
- * pledgedSrcSize must be exact.
- * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+
+/* ZSTD_initCStream_advanced() :
+ * pledgedSrcSize must be exact.
+ * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
* dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
-size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
- const void* dict, size_t dictSize,
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pss)
-{
+{
/* for compatibility with older programs relying on this behavior.
* Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
* This line will be removed in the future.
@@ -5277,8 +5277,8 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
return 0;
-}
-
+}
+
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
DEBUGLOG(4, "ZSTD_initCStream_usingDict");
@@ -5288,7 +5288,7 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t di
return 0;
}
-size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
/* temporary : 0 interpreted as "unknown" during transition period.
* Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
@@ -5305,7 +5305,7 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigne
size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
- DEBUGLOG(4, "ZSTD_initCStream");
+ DEBUGLOG(4, "ZSTD_initCStream");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
@@ -5321,24 +5321,24 @@ static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
return hintInSize;
}
-/** ZSTD_compressStream_generic():
+/** ZSTD_compressStream_generic():
* internal function for all *compressStream*() variants
- * non-static, because it can be called from zstdmt_compress.c
- * @return : hint size for next input */
+ * non-static, because it can be called from zstdmt_compress.c
+ * @return : hint size for next input */
static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective const flushMode)
{
- const char* const istart = (const char*)input->src;
+ const char* const istart = (const char*)input->src;
const char* const iend = input->size != 0 ? istart + input->size : istart;
const char* ip = input->pos != 0 ? istart + input->pos : istart;
- char* const ostart = (char*)output->dst;
+ char* const ostart = (char*)output->dst;
char* const oend = output->size != 0 ? ostart + output->size : ostart;
char* op = output->pos != 0 ? ostart + output->pos : ostart;
U32 someMoreWork = 1;
- /* check expectations */
+ /* check expectations */
DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
assert(zcs->inBuff != NULL);
@@ -5348,54 +5348,54 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
assert(zcs->outBuff != NULL);
assert(zcs->outBuffSize > 0);
}
- assert(output->pos <= output->size);
- assert(input->pos <= input->size);
+ assert(output->pos <= output->size);
+ assert(input->pos <= input->size);
assert((U32)flushMode <= (U32)ZSTD_e_end);
-
+
while (someMoreWork) {
- switch(zcs->streamStage)
+ switch(zcs->streamStage)
{
- case zcss_init:
+ case zcss_init:
RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
case zcss_load:
- if ( (flushMode == ZSTD_e_end)
+ if ( (flushMode == ZSTD_e_end)
&& ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
|| zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
- && (zcs->inBuffPos == 0) ) {
- /* shortcut to compression pass directly into output buffer */
- size_t const cSize = ZSTD_compressEnd(zcs,
- op, oend-op, ip, iend-ip);
+ && (zcs->inBuffPos == 0) ) {
+ /* shortcut to compression pass directly into output buffer */
+ size_t const cSize = ZSTD_compressEnd(zcs,
+ op, oend-op, ip, iend-ip);
DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
- ip = iend;
- op += cSize;
- zcs->frameEnded = 1;
+ ip = iend;
+ op += cSize;
+ zcs->frameEnded = 1;
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- someMoreWork = 0; break;
- }
+ someMoreWork = 0; break;
+ }
/* complete loading into inBuffer in buffered mode */
if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
- size_t const loaded = ZSTD_limitCopy(
- zcs->inBuff + zcs->inBuffPos, toLoad,
- ip, iend-ip);
+ size_t const loaded = ZSTD_limitCopy(
+ zcs->inBuff + zcs->inBuffPos, toLoad,
+ ip, iend-ip);
zcs->inBuffPos += loaded;
if (loaded != 0)
ip += loaded;
- if ( (flushMode == ZSTD_e_continue)
- && (zcs->inBuffPos < zcs->inBuffTarget) ) {
- /* not enough input to fill full block : stop here */
- someMoreWork = 0; break;
- }
- if ( (flushMode == ZSTD_e_flush)
- && (zcs->inBuffPos == zcs->inToCompress) ) {
- /* empty */
- someMoreWork = 0; break;
- }
- }
+ if ( (flushMode == ZSTD_e_continue)
+ && (zcs->inBuffPos < zcs->inBuffTarget) ) {
+ /* not enough input to fill full block : stop here */
+ someMoreWork = 0; break;
+ }
+ if ( (flushMode == ZSTD_e_flush)
+ && (zcs->inBuffPos == zcs->inToCompress) ) {
+ /* empty */
+ someMoreWork = 0; break;
+ }
+ }
/* compress current block (note : this stage cannot be stopped in the middle) */
- DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
+ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
{ int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
void* cDst;
size_t cSize;
@@ -5404,7 +5404,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
? zcs->inBuffPos - zcs->inToCompress
: MIN((size_t)(iend - ip), zcs->blockSize);
if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
- cDst = op; /* compress into output buffer, to skip flush stage */
+ cDst = op; /* compress into output buffer, to skip flush stage */
else
cDst = zcs->outBuff, oSize = zcs->outBuffSize;
if (inputBuffered) {
@@ -5439,55 +5439,55 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
if (lastBlock)
assert(ip == iend);
}
- if (cDst == op) { /* no need to flush */
- op += cSize;
- if (zcs->frameEnded) {
- DEBUGLOG(5, "Frame completed directly in outBuffer");
- someMoreWork = 0;
+ if (cDst == op) { /* no need to flush */
+ op += cSize;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed directly in outBuffer");
+ someMoreWork = 0;
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- }
- break;
- }
+ }
+ break;
+ }
zcs->outBuffContentSize = cSize;
zcs->outBuffFlushedSize = 0;
- zcs->streamStage = zcss_flush; /* pass-through to flush stage */
+ zcs->streamStage = zcss_flush; /* pass-through to flush stage */
}
ZSTD_FALLTHROUGH;
case zcss_flush:
- DEBUGLOG(5, "flush stage");
+ DEBUGLOG(5, "flush stage");
assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
{ size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
- zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
- DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
+ zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+ DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
(unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
if (flushed)
op += flushed;
zcs->outBuffFlushedSize += flushed;
- if (toFlush!=flushed) {
- /* flush not fully completed, presumably because dst is too small */
- assert(op==oend);
- someMoreWork = 0;
- break;
- }
+ if (toFlush!=flushed) {
+ /* flush not fully completed, presumably because dst is too small */
+ assert(op==oend);
+ someMoreWork = 0;
+ break;
+ }
zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
- if (zcs->frameEnded) {
- DEBUGLOG(5, "Frame completed on flush");
- someMoreWork = 0;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed on flush");
+ someMoreWork = 0;
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- break;
- }
- zcs->streamStage = zcss_load;
+ break;
+ }
+ zcs->streamStage = zcss_load;
break;
}
- default: /* impossible */
- assert(0);
+ default: /* impossible */
+ assert(0);
}
}
- input->pos = ip - istart;
- output->pos = op - ostart;
+ input->pos = ip - istart;
+ output->pos = op - ostart;
if (zcs->frameEnded) return 0;
return ZSTD_nextInputSizeHint(zcs);
}
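
/* A minimal driver for the state machine above, going through the public
 * ZSTD_compressStream2 entry point; flush_sink is a hypothetical output
 * callback and error checks are elided: */
#include <zstd.h>

static void drain_sketch(ZSTD_CCtx* cctx, ZSTD_inBuffer* in,
                         ZSTD_outBuffer* out,
                         void (*flush_sink)(const void*, size_t))
{
    size_t remaining;
    do {
        out->pos = 0;
        remaining = ZSTD_compressStream2(cctx, out, in, ZSTD_e_end);
        flush_sink(out->dst, out->pos);   /* consume what was produced */
    } while (remaining != 0);             /* 0 => frame fully flushed */
}
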
@@ -5635,42 +5635,42 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp)
-{
+{
DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
- /* check conditions */
+ /* check conditions */
RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer");
RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
assert(cctx != NULL);
-
- /* transparent initialization stage */
- if (cctx->streamStage == zcss_init) {
+
+ /* transparent initialization stage */
+ if (cctx->streamStage == zcss_init) {
FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
}
/* end of transparent initialization stage */
-
+
FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
- /* compression stage */
-#ifdef ZSTD_MULTITHREAD
- if (cctx->appliedParams.nbWorkers > 0) {
+ /* compression stage */
+#ifdef ZSTD_MULTITHREAD
+ if (cctx->appliedParams.nbWorkers > 0) {
size_t flushMin;
- if (cctx->cParamsChanged) {
- ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
- cctx->cParamsChanged = 0;
- }
+ if (cctx->cParamsChanged) {
+ ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
+ cctx->cParamsChanged = 0;
+ }
for (;;) {
size_t const ipos = input->pos;
size_t const opos = output->pos;
flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
cctx->consumedSrcSize += (U64)(input->pos - ipos);
cctx->producedCSize += (U64)(output->pos - opos);
- if ( ZSTD_isError(flushMin)
- || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
+ if ( ZSTD_isError(flushMin)
+ || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
if (flushMin == 0)
ZSTD_CCtx_trace(cctx, 0);
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
- }
+ }
FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed");
if (endOp == ZSTD_e_continue) {
@@ -5697,28 +5697,28 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_setBufferExpectations(cctx, output, input);
return flushMin;
}
-#endif
+#endif
FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
DEBUGLOG(5, "completed ZSTD_compressStream2");
ZSTD_setBufferExpectations(cctx, output, input);
- return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
-}
-
+ return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
+}
+
size_t ZSTD_compressStream2_simpleArgs (
- ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity, size_t* dstPos,
- const void* src, size_t srcSize, size_t* srcPos,
- ZSTD_EndDirective endOp)
-{
- ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
- ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
/* ZSTD_compressStream2() will check validity of dstPos and srcPos */
size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
- *dstPos = output.pos;
- *srcPos = input.pos;
- return cErr;
-}
-
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
+
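
ZSTD_compressStream2_simpleArgs() above exists for bindings that cannot easily pass ZSTD_inBuffer/ZSTD_outBuffer structs: positions travel through plain size_t in/out parameters instead. A hedged usage sketch (cctx and both buffers are assumed to be set up by the caller):

    #include <zstd.h>

    /* Sketch: push one chunk through the simpleArgs variant. Semantics are
     * identical to ZSTD_compressStream2(); only the bookkeeping differs. */
    static size_t push_chunk(ZSTD_CCtx* cctx,
                             void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize,
                             size_t* consumed)
    {
        size_t dstPos = 0, srcPos = 0;
        size_t const hint = ZSTD_compressStream2_simpleArgs(cctx,
                                dst, dstCapacity, &dstPos,
                                src, srcSize, &srcPos,
                                ZSTD_e_continue);
        if (ZSTD_isError(hint)) return hint;
        *consumed = srcPos;   /* bytes of src actually ingested */
        return dstPos;        /* bytes of dst actually produced */
    }
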
size_t ZSTD_compress2(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
@@ -5748,7 +5748,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx,
return oPos;
}
}
-
+
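
ZSTD_compress2(), whose tail appears in the hunk above, is the one-shot entry point of the advanced API: unlike ZSTD_compress(), it honours sticky parameters previously set on the context. A minimal sketch:

    #include <zstd.h>

    /* Sketch: one-shot compression driven by sticky context parameters. */
    static size_t one_shot(ZSTD_CCtx* cctx,
                           void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
        /* returns the compressed size, or an error code (test with ZSTD_isError) */
        return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    }
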
typedef struct {
U32 idx; /* Index in array of ZSTD_Sequence */
U32 posInSequence; /* Position within sequence at idx */
@@ -6156,26 +6156,26 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci
/*====== Finalize ======*/
/*! ZSTD_flushStream() :
- * @return : amount of data remaining to flush */
+ * @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
- ZSTD_inBuffer input = { NULL, 0, 0 };
+ ZSTD_inBuffer input = { NULL, 0, 0 };
return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}
size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
- ZSTD_inBuffer input = { NULL, 0, 0 };
+ ZSTD_inBuffer input = { NULL, 0, 0 };
size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
/* single thread mode : attempt to calculate remaining to flush more precisely */
- { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
+ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
- return toFlush;
+ return toFlush;
}
}
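
Since ZSTD_endStream() returns the number of bytes still to flush, callers whose destination buffer may be smaller than the frame epilogue must loop until it reports 0. A minimal sketch:

    #include <stdio.h>
    #include <zstd.h>

    /* Sketch: close a frame, retrying while output space runs out. */
    static int finish_frame(ZSTD_CStream* zcs, FILE* fout,
                            void* outBuf, size_t outCap)
    {
        size_t remaining;
        do {
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            remaining = ZSTD_endStream(zcs, &output);
            if (ZSTD_isError(remaining)) return 1;
            fwrite(outBuf, 1, output.pos, fout);
        } while (remaining != 0);   /* 0 ==> epilogue fully written */
        return 0;
    }
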
@@ -6277,12 +6277,12 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel,
DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
/* row */
- if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
+ if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
else row = compressionLevel;
- { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
+ { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
/* acceleration factor */
if (compressionLevel < 0) {
@@ -6310,10 +6310,10 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
ZSTD_parameters params;
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
- DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
+ DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
ZSTD_memset(&params, 0, sizeof(params));
params.cParams = cParams;
- params.fParams.contentSizeFlag = 1;
+ params.fParams.contentSizeFlag = 1;
return params;
}
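
The level-table lookup above is observable through the public ZSTD_getCParams(), exposed in zstd.h under ZSTD_STATIC_LINKING_ONLY. A small probe, assuming a static build of the library:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <zstd.h>

    int main(void)
    {
        /* srcSizeHint == 0 means "unknown size"; dictSize == 0 means no dictionary */
        ZSTD_compressionParameters const cp = ZSTD_getCParams(3, 0, 0);
        printf("wlog=%u clog=%u hlog=%u slog=%u minMatch=%u tlen=%u strat=%d\n",
               cp.windowLog, cp.chainLog, cp.hashLog, cp.searchLog,
               cp.minMatch, cp.targetLength, (int)cp.strategy);
        return 0;
    }
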
diff --git a/contrib/libs/zstd/lib/compress/zstd_compress_internal.h b/contrib/libs/zstd/lib/compress/zstd_compress_internal.h
index c406e794bd..4a493f015e 100644
--- a/contrib/libs/zstd/lib/compress/zstd_compress_internal.h
+++ b/contrib/libs/zstd/lib/compress/zstd_compress_internal.h
@@ -1,60 +1,60 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/* This header contains definitions
- * that shall **only** be used by modules within lib/compress.
- */
-
-#ifndef ZSTD_COMPRESS_H
-#define ZSTD_COMPRESS_H
-
-/*-*************************************
-* Dependencies
-***************************************/
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This header contains definitions
+ * that shall **only** be used by modules within lib/compress.
+ */
+
+#ifndef ZSTD_COMPRESS_H
+#define ZSTD_COMPRESS_H
+
+/*-*************************************
+* Dependencies
+***************************************/
#include "../common/zstd_internal.h"
#include "zstd_cwksp.h"
-#ifdef ZSTD_MULTITHREAD
-# include "zstdmt_compress.h"
-#endif
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*-*************************************
-* Constants
-***************************************/
-#define kSearchStrength 8
-#define HASH_READ_SIZE 8
+#ifdef ZSTD_MULTITHREAD
+# include "zstdmt_compress.h"
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-*************************************
+* Constants
+***************************************/
+#define kSearchStrength 8
+#define HASH_READ_SIZE 8
#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
- It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
- It's not a big deal though : candidate will just be sorted again.
+ It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
+ It's not a big deal though : candidate will just be sorted again.
Additionally, candidate position 1 will be lost.
- But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
+ But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
-
-
-/*-*************************************
-* Context memory management
-***************************************/
-typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
-typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
-
-typedef struct ZSTD_prefixDict_s {
- const void* dict;
- size_t dictSize;
- ZSTD_dictContentType_e dictContentType;
-} ZSTD_prefixDict;
-
-typedef struct {
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
+
+typedef struct ZSTD_prefixDict_s {
+ const void* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+} ZSTD_prefixDict;
+
+typedef struct {
void* dictBuffer;
void const* dict;
size_t dictSize;
@@ -68,19 +68,19 @@ typedef struct {
} ZSTD_hufCTables_t;
typedef struct {
- FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
- FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
- FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
- FSE_repeat offcode_repeatMode;
- FSE_repeat matchlength_repeatMode;
- FSE_repeat litlength_repeatMode;
+ FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+ FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+ FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+ FSE_repeat offcode_repeatMode;
+ FSE_repeat matchlength_repeatMode;
+ FSE_repeat litlength_repeatMode;
} ZSTD_fseCTables_t;
typedef struct {
ZSTD_hufCTables_t huf;
ZSTD_fseCTables_t fse;
-} ZSTD_entropyCTables_t;
-
+} ZSTD_entropyCTables_t;
+
/***********************************************
* Entropy buffer statistics structs and funcs *
***********************************************/
@@ -89,7 +89,7 @@ typedef struct {
* huffman tree description in hufDesBuffer.
* hufDesSize refers to the size of huffman tree description in bytes.
* This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */
-typedef struct {
+typedef struct {
symbolEncodingType_e hType;
BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
size_t hufDesSize;
@@ -131,9 +131,9 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
typedef struct {
U32 off; /* Offset sumtype code for the match, using ZSTD_storeSeq() format */
U32 len; /* Raw length of match */
-} ZSTD_match_t;
-
-typedef struct {
+} ZSTD_match_t;
+
+typedef struct {
U32 offset; /* Offset of sequence */
U32 litLength; /* Length of literals prior to match */
U32 matchLength; /* Raw length of match */
@@ -151,28 +151,28 @@ typedef struct {
UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
typedef struct {
- int price;
- U32 off;
- U32 mlen;
- U32 litlen;
- U32 rep[ZSTD_REP_NUM];
-} ZSTD_optimal_t;
-
+ int price;
+ U32 off;
+ U32 mlen;
+ U32 litlen;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_optimal_t;
+
typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
-typedef struct {
- /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
+typedef struct {
+ /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
unsigned* litFreq; /* table of literals statistics, of size 256 */
unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
-
- U32 litSum; /* nb of literals */
- U32 litLengthSum; /* nb of litLength codes */
- U32 matchLengthSum; /* nb of matchLength codes */
- U32 offCodeSum; /* nb of offset codes */
+
+ U32 litSum; /* nb of literals */
+ U32 litLengthSum; /* nb of litLength codes */
+ U32 matchLengthSum; /* nb of matchLength codes */
+ U32 offCodeSum; /* nb of offset codes */
U32 litSumBasePrice; /* to compare to log2(litfreq) */
U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */
U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */
@@ -180,14 +180,14 @@ typedef struct {
ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
ZSTD_paramSwitch_e literalCompressionMode;
-} optState_t;
-
-typedef struct {
- ZSTD_entropyCTables_t entropy;
- U32 rep[ZSTD_REP_NUM];
-} ZSTD_compressedBlockState_t;
-
-typedef struct {
+} optState_t;
+
+typedef struct {
+ ZSTD_entropyCTables_t entropy;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_compressedBlockState_t;
+
+typedef struct {
BYTE const* nextSrc; /* next block here to continue on current prefix */
BYTE const* base; /* All regular indexes relative to this position */
BYTE const* dictBase; /* extDict indexes relative to this position */
@@ -197,8 +197,8 @@ typedef struct {
* ZSTD_window_init(). Useful for debugging coredumps
* and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY.
*/
-} ZSTD_window_t;
-
+} ZSTD_window_t;
+
#define ZSTD_WINDOW_START_INDEX 2
typedef struct ZSTD_matchState_t ZSTD_matchState_t;
@@ -221,33 +221,33 @@ struct ZSTD_matchState_t {
U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
- U32* hashTable;
- U32* hashTable3;
- U32* chainTable;
+ U32* hashTable;
+ U32* hashTable3;
+ U32* chainTable;
U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
int dedicatedDictSearch; /* Indicates whether this matchState is using the
* dedicated dictionary search structure.
*/
- optState_t opt; /* optimal parser state */
+ optState_t opt; /* optimal parser state */
const ZSTD_matchState_t* dictMatchState;
ZSTD_compressionParameters cParams;
const rawSeqStore_t* ldmSeqStore;
};
-
-typedef struct {
- ZSTD_compressedBlockState_t* prevCBlock;
- ZSTD_compressedBlockState_t* nextCBlock;
- ZSTD_matchState_t matchState;
-} ZSTD_blockState_t;
-
-typedef struct {
- U32 offset;
- U32 checksum;
-} ldmEntry_t;
-
-typedef struct {
+
+typedef struct {
+ ZSTD_compressedBlockState_t* prevCBlock;
+ ZSTD_compressedBlockState_t* nextCBlock;
+ ZSTD_matchState_t matchState;
+} ZSTD_blockState_t;
+
+typedef struct {
+ U32 offset;
+ U32 checksum;
+} ldmEntry_t;
+
+typedef struct {
BYTE const* split;
U32 hash;
U32 checksum;
@@ -257,57 +257,57 @@ typedef struct {
#define LDM_BATCH_SIZE 64
typedef struct {
- ZSTD_window_t window; /* State for the window round buffer management */
- ldmEntry_t* hashTable;
+ ZSTD_window_t window; /* State for the window round buffer management */
+ ldmEntry_t* hashTable;
U32 loadedDictEnd;
- BYTE* bucketOffsets; /* Next position in bucket to insert entry */
+ BYTE* bucketOffsets; /* Next position in bucket to insert entry */
size_t splitIndices[LDM_BATCH_SIZE];
ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
-} ldmState_t;
-
-typedef struct {
+} ldmState_t;
+
+typedef struct {
ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
- U32 hashLog; /* Log size of hashTable */
- U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
- U32 minMatchLength; /* Minimum match length */
+ U32 hashLog; /* Log size of hashTable */
+ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
+ U32 minMatchLength; /* Minimum match length */
U32 hashRateLog; /* Log number of entries to skip */
- U32 windowLog; /* Window log for the LDM */
-} ldmParams_t;
-
-typedef struct {
+ U32 windowLog; /* Window log for the LDM */
+} ldmParams_t;
+
+typedef struct {
int collectSequences;
ZSTD_Sequence* seqStart;
size_t seqIndex;
size_t maxSequences;
} SeqCollector;
-struct ZSTD_CCtx_params_s {
- ZSTD_format_e format;
- ZSTD_compressionParameters cParams;
- ZSTD_frameParameters fParams;
-
- int compressionLevel;
- int forceWindow; /* force back-references to respect limit of
- * 1<<wLog, even for dictionary */
+struct ZSTD_CCtx_params_s {
+ ZSTD_format_e format;
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+
+ int compressionLevel;
+ int forceWindow; /* force back-references to respect limit of
+ * 1<<wLog, even for dictionary */
size_t targetCBlockSize; /* Tries to fit compressed block size to be around targetCBlockSize.
* No target when targetCBlockSize == 0.
* There is no guarantee on compressed block size */
int srcSizeHint; /* User's best guess of source size.
* Hint is not valid when srcSizeHint == 0.
* There is no guarantee that hint is close to actual source size */
-
+
ZSTD_dictAttachPref_e attachDictPref;
ZSTD_paramSwitch_e literalCompressionMode;
- /* Multithreading: used to pass parameters to mtctx */
+ /* Multithreading: used to pass parameters to mtctx */
int nbWorkers;
size_t jobSize;
int overlapLog;
int rsyncable;
-
- /* Long distance matching parameters */
- ldmParams_t ldmParams;
-
+
+ /* Long distance matching parameters */
+ ldmParams_t ldmParams;
+
/* Dedicated dict search algorithm trigger */
int enableDedicatedDictSearch;
@@ -328,10 +328,10 @@ struct ZSTD_CCtx_params_s {
/* Always load a dictionary in ext-dict mode (not prefix mode)? */
int deterministicRefPrefix;
- /* Internal use, for createCCtxParams() and freeCCtxParams() only */
- ZSTD_customMem customMem;
-}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
-
+ /* Internal use, for createCCtxParams() and freeCCtxParams() only */
+ ZSTD_customMem customMem;
+}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
+
#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
@@ -361,66 +361,66 @@ typedef struct {
ZSTD_entropyCTablesMetadata_t entropyMetadata;
} ZSTD_blockSplitCtx;
-struct ZSTD_CCtx_s {
- ZSTD_compressionStage_e stage;
- int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
- int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
- ZSTD_CCtx_params requestedParams;
- ZSTD_CCtx_params appliedParams;
+struct ZSTD_CCtx_s {
+ ZSTD_compressionStage_e stage;
+ int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+ ZSTD_CCtx_params requestedParams;
+ ZSTD_CCtx_params appliedParams;
ZSTD_CCtx_params simpleApiParams; /* Param storage used by the simple API - not sticky. Must only be used in top-level simple API functions for storage. */
- U32 dictID;
+ U32 dictID;
size_t dictContentSize;
ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
- size_t blockSize;
- unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
- unsigned long long consumedSrcSize;
- unsigned long long producedCSize;
- XXH64_state_t xxhState;
- ZSTD_customMem customMem;
+ size_t blockSize;
+ unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
+ unsigned long long consumedSrcSize;
+ unsigned long long producedCSize;
+ XXH64_state_t xxhState;
+ ZSTD_customMem customMem;
ZSTD_threadPool* pool;
- size_t staticSize;
+ size_t staticSize;
SeqCollector seqCollector;
int isFirstBlock;
int initialized;
-
- seqStore_t seqStore; /* sequences storage ptrs */
- ldmState_t ldmState; /* long distance matching state */
- rawSeq* ldmSequences; /* Storage for the ldm output sequences */
- size_t maxNbLdmSequences;
- rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
- ZSTD_blockState_t blockState;
+
+ seqStore_t seqStore; /* sequences storage ptrs */
+ ldmState_t ldmState; /* long distance matching state */
+ rawSeq* ldmSequences; /* Storage for the ldm output sequences */
+ size_t maxNbLdmSequences;
+ rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
+ ZSTD_blockState_t blockState;
U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
-
+
/* Whether we are streaming or not */
ZSTD_buffered_policy_e bufferedPolicy;
- /* streaming */
- char* inBuff;
- size_t inBuffSize;
- size_t inToCompress;
- size_t inBuffPos;
- size_t inBuffTarget;
- char* outBuff;
- size_t outBuffSize;
- size_t outBuffContentSize;
- size_t outBuffFlushedSize;
- ZSTD_cStreamStage streamStage;
- U32 frameEnded;
-
+ /* streaming */
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inToCompress;
+ size_t inBuffPos;
+ size_t inBuffTarget;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outBuffContentSize;
+ size_t outBuffFlushedSize;
+ ZSTD_cStreamStage streamStage;
+ U32 frameEnded;
+
/* Stable in/out buffer verification */
ZSTD_inBuffer expectedInBuffer;
size_t expectedOutBufferSize;
- /* Dictionary */
+ /* Dictionary */
ZSTD_localDict localDict;
- const ZSTD_CDict* cdict;
- ZSTD_prefixDict prefixDict; /* single-usage dictionary */
-
- /* Multi-threading */
-#ifdef ZSTD_MULTITHREAD
- ZSTDMT_CCtx* mtctx;
-#endif
+ const ZSTD_CDict* cdict;
+ ZSTD_prefixDict prefixDict; /* single-usage dictionary */
+
+ /* Multi-threading */
+#ifdef ZSTD_MULTITHREAD
+ ZSTDMT_CCtx* mtctx;
+#endif
/* Tracing */
#if ZSTD_TRACE
@@ -429,10 +429,10 @@ struct ZSTD_CCtx_s {
/* Workspace for block splitter */
ZSTD_blockSplitCtx blockSplitCtx;
-};
-
+};
+
typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
-
+
typedef enum {
ZSTD_noDict = 0,
ZSTD_extDict = 1,
@@ -460,43 +460,43 @@ typedef enum {
*/
} ZSTD_cParamMode_e;
-typedef size_t (*ZSTD_blockCompressor) (
- ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+typedef size_t (*ZSTD_blockCompressor) (
+ ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
-
-
-MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
-{
- static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 16, 17, 17, 18, 18, 19, 19,
- 20, 20, 20, 20, 21, 21, 21, 21,
- 22, 22, 22, 22, 22, 22, 22, 22,
- 23, 23, 23, 23, 23, 23, 23, 23,
- 24, 24, 24, 24, 24, 24, 24, 24,
- 24, 24, 24, 24, 24, 24, 24, 24 };
- static const U32 LL_deltaCode = 19;
- return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
-}
-
-/* ZSTD_MLcode() :
- * note : mlBase = matchLength - MINMATCH;
- * because that is the format in which it is stored in seqStore->sequences */
-MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
-{
- static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
- 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
- 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
- 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
- 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
- 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
- static const U32 ML_deltaCode = 36;
- return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
-}
-
+
+
+MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
+{
+ static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24 };
+ static const U32 LL_deltaCode = 19;
+ return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+}
+
+/* ZSTD_MLcode() :
+ * note : mlBase = matchLength - MINMATCH;
+ * because that is the format in which it is stored in seqStore->sequences */
+MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
+{
+ static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
+ static const U32 ML_deltaCode = 36;
+ return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
+}
+
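
Both tables above implement the same shape: small lengths map through a table whose buckets widen progressively, and lengths beyond the table fall back to highbit + delta, so symbol codes grow logarithmically. A standalone re-statement of the litLength mapping, for illustration only (the authoritative one is the header code above):

    #include <stdio.h>

    static unsigned highbit32(unsigned v) { unsigned r = 0; while (v >>= 1) r++; return r; }

    static unsigned ll_code(unsigned litLength)
    {
        static const unsigned char LL_Code[64] = {
             0,  1,  2,  3,  4,  5,  6,  7,
             8,  9, 10, 11, 12, 13, 14, 15,
            16, 16, 17, 17, 18, 18, 19, 19,
            20, 20, 20, 20, 21, 21, 21, 21,
            22, 22, 22, 22, 22, 22, 22, 22,
            23, 23, 23, 23, 23, 23, 23, 23,
            24, 24, 24, 24, 24, 24, 24, 24,
            24, 24, 24, 24, 24, 24, 24, 24 };
        return (litLength > 63) ? highbit32(litLength) + 19 : LL_Code[litLength];
    }

    int main(void)
    {
        /* 64..127 all map to 25, 128..255 to 26, ... ; prints: 10 24 25 26 */
        printf("%u %u %u %u\n", ll_code(10), ll_code(63), ll_code(64), ll_code(200));
        return 0;
    }
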
/* ZSTD_cParam_withinBounds:
* @return 1 if value is within cParam bounds,
* 0 otherwise */
@@ -590,30 +590,30 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con
#define STORED_TO_OFFBASE(o) ((o)+1)
#define OFFBASE_TO_STORED(o) ((o)-1)
-/*! ZSTD_storeSeq() :
+/*! ZSTD_storeSeq() :
* Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t.
* @offBase_minus1 : Users should use employ macros STORE_REPCODE_X and STORE_OFFSET().
* @matchLength : must be >= MINMATCH
* Allowed to overread literals up to litLimit.
-*/
+*/
HINT_INLINE UNUSED_ATTR void
ZSTD_storeSeq(seqStore_t* seqStorePtr,
size_t litLength, const BYTE* literals, const BYTE* litLimit,
U32 offBase_minus1,
size_t matchLength)
-{
+{
BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
BYTE const* const litEnd = literals + litLength;
#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
- static const BYTE* g_start = NULL;
- if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
- { U32 const pos = (U32)((const BYTE*)literals - g_start);
+ static const BYTE* g_start = NULL;
+ if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
+ { U32 const pos = (U32)((const BYTE*)literals - g_start);
DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
pos, (U32)litLength, (U32)matchLength, (U32)offBase_minus1);
- }
-#endif
+ }
+#endif
assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
- /* copy Literals */
+ /* copy Literals */
assert(seqStorePtr->maxNbLit <= 128 KB);
assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
assert(literals + litLength <= litLimit);
@@ -629,20 +629,20 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
} else {
ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
}
- seqStorePtr->lit += litLength;
-
- /* literal Length */
- if (litLength>0xFFFF) {
+ seqStorePtr->lit += litLength;
+
+ /* literal Length */
+ if (litLength>0xFFFF) {
assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
seqStorePtr->longLengthType = ZSTD_llt_literalLength;
- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- }
- seqStorePtr->sequences[0].litLength = (U16)litLength;
-
- /* match offset */
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1);
-
- /* match Length */
+
+ /* match Length */
assert(matchLength >= MINMATCH);
{ size_t const mlBase = matchLength - MINMATCH;
if (mlBase>0xFFFF) {
@@ -651,11 +651,11 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
}
seqStorePtr->sequences[0].mlBase = (U16)mlBase;
- }
-
- seqStorePtr->sequences++;
-}
-
+ }
+
+ seqStorePtr->sequences++;
+}
+
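
Each stored sequence therefore carries three fields: litLength (a U16, with a longLength escape past 0xFFFF), offBase (the offBase_minus1 sum-type plus one), and mlBase (matchLength minus MINMATCH, with the same escape). A toy encoder showing just that packing; MINMATCH == 3 in zstd, and the struct is illustrative, not the real seqDef:

    /* Toy model of the fields ZSTD_storeSeq() writes; not the real seqDef. */
    typedef struct {
        unsigned short litLength;   /* escaped via longLengthType when > 0xFFFF */
        unsigned short mlBase;      /* matchLength - MINMATCH, same escape */
        unsigned       offBase;     /* STORED_TO_OFFBASE(offBase_minus1) == +1 */
    } seq_sketch;

    static seq_sketch pack_seq(unsigned litLength, unsigned offBase_minus1,
                               unsigned matchLength)
    {
        seq_sketch s;
        s.litLength = (unsigned short)litLength;
        s.offBase   = offBase_minus1 + 1;
        s.mlBase    = (unsigned short)(matchLength - 3 /* MINMATCH */);
        return s;
    }
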
/* ZSTD_updateRep() :
* updates in-place @rep (array of repeat offsets)
* @offBase_minus1 : sum-type, with same numeric representation as ZSTD_storeSeq()
@@ -679,7 +679,7 @@ ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
}
}
}
-
+
typedef struct repcodes_s {
U32 rep[3];
} repcodes_t;
@@ -694,14 +694,14 @@ ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0
}
-/*-*************************************
-* Match length counter
-***************************************/
-static unsigned ZSTD_NbCommonBytes (size_t val)
-{
- if (MEM_isLittleEndian()) {
- if (MEM_64bits()) {
-# if defined(_MSC_VER) && defined(_WIN64)
+/*-*************************************
+* Match length counter
+***************************************/
+static unsigned ZSTD_NbCommonBytes (size_t val)
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+# if defined(_MSC_VER) && defined(_WIN64)
# if STATIC_BMI2
return _tzcnt_u64(val) >> 3;
# else
@@ -714,21 +714,21 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
__assume(0);
}
# endif
-# elif defined(__GNUC__) && (__GNUC__ >= 4)
- return (__builtin_ctzll((U64)val) >> 3);
-# else
- static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
- 0, 3, 1, 3, 1, 4, 2, 7,
- 0, 2, 3, 6, 1, 5, 3, 5,
- 1, 3, 4, 4, 2, 5, 6, 7,
- 7, 0, 1, 2, 3, 3, 4, 6,
- 2, 6, 5, 5, 3, 4, 5, 6,
- 7, 1, 2, 4, 6, 4, 4, 5,
- 7, 2, 6, 5, 7, 6, 7, 7 };
- return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-# endif
- } else { /* 32 bits */
-# if defined(_MSC_VER)
+# elif defined(__GNUC__) && (__GNUC__ >= 4)
+ return (__builtin_ctzll((U64)val) >> 3);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
+ 0, 3, 1, 3, 1, 4, 2, 7,
+ 0, 2, 3, 6, 1, 5, 3, 5,
+ 1, 3, 4, 4, 2, 5, 6, 7,
+ 7, 0, 1, 2, 3, 3, 4, 6,
+ 2, 6, 5, 5, 3, 4, 5, 6,
+ 7, 1, 2, 4, 6, 4, 4, 5,
+ 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ } else { /* 32 bits */
+# if defined(_MSC_VER)
if (val != 0) {
unsigned long r;
_BitScanForward(&r, (U32)val);
@@ -737,19 +737,19 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
/* Should not reach this code path */
__assume(0);
}
-# elif defined(__GNUC__) && (__GNUC__ >= 3)
- return (__builtin_ctz((U32)val) >> 3);
-# else
- static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
- 3, 2, 2, 1, 3, 2, 0, 1,
- 3, 3, 1, 2, 2, 2, 2, 0,
- 3, 1, 2, 0, 1, 0, 1, 1 };
- return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-# endif
- }
- } else { /* Big Endian CPU */
- if (MEM_64bits()) {
-# if defined(_MSC_VER) && defined(_WIN64)
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_ctz((U32)val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
+ 3, 2, 2, 1, 3, 2, 0, 1,
+ 3, 3, 1, 2, 2, 2, 2, 0,
+ 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+# if defined(_MSC_VER) && defined(_WIN64)
# if STATIC_BMI2
return _lzcnt_u64(val) >> 3;
# else
@@ -762,18 +762,18 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
__assume(0);
}
# endif
-# elif defined(__GNUC__) && (__GNUC__ >= 4)
- return (__builtin_clzll(val) >> 3);
-# else
- unsigned r;
-            const unsigned n32 = sizeof(size_t)*4;   /* computed this way because the compiler complains in 32-bit mode */
- if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
- if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
- r += (!val);
- return r;
-# endif
- } else { /* 32 bits */
-# if defined(_MSC_VER)
+# elif defined(__GNUC__) && (__GNUC__ >= 4)
+ return (__builtin_clzll(val) >> 3);
+# else
+ unsigned r;
+            const unsigned n32 = sizeof(size_t)*4;   /* computed this way because the compiler complains in 32-bit mode */
+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } else { /* 32 bits */
+# if defined(_MSC_VER)
if (val != 0) {
unsigned long r;
_BitScanReverse(&r, (unsigned long)val);
@@ -782,100 +782,100 @@ static unsigned ZSTD_NbCommonBytes (size_t val)
/* Should not reach this code path */
__assume(0);
}
-# elif defined(__GNUC__) && (__GNUC__ >= 3)
- return (__builtin_clz((U32)val) >> 3);
-# else
- unsigned r;
- if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
- r += (!val);
- return r;
-# endif
- } }
-}
-
-
-MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
-{
- const BYTE* const pStart = pIn;
- const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
-
- if (pIn < pInLoopLimit) {
- { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
- if (diff) return ZSTD_NbCommonBytes(diff); }
- pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
- while (pIn < pInLoopLimit) {
- size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
- if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
- pIn += ZSTD_NbCommonBytes(diff);
- return (size_t)(pIn - pStart);
- } }
- if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
- if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
- if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
- return (size_t)(pIn - pStart);
-}
-
-/** ZSTD_count_2segments() :
- * can count match length with `ip` & `match` in 2 different segments.
- *  convention : on reaching mEnd, match count continues starting from iStart
- */
-MEM_STATIC size_t
-ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
- const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
-{
- const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
- size_t const matchLength = ZSTD_count(ip, match, vEnd);
- if (match + matchLength != mEnd) return matchLength;
+# elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_clz((U32)val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } }
+}
+
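
Every branch above decodes the same quantity: given x = a XOR b for two machine words, the number of equal leading bytes is the trailing-zero count divided by 8 on little-endian targets (leading-zero count on big-endian), with De Bruijn multiplication as the portable fallback. A little-endian demo using the GCC/Clang builtin:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char a[9] = "abcdWXYZ";
        const char b[9] = "abcdQRST";
        unsigned long long va, vb;
        memcpy(&va, a, 8);
        memcpy(&vb, b, 8);
        /* bytes 0..3 match, byte 4 differs -> prints 4 (little-endian assumed) */
        printf("%d common bytes\n", (int)(__builtin_ctzll(va ^ vb) >> 3));
        return 0;
    }
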
+
+MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
+{
+ const BYTE* const pStart = pIn;
+ const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
+
+ if (pIn < pInLoopLimit) {
+ { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (diff) return ZSTD_NbCommonBytes(diff); }
+ pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
+ while (pIn < pInLoopLimit) {
+ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
+ pIn += ZSTD_NbCommonBytes(diff);
+ return (size_t)(pIn - pStart);
+ } }
+ if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (size_t)(pIn - pStart);
+}
+
+/** ZSTD_count_2segments() :
+ * can count match length with `ip` & `match` in 2 different segments.
+ *  convention : on reaching mEnd, match count continues starting from iStart
+ */
+MEM_STATIC size_t
+ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
+ const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
+{
+ const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
+ size_t const matchLength = ZSTD_count(ip, match, vEnd);
+ if (match + matchLength != mEnd) return matchLength;
DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
- return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
-}
-
-
-/*-*************************************
- * Hashes
- ***************************************/
-static const U32 prime3bytes = 506832829U;
-static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
-MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
-
-static const U32 prime4bytes = 2654435761U;
-static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
-static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
-
-static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
-static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
-
-static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
-
-static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
-static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
-
-static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
-
+ return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
+}
+
+
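
ZSTD_count_2segments() extends ZSTD_count() across a segment boundary: the match is followed up to mEnd inside the dictionary segment, then counting resumes at iStart in the current prefix. A naive byte-by-byte reference with the same convention (unoptimized, for illustration):

    /* Reference version of the 2-segment counting convention. */
    static size_t count_2segments_ref(const unsigned char* ip, const unsigned char* match,
                                      const unsigned char* iEnd, const unsigned char* mEnd,
                                      const unsigned char* iStart)
    {
        size_t len = 0;
        while (match < mEnd && ip < iEnd && *ip == *match) { ip++; match++; len++; }
        if (match == mEnd) {          /* hit the end of the dictionary segment */
            match = iStart;           /* ...continue matching inside the prefix */
            while (ip < iEnd && *ip == *match) { ip++; match++; len++; }
        }
        return len;
    }
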
+/*-*************************************
+ * Hashes
+ ***************************************/
+static const U32 prime3bytes = 506832829U;
+static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+
+static const U32 prime4bytes = 2654435761U;
+static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+
+static const U64 prime5bytes = 889523592379ULL;
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime7bytes = 58295818150454627ULL;
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
MEM_STATIC FORCE_INLINE_ATTR
size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
-{
- switch(mls)
- {
- default:
- case 4: return ZSTD_hash4Ptr(p, hBits);
- case 5: return ZSTD_hash5Ptr(p, hBits);
- case 6: return ZSTD_hash6Ptr(p, hBits);
- case 7: return ZSTD_hash7Ptr(p, hBits);
- case 8: return ZSTD_hash8Ptr(p, hBits);
- }
-}
-
+{
+ switch(mls)
+ {
+ default:
+ case 4: return ZSTD_hash4Ptr(p, hBits);
+ case 5: return ZSTD_hash5Ptr(p, hBits);
+ case 6: return ZSTD_hash6Ptr(p, hBits);
+ case 7: return ZSTD_hash7Ptr(p, hBits);
+ case 8: return ZSTD_hash8Ptr(p, hBits);
+ }
+}
+
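
Every ZSTD_hashN above follows one recipe: shift the N meaningful bytes to the top of the word, multiply by a large odd constant, and keep the top h bits, i.e. multiplicative (Fibonacci) hashing. The 4-byte case in isolation:

    #include <stdio.h>
    #include <stdint.h>

    /* The 4-byte case of the multiplicative scheme above, standalone. */
    static uint32_t hash4(uint32_t u, uint32_t hBits)
    {
        return (u * 2654435761U) >> (32 - hBits);   /* prime4bytes ~= 2^32 / phi */
    }

    int main(void)
    {
        /* adjacent inputs scatter far apart in a 2^16-entry table */
        printf("%u %u %u\n",
               hash4(0x61626364U, 16), hash4(0x61626365U, 16), hash4(0x61626366U, 16));
        return 0;
    }
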
/** ZSTD_ipow() :
* Return base^exponent.
*/
@@ -934,32 +934,32 @@ MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64
return hash;
}
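
ZSTD_ipow() and ZSTD_rollingHash_rotate() (hunk above) implement a Rabin-Karp style rolling hash: removing the oldest byte costs one multiply by prime^(windowLen-1), and appending a new byte costs one multiply-add. A generic sketch of the same update rule; the constant here is illustrative, not zstd's prime8bytes, and the character offset zstd adds is omitted:

    #include <stdio.h>
    #include <stdint.h>

    #define PRIME 0x9E3779B185EBCA87ULL   /* illustrative odd constant */

    static uint64_t roll(uint64_t h, uint8_t out, uint8_t in, uint64_t primePow)
    {
        h -= out * primePow;   /* remove contribution of the leaving byte */
        h *= PRIME;            /* shift every remaining weight up by one */
        h += in;               /* append the entering byte */
        return h;
    }

    int main(void)
    {
        const uint8_t data[] = "abcdef";
        const size_t win = 3;
        uint64_t primePow = 1, h = 0;
        size_t i;
        for (i = 1; i < win; i++) primePow *= PRIME;       /* PRIME^(win-1) */
        for (i = 0; i < win; i++) h = h * PRIME + data[i]; /* hash of "abc" */
        for (i = win; i < 6; i++) {
            h = roll(h, data[i - win], data[i], primePow);
            printf("window \"%.3s\" -> %llu\n",
                   (const char*)&data[i - win + 1], (unsigned long long)h);
        }
        return 0;
    }
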
-/*-*************************************
-* Round buffer management
-***************************************/
+/*-*************************************
+* Round buffer management
+***************************************/
#if (ZSTD_WINDOWLOG_MAX_64 > 31)
# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
#endif
-/* Max current allowed */
-#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
-/* Maximum chunk size before overflow correction needs to be called again */
-#define ZSTD_CHUNKSIZE_MAX \
- ( ((U32)-1) /* Maximum ending current index */ \
- - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */
-
-/**
- * ZSTD_window_clear():
- * Clears the window containing the history by simply setting it to empty.
- */
-MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
-{
- size_t const endT = (size_t)(window->nextSrc - window->base);
- U32 const end = (U32)endT;
-
- window->lowLimit = end;
- window->dictLimit = end;
-}
-
+/* Max current allowed */
+#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
+/* Maximum chunk size before overflow correction needs to be called again */
+#define ZSTD_CHUNKSIZE_MAX \
+ ( ((U32)-1) /* Maximum ending current index */ \
+ - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */
+
+/**
+ * ZSTD_window_clear():
+ * Clears the window containing the history by simply setting it to empty.
+ */
+MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
+{
+ size_t const endT = (size_t)(window->nextSrc - window->base);
+ U32 const end = (U32)endT;
+
+ window->lowLimit = end;
+ window->dictLimit = end;
+}
+
MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
{
return window.dictLimit == ZSTD_WINDOW_START_INDEX &&
@@ -967,16 +967,16 @@ MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
(window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX;
}
-/**
- * ZSTD_window_hasExtDict():
- * Returns non-zero if the window has a non-empty extDict.
- */
-MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
-{
- return window.lowLimit < window.dictLimit;
-}
-
-/**
+/**
+ * ZSTD_window_hasExtDict():
+ * Returns non-zero if the window has a non-empty extDict.
+ */
+MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
+{
+ return window.lowLimit < window.dictLimit;
+}
+
+/**
* ZSTD_matchState_dictMode():
* Inspects the provided matchState and figures out what dictMode should be
* passed to the compressor.
@@ -1038,17 +1038,17 @@ MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
}
/**
- * ZSTD_window_needOverflowCorrection():
- * Returns non-zero if the indices are getting too large and need overflow
- * protection.
- */
-MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+ * ZSTD_window_needOverflowCorrection():
+ * Returns non-zero if the indices are getting too large and need overflow
+ * protection.
+ */
+MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
U32 cycleLog,
U32 maxDist,
U32 loadedDictEnd,
void const* src,
- void const* srcEnd)
-{
+ void const* srcEnd)
+{
U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
if (ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
if (ZSTD_window_canOverflowCorrect(window, cycleLog, maxDist, loadedDictEnd, src)) {
@@ -1056,39 +1056,39 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
}
}
return curr > ZSTD_CURRENT_MAX;
-}
-
-/**
- * ZSTD_window_correctOverflow():
- * Reduces the indices to protect from index overflow.
- * Returns the correction made to the indices, which must be applied to every
- * stored index.
- *
- * The least significant cycleLog bits of the indices must remain the same,
- * which may be 0. Every index up to maxDist in the past must be valid.
- */
-MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
- U32 maxDist, void const* src)
-{
- /* preemptive overflow correction:
- * 1. correction is large enough:
- * lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
- * 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
- *
- * current - newCurrent
- * > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
- * > (3<<29) - (1<<chainLog)
- * > (3<<29) - (1<<30) (NOTE: chainLog <= 30)
- * > 1<<29
- *
- * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
- * After correction, current is less than (1<<chainLog + 1<<windowLog).
- * In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
- * In 32-bit mode we are safe, because (chainLog <= 29), so
- * ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
- * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
- * windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
- */
+}
+
+/**
+ * ZSTD_window_correctOverflow():
+ * Reduces the indices to protect from index overflow.
+ * Returns the correction made to the indices, which must be applied to every
+ * stored index.
+ *
+ * The least significant cycleLog bits of the indices must remain the same,
+ * which may be 0. Every index up to maxDist in the past must be valid.
+ */
+MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
+ U32 maxDist, void const* src)
+{
+ /* preemptive overflow correction:
+ * 1. correction is large enough:
+ * lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
+ * 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
+ *
+ * current - newCurrent
+ * > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
+ * > (3<<29) - (1<<chainLog)
+ * > (3<<29) - (1<<30) (NOTE: chainLog <= 30)
+ * > 1<<29
+ *
+ * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
+ * After correction, current is less than (1<<chainLog + 1<<windowLog).
+ * In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
+ * In 32-bit mode we are safe, because (chainLog <= 29), so
+ * ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
+ * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
+ * windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
+ */
U32 const cycleSize = 1u << cycleLog;
U32 const cycleMask = cycleSize - 1;
U32 const curr = (U32)((BYTE const*)src - window->base);
@@ -1112,9 +1112,9 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
/* Loose bound, should be around 1<<29 (see above) */
assert(correction > 1<<28);
}
-
- window->base += correction;
- window->dictBase += correction;
+
+ window->base += correction;
+ window->dictBase += correction;
if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) {
window->lowLimit = ZSTD_WINDOW_START_INDEX;
} else {
@@ -1125,7 +1125,7 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
} else {
window->dictLimit -= correction;
}
-
+
/* Ensure we can still reference the full window. */
assert(newCurrent >= maxDist);
assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX);
@@ -1135,15 +1135,15 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
++window->nbOverflowCorrections;
- DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
- window->lowLimit);
- return correction;
-}
-
-/**
- * ZSTD_window_enforceMaxDist():
- * Updates lowLimit so that:
- * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
+ DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
+ window->lowLimit);
+ return correction;
+}
+
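
A worked instance of the arithmetic in the comment above may help; the numbers are illustrative, and the real routine additionally accounts for maxDist and ZSTD_WINDOW_START_INDEX:

    #include <stdio.h>

    int main(void)
    {
        unsigned const cycleLog  = 20;                 /* derived from chainLog */
        unsigned const cycleMask = (1u << cycleLog) - 1;
        unsigned const windowLog = 27;
        unsigned const curr      = 0x70000123u;        /* index grew too large */
        /* keep the low cycleLog bits, leave one full window of valid history */
        unsigned const newCurrent = (curr & cycleMask) + (1u << windowLog);
        unsigned const correction = curr - newCurrent; /* every index shrinks by this */
        /* 0x68000000 here, comfortably above the 1<<28 loose bound the real code asserts */
        printf("newCurrent=0x%x correction=0x%x\n", newCurrent, correction);
        return 0;
    }
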
+/**
+ * ZSTD_window_enforceMaxDist():
+ * Updates lowLimit so that:
+ * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
*
* It ensures index is valid as long as index >= lowLimit.
* This must be called before a block compression call.
@@ -1162,14 +1162,14 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
* In dictMatchState mode, lowLimit and dictLimit are the same,
* and the dictionary is below them.
* forceWindow and dictMatchState are therefore incompatible.
- */
+ */
MEM_STATIC void
ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
const void* blockEnd,
U32 maxDist,
U32* loadedDictEndPtr,
const ZSTD_matchState_t** dictMatchStatePtr)
-{
+{
U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
@@ -1190,18 +1190,18 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
*/
if (blockEndIdx > maxDist + loadedDictEnd) {
U32 const newLowLimit = blockEndIdx - maxDist;
- if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
- if (window->dictLimit < window->lowLimit) {
+ if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
+ if (window->dictLimit < window->lowLimit) {
DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
(unsigned)window->dictLimit, (unsigned)window->lowLimit);
- window->dictLimit = window->lowLimit;
- }
+ window->dictLimit = window->lowLimit;
+ }
/* On reaching window size, dictionaries are invalidated */
if (loadedDictEndPtr) *loadedDictEndPtr = 0;
if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
- }
-}
-
+ }
+}
+
/* Similar to ZSTD_window_enforceMaxDist(),
* but only invalidates dictionary
* when input progresses beyond window size.
@@ -1248,50 +1248,50 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
window->nbOverflowCorrections = 0;
}
-/**
- * ZSTD_window_update():
- * Updates the window by appending [src, src + srcSize) to the window.
- * If it is not contiguous, the current prefix becomes the extDict, and we
- * forget about the old extDict. Handles overlap of the prefix and extDict.
- * Returns non-zero if the segment is contiguous.
- */
-MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
+/**
+ * ZSTD_window_update():
+ * Updates the window by appending [src, src + srcSize) to the window.
+ * If it is not contiguous, the current prefix becomes the extDict, and we
+ * forget about the old extDict. Handles overlap of the prefix and extDict.
+ * Returns non-zero if the segment is contiguous.
+ */
+MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
void const* src, size_t srcSize,
int forceNonContiguous)
-{
- BYTE const* const ip = (BYTE const*)src;
- U32 contiguous = 1;
+{
+ BYTE const* const ip = (BYTE const*)src;
+ U32 contiguous = 1;
DEBUGLOG(5, "ZSTD_window_update");
if (srcSize == 0)
return contiguous;
assert(window->base != NULL);
assert(window->dictBase != NULL);
- /* Check if blocks follow each other */
+ /* Check if blocks follow each other */
if (src != window->nextSrc || forceNonContiguous) {
- /* not contiguous */
- size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
+ /* not contiguous */
+ size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
- window->lowLimit = window->dictLimit;
- assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */
- window->dictLimit = (U32)distanceFromBase;
- window->dictBase = window->base;
- window->base = ip - distanceFromBase;
+ window->lowLimit = window->dictLimit;
+ assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */
+ window->dictLimit = (U32)distanceFromBase;
+ window->dictBase = window->base;
+ window->base = ip - distanceFromBase;
/* ms->nextToUpdate = window->dictLimit; */
- if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */
- contiguous = 0;
- }
- window->nextSrc = ip + srcSize;
- /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
- if ( (ip+srcSize > window->dictBase + window->lowLimit)
- & (ip < window->dictBase + window->dictLimit)) {
- ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
- U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
- window->lowLimit = lowLimitMax;
+ if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */
+ contiguous = 0;
+ }
+ window->nextSrc = ip + srcSize;
+ /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
+ if ( (ip+srcSize > window->dictBase + window->lowLimit)
+ & (ip < window->dictBase + window->dictLimit)) {
+ ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
+ U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ window->lowLimit = lowLimitMax;
DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
- }
- return contiguous;
-}
-
+ }
+ return contiguous;
+}
+
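
The non-contiguous branch above performs the prefix-to-extDict flip. A toy model of just that bookkeeping; the fields mirror ZSTD_window_t, but this is illustrative, not the real struct or routine:

    /* Toy model of the non-contiguous flip in ZSTD_window_update(). */
    typedef struct {
        const unsigned char *base, *dictBase, *nextSrc;
        unsigned lowLimit, dictLimit;
    } toy_window;

    static void toy_update(toy_window* w, const unsigned char* src, size_t srcSize)
    {
        if (src != w->nextSrc) {                 /* new, non-contiguous segment */
            size_t const dist = (size_t)(w->nextSrc - w->base);
            w->lowLimit  = w->dictLimit;         /* old extDict is forgotten */
            w->dictLimit = (unsigned)dist;       /* old prefix becomes extDict */
            w->dictBase  = w->base;
            w->base      = src - dist;           /* indexes keep increasing */
        }
        w->nextSrc = src + srcSize;
    }
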
/**
* Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
*/
@@ -1359,15 +1359,15 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
#endif
-#if defined (__cplusplus)
-}
-#endif
-
+#if defined (__cplusplus)
+}
+#endif
+
/* ===============================================================
* Shared internal declarations
* These prototypes may be called from sources not in lib/compress
* =============================================================== */
-
+
/* ZSTD_loadCEntropy() :
* dict : must point at beginning of a valid zstd dictionary.
* return : size of dictionary header (size of magic number + dict ID + entropy tables)
@@ -1378,81 +1378,81 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
-/* ==============================================================
- * Private declarations
- * These prototypes shall only be called from within lib/compress
- * ============================================================== */
-
-/* ZSTD_getCParamsFromCCtxParams() :
+/* ==============================================================
+ * Private declarations
+ * These prototypes shall only be called from within lib/compress
+ * ============================================================== */
+
+/* ZSTD_getCParamsFromCCtxParams() :
* cParams are built depending on compressionLevel, src size hints,
- * LDM and manually set compression parameters.
+ * LDM and manually set compression parameters.
* Note: srcSizeHint == 0 means 0!
- */
-ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ */
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
-
-/*! ZSTD_initCStream_internal() :
- * Private use only. Init streaming operation.
- * expects params to be valid.
- * must receive dict, or cdict, or none, but not both.
- * @return : 0, or an error code */
-size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
- const void* dict, size_t dictSize,
- const ZSTD_CDict* cdict,
+
+/*! ZSTD_initCStream_internal() :
+ * Private use only. Init streaming operation.
+ * expects params to be valid.
+ * must receive dict, or cdict, or none, but not both.
+ * @return : 0, or an error code */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
-
+
void ZSTD_resetSeqStore(seqStore_t* ssPtr);
-
-/*! ZSTD_getCParamsFromCDict() :
- * as the name implies */
-ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
-
-/* ZSTD_compressBegin_advanced_internal() :
- * Private use only. To be called from zstdmt_compress.c. */
-size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
- const void* dict, size_t dictSize,
- ZSTD_dictContentType_e dictContentType,
+
+/*! ZSTD_getCParamsFromCDict() :
+ * as the name implies */
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
+
+/* ZSTD_compressBegin_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
- const ZSTD_CDict* cdict,
+ const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
- unsigned long long pledgedSrcSize);
-
-/* ZSTD_compress_advanced_internal() :
- * Private use only. To be called from zstdmt_compress.c. */
-size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict,size_t dictSize,
+ unsigned long long pledgedSrcSize);
+
+/* ZSTD_compress_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
const ZSTD_CCtx_params* params);
-
-
-/* ZSTD_writeLastEmptyBlock() :
- * output an empty Block with end-of-frame mark to complete a frame
- * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
* or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
- */
-size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
-
-
-/* ZSTD_referenceExternalSequences() :
- * Must be called before starting a compression operation.
- * seqs must form a valid parse of a prefix of the source.
- * This cannot be used when long range matching is enabled.
- * Zstd will use these sequences, and pass the literals to a secondary block
- * compressor.
- * @return : An error code on failure.
- * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
- * access and data corruption.
- */
-size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
-
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
+
+
+/* ZSTD_referenceExternalSequences() :
+ * Must be called before starting a compression operation.
+ * seqs must form a valid parse of a prefix of the source.
+ * This cannot be used when long range matching is enabled.
+ * Zstd will use these sequences, and pass the literals to a secondary block
+ * compressor.
+ * @return : An error code on failure.
+ * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
+ * access and data corruption.
+ */
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
+
/** ZSTD_cycleLog() :
* condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
-
+
/** ZSTD_CCtx_trace() :
* Trace the end of a compression call.
*/
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
-#endif /* ZSTD_COMPRESS_H */
+#endif /* ZSTD_COMPRESS_H */
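ZSTD_writeLastEmptyBlock(), declared above, closes a frame with an empty block carrying the end-of-frame mark. Its contract follows from the frame format: a block header is 3 little-endian bytes, bit 0 = Last_Block, bits 1-2 = Block_Type (0 = Raw), bits 3-23 = Block_Size (RFC 8878). A hedged standalone sketch — it returns 0 instead of a zstd error code, and the constant is local, not the library's ZSTD_blockHeaderSize:

#include <stddef.h>
#include <stdint.h>

#define BLOCK_HEADER_SIZE 3  /* zstd block headers are 3 bytes */

/* Write an empty Raw block flagged as the frame's last block.
 * Returns bytes written, or 0 if dstCapacity is too small
 * (the real function returns a zstd error code instead). */
static size_t writeLastEmptyBlockSketch(void* dst, size_t dstCapacity)
{
    uint8_t* const op = (uint8_t*)dst;
    if (dstCapacity < BLOCK_HEADER_SIZE) return 0;
    /* header = Last_Block(1) | Block_Type Raw(0)<<1 | Block_Size(0)<<3 */
    op[0] = 0x01; op[1] = 0x00; op[2] = 0x00;
    return BLOCK_HEADER_SIZE;
}

int main(void)
{
    uint8_t out[3];
    return writeLastEmptyBlockSketch(out, sizeof out) == 3 ? 0 : 1;
}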
diff --git a/contrib/libs/zstd/lib/compress/zstd_double_fast.c b/contrib/libs/zstd/lib/compress/zstd_double_fast.c
index 76933dea26..59f4ad5e05 100644
--- a/contrib/libs/zstd/lib/compress/zstd_double_fast.c
+++ b/contrib/libs/zstd/lib/compress/zstd_double_fast.c
@@ -1,55 +1,55 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#include "zstd_compress_internal.h"
-#include "zstd_double_fast.h"
-
-
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_double_fast.h"
+
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm)
-{
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32* const hashLarge = ms->hashTable;
- U32 const hBitsL = cParams->hashLog;
+ U32* const hashLarge = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
U32 const mls = cParams->minMatch;
- U32* const hashSmall = ms->chainTable;
- U32 const hBitsS = cParams->chainLog;
- const BYTE* const base = ms->window.base;
- const BYTE* ip = base + ms->nextToUpdate;
- const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
- const U32 fastHashFillStep = 3;
-
- /* Always insert every fastHashFillStep position into the hash tables.
- * Insert the other positions into the large hash table if their entry
- * is empty.
- */
- for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash tables.
+ * Insert the other positions into the large hash table if their entry
+ * is empty.
+ */
+ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
U32 const curr = (U32)(ip - base);
- U32 i;
- for (i = 0; i < fastHashFillStep; ++i) {
- size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
- size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
- if (i == 0)
+ U32 i;
+ for (i = 0; i < fastHashFillStep; ++i) {
+ size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
+ size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
+ if (i == 0)
hashSmall[smHash] = curr + i;
- if (i == 0 || hashLarge[lgHash] == 0)
+ if (i == 0 || hashLarge[lgHash] == 0)
hashLarge[lgHash] = curr + i;
/* Only load extra positions for ZSTD_dtlm_full */
if (dtlm == ZSTD_dtlm_fast)
break;
} }
-}
-
-
-FORCE_INLINE_TEMPLATE
+}
+
+
+FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls /* template */)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
@@ -258,25 +258,25 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls /* template */)
-{
+{
ZSTD_compressionParameters const* cParams = &ms->cParams;
- U32* const hashLong = ms->hashTable;
- const U32 hBitsL = cParams->hashLog;
- U32* const hashSmall = ms->chainTable;
- const U32 hBitsS = cParams->chainLog;
- const BYTE* const base = ms->window.base;
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
+ U32* const hashLong = ms->hashTable;
+ const U32 hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ const U32 hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
/* presumes that, if there is a dictionary, it must be using Attach mode */
const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixLowest = base + prefixLowestIndex;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - HASH_READ_SIZE;
- U32 offset_1=rep[0], offset_2=rep[1];
- U32 offsetSaved = 0;
-
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
const ZSTD_matchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
const U32* const dictHashLong = dms->hashTable;
@@ -295,33 +295,33 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
/* if a dictionary is attached, it must be within window range */
assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
- /* init */
+ /* init */
ip += (dictAndPrefixLength == 0);
-
+
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
- /* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- size_t mLength;
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
U32 offset;
- size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
- size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
U32 const curr = (U32)(ip-base);
- U32 const matchIndexL = hashLong[h2];
+ U32 const matchIndexL = hashLong[h2];
U32 matchIndexS = hashSmall[h];
- const BYTE* matchLong = base + matchIndexL;
- const BYTE* match = base + matchIndexS;
+ const BYTE* matchLong = base + matchIndexL;
+ const BYTE* match = base + matchIndexS;
const U32 repIndex = curr + 1 - offset_1;
const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
-
+
/* check repcode */
if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
@@ -335,8 +335,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
if (matchIndexL > prefixLowestIndex) {
/* check prefix long match */
if (MEM_read64(matchLong) == MEM_read64(ip)) {
- mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
- offset = (U32)(ip-matchLong);
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
goto _match_found;
}
@@ -385,12 +385,12 @@ _search_next_long:
/* check prefix long +1 match */
if (matchIndexL3 > prefixLowestIndex) {
if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
- mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
- ip++;
- offset = (U32)(ip-matchL3);
+ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-matchL3);
while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
goto _match_found;
- }
+ }
} else {
/* check dict long +1 match */
U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
@@ -403,7 +403,7 @@ _search_next_long:
while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
goto _match_found;
} } }
-
+
/* if no long +1 match, explore the short match we found */
if (matchIndexS < prefixLowestIndex) {
mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
@@ -413,8 +413,8 @@ _search_next_long:
mLength = ZSTD_count(ip+4, match+4, iend) + 4;
offset = (U32)(ip - match);
while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- }
-
+ }
+
_match_found:
offset_2 = offset_1;
offset_1 = offset;
@@ -422,11 +422,11 @@ _match_found:
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
_match_stored:
- /* match found */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
/* Complementary insertion */
/* done after iLimit test, as candidates could be > iend-8 */
{ U32 const indexToInsert = curr+2;
@@ -435,8 +435,8 @@ _match_stored:
hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
}
-
- /* check immediate repcode */
+
+ /* check immediate repcode */
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
@@ -460,14 +460,14 @@ _match_stored:
}
} /* while (ip < ilimit) */
- /* save reps for next block */
- rep[0] = offset_1 ? offset_1 : offsetSaved;
- rep[1] = offset_2 ? offset_2 : offsetSaved;
-
- /* Return the last literals size */
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
return (size_t)(iend - anchor);
-}
-
+}
+
#define ZSTD_GEN_DFAST_FN(dictMode, mls) \
static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
@@ -475,7 +475,7 @@ _match_stored:
{ \
return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
}
-
+
ZSTD_GEN_DFAST_FN(noDict, 4)
ZSTD_GEN_DFAST_FN(noDict, 5)
ZSTD_GEN_DFAST_FN(noDict, 6)
@@ -487,26 +487,26 @@ ZSTD_GEN_DFAST_FN(dictMatchState, 6)
ZSTD_GEN_DFAST_FN(dictMatchState, 7)
-size_t ZSTD_compressBlock_doubleFast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-{
+{
const U32 mls = ms->cParams.minMatch;
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
- case 5 :
+ case 5 :
return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
- case 6 :
+ case 6 :
return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
- case 7 :
+ case 7 :
return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
- }
-}
-
-
+ }
+}
+
+
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
@@ -527,21 +527,21 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState(
}
-static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
- U32 const mls /* template */)
-{
+ U32 const mls /* template */)
+{
ZSTD_compressionParameters const* cParams = &ms->cParams;
- U32* const hashLong = ms->hashTable;
- U32 const hBitsL = cParams->hashLog;
- U32* const hashSmall = ms->chainTable;
- U32 const hBitsS = cParams->chainLog;
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
+ U32* const hashLong = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
const BYTE* const base = ms->window.base;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
@@ -552,87 +552,87 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* const dictBase = ms->window.dictBase;
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dictBase + prefixStartIndex;
- U32 offset_1=rep[0], offset_2=rep[1];
-
+ U32 offset_1=rep[0], offset_2=rep[1];
+
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
/* if extDict is invalidated due to maxDistance, switch to "regular" variant */
if (prefixStartIndex == dictStartIndex)
return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
- const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
- const U32 matchIndex = hashSmall[hSmall];
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 matchIndex = hashSmall[hSmall];
const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
- const BYTE* match = matchBase + matchIndex;
-
- const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
- const U32 matchLongIndex = hashLong[hLong];
+ const BYTE* match = matchBase + matchIndex;
+
+ const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+ const U32 matchLongIndex = hashLong[hLong];
const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
- const BYTE* matchLong = matchLongBase + matchLongIndex;
-
+ const BYTE* matchLong = matchLongBase + matchLongIndex;
+
const U32 curr = (U32)(ip-base);
const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
- size_t mLength;
+ size_t mLength;
hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
-
+
if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
& (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
- ip++;
+ ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
- } else {
+ } else {
if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
- U32 offset;
+ U32 offset;
mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
offset = curr - matchLongIndex;
- while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
+ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
-
+
} else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
- size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
- U32 const matchIndex3 = hashLong[h3];
+ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndex3 = hashLong[h3];
const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
- const BYTE* match3 = match3Base + matchIndex3;
- U32 offset;
+ const BYTE* match3 = match3Base + matchIndex3;
+ U32 offset;
hashLong[h3] = curr + 1;
if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
- ip++;
+ ip++;
offset = curr+1 - matchIndex3;
- while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
- } else {
+ while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+ } else {
const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
offset = curr - matchIndex;
- while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- }
- offset_2 = offset_1;
- offset_1 = offset;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ offset_2 = offset_1;
+ offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
-
- } else {
- ip += ((ip-anchor) >> kSearchStrength) + 1;
- continue;
- } }
-
+
+ } else {
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+ continue;
+ } }
+
/* move to next sequence start */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
/* Complementary insertion */
/* done after iLimit test, as candidates could be > iend-8 */
{ U32 const indexToInsert = curr+2;
@@ -642,10 +642,10 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
}
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
- U32 const repIndex2 = current2 - offset_2;
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional underflow : ensure repIndex2 doesn't overlap dict + prefix */
& (offset_2 <= current2 - dictStartIndex))
@@ -654,43 +654,43 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
- hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
- hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- } } }
-
- /* save reps for next block */
- rep[0] = offset_1;
- rep[1] = offset_2;
-
- /* Return the last literals size */
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
return (size_t)(iend - anchor);
-}
-
+}
+
ZSTD_GEN_DFAST_FN(extDict, 4)
ZSTD_GEN_DFAST_FN(extDict, 5)
ZSTD_GEN_DFAST_FN(extDict, 6)
ZSTD_GEN_DFAST_FN(extDict, 7)
-
-size_t ZSTD_compressBlock_doubleFast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-{
+{
U32 const mls = ms->cParams.minMatch;
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
- case 5 :
+ case 5 :
return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
- case 6 :
+ case 6 :
return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
- case 7 :
+ case 7 :
return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
- }
-}
+ }
+}
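The double-fast strategy above maintains two hash tables over the same positions: hashLong keyed on 8 bytes (hashLog bits) and hashSmall keyed on minMatch bytes (chainLog bits); a verified long match wins outright, and a short match is only taken after one extra long probe at ip+1. A reduced sketch of the two-table probe order, with that extra ip+1 probe and all window handling omitted (placeholder hash functions and table sizes, not ZSTD_hashPtr; index 0 doubles as "empty", so real positions start at 1):

#include <stdint.h>
#include <string.h>

enum { HLOG_L = 17, HLOG_S = 16 };

static uint32_t tabL[1u << HLOG_L]; /* candidates keyed on 8 bytes */
static uint32_t tabS[1u << HLOG_S]; /* candidates keyed on 4 bytes */

static uint32_t h64(const uint8_t* p, unsigned bits)
{
    uint64_t v; memcpy(&v, p, 8);                 /* needs 8 readable bytes */
    return (uint32_t)((v * 0x9E3779B185EBCA87ULL) >> (64 - bits));
}

static uint32_t h32(const uint8_t* p, unsigned bits)
{
    uint32_t v; memcpy(&v, p, 4);
    return (v * 2654435761u) >> (32 - bits);
}

/* Double-fast probe order: a verified 8-byte match wins immediately;
 * otherwise fall back to the 4-byte candidate. Returns the candidate's
 * index, or 0 when neither table has a usable entry. */
static uint32_t probe(const uint8_t* base, const uint8_t* ip)
{
    uint32_t const curr = (uint32_t)(ip - base);
    uint32_t const hL = h64(ip, HLOG_L), hS = h32(ip, HLOG_S);
    uint32_t const longIdx = tabL[hL], shortIdx = tabS[hS];
    tabL[hL] = tabS[hS] = curr;                   /* update both tables */
    if (longIdx && memcmp(base + longIdx, ip, 8) == 0) return longIdx;
    if (shortIdx && memcmp(base + shortIdx, ip, 4) == 0) return shortIdx;
    return 0;
}

int main(void)
{
    static const uint8_t data[] = "_0123456789abcdef_0123456789abcdef";
    (void)probe(data, data + 1);                 /* seed position 1 */
    return probe(data, data + 18) == 1 ? 0 : 1;  /* long match back to 1 */
}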
diff --git a/contrib/libs/zstd/lib/compress/zstd_double_fast.h b/contrib/libs/zstd/lib/compress/zstd_double_fast.h
index e16b7b03a3..ddca837bff 100644
--- a/contrib/libs/zstd/lib/compress/zstd_double_fast.h
+++ b/contrib/libs/zstd/lib/compress/zstd_double_fast.h
@@ -1,38 +1,38 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_DOUBLE_FAST_H
-#define ZSTD_DOUBLE_FAST_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_DOUBLE_FAST_H
+#define ZSTD_DOUBLE_FAST_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
#include "../common/mem.h" /* U32 */
-#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
-
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm);
-size_t ZSTD_compressBlock_doubleFast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_doubleFast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_DOUBLE_FAST_H */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/contrib/libs/zstd/lib/compress/zstd_fast.c b/contrib/libs/zstd/lib/compress/zstd_fast.c
index 802fc31579..7f6b333e5b 100644
--- a/contrib/libs/zstd/lib/compress/zstd_fast.c
+++ b/contrib/libs/zstd/lib/compress/zstd_fast.c
@@ -1,33 +1,33 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
-#include "zstd_fast.h"
-
-
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+#include "zstd_fast.h"
+
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm)
-{
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32* const hashTable = ms->hashTable;
- U32 const hBits = cParams->hashLog;
+ U32* const hashTable = ms->hashTable;
+ U32 const hBits = cParams->hashLog;
U32 const mls = cParams->minMatch;
- const BYTE* const base = ms->window.base;
- const BYTE* ip = base + ms->nextToUpdate;
- const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
- const U32 fastHashFillStep = 3;
-
- /* Always insert every fastHashFillStep position into the hash table.
- * Insert the other positions if their hash entry is empty.
- */
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash table.
+ * Insert the other positions if their hash entry is empty.
+ */
for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
U32 const curr = (U32)(ip - base);
size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
@@ -40,8 +40,8 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
if (hashTable[hash] == 0) { /* not yet filled */
hashTable[hash] = curr + p;
} } } }
-}
-
+}
+
/**
* If you squint hard enough (and ignore repcodes), the search operation at any
@@ -91,22 +91,22 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
*/
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_noDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
U32 const mls, U32 const hasStep)
-{
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32* const hashTable = ms->hashTable;
+ U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
/* support stepSize of 0 */
size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
- const BYTE* const base = ms->window.base;
- const BYTE* const istart = (const BYTE*)src;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixStart = base + prefixStartIndex;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
const BYTE* anchor = istart;
const BYTE* ip0 = istart;
@@ -117,8 +117,8 @@ ZSTD_compressBlock_fast_noDict_generic(
U32 rep_offset1 = rep[0];
U32 rep_offset2 = rep[1];
- U32 offsetSaved = 0;
-
+ U32 offsetSaved = 0;
+
size_t hash0; /* hash for ip0 */
size_t hash1; /* hash for ip1 */
U32 idx; /* match idx for ip0 */
@@ -143,8 +143,8 @@ ZSTD_compressBlock_fast_noDict_generic(
U32 const maxRep = curr - windowLow;
if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0;
if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0;
- }
-
+ }
+
/* start each op */
_start: /* Requires: ip0 */
@@ -422,23 +422,23 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
assert(offset_2 <= dictAndPrefixLength);
/* Main Search Loop */
- while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
- size_t mLength;
- size_t const h = ZSTD_hashPtr(ip, hlog, mls);
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ size_t const h = ZSTD_hashPtr(ip, hlog, mls);
U32 const curr = (U32)(ip-base);
- U32 const matchIndex = hashTable[h];
- const BYTE* match = base + matchIndex;
+ U32 const matchIndex = hashTable[h];
+ const BYTE* match = base + matchIndex;
const U32 repIndex = curr + 1 - offset_1;
const BYTE* repMatch = (repIndex < prefixStartIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
hashTable[h] = curr; /* update hash table */
-
+
if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
- ip++;
+ ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
} else if ( (matchIndex <= prefixStartIndex) ) {
size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
@@ -446,9 +446,9 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
const BYTE* dictMatch = dictBase + dictMatchIndex;
if (dictMatchIndex <= dictStartIndex ||
MEM_read32(dictMatch) != MEM_read32(ip)) {
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
} else {
/* found a dict match */
U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
@@ -457,8 +457,8 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
&& (ip[-1] == dictMatch[-1])) {
ip--; dictMatch--; mLength++;
} /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
+ offset_2 = offset_1;
+ offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
}
} else if (MEM_read32(match) != MEM_read32(ip)) {
@@ -476,18 +476,18 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
}
-
- /* match found */
- ip += mLength;
- anchor = ip;
-
- if (ip <= ilimit) {
- /* Fill Table */
+
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
assert(base+curr+2 > istart); /* check base overflow */
hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
- hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
- /* check immediate repcode */
+ /* check immediate repcode */
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
@@ -509,15 +509,15 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
}
}
}
-
- /* save reps for next block */
- rep[0] = offset_1 ? offset_1 : offsetSaved;
- rep[1] = offset_2 ? offset_2 : offsetSaved;
-
- /* Return the last literals size */
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
return (size_t)(iend - anchor);
-}
-
+}
+
ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
@@ -525,40 +525,40 @@ ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
size_t ZSTD_compressBlock_fast_dictMatchState(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-{
+{
U32 const mls = ms->cParams.minMatch;
assert(ms->dictMatchState != NULL);
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
- case 5 :
+ case 5 :
return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
- case 6 :
+ case 6 :
return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
- case 7 :
+ case 7 :
return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
- }
-}
-
-
-static size_t ZSTD_compressBlock_fast_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ }
+}
+
+
+static size_t ZSTD_compressBlock_fast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
-{
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
/* support stepSize of 0 */
U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
- const BYTE* const base = ms->window.base;
- const BYTE* const dictBase = ms->window.dictBase;
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
const U32 dictStartIndex = lowLimit;
@@ -567,10 +567,10 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const U32 prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const dictEnd = dictBase + prefixStartIndex;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
- U32 offset_1=rep[0], offset_2=rep[1];
-
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ U32 offset_1=rep[0], offset_2=rep[1];
+
(void)hasStep; /* not currently specialized on whether it's accelerated */
DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
@@ -579,9 +579,9 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
if (prefixStartIndex == dictStartIndex)
return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
- /* Search Loop */
- while (ip < ilimit) { /* < instead of <=, because (ip+1) */
- const size_t h = ZSTD_hashPtr(ip, hlog, mls);
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t h = ZSTD_hashPtr(ip, hlog, mls);
const U32 matchIndex = hashTable[h];
const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
const BYTE* match = matchBase + matchIndex;
@@ -591,85 +591,85 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const BYTE* const repMatch = repBase + repIndex;
hashTable[h] = curr; /* update hash table */
DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
-
+
if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
& (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
- && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
- ip++;
+ ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength);
ip += rLength;
anchor = ip;
- } else {
+ } else {
if ( (matchIndex < dictStartIndex) ||
- (MEM_read32(match) != MEM_read32(ip)) ) {
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
- }
+ (MEM_read32(match) != MEM_read32(ip)) ) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ }
{ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
U32 const offset = curr - matchIndex;
size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
- while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
offset_2 = offset_1; offset_1 = offset; /* update offset history */
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
ip += mLength;
anchor = ip;
- } }
-
- if (ip <= ilimit) {
- /* Fill Table */
+ } }
+
+ if (ip <= ilimit) {
+ /* Fill Table */
hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
- hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
- /* check immediate repcode */
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
- U32 const repIndex2 = current2 - offset_2;
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional underflow */
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
{ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2);
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
- } } }
-
- /* save reps for next block */
- rep[0] = offset_1;
- rep[1] = offset_2;
-
- /* Return the last literals size */
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
return (size_t)(iend - anchor);
-}
-
+}
+
ZSTD_GEN_FAST_FN(extDict, 4, 0)
ZSTD_GEN_FAST_FN(extDict, 5, 0)
ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)
-
-size_t ZSTD_compressBlock_fast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-{
+{
U32 const mls = ms->cParams.minMatch;
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
- case 5 :
+ case 5 :
return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
- case 6 :
+ case 6 :
return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
- case 7 :
+ case 7 :
return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
- }
-}
+ }
+}
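Both fast searchers gate their repcode candidates with (U32)((prefixStartIndex-1) - repIndex) >= 3, the "intentional underflow" noted in the comments: one unsigned compare rejects exactly the three indices just below prefixStartIndex, where a 4-byte read would straddle the non-contiguous boundary between the dictionary buffer and the prefix, while indices at or above prefixStartIndex wrap around to a huge value and pass. A self-checking sketch of that trick:

#include <stdint.h>
#include <assert.h>

/* One unsigned compare replaces two signed ones: it accepts repIndex
 * anywhere except the 3 positions just below prefixStartIndex, where a
 * 4-byte read would straddle the dict/prefix boundary. */
static int repIndexUsable(uint32_t prefixStartIndex, uint32_t repIndex)
{
    return (uint32_t)((prefixStartIndex - 1) - repIndex) >= 3;
}

int main(void)
{
    uint32_t const p = 1000;
    assert( repIndexUsable(p, 996));   /* match fully in ext-dict        */
    assert(!repIndexUsable(p, 997));   /* read would straddle boundary   */
    assert(!repIndexUsable(p, 999));   /* read would straddle boundary   */
    assert( repIndexUsable(p, 1000));  /* fully in prefix (wraps, passes) */
    return 0;
}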
diff --git a/contrib/libs/zstd/lib/compress/zstd_fast.h b/contrib/libs/zstd/lib/compress/zstd_fast.h
index 0d4a0c1090..1259a23629 100644
--- a/contrib/libs/zstd/lib/compress/zstd_fast.h
+++ b/contrib/libs/zstd/lib/compress/zstd_fast.h
@@ -1,37 +1,37 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_FAST_H
-#define ZSTD_FAST_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_FAST_H
+#define ZSTD_FAST_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
#include "../common/mem.h" /* U32 */
-#include "zstd_compress_internal.h"
-
-void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+#include "zstd_compress_internal.h"
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm);
-size_t ZSTD_compressBlock_fast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_fast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_fast_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_FAST_H */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_FAST_H */
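The ZSTD_GEN_FAST_FN / ZSTD_GEN_DFAST_FN macros behind these entry points stamp out one copy of the generic body per minMatch value, so the compiler sees mls as a compile-time constant; the exported functions then merely switch on cParams.minMatch. A reduced sketch of the same specialization pattern (illustrative names, and a trivial stand-in for the real match loop):

#include <stddef.h>
#include <stdint.h>

/* The generic worker takes mls as a parameter; inlining it with a
 * compile-time constant lets each specialization fold it away. */
static inline size_t fast_generic(const uint8_t* src, size_t srcSize,
                                  uint32_t const mls /* template */)
{
    size_t hits = 0, i;
    for (i = 0; i + mls <= srcSize; ++i)
        hits += (src[i] == src[0]);   /* stand-in for real match search */
    return hits;
}

#define GEN_FAST_FN(mls)                                     \
    static size_t fast_##mls(const uint8_t* s, size_t n)     \
    { return fast_generic(s, n, mls); }

GEN_FAST_FN(4)
GEN_FAST_FN(5)
GEN_FAST_FN(6)
GEN_FAST_FN(7)

static size_t fast_dispatch(const uint8_t* s, size_t n, uint32_t mls)
{
    switch (mls) {
    default: /* includes case 3 */
    case 4: return fast_4(s, n);
    case 5: return fast_5(s, n);
    case 6: return fast_6(s, n);
    case 7: return fast_7(s, n);
    }
}

int main(void)
{
    static const uint8_t buf[] = "abracadabra";
    return fast_dispatch(buf, sizeof buf - 1, 4) > 0 ? 0 : 1;
}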
diff --git a/contrib/libs/zstd/lib/compress/zstd_lazy.c b/contrib/libs/zstd/lib/compress/zstd_lazy.c
index 2e38dcb46d..d43ac04f95 100644
--- a/contrib/libs/zstd/lib/compress/zstd_lazy.c
+++ b/contrib/libs/zstd/lib/compress/zstd_lazy.c
@@ -1,154 +1,154 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#include "zstd_compress_internal.h"
-#include "zstd_lazy.h"
-
-
-/*-*************************************
-* Binary Tree search
-***************************************/
-
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_lazy.h"
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+
static void
ZSTD_updateDUBT(ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* iend,
- U32 mls)
-{
+ const BYTE* ip, const BYTE* iend,
+ U32 mls)
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32* const hashTable = ms->hashTable;
- U32 const hashLog = cParams->hashLog;
-
- U32* const bt = ms->chainTable;
- U32 const btLog = cParams->chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
-
- const BYTE* const base = ms->window.base;
- U32 const target = (U32)(ip - base);
- U32 idx = ms->nextToUpdate;
-
- if (idx != target)
- DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
- idx, target, ms->window.dictLimit);
- assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */
- (void)iend;
-
- assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */
- for ( ; idx < target ; idx++) {
- size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */
- U32 const matchIndex = hashTable[h];
-
- U32* const nextCandidatePtr = bt + 2*(idx&btMask);
- U32* const sortMarkPtr = nextCandidatePtr + 1;
-
- DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
- hashTable[h] = idx; /* Update Hash Table */
- *nextCandidatePtr = matchIndex; /* update BT like a chain */
- *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
- }
- ms->nextToUpdate = target;
-}
-
-
-/** ZSTD_insertDUBT1() :
- * sort one already inserted but unsorted position
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ if (idx != target)
+ DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
+ idx, target, ms->window.dictLimit);
+ assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */
+ (void)iend;
+
+ assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */
+ for ( ; idx < target ; idx++) {
+ size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */
+ U32 const matchIndex = hashTable[h];
+
+ U32* const nextCandidatePtr = bt + 2*(idx&btMask);
+ U32* const sortMarkPtr = nextCandidatePtr + 1;
+
+ DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
+ hashTable[h] = idx; /* Update Hash Table */
+ *nextCandidatePtr = matchIndex; /* update BT like a chain */
+ *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
+ }
+ ms->nextToUpdate = target;
+}
+
+
+/** ZSTD_insertDUBT1() :
+ * sort one already inserted but unsorted position
* assumption : curr >= btLow == (curr - btMask)
- * doesn't fail */
+ * doesn't fail */
static void
ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
U32 curr, const BYTE* inputEnd,
U32 nbCompares, U32 btLow,
const ZSTD_dictMode_e dictMode)
-{
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask = (1 << btLog) - 1;
- size_t commonLengthSmaller=0, commonLengthLarger=0;
- const BYTE* const base = ms->window.base;
- const BYTE* const dictBase = ms->window.dictBase;
- const U32 dictLimit = ms->window.dictLimit;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const BYTE* match;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
U32* smallerPtr = bt + 2*(curr&btMask);
- U32* largerPtr = smallerPtr + 1;
+ U32* largerPtr = smallerPtr + 1;
U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
- U32 dummy32; /* to be nullified at the end */
+ U32 dummy32; /* to be nullified at the end */
U32 const windowValid = ms->window.lowLimit;
U32 const maxDistance = 1U << cParams->windowLog;
U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
+
-
- DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
+ DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
curr, dictLimit, windowLow);
assert(curr >= btLow);
- assert(ip < iend); /* condition for ZSTD_count */
-
+ assert(ip < iend); /* condition for ZSTD_count */
+
for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
- U32* const nextPtr = bt + 2*(matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(matchIndex < curr);
/* note : all candidates are now supposed sorted,
* but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
* when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
-
+
if ( (dictMode != ZSTD_extDict)
- || (matchIndex+matchLength >= dictLimit) /* both in current segment*/
+ || (matchIndex+matchLength >= dictLimit) /* both in current segment*/
|| (curr < dictLimit) /* both in extDict */) {
const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
|| (matchIndex+matchLength >= dictLimit)) ?
base : dictBase;
- assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
+ assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
|| (curr < dictLimit) );
- match = mBase + matchIndex;
- matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
- if (matchIndex+matchLength >= dictLimit)
+ match = mBase + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
match = base + matchIndex; /* preparation for next read of match[matchLength] */
- }
-
- DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
+ }
+
+ DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
curr, matchIndex, (U32)matchLength);
-
- if (ip+matchLength == iend) { /* equal : no way to know if match is smaller or larger */
- break; /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
- }
-
- if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
- /* match is smaller than current */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
- DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
- matchIndex, btLow, nextPtr[1]);
- smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
- matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
- } else {
- /* match is larger than current */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
- DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
- matchIndex, btLow, nextPtr[0]);
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- } }
-
- *smallerPtr = *largerPtr = 0;
-}
-
-
+
+ if (ip+matchLength == iend) { /* equal : no way to know if match is smaller or larger */
+ break; /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
+ matchIndex, btLow, nextPtr[1]);
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
+ matchIndex, btLow, nextPtr[0]);
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+}
+
+
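ZSTD_insertDUBT1() above threads a position into a binary tree stored flat in the chain table: each position idx owns the pair bt[2*(idx&btMask)] (smaller subtree) and bt[2*(idx&btMask)+1] (larger subtree), and inserting at the root splits the old tree into a "smaller" spine and a "larger" spine. A reduced single-bucket sketch of that split (no window bounds, no common-prefix bookkeeping, no extDict segments; equal strings take the smaller branch here, where the real code breaks out instead to keep the tree consistent):

#include <stdint.h>

#define BT_LOG  10
#define BT_MASK ((1u << BT_LOG) - 1)

static uint32_t bt[2u << BT_LOG]; /* bt[2*(i&BT_MASK)] = smaller, +1 = larger */
static uint32_t root = 0;         /* stands in for one hashTable[h] bucket */

/* Insert `curr` as the new root, re-threading the old tree around it.
 * Positions more than BT_MASK apart would alias a slot; the real code
 * bounds the walk with btLow instead. Index 0 doubles as "empty". */
static void btInsert(const uint8_t* base, const uint8_t* end, uint32_t curr)
{
    uint32_t* smallerPtr = bt + 2*(curr & BT_MASK);
    uint32_t* largerPtr  = smallerPtr + 1;
    uint32_t  matchIndex = root;
    root = curr;
    while (matchIndex != 0) {
        uint32_t* const nextPtr = bt + 2*(matchIndex & BT_MASK);
        const uint8_t *a = base + curr, *b = base + matchIndex;
        while (a < end && b < end && *a == *b) { a++; b++; }
        if (b == end || (a < end && *b < *a)) {
            *smallerPtr = matchIndex;      /* extend the smaller spine */
            smallerPtr  = nextPtr + 1;     /* descend its larger subtree */
            matchIndex  = nextPtr[1];
        } else {
            *largerPtr = matchIndex;       /* extend the larger spine */
            largerPtr  = nextPtr;          /* descend its smaller subtree */
            matchIndex = nextPtr[0];
        }
    }
    *smallerPtr = *largerPtr = 0;
}

int main(void)
{
    static const uint8_t s[] = "banana";
    uint32_t i;
    for (i = 1; i + 1 < sizeof s; ++i)  /* skip 0: it means "empty" */
        btInsert(s, s + sizeof s - 1, i);
    return 0;  /* bt now orders positions 1..5 by their suffixes */
}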
static size_t
ZSTD_DUBT_findBetterDictMatch (
const ZSTD_matchState_t* ms,
@@ -158,7 +158,7 @@ ZSTD_DUBT_findBetterDictMatch (
U32 nbCompares,
U32 const mls,
const ZSTD_dictMode_e dictMode)
-{
+{
const ZSTD_matchState_t * const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
const U32 * const dictHashTable = dms->hashTable;
@@ -235,128 +235,128 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
const ZSTD_dictMode_e dictMode)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32* const hashTable = ms->hashTable;
- U32 const hashLog = cParams->hashLog;
- size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
- U32 matchIndex = hashTable[h];
-
- const BYTE* const base = ms->window.base;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+
+ const BYTE* const base = ms->window.base;
U32 const curr = (U32)(ip-base);
U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
-
- U32* const bt = ms->chainTable;
- U32 const btLog = cParams->chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
- U32 const unsortLimit = MAX(btLow, windowLow);
-
- U32* nextCandidate = bt + 2*(matchIndex&btMask);
- U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
- U32 nbCompares = 1U << cParams->searchLog;
- U32 nbCandidates = nbCompares;
- U32 previousCandidate = 0;
-
+ U32 const unsortLimit = MAX(btLow, windowLow);
+
+ U32* nextCandidate = bt + 2*(matchIndex&btMask);
+ U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+ U32 nbCompares = 1U << cParams->searchLog;
+ U32 nbCandidates = nbCompares;
+ U32 previousCandidate = 0;
+
DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
- assert(ip <= iend-8); /* required for h calculation */
+ assert(ip <= iend-8); /* required for h calculation */
assert(dictMode != ZSTD_dedicatedDictSearch);
-
- /* reach end of unsorted candidates list */
- while ( (matchIndex > unsortLimit)
- && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
- && (nbCandidates > 1) ) {
- DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
- matchIndex);
+
+ /* reach end of unsorted candidates list */
+ while ( (matchIndex > unsortLimit)
+ && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
+ && (nbCandidates > 1) ) {
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
+ matchIndex);
*unsortedMark = previousCandidate; /* the unsortedMark becomes a reversed chain, to move up back to original position */
- previousCandidate = matchIndex;
- matchIndex = *nextCandidate;
- nextCandidate = bt + 2*(matchIndex&btMask);
- unsortedMark = bt + 2*(matchIndex&btMask) + 1;
- nbCandidates --;
- }
-
+ previousCandidate = matchIndex;
+ matchIndex = *nextCandidate;
+ nextCandidate = bt + 2*(matchIndex&btMask);
+ unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+ nbCandidates --;
+ }
+
/* nullify last candidate if it's still unsorted
* simplification, detrimental to compression ratio, beneficial for speed */
- if ( (matchIndex > unsortLimit)
- && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
- DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
- matchIndex);
+ if ( (matchIndex > unsortLimit)
+ && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
+ matchIndex);
*nextCandidate = *unsortedMark = 0;
- }
-
- /* batch sort stacked candidates */
- matchIndex = previousCandidate;
- while (matchIndex) { /* will end on matchIndex == 0 */
- U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
- U32 const nextCandidateIdx = *nextCandidateIdxPtr;
+ }
+
+ /* batch sort stacked candidates */
+ matchIndex = previousCandidate;
+ while (matchIndex) { /* will end on matchIndex == 0 */
+ U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
+ U32 const nextCandidateIdx = *nextCandidateIdxPtr;
ZSTD_insertDUBT1(ms, matchIndex, iend,
nbCandidates, unsortLimit, dictMode);
- matchIndex = nextCandidateIdx;
- nbCandidates++;
- }
-
- /* find longest match */
+ matchIndex = nextCandidateIdx;
+ nbCandidates++;
+ }
+
+ /* find longest match */
{ size_t commonLengthSmaller = 0, commonLengthLarger = 0;
- const BYTE* const dictBase = ms->window.dictBase;
- const U32 dictLimit = ms->window.dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
U32* smallerPtr = bt + 2*(curr&btMask);
U32* largerPtr = bt + 2*(curr&btMask) + 1;
U32 matchEndIdx = curr + 8 + 1;
- U32 dummy32; /* to be nullified at the end */
- size_t bestLength = 0;
-
- matchIndex = hashTable[h];
+ U32 dummy32; /* to be nullified at the end */
+ size_t bestLength = 0;
+
+ matchIndex = hashTable[h];
hashTable[h] = curr; /* Update Hash Table */
-
+
for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
- U32* const nextPtr = bt + 2*(matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
- const BYTE* match;
-
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match;
+
if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
- match = base + matchIndex;
- matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
- if (matchIndex+matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
- if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
if (dictMode == ZSTD_dictMatchState) {
nbCompares = 0; /* in addition to avoiding checking any
* further in this loop, make sure we
* skip checking in the dictionary. */
}
- break; /* drop, to guarantee consistency (miss a little bit of compression) */
- }
- }
-
- if (match[matchLength] < ip[matchLength]) {
- /* match is smaller than current */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
- } else {
- /* match is larger than current */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- } }
-
- *smallerPtr = *largerPtr = 0;
-
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
if (dictMode == ZSTD_dictMatchState && nbCompares) {
bestLength = ZSTD_DUBT_findBetterDictMatch(
@@ -366,35 +366,35 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
}
assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
- ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
- if (bestLength >= MINMATCH) {
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ if (bestLength >= MINMATCH) {
U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
- DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
- }
- return bestLength;
- }
-}
-
-
-/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+ }
+ return bestLength;
+ }
+}
+
+
+/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
FORCE_INLINE_TEMPLATE size_t
ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 mls /* template */,
const ZSTD_dictMode_e dictMode)
-{
- DEBUGLOG(7, "ZSTD_BtFindBestMatch");
- if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
+{
+ DEBUGLOG(7, "ZSTD_BtFindBestMatch");
+ if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
ZSTD_updateDUBT(ms, ip, iLimit, mls);
return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
-}
-
+}
+
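The "reach end of unsorted candidates list" walk in ZSTD_DUBT_findBestMatch above flips each unsortedMark to point at the previously visited candidate, turning the unsorted run into a reversed chain so the batch-sort phase can re-insert candidates oldest-first. The same pointer flip on a toy array-backed list, where index 0 terminates as in the code and sizes are illustrative:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t next[8] = {0, 0, 1, 2, 3};    /* chain: 4 -> 3 -> 2 -> 1 -> 0 */
    uint32_t cur = 4, prev = 0;
    while (cur) {                           /* reverse the chain in place */
        uint32_t const nxt = next[cur];
        next[cur] = prev;                   /* the unsortedMark trick above */
        prev = cur;
        cur = nxt;
    }
    for (cur = prev; cur; cur = next[cur])  /* now walks 1 -> 2 -> 3 -> 4 */
        printf("%u ", (unsigned)cur);
    printf("\n");
    return 0;
}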
/***********************************
* Dedicated dict search
-***********************************/
-
+***********************************/
+
void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
{
const BYTE* const base = ms->window.base;
@@ -408,7 +408,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B
U32 const cacheSize = bucketSize - 1;
U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
-
+
/* We know the hashtable is oversized by a factor of `bucketSize`.
* We are going to temporarily pretend `bucketSize == 1`, keeping only a
* single entry. We will use the rest of the space to construct a temporary
@@ -643,23 +643,23 @@ U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
}
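ZSTD_HcFindBestMatch below is a textbook hash chain: the hash table remembers the most recent position per hash, the chain table links each position to the previous one sharing that hash, and NEXT_IN_CHAIN is a masked array lookup. A self-contained toy with a stand-in hash in place of ZSTD_hashPtr:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CHAIN_LOG  4
#define CHAIN_SIZE (1u << CHAIN_LOG)
#define CHAIN_MASK (CHAIN_SIZE - 1)
#define HASH_SIZE  16

static uint32_t head[HASH_SIZE];     /* hash -> most recent position */
static uint32_t chain[CHAIN_SIZE];   /* position -> previous same-hash position */

static uint32_t toyHash(const char* p) { return (uint32_t)(p[0] + p[1]) % HASH_SIZE; }

int main(void)
{
    const char* src = "abcabcabc";
    uint32_t pos;
    for (pos = 1; pos + 1 < (uint32_t)strlen(src); pos++) {  /* 0 is the sentinel */
        uint32_t const h = toyHash(src + pos);
        chain[pos & CHAIN_MASK] = head[h];                   /* link to predecessor */
        head[h] = pos;
    }
    /* walk candidates for position 7 ("bc"), newest first: prints 7, 4, 1 */
    for (pos = head[toyHash(src + 7)]; pos; pos = chain[pos & CHAIN_MASK])
        printf("candidate at %u\n", (unsigned)pos);
    return 0;
}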
-/* inlining is important to hardwire a hot branch (template emulation) */
-FORCE_INLINE_TEMPLATE
+/* inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch(
ZSTD_matchState_t* ms,
- const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
const U32 mls, const ZSTD_dictMode_e dictMode)
-{
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32* const chainTable = ms->chainTable;
- const U32 chainSize = (1 << cParams->chainLog);
- const U32 chainMask = chainSize-1;
- const BYTE* const base = ms->window.base;
- const BYTE* const dictBase = ms->window.dictBase;
- const U32 dictLimit = ms->window.dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainSize = (1 << cParams->chainLog);
+ const U32 chainMask = chainSize-1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
const U32 curr = (U32)(ip-base);
const U32 maxDistance = 1U << cParams->windowLog;
const U32 lowestValid = ms->window.lowLimit;
@@ -667,9 +667,9 @@ size_t ZSTD_HcFindBestMatch(
const U32 isDictionary = (ms->loadedDictEnd != 0);
const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
const U32 minChain = curr > chainSize ? curr - chainSize : 0;
- U32 nbAttempts = 1U << cParams->searchLog;
- size_t ml=4-1;
-
+ U32 nbAttempts = 1U << cParams->searchLog;
+ size_t ml=4-1;
+
const ZSTD_matchState_t* const dms = ms->dictMatchState;
const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
@@ -683,34 +683,34 @@ size_t ZSTD_HcFindBestMatch(
PREFETCH_L1(entry);
}
- /* HC4 match finder */
+ /* HC4 match finder */
matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
-
+
for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
- size_t currentMl=0;
+ size_t currentMl=0;
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
- const BYTE* const match = base + matchIndex;
+ const BYTE* const match = base + matchIndex;
assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
- if (match[ml] == ip[ml]) /* potentially better */
- currentMl = ZSTD_count(ip, match, iLimit);
- } else {
- const BYTE* const match = dictBase + matchIndex;
- assert(match+4 <= dictEnd);
- if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
- currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
- }
-
- /* save best solution */
- if (currentMl > ml) {
- ml = currentMl;
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
*offsetPtr = STORE_OFFSET(curr - matchIndex);
- if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
- }
-
- if (matchIndex <= minChain) break;
- matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
- }
-
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= minChain) break;
+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+ }
+
assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
if (dictMode == ZSTD_dedicatedDictSearch) {
ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
@@ -749,9 +749,9 @@ size_t ZSTD_HcFindBestMatch(
}
}
- return ml;
-}
-
+ return ml;
+}
+
/* *********************************
* (SIMD) Row-based matchfinder
***********************************/
@@ -760,7 +760,7 @@ size_t ZSTD_HcFindBestMatch(
#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
-
+
#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */
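The row-based matchfinder stores an 8-bit tag per entry (ZSTD_ROW_HASH_TAG_BITS above) and compares one whole row of tags against the current tag, yielding a ZSTD_VecMask whose set bits are the slots worth probing. A scalar sketch of that step; the real code performs the compare with SSE2/NEON and takes the row width from cParams:

#include <stdio.h>
#include <stdint.h>

#define ROW_ENTRIES 16               /* illustrative row width */
typedef uint64_t VecMask;

static VecMask rowMatchMask(const uint8_t* tagRow, uint8_t tag)
{
    VecMask m = 0;
    int i;
    for (i = 0; i < ROW_ENTRIES; i++)       /* one SIMD compare in zstd */
        if (tagRow[i] == tag) m |= (VecMask)1 << i;
    return m;
}

int main(void)
{
    uint8_t row[ROW_ENTRIES] = { 7, 3, 7, 9, 7 };   /* rest zero-initialized */
    /* tags match at slots 0, 2, 4 -> mask 0x15 */
    printf("mask=%#llx\n", (unsigned long long)rowMatchMask(row, 7));
    return 0;
}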
@@ -1437,9 +1437,9 @@ ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_VTABLE)
X(dedicatedDictSearch) \
}
-/* *******************************
-* Common parser - lazy strategy
-*********************************/
+/* *******************************
+* Common parser - lazy strategy
+*********************************/
typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
/**
@@ -1474,24 +1474,24 @@ ZSTD_selectLazyVTable(ZSTD_matchState_t const* ms, searchMethod_e searchMethod,
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_lazy_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore,
- U32 rep[ZSTD_REP_NUM],
- const void* src, size_t srcSize,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth,
ZSTD_dictMode_e const dictMode)
-{
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const BYTE* const iend = istart + srcSize;
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
const BYTE* const base = ms->window.base;
const U32 prefixLowestIndex = ms->window.dictLimit;
const BYTE* const prefixLowest = base + prefixLowestIndex;
-
+
searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, dictMode)->searchMax;
- U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
-
+ U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+
const int isDMS = dictMode == ZSTD_dictMatchState;
const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
const int isDxS = isDMS || isDDS;
@@ -1513,16 +1513,16 @@ ZSTD_compressBlock_lazy_generic(
U32 const curr = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
U32 const maxRep = curr - windowLow;
- if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
- }
+ if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ }
if (isDxS) {
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
}
-
+
if (searchMethod == search_rowHash) {
const U32 rowLog = MAX(4, MIN(6, ms->cParams.searchLog));
ZSTD_row_fillHashCache(ms, base, rowLog,
@@ -1530,20 +1530,20 @@ ZSTD_compressBlock_lazy_generic(
ms->nextToUpdate, ilimit);
}
- /* Match Loop */
+ /* Match Loop */
#if defined(__GNUC__) && defined(__x86_64__)
    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
* code alignment is perturbed. To fix the instability align the loop on 32-bytes.
*/
__asm__(".p2align 5");
#endif
- while (ip < ilimit) {
- size_t matchLength=0;
+ while (ip < ilimit) {
+ size_t matchLength=0;
size_t offcode=STORE_REPCODE_1;
- const BYTE* start=ip+1;
+ const BYTE* start=ip+1;
DEBUGLOG(7, "search baseline (depth 0)");
-
- /* check repCode */
+
+ /* check repCode */
if (isDxS) {
const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
@@ -1559,35 +1559,35 @@ ZSTD_compressBlock_lazy_generic(
}
if ( dictMode == ZSTD_noDict
&& ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
- matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
- if (depth==0) goto _storeSequence;
- }
-
- /* first search (depth 0) */
+ matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+
+ /* first search (depth 0) */
{ size_t offsetFound = 999999999;
size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
- if (ml2 > matchLength)
+ if (ml2 > matchLength)
matchLength = ml2, start = ip, offcode=offsetFound;
- }
-
- if (matchLength < 4) {
- ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
- continue;
- }
-
- /* let's try to find a better solution */
- if (depth>=1)
- while (ip<ilimit) {
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
DEBUGLOG(7, "search depth 1");
- ip ++;
+ ip ++;
if ( (dictMode == ZSTD_noDict)
&& (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
- size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
- int const gain2 = (int)(mlRep * 3);
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 3);
int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
- if ((mlRep >= 4) && (gain2 > gain1))
+ if ((mlRep >= 4) && (gain2 > gain1))
matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
- }
+ }
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
@@ -1607,15 +1607,15 @@ ZSTD_compressBlock_lazy_generic(
size_t const ml2 = searchMax(ms, ip, iend, &offset2);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
- if ((ml2 >= 4) && (gain2 > gain1)) {
+ if ((ml2 >= 4) && (gain2 > gain1)) {
matchLength = ml2, offcode = offset2, start = ip;
- continue; /* search a better one */
- } }
-
- /* let's find an even better one */
- if ((depth==2) && (ip<ilimit)) {
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
DEBUGLOG(7, "search depth 2");
- ip ++;
+ ip ++;
if ( (dictMode == ZSTD_noDict)
&& (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
@@ -1623,7 +1623,7 @@ ZSTD_compressBlock_lazy_generic(
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
- }
+ }
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
@@ -1643,18 +1643,18 @@ ZSTD_compressBlock_lazy_generic(
size_t const ml2 = searchMax(ms, ip, iend, &offset2);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
- if ((ml2 >= 4) && (gain2 > gain1)) {
+ if ((ml2 >= 4) && (gain2 > gain1)) {
matchLength = ml2, offcode = offset2, start = ip;
- continue;
- } } }
- break; /* nothing found : store previous solution */
- }
-
- /* NOTE:
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* NOTE:
     * Pay attention that `start[-value]` can lead to undefined behavior,
     * notably if `value` is unsigned, resulting in a large positive `-value`.
- */
- /* catch up */
+ */
+ /* catch up */
if (STORED_IS_OFFSET(offcode)) {
if (dictMode == ZSTD_noDict) {
while ( ((start > anchor) & (start - STORED_OFFSET(offcode) > prefixLowest))
@@ -1668,15 +1668,15 @@ ZSTD_compressBlock_lazy_generic(
while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
}
offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
- }
- /* store sequence */
-_storeSequence:
+ }
+ /* store sequence */
+_storeSequence:
{ size_t const litLength = (size_t)(start - anchor);
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
- anchor = ip = start + matchLength;
- }
-
- /* check immediate repcode */
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
if (isDxS) {
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
@@ -1697,7 +1697,7 @@ _storeSequence:
break;
}
}
-
+
if (dictMode == ZSTD_noDict) {
while ( ((ip <= ilimit) & (offset_2>0))
&& (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
@@ -1710,50 +1710,50 @@ _storeSequence:
continue; /* faster when present ... (?) */
} } }
- /* Save reps for next block */
- rep[0] = offset_1 ? offset_1 : savedOffset;
- rep[1] = offset_2 ? offset_2 : savedOffset;
-
- /* Return the last literals size */
+ /* Save reps for next block */
+ rep[0] = offset_1 ? offset_1 : savedOffset;
+ rep[1] = offset_2 ? offset_2 : savedOffset;
+
+ /* Return the last literals size */
return (size_t)(iend - anchor);
-}
-
-
-size_t ZSTD_compressBlock_btlazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+}
+
+
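The wrappers that follow differ only in the (searchMethod, depth) pair passed to ZSTD_compressBlock_lazy_generic: greedy is depth 0, lazy depth 1, lazy2 and btlazy2 depth 2. At each extra depth the parser keeps the current match unless a candidate at ip+1 wins the gain comparison seen above; worked numbers, with the STORED_TO_OFFBASE conversion simplified to a raw offset and a portable stand-in for ZSTD_highbit32:

#include <stdio.h>
#include <stdint.h>

static int highbit32(uint32_t v) { int n = 0; while (v >>= 1) n++; return n; }

int main(void)
{
    uint32_t const matchLength = 6, offset1 = 1u << 10;  /* current match */
    uint32_t const ml2         = 7, offset2 = 1u << 16;  /* candidate at ip+1 */
    /* constants mirror the depth-1 comparison above */
    int const gain2 = (int)(ml2 * 4 - highbit32(offset2));          /* 12 */
    int const gain1 = (int)(matchLength * 4 - highbit32(offset1) + 4); /* 18 */
    printf("gain2=%d gain1=%d -> %s\n", gain2, gain1,
           gain2 > gain1 ? "take new match" : "keep current");
    return 0;
}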
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-{
+{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
-}
-
-size_t ZSTD_compressBlock_lazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+}
+
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-{
+{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
-}
-
-size_t ZSTD_compressBlock_lazy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+}
+
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-{
+{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
-}
-
-size_t ZSTD_compressBlock_greedy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+}
+
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-{
+{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
-}
-
+}
+
size_t ZSTD_compressBlock_btlazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
}
-
+
size_t ZSTD_compressBlock_lazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
@@ -1862,223 +1862,223 @@ size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
}
-FORCE_INLINE_TEMPLATE
-size_t ZSTD_compressBlock_lazy_extDict_generic(
- ZSTD_matchState_t* ms, seqStore_t* seqStore,
- U32 rep[ZSTD_REP_NUM],
- const void* src, size_t srcSize,
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth)
-{
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const BYTE* const iend = istart + srcSize;
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
- const BYTE* const base = ms->window.base;
- const U32 dictLimit = ms->window.dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const BYTE* const dictBase = ms->window.dictBase;
- const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const base = ms->window.base;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const dictStart = dictBase + ms->window.lowLimit;
const U32 windowLog = ms->cParams.windowLog;
const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5;
-
+
searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, ZSTD_extDict)->searchMax;
- U32 offset_1 = rep[0], offset_2 = rep[1];
-
+ U32 offset_1 = rep[0], offset_2 = rep[1];
+
DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
- /* init */
- ip += (ip == prefixStart);
+ /* init */
+ ip += (ip == prefixStart);
if (searchMethod == search_rowHash) {
ZSTD_row_fillHashCache(ms, base, rowLog,
MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
ms->nextToUpdate, ilimit);
}
-
- /* Match Loop */
+
+ /* Match Loop */
#if defined(__GNUC__) && defined(__x86_64__)
    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
* code alignment is perturbed. To fix the instability align the loop on 32-bytes.
*/
__asm__(".p2align 5");
#endif
- while (ip < ilimit) {
- size_t matchLength=0;
+ while (ip < ilimit) {
+ size_t matchLength=0;
size_t offcode=STORE_REPCODE_1;
- const BYTE* start=ip+1;
+ const BYTE* start=ip+1;
U32 curr = (U32)(ip-base);
-
- /* check repCode */
+
+ /* check repCode */
{ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
const U32 repIndex = (U32)(curr+1 - offset_1);
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
& (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
- if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
-                /* repcode detected, we should take it */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- if (depth==0) goto _storeSequence;
- } }
-
- /* first search (depth 0) */
+ if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+                /* repcode detected, we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ if (depth==0) goto _storeSequence;
+ } }
+
+ /* first search (depth 0) */
{ size_t offsetFound = 999999999;
size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
- if (ml2 > matchLength)
+ if (ml2 > matchLength)
matchLength = ml2, start = ip, offcode=offsetFound;
- }
-
+ }
+
if (matchLength < 4) {
- ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
- continue;
- }
-
- /* let's try to find a better solution */
- if (depth>=1)
- while (ip<ilimit) {
- ip ++;
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
curr++;
- /* check repCode */
+ /* check repCode */
if (offcode) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
- if (MEM_read32(ip) == MEM_read32(repMatch)) {
- /* repcode detected */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- int const gain2 = (int)(repLength * 3);
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 3);
int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
- if ((repLength >= 4) && (gain2 > gain1))
+ if ((repLength >= 4) && (gain2 > gain1))
matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
- } }
-
- /* search match, depth 1 */
+ } }
+
+ /* search match, depth 1 */
{ size_t offset2=999999999;
size_t const ml2 = searchMax(ms, ip, iend, &offset2);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
- if ((ml2 >= 4) && (gain2 > gain1)) {
+ if ((ml2 >= 4) && (gain2 > gain1)) {
matchLength = ml2, offcode = offset2, start = ip;
- continue; /* search a better one */
- } }
-
- /* let's find an even better one */
- if ((depth==2) && (ip<ilimit)) {
- ip ++;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
curr++;
- /* check repCode */
+ /* check repCode */
if (offcode) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
- if (MEM_read32(ip) == MEM_read32(repMatch)) {
- /* repcode detected */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- int const gain2 = (int)(repLength * 4);
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 4);
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
- if ((repLength >= 4) && (gain2 > gain1))
+ if ((repLength >= 4) && (gain2 > gain1))
matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
- } }
-
- /* search match, depth 2 */
+ } }
+
+ /* search match, depth 2 */
{ size_t offset2=999999999;
size_t const ml2 = searchMax(ms, ip, iend, &offset2);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
- if ((ml2 >= 4) && (gain2 > gain1)) {
+ if ((ml2 >= 4) && (gain2 > gain1)) {
matchLength = ml2, offcode = offset2, start = ip;
- continue;
- } } }
- break; /* nothing found : store previous solution */
- }
-
- /* catch up */
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* catch up */
if (STORED_IS_OFFSET(offcode)) {
U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
- const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
- const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
- while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
- }
-
- /* store sequence */
-_storeSequence:
+ }
+
+ /* store sequence */
+_storeSequence:
{ size_t const litLength = (size_t)(start - anchor);
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
- anchor = ip = start + matchLength;
- }
-
- /* check immediate repcode */
- while (ip <= ilimit) {
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
const U32 repCurrent = (U32)(ip-base);
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
const U32 repIndex = repCurrent - offset_2;
- const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
- const BYTE* const repMatch = repBase + repIndex;
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
& (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
- if (MEM_read32(ip) == MEM_read32(repMatch)) {
-            /* repcode detected, we should take it */
- const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
- matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+            /* repcode detected, we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset history */
ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
- ip += matchLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- }
- break;
- } }
-
- /* Save reps for next block */
- rep[0] = offset_1;
- rep[1] = offset_2;
-
- /* Return the last literals size */
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ }
+ break;
+ } }
+
+ /* Save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
return (size_t)(iend - anchor);
-}
-
-
-size_t ZSTD_compressBlock_greedy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+}
+
+
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-{
+{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
-}
-
-size_t ZSTD_compressBlock_lazy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+}
+
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-
-{
+
+{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
-}
-
-size_t ZSTD_compressBlock_lazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-
-{
+
+{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
-}
-
-size_t ZSTD_compressBlock_btlazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+}
+
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
-
-{
+
+{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
-}
+}
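All of the extDict repcode checks above share the "intentional overflow" filter ((U32)((dictLimit-1) - repIndex) >= 3): it rejects exactly the three rep positions whose 4-byte read would straddle the dict/prefix boundary, while unsigned wraparound lets positions at or past dictLimit through (the windowLow clause then handles validity separately). Worked numbers:

#include <stdio.h>

int main(void)
{
    unsigned const dictLimit = 100;
    unsigned repIndex;
    for (repIndex = 95; repIndex <= 102; repIndex++) {
        /* wraps to a huge value for repIndex >= dictLimit, so those pass */
        int const ok = (unsigned)((dictLimit - 1) - repIndex) >= 3;
        printf("repIndex=%u -> %s\n", repIndex,
               ok ? "testable" : "straddles boundary, skipped");
    }
    return 0;   /* 97, 98, 99 are skipped; everything else is testable */
}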
size_t ZSTD_compressBlock_greedy_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
diff --git a/contrib/libs/zstd/lib/compress/zstd_lazy.h b/contrib/libs/zstd/lib/compress/zstd_lazy.h
index 150f7b390b..c98a4359f3 100644
--- a/contrib/libs/zstd/lib/compress/zstd_lazy.h
+++ b/contrib/libs/zstd/lib/compress/zstd_lazy.h
@@ -1,22 +1,22 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_LAZY_H
-#define ZSTD_LAZY_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#include "zstd_compress_internal.h"
-
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LAZY_H
+#define ZSTD_LAZY_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "zstd_compress_internal.h"
+
/**
* Dedicated Dictionary Search Structure bucket log. In the
* ZSTD_dedicatedDictSearch mode, the hashTable has
@@ -27,22 +27,22 @@ extern "C" {
U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);
-
+
void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
-
-size_t ZSTD_compressBlock_btlazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -53,7 +53,7 @@ size_t ZSTD_compressBlock_lazy_row(
size_t ZSTD_compressBlock_greedy_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
+
size_t ZSTD_compressBlock_btlazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
@@ -95,14 +95,14 @@ size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_greedy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_lazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -113,13 +113,13 @@ size_t ZSTD_compressBlock_lazy_extDict_row(
size_t ZSTD_compressBlock_lazy2_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btlazy2_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_LAZY_H */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LAZY_H */
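Everything declared above follows one pattern: a single generic parser, thin wrappers pinning down search method, depth and dictionary mode, and runtime selection among them. A toy of that template-emulation pattern; the names and the function-pointer table are stand-ins, not the internal zstd API:

#include <stdio.h>
#include <stddef.h>

typedef size_t (*blockCompressor)(const void* src, size_t srcSize);

static size_t lazy_generic(const void* src, size_t srcSize, int depth)
{ (void)src; printf("depth=%d\n", depth); return srcSize; }

/* wrappers fix the depth, like the _greedy/_lazy/_lazy2 entry points above */
static size_t greedy(const void* s, size_t n) { return lazy_generic(s, n, 0); }
static size_t lazy  (const void* s, size_t n) { return lazy_generic(s, n, 1); }
static size_t lazy2 (const void* s, size_t n) { return lazy_generic(s, n, 2); }

int main(void)
{
    blockCompressor const table[3] = { greedy, lazy, lazy2 };
    return (int)table[1]("abc", 3) - 3;   /* selects the depth-1 parser */
}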
diff --git a/contrib/libs/zstd/lib/compress/zstd_ldm.c b/contrib/libs/zstd/lib/compress/zstd_ldm.c
index 476b45746e..da14f7896b 100644
--- a/contrib/libs/zstd/lib/compress/zstd_ldm.c
+++ b/contrib/libs/zstd/lib/compress/zstd_ldm.c
@@ -1,25 +1,25 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
- */
-
-#include "zstd_ldm.h"
-
+ */
+
+#include "zstd_ldm.h"
+
#include "../common/debug.h"
#include <contrib/libs/xxhash/xxhash.h>
-#include "zstd_fast.h" /* ZSTD_fillHashTable() */
-#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
+#include "zstd_fast.h" /* ZSTD_fillHashTable() */
+#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
#include "zstd_ldm_geartab.h"
-
-#define LDM_BUCKET_SIZE_LOG 3
-#define LDM_MIN_MATCH_LENGTH 64
-#define LDM_HASH_RLOG 7
-
+
+#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_MIN_MATCH_LENGTH 64
+#define LDM_HASH_RLOG 7
+
typedef struct {
U64 rolling;
U64 stopMask;
@@ -132,80 +132,80 @@ done:
return n;
}
-void ZSTD_ldm_adjustParameters(ldmParams_t* params,
- ZSTD_compressionParameters const* cParams)
-{
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams)
+{
params->windowLog = cParams->windowLog;
- ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
- DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
- if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
- if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
- if (params->hashLog == 0) {
+ ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
+ DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
+ if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
+ if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (params->hashLog == 0) {
params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
- assert(params->hashLog <= ZSTD_HASHLOG_MAX);
- }
+ assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ }
if (params->hashRateLog == 0) {
params->hashRateLog = params->windowLog < params->hashLog
? 0
: params->windowLog - params->hashLog;
- }
- params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
-}
-
-size_t ZSTD_ldm_getTableSize(ldmParams_t params)
-{
- size_t const ldmHSize = ((size_t)1) << params.hashLog;
- size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
+ }
+ params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
+}
+
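ZSTD_ldm_adjustParameters above defaults hashLog from the window size and derives hashRateLog as their difference. Worked numbers below; the value 6 for ZSTD_HASHLOG_MIN matches upstream zstd.h but is restated here as an assumption:

#include <stdio.h>

#define LDM_HASH_RLOG 7          /* copied from this file */
#define HASHLOG_MIN   6          /* assumed value of ZSTD_HASHLOG_MIN */
#define MAX(a,b) ((a) > (b) ? (a) : (b))

int main(void)
{
    unsigned const windowLog   = 27;   /* e.g. a high-level window */
    unsigned const hashLog     = MAX(HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
    unsigned const hashRateLog = windowLog < hashLog ? 0 : windowLog - hashLog;
    printf("hashLog=%u hashRateLog=%u\n", hashLog, hashRateLog);  /* 20, 7 */
    return 0;
}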
+size_t ZSTD_ldm_getTableSize(ldmParams_t params)
+{
+ size_t const ldmHSize = ((size_t)1) << params.hashLog;
+ size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
+ ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
-}
-
-size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
-{
+}
+
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
+{
return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
-}
-
-/** ZSTD_ldm_getBucket() :
- * Returns a pointer to the start of the bucket associated with hash. */
-static ldmEntry_t* ZSTD_ldm_getBucket(
- ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
-{
- return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
-}
-
-/** ZSTD_ldm_insertEntry() :
- * Insert the entry with corresponding hash into the hash table */
-static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
- size_t const hash, const ldmEntry_t entry,
- ldmParams_t const ldmParams)
-{
+}
+
+/** ZSTD_ldm_getBucket() :
+ * Returns a pointer to the start of the bucket associated with hash. */
+static ldmEntry_t* ZSTD_ldm_getBucket(
+ ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+{
+ return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+}
+
+/** ZSTD_ldm_insertEntry() :
+ * Insert the entry with corresponding hash into the hash table */
+static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
+ size_t const hash, const ldmEntry_t entry,
+ ldmParams_t const ldmParams)
+{
BYTE* const pOffset = ldmState->bucketOffsets + hash;
unsigned const offset = *pOffset;
-
+
*(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
*pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
-}
-
-/** ZSTD_ldm_countBackwardsMatch() :
- * Returns the number of bytes that match backwards before pIn and pMatch.
- *
- * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
-static size_t ZSTD_ldm_countBackwardsMatch(
- const BYTE* pIn, const BYTE* pAnchor,
+}
+
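ZSTD_ldm_insertEntry above is a small rotating bucket: each hash owns 2^bucketSizeLog slots plus a one-byte cursor in bucketOffsets that wraps, so a new entry always evicts the oldest. The same mechanism on a toy bucket with an illustrative size:

#include <stdio.h>
#include <stdint.h>

#define BUCKET_SIZE_LOG 2
#define BUCKET_SIZE (1u << BUCKET_SIZE_LOG)

int main(void)
{
    uint32_t bucket[BUCKET_SIZE] = {0};
    uint8_t  cursor = 0;
    uint32_t e;
    for (e = 1; e <= 6; e++) {                 /* six inserts into four slots */
        bucket[cursor] = e;
        cursor = (uint8_t)((cursor + 1) & (BUCKET_SIZE - 1));  /* wrap */
    }
    for (e = 0; e < BUCKET_SIZE; e++)          /* prints 5 6 3 4: 1 and 2 evicted */
        printf("%u ", (unsigned)bucket[e]);
    printf("\n");
    return 0;
}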
+/** ZSTD_ldm_countBackwardsMatch() :
+ * Returns the number of bytes that match backwards before pIn and pMatch.
+ *
+ * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
+static size_t ZSTD_ldm_countBackwardsMatch(
+ const BYTE* pIn, const BYTE* pAnchor,
const BYTE* pMatch, const BYTE* pMatchBase)
-{
- size_t matchLength = 0;
+{
+ size_t matchLength = 0;
while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
- pIn--;
- pMatch--;
- matchLength++;
- }
- return matchLength;
-}
-
+ pIn--;
+ pMatch--;
+ matchLength++;
+ }
+ return matchLength;
+}
+
/** ZSTD_ldm_countBackwardsMatch_2segments() :
* Returns the number of bytes that match backwards from pMatch,
* even with the backwards match spanning 2 different segments.
@@ -227,43 +227,43 @@ static size_t ZSTD_ldm_countBackwardsMatch_2segments(
return matchLength;
}
-/** ZSTD_ldm_fillFastTables() :
- *
- * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
- * This is similar to ZSTD_loadDictionaryContent.
- *
- * The tables for the other strategies are filled within their
- * block compressors. */
-static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
- void const* end)
-{
- const BYTE* const iend = (const BYTE*)end;
-
+/** ZSTD_ldm_fillFastTables() :
+ *
+ * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
+ * This is similar to ZSTD_loadDictionaryContent.
+ *
+ * The tables for the other strategies are filled within their
+ * block compressors. */
+static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+ void const* end)
+{
+ const BYTE* const iend = (const BYTE*)end;
+
switch(ms->cParams.strategy)
- {
- case ZSTD_fast:
+ {
+ case ZSTD_fast:
ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
- break;
-
- case ZSTD_dfast:
+ break;
+
+ case ZSTD_dfast:
ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
- break;
-
- case ZSTD_greedy:
- case ZSTD_lazy:
- case ZSTD_lazy2:
- case ZSTD_btlazy2:
- case ZSTD_btopt:
- case ZSTD_btultra:
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
case ZSTD_btultra2:
- break;
- default:
- assert(0); /* not possible : not a valid strategy id */
- }
-
- return 0;
-}
-
+ break;
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ return 0;
+}
+
void ZSTD_ldm_fillHashTable(
ldmState_t* ldmState, const BYTE* ip,
const BYTE* iend, ldmParams_t const* params)
@@ -302,53 +302,53 @@ void ZSTD_ldm_fillHashTable(
ip += hashed;
}
}
-
-
-/** ZSTD_ldm_limitTableUpdate() :
- *
- * Sets cctx->nextToUpdate to a position closer to anchor
- * if it is far away
- * (after a long match, only update tables a limited amount). */
-static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
-{
+
+
+/** ZSTD_ldm_limitTableUpdate() :
+ *
+ * Sets cctx->nextToUpdate to a position closer to anchor
+ * if it is far away
+ * (after a long match, only update tables a limited amount). */
+static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+{
U32 const curr = (U32)(anchor - ms->window.base);
if (curr > ms->nextToUpdate + 1024) {
- ms->nextToUpdate =
+ ms->nextToUpdate =
curr - MIN(512, curr - ms->nextToUpdate - 1024);
- }
-}
-
-static size_t ZSTD_ldm_generateSequences_internal(
- ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
- ldmParams_t const* params, void const* src, size_t srcSize)
-{
- /* LDM parameters */
- int const extDict = ZSTD_window_hasExtDict(ldmState->window);
- U32 const minMatchLength = params->minMatchLength;
+ }
+}
+
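ZSTD_ldm_limitTableUpdate above caps how much table indexing catches up after a long match: once anchor runs more than 1024 positions ahead of nextToUpdate, nextToUpdate jumps to at most 512 positions behind anchor, skipping the span in between. Worked numbers:

#include <stdio.h>
#include <stdint.h>

#define MIN(a,b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint32_t const curr = 10000;          /* anchor position */
    uint32_t nextToUpdate = 2000;         /* far behind after a long match */
    if (curr > nextToUpdate + 1024)
        nextToUpdate = curr - MIN(512, curr - nextToUpdate - 1024);
    printf("nextToUpdate=%u\n", (unsigned)nextToUpdate);   /* 9488 */
    return 0;
}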
+static size_t ZSTD_ldm_generateSequences_internal(
+ ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ /* LDM parameters */
+ int const extDict = ZSTD_window_hasExtDict(ldmState->window);
+ U32 const minMatchLength = params->minMatchLength;
U32 const entsPerBucket = 1U << params->bucketSizeLog;
- U32 const hBits = params->hashLog - params->bucketSizeLog;
- /* Prefix and extDict parameters */
- U32 const dictLimit = ldmState->window.dictLimit;
- U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
- BYTE const* const base = ldmState->window.base;
- BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
- BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
- BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
- BYTE const* const lowPrefixPtr = base + dictLimit;
- /* Input bounds */
- BYTE const* const istart = (BYTE const*)src;
- BYTE const* const iend = istart + srcSize;
+ U32 const hBits = params->hashLog - params->bucketSizeLog;
+ /* Prefix and extDict parameters */
+ U32 const dictLimit = ldmState->window.dictLimit;
+ U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
+ BYTE const* const base = ldmState->window.base;
+ BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
+ BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
+ BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
+ BYTE const* const lowPrefixPtr = base + dictLimit;
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
BYTE const* const ilimit = iend - HASH_READ_SIZE;
- /* Input positions */
- BYTE const* anchor = istart;
- BYTE const* ip = istart;
+ /* Input positions */
+ BYTE const* anchor = istart;
+ BYTE const* ip = istart;
/* Rolling hash state */
ldmRollingHashState_t hashState;
/* Arrays for staged-processing */
size_t* const splits = ldmState->splitIndices;
ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
unsigned numSplits;
-
+
if (srcSize < minMatchLength)
return iend - anchor;
@@ -375,8 +375,8 @@ static size_t ZSTD_ldm_generateSequences_internal(
candidates[n].checksum = (U32)(xxhash >> 32);
candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
PREFETCH_L1(candidates[n].bucket);
- }
-
+ }
+
for (n = 0; n < numSplits; n++) {
size_t forwardMatchLength = 0, backwardMatchLength = 0,
bestMatchLength = 0, mLength;
@@ -388,7 +388,7 @@ static size_t ZSTD_ldm_generateSequences_internal(
ldmEntry_t const* cur;
ldmEntry_t const* bestEntry = NULL;
ldmEntry_t newEntry;
-
+
newEntry.offset = (U32)(split - base);
newEntry.checksum = checksum;
@@ -401,58 +401,58 @@ static size_t ZSTD_ldm_generateSequences_internal(
}
for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
- size_t curForwardMatchLength, curBackwardMatchLength,
- curTotalMatchLength;
- if (cur->checksum != checksum || cur->offset <= lowestIndex) {
- continue;
- }
- if (extDict) {
- BYTE const* const curMatchBase =
- cur->offset < dictLimit ? dictBase : base;
- BYTE const* const pMatch = curMatchBase + cur->offset;
- BYTE const* const matchEnd =
- cur->offset < dictLimit ? dictEnd : iend;
- BYTE const* const lowMatchPtr =
- cur->offset < dictLimit ? dictStart : lowPrefixPtr;
+ size_t curForwardMatchLength, curBackwardMatchLength,
+ curTotalMatchLength;
+ if (cur->checksum != checksum || cur->offset <= lowestIndex) {
+ continue;
+ }
+ if (extDict) {
+ BYTE const* const curMatchBase =
+ cur->offset < dictLimit ? dictBase : base;
+ BYTE const* const pMatch = curMatchBase + cur->offset;
+ BYTE const* const matchEnd =
+ cur->offset < dictLimit ? dictEnd : iend;
+ BYTE const* const lowMatchPtr =
+ cur->offset < dictLimit ? dictStart : lowPrefixPtr;
curForwardMatchLength =
ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
- if (curForwardMatchLength < minMatchLength) {
- continue;
- }
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
- } else { /* !extDict */
- BYTE const* const pMatch = base + cur->offset;
+ } else { /* !extDict */
+ BYTE const* const pMatch = base + cur->offset;
curForwardMatchLength = ZSTD_count(split, pMatch, iend);
- if (curForwardMatchLength < minMatchLength) {
- continue;
- }
- curBackwardMatchLength =
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength =
ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
- }
+ }
curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
-
- if (curTotalMatchLength > bestMatchLength) {
- bestMatchLength = curTotalMatchLength;
- forwardMatchLength = curForwardMatchLength;
- backwardMatchLength = curBackwardMatchLength;
- bestEntry = cur;
- }
- }
-
+
+ if (curTotalMatchLength > bestMatchLength) {
+ bestMatchLength = curTotalMatchLength;
+ forwardMatchLength = curForwardMatchLength;
+ backwardMatchLength = curBackwardMatchLength;
+ bestEntry = cur;
+ }
+ }
+
/* No match found -- insert an entry into the hash table
* and process the next candidate match */
if (bestEntry == NULL) {
ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
continue;
}
-
+
/* Match found */
offset = (U32)(split - base) - bestEntry->offset;
mLength = forwardMatchLength + backwardMatchLength;
{
rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
-
+
/* Out of sequence storage */
if (rawSeqStore->size == rawSeqStore->capacity)
return ERROR(dstSize_tooSmall);
@@ -461,7 +461,7 @@ static size_t ZSTD_ldm_generateSequences_internal(
seq->offset = offset;
rawSeqStore->size++;
}
-
+
/* Insert the current entry into the hash table --- it must be
* done after the previous block to avoid clobbering bestEntry */
ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
@@ -482,164 +482,164 @@ static size_t ZSTD_ldm_generateSequences_internal(
ip = anchor - hashed;
break;
}
- }
-
+ }
+
ip += hashed;
}
-
- return iend - anchor;
-}
-
-/*! ZSTD_ldm_reduceTable() :
- * reduce table indexes by `reducerValue` */
-static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
- U32 const reducerValue)
-{
- U32 u;
- for (u = 0; u < size; u++) {
- if (table[u].offset < reducerValue) table[u].offset = 0;
- else table[u].offset -= reducerValue;
- }
-}
-
-size_t ZSTD_ldm_generateSequences(
- ldmState_t* ldmState, rawSeqStore_t* sequences,
- ldmParams_t const* params, void const* src, size_t srcSize)
-{
- U32 const maxDist = 1U << params->windowLog;
- BYTE const* const istart = (BYTE const*)src;
- BYTE const* const iend = istart + srcSize;
- size_t const kMaxChunkSize = 1 << 20;
- size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
- size_t chunk;
- size_t leftoverSize = 0;
-
- assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
- /* Check that ZSTD_window_update() has been called for this chunk prior
- * to passing it to this function.
- */
- assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
- /* The input could be very large (in zstdmt), so it must be broken up into
+
+ return iend - anchor;
+}
+
+/*! ZSTD_ldm_reduceTable() :
+ * reduce table indexes by `reducerValue` */
+static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
+ U32 const reducerValue)
+{
+ U32 u;
+ for (u = 0; u < size; u++) {
+ if (table[u].offset < reducerValue) table[u].offset = 0;
+ else table[u].offset -= reducerValue;
+ }
+}
+
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldmState, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ U32 const maxDist = 1U << params->windowLog;
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ size_t const kMaxChunkSize = 1 << 20;
+ size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
+ size_t chunk;
+ size_t leftoverSize = 0;
+
+ assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
+ /* Check that ZSTD_window_update() has been called for this chunk prior
+ * to passing it to this function.
+ */
+ assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
+ /* The input could be very large (in zstdmt), so it must be broken up into
* chunks to enforce the maximum distance and handle overflow correction.
- */
- assert(sequences->pos <= sequences->size);
- assert(sequences->size <= sequences->capacity);
- for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
- BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
- size_t const remaining = (size_t)(iend - chunkStart);
- BYTE const *const chunkEnd =
- (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
- size_t const chunkSize = chunkEnd - chunkStart;
- size_t newLeftoverSize;
- size_t const prevSize = sequences->size;
-
- assert(chunkStart < iend);
- /* 1. Perform overflow correction if necessary. */
+ */
+ assert(sequences->pos <= sequences->size);
+ assert(sequences->size <= sequences->capacity);
+ for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
+ BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
+ size_t const remaining = (size_t)(iend - chunkStart);
+ BYTE const *const chunkEnd =
+ (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
+ size_t const chunkSize = chunkEnd - chunkStart;
+ size_t newLeftoverSize;
+ size_t const prevSize = sequences->size;
+
+ assert(chunkStart < iend);
+ /* 1. Perform overflow correction if necessary. */
if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
- U32 const ldmHSize = 1U << params->hashLog;
- U32 const correction = ZSTD_window_correctOverflow(
+ U32 const ldmHSize = 1U << params->hashLog;
+ U32 const correction = ZSTD_window_correctOverflow(
&ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
- ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
+ ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
/* invalidate dictionaries on overflow correction */
ldmState->loadedDictEnd = 0;
- }
- /* 2. We enforce the maximum offset allowed.
- *
- * kMaxChunkSize should be small enough that we don't lose too much of
- * the window through early invalidation.
- * TODO: * Test the chunk size.
- * Try invalidation after the sequence generation and test
- * the offset against maxDist directly.
+ }
+ /* 2. We enforce the maximum offset allowed.
+ *
+ * kMaxChunkSize should be small enough that we don't lose too much of
+ * the window through early invalidation.
+ * TODO: * Test the chunk size.
+ * Try invalidation after the sequence generation and test
+ * the offset against maxDist directly.
*
* NOTE: Because of dictionaries + sequence splitting we MUST make sure
* that any offset used is valid at the END of the sequence, since it may
* be split into two sequences. This condition holds when using
* ZSTD_window_enforceMaxDist(), but if we move to checking offsets
* against maxDist directly, we'll have to carefully handle that case.
- */
+ */
ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
- /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
- newLeftoverSize = ZSTD_ldm_generateSequences_internal(
- ldmState, sequences, params, chunkStart, chunkSize);
- if (ZSTD_isError(newLeftoverSize))
- return newLeftoverSize;
- /* 4. We add the leftover literals from previous iterations to the first
- * newly generated sequence, or add the `newLeftoverSize` if none are
- * generated.
- */
- /* Prepend the leftover literals from the last call */
- if (prevSize < sequences->size) {
- sequences->seq[prevSize].litLength += (U32)leftoverSize;
- leftoverSize = newLeftoverSize;
- } else {
- assert(newLeftoverSize == chunkSize);
- leftoverSize += chunkSize;
- }
- }
- return 0;
-}
-
+ /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
+ newLeftoverSize = ZSTD_ldm_generateSequences_internal(
+ ldmState, sequences, params, chunkStart, chunkSize);
+ if (ZSTD_isError(newLeftoverSize))
+ return newLeftoverSize;
+ /* 4. We add the leftover literals from previous iterations to the first
+ * newly generated sequence, or add the `newLeftoverSize` if none are
+ * generated.
+ */
+ /* Prepend the leftover literals from the last call */
+ if (prevSize < sequences->size) {
+ sequences->seq[prevSize].litLength += (U32)leftoverSize;
+ leftoverSize = newLeftoverSize;
+ } else {
+ assert(newLeftoverSize == chunkSize);
+ leftoverSize += chunkSize;
+ }
+ }
+ return 0;
+}
+
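The chunk count computed above is an integer ceil-division, and each chunk is clamped to whatever remains of the input. A minimal standalone sketch of the same arithmetic (the input size and printout are illustrative only, not part of zstd):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t const kMaxChunkSize = 1 << 20;          /* 1 MiB, as above */
    size_t const srcSize = (3 << 20) + 12345;      /* example input size */
    /* ceil(srcSize / kMaxChunkSize) without floating point */
    size_t const nbChunks = (srcSize / kMaxChunkSize)
                          + ((srcSize % kMaxChunkSize) != 0);
    size_t chunk;
    for (chunk = 0; chunk < nbChunks; ++chunk) {
        size_t const start = chunk * kMaxChunkSize;
        size_t const remaining = srcSize - start;
        size_t const chunkSize = remaining < kMaxChunkSize ? remaining : kMaxChunkSize;
        printf("chunk %zu: offset %zu, size %zu\n", chunk, start, chunkSize);
    }
    return 0;   /* prints 4 chunks: 3 full ones plus a 12345-byte tail */
}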
void
ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
{
- while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
- rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
- if (srcSize <= seq->litLength) {
- /* Skip past srcSize literals */
- seq->litLength -= (U32)srcSize;
- return;
- }
- srcSize -= seq->litLength;
- seq->litLength = 0;
- if (srcSize < seq->matchLength) {
- /* Skip past the first srcSize of the match */
- seq->matchLength -= (U32)srcSize;
- if (seq->matchLength < minMatch) {
- /* The match is too short, omit it */
- if (rawSeqStore->pos + 1 < rawSeqStore->size) {
- seq[1].litLength += seq[0].matchLength;
- }
- rawSeqStore->pos++;
- }
- return;
- }
- srcSize -= seq->matchLength;
- seq->matchLength = 0;
- rawSeqStore->pos++;
- }
-}
-
-/**
- * If the sequence length is longer than remaining then the sequence is split
- * between this block and the next.
- *
- * Returns the current sequence to handle, or if the rest of the block should
- * be literals, it returns a sequence with offset == 0.
- */
-static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
- U32 const remaining, U32 const minMatch)
-{
- rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
- assert(sequence.offset > 0);
- /* Likely: No partial sequence */
- if (remaining >= sequence.litLength + sequence.matchLength) {
- rawSeqStore->pos++;
- return sequence;
- }
- /* Cut the sequence short (offset == 0 ==> rest is literals). */
- if (remaining <= sequence.litLength) {
- sequence.offset = 0;
- } else if (remaining < sequence.litLength + sequence.matchLength) {
- sequence.matchLength = remaining - sequence.litLength;
- if (sequence.matchLength < minMatch) {
- sequence.offset = 0;
- }
- }
- /* Skip past `remaining` bytes for the future sequences. */
- ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
- return sequence;
-}
-
+ while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
+ if (srcSize <= seq->litLength) {
+ /* Skip past srcSize literals */
+ seq->litLength -= (U32)srcSize;
+ return;
+ }
+ srcSize -= seq->litLength;
+ seq->litLength = 0;
+ if (srcSize < seq->matchLength) {
+ /* Skip past the first srcSize of the match */
+ seq->matchLength -= (U32)srcSize;
+ if (seq->matchLength < minMatch) {
+ /* The match is too short, omit it */
+ if (rawSeqStore->pos + 1 < rawSeqStore->size) {
+ seq[1].litLength += seq[0].matchLength;
+ }
+ rawSeqStore->pos++;
+ }
+ return;
+ }
+ srcSize -= seq->matchLength;
+ seq->matchLength = 0;
+ rawSeqStore->pos++;
+ }
+}
+
+/**
+ * If the sequence length is longer than remaining then the sequence is split
+ * between this block and the next.
+ *
+ * Returns the current sequence to handle, or if the rest of the block should
+ * be literals, it returns a sequence with offset == 0.
+ */
+static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
+ U32 const remaining, U32 const minMatch)
+{
+ rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
+ assert(sequence.offset > 0);
+ /* Likely: No partial sequence */
+ if (remaining >= sequence.litLength + sequence.matchLength) {
+ rawSeqStore->pos++;
+ return sequence;
+ }
+ /* Cut the sequence short (offset == 0 ==> rest is literals). */
+ if (remaining <= sequence.litLength) {
+ sequence.offset = 0;
+ } else if (remaining < sequence.litLength + sequence.matchLength) {
+ sequence.matchLength = remaining - sequence.litLength;
+ if (sequence.matchLength < minMatch) {
+ sequence.offset = 0;
+ }
+ }
+ /* Skip past `remaining` bytes for the future sequences. */
+ ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
+ return sequence;
+}
+
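The three cases above (sequence fits entirely, cut lands in the literals, cut lands in the match) can be exercised in isolation. The sketch below mirrors that logic with a simplified stand-in for rawSeq; the names are hypothetical and this is a reading aid, not the library's API:

#include <assert.h>

typedef struct { unsigned litLength, matchLength, offset; } rawSeqX; /* simplified stand-in */

/* Mirror of the cases above: whole sequence fits, cut lands in the
 * literals (rest of block becomes literals => offset 0), or cut lands
 * in the match (truncate it, and drop it if it falls below minMatch). */
static rawSeqX splitAt(rawSeqX s, unsigned remaining, unsigned minMatch)
{
    if (remaining >= s.litLength + s.matchLength) return s;     /* no split */
    if (remaining <= s.litLength) { s.offset = 0; return s; }   /* literals only */
    s.matchLength = remaining - s.litLength;                    /* truncated match */
    if (s.matchLength < minMatch) s.offset = 0;                 /* too short: literals */
    return s;
}

int main(void)
{
    rawSeqX const seq = { 10, 64, 1000 };
    assert(splitAt(seq, 100, 4).offset == 1000);    /* fits entirely */
    assert(splitAt(seq,   8, 4).offset == 0);       /* cut inside literals */
    assert(splitAt(seq,  12, 4).matchLength == 2 && splitAt(seq, 12, 4).offset == 0);
    assert(splitAt(seq,  20, 4).matchLength == 10); /* truncated but kept */
    return 0;
}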
void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
while (currPos && rawSeqStore->pos < rawSeqStore->size) {
@@ -657,21 +657,21 @@ void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
}
}
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_paramSwitch_e useRowMatchFinder,
void const* src, size_t srcSize)
-{
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
unsigned const minMatch = cParams->minMatch;
- ZSTD_blockCompressor const blockCompressor =
+ ZSTD_blockCompressor const blockCompressor =
ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
- /* Input bounds */
- BYTE const* const istart = (BYTE const*)src;
- BYTE const* const iend = istart + srcSize;
- /* Input positions */
- BYTE const* ip = istart;
-
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ /* Input positions */
+ BYTE const* ip = istart;
+
DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
/* If using opt parser, use LDMs only as candidates rather than always accepting them */
if (cParams->strategy >= ZSTD_btopt) {
@@ -682,43 +682,43 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
return lastLLSize;
}
- assert(rawSeqStore->pos <= rawSeqStore->size);
- assert(rawSeqStore->size <= rawSeqStore->capacity);
+ assert(rawSeqStore->pos <= rawSeqStore->size);
+ assert(rawSeqStore->size <= rawSeqStore->capacity);
/* Loop through each sequence and apply the block compressor to the literals */
- while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
- /* maybeSplitSequence updates rawSeqStore->pos */
- rawSeq const sequence = maybeSplitSequence(rawSeqStore,
- (U32)(iend - ip), minMatch);
- int i;
- /* End signal */
- if (sequence.offset == 0)
- break;
-
- assert(ip + sequence.litLength + sequence.matchLength <= iend);
-
- /* Fill tables for block compressor */
- ZSTD_ldm_limitTableUpdate(ms, ip);
+ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
+ /* maybeSplitSequence updates rawSeqStore->pos */
+ rawSeq const sequence = maybeSplitSequence(rawSeqStore,
+ (U32)(iend - ip), minMatch);
+ int i;
+ /* End signal */
+ if (sequence.offset == 0)
+ break;
+
+ assert(ip + sequence.litLength + sequence.matchLength <= iend);
+
+ /* Fill tables for block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
ZSTD_ldm_fillFastTables(ms, ip);
- /* Run the block compressor */
+ /* Run the block compressor */
DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
- {
- size_t const newLitLength =
+ {
+ size_t const newLitLength =
blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
- ip += sequence.litLength;
- /* Update the repcodes */
- for (i = ZSTD_REP_NUM - 1; i > 0; i--)
- rep[i] = rep[i-1];
- rep[0] = sequence.offset;
- /* Store the sequence */
+ ip += sequence.litLength;
+ /* Update the repcodes */
+ for (i = ZSTD_REP_NUM - 1; i > 0; i--)
+ rep[i] = rep[i-1];
+ rep[0] = sequence.offset;
+ /* Store the sequence */
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
STORE_OFFSET(sequence.offset),
sequence.matchLength);
- ip += sequence.matchLength;
- }
- }
- /* Fill the tables for the block compressor */
- ZSTD_ldm_limitTableUpdate(ms, ip);
+ ip += sequence.matchLength;
+ }
+ }
+ /* Fill the tables for the block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
ZSTD_ldm_fillFastTables(ms, ip);
- /* Compress the last literals */
+ /* Compress the last literals */
return blockCompressor(ms, seqStore, rep, ip, iend - ip);
-}
+}
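The repcode update inside the loop above is a plain shift-down of the three most recent offsets. A standalone sketch of just that step (REP_NUM fixed at 3, matching ZSTD_REP_NUM; the initial history values are illustrative):

#include <assert.h>

#define REP_NUM 3   /* zstd keeps the 3 most recent offsets */

/* Push a newly used offset to the front of the repcode history,
 * as done after each LDM sequence in ZSTD_ldm_blockCompress(). */
static void pushRep(unsigned rep[REP_NUM], unsigned offset)
{
    int i;
    for (i = REP_NUM - 1; i > 0; i--)
        rep[i] = rep[i-1];
    rep[0] = offset;
}

int main(void)
{
    unsigned rep[REP_NUM] = { 1, 4, 8 };  /* illustrative history */
    pushRep(rep, 7777);
    assert(rep[0] == 7777 && rep[1] == 1 && rep[2] == 4);  /* oldest (8) dropped */
    return 0;
}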
diff --git a/contrib/libs/zstd/lib/compress/zstd_ldm.h b/contrib/libs/zstd/lib/compress/zstd_ldm.h
index 4e68dbf52e..9432096183 100644
--- a/contrib/libs/zstd/lib/compress/zstd_ldm.h
+++ b/contrib/libs/zstd/lib/compress/zstd_ldm.h
@@ -1,117 +1,117 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_LDM_H
-#define ZSTD_LDM_H
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
+ */
+
+#ifndef ZSTD_LDM_H
+#define ZSTD_LDM_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
#include "../zstd.h" /* ZSTD_CCtx, size_t */
-
-/*-*************************************
-* Long distance matching
-***************************************/
-
+
+/*-*************************************
+* Long distance matching
+***************************************/
+
#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
-
+
void ZSTD_ldm_fillHashTable(
ldmState_t* state, const BYTE* ip,
const BYTE* iend, ldmParams_t const* params);
-/**
- * ZSTD_ldm_generateSequences():
- *
- * Generates the sequences using the long distance match finder.
- * Generates long range matching sequences in `sequences`, which parse a prefix
- * of the source. `sequences` must be large enough to store every sequence,
- * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
- * @returns 0 or an error code.
- *
- * NOTE: The user must have called ZSTD_window_update() for all of the input
- * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
- * NOTE: This function returns an error if it runs out of space to store
- * sequences.
- */
-size_t ZSTD_ldm_generateSequences(
- ldmState_t* ldms, rawSeqStore_t* sequences,
- ldmParams_t const* params, void const* src, size_t srcSize);
-
-/**
- * ZSTD_ldm_blockCompress():
- *
- * Compresses a block using the predefined sequences, along with a secondary
- * block compressor. The literals section of every sequence is passed to the
- * secondary block compressor, and those sequences are interspersed with the
- * predefined sequences. Returns the length of the last literals.
- * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
- * `rawSeqStore.seq` may also be updated to split the last sequence between two
- * blocks.
- * @return The length of the last literals.
- *
- * NOTE: The source must be at most the maximum block size, but the predefined
- * sequences can be any size, and may be longer than the block. In the case that
- * they are longer than the block, the last sequences may need to be split into
- * two. We handle that case correctly, and update `rawSeqStore` appropriately.
- * NOTE: This function does not return any errors.
- */
-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+/**
+ * ZSTD_ldm_generateSequences():
+ *
+ * Generates the sequences using the long distance match finder.
+ * Generates long range matching sequences in `sequences`, which parse a prefix
+ * of the source. `sequences` must be large enough to store every sequence,
+ * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
+ * @returns 0 or an error code.
+ *
+ * NOTE: The user must have called ZSTD_window_update() for all of the input
+ * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
+ * NOTE: This function returns an error if it runs out of space to store
+ * sequences.
+ */
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldms, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize);
+
+/**
+ * ZSTD_ldm_blockCompress():
+ *
+ * Compresses a block using the predefined sequences, along with a secondary
+ * block compressor. The literals section of every sequence is passed to the
+ * secondary block compressor, and those sequences are interspersed with the
+ * predefined sequences. Returns the length of the last literals.
+ * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
+ * `rawSeqStore.seq` may also be updated to split the last sequence between two
+ * blocks.
+ * @return The length of the last literals.
+ *
+ * NOTE: The source must be at most the maximum block size, but the predefined
+ * sequences can be any size, and may be longer than the block. In the case that
+ * they are longer than the block, the last sequences may need to be split into
+ * two. We handle that case correctly, and update `rawSeqStore` appropriately.
+ * NOTE: This function does not return any errors.
+ */
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_paramSwitch_e useRowMatchFinder,
void const* src, size_t srcSize);
-
-/**
- * ZSTD_ldm_skipSequences():
- *
- * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
- * Avoids emitting matches less than `minMatch` bytes.
+
+/**
+ * ZSTD_ldm_skipSequences():
+ *
+ * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
+ * Avoids emitting matches less than `minMatch` bytes.
* Must be called for data that is not passed to ZSTD_ldm_blockCompress().
- */
-void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
- U32 const minMatch);
-
+ */
+void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+ U32 const minMatch);
+
/* ZSTD_ldm_skipRawSeqStoreBytes():
* Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
* Not to be used in conjunction with ZSTD_ldm_skipSequences().
 * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
*/
void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
-
-/** ZSTD_ldm_getTableSize() :
- * Estimate the space needed for long distance matching tables or 0 if LDM is
- * disabled.
- */
-size_t ZSTD_ldm_getTableSize(ldmParams_t params);
-
-/** ZSTD_ldm_getMaxNbSeq() :
- * Return an upper bound on the number of sequences that can be produced by
- * the long distance matcher, or 0 if LDM is disabled.
- */
-size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
-
-/** ZSTD_ldm_adjustParameters() :
+
+/** ZSTD_ldm_getTableSize() :
+ * Estimate the space needed for long distance matching tables or 0 if LDM is
+ * disabled.
+ */
+size_t ZSTD_ldm_getTableSize(ldmParams_t params);
+
+/** ZSTD_ldm_getMaxNbSeq() :
+ * Return an upper bound on the number of sequences that can be produced by
+ * the long distance matcher, or 0 if LDM is disabled.
+ */
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
+
+/** ZSTD_ldm_adjustParameters() :
* If the params->hashRateLog is not set, set it to its default value based on
- * windowLog and params->hashLog.
- *
- * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
- * params->hashLog if it is not).
- *
- * Ensures that the minMatchLength >= targetLength during optimal parsing.
- */
-void ZSTD_ldm_adjustParameters(ldmParams_t* params,
- ZSTD_compressionParameters const* cParams);
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_LDM_H */
+ * windowLog and params->hashLog.
+ *
+ * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
+ * params->hashLog if it is not).
+ *
+ * Ensures that the minMatchLength >= targetLength during optimal parsing.
+ */
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LDM_H */
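The adjustment rules documented for ZSTD_ldm_adjustParameters() above can be sketched directly. The stand-in below mirrors the two documented clamps; the hashRateLog default formula is an assumption for illustration, not taken from this header:

#include <assert.h>

typedef struct {   /* simplified stand-in; fields mirror ldmParams_t above */
    unsigned hashLog, bucketSizeLog, minMatchLength, hashRateLog, windowLog;
} ldmParamsX;

static void adjustParamsSketch(ldmParamsX* p, unsigned targetLength)
{
    if (p->bucketSizeLog > p->hashLog)
        p->bucketSizeLog = p->hashLog;        /* ensure bucketSizeLog <= hashLog */
    if (p->minMatchLength < targetLength)
        p->minMatchLength = targetLength;     /* ensure minMatch >= targetLength */
    if (p->hashRateLog == 0)                  /* unset: derive from the logs (assumed) */
        p->hashRateLog = p->windowLog > p->hashLog ? p->windowLog - p->hashLog : 0;
}

int main(void)
{
    ldmParamsX p = { 20, 24, 16, 0, 27 };     /* bucketSizeLog deliberately too large */
    adjustParamsSketch(&p, 64);
    assert(p.bucketSizeLog == 20);
    assert(p.minMatchLength == 64);
    assert(p.hashRateLog == 7);
    return 0;
}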
diff --git a/contrib/libs/zstd/lib/compress/zstd_opt.c b/contrib/libs/zstd/lib/compress/zstd_opt.c
index 1b1ddad428..794af785dd 100644
--- a/contrib/libs/zstd/lib/compress/zstd_opt.c
+++ b/contrib/libs/zstd/lib/compress/zstd_opt.c
@@ -1,27 +1,27 @@
-/*
+/*
* Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#include "zstd_compress_internal.h"
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
#include "hist.h"
-#include "zstd_opt.h"
-
-
+#include "zstd_opt.h"
+
+
#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
-#define ZSTD_MAX_PRICE (1<<30)
-
+#define ZSTD_MAX_PRICE (1<<30)
+
#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
+
-
-/*-*************************************
-* Price functions for optimal parser
-***************************************/
+/*-*************************************
+* Price functions for optimal parser
+***************************************/
#if 0 /* approximation at bit level (for tests) */
# define BITCOST_ACCURACY 0
@@ -38,10 +38,10 @@
#endif
MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
-{
+{
return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
-}
-
+}
+
MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
{
U32 const stat = rawStat + 1;
@@ -52,22 +52,22 @@ MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
assert(hb + BITCOST_ACCURACY < 31);
return weight;
}
-
+
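For reference, the fractional-bit pricing used throughout this file can be reproduced in a few lines. The sketch below assumes BITCOST_ACCURACY == 8 (the non-test branch above) and a portable highbit; it returns an approximation of log2(stat + 1) scaled by BITCOST_MULTIPLIER, which is what ZSTD_fracWeight() computes, while ZSTD_bitWeight() keeps only the integer part:

#include <stdio.h>

#define BITCOST_ACCURACY   8
#define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)

static unsigned highbit32(unsigned v)   /* position of highest set bit; v > 0 */
{ unsigned r = 0; while (v >>= 1) r++; return r; }

/* Sketch of ZSTD_fracWeight(): integer bits plus a linear fractional part,
 * i.e. log2(stat) in units of 1/256th of a bit. */
static unsigned fracWeight(unsigned rawStat)
{
    unsigned const stat    = rawStat + 1;
    unsigned const hb      = highbit32(stat);
    unsigned const bWeight = hb * BITCOST_MULTIPLIER;
    unsigned const fWeight = (stat << BITCOST_ACCURACY) >> hb;
    return bWeight + fWeight;
}

int main(void)
{
    /* monotone in stat; fracWeight(255) is about 9 "scaled bits" (2304) */
    printf("%u %u %u\n", fracWeight(0), fracWeight(255), fracWeight(1023));
    return 0;
}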
#if (DEBUGLEVEL>=2)
/* debugging function,
* @return price in bytes as fractional value
* for debug messages only */
MEM_STATIC double ZSTD_fCost(U32 price)
-{
+{
return (double)price / (BITCOST_MULTIPLIER*8);
}
#endif
-
+
static int ZSTD_compressedLiterals(optState_t const* const optPtr)
{
return optPtr->literalCompressionMode != ZSTD_ps_disable;
}
-
+
static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
{
if (ZSTD_compressedLiterals(optPtr))
@@ -133,13 +133,13 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
optPtr->priceType = zop_predef;
- }
-
+ }
+
assert(optPtr->symbolCosts != NULL);
if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
/* huffman table presumed generated by dictionary */
optPtr->priceType = zop_dynamic;
-
+
if (compressedLiterals) {
unsigned lit;
assert(optPtr->litFreq != NULL);
@@ -151,7 +151,7 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
optPtr->litSum += optPtr->litFreq[lit];
} }
-
+
{ unsigned ll;
FSE_CState_t llstate;
FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
@@ -225,7 +225,7 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
}
- }
+ }
} else { /* new block : re-use previous statistics, scaled down */
@@ -234,20 +234,20 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
- }
-
+ }
+
ZSTD_setBasePrices(optPtr, optLevel);
-}
-
-/* ZSTD_rawLiteralsCost() :
+}
+
+/* ZSTD_rawLiteralsCost() :
 * price of literals (only) in specified segment (whose length can be 0).
* does not include price of literalLength symbol */
-static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
+static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
const optState_t* const optPtr,
int optLevel)
-{
- if (litLength == 0) return 0;
-
+{
+ if (litLength == 0) return 0;
+
if (!ZSTD_compressedLiterals(optPtr))
return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */
@@ -262,13 +262,13 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
}
return price;
- }
-}
-
-/* ZSTD_litLengthPrice() :
- * cost of literalLength symbol */
+ }
+}
+
+/* ZSTD_litLengthPrice() :
+ * cost of literalLength symbol */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
-{
+{
assert(litLength <= ZSTD_BLOCKSIZE_MAX);
if (optPtr->priceType == zop_predef)
return WEIGHT(litLength, optLevel);
@@ -279,18 +279,18 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP
*/
if (litLength == ZSTD_BLOCKSIZE_MAX)
return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
-
+
/* dynamic statistics */
- { U32 const llCode = ZSTD_LLcode(litLength);
+ { U32 const llCode = ZSTD_LLcode(litLength);
return (LL_bits[llCode] * BITCOST_MULTIPLIER)
+ optPtr->litLengthSumBasePrice
- WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
- }
-}
-
-/* ZSTD_getMatchPrice() :
- * Provides the cost of the match part (offset + matchLength) of a sequence
- * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
+ }
+}
+
+/* ZSTD_getMatchPrice() :
+ * Provides the cost of the match part (offset + matchLength) of a sequence
+ * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
* @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2
* @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
*/
@@ -299,245 +299,245 @@ ZSTD_getMatchPrice(U32 const offcode,
U32 const matchLength,
const optState_t* const optPtr,
int const optLevel)
-{
- U32 price;
+{
+ U32 price;
U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode));
- U32 const mlBase = matchLength - MINMATCH;
- assert(matchLength >= MINMATCH);
-
+ U32 const mlBase = matchLength - MINMATCH;
+ assert(matchLength >= MINMATCH);
+
if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
-
+
/* dynamic statistics */
price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
if ((optLevel<2) /*static*/ && offCode >= 20)
price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
-
- /* match Length */
- { U32 const mlCode = ZSTD_MLcode(mlBase);
+
+ /* match Length */
+ { U32 const mlCode = ZSTD_MLcode(mlBase);
price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
- }
-
+ }
+
price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */
- DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
- return price;
-}
-
+ DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
+ return price;
+}
+
/* ZSTD_updateStats() :
 * assumption : literals + litLength <= iend */
-static void ZSTD_updateStats(optState_t* const optPtr,
- U32 litLength, const BYTE* literals,
- U32 offsetCode, U32 matchLength)
-{
- /* literals */
+static void ZSTD_updateStats(optState_t* const optPtr,
+ U32 litLength, const BYTE* literals,
+ U32 offsetCode, U32 matchLength)
+{
+ /* literals */
if (ZSTD_compressedLiterals(optPtr)) {
U32 u;
- for (u=0; u < litLength; u++)
- optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
- optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
- }
-
- /* literal Length */
- { U32 const llCode = ZSTD_LLcode(litLength);
- optPtr->litLengthFreq[llCode]++;
- optPtr->litLengthSum++;
- }
-
+ for (u=0; u < litLength; u++)
+ optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+ optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
+ }
+
+ /* literal Length */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ optPtr->litLengthFreq[llCode]++;
+ optPtr->litLengthSum++;
+ }
+
/* offset code : expected to follow storeSeq() numeric representation */
{ U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode));
- assert(offCode <= MaxOff);
- optPtr->offCodeFreq[offCode]++;
- optPtr->offCodeSum++;
- }
-
- /* match Length */
- { U32 const mlBase = matchLength - MINMATCH;
- U32 const mlCode = ZSTD_MLcode(mlBase);
- optPtr->matchLengthFreq[mlCode]++;
- optPtr->matchLengthSum++;
- }
-}
-
-
-/* ZSTD_readMINMATCH() :
- * function safe only for comparisons
- * assumption : memPtr must be at least 4 bytes before end of buffer */
-MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
-{
- switch (length)
- {
- default :
- case 4 : return MEM_read32(memPtr);
- case 3 : if (MEM_isLittleEndian())
- return MEM_read32(memPtr)<<8;
- else
- return MEM_read32(memPtr)>>8;
- }
-}
-
-
-/* Update hashTable3 up to ip (excluded)
- Assumption : always within prefix (i.e. not within extDict) */
+ assert(offCode <= MaxOff);
+ optPtr->offCodeFreq[offCode]++;
+ optPtr->offCodeSum++;
+ }
+
+ /* match Length */
+ { U32 const mlBase = matchLength - MINMATCH;
+ U32 const mlCode = ZSTD_MLcode(mlBase);
+ optPtr->matchLengthFreq[mlCode]++;
+ optPtr->matchLengthSum++;
+ }
+}
+
+
+/* ZSTD_readMINMATCH() :
+ * function safe only for comparisons
+ * assumption : memPtr must be at least 4 bytes before end of buffer */
+MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
+{
+ switch (length)
+ {
+ default :
+ case 4 : return MEM_read32(memPtr);
+ case 3 : if (MEM_isLittleEndian())
+ return MEM_read32(memPtr)<<8;
+ else
+ return MEM_read32(memPtr)>>8;
+ }
+}
+
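The 3-byte case above works because a shift discards the stray fourth byte of a 4-byte load: << 8 on little-endian, >> 8 on big-endian. A small endian-safe sketch using memcpy for the unaligned read (the caller must still guarantee 4 readable bytes, per the assumption above):

#include <assert.h>
#include <string.h>
#include <stdint.h>

static uint32_t read32(const void* p) { uint32_t v; memcpy(&v, p, 4); return v; }

static int isLittleEndian(void)
{ uint32_t const one = 1; return *(const unsigned char*)&one; }

/* Sketch of ZSTD_readMINMATCH(): a 4-byte compare, or a 3-byte compare
 * done by shifting the extra byte out of a 4-byte load. */
static uint32_t readMinMatch(const void* p, unsigned length)
{
    if (length == 3)
        return isLittleEndian() ? read32(p) << 8 : read32(p) >> 8;
    return read32(p);
}

int main(void)
{
    char const a[] = "abcX";
    char const b[] = "abcY";
    assert(readMinMatch(a, 3) == readMinMatch(b, 3));  /* 3-byte prefixes equal */
    assert(readMinMatch(a, 4) != readMinMatch(b, 4));  /* differ at byte 4 */
    return 0;
}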
+
+/* Update hashTable3 up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
U32* nextToUpdate3,
const BYTE* const ip)
-{
- U32* const hashTable3 = ms->hashTable3;
- U32 const hashLog3 = ms->hashLog3;
- const BYTE* const base = ms->window.base;
+{
+ U32* const hashTable3 = ms->hashTable3;
+ U32 const hashLog3 = ms->hashLog3;
+ const BYTE* const base = ms->window.base;
U32 idx = *nextToUpdate3;
U32 const target = (U32)(ip - base);
- size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
- assert(hashLog3 > 0);
-
- while(idx < target) {
- hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
- idx++;
- }
-
+ size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
+ assert(hashLog3 > 0);
+
+ while(idx < target) {
+ hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
+ idx++;
+ }
+
*nextToUpdate3 = target;
- return hashTable3[hash3];
-}
-
-
-/*-*************************************
-* Binary Tree search
-***************************************/
-/** ZSTD_insertBt1() : add one or multiple positions to tree.
+ return hashTable3[hash3];
+}
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+/** ZSTD_insertBt1() : add one or multiple positions to tree.
* @param ip assumed <= iend-8 .
* @param target The target of ZSTD_updateTree_internal() - we are filling to this position
- * @return : nb of positions added */
-static U32 ZSTD_insertBt1(
+ * @return : nb of positions added */
+static U32 ZSTD_insertBt1(
const ZSTD_matchState_t* ms,
- const BYTE* const ip, const BYTE* const iend,
+ const BYTE* const ip, const BYTE* const iend,
U32 const target,
U32 const mls, const int extDict)
-{
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32* const hashTable = ms->hashTable;
- U32 const hashLog = cParams->hashLog;
- size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
- U32* const bt = ms->chainTable;
- U32 const btLog = cParams->chainLog - 1;
- U32 const btMask = (1 << btLog) - 1;
- U32 matchIndex = hashTable[h];
- size_t commonLengthSmaller=0, commonLengthLarger=0;
- const BYTE* const base = ms->window.base;
- const BYTE* const dictBase = ms->window.dictBase;
- const U32 dictLimit = ms->window.dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
- const BYTE* match;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 matchIndex = hashTable[h];
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
const U32 curr = (U32)(ip-base);
const U32 btLow = btMask >= curr ? 0 : curr - btMask;
U32* smallerPtr = bt + 2*(curr&btMask);
- U32* largerPtr = smallerPtr + 1;
- U32 dummy32; /* to be nullified at the end */
+ U32* largerPtr = smallerPtr + 1;
+ U32 dummy32; /* to be nullified at the end */
/* windowLow is based on target because
* we only need positions that will be in the window at the end of the tree update.
*/
U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
U32 matchEndIdx = curr+8+1;
- size_t bestLength = 8;
- U32 nbCompares = 1U << cParams->searchLog;
-#ifdef ZSTD_C_PREDICT
+ size_t bestLength = 8;
+ U32 nbCompares = 1U << cParams->searchLog;
+#ifdef ZSTD_C_PREDICT
U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
- predictedSmall += (predictedSmall>0);
- predictedLarge += (predictedLarge>0);
-#endif /* ZSTD_C_PREDICT */
-
+ predictedSmall += (predictedSmall>0);
+ predictedLarge += (predictedLarge>0);
+#endif /* ZSTD_C_PREDICT */
+
DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
-
+
assert(curr <= target);
- assert(ip <= iend-8); /* required for h calculation */
+ assert(ip <= iend-8); /* required for h calculation */
hashTable[h] = curr; /* Update Hash Table */
-
+
assert(windowLow > 0);
for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
- U32* const nextPtr = bt + 2*(matchIndex & btMask);
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(matchIndex < curr);
-
-#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
- const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
- if (matchIndex == predictedSmall) {
- /* no need to check length, result known */
- *smallerPtr = matchIndex;
- if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
- matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
- predictedSmall = predictPtr[1] + (predictPtr[1]>0);
- continue;
- }
- if (matchIndex == predictedLarge) {
- *largerPtr = matchIndex;
- if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- predictedLarge = predictPtr[0] + (predictPtr[0]>0);
- continue;
- }
-#endif
-
+
+#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
+ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
+ if (matchIndex == predictedSmall) {
+ /* no need to check length, result known */
+ *smallerPtr = matchIndex;
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ predictedSmall = predictPtr[1] + (predictPtr[1]>0);
+ continue;
+ }
+ if (matchIndex == predictedLarge) {
+ *largerPtr = matchIndex;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ predictedLarge = predictPtr[0] + (predictPtr[0]>0);
+ continue;
+ }
+#endif
+
if (!extDict || (matchIndex+matchLength >= dictLimit)) {
assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */
- match = base + matchIndex;
- matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
- } else {
- match = dictBase + matchIndex;
- matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
- if (matchIndex+matchLength >= dictLimit)
- match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
- }
-
- if (matchLength > bestLength) {
- bestLength = matchLength;
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- }
-
- if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
- break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
- }
-
- if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
- /* match is smaller than current */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
- smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
- matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
- } else {
- /* match is larger than current */
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- } }
-
- *smallerPtr = *largerPtr = 0;
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ bestLength = matchLength;
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ }
+
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
{ U32 positions = 0;
if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */
assert(matchEndIdx > curr + 8);
return MAX(positions, matchEndIdx - (curr + 8));
}
-}
-
-FORCE_INLINE_TEMPLATE
-void ZSTD_updateTree_internal(
+}
+
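The tree built by ZSTD_insertBt1() above lives in a rolling array: position p owns two link slots, bt[2*(p & btMask)] for the smaller subtree and the next slot for the larger one, so positions more than btMask apart alias the same slots and overwrite each other, which is why the search loops stop at btLow. A tiny sketch of that indexing (the btLog value here is illustrative; zstd uses chainLog - 1):

#include <stdio.h>

#define BT_LOG  4
#define BT_MASK ((1u << BT_LOG) - 1)

int main(void)
{
    unsigned bt[2 << BT_LOG] = {0};     /* two slots (smaller, larger) per position */
    unsigned const curr = 1000;         /* absolute position in the window */
    unsigned* const smallerPtr = bt + 2*(curr & BT_MASK);
    unsigned* const largerPtr  = smallerPtr + 1;
    *smallerPtr = 900;                  /* hypothetical child links */
    *largerPtr  = 950;
    /* positions more than BT_MASK back alias these slots and get clobbered */
    printf("slot %u: smaller=%u larger=%u\n", curr & BT_MASK, *smallerPtr, *largerPtr);
    return 0;
}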
+FORCE_INLINE_TEMPLATE
+void ZSTD_updateTree_internal(
ZSTD_matchState_t* ms,
- const BYTE* const ip, const BYTE* const iend,
+ const BYTE* const ip, const BYTE* const iend,
const U32 mls, const ZSTD_dictMode_e dictMode)
-{
- const BYTE* const base = ms->window.base;
- U32 const target = (U32)(ip - base);
- U32 idx = ms->nextToUpdate;
+{
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
idx, target, dictMode);
-
+
while(idx < target) {
U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);
assert(idx < (U32)(idx + forward));
@@ -545,15 +545,15 @@ void ZSTD_updateTree_internal(
}
assert((size_t)(ip - base) <= (size_t)(U32)(-1));
assert((size_t)(iend - base) <= (size_t)(U32)(-1));
- ms->nextToUpdate = target;
-}
-
+ ms->nextToUpdate = target;
+}
+
void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
-}
-
-FORCE_INLINE_TEMPLATE
-U32 ZSTD_insertBtAndGetAllMatches (
+}
+
+FORCE_INLINE_TEMPLATE
+U32 ZSTD_insertBtAndGetAllMatches (
ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
ZSTD_matchState_t* ms,
U32* nextToUpdate3,
@@ -562,34 +562,34 @@ U32 ZSTD_insertBtAndGetAllMatches (
U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
const U32 lengthToBeat,
U32 const mls /* template */)
-{
+{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
- U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
- const BYTE* const base = ms->window.base;
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ const BYTE* const base = ms->window.base;
U32 const curr = (U32)(ip-base);
- U32 const hashLog = cParams->hashLog;
- U32 const minMatch = (mls==3) ? 3 : 4;
- U32* const hashTable = ms->hashTable;
- size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
- U32 matchIndex = hashTable[h];
- U32* const bt = ms->chainTable;
- U32 const btLog = cParams->chainLog - 1;
- U32 const btMask= (1U << btLog) - 1;
- size_t commonLengthSmaller=0, commonLengthLarger=0;
- const BYTE* const dictBase = ms->window.dictBase;
- U32 const dictLimit = ms->window.dictLimit;
- const BYTE* const dictEnd = dictBase + dictLimit;
- const BYTE* const prefixStart = base + dictLimit;
+ U32 const hashLog = cParams->hashLog;
+ U32 const minMatch = (mls==3) ? 3 : 4;
+ U32* const hashTable = ms->hashTable;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask= (1U << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const dictBase = ms->window.dictBase;
+ U32 const dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
U32 const matchLow = windowLow ? windowLow : 1;
U32* smallerPtr = bt + 2*(curr&btMask);
U32* largerPtr = bt + 2*(curr&btMask) + 1;
U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */
- U32 dummy32; /* to be nullified at the end */
- U32 mnum = 0;
- U32 nbCompares = 1U << cParams->searchLog;
-
+ U32 dummy32; /* to be nullified at the end */
+ U32 mnum = 0;
+ U32 nbCompares = 1U << cParams->searchLog;
+
const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
const ZSTD_compressionParameters* const dmsCParams =
dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
@@ -603,25 +603,25 @@ U32 ZSTD_insertBtAndGetAllMatches (
U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
- size_t bestLength = lengthToBeat-1;
+ size_t bestLength = lengthToBeat-1;
DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
-
- /* check repCode */
+
+ /* check repCode */
assert(ll0 <= 1); /* necessarily 1 or 0 */
- { U32 const lastR = ZSTD_REP_NUM + ll0;
- U32 repCode;
- for (repCode = ll0; repCode < lastR; repCode++) {
- U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ { U32 const lastR = ZSTD_REP_NUM + ll0;
+ U32 repCode;
+ for (repCode = ll0; repCode < lastR; repCode++) {
+ U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
U32 const repIndex = curr - repOffset;
- U32 repLen = 0;
+ U32 repLen = 0;
assert(curr >= dictLimit);
if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */
/* We must validate the repcode offset because when we're using a dictionary the
* valid offset range shrinks when the dictionary goes out of bounds.
*/
if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
- repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
- }
+ repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
+ }
} else { /* repIndex < dictLimit || repIndex >= curr */
const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
dmsBase + repIndex - dmsIndexDelta :
@@ -629,115 +629,115 @@ U32 ZSTD_insertBtAndGetAllMatches (
assert(curr >= windowLow);
if ( dictMode == ZSTD_extDict
&& ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
- & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
- && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
- repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
+ & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
}
if (dictMode == ZSTD_dictMatchState
&& ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
& ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
- } }
- /* save longer solution */
- if (repLen > bestLength) {
+ } }
+ /* save longer solution */
+ if (repLen > bestLength) {
DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
repCode, ll0, repOffset, repLen);
- bestLength = repLen;
+ bestLength = repLen;
matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1); /* expect value between 1 and 3 */
- matches[mnum].len = (U32)repLen;
- mnum++;
- if ( (repLen > sufficient_len)
- | (ip+repLen == iLimit) ) { /* best possible */
- return mnum;
- } } } }
-
- /* HC3 match finder */
- if ((mls == 3) /*static*/ && (bestLength < mls)) {
+ matches[mnum].len = (U32)repLen;
+ mnum++;
+ if ( (repLen > sufficient_len)
+ | (ip+repLen == iLimit) ) { /* best possible */
+ return mnum;
+ } } } }
+
+ /* HC3 match finder */
+ if ((mls == 3) /*static*/ && (bestLength < mls)) {
U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
if ((matchIndex3 >= matchLow)
& (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
- size_t mlen;
+ size_t mlen;
if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
- const BYTE* const match = base + matchIndex3;
- mlen = ZSTD_count(ip, match, iLimit);
- } else {
- const BYTE* const match = dictBase + matchIndex3;
- mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
- }
-
- /* save best solution */
- if (mlen >= mls /* == 3 > bestLength */) {
- DEBUGLOG(8, "found small match with hlog3, of length %u",
- (U32)mlen);
- bestLength = mlen;
+ const BYTE* const match = base + matchIndex3;
+ mlen = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex3;
+ mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
+ }
+
+ /* save best solution */
+ if (mlen >= mls /* == 3 > bestLength */) {
+ DEBUGLOG(8, "found small match with hlog3, of length %u",
+ (U32)mlen);
+ bestLength = mlen;
assert(curr > matchIndex3);
- assert(mnum==0); /* no prior solution */
+ assert(mnum==0); /* no prior solution */
matches[0].off = STORE_OFFSET(curr - matchIndex3);
- matches[0].len = (U32)mlen;
- mnum = 1;
- if ( (mlen > sufficient_len) |
- (ip+mlen == iLimit) ) { /* best possible length */
+ matches[0].len = (U32)mlen;
+ mnum = 1;
+ if ( (mlen > sufficient_len) |
+ (ip+mlen == iLimit) ) { /* best possible length */
ms->nextToUpdate = curr+1; /* skip insertion */
- return 1;
+ return 1;
} } }
/* no dictMatchState lookup: dicts don't have a populated HC3 table */
} /* if (mls == 3) */
-
+
hashTable[h] = curr; /* Update Hash Table */
-
+
for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
- U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
const BYTE* match;
- size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(curr > matchIndex);
-
+
if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
- assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
- match = base + matchIndex;
+ assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
+ match = base + matchIndex;
if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
- matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
- } else {
- match = dictBase + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
+ } else {
+ match = dictBase + matchIndex;
assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
- matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
- if (matchIndex+matchLength >= dictLimit)
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
match = base + matchIndex; /* prepare for match[matchLength] read */
- }
-
- if (matchLength > bestLength) {
+ }
+
+ if (matchLength > bestLength) {
DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
(U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
- assert(matchEndIdx > matchIndex);
- if (matchLength > matchEndIdx - matchIndex)
- matchEndIdx = matchIndex + (U32)matchLength;
- bestLength = matchLength;
+ assert(matchEndIdx > matchIndex);
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
matches[mnum].off = STORE_OFFSET(curr - matchIndex);
- matches[mnum].len = (U32)matchLength;
- mnum++;
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
| (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
break; /* drop, to preserve bt consistency (miss a little bit of compression) */
} }
-
- if (match[matchLength] < ip[matchLength]) {
- /* match smaller than current */
- *smallerPtr = matchIndex; /* update smaller idx */
- commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
- if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */
- matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */
- } else {
- *largerPtr = matchIndex;
- commonLengthLarger = matchLength;
- if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
- largerPtr = nextPtr;
- matchIndex = nextPtr[0];
- } }
-
- *smallerPtr = *largerPtr = 0;
-
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */
+ } else {
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
if (dictMode == ZSTD_dictMatchState && nbCompares) {
size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
@@ -778,10 +778,10 @@ U32 ZSTD_insertBtAndGetAllMatches (
} } } /* if (dictMode == ZSTD_dictMatchState) */
assert(matchEndIdx > curr+8);
- ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
- return mnum;
-}
-
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ return mnum;
+}
+
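The loop above is a sorted-suffix binary tree: every previously seen position is a node, ordered by the byte string starting there, and MIN(commonLengthSmaller, commonLengthLarger) lets each comparison resume past the bytes already known to be shared with both bounding branches. A minimal self-contained sketch of the same structure, with the common-prefix acceleration, hash heads and windowing omitted (all names are illustrative, not zstd API):

    #include <stdio.h>

    #define TOY_N 64
    static int leftChild[TOY_N], rightChild[TOY_N], root = -1;

    /* Insert suffix `pos` into the tree; return the longest match seen on the
     * way down. Requires buf[bufSize] to be readable (a NUL sentinel here). */
    static size_t insertAndFindMatch(const unsigned char* buf, size_t bufSize, int pos)
    {
        size_t best = 0;
        int* link = &root;
        leftChild[pos] = rightChild[pos] = -1;
        while (*link != -1) {
            int const cand = *link;
            size_t m = 0;
            while (buf + pos + m < buf + bufSize && buf[pos + m] == buf[cand + m]) m++;
            if (m > best) best = m;
            if (buf[pos + m] < buf[cand + m]) link = &leftChild[cand];   /* descend smaller */
            else                              link = &rightChild[cand]; /* descend larger */
        }
        *link = pos;   /* attach the new suffix at the leaf reached */
        return best;
    }

    int main(void)
    {
        const unsigned char buf[] = "abracadabra abracadabra";
        size_t const n = sizeof(buf) - 1;   /* buf[n] is the NUL sentinel */
        int pos;
        for (pos = 0; pos < (int)n; pos++) {
            size_t const m = insertAndFindMatch(buf, n, pos);
            if (m >= 3) printf("pos %2d: longest earlier match = %zu bytes\n", pos, m);
        }
        return 0;
    }

Unlike this toy, the real tree bounds work via nbCompares and cuts links at btLow, accepting a slightly lossy structure in exchange for bounded search time.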
typedef U32 (*ZSTD_getAllMatchesFn)(
ZSTD_match_t*,
ZSTD_matchState_t*,
@@ -791,7 +791,7 @@ typedef U32 (*ZSTD_getAllMatchesFn)(
const U32 rep[ZSTD_REP_NUM],
U32 const ll0,
U32 const lengthToBeat);
-
+
FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
ZSTD_match_t* matches,
ZSTD_matchState_t* ms,
@@ -803,7 +803,7 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
U32 const lengthToBeat,
const ZSTD_dictMode_e dictMode,
const U32 mls)
-{
+{
assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls);
if (ip < ms->window.base + ms->nextToUpdate)
@@ -828,7 +828,7 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
return ZSTD_btGetAllMatches_internal( \
matches, ms, nextToUpdate3, ip, iHighLimit, \
rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
- }
+ }
#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \
@@ -860,12 +860,12 @@ ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const di
assert((U32)dictMode < 3);
assert(mls - 3 < 4);
return getAllMatchesFns[(int)dictMode][mls - 3];
-}
-
+}
+
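GEN_ZSTD_BT_GET_ALL_MATCHES above stamps one copy of the match finder per (dictMode, mls) pair, so both values become compile-time constants inside the FORCE_INLINE'd body and dead branches fold away; ZSTD_selectBtGetAllMatches then merely indexes a 3x4 function-pointer table. The same pattern in miniature (hypothetical names, not zstd API):

    #include <stdio.h>
    #include <stddef.h>

    typedef size_t (*countFn)(const unsigned char* p, size_t n);

    /* Stamp one specialization per constant STEP so the compiler can unroll. */
    #define GEN_COUNT(STEP)                                                   \
        static size_t count_step##STEP(const unsigned char* p, size_t n)      \
        {                                                                     \
            size_t c = 0, i;                                                  \
            for (i = 0; i < n; i += STEP) c += p[i];                          \
            return c;                                                         \
        }

    GEN_COUNT(1)
    GEN_COUNT(2)
    GEN_COUNT(4)

    static countFn selectCount(unsigned step)   /* step in {1,2,4} */
    {
        static const countFn table[3] = { count_step1, count_step2, count_step4 };
        return table[step == 1 ? 0 : step == 2 ? 1 : 2];
    }

    int main(void)
    {
        unsigned char data[16] = {0};
        data[0] = data[2] = 1;
        printf("%zu %zu\n", selectCount(1)(data, 16), selectCount(2)(data, 16)); /* 2 2 */
        return 0;
    }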
/*************************
* LDM helper functions *
*************************/
-
+
/* Struct containing info needed to make decision about ldm inclusion */
typedef struct {
rawSeqStore_t seqStore; /* External match candidates store for this block */
@@ -1007,17 +1007,17 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
}
-/*-*******************************
-* Optimal parser
-*********************************/
-
+/*-*******************************
+* Optimal parser
+*********************************/
+
static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
-{
+{
return sol.litlen + sol.mlen;
}
-
+
#if 0 /* debug */
-
+
static void
listStats(const U32* table, int lastEltID)
{
@@ -1027,12 +1027,12 @@ listStats(const U32* table, int lastEltID)
(void)table;
/* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */
RAWLOG(2, "%4i,", table[enb]);
- }
+ }
RAWLOG(2, " \n");
-}
-
+}
+
#endif
-
+
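ZSTD_compressBlock_opt_generic below is a forward dynamic program: opt[pos].price holds the cheapest cost to encode the first pos bytes of the region, a literal relaxes pos+1, and every candidate match of length L relaxes pos+L (the real code also prices each intermediate length down to startML). A toy version of that relaxation core, with invented bit costs and match table:

    #include <limits.h>
    #include <stdio.h>

    #define MAXPOS 16

    int main(void)
    {
        int opt[MAXPOS + 1];
        int const LIT_COST = 6, MATCH_COST = 20;    /* toy bit prices */
        /* toy candidates: at position p, matchLen[p] bytes can be copied */
        int const matchLen[MAXPOS] = {0,0,5,0,0,0,0,8,0,0,0,0,0,0,0,0};
        int p;
        opt[0] = 0;
        for (p = 1; p <= MAXPOS; p++) opt[p] = INT_MAX;
        for (p = 0; p < MAXPOS; p++) {
            if (opt[p] == INT_MAX) continue;        /* position not reachable */
            if (opt[p] + LIT_COST < opt[p + 1])     /* a literal relaxes p+1 */
                opt[p + 1] = opt[p] + LIT_COST;
            if (matchLen[p] && p + matchLen[p] <= MAXPOS
                && opt[p] + MATCH_COST < opt[p + matchLen[p]])
                opt[p + matchLen[p]] = opt[p] + MATCH_COST;  /* a match relaxes p+L */
        }
        printf("cheapest price for %d bytes: %d\n", MAXPOS, opt[MAXPOS]);  /* 58 */
        return 0;
    }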
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
seqStore_t* seqStore,
@@ -1040,132 +1040,132 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
const void* src, size_t srcSize,
const int optLevel,
const ZSTD_dictMode_e dictMode)
-{
- optState_t* const optStatePtr = &ms->opt;
- const BYTE* const istart = (const BYTE*)src;
- const BYTE* ip = istart;
- const BYTE* anchor = istart;
- const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
- const BYTE* const base = ms->window.base;
- const BYTE* const prefixStart = base + ms->window.dictLimit;
+{
+ optState_t* const optStatePtr = &ms->opt;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
const ZSTD_compressionParameters* const cParams = &ms->cParams;
-
+
ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
- U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
U32 nextToUpdate3 = ms->nextToUpdate;
-
- ZSTD_optimal_t* const opt = optStatePtr->priceTable;
- ZSTD_match_t* const matches = optStatePtr->matchTable;
+
+ ZSTD_optimal_t* const opt = optStatePtr->priceTable;
+ ZSTD_match_t* const matches = optStatePtr->matchTable;
ZSTD_optimal_t lastSequence;
ZSTD_optLdm_t optLdm;
-
+
optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
- /* init */
+ /* init */
DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
(U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
assert(optLevel <= 2);
ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
- ip += (ip==prefixStart);
-
- /* Match Loop */
- while (ip < ilimit) {
- U32 cur, last_pos = 0;
-
- /* find first match */
- { U32 const litlen = (U32)(ip - anchor);
- U32 const ll0 = !litlen;
+ ip += (ip==prefixStart);
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ U32 cur, last_pos = 0;
+
+ /* find first match */
+ { U32 const litlen = (U32)(ip - anchor);
+ U32 const ll0 = !litlen;
U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
(U32)(ip-istart), (U32)(iend - ip));
- if (!nbMatches) { ip++; continue; }
-
- /* initialize opt[0] */
- { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+ if (!nbMatches) { ip++; continue; }
+
+ /* initialize opt[0] */
+ { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
opt[0].mlen = 0; /* means is_a_literal */
- opt[0].litlen = litlen;
+ opt[0].litlen = litlen;
/* We don't need to include the actual price of the literals because
* it is static for the duration of the forward pass, and is included
* in every price. We include the literal length to avoid negative
* prices when we subtract the previous literal length.
*/
opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
-
- /* large match -> immediate encoding */
- { U32 const maxML = matches[nbMatches-1].len;
+
+ /* large match -> immediate encoding */
+ { U32 const maxML = matches[nbMatches-1].len;
U32 const maxOffcode = matches[nbMatches-1].off;
DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart));
-
- if (maxML > sufficient_len) {
+
+ if (maxML > sufficient_len) {
lastSequence.litlen = litlen;
lastSequence.mlen = maxML;
lastSequence.off = maxOffcode;
DEBUGLOG(6, "large match (%u>%u), immediate encoding",
maxML, sufficient_len);
- cur = 0;
+ cur = 0;
last_pos = ZSTD_totalLen(lastSequence);
- goto _shortestPath;
- } }
-
- /* set prices for first matches starting position == 0 */
+ goto _shortestPath;
+ } }
+
+ /* set prices for first matches starting position == 0 */
assert(opt[0].price >= 0);
{ U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
- U32 pos;
- U32 matchNb;
+ U32 pos;
+ U32 matchNb;
for (pos = 1; pos < minMatch; pos++) {
opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
- }
- for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ }
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
U32 const offcode = matches[matchNb].off;
- U32 const end = matches[matchNb].len;
- for ( ; pos <= end ; pos++ ) {
+ U32 const end = matches[matchNb].len;
+ for ( ; pos <= end ; pos++ ) {
U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel);
U32 const sequencePrice = literalsPrice + matchPrice;
DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
pos, ZSTD_fCost(sequencePrice));
- opt[pos].mlen = pos;
+ opt[pos].mlen = pos;
opt[pos].off = offcode;
- opt[pos].litlen = litlen;
+ opt[pos].litlen = litlen;
opt[pos].price = (int)sequencePrice;
- } }
- last_pos = pos-1;
- }
- }
-
- /* check further positions */
- for (cur = 1; cur <= last_pos; cur++) {
- const BYTE* const inr = ip + cur;
- assert(cur < ZSTD_OPT_NUM);
+ } }
+ last_pos = pos-1;
+ }
+ }
+
+ /* check further positions */
+ for (cur = 1; cur <= last_pos; cur++) {
+ const BYTE* const inr = ip + cur;
+ assert(cur < ZSTD_OPT_NUM);
DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
-
- /* Fix current position with one literal if cheaper */
+
+ /* Fix current position with one literal if cheaper */
{ U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
int const price = opt[cur-1].price
+ (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+ (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
- (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
- assert(price < 1000000000); /* overflow check */
- if (price <= opt[cur].price) {
+ assert(price < 1000000000); /* overflow check */
+ if (price <= opt[cur].price) {
DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
opt[cur].mlen = 0;
- opt[cur].off = 0;
- opt[cur].litlen = litlen;
- opt[cur].price = price;
+ opt[cur].off = 0;
+ opt[cur].litlen = litlen;
+ opt[cur].price = price;
} else {
DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
}
}
-
+
/* Set the repcodes of the current position. We must do it here
* because we rely on the repcodes of the 2nd to last sequence being
* correct to set the next chunks repcodes during the backward
@@ -1181,24 +1181,24 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
}
- /* last match must start at a minimum distance of 8 from oend */
- if (inr > ilimit) continue;
-
- if (cur == last_pos) break;
-
+ /* last match must start at a minimum distance of 8 from oend */
+ if (inr > ilimit) continue;
+
+ if (cur == last_pos) break;
+
if ( (optLevel==0) /*static_test*/
&& (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
- continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
+ continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
}
-
+
assert(opt[cur].price >= 0);
{ U32 const ll0 = (opt[cur].mlen != 0);
U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
U32 const previousPrice = (U32)opt[cur].price;
U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
- U32 matchNb;
+ U32 matchNb;
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
(U32)(inr-istart), (U32)(iend-inr));
@@ -1207,12 +1207,12 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
DEBUGLOG(7, "rPos:%u : no match found", cur);
continue;
}
-
- { U32 const maxML = matches[nbMatches-1].len;
+
+ { U32 const maxML = matches[nbMatches-1].len;
DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
inr-istart, cur, nbMatches, maxML);
-
- if ( (maxML > sufficient_len)
+
+ if ( (maxML > sufficient_len)
|| (cur + maxML >= ZSTD_OPT_NUM) ) {
lastSequence.mlen = maxML;
lastSequence.off = matches[nbMatches-1].off;
@@ -1220,46 +1220,46 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
last_pos = cur + ZSTD_totalLen(lastSequence);
if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
- goto _shortestPath;
+ goto _shortestPath;
} }
-
- /* set prices using matches found at position == cur */
- for (matchNb = 0; matchNb < nbMatches; matchNb++) {
- U32 const offset = matches[matchNb].off;
- U32 const lastML = matches[matchNb].len;
- U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
- U32 mlen;
-
+
+ /* set prices using matches found at position == cur */
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ U32 const offset = matches[matchNb].off;
+ U32 const lastML = matches[matchNb].len;
+ U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
+ U32 mlen;
+
DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
- matchNb, matches[matchNb].off, lastML, litlen);
-
+ matchNb, matches[matchNb].off, lastML, litlen);
+
for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
- U32 const pos = cur + mlen;
+ U32 const pos = cur + mlen;
int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
-
- if ((pos > last_pos) || (price < opt[pos].price)) {
+
+ if ((pos > last_pos) || (price < opt[pos].price)) {
DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
- opt[pos].mlen = mlen;
- opt[pos].off = offset;
- opt[pos].litlen = litlen;
- opt[pos].price = price;
- } else {
+ opt[pos].mlen = mlen;
+ opt[pos].off = offset;
+ opt[pos].litlen = litlen;
+ opt[pos].price = price;
+ } else {
DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
- }
- } } }
- } /* for (cur = 1; cur <= last_pos; cur++) */
-
+ }
+ } } }
+ } /* for (cur = 1; cur <= last_pos; cur++) */
+
lastSequence = opt[last_pos];
cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
assert(cur < ZSTD_OPT_NUM); /* control overflow*/
-
-_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
+
+_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
assert(opt[0].mlen == 0);
-
+
/* Set the next chunk's repcodes based on the repcodes of the beginning
* of the last match, and the last sequence. This avoids us having to
* update them while traversing the sequences.
@@ -1274,7 +1274,7 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
{ U32 const storeEnd = cur + 1;
U32 storeStart = storeEnd;
U32 seqPos = cur;
-
+
DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
last_pos, cur); (void)last_pos;
assert(storeEnd < ZSTD_OPT_NUM);
@@ -1289,7 +1289,7 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
opt[storeStart] = opt[seqPos];
seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
}
-
+
/* save sequences */
DEBUGLOG(6, "sending selected sequences into seqStore")
{ U32 storePos;
@@ -1305,8 +1305,8 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
assert(storePos == storeEnd); /* must be last sequence */
ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */
continue; /* will finish */
- }
-
+ }
+
assert(anchor + llen <= iend);
ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen);
@@ -1315,19 +1315,19 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
} }
ZSTD_setBasePrices(optStatePtr, optLevel);
}
- } /* while (ip < ilimit) */
-
- /* Return the last literals size */
+ } /* while (ip < ilimit) */
+
+ /* Return the last literals size */
return (size_t)(iend - anchor);
-}
-
+}
+
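The _shortestPath block above recovers the winning parse by walking opt[] backward: each cell remembers the step (literal run or match, via mlen/litlen/off) that produced its best price, so following those links from last_pos down to 0 and then replaying them forward yields the sequences in stream order. In miniature, with a hand-filled table and hypothetical names:

    #include <stdio.h>

    typedef struct { int from; char kind; } step_t;   /* 'L' literal, 'M' match */

    int main(void)
    {
        /* pretend a forward pass filled this for positions 0..10 */
        step_t const step[11] = {
            {0,0},{0,'L'},{1,'L'},{2,'L'},{3,'L'},{4,'L'},
            {5,'L'},{2,'M'},{7,'L'},{8,'L'},{7,'M'}
        };
        char order[11]; int n = 0, pos = 10;
        while (pos > 0) {                 /* backward: collect the winning steps */
            order[n++] = step[pos].kind;
            pos = step[pos].from;
        }
        while (n--) putchar(order[n]);    /* forward: emit in stream order => LLMM */
        putchar('\n');
        return 0;
    }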
static size_t ZSTD_compressBlock_opt0(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
}
-
+
static size_t ZSTD_compressBlock_opt2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
@@ -1335,14 +1335,14 @@ static size_t ZSTD_compressBlock_opt2(
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
}
-size_t ZSTD_compressBlock_btopt(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
-{
- DEBUGLOG(5, "ZSTD_compressBlock_btopt");
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btopt");
return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
-}
-
+}
+
@@ -1377,14 +1377,14 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
}
-size_t ZSTD_compressBlock_btultra(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
-{
+{
DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
-}
-
+}
+
size_t ZSTD_compressBlock_btultra2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
@@ -1427,19 +1427,19 @@ size_t ZSTD_compressBlock_btultra_dictMatchState(
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}
-size_t ZSTD_compressBlock_btopt_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
-{
+{
return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
-}
-
-size_t ZSTD_compressBlock_btultra_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+}
+
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
-{
+{
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
-}
+}
/* note : no btultra2 variant for extDict nor dictMatchState,
* because btultra2 is not meant to work with dictionaries
diff --git a/contrib/libs/zstd/lib/compress/zstd_opt.h b/contrib/libs/zstd/lib/compress/zstd_opt.h
index 627255f53d..f7bab711f7 100644
--- a/contrib/libs/zstd/lib/compress/zstd_opt.h
+++ b/contrib/libs/zstd/lib/compress/zstd_opt.h
@@ -1,30 +1,30 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
-#ifndef ZSTD_OPT_H
-#define ZSTD_OPT_H
+#ifndef ZSTD_OPT_H
+#define ZSTD_OPT_H
-#if defined (__cplusplus)
-extern "C" {
-#endif
+#if defined (__cplusplus)
+extern "C" {
+#endif
-#include "zstd_compress_internal.h"
+#include "zstd_compress_internal.h"
/* used in ZSTD_loadDictionaryContent() */
void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
-size_t ZSTD_compressBlock_btopt(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_btultra2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -38,19 +38,19 @@ size_t ZSTD_compressBlock_btultra_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btopt_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-size_t ZSTD_compressBlock_btultra_extDict(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
/* note : no btultra2 variant for extDict nor dictMatchState,
* because btultra2 is not meant to work with dictionaries
* and is only specific for the first block (no prefix) */
-#if defined (__cplusplus)
+#if defined (__cplusplus)
}
-#endif
+#endif
-#endif /* ZSTD_OPT_H */
+#endif /* ZSTD_OPT_H */
diff --git a/contrib/libs/zstd/lib/compress/zstdmt_compress.c b/contrib/libs/zstd/lib/compress/zstdmt_compress.c
index 6bc14b035e..30cb698dff 100644
--- a/contrib/libs/zstd/lib/compress/zstdmt_compress.c
+++ b/contrib/libs/zstd/lib/compress/zstdmt_compress.c
@@ -1,163 +1,163 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-
-/* ====== Compiler specifics ====== */
-#if defined(_MSC_VER)
-# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
-#endif
-
-
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ====== Compiler specifics ====== */
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
/* ====== Constants ====== */
#define ZSTDMT_OVERLAPLOG_DEFAULT 0
-/* ====== Dependencies ====== */
+/* ====== Dependencies ====== */
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
#include "../common/mem.h" /* MEM_STATIC */
#include "../common/pool.h" /* threadpool */
#include "../common/threading.h" /* mutex */
-#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
-#include "zstd_ldm.h"
-#include "zstdmt_compress.h"
-
-/* Guards code to support resizing the SeqPool.
- * We will want to resize the SeqPool to save memory in the future.
- * Until then, comment the code out since it is unused.
- */
-#define ZSTD_RESIZE_SEQPOOL 0
-
-/* ====== Debug ====== */
+#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
+#include "zstd_ldm.h"
+#include "zstdmt_compress.h"
+
+/* Guards code to support resizing the SeqPool.
+ * We will want to resize the SeqPool to save memory in the future.
+ * Until then, comment the code out since it is unused.
+ */
+#define ZSTD_RESIZE_SEQPOOL 0
+
+/* ====== Debug ====== */
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
&& !defined(_MSC_VER) \
&& !defined(__MINGW32__)
-
-# include <stdio.h>
-# include <unistd.h>
-# include <sys/times.h>
-
-# define DEBUG_PRINTHEX(l,p,n) { \
- unsigned debug_u; \
- for (debug_u=0; debug_u<(n); debug_u++) \
+
+# include <stdio.h>
+# include <unistd.h>
+# include <sys/times.h>
+
+# define DEBUG_PRINTHEX(l,p,n) { \
+ unsigned debug_u; \
+ for (debug_u=0; debug_u<(n); debug_u++) \
RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
RAWLOG(l, " \n"); \
-}
-
-static unsigned long long GetCurrentClockTimeMicroseconds(void)
-{
- static clock_t _ticksPerSecond = 0;
- if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
-
+}
+
+static unsigned long long GetCurrentClockTimeMicroseconds(void)
+{
+ static clock_t _ticksPerSecond = 0;
+ if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
+
{ struct tms junk; clock_t newTicks = (clock_t) times(&junk);
return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
} }
-
-#define MUTEX_WAIT_TIME_DLEVEL 6
-#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \
+
+#define MUTEX_WAIT_TIME_DLEVEL 6
+#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \
if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \
- unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
- ZSTD_pthread_mutex_lock(mutex); \
- { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
- unsigned long long const elapsedTime = (afterTime-beforeTime); \
- if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \
- DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
- elapsedTime, #mutex); \
- } } \
- } else { \
- ZSTD_pthread_mutex_lock(mutex); \
- } \
-}
-
-#else
-
-# define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
-# define DEBUG_PRINTHEX(l,p,n) {}
-
-#endif
-
-
-/* ===== Buffer Pool ===== */
-/* a single Buffer Pool can be invoked from multiple threads in parallel */
-
-typedef struct buffer_s {
- void* start;
- size_t capacity;
-} buffer_t;
-
-static const buffer_t g_nullBuffer = { NULL, 0 };
-
-typedef struct ZSTDMT_bufferPool_s {
- ZSTD_pthread_mutex_t poolMutex;
- size_t bufferSize;
- unsigned totalBuffers;
- unsigned nbBuffers;
- ZSTD_customMem cMem;
- buffer_t bTable[1]; /* variable size */
-} ZSTDMT_bufferPool;
-
+ unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
+ ZSTD_pthread_mutex_lock(mutex); \
+ { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
+ unsigned long long const elapsedTime = (afterTime-beforeTime); \
+ if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \
+ DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
+ elapsedTime, #mutex); \
+ } } \
+ } else { \
+ ZSTD_pthread_mutex_lock(mutex); \
+ } \
+}
+
+#else
+
+# define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
+# define DEBUG_PRINTHEX(l,p,n) {}
+
+#endif
+
+
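At high debug levels, ZSTD_PTHREAD_MUTEX_LOCK above times every acquisition and logs only waits beyond 1 ms; otherwise it compiles down to a plain lock. A functional sketch of the same measurement, assuming CLOCK_MONOTONIC in place of the times()-based clock used above:

    #define _POSIX_C_SOURCE 199309L   /* for clock_gettime */
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    /* Lock m, reporting to stderr if acquisition stalled past 1 ms. */
    static void timed_mutex_lock(pthread_mutex_t* m, const char* name)
    {
        struct timespec t0, t1;
        clock_gettime(CLOCK_MONOTONIC, &t0);
        pthread_mutex_lock(m);
        clock_gettime(CLOCK_MONOTONIC, &t1);
        {   long long const us = (t1.tv_sec - t0.tv_sec) * 1000000LL
                               + (t1.tv_nsec - t0.tv_nsec) / 1000;
            if (us > 1000)
                fprintf(stderr, "waited %lld us for mutex %s\n", us, name);
        }
    }

    int main(void)
    {
        pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
        timed_mutex_lock(&m, "m");   /* uncontended: no report expected */
        pthread_mutex_unlock(&m);
        return 0;
    }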
+/* ===== Buffer Pool ===== */
+/* a single Buffer Pool can be invoked from multiple threads in parallel */
+
+typedef struct buffer_s {
+ void* start;
+ size_t capacity;
+} buffer_t;
+
+static const buffer_t g_nullBuffer = { NULL, 0 };
+
+typedef struct ZSTDMT_bufferPool_s {
+ ZSTD_pthread_mutex_t poolMutex;
+ size_t bufferSize;
+ unsigned totalBuffers;
+ unsigned nbBuffers;
+ ZSTD_customMem cMem;
+ buffer_t bTable[1]; /* variable size */
+} ZSTDMT_bufferPool;
+
static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem)
-{
+{
ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
- sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
- if (bufPool==NULL) return NULL;
- if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
+ sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
+ if (bufPool==NULL) return NULL;
+ if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
ZSTD_customFree(bufPool, cMem);
- return NULL;
- }
- bufPool->bufferSize = 64 KB;
- bufPool->totalBuffers = maxNbBuffers;
- bufPool->nbBuffers = 0;
- bufPool->cMem = cMem;
- return bufPool;
-}
-
-static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
-{
- unsigned u;
- DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
- if (!bufPool) return; /* compatibility with free on NULL */
- for (u=0; u<bufPool->totalBuffers; u++) {
- DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
+ return NULL;
+ }
+ bufPool->bufferSize = 64 KB;
+ bufPool->totalBuffers = maxNbBuffers;
+ bufPool->nbBuffers = 0;
+ bufPool->cMem = cMem;
+ return bufPool;
+}
+
+static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
+{
+ unsigned u;
+ DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
+ if (!bufPool) return; /* compatibility with free on NULL */
+ for (u=0; u<bufPool->totalBuffers; u++) {
+ DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem);
- }
- ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
+ }
+ ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
ZSTD_customFree(bufPool, bufPool->cMem);
-}
-
-/* only works at initialization, not during compression */
-static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
-{
- size_t const poolSize = sizeof(*bufPool)
- + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
- unsigned u;
- size_t totalBufferSize = 0;
- ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
- for (u=0; u<bufPool->totalBuffers; u++)
- totalBufferSize += bufPool->bTable[u].capacity;
- ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
-
- return poolSize + totalBufferSize;
-}
-
-/* ZSTDMT_setBufferSize() :
- * all future buffers provided by this buffer pool will have _at least_ this size
- * note : it's better for all buffers to have same size,
- * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */
-static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
-{
- ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
- DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
- bufPool->bufferSize = bSize;
- ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
-}
-
+}
+
+/* only works at initialization, not during compression */
+static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
+{
+ size_t const poolSize = sizeof(*bufPool)
+ + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
+ unsigned u;
+ size_t totalBufferSize = 0;
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+ for (u=0; u<bufPool->totalBuffers; u++)
+ totalBufferSize += bufPool->bTable[u].capacity;
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+
+ return poolSize + totalBufferSize;
+}
+
+/* ZSTDMT_setBufferSize() :
+ * all future buffers provided by this buffer pool will have _at least_ this size
+ * note : it's better for all buffers to have the same size,
+ * as they become freely interchangeable, reducing malloc/free usage and memory fragmentation */
+static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
+{
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+ DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
+ bufPool->bufferSize = bSize;
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+}
+
static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers)
{
@@ -176,214 +176,214 @@ static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool,
}
}
-/** ZSTDMT_getBuffer() :
- * assumption : bufPool must be valid
- * @return : a buffer, with start pointer and size
- * note: allocation may fail, in this case, start==NULL and size==0 */
-static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
-{
- size_t const bSize = bufPool->bufferSize;
- DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
- ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
- if (bufPool->nbBuffers) { /* try to use an existing buffer */
- buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
- size_t const availBufferSize = buf.capacity;
- bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
- if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
- /* large enough, but not too much */
- DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
- bufPool->nbBuffers, (U32)buf.capacity);
- ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
- return buf;
- }
- /* size conditions not respected : scratch this buffer, create new one */
- DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
+/** ZSTDMT_getBuffer() :
+ * assumption : bufPool must be valid
+ * @return : a buffer, with start pointer and size
+ * note: allocation may fail; in this case, start==NULL and size==0 */
+static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
+{
+ size_t const bSize = bufPool->bufferSize;
+ DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+ if (bufPool->nbBuffers) { /* try to use an existing buffer */
+ buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
+ size_t const availBufferSize = buf.capacity;
+ bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
+ if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
+ /* large enough, but not too much */
+ DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
+ bufPool->nbBuffers, (U32)buf.capacity);
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+ return buf;
+ }
+ /* size conditions not respected : scratch this buffer, create new one */
+ DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
ZSTD_customFree(buf.start, bufPool->cMem);
- }
- ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
- /* create new buffer */
- DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
- { buffer_t buffer;
+ }
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+ /* create new buffer */
+ DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
+ { buffer_t buffer;
void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
- buffer.start = start; /* note : start can be NULL if malloc fails ! */
- buffer.capacity = (start==NULL) ? 0 : bSize;
- if (start==NULL) {
- DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
- } else {
- DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
- }
- return buffer;
- }
-}
-
-#if ZSTD_RESIZE_SEQPOOL
-/** ZSTDMT_resizeBuffer() :
- * assumption : bufPool must be valid
- * @return : a buffer that is at least the buffer pool buffer size.
- * If a reallocation happens, the data in the input buffer is copied.
- */
-static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
-{
- size_t const bSize = bufPool->bufferSize;
- if (buffer.capacity < bSize) {
+ buffer.start = start; /* note : start can be NULL if malloc fails ! */
+ buffer.capacity = (start==NULL) ? 0 : bSize;
+ if (start==NULL) {
+ DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
+ } else {
+ DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
+ }
+ return buffer;
+ }
+}
+
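ZSTDMT_getBuffer above recycles a pooled buffer only when it is large enough for the request yet no more than 8 times larger; anything outside that band is freed and reallocated (the bitwise & keeps the test branchless). The acceptance test in isolation:

    #include <stdio.h>

    /* Accept a pooled buffer iff capacity is in [wanted, 8*wanted]. */
    static int reusable(size_t capacity, size_t wanted)
    {
        return (capacity >= wanted) & ((capacity >> 3) <= wanted);
    }

    int main(void)
    {
        printf("%d\n", reusable(1 << 16, 1 << 16));   /* 1: exact fit */
        printf("%d\n", reusable(1 << 19, 1 << 16));   /* 1: 8x, still accepted */
        printf("%d\n", reusable(1 << 20, 1 << 16));   /* 0: 16x, too wasteful */
        printf("%d\n", reusable(1 << 15, 1 << 16));   /* 0: too small */
        return 0;
    }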
+#if ZSTD_RESIZE_SEQPOOL
+/** ZSTDMT_resizeBuffer() :
+ * assumption : bufPool must be valid
+ * @return : a buffer that is at least the buffer pool buffer size.
+ * If a reallocation happens, the data in the input buffer is copied.
+ */
+static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
+{
+ size_t const bSize = bufPool->bufferSize;
+ if (buffer.capacity < bSize) {
void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
- buffer_t newBuffer;
- newBuffer.start = start;
- newBuffer.capacity = start == NULL ? 0 : bSize;
- if (start != NULL) {
- assert(newBuffer.capacity >= buffer.capacity);
+ buffer_t newBuffer;
+ newBuffer.start = start;
+ newBuffer.capacity = start == NULL ? 0 : bSize;
+ if (start != NULL) {
+ assert(newBuffer.capacity >= buffer.capacity);
ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity);
- DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
- return newBuffer;
- }
- DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
- }
- return buffer;
-}
-#endif
-
-/* store buffer for later re-use, up to pool capacity */
-static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
-{
+ DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
+ return newBuffer;
+ }
+ DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
+ }
+ return buffer;
+}
+#endif
+
+/* store buffer for later re-use, up to pool capacity */
+static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
+{
DEBUGLOG(5, "ZSTDMT_releaseBuffer");
- if (buf.start == NULL) return; /* compatible with release on NULL */
- ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
- if (bufPool->nbBuffers < bufPool->totalBuffers) {
- bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */
- DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
- (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
- ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
- return;
- }
- ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
- /* Reached bufferPool capacity (should not happen) */
- DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
+ if (buf.start == NULL) return; /* compatible with release on NULL */
+ ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+ if (bufPool->nbBuffers < bufPool->totalBuffers) {
+ bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */
+ DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
+ (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+ return;
+ }
+ ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+ /* Reached bufferPool capacity (should not happen) */
+ DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
ZSTD_customFree(buf.start, bufPool->cMem);
-}
-
+}
+
/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released.
* The 3 additional buffers are as follows:
* 1 buffer for input loading
* 1 buffer for "next input" when submitting current one
* 1 buffer stuck in queue */
#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) 2*nbWorkers + 3
-
+
/* After a worker releases its rawSeqStore, it is immediately ready for reuse.
* So we only need one seq buffer per worker. */
#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) nbWorkers
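With the accounting above, four workers need 2*4 + 3 = 11 output buffers but only 4 sequence stores. A worked check (numbers plugged in for illustration; note the macro bodies are unparenthesized, so they are safest with a plain identifier as argument):

    #include <stdio.h>

    #define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) 2*nbWorkers + 3
    #define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) nbWorkers

    int main(void)
    {
        unsigned const w = 4;
        printf("buffers: %u, seq stores: %u\n",
               BUF_POOL_MAX_NB_BUFFERS(w), SEQ_POOL_MAX_NB_BUFFERS(w));  /* 11, 4 */
        return 0;
    }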
-/* ===== Seq Pool Wrapper ====== */
-
-typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
-
-static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
-{
- return ZSTDMT_sizeof_bufferPool(seqPool);
-}
-
-static rawSeqStore_t bufferToSeq(buffer_t buffer)
-{
+/* ===== Seq Pool Wrapper ====== */
+
+typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
+
+static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
+{
+ return ZSTDMT_sizeof_bufferPool(seqPool);
+}
+
+static rawSeqStore_t bufferToSeq(buffer_t buffer)
+{
rawSeqStore_t seq = kNullRawSeqStore;
- seq.seq = (rawSeq*)buffer.start;
- seq.capacity = buffer.capacity / sizeof(rawSeq);
- return seq;
-}
-
-static buffer_t seqToBuffer(rawSeqStore_t seq)
-{
- buffer_t buffer;
- buffer.start = seq.seq;
- buffer.capacity = seq.capacity * sizeof(rawSeq);
- return buffer;
-}
-
-static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
-{
- if (seqPool->bufferSize == 0) {
- return kNullRawSeqStore;
- }
- return bufferToSeq(ZSTDMT_getBuffer(seqPool));
-}
-
-#if ZSTD_RESIZE_SEQPOOL
-static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
-{
- return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
-}
-#endif
-
-static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
-{
- ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
-}
-
-static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
-{
- ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
-}
-
-static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
-{
+ seq.seq = (rawSeq*)buffer.start;
+ seq.capacity = buffer.capacity / sizeof(rawSeq);
+ return seq;
+}
+
+static buffer_t seqToBuffer(rawSeqStore_t seq)
+{
+ buffer_t buffer;
+ buffer.start = seq.seq;
+ buffer.capacity = seq.capacity * sizeof(rawSeq);
+ return buffer;
+}
+
+static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
+{
+ if (seqPool->bufferSize == 0) {
+ return kNullRawSeqStore;
+ }
+ return bufferToSeq(ZSTDMT_getBuffer(seqPool));
+}
+
+#if ZSTD_RESIZE_SEQPOOL
+static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+{
+ return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
+}
+#endif
+
+static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+{
+ ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
+}
+
+static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
+{
+ ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
+}
+
+static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
+{
ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
if (seqPool == NULL) return NULL;
- ZSTDMT_setNbSeq(seqPool, 0);
- return seqPool;
-}
-
-static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
-{
- ZSTDMT_freeBufferPool(seqPool);
-}
-
+ ZSTDMT_setNbSeq(seqPool, 0);
+ return seqPool;
+}
+
+static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
+{
+ ZSTDMT_freeBufferPool(seqPool);
+}
+
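The seq pool above is the buffer pool reused under a typedef: bufferToSeq and seqToBuffer simply convert the capacity between bytes and rawSeq counts at the boundary. The shape of that wrapper, reduced to a sketch (illustrative types, not zstd's):

    #include <stdio.h>
    #include <stddef.h>

    typedef struct { void* start; size_t capacity; } byteBuf_t;
    typedef struct { int x, y; } item_t;
    typedef struct { item_t* items; size_t capacity; } itemStore_t;

    static itemStore_t bufToItems(byteBuf_t b)
    {
        itemStore_t s;
        s.items    = (item_t*)b.start;
        s.capacity = b.capacity / sizeof(item_t);   /* bytes -> item count */
        return s;
    }

    static byteBuf_t itemsToBuf(itemStore_t s)
    {
        byteBuf_t b;
        b.start    = s.items;
        b.capacity = s.capacity * sizeof(item_t);   /* item count -> bytes */
        return b;
    }

    int main(void)
    {
        item_t storage[8];
        byteBuf_t const b = { storage, sizeof(storage) };
        itemStore_t const s  = bufToItems(b);       /* 8 items */
        byteBuf_t const b2   = itemsToBuf(s);       /* back to sizeof(storage) bytes */
        printf("%zu items <-> %zu bytes\n", s.capacity, b2.capacity);
        return 0;
    }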
static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
{
return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers));
}
-
-
-/* ===== CCtx Pool ===== */
-/* a single CCtx Pool can be invoked from multiple threads in parallel */
-
-typedef struct {
- ZSTD_pthread_mutex_t poolMutex;
+
+
+/* ===== CCtx Pool ===== */
+/* a single CCtx Pool can be invoked from multiple threads in parallel */
+
+typedef struct {
+ ZSTD_pthread_mutex_t poolMutex;
int totalCCtx;
int availCCtx;
- ZSTD_customMem cMem;
- ZSTD_CCtx* cctx[1]; /* variable size */
-} ZSTDMT_CCtxPool;
-
-/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
-static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
-{
+ ZSTD_customMem cMem;
+ ZSTD_CCtx* cctx[1]; /* variable size */
+} ZSTDMT_CCtxPool;
+
+/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
+static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
+{
int cid;
for (cid=0; cid<pool->totalCCtx; cid++)
ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */
- ZSTD_pthread_mutex_destroy(&pool->poolMutex);
+ ZSTD_pthread_mutex_destroy(&pool->poolMutex);
ZSTD_customFree(pool, pool->cMem);
-}
-
-/* ZSTDMT_createCCtxPool() :
- * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
+}
+
+/* ZSTDMT_createCCtxPool() :
+ * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
- ZSTD_customMem cMem)
-{
+ ZSTD_customMem cMem)
+{
ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc(
- sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
- assert(nbWorkers > 0);
- if (!cctxPool) return NULL;
- if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
+ sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
+ assert(nbWorkers > 0);
+ if (!cctxPool) return NULL;
+ if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
ZSTD_customFree(cctxPool, cMem);
- return NULL;
- }
- cctxPool->cMem = cMem;
- cctxPool->totalCCtx = nbWorkers;
- cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */
- cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
- if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
- DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
- return cctxPool;
-}
-
+ return NULL;
+ }
+ cctxPool->cMem = cMem;
+ cctxPool->totalCCtx = nbWorkers;
+ cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */
+ cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
+ if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
+ DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
+ return cctxPool;
+}
+
static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
int nbWorkers)
{
@@ -396,76 +396,76 @@ static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
}
}
-/* only works during initialization phase, not during compression */
-static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
-{
- ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
- { unsigned const nbWorkers = cctxPool->totalCCtx;
- size_t const poolSize = sizeof(*cctxPool)
- + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
- unsigned u;
- size_t totalCCtxSize = 0;
- for (u=0; u<nbWorkers; u++) {
- totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
- }
- ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
- assert(nbWorkers > 0);
- return poolSize + totalCCtxSize;
- }
-}
-
-static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
-{
- DEBUGLOG(5, "ZSTDMT_getCCtx");
- ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
- if (cctxPool->availCCtx) {
- cctxPool->availCCtx--;
- { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
- ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
- return cctx;
- } }
- ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
- DEBUGLOG(5, "create one more CCtx");
- return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */
-}
-
-static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
-{
- if (cctx==NULL) return; /* compatibility with release on NULL */
- ZSTD_pthread_mutex_lock(&pool->poolMutex);
- if (pool->availCCtx < pool->totalCCtx)
- pool->cctx[pool->availCCtx++] = cctx;
- else {
- /* pool overflow : should not happen, since totalCCtx==nbWorkers */
- DEBUGLOG(4, "CCtx pool overflow : free cctx");
- ZSTD_freeCCtx(cctx);
- }
- ZSTD_pthread_mutex_unlock(&pool->poolMutex);
-}
-
-/* ==== Serial State ==== */
-
-typedef struct {
- void const* start;
- size_t size;
-} range_t;
-
-typedef struct {
- /* All variables in the struct are protected by mutex. */
- ZSTD_pthread_mutex_t mutex;
- ZSTD_pthread_cond_t cond;
- ZSTD_CCtx_params params;
- ldmState_t ldmState;
- XXH64_state_t xxhState;
- unsigned nextJobID;
- /* Protects ldmWindow.
- * Must be acquired after the main mutex when acquiring both.
- */
- ZSTD_pthread_mutex_t ldmWindowMutex;
+/* only works during initialization phase, not during compression */
+static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
+{
+ ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
+ { unsigned const nbWorkers = cctxPool->totalCCtx;
+ size_t const poolSize = sizeof(*cctxPool)
+ + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
+ unsigned u;
+ size_t totalCCtxSize = 0;
+ for (u=0; u<nbWorkers; u++) {
+ totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
+ }
+ ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+ assert(nbWorkers > 0);
+ return poolSize + totalCCtxSize;
+ }
+}
+
+static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
+{
+ DEBUGLOG(5, "ZSTDMT_getCCtx");
+ ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
+ if (cctxPool->availCCtx) {
+ cctxPool->availCCtx--;
+ { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
+ ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+ return cctx;
+ } }
+ ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+ DEBUGLOG(5, "create one more CCtx");
+ return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */
+}
+
+static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return; /* compatibility with release on NULL */
+ ZSTD_pthread_mutex_lock(&pool->poolMutex);
+ if (pool->availCCtx < pool->totalCCtx)
+ pool->cctx[pool->availCCtx++] = cctx;
+ else {
+ /* pool overflow : should not happen, since totalCCtx==nbWorkers */
+ DEBUGLOG(4, "CCtx pool overflow : free cctx");
+ ZSTD_freeCCtx(cctx);
+ }
+ ZSTD_pthread_mutex_unlock(&pool->poolMutex);
+}
+
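ZSTDMT_createCCtxPool above reserves slots for all nbWorkers contexts but eagerly creates only cctx[0], so single-thread use pays for one context; ZSTDMT_getCCtx hands out pooled contexts while they last and allocates fresh ones after that, and ZSTDMT_releaseCCtx re-shelves or frees. A minimal single-threaded sketch of that lazy-fill pattern (illustrative names; the real pool takes poolMutex around each step):

    #include <stdlib.h>

    typedef struct { int id; } obj_t;
    typedef struct { int total, avail; obj_t* slot[1]; /* variable size */ } pool_t;

    static obj_t* obj_create(void) { return (obj_t*)calloc(1, sizeof(obj_t)); }

    static pool_t* pool_create(int n)
    {
        pool_t* const p = (pool_t*)calloc(1, sizeof(pool_t) + (size_t)(n - 1) * sizeof(obj_t*));
        if (p == NULL) return NULL;
        p->total = n;
        p->avail = 1;                       /* only one object pre-built */
        p->slot[0] = obj_create();
        return p;
    }

    static obj_t* pool_get(pool_t* p)
    {
        if (p->avail) return p->slot[--p->avail];
        return obj_create();                /* pool empty: build a fresh one */
    }

    static void pool_put(pool_t* p, obj_t* o)
    {
        if (o == NULL) return;              /* free(NULL)-style tolerance */
        if (p->avail < p->total) { p->slot[p->avail++] = o; return; }
        free(o);                            /* overflow: should not happen */
    }

    int main(void)
    {
        pool_t* const p = pool_create(2);
        obj_t* const a = pool_get(p);       /* the pre-built object */
        obj_t* const b = pool_get(p);       /* created on demand */
        pool_put(p, a); pool_put(p, b);
        /* teardown trimmed for brevity */
        return 0;
    }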
+/* ==== Serial State ==== */
+
+typedef struct {
+ void const* start;
+ size_t size;
+} range_t;
+
+typedef struct {
+ /* All variables in the struct are protected by mutex. */
+ ZSTD_pthread_mutex_t mutex;
+ ZSTD_pthread_cond_t cond;
+ ZSTD_CCtx_params params;
+ ldmState_t ldmState;
+ XXH64_state_t xxhState;
+ unsigned nextJobID;
+ /* Protects ldmWindow.
+ * Must be acquired after the main mutex when acquiring both.
+ */
+ ZSTD_pthread_mutex_t ldmWindowMutex;
ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */
- ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
-} serialState_t;
-
+ ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
+} serialState_t;
+
static int
ZSTDMT_serialState_reset(serialState_t* serialState,
ZSTDMT_seqPool* seqPool,
@@ -473,45 +473,45 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
size_t jobSize,
const void* dict, size_t const dictSize,
ZSTD_dictContentType_e dictContentType)
-{
- /* Adjust parameters */
+{
+ /* Adjust parameters */
if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
- DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
- ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
- assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
+ DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
+ ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
+ assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
assert(params.ldmParams.hashRateLog < 32);
- } else {
+ } else {
ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
- }
- serialState->nextJobID = 0;
- if (params.fParams.checksumFlag)
- XXH64_reset(&serialState->xxhState, 0);
+ }
+ serialState->nextJobID = 0;
+ if (params.fParams.checksumFlag)
+ XXH64_reset(&serialState->xxhState, 0);
if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
- ZSTD_customMem cMem = params.customMem;
- unsigned const hashLog = params.ldmParams.hashLog;
- size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
- unsigned const bucketLog =
- params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
- unsigned const prevBucketLog =
- serialState->params.ldmParams.hashLog -
- serialState->params.ldmParams.bucketSizeLog;
+ ZSTD_customMem cMem = params.customMem;
+ unsigned const hashLog = params.ldmParams.hashLog;
+ size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
+ unsigned const bucketLog =
+ params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
+ unsigned const prevBucketLog =
+ serialState->params.ldmParams.hashLog -
+ serialState->params.ldmParams.bucketSizeLog;
size_t const numBuckets = (size_t)1 << bucketLog;
- /* Size the seq pool tables */
+ /* Size the seq pool tables */
ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
- /* Reset the window */
+ /* Reset the window */
ZSTD_window_init(&serialState->ldmState.window);
- /* Resize tables and output space if necessary. */
- if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
+ /* Resize tables and output space if necessary. */
+ if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
ZSTD_customFree(serialState->ldmState.hashTable, cMem);
serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem);
- }
- if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
+ }
+ if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem);
- }
- if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
- return 1;
- /* Zero the tables */
+ }
+ if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
+ return 1;
+ /* Zero the tables */
ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets);
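The reset path above sizes both LDM tables from the parameters: (1 << hashLog) hash entries plus one bucket-offset byte per (1 << (hashLog - bucketSizeLog)) buckets, reallocating only when the new logs outgrow the previous ones. Plugging in sample values (hashLog and bucketSizeLog invented; ldmEntry_like_t stands in for the real entry type):

    #include <stdio.h>

    typedef struct { unsigned offset; unsigned checksum; } ldmEntry_like_t;

    int main(void)
    {
        unsigned const hashLog = 20, bucketSizeLog = 3;
        size_t const hashSize   = ((size_t)1 << hashLog) * sizeof(ldmEntry_like_t);
        size_t const numBuckets = (size_t)1 << (hashLog - bucketSizeLog);
        printf("hash table: %zu bytes, bucket offsets: %zu bytes\n",
               hashSize, numBuckets);   /* 8 MiB and 128 KiB with these picks */
        return 0;
    }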
@@ -530,132 +530,132 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
/* Initialize serialState's copy of ldmWindow. */
serialState->ldmWindow = serialState->ldmState.window;
- }
+ }
- serialState->params = params;
+ serialState->params = params;
serialState->params.jobSize = (U32)jobSize;
- return 0;
-}
-
-static int ZSTDMT_serialState_init(serialState_t* serialState)
-{
- int initError = 0;
+ return 0;
+}
+
+static int ZSTDMT_serialState_init(serialState_t* serialState)
+{
+ int initError = 0;
ZSTD_memset(serialState, 0, sizeof(*serialState));
- initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
- initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
- initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
- initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
- return initError;
-}
-
-static void ZSTDMT_serialState_free(serialState_t* serialState)
-{
- ZSTD_customMem cMem = serialState->params.customMem;
- ZSTD_pthread_mutex_destroy(&serialState->mutex);
- ZSTD_pthread_cond_destroy(&serialState->cond);
- ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
- ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
+ initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
+ initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
+ initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
+ initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
+ return initError;
+}
+
+static void ZSTDMT_serialState_free(serialState_t* serialState)
+{
+ ZSTD_customMem cMem = serialState->params.customMem;
+ ZSTD_pthread_mutex_destroy(&serialState->mutex);
+ ZSTD_pthread_cond_destroy(&serialState->cond);
+ ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
+ ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
ZSTD_customFree(serialState->ldmState.hashTable, cMem);
ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
-}
-
-static void ZSTDMT_serialState_update(serialState_t* serialState,
- ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
- range_t src, unsigned jobID)
-{
- /* Wait for our turn */
- ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
- while (serialState->nextJobID < jobID) {
+}
+
+static void ZSTDMT_serialState_update(serialState_t* serialState,
+ ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
+ range_t src, unsigned jobID)
+{
+ /* Wait for our turn */
+ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
+ while (serialState->nextJobID < jobID) {
DEBUGLOG(5, "wait for serialState->cond");
- ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
- }
- /* A future job may error and skip our job */
- if (serialState->nextJobID == jobID) {
- /* It is now our turn, do any processing necessary */
+ ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
+ }
+ /* A future job may error and skip our job */
+ if (serialState->nextJobID == jobID) {
+ /* It is now our turn, do any processing necessary */
if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) {
- size_t error;
- assert(seqStore.seq != NULL && seqStore.pos == 0 &&
- seqStore.size == 0 && seqStore.capacity > 0);
+ size_t error;
+ assert(seqStore.seq != NULL && seqStore.pos == 0 &&
+ seqStore.size == 0 && seqStore.capacity > 0);
assert(src.size <= serialState->params.jobSize);
ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0);
- error = ZSTD_ldm_generateSequences(
- &serialState->ldmState, &seqStore,
- &serialState->params.ldmParams, src.start, src.size);
- /* We provide a large enough buffer to never fail. */
- assert(!ZSTD_isError(error)); (void)error;
- /* Update ldmWindow to match the ldmState.window and signal the main
- * thread if it is waiting for a buffer.
- */
- ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
- serialState->ldmWindow = serialState->ldmState.window;
- ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
- ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
- }
- if (serialState->params.fParams.checksumFlag && src.size > 0)
- XXH64_update(&serialState->xxhState, src.start, src.size);
- }
-    /* Now it is the next job's turn */
- serialState->nextJobID++;
- ZSTD_pthread_cond_broadcast(&serialState->cond);
- ZSTD_pthread_mutex_unlock(&serialState->mutex);
-
- if (seqStore.size > 0) {
- size_t const err = ZSTD_referenceExternalSequences(
- jobCCtx, seqStore.seq, seqStore.size);
+ error = ZSTD_ldm_generateSequences(
+ &serialState->ldmState, &seqStore,
+ &serialState->params.ldmParams, src.start, src.size);
+ /* We provide a large enough buffer to never fail. */
+ assert(!ZSTD_isError(error)); (void)error;
+ /* Update ldmWindow to match the ldmState.window and signal the main
+ * thread if it is waiting for a buffer.
+ */
+ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
+ serialState->ldmWindow = serialState->ldmState.window;
+ ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
+ ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
+ }
+ if (serialState->params.fParams.checksumFlag && src.size > 0)
+ XXH64_update(&serialState->xxhState, src.start, src.size);
+ }
+    /* Now it is the next job's turn */
+ serialState->nextJobID++;
+ ZSTD_pthread_cond_broadcast(&serialState->cond);
+ ZSTD_pthread_mutex_unlock(&serialState->mutex);
+
+ if (seqStore.size > 0) {
+ size_t const err = ZSTD_referenceExternalSequences(
+ jobCCtx, seqStore.seq, seqStore.size);
assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable);
- assert(!ZSTD_isError(err));
- (void)err;
- }
-}
-
-static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
- unsigned jobID, size_t cSize)
-{
- ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
- if (serialState->nextJobID <= jobID) {
- assert(ZSTD_isError(cSize)); (void)cSize;
- DEBUGLOG(5, "Skipping past job %u because of error", jobID);
- serialState->nextJobID = jobID + 1;
- ZSTD_pthread_cond_broadcast(&serialState->cond);
-
- ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
- ZSTD_window_clear(&serialState->ldmWindow);
- ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
- ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
- }
- ZSTD_pthread_mutex_unlock(&serialState->mutex);
-
-}
-
-
-/* ------------------------------------------ */
-/* ===== Worker thread ===== */
-/* ------------------------------------------ */
-
-static const range_t kNullRange = { NULL, 0 };
-
-typedef struct {
- size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
- size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
- ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */
- ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */
- ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */
- ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */
- ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */
- serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */
- buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
- range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */
- range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */
- unsigned jobID; /* set by mtctx, then read by worker => no barrier */
- unsigned firstJob; /* set by mtctx, then read by worker => no barrier */
- unsigned lastJob; /* set by mtctx, then read by worker => no barrier */
- ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */
- const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */
- unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */
- size_t dstFlushed; /* used only by mtctx */
- unsigned frameChecksumNeeded; /* used only by mtctx */
-} ZSTDMT_jobDescription;
-
+ assert(!ZSTD_isError(err));
+ (void)err;
+ }
+}
+
+static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
+ unsigned jobID, size_t cSize)
+{
+ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
+ if (serialState->nextJobID <= jobID) {
+ assert(ZSTD_isError(cSize)); (void)cSize;
+ DEBUGLOG(5, "Skipping past job %u because of error", jobID);
+ serialState->nextJobID = jobID + 1;
+ ZSTD_pthread_cond_broadcast(&serialState->cond);
+
+ ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
+ ZSTD_window_clear(&serialState->ldmWindow);
+ ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
+ ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
+ }
+ ZSTD_pthread_mutex_unlock(&serialState->mutex);
+
+}
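
ZSTDMT_serialState_update() and ZSTDMT_serialState_ensureFinished() above implement a ticket-style turn protocol: each worker blocks until nextJobID reaches its own jobID, performs the serial work (LDM sequence generation, checksum update), then hands the turn to the next job; an errored job's turn is simply skipped so later workers cannot deadlock. A minimal sketch of the same pattern, assuming plain pthreads instead of the ZSTD_pthread_* wrappers (turnState_t and takeTurn are illustrative names, not zstd's):

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
        unsigned nextJobID;
    } turnState_t;

    static void takeTurn(turnState_t* ts, unsigned jobID,
                         void (*serialWork)(void*), void* arg)
    {
        pthread_mutex_lock(&ts->mutex);
        while (ts->nextJobID < jobID)        /* not our turn yet */
            pthread_cond_wait(&ts->cond, &ts->mutex);
        if (ts->nextJobID == jobID)          /* an errored job may have skipped us */
            serialWork(arg);
        ts->nextJobID++;                     /* pass the turn to the next job */
        pthread_cond_broadcast(&ts->cond);   /* wake all waiters; each re-checks its ID */
        pthread_mutex_unlock(&ts->mutex);
    }
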
+
+
+/* ------------------------------------------ */
+/* ===== Worker thread ===== */
+/* ------------------------------------------ */
+
+static const range_t kNullRange = { NULL, 0 };
+
+typedef struct {
+ size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
+ size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
+ ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */
+ ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */
+ ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */
+ ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */
+ ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */
+ serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */
+ buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
+ range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */
+ range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */
+ unsigned jobID; /* set by mtctx, then read by worker => no barrier */
+ unsigned firstJob; /* set by mtctx, then read by worker => no barrier */
+ unsigned lastJob; /* set by mtctx, then read by worker => no barrier */
+ ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */
+ const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */
+ unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */
+ size_t dstFlushed; /* used only by mtctx */
+ unsigned frameChecksumNeeded; /* used only by mtctx */
+} ZSTDMT_jobDescription;
+
#define JOB_ERROR(e) { \
ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \
job->cSize = e; \
@@ -663,16 +663,16 @@ typedef struct {
goto _endJob; \
}
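
JOB_ERROR() records the error code in the shared job->cSize field under the job mutex, then jumps to the common _endJob cleanup path. This works because zstd encodes error codes in the top values of the size_t range, so the same field can carry either a byte count or an error. A sketch of that convention (the MY_* names are illustrative, not zstd's):

    #include <stddef.h>

    #define MY_ERROR(code)  ((size_t)-(code))   /* errors live at the top of the size_t range */
    #define MY_MAX_CODE     120

    static int myIsError(size_t result)         /* mirrors the ZSTD_isError() test */
    {
        return result > (size_t)-MY_MAX_CODE;
    }
    /* e.g. myIsError(MY_ERROR(64)) == 1 ; myIsError(1000) == 0 */
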
-/* ZSTDMT_compressionJob() is a POOL_function type */
+/* ZSTDMT_compressionJob() is a POOL_function type */
static void ZSTDMT_compressionJob(void* jobDescription)
-{
- ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
- ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! copy it, modify the copy */
- ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
- rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
- buffer_t dstBuff = job->dstBuff;
+{
+ ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
+ ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! copy it, modify the copy */
+ ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
+ rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
+ buffer_t dstBuff = job->dstBuff;
size_t lastCBlockSize = 0;
-
+
/* resources */
if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */
@@ -683,23 +683,23 @@ static void ZSTDMT_compressionJob(void* jobDescription)
if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL)
JOB_ERROR(ERROR(memory_allocation));
- /* Don't compute the checksum for chunks, since we compute it externally,
- * but write it in the header.
- */
- if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
- /* Don't run LDM for the chunks, since we handle it externally */
+ /* Don't compute the checksum for chunks, since we compute it externally,
+ * but write it in the header.
+ */
+ if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
+ /* Don't run LDM for the chunks, since we handle it externally */
jobParams.ldmParams.enableLdm = ZSTD_ps_disable;
/* Correct nbWorkers to 0. */
jobParams.nbWorkers = 0;
-
-
- /* init */
- if (job->cdict) {
+
+
+ /* init */
+ if (job->cdict) {
size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
- assert(job->firstJob); /* only allowed for first job */
+ assert(job->firstJob); /* only allowed for first job */
if (ZSTD_isError(initError)) JOB_ERROR(initError);
- } else { /* srcStart points at reloaded section */
- U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
+ } else { /* srcStart points at reloaded section */
+ U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
{ size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
}
@@ -707,61 +707,61 @@ static void ZSTDMT_compressionJob(void* jobDescription)
size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0);
if (ZSTD_isError(err)) JOB_ERROR(err);
}
- { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
- job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
+ { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
+ job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
ZSTD_dtlm_fast,
- NULL, /*cdict*/
+ NULL, /*cdict*/
&jobParams, pledgedSrcSize);
if (ZSTD_isError(initError)) JOB_ERROR(initError);
} }
-
- /* Perform serial step as early as possible, but after CCtx initialization */
- ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
-
- if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */
- size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
+
+ /* Perform serial step as early as possible, but after CCtx initialization */
+ ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
+
+ if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */
+ size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
- DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
- ZSTD_invalidateRepCodes(cctx);
- }
-
- /* compress */
- { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
- int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
- const BYTE* ip = (const BYTE*) job->src.start;
- BYTE* const ostart = (BYTE*)dstBuff.start;
- BYTE* op = ostart;
- BYTE* oend = op + dstBuff.capacity;
- int chunkNb;
- if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */
- DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
- assert(job->cSize == 0);
- for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
- size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
+ DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
+ ZSTD_invalidateRepCodes(cctx);
+ }
+
+ /* compress */
+ { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
+ int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
+ const BYTE* ip = (const BYTE*) job->src.start;
+ BYTE* const ostart = (BYTE*)dstBuff.start;
+ BYTE* op = ostart;
+ BYTE* oend = op + dstBuff.capacity;
+ int chunkNb;
+ if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */
+ DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
+ assert(job->cSize == 0);
+ for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
+ size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
- ip += chunkSize;
- op += cSize; assert(op < oend);
- /* stats */
- ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
- job->cSize += cSize;
- job->consumed = chunkSize * chunkNb;
- DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
- (U32)cSize, (U32)job->cSize);
-            ZSTD_pthread_cond_signal(&job->job_cond);   /* signals that some more data is ready to be flushed */
- ZSTD_pthread_mutex_unlock(&job->job_mutex);
- }
- /* last block */
+ ip += chunkSize;
+ op += cSize; assert(op < oend);
+ /* stats */
+ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+ job->cSize += cSize;
+ job->consumed = chunkSize * chunkNb;
+ DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
+ (U32)cSize, (U32)job->cSize);
+            ZSTD_pthread_cond_signal(&job->job_cond);   /* signals that some more data is ready to be flushed */
+ ZSTD_pthread_mutex_unlock(&job->job_mutex);
+ }
+ /* last block */
assert(chunkSize > 0);
assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
- if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
- size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
- size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
- size_t const cSize = (job->lastJob) ?
- ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) :
- ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
+ if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
+ size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
+ size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
+ size_t const cSize = (job->lastJob) ?
+ ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) :
+ ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
lastCBlockSize = cSize;
- } }
+ } }
if (!job->firstJob) {
/* Double check that we don't have an ext-dict, because then our
* repcode invalidation doesn't work.
@@ -769,51 +769,51 @@ static void ZSTDMT_compressionJob(void* jobDescription)
assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
}
ZSTD_CCtx_trace(cctx, 0);
-
-_endJob:
- ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
- if (job->prefix.size > 0)
- DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
- DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
- /* release resources */
- ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
- ZSTDMT_releaseCCtx(job->cctxPool, cctx);
- /* report */
- ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+
+_endJob:
+ ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
+ if (job->prefix.size > 0)
+ DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
+ DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
+ /* release resources */
+ ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
+ ZSTDMT_releaseCCtx(job->cctxPool, cctx);
+ /* report */
+ ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
job->cSize += lastCBlockSize;
job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */
- ZSTD_pthread_cond_signal(&job->job_cond);
- ZSTD_pthread_mutex_unlock(&job->job_mutex);
-}
-
-
-/* ------------------------------------------ */
-/* ===== Multi-threaded compression ===== */
-/* ------------------------------------------ */
-
-typedef struct {
- range_t prefix; /* read-only non-owned prefix buffer */
- buffer_t buffer;
- size_t filled;
-} inBuff_t;
-
-typedef struct {
- BYTE* buffer; /* The round input buffer. All jobs get references
- * to pieces of the buffer. ZSTDMT_tryGetInputRange()
- * handles handing out job input buffers, and makes
- * sure it doesn't overlap with any pieces still in use.
- */
- size_t capacity; /* The capacity of buffer. */
- size_t pos; /* The position of the current inBuff in the round
-                      * buffer. Updated past the end of the inBuff once
- * the inBuff is sent to the worker thread.
- * pos <= capacity.
- */
-} roundBuff_t;
-
-static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
-
+ ZSTD_pthread_cond_signal(&job->job_cond);
+ ZSTD_pthread_mutex_unlock(&job->job_mutex);
+}
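
The last-block computation above exploits the fact that chunkSize is a power of 2 (asserted just before): src.size & (chunkSize-1) is the remainder, and when the remainder is 0 while at least one full chunk was consumed, the last block is a full chunk. A standalone sketch with worked values, assuming chunkSize = 4*ZSTD_BLOCKSIZE_MAX = 512 KB:

    #include <stddef.h>

    /* returns the size of the final block, given a power-of-2 chunkSize */
    static size_t lastBlockSize(size_t srcSize, size_t chunkSize)
    {
        size_t const rem = srcSize & (chunkSize - 1);   /* srcSize % chunkSize */
        return (rem == 0 && srcSize >= chunkSize) ? chunkSize : rem;
    }
    /* lastBlockSize(1 MB, 512 KB)   -> 512 KB (exact multiple: last chunk is full)
     * lastBlockSize(700 KB, 512 KB) -> 188 KB (the remainder)
     * lastBlockSize(0, 512 KB)      -> 0     (empty last block, lastJob only) */
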
+
+
+/* ------------------------------------------ */
+/* ===== Multi-threaded compression ===== */
+/* ------------------------------------------ */
+
+typedef struct {
+ range_t prefix; /* read-only non-owned prefix buffer */
+ buffer_t buffer;
+ size_t filled;
+} inBuff_t;
+
+typedef struct {
+ BYTE* buffer; /* The round input buffer. All jobs get references
+ * to pieces of the buffer. ZSTDMT_tryGetInputRange()
+ * handles handing out job input buffers, and makes
+ * sure it doesn't overlap with any pieces still in use.
+ */
+ size_t capacity; /* The capacity of buffer. */
+ size_t pos; /* The position of the current inBuff in the round
+                      * buffer. Updated past the end of the inBuff once
+ * the inBuff is sent to the worker thread.
+ * pos <= capacity.
+ */
+} roundBuff_t;
+
+static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
+
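
roundBuff_t behaves like a ring of job input regions: pos advances while space remains at the tail, and wraps back to the front once the regions there are no longer referenced by in-flight jobs. An illustrative sketch of that hand-out policy (rbReserve and oldestInUse are hypothetical names; the real logic lives in ZSTDMT_tryGetInputRange(), not shown in this hunk):

    #include <stddef.h>

    typedef struct { unsigned char* buffer; size_t capacity; size_t pos; } rb_t;

    /* need       : bytes requested for the next job input
     * oldestInUse: offset of the oldest byte still referenced by a running job
     * returns NULL when the caller must wait for jobs to release their input */
    static unsigned char* rbReserve(rb_t* rb, size_t need, size_t oldestInUse)
    {
        if (rb->pos + need <= rb->capacity) {   /* fits at the tail */
            unsigned char* const r = rb->buffer + rb->pos;
            rb->pos += need;
            return r;
        }
        if (need <= oldestInUse) {              /* front regions were released */
            rb->pos = need;                     /* wrap to the start */
            return rb->buffer;
        }
        return NULL;
    }
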
#define RSYNC_LENGTH 32
/* Don't create chunks smaller than the zstd block size.
* This stops us from regressing compression ratio too much,
@@ -831,69 +831,69 @@ typedef struct {
U64 primePower;
} rsyncState_t;
-struct ZSTDMT_CCtx_s {
- POOL_ctx* factory;
- ZSTDMT_jobDescription* jobs;
- ZSTDMT_bufferPool* bufPool;
- ZSTDMT_CCtxPool* cctxPool;
- ZSTDMT_seqPool* seqPool;
- ZSTD_CCtx_params params;
- size_t targetSectionSize;
- size_t targetPrefixSize;
+struct ZSTDMT_CCtx_s {
+ POOL_ctx* factory;
+ ZSTDMT_jobDescription* jobs;
+ ZSTDMT_bufferPool* bufPool;
+ ZSTDMT_CCtxPool* cctxPool;
+ ZSTDMT_seqPool* seqPool;
+ ZSTD_CCtx_params params;
+ size_t targetSectionSize;
+ size_t targetPrefixSize;
int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
inBuff_t inBuff;
- roundBuff_t roundBuff;
- serialState_t serial;
+ roundBuff_t roundBuff;
+ serialState_t serial;
rsyncState_t rsync;
- unsigned jobIDMask;
- unsigned doneJobID;
- unsigned nextJobID;
- unsigned frameEnded;
- unsigned allJobsCompleted;
- unsigned long long frameContentSize;
- unsigned long long consumed;
- unsigned long long produced;
- ZSTD_customMem cMem;
- ZSTD_CDict* cdictLocal;
- const ZSTD_CDict* cdict;
+ unsigned jobIDMask;
+ unsigned doneJobID;
+ unsigned nextJobID;
+ unsigned frameEnded;
+ unsigned allJobsCompleted;
+ unsigned long long frameContentSize;
+ unsigned long long consumed;
+ unsigned long long produced;
+ ZSTD_customMem cMem;
+ ZSTD_CDict* cdictLocal;
+ const ZSTD_CDict* cdict;
unsigned providedFactory: 1;
-};
-
-static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
-{
- U32 jobNb;
- if (jobTable == NULL) return;
- for (jobNb=0; jobNb<nbJobs; jobNb++) {
- ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
- ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
- }
+};
+
+static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
+{
+ U32 jobNb;
+ if (jobTable == NULL) return;
+ for (jobNb=0; jobNb<nbJobs; jobNb++) {
+ ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
+ ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
+ }
ZSTD_customFree(jobTable, cMem);
-}
-
-/* ZSTDMT_createJobsTable()
- * allocates and inits a job table.
- * updates *nbJobsPtr to the next power of 2, used as the table size */
-static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
-{
- U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
- U32 const nbJobs = 1 << nbJobsLog2;
- U32 jobNb;
- ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
+}
+
+/* ZSTDMT_createJobsTable()
+ * allocates and inits a job table.
+ * updates *nbJobsPtr to the next power of 2, used as the table size */
+static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
+{
+ U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
+ U32 const nbJobs = 1 << nbJobsLog2;
+ U32 jobNb;
+ ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
- int initError = 0;
- if (jobTable==NULL) return NULL;
- *nbJobsPtr = nbJobs;
- for (jobNb=0; jobNb<nbJobs; jobNb++) {
- initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
- initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
- }
- if (initError != 0) {
- ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
- return NULL;
- }
- return jobTable;
-}
-
+ int initError = 0;
+ if (jobTable==NULL) return NULL;
+ *nbJobsPtr = nbJobs;
+ for (jobNb=0; jobNb<nbJobs; jobNb++) {
+ initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
+ initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
+ }
+ if (initError != 0) {
+ ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
+ return NULL;
+ }
+ return jobTable;
+}
+
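
ZSTDMT_createJobsTable() sizes the table as 1 << (ZSTD_highbit32(*nbJobsPtr) + 1), the next power of 2 strictly above the request, which is what lets nbJobs - 1 serve as jobIDMask. A sketch with a portable stand-in for ZSTD_highbit32() (floor of log2):

    static unsigned highbit32(unsigned v)   /* requires v > 0 */
    {
        unsigned r = 0;
        while (v >>= 1) r++;
        return r;
    }
    /* n=3 -> 1 << (1+1) = 4 ; n=5 -> 1 << (2+1) = 8 ; n=8 -> 1 << (3+1) = 16,
     * so (nbJobs & (nbJobs-1)) == 0 always holds, as asserted by the caller */
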
static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
U32 nbJobs = nbWorkers + 2;
if (nbJobs > mtctx->jobIDMask+1) { /* need more job capacity */
@@ -908,31 +908,31 @@ static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
}
-/* ZSTDMT_CCtxParam_setNbWorkers():
- * Internal use only */
+/* ZSTDMT_CCtxParam_setNbWorkers():
+ * Internal use only */
static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
-{
+{
return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
-}
-
+}
+
MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
-{
- ZSTDMT_CCtx* mtctx;
- U32 nbJobs = nbWorkers + 2;
- int initError;
- DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);
-
- if (nbWorkers < 1) return NULL;
- nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
- if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
- /* invalid custom allocator */
- return NULL;
-
+{
+ ZSTDMT_CCtx* mtctx;
+ U32 nbJobs = nbWorkers + 2;
+ int initError;
+ DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);
+
+ if (nbWorkers < 1) return NULL;
+ nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
+ if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
+ /* invalid custom allocator */
+ return NULL;
+
mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem);
- if (!mtctx) return NULL;
- ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
- mtctx->cMem = cMem;
- mtctx->allJobsCompleted = 1;
+ if (!mtctx) return NULL;
+ ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
+ mtctx->cMem = cMem;
+ mtctx->allJobsCompleted = 1;
if (pool != NULL) {
mtctx->factory = pool;
mtctx->providedFactory = 1;
@@ -941,22 +941,22 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers,
mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
mtctx->providedFactory = 0;
}
- mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
- assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */
- mtctx->jobIDMask = nbJobs - 1;
+ mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
+ assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */
+ mtctx->jobIDMask = nbJobs - 1;
mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
- mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
- mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
- initError = ZSTDMT_serialState_init(&mtctx->serial);
- mtctx->roundBuff = kNullRoundBuff;
- if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
- ZSTDMT_freeCCtx(mtctx);
- return NULL;
- }
- DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
- return mtctx;
-}
-
+ mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
+ mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
+ initError = ZSTDMT_serialState_init(&mtctx->serial);
+ mtctx->roundBuff = kNullRoundBuff;
+ if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
+ ZSTDMT_freeCCtx(mtctx);
+ return NULL;
+ }
+ DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
+ return mtctx;
+}
+
ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
{
#ifdef ZSTD_MULTITHREAD
@@ -969,78 +969,78 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem,
#endif
}
-
-/* ZSTDMT_releaseAllJobResources() :
- * note : ensure all workers are killed first ! */
-static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
-{
- unsigned jobID;
- DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
- for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
+
+/* ZSTDMT_releaseAllJobResources() :
+ * note : ensure all workers are killed first ! */
+static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
+{
+ unsigned jobID;
+ DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
+ for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
/* Copy the mutex/cond out */
ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;
- DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
- ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
+ DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
+ ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
/* Clear the job description, but keep the mutex/cond */
ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
mtctx->jobs[jobID].job_mutex = mutex;
mtctx->jobs[jobID].job_cond = cond;
- }
- mtctx->inBuff.buffer = g_nullBuffer;
- mtctx->inBuff.filled = 0;
- mtctx->allJobsCompleted = 1;
-}
-
-static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
-{
- DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
- while (mtctx->doneJobID < mtctx->nextJobID) {
- unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
- ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
- while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
+ }
+ mtctx->inBuff.buffer = g_nullBuffer;
+ mtctx->inBuff.filled = 0;
+ mtctx->allJobsCompleted = 1;
+}
+
+static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
+{
+ DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
+ while (mtctx->doneJobID < mtctx->nextJobID) {
+ unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
+ ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
+ while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */
- ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
- }
- ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
- mtctx->doneJobID++;
- }
-}
-
-size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
-{
- if (mtctx==NULL) return 0; /* compatible with free on NULL */
+ ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
+ }
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
+ mtctx->doneJobID++;
+ }
+}
+
+size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
+{
+ if (mtctx==NULL) return 0; /* compatible with free on NULL */
if (!mtctx->providedFactory)
POOL_free(mtctx->factory); /* stop and free worker threads */
- ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */
- ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
- ZSTDMT_freeBufferPool(mtctx->bufPool);
- ZSTDMT_freeCCtxPool(mtctx->cctxPool);
- ZSTDMT_freeSeqPool(mtctx->seqPool);
- ZSTDMT_serialState_free(&mtctx->serial);
- ZSTD_freeCDict(mtctx->cdictLocal);
- if (mtctx->roundBuff.buffer)
+ ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */
+ ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
+ ZSTDMT_freeBufferPool(mtctx->bufPool);
+ ZSTDMT_freeCCtxPool(mtctx->cctxPool);
+ ZSTDMT_freeSeqPool(mtctx->seqPool);
+ ZSTDMT_serialState_free(&mtctx->serial);
+ ZSTD_freeCDict(mtctx->cdictLocal);
+ if (mtctx->roundBuff.buffer)
ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
ZSTD_customFree(mtctx, mtctx->cMem);
- return 0;
-}
-
-size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
-{
- if (mtctx == NULL) return 0; /* supports sizeof NULL */
- return sizeof(*mtctx)
- + POOL_sizeof(mtctx->factory)
- + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
- + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
- + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
- + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
- + ZSTD_sizeof_CDict(mtctx->cdictLocal)
- + mtctx->roundBuff.capacity;
-}
-
-
+ return 0;
+}
+
+size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
+{
+ if (mtctx == NULL) return 0; /* supports sizeof NULL */
+ return sizeof(*mtctx)
+ + POOL_sizeof(mtctx->factory)
+ + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
+ + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
+ + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
+ + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
+ + ZSTD_sizeof_CDict(mtctx->cdictLocal)
+ + mtctx->roundBuff.capacity;
+}
+
+
/* ZSTDMT_resize() :
* @return : error code if fails, 0 on success */
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
@@ -1055,63 +1055,63 @@ static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
return 0;
-}
-
+}
+
-/*! ZSTDMT_updateCParams_whileCompressing() :
+/*! ZSTDMT_updateCParams_whileCompressing() :
* Updates a selected set of compression parameters, remaining compatible with currently active frame.
- * New parameters will be applied to next compression job. */
-void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
-{
- U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */
- int const compressionLevel = cctxParams->compressionLevel;
- DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
- compressionLevel);
- mtctx->params.compressionLevel = compressionLevel;
+ * New parameters will be applied to next compression job. */
+void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
+{
+ U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */
+ int const compressionLevel = cctxParams->compressionLevel;
+ DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
+ compressionLevel);
+ mtctx->params.compressionLevel = compressionLevel;
{ ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
- cParams.windowLog = saved_wlog;
- mtctx->params.cParams = cParams;
- }
-}
-
-/* ZSTDMT_getFrameProgression():
- * tells how much data has been consumed (input) and produced (output) for the current frame.
- * It is able to count progression inside worker threads.
+ cParams.windowLog = saved_wlog;
+ mtctx->params.cParams = cParams;
+ }
+}
+
+/* ZSTDMT_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for the current frame.
+ * It is able to count progression inside worker threads.
* Note : mutex will be acquired during statistics collection inside workers. */
-ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
-{
- ZSTD_frameProgression fps;
+ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
+{
+ ZSTD_frameProgression fps;
DEBUGLOG(5, "ZSTDMT_getFrameProgression");
fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
- fps.consumed = mtctx->consumed;
+ fps.consumed = mtctx->consumed;
fps.produced = fps.flushed = mtctx->produced;
fps.currentJobID = mtctx->nextJobID;
fps.nbActiveWorkers = 0;
- { unsigned jobNb;
- unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
- DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
-            mtctx->doneJobID, lastJobNb, mtctx->jobReady);
- for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
- unsigned const wJobID = jobNb & mtctx->jobIDMask;
+ { unsigned jobNb;
+ unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
+ DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
+            mtctx->doneJobID, lastJobNb, mtctx->jobReady);
+ for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
+ unsigned const wJobID = jobNb & mtctx->jobIDMask;
ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
{ size_t const cResult = jobPtr->cSize;
- size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
+ size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
assert(flushed <= produced);
fps.ingested += jobPtr->src.size;
fps.consumed += jobPtr->consumed;
- fps.produced += produced;
+ fps.produced += produced;
fps.flushed += flushed;
fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
- }
- ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
- }
- }
- return fps;
-}
-
-
+ }
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+ }
+ }
+ return fps;
+}
+
+
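
Applications normally read these statistics through the public experimental API rather than this internal entry point. A usage sketch, assuming zstd built with multithreading and ZSTD_STATIC_LINKING_ONLY defined before including zstd.h (where the experimental ZSTD_getFrameProgression() is declared):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdio.h>

    /* poll an active compression context, e.g. from the thread driving the stream */
    static void reportProgress(const ZSTD_CCtx* cctx)
    {
        ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
        fprintf(stderr, "ingested=%llu consumed=%llu produced=%llu workers=%u\n",
                fp.ingested, fp.consumed, fp.produced, fp.nbActiveWorkers);
    }
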
size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
{
size_t toFlush;
@@ -1145,12 +1145,12 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
}
-/* ------------------------------------------ */
-/* ===== Multi-threaded compression ===== */
-/* ------------------------------------------ */
-
+/* ------------------------------------------ */
+/* ===== Multi-threaded compression ===== */
+/* ------------------------------------------ */
+
static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
-{
+{
unsigned jobLog;
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* In Long Range Mode, the windowLog is typically oversized.
@@ -1161,10 +1161,10 @@ static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
jobLog = MAX(20, params->cParams.windowLog + 2);
}
return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
-}
-
+}
+
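
For reference, the non-LDM branch above gives jobLog = MAX(20, windowLog + 2), capped at ZSTDMT_JOBLOG_MAX, and the target job size is 1 << jobLog; for example:

    /* windowLog = 17 -> jobLog = MAX(20, 19) = 20 -> job size = 1 MB
     * windowLog = 23 -> jobLog = MAX(20, 25) = 25 -> job size = 32 MB */
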
static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
-{
+{
switch(strat)
{
case ZSTD_btultra2:
@@ -1182,8 +1182,8 @@ static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
default:;
}
return 6;
-}
-
+}
+
static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
{
assert(0 <= ovlog && ovlog <= 9);
@@ -1210,56 +1210,56 @@ static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
return (ovLog==0) ? 0 : (size_t)1 << ovLog;
}
-/* ====================================== */
-/* ======= Streaming API ======= */
-/* ====================================== */
-
-size_t ZSTDMT_initCStream_internal(
- ZSTDMT_CCtx* mtctx,
- const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
- const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
- unsigned long long pledgedSrcSize)
-{
+/* ====================================== */
+/* ======= Streaming API ======= */
+/* ====================================== */
+
+size_t ZSTDMT_initCStream_internal(
+ ZSTDMT_CCtx* mtctx,
+ const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
+ unsigned long long pledgedSrcSize)
+{
DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
(U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
    /* params supposed partially or fully validated at this point */
- assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
- assert(!((dict) && (cdict))); /* either dict or cdict, not both */
-
- /* init */
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+
+ /* init */
if (params.nbWorkers != mtctx->params.nbWorkers)
FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , "");
-
+
if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
- DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
-
- if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */
- ZSTDMT_waitForAllJobsCompleted(mtctx);
- ZSTDMT_releaseAllJobResources(mtctx);
- mtctx->allJobsCompleted = 1;
- }
-
- mtctx->params = params;
- mtctx->frameContentSize = pledgedSrcSize;
- if (dict) {
- ZSTD_freeCDict(mtctx->cdictLocal);
- mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
- ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
- params.cParams, mtctx->cMem);
- mtctx->cdict = mtctx->cdictLocal;
- if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
- } else {
- ZSTD_freeCDict(mtctx->cdictLocal);
- mtctx->cdictLocal = NULL;
- mtctx->cdict = cdict;
- }
-
+ DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
+
+ if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */
+ ZSTDMT_waitForAllJobsCompleted(mtctx);
+ ZSTDMT_releaseAllJobResources(mtctx);
+ mtctx->allJobsCompleted = 1;
+ }
+
+ mtctx->params = params;
+ mtctx->frameContentSize = pledgedSrcSize;
+ if (dict) {
+ ZSTD_freeCDict(mtctx->cdictLocal);
+ mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
+ params.cParams, mtctx->cMem);
+ mtctx->cdict = mtctx->cdictLocal;
+ if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
+ } else {
+ ZSTD_freeCDict(mtctx->cdictLocal);
+ mtctx->cdictLocal = NULL;
+ mtctx->cdict = cdict;
+ }
+
mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
- mtctx->targetSectionSize = params.jobSize;
+ mtctx->targetSectionSize = params.jobSize;
if (mtctx->targetSectionSize == 0) {
mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
}
@@ -1277,292 +1277,292 @@ size_t ZSTDMT_initCStream_internal(
mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
}
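
With rsyncable mode enabled, job boundaries become content-defined: a rolling hash over the last RSYNC_LENGTH bytes is tested against hitMask, so a boundary fires with probability about 2^-rsyncBits, i.e. on average once every targetSectionSize bytes, which keeps output blocks stable across small input edits. A sketch of the hit test (isBoundary is an illustrative helper, not zstd's):

    #include <stdint.h>

    static int isBoundary(uint64_t rollingHash, uint64_t hitMask)
    {
        return (rollingHash & hitMask) == hitMask;   /* all masked bits set */
    }
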
- if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */
+ if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */
DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
- DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
- ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
- {
- /* If ldm is enabled we need windowSize space. */
+ DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
+ ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
+ {
+ /* If ldm is enabled we need windowSize space. */
size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0;
- /* Two buffers of slack, plus extra space for the overlap
- * This is the minimum slack that LDM works with. One extra because
- * flush might waste up to targetSectionSize-1 bytes. Another extra
- * for the overlap (if > 0), then one to fill which doesn't overlap
- * with the LDM window.
- */
- size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
- size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
- /* Compute the total size, and always have enough slack */
- size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
- size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
- size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
- if (mtctx->roundBuff.capacity < capacity) {
- if (mtctx->roundBuff.buffer)
+ /* Two buffers of slack, plus extra space for the overlap
+ * This is the minimum slack that LDM works with. One extra because
+ * flush might waste up to targetSectionSize-1 bytes. Another extra
+ * for the overlap (if > 0), then one to fill which doesn't overlap
+ * with the LDM window.
+ */
+ size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
+ size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
+ /* Compute the total size, and always have enough slack */
+ size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
+ size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
+ size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
+ if (mtctx->roundBuff.capacity < capacity) {
+ if (mtctx->roundBuff.buffer)
ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem);
- if (mtctx->roundBuff.buffer == NULL) {
- mtctx->roundBuff.capacity = 0;
- return ERROR(memory_allocation);
- }
- mtctx->roundBuff.capacity = capacity;
- }
- }
- DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
- mtctx->roundBuff.pos = 0;
- mtctx->inBuff.buffer = g_nullBuffer;
- mtctx->inBuff.filled = 0;
- mtctx->inBuff.prefix = kNullRange;
- mtctx->doneJobID = 0;
- mtctx->nextJobID = 0;
- mtctx->frameEnded = 0;
- mtctx->allJobsCompleted = 0;
- mtctx->consumed = 0;
- mtctx->produced = 0;
+ if (mtctx->roundBuff.buffer == NULL) {
+ mtctx->roundBuff.capacity = 0;
+ return ERROR(memory_allocation);
+ }
+ mtctx->roundBuff.capacity = capacity;
+ }
+ }
+ DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
+ mtctx->roundBuff.pos = 0;
+ mtctx->inBuff.buffer = g_nullBuffer;
+ mtctx->inBuff.filled = 0;
+ mtctx->inBuff.prefix = kNullRange;
+ mtctx->doneJobID = 0;
+ mtctx->nextJobID = 0;
+ mtctx->frameEnded = 0;
+ mtctx->allJobsCompleted = 0;
+ mtctx->consumed = 0;
+ mtctx->produced = 0;
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize,
dict, dictSize, dictContentType))
- return ERROR(memory_allocation);
- return 0;
-}
-
-
-/* ZSTDMT_writeLastEmptyBlock()
- * Write a single empty block with an end-of-frame to finish a frame.
- * Job must be created from the streaming variant.
+ return ERROR(memory_allocation);
+ return 0;
+}
+
+
+/* ZSTDMT_writeLastEmptyBlock()
+ * Write a single empty block with an end-of-frame to finish a frame.
+ * Job must be created from the streaming variant.
* This function is always successful if expected conditions are fulfilled.
- */
-static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
-{
- assert(job->lastJob == 1);
- assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */
- assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */
- assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
- job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
- if (job->dstBuff.start == NULL) {
- job->cSize = ERROR(memory_allocation);
- return;
- }
- assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */
- job->src = kNullRange;
- job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
- assert(!ZSTD_isError(job->cSize));
- assert(job->consumed == 0);
-}
-
-static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
-{
- unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
- int const endFrame = (endOp == ZSTD_e_end);
-
- if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
- DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
- assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
- return 0;
- }
-
- if (!mtctx->jobReady) {
- BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
- DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
- mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
- mtctx->jobs[jobID].src.start = src;
- mtctx->jobs[jobID].src.size = srcSize;
- assert(mtctx->inBuff.filled >= srcSize);
- mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
- mtctx->jobs[jobID].consumed = 0;
- mtctx->jobs[jobID].cSize = 0;
- mtctx->jobs[jobID].params = mtctx->params;
- mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
- mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
- mtctx->jobs[jobID].dstBuff = g_nullBuffer;
- mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
- mtctx->jobs[jobID].bufPool = mtctx->bufPool;
- mtctx->jobs[jobID].seqPool = mtctx->seqPool;
- mtctx->jobs[jobID].serial = &mtctx->serial;
- mtctx->jobs[jobID].jobID = mtctx->nextJobID;
- mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
- mtctx->jobs[jobID].lastJob = endFrame;
+ */
+static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
+{
+ assert(job->lastJob == 1);
+ assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */
+ assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */
+ assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
+ job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
+ if (job->dstBuff.start == NULL) {
+ job->cSize = ERROR(memory_allocation);
+ return;
+ }
+ assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */
+ job->src = kNullRange;
+ job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
+ assert(!ZSTD_isError(job->cSize));
+ assert(job->consumed == 0);
+}
+
+static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
+{
+ unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
+ int const endFrame = (endOp == ZSTD_e_end);
+
+ if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
+ assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
+ return 0;
+ }
+
+ if (!mtctx->jobReady) {
+ BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
+ mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
+ mtctx->jobs[jobID].src.start = src;
+ mtctx->jobs[jobID].src.size = srcSize;
+ assert(mtctx->inBuff.filled >= srcSize);
+ mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
+ mtctx->jobs[jobID].consumed = 0;
+ mtctx->jobs[jobID].cSize = 0;
+ mtctx->jobs[jobID].params = mtctx->params;
+ mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
+ mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
+ mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+ mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
+ mtctx->jobs[jobID].bufPool = mtctx->bufPool;
+ mtctx->jobs[jobID].seqPool = mtctx->seqPool;
+ mtctx->jobs[jobID].serial = &mtctx->serial;
+ mtctx->jobs[jobID].jobID = mtctx->nextJobID;
+ mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
+ mtctx->jobs[jobID].lastJob = endFrame;
mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
- mtctx->jobs[jobID].dstFlushed = 0;
-
- /* Update the round buffer pos and clear the input buffer to be reset */
- mtctx->roundBuff.pos += srcSize;
- mtctx->inBuff.buffer = g_nullBuffer;
- mtctx->inBuff.filled = 0;
- /* Set the prefix */
- if (!endFrame) {
- size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
- mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
- mtctx->inBuff.prefix.size = newPrefixSize;
- } else { /* endFrame==1 => no need for another input buffer */
- mtctx->inBuff.prefix = kNullRange;
- mtctx->frameEnded = endFrame;
- if (mtctx->nextJobID == 0) {
- /* single job exception : checksum is already calculated directly within worker thread */
- mtctx->params.fParams.checksumFlag = 0;
- } }
-
- if ( (srcSize == 0)
- && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
- DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
- assert(endOp == ZSTD_e_end); /* only possible case : need to end the frame with an empty last block */
- ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
- mtctx->nextJobID++;
- return 0;
- }
- }
-
- DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))",
- mtctx->nextJobID,
- (U32)mtctx->jobs[jobID].src.size,
- mtctx->jobs[jobID].lastJob,
- mtctx->nextJobID,
- jobID);
- if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
- mtctx->nextJobID++;
- mtctx->jobReady = 0;
- } else {
- DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
- mtctx->jobReady = 1;
- }
- return 0;
-}
-
-
-/*! ZSTDMT_flushProduced() :
+ mtctx->jobs[jobID].dstFlushed = 0;
+
+ /* Update the round buffer pos and clear the input buffer to be reset */
+ mtctx->roundBuff.pos += srcSize;
+ mtctx->inBuff.buffer = g_nullBuffer;
+ mtctx->inBuff.filled = 0;
+ /* Set the prefix */
+ if (!endFrame) {
+ size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
+ mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
+ mtctx->inBuff.prefix.size = newPrefixSize;
+ } else { /* endFrame==1 => no need for another input buffer */
+ mtctx->inBuff.prefix = kNullRange;
+ mtctx->frameEnded = endFrame;
+ if (mtctx->nextJobID == 0) {
+ /* single job exception : checksum is already calculated directly within worker thread */
+ mtctx->params.fParams.checksumFlag = 0;
+ } }
+
+ if ( (srcSize == 0)
+ && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
+ assert(endOp == ZSTD_e_end); /* only possible case : need to end the frame with an empty last block */
+ ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
+ mtctx->nextJobID++;
+ return 0;
+ }
+ }
+
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))",
+ mtctx->nextJobID,
+ (U32)mtctx->jobs[jobID].src.size,
+ mtctx->jobs[jobID].lastJob,
+ mtctx->nextJobID,
+ jobID);
+ if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
+ mtctx->nextJobID++;
+ mtctx->jobReady = 0;
+ } else {
+ DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
+ mtctx->jobReady = 1;
+ }
+ return 0;
+}
+
+
+/*! ZSTDMT_flushProduced() :
* flush whatever data has been produced but not yet flushed in current job.
* move to next job if current one is fully flushed.
- * `output` : `pos` will be updated with the amount of data flushed.
- * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush.
- * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
-static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
-{
- unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
- DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
- blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
- assert(output->size >= output->pos);
-
- ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
- if ( blockToFlush
- && (mtctx->doneJobID < mtctx->nextJobID) ) {
- assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
- while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */
- if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
- DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
- mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
- break;
- }
- DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
- mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
- ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex); /* block when nothing to flush but some to come */
- } }
-
- /* try to flush something */
- { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */
- size_t const srcConsumed = mtctx->jobs[wJobID].consumed; /* shared */
+ * `output` : `pos` will be updated with the amount of data flushed.
+ * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush.
+ * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
+static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
+{
+ unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
+ DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
+ blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
+ assert(output->size >= output->pos);
+
+ ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
+ if ( blockToFlush
+ && (mtctx->doneJobID < mtctx->nextJobID) ) {
+ assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
+ while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */
+ if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
+ DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
+ mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
+ break;
+ }
+ DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
+ mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
+ ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex); /* block when nothing to flush but some to come */
+ } }
+
+ /* try to flush something */
+ { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */
+ size_t const srcConsumed = mtctx->jobs[wJobID].consumed; /* shared */
size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */
- ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
- if (ZSTD_isError(cSize)) {
- DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
- mtctx->doneJobID, ZSTD_getErrorName(cSize));
- ZSTDMT_waitForAllJobsCompleted(mtctx);
- ZSTDMT_releaseAllJobResources(mtctx);
- return cSize;
- }
- /* add frame checksum if necessary (can only happen once) */
- assert(srcConsumed <= srcSize);
- if ( (srcConsumed == srcSize) /* job completed -> worker no longer active */
- && mtctx->jobs[wJobID].frameChecksumNeeded ) {
- U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
- DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
- MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
- cSize += 4;
- mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */
- mtctx->jobs[wJobID].frameChecksumNeeded = 0;
- }
-
- if (cSize > 0) { /* compression is ongoing or completed */
- size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
- DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
- (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
- assert(mtctx->doneJobID < mtctx->nextJobID);
- assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
- assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+ if (ZSTD_isError(cSize)) {
+ DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
+ mtctx->doneJobID, ZSTD_getErrorName(cSize));
+ ZSTDMT_waitForAllJobsCompleted(mtctx);
+ ZSTDMT_releaseAllJobResources(mtctx);
+ return cSize;
+ }
+ /* add frame checksum if necessary (can only happen once) */
+ assert(srcConsumed <= srcSize);
+ if ( (srcConsumed == srcSize) /* job completed -> worker no longer active */
+ && mtctx->jobs[wJobID].frameChecksumNeeded ) {
+ U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
+ DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
+ MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
+ cSize += 4;
+ mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */
+ mtctx->jobs[wJobID].frameChecksumNeeded = 0;
+ }
+
+ if (cSize > 0) { /* compression is ongoing or completed */
+ size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
+ DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
+ (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
+ assert(mtctx->doneJobID < mtctx->nextJobID);
+ assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
+ assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
if (toFlush > 0) {
ZSTD_memcpy((char*)output->dst + output->pos,
(const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
toFlush);
}
- output->pos += toFlush;
- mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */
-
+ output->pos += toFlush;
+ mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */
+
if ( (srcConsumed == srcSize) /* job is completed */
- && (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */
- DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
- mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
- ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
+ && (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */
+ DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
+ mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
+ ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
DEBUGLOG(5, "dstBuffer released");
- mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
- mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */
- mtctx->consumed += srcSize;
- mtctx->produced += cSize;
- mtctx->doneJobID++;
- } }
-
- /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
- if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
- if (srcSize > srcConsumed) return 1; /* current job not completely compressed */
- }
- if (mtctx->doneJobID < mtctx->nextJobID) return 1; /* some more jobs ongoing */
- if (mtctx->jobReady) return 1; /* one job is ready to push, just not yet in the list */
- if (mtctx->inBuff.filled > 0) return 1; /* input is not empty, and still needs to be converted into a job */
- mtctx->allJobsCompleted = mtctx->frameEnded; /* all jobs are entirely flushed => if this one is last one, frame is completed */
- if (end == ZSTD_e_end) return !mtctx->frameEnded; /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
- return 0; /* internal buffers fully flushed */
-}
-
-/**
- * Returns the range of data used by the earliest job that is not yet complete.
- * If the data of the first job is broken up into two segments, we cover both
- * sections.
- */
-static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
-{
- unsigned const firstJobID = mtctx->doneJobID;
- unsigned const lastJobID = mtctx->nextJobID;
- unsigned jobID;
-
- for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
- unsigned const wJobID = jobID & mtctx->jobIDMask;
- size_t consumed;
-
- ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
- consumed = mtctx->jobs[wJobID].consumed;
- ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
-
- if (consumed < mtctx->jobs[wJobID].src.size) {
- range_t range = mtctx->jobs[wJobID].prefix;
- if (range.size == 0) {
- /* Empty prefix */
- range = mtctx->jobs[wJobID].src;
- }
- /* Job source in multiple segments not supported yet */
- assert(range.start <= mtctx->jobs[wJobID].src.start);
- return range;
- }
- }
- return kNullRange;
-}
-
-/**
- * Returns non-zero iff buffer and range overlap.
- */
-static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
-{
- BYTE const* const bufferStart = (BYTE const*)buffer.start;
- BYTE const* const rangeStart = (BYTE const*)range.start;
-
- if (rangeStart == NULL || bufferStart == NULL)
- return 0;
-
+ mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
+ mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */
+ mtctx->consumed += srcSize;
+ mtctx->produced += cSize;
+ mtctx->doneJobID++;
+ } }
+
+ /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
+ if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
+ if (srcSize > srcConsumed) return 1; /* current job not completely compressed */
+ }
+ if (mtctx->doneJobID < mtctx->nextJobID) return 1; /* some more jobs ongoing */
+ if (mtctx->jobReady) return 1; /* one job is ready to push, just not yet in the list */
+ if (mtctx->inBuff.filled > 0) return 1; /* input is not empty, and still needs to be converted into a job */
+ mtctx->allJobsCompleted = mtctx->frameEnded; /* all jobs are entirely flushed => if this one is last one, frame is completed */
+ if (end == ZSTD_e_end) return !mtctx->frameEnded; /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
+ return 0; /* internal buffers fully flushed */
+}
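+
+/* Caller-side sketch of the contract above (editor's illustration, not part
+ * of the patch): 0 means internal buffers are fully flushed, 1 means more
+ * data is known to exist but its size is unknown, and any other non-error
+ * value is the exact byte count still buffered. The helper name is
+ * hypothetical. */
+#if 0   /* illustration only */
+static size_t drainProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* out)
+{
+    size_t remaining;
+    do {
+        remaining = ZSTDMT_flushProduced(mtctx, out, /*blockToFlush*/ 1, ZSTD_e_flush);
+        if (ZSTD_isError(remaining)) return remaining;   /* propagate error code */
+    } while (remaining != 0 && out->pos < out->size);    /* stop when done or out is full */
+    return remaining;
+}
+#endif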
+
+/**
+ * Returns the range of data used by the earliest job that is not yet complete.
+ * If the data of the first job is broken up into two segments, we cover both
+ * sections.
+ */
+static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
+{
+ unsigned const firstJobID = mtctx->doneJobID;
+ unsigned const lastJobID = mtctx->nextJobID;
+ unsigned jobID;
+
+ for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
+ unsigned const wJobID = jobID & mtctx->jobIDMask;
+ size_t consumed;
+
+ ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
+ consumed = mtctx->jobs[wJobID].consumed;
+ ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+
+ if (consumed < mtctx->jobs[wJobID].src.size) {
+ range_t range = mtctx->jobs[wJobID].prefix;
+ if (range.size == 0) {
+ /* Empty prefix */
+ range = mtctx->jobs[wJobID].src;
+ }
+ /* Job source in multiple segments not supported yet */
+ assert(range.start <= mtctx->jobs[wJobID].src.start);
+ return range;
+ }
+ }
+ return kNullRange;
+}
+
+/**
+ * Returns non-zero iff buffer and range overlap.
+ */
+static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
+{
+ BYTE const* const bufferStart = (BYTE const*)buffer.start;
+ BYTE const* const rangeStart = (BYTE const*)range.start;
+
+ if (rangeStart == NULL || bufferStart == NULL)
+ return 0;
+
{
BYTE const* const bufferEnd = bufferStart + buffer.capacity;
BYTE const* const rangeEnd = rangeStart + range.size;
@@ -1573,112 +1573,112 @@ static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
return bufferStart < rangeEnd && rangeStart < bufferEnd;
}
-}
-
-static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
-{
- range_t extDict;
- range_t prefix;
-
+}
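+
+/* Worked example of the half-open overlap test above (editor's illustration):
+ * buffer = [100, 200), range = [150, 250): 100 < 250 and 150 < 200, so they
+ * overlap. With range = [200, 250), the test 200 < 200 fails: segments that
+ * merely touch end-to-start are correctly reported as non-overlapping. */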
+
+static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
+{
+ range_t extDict;
+ range_t prefix;
+
DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
- extDict.start = window.dictBase + window.lowLimit;
- extDict.size = window.dictLimit - window.lowLimit;
-
- prefix.start = window.base + window.dictLimit;
- prefix.size = window.nextSrc - (window.base + window.dictLimit);
- DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
- (size_t)extDict.start,
- (size_t)extDict.start + extDict.size);
- DEBUGLOG(5, "prefix [0x%zx, 0x%zx)",
- (size_t)prefix.start,
- (size_t)prefix.start + prefix.size);
-
- return ZSTDMT_isOverlapped(buffer, extDict)
- || ZSTDMT_isOverlapped(buffer, prefix);
-}
-
-static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
-{
+ extDict.start = window.dictBase + window.lowLimit;
+ extDict.size = window.dictLimit - window.lowLimit;
+
+ prefix.start = window.base + window.dictLimit;
+ prefix.size = window.nextSrc - (window.base + window.dictLimit);
+ DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
+ (size_t)extDict.start,
+ (size_t)extDict.start + extDict.size);
+ DEBUGLOG(5, "prefix [0x%zx, 0x%zx)",
+ (size_t)prefix.start,
+ (size_t)prefix.start + prefix.size);
+
+ return ZSTDMT_isOverlapped(buffer, extDict)
+ || ZSTDMT_isOverlapped(buffer, prefix);
+}
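+
+/* The two ranges above, spelled out (editor's note): extDict covers
+ * [dictBase + lowLimit, dictBase + dictLimit), the older out-of-line
+ * dictionary segment, while prefix covers [base + dictLimit, nextSrc),
+ * the segment contiguous with current input. The buffer conflicts with
+ * the LDM window iff it overlaps either of the two. */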
+
+static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
+{
if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) {
- ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
+ ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
- DEBUGLOG(5, "source [0x%zx, 0x%zx)",
- (size_t)buffer.start,
- (size_t)buffer.start + buffer.capacity);
- ZSTD_PTHREAD_MUTEX_LOCK(mutex);
- while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
+ DEBUGLOG(5, "source [0x%zx, 0x%zx)",
+ (size_t)buffer.start,
+ (size_t)buffer.start + buffer.capacity);
+ ZSTD_PTHREAD_MUTEX_LOCK(mutex);
+ while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
DEBUGLOG(5, "Waiting for LDM to finish...");
- ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
- }
- DEBUGLOG(6, "Done waiting for LDM to finish");
- ZSTD_pthread_mutex_unlock(mutex);
- }
-}
-
-/**
- * Attempts to set the inBuff to the next section to fill.
- * If any part of the new section is still in use we give up.
- * Returns non-zero if the buffer is filled.
- */
-static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
-{
- range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
- size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
- size_t const target = mtctx->targetSectionSize;
- buffer_t buffer;
-
+ ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
+ }
+ DEBUGLOG(6, "Done waiting for LDM to finish");
+ ZSTD_pthread_mutex_unlock(mutex);
+ }
+}
+
+/**
+ * Attempts to set the inBuff to the next section to fill.
+ * If any part of the new section is still in use we give up.
+ * Returns non-zero if the buffer is filled.
+ */
+static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
+{
+ range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
+ size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
+ size_t const target = mtctx->targetSectionSize;
+ buffer_t buffer;
+
DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
- assert(mtctx->inBuff.buffer.start == NULL);
- assert(mtctx->roundBuff.capacity >= target);
-
- if (spaceLeft < target) {
- /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
- * Simply copy the prefix to the beginning in that case.
- */
- BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
- size_t const prefixSize = mtctx->inBuff.prefix.size;
-
- buffer.start = start;
- buffer.capacity = prefixSize;
- if (ZSTDMT_isOverlapped(buffer, inUse)) {
+ assert(mtctx->inBuff.buffer.start == NULL);
+ assert(mtctx->roundBuff.capacity >= target);
+
+ if (spaceLeft < target) {
+ /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
+ * Simply copy the prefix to the beginning in that case.
+ */
+ BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
+ size_t const prefixSize = mtctx->inBuff.prefix.size;
+
+ buffer.start = start;
+ buffer.capacity = prefixSize;
+ if (ZSTDMT_isOverlapped(buffer, inUse)) {
DEBUGLOG(5, "Waiting for buffer...");
- return 0;
- }
- ZSTDMT_waitForLdmComplete(mtctx, buffer);
+ return 0;
+ }
+ ZSTDMT_waitForLdmComplete(mtctx, buffer);
ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize);
- mtctx->inBuff.prefix.start = start;
- mtctx->roundBuff.pos = prefixSize;
- }
- buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
- buffer.capacity = target;
-
- if (ZSTDMT_isOverlapped(buffer, inUse)) {
+ mtctx->inBuff.prefix.start = start;
+ mtctx->roundBuff.pos = prefixSize;
+ }
+ buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
+ buffer.capacity = target;
+
+ if (ZSTDMT_isOverlapped(buffer, inUse)) {
DEBUGLOG(5, "Waiting for buffer...");
- return 0;
- }
- assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
-
- ZSTDMT_waitForLdmComplete(mtctx, buffer);
-
- DEBUGLOG(5, "Using prefix range [%zx, %zx)",
- (size_t)mtctx->inBuff.prefix.start,
- (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
- DEBUGLOG(5, "Using source range [%zx, %zx)",
- (size_t)buffer.start,
- (size_t)buffer.start + buffer.capacity);
-
-
- mtctx->inBuff.buffer = buffer;
- mtctx->inBuff.filled = 0;
- assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
- return 1;
-}
-
+ return 0;
+ }
+ assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
+
+ ZSTDMT_waitForLdmComplete(mtctx, buffer);
+
+ DEBUGLOG(5, "Using prefix range [%zx, %zx)",
+ (size_t)mtctx->inBuff.prefix.start,
+ (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
+ DEBUGLOG(5, "Using source range [%zx, %zx)",
+ (size_t)buffer.start,
+ (size_t)buffer.start + buffer.capacity);
+
+
+ mtctx->inBuff.buffer = buffer;
+ mtctx->inBuff.filled = 0;
+ assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
+ return 1;
+}
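+
+/* Round-buffer arithmetic above, with illustrative numbers (editor's
+ * sketch): capacity = 1024, pos = 900, targetSectionSize = 256,
+ * prefix.size = 128. spaceLeft = 1024 - 900 = 124 < 256, so the prefix is
+ * memmove'd to the front, pos becomes 128, and the next input range is
+ * [128, 384) - provided no live job still reads either region, which the
+ * ZSTDMT_isOverlapped() checks guard against. */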
+
typedef struct {
size_t toLoad; /* The number of bytes to load from the input. */
int flush; /* Boolean declaring if we must flush because we found a synchronization point. */
} syncPoint_t;
-
+
/**
* Searches through the input for a synchronization point. If one is found, we
* will instruct the caller to flush, and return the number of bytes to load.
@@ -1782,54 +1782,54 @@ size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
return hintInSize;
}
-/** ZSTDMT_compressStream_generic() :
- * internal use only - exposed to be invoked from zstd_compress.c
- * assumption : output and input are valid (pos <= size)
- * @return : minimum amount of data remaining to flush, 0 if none */
-size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
- ZSTD_outBuffer* output,
- ZSTD_inBuffer* input,
- ZSTD_EndDirective endOp)
-{
- unsigned forwardInputProgress = 0;
- DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
- (U32)endOp, (U32)(input->size - input->pos));
- assert(output->pos <= output->size);
- assert(input->pos <= input->size);
-
- if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
- /* current frame being ended. Only flush/end are allowed */
- return ERROR(stage_wrong);
- }
-
- /* fill input buffer */
- if ( (!mtctx->jobReady)
- && (input->size > input->pos) ) { /* support NULL input */
- if (mtctx->inBuff.buffer.start == NULL) {
- assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */
- if (!ZSTDMT_tryGetInputRange(mtctx)) {
- /* It is only possible for this operation to fail if there are
- * still compression jobs ongoing.
- */
+/** ZSTDMT_compressStream_generic() :
+ * internal use only - exposed to be invoked from zstd_compress.c
+ * assumption : output and input are valid (pos <= size)
+ * @return : minimum amount of data remaining to flush, 0 if none */
+size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp)
+{
+ unsigned forwardInputProgress = 0;
+ DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
+ (U32)endOp, (U32)(input->size - input->pos));
+ assert(output->pos <= output->size);
+ assert(input->pos <= input->size);
+
+ if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
+ /* current frame being ended. Only flush/end are allowed */
+ return ERROR(stage_wrong);
+ }
+
+ /* fill input buffer */
+ if ( (!mtctx->jobReady)
+ && (input->size > input->pos) ) { /* support NULL input */
+ if (mtctx->inBuff.buffer.start == NULL) {
+ assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */
+ if (!ZSTDMT_tryGetInputRange(mtctx)) {
+ /* It is only possible for this operation to fail if there are
+ * still compression jobs ongoing.
+ */
DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
- assert(mtctx->doneJobID != mtctx->nextJobID);
+ assert(mtctx->doneJobID != mtctx->nextJobID);
} else
DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
- }
- if (mtctx->inBuff.buffer.start != NULL) {
+ }
+ if (mtctx->inBuff.buffer.start != NULL) {
syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
if (syncPoint.flush && endOp == ZSTD_e_continue) {
endOp = ZSTD_e_flush;
}
- assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
- DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
+ assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
+ DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
(U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
input->pos += syncPoint.toLoad;
mtctx->inBuff.filled += syncPoint.toLoad;
forwardInputProgress = syncPoint.toLoad>0;
- }
- }
+ }
+ }
if ((input->pos < input->size) && (endOp == ZSTD_e_end)) {
/* Can't end yet because the input is not fully consumed.
* We are in one of these cases:
@@ -1840,20 +1840,20 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable);
endOp = ZSTD_e_flush;
}
-
- if ( (mtctx->jobReady)
- || (mtctx->inBuff.filled >= mtctx->targetSectionSize) /* filled enough : let's compress */
- || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0)) /* something to flush : let's go */
- || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */
- size_t const jobSize = mtctx->inBuff.filled;
- assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
+
+ if ( (mtctx->jobReady)
+ || (mtctx->inBuff.filled >= mtctx->targetSectionSize) /* filled enough : let's compress */
+ || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0)) /* something to flush : let's go */
+ || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */
+ size_t const jobSize = mtctx->inBuff.filled;
+ assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , "");
- }
-
- /* check for potential compressed data ready to be flushed */
- { size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */
- if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */
+ }
+
+ /* check for potential compressed data ready to be flushed */
+ { size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */
+ if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */
DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
- return remainingToFlush;
- }
-}
+ return remainingToFlush;
+ }
+}
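+
+/* The public driver for the loop above is ZSTD_compressStream2(); the header
+ * below notes that the ZSTDMT_* entry points are internal-only. A minimal
+ * usage sketch (editor's illustration; function and variable names are
+ * hypothetical): */
+#if 0   /* illustration only */
+static size_t compress_all(ZSTD_CCtx* cctx,
+                           const void* src, size_t srcSize,
+                           void* dst, size_t dstCapacity, size_t* written)
+{
+    ZSTD_inBuffer  in  = { src, srcSize, 0 };
+    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
+    size_t remaining;
+    do {
+        remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
+        if (ZSTD_isError(remaining)) return remaining;
+    } while (remaining != 0);   /* 0 => frame fully written */
+    *written = out.pos;
+    return 0;
+}
+#endif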
diff --git a/contrib/libs/zstd/lib/compress/zstdmt_compress.h b/contrib/libs/zstd/lib/compress/zstdmt_compress.h
index 271eb1ac71..c0bea93148 100644
--- a/contrib/libs/zstd/lib/compress/zstdmt_compress.h
+++ b/contrib/libs/zstd/lib/compress/zstdmt_compress.h
@@ -1,36 +1,36 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
- #ifndef ZSTDMT_COMPRESS_H
- #define ZSTDMT_COMPRESS_H
-
- #if defined (__cplusplus)
- extern "C" {
- #endif
-
-
-/* Note : This is an internal API.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ #ifndef ZSTDMT_COMPRESS_H
+ #define ZSTDMT_COMPRESS_H
+
+ #if defined (__cplusplus)
+ extern "C" {
+ #endif
+
+
+/* Note : This is an internal API.
* These APIs used to be exposed with ZSTDLIB_API,
- * because it used to be the only way to invoke MT compression.
+ * because it used to be the only way to invoke MT compression.
* Now, you must use ZSTD_compress2 and ZSTD_compressStream2() instead.
*
* This API requires ZSTD_MULTITHREAD to be defined during compilation,
* otherwise ZSTDMT_createCCtx*() will fail.
*/
-
-/* === Dependencies === */
+
+/* === Dependencies === */
#include "../common/zstd_deps.h" /* size_t */
-#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */
+#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */
#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
-
-
+
+
/* === Constants === */
#ifndef ZSTDMT_NBWORKERS_MAX /* a different value can be selected at compile time */
# define ZSTDMT_NBWORKERS_MAX ((sizeof(void*)==4) /*32-bit*/ ? 64 : 256)
@@ -47,20 +47,20 @@
* === Not exposed in libzstd. Never invoke directly ===
* ======================================================== */
-/* === Memory management === */
-typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
+/* === Memory management === */
+typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
ZSTD_customMem cMem,
ZSTD_threadPool *pool);
size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
-
+
size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
-
-/* === Streaming functions === */
-
+
+/* === Streaming functions === */
+
size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
-
+
/*! ZSTDMT_initCStream_internal() :
* Private use only. Init streaming operation.
* expects params to be valid.
@@ -73,19 +73,19 @@ size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* mtctx,
const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
-
-/*! ZSTDMT_compressStream_generic() :
- * Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
- * depending on flush directive.
- * @return : minimum amount of data still to be flushed
- * 0 if fully flushed
- * or an error code
- * note : needs to be init using any ZSTD_initCStream*() variant */
+
+/*! ZSTDMT_compressStream_generic() :
+ * Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
+ * depending on flush directive.
+ * @return : minimum amount of data still to be flushed
+ * 0 if fully flushed
+ * or an error code
+ * note : needs to be init using any ZSTD_initCStream*() variant */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp);
-
+
/*! ZSTDMT_toFlushNow()
* Tell how many bytes are ready to be flushed immediately.
* Probe the oldest active job (not yet entirely flushed) and check its output buffer.
@@ -93,21 +93,21 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
* or, it means oldest job is still active, but everything produced has been flushed so far,
* therefore flushing is limited by speed of oldest job. */
size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx);
-
-/*! ZSTDMT_updateCParams_whileCompressing() :
- * Updates only a selected set of compression parameters, to remain compatible with current frame.
- * New parameters will be applied to next compression job. */
-void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);
-
+
+/*! ZSTDMT_updateCParams_whileCompressing() :
+ * Updates only a selected set of compression parameters, to remain compatible with current frame.
+ * New parameters will be applied to next compression job. */
+void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);
+
/*! ZSTDMT_getFrameProgression():
* tells how much data has been consumed (input) and produced (output) for current frame.
* able to count progression inside worker threads.
- */
-ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
-
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTDMT_COMPRESS_H */
+ */
+ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTDMT_COMPRESS_H */
diff --git a/contrib/libs/zstd/lib/decompress/huf_decompress.c b/contrib/libs/zstd/lib/decompress/huf_decompress.c
index 2027188255..275ccf88d5 100644
--- a/contrib/libs/zstd/lib/decompress/huf_decompress.c
+++ b/contrib/libs/zstd/lib/decompress/huf_decompress.c
@@ -71,16 +71,16 @@
/* **************************************************************
* Error Management
****************************************************************/
-#define HUF_isError ERR_isError
-
-
-/* **************************************************************
-* Byte alignment for workSpace management
-****************************************************************/
-#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
-#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define HUF_isError ERR_isError
+/* **************************************************************
+* Byte alignment for workSpace management
+****************************************************************/
+#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
+#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+
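+/* HUF_ALIGN(x, a) rounds x up to the next multiple of a, where a is a power
+ * of two. Worked example (editor's illustration): HUF_ALIGN(13, 8) =
+ * (13 + 7) & ~7 = 20 & ~7 = 16, while an already-aligned value is unchanged:
+ * HUF_ALIGN(16, 8) = 23 & ~7 = 16. */
+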
/* **************************************************************
* BMI2 Variant Wrappers
****************************************************************/
@@ -350,7 +350,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
-
+
DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
/* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
@@ -363,7 +363,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
U32 const maxTableLog = dtd.maxTableLog + 1;
U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG);
tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog);
- if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
+ if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
dtd.tableType = 0;
dtd.tableLog = (BYTE)tableLog;
ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
@@ -477,7 +477,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr
return iSize;
}
-FORCE_INLINE_TEMPLATE BYTE
+FORCE_INLINE_TEMPLATE BYTE
HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
@@ -497,7 +497,7 @@ HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
-HINT_INLINE size_t
+HINT_INLINE size_t
HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
@@ -514,19 +514,19 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons
BIT_reloadDStream(bitDPtr);
}
- /* [0-3] symbols remaining */
- if (MEM_32bits())
- while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
+ /* [0-3] symbols remaining */
+ if (MEM_32bits())
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
- /* no more data to retrieve from bitstream, no need to reload */
+ /* no more data to retrieve from bitstream, no need to reload */
while (p < pEnd)
HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
return pEnd-pStart;
}
-FORCE_INLINE_TEMPLATE size_t
+FORCE_INLINE_TEMPLATE size_t
HUF_decompress1X1_usingDTable_internal_body(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
@@ -540,7 +540,7 @@ HUF_decompress1X1_usingDTable_internal_body(
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
- CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
@@ -549,7 +549,7 @@ HUF_decompress1X1_usingDTable_internal_body(
return dstSize;
}
-FORCE_INLINE_TEMPLATE size_t
+FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X1_usingDTable_internal_body(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
@@ -592,12 +592,12 @@ HUF_decompress4X1_usingDTable_internal_body(
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
- CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
- CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
- CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
- CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
- /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
+ /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
if ((size_t)(oend - op4) >= sizeof(size_t)) {
for ( ; (endSignal) & (op4 < olimit) ; ) {
HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
@@ -624,8 +624,8 @@ HUF_decompress4X1_usingDTable_internal_body(
}
/* check corruption */
- /* note : should not be necessary : op# advance in lock step, and we control op4.
- * but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */
+ /* note : should not be necessary : op# advance in lock step, and we control op4.
+ * but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
@@ -638,8 +638,8 @@ HUF_decompress4X1_usingDTable_internal_body(
HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog);
/* check */
- { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
- if (!endCheck) return ERROR(corruption_detected); }
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
/* decoded size */
return dstSize;
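+
+/* Layout note for the 4-stream decode above (editor's note, per the zstd
+ * format / RFC 8878): the compressed payload starts with a 6-byte jump
+ * table of three little-endian 16-bit sizes for streams 1-3, so istart1 =
+ * istart + 6 and length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ * the first three streams each decode a (dstSize+3)/4 -byte segment of the
+ * output, and the fourth writes the remainder. */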
@@ -720,13 +720,13 @@ HUF_decompress4X1_usingDTable_internal_bmi2_asm(
}
#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
-typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
- const void *cSrc,
- size_t cSrcSize,
- const HUF_DTable *DTable);
-
+typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
+ const void *cSrc,
+ size_t cSrcSize,
+ const HUF_DTable *DTable);
+
HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
-
+
static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
{
@@ -741,54 +741,54 @@ static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize,
#else
(void)bmi2;
#endif
-
+
#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
#else
return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
#endif
}
-
+
size_t HUF_decompress1X1_usingDTable(
- void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 0) return ERROR(GENERIC);
return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
- const BYTE* ip = (const BYTE*) cSrc;
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
- if (HUF_isError(hSize)) return hSize;
- if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
- ip += hSize; cSrcSize -= hSize;
-
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
-}
-
-
+}
+
+
size_t HUF_decompress4X1_usingDTable(
- void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- const HUF_DTable* DTable)
-{
- DTableDesc dtd = HUF_getDTableDesc(DTable);
- if (dtd.tableType != 0) return ERROR(GENERIC);
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
-}
-
+}
+
static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize, int bmi2)
-{
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
@@ -800,13 +800,13 @@ static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size
}
size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
-}
-
-
+}
+
+
#endif /* HUF_FORCE_DECOMPRESS_X2 */
@@ -1059,16 +1059,16 @@ size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable,
size_t iSize;
void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
- U32 *rankStart;
+ U32 *rankStart;
HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
-
+
if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
-
+
rankStart = wksp->rankStart0 + 1;
ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
-
+
DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
/* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
@@ -1467,13 +1467,13 @@ size_t HUF_decompress1X2_usingDTable(
}
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
- workSpace, wkspSize);
+ workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
@@ -1481,7 +1481,7 @@ size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
}
-
+
size_t HUF_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
@@ -1493,13 +1493,13 @@ size_t HUF_decompress4X2_usingDTable(
}
static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize, int bmi2)
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
- workSpace, wkspSize);
+ workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
@@ -1508,13 +1508,13 @@ static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size
}
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
-{
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
-}
-
-
+}
+
+
#endif /* HUF_FORCE_DECOMPRESS_X1 */
@@ -1586,13 +1586,13 @@ static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */]
#endif
/** HUF_selectDecoder() :
- * Tells which decoder is likely to decode faster,
- * based on a set of pre-computed metrics.
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
* @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
- * Assumption : 0 < dstSize <= 128 KB */
+ * Assumption : 0 < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
{
- assert(dstSize > 0);
+ assert(dstSize > 0);
assert(dstSize <= 128*1024);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dstSize;
@@ -1604,25 +1604,25 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
return 1;
#else
/* decoder timing evaluation */
- { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
- U32 const D256 = (U32)(dstSize >> 8);
- U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
- U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+ { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
+ U32 const D256 = (U32)(dstSize >> 8);
+ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
DTime1 += DTime1 >> 5; /* small advantage to algorithm using less memory, to reduce cache eviction */
- return DTime1 < DTime0;
+ return DTime1 < DTime0;
}
#endif
}
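+
+/* Worked example of the timing heuristic above (editor's illustration):
+ * cSrcSize = 50 KB, dstSize = 100 KB gives Q = 50*16/100 = 8 and
+ * D256 = (100*1024) >> 8 = 400. DTime0/DTime1 are then linear cost models
+ * (table setup + 400 * per-256-byte decode cost) read from algoTime[8];
+ * the DTime1 >> 5 term adds a ~3% handicap to the double-symbol decoder,
+ * so the smaller X1 table wins near-ties. */
+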
-size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
- size_t dstSize, const void* cSrc,
- size_t cSrcSize, void* workSpace,
- size_t wkspSize)
-{
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
+ size_t dstSize, const void* cSrc,
+ size_t cSrcSize, void* workSpace,
+ size_t wkspSize)
+{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
- if (cSrcSize == 0) return ERROR(corruption_detected);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
@@ -1641,9 +1641,9 @@ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
}
}
-size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
- const void* cSrc, size_t cSrcSize,
- void* workSpace, size_t wkspSize)
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
@@ -1664,17 +1664,17 @@ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
cSrcSize, workSpace, wkspSize);
#else
return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize):
+ cSrcSize, workSpace, wkspSize):
HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
- cSrcSize, workSpace, wkspSize);
+ cSrcSize, workSpace, wkspSize);
#endif
}
}
-
-
-size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
+
+
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
@@ -1687,25 +1687,25 @@ size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const voi
return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#endif
-}
-
+}
+
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
-{
- const BYTE* ip = (const BYTE*) cSrc;
-
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
- if (HUF_isError(hSize)) return hSize;
- if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
- ip += hSize; cSrcSize -= hSize;
-
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
-}
+}
#endif
-
-size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
-{
- DTableDesc const dtd = HUF_getDTableDesc(DTable);
+
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
@@ -1718,15 +1718,15 @@ size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const voi
return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#endif
-}
-
-size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
-{
- /* validation checks */
- if (dstSize == 0) return ERROR(dstSize_tooSmall);
- if (cSrcSize == 0) return ERROR(corruption_detected);
-
- { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+}
+
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
@@ -1739,8 +1739,8 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds
return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#endif
- }
-}
+ }
+}
#ifndef ZSTD_NO_UNUSED_FUNCTIONS
#ifndef HUF_FORCE_DECOMPRESS_X2
diff --git a/contrib/libs/zstd/lib/decompress/zstd_decompress.c b/contrib/libs/zstd/lib/decompress/zstd_decompress.c
index b8bbefd538..15fee21c23 100644
--- a/contrib/libs/zstd/lib/decompress/zstd_decompress.c
+++ b/contrib/libs/zstd/lib/decompress/zstd_decompress.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -14,9 +14,9 @@
*****************************************************************/
/*!
* HEAPMODE :
- * Select how default decompression function ZSTD_decompress() allocates its context,
- * on stack (0), or into heap (1, default; requires malloc()).
- * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
+ * Select how default decompression function ZSTD_decompress() allocates its context,
+ * on stack (0), or into heap (1, default; requires malloc()).
+ * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
*/
#ifndef ZSTD_HEAPMODE
# define ZSTD_HEAPMODE 1
@@ -24,18 +24,18 @@
/*!
* LEGACY_SUPPORT :
-* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
+* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
*/
#ifndef ZSTD_LEGACY_SUPPORT
# define ZSTD_LEGACY_SUPPORT 0
#endif
/*!
- * MAXWINDOWSIZE_DEFAULT :
- * maximum window size accepted by DStream __by default__.
- * Frames requiring more memory will be rejected.
- * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
- */
+ * MAXWINDOWSIZE_DEFAULT :
+ * maximum window size accepted by DStream __by default__.
+ * Frames requiring more memory will be rejected.
+ * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
+ */
#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
#endif
@@ -216,23 +216,23 @@ static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_
/*-*************************************************************
* Context management
***************************************************************/
-size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
-{
- if (dctx==NULL) return 0; /* support sizeof NULL */
- return sizeof(*dctx)
- + ZSTD_sizeof_DDict(dctx->ddictLocal)
- + dctx->inBuffSize + dctx->outBuffSize;
-}
+size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support sizeof NULL */
+ return sizeof(*dctx)
+ + ZSTD_sizeof_DDict(dctx->ddictLocal)
+ + dctx->inBuffSize + dctx->outBuffSize;
+}
size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
-
-static size_t ZSTD_startingInputLength(ZSTD_format_e format)
+
+static size_t ZSTD_startingInputLength(ZSTD_format_e format)
{
size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
- /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
- assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
- return startingInputLength;
+ /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
+ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
+ return startingInputLength;
}
static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
@@ -245,18 +245,18 @@ static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
}
-static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
+static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
{
- dctx->staticSize = 0;
- dctx->ddict = NULL;
- dctx->ddictLocal = NULL;
+ dctx->staticSize = 0;
+ dctx->ddict = NULL;
+ dctx->ddictLocal = NULL;
dctx->dictEnd = NULL;
dctx->ddictIsCold = 0;
dctx->dictUses = ZSTD_dont_use;
- dctx->inBuff = NULL;
- dctx->inBuffSize = 0;
- dctx->outBuffSize = 0;
- dctx->streamStage = zdss_init;
+ dctx->inBuff = NULL;
+ dctx->inBuffSize = 0;
+ dctx->outBuffSize = 0;
+ dctx->streamStage = zdss_init;
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
dctx->legacyContext = NULL;
dctx->previousLegacyVersion = 0;
@@ -271,32 +271,32 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
dctx->dictContentEndForFuzzing = NULL;
#endif
-}
-
-ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
-{
- ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
-
- if ((size_t)workspace & 7) return NULL; /* 8-aligned */
- if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
-
- ZSTD_initDCtx_internal(dctx);
- dctx->staticSize = workspaceSize;
- dctx->inBuff = (char*)(dctx+1);
+}
+
+ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
+{
+ ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
+
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+ if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
+
+ ZSTD_initDCtx_internal(dctx);
+ dctx->staticSize = workspaceSize;
+ dctx->inBuff = (char*)(dctx+1);
return dctx;
}
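+
+/* Minimal usage sketch for the static-allocation path above (editor's
+ * illustration; ZSTD_initStaticDCtx and ZSTD_estimateDCtxSize belong to the
+ * ZSTD_STATIC_LINKING_ONLY API, variable names are hypothetical): */
+#if 0   /* illustration only */
+    size_t const need = ZSTD_estimateDCtxSize();
+    void* const ws = malloc(need);   /* malloc() results are suitably aligned */
+    ZSTD_DCtx* const sdctx = ZSTD_initStaticDCtx(ws, need);
+    /* sdctx == NULL if ws is not 8-byte aligned or need < sizeof(ZSTD_DCtx) */
+#endif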
static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) {
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
-
+
{ ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
- if (!dctx) return NULL;
- dctx->customMem = customMem;
- ZSTD_initDCtx_internal(dctx);
- return dctx;
- }
-}
-
+ if (!dctx) return NULL;
+ dctx->customMem = customMem;
+ ZSTD_initDCtx_internal(dctx);
+ return dctx;
+ }
+}
+
ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
{
return ZSTD_createDCtx_internal(customMem);
@@ -304,7 +304,7 @@ ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
ZSTD_DCtx* ZSTD_createDCtx(void)
{
- DEBUGLOG(3, "ZSTD_createDCtx");
+ DEBUGLOG(3, "ZSTD_createDCtx");
return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
}
@@ -320,27 +320,27 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
if (dctx==NULL) return 0; /* support free on NULL */
RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
- { ZSTD_customMem const cMem = dctx->customMem;
+ { ZSTD_customMem const cMem = dctx->customMem;
ZSTD_clearDict(dctx);
ZSTD_customFree(dctx->inBuff, cMem);
- dctx->inBuff = NULL;
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
- if (dctx->legacyContext)
- ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
-#endif
+ dctx->inBuff = NULL;
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (dctx->legacyContext)
+ ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
+#endif
if (dctx->ddictSet) {
ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
dctx->ddictSet = NULL;
}
ZSTD_customFree(dctx, cMem);
- return 0;
- }
+ return 0;
+ }
}
-/* no longer useful */
+/* no longer useful */
void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
{
- size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
+ size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
ZSTD_memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
}
@@ -369,8 +369,8 @@ static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
/*-*************************************************************
- * Frame header decoding
- ***************************************************************/
+ * Frame header decoding
+ ***************************************************************/
/*! ZSTD_isFrame() :
* Tells if the content of `buffer` starts with a valid Frame Identifier.
@@ -403,88 +403,88 @@ unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size)
return 0;
}
-/** ZSTD_frameHeaderSize_internal() :
- * srcSize must be large enough to reach header size fields.
- * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
- * @return : size of the Frame Header
- * or an error code, which can be tested with ZSTD_isError() */
-static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
-{
- size_t const minInputSize = ZSTD_startingInputLength(format);
+/** ZSTD_frameHeaderSize_internal() :
+ * srcSize must be large enough to reach header size fields.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
+ * @return : size of the Frame Header
+ * or an error code, which can be tested with ZSTD_isError() */
+static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ size_t const minInputSize = ZSTD_startingInputLength(format);
RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
-
- { BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
+
+ { BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
U32 const dictID= fhd & 3;
U32 const singleSegment = (fhd >> 5) & 1;
U32 const fcsId = fhd >> 6;
- return minInputSize + !singleSegment
- + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
- + (singleSegment && !fcsId);
+ return minInputSize + !singleSegment
+ + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+ + (singleSegment && !fcsId);
}
}
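+
+/* Worked example of the size computation above (editor's illustration), for
+ * ZSTD_f_zstd1 where minInputSize = 5 (4-byte magic + 1-byte descriptor):
+ * fhd = 0x00 (no dictID, not single-segment, fcsId = 0) gives
+ * 5 + 1 + 0 + 0 + 0 = 6 bytes; fhd = 0x20 (single-segment, fcsId = 0) gives
+ * 5 + 0 + 0 + 0 + 1 = 6, the trailing +1 being the 1-byte content-size
+ * field forced by the single-segment flag. */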
-/** ZSTD_frameHeaderSize() :
- * srcSize must be >= ZSTD_frameHeaderSize_prefix.
+/** ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_frameHeaderSize_prefix.
* @return : size of the Frame Header,
* or an error code (if srcSize is too small) */
-size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
-{
- return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
-}
-
+size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+ return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
+}
+
/** ZSTD_getFrameHeader_advanced() :
- * decode Frame Header, or require larger `srcSize`.
- * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
- * @return : 0, `zfhPtr` is correctly filled,
- * >0, `srcSize` is too small, value is wanted `srcSize` amount,
- * or an error code, which can be tested using ZSTD_isError() */
+ * decode Frame Header, or require larger `srcSize`.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
{
const BYTE* ip = (const BYTE*)src;
- size_t const minInputSize = ZSTD_startingInputLength(format);
+ size_t const minInputSize = ZSTD_startingInputLength(format);
     ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since those are 2 different signals */
- if (srcSize < minInputSize) return minInputSize;
+ if (srcSize < minInputSize) return minInputSize;
RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
-
- if ( (format != ZSTD_f_zstd1_magicless)
- && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
+
+ if ( (format != ZSTD_f_zstd1_magicless)
+ && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
- /* skippable frame */
+ /* skippable frame */
if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
- zfhPtr->frameType = ZSTD_skippableFrame;
+ zfhPtr->frameType = ZSTD_skippableFrame;
return 0;
}
RETURN_ERROR(prefix_unknown, "");
}
/* ensure there is enough `srcSize` to fully read/decode frame header */
- { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
- if (srcSize < fhsize) return fhsize;
- zfhPtr->headerSize = (U32)fhsize;
- }
+ { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
+ if (srcSize < fhsize) return fhsize;
+ zfhPtr->headerSize = (U32)fhsize;
+ }
- { BYTE const fhdByte = ip[minInputSize-1];
- size_t pos = minInputSize;
+ { BYTE const fhdByte = ip[minInputSize-1];
+ size_t pos = minInputSize;
U32 const dictIDSizeCode = fhdByte&3;
U32 const checksumFlag = (fhdByte>>2)&1;
U32 const singleSegment = (fhdByte>>5)&1;
U32 const fcsID = fhdByte>>6;
- U64 windowSize = 0;
+ U64 windowSize = 0;
U32 dictID = 0;
- U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
"reserved bits, must be zero");
-
+
if (!singleSegment) {
BYTE const wlByte = ip[pos++];
U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
- windowSize = (1ULL << windowLog);
+ windowSize = (1ULL << windowLog);
windowSize += (windowSize >> 3) * (wlByte&7);
}
switch(dictIDSizeCode)
@@ -507,52 +507,52 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s
case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
}
- if (singleSegment) windowSize = frameContentSize;
-
- zfhPtr->frameType = ZSTD_frame;
- zfhPtr->frameContentSize = frameContentSize;
- zfhPtr->windowSize = windowSize;
- zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
- zfhPtr->dictID = dictID;
- zfhPtr->checksumFlag = checksumFlag;
+ if (singleSegment) windowSize = frameContentSize;
+
+ zfhPtr->frameType = ZSTD_frame;
+ zfhPtr->frameContentSize = frameContentSize;
+ zfhPtr->windowSize = windowSize;
+ zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ zfhPtr->dictID = dictID;
+ zfhPtr->checksumFlag = checksumFlag;
}
return 0;
}
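As a caller-side illustration of the contract above (a hypothetical sketch, not taken from this source: probe_frame, buf and bufSize are assumed names, and ZSTD_getFrameHeader() requires ZSTD_STATIC_LINKING_ONLY):

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_getFrameHeader() is an advanced API */
#include <zstd.h>

/* Hypothetical helper: returns 0 when zfh was filled, a positive byte count
 * when more input is needed, and -1 when the buffer is not a zstd frame. */
static long probe_frame(const void* buf, size_t bufSize)
{
    ZSTD_frameHeader zfh;
    size_t const ret = ZSTD_getFrameHeader(&zfh, buf, bufSize);
    if (ZSTD_isError(ret)) return -1;   /* not a zstd or skippable frame */
    if (ret > 0) return (long)ret;      /* feed at least `ret` bytes, then retry */
    /* ret == 0 : zfh.frameContentSize, zfh.windowSize, zfh.dictID are now valid */
    return 0;
}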
-/** ZSTD_getFrameHeader() :
- * decode Frame Header, or require larger `srcSize`.
- * note : this function does not consume input, it only reads it.
- * @return : 0, `zfhPtr` is correctly filled,
- * >0, `srcSize` is too small, value is wanted `srcSize` amount,
- * or an error code, which can be tested using ZSTD_isError() */
-size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
-{
+/** ZSTD_getFrameHeader() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : this function does not consume input, it only reads it.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+{
return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
-}
-
-/** ZSTD_getFrameContentSize() :
- * compatible with legacy mode
- * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
- * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
-unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
-{
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
- if (ZSTD_isLegacy(src, srcSize)) {
- unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
- return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
- }
-#endif
- { ZSTD_frameHeader zfh;
- if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
- return ZSTD_CONTENTSIZE_ERROR;
- if (zfh.frameType == ZSTD_skippableFrame) {
- return 0;
- } else {
- return zfh.frameContentSize;
- } }
-}
-
+}
+
+/** ZSTD_getFrameContentSize() :
+ * compatible with legacy mode
+ * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
+{
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(src, srcSize)) {
+ unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
+ return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
+ }
+#endif
+ { ZSTD_frameHeader zfh;
+ if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
+ return ZSTD_CONTENTSIZE_ERROR;
+ if (zfh.frameType == ZSTD_skippableFrame) {
+ return 0;
+ } else {
+ return zfh.frameContentSize;
+ } }
+}
+
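The two sentinel values above are distinct, so callers must test both before trusting the number. A hypothetical sizing helper (alloc_for_frame and outSize are assumed names):

#include <stdlib.h>   /* malloc */
#include <zstd.h>

/* Hypothetical helper: allocate an exactly-sized buffer for one frame,
 * or return NULL when the size is unknown or the frame is invalid. */
static void* alloc_for_frame(const void* src, size_t srcSize, size_t* outSize)
{
    unsigned long long const s = ZSTD_getFrameContentSize(src, srcSize);
    if (s == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* invalid magic / truncated header */
    if (s == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* no size field : use streaming instead */
    *outSize = (size_t)s;
    return malloc(*outSize ? *outSize : 1);          /* avoid malloc(0) ambiguity */
}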
static size_t readSkippableFrameSize(void const* src, size_t srcSize)
{
size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
@@ -601,65 +601,65 @@ ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsign
return skippableContentSize;
}
-/** ZSTD_findDecompressedSize() :
- * compatible with legacy mode
- * `srcSize` must be the exact length of some number of ZSTD compressed and/or
- * skippable frames
- * @return : decompressed size of the frames contained */
-unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
-{
- unsigned long long totalDstSize = 0;
-
+/** ZSTD_findDecompressedSize() :
+ * compatible with legacy mode
+ * `srcSize` must be the exact length of some number of ZSTD compressed and/or
+ * skippable frames
+ * @return : decompressed size of the frames contained */
+unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long totalDstSize = 0;
+
while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
- U32 const magicNumber = MEM_readLE32(src);
-
+ U32 const magicNumber = MEM_readLE32(src);
+
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
if (ZSTD_isError(skippableSize)) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
assert(skippableSize <= srcSize);
-
- src = (const BYTE *)src + skippableSize;
- srcSize -= skippableSize;
- continue;
- }
-
- { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
- if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
-
- /* check for overflow */
- if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
- totalDstSize += ret;
- }
- { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
- if (ZSTD_isError(frameSrcSize)) {
- return ZSTD_CONTENTSIZE_ERROR;
- }
-
- src = (const BYTE *)src + frameSrcSize;
- srcSize -= frameSrcSize;
- }
- } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
-
- if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
-
- return totalDstSize;
-}
-
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ }
+
+ { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
+
+ /* check for overflow */
+ if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
+ totalDstSize += ret;
+ }
+ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
+ if (ZSTD_isError(frameSrcSize)) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+
+ src = (const BYTE *)src + frameSrcSize;
+ srcSize -= frameSrcSize;
+ }
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
+
+ return totalDstSize;
+}
+
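Since the total is only trustworthy when every frame carries a size field, a defensive caller might gate allocation on it. A hypothetical sketch (concat_size_ok and maxBytes are assumed names; the function above is ZSTD_STATIC_LINKING_ONLY):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Hypothetical guard: accept a concatenation of frames only when its total
 * decompressed size is known and under a caller-chosen cap. */
static int concat_size_ok(const void* src, size_t srcSize, unsigned long long maxBytes)
{
    unsigned long long const total = ZSTD_findDecompressedSize(src, srcSize);
    if (total == ZSTD_CONTENTSIZE_ERROR) return 0;    /* malformed input or overflow */
    if (total == ZSTD_CONTENTSIZE_UNKNOWN) return 0;  /* some frame omits its size field */
    return total <= maxBytes;
}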
/** ZSTD_getDecompressedSize() :
* compatible with legacy mode
* @return : decompressed size if known, 0 otherwise
note : 0 can mean any of the following :
- - frame content is empty
- - decompressed size field is not present in frame header
+ - frame content is empty
+ - decompressed size field is not present in frame header
- frame header unknown / not supported
- frame header not complete (`srcSize` too small) */
unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
{
- unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
- ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
- return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
+ unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
+ return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
}
@@ -670,7 +670,7 @@ unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
{
size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
- if (ZSTD_isError(result)) return result; /* invalid header */
+ if (ZSTD_isError(result)) return result; /* invalid header */
RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
/* Reference DDict requested by frame if dctx references multiple ddicts */
@@ -742,13 +742,13 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize
if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
-
+
ip += ZSTD_blockHeaderSize + cBlockSize;
remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
nbBlocks++;
-
+
if (blockProperties.lastBlock) break;
- }
+ }
/* Final frame content checksum */
if (zfh.checksumFlag) {
@@ -781,9 +781,9 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
 * `src` must point to the start of a ZSTD frame or a skippable frame
* `srcSize` must be at least as large as the frame contained
* @return : the maximum decompressed size of the compressed source
- */
+ */
unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
-{
+{
unsigned long long bound = 0;
/* Iterate over each frame */
while (srcSize > 0) {
@@ -796,10 +796,10 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
src = (const BYTE*)src + compressedSize;
srcSize -= compressedSize;
bound += decompressedBound;
- }
+ }
return bound;
-}
-
+}
+
/*-*************************************************************
* Frame decoding
@@ -832,16 +832,16 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
BYTE b,
size_t regenSize)
-{
+{
RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
if (dst == NULL) {
if (regenSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
- }
+ }
ZSTD_memset(dst, b, regenSize);
return regenSize;
-}
-
+}
+
static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
{
#if ZSTD_TRACE
@@ -874,8 +874,8 @@ static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64
* will update *srcPtr and *srcSizePtr,
* to make *srcPtr progress by one frame. */
static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
- void* dst, size_t dstCapacity,
- const void** srcPtr, size_t *srcSizePtr)
+ void* dst, size_t dstCapacity,
+ const void** srcPtr, size_t *srcSizePtr)
{
const BYTE* const istart = (const BYTE*)(*srcPtr);
const BYTE* ip = istart;
@@ -930,7 +930,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
if (ZSTD_isError(decodedSize)) return decodedSize;
if (dctx->validateChecksum)
- XXH64_update(&dctx->xxhState, op, decodedSize);
+ XXH64_update(&dctx->xxhState, op, decodedSize);
if (decodedSize != 0)
op += decodedSize;
assert(ip != NULL);
@@ -939,11 +939,11 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
if (blockProperties.lastBlock) break;
}
- if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
corruption_detected, "");
}
- if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
+ if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
if (!dctx->forceIgnoreChecksum) {
U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
@@ -951,57 +951,57 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
checkRead = MEM_readLE32(ip);
RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
}
- ip += 4;
+ ip += 4;
remainingSrcSize -= 4;
}
ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
- /* Allow caller to get size read */
- *srcPtr = ip;
+ /* Allow caller to get size read */
+ *srcPtr = ip;
*srcSizePtr = remainingSrcSize;
return (size_t)(op-ostart);
}
-static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict, size_t dictSize,
- const ZSTD_DDict* ddict)
-{
- void* const dststart = dst;
+static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ const ZSTD_DDict* ddict)
+{
+ void* const dststart = dst;
int moreThan1Frame = 0;
DEBUGLOG(5, "ZSTD_decompressMultiFrame");
- assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
-
- if (ddict) {
+ assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
+
+ if (ddict) {
dict = ZSTD_DDict_dictContent(ddict);
dictSize = ZSTD_DDict_dictSize(ddict);
- }
-
+ }
+
while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
-
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
- if (ZSTD_isLegacy(src, srcSize)) {
- size_t decodedSize;
- size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
- if (ZSTD_isError(frameSize)) return frameSize;
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+ if (ZSTD_isLegacy(src, srcSize)) {
+ size_t decodedSize;
+ size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
+ if (ZSTD_isError(frameSize)) return frameSize;
RETURN_ERROR_IF(dctx->staticSize, memory_allocation,
"legacy support is not compatible with static dctx");
-
- decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
+
+ decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
if (ZSTD_isError(decodedSize)) return decodedSize;
-
+
assert(decodedSize <= dstCapacity);
- dst = (BYTE*)dst + decodedSize;
- dstCapacity -= decodedSize;
-
- src = (const BYTE*)src + frameSize;
- srcSize -= frameSize;
-
- continue;
- }
-#endif
-
+ dst = (BYTE*)dst + decodedSize;
+ dstCapacity -= decodedSize;
+
+ src = (const BYTE*)src + frameSize;
+ srcSize -= frameSize;
+
+ continue;
+ }
+#endif
+
{ U32 const magicNumber = MEM_readLE32(src);
DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
(unsigned)magicNumber, ZSTD_MAGICNUMBER);
@@ -1009,24 +1009,24 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
assert(skippableSize <= srcSize);
-
- src = (const BYTE *)src + skippableSize;
- srcSize -= skippableSize;
- continue;
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
} }
-
- if (ddict) {
- /* we were called from ZSTD_decompress_usingDDict */
+
+ if (ddict) {
+ /* we were called from ZSTD_decompress_usingDDict */
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
- } else {
- /* this will initialize correctly with no dict if dict == NULL, so
- * use this in all cases but ddict */
+ } else {
+ /* this will initialize correctly with no dict if dict == NULL, so
+ * use this in all cases but ddict */
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
- }
+ }
ZSTD_checkContinuity(dctx, dst, dstCapacity);
-
- { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
- &src, &srcSize);
+
+ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
+ &src, &srcSize);
RETURN_ERROR_IF(
(ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
&& (moreThan1Frame==1),
@@ -1038,26 +1038,26 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
"Note: one could be unlucky, it might be a corruption error instead, "
"happening right at the place where we expect zstd magic bytes. "
"But this is _much_ less likely than a srcSize field error.");
- if (ZSTD_isError(res)) return res;
+ if (ZSTD_isError(res)) return res;
assert(res <= dstCapacity);
if (res != 0)
dst = (BYTE*)dst + res;
- dstCapacity -= res;
- }
+ dstCapacity -= res;
+ }
moreThan1Frame = 1;
- } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
-
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
-
+
return (size_t)((BYTE*)dst - (BYTE*)dststart);
-}
-
+}
+
size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize)
{
- return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
}
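A hypothetical one-shot round trip through this entry point (decompress_with_dict is an assumed name; passing dict==NULL or dictSize==0 decompresses without a dictionary):

#include <zstd.h>

/* Hypothetical helper: dst must already be large enough, e.g. sized via
 * ZSTD_getFrameContentSize(); returns bytes written, or 0 on any error. */
static size_t decompress_with_dict(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize,
                                   const void* dict, size_t dictSize)
{
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    size_t written = 0;
    if (dctx == NULL) return 0;
    {   size_t const r = ZSTD_decompress_usingDict(dctx, dst, dstCapacity,
                                                   src, srcSize, dict, dictSize);
        if (!ZSTD_isError(r)) written = r;
    }
    ZSTD_freeDCtx(dctx);
    return written;
}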
@@ -1086,7 +1086,7 @@ size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const
size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
-#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
size_t regenSize;
ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem);
RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
@@ -1129,7 +1129,7 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
switch(dctx->stage)
{
default: /* should not happen */
- assert(0);
+ assert(0);
ZSTD_FALLTHROUGH;
case ZSTDds_getFrameHeaderSize:
ZSTD_FALLTHROUGH;
@@ -1150,12 +1150,12 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
}
}
-static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
+static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
/** ZSTD_decompressContinue() :
- * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
- * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
- * or an error code, which can be tested using ZSTD_isError() */
+ * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
+ * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+ * or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
@@ -1168,24 +1168,24 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
switch (dctx->stage)
{
case ZSTDds_getFrameHeaderSize :
- assert(src != NULL);
- if (dctx->format == ZSTD_f_zstd1) { /* allows header */
+ assert(src != NULL);
+ if (dctx->format == ZSTD_f_zstd1) { /* allows header */
assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */
if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */
- dctx->stage = ZSTDds_decodeSkippableHeader;
- return 0;
- } }
- dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
+ dctx->stage = ZSTDds_decodeSkippableHeader;
+ return 0;
+ } }
+ dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
- dctx->expected = dctx->headerSize - srcSize;
- dctx->stage = ZSTDds_decodeFrameHeader;
- return 0;
+ dctx->expected = dctx->headerSize - srcSize;
+ dctx->stage = ZSTDds_decodeFrameHeader;
+ return 0;
case ZSTDds_decodeFrameHeader:
- assert(src != NULL);
+ assert(src != NULL);
ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
dctx->expected = ZSTD_blockHeaderSize;
@@ -1214,20 +1214,20 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
dctx->stage = ZSTDds_getFrameHeaderSize;
}
} else {
- dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */
+ dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */
dctx->stage = ZSTDds_decodeBlockHeader;
}
return 0;
}
-
+
case ZSTDds_decompressLastBlock:
case ZSTDds_decompressBlock:
- DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
+ DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
{ size_t rSize;
switch(dctx->bType)
{
case bt_compressed:
- DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
+ DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming);
dctx->expected = 0; /* Streaming not supported */
break;
@@ -1249,7 +1249,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
FORWARD_IF_ERROR(rSize, "");
RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
- dctx->decodedSize += rSize;
+ dctx->decodedSize += rSize;
if (dctx->validateChecksum) XXH64_update(&dctx->xxhState, dst, rSize);
dctx->previousDstEnd = (char*)dst + rSize;
@@ -1278,9 +1278,9 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
}
return rSize;
}
-
+
case ZSTDds_checkChecksum:
- assert(srcSize == 4); /* guaranteed by dctx->expected */
+ assert(srcSize == 4); /* guaranteed by dctx->expected */
{
if (dctx->validateChecksum) {
U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
@@ -1293,20 +1293,20 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
}
-
+
case ZSTDds_decodeSkippableHeader:
- assert(src != NULL);
+ assert(src != NULL);
assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
- dctx->stage = ZSTDds_skipFrame;
- return 0;
-
+ dctx->stage = ZSTDds_skipFrame;
+ return 0;
+
case ZSTDds_skipFrame:
- dctx->expected = 0;
- dctx->stage = ZSTDds_getFrameHeaderSize;
- return 0;
-
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+
default:
assert(0); /* impossible */
RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
@@ -1329,7 +1329,7 @@ static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dict
/*! ZSTD_loadDEntropy() :
* dict : must point at beginning of a valid zstd dictionary.
- * @return : size of entropy tables read */
+ * @return : size of entropy tables read */
size_t
ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
const void* const dict, size_t const dictSize)
@@ -1339,8 +1339,8 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
- dictPtr += 8; /* skip header = magic + dictID */
-
+ dictPtr += 8; /* skip header = magic + dictID */
+
ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
@@ -1367,8 +1367,8 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->OFTable,
- offcodeNCount, offcodeMaxValue,
- OF_base, OF_bits,
+ offcodeNCount, offcodeMaxValue,
+ OF_base, OF_bits,
offcodeLog,
entropy->workspace, sizeof(entropy->workspace),
/* bmi2 */0);
@@ -1382,8 +1382,8 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->MLTable,
- matchlengthNCount, matchlengthMaxValue,
- ML_base, ML_bits,
+ matchlengthNCount, matchlengthMaxValue,
+ ML_base, ML_bits,
matchlengthLog,
entropy->workspace, sizeof(entropy->workspace),
/* bmi2 */ 0);
@@ -1397,8 +1397,8 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->LLTable,
- litlengthNCount, litlengthMaxValue,
- LL_base, LL_bits,
+ litlengthNCount, litlengthMaxValue,
+ LL_base, LL_bits,
litlengthLog,
entropy->workspace, sizeof(entropy->workspace),
/* bmi2 */ 0);
@@ -1406,14 +1406,14 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
}
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
- { int i;
- size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
- for (i=0; i<3; i++) {
- U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
+ { int i;
+ size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
+ for (i=0; i<3; i++) {
+ U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
dictionary_corrupted, "");
- entropy->rep[i] = rep;
- } }
+ entropy->rep[i] = rep;
+ } }
return (size_t)(dictPtr - (const BYTE*)dict);
}
@@ -1422,7 +1422,7 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
{
if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
{ U32 const magic = MEM_readLE32(dict);
- if (magic != ZSTD_MAGIC_DICTIONARY) {
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
} }
dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
@@ -1433,43 +1433,43 @@ static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict
dict = (const char*)dict + eSize;
dictSize -= eSize;
}
- dctx->litEntropy = dctx->fseEntropy = 1;
+ dctx->litEntropy = dctx->fseEntropy = 1;
/* reference dictionary content */
return ZSTD_refDictContent(dctx, dict, dictSize);
}
-size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
-{
- assert(dctx != NULL);
+size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+{
+ assert(dctx != NULL);
#if ZSTD_TRACE
dctx->traceCtx = (ZSTD_trace_decompress_begin != NULL) ? ZSTD_trace_decompress_begin(dctx) : 0;
#endif
- dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
- dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
dctx->processedCSize = 0;
- dctx->decodedSize = 0;
- dctx->previousDstEnd = NULL;
+ dctx->decodedSize = 0;
+ dctx->previousDstEnd = NULL;
dctx->prefixStart = NULL;
dctx->virtualStart = NULL;
- dctx->dictEnd = NULL;
- dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
- dctx->litEntropy = dctx->fseEntropy = 0;
- dctx->dictID = 0;
+ dctx->dictEnd = NULL;
+ dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ dctx->litEntropy = dctx->fseEntropy = 0;
+ dctx->dictID = 0;
dctx->bType = bt_reserved;
- ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
- dctx->LLTptr = dctx->entropy.LLTable;
- dctx->MLTptr = dctx->entropy.MLTable;
- dctx->OFTptr = dctx->entropy.OFTable;
- dctx->HUFptr = dctx->entropy.hufTable;
- return 0;
-}
-
+ dctx->LLTptr = dctx->entropy.LLTable;
+ dctx->MLTptr = dctx->entropy.MLTable;
+ dctx->OFTptr = dctx->entropy.OFTable;
+ dctx->HUFptr = dctx->entropy.hufTable;
+ return 0;
+}
+
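ZSTD_decompressBegin() is the entry point of the buffer-less streaming API driven by ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() above. A hypothetical driver over one in-memory frame (bufferless_decompress is an assumed name; all three calls are ZSTD_STATIC_LINKING_ONLY):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Hypothetical loop: feed exactly the byte count the context asks for,
 * advancing the input and output cursors until the frame is done. */
static size_t bufferless_decompress(ZSTD_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    ZSTD_decompressBegin(dctx);
    for (;;) {
        size_t const need = ZSTD_nextSrcSizeToDecompress(dctx);
        size_t produced;
        if (need == 0) break;                                            /* frame fully decoded */
        if (need > srcSize - (size_t)(ip - (const char*)src)) return 0;  /* truncated input */
        produced = ZSTD_decompressContinue(dctx,
                       op, dstCapacity - (size_t)(op - (char*)dst), ip, need);
        if (ZSTD_isError(produced)) return 0;
        ip += need;
        op += produced;
    }
    return (size_t)(op - (char*)dst);
}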
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
- if (dict && dictSize)
+ if (dict && dictSize)
RETURN_ERROR_IF(
ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
dictionary_corrupted, "");
@@ -1490,14 +1490,14 @@ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
DEBUGLOG(4, "DDict is %s",
dctx->ddictIsCold ? "~cold~" : "hot!");
- }
+ }
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
if (ddict) { /* NULL ddict is equivalent to no dictionary */
ZSTD_copyDDictParameters(dctx, ddict);
- }
- return 0;
-}
-
+ }
+ return 0;
+}
+
/*! ZSTD_getDictID_fromDict() :
* Provides the dictID stored within dictionary.
* if @return == 0, the dictionary is not conformant with Zstandard specification.
@@ -1505,7 +1505,7 @@ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
{
if (dictSize < 8) return 0;
- if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
}
@@ -1513,19 +1513,19 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
* Provides the dictID required to decompress frame stored within `src`.
* If @return == 0, the dictID could not be decoded.
 * This could be for one of the following reasons :
- * - The frame does not require a dictionary (most common case).
- * - The frame was built with dictID intentionally removed.
- *   The needed dictionary is then hidden information.
+ * - The frame does not require a dictionary (most common case).
+ * - The frame was built with dictID intentionally removed.
+ *   The needed dictionary is then hidden information.
* Note : this use case also happens when using a non-conformant dictionary.
- * - `srcSize` is too small, and as a result, frame header could not be decoded.
- * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
+ * - `srcSize` is too small, and as a result, frame header could not be decoded.
+ * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
* - This is not a Zstandard frame.
- * When identifying the exact failure cause, it's possible to use
- * ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ * When identifying the exact failure cause, it's possible to use
+ * ZSTD_getFrameHeader(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
{
- ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
- size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
+ ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
+ size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
if (ZSTD_isError(hError)) return 0;
return zfp.dictID;
}
@@ -1539,10 +1539,10 @@ size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
const void* src, size_t srcSize,
const ZSTD_DDict* ddict)
{
- /* pass content and size in case legacy frames are encountered */
- return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
- NULL, 0,
- ddict);
+ /* pass content and size in case legacy frames are encountered */
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
+ NULL, 0,
+ ddict);
}
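When many frames share one dictionary, digesting it once into a ZSTD_DDict avoids re-loading entropy tables on every call. A hypothetical sketch (decompress_with_ddict is an assumed name; a real caller would keep the ddict alive across frames rather than rebuild it per call):

#include <zstd.h>

/* Hypothetical helper: build the digested dictionary, decompress, release. */
static size_t decompress_with_ddict(ZSTD_DCtx* dctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize,
                                    const void* dictBuf, size_t dictSize)
{
    ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
    size_t r;
    if (ddict == NULL) return 0;
    r = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ddict);
    ZSTD_freeDDict(ddict);
    return ZSTD_isError(r) ? 0 : r;
}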
@@ -1550,104 +1550,104 @@ size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
* Streaming decompression
*====================================*/
-ZSTD_DStream* ZSTD_createDStream(void)
-{
- DEBUGLOG(3, "ZSTD_createDStream");
+ZSTD_DStream* ZSTD_createDStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createDStream");
return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
-}
-
-ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
-{
- return ZSTD_initStaticDCtx(workspace, workspaceSize);
-}
-
-ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
-{
+}
+
+ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticDCtx(workspace, workspaceSize);
+}
+
+ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
+{
return ZSTD_createDCtx_internal(customMem);
-}
-
-size_t ZSTD_freeDStream(ZSTD_DStream* zds)
-{
- return ZSTD_freeDCtx(zds);
-}
-
-
+}
+
+size_t ZSTD_freeDStream(ZSTD_DStream* zds)
+{
+ return ZSTD_freeDCtx(zds);
+}
+
+
/* *** Initialization *** */
-
-size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
-size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
-
+
+size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
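These two sizes are the buffer dimensions the streaming loop is tuned for. A hypothetical end-to-end loop (stream_decompress, consume and produce are assumed names standing in for real I/O; error handling is minimal):

#include <zstd.h>

/* Hypothetical loop: ZSTD_decompressStream() returns 0 at a frame boundary,
 * an input-size hint otherwise, and errors testable with ZSTD_isError(). */
static int stream_decompress(ZSTD_DStream* zds,
                             size_t (*consume)(void* buf, size_t cap),
                             void (*produce)(const void* buf, size_t len),
                             void* inBuf, void* outBuf)  /* sized per the two calls above */
{
    size_t const inCap  = ZSTD_DStreamInSize();
    size_t const outCap = ZSTD_DStreamOutSize();
    size_t readBytes;
    ZSTD_initDStream(zds);
    while ((readBytes = consume(inBuf, inCap)) != 0) {
        ZSTD_inBuffer input = { inBuf, readBytes, 0 };
        while (input.pos < input.size) {
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            size_t const ret = ZSTD_decompressStream(zds, &output, &input);
            if (ZSTD_isError(ret)) return -1;
            produce(outBuf, output.pos);
        }
    }
    return 0;
}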
size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
-{
+{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
if (dict && dictSize != 0) {
- dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
+ dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
dctx->ddict = dctx->ddictLocal;
dctx->dictUses = ZSTD_use_indefinitely;
- }
- return 0;
-}
-
-size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
-{
- return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
-}
-
-size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
-{
- return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
-}
-
-size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+ }
+ return 0;
+}
+
+size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
dctx->dictUses = ZSTD_use_once;
return 0;
}
-size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
-{
- return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
-}
-
-
-/* ZSTD_initDStream_usingDict() :
+size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+
+/* ZSTD_initDStream_usingDict() :
* return : expected size, aka ZSTD_startingInputLength().
- * this function cannot fail */
+ * this function cannot fail */
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
{
- DEBUGLOG(4, "ZSTD_initDStream_usingDict");
+ DEBUGLOG(4, "ZSTD_initDStream_usingDict");
FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
return ZSTD_startingInputLength(zds->format);
}
-/* note : this variant can't fail */
+/* note : this variant can't fail */
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
- DEBUGLOG(4, "ZSTD_initDStream");
+ DEBUGLOG(4, "ZSTD_initDStream");
return ZSTD_initDStream_usingDDict(zds, NULL);
}
-/* ZSTD_initDStream_usingDDict() :
- * ddict will just be referenced, and must outlive decompression session
- * this function cannot fail */
-size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
-{
+/* ZSTD_initDStream_usingDDict() :
+ * ddict will just be referenced, and must outlive decompression session
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
+{
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
return ZSTD_startingInputLength(dctx->format);
}
-/* ZSTD_resetDStream() :
+/* ZSTD_resetDStream() :
* return : expected size, aka ZSTD_startingInputLength().
- * this function cannot fail */
-size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
+ * this function cannot fail */
+size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
return ZSTD_startingInputLength(dctx->format);
@@ -1678,19 +1678,19 @@ size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
/* ZSTD_DCtx_setMaxWindowSize() :
* note : no direct equivalence in ZSTD_DCtx_setParameter,
* since this version sets windowSize, and the other sets windowLog */
-size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
-{
+size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
+{
ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
size_t const min = (size_t)1 << bounds.lowerBound;
size_t const max = (size_t)1 << bounds.upperBound;
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
- dctx->maxWindowSize = maxWindowSize;
- return 0;
-}
+ dctx->maxWindowSize = maxWindowSize;
+ return 0;
+}
-size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
{
return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
}
@@ -1811,48 +1811,48 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
ZSTD_clearDict(dctx);
ZSTD_DCtx_resetParameters(dctx);
}
- return 0;
+ return 0;
}
-size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
-{
- return ZSTD_sizeof_DCtx(dctx);
-}
-
-size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
-{
- size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
+{
+ return ZSTD_sizeof_DCtx(dctx);
+}
+
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+ size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
    /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy */
unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2);
- unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
- size_t const minRBSize = (size_t) neededSize;
+ unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
+ size_t const minRBSize = (size_t) neededSize;
RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
frameParameter_windowTooLarge, "");
- return minRBSize;
-}
-
-size_t ZSTD_estimateDStreamSize(size_t windowSize)
-{
- size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
- size_t const inBuffSize = blockSize; /* no block can be larger */
- size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
- return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
-}
-
-size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
-{
+ return minRBSize;
+}
+
+size_t ZSTD_estimateDStreamSize(size_t windowSize)
+{
+ size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ size_t const inBuffSize = blockSize; /* no block can be larger */
+ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
+ return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
+}
+
+size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
+{
U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
- ZSTD_frameHeader zfh;
- size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
- if (ZSTD_isError(err)) return err;
+ ZSTD_frameHeader zfh;
+ size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(err)) return err;
RETURN_ERROR_IF(err>0, srcSize_wrong, "");
RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
frameParameter_windowTooLarge, "");
- return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
-}
-
-
+ return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
+}
+
+
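The estimators above pair with ZSTD_initStaticDStream() when all memory must be reserved up front. A hypothetical sketch (make_static_dstream and wsOut are assumed names; both APIs are ZSTD_STATIC_LINKING_ONLY):

#define ZSTD_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <zstd.h>

/* Hypothetical helper: size a workspace from the frame header, then carve a
 * DStream out of it; no further allocation happens during decompression. */
static ZSTD_DStream* make_static_dstream(const void* src, size_t srcSize, void** wsOut)
{
    size_t const need = ZSTD_estimateDStreamSize_fromFrame(src, srcSize);
    if (ZSTD_isError(need)) return NULL;
    *wsOut = malloc(need);
    if (*wsOut == NULL) return NULL;
    return ZSTD_initStaticDStream(*wsOut, need);   /* NULL if workspace too small */
}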
/* ***** Decompression ***** */
static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
@@ -1937,7 +1937,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
char* op = ostart;
U32 someMoreWork = 1;
- DEBUGLOG(5, "ZSTD_decompressStream");
+ DEBUGLOG(5, "ZSTD_decompressStream");
RETURN_ERROR_IF(
input->pos > input->size,
srcSize_wrong,
@@ -1948,14 +1948,14 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
dstSize_tooSmall,
"forbidden. out: pos: %u vs size: %u",
(U32)output->pos, (U32)output->size);
- DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
+ DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
while (someMoreWork) {
- switch(zds->streamStage)
+ switch(zds->streamStage)
{
case zdss_init :
- DEBUGLOG(5, "stage zdss_init => transparent reset ");
+ DEBUGLOG(5, "stage zdss_init => transparent reset ");
zds->streamStage = zdss_loadHeader;
zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
@@ -1966,75 +1966,75 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
ZSTD_FALLTHROUGH;
case zdss_loadHeader :
- DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
+ DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
- if (zds->legacyVersion) {
+ if (zds->legacyVersion) {
RETURN_ERROR_IF(zds->staticSize, memory_allocation,
"legacy support is incompatible with static dctx");
- { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
- if (hint==0) zds->streamStage = zdss_init;
- return hint;
- } }
-#endif
+ { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
+ if (hint==0) zds->streamStage = zdss_init;
+ return hint;
+ } }
+#endif
{ size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
if (zds->refMultipleDDicts && zds->ddictSet) {
ZSTD_DCtx_selectFrameDDict(zds);
}
- DEBUGLOG(5, "header size : %u", (U32)hSize);
- if (ZSTD_isError(hSize)) {
-#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
- U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
+ DEBUGLOG(5, "header size : %u", (U32)hSize);
+ if (ZSTD_isError(hSize)) {
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+ U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
if (legacyVersion) {
ZSTD_DDict const* const ddict = ZSTD_getDDict(zds);
const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL;
size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0;
- DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
+ DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
RETURN_ERROR_IF(zds->staticSize, memory_allocation,
"legacy support is incompatible with static dctx");
FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext,
- zds->previousLegacyVersion, legacyVersion,
+ zds->previousLegacyVersion, legacyVersion,
dict, dictSize), "");
zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
- { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
- if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */
- return hint;
- } }
+ { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
+ if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */
+ return hint;
+ } }
#endif
- return hSize; /* error */
- }
+ return hSize; /* error */
+ }
if (hSize != 0) { /* need more input */
size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
- size_t const remainingInput = (size_t)(iend-ip);
- assert(iend >= ip);
- if (toLoad > remainingInput) { /* not enough input to load full header */
- if (remainingInput > 0) {
+ size_t const remainingInput = (size_t)(iend-ip);
+ assert(iend >= ip);
+ if (toLoad > remainingInput) { /* not enough input to load full header */
+ if (remainingInput > 0) {
ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
- zds->lhSize += remainingInput;
- }
+ zds->lhSize += remainingInput;
+ }
input->pos = input->size;
return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
}
- assert(ip != NULL);
+ assert(ip != NULL);
ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
break;
} }
- /* check for single-pass mode opportunity */
+ /* check for single-pass mode opportunity */
if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& zds->fParams.frameType != ZSTD_skippableFrame
- && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
+ && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
- if (cSize <= (size_t)(iend-istart)) {
- /* shortcut : using single-pass mode */
+ if (cSize <= (size_t)(iend-istart)) {
+ /* shortcut : using single-pass mode */
size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
- if (ZSTD_isError(decompressedSize)) return decompressedSize;
- DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
- ip = istart + cSize;
- op += decompressedSize;
- zds->expected = 0;
- zds->streamStage = zdss_init;
- someMoreWork = 0;
- break;
+ if (ZSTD_isError(decompressedSize)) return decompressedSize;
+ DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
+ ip = istart + cSize;
+ op += decompressedSize;
+ zds->expected = 0;
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
} }
/* Check output buffer is large enough for ZSTD_odm_stable. */
@@ -2045,29 +2045,29 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small");
}
- /* Consume header (see ZSTDds_decodeFrameHeader) */
- DEBUGLOG(4, "Consume header");
+ /* Consume header (see ZSTDds_decodeFrameHeader) */
+ DEBUGLOG(4, "Consume header");
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
-
+
if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
- zds->stage = ZSTDds_skipFrame;
- } else {
+ zds->stage = ZSTDds_skipFrame;
+ } else {
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
- zds->expected = ZSTD_blockHeaderSize;
- zds->stage = ZSTDds_decodeBlockHeader;
- }
-
- /* control buffer memory usage */
- DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
- (U32)(zds->fParams.windowSize >>10),
- (U32)(zds->maxWindowSize >> 10) );
+ zds->expected = ZSTD_blockHeaderSize;
+ zds->stage = ZSTDds_decodeBlockHeader;
+ }
+
+ /* control buffer memory usage */
+ DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
+ (U32)(zds->fParams.windowSize >>10),
+ (U32)(zds->maxWindowSize >> 10) );
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
frameParameter_windowTooLarge, "");
/* Adapt buffer sizes to frame header instructions */
- { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
+ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
: 0;
@@ -2100,15 +2100,15 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
zds->outBuff = zds->inBuff + zds->inBuffSize;
zds->outBuffSize = neededOutBuffSize;
} } }
- zds->streamStage = zdss_read;
+ zds->streamStage = zdss_read;
ZSTD_FALLTHROUGH;
case zdss_read:
- DEBUGLOG(5, "stage zdss_read");
+ DEBUGLOG(5, "stage zdss_read");
{ size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
- DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
+ DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
if (neededInSize==0) { /* end of frame */
- zds->streamStage = zdss_init;
+ zds->streamStage = zdss_init;
someMoreWork = 0;
break;
}
@@ -2117,26 +2117,26 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
ip += neededInSize;
/* Function modifies the stage so we must break */
break;
- } }
- if (ip==iend) { someMoreWork = 0; break; } /* no more input */
- zds->streamStage = zdss_load;
+ } }
+ if (ip==iend) { someMoreWork = 0; break; } /* no more input */
+ zds->streamStage = zdss_load;
ZSTD_FALLTHROUGH;
case zdss_load:
- { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
- size_t const toLoad = neededInSize - zds->inPos;
- int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+ size_t const toLoad = neededInSize - zds->inPos;
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
size_t loadedSize;
/* At this point we shouldn't be decompressing a block that we can stream. */
assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
- if (isSkipFrame) {
- loadedSize = MIN(toLoad, (size_t)(iend-ip));
- } else {
+ if (isSkipFrame) {
+ loadedSize = MIN(toLoad, (size_t)(iend-ip));
+ } else {
RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
corruption_detected,
"should never happen");
loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
- }
+ }
ip += loadedSize;
zds->inPos += loadedSize;
if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
@@ -2153,20 +2153,20 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
op += flushedSize;
zds->outStart += flushedSize;
if (flushedSize == toFlushSize) { /* flush completed */
- zds->streamStage = zdss_read;
- if ( (zds->outBuffSize < zds->fParams.frameContentSize)
- && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
- DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
- (int)(zds->outBuffSize - zds->outStart),
- (U32)zds->fParams.blockSizeMax);
+ zds->streamStage = zdss_read;
+ if ( (zds->outBuffSize < zds->fParams.frameContentSize)
+ && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+ DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
+ (int)(zds->outBuffSize - zds->outStart),
+ (U32)zds->fParams.blockSizeMax);
zds->outStart = zds->outEnd = 0;
- }
+ }
break;
- } }
- /* cannot complete flush */
- someMoreWork = 0;
- break;
-
+ } }
+ /* cannot complete flush */
+ someMoreWork = 0;
+ break;
+
default:
assert(0); /* impossible */
RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
@@ -2189,42 +2189,42 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
} else {
zds->noForwardProgress = 0;
}
- { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
+ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
if (!nextSrcSizeHint) { /* frame fully decoded */
if (zds->outEnd == zds->outStart) { /* output fully flushed */
if (zds->hostageByte) {
- if (input->pos >= input->size) {
- /* can't release hostage (not present) */
- zds->streamStage = zdss_read;
- return 1;
- }
+ if (input->pos >= input->size) {
+ /* can't release hostage (not present) */
+ zds->streamStage = zdss_read;
+ return 1;
+ }
input->pos++; /* release hostage */
- } /* zds->hostageByte */
+ } /* zds->hostageByte */
return 0;
- } /* zds->outEnd == zds->outStart */
+ } /* zds->outEnd == zds->outStart */
if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */
zds->hostageByte=1;
}
return 1;
- } /* nextSrcSizeHint==0 */
- nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
- assert(zds->inPos <= nextSrcSizeHint);
- nextSrcSizeHint -= zds->inPos; /* part already loaded*/
+ } /* nextSrcSizeHint==0 */
+ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
+ assert(zds->inPos <= nextSrcSizeHint);
+        nextSrcSizeHint -= zds->inPos;   /* part already loaded */
return nextSrcSizeHint;
}
}
-
+
size_t ZSTD_decompressStream_simpleArgs (
- ZSTD_DCtx* dctx,
- void* dst, size_t dstCapacity, size_t* dstPos,
- const void* src, size_t srcSize, size_t* srcPos)
-{
- ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
- ZSTD_inBuffer input = { src, srcSize, *srcPos };
- /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
- *dstPos = output.pos;
- *srcPos = input.pos;
- return cErr;
-}
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
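This variant exists for bindings that cannot express ZSTD_inBuffer/ZSTD_outBuffer; the position arguments advance exactly as input.pos/output.pos would. A hypothetical driver (drain_buffer is an assumed name; the dctx is assumed already initialized, e.g. via ZSTD_DCtx_reset()):

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Hypothetical pump: one input buffer into one output buffer. Returns bytes
 * written (<= dstCapacity), or a zstd error code testable with ZSTD_isError(). */
static size_t drain_buffer(ZSTD_DCtx* dctx,
                           void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
{
    size_t dstPos = 0, srcPos = 0;
    while (srcPos < srcSize) {
        size_t const hint = ZSTD_decompressStream_simpleArgs(dctx,
                                dst, dstCapacity, &dstPos, src, srcSize, &srcPos);
        if (ZSTD_isError(hint)) return hint;
        if (hint == 0) break;               /* frame fully decoded and flushed */
        if (dstPos == dstCapacity) break;   /* output full : caller must flush */
    }
    return dstPos;
}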
diff --git a/contrib/libs/zstd/lib/dictBuilder/cover.c b/contrib/libs/zstd/lib/dictBuilder/cover.c
index 028802a1b0..336cacf122 100644
--- a/contrib/libs/zstd/lib/dictBuilder/cover.c
+++ b/contrib/libs/zstd/lib/dictBuilder/cover.c
@@ -1,31 +1,31 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-/* *****************************************************************************
- * Constructs a dictionary using a heuristic based on the following paper:
- *
- * Liao, Petri, Moffat, Wirth
- * Effective Construction of Relative Lempel-Ziv Dictionaries
- * Published in WWW 2016.
- *
- * Adapted from code originally written by @ot (Giuseppe Ottaviano).
- ******************************************************************************/
-
-/*-*************************************
-* Dependencies
-***************************************/
-#include <stdio.h> /* fprintf */
-#include <stdlib.h> /* malloc, free, qsort */
-#include <string.h> /* memset */
-#include <time.h> /* clock */
-
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* *****************************************************************************
+ * Constructs a dictionary using a heuristic based on the following paper:
+ *
+ * Liao, Petri, Moffat, Wirth
+ * Effective Construction of Relative Lempel-Ziv Dictionaries
+ * Published in WWW 2016.
+ *
+ * Adapted from code originally written by @ot (Giuseppe Ottaviano).
+ ******************************************************************************/
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include <stdio.h> /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h> /* clock */
+
#ifndef ZDICT_STATIC_LINKING_ONLY
# define ZDICT_STATIC_LINKING_ONLY
#endif
@@ -36,10 +36,10 @@
#include "../common/zstd_internal.h" /* includes zstd.h */
#include "../zdict.h"
#include "cover.h"
-
-/*-*************************************
-* Constants
-***************************************/
+
+/*-*************************************
+* Constants
+***************************************/
/**
* There are 32bit indexes used to ref samples, so limit samples size to 4GB
* on 64bit builds.
@@ -49,520 +49,520 @@
*/
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define COVER_DEFAULT_SPLITPOINT 1.0
-
-/*-*************************************
-* Console display
-***************************************/
+
+/*-*************************************
+* Console display
+***************************************/
#ifndef LOCALDISPLAYLEVEL
static int g_displayLevel = 0;
#endif
#undef DISPLAY
-#define DISPLAY(...) \
- { \
- fprintf(stderr, __VA_ARGS__); \
- fflush(stderr); \
- }
+#define DISPLAY(...) \
+ { \
+ fprintf(stderr, __VA_ARGS__); \
+ fflush(stderr); \
+ }
#undef LOCALDISPLAYLEVEL
-#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \
- if (displayLevel >= l) { \
- DISPLAY(__VA_ARGS__); \
- } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
+#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \
+ if (displayLevel >= l) { \
+ DISPLAY(__VA_ARGS__); \
+ } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
#undef DISPLAYLEVEL
-#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
-
+#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+
#ifndef LOCALDISPLAYUPDATE
static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;
#endif
#undef LOCALDISPLAYUPDATE
-#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \
- if (displayLevel >= l) { \
+#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \
+ if (displayLevel >= l) { \
if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) { \
- g_time = clock(); \
- DISPLAY(__VA_ARGS__); \
- } \
- }
+ g_time = clock(); \
+ DISPLAY(__VA_ARGS__); \
+ } \
+ }
#undef DISPLAYUPDATE
-#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
-
-/*-*************************************
-* Hash table
-***************************************
-* A small specialized hash map for storing activeDmers.
-* The map does not resize, so if it becomes full it will loop forever.
-* Thus, the map must be large enough to store every value.
-* The map implements linear probing and keeps its load less than 0.5.
-*/
-
-#define MAP_EMPTY_VALUE ((U32)-1)
-typedef struct COVER_map_pair_t_s {
- U32 key;
- U32 value;
-} COVER_map_pair_t;
-
-typedef struct COVER_map_s {
- COVER_map_pair_t *data;
- U32 sizeLog;
- U32 size;
- U32 sizeMask;
-} COVER_map_t;
-
-/**
- * Clear the map.
- */
-static void COVER_map_clear(COVER_map_t *map) {
- memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
-}
-
-/**
- * Initializes a map of the given size.
- * Returns 1 on success and 0 on failure.
- * The map must be destroyed with COVER_map_destroy().
- * The map is only guaranteed to be large enough to hold size elements.
- */
-static int COVER_map_init(COVER_map_t *map, U32 size) {
- map->sizeLog = ZSTD_highbit32(size) + 2;
- map->size = (U32)1 << map->sizeLog;
- map->sizeMask = map->size - 1;
- map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
- if (!map->data) {
- map->sizeLog = 0;
- map->size = 0;
- return 0;
- }
- COVER_map_clear(map);
- return 1;
-}
-
-/**
- * Internal hash function
- */
+#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
+
+/*-*************************************
+* Hash table
+***************************************
+* A small specialized hash map for storing activeDmers.
+* The map does not resize, so if it becomes full it will loop forever.
+* Thus, the map must be large enough to store every value.
+* The map implements linear probing and keeps its load less than 0.5.
+*/
+
+#define MAP_EMPTY_VALUE ((U32)-1)
+typedef struct COVER_map_pair_t_s {
+ U32 key;
+ U32 value;
+} COVER_map_pair_t;
+
+typedef struct COVER_map_s {
+ COVER_map_pair_t *data;
+ U32 sizeLog;
+ U32 size;
+ U32 sizeMask;
+} COVER_map_t;
+
+/**
+ * Clear the map.
+ */
+static void COVER_map_clear(COVER_map_t *map) {
+ memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
+}
+
+/**
+ * Initializes a map of the given size.
+ * Returns 1 on success and 0 on failure.
+ * The map must be destroyed with COVER_map_destroy().
+ * The map is only guaranteed to be large enough to hold size elements.
+ */
+static int COVER_map_init(COVER_map_t *map, U32 size) {
+ map->sizeLog = ZSTD_highbit32(size) + 2;
+ map->size = (U32)1 << map->sizeLog;
+ map->sizeMask = map->size - 1;
+ map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
+ if (!map->data) {
+ map->sizeLog = 0;
+ map->size = 0;
+ return 0;
+ }
+ COVER_map_clear(map);
+ return 1;
+}
+
+/**
+ * Internal hash function
+ */
static const U32 COVER_prime4bytes = 2654435761U;
-static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
+static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
return (key * COVER_prime4bytes) >> (32 - map->sizeLog);
-}
-
-/**
- * Helper function that returns the index that a key should be placed into.
- */
-static U32 COVER_map_index(COVER_map_t *map, U32 key) {
- const U32 hash = COVER_map_hash(map, key);
- U32 i;
- for (i = hash;; i = (i + 1) & map->sizeMask) {
- COVER_map_pair_t *pos = &map->data[i];
- if (pos->value == MAP_EMPTY_VALUE) {
- return i;
- }
- if (pos->key == key) {
- return i;
- }
- }
-}
-
-/**
- * Returns the pointer to the value for key.
- * If key is not in the map, it is inserted and the value is set to 0.
- * The map must not be full.
- */
-static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
- COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
- if (pos->value == MAP_EMPTY_VALUE) {
- pos->key = key;
- pos->value = 0;
- }
- return &pos->value;
-}
-
-/**
- * Deletes key from the map if present.
- */
-static void COVER_map_remove(COVER_map_t *map, U32 key) {
- U32 i = COVER_map_index(map, key);
- COVER_map_pair_t *del = &map->data[i];
- U32 shift = 1;
- if (del->value == MAP_EMPTY_VALUE) {
- return;
- }
- for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
- COVER_map_pair_t *const pos = &map->data[i];
- /* If the position is empty we are done */
- if (pos->value == MAP_EMPTY_VALUE) {
- del->value = MAP_EMPTY_VALUE;
- return;
- }
- /* If pos can be moved to del do so */
- if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
- del->key = pos->key;
- del->value = pos->value;
- del = pos;
- shift = 1;
- } else {
- ++shift;
- }
- }
-}
-
-/**
+}
+
+/**
+ * Helper function that returns the index that a key should be placed into.
+ */
+static U32 COVER_map_index(COVER_map_t *map, U32 key) {
+ const U32 hash = COVER_map_hash(map, key);
+ U32 i;
+ for (i = hash;; i = (i + 1) & map->sizeMask) {
+ COVER_map_pair_t *pos = &map->data[i];
+ if (pos->value == MAP_EMPTY_VALUE) {
+ return i;
+ }
+ if (pos->key == key) {
+ return i;
+ }
+ }
+}
+
+/**
+ * Returns the pointer to the value for key.
+ * If key is not in the map, it is inserted and the value is set to 0.
+ * The map must not be full.
+ */
+static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
+ COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
+ if (pos->value == MAP_EMPTY_VALUE) {
+ pos->key = key;
+ pos->value = 0;
+ }
+ return &pos->value;
+}
+
+/**
+ * Deletes key from the map if present.
+ */
+static void COVER_map_remove(COVER_map_t *map, U32 key) {
+ U32 i = COVER_map_index(map, key);
+ COVER_map_pair_t *del = &map->data[i];
+ U32 shift = 1;
+ if (del->value == MAP_EMPTY_VALUE) {
+ return;
+ }
+ for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
+ COVER_map_pair_t *const pos = &map->data[i];
+ /* If the position is empty we are done */
+ if (pos->value == MAP_EMPTY_VALUE) {
+ del->value = MAP_EMPTY_VALUE;
+ return;
+ }
+ /* If pos can be moved to del do so */
+ if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
+ del->key = pos->key;
+ del->value = pos->value;
+ del = pos;
+ shift = 1;
+ } else {
+ ++shift;
+ }
+ }
+}
+
+/**
 * Destroys a map that is initialized with COVER_map_init().
- */
-static void COVER_map_destroy(COVER_map_t *map) {
- if (map->data) {
- free(map->data);
- }
- map->data = NULL;
- map->size = 0;
-}
-
-/*-*************************************
-* Context
-***************************************/
-
-typedef struct {
- const BYTE *samples;
- size_t *offsets;
- const size_t *samplesSizes;
- size_t nbSamples;
+ */
+static void COVER_map_destroy(COVER_map_t *map) {
+ if (map->data) {
+ free(map->data);
+ }
+ map->data = NULL;
+ map->size = 0;
+}
+
+/*-*************************************
+* Context
+***************************************/
+
+typedef struct {
+ const BYTE *samples;
+ size_t *offsets;
+ const size_t *samplesSizes;
+ size_t nbSamples;
size_t nbTrainSamples;
size_t nbTestSamples;
- U32 *suffix;
- size_t suffixSize;
- U32 *freqs;
- U32 *dmerAt;
- unsigned d;
-} COVER_ctx_t;
-
-/* We need a global context for qsort... */
+ U32 *suffix;
+ size_t suffixSize;
+ U32 *freqs;
+ U32 *dmerAt;
+ unsigned d;
+} COVER_ctx_t;
+
+/* We need a global context for qsort... */
static COVER_ctx_t *g_coverCtx = NULL;
-
-/*-*************************************
-* Helper functions
-***************************************/
-
-/**
- * Returns the sum of the sample sizes.
- */
+
+/*-*************************************
+* Helper functions
+***************************************/
+
+/**
+ * Returns the sum of the sample sizes.
+ */
size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
- size_t sum = 0;
+ size_t sum = 0;
unsigned i;
- for (i = 0; i < nbSamples; ++i) {
- sum += samplesSizes[i];
- }
- return sum;
-}
-
-/**
- * Returns -1 if the dmer at lp is less than the dmer at rp.
- * Return 0 if the dmers at lp and rp are equal.
- * Returns 1 if the dmer at lp is greater than the dmer at rp.
- */
-static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
- U32 const lhs = *(U32 const *)lp;
- U32 const rhs = *(U32 const *)rp;
- return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
-}
-/**
- * Faster version for d <= 8.
- */
-static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
- U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
- U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
- U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
- if (lhs < rhs) {
- return -1;
- }
- return (lhs > rhs);
-}
-
-/**
- * Same as COVER_cmp() except ties are broken by pointer value
+ for (i = 0; i < nbSamples; ++i) {
+ sum += samplesSizes[i];
+ }
+ return sum;
+}
+
+/**
+ * Returns -1 if the dmer at lp is less than the dmer at rp.
+ * Returns 0 if the dmers at lp and rp are equal.
+ * Returns 1 if the dmer at lp is greater than the dmer at rp.
+ */
+static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
+ U32 const lhs = *(U32 const *)lp;
+ U32 const rhs = *(U32 const *)rp;
+ return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
+}
+/**
+ * Faster version for d <= 8.
+ */
+static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
+ U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
+ U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
+ U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
+ if (lhs < rhs) {
+ return -1;
+ }
+ return (lhs > rhs);
+}
+
+/**
+ * Same as COVER_cmp() except ties are broken by pointer value
* NOTE: g_coverCtx must be set to call this function. A global is required because
- * qsort doesn't take an opaque pointer.
- */
+ * qsort doesn't take an opaque pointer.
+ */
static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) {
int result = COVER_cmp(g_coverCtx, lp, rp);
- if (result == 0) {
- result = lp < rp ? -1 : 1;
- }
- return result;
-}
-/**
- * Faster version for d <= 8.
- */
+ if (result == 0) {
+ result = lp < rp ? -1 : 1;
+ }
+ return result;
+}
+/**
+ * Faster version for d <= 8.
+ */
static int WIN_CDECL COVER_strict_cmp8(const void *lp, const void *rp) {
int result = COVER_cmp8(g_coverCtx, lp, rp);
- if (result == 0) {
- result = lp < rp ? -1 : 1;
- }
- return result;
-}
-
-/**
- * Returns the first pointer in [first, last) whose element does not compare
- * less than value. If no such element exists it returns last.
- */
-static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
- size_t value) {
- size_t count = last - first;
- while (count != 0) {
- size_t step = count / 2;
- const size_t *ptr = first;
- ptr += step;
- if (*ptr < value) {
- first = ++ptr;
- count -= step + 1;
- } else {
- count = step;
- }
- }
- return first;
-}
-
-/**
- * Generic groupBy function.
- * Groups an array sorted by cmp into groups with equivalent values.
- * Calls grp for each group.
- */
-static void
-COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
- int (*cmp)(COVER_ctx_t *, const void *, const void *),
- void (*grp)(COVER_ctx_t *, const void *, const void *)) {
- const BYTE *ptr = (const BYTE *)data;
- size_t num = 0;
- while (num < count) {
- const BYTE *grpEnd = ptr + size;
- ++num;
- while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
- grpEnd += size;
- ++num;
- }
- grp(ctx, ptr, grpEnd);
- ptr = grpEnd;
- }
-}
-
-/*-*************************************
-* Cover functions
-***************************************/
-
-/**
- * Called on each group of positions with the same dmer.
- * Counts the frequency of each dmer and saves it in the suffix array.
- * Fills `ctx->dmerAt`.
- */
-static void COVER_group(COVER_ctx_t *ctx, const void *group,
- const void *groupEnd) {
- /* The group consists of all the positions with the same first d bytes. */
- const U32 *grpPtr = (const U32 *)group;
- const U32 *grpEnd = (const U32 *)groupEnd;
- /* The dmerId is how we will reference this dmer.
- * This allows us to map the whole dmer space to a much smaller space, the
- * size of the suffix array.
- */
- const U32 dmerId = (U32)(grpPtr - ctx->suffix);
- /* Count the number of samples this dmer shows up in */
- U32 freq = 0;
- /* Details */
- const size_t *curOffsetPtr = ctx->offsets;
- const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
- /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
- * different sample than the last.
- */
- size_t curSampleEnd = ctx->offsets[0];
- for (; grpPtr != grpEnd; ++grpPtr) {
- /* Save the dmerId for this position so we can get back to it. */
- ctx->dmerAt[*grpPtr] = dmerId;
- /* Dictionaries only help for the first reference to the dmer.
- * After that zstd can reference the match from the previous reference.
- * So only count each dmer once for each sample it is in.
- */
- if (*grpPtr < curSampleEnd) {
- continue;
- }
- freq += 1;
- /* Binary search to find the end of the sample *grpPtr is in.
- * In the common case that grpPtr + 1 == grpEnd we can skip the binary
- * search because the loop is over.
- */
- if (grpPtr + 1 != grpEnd) {
- const size_t *sampleEndPtr =
- COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
- curSampleEnd = *sampleEndPtr;
- curOffsetPtr = sampleEndPtr + 1;
- }
- }
- /* At this point we are never going to look at this segment of the suffix
- * array again. We take advantage of this fact to save memory.
- * We store the frequency of the dmer in the first position of the group,
- * which is dmerId.
- */
- ctx->suffix[dmerId] = freq;
-}
-
-
-/**
- * Selects the best segment in an epoch.
- * Segments are scored according to the following function:
- *
- * Let F(d) be the frequency of dmer d.
- * Let S_i be the dmer at position i of segment S which has length k.
- *
- * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
- *
+ if (result == 0) {
+ result = lp < rp ? -1 : 1;
+ }
+ return result;
+}
+
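
Editor's note: as the comments in this file explain, portable qsort() passes no opaque pointer to its comparator, so cover.c routes context through the g_coverCtx global and breaks ties by slot address to get a deterministic order even from an unstable qsort(). The sketch below restates that pattern on a toy payload; sort_ctx_t, g_sortCtx, and the 3-byte width are illustrative assumptions, not zstd identifiers.

    /* Sketch of the global-context comparator pattern used above. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { const char *base; size_t width; } sort_ctx_t;

    static sort_ctx_t *g_sortCtx = NULL;   /* set before qsort(), cleared after */

    static int strict_cmp(const void *lp, const void *rp)
    {
        const unsigned l = *(const unsigned *)lp;
        const unsigned r = *(const unsigned *)rp;
        int result = memcmp(g_sortCtx->base + l, g_sortCtx->base + r, g_sortCtx->width);
        if (result == 0)
            result = lp < rp ? -1 : 1;     /* break ties by slot address */
        return result;
    }

    int main(void)
    {
        const char data[] = "bananaban";
        unsigned idx[] = { 0, 1, 2, 3, 4, 5, 6 };  /* suffix starts, compared by 3 bytes */
        sort_ctx_t ctx = { data, 3 };
        int i;
        g_sortCtx = &ctx;
        qsort(idx, sizeof(idx) / sizeof(idx[0]), sizeof(idx[0]), strict_cmp);
        g_sortCtx = NULL;
        for (i = 0; i < 7; i++) printf("%u ", idx[i]);
        printf("\n");
        return 0;
    }

Tie-breaking by address yields a deterministic total order, which is what the stability requirement mentioned later in this file (the OpenBSD mergesort() note) actually needs.
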
+/**
+ * Returns the first pointer in [first, last) whose element does not compare
+ * less than value. If no such element exists it returns last.
+ */
+static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
+ size_t value) {
+ size_t count = last - first;
+ while (count != 0) {
+ size_t step = count / 2;
+ const size_t *ptr = first;
+ ptr += step;
+ if (*ptr < value) {
+ first = ++ptr;
+ count -= step + 1;
+ } else {
+ count = step;
+ }
+ }
+ return first;
+}
+
+/**
+ * Generic groupBy function.
+ * Groups an array sorted by cmp into groups with equivalent values.
+ * Calls grp for each group.
+ */
+static void
+COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
+ int (*cmp)(COVER_ctx_t *, const void *, const void *),
+ void (*grp)(COVER_ctx_t *, const void *, const void *)) {
+ const BYTE *ptr = (const BYTE *)data;
+ size_t num = 0;
+ while (num < count) {
+ const BYTE *grpEnd = ptr + size;
+ ++num;
+ while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
+ grpEnd += size;
+ ++num;
+ }
+ grp(ctx, ptr, grpEnd);
+ ptr = grpEnd;
+ }
+}
+
+/*-*************************************
+* Cover functions
+***************************************/
+
+/**
+ * Called on each group of positions with the same dmer.
+ * Counts the frequency of each dmer and saves it in the suffix array.
+ * Fills `ctx->dmerAt`.
+ */
+static void COVER_group(COVER_ctx_t *ctx, const void *group,
+ const void *groupEnd) {
+ /* The group consists of all the positions with the same first d bytes. */
+ const U32 *grpPtr = (const U32 *)group;
+ const U32 *grpEnd = (const U32 *)groupEnd;
+ /* The dmerId is how we will reference this dmer.
+ * This allows us to map the whole dmer space to a much smaller space, the
+ * size of the suffix array.
+ */
+ const U32 dmerId = (U32)(grpPtr - ctx->suffix);
+ /* Count the number of samples this dmer shows up in */
+ U32 freq = 0;
+ /* Details */
+ const size_t *curOffsetPtr = ctx->offsets;
+ const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
+ /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
+ * different sample than the last.
+ */
+ size_t curSampleEnd = ctx->offsets[0];
+ for (; grpPtr != grpEnd; ++grpPtr) {
+ /* Save the dmerId for this position so we can get back to it. */
+ ctx->dmerAt[*grpPtr] = dmerId;
+ /* Dictionaries only help for the first reference to the dmer.
+ * After that zstd can reference the match from the previous reference.
+ * So only count each dmer once for each sample it is in.
+ */
+ if (*grpPtr < curSampleEnd) {
+ continue;
+ }
+ freq += 1;
+ /* Binary search to find the end of the sample *grpPtr is in.
+ * In the common case that grpPtr + 1 == grpEnd we can skip the binary
+ * search because the loop is over.
+ */
+ if (grpPtr + 1 != grpEnd) {
+ const size_t *sampleEndPtr =
+ COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
+ curSampleEnd = *sampleEndPtr;
+ curOffsetPtr = sampleEndPtr + 1;
+ }
+ }
+ /* At this point we are never going to look at this segment of the suffix
+ * array again. We take advantage of this fact to save memory.
+ * We store the frequency of the dmer in the first position of the group,
+ * which is dmerId.
+ */
+ ctx->suffix[dmerId] = freq;
+}
+
+
+/**
+ * Selects the best segment in an epoch.
+ * Segments are scored according to the following function:
+ *
+ * Let F(d) be the frequency of dmer d.
+ * Let S_i be the dmer at position i of segment S which has length k.
+ *
+ * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
+ *
* Once the dmer d is in the dictionary we set F(d) = 0.
- */
-static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
- COVER_map_t *activeDmers, U32 begin,
- U32 end,
- ZDICT_cover_params_t parameters) {
- /* Constants */
- const U32 k = parameters.k;
- const U32 d = parameters.d;
- const U32 dmersInK = k - d + 1;
- /* Try each segment (activeSegment) and save the best (bestSegment) */
- COVER_segment_t bestSegment = {0, 0, 0};
- COVER_segment_t activeSegment;
- /* Reset the activeDmers in the segment */
- COVER_map_clear(activeDmers);
- /* The activeSegment starts at the beginning of the epoch. */
- activeSegment.begin = begin;
- activeSegment.end = begin;
- activeSegment.score = 0;
- /* Slide the activeSegment through the whole epoch.
- * Save the best segment in bestSegment.
- */
- while (activeSegment.end < end) {
- /* The dmerId for the dmer at the next position */
- U32 newDmer = ctx->dmerAt[activeSegment.end];
- /* The entry in activeDmers for this dmerId */
- U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
- /* If the dmer isn't already present in the segment add its score. */
- if (*newDmerOcc == 0) {
-      /* The paper suggests using the L-0.5 norm, but experiments show that it
- * doesn't help.
- */
- activeSegment.score += freqs[newDmer];
- }
- /* Add the dmer to the segment */
- activeSegment.end += 1;
- *newDmerOcc += 1;
-
- /* If the window is now too large, drop the first position */
- if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
- U32 delDmer = ctx->dmerAt[activeSegment.begin];
- U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
- activeSegment.begin += 1;
- *delDmerOcc -= 1;
+ */
+static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
+ COVER_map_t *activeDmers, U32 begin,
+ U32 end,
+ ZDICT_cover_params_t parameters) {
+ /* Constants */
+ const U32 k = parameters.k;
+ const U32 d = parameters.d;
+ const U32 dmersInK = k - d + 1;
+ /* Try each segment (activeSegment) and save the best (bestSegment) */
+ COVER_segment_t bestSegment = {0, 0, 0};
+ COVER_segment_t activeSegment;
+ /* Reset the activeDmers in the segment */
+ COVER_map_clear(activeDmers);
+ /* The activeSegment starts at the beginning of the epoch. */
+ activeSegment.begin = begin;
+ activeSegment.end = begin;
+ activeSegment.score = 0;
+ /* Slide the activeSegment through the whole epoch.
+ * Save the best segment in bestSegment.
+ */
+ while (activeSegment.end < end) {
+ /* The dmerId for the dmer at the next position */
+ U32 newDmer = ctx->dmerAt[activeSegment.end];
+ /* The entry in activeDmers for this dmerId */
+ U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
+ /* If the dmer isn't already present in the segment add its score. */
+ if (*newDmerOcc == 0) {
+      /* The paper suggests using the L-0.5 norm, but experiments show that it
+ * doesn't help.
+ */
+ activeSegment.score += freqs[newDmer];
+ }
+ /* Add the dmer to the segment */
+ activeSegment.end += 1;
+ *newDmerOcc += 1;
+
+ /* If the window is now too large, drop the first position */
+ if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
+ U32 delDmer = ctx->dmerAt[activeSegment.begin];
+ U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
+ activeSegment.begin += 1;
+ *delDmerOcc -= 1;
/* If this is the last occurrence of the dmer, subtract its score */
- if (*delDmerOcc == 0) {
- COVER_map_remove(activeDmers, delDmer);
- activeSegment.score -= freqs[delDmer];
- }
- }
-
- /* If this segment is the best so far save it */
- if (activeSegment.score > bestSegment.score) {
- bestSegment = activeSegment;
- }
- }
- {
- /* Trim off the zero frequency head and tail from the segment. */
- U32 newBegin = bestSegment.end;
- U32 newEnd = bestSegment.begin;
- U32 pos;
- for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
- U32 freq = freqs[ctx->dmerAt[pos]];
- if (freq != 0) {
- newBegin = MIN(newBegin, pos);
- newEnd = pos + 1;
- }
- }
- bestSegment.begin = newBegin;
- bestSegment.end = newEnd;
- }
- {
- /* Zero out the frequency of each dmer covered by the chosen segment. */
- U32 pos;
- for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
- freqs[ctx->dmerAt[pos]] = 0;
- }
- }
- return bestSegment;
-}
-
-/**
- * Check the validity of the parameters.
- * Returns non-zero if the parameters are valid and 0 otherwise.
- */
-static int COVER_checkParameters(ZDICT_cover_params_t parameters,
- size_t maxDictSize) {
- /* k and d are required parameters */
- if (parameters.d == 0 || parameters.k == 0) {
- return 0;
- }
- /* k <= maxDictSize */
- if (parameters.k > maxDictSize) {
- return 0;
- }
- /* d <= k */
- if (parameters.d > parameters.k) {
- return 0;
- }
+ if (*delDmerOcc == 0) {
+ COVER_map_remove(activeDmers, delDmer);
+ activeSegment.score -= freqs[delDmer];
+ }
+ }
+
+ /* If this segment is the best so far save it */
+ if (activeSegment.score > bestSegment.score) {
+ bestSegment = activeSegment;
+ }
+ }
+ {
+ /* Trim off the zero frequency head and tail from the segment. */
+ U32 newBegin = bestSegment.end;
+ U32 newEnd = bestSegment.begin;
+ U32 pos;
+ for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+ U32 freq = freqs[ctx->dmerAt[pos]];
+ if (freq != 0) {
+ newBegin = MIN(newBegin, pos);
+ newEnd = pos + 1;
+ }
+ }
+ bestSegment.begin = newBegin;
+ bestSegment.end = newEnd;
+ }
+ {
+ /* Zero out the frequency of each dmer covered by the chosen segment. */
+ U32 pos;
+ for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+ freqs[ctx->dmerAt[pos]] = 0;
+ }
+ }
+ return bestSegment;
+}
+
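
Editor's note: COVER_selectSegment() above is the heart of the COVER heuristic: slide a window of k-d+1 dmer positions across an epoch and score it as the sum of frequencies of the distinct dmers inside. The standalone sketch below restates just that scoring loop, with the open-addressing map replaced by a flat occurrence array (so it assumes a small dmer-id space) and the zero-frequency trimming and freqs zeroing omitted; it is an illustration, not zstd code.

    /* Self-contained restatement of the sliding-window scoring. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { unsigned begin, end, score; } segment_t;

    static segment_t select_segment(const unsigned *dmerAt, const unsigned *freqs,
                                    unsigned begin, unsigned end,
                                    unsigned dmersInK, unsigned nbDmerIds)
    {
        segment_t best = { 0, 0, 0 };
        segment_t active = { begin, begin, 0 };
        unsigned *occ = (unsigned *)calloc(nbDmerIds, sizeof(unsigned));
        if (!occ) return best;
        while (active.end < end) {
            unsigned const newDmer = dmerAt[active.end];
            if (occ[newDmer]++ == 0)                  /* first occurrence in the window */
                active.score += freqs[newDmer];
            active.end += 1;
            if (active.end - active.begin == dmersInK + 1) {   /* window too large */
                unsigned const delDmer = dmerAt[active.begin];
                active.begin += 1;
                if (--occ[delDmer] == 0)              /* last occurrence left the window */
                    active.score -= freqs[delDmer];
            }
            if (active.score > best.score) best = active;
        }
        free(occ);
        return best;
    }

    int main(void)
    {
        unsigned const dmerAt[] = { 0, 1, 0, 2, 2, 1, 3 };
        unsigned const freqs[]  = { 5, 2, 7, 1 };
        segment_t const s = select_segment(dmerAt, freqs, 0, 7, 3, 4);
        printf("best segment [%u,%u) with score %u\n", s.begin, s.end, s.score);
        return 0;
    }

Counting a dmer's frequency only on its first occurrence in the window is what makes the score equal "frequencies of distinct dmers covered", exactly as the original achieves through the occurrence map.
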
+/**
+ * Check the validity of the parameters.
+ * Returns non-zero if the parameters are valid and 0 otherwise.
+ */
+static int COVER_checkParameters(ZDICT_cover_params_t parameters,
+ size_t maxDictSize) {
+ /* k and d are required parameters */
+ if (parameters.d == 0 || parameters.k == 0) {
+ return 0;
+ }
+ /* k <= maxDictSize */
+ if (parameters.k > maxDictSize) {
+ return 0;
+ }
+ /* d <= k */
+ if (parameters.d > parameters.k) {
+ return 0;
+ }
/* 0 < splitPoint <= 1 */
if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){
return 0;
}
- return 1;
-}
-
-/**
- * Clean up a context initialized with `COVER_ctx_init()`.
- */
-static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
- if (!ctx) {
- return;
- }
- if (ctx->suffix) {
- free(ctx->suffix);
- ctx->suffix = NULL;
- }
- if (ctx->freqs) {
- free(ctx->freqs);
- ctx->freqs = NULL;
- }
- if (ctx->dmerAt) {
- free(ctx->dmerAt);
- ctx->dmerAt = NULL;
- }
- if (ctx->offsets) {
- free(ctx->offsets);
- ctx->offsets = NULL;
- }
-}
-
-/**
- * Prepare a context for dictionary building.
- * The context is only dependent on the parameter `d` and can be used multiple
- * times.
+ return 1;
+}
+
+/**
+ * Clean up a context initialized with `COVER_ctx_init()`.
+ */
+static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
+ if (!ctx) {
+ return;
+ }
+ if (ctx->suffix) {
+ free(ctx->suffix);
+ ctx->suffix = NULL;
+ }
+ if (ctx->freqs) {
+ free(ctx->freqs);
+ ctx->freqs = NULL;
+ }
+ if (ctx->dmerAt) {
+ free(ctx->dmerAt);
+ ctx->dmerAt = NULL;
+ }
+ if (ctx->offsets) {
+ free(ctx->offsets);
+ ctx->offsets = NULL;
+ }
+}
+
+/**
+ * Prepare a context for dictionary building.
+ * The context is only dependent on the parameter `d` and can be used multiple
+ * times.
 * Returns 0 on success, or an error code on failure.
- * The context must be destroyed with `COVER_ctx_destroy()`.
- */
+ * The context must be destroyed with `COVER_ctx_destroy()`.
+ */
static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
- const size_t *samplesSizes, unsigned nbSamples,
+ const size_t *samplesSizes, unsigned nbSamples,
unsigned d, double splitPoint) {
- const BYTE *const samples = (const BYTE *)samplesBuffer;
- const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+ const BYTE *const samples = (const BYTE *)samplesBuffer;
+ const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
/* Split samples into testing and training sets */
const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
- /* Checks */
- if (totalSamplesSize < MAX(d, sizeof(U64)) ||
- totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
- DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
+ /* Checks */
+ if (totalSamplesSize < MAX(d, sizeof(U64)) ||
+ totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
+ DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
(unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
return ERROR(srcSize_wrong);
- }
+ }
/* Check if there are at least 5 training samples */
if (nbTrainSamples < 5) {
DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
@@ -573,50 +573,50 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
return ERROR(srcSize_wrong);
}
- /* Zero the context */
- memset(ctx, 0, sizeof(*ctx));
+ /* Zero the context */
+ memset(ctx, 0, sizeof(*ctx));
DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
(unsigned)trainingSamplesSize);
DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
(unsigned)testSamplesSize);
- ctx->samples = samples;
- ctx->samplesSizes = samplesSizes;
- ctx->nbSamples = nbSamples;
+ ctx->samples = samples;
+ ctx->samplesSizes = samplesSizes;
+ ctx->nbSamples = nbSamples;
ctx->nbTrainSamples = nbTrainSamples;
ctx->nbTestSamples = nbTestSamples;
- /* Partial suffix array */
+ /* Partial suffix array */
ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
- ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
- /* Maps index to the dmerID */
- ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
- /* The offsets of each file */
- ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
- if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
- DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
- COVER_ctx_destroy(ctx);
+ ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+ /* Maps index to the dmerID */
+ ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+ /* The offsets of each file */
+ ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
+ if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
+ DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
+ COVER_ctx_destroy(ctx);
return ERROR(memory_allocation);
- }
- ctx->freqs = NULL;
- ctx->d = d;
-
+ }
+ ctx->freqs = NULL;
+ ctx->d = d;
+
/* Fill offsets from the samplesSizes */
- {
- U32 i;
- ctx->offsets[0] = 0;
- for (i = 1; i <= nbSamples; ++i) {
- ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
- }
- }
- DISPLAYLEVEL(2, "Constructing partial suffix array\n");
- {
- /* suffix is a partial suffix array.
- * It only sorts suffixes by their first parameters.d bytes.
- * The sort is stable, so each dmer group is sorted by position in input.
- */
- U32 i;
- for (i = 0; i < ctx->suffixSize; ++i) {
- ctx->suffix[i] = i;
- }
+ {
+ U32 i;
+ ctx->offsets[0] = 0;
+ for (i = 1; i <= nbSamples; ++i) {
+ ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
+ }
+ }
+ DISPLAYLEVEL(2, "Constructing partial suffix array\n");
+ {
+ /* suffix is a partial suffix array.
+ * It only sorts suffixes by their first parameters.d bytes.
+ * The sort is stable, so each dmer group is sorted by position in input.
+ */
+ U32 i;
+ for (i = 0; i < ctx->suffixSize; ++i) {
+ ctx->suffix[i] = i;
+ }
/* qsort doesn't take an opaque pointer, so pass as a global.
* On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
*/
@@ -625,25 +625,25 @@ static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
(ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#else
- qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
- (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+ qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
+ (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#endif
- }
- DISPLAYLEVEL(2, "Computing frequencies\n");
- /* For each dmer group (group of positions with the same first d bytes):
- * 1. For each position we set dmerAt[position] = dmerID. The dmerID is
- * (groupBeginPtr - suffix). This allows us to go from position to
- * dmerID so we can look up values in freq.
- * 2. We calculate how many samples the dmer occurs in and save it in
- * freqs[dmerId].
- */
- COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
- (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
- ctx->freqs = ctx->suffix;
- ctx->suffix = NULL;
+ }
+ DISPLAYLEVEL(2, "Computing frequencies\n");
+ /* For each dmer group (group of positions with the same first d bytes):
+ * 1. For each position we set dmerAt[position] = dmerID. The dmerID is
+ * (groupBeginPtr - suffix). This allows us to go from position to
+ * dmerID so we can look up values in freq.
+ * 2. We calculate how many samples the dmer occurs in and save it in
+ * freqs[dmerId].
+ */
+ COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
+ (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
+ ctx->freqs = ctx->suffix;
+ ctx->suffix = NULL;
return 0;
-}
-
+}
+
void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
{
const double ratio = (double)nbDmers / maxDictSize;
@@ -677,120 +677,120 @@ COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
return epochs;
}
-/**
- * Given the prepared context build the dictionary.
- */
-static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
- COVER_map_t *activeDmers, void *dictBuffer,
- size_t dictBufferCapacity,
- ZDICT_cover_params_t parameters) {
- BYTE *const dict = (BYTE *)dictBuffer;
- size_t tail = dictBufferCapacity;
+/**
+ * Given the prepared context build the dictionary.
+ */
+static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
+ COVER_map_t *activeDmers, void *dictBuffer,
+ size_t dictBufferCapacity,
+ ZDICT_cover_params_t parameters) {
+ BYTE *const dict = (BYTE *)dictBuffer;
+ size_t tail = dictBufferCapacity;
/* Divide the data into epochs. We will select one segment from each epoch. */
const COVER_epoch_info_t epochs = COVER_computeEpochs(
(U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
size_t zeroScoreRun = 0;
- size_t epoch;
+ size_t epoch;
DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
(U32)epochs.num, (U32)epochs.size);
- /* Loop through the epochs until there are no more segments or the dictionary
- * is full.
- */
+ /* Loop through the epochs until there are no more segments or the dictionary
+ * is full.
+ */
for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
const U32 epochBegin = (U32)(epoch * epochs.size);
const U32 epochEnd = epochBegin + epochs.size;
- size_t segmentSize;
- /* Select a segment */
- COVER_segment_t segment = COVER_selectSegment(
- ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
+ size_t segmentSize;
+ /* Select a segment */
+ COVER_segment_t segment = COVER_selectSegment(
+ ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
/* If the segment covers no dmers, then we are out of content.
 * There may be new content in other epochs, so continue for some time.
*/
- if (segment.score == 0) {
+ if (segment.score == 0) {
if (++zeroScoreRun >= maxZeroScoreRun) {
break;
}
continue;
- }
+ }
zeroScoreRun = 0;
- /* Trim the segment if necessary and if it is too small then we are done */
- segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
- if (segmentSize < parameters.d) {
- break;
- }
- /* We fill the dictionary from the back to allow the best segments to be
- * referenced with the smallest offsets.
- */
- tail -= segmentSize;
- memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
- DISPLAYUPDATE(
- 2, "\r%u%% ",
+ /* Trim the segment if necessary and if it is too small then we are done */
+ segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
+ if (segmentSize < parameters.d) {
+ break;
+ }
+ /* We fill the dictionary from the back to allow the best segments to be
+ * referenced with the smallest offsets.
+ */
+ tail -= segmentSize;
+ memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
+ DISPLAYUPDATE(
+ 2, "\r%u%% ",
(unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
- }
- DISPLAYLEVEL(2, "\r%79s\r", "");
- return tail;
-}
-
-ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
- void *dictBuffer, size_t dictBufferCapacity,
- const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
- ZDICT_cover_params_t parameters)
-{
- BYTE* const dict = (BYTE*)dictBuffer;
- COVER_ctx_t ctx;
- COVER_map_t activeDmers;
+ }
+ DISPLAYLEVEL(2, "\r%79s\r", "");
+ return tail;
+}
+
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
+ void *dictBuffer, size_t dictBufferCapacity,
+ const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+ ZDICT_cover_params_t parameters)
+{
+ BYTE* const dict = (BYTE*)dictBuffer;
+ COVER_ctx_t ctx;
+ COVER_map_t activeDmers;
parameters.splitPoint = 1.0;
- /* Initialize global data */
+ /* Initialize global data */
g_displayLevel = (int)parameters.zParams.notificationLevel;
- /* Checks */
- if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
- DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+ /* Checks */
+ if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
+ DISPLAYLEVEL(1, "Cover parameters incorrect\n");
return ERROR(parameter_outOfBound);
- }
- if (nbSamples == 0) {
- DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+ }
+ if (nbSamples == 0) {
+ DISPLAYLEVEL(1, "Cover must have at least one input file\n");
return ERROR(srcSize_wrong);
- }
- if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
- DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
- ZDICT_DICTSIZE_MIN);
- return ERROR(dstSize_tooSmall);
- }
- /* Initialize context and activeDmers */
+ }
+ if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+ DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+ ZDICT_DICTSIZE_MIN);
+ return ERROR(dstSize_tooSmall);
+ }
+ /* Initialize context and activeDmers */
{
size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
parameters.d, parameters.splitPoint);
if (ZSTD_isError(initVal)) {
return initVal;
}
- }
+ }
COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);
- if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
- DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
- COVER_ctx_destroy(&ctx);
+ if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+ DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+ COVER_ctx_destroy(&ctx);
return ERROR(memory_allocation);
- }
-
- DISPLAYLEVEL(2, "Building dictionary\n");
- {
- const size_t tail =
- COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
- dictBufferCapacity, parameters);
- const size_t dictionarySize = ZDICT_finalizeDictionary(
- dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
- samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
- if (!ZSTD_isError(dictionarySize)) {
- DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
+ }
+
+ DISPLAYLEVEL(2, "Building dictionary\n");
+ {
+ const size_t tail =
+ COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
+ dictBufferCapacity, parameters);
+ const size_t dictionarySize = ZDICT_finalizeDictionary(
+ dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+ samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
+ if (!ZSTD_isError(dictionarySize)) {
+ DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
(unsigned)dictionarySize);
- }
- COVER_ctx_destroy(&ctx);
- COVER_map_destroy(&activeDmers);
- return dictionarySize;
- }
-}
-
-
+ }
+ COVER_ctx_destroy(&ctx);
+ COVER_map_destroy(&activeDmers);
+ return dictionarySize;
+ }
+}
+
+
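
Editor's note: COVER_buildDictionary() above fills the buffer from the back, so the earliest-chosen (highest-scoring) segments land closest to the end of the dictionary, where zstd can reference them with the smallest offsets. The sketch below isolates just that tail bookkeeping; fill_from_back, piece_t, and the fixed piece list stand in for the epoch loop and COVER_selectSegment() and are illustrative assumptions.

    /* Sketch of back-to-front dictionary filling. */
    #include <stdio.h>
    #include <string.h>

    typedef struct { const char *ptr; size_t len; } piece_t;

    static size_t fill_from_back(char *dict, size_t capacity,
                                 const piece_t *pieces, size_t nbPieces)
    {
        size_t tail = capacity;       /* everything in [tail, capacity) is filled */
        size_t i;
        for (i = 0; i < nbPieces && tail > 0; i++) {
            size_t const len = pieces[i].len < tail ? pieces[i].len : tail; /* trim to fit */
            tail -= len;
            memcpy(dict + tail, pieces[i].ptr, len);  /* best piece lands nearest the end */
        }
        return tail;   /* content occupies dict + tail .. dict + capacity */
    }

    int main(void)
    {
        char dict[16];
        piece_t const pieces[] = { { "BEST", 4 }, { "good", 4 }, { "ok", 2 } };
        size_t const tail = fill_from_back(dict, sizeof(dict), pieces, 3);
        printf("used %u bytes: %.*s\n", (unsigned)(sizeof(dict) - tail),
               (int)(sizeof(dict) - tail), dict + tail);
        return 0;
    }

The returned tail is then handed to ZDICT_finalizeDictionary() in the original, which prepends the entropy tables and header in the remaining front space.
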
size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
const size_t *samplesSizes, const BYTE *samples,
@@ -845,111 +845,111 @@ _compressCleanup:
}
-/**
- * Initialize the `COVER_best_t`.
- */
+/**
+ * Initialize the `COVER_best_t`.
+ */
void COVER_best_init(COVER_best_t *best) {
- if (best==NULL) return; /* compatible with init on NULL */
- (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
- (void)ZSTD_pthread_cond_init(&best->cond, NULL);
- best->liveJobs = 0;
- best->dict = NULL;
- best->dictSize = 0;
- best->compressedSize = (size_t)-1;
- memset(&best->parameters, 0, sizeof(best->parameters));
-}
-
-/**
- * Wait until liveJobs == 0.
- */
+ if (best==NULL) return; /* compatible with init on NULL */
+ (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
+ (void)ZSTD_pthread_cond_init(&best->cond, NULL);
+ best->liveJobs = 0;
+ best->dict = NULL;
+ best->dictSize = 0;
+ best->compressedSize = (size_t)-1;
+ memset(&best->parameters, 0, sizeof(best->parameters));
+}
+
+/**
+ * Wait until liveJobs == 0.
+ */
void COVER_best_wait(COVER_best_t *best) {
- if (!best) {
- return;
- }
- ZSTD_pthread_mutex_lock(&best->mutex);
- while (best->liveJobs != 0) {
- ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
- }
- ZSTD_pthread_mutex_unlock(&best->mutex);
-}
-
-/**
- * Call COVER_best_wait() and then destroy the COVER_best_t.
- */
+ if (!best) {
+ return;
+ }
+ ZSTD_pthread_mutex_lock(&best->mutex);
+ while (best->liveJobs != 0) {
+ ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
+ }
+ ZSTD_pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Call COVER_best_wait() and then destroy the COVER_best_t.
+ */
void COVER_best_destroy(COVER_best_t *best) {
- if (!best) {
- return;
- }
- COVER_best_wait(best);
- if (best->dict) {
- free(best->dict);
- }
- ZSTD_pthread_mutex_destroy(&best->mutex);
- ZSTD_pthread_cond_destroy(&best->cond);
-}
-
-/**
- * Called when a thread is about to be launched.
- * Increments liveJobs.
- */
+ if (!best) {
+ return;
+ }
+ COVER_best_wait(best);
+ if (best->dict) {
+ free(best->dict);
+ }
+ ZSTD_pthread_mutex_destroy(&best->mutex);
+ ZSTD_pthread_cond_destroy(&best->cond);
+}
+
+/**
+ * Called when a thread is about to be launched.
+ * Increments liveJobs.
+ */
void COVER_best_start(COVER_best_t *best) {
- if (!best) {
- return;
- }
- ZSTD_pthread_mutex_lock(&best->mutex);
- ++best->liveJobs;
- ZSTD_pthread_mutex_unlock(&best->mutex);
-}
-
-/**
- * Called when a thread finishes executing, on both error and success.
- * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
- * If this dictionary is the best so far save it and its parameters.
- */
+ if (!best) {
+ return;
+ }
+ ZSTD_pthread_mutex_lock(&best->mutex);
+ ++best->liveJobs;
+ ZSTD_pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Called when a thread finishes executing, on both error and success.
+ * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
+ * If this dictionary is the best so far save it and its parameters.
+ */
void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
COVER_dictSelection_t selection) {
void* dict = selection.dictContent;
size_t compressedSize = selection.totalCompressedSize;
size_t dictSize = selection.dictSize;
- if (!best) {
- return;
- }
- {
- size_t liveJobs;
- ZSTD_pthread_mutex_lock(&best->mutex);
- --best->liveJobs;
- liveJobs = best->liveJobs;
- /* If the new dictionary is better */
- if (compressedSize < best->compressedSize) {
- /* Allocate space if necessary */
- if (!best->dict || best->dictSize < dictSize) {
- if (best->dict) {
- free(best->dict);
- }
- best->dict = malloc(dictSize);
- if (!best->dict) {
- best->compressedSize = ERROR(GENERIC);
- best->dictSize = 0;
+ if (!best) {
+ return;
+ }
+ {
+ size_t liveJobs;
+ ZSTD_pthread_mutex_lock(&best->mutex);
+ --best->liveJobs;
+ liveJobs = best->liveJobs;
+ /* If the new dictionary is better */
+ if (compressedSize < best->compressedSize) {
+ /* Allocate space if necessary */
+ if (!best->dict || best->dictSize < dictSize) {
+ if (best->dict) {
+ free(best->dict);
+ }
+ best->dict = malloc(dictSize);
+ if (!best->dict) {
+ best->compressedSize = ERROR(GENERIC);
+ best->dictSize = 0;
ZSTD_pthread_cond_signal(&best->cond);
ZSTD_pthread_mutex_unlock(&best->mutex);
- return;
- }
- }
- /* Save the dictionary, parameters, and size */
+ return;
+ }
+ }
+ /* Save the dictionary, parameters, and size */
if (dict) {
memcpy(best->dict, dict, dictSize);
best->dictSize = dictSize;
best->parameters = parameters;
best->compressedSize = compressedSize;
}
- }
- if (liveJobs == 0) {
- ZSTD_pthread_cond_broadcast(&best->cond);
- }
+ }
+ if (liveJobs == 0) {
+ ZSTD_pthread_cond_broadcast(&best->cond);
+ }
ZSTD_pthread_mutex_unlock(&best->mutex);
- }
-}
-
+ }
+}
+
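
Editor's note: COVER_best_start/finish/wait above form a small join counter: a mutex-guarded liveJobs count plus a condition variable that is broadcast when the count reaches zero. The sketch below restates that pattern with raw POSIX threads instead of zstd's ZSTD_pthread_* wrappers; join_counter_t and the four-thread demo are illustrative, not the library's code.

    /* The liveJobs join-counter pattern, stated with plain pthreads. */
    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t mutex;
        pthread_cond_t cond;
        unsigned liveJobs;
    } join_counter_t;

    static void jc_start(join_counter_t *jc)
    {
        pthread_mutex_lock(&jc->mutex);
        ++jc->liveJobs;
        pthread_mutex_unlock(&jc->mutex);
    }

    static void jc_finish(join_counter_t *jc)
    {
        pthread_mutex_lock(&jc->mutex);
        if (--jc->liveJobs == 0)
            pthread_cond_broadcast(&jc->cond);   /* wake every waiter at zero */
        pthread_mutex_unlock(&jc->mutex);
    }

    static void jc_wait(join_counter_t *jc)
    {
        pthread_mutex_lock(&jc->mutex);
        while (jc->liveJobs != 0)                /* loop guards against spurious wakeups */
            pthread_cond_wait(&jc->cond, &jc->mutex);
        pthread_mutex_unlock(&jc->mutex);
    }

    static void *worker(void *opaque)
    {
        join_counter_t *jc = (join_counter_t *)opaque;
        /* ... do work, e.g. try one (d, k) parameter pair ... */
        jc_finish(jc);
        return NULL;
    }

    int main(void)
    {
        join_counter_t jc = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
        pthread_t threads[4];
        int i;
        for (i = 0; i < 4; i++) { jc_start(&jc); pthread_create(&threads[i], NULL, worker, &jc); }
        jc_wait(&jc);        /* returns once all four workers called jc_finish() */
        for (i = 0; i < 4; i++) pthread_join(threads[i], NULL);
        puts("all jobs finished");
        return 0;
    }

Incrementing in the producer (COVER_best_start) rather than in the worker is what prevents the count from transiently hitting zero before all jobs are queued.
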
COVER_dictSelection_t COVER_dictSelectionError(size_t error) {
COVER_dictSelection_t selection = { NULL, 0, error };
return selection;
@@ -1055,128 +1055,128 @@ COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBuffe
}
}
-/**
- * Parameters for COVER_tryParameters().
- */
-typedef struct COVER_tryParameters_data_s {
- const COVER_ctx_t *ctx;
- COVER_best_t *best;
- size_t dictBufferCapacity;
- ZDICT_cover_params_t parameters;
-} COVER_tryParameters_data_t;
-
-/**
+/**
+ * Parameters for COVER_tryParameters().
+ */
+typedef struct COVER_tryParameters_data_s {
+ const COVER_ctx_t *ctx;
+ COVER_best_t *best;
+ size_t dictBufferCapacity;
+ ZDICT_cover_params_t parameters;
+} COVER_tryParameters_data_t;
+
+/**
* Tries a set of parameters and updates the COVER_best_t with the results.
- * This function is thread safe if zstd is compiled with multithreaded support.
- * It takes its parameters as an *OWNING* opaque pointer to support threading.
- */
+ * This function is thread safe if zstd is compiled with multithreaded support.
+ * It takes its parameters as an *OWNING* opaque pointer to support threading.
+ */
static void COVER_tryParameters(void *opaque)
{
- /* Save parameters as local variables */
+ /* Save parameters as local variables */
COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t*)opaque;
- const COVER_ctx_t *const ctx = data->ctx;
- const ZDICT_cover_params_t parameters = data->parameters;
- size_t dictBufferCapacity = data->dictBufferCapacity;
- size_t totalCompressedSize = ERROR(GENERIC);
- /* Allocate space for hash table, dict, and freqs */
- COVER_map_t activeDmers;
+ const COVER_ctx_t *const ctx = data->ctx;
+ const ZDICT_cover_params_t parameters = data->parameters;
+ size_t dictBufferCapacity = data->dictBufferCapacity;
+ size_t totalCompressedSize = ERROR(GENERIC);
+ /* Allocate space for hash table, dict, and freqs */
+ COVER_map_t activeDmers;
BYTE* const dict = (BYTE*)malloc(dictBufferCapacity);
COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32));
- if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
- DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
- goto _cleanup;
- }
- if (!dict || !freqs) {
- DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
- goto _cleanup;
- }
- /* Copy the frequencies because we need to modify them */
- memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
- /* Build the dictionary */
- {
- const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
- dictBufferCapacity, parameters);
+ if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+ DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+ goto _cleanup;
+ }
+ if (!dict || !freqs) {
+ DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
+ goto _cleanup;
+ }
+ /* Copy the frequencies because we need to modify them */
+ memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
+ /* Build the dictionary */
+ {
+ const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
+ dictBufferCapacity, parameters);
selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail,
ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
totalCompressedSize);
if (COVER_dictSelectionIsError(selection)) {
DISPLAYLEVEL(1, "Failed to select dictionary\n");
- goto _cleanup;
- }
- }
-_cleanup:
+ goto _cleanup;
+ }
+ }
+_cleanup:
free(dict);
COVER_best_finish(data->best, parameters, selection);
- free(data);
- COVER_map_destroy(&activeDmers);
+ free(data);
+ COVER_map_destroy(&activeDmers);
COVER_dictSelectionFree(selection);
free(freqs);
-}
-
-ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
+}
+
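
Editor's note: COVER_tryParameters() above takes an *owning* opaque pointer: the caller heap-allocates the argument block and the task frees it, so the same entry point works both queued on a POOL worker and called synchronously. A minimal sketch of that convention follows; try_params_data_t and try_parameters are illustrative names and the pool is left out.

    /* The owning-opaque-pointer task convention, pool omitted. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        unsigned k, d;      /* the parameter pair this task should try */
    } try_params_data_t;

    static void try_parameters(void *opaque)
    {
        try_params_data_t *const data = (try_params_data_t *)opaque;
        printf("trying k=%u d=%u\n", data->k, data->d);
        /* ... build and score a candidate dictionary ... */
        free(data);         /* task owns its arguments: free exactly once, here */
    }

    int main(void)
    {
        unsigned k;
        for (k = 50; k <= 250; k += 100) {
            try_params_data_t *data = (try_params_data_t *)malloc(sizeof(*data));
            if (!data) continue;                 /* skip this pair, as the original does */
            data->k = k;
            data->d = 8;
            /* with a pool: POOL_add(pool, &try_parameters, data); without one: */
            try_parameters(data);                /* ownership passes at the call */
        }
        return 0;
    }

Because ownership transfers at the call, the producer never touches data afterwards, which is why the original can fire a task at the pool and immediately move on to the next (d, k) pair.
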
+ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer,
const size_t* samplesSizes, unsigned nbSamples,
ZDICT_cover_params_t* parameters)
{
- /* constants */
- const unsigned nbThreads = parameters->nbThreads;
+ /* constants */
+ const unsigned nbThreads = parameters->nbThreads;
const double splitPoint =
parameters->splitPoint <= 0.0 ? COVER_DEFAULT_SPLITPOINT : parameters->splitPoint;
- const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
- const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
- const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
- const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
- const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
- const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
- const unsigned kIterations =
- (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+ const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
+ const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
+ const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
+ const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
+ const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
+ const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
+ const unsigned kIterations =
+ (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
const unsigned shrinkDict = 0;
- /* Local variables */
- const int displayLevel = parameters->zParams.notificationLevel;
- unsigned iteration = 1;
- unsigned d;
- unsigned k;
- COVER_best_t best;
- POOL_ctx *pool = NULL;
+ /* Local variables */
+ const int displayLevel = parameters->zParams.notificationLevel;
+ unsigned iteration = 1;
+ unsigned d;
+ unsigned k;
+ COVER_best_t best;
+ POOL_ctx *pool = NULL;
int warned = 0;
-
- /* Checks */
+
+ /* Checks */
if (splitPoint <= 0 || splitPoint > 1) {
LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
return ERROR(parameter_outOfBound);
}
- if (kMinK < kMaxD || kMaxK < kMinK) {
- LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
+ if (kMinK < kMaxD || kMaxK < kMinK) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
return ERROR(parameter_outOfBound);
- }
- if (nbSamples == 0) {
- DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+ }
+ if (nbSamples == 0) {
+ DISPLAYLEVEL(1, "Cover must have at least one input file\n");
return ERROR(srcSize_wrong);
- }
- if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
- DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
- ZDICT_DICTSIZE_MIN);
- return ERROR(dstSize_tooSmall);
- }
- if (nbThreads > 1) {
- pool = POOL_create(nbThreads, 1);
- if (!pool) {
- return ERROR(memory_allocation);
- }
- }
- /* Initialization */
- COVER_best_init(&best);
- /* Turn down global display level to clean up display at level 2 and below */
- g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
- /* Loop through d first because each new value needs a new context */
- LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
- kIterations);
- for (d = kMinD; d <= kMaxD; d += 2) {
- /* Initialize the context for this value of d */
- COVER_ctx_t ctx;
- LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
+ }
+ if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+ DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+ ZDICT_DICTSIZE_MIN);
+ return ERROR(dstSize_tooSmall);
+ }
+ if (nbThreads > 1) {
+ pool = POOL_create(nbThreads, 1);
+ if (!pool) {
+ return ERROR(memory_allocation);
+ }
+ }
+ /* Initialization */
+ COVER_best_init(&best);
+ /* Turn down global display level to clean up display at level 2 and below */
+ g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
+ /* Loop through d first because each new value needs a new context */
+ LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
+ kIterations);
+ for (d = kMinD; d <= kMaxD; d += 2) {
+ /* Initialize the context for this value of d */
+ COVER_ctx_t ctx;
+ LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
{
const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint);
if (ZSTD_isError(initVal)) {
@@ -1185,69 +1185,69 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
POOL_free(pool);
return initVal;
}
- }
+ }
if (!warned) {
COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
warned = 1;
}
- /* Loop through k reusing the same context */
- for (k = kMinK; k <= kMaxK; k += kStepSize) {
- /* Prepare the arguments */
- COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
- sizeof(COVER_tryParameters_data_t));
- LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
- if (!data) {
- LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
- COVER_best_destroy(&best);
- COVER_ctx_destroy(&ctx);
- POOL_free(pool);
+ /* Loop through k reusing the same context */
+ for (k = kMinK; k <= kMaxK; k += kStepSize) {
+ /* Prepare the arguments */
+ COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
+ sizeof(COVER_tryParameters_data_t));
+ LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
+ if (!data) {
+ LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
+ COVER_best_destroy(&best);
+ COVER_ctx_destroy(&ctx);
+ POOL_free(pool);
return ERROR(memory_allocation);
- }
- data->ctx = &ctx;
- data->best = &best;
- data->dictBufferCapacity = dictBufferCapacity;
- data->parameters = *parameters;
- data->parameters.k = k;
- data->parameters.d = d;
+ }
+ data->ctx = &ctx;
+ data->best = &best;
+ data->dictBufferCapacity = dictBufferCapacity;
+ data->parameters = *parameters;
+ data->parameters.k = k;
+ data->parameters.d = d;
data->parameters.splitPoint = splitPoint;
- data->parameters.steps = kSteps;
+ data->parameters.steps = kSteps;
data->parameters.shrinkDict = shrinkDict;
- data->parameters.zParams.notificationLevel = g_displayLevel;
- /* Check the parameters */
- if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
- DISPLAYLEVEL(1, "Cover parameters incorrect\n");
- free(data);
- continue;
- }
- /* Call the function and pass ownership of data to it */
- COVER_best_start(&best);
- if (pool) {
- POOL_add(pool, &COVER_tryParameters, data);
- } else {
- COVER_tryParameters(data);
- }
- /* Print status */
- LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
+ data->parameters.zParams.notificationLevel = g_displayLevel;
+ /* Check the parameters */
+ if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
+ DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+ free(data);
+ continue;
+ }
+ /* Call the function and pass ownership of data to it */
+ COVER_best_start(&best);
+ if (pool) {
+ POOL_add(pool, &COVER_tryParameters, data);
+ } else {
+ COVER_tryParameters(data);
+ }
+ /* Print status */
+ LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
(unsigned)((iteration * 100) / kIterations));
- ++iteration;
- }
- COVER_best_wait(&best);
- COVER_ctx_destroy(&ctx);
- }
- LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
- /* Fill the output buffer and parameters with output of the best parameters */
- {
- const size_t dictSize = best.dictSize;
- if (ZSTD_isError(best.compressedSize)) {
- const size_t compressedSize = best.compressedSize;
- COVER_best_destroy(&best);
- POOL_free(pool);
- return compressedSize;
- }
- *parameters = best.parameters;
- memcpy(dictBuffer, best.dict, dictSize);
- COVER_best_destroy(&best);
- POOL_free(pool);
- return dictSize;
- }
-}
+ ++iteration;
+ }
+ COVER_best_wait(&best);
+ COVER_ctx_destroy(&ctx);
+ }
+ LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
+ /* Fill the output buffer and parameters with output of the best parameters */
+ {
+ const size_t dictSize = best.dictSize;
+ if (ZSTD_isError(best.compressedSize)) {
+ const size_t compressedSize = best.compressedSize;
+ COVER_best_destroy(&best);
+ POOL_free(pool);
+ return compressedSize;
+ }
+ *parameters = best.parameters;
+ memcpy(dictBuffer, best.dict, dictSize);
+ COVER_best_destroy(&best);
+ POOL_free(pool);
+ return dictSize;
+ }
+}
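
The loop restored above is COVER's parameter sweep: d walks {kMinD, kMinD+2, ..., kMaxD}, each d gets a fresh COVER_ctx_t, and k is swept inside it, with COVER_tryParameters jobs either queued on the POOL or run inline, while COVER_best collects the winning dictionary. A minimal caller sketch in C, assuming the experimental zdict.h API (ZDICT_STATIC_LINKING_ONLY) and caller-provided sample buffers; zeroed k/d ask the optimizer to sweep both:

/* Minimal caller sketch; buffer names are illustrative. */
#include <stdio.h>
#include <string.h>
#define ZDICT_STATIC_LINKING_ONLY
#include <zdict.h>

static size_t train_cover(void* dictBuf, size_t dictCap,
                          const void* samples, const size_t* sampleSizes,
                          unsigned nbSamples)
{
    ZDICT_cover_params_t params;
    memset(&params, 0, sizeof(params));   /* k == 0 and d == 0 => sweep both */
    params.nbThreads = 4;                 /* > 1 takes the POOL_add() path above */
    {   size_t const dictSize = ZDICT_optimizeTrainFromBuffer_cover(
                dictBuf, dictCap, samples, sampleSizes, nbSamples, &params);
        if (ZDICT_isError(dictSize)) {
            fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
            return 0;
        }
        fprintf(stderr, "best k=%u, d=%u\n", params.k, params.d);
        return dictSize;
    }
}

On success, params is overwritten with the winning (k, d), exactly as the `*parameters = best.parameters;` line above promises.
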
diff --git a/contrib/libs/zstd/lib/dictBuilder/zdict.c b/contrib/libs/zstd/lib/dictBuilder/zdict.c
index 587df6b861..9a02f360bc 100644
--- a/contrib/libs/zstd/lib/dictBuilder/zdict.c
+++ b/contrib/libs/zstd/lib/dictBuilder/zdict.c
@@ -1,20 +1,20 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
/*-**************************************
* Tuning parameters
****************************************/
-#define MINRATIO 4 /* minimum nb of apparition to be selected in dictionary */
+#define MINRATIO 4 /* minimum nb of appearances for a segment to be selected into the dictionary */
#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
-#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
+#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
/*-**************************************
@@ -102,7 +102,7 @@ const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(error
unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
{
if (dictSize < 8) return 0;
- if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
+ if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
return MEM_readLE32((const char*)dictBuffer + 4);
}
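
For reference, the header ZDICT_getDictID inspects is just 8 bytes: a little-endian dictionary magic followed by a little-endian dictID. A hand-rolled reader, as a sketch (0xEC30A437 is ZSTD_MAGIC_DICTIONARY; the buffer name is illustrative):

#include <stddef.h>
#include <stdint.h>

static uint32_t read_le32(const unsigned char* p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8)
         | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint32_t dict_id_of(const void* dict, size_t size)
{
    const unsigned char* p = (const unsigned char*)dict;
    if (size < 8) return 0;                    /* header incomplete */
    if (read_le32(p) != 0xEC30A437u) return 0; /* not a zstd dictionary */
    return read_le32(p + 4);                   /* dictID, may itself be 0 */
}
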
@@ -130,7 +130,7 @@ size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize)
/*-********************************************************
* Dictionary training functions
**********************************************************/
-static unsigned ZDICT_NbCommonBytes (size_t val)
+static unsigned ZDICT_NbCommonBytes (size_t val)
{
if (MEM_isLittleEndian()) {
if (MEM_64bits()) {
@@ -268,30 +268,30 @@ static dictItem ZDICT_analyzePos(
||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))
||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {
/* skip and mark segment */
- U16 const pattern16 = MEM_read16(b+pos+4);
- U32 u, patternEnd = 6;
- while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;
- if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;
- for (u=1; u<patternEnd; u++)
+ U16 const pattern16 = MEM_read16(b+pos+4);
+ U32 u, patternEnd = 6;
+ while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;
+ if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;
+ for (u=1; u<patternEnd; u++)
doneMarks[pos+u] = 1;
return solution;
}
/* look forward */
- { size_t length;
- do {
- end++;
- length = ZDICT_count(b + pos, b + suffix[end]);
- } while (length >= MINMATCHLENGTH);
- }
+ { size_t length;
+ do {
+ end++;
+ length = ZDICT_count(b + pos, b + suffix[end]);
+ } while (length >= MINMATCHLENGTH);
+ }
/* look backward */
- { size_t length;
- do {
- length = ZDICT_count(b + pos, b + *(suffix+start-1));
- if (length >=MINMATCHLENGTH) start--;
- } while(length >= MINMATCHLENGTH);
- }
+ { size_t length;
+ do {
+ length = ZDICT_count(b + pos, b + *(suffix+start-1));
+ if (length >=MINMATCHLENGTH) start--;
+ } while(length >= MINMATCHLENGTH);
+ }
/* exit if not found a minimum nb of repetitions */
if (end-start < minRatio) {
@@ -347,24 +347,24 @@ static dictItem ZDICT_analyzePos(
memset(lengthList, 0, sizeof(lengthList));
/* look forward */
- { size_t length;
- do {
- end++;
- length = ZDICT_count(b + pos, b + suffix[end]);
- if (length >= LLIMIT) length = LLIMIT-1;
- lengthList[length]++;
- } while (length >=MINMATCHLENGTH);
- }
+ { size_t length;
+ do {
+ end++;
+ length = ZDICT_count(b + pos, b + suffix[end]);
+ if (length >= LLIMIT) length = LLIMIT-1;
+ lengthList[length]++;
+ } while (length >=MINMATCHLENGTH);
+ }
/* look backward */
- { size_t length = MINMATCHLENGTH;
- while ((length >= MINMATCHLENGTH) & (start > 0)) {
- length = ZDICT_count(b + pos, b + suffix[start - 1]);
- if (length >= LLIMIT) length = LLIMIT - 1;
- lengthList[length]++;
- if (length >= MINMATCHLENGTH) start--;
- }
- }
+ { size_t length = MINMATCHLENGTH;
+ while ((length >= MINMATCHLENGTH) & (start > 0)) {
+ length = ZDICT_count(b + pos, b + suffix[start - 1]);
+ if (length >= LLIMIT) length = LLIMIT - 1;
+ lengthList[length]++;
+ if (length >= MINMATCHLENGTH) start--;
+ }
+ }
/* largest useful length */
memset(cumulLength, 0, sizeof(cumulLength));
@@ -398,12 +398,12 @@ static dictItem ZDICT_analyzePos(
/* mark positions done */
{ U32 id;
for (id=start; id<end; id++) {
- U32 p, pEnd, length;
+ U32 p, pEnd, length;
U32 const testedPos = (U32)suffix[id];
if (testedPos == pos)
length = solution.length;
else {
- length = (U32)ZDICT_count(b+pos, b+testedPos);
+ length = (U32)ZDICT_count(b+pos, b+testedPos);
if (length > solution.length) length = solution.length;
}
pEnd = (U32)(testedPos + length);
@@ -415,35 +415,35 @@ static dictItem ZDICT_analyzePos(
}
-static int isIncluded(const void* in, const void* container, size_t length)
-{
- const char* const ip = (const char*) in;
- const char* const into = (const char*) container;
- size_t u;
-
- for (u=0; u<length; u++) { /* works because end of buffer is a noisy guard band */
- if (ip[u] != into[u]) break;
- }
-
- return u==length;
-}
-
-/*! ZDICT_tryMerge() :
+static int isIncluded(const void* in, const void* container, size_t length)
+{
+ const char* const ip = (const char*) in;
+ const char* const into = (const char*) container;
+ size_t u;
+
+ for (u=0; u<length; u++) { /* works because end of buffer is a noisy guard band */
+ if (ip[u] != into[u]) break;
+ }
+
+ return u==length;
+}
+
+/*! ZDICT_tryMerge() :
check if dictItem can be merged, do it if possible
@return : id of destination elt, 0 if not merged
*/
-static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)
+static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)
{
const U32 tableSize = table->pos;
const U32 eltEnd = elt.pos + elt.length;
- const char* const buf = (const char*) buffer;
+ const char* const buf = (const char*) buffer;
/* tail overlap */
U32 u; for (u=1; u<tableSize; u++) {
if (u==eltNbToSkip) continue;
if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) { /* overlap, existing > new */
/* append */
- U32 const addedLength = table[u].pos - elt.pos;
+ U32 const addedLength = table[u].pos - elt.pos;
table[u].length += addedLength;
table[u].pos = elt.pos;
table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */
@@ -459,7 +459,7 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const
/* front overlap */
for (u=1; u<tableSize; u++) {
if (u==eltNbToSkip) continue;
-
+
if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */
/* append */
int const addedLength = (int)eltEnd - (int)(table[u].pos + table[u].length);
@@ -474,27 +474,27 @@ static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const
table[u] = table[u-1], u--;
table[u] = elt;
return u;
- }
-
- if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {
- if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {
- size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );
- table[u].pos = elt.pos;
- table[u].savings += (U32)(elt.savings * addedLength / elt.length);
- table[u].length = MIN(elt.length, table[u].length + 1);
- return u;
- }
- }
- }
-
+ }
+
+ if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {
+ if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {
+ size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );
+ table[u].pos = elt.pos;
+ table[u].savings += (U32)(elt.savings * addedLength / elt.length);
+ table[u].length = MIN(elt.length, table[u].length + 1);
+ return u;
+ }
+ }
+ }
+
return 0;
}
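
The tail-overlap branch of ZDICT_tryMerge above extends an existing entry backward to elt.pos and prorates its savings by the added share. A toy standalone illustration, with a hypothetical struct mirroring (not copied from) dictItem:

#include <stdio.h>

typedef struct { unsigned pos, length, savings; } item; /* hypothetical stand-in */

static void merge_tail_overlap(item* existing, item elt)
{
    unsigned const eltEnd = elt.pos + elt.length;
    if ((existing->pos > elt.pos) && (existing->pos <= eltEnd)) {
        unsigned const added = existing->pos - elt.pos;
        existing->length += added;
        existing->pos = elt.pos;
        existing->savings += elt.savings * added / elt.length; /* rough approx */
    }
}

int main(void)
{
    item table = { 100, 40, 80 };  /* existing segment [100,140) */
    item elt   = {  90, 20, 50 };  /* new segment [90,110) overlaps its head */
    merge_tail_overlap(&table, elt);
    printf("pos=%u len=%u savings=%u\n", table.pos, table.length, table.savings);
    /* prints: pos=90 len=50 savings=105 */
    return 0;
}
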
static void ZDICT_removeDictItem(dictItem* table, U32 id)
{
- /* convention : table[0].pos stores nb of elts */
- U32 const max = table[0].pos;
+ /* convention : table[0].pos stores nb of elts */
+ U32 const max = table[0].pos;
U32 u;
if (!id) return; /* protection, should never happen */
for (u=id; u<max-1; u++)
@@ -503,14 +503,14 @@ static void ZDICT_removeDictItem(dictItem* table, U32 id)
}
-static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)
+static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)
{
/* merge if possible */
- U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);
+ U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);
if (mergeId) {
U32 newMerge = 1;
while (newMerge) {
- newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);
+ newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);
if (newMerge) ZDICT_removeDictItem(table, mergeId);
mergeId = newMerge;
}
@@ -541,7 +541,7 @@ static U32 ZDICT_dictSize(const dictItem* dictList)
}
-static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
+static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
const void* const buffer, size_t bufferSize, /* buffer must end with noisy guard band */
const size_t* fileSizes, unsigned nbFiles,
unsigned minRatio, U32 notificationLevel)
@@ -559,7 +559,7 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
# define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
if (ZDICT_clockSpan(displayClock) > refreshRate) \
{ displayClock = clock(); DISPLAY(__VA_ARGS__); \
- if (notificationLevel>=4) fflush(stderr); } }
+ if (notificationLevel>=4) fflush(stderr); } }
/* init */
DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
@@ -600,7 +600,7 @@ static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
if (doneMarks[cursor]) { cursor++; continue; }
solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);
if (solution.length==0) { cursor++; continue; }
- ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
+ ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
cursor += solution.length;
DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100);
} }
@@ -630,16 +630,16 @@ static void ZDICT_fillNoise(void* buffer, size_t length)
typedef struct
{
ZSTD_CDict* dict; /* dictionary */
- ZSTD_CCtx* zc; /* working context */
- void* workPlace; /* must be ZSTD_BLOCKSIZE_MAX allocated */
+ ZSTD_CCtx* zc; /* working context */
+ void* workPlace; /* must be ZSTD_BLOCKSIZE_MAX allocated */
} EStats_ress_t;
#define MAXREPOFFSET 1024
static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,
unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
- const void* src, size_t srcSize,
- U32 notificationLevel)
+ const void* src, size_t srcSize,
+ U32 notificationLevel)
{
size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params->cParams.windowLog);
size_t cSize;
@@ -649,11 +649,11 @@ static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,
if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }
}
- cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
+ cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }
if (cSize) { /* if == 0; block is not compressible */
- const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
+ const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
/* literals stats */
{ const BYTE* bytePtr;
@@ -715,18 +715,18 @@ static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val,
}
}
-/* ZDICT_flatLit() :
- * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
- * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
- */
+/* ZDICT_flatLit() :
+ * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
+ * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
+ */
static void ZDICT_flatLit(unsigned* countLit)
-{
- int u;
- for (u=1; u<256; u++) countLit[u] = 2;
- countLit[0] = 4;
- countLit[253] = 1;
- countLit[254] = 1;
-}
+{
+ int u;
+ for (u=1; u<256; u++) countLit[u] = 2;
+ countLit[0] = 4;
+ countLit[253] = 1;
+ countLit[254] = 1;
+}
#define OFFCODE_MAX 30 /* only applicable to first block */
static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
@@ -756,12 +756,12 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
BYTE* dstPtr = (BYTE*)dstBuffer;
/* init */
- DEBUGLOG(4, "ZDICT_analyzeEntropy");
- if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */
- for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */
- for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
- for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;
- for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;
+ DEBUGLOG(4, "ZDICT_analyzeEntropy");
+ if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */
+ for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */
+ for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
+ for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;
+ for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;
memset(repOffset, 0, sizeof(repOffset));
repOffset[1] = repOffset[4] = repOffset[8] = 1;
memset(bestRepOffset, 0, sizeof(bestRepOffset));
@@ -777,7 +777,7 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
goto _cleanup;
}
- /* collect stats on all samples */
+ /* collect stats on all samples */
for (u=0; u<nbFiles; u++) {
ZDICT_countEStats(esr, &params,
countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,
@@ -793,20 +793,20 @@ static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
DISPLAYLEVEL(4, "%2u :%7u \n", u, offcodeCount[u]);
} }
- /* analyze, build stats, starting with literals */
- { size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
- if (HUF_isError(maxNbBits)) {
+ /* analyze, build stats, starting with literals */
+ { size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
+ if (HUF_isError(maxNbBits)) {
eSize = maxNbBits;
- DISPLAYLEVEL(1, " HUF_buildCTable error \n");
- goto _cleanup;
- }
- if (maxNbBits==8) { /* not compressible : will fail on HUF_writeCTable() */
- DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n");
- ZDICT_flatLit(countLit); /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */
- maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
- assert(maxNbBits==9);
- }
- huffLog = (U32)maxNbBits;
+ DISPLAYLEVEL(1, " HUF_buildCTable error \n");
+ goto _cleanup;
+ }
+ if (maxNbBits==8) { /* not compressible : will fail on HUF_writeCTable() */
+ DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n");
+ ZDICT_flatLit(countLit); /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */
+ maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
+ assert(maxNbBits==9);
+ }
+ huffLog = (U32)maxNbBits;
}
/* looking for most common first offsets */
@@ -926,47 +926,47 @@ static U32 ZDICT_maxRep(U32 const reps[ZSTD_REP_NUM])
maxRep = MAX(maxRep, reps[r]);
return maxRep;
}
-
-size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
- const void* customDictContent, size_t dictContentSize,
+
+size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+ const void* customDictContent, size_t dictContentSize,
const void* samplesBuffer, const size_t* samplesSizes,
unsigned nbSamples, ZDICT_params_t params)
{
size_t hSize;
-#define HBUFFSIZE 256 /* should prove large enough for all entropy headers */
- BYTE header[HBUFFSIZE];
+#define HBUFFSIZE 256 /* should prove large enough for all entropy headers */
+ BYTE header[HBUFFSIZE];
int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel;
U32 const notificationLevel = params.notificationLevel;
/* The final dictionary content must be at least as large as the largest repcode */
size_t const minContentSize = (size_t)ZDICT_maxRep(repStartValue);
size_t paddingSize;
- /* check conditions */
- DEBUGLOG(4, "ZDICT_finalizeDictionary");
- if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
- if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
-
+ /* check conditions */
+ DEBUGLOG(4, "ZDICT_finalizeDictionary");
+ if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
+ if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
+
/* dictionary header */
- MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);
- { U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
+ MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);
+ { U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
U32 const dictID = params.dictID ? params.dictID : compliantID;
- MEM_writeLE32(header+4, dictID);
+ MEM_writeLE32(header+4, dictID);
}
hSize = 8;
/* entropy tables */
DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
DISPLAYLEVEL(2, "statistics ... \n");
- { size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
- compressionLevel,
- samplesBuffer, samplesSizes, nbSamples,
- customDictContent, dictContentSize,
- notificationLevel);
- if (ZDICT_isError(eSize)) return eSize;
- hSize += eSize;
- }
-
+ { size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
+ compressionLevel,
+ samplesBuffer, samplesSizes, nbSamples,
+ customDictContent, dictContentSize,
+ notificationLevel);
+ if (ZDICT_isError(eSize)) return eSize;
+ hSize += eSize;
+ }
+
/* Shrink the content size if it doesn't fit in the buffer */
if (hSize + dictContentSize > dictBufferCapacity) {
dictContentSize = dictBufferCapacity - hSize;
@@ -1004,23 +1004,23 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
memcpy(outDictHeader, header, hSize);
memset(outDictPadding, 0, paddingSize);
- return dictSize;
- }
-}
-
-
+ return dictSize;
+ }
+}
+
+
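The compliantID computation in ZDICT_finalizeDictionary above (and repeated in ZDICT_addEntropyTablesFromBuffer_advanced below) maps a 64-bit XXH64 hash into [32768, 2^31 - 1]: modulo (2^31 - 32768) yields [0, 2^31 - 32769], and adding 32768 shifts the range above the low dictIDs the format keeps reserved. A quick standalone check with an arbitrary stand-in hash value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t const randomID  = 0x9E3779B97F4A7C15ull; /* stand-in for XXH64() */
    uint32_t const compliant = (uint32_t)(randomID % ((1u << 31) - 32768)) + 32768;
    printf("dictID = %u\n", (unsigned)compliant); /* always in [32768, 2^31-1] */
    return 0;
}
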
static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
ZDICT_params_t params)
-{
+{
int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel;
- U32 const notificationLevel = params.notificationLevel;
- size_t hSize = 8;
-
- /* calculate entropy tables */
- DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
- DISPLAYLEVEL(2, "statistics ... \n");
+ U32 const notificationLevel = params.notificationLevel;
+ size_t hSize = 8;
+
+ /* calculate entropy tables */
+ DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
+ DISPLAYLEVEL(2, "statistics ... \n");
{ size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,
compressionLevel,
samplesBuffer, samplesSizes, nbSamples,
@@ -1030,27 +1030,27 @@ static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
hSize += eSize;
}
- /* add dictionary header (after entropy tables) */
- MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);
- { U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
- U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
- U32 const dictID = params.dictID ? params.dictID : compliantID;
- MEM_writeLE32((char*)dictBuffer+4, dictID);
- }
+ /* add dictionary header (after entropy tables) */
+ MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);
+ { U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
+ U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
+ U32 const dictID = params.dictID ? params.dictID : compliantID;
+ MEM_writeLE32((char*)dictBuffer+4, dictID);
+ }
if (hSize + dictContentSize < dictBufferCapacity)
memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);
return MIN(dictBufferCapacity, hSize+dictContentSize);
}
-/*! ZDICT_trainFromBuffer_unsafe_legacy() :
+/*! ZDICT_trainFromBuffer_unsafe_legacy() :
* Warning : `samplesBuffer` must be followed by noisy guard band !!!
* @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
*/
static size_t ZDICT_trainFromBuffer_unsafe_legacy(
void* dictBuffer, size_t maxDictSize,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
- ZDICT_legacy_params_t params)
+ ZDICT_legacy_params_t params)
{
U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));
dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));
@@ -1059,24 +1059,24 @@ static size_t ZDICT_trainFromBuffer_unsafe_legacy(
size_t const targetDictSize = maxDictSize;
size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
size_t dictSize = 0;
- U32 const notificationLevel = params.zParams.notificationLevel;
+ U32 const notificationLevel = params.zParams.notificationLevel;
/* checks */
if (!dictList) return ERROR(memory_allocation);
- if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */
- if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */
+ if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */
+ if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */
/* init */
ZDICT_initDictItem(dictList);
/* build dictionary */
- ZDICT_trainBuffer_legacy(dictList, dictListSize,
- samplesBuffer, samplesBuffSize,
- samplesSizes, nbSamples,
- minRep, notificationLevel);
+ ZDICT_trainBuffer_legacy(dictList, dictListSize,
+ samplesBuffer, samplesBuffSize,
+ samplesSizes, nbSamples,
+ minRep, notificationLevel);
/* display best matches */
- if (params.zParams.notificationLevel>= 3) {
+ if (params.zParams.notificationLevel>= 3) {
unsigned const nb = MIN(25, dictList[0].pos);
unsigned const dictContentSize = ZDICT_dictSize(dictList);
unsigned u;
@@ -1099,10 +1099,10 @@ static size_t ZDICT_trainFromBuffer_unsafe_legacy(
/* create dictionary */
{ unsigned dictContentSize = ZDICT_dictSize(dictList);
- if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */
- if (dictContentSize < targetDictSize/4) {
+ if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */
+ if (dictContentSize < targetDictSize/4) {
DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);
- if (samplesBuffSize < 10 * targetDictSize)
+ if (samplesBuffSize < 10 * targetDictSize)
DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));
if (minRep > MINRATIO) {
DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
@@ -1115,7 +1115,7 @@ static size_t ZDICT_trainFromBuffer_unsafe_legacy(
while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);
DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
- DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n");
+ DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n");
}
/* limit dictionary size */
@@ -1141,7 +1141,7 @@ static size_t ZDICT_trainFromBuffer_unsafe_legacy(
dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,
samplesBuffer, samplesSizes, nbSamples,
- params.zParams);
+ params.zParams);
}
/* clean up */
@@ -1150,12 +1150,12 @@ static size_t ZDICT_trainFromBuffer_unsafe_legacy(
}
-/* ZDICT_trainFromBuffer_legacy() :
- * issue : samplesBuffer need to be followed by a noisy guard band.
- * work around : duplicate the buffer, and add the noise */
-size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
- const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
- ZDICT_legacy_params_t params)
+/* ZDICT_trainFromBuffer_legacy() :
+ * issue : samplesBuffer needs to be followed by a noisy guard band.
+ * workaround : duplicate the buffer, and append the noise */
+size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_legacy_params_t params)
{
size_t result;
void* newBuff;
@@ -1168,9 +1168,9 @@ size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
memcpy(newBuff, samplesBuffer, sBuffSize);
ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH); /* guard band, for end of buffer condition */
- result =
- ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,
- samplesSizes, nbSamples, params);
+ result =
+ ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,
+ samplesSizes, nbSamples, params);
free(newBuff);
return result;
}
@@ -1180,22 +1180,22 @@ size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
{
ZDICT_fastCover_params_t params;
- DEBUGLOG(3, "ZDICT_trainFromBuffer");
+ DEBUGLOG(3, "ZDICT_trainFromBuffer");
memset(&params, 0, sizeof(params));
- params.d = 8;
- params.steps = 4;
+ params.d = 8;
+ params.steps = 4;
/* Use default level since no compression level information is available */
params.zParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)
params.zParams.notificationLevel = DEBUGLEVEL;
-#endif
+#endif
return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,
- samplesBuffer, samplesSizes, nbSamples,
- &params);
+ samplesBuffer, samplesSizes, nbSamples,
+ &params);
}
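
ZDICT_trainFromBuffer above is the stable entry point: it fills in fastCover defaults (d=8, steps=4, default compression level) and delegates to the optimizer. A minimal usage sketch, assuming the caller packed its samples back-to-back in one buffer and listed their sizes aside (names illustrative):

#include <stdio.h>
#include <zdict.h>

static size_t train_default(void* dictBuf, size_t dictCap,
                            const void* packedSamples,
                            const size_t* sampleSizes, unsigned nbSamples)
{
    size_t const dictSize = ZDICT_trainFromBuffer(dictBuf, dictCap,
                                                  packedSamples,
                                                  sampleSizes, nbSamples);
    if (ZDICT_isError(dictSize)) {
        fprintf(stderr, "ZDICT_trainFromBuffer: %s\n",
                ZDICT_getErrorName(dictSize));
        return 0;
    }
    return dictSize; /* bytes written into dictBuf */
}
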
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
- const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
{
ZDICT_params_t params;
memset(&params, 0, sizeof(params));
diff --git a/contrib/libs/zstd/lib/legacy/zstd_legacy.h b/contrib/libs/zstd/lib/legacy/zstd_legacy.h
index a6f1174b82..cde5ef5c5f 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_legacy.h
+++ b/contrib/libs/zstd/lib/legacy/zstd_legacy.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_LEGACY_H
@@ -22,33 +22,33 @@ extern "C" {
#include "../common/error_private.h" /* ERROR */
#include "../common/zstd_internal.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */
-#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
-# undef ZSTD_LEGACY_SUPPORT
-# define ZSTD_LEGACY_SUPPORT 8
-#endif
-
-#if (ZSTD_LEGACY_SUPPORT <= 1)
-# include "zstd_v01.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 2)
-# include "zstd_v02.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 3)
-# include "zstd_v03.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 4)
-# include "zstd_v04.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
-# include "zstd_v05.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
-# include "zstd_v06.h"
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
-# include "zstd_v07.h"
-#endif
+#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
+# undef ZSTD_LEGACY_SUPPORT
+# define ZSTD_LEGACY_SUPPORT 8
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+# include "zstd_v01.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+# include "zstd_v02.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+# include "zstd_v03.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+# include "zstd_v04.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+# include "zstd_v05.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+# include "zstd_v06.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+# include "zstd_v07.h"
+#endif
+
/** ZSTD_isLegacy() :
@return : > 0 if supported by legacy decoder. 0 otherwise.
return value is the version.
@@ -60,27 +60,27 @@ MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)
magicNumberLE = MEM_readLE32(src);
switch(magicNumberLE)
{
-#if (ZSTD_LEGACY_SUPPORT <= 1)
+#if (ZSTD_LEGACY_SUPPORT <= 1)
case ZSTDv01_magicNumberLE:return 1;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 2)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
case ZSTDv02_magicNumber : return 2;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 3)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
case ZSTDv03_magicNumber : return 3;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 4)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
case ZSTDv04_magicNumber : return 4;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
case ZSTDv05_MAGICNUMBER : return 5;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
case ZSTDv06_MAGICNUMBER : return 6;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
case ZSTDv07_MAGICNUMBER : return 7;
-#endif
+#endif
default : return 0;
}
}
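
ZSTD_isLegacy above maps the frame's leading magic number to a legacy version in 1..7, with 0 meaning "not a legacy frame". A hypothetical front-end routing sketch, assuming zstd.h and this header are both in scope and legacy support was compiled in; ZSTD_decompressLegacy is the helper defined later in this header:

static size_t decompress_any(void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize)
{
    unsigned const legacyVersion = ZSTD_isLegacy(src, srcSize);
    if (legacyVersion) /* 1..7 => a v0.1 .. v0.7 frame */
        return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, NULL, 0);
    return ZSTD_decompress(dst, dstCapacity, src, srcSize); /* current format */
}
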
@@ -90,30 +90,30 @@ MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, s
{
U32 const version = ZSTD_isLegacy(src, srcSize);
if (version < 5) return 0; /* no decompressed size in frame header, or not a legacy format */
-#if (ZSTD_LEGACY_SUPPORT <= 5)
+#if (ZSTD_LEGACY_SUPPORT <= 5)
if (version==5) {
ZSTDv05_parameters fParams;
size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);
if (frResult != 0) return 0;
return fParams.srcSize;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
if (version==6) {
ZSTDv06_frameParams fParams;
size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);
if (frResult != 0) return 0;
return fParams.frameContentSize;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
if (version==7) {
ZSTDv07_frameParams fParams;
size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);
if (frResult != 0) return 0;
return fParams.frameContentSize;
}
-#endif
+#endif
return 0; /* should not be possible */
}
@@ -124,26 +124,26 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
const void* dict,size_t dictSize)
{
U32 const version = ZSTD_isLegacy(src, compressedSize);
- (void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */
+ (void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */
switch(version)
{
-#if (ZSTD_LEGACY_SUPPORT <= 1)
+#if (ZSTD_LEGACY_SUPPORT <= 1)
case 1 :
return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 2)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
case 2 :
return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 3)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
case 3 :
return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 4)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 :
return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 :
{ size_t result;
ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();
@@ -152,8 +152,8 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
ZSTDv05_freeDCtx(zd);
return result;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 :
{ size_t result;
ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();
@@ -162,8 +162,8 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
ZSTDv06_freeDCtx(zd);
return result;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 :
{ size_t result;
ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();
@@ -172,78 +172,78 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
ZSTDv07_freeDCtx(zd);
return result;
}
-#endif
+#endif
default :
return ERROR(prefix_unknown);
}
}
MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size_t srcSize)
-{
+{
ZSTD_frameSizeInfo frameSizeInfo;
U32 const version = ZSTD_isLegacy(src, srcSize);
- switch(version)
- {
-#if (ZSTD_LEGACY_SUPPORT <= 1)
- case 1 :
+ switch(version)
+ {
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+ case 1 :
ZSTDv01_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 2)
- case 2 :
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+ case 2 :
ZSTDv02_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 3)
- case 3 :
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+ case 3 :
ZSTDv03_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 4)
- case 4 :
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+ case 4 :
ZSTDv04_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
- case 5 :
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+ case 5 :
ZSTDv05_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
- case 6 :
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+ case 6 :
ZSTDv06_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
- case 7 :
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+ case 7 :
ZSTDv07_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
-#endif
- default :
+#endif
+ default :
frameSizeInfo.compressedSize = ERROR(prefix_unknown);
frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
break;
- }
+ }
if (!ZSTD_isError(frameSizeInfo.compressedSize) && frameSizeInfo.compressedSize > srcSize) {
frameSizeInfo.compressedSize = ERROR(srcSize_wrong);
frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
}
return frameSizeInfo;
-}
+}
MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t srcSize)
{
@@ -259,20 +259,20 @@ MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
case 1 :
case 2 :
case 3 :
- (void)legacyContext;
+ (void)legacyContext;
return ERROR(version_unsupported);
-#if (ZSTD_LEGACY_SUPPORT <= 4)
+#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);
-#endif
+#endif
}
}
@@ -280,7 +280,7 @@ MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion,
const void* dict, size_t dictSize)
{
- DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion);
+ DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion);
if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion);
switch(newVersion)
{
@@ -288,9 +288,9 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
case 1 :
case 2 :
case 3 :
- (void)dict; (void)dictSize;
+ (void)dict; (void)dictSize;
return 0;
-#if (ZSTD_LEGACY_SUPPORT <= 4)
+#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 :
{
ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;
@@ -300,8 +300,8 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
*legacyContext = dctx;
return 0;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 :
{
ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;
@@ -310,8 +310,8 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
*legacyContext = dctx;
return 0;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 :
{
ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;
@@ -320,8 +320,8 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
*legacyContext = dctx;
return 0;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 :
{
ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;
@@ -330,7 +330,7 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
*legacyContext = dctx;
return 0;
}
-#endif
+#endif
}
}
@@ -339,16 +339,16 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
- DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version);
+ DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version);
switch(version)
{
default :
case 1 :
case 2 :
case 3 :
- (void)legacyContext; (void)output; (void)input;
+ (void)legacyContext; (void)output; (void)input;
return ERROR(version_unsupported);
-#if (ZSTD_LEGACY_SUPPORT <= 4)
+#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 :
{
ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;
@@ -361,8 +361,8 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
input->pos += readSize;
return hintSize;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 5)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 :
{
ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;
@@ -375,8 +375,8 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
input->pos += readSize;
return hintSize;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 6)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 :
{
ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;
@@ -389,8 +389,8 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
input->pos += readSize;
return hintSize;
}
-#endif
-#if (ZSTD_LEGACY_SUPPORT <= 7)
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 :
{
ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;
@@ -403,7 +403,7 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
input->pos += readSize;
return hintSize;
}
-#endif
+#endif
}
}
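
A hedged pull-loop sketch over the streaming shim above: ZSTD_decompressLegacyStream advances input->pos/output->pos and returns a next-size hint, 0 once the frame is done. The progress check guards against spinning when the caller must refill input or drain output; setup via ZSTD_initLegacyStream is assumed:

static size_t drain_legacy(void* legacyContext, unsigned version,
                           ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    size_t hint = 1;
    while (hint != 0) {
        size_t const inBefore  = input->pos;
        size_t const outBefore = output->pos;
        hint = ZSTD_decompressLegacyStream(legacyContext, version, output, input);
        if (ZSTD_isError(hint)) return hint; /* e.g. version_unsupported */
        if (input->pos == inBefore && output->pos == outBefore)
            break; /* no progress: refill input or drain output, then retry */
    }
    return hint; /* 0 => frame fully decoded */
}
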
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v01.c b/contrib/libs/zstd/lib/legacy/zstd_v01.c
index 23caaef564..43955848db 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v01.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v01.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -336,7 +336,7 @@ typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
/****************************************************************
* Internal functions
****************************************************************/
-FORCE_INLINE unsigned FSE_highbit32 (U32 val)
+FORCE_INLINE unsigned FSE_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r;
@@ -1435,7 +1435,7 @@ typedef struct ZSTD_Cctx_s
#else
U32 hashTable[HASH_TABLESIZE];
#endif
- BYTE buffer[WORKPLACESIZE];
+ BYTE buffer[WORKPLACESIZE];
} cctxi_t;
@@ -2004,58 +2004,58 @@ size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
+{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
- const BYTE* ip = (const BYTE*)src;
- size_t remainingSize = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- U32 magicNumber;
- blockProperties_t blockProperties;
+ U32 magicNumber;
+ blockProperties_t blockProperties;
- /* Frame Header */
+ /* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
- magicNumber = ZSTD_readBE32(src);
+ magicNumber = ZSTD_readBE32(src);
if (magicNumber != ZSTD_magicNumber) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
- ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
-
- /* Loop on each block */
- while (1)
- {
- size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties);
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv01_isError(blockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, blockSize);
return;
}
-
- ip += ZSTD_blockHeaderSize;
- remainingSize -= ZSTD_blockHeaderSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
if (blockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
-
- if (blockSize == 0) break; /* bt_end */
-
- ip += blockSize;
- remainingSize -= blockSize;
+
+ if (blockSize == 0) break; /* bt_end */
+
+ ip += blockSize;
+ remainingSize -= blockSize;
nbBlocks++;
- }
-
+ }
+
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
-}
-
+}
+
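The dBound computed above is deliberately coarse: each v0.1 block regenerates at most BLOCKSIZE bytes, so nbBlocks * BLOCKSIZE is a safe allocation bound for the destination buffer. Illustrative arithmetic, assuming the v0.1 block size of 128 KB:

#include <stdio.h>

int main(void)
{
    unsigned long long const nbBlocks  = 3;
    unsigned long long const BLOCKSIZE = 128 * 1024; /* assumed v0.1 block size */
    printf("decompressed bound = %llu bytes\n", nbBlocks * BLOCKSIZE);
    /* prints 393216 : allocate at least this much before decoding */
    return 0;
}
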
/*******************************
* Streaming Decompression API
*******************************/
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v01.h b/contrib/libs/zstd/lib/legacy/zstd_v01.h
index f777eb6e4c..0f8c05d5e1 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v01.h
+++ b/contrib/libs/zstd/lib/legacy/zstd_v01.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V01_H_28739879432
@@ -42,13 +42,13 @@ size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
or an error code if it fails (which can be tested using ZSTDv01_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
+
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
-/**
+/**
ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
*/
unsigned ZSTDv01_isError(size_t code);
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v02.c b/contrib/libs/zstd/lib/legacy/zstd_v02.c
index 2f473a7573..21d2e61673 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v02.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v02.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -347,7 +347,7 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/****************************************************************
* Helper functions
****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r;
@@ -463,8 +463,8 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
- return BIT_DStream_overflow;
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BIT_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
{
@@ -1273,8 +1273,8 @@ static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsi
else
{
bitCount -= (int)(8 * (iend - 4 - ip));
- ip = iend - 4;
- }
+ ip = iend - 4;
+ }
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
}
}
@@ -1979,7 +1979,7 @@ static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
- /* Build rankVal */
+ /* Build rankVal */
{
const U32 minBits = tableLog+1 - maxW;
U32 nextRankVal = 0;
@@ -2313,7 +2313,7 @@ static size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
- /* Build rankVal */
+ /* Build rankVal */
{
const U32 minBits = tableLog+1 - maxW;
U32 nextRankVal = 0;
@@ -2892,14 +2892,14 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
- if (litSize > srcSize-3) return ERROR(corruption_detected);
- memcpy(dctx->litBuffer, istart, litSize);
- dctx->litPtr = dctx->litBuffer;
- dctx->litSize = litSize;
- memset(dctx->litBuffer + dctx->litSize, 0, 8);
- return litSize+3;
- }
- /* direct reference into compressed stream */
+ if (litSize > srcSize-3) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, 8);
+ return litSize+3;
+ }
+ /* direct reference into compressed stream */
dctx->litPtr = istart+3;
dctx->litSize = litSize;
return litSize+3;
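
The branch restored above exists because the literal copy loop may overread by up to 8 bytes (wildcopy): literals sitting too close to the end of the source are staged into dctx->litBuffer with an 8-byte zero pad, while everything else is referenced in place in the compressed stream. The guard-band idea in isolation, as a hypothetical standalone sketch:

#include <string.h>

#define WILDCOPY_OVERLENGTH 8 /* assumed overread margin of the copy loop */

static void stage_literals(unsigned char* litBuffer, /* >= litSize + 8 bytes */
                           const unsigned char* src, size_t litSize)
{
    memcpy(litBuffer, src, litSize);
    memset(litBuffer + litSize, 0, WILDCOPY_OVERLENGTH); /* overreads land here */
}
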
@@ -3327,58 +3327,58 @@ static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, siz
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
+{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
- const BYTE* ip = (const BYTE*)src;
- size_t remainingSize = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- U32 magicNumber;
- blockProperties_t blockProperties;
-
- /* Frame Header */
+ U32 magicNumber;
+ blockProperties_t blockProperties;
+
+ /* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
- magicNumber = MEM_readLE32(src);
+ magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
- ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
-
- /* Loop on each block */
- while (1)
- {
- size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
-
- ip += ZSTD_blockHeaderSize;
- remainingSize -= ZSTD_blockHeaderSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
-
- if (cBlockSize == 0) break; /* bt_end */
-
- ip += cBlockSize;
- remainingSize -= cBlockSize;
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
nbBlocks++;
- }
-
+ }
+
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
-}
-
+}
+
/*******************************
* Streaming Decompression API
*******************************/
@@ -3483,36 +3483,36 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi
unsigned ZSTDv02_isError(size_t code)
{
- return ZSTD_isError(code);
+ return ZSTD_isError(code);
}
size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize)
{
- return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
+ return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
}
ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
{
- return (ZSTDv02_Dctx*)ZSTD_createDCtx();
+ return (ZSTDv02_Dctx*)ZSTD_createDCtx();
}
size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx)
{
- return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
+ return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
}
size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx)
{
- return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
+ return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
}
size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx)
{
- return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
+ return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
}
size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
- return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
+ return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
}
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v02.h b/contrib/libs/zstd/lib/legacy/zstd_v02.h
index 1b371953b7..d5e35b0c40 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v02.h
+++ b/contrib/libs/zstd/lib/legacy/zstd_v02.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V02_H_4174539423
@@ -42,13 +42,13 @@ size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
or an error code if it fails (which can be tested using ZSTDv01_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
+
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
-/**
+/**
ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
*/
unsigned ZSTDv02_isError(size_t code);
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v03.c b/contrib/libs/zstd/lib/legacy/zstd_v03.c
index 6625f4df1c..50ac225538 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v03.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v03.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -350,7 +350,7 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/****************************************************************
* Helper functions
****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r;
@@ -465,8 +465,8 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
- return BIT_DStream_overflow;
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BIT_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
{
@@ -1274,8 +1274,8 @@ static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsi
else
{
bitCount -= (int)(8 * (iend - 4 - ip));
- ip = iend - 4;
- }
+ ip = iend - 4;
+ }
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
}
}
@@ -1976,7 +1976,7 @@ static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
- /* Build rankVal */
+ /* Build rankVal */
{
const U32 minBits = tableLog+1 - maxW;
U32 nextRankVal = 0;
@@ -2533,14 +2533,14 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
- if (litSize > srcSize-3) return ERROR(corruption_detected);
- memcpy(dctx->litBuffer, istart, litSize);
- dctx->litPtr = dctx->litBuffer;
- dctx->litSize = litSize;
- memset(dctx->litBuffer + dctx->litSize, 0, 8);
- return litSize+3;
- }
- /* direct reference into compressed stream */
+ if (litSize > srcSize-3) return ERROR(corruption_detected);
+ memcpy(dctx->litBuffer, istart, litSize);
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ memset(dctx->litBuffer + dctx->litSize, 0, 8);
+ return litSize+3;
+ }
+ /* direct reference into compressed stream */
dctx->litPtr = istart+3;
dctx->litSize = litSize;
return litSize+3;
@@ -2968,59 +2968,59 @@ static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, siz
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
MEM_STATIC void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
+{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
- const BYTE* ip = (const BYTE*)src;
- size_t remainingSize = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- U32 magicNumber;
- blockProperties_t blockProperties;
+ U32 magicNumber;
+ blockProperties_t blockProperties;
- /* Frame Header */
+ /* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
- magicNumber = MEM_readLE32(src);
+ magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
- ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
-
- /* Loop on each block */
- while (1)
- {
- size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
-
- ip += ZSTD_blockHeaderSize;
- remainingSize -= ZSTD_blockHeaderSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
-
- if (cBlockSize == 0) break; /* bt_end */
-
- ip += cBlockSize;
- remainingSize -= cBlockSize;
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
nbBlocks++;
- }
-
+ }
+
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
-}
-
-
+}
+
+
/*******************************
* Streaming Decompression API
*******************************/
@@ -3125,36 +3125,36 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi
unsigned ZSTDv03_isError(size_t code)
{
- return ZSTD_isError(code);
+ return ZSTD_isError(code);
}
size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize)
{
- return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
+ return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
}
ZSTDv03_Dctx* ZSTDv03_createDCtx(void)
{
- return (ZSTDv03_Dctx*)ZSTD_createDCtx();
+ return (ZSTDv03_Dctx*)ZSTD_createDCtx();
}
size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx)
{
- return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
+ return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
}
size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx)
{
- return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
+ return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
}
size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx)
{
- return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
+ return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
}
size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
- return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
+ return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
}
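The six wrappers above expose the old bufferless streaming contract: ask the decoder how many input bytes it wants next, then feed exactly that many. A hedged sketch of that loop, assuming the usual contract (a return of 0 from nextSrcSizeToDecompress ends the frame) and illustrative buffer names:

    ZSTDv03_Dctx* const dctx = ZSTDv03_createDCtx();
    const char* ip = (const char*)src;          /* assumed: src/srcSize, dst/dstCapacity */
    char* op = (char*)dst;
    char* const oend = (char*)dst + dstCapacity;
    size_t needed;

    ZSTDv03_resetDCtx(dctx);
    while ((needed = ZSTDv03_nextSrcSizeToDecompress(dctx)) != 0) {
        /* real code would also check that `needed` bytes remain in the input */
        size_t const produced = ZSTDv03_decompressContinue(dctx, op, (size_t)(oend - op), ip, needed);
        if (ZSTDv03_isError(produced)) break;   /* corrupted input */
        ip += needed;                           /* decoder consumed exactly `needed` bytes */
        op += produced;                         /* may be 0 for header chunks */
    }
    ZSTDv03_freeDCtx(dctx);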
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v03.h b/contrib/libs/zstd/lib/legacy/zstd_v03.h
index 7a00d4304b..1e4377b63b 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v03.h
+++ b/contrib/libs/zstd/lib/legacy/zstd_v03.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V03_H_298734209782
@@ -42,13 +42,13 @@ size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
                    or an error code if it fails (which can be tested using ZSTDv03_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
+
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
- /**
+ /**
ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
*/
unsigned ZSTDv03_isError(size_t code);
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v04.c b/contrib/libs/zstd/lib/legacy/zstd_v04.c
index 8d305c7eae..605c931fdc 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v04.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v04.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -75,15 +75,15 @@ extern "C" {
#endif
-/*-*************************************
-* Debug
-***************************************/
+/*-*************************************
+* Debug
+***************************************/
#include "../common/debug.h"
#ifndef assert
# define assert(condition) ((void)0)
-#endif
-
-
+#endif
+
+
/****************************************************************
* Memory I/O
*****************************************************************/
@@ -621,7 +621,7 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/****************************************************************
* Helper functions
****************************************************************/
-MEM_STATIC unsigned BIT_highbit32 (U32 val)
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r;
@@ -676,13 +676,13 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
- case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
- case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
- case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
- case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
- case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
- case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
- default: break;
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
+ default: break;
}
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
@@ -730,8 +730,8 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
- return BIT_DStream_overflow;
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BIT_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
{
@@ -1251,8 +1251,8 @@ static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsi
else
{
bitCount -= (int)(8 * (iend - 4 - ip));
- ip = iend - 4;
- }
+ ip = iend - 4;
+ }
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
}
}
@@ -2576,7 +2576,7 @@ static size_t ZSTD_decodeFrameHeader_Part2(ZSTD_DCtx* zc, const void* src, size_
size_t result;
if (srcSize != zc->headerSize) return ERROR(srcSize_wrong);
result = ZSTD_getFrameParams(&(zc->params), src, srcSize);
- if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
+ if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
return result;
}
@@ -2815,18 +2815,18 @@ static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
/* Literal length */
litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
prevOffset = litLength ? seq->offset : seqState->prevOffset;
- if (litLength == MaxLL) {
+ if (litLength == MaxLL) {
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) litLength += add;
else if (dumps + 3 <= de) {
litLength = MEM_readLE24(dumps);
dumps += 3;
}
- if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
/* Offset */
- { static const U32 offsetPrefix[MaxOff+1] = {
+ { static const U32 offsetPrefix[MaxOff+1] = {
1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
@@ -2843,14 +2843,14 @@ static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
/* MatchLength */
matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
- if (matchLength == MaxML) {
+ if (matchLength == MaxML) {
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) matchLength += add;
else if (dumps + 3 <= de){
matchLength = MEM_readLE24(dumps);
dumps += 3;
}
- if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
matchLength += MINMATCH;
@@ -2914,7 +2914,7 @@ static size_t ZSTD_execSequence(BYTE* op,
/* Requirement: op <= oend_8 */
/* match within prefix */
- if (sequence.offset < 8) {
+ if (sequence.offset < 8) {
/* close range match, overlap */
const int sub2 = dec64table[sequence.offset];
op[0] = match[0];
@@ -2924,7 +2924,7 @@ static size_t ZSTD_execSequence(BYTE* op,
match += dec32table[sequence.offset];
ZSTD_copy4(op+4, match);
match -= sub2;
- } else {
+ } else {
ZSTD_copy8(op, match);
}
op += 8; match += 8;
@@ -3135,19 +3135,19 @@ static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
+{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
- const BYTE* ip = (const BYTE*)src;
- size_t remainingSize = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- blockProperties_t blockProperties;
+ blockProperties_t blockProperties;
- /* Frame Header */
+ /* Frame Header */
if (srcSize < ZSTD_frameHeaderSize_min) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
@@ -3156,35 +3156,35 @@ void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cS
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
- ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;
-
- /* Loop on each block */
- while (1)
- {
- size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
-
- ip += ZSTD_blockHeaderSize;
- remainingSize -= ZSTD_blockHeaderSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
-
- if (cBlockSize == 0) break; /* bt_end */
-
- ip += cBlockSize;
- remainingSize -= cBlockSize;
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
nbBlocks++;
- }
-
+ }
+
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
-}
-
+}
+
/* ******************************
* Streaming Decompression API
********************************/
@@ -3429,14 +3429,14 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs
char* const oend = ostart + *maxDstSizePtr;
U32 notDone = 1;
- DEBUGLOG(5, "ZBUFF_decompressContinue");
+ DEBUGLOG(5, "ZBUFF_decompressContinue");
while (notDone)
{
switch(zbc->stage)
{
case ZBUFFds_init :
- DEBUGLOG(5, "ZBUFF_decompressContinue: stage==ZBUFFds_init => ERROR(init_missing)");
+ DEBUGLOG(5, "ZBUFF_decompressContinue: stage==ZBUFFds_init => ERROR(init_missing)");
return ERROR(init_missing);
case ZBUFFds_readHeader :
@@ -3498,7 +3498,7 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs
break;
}
zbc->stage = ZBUFFds_read;
- /* fall-through */
+ /* fall-through */
case ZBUFFds_read:
{
size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
@@ -3524,7 +3524,7 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs
if (ip==iend) { notDone = 0; break; } /* no more input */
zbc->stage = ZBUFFds_load;
}
- /* fall-through */
+ /* fall-through */
case ZBUFFds_load:
{
size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
@@ -3544,10 +3544,10 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs
if (!decodedSize) { zbc->stage = ZBUFFds_read; break; } /* this was just a header */
zbc->outEnd = zbc->outStart + decodedSize;
zbc->stage = ZBUFFds_flush;
- /* ZBUFFds_flush follows */
+ /* ZBUFFds_flush follows */
}
}
- /* fall-through */
+ /* fall-through */
case ZBUFFds_flush:
{
size_t toFlushSize = zbc->outEnd - zbc->outStart;
@@ -3631,7 +3631,7 @@ size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSi
ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void) { return ZBUFF_createDCtx(); }
-size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx) { return ZBUFF_freeDCtx(dctx); }
+size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx) { return ZBUFF_freeDCtx(dctx); }
size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx) { return ZBUFF_decompressInit(dctx); }
size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* src, size_t srcSize)
@@ -3639,7 +3639,7 @@ size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* src, s
size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
{
- DEBUGLOG(5, "ZBUFFv04_decompressContinue");
+ DEBUGLOG(5, "ZBUFFv04_decompressContinue");
return ZBUFF_decompressContinue(dctx, dst, maxDstSizePtr, src, srcSizePtr);
}
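ZBUFFv04 is the buffered counterpart: `*srcSizePtr` and `*maxDstSizePtr` are in/out parameters (capacity in, bytes consumed/produced out), and the return value is a hint for the next input size, reaching 0 once the frame is done. Those semantics follow the ZBUFF convention and are assumed here, not restated in this hunk; a minimal sketch with illustrative names:

    ZBUFFv04_DCtx* const zbd = ZBUFFv04_createDCtx();
    const char* ip = (const char*)src;                       /* assumed buffers */
    const char* const iend = (const char*)src + srcSize;
    char* op = (char*)dst;
    char* const oend = (char*)dst + dstCapacity;

    ZBUFFv04_decompressInit(zbd);
    while (ip < iend) {
        size_t inSize  = (size_t)(iend - ip);
        size_t outSize = (size_t)(oend - op);
        size_t const hint = ZBUFFv04_decompressContinue(zbd, op, &outSize, ip, &inSize);
        if (ZSTDv04_isError(hint)) break;   /* ZBUFF codes follow the same size_t error convention */
        ip += inSize;                       /* inSize now holds bytes actually consumed */
        op += outSize;                      /* outSize now holds bytes actually produced */
        if (hint == 0) break;               /* frame fully decoded */
    }
    ZBUFFv04_freeDCtx(zbd);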
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v04.h b/contrib/libs/zstd/lib/legacy/zstd_v04.h
index 66b97ab8e6..688c131574 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v04.h
+++ b/contrib/libs/zstd/lib/legacy/zstd_v04.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V04_H_91868324769238
@@ -42,13 +42,13 @@ size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
                    or an error code if it fails (which can be tested using ZSTDv04_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
-
+
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
-/**
+/**
ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
*/
unsigned ZSTDv04_isError(size_t code);
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v05.c b/contrib/libs/zstd/lib/legacy/zstd_v05.c
index 795dfb410c..700e33efaf 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v05.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v05.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -750,7 +750,7 @@ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits);
/*-**************************************************************
* Helper functions
****************************************************************/
-MEM_STATIC unsigned BITv05_highbit32 (U32 val)
+MEM_STATIC unsigned BITv05_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r;
@@ -802,13 +802,13 @@ MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuff
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
- case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
- case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
- case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
- case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
- case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
- case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
- default: break;
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
+ default: break;
}
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
@@ -856,8 +856,8 @@ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits)
MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD)
{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
- return BITv05_DStream_overflow;
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BITv05_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
bitD->ptr -= bitD->bitsConsumed >> 3;
@@ -2822,7 +2822,7 @@ static size_t ZSTDv05_decodeFrameHeader_Part2(ZSTDv05_DCtx* zc, const void* src,
if (srcSize != zc->headerSize)
return ERROR(srcSize_wrong);
result = ZSTDv05_getFrameParams(&(zc->params), src, srcSize);
- if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
+ if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
return result;
}
@@ -3162,7 +3162,7 @@ static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)
}
litLength>>=1;
}
- if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
/* Offset */
@@ -3199,7 +3199,7 @@ static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)
}
matchLength >>= 1;
}
- if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
+ if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
matchLength += MINMATCH;
@@ -3522,19 +3522,19 @@ size_t ZSTDv05_decompress(void* dst, size_t maxDstSize, const void* src, size_t
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
+{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
- const BYTE* ip = (const BYTE*)src;
- size_t remainingSize = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- blockProperties_t blockProperties;
+ blockProperties_t blockProperties;
- /* Frame Header */
+ /* Frame Header */
if (srcSize < ZSTDv05_frameHeaderSize_min) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
@@ -3543,35 +3543,35 @@ void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cS
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
- ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min;
-
- /* Loop on each block */
- while (1)
- {
- size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties);
+ ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min;
+
+ /* Loop on each block */
+ while (1)
+ {
+ size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv05_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
-
- ip += ZSTDv05_blockHeaderSize;
- remainingSize -= ZSTDv05_blockHeaderSize;
+
+ ip += ZSTDv05_blockHeaderSize;
+ remainingSize -= ZSTDv05_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
-
- if (cBlockSize == 0) break; /* bt_end */
-
- ip += cBlockSize;
- remainingSize -= cBlockSize;
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
nbBlocks++;
- }
-
+ }
+
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
-}
-
+}
+
/* ******************************
* Streaming Decompression API
********************************/
@@ -3916,7 +3916,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
zbc->stage = ZBUFFv05ds_decodeHeader;
break;
}
- /* fall-through */
+ /* fall-through */
case ZBUFFv05ds_loadHeader:
/* complete header from src */
{
@@ -3934,7 +3934,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
}
/* zbc->stage = ZBUFFv05ds_decodeHeader; break; */ /* useless : stage follows */
}
- /* fall-through */
+ /* fall-through */
case ZBUFFv05ds_decodeHeader:
/* apply header to create / resize buffers */
{
@@ -3961,7 +3961,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
break;
}
zbc->stage = ZBUFFv05ds_read;
- /* fall-through */
+ /* fall-through */
case ZBUFFv05ds_read:
{
size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
@@ -3985,7 +3985,7 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
if (ip==iend) { notDone = 0; break; } /* no more input */
zbc->stage = ZBUFFv05ds_load;
}
- /* fall-through */
+ /* fall-through */
case ZBUFFv05ds_load:
{
size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
@@ -4006,9 +4006,9 @@ size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDst
zbc->outEnd = zbc->outStart + decodedSize;
zbc->stage = ZBUFFv05ds_flush;
/* break; */ /* ZBUFFv05ds_flush follows */
- }
- }
- /* fall-through */
+ }
+ }
+ /* fall-through */
case ZBUFFv05ds_flush:
{
size_t toFlushSize = zbc->outEnd - zbc->outStart;
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v05.h b/contrib/libs/zstd/lib/legacy/zstd_v05.h
index bd423bfc1b..ad45c53255 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v05.h
+++ b/contrib/libs/zstd/lib/legacy/zstd_v05.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDv05_H
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v06.c b/contrib/libs/zstd/lib/legacy/zstd_v06.c
index ead213c484..3239144a8b 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v06.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v06.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -190,7 +190,7 @@ MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_ulong(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap32(in);
#else
return ((in << 24) & 0xff000000 ) |
@@ -204,7 +204,7 @@ MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_uint64(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap64(in);
#else
return ((in << 56) & 0xff00000000000000ULL) |
@@ -854,7 +854,7 @@ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, unsigned nbBits);
/*-**************************************************************
* Internal functions
****************************************************************/
-MEM_STATIC unsigned BITv06_highbit32 ( U32 val)
+MEM_STATIC unsigned BITv06_highbit32 ( U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r;
@@ -903,13 +903,13 @@ MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuff
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
- case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
- case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
- case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
- case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
- case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
- case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
- default: break;
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
+ default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */
@@ -958,8 +958,8 @@ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits)
MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD)
{
- if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
- return BITv06_DStream_overflow;
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
+ return BITv06_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
bitD->ptr -= bitD->bitsConsumed >> 3;
@@ -3010,7 +3010,7 @@ size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src,
static size_t ZSTDv06_decodeFrameHeader(ZSTDv06_DCtx* zc, const void* src, size_t srcSize)
{
size_t const result = ZSTDv06_getFrameParams(&(zc->fParams), src, srcSize);
- if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupported);
+ if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupported);
return result;
}
@@ -3659,19 +3659,19 @@ size_t ZSTDv06_decompress(void* dst, size_t dstCapacity, const void* src, size_t
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
+{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
- const BYTE* ip = (const BYTE*)src;
- size_t remainingSize = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- blockProperties_t blockProperties = { bt_compressed, 0 };
+ blockProperties_t blockProperties = { bt_compressed, 0 };
- /* Frame Header */
+ /* Frame Header */
{ size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, srcSize);
if (ZSTDv06_isError(frameHeaderSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
@@ -3685,35 +3685,35 @@ void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cS
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
- ip += frameHeaderSize; remainingSize -= frameHeaderSize;
- }
-
- /* Loop on each block */
- while (1) {
- size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties);
+ ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv06_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
-
- ip += ZSTDv06_blockHeaderSize;
- remainingSize -= ZSTDv06_blockHeaderSize;
+
+ ip += ZSTDv06_blockHeaderSize;
+ remainingSize -= ZSTDv06_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
-
- if (cBlockSize == 0) break; /* bt_end */
-
- ip += cBlockSize;
- remainingSize -= cBlockSize;
+
+ if (cBlockSize == 0) break; /* bt_end */
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
nbBlocks++;
- }
-
+ }
+
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * ZSTDv06_BLOCKSIZE_MAX;
-}
-
+}
+
/*_******************************
* Streaming Decompression API
********************************/
@@ -3742,7 +3742,7 @@ size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapac
return 0;
}
dctx->expected = 0; /* not necessary to copy more */
- /* fall-through */
+ /* fall-through */
case ZSTDds_decodeFrameHeader:
{ size_t result;
memcpy(dctx->headerBuffer + ZSTDv06_frameHeaderSize_min, src, dctx->expected);
@@ -4063,7 +4063,7 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
zbd->inBuff = (char*)malloc(blockSize);
if (zbd->inBuff == NULL) return ERROR(memory_allocation);
}
- { size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize + WILDCOPY_OVERLENGTH * 2;
+ { size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize + WILDCOPY_OVERLENGTH * 2;
if (zbd->outBuffSize < neededOutSize) {
free(zbd->outBuff);
zbd->outBuffSize = neededOutSize;
@@ -4071,7 +4071,7 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
if (zbd->outBuff == NULL) return ERROR(memory_allocation);
} } }
zbd->stage = ZBUFFds_read;
- /* fall-through */
+ /* fall-through */
case ZBUFFds_read:
{ size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
if (neededInSize==0) { /* end of frame */
@@ -4093,7 +4093,7 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
if (ip==iend) { notDone = 0; break; } /* no more input */
zbd->stage = ZBUFFds_load;
}
- /* fall-through */
+ /* fall-through */
case ZBUFFds_load:
{ size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */
@@ -4114,9 +4114,9 @@ size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
zbd->outEnd = zbd->outStart + decodedSize;
zbd->stage = ZBUFFds_flush;
/* break; */ /* ZBUFFds_flush follows */
- }
- }
- /* fall-through */
+ }
+ }
+ /* fall-through */
case ZBUFFds_flush:
{ size_t const toFlushSize = zbd->outEnd - zbd->outStart;
size_t const flushedSize = ZBUFFv06_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v06.h b/contrib/libs/zstd/lib/legacy/zstd_v06.h
index 9e32b76e08..802326e6f0 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v06.h
+++ b/contrib/libs/zstd/lib/legacy/zstd_v06.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDv06_H
@@ -42,7 +42,7 @@ extern "C" {
ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
-/**
+/**
ZSTDv06_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.6.x format
srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
cSize (output parameter) : the number of bytes that would be read to decompress this frame
@@ -51,7 +51,7 @@ ZSTDv06_findFrameSizeInfoLegacy() : get the source length and decompressed bound
or ZSTD_CONTENTSIZE_ERROR if an error occurs
note : assumes `cSize` and `dBound` are _not_ NULL.
-*/
+*/
void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
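A hedged sketch of the intended use: advance through a buffer that may hold several concatenated v0.6 frames, using `cSize` as the stride and `dBound` to bound any output allocation. The error check goes through ZSTDv06_isError(), declared elsewhere in this header; buffer names are illustrative:

    const char* ip = (const char*)input;    /* assumed: input/inputSize */
    size_t remaining = inputSize;

    while (remaining > 0) {
        size_t cSize;
        unsigned long long dBound;
        ZSTDv06_findFrameSizeInfoLegacy(ip, remaining, &cSize, &dBound);
        if (ZSTDv06_isError(cSize)) break;  /* on failure, cSize doubles as the error code */
        /* a buffer of dBound bytes is large enough to decompress this frame */
        ip += cSize;
        remaining -= cSize;
    }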
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v07.c b/contrib/libs/zstd/lib/legacy/zstd_v07.c
index 189f6ede69..25d234fe0c 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v07.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v07.c
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
@@ -14,14 +14,14 @@
#include <string.h> /* memcpy */
#include <stdlib.h> /* malloc, free, qsort */
-#ifndef XXH_STATIC_LINKING_ONLY
-# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
-#endif
+#ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
+#endif
#include <contrib/libs/xxhash/xxhash.h> /* XXH64_* */
#include "zstd_v07.h"
-#define FSEv07_STATIC_LINKING_ONLY /* FSEv07_MIN_TABLELOG */
-#define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */
+#define FSEv07_STATIC_LINKING_ONLY /* FSEv07_MIN_TABLELOG */
+#define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */
#define ZSTDv07_STATIC_LINKING_ONLY
#include "../common/error_private.h"
@@ -349,7 +349,7 @@ MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_ulong(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap32(in);
#else
return ((in << 24) & 0xff000000 ) |
@@ -363,7 +363,7 @@ MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_uint64(in);
-#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap64(in);
#else
return ((in << 56) & 0xff00000000000000ULL) |
@@ -524,7 +524,7 @@ MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, unsigned nbBits);
/*-**************************************************************
* Internal functions
****************************************************************/
-MEM_STATIC unsigned BITv07_highbit32 (U32 val)
+MEM_STATIC unsigned BITv07_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r;
@@ -571,13 +571,13 @@ MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuff
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
- case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
- case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
- case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
- case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
- case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
- case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
- default: break;
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
+ default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
@@ -3904,24 +3904,24 @@ size_t ZSTDv07_decompress(void* dst, size_t dstCapacity, const void* src, size_t
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
-{
+{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
- const BYTE* ip = (const BYTE*)src;
- size_t remainingSize = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ size_t remainingSize = srcSize;
size_t nbBlocks = 0;
- /* check */
+ /* check */
if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
-
- /* Frame Header */
+
+ /* Frame Header */
{ size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, srcSize);
if (ZSTDv07_isError(frameHeaderSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
@@ -3935,37 +3935,37 @@ void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cS
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
- ip += frameHeaderSize; remainingSize -= frameHeaderSize;
- }
-
- /* Loop on each block */
- while (1) {
- blockProperties_t blockProperties;
- size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties);
+ ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv07_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
-
- ip += ZSTDv07_blockHeaderSize;
- remainingSize -= ZSTDv07_blockHeaderSize;
-
- if (blockProperties.blockType == bt_end) break;
-
+
+ ip += ZSTDv07_blockHeaderSize;
+ remainingSize -= ZSTDv07_blockHeaderSize;
+
+ if (blockProperties.blockType == bt_end) break;
+
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
-
- ip += cBlockSize;
- remainingSize -= cBlockSize;
+
+ ip += cBlockSize;
+ remainingSize -= cBlockSize;
nbBlocks++;
- }
-
+ }
+
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * ZSTDv07_BLOCKSIZE_ABSOLUTEMAX;
-}
-
+}
+
/*_******************************
* Streaming Decompression API
********************************/
@@ -4007,7 +4007,7 @@ size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapac
return 0;
}
dctx->expected = 0; /* not necessary to copy more */
- /* fall-through */
+ /* fall-through */
case ZSTDds_decodeFrameHeader:
{ size_t result;
memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
@@ -4131,9 +4131,9 @@ static size_t ZSTDv07_loadEntropy(ZSTDv07_DCtx* dctx, const void* const dict, si
}
if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
- dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
- dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
- dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+ dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+ dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+ dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
dictPtr += 12;
dctx->litEntropy = dctx->fseEntropy = 1;
@@ -4447,7 +4447,7 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
zbd->inBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, blockSize);
if (zbd->inBuff == NULL) return ERROR(memory_allocation);
}
- { size_t const neededOutSize = zbd->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
+ { size_t const neededOutSize = zbd->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
if (zbd->outBuffSize < neededOutSize) {
zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
zbd->outBuffSize = neededOutSize;
@@ -4456,7 +4456,7 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
} } }
zbd->stage = ZBUFFds_read;
/* pass-through */
- /* fall-through */
+ /* fall-through */
case ZBUFFds_read:
{ size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
if (neededInSize==0) { /* end of frame */
@@ -4479,7 +4479,7 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
if (ip==iend) { notDone = 0; break; } /* no more input */
zbd->stage = ZBUFFds_load;
}
- /* fall-through */
+ /* fall-through */
case ZBUFFds_load:
{ size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */
@@ -4500,11 +4500,11 @@ size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
if (!decodedSize && !isSkipFrame) { zbd->stage = ZBUFFds_read; break; } /* this was just a header */
zbd->outEnd = zbd->outStart + decodedSize;
zbd->stage = ZBUFFds_flush;
- /* break; */
- /* pass-through */
- }
- }
- /* fall-through */
+ /* break; */
+ /* pass-through */
+ }
+ }
+ /* fall-through */
case ZBUFFds_flush:
{ size_t const toFlushSize = zbd->outEnd - zbd->outStart;
size_t const flushedSize = ZBUFFv07_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v07.h b/contrib/libs/zstd/lib/legacy/zstd_v07.h
index bc35cfa6a3..1b60da5ced 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v07.h
+++ b/contrib/libs/zstd/lib/legacy/zstd_v07.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDv07_H_235446
@@ -49,7 +49,7 @@ unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);
ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
-/**
+/**
ZSTDv07_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.7.x format
srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
cSize (output parameter) : the number of bytes that would be read to decompress this frame
@@ -58,10 +58,10 @@ ZSTDv07_findFrameSizeInfoLegacy() : get the source length and decompressed bound
or ZSTD_CONTENTSIZE_ERROR if an error occurs
note : assumes `cSize` and `dBound` are _not_ NULL.
-*/
+*/
void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
-
+
/*====== Helper functions ======*/
ZSTDLIBv07_API unsigned ZSTDv07_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code); /*!< provides readable string from an error code */
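The two helper functions above pair with any of this header's size_t-returning entry points; a minimal sketch using ZSTDv07_decompress() as declared earlier in this hunk, with `dst`/`src` and their sizes assumed to exist:

    #include <stdio.h>

    size_t const r = ZSTDv07_decompress(dst, dstCapacity, src, srcSize);
    if (ZSTDv07_isError(r))
        fprintf(stderr, "legacy v0.7 decompression failed: %s\n", ZSTDv07_getErrorName(r));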
diff --git a/contrib/libs/zstd/lib/zdict.h b/contrib/libs/zstd/lib/zdict.h
index f1e139a40d..07c3ca5ba8 100644
--- a/contrib/libs/zstd/lib/zdict.h
+++ b/contrib/libs/zstd/lib/zdict.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef DICTBUILDER_H_001
@@ -20,20 +20,20 @@ extern "C" {
#include <stddef.h> /* size_t */
-/* ===== ZDICTLIB_API : control library symbols visibility ===== */
-#ifndef ZDICTLIB_VISIBILITY
-# if defined(__GNUC__) && (__GNUC__ >= 4)
-# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
-# else
-# define ZDICTLIB_VISIBILITY
-# endif
-#endif
-#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
-#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required, but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
+/* ===== ZDICTLIB_API : control library symbols visibility ===== */
+#ifndef ZDICTLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define ZDICTLIB_VISIBILITY
+# endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required, but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
#else
-# define ZDICTLIB_API ZDICTLIB_VISIBILITY
+# define ZDICTLIB_API ZDICTLIB_VISIBILITY
#endif
/*******************************************************************************
@@ -176,26 +176,26 @@ extern "C" {
******************************************************************************/
-/*! ZDICT_trainFromBuffer():
- * Train a dictionary from an array of samples.
+/*! ZDICT_trainFromBuffer():
+ * Train a dictionary from an array of samples.
* Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,
* f=20, and accel=1.
- * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
- * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
- * The resulting dictionary will be saved into `dictBuffer`.
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- * or an error code, which can be tested with ZDICT_isError().
+ * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ * The resulting dictionary will be saved into `dictBuffer`.
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ * or an error code, which can be tested with ZDICT_isError().
* Note: Dictionary training will fail if there are not enough samples to construct a
* dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).
* If dictionary training fails, you should use zstd without a dictionary, as the dictionary
 * would've been ineffective anyway. If you believe your samples would benefit from a dictionary
* please open an issue with details, and we can look into it.
* Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.
- * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
- *        It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`.
- *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
- *        It's recommended that the total size of all samples be roughly 100x the target dictionary size.
- */
+ * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
+ *        It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`.
+ *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
+ *        It's recommended that the total size of all samples be roughly 100x the target dictionary size.
+ */
ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer,
const size_t* samplesSizes, unsigned nbSamples);
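
As an illustration of the prototype above: a minimal sketch of dictionary training, assuming the samples are already concatenated into one flat buffer with a matching size array; the wrapper name and the fallback policy are hypothetical.

    #include <stdio.h>
    #include <zdict.h>

    /* Returns the dictionary size, or 0 to signal "compress without a dictionary". */
    static size_t train_dict(void* dictBuffer, size_t dictCapacity,
                             const void* samples, const size_t* sampleSizes,
                             unsigned nbSamples)
    {
        size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
                                                      samples, sampleSizes, nbSamples);
        if (ZDICT_isError(dictSize)) {
            fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
            return 0;   /* per the note above: fall back to dictionary-less zstd */
        }
        return dictSize;   /* <= dictCapacity */
    }
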
@@ -275,20 +275,20 @@ ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
/* Deprecated: Remove in v1.6.0 */
#define ZDICT_CONTENTSIZE_MIN 128
-/*! ZDICT_cover_params_t:
- * k and d are the only required parameters.
- * For others, value 0 means default.
- */
-typedef struct {
- unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
- unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
+/*! ZDICT_cover_params_t:
+ * k and d are the only required parameters.
+ * For others, value 0 means default.
+ */
+typedef struct {
+ unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
+ unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */
- unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+ unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */
unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */
unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */
- ZDICT_params_t zParams;
-} ZDICT_cover_params_t;
+ ZDICT_params_t zParams;
+} ZDICT_cover_params_t;
typedef struct {
unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
@@ -303,48 +303,48 @@ typedef struct {
ZDICT_params_t zParams;
} ZDICT_fastCover_params_t;
-
-/*! ZDICT_trainFromBuffer_cover():
- * Train a dictionary from an array of samples using the COVER algorithm.
- * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
- * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
- * The resulting dictionary will be saved into `dictBuffer`.
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- * or an error code, which can be tested with ZDICT_isError().
+
+/*! ZDICT_trainFromBuffer_cover():
+ * Train a dictionary from an array of samples using the COVER algorithm.
+ * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ * The resulting dictionary will be saved into `dictBuffer`.
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ * or an error code, which can be tested with ZDICT_isError().
* See ZDICT_trainFromBuffer() for details on failure modes.
- * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
- * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
- * It's possible to select a smaller or larger size, simply by specifying `dictBufferCapacity`.
- * In general, it's recommended to provide a few thousand samples, though this can vary a lot.
- * It's recommended that the total size of all samples be about 100x the target size of the dictionary.
- */
-ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
- void *dictBuffer, size_t dictBufferCapacity,
- const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
- ZDICT_cover_params_t parameters);
-
-/*! ZDICT_optimizeTrainFromBuffer_cover():
- * The same requirements as above hold for all the parameters except `parameters`.
- * This function tries many parameter combinations and picks the best parameters.
- * `*parameters` is filled with the best parameters found,
- * dictionary constructed with those parameters is stored in `dictBuffer`.
- *
- * All of the parameters d, k, steps are optional.
+ * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
+ * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
+ * It's possible to select a smaller or larger size, simply by specifying `dictBufferCapacity`.
+ * In general, it's recommended to provide a few thousand samples, though this can vary a lot.
+ * It's recommended that the total size of all samples be about 100x the target size of the dictionary.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
+ void *dictBuffer, size_t dictBufferCapacity,
+ const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+ ZDICT_cover_params_t parameters);
+
+/*! ZDICT_optimizeTrainFromBuffer_cover():
+ * The same requirements as above hold for all the parameters except `parameters`.
+ * This function tries many parameter combinations and picks the best parameters.
+ * `*parameters` is filled with the best parameters found,
+ * dictionary constructed with those parameters is stored in `dictBuffer`.
+ *
+ * All of the parameters d, k, steps are optional.
* If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}.
- * If steps is zero, it uses its default value (40).
+ * If steps is zero, it uses its default value (40).
 * If k is non-zero then we don't check multiple values of k; otherwise we check `steps` candidate values of k in [50, 2000].
- *
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ *
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
* or an error code, which can be tested with ZDICT_isError().
* On success `*parameters` contains the parameters selected.
* See ZDICT_trainFromBuffer() for details on failure modes.
- * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus about 5 more bytes per input byte for each thread.
- */
-ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
- void* dictBuffer, size_t dictBufferCapacity,
- const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
- ZDICT_cover_params_t* parameters);
-
+ * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus about 5 more bytes per input byte for each thread.
+ */
+ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
+ void* dictBuffer, size_t dictBufferCapacity,
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+ ZDICT_cover_params_t* parameters);
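
A sketch of the parameter search described above, assuming zeroed fields mean "search / default" as documented; the wrapper name is hypothetical.

    #include <string.h>
    #include <zdict.h>

    static size_t train_dict_cover_optimized(void* dictBuffer, size_t dictCapacity,
                                             const void* samples, const size_t* sampleSizes,
                                             unsigned nbSamples)
    {
        ZDICT_cover_params_t params;
        memset(&params, 0, sizeof(params));   /* d, k, steps == 0 : let the library search */
        params.nbThreads = 1;                 /* single-threaded optimization */
        size_t const dictSize = ZDICT_optimizeTrainFromBuffer_cover(
                dictBuffer, dictCapacity, samples, sampleSizes, nbSamples, &params);
        /* on success, params.k and params.d now hold the selected values */
        return dictSize;   /* test with ZDICT_isError() */
    }
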
+
/*! ZDICT_trainFromBuffer_fastCover():
* Train a dictionary from an array of samples using a modified version of COVER algorithm.
* Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
@@ -389,58 +389,58 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,
const size_t* samplesSizes, unsigned nbSamples,
ZDICT_fastCover_params_t* parameters);
-typedef struct {
- unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */
- ZDICT_params_t zParams;
-} ZDICT_legacy_params_t;
-
-/*! ZDICT_trainFromBuffer_legacy():
- * Train a dictionary from an array of samples.
- * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
- * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
- * The resulting dictionary will be saved into `dictBuffer`.
- * `parameters` is optional and can be provided with values set to 0 to mean "default".
- * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
- * or an error code, which can be tested with ZDICT_isError().
+typedef struct {
+ unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */
+ ZDICT_params_t zParams;
+} ZDICT_legacy_params_t;
+
+/*! ZDICT_trainFromBuffer_legacy():
+ * Train a dictionary from an array of samples.
+ * Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ * The resulting dictionary will be saved into `dictBuffer`.
+ * `parameters` is optional and can be provided with values set to 0 to mean "default".
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ * or an error code, which can be tested with ZDICT_isError().
* See ZDICT_trainFromBuffer() for details on failure modes.
- * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
- * It's possible to select a smaller or larger size, simply by specifying `dictBufferCapacity`.
- * In general, it's recommended to provide a few thousand samples, though this can vary a lot.
- * It's recommended that the total size of all samples be about 100x the target size of the dictionary.
- * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
- */
-ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
+ * Tips: In general, a reasonable dictionary has a size of ~ 100 KB.
+ * It's possible to select a smaller or larger size, simply by specifying `dictBufferCapacity`.
+ * In general, it's recommended to provide a few thousand samples, though this can vary a lot.
+ * It's recommended that the total size of all samples be about 100x the target size of the dictionary.
+ * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
void* dictBuffer, size_t dictBufferCapacity,
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
- ZDICT_legacy_params_t parameters);
-
-
-/* Deprecation warnings */
-/* It is generally possible to disable deprecation warnings from the compiler,
- for example with -Wno-deprecated-declarations for gcc
- or _CRT_SECURE_NO_WARNINGS in Visual Studio.
- Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
-#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
-# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */
-#else
-# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
-# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API
+ ZDICT_legacy_params_t parameters);
+
+
+/* Deprecation warnings */
+/* It is generally possible to disable deprecation warnings from the compiler,
+ for example with -Wno-deprecated-declarations for gcc
+ or _CRT_SECURE_NO_WARNINGS in Visual Studio.
+ Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
+#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */
+#else
+# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API
# elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405)
-# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
-# elif (ZDICT_GCC_VERSION >= 301)
-# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
-# elif defined(_MSC_VER)
-# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
-# else
-# pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
-# define ZDICT_DEPRECATED(message) ZDICTLIB_API
-# endif
-#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */
-
-ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
+# elif (ZDICT_GCC_VERSION >= 301)
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
+# elif defined(_MSC_VER)
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
+# else
+# pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
+# define ZDICT_DEPRECATED(message) ZDICTLIB_API
+# endif
+#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */
+
+ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
- const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
+ const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
#endif /* ZDICT_STATIC_LINKING_ONLY */
diff --git a/contrib/libs/zstd/lib/zstd.h b/contrib/libs/zstd/lib/zstd.h
index a88ae7bf8e..8d25f23bfe 100644
--- a/contrib/libs/zstd/lib/zstd.h
+++ b/contrib/libs/zstd/lib/zstd.h
@@ -2,10 +2,10 @@
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#if defined (__cplusplus)
extern "C" {
@@ -24,12 +24,12 @@ extern "C" {
# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
# define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default")))
# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden")))
-# else
+# else
# define ZSTDLIB_VISIBLE
# define ZSTDLIB_HIDDEN
-# endif
-#endif
-#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBLE
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBLE /* It isn't required, but it allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
@@ -54,7 +54,7 @@ extern "C" {
Compression can be done in:
- a single step (described as Simple API)
- - a single step, reusing a context (described as Explicit context)
+ - a single step, reusing a context (described as Explicit context)
- unbounded multiple steps (described as Streaming compression)
The compression ratio achievable on small data can be highly improved using
@@ -75,8 +75,8 @@ extern "C" {
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 5
#define ZSTD_VERSION_RELEASE 2
-#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
-
+#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+
/*! ZSTD_versionNumber() :
* Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
ZSTDLIB_API unsigned ZSTD_versionNumber(void);
@@ -115,55 +115,55 @@ ZSTDLIB_API const char* ZSTD_versionString(void);
* Simple API
***************************************/
/*! ZSTD_compress() :
- * Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
- * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
- * @return : compressed size written into `dst` (<= `dstCapacity`),
- * or an error code if it fails (which can be tested using ZSTD_isError()). */
+ * Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
+ * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * @return : compressed size written into `dst` (<= `dstCapacity`),
+ * or an error code if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
/*! ZSTD_decompress() :
- * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
- * `dstCapacity` is an upper bound of originalSize to regenerate.
- * If the user cannot infer an upper bound, it's better to use streaming mode to decompress data.
- * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
- * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ * `dstCapacity` is an upper bound of originalSize to regenerate.
+ * If the user cannot infer an upper bound, it's better to use streaming mode to decompress data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
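
The two prototypes above combine into a one-shot round trip. A self-contained sketch, assuming ZSTD_compressBound() (declared further down in this header) to size the destination buffer; the payload and compression level are arbitrary.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    int main(void)
    {
        const char src[] = "hypothetical payload, repeated payload, repeated payload";
        size_t const srcSize = sizeof(src);

        size_t const bound = ZSTD_compressBound(srcSize);   /* worst-case compressed size */
        void* const dst = malloc(bound);
        if (dst == NULL) return 1;

        size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, 3 /* level */);
        if (ZSTD_isError(cSize)) {
            fprintf(stderr, "compress: %s\n", ZSTD_getErrorName(cSize));
            free(dst); return 1;
        }

        char back[sizeof(src)];
        size_t const dSize = ZSTD_decompress(back, sizeof(back), dst, cSize);
        free(dst);
        if (ZSTD_isError(dSize) || dSize != srcSize || memcmp(back, src, srcSize) != 0) {
            fprintf(stderr, "round trip failed\n");
            return 1;
        }
        return 0;
    }
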
/*! ZSTD_getFrameContentSize() : requires v1.3.0+
- * `src` should point to the start of a ZSTD encoded frame.
- * `srcSize` must be at least as large as the frame header.
- * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
* @return : - decompressed size of `src` frame content, if known
- * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
- * note 1 : a 0 return value means the frame is valid but "empty".
- * note 2 : decompressed size is an optional field; it may not be present, typically in streaming mode.
- * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
- * In which case, it's necessary to use streaming mode to decompress data.
- * Optionally, application can rely on some implicit limit,
- * as ZSTD_decompress() only needs an upper bound of decompressed size.
- * (For example, data could be necessarily cut into blocks <= 16 KB).
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ * note 2 : decompressed size is an optional field; it may not be present, typically in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * Optionally, application can rely on some implicit limit,
+ * as ZSTD_decompress() only needs an upper bound of decompressed size.
+ * (For example, data could be necessarily cut into blocks <= 16 KB).
* note 3 : decompressed size is always present when compression is completed using single-pass functions,
* such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
- * note 4 : decompressed size can be very large (64-bit value),
- * potentially larger than what the local system can handle as a single memory segment.
- * In which case, it's necessary to use streaming mode to decompress data.
- * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
- * Always ensure return value fits within application's authorized limits.
- * Each application can set its own limits.
- * note 6 : This function replaces ZSTD_getDecompressedSize() */
-#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
-#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
-ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
-
+ * note 4 : decompressed size can be very large (64-bit value),
+ * potentially larger than what the local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
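
A sketch of the checks the notes above call for; `appLimit` is a hypothetical application-defined cap enforcing note 5, and the wrapper name is illustrative only.

    #include <stdlib.h>
    #include <zstd.h>

    static void* decompress_frame(const void* src, size_t srcSize,
                                  size_t appLimit, size_t* outSize)
    {
        unsigned long long const contentSize = ZSTD_getFrameContentSize(src, srcSize);
        if (contentSize == ZSTD_CONTENTSIZE_ERROR) return NULL;    /* not a valid frame */
        if (contentSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;  /* use streaming mode instead */
        if (contentSize > appLimit) return NULL;                   /* untrusted size : enforce own limit */

        void* const dst = malloc(contentSize ? (size_t)contentSize : 1);
        if (dst == NULL) return NULL;
        size_t const dSize = ZSTD_decompress(dst, (size_t)contentSize, src, srcSize);
        if (ZSTD_isError(dSize)) { free(dst); return NULL; }
        *outSize = dSize;
        return dst;
    }
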
+
/*! ZSTD_getDecompressedSize() :
- * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
- * Both functions work the same way, but ZSTD_getDecompressedSize() blends
- * "empty", "unknown" and "error" results to the same return value (0),
- * while ZSTD_getFrameContentSize() gives them separate return values.
+ * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+ * Both functions work the same way, but ZSTD_getDecompressedSize() blends
+ * "empty", "unknown" and "error" results to the same return value (0),
+ * while ZSTD_getFrameContentSize() gives them separate return values.
* @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
@@ -177,23 +177,23 @@ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
/*====== Helper functions ======*/
-#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
-ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
-ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
/***************************************
-* Explicit context
+* Explicit context
***************************************/
/*= Compression context
- * When compressing many times,
+ * When compressing many times,
* it is recommended to allocate a context just once,
* and re-use it for each successive compression operation.
- * This will make the workload friendlier to the system's memory.
+ * This will make the workload friendlier to the system's memory.
* Note : re-using context is just a speed / resource optimization.
* It doesn't change the compression ratio, which remains identical.
* Note 2 : In multi-threaded environments,
@@ -212,16 +212,16 @@ ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer *
* they will all be reset. Only `compressionLevel` remains.
*/
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- int compressionLevel);
-
-/*= Decompression context
- * When decompressing many times,
- * it is recommended to allocate a context only once,
- * and re-use it for each successive decompression operation.
- * This will make the workload friendlier to the system's memory.
- * Use one context per thread for parallel execution. */
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel);
+
+/*= Decompression context
+ * When decompressing many times,
+ * it is recommended to allocate a context only once,
+ * and re-use it for each successive decompression operation.
+ * This will make the workload friendlier to the system's memory.
+ * Use one context per thread for parallel execution. */
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer */
@@ -232,8 +232,8 @@ ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer *
* Compatible with sticky parameters.
*/
ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize);
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
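
A sketch of context reuse, per the two comments above: the caller creates one ZSTD_CCtx and amortizes it over many inputs; the function name and packing scheme are hypothetical.

    #include <zstd.h>

    /* Compresses n buffers back to back into dst, reusing one cctx. */
    static size_t compress_many(ZSTD_CCtx* cctx, void* dst, size_t dstCap,
                                const void* const* srcs, const size_t* srcSizes,
                                size_t n, int level)
    {
        size_t total = 0;
        for (size_t i = 0; i < n; i++) {
            size_t const cSize = ZSTD_compressCCtx(cctx, (char*)dst + total, dstCap - total,
                                                   srcs[i], srcSizes[i], level);
            if (ZSTD_isError(cSize)) return cSize;   /* propagate the error code */
            total += cSize;
        }
        return total;
    }

    /* Caller side: ZSTD_CCtx* cctx = ZSTD_createCCtx(); ... ZSTD_freeCCtx(cctx); */
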
/*********************************************
@@ -662,19 +662,19 @@ typedef struct ZSTD_outBuffer_s {
* start a new frame.
* note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
* block until the flush is complete or the output buffer is full.
-* @return : 0 if frame fully completed and fully flushed,
+* @return : 0 if frame fully completed and fully flushed,
* >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
* or an error code, which can be tested using ZSTD_isError().
*
* *******************************************************************/
-typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively the same object (>= v1.3.0) */
+typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively the same object (>= v1.3.0) */
/* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
-/*===== ZSTD_CStream management functions =====*/
+/*===== ZSTD_CStream management functions =====*/
ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); /* accept NULL pointer */
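
Read against the ZSTD_e_* directives defined just below, a file-to-file compression loop might look like the following sketch. It assumes ZSTD_compressStream2(), ZSTD_CCtx_setParameter() and the recommended buffer-size helpers ZSTD_CStreamInSize()/ZSTD_CStreamOutSize() from elsewhere in this header; `fin`/`fout` are hypothetical open streams.

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static int stream_compress(FILE* fin, FILE* fout, int level)
    {
        size_t const inCap  = ZSTD_CStreamInSize();
        size_t const outCap = ZSTD_CStreamOutSize();
        void* const inBuf   = malloc(inCap);
        void* const outBuf  = malloc(outCap);
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        int ret = 1;
        if (!inBuf || !outBuf || !cctx) goto cleanup;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);

        for (;;) {
            size_t const readSize = fread(inBuf, 1, inCap, fin);
            int const lastChunk = (readSize < inCap);
            ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            int finished;
            do {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
                if (ZSTD_isError(remaining)) goto cleanup;
                fwrite(outBuf, 1, output.pos, fout);
                /* with ZSTD_e_end, loop until the frame is fully flushed (remaining == 0) */
                finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
            } while (!finished);
            if (lastChunk) { ret = 0; break; }
        }
    cleanup:
        ZSTD_freeCCtx(cctx); free(inBuf); free(outBuf);
        return ret;
    }
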
-/*===== Streaming compression functions =====*/
+/*===== Streaming compression functions =====*/
typedef enum {
ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
ZSTD_e_flush=1, /* flush any data provided so far,
@@ -789,13 +789,13 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
* that will never request more than the remaining frame size.
* *******************************************************************************/
-typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively the same object (>= v1.3.0) */
+typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively the same object (>= v1.3.0) */
/* For compatibility with versions <= v1.2.0, prefer differentiating them. */
-/*===== ZSTD_DStream management functions =====*/
+/*===== ZSTD_DStream management functions =====*/
ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer */
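
A matching decompression loop, sketched under the same assumptions (ZSTD_decompressStream() plus the ZSTD_DStreamInSize()/ZSTD_DStreamOutSize() helpers; hypothetical FILE* arguments).

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    static int stream_decompress(FILE* fin, FILE* fout)
    {
        size_t const inCap  = ZSTD_DStreamInSize();
        size_t const outCap = ZSTD_DStreamOutSize();
        void* const inBuf   = malloc(inCap);
        void* const outBuf  = malloc(outCap);
        ZSTD_DStream* const dstream = ZSTD_createDStream();
        int ret = 1;
        size_t readSize;
        if (!inBuf || !outBuf || !dstream) goto cleanup;

        while ((readSize = fread(inBuf, 1, inCap, fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                size_t const r = ZSTD_decompressStream(dstream, &output, &input);
                if (ZSTD_isError(r)) goto cleanup;   /* corrupt input, window too large, etc. */
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
        ret = 0;   /* note: this sketch does not detect a truncated final frame */
    cleanup:
        ZSTD_freeDStream(dstream); free(inBuf); free(outBuf);
        return ret;
    }
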
-/*===== Streaming decompression functions =====*/
+/*===== Streaming decompression functions =====*/
/* This function is redundant with the advanced API and equivalent to:
*
@@ -1068,13 +1068,13 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
****************************************************************************************
* The definitions in the following section are considered experimental.
* They are provided for advanced scenarios.
- * They should never be used with a dynamic library, as prototypes may change in the future.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
* Use them only in association with static linking.
* ***************************************************************************************/
-#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
-#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
-
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
/* This can be overridden externally to hide static symbols. */
#ifndef ZSTDLIB_STATIC_API
# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
@@ -1233,24 +1233,24 @@ typedef struct {
ZSTD_frameParameters fParams;
} ZSTD_parameters;
-typedef enum {
+typedef enum {
ZSTD_dct_auto = 0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
ZSTD_dct_fullDict = 2 /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
-} ZSTD_dictContentType_e;
+} ZSTD_dictContentType_e;
-typedef enum {
+typedef enum {
ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
ZSTD_dlm_byRef = 1 /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
-} ZSTD_dictLoadMethod_e;
-
+} ZSTD_dictLoadMethod_e;
+
typedef enum {
ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number.
* Useful to save 4 bytes per generated frame.
 * The decoder cannot recognise this format automatically, so this instruction is required. */
} ZSTD_format_e;
-
+
typedef enum {
/* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
ZSTD_d_validateChecksum = 0,
@@ -1301,7 +1301,7 @@ typedef enum {
ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */
ZSTD_dictForceLoad = 3 /* Always reload the dictionary */
} ZSTD_dictAttachPref_e;
-
+
typedef enum {
ZSTD_lcm_auto = 0, /**< Automatically determine the compression mode based on the compression level.
* Negative compression levels will be uncompressed, and positive compression
@@ -1322,32 +1322,32 @@ typedef enum {
} ZSTD_paramSwitch_e;
/***************************************
-* Frame size functions
+* Frame size functions
***************************************/
-/*! ZSTD_findDecompressedSize() :
+/*! ZSTD_findDecompressedSize() :
* `src` should point to the start of a series of ZSTD encoded and/or skippable frames
- * `srcSize` must be the _exact_ size of this series
+ * `srcSize` must be the _exact_ size of this series
* (i.e. there should be a frame boundary at `src + srcSize`)
- * @return : - decompressed size of all data in all successive frames
- * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
- * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
- *
- * note 1 : decompressed size is an optional field that may not be present, especially in streaming mode.
- * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
- * In which case, it's necessary to use streaming mode to decompress data.
- * note 2 : decompressed size is always present when compression is done with ZSTD_compress()
- * note 3 : decompressed size can be very large (64-bit value),
- * potentially larger than what the local system can handle as a single memory segment.
- * In which case, it's necessary to use streaming mode to decompress data.
- * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
- * Always ensure result fits within application's authorized limits.
- * Each application can set its own limits.
- * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
- * read each contained frame header. This is fast, as most of the data is skipped;
- * however, it does mean that all frame data must be present and valid. */
+ * @return : - decompressed size of all data in all successive frames
+ * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
+ * - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ * note 1 : decompressed size is an optional field that may not be present, especially in streaming mode.
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+ * note 3 : decompressed size can be very large (64-bit value),
+ * potentially larger than what the local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure result fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
+ * read each contained frame header. This is fast, as most of the data is skipped;
+ * however, it does mean that all frame data must be present and valid. */
ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
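
A short sketch around the two sentinel values above; it assumes ZSTD_STATIC_LINKING_ONLY is defined before including zstd.h, as this section requires.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* 1 = *total filled, 0 = unknown (use streaming mode), -1 = error. */
    static int total_decompressed_size(const void* src, size_t srcSize,
                                       unsigned long long* total)
    {
        unsigned long long const r = ZSTD_findDecompressedSize(src, srcSize);
        if (r == ZSTD_CONTENTSIZE_ERROR) return -1;
        if (r == ZSTD_CONTENTSIZE_UNKNOWN) return 0;
        *total = r;   /* per note 4 : still clamp against application limits */
        return 1;
    }
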
-
+
/*! ZSTD_decompressBound() :
* `src` should point to the start of a series of ZSTD encoded and/or skippable frames
* `srcSize` must be the _exact_ size of this series
@@ -1363,12 +1363,12 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src,
*/
ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
-/*! ZSTD_frameHeaderSize() :
+/*! ZSTD_frameHeaderSize() :
* srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
* @return : size of the Frame Header,
* or an error code (if srcSize is too small) */
ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
-
+
typedef enum {
ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
@@ -1389,7 +1389,7 @@ typedef enum {
* setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
* @return : number of sequences generated
*/
-
+
ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize);
@@ -1476,11 +1476,11 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
-/***************************************
-* Memory management
-***************************************/
-
-/*! ZSTD_estimate*() :
+/***************************************
+* Memory management
+***************************************/
+
+/*! ZSTD_estimate*() :
* These functions make it possible to estimate memory usage
* of a future {D,C}Ctx, before its creation.
*
@@ -1507,100 +1507,100 @@ ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
-
-/*! ZSTD_estimateCStreamSize() :
- * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
- * It will also consider src size to be arbitrarily "large", which is the worst case.
- * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
- * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+
+/*! ZSTD_estimateCStreamSize() :
+ * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
+ * It will also consider src size to be arbitrarily "large", which is the worst case.
+ * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
* ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
- * Note : CStream size estimation is only correct for single-threaded compression.
- * ZSTD_DStream memory budget depends on window size.
- * This information can be passed manually, using ZSTD_estimateDStreamSize,
- * or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame();
- * Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
- * an internal ?Dict will be created, whose additional size is not estimated here.
- * In this case, get total size by adding ZSTD_estimate?DictSize */
+ * Note : CStream size estimation is only correct for single-threaded compression.
+ * ZSTD_DStream memory budget depends on window size.
+ * This information can be passed manually, using ZSTD_estimateDStreamSize,
+ * or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame();
+ * Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
+ * an internal ?Dict will be created, whose additional size is not estimated here.
+ * In this case, get total size by adding ZSTD_estimate?DictSize */
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
-
-/*! ZSTD_estimate?DictSize() :
- * ZSTD_estimateCDictSize() assumes that src size is relatively "small", and that content is copied, like ZSTD_createCDict().
- * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
- * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
- */
+
+/*! ZSTD_estimate?DictSize() :
+ * ZSTD_estimateCDictSize() assumes that src size is relatively "small", and that content is copied, like ZSTD_createCDict().
+ * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
+ * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
+ */
ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
-
-/*! ZSTD_initStatic*() :
- * Initialize an object using a pre-allocated fixed-size buffer.
- * workspace: The memory area to emplace the object into.
- * Provided pointer *must be 8-byte aligned*.
- * Buffer must outlive object.
- * workspaceSize: Use ZSTD_estimate*Size() to determine
- * how large workspace must be to support target scenario.
- * @return : pointer to object (same address as workspace, just different type),
- * or NULL if error (size too small, incorrect alignment, etc.)
- * Note : zstd will never resize nor malloc() when using a static buffer.
- * If the object requires more memory than available,
- * zstd will just error out (typically ZSTD_error_memory_allocation).
- * Note 2 : there is no corresponding "free" function.
- * Since workspace is allocated externally, it must be freed externally too.
- * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
- * into its associated cParams.
- * Limitation 1 : currently not compatible with internal dictionary creation, triggered by
- * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
- * Limitation 2 : static cctx currently not compatible with multi-threading.
- * Limitation 3 : static dctx is incompatible with legacy support.
- */
+
+/*! ZSTD_initStatic*() :
+ * Initialize an object using a pre-allocated fixed-size buffer.
+ * workspace: The memory area to emplace the object into.
+ * Provided pointer *must be 8-byte aligned*.
+ * Buffer must outlive object.
+ * workspaceSize: Use ZSTD_estimate*Size() to determine
+ * how large workspace must be to support target scenario.
+ * @return : pointer to object (same address as workspace, just different type),
+ * or NULL if error (size too small, incorrect alignment, etc.)
+ * Note : zstd will never resize nor malloc() when using a static buffer.
+ * If the object requires more memory than available,
+ * zstd will just error out (typically ZSTD_error_memory_allocation).
+ * Note 2 : there is no corresponding "free" function.
+ * Since workspace is allocated externally, it must be freed externally too.
+ * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
+ * into its associated cParams.
+ * Limitation 1 : currently not compatible with internal dictionary creation, triggered by
+ * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
+ * Limitation 2 : static cctx currently not compatible with multi-threading.
+ * Limitation 3 : static dctx is incompatible with legacy support.
+ */
ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */
-
+
ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */
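
A sketch tying ZSTD_estimateCCtxSize() to ZSTD_initStaticCCtx() per the comment above; it assumes malloc()'s alignment satisfies the 8-byte requirement (true on mainstream platforms), and the helper name is hypothetical.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <zstd.h>

    static ZSTD_CCtx* make_static_cctx(int level, void** workspaceOut)
    {
        size_t const need = ZSTD_estimateCCtxSize(level);
        void* const workspace = malloc(need);
        if (workspace == NULL) return NULL;
        ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(workspace, need);
        if (cctx == NULL) { free(workspace); return NULL; }   /* too small or misaligned */
        *workspaceOut = workspace;   /* no "free" function : caller frees the workspace */
        return cctx;
    }
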
-
+
ZSTDLIB_STATIC_API const ZSTD_CDict* ZSTD_initStaticCDict(
- void* workspace, size_t workspaceSize,
- const void* dict, size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_dictContentType_e dictContentType,
- ZSTD_compressionParameters cParams);
-
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams);
+
ZSTDLIB_STATIC_API const ZSTD_DDict* ZSTD_initStaticDDict(
- void* workspace, size_t workspaceSize,
- const void* dict, size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_dictContentType_e dictContentType);
-
-
-/*! Custom memory allocation :
- * These prototypes make it possible to pass your own allocation/free functions.
- * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
- * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
- */
-typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
-typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
-typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType);
+
+
+/*! Custom memory allocation :
+ * These prototypes make it possible to pass your own allocation/free functions.
+ * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
+ * All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
+ */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
static
#ifdef __GNUC__
__attribute__((__unused__))
#endif
ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */
-
+
ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
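
A sketch of the custom-allocation hooks above, routing zstd's allocations through counting wrappers; the tally struct and its accounting are hypothetical (a real tracker would record per-pointer sizes so frees could be subtracted).

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <zstd.h>

    typedef struct { size_t totalAllocated; } MemTally;

    static void* tally_alloc(void* opaque, size_t size)
    {
        ((MemTally*)opaque)->totalAllocated += size;
        return malloc(size);
    }
    static void tally_free(void* opaque, void* address)
    {
        (void)opaque;   /* sizes are not tracked per pointer in this sketch */
        free(address);
    }

    static ZSTD_CCtx* make_tallying_cctx(MemTally* tally)
    {
        ZSTD_customMem const cmem = { tally_alloc, tally_free, tally };
        return ZSTD_createCCtx_advanced(cmem);   /* NULL on allocation failure */
    }
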
-
+
ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
- ZSTD_dictLoadMethod_e dictLoadMethod,
- ZSTD_dictContentType_e dictContentType,
- ZSTD_compressionParameters cParams,
- ZSTD_customMem customMem);
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem);
/*! Thread pool :
* These prototypes make it possible to share a thread pool among multiple compression contexts.
@@ -1635,18 +1635,18 @@ ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced(
ZSTD_customMem customMem);
-/***************************************
-* Advanced compression functions
-***************************************/
-
-/*! ZSTD_createCDict_byReference() :
- * Create a digested dictionary for compression
+/***************************************
+* Advanced compression functions
+***************************************/
+
+/*! ZSTD_createCDict_byReference() :
+ * Create a digested dictionary for compression
* Dictionary content is just referenced, not duplicated.
* As a consequence, `dictBuffer` **must** outlive CDict,
* and its content must remain unmodified throughout the lifetime of CDict.
* note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
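
A sketch of the lifetime contract above: the referenced buffer must outlive the CDict and stay unmodified. A real application would keep the CDict for many compressions rather than rebuild it per call, as this simplified wrapper does; the (size_t)-1 sentinel is hypothetical, for this sketch only.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    static size_t compress_with_ref_dict(ZSTD_CCtx* cctx,
                                         void* dst, size_t dstCap,
                                         const void* src, size_t srcSize,
                                         const void* dictBuf, size_t dictSize)
    {
        /* dictBuf must stay alive and unmodified while cdict exists */
        ZSTD_CDict* const cdict = ZSTD_createCDict_byReference(dictBuf, dictSize, 3);
        if (cdict == NULL) return (size_t)-1;   /* hypothetical sentinel for this sketch */
        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCap, src, srcSize, cdict);
        ZSTD_freeCDict(cdict);
        return cSize;   /* test with ZSTD_isError() */
    }
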
-
+
/*! ZSTD_getCParams() :
* @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
* `estimatedSrcSize` value is optional, select 0 if not known */
@@ -1663,7 +1663,7 @@ ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned
ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
/*! ZSTD_adjustCParams() :
- * optimize params for a given `srcSize` and `dictSize`.
+ * optimize params for a given `srcSize` and `dictSize`.
* `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
* `dictSize` must be `0` when there is no dictionary.
* cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
@@ -1681,7 +1681,7 @@ size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
const void* dict,size_t dictSize,
ZSTD_parameters params);
-/*! ZSTD_compress_usingCDict_advanced() :
+/*! ZSTD_compress_usingCDict_advanced() :
* Note : this function is now DEPRECATED.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
* This prototype will generate compilation warnings. */
@@ -1692,7 +1692,7 @@ size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams);
-
+
/*! ZSTD_CCtx_loadDictionary_byReference() :
* Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
* It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
@@ -2045,11 +2045,11 @@ ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs (
* Note 3 : Skippable Frame Identifiers are considered valid. */
ZSTDLIB_STATIC_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
-/*! ZSTD_createDDict_byReference() :
- * Create a digested dictionary, ready to start decompression operation without startup delay.
- * Dictionary content is referenced, and therefore stays in dictBuffer.
- * It is important that dictBuffer outlives DDict;
- * it must remain read-accessible throughout the lifetime of DDict */
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, ready to start decompression operation without startup delay.
+ * Dictionary content is referenced, and therefore stays in dictBuffer.
+ * It is important that dictBuffer outlives DDict;
+ * it must remain read-accessible throughout the lifetime of DDict */
ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
/*! ZSTD_DCtx_loadDictionary_byReference() :
@@ -2276,7 +2276,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
ZSTD_frameParameters fParams,
unsigned long long pledgedSrcSize);
-/*! ZSTD_resetCStream() :
+/*! ZSTD_resetCStream() :
* This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
@@ -2286,35 +2286,35 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
*
 * start a new frame, using the same parameters as the previous frame.
 * This is typically useful to skip the dictionary loading stage, since it will re-use it in-place.
- * Note that zcs must be initialized at least once before using ZSTD_resetCStream().
- * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
- * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
- * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
- * but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
+ * Note that zcs must be initialized at least once before using ZSTD_resetCStream().
+ * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
+ * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
+ * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
+ * but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
* @return : 0, or an error code (which can be tested using ZSTD_isError())
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
-
-typedef struct {
+
+typedef struct {
unsigned long long ingested; /* nb input bytes read and buffered */
unsigned long long consumed; /* nb input bytes actually compressed */
unsigned long long produced; /* nb of compressed bytes generated and buffered */
unsigned long long flushed; /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
unsigned currentJobID; /* MT only : latest started job nb */
unsigned nbActiveWorkers; /* MT only : nb of workers actively compressing at probe time */
-} ZSTD_frameProgression;
-
+} ZSTD_frameProgression;
+
/* ZSTD_getFrameProgression() :
- * tells how much data has been ingested (read from input),
- * consumed (input actually compressed), and produced (output) for the current frame.
+ * tells how much data has been ingested (read from input),
+ * consumed (input actually compressed), and produced (output) for the current frame.
 * Note : (ingested - consumed) is the amount of input data buffered internally, not yet compressed.
* Aggregates progression inside active worker threads.
- */
+ */
ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
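
A sketch of a progress probe built on the struct above, suitable for a monitoring thread during a long multi-threaded compression; field interpretation follows the comments in the struct.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <zstd.h>

    static void report_progress(const ZSTD_CCtx* cctx)
    {
        ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
        unsigned long long const buffered = fp.ingested - fp.consumed;  /* input not yet compressed */
        fprintf(stderr, "job %u : %u active workers, %llu consumed, %llu produced, %llu buffered\n",
                fp.currentJobID, fp.nbActiveWorkers,
                fp.consumed, fp.produced, buffered);
    }
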
-
+
/*! ZSTD_toFlushNow() :
* Tell how many bytes are ready to be flushed immediately.
* Useful for multithreading scenarios (nbWorkers >= 1).
@@ -2329,8 +2329,8 @@ ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCt
* irrespective of the speed of concurrent (and newer) jobs.
*/
ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
-
-
+
+
/*===== Advanced Streaming decompression functions =====*/
/*!
@@ -2370,8 +2370,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
* Buffer-less and synchronous inner streaming functions
*
* This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
-* But it's also a complex one, with several restrictions, documented below.
-* Prefer normal streaming API for an easier experience.
+* But it's also a complex one, with several restrictions, documented below.
+* Prefer normal streaming API for an easier experience.
********************************************************************* */
/**
@@ -2387,8 +2387,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
Then, consume your input using ZSTD_compressContinue().
There are some important considerations to keep in mind when using this advanced function :
- - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
- - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
+ - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+ - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
Worst case evaluation is provided by ZSTD_compressBound().
ZSTD_compressContinue() doesn't guarantee recover after a failed compression.
@@ -2398,10 +2398,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
In which case, it will "discard" the relevant memory section from its history.
Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
- It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
- Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
+ It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
+ Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
- `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+ `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
*/
/*===== Buffer-less streaming compression functions =====*/
@@ -2409,7 +2409,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLev
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
-
+
ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
@@ -2425,52 +2425,52 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
A ZSTD_DCtx object can be re-used multiple times.
- First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
- Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
- Data fragment must be large enough to ensure successful decoding.
- `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
- @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+ First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
+ Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
+ Data fragment must be large enough to ensure successful decoding.
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+ @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
- It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
- such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
- Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
- As a consequence, check that values remain within valid application range.
- For example, do not allocate memory blindly, check that `windowSize` is within expectation.
- Each application can set its own limits, depending on local restrictions.
- For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
-
- ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
-  ZSTD_decompressContinue() is very sensitive to contiguity :
- if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
- or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
- There are multiple ways to guarantee this condition.
-
- The most memory efficient way is to use a round buffer of sufficient size.
- Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
-  which can @return an error code if the required value is too large for the current system (in 32-bit mode).
-  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one,
-  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
-  whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
- At which point, decoding can resume from the beginning of the buffer.
- Note that already decoded data stored in the buffer should be flushed before being overwritten.
-
- There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
-
- Finally, if you control the compression process, you can also ignore all buffer size rules,
- as long as the encoder and decoder progress in "lock-step",
- aka use exactly the same buffer sizes, break contiguity at the same place, etc.
-
-  Once buffers are set up, start decompression with ZSTD_decompressBegin().
- If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
-
+ It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+ Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+ As a consequence, check that values remain within valid application range.
+ For example, do not allocate memory blindly, check that `windowSize` is within expectation.
+ Each application can set its own limits, depending on local restrictions.
+ For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+ ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+  ZSTD_decompressContinue() is very sensitive to contiguity :
+ if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+ or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
+ There are multiple ways to guarantee this condition.
+
+ The most memory efficient way is to use a round buffer of sufficient size.
+ Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+  which can @return an error code if the required value is too large for the current system (in 32-bit mode).
+  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one,
+  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+  whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
+ At which point, decoding can resume from the beginning of the buffer.
+ Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+ There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+ Finally, if you control the compression process, you can also ignore all buffer size rules,
+ as long as the encoder and decoder progress in "lock-step",
+ aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+  Once buffers are set up, start decompression with ZSTD_decompressBegin().
+ If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
+
Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
- @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
- It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
It can also be an error code, which can be tested with ZSTD_isError().
A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
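
Putting those steps together, here is a minimal sketch assuming a single flat dst buffer large enough for the whole frame, so the round-buffer bookkeeping described above is deliberately omitted; the helper name is hypothetical.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Hypothetical helper: decode one frame into a single flat buffer.
   Returns bytes regenerated, or an error code (check ZSTD_isError()). */
static size_t decompress_bufferless(const void* src, size_t srcSize,
                                    void* dst, size_t dstCapacity)
{
    ZSTD_frameHeader zfh;
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    const char* const ip = (const char*)src;
    size_t inPos = 0, outPos = 0, r = (size_t)-1;
    if (dctx == NULL) return (size_t)-1;
    if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) goto done;  /* header incomplete, or error */
    /* validate zfh.windowSize / zfh.frameContentSize against local limits here */
    r = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(r)) goto done;
    for (;;) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        size_t regen;
        if (toRead == 0) { r = outPos; break; }                   /* frame fully decoded */
        if (toRead > srcSize - inPos) { r = (size_t)-1; break; }  /* truncated input */
        regen = ZSTD_decompressContinue(dctx, (char*)dst + outPos, dstCapacity - outPos,
                                        ip + inPos, toRead);
        if (ZSTD_isError(regen)) { r = regen; break; }
        inPos += toRead;
        outPos += regen;    /* regen may be 0 when a metadata item was decoded */
    }
done:
    ZSTD_freeDCtx(dctx);
    return r;
}
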
@@ -2482,26 +2482,26 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
== Special case : skippable frames ==
Skippable frames allow integration of user-defined data into a flow of concatenated frames.
- Skippable frames will be ignored (skipped) by decompressor.
- The format of skippable frames is as follows :
+ Skippable frames will be ignored (skipped) by decompressor.
+ The format of skippable frames is as follows :
a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
c) Frame Content - any content (User Data) of length equal to Frame Size
- For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
- For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
+ For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+ For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
*/
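
For illustration, a hand-rolled writer for the (a)/(b)/(c) layout above; the helper name is hypothetical, and newer zstd releases also provide ZSTD_writeSkippableFrame() for the same purpose.

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: emit one skippable frame.
   Returns bytes written, or 0 if dst is too small. */
static size_t write_skippable_frame(void* dst, size_t dstCapacity,
                                    const void* userData, uint32_t userSize)
{
    unsigned char* const op = (unsigned char*)dst;
    uint32_t const magic = 0x184D2A50u;   /* any value in 0x184D2A50..0x184D2A5F */
    int i;
    if (dstCapacity < 8u + userSize) return 0;
    for (i = 0; i < 4; ++i) op[i]     = (unsigned char)(magic    >> (8*i));  /* (a) frame ID, LE */
    for (i = 0; i < 4; ++i) op[4 + i] = (unsigned char)(userSize >> (8*i));  /* (b) frame size, LE */
    memcpy(op + 8, userData, userSize);                                      /* (c) user data */
    return 8u + userSize;
}
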
-/*===== Buffer-less streaming decompression functions =====*/
-typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+/*===== Buffer-less streaming decompression functions =====*/
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
typedef struct {
- unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
- unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
- unsigned blockSizeMax;
- ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
- unsigned headerSize;
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+ unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
+ unsigned blockSizeMax;
+ ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ unsigned headerSize;
unsigned dictID;
unsigned checksumFlag;
-} ZSTD_frameHeader;
+} ZSTD_frameHeader;
/*! ZSTD_getFrameHeader() :
* decode Frame Header, or requires larger `srcSize`.
@@ -2518,23 +2518,23 @@ ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowS
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
-
+
ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
-
-/* misc */
+
+/* misc */
ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
-
-
-/* ============================ */
-/** Block level API */
-/* ============================ */
-
-/*!
+
+
+/* ============================ */
+/** Block level API */
+/* ============================ */
+
+/*!
Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
   But users will have to take charge of the metadata needed to regenerate data, such as compressed and content sizes.
@@ -2543,11 +2543,11 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
- Compressing and decompressing require a context structure
+ Use ZSTD_createCCtx() and ZSTD_createDCtx()
- It is necessary to init context before starting
- + compression : any ZSTD_compressBegin*() variant, including with dictionary
- + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
- + copyCCtx() and copyDCtx() can be used too
- - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
- + If input is larger than a block size, it's necessary to split input data into multiple blocks
+ + compression : any ZSTD_compressBegin*() variant, including with dictionary
+ + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ + copyCCtx() and copyDCtx() can be used too
+ - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ + If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
- When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
@@ -2556,9 +2556,9 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+ A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
   Doing so would mess up the statistics history, leading to potential data corruption.
+ ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
- + In case of multiple successive blocks, should some of them be uncompressed,
- decoder must be informed of their existence in order to follow proper history.
- Use ZSTD_insertBlock() for such a case.
+ + In case of multiple successive blocks, should some of them be uncompressed,
+ decoder must be informed of their existence in order to follow proper history.
+ Use ZSTD_insertBlock() for such a case.
*/
/*===== Raw zstd block functions =====*/
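
A sketch of one block round trip under those rules, including the incompressible fallback through ZSTD_insertBlock(); the helper name and scratch sizing are illustrative, and flagging raw vs. compressed blocks on the wire is left to the caller, as stated above.

#define ZSTD_STATIC_LINKING_ONLY
#include <string.h>
#include <zstd.h>

/* Hypothetical round trip for one raw block. When ZSTD_compressBlock()
   returns 0 the data is kept raw and the decoder is informed via
   ZSTD_insertBlock(). Returns 1 on success, 0 otherwise. */
static int roundtrip_one_block(const void* src, size_t srcSize,
                               void* dst, size_t dstCapacity)
{
    static char tmp[ZSTD_BLOCKSIZE_MAX];   /* scratch; static, hence not thread-safe */
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    int ok = 0;
    if (cctx && dctx
        && !ZSTD_isError(ZSTD_compressBegin(cctx, 3))
        && !ZSTD_isError(ZSTD_decompressBegin(dctx))
        && srcSize <= ZSTD_getBlockSize(cctx)          /* larger inputs must be split */
        && dstCapacity >= srcSize) {
        size_t const cSize = ZSTD_compressBlock(cctx, tmp, sizeof(tmp), src, srcSize);
        if (!ZSTD_isError(cSize)) {
            if (cSize == 0) {                          /* not compressible: store raw */
                memcpy(dst, src, srcSize);
                ok = !ZSTD_isError(ZSTD_insertBlock(dctx, dst, srcSize));
            } else {
                size_t const dSize = ZSTD_decompressBlock(dctx, dst, dstCapacity, tmp, cSize);
                ok = !ZSTD_isError(dSize) && (dSize == srcSize);
            }
        }
    }
    ZSTD_freeCCtx(cctx);
    ZSTD_freeDCtx(dctx);
    return ok;
}
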
diff --git a/contrib/libs/zstd/lib/zstd_errors.h b/contrib/libs/zstd/lib/zstd_errors.h
index fa3686b772..f5c7c43d0b 100644
--- a/contrib/libs/zstd/lib/zstd_errors.h
+++ b/contrib/libs/zstd/lib/zstd_errors.h
@@ -1,11 +1,11 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_ERRORS_H_398273423
@@ -19,73 +19,73 @@ extern "C" {
#include <stddef.h> /* size_t */
-/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
-#ifndef ZSTDERRORLIB_VISIBILITY
-# if defined(__GNUC__) && (__GNUC__ >= 4)
-# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
-# else
-# define ZSTDERRORLIB_VISIBILITY
-# endif
-#endif
-#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
-# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
-#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
-#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
-#else
-# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
-#endif
-
-/*-*********************************************
- * Error codes list
- *-*********************************************
- * Error codes _values_ are pinned down since v1.3.1 only.
- * Therefore, don't rely on values if you may link to any version < v1.3.1.
- *
- * Only values < 100 are considered stable.
- *
- * note 1 : this API shall be used with static linking only.
- * dynamic linking is not yet officially supported.
- * note 2 : Prefer relying on the enum rather than on its value whenever possible
- * This is the only supported way to use the error list < v1.3.1
- * note 3 : ZSTD_isError() is always correct, whatever the library version.
- **********************************************/
+/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
+#ifndef ZSTDERRORLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define ZSTDERRORLIB_VISIBILITY
+# endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+#endif
+
+/*-*********************************************
+ * Error codes list
+ *-*********************************************
+ * Error codes _values_ are pinned down since v1.3.1 only.
+ * Therefore, don't rely on values if you may link to any version < v1.3.1.
+ *
+ * Only values < 100 are considered stable.
+ *
+ * note 1 : this API shall be used with static linking only.
+ * dynamic linking is not yet officially supported.
+ * note 2 : Prefer relying on the enum rather than on its value whenever possible
+ * This is the only supported way to use the error list < v1.3.1
+ * note 3 : ZSTD_isError() is always correct, whatever the library version.
+ **********************************************/
typedef enum {
- ZSTD_error_no_error = 0,
- ZSTD_error_GENERIC = 1,
- ZSTD_error_prefix_unknown = 10,
- ZSTD_error_version_unsupported = 12,
- ZSTD_error_frameParameter_unsupported = 14,
- ZSTD_error_frameParameter_windowTooLarge = 16,
- ZSTD_error_corruption_detected = 20,
- ZSTD_error_checksum_wrong = 22,
- ZSTD_error_dictionary_corrupted = 30,
- ZSTD_error_dictionary_wrong = 32,
- ZSTD_error_dictionaryCreation_failed = 34,
- ZSTD_error_parameter_unsupported = 40,
- ZSTD_error_parameter_outOfBound = 42,
- ZSTD_error_tableLog_tooLarge = 44,
- ZSTD_error_maxSymbolValue_tooLarge = 46,
- ZSTD_error_maxSymbolValue_tooSmall = 48,
- ZSTD_error_stage_wrong = 60,
- ZSTD_error_init_missing = 62,
- ZSTD_error_memory_allocation = 64,
- ZSTD_error_workSpace_tooSmall= 66,
- ZSTD_error_dstSize_tooSmall = 70,
- ZSTD_error_srcSize_wrong = 72,
+ ZSTD_error_no_error = 0,
+ ZSTD_error_GENERIC = 1,
+ ZSTD_error_prefix_unknown = 10,
+ ZSTD_error_version_unsupported = 12,
+ ZSTD_error_frameParameter_unsupported = 14,
+ ZSTD_error_frameParameter_windowTooLarge = 16,
+ ZSTD_error_corruption_detected = 20,
+ ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_dictionary_corrupted = 30,
+ ZSTD_error_dictionary_wrong = 32,
+ ZSTD_error_dictionaryCreation_failed = 34,
+ ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_outOfBound = 42,
+ ZSTD_error_tableLog_tooLarge = 44,
+ ZSTD_error_maxSymbolValue_tooLarge = 46,
+ ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_stage_wrong = 60,
+ ZSTD_error_init_missing = 62,
+ ZSTD_error_memory_allocation = 64,
+ ZSTD_error_workSpace_tooSmall= 66,
+ ZSTD_error_dstSize_tooSmall = 70,
+ ZSTD_error_srcSize_wrong = 72,
ZSTD_error_dstBuffer_null = 74,
- /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
- ZSTD_error_frameIndex_tooLarge = 100,
- ZSTD_error_seekableIO = 102,
+ /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
+ ZSTD_error_frameIndex_tooLarge = 100,
+ ZSTD_error_seekableIO = 102,
ZSTD_error_dstBuffer_wrong = 104,
ZSTD_error_srcBuffer_wrong = 105,
- ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
+ ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;
/*! ZSTD_getErrorCode() :
convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
-  which can be compared with the enum list published above */
-ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
-ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
+  which can be compared with the enum list published above */
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
#if defined (__cplusplus)
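
As note 2 above recommends, compare against the enum rather than against raw numeric values; a small sketch (the helper name is hypothetical):

#include <stdio.h>
#include <zstd.h>
#include "zstd_errors.h"

/* Hypothetical helper: report a size_t result via the stable enum. */
static void report_zstd_result(size_t result)
{
    if (!ZSTD_isError(result)) return;
    switch (ZSTD_getErrorCode(result)) {
    case ZSTD_error_dstSize_tooSmall:
        fprintf(stderr, "dst too small: %s\n", ZSTD_getErrorString(ZSTD_error_dstSize_tooSmall));
        break;
    case ZSTD_error_srcSize_wrong:
        fprintf(stderr, "bad srcSize: %s\n", ZSTD_getErrorName(result));
        break;
    default:
        fprintf(stderr, "zstd error: %s\n", ZSTD_getErrorName(result));
        break;
    }
}
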
diff --git a/library/cpp/codecs/ut/codecs_ut.cpp b/library/cpp/codecs/ut/codecs_ut.cpp
index caf6089aef..b23d427d95 100644
--- a/library/cpp/codecs/ut/codecs_ut.cpp
+++ b/library/cpp/codecs/ut/codecs_ut.cpp
@@ -114,746 +114,746 @@ namespace {
"&lt;tkjxrf",
"&lt;беларусь это мы",
"&lt;бокс, версия ibf",
- "designer tree svc",
- "seriesg810",
- "doll makers",
- "rotten.com",
- "evening gowns",
- "discover",
- "south carolina escorts",
- "forkliftjobsinhousron",
- "mailbox",
- "alexis",
- "espn.com mlb",
- "gypsy.chat.2k",
- "the man in the mirror",
- "azteca",
- "sebastian telfair - jamel thomas",
- "kirby",
- "java",
- "trike motorcycles",
- "piasecki helicopter",
- "wicca binding spells",
- "pier park panama city beach .com",
- "continente europeo",
- "asswatchers.com",
- "asswatchers.com",
- "easton stealth stiff flex cnt adult baseball bat - 3",
- "facesofdeath",
- "video of 9 11",
- "profileedit.myspace.com",
- "georgia snakes",
- "yahoo.com",
- "google",
- "http wwwclassicindustries .corvettes-roadsters.com",
- "arington training stable",
- "find bred of dog",
- "southpark contact tables for myspace",
- "symptoms of laryngitis",
- "suzuki stickers",
- "avianca",
- "radio shack",
- "dominican republic pictures",
- "recent",
- "mapquest",
- "http myspace .com",
- "research chemicals supplies",
- "winn dixie.com",
- "drivers 20guide.com",
- "dylan whitley north carolina",
- "google com",
- "order wild horses cigarettes",
- "yahoocom",
- "fl runners",
- "aol companion install",
- "nbc.comdond 59595 6",
- "directv.com",
- "motorsports insurance",
- "cartoonnetwork",
- "pop warner-victorville",
- "black iorn spars",
- "goog",
- "the suns",
- "ebay",
- "pop warner",
- "philadelphia cream cheese",
- "oklahoma",
- "doudleday books.com",
- "javascript download",
- "city of nacogdoches",
- "sfyl",
- "myspace.com",
- "baptism pictures",
- "games",
- "depredadores sexuales",
- "mycl.cravelyrics.com",
- "become a bone marrow donner",
- "vintage copies",
- "ford dealership",
- "candystand",
- "smarthairypussyom",
- "yahoo.com",
- "vanderbilt.edu",
- "ebay",
- "grouper",
- "mys",
- "myrsa and birth defects",
- "hatteras rentals",
- "female escorts",
- "ja rule",
- "meat bluesheet",
- "yahoo",
- "american disability act court cases",
- "clearview cinemas",
- "hard69.com",
- "make a living will for free",
- "fat asses",
- "flashback concert in atlanta ga",
- "fucking",
- "flat abdomen exercises",
- "big brother facial",
- "german dictionary",
- "black dick",
- "ebonymovies",
- "airsoft rifles",
- "best fishing days calander",
- "tattoo",
- "impressions",
- "cs.com",
- "northwest airlines reservations",
- "halo 3",
- "wallbaums",
- "chat room listings",
- "waterbury ct warrants",
- "pictures of chad michael murry",
- "yahoo",
- "install wallpaper",
- "halo 3",
- "clits and tits",
- "prothsmouth general circuit courts",
- "old hawthorne columbia",
- "jess lee photos",
- "no deposit casino bonus",
- "bbc gladiator dressed to kill",
- "anemagazine.com",
- "lyrics unfaithful",
- "gold bars found",
- "art.comhttp",
- "free unlock key",
- "man o war lost a race",
- "blue cross and blue shield",
- "phenergan",
- "myspace.com",
- "http www.constitutional court.com",
- "monster trucks",
- "the breeze fort myers fla.newspaper",
- "davis origin name",
- "upper deck.com",
- "arizona",
- "akira lane",
- "ebaumsworld",
- "union pacific jobs",
- "google.cm",
- "free bigt girls nudes",
- "abcnews.com",
- "tootse.com",
- "az lyrics",
- "freddy",
- "georgia.com",
- "johncombest.com",
- "nelly",
- "gussi mane",
- "university of illinois",
- "oregan valcano's",
- "mythbusters",
- "sailormoon hentai",
- "international cub tractor",
- "desert sky movie green valley az",
- "evite",
- "nelly nud epics",
- "penndot.com",
- "first banks",
- "psp manual",
- "google",
- "jackieaudet hotmail.com",
- "internet",
- "shootinggames",
- "shootinggames",
- "montana western rendezvous of art",
- "hello kitty layouts",
- "yahoo",
- "translation",
- "glenn scott attorney",
- "hallofshame",
- "capitolone.com",
- "recipe for popovers",
- "pictures of demons",
- "barnes and nobles.com",
- "rbd",
- "hart and hunnington tattoo shop",
- "janepowellmovies.com",
- "ged schools in the military",
- "kelis",
- "hvacagent",
- "neat home organizer television show",
- "2719 24-2-crime and courts",
- "fsu",
- "torpedo bomber games",
- "love poems",
- "polly pocket'toys",
- "yweatherahoo.com",
- "jungle gin",
- "flemington new jersey real estate",
- "milf hunter stories",
- "budget.com",
- "chopperstyle",
- "keno player",
- "up skirt",
- "dogs",
- "beerballers",
- "phat white butt",
- "phat white butt",
- "va licensing for interpeters for the deaf",
- "white page phone book maiden north carolina",
- "controlled 20solutions 20corp.com",
- "friedman jewelery",
- "kelis",
- "curtains",
- "curtains",
- "fuck me harder",
- "naked girls",
- "southwest airlines boarding pass",
- "mailbox",
- "1976 mavrick",
- "adult diapers",
- "horse nasal discharge",
- "charles ludlam",
- "google",
- "himnos en espanol",
- "quarter horses for sale in nebraska",
- "cosmo",
- "hi",
- "mattel",
- "aouto 20trader.com",
- "sunsetter awnings",
- "bl.cfm",
- "at",
- "tattoo designs",
- "bubs",
- "yahoo",
- "free live gay cam chats",
- "antibiotics",
- "upgrade",
- "aessuccess.org",
- "yahoo",
- "boobdex",
- "the jackle",
- "plus size lingerie magazines for home",
- "lehigh valley little league",
- "ancient trade coins",
- "pillsbury",
- "colorado springs",
- "canada aviation jobs",
- "free guitar tablature",
- "kids aol",
- "capitol community colage",
- "kevin thomas bermuda",
- "missouri lotto",
- "homedepotsportscomplex.com",
- "dr. franklin schneier",
- "williamsburg va. hotels",
- "aim",
- "morningbuzz",
- "probusines.com",
- "wwwalbasoul.com",
- "w.runehints.com",
- "yahoo.com",
- "yahoo.com",
- "yahoo.com",
- "fantasy 5",
- "xxx rape",
- "hawaiian gift baskets",
- "madonna.com",
- "myspace contact tables",
- "white cock",
- "safe space",
- "drinks",
- "o rly",
- "dsl",
- "wwww.uncc.edu",
- "wwww.uncc.edu",
- "wwww.uncc.edu",
- "online overseas checkt.westernunion.com",
- "angina",
- "heba technologies",
- "hebrew ancient coins",
- "games",
- "recent",
- "international male.com",
- "sex pics",
- "paul wall layouts for myspace",
- "health",
- "wire lamp shade frames",
- "windows",
- "top business colleges",
- "mary jo eustace",
- "attored",
- "oklahoma indian legal services",
- "6arab",
- "santo nino",
- "10.1.0.199",
- "http www.myspace.com daffydonn07",
- "marine electrical",
- "sandy creek cabins weekend new york",
- "onionbutts",
- "tucson classifieds",
- "new york times",
- "recently deleted screen names",
- "goldeneagle.net",
- "fta support forums",
- "low protein in bloos",
- "datring",
- "lilwayne",
- "free billiards games",
- "yahoo",
- "ako",
- "a.tribalfusion.c script language",
- "dustin davis",
- "cooking",
- "yahoo.com",
- "universal studios",
- "adult chat",
- "santa monica flea market",
- "carpevino.us",
- "wine vinyard in stewertstown pa",
- "y",
- "craigslist",
- "ups.com",
- "1-866-347-3292",
- "renegade boats",
- "renegade boats",
- "sunset state beach caping",
- "artofstanlync.org",
- "heart-i want make love to you video",
- "triangles around the world",
- "mycl.cravelyrics.com",
- "in the bible what type of persons were forced to walk around in public and say unclean unclean",
- "providence water fire",
- "googlecom",
- "yahoo.com",
- "b.g",
- "website de rebelde",
- "stoplinks",
- "allison 2000 transmission",
- "thepriceanduseofgasoline.com",
- "chamillinaire",
- "veryspecialhomescom",
- "crashbandicoot",
- "a short sex story",
- "yahoo.com",
- "music now",
- "east carolina university",
- "vandalism in new york",
- "the bainde soleil company",
- "dicaprio movies",
- "xxx dvds",
- "visual basic scripting support",
- "english bulldogs",
- "travelocity.com",
- "website for asstr.org",
- "hypnotic slave training",
- "pogo",
- "university at buffalo addmissions",
- "screen name services",
- "superdrol",
- "art institute",
- "online business cards",
- "aolfinancial",
- "upgrade shop",
- "anderson abrasive",
- "weatherchannel.com",
- "recent",
- "ebay",
- "diagram and xray of a normal shouldercheck out surgicalpoker.comfor more sports medicine and orthopedic information and images check out emedx.com by dr. allan mishranormal diagram normal x-ray",
- "95 mustang gt chips",
- "gold grills",
- "hap housing in portland or",
- "car sales",
- "swimming with dolphins",
- "jennifer lopez nude",
- "wwwdubcnn.com",
- "dominicks pizza",
- "fl studio",
- "http blackplanet .com",
- "http blackplanet .com",
- "http blackplanet .com",
- "A$AP Rocky",
- "benie mac",
- "fujifilm.com",
- "aol dialup setup",
- "metal fabrication tools",
- "internet",
- "buy my painting",
- "pulaski va classifieds",
- "w.coj.net",
- "postopia.com",
- "no medical records hydrocodone",
- "auto completes for deal or no deal contest",
- "http www. big monster dicks .com",
- "invacare wheelchairs",
- "musicdownload.com",
- "president bush",
- "heavy equipment",
- "inmate information",
- "allina.com",
- "megan law.gov",
- "wwwl.eharmony.com",
- "jobs in colombiaoqx0nq",
- "beastsex",
- "ferguisson",
- "heart-i wanna make love to you vedio",
- "west georgia university",
- "west georgia university",
- "hsn",
- "bb&t",
- "midas realty",
- "yahoo",
- "mytrip.com",
- "donna texas mcdonalds",
- "free picture of our lady",
- "bubs",
- "taken chemo for 5 month's cancer can still be seen on ct scan",
- "porn 20video 20clips",
- "lake monsters",
- "freedj mix vibes",
- "myspace.coim",
- "la joya school district tx",
- "colorado bungee jumping",
- "yahoo",
- "google.com",
- "lafayette co vampire grave",
- "ice cube",
- "internet",
- "tccd.edu",
- "google",
- "people",
- "instructions on putting together a filing cabinet",
- "click.babycenter.com",
- "90minut",
- "ramien noodles",
- "lilwayne",
- "danni virgin",
- "nice sexy girls.com",
- "guttural pouch",
- "free male masturbating",
- "good",
- "rotton 20dot.com",
- "fox sports",
- "seth rogen",
- "desb.mspaceads.com",
- "betjc.com",
- "pictures of quebec",
- "gold in quartz",
- "evergreen college",
- "runescape",
- "gastons white river resort",
- "sunset beach santa cruz",
- "auto parts",
- "travelocity",
- "myspace.com",
- "laptops",
- "beyaonce and j",
- "free gay ebony knights webcams",
- "google",
- "derek watson",
- "alice in wonderland tshirts",
- "hippa p rivacy act",
- "down payment mortgage",
- "believe it or not",
- "mys",
- "datatreca",
- "onesuite",
- "names",
- "lil john",
- "scales of justice cuff links",
- "localsales.com",
- "alametris denise lipsey",
- "adam for adam",
- "flip flops crochet",
- "arbors",
- "heb hospital",
- "myspae.com",
- "midevil breast torture",
- "askjeeves",
- "assparade",
- ".comhttp",
- "weekly hotels reston virginia",
- "noiceinparadise.com",
- "pre diabetic diet",
- "h.i.m.com",
- "myspace",
- "myspace",
- "wwww.sex.lp.cpm",
- "mcso mugshots",
- "roush",
- "wellfargo",
- "lilwayne",
- "hopecherie",
- "frontgate.com",
- "barbados registration department",
- "american pitbull",
- "free pc full flight simulation game downloads",
- "google",
- "vaginal secretion grey stuff",
- "myspace layouts",
- "kanye west",
- "walmart",
- "pain in hip and leg",
- "tenneesseeaquarium.com",
- "suncom.com",
- "alysseandrachelwerehere",
- "pimiclo",
- "starmagazine.com",
- "classifieds",
- "mount rushmore in dakota",
- "sams",
- "disney com",
- "beastyality",
- "chief joseph paintings",
- "henry scott",
- "paris hilton",
- "kb903235",
- "autotrader",
- "irish traveller",
- "ajcobs.com",
- "art of stanlync.org",
- "fox news",
- "freeporn",
- "depo provera",
- "air france",
- "talk city active chats",
- "codes for the gamecube game resident evil 4",
- "good food to eat for sugar diabetes",
- "warpmymind",
- "arc jacksonville fl",
- "7fwww.sendspace.com",
- "j blackfoot",
- "mcso madison street jail inmate",
- "macys",
- "eduscapes",
- "free picture of our lady",
- "http www.eastman.org",
- "minneapolisstartribune localnews",
- "minneapolisstartribune localnews",
- "tennessee",
- "foodtown",
- "anti virous download",
- "http www.mdland rec.net",
- "ed edd eddy",
- "maryjbilge",
- "shipping services",
- "baseball videogames",
- "egyption ancient coins",
- "internet",
- "what is sodomy",
- "international cub lowboy",
- "mary j. bilge",
- "scenic backgrounds",
- "google.com",
- "rosettalangueges.com",
- "titanpoker.net",
- "titie show",
- "edelen realtor",
- "lil cim",
- "china.com",
- "boost mobile",
- "nc eipa",
- "people's 20pharmacy 20guide 20to",
- "costco",
- "charles schultz drawings",
- "nicisterling",
- "a picture of author stephen crane",
- "yahoo.com",
- "sponge bob myspace layouts",
- "g",
- "calendar creator",
- "careerbuilder.com",
- "cool tex for web pages",
- "yahoo.com",
- "mcdougal littel",
- "sign on",
- "superman",
- "radio",
- "lajollaindians.com",
- "mike tyson died",
- "pink panther",
- "lolita newgroups",
- "nude girls",
- "galveston 20texas",
- "gerlach meat co.",
- "thetakeover2006.com",
- "yahoo",
- "simpsons movie",
- "saxy",
- "yahoo",
- "21st century realty",
- "new zealand",
- "dogs",
- "weather",
- "free porn sex",
- "bugs bunny parties",
- "mortal kombat 2 fatalities",
- "sea life park hawaii",
- "songs for middle school choir",
- "rocky mountain jeep",
- "householdbank.com",
- "birdville isd",
- "brutal dildo",
- "brutal dildo",
- "free live gay cam chats",
- "wonder woman",
- "ebay com",
- "myspace.com",
- "boost mobile",
- "desktop themes sex",
- "myspace.com",
- "myspace.com",
- "maroon chevy auto dealership",
- "beyonce",
- "cleopatra vii",
- "accountcentralonline.com",
- "juvenile",
- "the game cock",
- "pics of ashland city tennessee",
- "coherent deos",
- "microwsoft wireless connection",
- "best buy",
- "southwest airlines",
- "southwest airlines",
- "pogo games",
- "family court record room in brooklyn newyork",
- "60.ufc.net",
- "us mint",
- "people",
- "firstcitycreditunion",
- "washington mutual careers",
- "beyonce",
- "tab energy drink",
- "http vemmabuilder.com",
- "new york state lottery",
- "yahoo",
- "tmobile",
- "yellow pages.com",
- "az.central.com",
- "pasco auto salvage",
- "im help",
- "home based businesses",
- "studyisland",
- "bible study from king james on 1 corinthians chapter 6 verses 18- 20",
- "bellevue-ne",
- "msn.com",
- "aolsignupfree",
- "the simsons",
- "nevada",
- "forsyth central high school",
- "road state college",
- "does my child have adhd",
- "les tucanesde tijuana",
- "yahoo.com",
- "mexican pharmacy hyrocodone",
- "ford motor co year end sales",
- "google.com",
- "google.com",
- "person.com",
- "marylyn monroe",
- "nfl",
- "the hun.net",
- "nkena anderson",
- "free netscape download",
- "top fifty colleges",
- "wil.",
- "memphis tennessee",
- "yahoo mail",
- "corrections officer of juveniles",
- "jada pinkett smith",
- "mapquest.com",
- "apartments",
- "msn.com",
- "msn.com",
- "wasco state prison",
- "solitaire",
- "http",
- "freeport seaman center",
- "futbol soccer",
- "screen names",
- "kmov.com",
- "survey.otxresearch.com",
- "facial shaves",
- "gle",
- "flw.com",
- "seasportboats.com",
- "toysrus.com",
- "animated sexy graphics",
- "colombia",
- "unitarian univeralist association",
- "fr",
- "google video.com",
- "660-342-1072",
- "suzan-lori parks",
- "male facial",
- "william bouguereau first kiss how much it is worth",
- "streetfighter",
- "nick.com",
- "wonder woman",
- "pentagram",
- "mcafee virus protection",
- "diary",
- "037f34742140a5f761ad51d95180b4f8",
- "free porn",
- "no deposit casino bonus",
- "spongebob the movie myspace layouts",
- "on line banking",
- "equestrian properties for sale",
- "kazaa free muisc download",
- "gay truckers",
- "24",
- "pay-pal",
- "www yahoo.com",
- "phatazz.white hoes",
- "planets of the universe",
- "free movies",
- "budget rentals special",
- "yahoogames",
- "talaat pasha",
- "mariah carey song lyrics don't forget about us",
- "futbol soccer",
- "msn groups",
- "martha steward",
- "martha steward",
- "soap opera scoops cbs",
- "cingular",
- "stuwie",
- "womengiving blowjobs",
- "hear dancing queen by abba",
- "love song",
- "fhsaa.org",
- "any dvd",
- "any dvd",
- "gallery.brookeskye.com",
- "gibson ranch",
- "wachovia com",
- "kzg golf information",
- "skylight curtains",
- "c",
- "123freeweblayouts.com",
- "yahoo.com",
- "allie.com",
- "ghosts of bingham cemetery",
- "resume maker",
- "resume maker",
- "resume maker",
- "lymphomatoid papulosis",
- "sez.com",
+ "designer tree svc",
+ "seriesg810",
+ "doll makers",
+ "rotten.com",
+ "evening gowns",
+ "discover",
+ "south carolina escorts",
+ "forkliftjobsinhousron",
+ "mailbox",
+ "alexis",
+ "espn.com mlb",
+ "gypsy.chat.2k",
+ "the man in the mirror",
+ "azteca",
+ "sebastian telfair - jamel thomas",
+ "kirby",
+ "java",
+ "trike motorcycles",
+ "piasecki helicopter",
+ "wicca binding spells",
+ "pier park panama city beach .com",
+ "continente europeo",
+ "asswatchers.com",
+ "asswatchers.com",
+ "easton stealth stiff flex cnt adult baseball bat - 3",
+ "facesofdeath",
+ "video of 9 11",
+ "profileedit.myspace.com",
+ "georgia snakes",
+ "yahoo.com",
+ "google",
+ "http wwwclassicindustries .corvettes-roadsters.com",
+ "arington training stable",
+ "find bred of dog",
+ "southpark contact tables for myspace",
+ "symptoms of laryngitis",
+ "suzuki stickers",
+ "avianca",
+ "radio shack",
+ "dominican republic pictures",
+ "recent",
+ "mapquest",
+ "http myspace .com",
+ "research chemicals supplies",
+ "winn dixie.com",
+ "drivers 20guide.com",
+ "dylan whitley north carolina",
+ "google com",
+ "order wild horses cigarettes",
+ "yahoocom",
+ "fl runners",
+ "aol companion install",
+ "nbc.comdond 59595 6",
+ "directv.com",
+ "motorsports insurance",
+ "cartoonnetwork",
+ "pop warner-victorville",
+ "black iorn spars",
+ "goog",
+ "the suns",
+ "ebay",
+ "pop warner",
+ "philadelphia cream cheese",
+ "oklahoma",
+ "doudleday books.com",
+ "javascript download",
+ "city of nacogdoches",
+ "sfyl",
+ "myspace.com",
+ "baptism pictures",
+ "games",
+ "depredadores sexuales",
+ "mycl.cravelyrics.com",
+ "become a bone marrow donner",
+ "vintage copies",
+ "ford dealership",
+ "candystand",
+ "smarthairypussyom",
+ "yahoo.com",
+ "vanderbilt.edu",
+ "ebay",
+ "grouper",
+ "mys",
+ "myrsa and birth defects",
+ "hatteras rentals",
+ "female escorts",
+ "ja rule",
+ "meat bluesheet",
+ "yahoo",
+ "american disability act court cases",
+ "clearview cinemas",
+ "hard69.com",
+ "make a living will for free",
+ "fat asses",
+ "flashback concert in atlanta ga",
+ "fucking",
+ "flat abdomen exercises",
+ "big brother facial",
+ "german dictionary",
+ "black dick",
+ "ebonymovies",
+ "airsoft rifles",
+ "best fishing days calander",
+ "tattoo",
+ "impressions",
+ "cs.com",
+ "northwest airlines reservations",
+ "halo 3",
+ "wallbaums",
+ "chat room listings",
+ "waterbury ct warrants",
+ "pictures of chad michael murry",
+ "yahoo",
+ "install wallpaper",
+ "halo 3",
+ "clits and tits",
+ "prothsmouth general circuit courts",
+ "old hawthorne columbia",
+ "jess lee photos",
+ "no deposit casino bonus",
+ "bbc gladiator dressed to kill",
+ "anemagazine.com",
+ "lyrics unfaithful",
+ "gold bars found",
+ "art.comhttp",
+ "free unlock key",
+ "man o war lost a race",
+ "blue cross and blue shield",
+ "phenergan",
+ "myspace.com",
+ "http www.constitutional court.com",
+ "monster trucks",
+ "the breeze fort myers fla.newspaper",
+ "davis origin name",
+ "upper deck.com",
+ "arizona",
+ "akira lane",
+ "ebaumsworld",
+ "union pacific jobs",
+ "google.cm",
+ "free bigt girls nudes",
+ "abcnews.com",
+ "tootse.com",
+ "az lyrics",
+ "freddy",
+ "georgia.com",
+ "johncombest.com",
+ "nelly",
+ "gussi mane",
+ "university of illinois",
+ "oregan valcano's",
+ "mythbusters",
+ "sailormoon hentai",
+ "international cub tractor",
+ "desert sky movie green valley az",
+ "evite",
+ "nelly nud epics",
+ "penndot.com",
+ "first banks",
+ "psp manual",
+ "google",
+ "jackieaudet hotmail.com",
+ "internet",
+ "shootinggames",
+ "shootinggames",
+ "montana western rendezvous of art",
+ "hello kitty layouts",
+ "yahoo",
+ "translation",
+ "glenn scott attorney",
+ "hallofshame",
+ "capitolone.com",
+ "recipe for popovers",
+ "pictures of demons",
+ "barnes and nobles.com",
+ "rbd",
+ "hart and hunnington tattoo shop",
+ "janepowellmovies.com",
+ "ged schools in the military",
+ "kelis",
+ "hvacagent",
+ "neat home organizer television show",
+ "2719 24-2-crime and courts",
+ "fsu",
+ "torpedo bomber games",
+ "love poems",
+ "polly pocket'toys",
+ "yweatherahoo.com",
+ "jungle gin",
+ "flemington new jersey real estate",
+ "milf hunter stories",
+ "budget.com",
+ "chopperstyle",
+ "keno player",
+ "up skirt",
+ "dogs",
+ "beerballers",
+ "phat white butt",
+ "phat white butt",
+ "va licensing for interpeters for the deaf",
+ "white page phone book maiden north carolina",
+ "controlled 20solutions 20corp.com",
+ "friedman jewelery",
+ "kelis",
+ "curtains",
+ "curtains",
+ "fuck me harder",
+ "naked girls",
+ "southwest airlines boarding pass",
+ "mailbox",
+ "1976 mavrick",
+ "adult diapers",
+ "horse nasal discharge",
+ "charles ludlam",
+ "google",
+ "himnos en espanol",
+ "quarter horses for sale in nebraska",
+ "cosmo",
+ "hi",
+ "mattel",
+ "aouto 20trader.com",
+ "sunsetter awnings",
+ "bl.cfm",
+ "at",
+ "tattoo designs",
+ "bubs",
+ "yahoo",
+ "free live gay cam chats",
+ "antibiotics",
+ "upgrade",
+ "aessuccess.org",
+ "yahoo",
+ "boobdex",
+ "the jackle",
+ "plus size lingerie magazines for home",
+ "lehigh valley little league",
+ "ancient trade coins",
+ "pillsbury",
+ "colorado springs",
+ "canada aviation jobs",
+ "free guitar tablature",
+ "kids aol",
+ "capitol community colage",
+ "kevin thomas bermuda",
+ "missouri lotto",
+ "homedepotsportscomplex.com",
+ "dr. franklin schneier",
+ "williamsburg va. hotels",
+ "aim",
+ "morningbuzz",
+ "probusines.com",
+ "wwwalbasoul.com",
+ "w.runehints.com",
+ "yahoo.com",
+ "yahoo.com",
+ "yahoo.com",
+ "fantasy 5",
+ "xxx rape",
+ "hawaiian gift baskets",
+ "madonna.com",
+ "myspace contact tables",
+ "white cock",
+ "safe space",
+ "drinks",
+ "o rly",
+ "dsl",
+ "wwww.uncc.edu",
+ "wwww.uncc.edu",
+ "wwww.uncc.edu",
+ "online overseas checkt.westernunion.com",
+ "angina",
+ "heba technologies",
+ "hebrew ancient coins",
+ "games",
+ "recent",
+ "international male.com",
+ "sex pics",
+ "paul wall layouts for myspace",
+ "health",
+ "wire lamp shade frames",
+ "windows",
+ "top business colleges",
+ "mary jo eustace",
+ "attored",
+ "oklahoma indian legal services",
+ "6arab",
+ "santo nino",
+ "10.1.0.199",
+ "http www.myspace.com daffydonn07",
+ "marine electrical",
+ "sandy creek cabins weekend new york",
+ "onionbutts",
+ "tucson classifieds",
+ "new york times",
+ "recently deleted screen names",
+ "goldeneagle.net",
+ "fta support forums",
+ "low protein in bloos",
+ "datring",
+ "lilwayne",
+ "free billiards games",
+ "yahoo",
+ "ako",
+ "a.tribalfusion.c script language",
+ "dustin davis",
+ "cooking",
+ "yahoo.com",
+ "universal studios",
+ "adult chat",
+ "santa monica flea market",
+ "carpevino.us",
+ "wine vinyard in stewertstown pa",
+ "y",
+ "craigslist",
+ "ups.com",
+ "1-866-347-3292",
+ "renegade boats",
+ "renegade boats",
+ "sunset state beach caping",
+ "artofstanlync.org",
+ "heart-i want make love to you video",
+ "triangles around the world",
+ "mycl.cravelyrics.com",
+ "in the bible what type of persons were forced to walk around in public and say unclean unclean",
+ "providence water fire",
+ "googlecom",
+ "yahoo.com",
+ "b.g",
+ "website de rebelde",
+ "stoplinks",
+ "allison 2000 transmission",
+ "thepriceanduseofgasoline.com",
+ "chamillinaire",
+ "veryspecialhomescom",
+ "crashbandicoot",
+ "a short sex story",
+ "yahoo.com",
+ "music now",
+ "east carolina university",
+ "vandalism in new york",
+ "the bainde soleil company",
+ "dicaprio movies",
+ "xxx dvds",
+ "visual basic scripting support",
+ "english bulldogs",
+ "travelocity.com",
+ "website for asstr.org",
+ "hypnotic slave training",
+ "pogo",
+ "university at buffalo addmissions",
+ "screen name services",
+ "superdrol",
+ "art institute",
+ "online business cards",
+ "aolfinancial",
+ "upgrade shop",
+ "anderson abrasive",
+ "weatherchannel.com",
+ "recent",
+ "ebay",
+ "diagram and xray of a normal shouldercheck out surgicalpoker.comfor more sports medicine and orthopedic information and images check out emedx.com by dr. allan mishranormal diagram normal x-ray",
+ "95 mustang gt chips",
+ "gold grills",
+ "hap housing in portland or",
+ "car sales",
+ "swimming with dolphins",
+ "jennifer lopez nude",
+ "wwwdubcnn.com",
+ "dominicks pizza",
+ "fl studio",
+ "http blackplanet .com",
+ "http blackplanet .com",
+ "http blackplanet .com",
+ "A$AP Rocky",
+ "benie mac",
+ "fujifilm.com",
+ "aol dialup setup",
+ "metal fabrication tools",
+ "internet",
+ "buy my painting",
+ "pulaski va classifieds",
+ "w.coj.net",
+ "postopia.com",
+ "no medical records hydrocodone",
+ "auto completes for deal or no deal contest",
+ "http www. big monster dicks .com",
+ "invacare wheelchairs",
+ "musicdownload.com",
+ "president bush",
+ "heavy equipment",
+ "inmate information",
+ "allina.com",
+ "megan law.gov",
+ "wwwl.eharmony.com",
+ "jobs in colombiaoqx0nq",
+ "beastsex",
+ "ferguisson",
+ "heart-i wanna make love to you vedio",
+ "west georgia university",
+ "west georgia university",
+ "hsn",
+ "bb&t",
+ "midas realty",
+ "yahoo",
+ "mytrip.com",
+ "donna texas mcdonalds",
+ "free picture of our lady",
+ "bubs",
+ "taken chemo for 5 month's cancer can still be seen on ct scan",
+ "porn 20video 20clips",
+ "lake monsters",
+ "freedj mix vibes",
+ "myspace.coim",
+ "la joya school district tx",
+ "colorado bungee jumping",
+ "yahoo",
+ "google.com",
+ "lafayette co vampire grave",
+ "ice cube",
+ "internet",
+ "tccd.edu",
+ "google",
+ "people",
+ "instructions on putting together a filing cabinet",
+ "click.babycenter.com",
+ "90minut",
+ "ramien noodles",
+ "lilwayne",
+ "danni virgin",
+ "nice sexy girls.com",
+ "guttural pouch",
+ "free male masturbating",
+ "good",
+ "rotton 20dot.com",
+ "fox sports",
+ "seth rogen",
+ "desb.mspaceads.com",
+ "betjc.com",
+ "pictures of quebec",
+ "gold in quartz",
+ "evergreen college",
+ "runescape",
+ "gastons white river resort",
+ "sunset beach santa cruz",
+ "auto parts",
+ "travelocity",
+ "myspace.com",
+ "laptops",
+ "beyaonce and j",
+ "free gay ebony knights webcams",
+ "google",
+ "derek watson",
+ "alice in wonderland tshirts",
+ "hippa p rivacy act",
+ "down payment mortgage",
+ "believe it or not",
+ "mys",
+ "datatreca",
+ "onesuite",
+ "names",
+ "lil john",
+ "scales of justice cuff links",
+ "localsales.com",
+ "alametris denise lipsey",
+ "adam for adam",
+ "flip flops crochet",
+ "arbors",
+ "heb hospital",
+ "myspae.com",
+ "midevil breast torture",
+ "askjeeves",
+ "assparade",
+ ".comhttp",
+ "weekly hotels reston virginia",
+ "noiceinparadise.com",
+ "pre diabetic diet",
+ "h.i.m.com",
+ "myspace",
+ "myspace",
+ "wwww.sex.lp.cpm",
+ "mcso mugshots",
+ "roush",
+ "wellfargo",
+ "lilwayne",
+ "hopecherie",
+ "frontgate.com",
+ "barbados registration department",
+ "american pitbull",
+ "free pc full flight simulation game downloads",
+ "google",
+ "vaginal secretion grey stuff",
+ "myspace layouts",
+ "kanye west",
+ "walmart",
+ "pain in hip and leg",
+ "tenneesseeaquarium.com",
+ "suncom.com",
+ "alysseandrachelwerehere",
+ "pimiclo",
+ "starmagazine.com",
+ "classifieds",
+ "mount rushmore in dakota",
+ "sams",
+ "disney com",
+ "beastyality",
+ "chief joseph paintings",
+ "henry scott",
+ "paris hilton",
+ "kb903235",
+ "autotrader",
+ "irish traveller",
+ "ajcobs.com",
+ "art of stanlync.org",
+ "fox news",
+ "freeporn",
+ "depo provera",
+ "air france",
+ "talk city active chats",
+ "codes for the gamecube game resident evil 4",
+ "good food to eat for sugar diabetes",
+ "warpmymind",
+ "arc jacksonville fl",
+ "7fwww.sendspace.com",
+ "j blackfoot",
+ "mcso madison street jail inmate",
+ "macys",
+ "eduscapes",
+ "free picture of our lady",
+ "http www.eastman.org",
+ "minneapolisstartribune localnews",
+ "minneapolisstartribune localnews",
+ "tennessee",
+ "foodtown",
+ "anti virous download",
+ "http www.mdland rec.net",
+ "ed edd eddy",
+ "maryjbilge",
+ "shipping services",
+ "baseball videogames",
+ "egyption ancient coins",
+ "internet",
+ "what is sodomy",
+ "international cub lowboy",
+ "mary j. bilge",
+ "scenic backgrounds",
+ "google.com",
+ "rosettalangueges.com",
+ "titanpoker.net",
+ "titie show",
+ "edelen realtor",
+ "lil cim",
+ "china.com",
+ "boost mobile",
+ "nc eipa",
+ "people's 20pharmacy 20guide 20to",
+ "costco",
+ "charles schultz drawings",
+ "nicisterling",
+ "a picture of author stephen crane",
+ "yahoo.com",
+ "sponge bob myspace layouts",
+ "g",
+ "calendar creator",
+ "careerbuilder.com",
+ "cool tex for web pages",
+ "yahoo.com",
+ "mcdougal littel",
+ "sign on",
+ "superman",
+ "radio",
+ "lajollaindians.com",
+ "mike tyson died",
+ "pink panther",
+ "lolita newgroups",
+ "nude girls",
+ "galveston 20texas",
+ "gerlach meat co.",
+ "thetakeover2006.com",
+ "yahoo",
+ "simpsons movie",
+ "saxy",
+ "yahoo",
+ "21st century realty",
+ "new zealand",
+ "dogs",
+ "weather",
+ "free porn sex",
+ "bugs bunny parties",
+ "mortal kombat 2 fatalities",
+ "sea life park hawaii",
+ "songs for middle school choir",
+ "rocky mountain jeep",
+ "householdbank.com",
+ "birdville isd",
+ "brutal dildo",
+ "brutal dildo",
+ "free live gay cam chats",
+ "wonder woman",
+ "ebay com",
+ "myspace.com",
+ "boost mobile",
+ "desktop themes sex",
+ "myspace.com",
+ "myspace.com",
+ "maroon chevy auto dealership",
+ "beyonce",
+ "cleopatra vii",
+ "accountcentralonline.com",
+ "juvenile",
+ "the game cock",
+ "pics of ashland city tennessee",
+ "coherent deos",
+ "microwsoft wireless connection",
+ "best buy",
+ "southwest airlines",
+ "southwest airlines",
+ "pogo games",
+ "family court record room in brooklyn newyork",
+ "60.ufc.net",
+ "us mint",
+ "people",
+ "firstcitycreditunion",
+ "washington mutual careers",
+ "beyonce",
+ "tab energy drink",
+ "http vemmabuilder.com",
+ "new york state lottery",
+ "yahoo",
+ "tmobile",
+ "yellow pages.com",
+ "az.central.com",
+ "pasco auto salvage",
+ "im help",
+ "home based businesses",
+ "studyisland",
+ "bible study from king james on 1 corinthians chapter 6 verses 18- 20",
+ "bellevue-ne",
+ "msn.com",
+ "aolsignupfree",
+ "the simsons",
+ "nevada",
+ "forsyth central high school",
+ "road state college",
+ "does my child have adhd",
+ "les tucanesde tijuana",
+ "yahoo.com",
+ "mexican pharmacy hyrocodone",
+ "ford motor co year end sales",
+ "google.com",
+ "google.com",
+ "person.com",
+ "marylyn monroe",
+ "nfl",
+ "the hun.net",
+ "nkena anderson",
+ "free netscape download",
+ "top fifty colleges",
+ "wil.",
+ "memphis tennessee",
+ "yahoo mail",
+ "corrections officer of juveniles",
+ "jada pinkett smith",
+ "mapquest.com",
+ "apartments",
+ "msn.com",
+ "msn.com",
+ "wasco state prison",
+ "solitaire",
+ "http",
+ "freeport seaman center",
+ "futbol soccer",
+ "screen names",
+ "kmov.com",
+ "survey.otxresearch.com",
+ "facial shaves",
+ "gle",
+ "flw.com",
+ "seasportboats.com",
+ "toysrus.com",
+ "animated sexy graphics",
+ "colombia",
+ "unitarian univeralist association",
+ "fr",
+ "google video.com",
+ "660-342-1072",
+ "suzan-lori parks",
+ "male facial",
+ "william bouguereau first kiss how much it is worth",
+ "streetfighter",
+ "nick.com",
+ "wonder woman",
+ "pentagram",
+ "mcafee virus protection",
+ "diary",
+ "037f34742140a5f761ad51d95180b4f8",
+ "free porn",
+ "no deposit casino bonus",
+ "spongebob the movie myspace layouts",
+ "on line banking",
+ "equestrian properties for sale",
+ "kazaa free muisc download",
+ "gay truckers",
+ "24",
+ "pay-pal",
+ "www yahoo.com",
+ "phatazz.white hoes",
+ "planets of the universe",
+ "free movies",
+ "budget rentals special",
+ "yahoogames",
+ "talaat pasha",
+ "mariah carey song lyrics don't forget about us",
+ "futbol soccer",
+ "msn groups",
+ "martha steward",
+ "martha steward",
+ "soap opera scoops cbs",
+ "cingular",
+ "stuwie",
+ "womengiving blowjobs",
+ "hear dancing queen by abba",
+ "love song",
+ "fhsaa.org",
+ "any dvd",
+ "any dvd",
+ "gallery.brookeskye.com",
+ "gibson ranch",
+ "wachovia com",
+ "kzg golf information",
+ "skylight curtains",
+ "c",
+ "123freeweblayouts.com",
+ "yahoo.com",
+ "allie.com",
+ "ghosts of bingham cemetery",
+ "resume maker",
+ "resume maker",
+ "resume maker",
+ "lymphomatoid papulosis",
+ "sez.com",
};
}
@@ -1140,7 +1140,7 @@ private:
learn.emplace_back();
for (ui32 i = 0; i < 256; ++i) {
for (ui32 j = 0; j < i; ++j) {
- learn.back().Append((ui8)i);
+ learn.back().Append((ui8)i);
}
}
@@ -1177,17 +1177,17 @@ private:
}
void TestZStdDict() {
- using namespace NCodecs;
- {
- TVector<TBuffer> learn;
-
- for (auto& textValue : TextValues) {
- learn.emplace_back(textValue, strlen(textValue));
- }
-
- TestCodec<TZStdDictCodec, true>(learn);
- }
-
+ using namespace NCodecs;
+ {
+ TVector<TBuffer> learn;
+
+ for (auto& textValue : TextValues) {
+ learn.emplace_back(textValue, strlen(textValue));
+ }
+
+ TestCodec<TZStdDictCodec, true>(learn);
+ }
+
}
void TestCompTable() {
diff --git a/library/cpp/codecs/zstd_dict_codec.cpp b/library/cpp/codecs/zstd_dict_codec.cpp
index c42a2879e6..5729d1de3f 100644
--- a/library/cpp/codecs/zstd_dict_codec.cpp
+++ b/library/cpp/codecs/zstd_dict_codec.cpp
@@ -159,10 +159,10 @@ namespace NCodecs {
lens.push_back(r.size());
}
- ZDICT_legacy_params_t params;
+ ZDICT_legacy_params_t params;
memset(&params, 0, sizeof(params));
- params.zParams.compressionLevel = 1;
- params.zParams.notificationLevel = 1;
+ params.zParams.compressionLevel = 1;
+ params.zParams.notificationLevel = 1;
Dict.Resize(Max<size_t>(1 << 20, data.Size() + 16 * lens.size()));
if (!lens) {
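
The params setup in this hunk feeds zstd's legacy dictionary trainer. A standalone sketch of the same call pattern, assuming ZDICT_STATIC_LINKING_ONLY and the usual ZDICT sample layout (one concatenated buffer plus one size per sample, mirroring `data` and `lens` above); the helper name is hypothetical.

#define ZDICT_STATIC_LINKING_ONLY
#include <string.h>
#include <zdict.h>

/* Hypothetical helper: train a dictionary with the legacy trainer.
   Returns the dictionary size written to dictBuf, or an error code
   to be tested with ZDICT_isError(). */
static size_t train_legacy_dict(void* dictBuf, size_t dictCap,
                                const void* samples, const size_t* sampleSizes,
                                unsigned nbSamples)
{
    ZDICT_legacy_params_t params;
    memset(&params, 0, sizeof(params));     /* 0 means "use defaults" for most fields */
    params.zParams.compressionLevel = 1;    /* tune the dictionary for fast levels */
    params.zParams.notificationLevel = 1;   /* report errors only */
    return ZDICT_trainFromBuffer_legacy(dictBuf, dictCap, samples,
                                        sampleSizes, nbSamples, params);
}
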