author    | igorsolovyev <igorsolovyev@yandex-team.ru> | 2022-02-10 16:48:03 +0300
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:48:03 +0300
commit    | 93dc653cf53bf7a9319b52b85a7c02edfd95463d (patch)
tree      | b85de7682b5f10d28a798003716a65756425aa15 /contrib/libs/zstd/lib/common
parent    | 6ab7e5f5ada0643a48d393717f443bd548706ffc (diff)
download  | ydb-93dc653cf53bf7a9319b52b85a7c02edfd95463d.tar.gz
Restoring authorship annotation for <igorsolovyev@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/zstd/lib/common')
-rw-r--r-- | contrib/libs/zstd/lib/common/bitstream.h | 214
-rw-r--r-- | contrib/libs/zstd/lib/common/compiler.h | 164
-rw-r--r-- | contrib/libs/zstd/lib/common/cpu.h | 418
-rw-r--r-- | contrib/libs/zstd/lib/common/entropy_common.c | 24
-rw-r--r-- | contrib/libs/zstd/lib/common/error_private.c | 32
-rw-r--r-- | contrib/libs/zstd/lib/common/error_private.h | 14
-rw-r--r-- | contrib/libs/zstd/lib/common/fse.h | 120
-rw-r--r-- | contrib/libs/zstd/lib/common/fse_decompress.c | 10
-rw-r--r-- | contrib/libs/zstd/lib/common/huf.h | 306
-rw-r--r-- | contrib/libs/zstd/lib/common/mem.h | 62
-rw-r--r-- | contrib/libs/zstd/lib/common/pool.c | 462
-rw-r--r-- | contrib/libs/zstd/lib/common/pool.h | 102
-rw-r--r-- | contrib/libs/zstd/lib/common/threading.c | 138
-rw-r--r-- | contrib/libs/zstd/lib/common/threading.h | 238
-rw-r--r-- | contrib/libs/zstd/lib/common/zstd_common.c | 52
-rw-r--r-- | contrib/libs/zstd/lib/common/zstd_internal.h | 138
16 files changed, 1247 insertions, 1247 deletions
diff --git a/contrib/libs/zstd/lib/common/bitstream.h b/contrib/libs/zstd/lib/common/bitstream.h index 84b6062ff3..abbde06ca8 100644 --- a/contrib/libs/zstd/lib/common/bitstream.h +++ b/contrib/libs/zstd/lib/common/bitstream.h @@ -43,21 +43,21 @@ extern "C" { # endif #endif -#define STREAM_ACCUMULATOR_MIN_32 25 -#define STREAM_ACCUMULATOR_MIN_64 57 -#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) - +#define STREAM_ACCUMULATOR_MIN_32 25 +#define STREAM_ACCUMULATOR_MIN_64 57 +#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) + /*-****************************************** * bitStream encoding API (write forward) ********************************************/ /* bitStream can mix input from multiple sources. - * A critical property of these streams is that they encode and decode in **reverse** direction. - * So the first bit sequence you add will be the last to be read, like a LIFO stack. - */ + * A critical property of these streams is that they encode and decode in **reverse** direction. + * So the first bit sequence you add will be the last to be read, like a LIFO stack. + */ typedef struct { size_t bitContainer; - unsigned bitPos; + unsigned bitPos; char* startPtr; char* ptr; char* endPtr; @@ -94,7 +94,7 @@ typedef struct { unsigned bitsConsumed; const char* ptr; const char* start; - const char* limitPtr; + const char* limitPtr; } BIT_DStream_t; typedef enum { BIT_DStream_unfinished = 0, @@ -137,10 +137,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /*-************************************************************** * Internal functions ****************************************************************/ -MEM_STATIC unsigned BIT_highbit32 (U32 val) +MEM_STATIC unsigned BIT_highbit32 (U32 val) { - assert(val != 0); - { + assert(val != 0); + { # if defined(_MSC_VER) /* Visual */ # if STATIC_BMI2 == 1 return _lzcnt_u32(val) ^ 31; @@ -159,59 +159,59 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val) # elif defined(__ICCARM__) /* IAR Intrinsic */ return 31 - __CLZ(val); # else /* Software version */ - static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, - 11, 14, 16, 18, 22, 25, 3, 30, - 8, 12, 20, 28, 15, 17, 24, 7, - 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; + static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, + 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, + 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; # endif - } + } } /*===== Local Constants =====*/ -static const unsigned BIT_mask[] = { - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, - 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, - 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, - 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */ -#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0])) +static const unsigned BIT_mask[] = { + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, + 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, + 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, + 0x3FFFFFFF, 0x7FFFFFFF}; 
/* up to 31 bits */ +#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0])) /*-************************************************************** * bitStream encoding ****************************************************************/ /*! BIT_initCStream() : - * `dstCapacity` must be > sizeof(size_t) + * `dstCapacity` must be > sizeof(size_t) * @return : 0 if success, - * otherwise an error code (can be tested using ERR_isError()) */ -MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, - void* startPtr, size_t dstCapacity) + * otherwise an error code (can be tested using ERR_isError()) */ +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, + void* startPtr, size_t dstCapacity) { bitC->bitContainer = 0; bitC->bitPos = 0; bitC->startPtr = (char*)startPtr; bitC->ptr = bitC->startPtr; - bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer); - if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall); + bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer); + if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall); return 0; } /*! BIT_addBits() : - * can add up to 31 bits into `bitC`. - * Note : does not check for register overflow ! */ -MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, - size_t value, unsigned nbBits) + * can add up to 31 bits into `bitC`. + * Note : does not check for register overflow ! */ +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, + size_t value, unsigned nbBits) { DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32); - assert(nbBits < BIT_MASK_SIZE); - assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); + assert(nbBits < BIT_MASK_SIZE); + assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; bitC->bitPos += nbBits; } @@ -219,74 +219,74 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, /*! BIT_addBitsFast() : * works only if `value` is _clean_, * meaning all high bits above nbBits are 0 */ -MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, - size_t value, unsigned nbBits) +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, + size_t value, unsigned nbBits) { - assert((value>>nbBits) == 0); - assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); + assert((value>>nbBits) == 0); + assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); bitC->bitContainer |= value << bitC->bitPos; bitC->bitPos += nbBits; } /*! BIT_flushBitsFast() : - * assumption : bitContainer has not overflowed + * assumption : bitContainer has not overflowed * unsafe version; does not check buffer overflow */ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC) { size_t const nbBytes = bitC->bitPos >> 3; - assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); + assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); assert(bitC->ptr <= bitC->endPtr); MEM_writeLEST(bitC->ptr, bitC->bitContainer); bitC->ptr += nbBytes; bitC->bitPos &= 7; - bitC->bitContainer >>= nbBytes*8; + bitC->bitContainer >>= nbBytes*8; } /*! BIT_flushBits() : - * assumption : bitContainer has not overflowed + * assumption : bitContainer has not overflowed * safe version; check for buffer overflow, and prevents it. - * note : does not signal buffer overflow. - * overflow will be revealed later on using BIT_closeCStream() */ + * note : does not signal buffer overflow. 
+ * overflow will be revealed later on using BIT_closeCStream() */ MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC) { size_t const nbBytes = bitC->bitPos >> 3; - assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); + assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); assert(bitC->ptr <= bitC->endPtr); MEM_writeLEST(bitC->ptr, bitC->bitContainer); bitC->ptr += nbBytes; if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; bitC->bitPos &= 7; - bitC->bitContainer >>= nbBytes*8; + bitC->bitContainer >>= nbBytes*8; } /*! BIT_closeCStream() : * @return : size of CStream, in bytes, - * or 0 if it could not fit into dstBuffer */ + * or 0 if it could not fit into dstBuffer */ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) { BIT_addBitsFast(bitC, 1, 1); /* endMark */ BIT_flushBits(bitC); - if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ + if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); } /*-******************************************************** -* bitStream decoding +* bitStream decoding **********************************************************/ /*! BIT_initDStream() : - * Initialize a BIT_DStream_t. - * `bitD` : a pointer to an already allocated BIT_DStream_t structure. - * `srcSize` must be the *exact* size of the bitStream, in bytes. - * @return : size of stream (== srcSize), or an errorCode if a problem is detected - */ + * Initialize a BIT_DStream_t. + * `bitD` : a pointer to an already allocated BIT_DStream_t structure. + * `srcSize` must be the *exact* size of the bitStream, in bytes. + * @return : size of stream (== srcSize), or an errorCode if a problem is detected + */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) { if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } - bitD->start = (const char*)srcBuffer; - bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer); - + bitD->start = (const char*)srcBuffer; + bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer); + if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); bitD->bitContainer = MEM_readLEST(bitD->ptr); @@ -298,30 +298,30 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si bitD->bitContainer = *(const BYTE*)(bitD->start); switch(srcSize) { - case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); + case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); ZSTD_FALLTHROUGH; - - case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); + + case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); ZSTD_FALLTHROUGH; - - case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); + + case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); ZSTD_FALLTHROUGH; - - case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; + + case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; ZSTD_FALLTHROUGH; - - case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; + + case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; ZSTD_FALLTHROUGH; - - 
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; + + case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; ZSTD_FALLTHROUGH; - - default: break; - } - { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; - bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; - if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ + + default: break; } + { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; + bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; + if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ + } bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; } @@ -337,7 +337,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c { U32 const regMask = sizeof(bitContainer)*8 - 1; /* if start > regMask, bitstream is corrupted, and result is undefined */ - assert(nbBits < BIT_MASK_SIZE); + assert(nbBits < BIT_MASK_SIZE); /* x86 transform & ((1 << nbBits) - 1) to bzhi instruction, it is better * than accessing memory. When bmi2 instruction is not present, we consider * such cpus old (pre-Haswell, 2013) and their performance is not of that @@ -355,7 +355,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 co #if defined(STATIC_BMI2) && STATIC_BMI2 == 1 return _bzhi_u64(bitContainer, nbBits); #else - assert(nbBits < BIT_MASK_SIZE); + assert(nbBits < BIT_MASK_SIZE); return bitContainer & BIT_mask[nbBits]; #endif } @@ -365,7 +365,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 co * local register is not modified. * On 32-bits, maxNbBits==24. * On 64-bits, maxNbBits==56. - * @return : value extracted */ + * @return : value extracted */ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) { /* arbitrate between double-shift and shift+mask */ @@ -375,18 +375,18 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U3 return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits); #else /* this code path is slower on my os-x laptop */ - U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; - return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask); + U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; + return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask); #endif } /*! BIT_lookBitsFast() : - * unsafe version; only works if nbBits >= 1 */ + * unsafe version; only works if nbBits >= 1 */ MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) { - U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; - assert(nbBits >= 1); - return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); + U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; + assert(nbBits >= 1); + return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); } MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) @@ -397,7 +397,7 @@ MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) /*! BIT_readBits() : * Read (consume) next n bits from local register and update. * Pay attention to not read more than nbBits contained into local register. - * @return : extracted value. */ + * @return : extracted value. 
*/ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) { size_t const value = BIT_lookBits(bitD, nbBits); @@ -406,11 +406,11 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned n } /*! BIT_readBitsFast() : - * unsafe version; only works only if nbBits >= 1 */ + * unsafe version; only works only if nbBits >= 1 */ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) { size_t const value = BIT_lookBitsFast(bitD, nbBits); - assert(nbBits >= 1); + assert(nbBits >= 1); BIT_skipBits(bitD, nbBits); return value; } @@ -433,23 +433,23 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) } /*! BIT_reloadDStream() : - * Refill `bitD` from buffer previously set in BIT_initDStream() . - * This function is safe, it guarantees it will not read beyond src buffer. - * @return : status of `BIT_DStream_t` internal register. - * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ + * Refill `bitD` from buffer previously set in BIT_initDStream() . + * This function is safe, it guarantees it will not read beyond src buffer. + * @return : status of `BIT_DStream_t` internal register. + * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) { - if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ - return BIT_DStream_overflow; + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ + return BIT_DStream_overflow; - if (bitD->ptr >= bitD->limitPtr) { + if (bitD->ptr >= bitD->limitPtr) { return BIT_reloadDStreamFast(bitD); } if (bitD->ptr == bitD->start) { if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } - /* start < ptr < limitPtr */ + /* start < ptr < limitPtr */ { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { @@ -458,14 +458,14 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) } bitD->ptr -= nbBytes; bitD->bitsConsumed -= nbBytes*8; - bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */ + bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */ return result; } } /*! BIT_endOfDStream() : - * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). - */ + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). + */ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) { return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); diff --git a/contrib/libs/zstd/lib/common/compiler.h b/contrib/libs/zstd/lib/common/compiler.h index 516930c01e..441b3586d2 100644 --- a/contrib/libs/zstd/lib/common/compiler.h +++ b/contrib/libs/zstd/lib/common/compiler.h @@ -1,38 +1,38 @@ -/* +/* * Copyright (c) Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). 
- * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_COMPILER_H -#define ZSTD_COMPILER_H - + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_COMPILER_H +#define ZSTD_COMPILER_H + #include "portability_macros.h" -/*-******************************************************* -* Compiler specifics -*********************************************************/ -/* force inlining */ +/*-******************************************************* +* Compiler specifics +*********************************************************/ +/* force inlining */ #if !defined(ZSTD_NO_INLINE) #if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# define INLINE_KEYWORD inline -#else -# define INLINE_KEYWORD -#endif - +# define INLINE_KEYWORD inline +#else +# define INLINE_KEYWORD +#endif + #if defined(__GNUC__) || defined(__ICCARM__) -# define FORCE_INLINE_ATTR __attribute__((always_inline)) -#elif defined(_MSC_VER) -# define FORCE_INLINE_ATTR __forceinline -#else -# define FORCE_INLINE_ATTR -#endif - +# define FORCE_INLINE_ATTR __attribute__((always_inline)) +#elif defined(_MSC_VER) +# define FORCE_INLINE_ATTR __forceinline +#else +# define FORCE_INLINE_ATTR +#endif + #else #define INLINE_KEYWORD @@ -40,7 +40,7 @@ #endif -/** +/** On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC). This explicitly marks such functions as __cdecl so that the code will still compile if a CC other than __cdecl has been made the default. @@ -52,28 +52,28 @@ #endif /** - * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant + * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant * parameters. They must be inlined for the compiler to eliminate the constant - * branches. - */ -#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR -/** - * HINT_INLINE is used to help the compiler generate better code. It is *not* - * used for "templates", so it can be tweaked based on the compilers - * performance. - * - * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the - * always_inline attribute. - * - * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline - * attribute. - */ -#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 -# define HINT_INLINE static INLINE_KEYWORD -#else -# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR -#endif - + * branches. + */ +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR +/** + * HINT_INLINE is used to help the compiler generate better code. It is *not* + * used for "templates", so it can be tweaked based on the compilers + * performance. + * + * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the + * always_inline attribute. + * + * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline + * attribute. 
+ */ +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 +# define HINT_INLINE static INLINE_KEYWORD +#else +# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR +#endif + /* UNUSED_ATTR tells the compiler it is okay if the function is unused. */ #if defined(__GNUC__) # define UNUSED_ATTR __attribute__((unused)) @@ -81,37 +81,37 @@ # define UNUSED_ATTR #endif -/* force no inlining */ -#ifdef _MSC_VER -# define FORCE_NOINLINE static __declspec(noinline) -#else +/* force no inlining */ +#ifdef _MSC_VER +# define FORCE_NOINLINE static __declspec(noinline) +#else # if defined(__GNUC__) || defined(__ICCARM__) -# define FORCE_NOINLINE static __attribute__((__noinline__)) -# else -# define FORCE_NOINLINE static -# endif -#endif - - -/* target attribute */ +# define FORCE_NOINLINE static __attribute__((__noinline__)) +# else +# define FORCE_NOINLINE static +# endif +#endif + + +/* target attribute */ #if defined(__GNUC__) || defined(__ICCARM__) -# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) -#else -# define TARGET_ATTRIBUTE(target) -#endif - +# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) +#else +# define TARGET_ATTRIBUTE(target) +#endif + /* Target attribute for BMI2 dynamic dispatch. * Enable lzcnt, bmi, and bmi2. * We test for bmi1 & bmi2. lzcnt is included in bmi1. - */ + */ #define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2") - + /* prefetch * can be disabled, by declaring NO_PREFETCH build macro */ #if defined(NO_PREFETCH) # define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ # define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ -#else +#else # if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ # define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) @@ -127,7 +127,7 @@ # define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ # endif #endif /* NO_PREFETCH */ - + #define CACHELINE_SIZE 64 #define PREFETCH_AREA(p, s) { \ @@ -165,16 +165,16 @@ #define UNLIKELY(x) (x) #endif -/* disable warnings */ -#ifdef _MSC_VER /* Visual Studio */ -# include <intrin.h> /* For Visual 2005 */ -# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ -# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ -# pragma warning(disable : 4324) /* disable: C4324: padded structure */ -#endif - +/* disable warnings */ +#ifdef _MSC_VER /* Visual Studio */ +# include <intrin.h> /* For Visual 2005 */ +# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ +# pragma warning(disable : 4324) /* disable: C4324: padded structure */ +#endif + /*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/ #ifndef STATIC_BMI2 # if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) @@ -332,4 +332,4 @@ void __asan_poison_memory_region(void const volatile *addr, size_t size); void __asan_unpoison_memory_region(void const volatile 
*addr, size_t size); #endif -#endif /* ZSTD_COMPILER_H */ +#endif /* ZSTD_COMPILER_H */ diff --git a/contrib/libs/zstd/lib/common/cpu.h b/contrib/libs/zstd/lib/common/cpu.h index 8acd33be3c..547cea7efd 100644 --- a/contrib/libs/zstd/lib/common/cpu.h +++ b/contrib/libs/zstd/lib/common/cpu.h @@ -1,213 +1,213 @@ -/* +/* * Copyright (c) Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef ZSTD_COMMON_CPU_H -#define ZSTD_COMMON_CPU_H - -/** - * Implementation taken from folly/CpuId.h - * https://github.com/facebook/folly/blob/master/folly/CpuId.h - */ - -#include "mem.h" - -#ifdef _MSC_VER -#include <intrin.h> -#endif - -typedef struct { - U32 f1c; - U32 f1d; - U32 f7b; - U32 f7c; -} ZSTD_cpuid_t; - -MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { - U32 f1c = 0; - U32 f1d = 0; - U32 f7b = 0; - U32 f7c = 0; + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_COMMON_CPU_H +#define ZSTD_COMMON_CPU_H + +/** + * Implementation taken from folly/CpuId.h + * https://github.com/facebook/folly/blob/master/folly/CpuId.h + */ + +#include "mem.h" + +#ifdef _MSC_VER +#include <intrin.h> +#endif + +typedef struct { + U32 f1c; + U32 f1d; + U32 f7b; + U32 f7c; +} ZSTD_cpuid_t; + +MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { + U32 f1c = 0; + U32 f1d = 0; + U32 f7b = 0; + U32 f7c = 0; #if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) - int reg[4]; - __cpuid((int*)reg, 0); - { - int const n = reg[0]; - if (n >= 1) { - __cpuid((int*)reg, 1); - f1c = (U32)reg[2]; - f1d = (U32)reg[3]; - } - if (n >= 7) { - __cpuidex((int*)reg, 7, 0); - f7b = (U32)reg[1]; - f7c = (U32)reg[2]; - } - } -#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__) - /* The following block like the normal cpuid branch below, but gcc - * reserves ebx for use of its pic register so we must specially - * handle the save and restore to avoid clobbering the register - */ - U32 n; - __asm__( - "pushl %%ebx\n\t" - "cpuid\n\t" - "popl %%ebx\n\t" - : "=a"(n) - : "a"(0) - : "ecx", "edx"); - if (n >= 1) { - U32 f1a; - __asm__( - "pushl %%ebx\n\t" - "cpuid\n\t" - "popl %%ebx\n\t" - : "=a"(f1a), "=c"(f1c), "=d"(f1d) + int reg[4]; + __cpuid((int*)reg, 0); + { + int const n = reg[0]; + if (n >= 1) { + __cpuid((int*)reg, 1); + f1c = (U32)reg[2]; + f1d = (U32)reg[3]; + } + if (n >= 7) { + __cpuidex((int*)reg, 7, 0); + f7b = (U32)reg[1]; + f7c = (U32)reg[2]; + } + } +#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__) + /* The following block like the normal cpuid branch below, but gcc + * reserves ebx for use of its pic register so we must specially + * handle the save and restore to avoid clobbering the register + */ + U32 n; + __asm__( + "pushl %%ebx\n\t" + "cpuid\n\t" + "popl %%ebx\n\t" + : "=a"(n) + : "a"(0) + : "ecx", "edx"); + if (n >= 1) { + U32 f1a; + __asm__( + "pushl %%ebx\n\t" + "cpuid\n\t" + "popl %%ebx\n\t" + : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1)); - } - if (n >= 7) { - __asm__( 
- "pushl %%ebx\n\t" - "cpuid\n\t" + } + if (n >= 7) { + __asm__( + "pushl %%ebx\n\t" + "cpuid\n\t" "movl %%ebx, %%eax\n\t" - "popl %%ebx" - : "=a"(f7b), "=c"(f7c) - : "a"(7), "c"(0) - : "edx"); - } -#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__) - U32 n; - __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx"); - if (n >= 1) { - U32 f1a; - __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx"); - } - if (n >= 7) { - U32 f7a; - __asm__("cpuid" - : "=a"(f7a), "=b"(f7b), "=c"(f7c) - : "a"(7), "c"(0) - : "edx"); - } -#endif - { - ZSTD_cpuid_t cpuid; - cpuid.f1c = f1c; - cpuid.f1d = f1d; - cpuid.f7b = f7b; - cpuid.f7c = f7c; - return cpuid; - } -} - -#define X(name, r, bit) \ - MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \ - return ((cpuid.r) & (1U << bit)) != 0; \ - } - -/* cpuid(1): Processor Info and Feature Bits. */ -#define C(name, bit) X(name, f1c, bit) - C(sse3, 0) - C(pclmuldq, 1) - C(dtes64, 2) - C(monitor, 3) - C(dscpl, 4) - C(vmx, 5) - C(smx, 6) - C(eist, 7) - C(tm2, 8) - C(ssse3, 9) - C(cnxtid, 10) - C(fma, 12) - C(cx16, 13) - C(xtpr, 14) - C(pdcm, 15) - C(pcid, 17) - C(dca, 18) - C(sse41, 19) - C(sse42, 20) - C(x2apic, 21) - C(movbe, 22) - C(popcnt, 23) - C(tscdeadline, 24) - C(aes, 25) - C(xsave, 26) - C(osxsave, 27) - C(avx, 28) - C(f16c, 29) - C(rdrand, 30) -#undef C -#define D(name, bit) X(name, f1d, bit) - D(fpu, 0) - D(vme, 1) - D(de, 2) - D(pse, 3) - D(tsc, 4) - D(msr, 5) - D(pae, 6) - D(mce, 7) - D(cx8, 8) - D(apic, 9) - D(sep, 11) - D(mtrr, 12) - D(pge, 13) - D(mca, 14) - D(cmov, 15) - D(pat, 16) - D(pse36, 17) - D(psn, 18) - D(clfsh, 19) - D(ds, 21) - D(acpi, 22) - D(mmx, 23) - D(fxsr, 24) - D(sse, 25) - D(sse2, 26) - D(ss, 27) - D(htt, 28) - D(tm, 29) - D(pbe, 31) -#undef D - -/* cpuid(7): Extended Features. */ -#define B(name, bit) X(name, f7b, bit) - B(bmi1, 3) - B(hle, 4) - B(avx2, 5) - B(smep, 7) - B(bmi2, 8) - B(erms, 9) - B(invpcid, 10) - B(rtm, 11) - B(mpx, 14) - B(avx512f, 16) - B(avx512dq, 17) - B(rdseed, 18) - B(adx, 19) - B(smap, 20) - B(avx512ifma, 21) - B(pcommit, 22) - B(clflushopt, 23) - B(clwb, 24) - B(avx512pf, 26) - B(avx512er, 27) - B(avx512cd, 28) - B(sha, 29) - B(avx512bw, 30) - B(avx512vl, 31) -#undef B -#define C(name, bit) X(name, f7c, bit) - C(prefetchwt1, 0) - C(avx512vbmi, 1) -#undef C - -#undef X - -#endif /* ZSTD_COMMON_CPU_H */ + "popl %%ebx" + : "=a"(f7b), "=c"(f7c) + : "a"(7), "c"(0) + : "edx"); + } +#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__) + U32 n; + __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx"); + if (n >= 1) { + U32 f1a; + __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx"); + } + if (n >= 7) { + U32 f7a; + __asm__("cpuid" + : "=a"(f7a), "=b"(f7b), "=c"(f7c) + : "a"(7), "c"(0) + : "edx"); + } +#endif + { + ZSTD_cpuid_t cpuid; + cpuid.f1c = f1c; + cpuid.f1d = f1d; + cpuid.f7b = f7b; + cpuid.f7c = f7c; + return cpuid; + } +} + +#define X(name, r, bit) \ + MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \ + return ((cpuid.r) & (1U << bit)) != 0; \ + } + +/* cpuid(1): Processor Info and Feature Bits. 
*/ +#define C(name, bit) X(name, f1c, bit) + C(sse3, 0) + C(pclmuldq, 1) + C(dtes64, 2) + C(monitor, 3) + C(dscpl, 4) + C(vmx, 5) + C(smx, 6) + C(eist, 7) + C(tm2, 8) + C(ssse3, 9) + C(cnxtid, 10) + C(fma, 12) + C(cx16, 13) + C(xtpr, 14) + C(pdcm, 15) + C(pcid, 17) + C(dca, 18) + C(sse41, 19) + C(sse42, 20) + C(x2apic, 21) + C(movbe, 22) + C(popcnt, 23) + C(tscdeadline, 24) + C(aes, 25) + C(xsave, 26) + C(osxsave, 27) + C(avx, 28) + C(f16c, 29) + C(rdrand, 30) +#undef C +#define D(name, bit) X(name, f1d, bit) + D(fpu, 0) + D(vme, 1) + D(de, 2) + D(pse, 3) + D(tsc, 4) + D(msr, 5) + D(pae, 6) + D(mce, 7) + D(cx8, 8) + D(apic, 9) + D(sep, 11) + D(mtrr, 12) + D(pge, 13) + D(mca, 14) + D(cmov, 15) + D(pat, 16) + D(pse36, 17) + D(psn, 18) + D(clfsh, 19) + D(ds, 21) + D(acpi, 22) + D(mmx, 23) + D(fxsr, 24) + D(sse, 25) + D(sse2, 26) + D(ss, 27) + D(htt, 28) + D(tm, 29) + D(pbe, 31) +#undef D + +/* cpuid(7): Extended Features. */ +#define B(name, bit) X(name, f7b, bit) + B(bmi1, 3) + B(hle, 4) + B(avx2, 5) + B(smep, 7) + B(bmi2, 8) + B(erms, 9) + B(invpcid, 10) + B(rtm, 11) + B(mpx, 14) + B(avx512f, 16) + B(avx512dq, 17) + B(rdseed, 18) + B(adx, 19) + B(smap, 20) + B(avx512ifma, 21) + B(pcommit, 22) + B(clflushopt, 23) + B(clwb, 24) + B(avx512pf, 26) + B(avx512er, 27) + B(avx512cd, 28) + B(sha, 29) + B(avx512bw, 30) + B(avx512vl, 31) +#undef B +#define C(name, bit) X(name, f7c, bit) + C(prefetchwt1, 0) + C(avx512vbmi, 1) +#undef C + +#undef X + +#endif /* ZSTD_COMMON_CPU_H */ diff --git a/contrib/libs/zstd/lib/common/entropy_common.c b/contrib/libs/zstd/lib/common/entropy_common.c index 4229b40c5e..7734a6d40f 100644 --- a/contrib/libs/zstd/lib/common/entropy_common.c +++ b/contrib/libs/zstd/lib/common/entropy_common.c @@ -23,12 +23,12 @@ #include "huf.h" -/*=== Version ===*/ -unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } - - -/*=== Error Management ===*/ -unsigned FSE_isError(size_t code) { return ERR_isError(code); } +/*=== Version ===*/ +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } + + +/*=== Error Management ===*/ +unsigned FSE_isError(size_t code) { return ERR_isError(code); } const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); } unsigned HUF_isError(size_t code) { return ERR_isError(code); } @@ -158,15 +158,15 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne } { int const max = (2*threshold-1) - remaining; - int count; + int count; if ((bitStream & (threshold-1)) < (U32)max) { - count = bitStream & (threshold-1); - bitCount += nbBits-1; + count = bitStream & (threshold-1); + bitCount += nbBits-1; } else { - count = bitStream & (2*threshold-1); + count = bitStream & (2*threshold-1); if (count >= threshold) count -= max; - bitCount += nbBits; + bitCount += nbBits; } count--; /* extra accuracy */ @@ -179,7 +179,7 @@ size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigne assert(count == -1); remaining += count; } - normalizedCounter[charnum++] = (short)count; + normalizedCounter[charnum++] = (short)count; previous0 = !count; assert(threshold > 1); diff --git a/contrib/libs/zstd/lib/common/error_private.c b/contrib/libs/zstd/lib/common/error_private.c index 6d1135f8c3..cd5eda3191 100644 --- a/contrib/libs/zstd/lib/common/error_private.c +++ b/contrib/libs/zstd/lib/common/error_private.c @@ -1,11 +1,11 @@ -/* +/* * Copyright (c) Yann Collet, Facebook, Inc. * All rights reserved. 
* - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /* The purpose of this file is to have a single list of error strings embedded in binary */ @@ -27,26 +27,26 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(version_unsupported): return "Version not supported"; case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; - case PREFIX(corruption_detected): return "Corrupted block detected"; - case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; - case PREFIX(parameter_unsupported): return "Unsupported parameter"; - case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; + case PREFIX(corruption_detected): return "Corrupted block detected"; + case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; + case PREFIX(parameter_unsupported): return "Unsupported parameter"; + case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; case PREFIX(init_missing): return "Context should be init first"; case PREFIX(memory_allocation): return "Allocation error : not enough memory"; - case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough"; + case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough"; case PREFIX(stage_wrong): return "Operation not authorized at current processing stage"; case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; case PREFIX(dictionary_wrong): return "Dictionary mismatch"; - case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; - case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; - case PREFIX(srcSize_wrong): return "Src size is incorrect"; + case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; + case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; + case PREFIX(srcSize_wrong): return "Src size is incorrect"; case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; - /* following error codes are not stable and may be removed or changed in a future version */ - case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; - case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; + /* following error codes are not stable and may be removed or changed in a future version */ + case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; + case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong"; case PREFIX(srcBuffer_wrong): return "Source buffer is wrong"; 
case PREFIX(maxCode): diff --git a/contrib/libs/zstd/lib/common/error_private.h b/contrib/libs/zstd/lib/common/error_private.h index 007d81066a..f61fc3b02f 100644 --- a/contrib/libs/zstd/lib/common/error_private.h +++ b/contrib/libs/zstd/lib/common/error_private.h @@ -1,11 +1,11 @@ -/* +/* * Copyright (c) Yann Collet, Facebook, Inc. * All rights reserved. * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ /* Note : this module is expected to remain private, do not expose it */ @@ -52,8 +52,8 @@ typedef ZSTD_ErrorCode ERR_enum; * Error codes handling ******************************************/ #undef ERROR /* already defined on Visual Studio */ -#define ERROR(name) ZSTD_ERROR(name) -#define ZSTD_ERROR(name) ((size_t)-PREFIX(name)) +#define ERROR(name) ZSTD_ERROR(name) +#define ZSTD_ERROR(name) ((size_t)-PREFIX(name)) ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } diff --git a/contrib/libs/zstd/lib/common/fse.h b/contrib/libs/zstd/lib/common/fse.h index 714bfd3e7f..c35d3c6e11 100644 --- a/contrib/libs/zstd/lib/common/fse.h +++ b/contrib/libs/zstd/lib/common/fse.h @@ -16,42 +16,42 @@ extern "C" { #endif -#ifndef FSE_H -#define FSE_H - +#ifndef FSE_H +#define FSE_H + /*-***************************************** * Dependencies ******************************************/ #include "zstd_deps.h" /* size_t, ptrdiff_t */ -/*-***************************************** -* FSE_PUBLIC_API : control library symbols visibility -******************************************/ -#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) -# define FSE_PUBLIC_API __attribute__ ((visibility ("default"))) -#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ -# define FSE_PUBLIC_API __declspec(dllexport) -#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) -# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ -#else -# define FSE_PUBLIC_API -#endif - -/*------ Version ------*/ -#define FSE_VERSION_MAJOR 0 -#define FSE_VERSION_MINOR 9 -#define FSE_VERSION_RELEASE 0 - -#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE -#define FSE_QUOTE(str) #str -#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) -#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) - -#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) -FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ - +/*-***************************************** +* FSE_PUBLIC_API : control library symbols visibility +******************************************/ +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) +# define FSE_PUBLIC_API __attribute__ ((visibility ("default"))) +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual 
expected */ +# define FSE_PUBLIC_API __declspec(dllexport) +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) +# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define FSE_PUBLIC_API +#endif + +/*------ Version ------*/ +#define FSE_VERSION_MAJOR 0 +#define FSE_VERSION_MINOR 9 +#define FSE_VERSION_RELEASE 0 + +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE +#define FSE_QUOTE(str) #str +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) + +#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) +FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ + /*-**************************************** * FSE simple functions @@ -64,8 +64,8 @@ FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. if FSE_isError(return), compression failed (more details using FSE_getErrorName()) */ -FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); +FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize); /*! FSE_decompress(): Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', @@ -77,18 +77,18 @@ FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, Why ? : making this distinction requires a header. Header management is intentionally delegated to the user layer, which can better manage special cases. */ -FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, - const void* cSrc, size_t cSrcSize); +FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, + const void* cSrc, size_t cSrcSize); /*-***************************************** * Tool functions ******************************************/ -FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ +FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ /* Error Management */ -FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ -FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ +FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ +FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ /*-***************************************** @@ -102,7 +102,7 @@ FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error co if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression. if FSE_isError(return), it's an error code. */ -FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); +FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); /*-***************************************** @@ -132,7 +132,7 @@ or to save and provide normalized distribution using external method. dynamically downsize 'tableLog' when conditions are met. 
It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. @return : recommended tableLog (necessarily <= 'maxTableLog') */ -FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); /*! FSE_normalizeCount(): normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) @@ -151,7 +151,7 @@ FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tabl /*! FSE_NCountWriteBound(): Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. Typically useful for allocation purpose. */ -FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); /*! FSE_writeNCount(): Compactly save 'normalizedCounter' into 'buffer'. @@ -164,20 +164,20 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, /*! Constructor and Destructor of FSE_CTable. Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ -FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); -FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); +FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); +FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); /*! FSE_buildCTable(): Builds `ct`, which must be already allocated, using FSE_createCTable(). @return : 0, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); +FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); /*! FSE_compress_usingCTable(): Compress `src` using `ct` into `dst` which must be already allocated. @return : size of compressed data (<= `dstCapacity`), or 0 if compressed data could not fit into `dst`, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); +FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); /*! Tutorial : @@ -244,20 +244,20 @@ FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter, /*! Constructor and Destructor of FSE_DTable. Note that its size depends on 'tableLog' */ typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ -FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); -FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); +FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); +FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); /*! FSE_buildDTable(): Builds 'dt', which must be already allocated, using FSE_createDTable(). return : 0, or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); +FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); /*! 
FSE_decompress_usingDTable(): Decompress compressed source `cSrc` of size `cSrcSize` using `dt` into `dst` which must be already allocated. @return : size of regenerated data (necessarily <= `dstCapacity`), or an errorCode, which can be tested using FSE_isError() */ -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); /*! Tutorial : @@ -287,10 +287,10 @@ FSE_decompress_usingDTable() result will tell how many bytes were regenerated (< If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small) */ -#endif /* FSE_H */ +#endif /* FSE_H */ -#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY) -#define FSE_H_FSE_STATIC_LINKING_ONLY +#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY) +#define FSE_H_FSE_STATIC_LINKING_ONLY /* *** Dependency *** */ #include "bitstream.h" @@ -308,11 +308,11 @@ If there is an error, the function will return an error code, which can be teste #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2)) #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog))) -/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */ -#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable)) -#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable)) - +/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */ +#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable)) +#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable)) + /* ***************************************** * FSE advanced API ***************************************** */ @@ -361,11 +361,11 @@ size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2); /**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. 
*/ -typedef enum { - FSE_repeat_none, /**< Cannot use the previous table */ - FSE_repeat_check, /**< Can use the previous table but it must be checked */ +typedef enum { + FSE_repeat_none, /**< Cannot use the previous table */ + FSE_repeat_check, /**< Can use the previous table but it must be checked */ FSE_repeat_valid /**< Can use the previous table and it is assumed to be valid */ - } FSE_repeat; + } FSE_repeat; /* ***************************************** * FSE symbol compression API @@ -539,9 +539,9 @@ MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U3 MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol) { - FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; const U16* const stateTable = (const U16*)(statePtr->stateTable); - U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); + U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); BIT_addBits(bitC, statePtr->value, nbBitsOut); statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; } diff --git a/contrib/libs/zstd/lib/common/fse_decompress.c b/contrib/libs/zstd/lib/common/fse_decompress.c index a5a358015f..f915ad58a2 100644 --- a/contrib/libs/zstd/lib/common/fse_decompress.c +++ b/contrib/libs/zstd/lib/common/fse_decompress.c @@ -18,10 +18,10 @@ ****************************************************************/ #include "debug.h" /* assert */ #include "bitstream.h" -#include "compiler.h" +#include "compiler.h" #define FSE_STATIC_LINKING_ONLY #include "fse.h" -#include "error_private.h" +#include "error_private.h" #define ZSTD_DEPS_NEED_MALLOC #include "zstd_deps.h" @@ -165,8 +165,8 @@ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCo { U32 u; for (u=0; u<tableSize; u++) { FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol); - U32 const nextState = symbolNext[symbol]++; - tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) ); + U32 const nextState = symbolNext[symbol]++; + tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) ); tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize); } } @@ -228,7 +228,7 @@ size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) return 0; } -FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic( +FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic( void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt, const unsigned fast) diff --git a/contrib/libs/zstd/lib/common/huf.h b/contrib/libs/zstd/lib/common/huf.h index 85518481ec..cc398f64c0 100644 --- a/contrib/libs/zstd/lib/common/huf.h +++ b/contrib/libs/zstd/lib/common/huf.h @@ -16,98 +16,98 @@ extern "C" { #endif -#ifndef HUF_H_298734234 -#define HUF_H_298734234 +#ifndef HUF_H_298734234 +#define HUF_H_298734234 /* *** Dependencies *** */ #include "zstd_deps.h" /* size_t */ -/* *** library symbols visibility *** */ -/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, - * HUF symbols remain "private" (internal symbols for library only). 
- * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ -#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) -# define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) -#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ -# define HUF_PUBLIC_API __declspec(dllexport) -#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) -# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ -#else -# define HUF_PUBLIC_API -#endif - - -/* ========================== */ -/* *** simple functions *** */ -/* ========================== */ - -/** HUF_compress() : - * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. - * 'dst' buffer must be already allocated. - * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). - * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. - * @return : size of compressed data (<= `dstCapacity`). - * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! - * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) - */ -HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, - const void* src, size_t srcSize); - -/** HUF_decompress() : - * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', - * into already allocated buffer 'dst', of minimum size 'dstSize'. - * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. - * Note : in contrast with FSE, HUF_decompress can regenerate - * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, - * because it knows size to regenerate (originalSize). - * @return : size of regenerated data (== originalSize), - * or an error code, which can be tested using HUF_isError() - */ -HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize); - - +/* *** library symbols visibility *** */ +/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, + * HUF symbols remain "private" (internal symbols for library only). + * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) +# define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ +# define HUF_PUBLIC_API __declspec(dllexport) +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) +# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ +#else +# define HUF_PUBLIC_API +#endif + + +/* ========================== */ +/* *** simple functions *** */ +/* ========================== */ + +/** HUF_compress() : + * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. + * 'dst' buffer must be already allocated. + * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). + * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. + * @return : size of compressed data (<= `dstCapacity`). + * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! 
+ * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) + */ +HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +/** HUF_decompress() : + * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', + * into already allocated buffer 'dst', of minimum size 'dstSize'. + * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. + * Note : in contrast with FSE, HUF_decompress can regenerate + * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, + * because it knows size to regenerate (originalSize). + * @return : size of regenerated data (== originalSize), + * or an error code, which can be tested using HUF_isError() + */ +HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize); + + /* *** Tool functions *** */ -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ -HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ +HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ /* Error Management */ -HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ -HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ +HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ +HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ /* *** Advanced function *** */ /** HUF_compress2() : - * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. - * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . - * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ -HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog); + * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. + * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . + * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ +HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog); /** HUF_compress4X_wksp() : - * Same as HUF_compress2(), but uses externally allocated `workSpace`. + * Same as HUF_compress2(), but uses externally allocated `workSpace`. * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */ #define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) #define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) -HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog, - void* workSpace, size_t wkspSize); - -#endif /* HUF_H_298734234 */ - -/* ****************************************************************** - * WARNING !! - * The following section contains advanced and experimental definitions - * which shall never be used in the context of a dynamic library, - * because they are not guaranteed to remain stable in the future. - * Only consider them in association with static linking. 
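A minimal round-trip sketch of the one-shot HUF_compress()/HUF_decompress() API declared above; the caller is assumed to record the original size out of band, and buffer names and error handling here are illustrative only:

    #include <stdlib.h>
    #include "huf.h"

    /* returns 1 on a successful round trip, 0 if the input was not compressible, -1 on error */
    static int huf_roundtrip(const void* src, size_t srcSize)     /* srcSize <= HUF_BLOCKSIZE_MAX (128 KB) */
    {
        size_t const cCapacity = HUF_compressBound(srcSize);      /* worst-case compressed size */
        void* const cBuf = malloc(cCapacity);
        void* const rBuf = malloc(srcSize);
        int result = -1;
        if (cBuf && rBuf) {
            size_t const cSize = HUF_compress(cBuf, cCapacity, src, srcSize);
            if (HUF_isError(cSize)) {
                result = -1;                                       /* details via HUF_getErrorName(cSize) */
            } else if (cSize == 0) {
                result = 0;                                        /* not compressible : nothing stored in cBuf */
            } else {
                size_t const rSize = HUF_decompress(rBuf, srcSize, cBuf, cSize);
                result = (!HUF_isError(rSize) && rSize == srcSize) ? 1 : -1;
            }
        }
        free(cBuf); free(rBuf);
        return result;
    }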
- * *****************************************************************/ -#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) -#define HUF_H_HUF_STATIC_LINKING_ONLY +HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog, + void* workSpace, size_t wkspSize); + +#endif /* HUF_H_298734234 */ + +/* ****************************************************************** + * WARNING !! + * The following section contains advanced and experimental definitions + * which shall never be used in the context of a dynamic library, + * because they are not guaranteed to remain stable in the future. + * Only consider them in association with static linking. + * *****************************************************************/ +#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) +#define HUF_H_HUF_STATIC_LINKING_ONLY /* *** Dependencies *** */ #include "mem.h" /* U32 */ @@ -117,9 +117,9 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, /* *** Constants *** */ #define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ -#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ -#define HUF_SYMBOLVALUE_MAX 255 - +#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ +#define HUF_SYMBOLVALUE_MAX 255 + #define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) # error "HUF_TABLELOG_MAX is too large !" @@ -131,7 +131,7 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, ******************************************/ /* HUF buffer bounds */ #define HUF_CTABLEBOUND 129 -#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ /* static allocation of HUF's Compression Table */ @@ -161,7 +161,7 @@ size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ #ifndef HUF_FORCE_DECOMPRESS_X1 @@ -171,22 +171,22 @@ size_t 
HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, /* **************************************** - * HUF detailed API - * ****************************************/ - -/*! HUF_compress() does the following: - * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") - * 2. (optional) refine tableLog using HUF_optimalTableLog() - * 3. build Huffman table from count using HUF_buildCTable() - * 4. save Huffman table to memory buffer using HUF_writeCTable() - * 5. encode the data stream using HUF_compress4X_usingCTable() - * - * The following API allows targeting specific sub-functions for advanced tasks. - * For example, it's possible to compress several blocks using the same 'CTable', - * or to save and regenerate 'CTable' using external methods. - */ + * HUF detailed API + * ****************************************/ + +/*! HUF_compress() does the following: + * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") + * 2. (optional) refine tableLog using HUF_optimalTableLog() + * 3. build Huffman table from count using HUF_buildCTable() + * 4. save Huffman table to memory buffer using HUF_writeCTable() + * 5. encode the data stream using HUF_compress4X_usingCTable() + * + * The following API allows targeting specific sub-functions for advanced tasks. + * For example, it's possible to compress several blocks using the same 'CTable', + * or to save and regenerate 'CTable' using external methods. + */ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); -size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ +size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize); size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); @@ -194,40 +194,40 @@ size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* sr size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); -typedef enum { - HUF_repeat_none, /**< Cannot use the previous table */ - HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ +typedef enum { + HUF_repeat_none, /**< Cannot use the previous table */ + HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ - } HUF_repeat; -/** HUF_compress4X_repeat() : - * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. - * If it uses hufTable it does not modify hufTable or repeat. 
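Steps 2 through 5 of the detailed workflow above can be sketched as follows, assuming the symbol histogram count[] was already gathered (step 1) and that the CTable buffer is large enough; the array size and the function name are assumptions made for illustration, and real code should size the table with the dedicated HUF_CElt sizing macro huf.h provides:

    /* dst receives the serialized table header followed by the compressed stream */
    static size_t huf_compress_with_ctable(void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize,
                                           const unsigned* count, unsigned maxSymbolValue)
    {
        HUF_CElt cTable[HUF_SYMBOLVALUE_MAX + 2];   /* assumed-sufficient size for this sketch */
        unsigned const huffLog = HUF_optimalTableLog(HUF_TABLELOG_MAX, srcSize, maxSymbolValue);  /* step 2 */
        size_t const maxNbBits = HUF_buildCTable(cTable, count, maxSymbolValue, huffLog);         /* step 3 */
        size_t hSize, cSize;
        if (HUF_isError(maxNbBits)) return maxNbBits;
        hSize = HUF_writeCTable(dst, dstCapacity, cTable, maxSymbolValue, (unsigned)maxNbBits);   /* step 4 */
        if (HUF_isError(hSize)) return hSize;
        cSize = HUF_compress4X_usingCTable((char*)dst + hSize, dstCapacity - hSize,
                                           src, srcSize, cTable);                                 /* step 5 */
        if (HUF_isError(cSize)) return cSize;
        return hSize + cSize;
    }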
- * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. + } HUF_repeat; +/** HUF_compress4X_repeat() : + * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. + * If it uses hufTable it does not modify hufTable or repeat. + * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. * If preferRepeat then the old table will always be used if valid. * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ -size_t HUF_compress4X_repeat(void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog, - void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ +size_t HUF_compress4X_repeat(void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog, + void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. - * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. + * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. */ -#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) -#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) +#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) +#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize); /*! HUF_readStats() : - * Read compact Huffman tree, saved by HUF_writeCTable(). - * `huffWeight` is destination buffer. - * @return : size read from `src` , or an error Code . - * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ -size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, - U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, + * Read compact Huffman tree, saved by HUF_writeCTable(). + * `huffWeight` is destination buffer. + * @return : size read from `src` , or an error Code . + * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, + U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize); /*! HUF_readStats_wksp() : @@ -244,7 +244,7 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, int bmi2); /** HUF_readCTable() : - * Loading a CTable saved with HUF_writeCTable() */ + * Loading a CTable saved with HUF_writeCTable() */ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights); /** HUF_getNbBitsFromCTable() : @@ -253,39 +253,39 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue); /* - * HUF_decompress() does the following: + * HUF_decompress() does the following: * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics - * 2. build Huffman table from save, using HUF_readDTableX?() - * 3. 
decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable() - */ + * 2. build Huffman table from save, using HUF_readDTableX?() + * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable() + */ /** HUF_selectDecoder() : - * Tells which decoder is likely to decode faster, - * based on a set of pre-computed metrics. + * Tells which decoder is likely to decode faster, + * based on a set of pre-computed metrics. * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . - * Assumption : 0 < dstSize <= 128 KB */ + * Assumption : 0 < dstSize <= 128 KB */ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); -/** - * The minimum workspace size for the `workSpace` used in +/** + * The minimum workspace size for the `workSpace` used in * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp(). - * - * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when - * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15. - * Buffer overflow errors may potentially occur if code modifications result in - * a required workspace size greater than that specified in the following - * macro. - */ + * + * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when + * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15. + * Buffer overflow errors may potentially occur if code modifications result in + * a required workspace size greater than that specified in the following + * macro. + */ #define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9)) -#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) - +#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) + #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize); size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); -size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); +size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); #endif size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); @@ -297,24 +297,24 @@ size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* c #endif -/* ====================== */ +/* ====================== */ /* single stream variants */ -/* ====================== */ +/* ====================== */ size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); -/** HUF_compress1X_repeat() : - * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. - * If it uses hufTable it does not modify hufTable or repeat. 
- * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. +/** HUF_compress1X_repeat() : + * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. + * If it uses hufTable it does not modify hufTable or repeat. + * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. * If preferRepeat then the old table will always be used if valid. * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ -size_t HUF_compress1X_repeat(void* dst, size_t dstSize, - const void* src, size_t srcSize, - unsigned maxSymbolValue, unsigned tableLog, - void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ +size_t HUF_compress1X_repeat(void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog, + void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ @@ -323,7 +323,7 @@ size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cS #endif size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); -size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); +size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ @@ -341,22 +341,22 @@ size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* c size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); #endif -/* BMI2 variants. - * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. - */ -size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); +/* BMI2 variants. + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. 
+ */ +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); #endif -size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); -size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); #endif #ifndef HUF_FORCE_DECOMPRESS_X1 size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); #endif - + #endif /* HUF_STATIC_LINKING_ONLY */ #if defined (__cplusplus) diff --git a/contrib/libs/zstd/lib/common/mem.h b/contrib/libs/zstd/lib/common/mem.h index 85581c3847..b46e47c00e 100644 --- a/contrib/libs/zstd/lib/common/mem.h +++ b/contrib/libs/zstd/lib/common/mem.h @@ -1,11 +1,11 @@ -/* +/* * Copyright (c) Yann Collet, Facebook, Inc. * All rights reserved. * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #ifndef MEM_H_MODULE @@ -50,15 +50,15 @@ extern "C" { # else # include <stdint.h> /* intptr_t */ # endif - typedef uint8_t BYTE; + typedef uint8_t BYTE; typedef uint8_t U8; typedef int8_t S8; - typedef uint16_t U16; - typedef int16_t S16; - typedef uint32_t U32; - typedef int32_t S32; - typedef uint64_t U64; - typedef int64_t S64; + typedef uint16_t U16; + typedef int16_t S16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; + typedef int64_t S64; #else # include <limits.h> #if CHAR_BIT != 8 @@ -138,11 +138,11 @@ MEM_STATIC size_t MEM_swapST(size_t in); * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. * The below switch allow to select different access method for improved performance. * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). + * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. * Method 2 : direct access. This method is portable but violate C standard. * It can generate buggy code on targets depending on alignment. 
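For reference, Method 0 (the default) performs the same unaligned reads through memcpy; a sketch of that shape, with an illustrative name (the real default definitions sit in the branch of this #if that the diff does not touch):

    MEM_STATIC U32 MEM_read32_viaMemcpy(const void* memPtr)   /* illustrative name */
    {
        U32 val;
        ZSTD_memcpy(&val, memPtr, sizeof(val));   /* compilers typically lower this to a single load */
        return val;
    }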
- * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) + * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. * Prefer these methods in priority order (0 > 1 > 2) */ @@ -182,7 +182,7 @@ Only use if no other choice to achieve best performance on target platform */ MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } -MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } +MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } @@ -193,27 +193,27 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ /* currently only defined for gcc and icc */ #if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) - __pragma( pack(push, 1) ) - typedef struct { U16 v; } unalign16; - typedef struct { U32 v; } unalign32; - typedef struct { U64 v; } unalign64; - typedef struct { size_t v; } unalignArch; + __pragma( pack(push, 1) ) + typedef struct { U16 v; } unalign16; + typedef struct { U32 v; } unalign32; + typedef struct { U64 v; } unalign64; + typedef struct { size_t v; } unalignArch; __pragma( pack(pop) ) #else - typedef struct { U16 v; } __attribute__((packed)) unalign16; - typedef struct { U32 v; } __attribute__((packed)) unalign32; - typedef struct { U64 v; } __attribute__((packed)) unalign64; - typedef struct { size_t v; } __attribute__((packed)) unalignArch; + typedef struct { U16 v; } __attribute__((packed)) unalign16; + typedef struct { U32 v; } __attribute__((packed)) unalign32; + typedef struct { U64 v; } __attribute__((packed)) unalign64; + typedef struct { size_t v; } __attribute__((packed)) unalignArch; #endif -MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } -MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; } -MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } -MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } +MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } +MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } -MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } -MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } -MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } +MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } #else diff --git a/contrib/libs/zstd/lib/common/pool.c 
b/contrib/libs/zstd/lib/common/pool.c index 2e37cdd73c..7591a5f84c 100644 --- a/contrib/libs/zstd/lib/common/pool.c +++ b/contrib/libs/zstd/lib/common/pool.c @@ -1,76 +1,76 @@ -/* +/* * Copyright (c) Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - - -/* ====== Dependencies ======= */ + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +/* ====== Dependencies ======= */ #include "zstd_deps.h" /* size_t */ #include "debug.h" /* assert */ #include "zstd_internal.h" /* ZSTD_customMalloc, ZSTD_customFree */ -#include "pool.h" - -/* ====== Compiler specifics ====== */ -#if defined(_MSC_VER) -# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ -#endif - - -#ifdef ZSTD_MULTITHREAD - -#include "threading.h" /* pthread adaptation */ - -/* A job is a function and an opaque argument */ -typedef struct POOL_job_s { - POOL_function function; - void *opaque; -} POOL_job; - -struct POOL_ctx_s { - ZSTD_customMem customMem; - /* Keep track of the threads */ +#include "pool.h" + +/* ====== Compiler specifics ====== */ +#if defined(_MSC_VER) +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +#endif + + +#ifdef ZSTD_MULTITHREAD + +#include "threading.h" /* pthread adaptation */ + +/* A job is a function and an opaque argument */ +typedef struct POOL_job_s { + POOL_function function; + void *opaque; +} POOL_job; + +struct POOL_ctx_s { + ZSTD_customMem customMem; + /* Keep track of the threads */ ZSTD_pthread_t* threads; size_t threadCapacity; size_t threadLimit; - - /* The queue is a circular buffer */ - POOL_job *queue; - size_t queueHead; - size_t queueTail; - size_t queueSize; - - /* The number of threads working on jobs */ - size_t numThreadsBusy; - /* Indicates if the queue is empty */ - int queueEmpty; - - /* The mutex protects the queue */ - ZSTD_pthread_mutex_t queueMutex; - /* Condition variable for pushers to wait on when the queue is full */ - ZSTD_pthread_cond_t queuePushCond; - /* Condition variables for poppers to wait on when the queue is empty */ - ZSTD_pthread_cond_t queuePopCond; - /* Indicates if the queue is shutting down */ - int shutdown; -}; - -/* POOL_thread() : + + /* The queue is a circular buffer */ + POOL_job *queue; + size_t queueHead; + size_t queueTail; + size_t queueSize; + + /* The number of threads working on jobs */ + size_t numThreadsBusy; + /* Indicates if the queue is empty */ + int queueEmpty; + + /* The mutex protects the queue */ + ZSTD_pthread_mutex_t queueMutex; + /* Condition variable for pushers to wait on when the queue is full */ + ZSTD_pthread_cond_t queuePushCond; + /* Condition variables for poppers to wait on when the queue is empty */ + ZSTD_pthread_cond_t queuePopCond; + /* Indicates if the queue is shutting down */ + int shutdown; +}; + +/* POOL_thread() : * Work thread for the thread pool. * Waits for jobs and executes them. * @returns : NULL on failure else non-null. 
*/ -static void* POOL_thread(void* opaque) { - POOL_ctx* const ctx = (POOL_ctx*)opaque; - if (!ctx) { return NULL; } - for (;;) { - /* Lock the mutex and wait for a non-empty queue or until shutdown */ - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - +static void* POOL_thread(void* opaque) { + POOL_ctx* const ctx = (POOL_ctx*)opaque; + if (!ctx) { return NULL; } + for (;;) { + /* Lock the mutex and wait for a non-empty queue or until shutdown */ + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + while ( ctx->queueEmpty || (ctx->numThreadsBusy >= ctx->threadLimit) ) { if (ctx->shutdown) { @@ -80,59 +80,59 @@ static void* POOL_thread(void* opaque) { ZSTD_pthread_mutex_unlock(&ctx->queueMutex); return opaque; } - ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); - } - /* Pop a job off the queue */ - { POOL_job const job = ctx->queue[ctx->queueHead]; - ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; - ctx->numThreadsBusy++; + ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); + } + /* Pop a job off the queue */ + { POOL_job const job = ctx->queue[ctx->queueHead]; + ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; + ctx->numThreadsBusy++; ctx->queueEmpty = (ctx->queueHead == ctx->queueTail); - /* Unlock the mutex, signal a pusher, and run the job */ + /* Unlock the mutex, signal a pusher, and run the job */ ZSTD_pthread_cond_signal(&ctx->queuePushCond); - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - - job.function(job.opaque); - - /* If the intended queue size was 0, signal after finishing job */ + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + + job.function(job.opaque); + + /* If the intended queue size was 0, signal after finishing job */ ZSTD_pthread_mutex_lock(&ctx->queueMutex); ctx->numThreadsBusy--; - if (ctx->queueSize == 1) { - ZSTD_pthread_cond_signal(&ctx->queuePushCond); + if (ctx->queueSize == 1) { + ZSTD_pthread_cond_signal(&ctx->queuePushCond); } ZSTD_pthread_mutex_unlock(&ctx->queueMutex); } - } /* for (;;) */ + } /* for (;;) */ assert(0); /* Unreachable */ -} - +} + /* ZSTD_createThreadPool() : public access point */ POOL_ctx* ZSTD_createThreadPool(size_t numThreads) { return POOL_create (numThreads, 0); } -POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { - return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); -} - +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { + return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); +} + POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { - POOL_ctx* ctx; + POOL_ctx* ctx; /* Check parameters */ - if (!numThreads) { return NULL; } - /* Allocate the context and zero initialize */ + if (!numThreads) { return NULL; } + /* Allocate the context and zero initialize */ ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem); - if (!ctx) { return NULL; } - /* Initialize the job queue. + if (!ctx) { return NULL; } + /* Initialize the job queue. * It needs one extra space since one space is wasted to differentiate * empty and full queues. 
- */ - ctx->queueSize = queueSize + 1; + */ + ctx->queueSize = queueSize + 1; ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem); - ctx->queueHead = 0; - ctx->queueTail = 0; - ctx->numThreadsBusy = 0; - ctx->queueEmpty = 1; + ctx->queueHead = 0; + ctx->queueTail = 0; + ctx->numThreadsBusy = 0; + ctx->queueEmpty = 1; { int error = 0; error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL); @@ -140,67 +140,67 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL); if (error) { POOL_free(ctx); return NULL; } } - ctx->shutdown = 0; - /* Allocate space for the thread handles */ + ctx->shutdown = 0; + /* Allocate space for the thread handles */ ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem); ctx->threadCapacity = 0; - ctx->customMem = customMem; - /* Check for errors */ - if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } - /* Initialize the threads */ - { size_t i; - for (i = 0; i < numThreads; ++i) { - if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { + ctx->customMem = customMem; + /* Check for errors */ + if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } + /* Initialize the threads */ + { size_t i; + for (i = 0; i < numThreads; ++i) { + if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { ctx->threadCapacity = i; - POOL_free(ctx); - return NULL; - } } + POOL_free(ctx); + return NULL; + } } ctx->threadCapacity = numThreads; ctx->threadLimit = numThreads; - } - return ctx; -} - -/*! POOL_join() : - Shutdown the queue, wake any sleeping threads, and join all of the threads. -*/ -static void POOL_join(POOL_ctx* ctx) { - /* Shut down the queue */ - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - ctx->shutdown = 1; - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - /* Wake up sleeping threads */ - ZSTD_pthread_cond_broadcast(&ctx->queuePushCond); - ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); - /* Join all of the threads */ - { size_t i; + } + return ctx; +} + +/*! POOL_join() : + Shutdown the queue, wake any sleeping threads, and join all of the threads. 
+*/ +static void POOL_join(POOL_ctx* ctx) { + /* Shut down the queue */ + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + ctx->shutdown = 1; + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + /* Wake up sleeping threads */ + ZSTD_pthread_cond_broadcast(&ctx->queuePushCond); + ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); + /* Join all of the threads */ + { size_t i; for (i = 0; i < ctx->threadCapacity; ++i) { ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */ - } } -} - -void POOL_free(POOL_ctx *ctx) { - if (!ctx) { return; } - POOL_join(ctx); - ZSTD_pthread_mutex_destroy(&ctx->queueMutex); - ZSTD_pthread_cond_destroy(&ctx->queuePushCond); - ZSTD_pthread_cond_destroy(&ctx->queuePopCond); + } } +} + +void POOL_free(POOL_ctx *ctx) { + if (!ctx) { return; } + POOL_join(ctx); + ZSTD_pthread_mutex_destroy(&ctx->queueMutex); + ZSTD_pthread_cond_destroy(&ctx->queuePushCond); + ZSTD_pthread_cond_destroy(&ctx->queuePopCond); ZSTD_customFree(ctx->queue, ctx->customMem); ZSTD_customFree(ctx->threads, ctx->customMem); ZSTD_customFree(ctx, ctx->customMem); -} - +} + void ZSTD_freeThreadPool (ZSTD_threadPool* pool) { POOL_free (pool); } size_t POOL_sizeof(const POOL_ctx* ctx) { - if (ctx==NULL) return 0; /* supports sizeof NULL */ - return sizeof(*ctx) - + ctx->queueSize * sizeof(POOL_job) + if (ctx==NULL) return 0; /* supports sizeof NULL */ + return sizeof(*ctx) + + ctx->queueSize * sizeof(POOL_job) + ctx->threadCapacity * sizeof(ZSTD_pthread_t); -} - +} + /* @return : 0 on success, 1 on error */ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads) @@ -243,113 +243,113 @@ int POOL_resize(POOL_ctx* ctx, size_t numThreads) return result; } -/** - * Returns 1 if the queue is full and 0 otherwise. - * +/** + * Returns 1 if the queue is full and 0 otherwise. + * * When queueSize is 1 (pool was created with an intended queueSize of 0), * then a queue is empty if there is a thread free _and_ no job is waiting. 
- */ -static int isQueueFull(POOL_ctx const* ctx) { - if (ctx->queueSize > 1) { - return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize); - } else { + */ +static int isQueueFull(POOL_ctx const* ctx) { + if (ctx->queueSize > 1) { + return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize); + } else { return (ctx->numThreadsBusy == ctx->threadLimit) || - !ctx->queueEmpty; - } -} - - + !ctx->queueEmpty; + } +} + + static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) -{ - POOL_job const job = {function, opaque}; - assert(ctx != NULL); - if (ctx->shutdown) return; - - ctx->queueEmpty = 0; - ctx->queue[ctx->queueTail] = job; - ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize; - ZSTD_pthread_cond_signal(&ctx->queuePopCond); -} - -void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) -{ - assert(ctx != NULL); - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - /* Wait until there is space in the queue for the new job */ - while (isQueueFull(ctx) && (!ctx->shutdown)) { - ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); - } - POOL_add_internal(ctx, function, opaque); - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); -} - - -int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) -{ - assert(ctx != NULL); - ZSTD_pthread_mutex_lock(&ctx->queueMutex); - if (isQueueFull(ctx)) { - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - return 0; - } - POOL_add_internal(ctx, function, opaque); - ZSTD_pthread_mutex_unlock(&ctx->queueMutex); - return 1; -} - - -#else /* ZSTD_MULTITHREAD not defined */ - -/* ========================== */ -/* No multi-threading support */ -/* ========================== */ - - -/* We don't need any data, but if it is empty, malloc() might return NULL. */ -struct POOL_ctx_s { - int dummy; -}; +{ + POOL_job const job = {function, opaque}; + assert(ctx != NULL); + if (ctx->shutdown) return; + + ctx->queueEmpty = 0; + ctx->queue[ctx->queueTail] = job; + ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize; + ZSTD_pthread_cond_signal(&ctx->queuePopCond); +} + +void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) +{ + assert(ctx != NULL); + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + /* Wait until there is space in the queue for the new job */ + while (isQueueFull(ctx) && (!ctx->shutdown)) { + ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); + } + POOL_add_internal(ctx, function, opaque); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); +} + + +int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) +{ + assert(ctx != NULL); + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + if (isQueueFull(ctx)) { + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + return 0; + } + POOL_add_internal(ctx, function, opaque); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + return 1; +} + + +#else /* ZSTD_MULTITHREAD not defined */ + +/* ========================== */ +/* No multi-threading support */ +/* ========================== */ + + +/* We don't need any data, but if it is empty, malloc() might return NULL. 
*/ +struct POOL_ctx_s { + int dummy; +}; static POOL_ctx g_poolCtx; - -POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { - return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); -} - + +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { + return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); +} + POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { - (void)numThreads; - (void)queueSize; - (void)customMem; + (void)numThreads; + (void)queueSize; + (void)customMem; return &g_poolCtx; -} - -void POOL_free(POOL_ctx* ctx) { +} + +void POOL_free(POOL_ctx* ctx) { assert(!ctx || ctx == &g_poolCtx); - (void)ctx; -} - + (void)ctx; +} + int POOL_resize(POOL_ctx* ctx, size_t numThreads) { (void)ctx; (void)numThreads; return 0; } -void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) { - (void)ctx; - function(opaque); -} - -int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { - (void)ctx; - function(opaque); - return 1; -} - +void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) { + (void)ctx; + function(opaque); +} + +int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { + (void)ctx; + function(opaque); + return 1; +} + size_t POOL_sizeof(const POOL_ctx* ctx) { - if (ctx==NULL) return 0; /* supports sizeof NULL */ + if (ctx==NULL) return 0; /* supports sizeof NULL */ assert(ctx == &g_poolCtx); - return sizeof(*ctx); -} - -#endif /* ZSTD_MULTITHREAD */ + return sizeof(*ctx); +} + +#endif /* ZSTD_MULTITHREAD */ diff --git a/contrib/libs/zstd/lib/common/pool.h b/contrib/libs/zstd/lib/common/pool.h index 0ebde1805d..67feb6da6d 100644 --- a/contrib/libs/zstd/lib/common/pool.h +++ b/contrib/libs/zstd/lib/common/pool.h @@ -1,43 +1,43 @@ -/* +/* * Copyright (c) Yann Collet, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef POOL_H -#define POOL_H - -#if defined (__cplusplus) -extern "C" { -#endif - - + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef POOL_H +#define POOL_H + +#if defined (__cplusplus) +extern "C" { +#endif + + #include "zstd_deps.h" -#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ #include "../zstd.h" - -typedef struct POOL_ctx_s POOL_ctx; - -/*! POOL_create() : - * Create a thread pool with at most `numThreads` threads. - * `numThreads` must be at least 1. - * The maximum number of queued jobs before blocking is `queueSize`. - * @return : POOL_ctx pointer on success, else NULL. -*/ -POOL_ctx* POOL_create(size_t numThreads, size_t queueSize); - + +typedef struct POOL_ctx_s POOL_ctx; + +/*! POOL_create() : + * Create a thread pool with at most `numThreads` threads. + * `numThreads` must be at least 1. + * The maximum number of queued jobs before blocking is `queueSize`. + * @return : POOL_ctx pointer on success, else NULL. 
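A usage sketch for this pool API, with an illustrative job type; POOL_add() may block until a queue slot frees up, and POOL_free() shuts the queue down and joins the workers as POOL_join() describes above:

    #include "pool.h"

    typedef struct { int id; } JobArg;            /* illustrative payload */

    static void myJob(void* opaque)               /* matches the POOL_function signature */
    {
        JobArg* const job = (JobArg*)opaque;
        (void)job;                                /* ... do the work ... */
    }

    static int runJobs(void)
    {
        JobArg args[8];
        size_t i;
        POOL_ctx* const pool = POOL_create(4 /* threads */, 8 /* queueSize */);
        if (pool == NULL) return 1;
        for (i = 0; i < 8; i++) {
            args[i].id = (int)i;
            POOL_add(pool, myJob, &args[i]);      /* args[i] must stay alive until the job completes */
        }
        POOL_free(pool);                          /* shutdown + join, see POOL_join() above */
        return 0;
    }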
+*/ +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize); + POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem); - -/*! POOL_free() : + +/*! POOL_free() : * Free a thread pool returned by POOL_create(). */ -void POOL_free(POOL_ctx* ctx); - +void POOL_free(POOL_ctx* ctx); + /*! POOL_resize() : * Expands or shrinks pool's number of threads. * This is more efficient than releasing + creating a new context, @@ -49,36 +49,36 @@ void POOL_free(POOL_ctx* ctx); */ int POOL_resize(POOL_ctx* ctx, size_t numThreads); -/*! POOL_sizeof() : +/*! POOL_sizeof() : * @return threadpool memory usage * note : compatible with NULL (returns 0 in this case) */ size_t POOL_sizeof(const POOL_ctx* ctx); - -/*! POOL_function : + +/*! POOL_function : * The function type that can be added to a thread pool. */ -typedef void (*POOL_function)(void*); - -/*! POOL_add() : +typedef void (*POOL_function)(void*); + +/*! POOL_add() : * Add the job `function(opaque)` to the thread pool. `ctx` must be valid. * Possibly blocks until there is room in the queue. * Note : The function may be executed asynchronously, * therefore, `opaque` must live until function has been completed. */ -void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); - - -/*! POOL_tryAdd() : +void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); + + +/*! POOL_tryAdd() : * Add the job `function(opaque)` to thread pool _if_ a queue slot is available. * Returns immediately even if not (does not block). * @return : 1 if successful, 0 if not. */ -int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque); - - -#if defined (__cplusplus) -} -#endif - -#endif +int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque); + + +#if defined (__cplusplus) +} +#endif + +#endif diff --git a/contrib/libs/zstd/lib/common/threading.c b/contrib/libs/zstd/lib/common/threading.c index 92cf57c195..c0c2adb937 100644 --- a/contrib/libs/zstd/lib/common/threading.c +++ b/contrib/libs/zstd/lib/common/threading.c @@ -1,80 +1,80 @@ -/** - * Copyright (c) 2016 Tino Reichardt - * All rights reserved. - * +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. + * * You can contact the author at: * - zstdmt source repository: https://github.com/mcmilk/zstdmt * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. 
- */ - -/** - * This file will hold wrapper for systems, which do not support pthreads - */ - + */ + +/** + * This file will hold wrapper for systems, which do not support pthreads + */ + #include "threading.h" /* create fake symbol to avoid empty translation unit warning */ int g_ZSTD_threading_useless_symbol; - -#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) - -/** - * Windows minimalist Pthread Wrapper, based on : - * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html - */ - - -/* === Dependencies === */ -#include <process.h> -#include <errno.h> - - -/* === Implementation === */ - -static unsigned __stdcall worker(void *arg) -{ - ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg; - thread->arg = thread->start_routine(thread->arg); - return 0; -} - -int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, - void* (*start_routine) (void*), void* arg) -{ - (void)unused; - thread->arg = arg; - thread->start_routine = start_routine; - thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); - - if (!thread->handle) - return errno; - else - return 0; -} - -int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) -{ - DWORD result; - - if (!thread.handle) return 0; - - result = WaitForSingleObject(thread.handle, INFINITE); - switch (result) { - case WAIT_OBJECT_0: - if (value_ptr) *value_ptr = thread.arg; - return 0; - case WAIT_ABANDONED: - return EINVAL; - default: - return GetLastError(); - } -} - -#endif /* ZSTD_MULTITHREAD */ + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ + + +/* === Dependencies === */ +#include <process.h> +#include <errno.h> + + +/* === Implementation === */ + +static unsigned __stdcall worker(void *arg) +{ + ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg; + thread->arg = thread->start_routine(thread->arg); + return 0; +} + +int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, + void* (*start_routine) (void*), void* arg) +{ + (void)unused; + thread->arg = arg; + thread->start_routine = start_routine; + thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); + + if (!thread->handle) + return errno; + else + return 0; +} + +int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) +{ + DWORD result; + + if (!thread.handle) return 0; + + result = WaitForSingleObject(thread.handle, INFINITE); + switch (result) { + case WAIT_OBJECT_0: + if (value_ptr) *value_ptr = thread.arg; + return 0; + case WAIT_ABANDONED: + return EINVAL; + default: + return GetLastError(); + } +} + +#endif /* ZSTD_MULTITHREAD */ #if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32) diff --git a/contrib/libs/zstd/lib/common/threading.h b/contrib/libs/zstd/lib/common/threading.h index fd0060d5aa..c5e303eedc 100644 --- a/contrib/libs/zstd/lib/common/threading.h +++ b/contrib/libs/zstd/lib/common/threading.h @@ -1,106 +1,106 @@ -/** - * Copyright (c) 2016 Tino Reichardt - * All rights reserved. - * +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. + * * You can contact the author at: * - zstdmt source repository: https://github.com/mcmilk/zstdmt * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). 
+ * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. - */ - -#ifndef THREADING_H_938743 -#define THREADING_H_938743 - + */ + +#ifndef THREADING_H_938743 +#define THREADING_H_938743 + #include "debug.h" -#if defined (__cplusplus) -extern "C" { -#endif - -#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) - -/** - * Windows minimalist Pthread Wrapper, based on : - * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html - */ -#ifdef WINVER -# undef WINVER -#endif -#define WINVER 0x0600 - -#ifdef _WIN32_WINNT -# undef _WIN32_WINNT -#endif -#define _WIN32_WINNT 0x0600 - -#ifndef WIN32_LEAN_AND_MEAN -# define WIN32_LEAN_AND_MEAN -#endif - -#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ -#include <windows.h> -#undef ERROR -#define ERROR(name) ZSTD_ERROR(name) - - -/* mutex */ -#define ZSTD_pthread_mutex_t CRITICAL_SECTION -#define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0) -#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a)) -#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a)) -#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a)) - -/* condition variable */ -#define ZSTD_pthread_cond_t CONDITION_VARIABLE -#define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0) -#define ZSTD_pthread_cond_destroy(a) ((void)(a)) -#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE) -#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a)) -#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a)) - -/* ZSTD_pthread_create() and ZSTD_pthread_join() */ -typedef struct { - HANDLE handle; - void* (*start_routine)(void*); - void* arg; -} ZSTD_pthread_t; - -int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, - void* (*start_routine) (void*), void* arg); - -int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); - -/** - * add here more wrappers as required - */ - - +#if defined (__cplusplus) +extern "C" { +#endif + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ +#ifdef WINVER +# undef WINVER +#endif +#define WINVER 0x0600 + +#ifdef _WIN32_WINNT +# undef _WIN32_WINNT +#endif +#define _WIN32_WINNT 0x0600 + +#ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +#endif + +#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ +#include <windows.h> +#undef ERROR +#define ERROR(name) ZSTD_ERROR(name) + + +/* mutex */ +#define ZSTD_pthread_mutex_t CRITICAL_SECTION +#define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0) +#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a)) +#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a)) +#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a)) + +/* condition variable */ +#define ZSTD_pthread_cond_t CONDITION_VARIABLE +#define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0) +#define ZSTD_pthread_cond_destroy(a) ((void)(a)) +#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE) +#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a)) +#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a)) + +/* 
ZSTD_pthread_create() and ZSTD_pthread_join() */ +typedef struct { + HANDLE handle; + void* (*start_routine)(void*); + void* arg; +} ZSTD_pthread_t; + +int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, + void* (*start_routine) (void*), void* arg); + +int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); + +/** + * add here more wrappers as required + */ + + #elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ -/* === POSIX Systems === */ -# include <pthread.h> - +/* === POSIX Systems === */ +# include <pthread.h> + #if DEBUGLEVEL < 1 -#define ZSTD_pthread_mutex_t pthread_mutex_t -#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b)) -#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a)) -#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a)) -#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a)) - -#define ZSTD_pthread_cond_t pthread_cond_t -#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b)) -#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a)) -#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b)) -#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a)) -#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a)) - -#define ZSTD_pthread_t pthread_t -#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) -#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) - +#define ZSTD_pthread_mutex_t pthread_mutex_t +#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b)) +#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a)) +#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a)) +#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a)) + +#define ZSTD_pthread_cond_t pthread_cond_t +#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b)) +#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a)) +#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b)) +#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a)) +#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a)) + +#define ZSTD_pthread_t pthread_t +#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) +#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) + #else /* DEBUGLEVEL >= 1 */ /* Debug implementation of threading. 
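A small sketch of these wrappers in use; it behaves the same whether the macros resolve to the Win32 shim above or to native pthreads, and the worker payload is illustrative:

    #include "threading.h"

    typedef struct { int in; int out; } WorkerArg;   /* illustrative payload */

    static void* worker(void* arg)
    {
        WorkerArg* const w = (WorkerArg*)arg;
        w->out = w->in * 2;
        return arg;
    }

    static int runWorker(void)
    {
        ZSTD_pthread_t th;
        WorkerArg w = { 21, 0 };
        if (ZSTD_pthread_create(&th, NULL, worker, &w)) return 1;   /* non-zero return means failure */
        if (ZSTD_pthread_join(th, NULL)) return 1;                  /* NULL : return value not needed */
        return (w.out == 42) ? 0 : 1;
    }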
@@ -128,28 +128,28 @@ int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond); #endif -#else /* ZSTD_MULTITHREAD not defined */ -/* No multithreading support */ - -typedef int ZSTD_pthread_mutex_t; -#define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0) -#define ZSTD_pthread_mutex_destroy(a) ((void)(a)) -#define ZSTD_pthread_mutex_lock(a) ((void)(a)) -#define ZSTD_pthread_mutex_unlock(a) ((void)(a)) - -typedef int ZSTD_pthread_cond_t; -#define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0) -#define ZSTD_pthread_cond_destroy(a) ((void)(a)) -#define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b)) -#define ZSTD_pthread_cond_signal(a) ((void)(a)) -#define ZSTD_pthread_cond_broadcast(a) ((void)(a)) - -/* do not use ZSTD_pthread_t */ - -#endif /* ZSTD_MULTITHREAD */ - -#if defined (__cplusplus) -} -#endif - -#endif /* THREADING_H_938743 */ +#else /* ZSTD_MULTITHREAD not defined */ +/* No multithreading support */ + +typedef int ZSTD_pthread_mutex_t; +#define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0) +#define ZSTD_pthread_mutex_destroy(a) ((void)(a)) +#define ZSTD_pthread_mutex_lock(a) ((void)(a)) +#define ZSTD_pthread_mutex_unlock(a) ((void)(a)) + +typedef int ZSTD_pthread_cond_t; +#define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0) +#define ZSTD_pthread_cond_destroy(a) ((void)(a)) +#define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b)) +#define ZSTD_pthread_cond_signal(a) ((void)(a)) +#define ZSTD_pthread_cond_broadcast(a) ((void)(a)) + +/* do not use ZSTD_pthread_t */ + +#endif /* ZSTD_MULTITHREAD */ + +#if defined (__cplusplus) +} +#endif + +#endif /* THREADING_H_938743 */ diff --git a/contrib/libs/zstd/lib/common/zstd_common.c b/contrib/libs/zstd/lib/common/zstd_common.c index 3d7e35b309..ea4e288daa 100644 --- a/contrib/libs/zstd/lib/common/zstd_common.c +++ b/contrib/libs/zstd/lib/common/zstd_common.c @@ -1,11 +1,11 @@ -/* +/* * Copyright (c) Yann Collet, Facebook, Inc. * All rights reserved. * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ @@ -16,17 +16,17 @@ #define ZSTD_DEPS_NEED_MALLOC #include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */ #include "error_private.h" -#include "zstd_internal.h" +#include "zstd_internal.h" /*-**************************************** * Version ******************************************/ -unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; } - -const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; } +unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; } +const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; } + /*-**************************************** * ZSTD Error Management ******************************************/ @@ -37,16 +37,16 @@ const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; } unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } /*! 
ZSTD_getErrorName() : - * provides error code string from function result (useful for debugging) */ + * provides error code string from function result (useful for debugging) */ const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); } /*! ZSTD_getError() : - * convert a `size_t` function result into a proper ZSTD_errorCode enum */ + * convert a `size_t` function result into a proper ZSTD_errorCode enum */ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } /*! ZSTD_getErrorString() : - * provides error code string from enum */ -const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } + * provides error code string from enum */ +const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } @@ -55,29 +55,29 @@ const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString ****************************************************************/ void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem) { - if (customMem.customAlloc) - return customMem.customAlloc(customMem.opaque, size); + if (customMem.customAlloc) + return customMem.customAlloc(customMem.opaque, size); return ZSTD_malloc(size); } void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem) { - if (customMem.customAlloc) { - /* calloc implemented as malloc+memset; - * not as efficient as calloc, but next best guess for custom malloc */ - void* const ptr = customMem.customAlloc(customMem.opaque, size); + if (customMem.customAlloc) { + /* calloc implemented as malloc+memset; + * not as efficient as calloc, but next best guess for custom malloc */ + void* const ptr = customMem.customAlloc(customMem.opaque, size); ZSTD_memset(ptr, 0, size); - return ptr; - } + return ptr; + } return ZSTD_calloc(1, size); } void ZSTD_customFree(void* ptr, ZSTD_customMem customMem) { - if (ptr!=NULL) { - if (customMem.customFree) - customMem.customFree(customMem.opaque, ptr); - else + if (ptr!=NULL) { + if (customMem.customFree) + customMem.customFree(customMem.opaque, ptr); + else ZSTD_free(ptr); - } + } } diff --git a/contrib/libs/zstd/lib/common/zstd_internal.h b/contrib/libs/zstd/lib/common/zstd_internal.h index 1dee37cdbe..e149b40943 100644 --- a/contrib/libs/zstd/lib/common/zstd_internal.h +++ b/contrib/libs/zstd/lib/common/zstd_internal.h @@ -1,38 +1,38 @@ -/* +/* * Copyright (c) Yann Collet, Facebook, Inc. * All rights reserved. * - * This source code is licensed under both the BSD-style license (found in the - * LICENSE file in the root directory of this source tree) and the GPLv2 (found - * in the COPYING file in the root directory of this source tree). - * You may select, at your option, one of the above-listed licenses. + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. */ #ifndef ZSTD_CCOMMON_H_MODULE #define ZSTD_CCOMMON_H_MODULE -/* this module contains definitions which must be identical - * across compression, decompression and dictBuilder. - * It also contains a few functions useful to at least 2 of them - * and which benefit from being inlined */ +/* this module contains definitions which must be identical + * across compression, decompression and dictBuilder. 
+ * It also contains a few functions useful to at least 2 of them + * and which benefit from being inlined */ /*-************************************* * Dependencies ***************************************/ -#include "compiler.h" +#include "compiler.h" #include "cpu.h" #include "mem.h" #include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */ #include "error_private.h" #define ZSTD_STATIC_LINKING_ONLY #include "../zstd.h" -#define FSE_STATIC_LINKING_ONLY -#include "fse.h" -#define HUF_STATIC_LINKING_ONLY -#include "huf.h" -#ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ -#endif +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" +#ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ +#endif #include <contrib/libs/xxhash/xxhash.h> /* XXH_reset, update, digest */ #ifndef ZSTD_NO_TRACE # include "zstd_trace.h" @@ -40,22 +40,22 @@ # define ZSTD_TRACE 0 #endif -#if defined (__cplusplus) -extern "C" { -#endif - +#if defined (__cplusplus) +extern "C" { +#endif + /* ---- static assert (debug) --- */ #define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) #define ZSTD_isError ERR_isError /* for inlining */ #define FSE_isError ERR_isError #define HUF_isError ERR_isError - - -/*-************************************* + + +/*-************************************* * shared macros ***************************************/ -#undef MIN -#undef MAX +#undef MIN +#undef MAX #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) #define BOUNDED(min,val,max) (MAX(min,MIN(val,max))) @@ -85,7 +85,7 @@ static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; #define ZSTD_FRAMEIDSIZE 4 /* magic number size */ - + #define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; @@ -104,15 +104,15 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy #define Litbits 8 #define MaxLit ((1<<Litbits) - 1) -#define MaxML 52 -#define MaxLL 35 -#define DefaultMaxOff 28 -#define MaxOff 31 +#define MaxML 52 +#define MaxLL 35 +#define DefaultMaxOff 28 +#define MaxOff 31 #define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */ #define MLFSELog 9 #define LLFSELog 9 #define OffFSELog 8 -#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog) +#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog) #define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */ /* Each table cannot take more than #symbols * FSELog bits */ @@ -281,7 +281,7 @@ typedef enum { /*-******************************************* -* Private declarations +* Private declarations *********************************************/ typedef struct seqDef_s { U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ @@ -351,8 +351,8 @@ typedef struct { unsigned long long decompressedBound; } ZSTD_frameSizeInfo; /* decompress & legacy */ -const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ 
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ /* custom memory allocation functions */ void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem); @@ -360,10 +360,10 @@ void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem); void ZSTD_customFree(void* ptr, ZSTD_customMem customMem); -MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ +MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ { - assert(val != 0); - { + assert(val != 0); + { # if defined(_MSC_VER) /* Visual */ # if STATIC_BMI2 == 1 return _lzcnt_u32(val)^31; @@ -382,16 +382,16 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus # elif defined(__ICCARM__) /* IAR Intrinsic */ return 31 - __CLZ(val); # else /* Software version */ - static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; - U32 v = val; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - return DeBruijnClz[(v * 0x07C4ACDDU) >> 27]; + static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return DeBruijnClz[(v * 0x07C4ACDDU) >> 27]; # endif - } + } } /** @@ -452,25 +452,25 @@ MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val) } -/* ZSTD_invalidateRepCodes() : - * ensures next compression will not use repcodes from previous block. - * Note : only works with regular variant; - * do not use with extDict variant ! */ -void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */ - - -typedef struct { - blockType_e blockType; - U32 lastBlock; - U32 origSize; +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */ + + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; } blockProperties_t; /* declared here for decompress and fullbench */ - -/*! ZSTD_getcBlockSize() : - * Provides the size of compressed block from block header `src` */ -/* Used by: decompress, fullbench (does not get its definition from here) */ -size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, - blockProperties_t* bpPtr); - + +/*! ZSTD_getcBlockSize() : + * Provides the size of compressed block from block header `src` */ +/* Used by: decompress, fullbench (does not get its definition from here) */ +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, + blockProperties_t* bpPtr); + /*! ZSTD_decodeSeqHeaders() : * decode sequence header from src */ /* Used by: decompress, fullbench (does not get its definition from here) */ @@ -486,8 +486,8 @@ MEM_STATIC int ZSTD_cpuSupportsBmi2(void) return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid); } -#if defined (__cplusplus) -} -#endif - +#if defined (__cplusplus) +} +#endif + #endif /* ZSTD_CCOMMON_H_MODULE */ |
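
For reference, the ZSTD_pthread_* layer shown in threading.h above presents a single threading surface over three backends: the Win32 CRITICAL_SECTION/CONDITION_VARIABLE wrapper, raw POSIX pthreads, and the no-op single-threaded stubs. A minimal sketch of how library code drives that wrapper, assuming ZSTD_MULTITHREAD is defined so the macros map to real primitives; demoJob_t, demoWorker and demoRun are illustrative names, not zstd symbols:

/* Sketch: one worker thread signalling completion through the ZSTD_pthread wrapper.
 * Assumes ZSTD_MULTITHREAD; in the single-threaded build ZSTD_pthread_t must not be used. */
#include "threading.h"   /* ZSTD_pthread_* wrapper declared above */

typedef struct {
    ZSTD_pthread_mutex_t lock;
    ZSTD_pthread_cond_t  ready;
    int done;                        /* guarded by lock */
} demoJob_t;                         /* illustrative payload, not a zstd type */

static void* demoWorker(void* opaque)
{
    demoJob_t* const job = (demoJob_t*)opaque;
    ZSTD_pthread_mutex_lock(&job->lock);
    job->done = 1;                          /* stand-in for real work */
    ZSTD_pthread_cond_signal(&job->ready);  /* wake the waiting thread */
    ZSTD_pthread_mutex_unlock(&job->lock);
    return NULL;
}

static int demoRun(void)
{
    demoJob_t job;
    ZSTD_pthread_t thread;
    job.done = 0;
    ZSTD_pthread_mutex_init(&job.lock, NULL);
    ZSTD_pthread_cond_init(&job.ready, NULL);
    if (ZSTD_pthread_create(&thread, NULL, demoWorker, &job)) return -1;
    ZSTD_pthread_mutex_lock(&job.lock);
    while (!job.done)
        ZSTD_pthread_cond_wait(&job.ready, &job.lock);   /* condition first, mutex second */
    ZSTD_pthread_mutex_unlock(&job.lock);
    ZSTD_pthread_join(thread, NULL);
    ZSTD_pthread_cond_destroy(&job.ready);
    ZSTD_pthread_mutex_destroy(&job.lock);
    return 0;
}

The same source compiles against all three backends because the wrapper keeps the pthread calling conventions (init/destroy pairs, cond_wait taking the locked mutex) even where the Windows or stub macros reduce to no-ops.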
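
On the zstd_common.c side, the error shims simply forward to the ERR_* helpers: every fallible zstd function returns a size_t that is either a byte count or an encoded error, and ZSTD_isError / ZSTD_getErrorName / ZSTD_getErrorCode / ZSTD_getErrorString decode it. A small usage sketch against the public API; the deliberately undersized 3-byte destination is only there to provoke an error:

/* Sketch: decoding a size_t result with the public error helpers. */
#include <stdio.h>
#include <zstd.h>          /* ZSTD_compress, ZSTD_isError, ZSTD_getErrorName */
#include <zstd_errors.h>   /* ZSTD_ErrorCode, ZSTD_getErrorCode */

static void report(size_t ret)
{
    if (ZSTD_isError(ret)) {
        ZSTD_ErrorCode const code = ZSTD_getErrorCode(ret);
        fprintf(stderr, "zstd error %d: %s\n", (int)code, ZSTD_getErrorName(ret));
    } else {
        printf("produced %zu bytes\n", ret);
    }
}

static void demo(void)
{
    const char src[] = "some input";
    char dst[3];   /* deliberately too small: compression must fail (dstSize_tooSmall) */
    report(ZSTD_compress(dst, sizeof(dst), src, sizeof(src), 1));
}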
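
ZSTD_customMalloc, ZSTD_customCalloc and ZSTD_customFree route every internal allocation through the caller's ZSTD_customMem hooks when they are set and fall back to ZSTD_malloc/ZSTD_calloc/ZSTD_free otherwise; as the comment in the hunk notes, calloc is emulated as customAlloc plus memset because the callback interface only exposes a malloc-style hook. A sketch of the caller side, assuming the standard ZSTD_customMem struct from the static-linking section of zstd.h; the counting allocator and its names are illustrative:

/* Sketch: a counting allocator threaded through ZSTD_customMem.
 * allocCounter_t, countingAlloc, countingFree and makeCountingMem are illustrative names. */
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem is part of the static-only API */
#include <zstd.h>

typedef struct { size_t liveAllocs; } allocCounter_t;

static void* countingAlloc(void* opaque, size_t size)
{
    allocCounter_t* const c = (allocCounter_t*)opaque;
    c->liveAllocs++;
    return malloc(size);
}

static void countingFree(void* opaque, void* address)
{
    allocCounter_t* const c = (allocCounter_t*)opaque;
    if (address) c->liveAllocs--;
    free(address);
}

static ZSTD_customMem makeCountingMem(allocCounter_t* counter)
{
    ZSTD_customMem cmem;
    cmem.customAlloc = countingAlloc;   /* malloc-style hook used by ZSTD_customMalloc/Calloc */
    cmem.customFree  = countingFree;    /* hook used by ZSTD_customFree */
    cmem.opaque      = counter;         /* passed back to every callback */
    return cmem;
}

Passing the resulting ZSTD_customMem to a _advanced constructor such as ZSTD_createCCtx_advanced() makes every internal ZSTD_customMalloc/Calloc/Free call in the library go through these hooks.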
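
Both BIT_highbit32 and ZSTD_highbit32 return the index of the highest set bit, i.e. floor(log2(val)), using LZCNT/__builtin_clz where available and otherwise the branch-free De Bruijn fallback shown above: the input is smeared down into a mask of the form 2^k - 1, multiplied by the constant 0x07C4ACDD, and the product's top 5 bits index a 32-entry table. A standalone sketch that restates the fallback and cross-checks it against a naive loop; deBruijnHighbit and naiveHighbit are illustrative names, and 32-bit unsigned is assumed:

/* Sketch: the software fallback of ZSTD_highbit32, verified against a reference loop. */
#include <assert.h>

static unsigned deBruijnHighbit(unsigned val)   /* restatement of the fallback path */
{
    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
                                              11, 14, 16, 18, 22, 25, 3, 30,
                                              8, 12, 20, 28, 15, 17, 24, 7,
                                              19, 27, 23, 6, 26, 5, 4, 31 };
    unsigned v = val;
    v |= v >> 1; v |= v >> 2; v |= v >> 4;
    v |= v >> 8; v |= v >> 16;                  /* smear: v becomes 2^(highbit+1) - 1 */
    return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
}

static unsigned naiveHighbit(unsigned val)      /* reference: position of highest set bit */
{
    unsigned n = 0;
    while (val >>= 1) n++;
    return n;
}

static void checkHighbit(void)
{
    unsigned i;
    for (i = 1; i < 1000000; i++)
        assert(deBruijnHighbit(i) == naiveHighbit(i));
    assert(deBruijnHighbit(1u) == 0);            /* highbit(1) == 0 */
    assert(deBruijnHighbit(0x80000000u) == 31);  /* highbit(2^31) == 31 */
}

Like the originals, the fallback requires val != 0 (the smeared mask would otherwise index entry 0 spuriously), which is why both functions assert on their input first.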