path: root/contrib/libs/zstd/lib/compress
author    Ruslan Kovalev <ruslan.a.kovalev@gmail.com>   2022-02-10 16:46:45 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:46:45 +0300
commit    9123176b341b6f2658cff5132482b8237c1416c8 (patch)
tree      49e222ea1c5804306084bb3ae065bb702625360f /contrib/libs/zstd/lib/compress
parent    59e19371de37995fcb36beb16cd6ec030af960bc (diff)
download  ydb-9123176b341b6f2658cff5132482b8237c1416c8.tar.gz
Restoring authorship annotation for Ruslan Kovalev <ruslan.a.kovalev@gmail.com>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/zstd/lib/compress')
-rw-r--r--  contrib/libs/zstd/lib/compress/fse_compress.c    958
-rw-r--r--  contrib/libs/zstd/lib/compress/huf_compress.c    540
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_compress.c   768
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_opt.h         18
4 files changed, 1142 insertions, 1142 deletions
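
Of the files below, fse_compress.c implements the FSE (Finite State Entropy) encoder and huf_compress.c the Huffman encoder; together they form zstd's entropy stage. For orientation, here is a minimal sketch of the one-shot FSE entry points that appear in fse_compress.c (FSE_compressBound, FSE_compress, FSE_isError). The sample input, the bare "fse.h" include path, and the printf reporting are illustrative assumptions, not part of this commit; note also that these one-shot helpers are compiled only when ZSTD_NO_UNUSED_FUNCTIONS is left undefined (see the #ifndef near the end of the file).

#include <stdio.h>
#include <stdlib.h>
#include "fse.h"   /* assumed include path; the header lives at lib/common/fse.h in the zstd tree */

int main(void)
{
    const char src[] = "aaaaaabbbbbbbbccccdddddddddddddddd";   /* skewed symbol statistics */
    size_t const srcSize = sizeof(src) - 1;
    size_t const dstCapacity = FSE_compressBound(srcSize);     /* worst-case compressed size */
    void* const dst = malloc(dstCapacity);
    size_t cSize;
    if (dst == NULL) return 1;

    /* return value: 0 = not compressible, 1 = single repeated symbol (use RLE),
     * an error code, or the compressed size */
    cSize = FSE_compress(dst, dstCapacity, src, srcSize);
    if (FSE_isError(cSize)) { free(dst); return 1; }
    if (cSize <= 1) printf("not compressible (or RLE); callers store the block raw\n");
    else            printf("%zu -> %zu bytes\n", srcSize, cSize);

    free(dst);
    return 0;
}

FSE_compress is the convenience wrapper defined at the end of the file: it forwards to FSE_compress2 with FSE_MAX_SYMBOL_VALUE and FSE_DEFAULT_TABLELOG, which in turn runs FSE_compress_wksp on a stack-allocated scratch buffer.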
diff --git a/contrib/libs/zstd/lib/compress/fse_compress.c b/contrib/libs/zstd/lib/compress/fse_compress.c
index dbce9566ae..5547b4ac09 100644
--- a/contrib/libs/zstd/lib/compress/fse_compress.c
+++ b/contrib/libs/zstd/lib/compress/fse_compress.c
@@ -1,4 +1,4 @@
-/* ******************************************************************
+/* ******************************************************************
* FSE : Finite State Entropy encoder
* Copyright (c) Yann Collet, Facebook, Inc.
*
@@ -10,54 +10,54 @@
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
-****************************************************************** */
-
-/* **************************************************************
-* Includes
-****************************************************************/
+****************************************************************** */
+
+/* **************************************************************
+* Includes
+****************************************************************/
#include "../common/compiler.h"
#include "../common/mem.h" /* U32, U16, etc. */
#include "../common/debug.h" /* assert, DEBUGLOG */
#include "hist.h" /* HIST_count_wksp */
#include "../common/bitstream.h"
-#define FSE_STATIC_LINKING_ONLY
+#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#include "../common/error_private.h"
#define ZSTD_DEPS_NEED_MALLOC
#define ZSTD_DEPS_NEED_MATH64
#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
-
-
-/* **************************************************************
-* Error Management
-****************************************************************/
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
#define FSE_isError ERR_isError
-
-
-/* **************************************************************
-* Templates
-****************************************************************/
-/*
- designed to be included
- for type-specific functions (template emulation in C)
- Objective is to write these functions only once, for improved maintenance
-*/
-
-/* safety checks */
-#ifndef FSE_FUNCTION_EXTENSION
-# error "FSE_FUNCTION_EXTENSION must be defined"
-#endif
-#ifndef FSE_FUNCTION_TYPE
-# error "FSE_FUNCTION_TYPE must be defined"
-#endif
-
-/* Function names */
-#define FSE_CAT(X,Y) X##Y
-#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
-#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
-
-
-/* Function templates */
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
/* FSE_buildCTable_wksp() :
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
@@ -67,51 +67,51 @@
size_t FSE_buildCTable_wksp(FSE_CTable* ct,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
void* workSpace, size_t wkspSize)
-{
- U32 const tableSize = 1 << tableLog;
- U32 const tableMask = tableSize - 1;
- void* const ptr = ct;
- U16* const tableU16 = ( (U16*) ptr) + 2;
- void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
- FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
- U32 const step = FSE_TABLESTEP(tableSize);
+{
+ U32 const tableSize = 1 << tableLog;
+ U32 const tableMask = tableSize - 1;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ U32 const step = FSE_TABLESTEP(tableSize);
U32 const maxSV1 = maxSymbolValue+1;
-
+
U16* cumul = (U16*)workSpace; /* size = maxSV1 */
FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1)); /* size = tableSize */
- U32 highThreshold = tableSize-1;
-
+ U32 highThreshold = tableSize-1;
+
assert(((size_t)workSpace & 1) == 0); /* Must be 2 bytes-aligned */
if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
- /* CTable header */
- tableU16[-2] = (U16) tableLog;
- tableU16[-1] = (U16) maxSymbolValue;
+ /* CTable header */
+ tableU16[-2] = (U16) tableLog;
+ tableU16[-1] = (U16) maxSymbolValue;
assert(tableLog < 16); /* required for threshold strategy to work */
-
- /* For explanations on how to distribute symbol values over the table :
+
+ /* For explanations on how to distribute symbol values over the table :
* http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
-
+
#ifdef __clang_analyzer__
ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
#endif
- /* symbol start positions */
- { U32 u;
- cumul[0] = 0;
+ /* symbol start positions */
+ { U32 u;
+ cumul[0] = 0;
for (u=1; u <= maxSV1; u++) {
- if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
- cumul[u] = cumul[u-1] + 1;
- tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
- } else {
+ if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
+ cumul[u] = cumul[u-1] + 1;
+ tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
+ } else {
assert(normalizedCounter[u-1] >= 0);
cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
assert(cumul[u] >= cumul[u-1]); /* no overflow */
- } }
+ } }
cumul[maxSV1] = (U16)(tableSize+1);
- }
-
- /* Spread symbols */
+ }
+
+ /* Spread symbols */
if (highThreshold == tableSize - 1) {
/* Case for no low prob count symbols. Lay down 8 bytes at a time
* to reduce branch misses since we are operating on a small block
@@ -152,52 +152,52 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
}
} else {
U32 position = 0;
- U32 symbol;
+ U32 symbol;
for (symbol=0; symbol<maxSV1; symbol++) {
int nbOccurrences;
int const freq = normalizedCounter[symbol];
for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
- tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
- position = (position + step) & tableMask;
+ tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
+ position = (position + step) & tableMask;
while (position > highThreshold)
position = (position + step) & tableMask; /* Low proba area */
- } }
+ } }
assert(position==0); /* Must have initialized all positions */
- }
-
- /* Build table */
- { U32 u; for (u=0; u<tableSize; u++) {
- FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
- tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
- } }
-
- /* Build Symbol Transformation Table */
- { unsigned total = 0;
- unsigned s;
- for (s=0; s<=maxSymbolValue; s++) {
- switch (normalizedCounter[s])
- {
+ }
+
+ /* Build table */
+ { U32 u; for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
+ tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
+ } }
+
+ /* Build Symbol Transformation Table */
+ { unsigned total = 0;
+ unsigned s;
+ for (s=0; s<=maxSymbolValue; s++) {
+ switch (normalizedCounter[s])
+ {
case 0:
/* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
break;
-
- case -1:
- case 1:
- symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
+
+ case -1:
+ case 1:
+ symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
assert(total <= INT_MAX);
symbolTT[s].deltaFindState = (int)(total - 1);
- total ++;
- break;
- default :
+ total ++;
+ break;
+ default :
assert(normalizedCounter[s] > 1);
{ U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1);
U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
- symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
+ symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
total += (unsigned)normalizedCounter[s];
- } } } }
-
+ } } } }
+
#if 0 /* debug : symbol costs */
DEBUGLOG(5, "\n --- table statistics : ");
{ U32 symbol;
@@ -209,241 +209,241 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
} }
#endif
- return 0;
-}
-
-
+ return 0;
+}
+
+
-#ifndef FSE_COMMONDEFS_ONLY
-
-/*-**************************************************************
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-**************************************************************
* FSE NCount encoding
-****************************************************************/
-size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
-{
+****************************************************************/
+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
+{
size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+ 4 /* bitCount initialized at 4 */
+ 2 /* first two symbols may use one additional bit each */) / 8)
+ 1 /* round up to whole nb bytes */
+ 2 /* additional two bytes for bitstream flush */;
- return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
-}
-
+ return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
+}
+
static size_t
FSE_writeNCount_generic (void* header, size_t headerBufferSize,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
unsigned writeIsSafe)
-{
- BYTE* const ostart = (BYTE*) header;
- BYTE* out = ostart;
- BYTE* const oend = ostart + headerBufferSize;
- int nbBits;
- const int tableSize = 1 << tableLog;
- int remaining;
- int threshold;
+{
+ BYTE* const ostart = (BYTE*) header;
+ BYTE* out = ostart;
+ BYTE* const oend = ostart + headerBufferSize;
+ int nbBits;
+ const int tableSize = 1 << tableLog;
+ int remaining;
+ int threshold;
U32 bitStream = 0;
int bitCount = 0;
unsigned symbol = 0;
unsigned const alphabetSize = maxSymbolValue + 1;
int previousIs0 = 0;
-
- /* Table Size */
- bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
- bitCount += 4;
-
- /* Init */
- remaining = tableSize+1; /* +1 for extra accuracy */
- threshold = tableSize;
- nbBits = tableLog+1;
-
+
+ /* Table Size */
+ bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
+ bitCount += 4;
+
+ /* Init */
+ remaining = tableSize+1; /* +1 for extra accuracy */
+ threshold = tableSize;
+ nbBits = tableLog+1;
+
while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
if (previousIs0) {
unsigned start = symbol;
while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
if (symbol == alphabetSize) break; /* incorrect distribution */
while (symbol >= start+24) {
- start+=24;
- bitStream += 0xFFFFU << bitCount;
+ start+=24;
+ bitStream += 0xFFFFU << bitCount;
if ((!writeIsSafe) && (out > oend-2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE) bitStream;
- out[1] = (BYTE)(bitStream>>8);
- out+=2;
- bitStream>>=16;
- }
+ out[0] = (BYTE) bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+=2;
+ bitStream>>=16;
+ }
while (symbol >= start+3) {
- start+=3;
- bitStream += 3 << bitCount;
- bitCount += 2;
- }
+ start+=3;
+ bitStream += 3 << bitCount;
+ bitCount += 2;
+ }
bitStream += (symbol-start) << bitCount;
- bitCount += 2;
- if (bitCount>16) {
+ bitCount += 2;
+ if (bitCount>16) {
if ((!writeIsSafe) && (out > oend - 2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream>>8);
- out += 2;
- bitStream >>= 16;
- bitCount -= 16;
- } }
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
{ int count = normalizedCounter[symbol++];
int const max = (2*threshold-1) - remaining;
remaining -= count < 0 ? -count : count;
- count++; /* +1 for extra accuracy */
+ count++; /* +1 for extra accuracy */
if (count>=threshold)
count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
- bitStream += count << bitCount;
- bitCount += nbBits;
- bitCount -= (count<max);
+ bitStream += count << bitCount;
+ bitCount += nbBits;
+ bitCount -= (count<max);
previousIs0 = (count==1);
if (remaining<1) return ERROR(GENERIC);
while (remaining<threshold) { nbBits--; threshold>>=1; }
- }
- if (bitCount>16) {
+ }
+ if (bitCount>16) {
if ((!writeIsSafe) && (out > oend - 2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream>>8);
- out += 2;
- bitStream >>= 16;
- bitCount -= 16;
- } }
-
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
+
if (remaining != 1)
return ERROR(GENERIC); /* incorrect normalized distribution */
assert(symbol <= alphabetSize);
- /* flush remaining bitStream */
+ /* flush remaining bitStream */
if ((!writeIsSafe) && (out > oend - 2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
- out[0] = (BYTE)bitStream;
- out[1] = (BYTE)(bitStream>>8);
- out+= (bitCount+7) /8;
-
- return (out-ostart);
-}
-
-
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+= (bitCount+7) /8;
+
+ return (out-ostart);
+}
+
+
size_t FSE_writeNCount (void* buffer, size_t bufferSize,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
-{
+{
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
- if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
-
- if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
- return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
-
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
+
+ if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
+
return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
-}
-
-
-/*-**************************************************************
-* FSE Compression Code
-****************************************************************/
-
-FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
-{
- size_t size;
- if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
- size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
+}
+
+
+/*-**************************************************************
+* FSE Compression Code
+****************************************************************/
+
+FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
+{
+ size_t size;
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
return (FSE_CTable*)ZSTD_malloc(size);
-}
-
+}
+
void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
-
-/* provides the minimum logSize to safely represent a distribution */
-static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
-{
+
+/* provides the minimum logSize to safely represent a distribution */
+static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
+{
U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
assert(srcSize > 1); /* Not supported, RLE should be used instead */
return minBits;
-}
-
-unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
-{
+}
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
+{
U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
- U32 tableLog = maxTableLog;
+ U32 tableLog = maxTableLog;
U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
assert(srcSize > 1); /* Not supported, RLE should be used instead */
- if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
- if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
- if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
- return tableLog;
-}
-
-unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
-{
- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
-}
-
-/* Secondary normalization method.
- To be used when primary method fails. */
-
+ if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
+ if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
+ return tableLog;
+}
+
+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
+}
+
+/* Secondary normalization method.
+ To be used when primary method fails. */
+
static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
-{
+{
short const NOT_YET_ASSIGNED = -2;
- U32 s;
- U32 distributed = 0;
- U32 ToDistribute;
-
- /* Init */
+ U32 s;
+ U32 distributed = 0;
+ U32 ToDistribute;
+
+ /* Init */
U32 const lowThreshold = (U32)(total >> tableLog);
- U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
-
- for (s=0; s<=maxSymbolValue; s++) {
- if (count[s] == 0) {
- norm[s]=0;
- continue;
- }
- if (count[s] <= lowThreshold) {
+ U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == 0) {
+ norm[s]=0;
+ continue;
+ }
+ if (count[s] <= lowThreshold) {
norm[s] = lowProbCount;
- distributed++;
- total -= count[s];
- continue;
- }
- if (count[s] <= lowOne) {
- norm[s] = 1;
- distributed++;
- total -= count[s];
- continue;
- }
+ distributed++;
+ total -= count[s];
+ continue;
+ }
+ if (count[s] <= lowOne) {
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ }
norm[s]=NOT_YET_ASSIGNED;
- }
- ToDistribute = (1 << tableLog) - distributed;
-
+ }
+ ToDistribute = (1 << tableLog) - distributed;
+
if (ToDistribute == 0)
return 0;
- if ((total / ToDistribute) > lowOne) {
- /* risk of rounding to zero */
- lowOne = (U32)((total * 3) / (ToDistribute * 2));
- for (s=0; s<=maxSymbolValue; s++) {
+ if ((total / ToDistribute) > lowOne) {
+ /* risk of rounding to zero */
+ lowOne = (U32)((total * 3) / (ToDistribute * 2));
+ for (s=0; s<=maxSymbolValue; s++) {
if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
- norm[s] = 1;
- distributed++;
- total -= count[s];
- continue;
- } }
- ToDistribute = (1 << tableLog) - distributed;
- }
-
- if (distributed == maxSymbolValue+1) {
- /* all values are pretty poor;
- probably incompressible data (should have already been detected);
- find max, then give all remaining points to max */
- U32 maxV = 0, maxC = 0;
- for (s=0; s<=maxSymbolValue; s++)
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ } }
+ ToDistribute = (1 << tableLog) - distributed;
+ }
+
+ if (distributed == maxSymbolValue+1) {
+ /* all values are pretty poor;
+ probably incompressible data (should have already been detected);
+ find max, then give all remaining points to max */
+ U32 maxV = 0, maxC = 0;
+ for (s=0; s<=maxSymbolValue; s++)
if (count[s] > maxC) { maxV=s; maxC=count[s]; }
- norm[maxV] += (short)ToDistribute;
- return 0;
- }
-
+ norm[maxV] += (short)ToDistribute;
+ return 0;
+ }
+
if (total == 0) {
/* all of the symbols were low enough for the lowOne or lowThreshold */
for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
@@ -452,270 +452,270 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count,
}
{ U64 const vStepLog = 62 - tableLog;
- U64 const mid = (1ULL << (vStepLog-1)) - 1;
+ U64 const mid = (1ULL << (vStepLog-1)) - 1;
U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
- U64 tmpTotal = mid;
- for (s=0; s<=maxSymbolValue; s++) {
+ U64 tmpTotal = mid;
+ for (s=0; s<=maxSymbolValue; s++) {
if (norm[s]==NOT_YET_ASSIGNED) {
U64 const end = tmpTotal + (count[s] * rStep);
U32 const sStart = (U32)(tmpTotal >> vStepLog);
U32 const sEnd = (U32)(end >> vStepLog);
U32 const weight = sEnd - sStart;
- if (weight < 1)
- return ERROR(GENERIC);
- norm[s] = (short)weight;
- tmpTotal = end;
- } } }
-
- return 0;
-}
-
-size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
- const unsigned* count, size_t total,
+ if (weight < 1)
+ return ERROR(GENERIC);
+ norm[s] = (short)weight;
+ tmpTotal = end;
+ } } }
+
+ return 0;
+}
+
+size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t total,
unsigned maxSymbolValue, unsigned useLowProbCount)
-{
- /* Sanity checks */
- if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
- if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
- if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
- if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
-
+{
+ /* Sanity checks */
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
+ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
+
{ static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
short const lowProbCount = useLowProbCount ? -1 : 1;
- U64 const scale = 62 - tableLog;
+ U64 const scale = 62 - tableLog;
U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
- U64 const vStep = 1ULL<<(scale-20);
- int stillToDistribute = 1<<tableLog;
- unsigned s;
- unsigned largest=0;
- short largestP=0;
- U32 lowThreshold = (U32)(total >> tableLog);
-
- for (s=0; s<=maxSymbolValue; s++) {
- if (count[s] == total) return 0; /* rle special case */
- if (count[s] == 0) { normalizedCounter[s]=0; continue; }
- if (count[s] <= lowThreshold) {
+ U64 const vStep = 1ULL<<(scale-20);
+ int stillToDistribute = 1<<tableLog;
+ unsigned s;
+ unsigned largest=0;
+ short largestP=0;
+ U32 lowThreshold = (U32)(total >> tableLog);
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == total) return 0; /* rle special case */
+ if (count[s] == 0) { normalizedCounter[s]=0; continue; }
+ if (count[s] <= lowThreshold) {
normalizedCounter[s] = lowProbCount;
- stillToDistribute--;
- } else {
- short proba = (short)((count[s]*step) >> scale);
- if (proba<8) {
- U64 restToBeat = vStep * rtbTable[proba];
- proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
- }
+ stillToDistribute--;
+ } else {
+ short proba = (short)((count[s]*step) >> scale);
+ if (proba<8) {
+ U64 restToBeat = vStep * rtbTable[proba];
+ proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
+ }
if (proba > largestP) { largestP=proba; largest=s; }
- normalizedCounter[s] = proba;
- stillToDistribute -= proba;
- } }
- if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
- /* corner case, need another normalization method */
+ normalizedCounter[s] = proba;
+ stillToDistribute -= proba;
+ } }
+ if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
+ /* corner case, need another normalization method */
size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
- if (FSE_isError(errorCode)) return errorCode;
- }
- else normalizedCounter[largest] += (short)stillToDistribute;
- }
-
-#if 0
- { /* Print Table (debug) */
- U32 s;
- U32 nTotal = 0;
- for (s=0; s<=maxSymbolValue; s++)
+ if (FSE_isError(errorCode)) return errorCode;
+ }
+ else normalizedCounter[largest] += (short)stillToDistribute;
+ }
+
+#if 0
+ { /* Print Table (debug) */
+ U32 s;
+ U32 nTotal = 0;
+ for (s=0; s<=maxSymbolValue; s++)
RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
- for (s=0; s<=maxSymbolValue; s++)
- nTotal += abs(normalizedCounter[s]);
- if (nTotal != (1U<<tableLog))
+ for (s=0; s<=maxSymbolValue; s++)
+ nTotal += abs(normalizedCounter[s]);
+ if (nTotal != (1U<<tableLog))
RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
- getchar();
- }
-#endif
-
- return tableLog;
-}
-
-
-/* fake FSE_CTable, for raw (uncompressed) input */
-size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
-{
- const unsigned tableSize = 1 << nbBits;
- const unsigned tableMask = tableSize - 1;
- const unsigned maxSymbolValue = tableMask;
- void* const ptr = ct;
- U16* const tableU16 = ( (U16*) ptr) + 2;
- void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
- FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
- unsigned s;
-
- /* Sanity checks */
- if (nbBits < 1) return ERROR(GENERIC); /* min size */
-
- /* header */
- tableU16[-2] = (U16) nbBits;
- tableU16[-1] = (U16) maxSymbolValue;
-
- /* Build table */
- for (s=0; s<tableSize; s++)
- tableU16[s] = (U16)(tableSize + s);
-
- /* Build Symbol Transformation Table */
- { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
- for (s=0; s<=maxSymbolValue; s++) {
- symbolTT[s].deltaNbBits = deltaNbBits;
- symbolTT[s].deltaFindState = s-1;
- } }
-
- return 0;
-}
-
+ getchar();
+ }
+#endif
+
+ return tableLog;
+}
+
+
+/* fake FSE_CTable, for raw (uncompressed) input */
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
+{
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* header */
+ tableU16[-2] = (U16) nbBits;
+ tableU16[-1] = (U16) maxSymbolValue;
+
+ /* Build table */
+ for (s=0; s<tableSize; s++)
+ tableU16[s] = (U16)(tableSize + s);
+
+ /* Build Symbol Transformation Table */
+ { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
+ for (s=0; s<=maxSymbolValue; s++) {
+ symbolTT[s].deltaNbBits = deltaNbBits;
+ symbolTT[s].deltaFindState = s-1;
+ } }
+
+ return 0;
+}
+
/* fake FSE_CTable, for rle input (always same symbol) */
-size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
-{
- void* ptr = ct;
- U16* tableU16 = ( (U16*) ptr) + 2;
- void* FSCTptr = (U32*)ptr + 2;
- FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
-
- /* header */
- tableU16[-2] = (U16) 0;
- tableU16[-1] = (U16) symbolValue;
-
- /* Build table */
- tableU16[0] = 0;
- tableU16[1] = 0; /* just in case */
-
- /* Build Symbol Transformation Table */
- symbolTT[symbolValue].deltaNbBits = 0;
- symbolTT[symbolValue].deltaFindState = 0;
-
- return 0;
-}
-
-
-static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const FSE_CTable* ct, const unsigned fast)
-{
- const BYTE* const istart = (const BYTE*) src;
- const BYTE* const iend = istart + srcSize;
- const BYTE* ip=iend;
-
- BIT_CStream_t bitC;
- FSE_CState_t CState1, CState2;
-
- /* init */
- if (srcSize <= 2) return 0;
+size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
+{
+ void* ptr = ct;
+ U16* tableU16 = ( (U16*) ptr) + 2;
+ void* FSCTptr = (U32*)ptr + 2;
+ FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
+
+ /* header */
+ tableU16[-2] = (U16) 0;
+ tableU16[-1] = (U16) symbolValue;
+
+ /* Build table */
+ tableU16[0] = 0;
+ tableU16[1] = 0; /* just in case */
+
+ /* Build Symbol Transformation Table */
+ symbolTT[symbolValue].deltaNbBits = 0;
+ symbolTT[symbolValue].deltaFindState = 0;
+
+ return 0;
+}
+
+
+static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct, const unsigned fast)
+{
+ const BYTE* const istart = (const BYTE*) src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip=iend;
+
+ BIT_CStream_t bitC;
+ FSE_CState_t CState1, CState2;
+
+ /* init */
+ if (srcSize <= 2) return 0;
{ size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
-
-#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
-
- if (srcSize & 1) {
- FSE_initCState2(&CState1, ct, *--ip);
- FSE_initCState2(&CState2, ct, *--ip);
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
- FSE_FLUSHBITS(&bitC);
- } else {
- FSE_initCState2(&CState2, ct, *--ip);
- FSE_initCState2(&CState1, ct, *--ip);
- }
-
- /* join to mod 4 */
- srcSize -= 2;
- if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */
- FSE_encodeSymbol(&bitC, &CState2, *--ip);
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
- FSE_FLUSHBITS(&bitC);
- }
-
- /* 2 or 4 encoding per loop */
+
+#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+
+ if (srcSize & 1) {
+ FSE_initCState2(&CState1, ct, *--ip);
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ } else {
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_initCState2(&CState1, ct, *--ip);
+ }
+
+ /* join to mod 4 */
+ srcSize -= 2;
+ if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ /* 2 or 4 encoding per loop */
while ( ip>istart ) {
-
- FSE_encodeSymbol(&bitC, &CState2, *--ip);
-
- if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */
- FSE_FLUSHBITS(&bitC);
-
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
-
- if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */
- FSE_encodeSymbol(&bitC, &CState2, *--ip);
- FSE_encodeSymbol(&bitC, &CState1, *--ip);
- }
-
- FSE_FLUSHBITS(&bitC);
- }
-
- FSE_flushCState(&bitC, &CState2);
- FSE_flushCState(&bitC, &CState1);
- return BIT_closeCStream(&bitC);
-}
-
-size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- const FSE_CTable* ct)
-{
+
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */
+ FSE_FLUSHBITS(&bitC);
+
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ }
+
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ FSE_flushCState(&bitC, &CState2);
+ FSE_flushCState(&bitC, &CState1);
+ return BIT_closeCStream(&bitC);
+}
+
+size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct)
+{
unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
-
- if (fast)
- return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
- else
- return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
-}
-
-
-size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
-
+
+ if (fast)
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
+ else
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
+}
+
+
+size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
+
#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
* `wkspSize` size must be `(1<<tableLog)`.
*/
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
-{
- BYTE* const ostart = (BYTE*) dst;
- BYTE* op = ostart;
- BYTE* const oend = ostart + dstSize;
-
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + dstSize;
+
unsigned count[FSE_MAX_SYMBOL_VALUE+1];
- S16 norm[FSE_MAX_SYMBOL_VALUE+1];
+ S16 norm[FSE_MAX_SYMBOL_VALUE+1];
FSE_CTable* CTable = (FSE_CTable*)workSpace;
size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
void* scratchBuffer = (void*)(CTable + CTableSize);
size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
-
- /* init conditions */
+
+ /* init conditions */
if (wkspSize < FSE_COMPRESS_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
if (srcSize <= 1) return 0; /* Not compressible */
- if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
- if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
-
- /* Scan input and build symbol stats */
+ if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
+
+ /* Scan input and build symbol stats */
{ CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize) );
if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */
if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */
}
-
- tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
+
+ tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, /* useLowProbCount */ srcSize >= 2048) );
-
- /* Write table description header */
+
+ /* Write table description header */
{ CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
op += nc_err;
}
-
- /* Compress */
+
+ /* Compress */
CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
if (cSize == 0) return 0; /* not enough space for compressed data */
op += cSize;
}
-
- /* check compressibility */
+
+ /* check compressibility */
if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;
-
- return op-ostart;
-}
-
+
+ return op-ostart;
+}
+
typedef struct {
FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
union {
@@ -725,17 +725,17 @@ typedef struct {
} fseWkspMax_t;
size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
-{
+{
fseWkspMax_t scratchBuffer;
DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_COMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
-}
-
+}
+
size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
}
#endif
-
-#endif /* FSE_COMMONDEFS_ONLY */
+
+#endif /* FSE_COMMONDEFS_ONLY */
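
One note on the arithmetic above before the huf_compress.c hunks: FSE_normalizeCount rescales raw symbol counts so they sum to exactly 1<<tableLog while spending a single 64-bit division, step = 2^62 / total, after which each symbol costs one multiply and shift, proba = (count[s]*step) >> (62 - tableLog), and the rounding remainder is handed to the most frequent symbol. Below is a self-contained sketch of that fast path; the rtbTable rounding correction, the lowThreshold/lowProbCount handling, and the FSE_normalizeM2 fallback are deliberately omitted (the simplification is mine, not the library's).

#include <stdint.h>
#include <stdio.h>

/* Proportionally rescale counts so they sum to (1 << tableLog).
 * Mirrors the fast path of FSE_normalizeCount: one division total,
 * then a multiply+shift per symbol; the rounding remainder goes to
 * the most frequent symbol, as the real code does via stillToDistribute. */
static void normalize_sketch(int16_t* norm, unsigned tableLog,
                             const unsigned* count, size_t total, unsigned maxSymbolValue)
{
    uint64_t const scale = 62 - tableLog;
    uint64_t const step = ((uint64_t)1 << 62) / total;   /* the single division */
    int still = 1 << tableLog;                           /* probability mass left to assign */
    unsigned s, largest = 0;
    int16_t largestP = 0;

    for (s = 0; s <= maxSymbolValue; s++) {
        int16_t const proba = (int16_t)((count[s] * step) >> scale);
        norm[s] = proba;
        still -= proba;
        if (proba > largestP) { largestP = proba; largest = s; }
    }
    norm[largest] += (int16_t)still;   /* absorb the rounding remainder */
}

int main(void)
{
    unsigned const count[4] = { 500, 300, 150, 50 };   /* total = 1000 */
    int16_t norm[4];
    normalize_sketch(norm, 8, count, 1000, 3);         /* target sum = 1<<8 = 256 */
    printf("%d %d %d %d\n", norm[0], norm[1], norm[2], norm[3]);
    return 0;
}

With count = {500, 300, 150, 50} and tableLog = 8, the sketch prints 130 76 38 12, which sums to 256. In the real code the same remainder correction is guarded by the -stillToDistribute >= (normalizedCounter[largest] >> 1) check, which falls back to FSE_normalizeM2 when too much probability mass was rounded away.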
diff --git a/contrib/libs/zstd/lib/compress/huf_compress.c b/contrib/libs/zstd/lib/compress/huf_compress.c
index 137671d31a..2b3d6adc2a 100644
--- a/contrib/libs/zstd/lib/compress/huf_compress.c
+++ b/contrib/libs/zstd/lib/compress/huf_compress.c
@@ -1,4 +1,4 @@
-/* ******************************************************************
+/* ******************************************************************
* Huffman encoder, part of New Generation Entropy library
* Copyright (c) Yann Collet, Facebook, Inc.
*
@@ -10,49 +10,49 @@
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
-****************************************************************** */
-
-/* **************************************************************
-* Compiler specifics
-****************************************************************/
-#ifdef _MSC_VER /* Visual Studio */
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-#endif
-
-
-/* **************************************************************
-* Includes
-****************************************************************/
+****************************************************************** */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
#include "../common/compiler.h"
#include "../common/bitstream.h"
#include "hist.h"
-#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
+#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
#include "../common/fse.h" /* header compression */
-#define HUF_STATIC_LINKING_ONLY
+#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/error_private.h"
-
-
-/* **************************************************************
-* Error Management
-****************************************************************/
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
-
-
-/* **************************************************************
-* Utils
-****************************************************************/
-unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
-{
- return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
-}
-
-
-/* *******************************************************
-* HUF : Huffman block compression
-*********************************************************/
+
+
+/* **************************************************************
+* Utils
+****************************************************************/
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+}
+
+
+/* *******************************************************
+* HUF : Huffman block compression
+*********************************************************/
#define HUF_WORKSPACE_MAX_ALIGNMENT 8
static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
@@ -166,7 +166,7 @@ static void HUF_setValue(HUF_CElt* elt, size_t value)
typedef struct {
HUF_CompressWeightsWksp wksp;
BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
- BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
} HUF_WriteCTableWksp;
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
@@ -174,39 +174,39 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
void* workspace, size_t workspaceSize)
{
HUF_CElt const* const ct = CTable + 1;
- BYTE* op = (BYTE*)dst;
- U32 n;
+ BYTE* op = (BYTE*)dst;
+ U32 n;
HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
-
+
/* check conditions */
if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
-
- /* convert to weight */
+
+ /* convert to weight */
wksp->bitsToWeight[0] = 0;
- for (n=1; n<huffLog+1; n++)
+ for (n=1; n<huffLog+1; n++)
wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
- for (n=0; n<maxSymbolValue; n++)
+ for (n=0; n<maxSymbolValue; n++)
wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];
-
+
/* attempt weights compression by FSE */
if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
{ CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
op[0] = (BYTE)hSize;
return hSize+1;
- } }
-
+ } }
+
/* write raw values as 4-bits (max : 15) */
if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
- if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
- op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
+ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
+ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
- for (n=0; n<maxSymbolValue; n+=2)
+ for (n=0; n<maxSymbolValue; n+=2)
op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
- return ((maxSymbolValue+1)/2) + 1;
-}
-
+ return ((maxSymbolValue+1)/2) + 1;
+}
+
/*! HUF_writeCTable() :
`CTable` : Huffman tree to save, using huf representation.
@return : size of saved CTable */
@@ -216,67 +216,67 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize,
HUF_WriteCTableWksp wksp;
return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
}
-
+
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
-{
+{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
- U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
- U32 tableLog = 0;
- U32 nbSymbols = 0;
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
HUF_CElt* const ct = CTable + 1;
-
- /* get symbol weights */
+
+ /* get symbol weights */
CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
*hasZeroWeights = (rankVal[0] > 0);
-
- /* check result */
- if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+
+ /* check result */
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
-
+
CTable[0] = tableLog;
- /* Prepare base value per rank */
- { U32 n, nextRankStart = 0;
- for (n=1; n<=tableLog; n++) {
+ /* Prepare base value per rank */
+ { U32 n, nextRankStart = 0;
+ for (n=1; n<=tableLog; n++) {
U32 curr = nextRankStart;
- nextRankStart += (rankVal[n] << (n-1));
+ nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = curr;
- } }
-
- /* fill nbBits */
- { U32 n; for (n=0; n<nbSymbols; n++) {
- const U32 w = huffWeight[n];
+ } }
+
+ /* fill nbBits */
+ { U32 n; for (n=0; n<nbSymbols; n++) {
+ const U32 w = huffWeight[n];
HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0));
- } }
-
- /* fill val */
+ } }
+
+ /* fill val */
{ U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
{ U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; }
- /* determine stating value per rank */
+ /* determine stating value per rank */
valPerRank[tableLog+1] = 0; /* for w==0 */
- { U16 min = 0;
+ { U16 min = 0;
U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
valPerRank[n] = min; /* get starting value within each rank */
- min += nbPerRank[n];
- min >>= 1;
- } }
- /* assign value within rank, symbol order */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ /* assign value within rank, symbol order */
{ U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
- }
-
+ }
+
*maxSymbolValuePtr = nbSymbols - 1;
- return readSize;
-}
-
+ return readSize;
+}
+
U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
{
const HUF_CElt* ct = CTable + 1;
assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
return (U32)HUF_getNbBits(ct[symbolValue]);
}
-
+
typedef struct nodeElt_s {
U32 count;
@@ -305,83 +305,83 @@ typedef struct nodeElt_s {
* @return The maximum number of bits of the Huffman tree after adjustment,
* necessarily no more than maxNbBits.
*/
-static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
-{
- const U32 largestBits = huffNode[lastNonNull].nbBits;
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+{
+ const U32 largestBits = huffNode[lastNonNull].nbBits;
/* early exit : no elt > maxNbBits, so the tree is already valid. */
if (largestBits <= maxNbBits) return largestBits;
-
- /* there are several too large elements (at least >= 2) */
- { int totalCost = 0;
- const U32 baseCost = 1 << (largestBits - maxNbBits);
+
+ /* there are several too large elements (at least >= 2) */
+ { int totalCost = 0;
+ const U32 baseCost = 1 << (largestBits - maxNbBits);
int n = (int)lastNonNull;
-
+
/* Adjust any ranks > maxNbBits to maxNbBits.
* Compute totalCost, which is how far the sum of the ranks is
* we are over 2^largestBits after adjust the offending ranks.
*/
- while (huffNode[n].nbBits > maxNbBits) {
- totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
- huffNode[n].nbBits = (BYTE)maxNbBits;
+ while (huffNode[n].nbBits > maxNbBits) {
+ totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
+ huffNode[n].nbBits = (BYTE)maxNbBits;
n--;
}
/* n stops at huffNode[n].nbBits <= maxNbBits */
assert(huffNode[n].nbBits <= maxNbBits);
/* n end at index of smallest symbol using < maxNbBits */
while (huffNode[n].nbBits == maxNbBits) --n;
-
+
/* renorm totalCost from 2^largestBits to 2^maxNbBits
* note : totalCost is necessarily a multiple of baseCost */
assert((totalCost & (baseCost - 1)) == 0);
totalCost >>= (largestBits - maxNbBits);
assert(totalCost > 0);
-
- /* repay normalized cost */
- { U32 const noSymbol = 0xF0F0F0F0;
- U32 rankLast[HUF_TABLELOG_MAX+2];
-
+
+ /* repay normalized cost */
+ { U32 const noSymbol = 0xF0F0F0F0;
+ U32 rankLast[HUF_TABLELOG_MAX+2];
+
/* Get pos of last (smallest = lowest cum. count) symbol per rank */
ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
- { U32 currentNbBits = maxNbBits;
+ { U32 currentNbBits = maxNbBits;
int pos;
- for (pos=n ; pos >= 0; pos--) {
- if (huffNode[pos].nbBits >= currentNbBits) continue;
- currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
+ for (pos=n ; pos >= 0; pos--) {
+ if (huffNode[pos].nbBits >= currentNbBits) continue;
+ currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
rankLast[maxNbBits-currentNbBits] = (U32)pos;
- } }
-
- while (totalCost > 0) {
+ } }
+
+ while (totalCost > 0) {
/* Try to reduce the next power of 2 above totalCost because we
* gain back half the rank.
*/
U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
- for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
+ for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
U32 const highPos = rankLast[nBitsToDecrease];
U32 const lowPos = rankLast[nBitsToDecrease-1];
- if (highPos == noSymbol) continue;
+ if (highPos == noSymbol) continue;
/* Decrease highPos if no symbols of lowPos or if it is
* not cheaper to remove 2 lowPos than highPos.
*/
- if (lowPos == noSymbol) break;
- { U32 const highTotal = huffNode[highPos].count;
- U32 const lowTotal = 2 * huffNode[lowPos].count;
- if (highTotal <= lowTotal) break;
- } }
- /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
+ if (lowPos == noSymbol) break;
+ { U32 const highTotal = huffNode[highPos].count;
+ U32 const lowTotal = 2 * huffNode[lowPos].count;
+ if (highTotal <= lowTotal) break;
+ } }
+ /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
/* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
nBitsToDecrease++;
assert(rankLast[nBitsToDecrease] != noSymbol);
/* Increase the number of bits to gain back half the rank cost. */
- totalCost -= 1 << (nBitsToDecrease-1);
+ totalCost -= 1 << (nBitsToDecrease-1);
huffNode[rankLast[nBitsToDecrease]].nbBits++;
/* Fix up the new rank.
* If the new rank was empty, this symbol is now its smallest.
* Otherwise, this symbol will be the largest in the new rank so no adjustment.
*/
- if (rankLast[nBitsToDecrease-1] == noSymbol)
+ if (rankLast[nBitsToDecrease-1] == noSymbol)
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
/* Fix up the old rank.
* If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
@@ -390,48 +390,48 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
* the smallest node in the rank. If the previous position belongs to a different rank,
* then the rank is now empty.
*/
- if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
- rankLast[nBitsToDecrease] = noSymbol;
- else {
- rankLast[nBitsToDecrease]--;
- if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
- rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
+ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
+ rankLast[nBitsToDecrease] = noSymbol;
+ else {
+ rankLast[nBitsToDecrease]--;
+ if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
+ rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
}
} /* while (totalCost > 0) */
-
+
/* If we've removed too much weight, then we have to add it back.
* To avoid overshooting again, we only adjust the smallest rank.
* We take the largest nodes from the lowest rank 0 and move them
* to rank 1. There's guaranteed to be enough rank 0 symbols because
* TODO.
*/
- while (totalCost < 0) { /* Sometimes, cost correction overshoot */
+ while (totalCost < 0) { /* Sometimes, cost correction overshoot */
/* special case : no rank 1 symbol (using maxNbBits-1);
* let's create one from largest rank 0 (using maxNbBits).
*/
if (rankLast[1] == noSymbol) {
- while (huffNode[n].nbBits == maxNbBits) n--;
- huffNode[n+1].nbBits--;
+ while (huffNode[n].nbBits == maxNbBits) n--;
+ huffNode[n+1].nbBits--;
assert(n >= 0);
rankLast[1] = (U32)(n+1);
- totalCost++;
- continue;
- }
- huffNode[ rankLast[1] + 1 ].nbBits--;
- rankLast[1]++;
- totalCost ++;
+ totalCost++;
+ continue;
+ }
+ huffNode[ rankLast[1] + 1 ].nbBits--;
+ rankLast[1]++;
+ totalCost ++;
}
} /* repay normalized cost */
} /* there are several too large elements (at least >= 2) */
-
- return maxNbBits;
-}
-
-typedef struct {
+
+ return maxNbBits;
+}
+
+typedef struct {
U16 base;
U16 curr;
-} rankPos;
-
+} rankPos;
+
typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
/* Number of buckets available for HUF_sort() */
@@ -550,7 +550,7 @@ static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
U32 n;
U32 const maxSymbolValue1 = maxSymbolValue+1;
-
+
/* Compute base and set curr to base.
* For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
* See HUF_getIndex to see bucketing strategy.
@@ -562,7 +562,7 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
U32 lowerRank = HUF_getIndex(count[n]);
assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
rankPosition[lowerRank].base++;
- }
+ }
assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
/* Set up the rankPosition table */
@@ -573,13 +573,13 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
/* Insert each symbol into their appropriate bucket, setting up rankPosition table. */
for (n = 0; n < maxSymbolValue1; ++n) {
- U32 const c = count[n];
+ U32 const c = count[n];
U32 const r = HUF_getIndex(c) + 1;
U32 const pos = rankPosition[r].curr++;
assert(pos < maxSymbolValue1);
- huffNode[pos].count = c;
- huffNode[pos].byte = (BYTE)n;
- }
+ huffNode[pos].count = c;
+ huffNode[pos].byte = (BYTE)n;
+ }
/* Sort each bucket. */
for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
@@ -592,13 +592,13 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
}
assert(HUF_isSorted(huffNode, maxSymbolValue1));
-}
-
+}
+
/** HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
*/
-#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
+#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
/* HUF_buildTree():
* Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
@@ -608,38 +608,38 @@ static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSy
* @return The smallest node in the Huffman tree (by count).
*/
static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
-{
+{
nodeElt* const huffNode0 = huffNode - 1;
int nonNullRank;
- int lowS, lowN;
+ int lowS, lowN;
int nodeNb = STARTNODE;
int n, nodeRoot;
- /* init for parents */
+ /* init for parents */
nonNullRank = (int)maxSymbolValue;
- while(huffNode[nonNullRank].count == 0) nonNullRank--;
- lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
- huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
+ while(huffNode[nonNullRank].count == 0) nonNullRank--;
+ lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
+ huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
- nodeNb++; lowS-=2;
- for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
+ nodeNb++; lowS-=2;
+ for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
-
- /* create parents */
- while (nodeNb <= nodeRoot) {
+
+ /* create parents */
+ while (nodeNb <= nodeRoot) {
int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
- huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
+ huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
- nodeNb++;
- }
-
- /* distribute weights (unlimited tree height) */
- huffNode[nodeRoot].nbBits = 0;
- for (n=nodeRoot-1; n>=STARTNODE; n--)
- huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
- for (n=0; n<=nonNullRank; n++)
- huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
-
+ nodeNb++;
+ }
+
+ /* distribute weights (unlimited tree height) */
+ huffNode[nodeRoot].nbBits = 0;
+ for (n=nodeRoot-1; n>=STARTNODE; n--)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+ for (n=0; n<=nonNullRank; n++)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+
return nonNullRank;
}
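[note] Because the leaves arrive sorted and internal nodes are created in non-decreasing count order, the global minimum is always at one of two cursors (lowS walking down the leaves, lowN walking up the internal nodes), so no heap is needed. A self-contained toy run of the same two-cursor merge for four symbols; the real code seeds the first merge before the loop and uses the huffNode0[0] sentinel (count 2^31) instead of the explicit lowS guard used here:

    #include <stdio.h>

    typedef struct { unsigned count; int parent; unsigned nbBits; } Node;

    int main(void)
    {
        /* leaves sorted by descending count : A=5 B=3 C=2 D=1 */
        Node n[8] = { {5,-1,0}, {3,-1,0}, {2,-1,0}, {1,-1,0} };
        int lowS = 3;            /* cursor over leaves, smallest first */
        int lowN = 4;            /* cursor over internal nodes, oldest first */
        int nodeNb = 4;          /* next internal-node slot */
        int const root = 6;      /* nbLeaves-1 internal nodes => last one is the root */
        int i;
        for (i = nodeNb; i <= root; i++) n[i].count = 1u << 30;   /* sentinels */
        while (nodeNb <= root) {
            int const n1 = (lowS >= 0 && n[lowS].count < n[lowN].count) ? lowS-- : lowN++;
            int const n2 = (lowS >= 0 && n[lowS].count < n[lowN].count) ? lowS-- : lowN++;
            n[nodeNb].count = n[n1].count + n[n2].count;
            n[n1].parent = n[n2].parent = nodeNb;
            nodeNb++;
        }
        n[root].nbBits = 0;      /* parents are created after their children */
        for (i = root - 1; i >= 0; i--)
            n[i].nbBits = n[n[i].parent].nbBits + 1;
        for (i = 0; i < 4; i++) printf("symbol %d : %u bits\n", i, n[i].nbBits);
        return 0;                /* prints 1, 2, 3, 3 : Kraft sum = 1 */
    }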
@@ -698,17 +698,17 @@ size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSym
/* build tree */
nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
- /* enforce maxTableLog */
+ /* enforce maxTableLog */
maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
-
+
HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
-
- return maxNbBits;
-}
-
+
+ return maxNbBits;
+}
+
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
-{
+{
HUF_CElt const* ct = CTable + 1;
size_t nbBits = 0;
int s;
@@ -716,8 +716,8 @@ size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count,
nbBits += HUF_getNbBits(ct[s]) * count[s];
}
return nbBits >> 3;
-}
-
+}
+
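[note] Worked example, reusing the four-symbol toy above: with counts {5, 3, 2, 1} and code lengths {1, 2, 3, 3} bits, HUF_estimateCompressedSize() returns (5*1 + 3*2 + 2*3 + 1*3) >> 3 = 20 >> 3 = 2 bytes. The shift truncates, so the estimate is slightly optimistic.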
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
HUF_CElt const* ct = CTable + 1;
int bad = 0;
@@ -728,8 +728,8 @@ int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned m
return !bad;
}
-size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
-
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+
/** HUF_CStream_t:
* Huffman uses its own BIT_CStream_t implementation.
* There are three major differences from BIT_CStream_t:
@@ -896,7 +896,7 @@ HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int
{
HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
}
-
+
FORCE_INLINE_TEMPLATE void
HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
const BYTE* ip, size_t srcSize,
@@ -925,7 +925,7 @@ HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
n -= kUnroll;
}
assert(n % (2 * kUnroll) == 0);
-
+
for (; n>0; n-= 2 * kUnroll) {
/* Encode kUnroll symbols into the bitstream @ index 0. */
int u;
@@ -948,7 +948,7 @@ HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
HUF_flushBits(bitC, kFastFlush);
}
assert(n == 0);
-
+
}
/**
@@ -966,20 +966,20 @@ FORCE_INLINE_TEMPLATE size_t
HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable)
-{
+{
U32 const tableLog = (U32)CTable[0];
HUF_CElt const* ct = CTable + 1;
- const BYTE* ip = (const BYTE*) src;
- BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = ostart + dstSize;
- BYTE* op = ostart;
+ const BYTE* ip = (const BYTE*) src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
HUF_CStream_t bitC;
-
- /* init */
- if (dstSize < 8) return 0; /* not enough space to compress */
+
+ /* init */
+ if (dstSize < 8) return 0; /* not enough space to compress */
{ size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
if (HUF_isError(initErr)) return 0; }
-
+
if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
else {
@@ -1021,19 +1021,19 @@ HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
break;
}
}
- }
+ }
assert(bitC.ptr <= bitC.endPtr);
-
+
return HUF_closeCStream(&bitC);
-}
-
+}
+
#if DYNAMIC_BMI2
-
+
static BMI2_TARGET_ATTRIBUTE size_t
HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable)
-{
+{
return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}
@@ -1084,56 +1084,56 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable, int bmi2)
{
- size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
- const BYTE* ip = (const BYTE*) src;
- const BYTE* const iend = ip + srcSize;
- BYTE* const ostart = (BYTE*) dst;
- BYTE* const oend = ostart + dstSize;
- BYTE* op = ostart;
-
- if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
- if (srcSize < 12) return 0; /* no saving possible : too small input */
- op += 6; /* jumpTable */
-
+ size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
+ const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
+ if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
+ if (srcSize < 12) return 0; /* no saving possible : too small input */
+ op += 6; /* jumpTable */
+
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize == 0 || cSize > 65535) return 0;
- MEM_writeLE16(ostart, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
+ MEM_writeLE16(ostart, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize == 0 || cSize > 65535) return 0;
- MEM_writeLE16(ostart+2, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
+ MEM_writeLE16(ostart+2, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize == 0 || cSize > 65535) return 0;
- MEM_writeLE16(ostart+4, (U16)cSize);
- op += cSize;
- }
-
- ip += segmentSize;
+ MEM_writeLE16(ostart+4, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
assert(op <= oend);
assert(ip <= iend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
if (cSize == 0 || cSize > 65535) return 0;
- op += cSize;
- }
-
+ op += cSize;
+ }
+
return (size_t)(op-ostart);
-}
-
+}
+
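[note] Decoder-side view of the 6-byte jump table written above: the compressed sizes of streams 1-3 are stored as little-endian 16-bit values (hence the cSize > 65535 bailouts), and the fourth stream simply runs to the end of the blob. A hedged sketch with error handling reduced to the minimum; names are illustrative:

    #include <stddef.h>

    typedef unsigned char BYTE;

    static unsigned readLE16(const BYTE* p) { return (unsigned)p[0] | ((unsigned)p[1] << 8); }

    /* fills start[]/size[] for the 4 streams; returns 0 on success, -1 on corruption */
    static int locateStreams(const BYTE* src, size_t srcSize,
                             const BYTE* start[4], size_t size[4])
    {
        size_t s1, s2, s3;
        if (srcSize < 6) return -1;
        s1 = readLE16(src + 0);
        s2 = readLE16(src + 2);
        s3 = readLE16(src + 4);
        if (6 + s1 + s2 + s3 > srcSize) return -1;   /* corrupt jump table */
        start[0] = src + 6;        size[0] = s1;
        start[1] = start[0] + s1;  size[1] = s2;
        start[2] = start[1] + s2;  size[2] = s3;
        start[3] = start[2] + s3;  size[3] = srcSize - 6 - s1 - s2 - s3;
        return 0;
    }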
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
}
-
+
size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
{
return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
@@ -1182,24 +1182,24 @@ HUF_compress_internal (void* dst, size_t dstSize,
void* workSpace, size_t wkspSize,
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
const int bmi2, unsigned suspectUncompressible)
-{
+{
HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
- BYTE* const ostart = (BYTE*)dst;
- BYTE* const oend = ostart + dstSize;
- BYTE* op = ostart;
-
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
- /* checks & inits */
+ /* checks & inits */
if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
if (!srcSize) return 0; /* Uncompressed */
if (!dstSize) return 0; /* cannot fit anything within dst budget */
- if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
- if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
+ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
- if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
- if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
-
+ if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
+
/* Heuristic : If old table is valid, use it for small inputs */
if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
return HUF_compressCTable_internal(ostart, op, oend,
@@ -1222,12 +1222,12 @@ HUF_compress_internal (void* dst, size_t dstSize,
if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
- /* Scan input and build symbol stats */
+ /* Scan input and build symbol stats */
{ CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
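        /* worked example of the heuristic above : in a 128 KB block, the most
         * frequent byte must appear more than (131072>>7)+4 = 1028 times
         * (just under 1%) for Huffman compression to be attempted at all */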
- }
-
+ }
+
/* Check validity of previous table */
if ( repeat
&& *repeat == HUF_repeat_check
@@ -1241,22 +1241,22 @@ HUF_compress_internal (void* dst, size_t dstSize,
nbStreams, oldHufTable, bmi2);
}
- /* Build Huffman Tree */
- huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ /* Build Huffman Tree */
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
maxSymbolValue, huffLog,
&table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
CHECK_F(maxBits);
- huffLog = (U32)maxBits;
- }
+ huffLog = (U32)maxBits;
+ }
/* Zero unused symbols in CTable, so we can check it for validity */
{
size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue);
size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt);
ZSTD_memset(table->CTable + ctableSize, 0, unusedSize);
}
-
- /* Write table description header */
+
+ /* Write table description header */
{ CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
&table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
/* Check if using previous huffman table is beneficial */
@@ -1271,17 +1271,17 @@ HUF_compress_internal (void* dst, size_t dstSize,
/* Use the new huffman table */
if (hSize + 12ul >= srcSize) { return 0; }
- op += hSize;
+ op += hSize;
if (repeat) { *repeat = HUF_repeat_none; }
if (oldHufTable)
ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
- }
+ }
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, table->CTable, bmi2);
-}
-
-
+}
+
+
size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
@@ -1355,16 +1355,16 @@ size_t HUF_compress1X (void* dst, size_t dstSize,
return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
}
-size_t HUF_compress2 (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog)
-{
+size_t HUF_compress2 (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog)
+{
U64 workSpace[HUF_WORKSPACE_SIZE_U64];
return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
-}
-
-size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
-{
+}
+
+size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
-}
+}
#endif
diff --git a/contrib/libs/zstd/lib/compress/zstd_compress.c b/contrib/libs/zstd/lib/compress/zstd_compress.c
index 0e8f184472..f06456af92 100644
--- a/contrib/libs/zstd/lib/compress/zstd_compress.c
+++ b/contrib/libs/zstd/lib/compress/zstd_compress.c
@@ -7,16 +7,16 @@
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
-
-/*-*************************************
-* Dependencies
-***************************************/
+
+/*-*************************************
+* Dependencies
+***************************************/
#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
#include "../common/mem.h"
#include "hist.h" /* HIST_countFast_wksp */
#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
#include "../common/fse.h"
-#define HUF_STATIC_LINKING_ONLY
+#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
@@ -27,7 +27,7 @@
#include "zstd_opt.h"
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"
-
+
/* ***************************************************************
* Tuning parameters
*****************************************************************/
@@ -40,7 +40,7 @@
#ifndef ZSTD_COMPRESS_HEAPMODE
# define ZSTD_COMPRESS_HEAPMODE 0
#endif
-
+
/*!
* ZSTD_HASHLOG3_MAX :
* Maximum size of the hash table dedicated to find 3-bytes matches,
@@ -54,9 +54,9 @@
# define ZSTD_HASHLOG3_MAX 17
#endif
-/*-*************************************
-* Helper functions
-***************************************/
+/*-*************************************
+* Helper functions
+***************************************/
/* ZSTD_compressBound()
* Note that the result from this function is only compatible with the "normal"
* full-block strategy.
@@ -66,12 +66,12 @@
*/
size_t ZSTD_compressBound(size_t srcSize) {
return ZSTD_COMPRESSBOUND(srcSize);
-}
-
-
-/*-*************************************
-* Context memory management
-***************************************/
+}
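[note] For scale: at the time of writing, ZSTD_COMPRESSBOUND in zstd.h adds srcSize >> 8 plus, for inputs below 128 KB, a margin of (128 KB - srcSize) >> 11. A 100 KB input (102400 bytes) is thus bounded by 102400 + 400 + 14 = 102814 bytes.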
+
+
+/*-*************************************
+* Context memory management
+***************************************/
struct ZSTD_CDict_s {
const void* dictContent;
size_t dictContentSize;
@@ -80,7 +80,7 @@ struct ZSTD_CDict_s {
ZSTD_cwksp workspace;
ZSTD_matchState_t matchState;
ZSTD_compressedBlockState_t cBlockState;
- ZSTD_customMem customMem;
+ ZSTD_customMem customMem;
U32 dictID;
int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
@@ -88,12 +88,12 @@ struct ZSTD_CDict_s {
* the same greedy/lazy matchfinder at compression time.
*/
}; /* typedef'd to ZSTD_CDict within "zstd.h" */
-
-ZSTD_CCtx* ZSTD_createCCtx(void)
-{
+
+ZSTD_CCtx* ZSTD_createCCtx(void)
+{
return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
-}
-
+}
+
static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
assert(cctx != NULL);
@@ -106,8 +106,8 @@ static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
}
}
-ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
-{
+ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
+{
ZSTD_STATIC_ASSERT(zcss_init==0);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
@@ -117,7 +117,7 @@ ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
return cctx;
}
}
-
+
ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
{
ZSTD_cwksp ws;
@@ -132,21 +132,21 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
ZSTD_cwksp_move(&cctx->workspace, &ws);
cctx->staticSize = workspaceSize;
-
+
/* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
- return cctx;
-}
-
+ return cctx;
+}
+
/**
* Clears and frees all of the dictionaries in the CCtx.
*/
static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
-{
+{
ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
ZSTD_freeCDict(cctx->localDict.cdict);
ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
@@ -185,8 +185,8 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
}
}
return 0;
-}
-
+}
+
static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
{
@@ -200,20 +200,20 @@ static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
-{
+{
if (cctx==NULL) return 0; /* support sizeof on NULL */
/* cctx may be in the workspace */
return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
+ ZSTD_cwksp_sizeof(&cctx->workspace)
+ ZSTD_sizeof_localDict(cctx->localDict)
+ ZSTD_sizeof_mtctx(cctx);
-}
-
+}
+
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
-{
+{
return ZSTD_sizeof_CCtx(zcs); /* same object */
-}
-
+}
+
/* private API call, for dictBuilder only */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
@@ -297,7 +297,7 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
assert(!ZSTD_checkCParams(cParams));
return cctxParams;
}
-
+
static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
ZSTD_customMem customMem)
{
@@ -1228,9 +1228,9 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
/** ZSTD_checkCParams() :
 ensure CParam values remain within the authorized range.
- @return : 0, or an error code if one value is beyond authorized range */
-size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
-{
+ @return : 0, or an error code if one value is beyond authorized range */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+{
BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog);
BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog);
@@ -1238,9 +1238,9 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
- return 0;
-}
-
+ return 0;
+}
+
/** ZSTD_clampCParams() :
 * clamp CParam values into the valid range.
* @return : valid CParams */
@@ -1262,15 +1262,15 @@ ZSTD_clampCParams(ZSTD_compressionParameters cParams)
CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
return cParams;
}
-
+
/** ZSTD_cycleLog() :
* condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
-{
+{
U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
return hashLog - btScale;
-}
-
+}
+
/** ZSTD_dictAndWindowLog() :
* Returns an adjusted window log that is large enough to fit the source and the dictionary.
* The zstd format says that the entire dictionary is valid if one byte of the dictionary
@@ -1317,11 +1317,11 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize,
ZSTD_cParamMode_e mode)
-{
+{
const U64 minSrcSize = 513; /* (1<<9) + 1 */
const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
assert(ZSTD_checkCParams(cPar)==0);
-
+
switch (mode) {
case ZSTD_cpm_unknown:
case ZSTD_cpm_noAttachDict:
@@ -1365,13 +1365,13 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
if (cycleLog > dictAndWindowLog)
cPar.chainLog -= (cycleLog - dictAndWindowLog);
}
-
+
if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
-
- return cPar;
-}
-
+
+ return cPar;
+}
+
ZSTD_compressionParameters
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
@@ -1454,7 +1454,7 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
(U32)chainSize, (U32)hSize, (U32)h3Size);
return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
}
-
+
static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_compressionParameters* cParams,
const ldmParams_t* ldmParams,
@@ -1474,7 +1474,7 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
-
+
size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
@@ -1498,8 +1498,8 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
return neededSpace;
-}
-
+}
+
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
ZSTD_compressionParameters const cParams =
@@ -1734,7 +1734,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
-
+
DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
assert(useRowMatchFinder != ZSTD_ps_auto);
if (forceResetIndex == ZSTDirp_reset) {
@@ -1865,7 +1865,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
? windowSize + blockSize
: 0;
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);
-
+
int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
ZSTD_indexResetPolicy_e needsIndexReset =
@@ -1912,7 +1912,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
} }
-
+
ZSTD_cwksp_clear(ws);
/* init params */
@@ -1930,15 +1930,15 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
zc->stage = ZSTDcs_init;
zc->dictID = 0;
zc->dictContentSize = 0;
-
+
ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
-
+
/* ZSTD_wildcopy() is used to copy into the literals buffer,
* so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
*/
zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
zc->seqStore.maxNbLit = blockSize;
-
+
/* buffers */
zc->bufferedPolicy = zbuff;
zc->inBuffSize = buffInSize;
@@ -1993,8 +1993,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
return 0;
}
-}
-
+}
+
/* ZSTD_invalidateRepCodes() :
* ensures next compression will not use repcodes from previous block.
* Note : only works with regular variant;
@@ -2004,7 +2004,7 @@ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
}
-
+
/* These are the approximate sizes for each strategy past which copying the
* dictionary tables into the working context is faster than using them
* in-place.
@@ -2103,7 +2103,7 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
ZSTD_CCtx_params params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
-{
+{
const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
assert(!cdict->matchState.dedicatedDictSearch);
@@ -2235,10 +2235,10 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
}
-
+
ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
- /* copy tables */
+ /* copy tables */
{ size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
srcCCtx->appliedParams.useRowMatchFinder,
0 /* forDDSDict */)
@@ -2257,27 +2257,27 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
srcCCtx->blockState.matchState.hashTable3,
h3Size * sizeof(U32));
- }
-
+ }
+
ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
- /* copy dictionary offsets */
+ /* copy dictionary offsets */
{
const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
- }
+ }
dstCCtx->dictID = srcCCtx->dictID;
dstCCtx->dictContentSize = srcCCtx->dictContentSize;
-
+
/* copy block state */
ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
- return 0;
-}
-
+ return 0;
+}
+
/*! ZSTD_copyCCtx() :
* Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
@@ -2290,7 +2290,7 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long
ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
-
+
return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
fParams, pledgedSrcSize,
zbuff);
@@ -2298,7 +2298,7 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long
#define ZSTD_ROWSIZE 16
-/*! ZSTD_reduceTable() :
+/*! ZSTD_reduceTable() :
* reduce table indexes by `reducerValue`, or squash to zero.
* PreserveMark preserves "unsorted mark" for btlazy2 strategy.
* It must be set to a clear 0/1 value, to remove branch during inlining.
@@ -2306,7 +2306,7 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long
* to help auto-vectorization */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
-{
+{
int const nbRows = (int)size / ZSTD_ROWSIZE;
int cellNb = 0;
int rowNb;
@@ -2344,8 +2344,8 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa
table[cellNb] = newVal;
cellNb++;
} }
-}
-
+}
+
static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
ZSTD_reduceTable_internal(table, size, reducerValue, 0);
@@ -2356,14 +2356,14 @@ static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const
ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}
-/*! ZSTD_reduceIndex() :
-* rescale all indexes to avoid future overflow (indexes are U32) */
+/*! ZSTD_reduceIndex() :
+* rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
-{
+{
{ U32 const hSize = (U32)1 << params->cParams.hashLog;
ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
}
-
+
if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
U32 const chainSize = (U32)1 << params->cParams.chainLog;
if (params->cParams.strategy == ZSTD_btlazy2)
@@ -2371,48 +2371,48 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par
else
ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
}
-
+
if (ms->hashLog3) {
U32 const h3Size = (U32)1 << ms->hashLog3;
ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
}
-}
-
-
-/*-*******************************************************
-* Block entropic compression
-*********************************************************/
-
+}
+
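[note] The per-cell operation inside ZSTD_reduceTable_internal() is simple once isolated: indexes older than the reducer collapse to zero (their history is discarded), everything newer shifts down uniformly so relative distances are preserved. A minimal sketch, omitting the btlazy2 "unsorted mark" preservation and the ZSTD_ROWSIZE blocking that helps auto-vectorization:

    static unsigned reduceCell(unsigned idx, unsigned reducerValue)
    {
        return (idx < reducerValue) ? 0 : idx - reducerValue;
    }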
+
+/*-*******************************************************
+* Block entropic compression
+*********************************************************/
+
/* See doc/zstd_compression_format.md for detailed format description */
-
-void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
-{
- const seqDef* const sequences = seqStorePtr->sequencesStart;
- BYTE* const llCodeTable = seqStorePtr->llCode;
- BYTE* const ofCodeTable = seqStorePtr->ofCode;
- BYTE* const mlCodeTable = seqStorePtr->mlCode;
- U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
- U32 u;
+
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+{
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ BYTE* const llCodeTable = seqStorePtr->llCode;
+ BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ U32 u;
assert(nbSeq <= seqStorePtr->maxNbSeq);
- for (u=0; u<nbSeq; u++) {
- U32 const llv = sequences[u].litLength;
+ for (u=0; u<nbSeq; u++) {
+ U32 const llv = sequences[u].litLength;
U32 const mlv = sequences[u].mlBase;
llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
- }
+ }
if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
- llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
+ llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
- mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
-}
-
+ mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+}
+
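[note] Offset codes need no lookup at all: ofCodeTable[u] is simply the bit position of the highest set bit of offBase. A sketch of that mapping; the real ZSTD_highbit32 would typically compile down to a single bsr/clz instruction:

    /* floor(log2(v)) for v != 0 ; e.g. offBase = 1024 -> offset code 10 */
    static unsigned highbit32_sketch(unsigned v)
    {
        unsigned r = 0;
        while (v >>= 1) r++;
        return r;
    }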
/* ZSTD_useTargetCBlockSize():
 * Returns whether the target compressed block size parameter is in use.
* If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
* Returns 1 if true, 0 otherwise. */
static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
-{
+{
DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
return (cctxParams->targetCBlockSize != 0);
}
@@ -2594,14 +2594,14 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
size_t lastCountSize;
-
+
entropyWorkspace = count + (MaxSeq + 1);
entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
-
+
/* Compress literals */
{ const BYTE* const literals = seqStorePtr->litStart;
size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart;
@@ -2621,7 +2621,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
assert(cSize <= dstCapacity);
op += cSize;
}
-
+
/* Sequences Header */
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
dstSize_tooSmall, "Can't fit seq hdr in output buf!");
@@ -2656,7 +2656,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
lastCountSize = stats.lastCountSize;
op += stats.size;
}
-
+
{ size_t const bitstreamSize = ZSTD_encodeSequences(
op, (size_t)(oend - op),
CTable_MatchLength, mlCodeTable,
@@ -2716,13 +2716,13 @@ ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
}
DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
return cSize;
-}
-
+}
+
/* ZSTD_selectBlockCompressor() :
* Not static, but internal use only (used by long distance matcher)
* assumption : strat is a valid strategy */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
-{
+{
static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
{ ZSTD_compressBlock_fast /* default for 0 */,
ZSTD_compressBlock_fast,
@@ -2767,7 +2767,7 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
};
ZSTD_blockCompressor selectedCompressor;
ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
-
+
assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
@@ -2794,25 +2794,25 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
assert(selectedCompressor != NULL);
return selectedCompressor;
}
-
+
static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
const BYTE* anchor, size_t lastLLSize)
{
ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
seqStorePtr->lit += lastLLSize;
-}
-
+}
+
void ZSTD_resetSeqStore(seqStore_t* ssPtr)
-{
+{
ssPtr->lit = ssPtr->litStart;
ssPtr->sequences = ssPtr->sequencesStart;
ssPtr->longLengthType = ZSTD_llt_none;
-}
-
+}
+
typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
-{
+{
ZSTD_matchState_t* const ms = &zc->blockState.matchState;
DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
@@ -2844,7 +2844,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
if (curr > ms->nextToUpdate + 384)
ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
}
-
+
/* select and store sequences */
{ ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
size_t lastLLSize;
@@ -3812,7 +3812,7 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
out:
if (!ZSTD_isError(cSize) && cSize > 1) {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
- }
+ }
/* We check that dictionaries have offset codes available for the first
* block. After the first block, the offcode table might not have large
* enough codes to represent the offsets in the data.
@@ -3821,8 +3821,8 @@ out:
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
return cSize;
-}
-
+}
+
static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -3870,7 +3870,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
}
}
}
-
+
DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
/* Superblock compression failed, attempt to emit a single no compress block.
* The decoder will be able to stream this block since it is uncompressed.
@@ -3923,39 +3923,39 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
}
/*! ZSTD_compress_frameChunk() :
-* Compress a chunk of data into one or multiple blocks.
-* All blocks will be terminated, all input will be consumed.
-* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
-* The frame is assumed to be already started (header already produced)
-* @return : compressed size, or an error code
-*/
+* Compress a chunk of data into one or multiple blocks.
+* All blocks will be terminated, all input will be consumed.
+* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
+* The frame is assumed to be already started (header already produced)
+* @return : compressed size, or an error code
+*/
static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- U32 lastFrameChunk)
-{
- size_t blockSize = cctx->blockSize;
- size_t remaining = srcSize;
- const BYTE* ip = (const BYTE*)src;
- BYTE* const ostart = (BYTE*)dst;
- BYTE* op = ostart;
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastFrameChunk)
+{
+ size_t blockSize = cctx->blockSize;
+ size_t remaining = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
-
+
DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
- XXH64_update(&cctx->xxhState, src, srcSize);
-
- while (remaining) {
+ XXH64_update(&cctx->xxhState, src, srcSize);
+
+ while (remaining) {
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
- U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
-
+ U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
dstSize_tooSmall,
"not enough space to store compressed block");
- if (remaining < blockSize) blockSize = remaining;
-
+ if (remaining < blockSize) blockSize = remaining;
+
ZSTD_overflowCorrectIfNeeded(
ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
@@ -3991,7 +3991,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
cSize += ZSTD_blockHeaderSize;
}
}
-
+
ip += blockSize;
assert(remaining >= blockSize);
@@ -4003,15 +4003,15 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
(unsigned)cSize);
} }
-
- if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
+
+ if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
return (size_t)(op-ostart);
-}
-
-
-static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
+}
+
+
+static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
-{ BYTE* const op = (BYTE*)dst;
+{ BYTE* const op = (BYTE*)dst;
U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
U32 const checksumFlag = params->fParams.checksumFlag>0;
@@ -4022,7 +4022,7 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
(pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
size_t pos=0;
-
+
assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
"dst buf is too small to fit worst-case frame header size.");
@@ -4033,30 +4033,30 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
pos = 4;
}
op[pos++] = frameHeaderDescriptionByte;
- if (!singleSegment) op[pos++] = windowLogByte;
- switch(dictIDSizeCode)
- {
+ if (!singleSegment) op[pos++] = windowLogByte;
+ switch(dictIDSizeCode)
+ {
default:
assert(0); /* impossible */
ZSTD_FALLTHROUGH;
- case 0 : break;
- case 1 : op[pos] = (BYTE)(dictID); pos++; break;
- case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
- case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
- }
- switch(fcsCode)
- {
+ case 0 : break;
+ case 1 : op[pos] = (BYTE)(dictID); pos++; break;
+ case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
+ case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
+ }
+ switch(fcsCode)
+ {
default:
assert(0); /* impossible */
ZSTD_FALLTHROUGH;
- case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
- case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
- case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
- case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
- }
- return pos;
-}
-
+ case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
+ case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
+ case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
+ case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
+ }
+ return pos;
+}
+
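[note] Worked example of the descriptor byte built above: a frame with no dictID (code 0), checksum enabled, a multi-segment window, and pledgedSrcSize >= 65792 (fcsCode 2, 4-byte content size) yields 0 + (1<<2) + (0<<5) + (2<<6) = 0x84. Since singleSegment is off, the window-log byte follows, then the 4-byte little-endian content size.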
/* ZSTD_writeSkippableFrame_advanced() :
* Writes out a skippable frame with the specified magic number variant (16 are supported),
* from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
@@ -4091,7 +4091,7 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
return ZSTD_blockHeaderSize;
}
}
-
+
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
@@ -4109,38 +4109,38 @@ size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSe
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- U32 frame, U32 lastFrameChunk)
-{
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 frame, U32 lastFrameChunk)
+{
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
- size_t fhSize = 0;
-
+ size_t fhSize = 0;
+
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
cctx->stage, (unsigned)srcSize);
RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
"missing init (ZSTD_compressBegin)");
-
+
if (frame && (cctx->stage==ZSTDcs_init)) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
assert(fhSize <= dstCapacity);
- dstCapacity -= fhSize;
- dst = (char*)dst + fhSize;
+ dstCapacity -= fhSize;
+ dst = (char*)dst + fhSize;
cctx->stage = ZSTDcs_ongoing;
- }
-
+ }
+
if (!srcSize) return fhSize; /* do not generate an empty block if no input */
-
+
if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
ms->forceNonContiguous = 0;
ms->nextToUpdate = ms->window.dictLimit;
- }
+ }
if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
}
-
+
if (!frame) {
/* overflow check and correction for block mode */
ZSTD_overflowCorrectIfNeeded(
@@ -4149,7 +4149,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
}
DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
- { size_t const cSize = frame ?
+ { size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
@@ -4165,35 +4165,35 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
(unsigned)cctx->pledgedSrcSizePlusOne-1,
(unsigned)cctx->consumedSrcSize);
}
- return cSize + fhSize;
- }
-}
-
-size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
-{
+ return cSize + fhSize;
+ }
+}
+
+size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
-}
-
-
+}
+
+
size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
-{
+{
ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
assert(!ZSTD_checkCParams(cParams));
return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
-}
-
-size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
-{
+}
+
+size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
{ size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
-}
-
+}
+
/*! ZSTD_loadDictionaryContent() :
* @return : 0, or an error code
*/
@@ -4203,11 +4203,11 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
ZSTD_CCtx_params const* params,
const void* src, size_t srcSize,
ZSTD_dictTableLoadMethod_e dtlm)
-{
+{
const BYTE* ip = (const BYTE*) src;
- const BYTE* const iend = ip + srcSize;
+ const BYTE* const iend = ip + srcSize;
int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;
-
+
    /* Assert that the ms params match the params we're being given */
ZSTD_assertEqualCParams(params->cParams, ms->cParams);
@@ -4233,19 +4233,19 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
ms->forceNonContiguous = params->deterministicRefPrefix;
-
+
if (loadLdmDict) {
ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
}
- if (srcSize <= HASH_READ_SIZE) return 0;
-
+ if (srcSize <= HASH_READ_SIZE) return 0;
+
ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);
-
+
if (loadLdmDict)
ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
-
+
switch(params->cParams.strategy)
{
case ZSTD_fast:
@@ -4286,13 +4286,13 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
default:
assert(0); /* not possible : not a valid strategy id */
- }
-
+ }
+
ms->nextToUpdate = (U32)(iend - ms->window.base);
- return 0;
-}
-
-
+ return 0;
+}
+
+
/* Dictionaries that assign zero probability to symbols that show up cause problems
 * during FSE encoding. Mark dictionaries with zero-probability symbols as FSE_repeat_check;
 * only dictionaries with 100% valid symbols can be assumed valid.
@@ -4313,14 +4313,14 @@ static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictM
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
const void* const dict, size_t dictSize)
-{
+{
short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff;
const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */
- const BYTE* const dictEnd = dictPtr + dictSize;
+ const BYTE* const dictEnd = dictPtr + dictSize;
dictPtr += 8;
bs->entropy.huf.repeatMode = HUF_repeat_check;
-
+
{ unsigned maxSymbolValue = 255;
unsigned hasZeroWeights = 1;
size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
@@ -4333,9 +4333,9 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
- dictPtr += hufHeaderSize;
- }
-
+ dictPtr += hufHeaderSize;
+ }
+
{ unsigned offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
@@ -4347,12 +4347,12 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
- dictPtr += offcodeHeaderSize;
- }
-
- { short matchlengthNCount[MaxML+1];
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
@@ -4361,12 +4361,12 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
- dictPtr += matchlengthHeaderSize;
- }
-
- { short litlengthNCount[MaxLL+1];
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
@@ -4375,15 +4375,15 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
- dictPtr += litlengthHeaderSize;
- }
-
+ dictPtr += litlengthHeaderSize;
+ }
+
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
bs->rep[0] = MEM_readLE32(dictPtr+0);
bs->rep[1] = MEM_readLE32(dictPtr+4);
bs->rep[2] = MEM_readLE32(dictPtr+8);
- dictPtr += 12;
-
+ dictPtr += 12;
+
{ size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
U32 offcodeMax = MaxOff;
if (dictContentSize <= ((U32)-1) - 128 KB) {
@@ -4439,9 +4439,9 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
}
return dictID;
-}
-
-/** ZSTD_compress_insertDictionary() :
+}
+
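[note] Putting ZSTD_loadCEntropy() and ZSTD_loadZstdDictionary() together, the wire layout of a full zstd dictionary, as consumed above, is:

    /*  magic number (4 bytes) | dictID (4 bytes)       <- dictPtr += 8
     *  Huffman CTable          (HUF_readCTable)
     *  FSE normalized counts : offsets, match lengths, literal lengths
     *  3 repeat offsets        (3 x 4 bytes, little-endian, dictPtr += 12)
     *  raw content             (loaded into the match state)             */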
+/** ZSTD_compress_insertDictionary() :
* @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
@@ -4453,15 +4453,15 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
void* workspace)
-{
+{
DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
if ((dict==NULL) || (dictSize<8)) {
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
return 0;
}
-
+
ZSTD_reset_compressedBlockState(bs);
-
+
/* dict restricted modes */
if (dictContentType == ZSTD_dct_rawContent)
return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
@@ -4474,17 +4474,17 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
}
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
assert(0); /* impossible */
- }
+ }
/* dict as full zstd dictionary */
return ZSTD_loadZstdDictionary(
bs, ms, ws, params, dict, dictSize, dtlm, workspace);
-}
-
+}
+
#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
-/*! ZSTD_compressBegin_internal() :
+/*! ZSTD_compressBegin_internal() :
* @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
@@ -4493,7 +4493,7 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
-{
+{
size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
#if ZSTD_TRACE
cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
@@ -4531,8 +4531,8 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
cctx->dictContentSize = dictContentSize;
}
return 0;
-}
-
+}
+
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
@@ -4550,23 +4550,23 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
params, pledgedSrcSize,
ZSTDb_not_buffered);
}
-
-/*! ZSTD_compressBegin_advanced() :
-* @return : 0, or an error code */
-size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
- const void* dict, size_t dictSize,
- ZSTD_parameters params, unsigned long long pledgedSrcSize)
-{
+
+/*! ZSTD_compressBegin_advanced() :
+* @return : 0, or an error code */
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
ZSTD_CCtx_params cctxParams;
ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
return ZSTD_compressBegin_advanced_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
NULL /*cdict*/,
&cctxParams, pledgedSrcSize);
-}
-
-size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
-{
+}
+
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+{
ZSTD_CCtx_params cctxParams;
{
ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
@@ -4575,56 +4575,56 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di
DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
-}
-
+}
+
size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
-{
+{
return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
-}
-
-
-/*! ZSTD_writeEpilogue() :
-* Ends a frame.
-* @return : nb of bytes written into dst (or an error code) */
-static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
-{
- BYTE* const ostart = (BYTE*)dst;
- BYTE* op = ostart;
- size_t fhSize = 0;
-
+}
+
+
+/*! ZSTD_writeEpilogue() :
+* Ends a frame.
+* @return : nb of bytes written into dst (or an error code) */
+static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ size_t fhSize = 0;
+
DEBUGLOG(4, "ZSTD_writeEpilogue");
RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
-
- /* special case : empty frame */
- if (cctx->stage == ZSTDcs_init) {
+
+ /* special case : empty frame */
+ if (cctx->stage == ZSTDcs_init) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
- dstCapacity -= fhSize;
- op += fhSize;
- cctx->stage = ZSTDcs_ongoing;
- }
-
- if (cctx->stage != ZSTDcs_ending) {
- /* write one last empty block, make it the "last" block */
- U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
+ dstCapacity -= fhSize;
+ op += fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (cctx->stage != ZSTDcs_ending) {
+ /* write one last empty block, make it the "last" block */
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
- MEM_writeLE32(op, cBlockHeader24);
- op += ZSTD_blockHeaderSize;
- dstCapacity -= ZSTD_blockHeaderSize;
- }
-
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ }
+
if (cctx->appliedParams.fParams.checksumFlag) {
- U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
+ U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
- MEM_writeLE32(op, checksum);
- op += 4;
- }
-
- cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
- return op-ostart;
-}
-
+ MEM_writeLE32(op, checksum);
+ op += 4;
+ }
+
+ cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
+ return op-ostart;
+}
+
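[note] Concretely, the epilogue is tiny: the closing empty block is the 24-bit header 1 + (bt_raw<<1) + (0<<3) = 1 (bt_raw being 0 in zstd's block-type enum), i.e. bytes 01 00 00. It is emitted through a 4-byte store, but op only advances by ZSTD_blockHeaderSize = 3, which is why the capacity checks demand 4 bytes of room. When checksums are enabled, the low 32 bits of the frame-wide XXH64 digest follow, little-endian.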
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
{
#if ZSTD_TRACE
@@ -4649,16 +4649,16 @@ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
#endif
}
-size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize)
-{
- size_t endResult;
+size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t endResult;
size_t const cSize = ZSTD_compressContinue_internal(cctx,
dst, dstCapacity, src, srcSize,
1 /* frame mode */, 1 /* last chunk */);
FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
- endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
+ endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
@@ -4672,15 +4672,15 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
(unsigned)cctx->consumedSrcSize);
}
ZSTD_CCtx_trace(cctx, endResult);
- return cSize + endResult;
-}
-
+ return cSize + endResult;
+}
+
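ZSTD_compressEnd is the terminal step of the buffer-less streaming API: compress the final chunk, append the epilogue directly behind it, then verify the pledged source size. A minimal sketch of the begin/end sequence, assuming the buffer-less declarations from zstd.h's static-linking-only section and a dst buffer sized with ZSTD_compressBound():

size_t compress_bufferless(ZSTD_CCtx* cctx,
                           void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
{
    size_t const beginResult = ZSTD_compressBegin(cctx, 3 /* compression level */);
    if (ZSTD_isError(beginResult)) return beginResult;
    /* single chunk: ZSTD_compressEnd() emits the last block plus the epilogue */
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}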
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict,size_t dictSize,
- ZSTD_parameters params)
-{
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params)
+{
DEBUGLOG(4, "ZSTD_compress_advanced");
FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
@@ -4689,8 +4689,8 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
src, srcSize,
dict, dictSize,
&cctx->simpleApiParams);
-}
-
+}
+
/* Internal */
size_t ZSTD_compress_advanced_internal(
ZSTD_CCtx* cctx,
@@ -4698,20 +4698,20 @@ size_t ZSTD_compress_advanced_internal(
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
const ZSTD_CCtx_params* params)
-{
+{
DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
params, srcSize, ZSTDb_not_buffered) , "");
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
-}
-
+}
+
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize,
int compressionLevel)
-{
+{
{
ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
assert(params.fParams.contentSizeFlag == 1);
@@ -4719,8 +4719,8 @@ size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
}
DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
-}
-
+}
+
size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -4734,25 +4734,25 @@ size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
size_t ZSTD_compress(void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel)
-{
- size_t result;
+{
+ size_t result;
#if ZSTD_COMPRESS_HEAPMODE
ZSTD_CCtx* cctx = ZSTD_createCCtx();
RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
ZSTD_freeCCtx(cctx);
#else
- ZSTD_CCtx ctxBody;
+ ZSTD_CCtx ctxBody;
ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
- result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
+ result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */
#endif
- return result;
-}
-
-
-/* ===== Dictionary API ===== */
-
+ return result;
+}
+
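The ZSTD_COMPRESS_HEAPMODE switch only changes where the context lives: allocated on the heap, or placed on the stack with just its internal buffers released afterwards. Callers see a single one-shot entry point either way; a minimal usage sketch, assuming dst was sized with ZSTD_compressBound(srcSize):

#include <zstd.h>

size_t compress_one_shot(void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize)
{
    size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize,
                                       ZSTD_CLEVEL_DEFAULT);
    /* on failure, cSize is an error code; test with ZSTD_isError(cSize) */
    return cSize;
}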
+
+/* ===== Dictionary API ===== */
+
/*! ZSTD_estimateCDictSize_advanced() :
* Estimate amount of memory that will be needed to create a dictionary with following arguments */
size_t ZSTD_estimateCDictSize_advanced(
@@ -4769,7 +4769,7 @@ size_t ZSTD_estimateCDictSize_advanced(
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}
-
+
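The estimate sums the CDict struct, its cwksp-aligned match-state tables, and the dictionary content itself only when it will be copied; by-reference loading adds nothing for the content. A minimal budgeting sketch, with dictSize and cParams as hypothetical inputs:

size_t const byRef  = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byRef);
size_t const byCopy = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
/* byCopy - byRef ~= the aligned copy of the dictionary content */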
size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
@@ -4791,7 +4791,7 @@ static size_t ZSTD_initCDict_internal(
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_CCtx_params params)
-{
+{
DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
assert(!ZSTD_checkCParams(params.cParams));
cdict->matchState.cParams = params.cParams;
@@ -4806,7 +4806,7 @@ static size_t ZSTD_initCDict_internal(
}
cdict->dictContentSize = dictSize;
cdict->dictContentType = dictContentType;
-
+
cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
@@ -4834,7 +4834,7 @@ static size_t ZSTD_initCDict_internal(
cdict->dictID = (U32)dictID;
}
}
-
+
return 0;
}
@@ -4859,8 +4859,8 @@ static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
if (!workspace) {
ZSTD_customFree(workspace, customMem);
- return NULL;
- }
+ return NULL;
+ }
ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
@@ -4870,10 +4870,10 @@ static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
cdict->customMem = customMem;
cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
cdict->useRowMatchFinder = useRowMatchFinder;
- return cdict;
- }
-}
-
+ return cdict;
+ }
+}
+
ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
@@ -4941,8 +4941,8 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
return cdict;
}
-ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
-{
+ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
+{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
ZSTD_dlm_byCopy, ZSTD_dct_auto,
@@ -4950,8 +4950,8 @@ ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionL
if (cdict)
cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
return cdict;
-}
-
+}
+
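A CDict digests the dictionary once at creation (ZSTD_dlm_byCopy here, so the buffer is duplicated into the workspace and the caller's copy may be discarded) and is then shared, read-only, across any number of compressions. A minimal lifecycle sketch, with dictBuffer and dictSize as hypothetical inputs:

ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3 /* level */);
if (cdict == NULL) { /* allocation failure, or dictionary rejected */ }
/* ... reuse cdict for many compressions, possibly from several threads ... */
ZSTD_freeCDict(cdict);   /* NULL-safe: free on NULL returns 0 */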
ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
@@ -4963,8 +4963,8 @@ ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int
return cdict;
}
-size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
-{
+size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
+{
if (cdict==NULL) return 0; /* support free on NULL */
{ ZSTD_customMem const cMem = cdict->customMem;
int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
@@ -5039,8 +5039,8 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
assert(cdict != NULL);
return cdict->matchState.cParams;
-}
-
+}
+
/*! ZSTD_getDictID_fromCDict() :
* Provides the dictID of the dictionary loaded into `cdict`.
 * If @return == 0, the dictionary is empty, or not conformant to the Zstandard specification.
@@ -5142,13 +5142,13 @@ size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict)
-{
+{
ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
-}
-
-
-
+}
+
+
+
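ZSTD_compress_usingCDict pins the frame parameters to {contentSize on, checksum off, dictID written}; its value lies in amortizing the dictionary load across many small inputs. A minimal sketch, with records[] and sizes[] as hypothetical inputs and dst reused per record:

size_t compress_records(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict,
                        void* dst, size_t dstCapacity,
                        const void* const* records, const size_t* sizes, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++) {
        size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
                                                      records[i], sizes[i], cdict);
        if (ZSTD_isError(cSize)) return cSize;
        /* a real caller would persist dst[0..cSize) before the next iteration */
    }
    return 0;
}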
/* ******************************************************************
* Streaming
********************************************************************/
@@ -6180,13 +6180,13 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
}
-/*-===== Pre-defined compression levels =====-*/
+/*-===== Pre-defined compression levels =====-*/
#include "clevels.h"
-
-int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+
+int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }
-
+
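Note that ZSTD_minCLevel() is strongly negative (the "fast" levels, which trade ratio for speed), so callers should clamp a user-supplied level rather than assume 1..22. A minimal sketch:

int clamp_clevel(int level)
{
    int const lo = ZSTD_minCLevel();   /* -ZSTD_TARGETLENGTH_MAX: fast levels */
    int const hi = ZSTD_maxCLevel();   /* ZSTD_MAX_CLEVEL */
    if (level < lo) return lo;
    if (level > hi) return hi;
    return level;
}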
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
@@ -6270,7 +6270,7 @@ static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMo
* Use dictSize == 0 for unknown or unused.
* Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
-{
+{
U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
int row;
@@ -6292,8 +6292,8 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel,
/* refine parameters based on srcSize & dictSize */
return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
}
-}
-
+}
+
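The tableID computation above is a branchless trick: summing three boolean comparisons maps rSize into one of four size bands, each indexing its own row of pre-tuned parameters in clevels.h (an unknown content size yields a huge rSize and lands in band 0). Spelled out:

/* 0: rSize > 256 KB (including unknown)
 * 1: 128 KB < rSize <= 256 KB
 * 2:  16 KB < rSize <= 128 KB
 * 3:          rSize <=  16 KB  */
static unsigned size_band(unsigned long long rSize)
{
    return (rSize <= 256*1024) + (rSize <= 128*1024) + (rSize <= 16*1024);
}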
/*! ZSTD_getCParams() :
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Size values are optional, provide 0 if not known or unused */
@@ -6303,19 +6303,19 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l
return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
-/*! ZSTD_getParams() :
+/*! ZSTD_getParams() :
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
- ZSTD_parameters params;
+ ZSTD_parameters params;
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
ZSTD_memset(&params, 0, sizeof(params));
- params.cParams = cParams;
+ params.cParams = cParams;
params.fParams.contentSizeFlag = 1;
- return params;
-}
+ return params;
+}
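Since the struct is zero-filled first, every frame parameter except contentSizeFlag starts disabled. A caller-side sketch via the public wrapper, assuming a checksum is wanted and srcSize is a hypothetical hint:

ZSTD_parameters params = ZSTD_getParams(3 /* level */, srcSize, 0 /* no dict */);
params.fParams.checksumFlag = 1;  /* contentSizeFlag is already 1 */
/* hand the result to ZSTD_compress_advanced(), for example */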
/*! ZSTD_getParams() :
* same idea as ZSTD_getCParams()
diff --git a/contrib/libs/zstd/lib/compress/zstd_opt.h b/contrib/libs/zstd/lib/compress/zstd_opt.h
index bde0658a01..627255f53d 100644
--- a/contrib/libs/zstd/lib/compress/zstd_opt.h
+++ b/contrib/libs/zstd/lib/compress/zstd_opt.h
@@ -7,19 +7,19 @@
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
-
+
#ifndef ZSTD_OPT_H
#define ZSTD_OPT_H
-
+
#if defined (__cplusplus)
extern "C" {
#endif
-
+
#include "zstd_compress_internal.h"
-
+
/* used in ZSTD_loadDictionaryContent() */
void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
-
+
size_t ZSTD_compressBlock_btopt(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
@@ -29,7 +29,7 @@ size_t ZSTD_compressBlock_btultra(
size_t ZSTD_compressBlock_btultra2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
+
size_t ZSTD_compressBlock_btopt_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
@@ -44,13 +44,13 @@ size_t ZSTD_compressBlock_btopt_extDict(
size_t ZSTD_compressBlock_btultra_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
-
+
/* note : no btultra2 variant for extDict nor dictMatchState,
* because btultra2 is not meant to work with dictionaries
 * and is specific to the first block (no prefix) */
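All the entry points above share one signature, which lets the compressor select them through a function pointer (the library does this in ZSTD_selectBlockCompressor(); the sketch below is only a toy illustration of that shape, not the real selector):

typedef size_t (*blockCompressor_sketch)(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize);

/* hypothetical: pick an optimal-parser variant for an in-prefix block */
static blockCompressor_sketch pick_opt(int ultra2, int ultra)
{
    if (ultra2) return ZSTD_compressBlock_btultra2;  /* first block only, per the note above */
    return ultra ? ZSTD_compressBlock_btultra : ZSTD_compressBlock_btopt;
}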
#if defined (__cplusplus)
-}
+}
#endif
-
+
#endif /* ZSTD_OPT_H */