author    tobo <tobo@yandex-team.ru>  2022-02-10 16:47:27 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:47:27 +0300
commit    55a7f90e4cd31e9481cace8ee5dfd682c27e810e (patch)
tree      9814fbd1c3effac9b8377c5d604b367b14e2db55 /contrib
parent    7fe839092527589b38f014d854c51565b3c1adfa (diff)
download  ydb-55a7f90e4cd31e9481cace8ee5dfd682c27e810e.tar.gz
Restoring authorship annotation for <tobo@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/libs/cctz/tzdata/factory.cpp                          2
-rw-r--r--  contrib/libs/zstd/CHANGELOG                                  56
-rw-r--r--  contrib/libs/zstd/README.md                                  10
-rw-r--r--  contrib/libs/zstd/lib/README.md                              10
-rw-r--r--  contrib/libs/zstd/lib/common/bitstream.h                     10
-rw-r--r--  contrib/libs/zstd/lib/common/compiler.h                      32
-rw-r--r--  contrib/libs/zstd/lib/common/pool.c                          14
-rw-r--r--  contrib/libs/zstd/lib/common/threading.c                     82
-rw-r--r--  contrib/libs/zstd/lib/common/threading.h                     64
-rw-r--r--  contrib/libs/zstd/lib/common/zstd_internal.h                 52
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_compress.c              854
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_compress_internal.h     168
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_compress_literals.c      12
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_compress_literals.h       2
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_compress_sequences.c      2
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_compress_sequences.h      2
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_cwksp.h                 878
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_double_fast.c             4
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_fast.c                   42
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_lazy.c                   44
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_ldm.c                     8
-rw-r--r--  contrib/libs/zstd/lib/compress/zstd_opt.c                     8
-rw-r--r--  contrib/libs/zstd/lib/compress/zstdmt_compress.c             38
-rw-r--r--  contrib/libs/zstd/lib/decompress/zstd_decompress.c           32
-rw-r--r--  contrib/libs/zstd/lib/decompress/zstd_decompress_block.c    258
-rw-r--r--  contrib/libs/zstd/lib/dictBuilder/cover.c                    14
-rw-r--r--  contrib/libs/zstd/lib/dictBuilder/zdict.c                     2
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v01.c                       2
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v02.c                       4
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v03.c                       4
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v04.c                      12
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v05.c                       2
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v06.c                       2
-rw-r--r--  contrib/libs/zstd/lib/legacy/zstd_v07.c                       2
-rw-r--r--  contrib/libs/zstd/lib/zstd.h                                230
35 files changed, 1479 insertions, 1479 deletions
diff --git a/contrib/libs/cctz/tzdata/factory.cpp b/contrib/libs/cctz/tzdata/factory.cpp
index 954a9a48c1..5a4b4a27b6 100644
--- a/contrib/libs/cctz/tzdata/factory.cpp
+++ b/contrib/libs/cctz/tzdata/factory.cpp
@@ -19,7 +19,7 @@ namespace cctz_extension {
public:
static std::unique_ptr<cctz::ZoneInfoSource> LoadZone(const std::string& zoneName) {
- TString resourceName = TStringBuilder() << "/cctz/tzdata/"sv << zoneName;
+ TString resourceName = TStringBuilder() << "/cctz/tzdata/"sv << zoneName;
TString tzData;
if (!NResource::FindExact(resourceName, &tzData)) {
return nullptr;
diff --git a/contrib/libs/zstd/CHANGELOG b/contrib/libs/zstd/CHANGELOG
index 9dba31576d..4e0045b950 100644
--- a/contrib/libs/zstd/CHANGELOG
+++ b/contrib/libs/zstd/CHANGELOG
@@ -191,37 +191,37 @@ doc : Improved beginner CONTRIBUTING.md docs
doc : New issue templates for zstd
v1.4.4 (Nov 6, 2019)
-perf: Improved decompression speed, by > 10%, by @terrelln
-perf: Better compression speed when re-using a context, by @felixhandte
-perf: Fix compression ratio when compressing large files with small dictionary, by @senhuang42
-perf: zstd reference encoder can generate RLE blocks, by @bimbashrestha
-perf: minor generic speed optimization, by @davidbolvansky
-api: new ability to extract sequences from the parser for analysis, by @bimbashrestha
-api: fixed decoding of magic-less frames, by @terrelln
-api: fixed ZSTD_initCStream_advanced() performance with fast modes, reported by @QrczakMK
-cli: Named pipes support, by @bimbashrestha
-cli: short tar's extension support, by @stokito
-cli: command --output-dir-flat= , generates target files into requested directory, by @senhuang42
-cli: commands --stream-size=# and --size-hint=#, by @nmagerko
-cli: command --exclude-compressed, by @shashank0791
-cli: faster `-t` test mode
-cli: improved some error messages, by @vangyzen
+perf: Improved decompression speed, by > 10%, by @terrelln
+perf: Better compression speed when re-using a context, by @felixhandte
+perf: Fix compression ratio when compressing large files with small dictionary, by @senhuang42
+perf: zstd reference encoder can generate RLE blocks, by @bimbashrestha
+perf: minor generic speed optimization, by @davidbolvansky
+api: new ability to extract sequences from the parser for analysis, by @bimbashrestha
+api: fixed decoding of magic-less frames, by @terrelln
+api: fixed ZSTD_initCStream_advanced() performance with fast modes, reported by @QrczakMK
+cli: Named pipes support, by @bimbashrestha
+cli: short tar's extension support, by @stokito
+cli: command --output-dir-flat= , generates target files into requested directory, by @senhuang42
+cli: commands --stream-size=# and --size-hint=#, by @nmagerko
+cli: command --exclude-compressed, by @shashank0791
+cli: faster `-t` test mode
+cli: improved some error messages, by @vangyzen
cli: fix command `-D dictionary` on Windows, reported by @artyompetrov
cli: fix rare deadlock condition within dictionary builder, by @terrelln
-build: single-file decoder with emscripten compilation script, by @cwoffenden
-build: fixed zlibWrapper compilation on Visual Studio, reported by @bluenlive
-build: fixed deprecation warning for certain gcc version, reported by @jasonma163
-build: fix compilation on old gcc versions, by @cemeyer
-build: improved installation directories for cmake script, by Dmitri Shubin
-pack: modified pkgconfig, for better integration into openwrt, requested by @neheb
-misc: Improved documentation : ZSTD_CLEVEL, DYNAMIC_BMI2, ZSTD_CDict, function deprecation, zstd format
-misc: fixed educational decoder : accept larger literals section, and removed UNALIGNED() macro
-
+build: single-file decoder with emscripten compilation script, by @cwoffenden
+build: fixed zlibWrapper compilation on Visual Studio, reported by @bluenlive
+build: fixed deprecation warning for certain gcc version, reported by @jasonma163
+build: fix compilation on old gcc versions, by @cemeyer
+build: improved installation directories for cmake script, by Dmitri Shubin
+pack: modified pkgconfig, for better integration into openwrt, requested by @neheb
+misc: Improved documentation : ZSTD_CLEVEL, DYNAMIC_BMI2, ZSTD_CDict, function deprecation, zstd format
+misc: fixed educational decoder : accept larger literals section, and removed UNALIGNED() macro
+
v1.4.3 (Aug 20, 2019)
-bug: Fix Dictionary Compression Ratio Regression by @cyan4973 (#1709)
-bug: Fix Buffer Overflow in legacy v0.3 decompression by @felixhandte (#1722)
-build: Add support for IAR C/C++ Compiler for Arm by @joseph0918 (#1705)
-
+bug: Fix Dictionary Compression Ratio Regression by @cyan4973 (#1709)
+bug: Fix Buffer Overflow in legacy v0.3 decompression by @felixhandte (#1722)
+build: Add support for IAR C/C++ Compiler for Arm by @joseph0918 (#1705)
+
v1.4.2 (Jul 26, 2019)
bug: Fix bug in zstd-0.5 decoder by @terrelln (#1696)
bug: Fix seekable decompression in-memory API by @iburinoc (#1695)
diff --git a/contrib/libs/zstd/README.md b/contrib/libs/zstd/README.md
index e883c75262..69720ba2cc 100644
--- a/contrib/libs/zstd/README.md
+++ b/contrib/libs/zstd/README.md
@@ -150,7 +150,7 @@ You can also take a look at [`.travis.yml`](.travis.yml) file for an
example about how Meson is used to build this project.
Note that default build type is **release**.
-
+
### VCPKG
You can build and install zstd [vcpkg](https://github.com/Microsoft/vcpkg/) dependency manager:
@@ -164,18 +164,18 @@ The zstd port in vcpkg is kept up to date by Microsoft team members and communit
If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
### Visual Studio (Windows)
-
+
Going into `build` directory, you will find additional possibilities:
- Projects for Visual Studio 2005, 2008 and 2010.
+ VS2010 project is compatible with VS2012, VS2013, VS2015 and VS2017.
- Automated build scripts for Visual compiler by [@KrzysFR](https://github.com/KrzysFR), in `build/VS_scripts`,
which will build `zstd` cli and `libzstd` library without any need to open Visual Studio solution.
-
+
### Buck
-
+
You can build the zstd binary via buck by executing: `buck build programs:zstd` from the root of the repo.
The output binary will be in `buck-out/gen/programs/`.
-
+
## Testing
You can run quick local smoke tests by executing the `playTest.sh` script from the `src/tests` directory.
diff --git a/contrib/libs/zstd/lib/README.md b/contrib/libs/zstd/lib/README.md
index c155e30c3f..4c9d8f0591 100644
--- a/contrib/libs/zstd/lib/README.md
+++ b/contrib/libs/zstd/lib/README.md
@@ -179,7 +179,7 @@ The compiled executable will require ZSTD DLL which is available at `dll\libzstd
#### Advanced Build options
-
+
The build system requires a hash function in order to
separate object files created with different compilation flags.
By default, it tries to use `md5sum` or equivalent.
@@ -199,15 +199,15 @@ In which case, the hash function doesn't matter.
#### Deprecated API
-
+
Obsolete API on their way out are stored in directory `lib/deprecated`.
At this stage, it contains older streaming prototypes, in `lib/deprecated/zbuff.h`.
These prototypes will be removed in some future version.
Consider migrating code towards supported streaming API exposed in `zstd.h`.
-
-
+
+
#### Miscellaneous
-
+
The other files are not source code. There are :
- `BUCK` : support for `buck` build system (https://buckbuild.com/)
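
The "Deprecated API" note above points users from the old `zbuff.h` streaming prototypes to the supported streaming API in `zstd.h`. As a minimal sketch of that supported path (standard public zstd calls; `ZSTD_isError` checks and short-write handling omitted for brevity):

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    /* Sketch: file-to-file compression with the zstd.h streaming API,
     * the migration target suggested above for zbuff.h users. */
    static void stream_compress(FILE* fin, FILE* fout)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t const inCap  = ZSTD_CStreamInSize();
        size_t const outCap = ZSTD_CStreamOutSize();
        char* const inBuf   = malloc(inCap);
        char* const outBuf  = malloc(outCap);
        size_t readSize;
        while ((readSize = fread(inBuf, 1, inCap, fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {            /* drain the input chunk */
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_continue);
                fwrite(outBuf, 1, output.pos, fout);
            }
        }
        {   ZSTD_inBuffer input = { NULL, 0, 0 };
            size_t remaining;
            do {                                        /* flush frame epilogue */
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
                fwrite(outBuf, 1, output.pos, fout);
            } while (remaining != 0);
        }
        ZSTD_freeCCtx(cctx);
        free(inBuf); free(outBuf);
    }
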
diff --git a/contrib/libs/zstd/lib/common/bitstream.h b/contrib/libs/zstd/lib/common/bitstream.h
index 0383d00ddf..84b6062ff3 100644
--- a/contrib/libs/zstd/lib/common/bitstream.h
+++ b/contrib/libs/zstd/lib/common/bitstream.h
@@ -155,9 +155,9 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
}
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
-# elif defined(__ICCARM__) /* IAR Intrinsic */
- return 31 - __CLZ(val);
+ return __builtin_clz (val) ^ 31;
+# elif defined(__ICCARM__) /* IAR Intrinsic */
+ return 31 - __CLZ(val);
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
11, 14, 16, 18, 22, 25, 3, 30,
@@ -235,7 +235,7 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
- assert(bitC->ptr <= bitC->endPtr);
+ assert(bitC->ptr <= bitC->endPtr);
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
bitC->bitPos &= 7;
@@ -251,7 +251,7 @@ MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
- assert(bitC->ptr <= bitC->endPtr);
+ assert(bitC->ptr <= bitC->endPtr);
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
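
Both restored branches in the BIT_highbit32 hunk compute the index of the highest set bit: `__builtin_clz(val)` counts leading zeros, and since that count lies in [0,31] for a nonzero 32-bit value, `31 - clz` equals `clz ^ 31` (31 is all ones in the low five bits). A tiny self-check, assuming a GCC-compatible compiler:

    #include <assert.h>

    /* highbit32_sketch is a hypothetical stand-in for BIT_highbit32, kept to
     * the GCC branch restored in the diff. */
    static unsigned highbit32_sketch(unsigned val)
    {
        assert(val != 0);   /* clz(0) is undefined; zstd has the same precondition */
        return (unsigned)__builtin_clz(val) ^ 31;
    }

    /* Examples: highbit32_sketch(1) == 0, highbit32_sketch(1000) == 9
     * (1000 = 0b1111101000), highbit32_sketch(0x80000000u) == 31. */
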
diff --git a/contrib/libs/zstd/lib/common/compiler.h b/contrib/libs/zstd/lib/common/compiler.h
index c14ccccba0..516930c01e 100644
--- a/contrib/libs/zstd/lib/common/compiler.h
+++ b/contrib/libs/zstd/lib/common/compiler.h
@@ -25,7 +25,7 @@
# define INLINE_KEYWORD
#endif
-#if defined(__GNUC__) || defined(__ICCARM__)
+#if defined(__GNUC__) || defined(__ICCARM__)
# define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
# define FORCE_INLINE_ATTR __forceinline
@@ -74,18 +74,18 @@
# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
#endif
-/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
-#if defined(__GNUC__)
-# define UNUSED_ATTR __attribute__((unused))
-#else
-# define UNUSED_ATTR
-#endif
-
+/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
+#if defined(__GNUC__)
+# define UNUSED_ATTR __attribute__((unused))
+#else
+# define UNUSED_ATTR
+#endif
+
/* force no inlining */
#ifdef _MSC_VER
# define FORCE_NOINLINE static __declspec(noinline)
#else
-# if defined(__GNUC__) || defined(__ICCARM__)
+# if defined(__GNUC__) || defined(__ICCARM__)
# define FORCE_NOINLINE static __attribute__((__noinline__))
# else
# define FORCE_NOINLINE static
@@ -94,7 +94,7 @@
/* target attribute */
-#if defined(__GNUC__) || defined(__ICCARM__)
+#if defined(__GNUC__) || defined(__ICCARM__)
# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
# define TARGET_ATTRIBUTE(target)
@@ -139,15 +139,15 @@
} \
}
-/* vectorization
+/* vectorization
* older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
* and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
-# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
-# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
-# else
-# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
-# endif
+# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
+# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
+# else
+# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
+# endif
#else
# define DONT_VECTORIZE
#endif
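
These compiler.h hunks only restore annotation on existing portability macros. For orientation, this is roughly how such wrappers get consumed; a hypothetical sketch (the macro names are the real ones from compiler.h, the functions are invented, and "compiler.h" plus <stddef.h> are assumed in scope):

    FORCE_NOINLINE size_t cold_setup_path(size_t n)   /* expands to static + noinline */
    {
        return n * 2 + 1;
    }

    DONT_VECTORIZE   /* keep GCC's tree vectorizer off an intentionally scalar loop */
    static void byte_copy_scalar(char* dst, const char* src, size_t n)
    {
        size_t i;
        for (i = 0; i < n; i++) dst[i] = src[i];
    }

    static UNUSED_ATTR int debug_only_counter;   /* no warning if never referenced */
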
diff --git a/contrib/libs/zstd/lib/common/pool.c b/contrib/libs/zstd/lib/common/pool.c
index 67d3bd8562..2e37cdd73c 100644
--- a/contrib/libs/zstd/lib/common/pool.c
+++ b/contrib/libs/zstd/lib/common/pool.c
@@ -133,13 +133,13 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
ctx->queueTail = 0;
ctx->numThreadsBusy = 0;
ctx->queueEmpty = 1;
- {
- int error = 0;
- error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
- error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
- error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
- if (error) { POOL_free(ctx); return NULL; }
- }
+ {
+ int error = 0;
+ error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
+ error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
+ error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
+ if (error) { POOL_free(ctx); return NULL; }
+ }
ctx->shutdown = 0;
/* Allocate space for the thread handles */
ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
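
The pool.c hunk keeps the three synchronization-primitive initializations in one block that ORs their return codes, so there is exactly one failure check and one cleanup path. The same pattern in isolation, as a sketch (the struct is illustrative, not zstd's):

    #include <pthread.h>
    #include <stdlib.h>

    /* OR the pthread init results together (each returns 0 on success), then
     * take a single failure exit. A production version, like POOL_free, would
     * also destroy whichever primitives did initialize. */
    typedef struct { pthread_mutex_t m; pthread_cond_t push, pop; } queue_sync;

    static queue_sync* queue_sync_create(void)
    {
        queue_sync* qs = (queue_sync*)calloc(1, sizeof(*qs));
        if (qs == NULL) return NULL;
        {   int error = 0;
            error |= pthread_mutex_init(&qs->m, NULL);
            error |= pthread_cond_init(&qs->push, NULL);
            error |= pthread_cond_init(&qs->pop, NULL);
            if (error) { free(qs); return NULL; }   /* one cleanup path */
        }
        return qs;
    }
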
diff --git a/contrib/libs/zstd/lib/common/threading.c b/contrib/libs/zstd/lib/common/threading.c
index 05497ead62..92cf57c195 100644
--- a/contrib/libs/zstd/lib/common/threading.c
+++ b/contrib/libs/zstd/lib/common/threading.c
@@ -15,8 +15,8 @@
* This file will hold wrapper for systems, which do not support pthreads
*/
-#include "threading.h"
-
+#include "threading.h"
+
/* create fake symbol to avoid empty translation unit warning */
int g_ZSTD_threading_useless_symbol;
@@ -75,48 +75,48 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
}
#endif /* ZSTD_MULTITHREAD */
-
-#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)
-
+
+#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)
+
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h"
-
-int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
-{
+
+int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
+{
*mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t));
- if (!*mutex)
- return 1;
- return pthread_mutex_init(*mutex, attr);
-}
-
-int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
-{
- if (!*mutex)
- return 0;
- {
- int const ret = pthread_mutex_destroy(*mutex);
+ if (!*mutex)
+ return 1;
+ return pthread_mutex_init(*mutex, attr);
+}
+
+int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
+{
+ if (!*mutex)
+ return 0;
+ {
+ int const ret = pthread_mutex_destroy(*mutex);
ZSTD_free(*mutex);
- return ret;
- }
-}
-
-int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
-{
+ return ret;
+ }
+}
+
+int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
+{
*cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t));
- if (!*cond)
- return 1;
- return pthread_cond_init(*cond, attr);
-}
-
-int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
-{
- if (!*cond)
- return 0;
- {
- int const ret = pthread_cond_destroy(*cond);
+ if (!*cond)
+ return 1;
+ return pthread_cond_init(*cond, attr);
+}
+
+int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
+{
+ if (!*cond)
+ return 0;
+ {
+ int const ret = pthread_cond_destroy(*cond);
ZSTD_free(*cond);
- return ret;
- }
-}
-
-#endif
+ return ret;
+ }
+}
+
+#endif
diff --git a/contrib/libs/zstd/lib/common/threading.h b/contrib/libs/zstd/lib/common/threading.h
index e9dba89412..fd0060d5aa 100644
--- a/contrib/libs/zstd/lib/common/threading.h
+++ b/contrib/libs/zstd/lib/common/threading.h
@@ -14,8 +14,8 @@
#ifndef THREADING_H_938743
#define THREADING_H_938743
-#include "debug.h"
-
+#include "debug.h"
+
#if defined (__cplusplus)
extern "C" {
#endif
@@ -78,12 +78,12 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
*/
-#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
+#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */
/* === POSIX Systems === */
# include <pthread.h>
-#if DEBUGLEVEL < 1
-
+#if DEBUGLEVEL < 1
+
#define ZSTD_pthread_mutex_t pthread_mutex_t
#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b))
#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a))
@@ -101,33 +101,33 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
-#else /* DEBUGLEVEL >= 1 */
-
-/* Debug implementation of threading.
- * In this implementation we use pointers for mutexes and condition variables.
- * This way, if we forget to init/destroy them the program will crash or ASAN
- * will report leaks.
- */
-
-#define ZSTD_pthread_mutex_t pthread_mutex_t*
-int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr);
-int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex);
-#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock(*(a))
-#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock(*(a))
-
-#define ZSTD_pthread_cond_t pthread_cond_t*
-int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr);
-int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
-#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait(*(a), *(b))
-#define ZSTD_pthread_cond_signal(a) pthread_cond_signal(*(a))
-#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast(*(a))
-
-#define ZSTD_pthread_t pthread_t
-#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
-
-#endif
-
+#else /* DEBUGLEVEL >= 1 */
+
+/* Debug implementation of threading.
+ * In this implementation we use pointers for mutexes and condition variables.
+ * This way, if we forget to init/destroy them the program will crash or ASAN
+ * will report leaks.
+ */
+
+#define ZSTD_pthread_mutex_t pthread_mutex_t*
+int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr);
+int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex);
+#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock(*(a))
+#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock(*(a))
+
+#define ZSTD_pthread_cond_t pthread_cond_t*
+int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr);
+int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);
+#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait(*(a), *(b))
+#define ZSTD_pthread_cond_signal(a) pthread_cond_signal(*(a))
+#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast(*(a))
+
+#define ZSTD_pthread_t pthread_t
+#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
+#define ZSTD_pthread_join(a, b) pthread_join((a),(b))
+
+#endif
+
#else /* ZSTD_MULTITHREAD not defined */
/* No multithreading support */
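
The threading.c/threading.h hunks above carry zstd's debug threading mode: when DEBUGLEVEL >= 1 on POSIX, ZSTD_pthread_mutex_t becomes a pthread_mutex_t* and init/destroy gain malloc/free, so a forgotten init crashes and a forgotten destroy surfaces as an ASAN leak, exactly as the restored comment says. A sketch of a caller (the function itself is hypothetical; "threading.h" is the header shown above):

    #include "threading.h"

    /* Code written against the ZSTD_pthread_* wrappers compiles identically
     * in release and debug builds, but under DEBUGLEVEL >= 1 the init
     * allocates and the destroy frees, making misuse observable. */
    static int mutex_roundtrip_demo(void)
    {
        ZSTD_pthread_mutex_t lock;
        if (ZSTD_pthread_mutex_init(&lock, NULL)) return 1;  /* may allocate */
        ZSTD_pthread_mutex_lock(&lock);
        ZSTD_pthread_mutex_unlock(&lock);
        return ZSTD_pthread_mutex_destroy(&lock);            /* frees in debug mode */
    }
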
diff --git a/contrib/libs/zstd/lib/common/zstd_internal.h b/contrib/libs/zstd/lib/common/zstd_internal.h
index adb235592e..1dee37cdbe 100644
--- a/contrib/libs/zstd/lib/common/zstd_internal.h
+++ b/contrib/libs/zstd/lib/common/zstd_internal.h
@@ -198,43 +198,43 @@ static void ZSTD_copy16(void* dst, const void* src) {
}
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
-#define WILDCOPY_OVERLENGTH 32
-#define WILDCOPY_VECLEN 16
+#define WILDCOPY_OVERLENGTH 32
+#define WILDCOPY_VECLEN 16
typedef enum {
ZSTD_no_overlap,
- ZSTD_overlap_src_before_dst
+ ZSTD_overlap_src_before_dst
/* ZSTD_overlap_dst_before_src, */
} ZSTD_overlap_e;
/*! ZSTD_wildcopy() :
* Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
- * @param ovtype controls the overlap detection
- * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
- * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
- * The src buffer must be before the dst buffer.
- */
+ * @param ovtype controls the overlap detection
+ * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
+ * The src buffer must be before the dst buffer.
+ */
MEM_STATIC FORCE_INLINE_ATTR
-void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
+void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
- if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
- /* Handle short offset copies. */
- do {
- COPY8(op, ip)
- } while (op < oend);
- } else {
- assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
+ if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
+ /* Handle short offset copies. */
+ do {
+ COPY8(op, ip)
+ } while (op < oend);
+ } else {
+ assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
/* Separate out the first COPY16() call because the copy length is
- * almost certain to be short, so the branches have different
+ * almost certain to be short, so the branches have different
* probabilities. Since it is almost certain to be short, only do
* one COPY16() in the first call. Then, do two calls per loop since
* at that point it is more likely to have a high trip count.
- */
+ */
#ifdef __aarch64__
do {
COPY16(op, ip);
@@ -245,11 +245,11 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e
if (16 >= length) return;
op += 16;
ip += 16;
- do {
- COPY16(op, ip);
- COPY16(op, ip);
- }
- while (op < oend);
+ do {
+ COPY16(op, ip);
+ COPY16(op, ip);
+ }
+ while (op < oend);
#endif
}
}
@@ -378,9 +378,9 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus
}
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
-# elif defined(__ICCARM__) /* IAR Intrinsic */
- return 31 - __CLZ(val);
+ return __builtin_clz (val) ^ 31;
+# elif defined(__ICCARM__) /* IAR Intrinsic */
+ return 31 - __CLZ(val);
# else /* Software version */
static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
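
The ZSTD_wildcopy contract restored above matters for several buffers in this diff: the copy may run past the requested length by up to WILDCOPY_OVERLENGTH (32) bytes, and when src/dst may overlap (offset below WILDCOPY_VECLEN but at least 8) it steps 8 bytes at a time so LZ matches replicate forward correctly. A reduced sketch of that control flow, with memcpy standing in for the COPY8/COPY16 vector moves (not the real implementation):

    #include <stddef.h>
    #include <string.h>

    #define WILDCOPY_VECLEN 16   /* mirrors the constant in the hunk */

    static void wildcopy_sketch(char* dst, const char* src, ptrdiff_t length, int mayOverlap)
    {
        char* const oend = dst + length;
        if (mayOverlap && (dst - src) < WILDCOPY_VECLEN) {
            /* offsets in [8, 16): 8-byte steps never self-overlap, and the
             * copied bytes replicate forward, as LZ matches require */
            do { memcpy(dst, src, 8); dst += 8; src += 8; } while (dst < oend);
        } else {
            do { memcpy(dst, src, 16); dst += 16; src += 16; } while (dst < oend);
        }
        /* Either branch may run past oend: the caller must own up to
         * WILDCOPY_OVERLENGTH (32) bytes of slack, per the contract above. */
    }
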
diff --git a/contrib/libs/zstd/lib/compress/zstd_compress.c b/contrib/libs/zstd/lib/compress/zstd_compress.c
index 2a6ca5f2b8..f06456af92 100644
--- a/contrib/libs/zstd/lib/compress/zstd_compress.c
+++ b/contrib/libs/zstd/lib/compress/zstd_compress.c
@@ -76,13 +76,13 @@ struct ZSTD_CDict_s {
const void* dictContent;
size_t dictContentSize;
ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
- U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
- ZSTD_cwksp workspace;
+ U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
+ ZSTD_cwksp workspace;
ZSTD_matchState_t matchState;
ZSTD_compressedBlockState_t cBlockState;
ZSTD_customMem customMem;
U32 dictID;
- int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
+ int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
* row-based matchfinder. Unless the cdict is reloaded, we will use
* the same greedy/lazy matchfinder at compression time.
@@ -120,23 +120,23 @@ ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
{
- ZSTD_cwksp ws;
- ZSTD_CCtx* cctx;
+ ZSTD_cwksp ws;
+ ZSTD_CCtx* cctx;
if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
-
- cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
+
+ cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
if (cctx == NULL) return NULL;
ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
- ZSTD_cwksp_move(&cctx->workspace, &ws);
+ ZSTD_cwksp_move(&cctx->workspace, &ws);
cctx->staticSize = workspaceSize;
/* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
- cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
- cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+ cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+ cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
return cctx;
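
This hunk is ZSTD_initStaticCCtx carving the context itself, both block states, and the entropy workspace out of one caller-supplied, 8-byte-aligned buffer through ZSTD_cwksp. A usage sketch with the public API (these declarations live behind ZSTD_STATIC_LINKING_ONLY in zstd.h; error handling trimmed):

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_initStaticCCtx & size estimate are advanced API */
    #include <stdlib.h>
    #include <zstd.h>

    /* The workspace must be 8-byte aligned (malloc qualifies on mainstream
     * platforms) and large enough; ZSTD_estimateCCtxSize() returns a
     * sufficient size for one-shot compression at the given level. */
    static int compress_with_static_cctx(void* dst, size_t dstCap,
                                         const void* src, size_t srcSize, int level)
    {
        size_t const wkspSize = ZSTD_estimateCCtxSize(level);
        void* const wksp = malloc(wkspSize);
        ZSTD_CCtx* cctx;
        size_t r;
        if (wksp == NULL) return -1;
        cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
        if (cctx == NULL) { free(wksp); return -1; }   /* too small or misaligned */
        r = ZSTD_compressCCtx(cctx, dst, dstCap, src, srcSize, level);
        free(wksp);            /* a static cctx is never ZSTD_freeCCtx()-ed */
        return ZSTD_isError(r) ? -1 : (int)r;
    }
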
@@ -169,7 +169,7 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
#ifdef ZSTD_MULTITHREAD
ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
#endif
- ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
+ ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
}
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
@@ -177,13 +177,13 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
if (cctx==NULL) return 0; /* support free on NULL */
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"not compatible with static CCtx");
- {
- int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
- ZSTD_freeCCtxContent(cctx);
- if (!cctxInWorkspace) {
+ {
+ int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
+ ZSTD_freeCCtxContent(cctx);
+ if (!cctxInWorkspace) {
ZSTD_customFree(cctx, cctx->customMem);
- }
- }
+ }
+ }
return 0;
}
@@ -202,9 +202,9 @@ static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support sizeof on NULL */
- /* cctx may be in the workspace */
- return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
- + ZSTD_cwksp_sizeof(&cctx->workspace)
+ /* cctx may be in the workspace */
+ return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
+ + ZSTD_cwksp_sizeof(&cctx->workspace)
+ ZSTD_sizeof_localDict(cctx->localDict)
+ ZSTD_sizeof_mtctx(cctx);
}
@@ -522,7 +522,7 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
case ZSTD_c_forceAttachDict:
ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
bounds.lowerBound = ZSTD_dictDefaultAttach;
- bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
+ bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
return bounds;
case ZSTD_c_literalCompressionMode:
@@ -536,11 +536,11 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
return bounds;
- case ZSTD_c_srcSizeHint:
- bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
- bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
- return bounds;
-
+ case ZSTD_c_srcSizeHint:
+ bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
+ bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
+ return bounds;
+
case ZSTD_c_stableInBuffer:
case ZSTD_c_stableOutBuffer:
bounds.lowerBound = (int)ZSTD_bm_buffered;
@@ -628,7 +628,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
case ZSTD_c_forceAttachDict:
case ZSTD_c_literalCompressionMode:
case ZSTD_c_targetCBlockSize:
- case ZSTD_c_srcSizeHint:
+ case ZSTD_c_srcSizeHint:
case ZSTD_c_stableInBuffer:
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
@@ -683,7 +683,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
case ZSTD_c_ldmMinMatch:
case ZSTD_c_ldmBucketSizeLog:
case ZSTD_c_targetCBlockSize:
- case ZSTD_c_srcSizeHint:
+ case ZSTD_c_srcSizeHint:
case ZSTD_c_stableInBuffer:
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
@@ -715,33 +715,33 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
else
CCtxParams->compressionLevel = value;
- if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
+ if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
return 0; /* return type (size_t) cannot represent negative values */
}
case ZSTD_c_windowLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_windowLog, value);
- CCtxParams->cParams.windowLog = (U32)value;
+ CCtxParams->cParams.windowLog = (U32)value;
return CCtxParams->cParams.windowLog;
case ZSTD_c_hashLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_hashLog, value);
- CCtxParams->cParams.hashLog = (U32)value;
+ CCtxParams->cParams.hashLog = (U32)value;
return CCtxParams->cParams.hashLog;
case ZSTD_c_chainLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_chainLog, value);
- CCtxParams->cParams.chainLog = (U32)value;
+ CCtxParams->cParams.chainLog = (U32)value;
return CCtxParams->cParams.chainLog;
case ZSTD_c_searchLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_searchLog, value);
- CCtxParams->cParams.searchLog = (U32)value;
- return (size_t)value;
+ CCtxParams->cParams.searchLog = (U32)value;
+ return (size_t)value;
case ZSTD_c_minMatch :
if (value!=0) /* 0 => use default */
@@ -876,12 +876,12 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
CCtxParams->targetCBlockSize = value;
return CCtxParams->targetCBlockSize;
- case ZSTD_c_srcSizeHint :
- if (value!=0) /* 0 ==> default */
- BOUNDCHECK(ZSTD_c_srcSizeHint, value);
- CCtxParams->srcSizeHint = value;
- return CCtxParams->srcSizeHint;
-
+ case ZSTD_c_srcSizeHint :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_srcSizeHint, value);
+ CCtxParams->srcSizeHint = value;
+ return CCtxParams->srcSizeHint;
+
case ZSTD_c_stableInBuffer:
BOUNDCHECK(ZSTD_c_stableInBuffer, value);
CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
@@ -1025,9 +1025,9 @@ size_t ZSTD_CCtxParams_getParameter(
case ZSTD_c_targetCBlockSize :
*value = (int)CCtxParams->targetCBlockSize;
break;
- case ZSTD_c_srcSizeHint :
- *value = (int)CCtxParams->srcSizeHint;
- break;
+ case ZSTD_c_srcSizeHint :
+ *value = (int)CCtxParams->srcSizeHint;
+ break;
case ZSTD_c_stableInBuffer :
*value = (int)CCtxParams->inBufferMode;
break;
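
The parameter hunks restore ZSTD_c_srcSizeHint across its bounds-check, set, and get paths; the hint substitutes for an unknown source size when compression parameters are selected (see ZSTD_getCParamsFromCCtxParams in the next hunk). Setting and reading it through the public API, as a sketch:

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_c_srcSizeHint & ZSTD_CCtx_getParameter are advanced API */
    #include <zstd.h>

    /* With the real size unknown, the hint steers parameter selection. */
    static int tune_for_4k_inputs(ZSTD_CCtx* cctx)
    {
        int hint = 0;
        if (ZSTD_isError(ZSTD_CCtx_setParameter(cctx, ZSTD_c_srcSizeHint, 4096)))
            return -1;
        ZSTD_CCtx_getParameter(cctx, ZSTD_c_srcSizeHint, &hint);
        return hint;   /* == 4096 */
    }
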
@@ -1401,10 +1401,10 @@ static void ZSTD_overrideCParams(
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
- ZSTD_compressionParameters cParams;
- if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
- srcSizeHint = CCtxParams->srcSizeHint;
- }
+ ZSTD_compressionParameters cParams;
+ if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
+ srcSizeHint = CCtxParams->srcSizeHint;
+ }
cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
@@ -1425,13 +1425,13 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
: 0;
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
- size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
- /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
- * surrounded by redzones in ASAN. */
- size_t const tableSpace = chainSize * sizeof(U32)
- + hSize * sizeof(U32)
- + h3Size * sizeof(U32);
- size_t const optPotentialSpace =
+ size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+ /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
+ * surrounded by redzones in ASAN. */
+ size_t const tableSpace = chainSize * sizeof(U32)
+ + hSize * sizeof(U32)
+ + h3Size * sizeof(U32);
+ size_t const optPotentialSpace =
ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
@@ -1689,42 +1689,42 @@ static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
ms->dictMatchState = NULL;
}
-/**
- * Controls, for this matchState reset, whether the tables need to be cleared /
- * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
- * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
- * subsequent operation will overwrite the table space anyways (e.g., copying
- * the matchState contents in from a CDict).
- */
-typedef enum {
- ZSTDcrp_makeClean,
- ZSTDcrp_leaveDirty
-} ZSTD_compResetPolicy_e;
-
-/**
- * Controls, for this matchState reset, whether indexing can continue where it
- * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
- * (ZSTDirp_reset).
- */
-typedef enum {
- ZSTDirp_continue,
- ZSTDirp_reset
-} ZSTD_indexResetPolicy_e;
-
-typedef enum {
- ZSTD_resetTarget_CDict,
- ZSTD_resetTarget_CCtx
-} ZSTD_resetTarget_e;
-
-
-static size_t
+/**
+ * Controls, for this matchState reset, whether the tables need to be cleared /
+ * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
+ * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
+ * subsequent operation will overwrite the table space anyways (e.g., copying
+ * the matchState contents in from a CDict).
+ */
+typedef enum {
+ ZSTDcrp_makeClean,
+ ZSTDcrp_leaveDirty
+} ZSTD_compResetPolicy_e;
+
+/**
+ * Controls, for this matchState reset, whether indexing can continue where it
+ * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
+ * (ZSTDirp_reset).
+ */
+typedef enum {
+ ZSTDirp_continue,
+ ZSTDirp_reset
+} ZSTD_indexResetPolicy_e;
+
+typedef enum {
+ ZSTD_resetTarget_CDict,
+ ZSTD_resetTarget_CCtx
+} ZSTD_resetTarget_e;
+
+
+static size_t
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
- ZSTD_cwksp* ws,
+ ZSTD_cwksp* ws,
const ZSTD_compressionParameters* cParams,
const ZSTD_paramSwitch_e useRowMatchFinder,
- const ZSTD_compResetPolicy_e crp,
- const ZSTD_indexResetPolicy_e forceResetIndex,
- const ZSTD_resetTarget_e forWho)
+ const ZSTD_compResetPolicy_e crp,
+ const ZSTD_indexResetPolicy_e forceResetIndex,
+ const ZSTD_resetTarget_e forWho)
{
/* disable chain table allocation for fast or row-based strategies */
size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
@@ -1733,46 +1733,46 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
: 0;
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
- size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+ size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
- DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
+ DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
assert(useRowMatchFinder != ZSTD_ps_auto);
- if (forceResetIndex == ZSTDirp_reset) {
+ if (forceResetIndex == ZSTDirp_reset) {
ZSTD_window_init(&ms->window);
- ZSTD_cwksp_mark_tables_dirty(ws);
- }
+ ZSTD_cwksp_mark_tables_dirty(ws);
+ }
ms->hashLog3 = hashLog3;
-
+
ZSTD_invalidateMatchState(ms);
- assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
-
- ZSTD_cwksp_clear_tables(ws);
-
- DEBUGLOG(5, "reserving table space");
- /* table Space */
- ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
- ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
- ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
- RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
- "failed a workspace allocation in ZSTD_reset_matchState");
-
- DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
- if (crp!=ZSTDcrp_leaveDirty) {
- /* reset tables only */
- ZSTD_cwksp_clean_tables(ws);
- }
-
+ assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
+
+ ZSTD_cwksp_clear_tables(ws);
+
+ DEBUGLOG(5, "reserving table space");
+ /* table Space */
+ ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
+ ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
+ ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
+ RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+ "failed a workspace allocation in ZSTD_reset_matchState");
+
+ DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
+ if (crp!=ZSTDcrp_leaveDirty) {
+ /* reset tables only */
+ ZSTD_cwksp_clean_tables(ws);
+ }
+
/* opt parser space */
if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
DEBUGLOG(4, "reserving optimal parser space");
- ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
- ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
- ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
- ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
- ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
- ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
+ ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
+ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
+ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
+ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
+ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
}
if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
@@ -1790,9 +1790,9 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ms->cParams = *cParams;
- RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
- "failed a workspace allocation in ZSTD_reset_matchState");
- return 0;
+ RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+ "failed a workspace allocation in ZSTD_reset_matchState");
+ return 0;
}
/* ZSTD_indexTooCloseToMax() :
@@ -1831,12 +1831,12 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ZSTD_compResetPolicy_e const crp,
ZSTD_buffered_policy_e const zbuff)
{
- ZSTD_cwksp* const ws = &zc->workspace;
+ ZSTD_cwksp* const ws = &zc->workspace;
DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
(U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
- zc->isFirstBlock = 1;
+ zc->isFirstBlock = 1;
/* Set applied params early so we can modify them for LDM,
* and point params at the applied params.
@@ -1870,7 +1870,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
ZSTD_indexResetPolicy_e needsIndexReset =
(indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;
-
+
size_t const neededSpace =
ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
@@ -1878,43 +1878,43 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
int resizeWorkspace;
FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
-
+
if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
-
+
{ /* Check if workspace is large enough, alloc a new one if needed */
- int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
- int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
+ int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
+ int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
resizeWorkspace = workspaceTooSmall || workspaceWasteful;
DEBUGLOG(4, "Need %zu B workspace", neededSpace);
DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
if (resizeWorkspace) {
- DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
- ZSTD_cwksp_sizeof(ws) >> 10,
+ DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
+ ZSTD_cwksp_sizeof(ws) >> 10,
neededSpace >> 10);
RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
- needsIndexReset = ZSTDirp_reset;
+ needsIndexReset = ZSTDirp_reset;
- ZSTD_cwksp_free(ws, zc->customMem);
+ ZSTD_cwksp_free(ws, zc->customMem);
FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
-
- DEBUGLOG(5, "reserving object space");
+
+ DEBUGLOG(5, "reserving object space");
/* Statically sized space.
* entropyWorkspace never moves,
* though prev/next block swap places */
- assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
- zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
- RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
- zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
- RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
+ assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
+ zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+ RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
+ zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+ RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
} }
- ZSTD_cwksp_clear(ws);
-
+ ZSTD_cwksp_clear(ws);
+
/* init params */
zc->blockState.matchState.cParams = params->cParams;
zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
@@ -1936,61 +1936,61 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
/* ZSTD_wildcopy() is used to copy into the literals buffer,
* so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
*/
- zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
+ zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
zc->seqStore.maxNbLit = blockSize;
- /* buffers */
+ /* buffers */
zc->bufferedPolicy = zbuff;
- zc->inBuffSize = buffInSize;
- zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
- zc->outBuffSize = buffOutSize;
- zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
-
+ zc->inBuffSize = buffInSize;
+ zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
+ zc->outBuffSize = buffOutSize;
+ zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
+
/* ldm bucketOffsets table */
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
- /* TODO: avoid memset? */
+ /* TODO: avoid memset? */
size_t const numBuckets =
((size_t)1) << (params->ldmParams.hashLog -
params->ldmParams.bucketSizeLog);
zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
}
-
- /* sequences storage */
+
+ /* sequences storage */
ZSTD_referenceExternalSequences(zc, NULL, 0);
- zc->seqStore.maxNbSeq = maxNbSeq;
- zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
- zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
- zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
- zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
-
- FORWARD_IF_ERROR(ZSTD_reset_matchState(
- &zc->blockState.matchState,
- ws,
+ zc->seqStore.maxNbSeq = maxNbSeq;
+ zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
+
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &zc->blockState.matchState,
+ ws,
&params->cParams,
params->useRowMatchFinder,
- crp,
- needsIndexReset,
+ crp,
+ needsIndexReset,
ZSTD_resetTarget_CCtx), "");
- /* ldm hash table */
+ /* ldm hash table */
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
- /* TODO: avoid memset? */
+ /* TODO: avoid memset? */
size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
- zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
+ zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
- zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
- zc->maxNbLdmSequences = maxNbLdmSeq;
-
+ zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
+ zc->maxNbLdmSequences = maxNbLdmSeq;
+
ZSTD_window_init(&zc->ldmState.window);
zc->ldmState.loadedDictEnd = 0;
- }
-
+ }
+
DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));
zc->initialized = 1;
-
+
return 0;
}
}
@@ -2023,7 +2023,7 @@ static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
};
static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
- const ZSTD_CCtx_params* params,
+ const ZSTD_CCtx_params* params,
U64 pledgedSrcSize)
{
size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
@@ -2124,23 +2124,23 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
}
- ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
+ ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
assert(params.useRowMatchFinder != ZSTD_ps_auto);
-
+
/* copy tables */
{ size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
? ((size_t)1 << cdict_cParams->chainLog)
: 0;
size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
-
+
ZSTD_memcpy(cctx->blockState.matchState.hashTable,
- cdict->matchState.hashTable,
- hSize * sizeof(U32));
+ cdict->matchState.hashTable,
+ hSize * sizeof(U32));
/* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
ZSTD_memcpy(cctx->blockState.matchState.chainTable,
- cdict->matchState.chainTable,
- chainSize * sizeof(U32));
+ cdict->matchState.chainTable,
+ chainSize * sizeof(U32));
}
/* copy tag table */
if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
@@ -2152,14 +2152,14 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
}
/* Zero the hashTable3, since the cdict never fills it */
- { int const h3log = cctx->blockState.matchState.hashLog3;
- size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
+ { int const h3log = cctx->blockState.matchState.hashLog3;
+ size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
assert(cdict->matchState.hashLog3 == 0);
ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
}
- ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
-
+ ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
+
/* copy dictionary offsets */
{ ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
@@ -2182,7 +2182,7 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
* in-place. We decide here which strategy to use. */
static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
- const ZSTD_CCtx_params* params,
+ const ZSTD_CCtx_params* params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
@@ -2192,10 +2192,10 @@ static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
return ZSTD_resetCCtx_byAttachingCDict(
- cctx, cdict, *params, pledgedSrcSize, zbuff);
+ cctx, cdict, *params, pledgedSrcSize, zbuff);
} else {
return ZSTD_resetCCtx_byCopyingCDict(
- cctx, cdict, *params, pledgedSrcSize, zbuff);
+ cctx, cdict, *params, pledgedSrcSize, zbuff);
}
}
@@ -2228,7 +2228,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
params.fParams = fParams;
ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
/* loadedDictSize */ 0,
- ZSTDcrp_leaveDirty, zbuff);
+ ZSTDcrp_leaveDirty, zbuff);
assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
@@ -2236,8 +2236,8 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
}
- ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
-
+ ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
+
/* copy tables */
{ size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
srcCCtx->appliedParams.useRowMatchFinder,
@@ -2245,22 +2245,22 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
: 0;
size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
- int const h3log = srcCCtx->blockState.matchState.hashLog3;
- size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
-
+ int const h3log = srcCCtx->blockState.matchState.hashLog3;
+ size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
+
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
- srcCCtx->blockState.matchState.hashTable,
- hSize * sizeof(U32));
+ srcCCtx->blockState.matchState.hashTable,
+ hSize * sizeof(U32));
ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
- srcCCtx->blockState.matchState.chainTable,
- chainSize * sizeof(U32));
+ srcCCtx->blockState.matchState.chainTable,
+ chainSize * sizeof(U32));
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
- srcCCtx->blockState.matchState.hashTable3,
- h3Size * sizeof(U32));
+ srcCCtx->blockState.matchState.hashTable3,
+ h3Size * sizeof(U32));
}
- ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
-
+ ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
+
/* copy dictionary offsets */
{
const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
@@ -2314,20 +2314,20 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa
U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
assert(size < (1U<<31)); /* can be casted to int */
-
+
#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
- /* To validate that the table re-use logic is sound, and that we don't
- * access table space that we haven't cleaned, we re-"poison" the table
- * space every time we mark it dirty.
- *
- * This function however is intended to operate on those dirty tables and
- * re-clean them. So when this function is used correctly, we can unpoison
- * the memory it operated on. This introduces a blind spot though, since
- * if we now try to operate on __actually__ poisoned memory, we will not
- * detect that. */
- __msan_unpoison(table, size * sizeof(U32));
-#endif
-
+ /* To validate that the table re-use logic is sound, and that we don't
+ * access table space that we haven't cleaned, we re-"poison" the table
+ * space every time we mark it dirty.
+ *
+ * This function however is intended to operate on those dirty tables and
+ * re-clean them. So when this function is used correctly, we can unpoison
+ * the memory it operated on. This introduces a blind spot though, since
+ * if we now try to operate on __actually__ poisoned memory, we will not
+ * detect that. */
+ __msan_unpoison(table, size * sizeof(U32));
+#endif
+
for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
int column;
for (column=0; column<ZSTD_ROWSIZE; column++) {
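
The restored comment in ZSTD_reduceTable_internal describes zstd's MSAN discipline: table space is re-poisoned whenever it is marked dirty, and this function, which legitimately reads dirty tables, unpoisons its range first, accepting the blind spot the comment names. The pairing in miniature, assuming clang with -fsanitize=memory (names are illustrative):

    #include <sanitizer/msan_interface.h>
    #include <stddef.h>

    static void mark_table_dirty(unsigned* table, size_t n)
    {
        __msan_poison(table, n * sizeof(*table));     /* stale reads now trap */
    }

    static void reduce_table_sketch(unsigned* table, size_t n, unsigned reducer)
    {
        size_t i;
        __msan_unpoison(table, n * sizeof(*table));   /* this reader is exempt */
        for (i = 0; i < n; i++)
            table[i] = (table[i] < reducer) ? 0 : (table[i] - reducer);
    }
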
@@ -2576,7 +2576,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
- void* entropyWorkspace, size_t entropyWkspSize,
+ void* entropyWorkspace, size_t entropyWkspSize,
const int bmi2)
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
@@ -2608,14 +2608,14 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart;
/* Base suspicion of uncompressibility on ratio of literals to sequences */
unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
- size_t const litSize = (size_t)(seqStorePtr->lit - literals);
+ size_t const litSize = (size_t)(seqStorePtr->lit - literals);
size_t const cSize = ZSTD_compressLiterals(
&prevEntropy->huf, &nextEntropy->huf,
cctxParams->cParams.strategy,
ZSTD_literalsCompressionIsDisabled(cctxParams),
op, dstCapacity,
literals, litSize,
- entropyWorkspace, entropyWkspSize,
+ entropyWorkspace, entropyWkspSize,
bmi2, suspectUncompressible);
FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
assert(cSize <= dstCapacity);
@@ -2625,22 +2625,22 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
/* Sequences Header */
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
dstSize_tooSmall, "Can't fit seq hdr in output buf!");
- if (nbSeq < 128) {
+ if (nbSeq < 128) {
*op++ = (BYTE)nbSeq;
- } else if (nbSeq < LONGNBSEQ) {
- op[0] = (BYTE)((nbSeq>>8) + 0x80);
- op[1] = (BYTE)nbSeq;
- op+=2;
- } else {
- op[0]=0xFF;
- MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
- op+=3;
- }
+ } else if (nbSeq < LONGNBSEQ) {
+ op[0] = (BYTE)((nbSeq>>8) + 0x80);
+ op[1] = (BYTE)nbSeq;
+ op+=2;
+ } else {
+ op[0]=0xFF;
+ MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
+ op+=3;
+ }
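The if/else chain above packs the sequence count into 1, 2, or 3 bytes. A standalone sketch of the same scheme, assuming LONGNBSEQ == 0x7F00 (its value in zstd's internals):

    #include <stddef.h>
    #include <stdint.h>
    #define LONGNBSEQ 0x7F00  /* assumed, per zstd_internal.h */

    static size_t writeNbSeq(uint8_t* op, unsigned nbSeq) {
        if (nbSeq < 128) {            /* 1 byte: values 0..127 */
            op[0] = (uint8_t)nbSeq;
            return 1;
        }
        if (nbSeq < LONGNBSEQ) {      /* 2 bytes: first byte has the high bit set */
            op[0] = (uint8_t)((nbSeq >> 8) + 0x80);
            op[1] = (uint8_t)nbSeq;
            return 2;
        }
        op[0] = 0xFF;                 /* 3 bytes: escape byte, then LE16 delta */
        op[1] = (uint8_t)(nbSeq - LONGNBSEQ);
        op[2] = (uint8_t)((nbSeq - LONGNBSEQ) >> 8);
        return 3;
    }

For example, nbSeq = 1000 encodes as {0x83, 0xE8}, and nbSeq = 0x8000 as {0xFF, 0x00, 0x01}.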
assert(op <= oend);
if (nbSeq==0) {
/* Copy the old tables over as if we repeated them */
ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
- return (size_t)(op - ostart);
+ return (size_t)(op - ostart);
}
{
ZSTD_symbolEncodingTypeStats_t stats;
@@ -2658,7 +2658,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
}
{ size_t const bitstreamSize = ZSTD_encodeSequences(
- op, (size_t)(oend - op),
+ op, (size_t)(oend - op),
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
@@ -2685,7 +2685,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
}
DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
- return (size_t)(op - ostart);
+ return (size_t)(op - ostart);
}
MEM_STATIC size_t
@@ -2695,13 +2695,13 @@ ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
size_t srcSize,
- void* entropyWorkspace, size_t entropyWkspSize,
+ void* entropyWorkspace, size_t entropyWkspSize,
int bmi2)
{
size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
seqStorePtr, prevEntropy, nextEntropy, cctxParams,
dst, dstCapacity,
- entropyWorkspace, entropyWkspSize, bmi2);
+ entropyWorkspace, entropyWkspSize, bmi2);
if (cSize == 0) return 0;
/* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
     * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
@@ -2892,20 +2892,20 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
return ZSTDbss_compress;
}
-static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
-{
- const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
+static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
+{
+ const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
const seqDef* seqStoreSeqs = seqStore->sequencesStart;
size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
size_t literalsRead = 0;
size_t lastLLSize;
-
- ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
+
+ ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
size_t i;
repcodes_t updatedRepcodes;
-
- assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
+
+ assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
/* Ensure we have enough space for last literals "sequence" */
assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
@@ -2914,15 +2914,15 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
outSeqs[i].litLength = seqStoreSeqs[i].litLength;
outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
outSeqs[i].rep = 0;
-
- if (i == seqStore->longLengthPos) {
+
+ if (i == seqStore->longLengthPos) {
if (seqStore->longLengthType == ZSTD_llt_literalLength) {
- outSeqs[i].litLength += 0x10000;
+ outSeqs[i].litLength += 0x10000;
} else if (seqStore->longLengthType == ZSTD_llt_matchLength) {
- outSeqs[i].matchLength += 0x10000;
- }
- }
-
+ outSeqs[i].matchLength += 0x10000;
+ }
+ }
+
if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
/* Derive the correct offset corresponding to a repcode */
outSeqs[i].rep = seqStoreSeqs[i].offBase;
@@ -2931,11 +2931,11 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
} else {
if (outSeqs[i].rep == 3) {
rawOffset = updatedRepcodes.rep[0] - 1;
- } else {
+ } else {
rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
- }
- }
- }
+ }
+ }
+ }
outSeqs[i].offset = rawOffset;
/* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
so we provide seqStoreSeqs[i].offset - 1 */
@@ -2943,7 +2943,7 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
seqStoreSeqs[i].offBase - 1,
seqStoreSeqs[i].litLength == 0);
literalsRead += outSeqs[i].litLength;
- }
+ }
/* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
* If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
* for the block boundary, according to the API.
@@ -2954,28 +2954,28 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
seqStoreSeqSize++;
zc->seqCollector.seqIndex += seqStoreSeqSize;
-}
-
+}
+
size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize)
-{
- const size_t dstCapacity = ZSTD_compressBound(srcSize);
+{
+ const size_t dstCapacity = ZSTD_compressBound(srcSize);
void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
- SeqCollector seqCollector;
-
+ SeqCollector seqCollector;
+
RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
-
- seqCollector.collectSequences = 1;
- seqCollector.seqStart = outSeqs;
- seqCollector.seqIndex = 0;
- seqCollector.maxSequences = outSeqsSize;
- zc->seqCollector = seqCollector;
-
- ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
+
+ seqCollector.collectSequences = 1;
+ seqCollector.seqStart = outSeqs;
+ seqCollector.seqIndex = 0;
+ seqCollector.maxSequences = outSeqsSize;
+ zc->seqCollector = seqCollector;
+
+ ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
ZSTD_customFree(dst, ZSTD_defaultCMem);
- return zc->seqCollector.seqIndex;
-}
-
+ return zc->seqCollector.seqIndex;
+}
+
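ZSTD_generateSequences() above simply drives a normal compression with the collector armed, then discards the compressed output. A hedged usage sketch; the prototypes are experimental (ZSTD_STATIC_LINKING_ONLY), and the outSeqs sizing is an illustrative over-allocation (matches need >= 3 bytes each, plus one trailing-literals entry per 128 KB block under default parameters), not an official bound:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>
    #include <stdlib.h>

    static size_t collect_sequences(const void* src, size_t srcSize,
                                    ZSTD_Sequence** seqsOut) {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t const maxSeqs = srcSize / 3 + srcSize / (128 * 1024) + 2;
        ZSTD_Sequence* const seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
        size_t nbSeqs = 0;
        if (cctx && seqs)
            nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
        ZSTD_freeCCtx(cctx);
        *seqsOut = seqs;  /* caller frees; see ZSTD_mergeBlockDelimiters() below */
        return nbSeqs;
    }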
size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
size_t in = 0;
size_t out = 0;
@@ -3000,13 +3000,13 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) {
const size_t unrollSize = sizeof(size_t) * 4;
const size_t unrollMask = unrollSize - 1;
const size_t prefixLength = length & unrollMask;
- size_t i;
+ size_t i;
size_t u;
if (length == 1) return 1;
/* Check if prefix is RLE first before using unrolled loop */
if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
return 0;
- }
+ }
for (i = prefixLength; i != length; i += unrollSize) {
for (u = 0; u < unrollSize; u += sizeof(size_t)) {
if (MEM_readST(ip + i + u) != valueST) {
@@ -3014,9 +3014,9 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) {
}
}
}
- return 1;
-}
-
+ return 1;
+}
+
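The word-at-a-time scan above is behaviorally equivalent to this naive reference (a block is RLE iff every byte equals the first):

    #include <stddef.h>

    static int isRLE_naive(const unsigned char* src, size_t length) {
        size_t i;
        for (i = 1; i < length; i++)
            if (src[i] != src[0]) return 0;
        return 1;
    }

The production version first validates the unaligned prefix with ZSTD_count(), then compares sizeof(size_t)-wide words, four per loop iteration.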
/* Returns true if the given block may be RLE.
* This is just a heuristic based on the compressibility.
* It may return both false positives and false negatives.
@@ -3764,29 +3764,29 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 frame)
{
- /* This the upper bound for the length of an rle block.
- * This isn't the actual upper bound. Finding the real threshold
- * needs further investigation.
- */
- const U32 rleMaxLength = 25;
+    /* This is the upper bound for the length of an RLE block.
+     * This isn't the actual upper bound. Finding the real threshold
+     * needs further investigation.
+     */
+    const U32 rleMaxLength = 25;
size_t cSize;
- const BYTE* ip = (const BYTE*)src;
- BYTE* op = (BYTE*)dst;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
- (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
- (unsigned)zc->blockState.matchState.nextToUpdate);
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+ (unsigned)zc->blockState.matchState.nextToUpdate);
{ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
}
- if (zc->seqCollector.collectSequences) {
- ZSTD_copyBlockSequences(zc);
+ if (zc->seqCollector.collectSequences) {
+ ZSTD_copyBlockSequences(zc);
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
- return 0;
- }
-
+ return 0;
+ }
+
/* encode sequences and literals */
cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
&zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
@@ -3796,21 +3796,21 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
zc->bmi2);
- if (frame &&
- /* We don't want to emit our first block as a RLE even if it qualifies because
- * doing so will cause the decoder (cli only) to throw a "should consume all input error."
- * This is only an issue for zstd <= v1.4.3
- */
- !zc->isFirstBlock &&
- cSize < rleMaxLength &&
- ZSTD_isRLE(ip, srcSize))
- {
- cSize = 1;
- op[0] = ip[0];
- }
-
+ if (frame &&
+        /* We don't want to emit our first block as an RLE even if it qualifies because
+         * doing so will cause the decoder (cli only) to throw a "should consume all input" error.
+ * This is only an issue for zstd <= v1.4.3
+ */
+ !zc->isFirstBlock &&
+ cSize < rleMaxLength &&
+ ZSTD_isRLE(ip, srcSize))
+ {
+ cSize = 1;
+ op[0] = ip[0];
+ }
+
out:
- if (!ZSTD_isError(cSize) && cSize > 1) {
+ if (!ZSTD_isError(cSize) && cSize > 1) {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
}
/* We check that dictionaries have offset codes available for the first
@@ -3898,11 +3898,11 @@ static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
return cSize;
}
-static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
- ZSTD_cwksp* ws,
- ZSTD_CCtx_params const* params,
- void const* ip,
- void const* iend)
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ void const* ip,
+ void const* iend)
{
U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
U32 const maxDist = (U32)1 << params->cParams.windowLog;
@@ -3911,9 +3911,9 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
- ZSTD_cwksp_mark_tables_dirty(ws);
+ ZSTD_cwksp_mark_tables_dirty(ws);
ZSTD_reduceIndex(ms, params, correction);
- ZSTD_cwksp_mark_tables_clean(ws);
+ ZSTD_cwksp_mark_tables_clean(ws);
if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
else ms->nextToUpdate -= correction;
/* invalidate dictionaries on overflow correction */
@@ -3956,8 +3956,8 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
"not enough space to store compressed block");
if (remaining < blockSize) blockSize = remaining;
- ZSTD_overflowCorrectIfNeeded(
- ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
+ ZSTD_overflowCorrectIfNeeded(
+ ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
@@ -3999,7 +3999,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
op += cSize;
assert(dstCapacity >= cSize);
dstCapacity -= cSize;
- cctx->isFirstBlock = 0;
+ cctx->isFirstBlock = 0;
DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
(unsigned)cSize);
} }
@@ -4010,25 +4010,25 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
- const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
+ const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
{ BYTE* const op = (BYTE*)dst;
U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
- U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
- U32 const checksumFlag = params->fParams.checksumFlag>0;
- U32 const windowSize = (U32)1 << params->cParams.windowLog;
- U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
- BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
- U32 const fcsCode = params->fParams.contentSizeFlag ?
+ U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
+ U32 const checksumFlag = params->fParams.checksumFlag>0;
+ U32 const windowSize = (U32)1 << params->cParams.windowLog;
+ U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
+ BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+ U32 const fcsCode = params->fParams.contentSizeFlag ?
(pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
size_t pos=0;
- assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
+ assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
"dst buf is too small to fit worst-case frame header size.");
DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
- !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
- if (params->format == ZSTD_f_zstd1) {
+ !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
+ if (params->format == ZSTD_f_zstd1) {
MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
pos = 4;
}
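The descriptor byte assembled above packs four fields: the dictID-size code in bits 0-1, the checksum flag in bit 2, the single-segment flag in bit 5, and the frame-content-size code in bits 6-7. A worked instance with illustrative values:

    #include <stdio.h>

    int main(void) {
        unsigned const dictIDSizeCode = 1;  /* 0 < dictID < 256 -> 1-byte field  */
        unsigned const checksumFlag   = 1;
        unsigned const singleSegment  = 1;  /* windowSize >= pledgedSrcSize      */
        unsigned const fcsCode        = 1;  /* 256 <= pledgedSrcSize < 65536+256 */
        unsigned const fhd = dictIDSizeCode + (checksumFlag << 2)
                           + (singleSegment << 5) + (fcsCode << 6);
        printf("frame header descriptor = 0x%02X\n", fhd);  /* prints 0x65 */
        return 0;
    }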
@@ -4122,7 +4122,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
"missing init (ZSTD_compressBegin)");
if (frame && (cctx->stage==ZSTDcs_init)) {
- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
assert(fhSize <= dstCapacity);
@@ -4143,15 +4143,15 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
if (!frame) {
/* overflow check and correction for block mode */
- ZSTD_overflowCorrectIfNeeded(
- ms, &cctx->workspace, &cctx->appliedParams,
- src, (BYTE const*)src + srcSize);
+ ZSTD_overflowCorrectIfNeeded(
+ ms, &cctx->workspace, &cctx->appliedParams,
+ src, (BYTE const*)src + srcSize);
}
DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
{ size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
- ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
+ ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
cctx->consumedSrcSize += srcSize;
cctx->producedCSize += (cSize + fhSize);
@@ -4187,8 +4187,8 @@ size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
- DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
- { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+ DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
+ { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
@@ -4199,7 +4199,7 @@ size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const
*/
static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
ldmState_t* ls,
- ZSTD_cwksp* ws,
+ ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
const void* src, size_t srcSize,
ZSTD_dictTableLoadMethod_e dtlm)
@@ -4435,7 +4435,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
{
size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
- FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
+ FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
}
return dictID;
@@ -4447,7 +4447,7 @@ static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
ZSTD_matchState_t* ms,
ldmState_t* ls,
- ZSTD_cwksp* ws,
+ ZSTD_cwksp* ws,
const ZSTD_CCtx_params* params,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
@@ -4455,10 +4455,10 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
void* workspace)
{
DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
- if ((dict==NULL) || (dictSize<8)) {
+ if ((dict==NULL) || (dictSize<8)) {
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
- return 0;
- }
+ return 0;
+ }
ZSTD_reset_compressedBlockState(bs);
@@ -4469,7 +4469,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
if (dictContentType == ZSTD_dct_auto) {
DEBUGLOG(4, "raw content dictionary detected");
- return ZSTD_loadDictionaryContent(
+ return ZSTD_loadDictionaryContent(
ms, ls, ws, params, dict, dictSize, dtlm);
}
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
@@ -4477,13 +4477,13 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
}
/* dict as full zstd dictionary */
- return ZSTD_loadZstdDictionary(
- bs, ms, ws, params, dict, dictSize, dtlm, workspace);
+ return ZSTD_loadZstdDictionary(
+ bs, ms, ws, params, dict, dictSize, dtlm, workspace);
}
-#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
+#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
-
+
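These two constants feed the attach decision in ZSTD_compressBegin_internal() just below: the CDict's own parameters are kept when the source is small in absolute terms, small relative to the dictionary, of unknown size, or when the CDict was built at the default level. Restated as a standalone predicate (a sketch; the real check also consults attachDictPref):

    #include <zstd.h>

    static int useCDictParams(unsigned long long pledgedSrcSize,
                              unsigned long long dictContentSize,
                              int cdictLevel) {
        return pledgedSrcSize < 128 * 1024              /* SRCSIZE_CUTOFF      */
            || pledgedSrcSize < dictContentSize * 6ULL  /* DICTSIZE_MULTIPLIER */
            || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
            || cdictLevel == 0;
    }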
/*! ZSTD_compressBegin_internal() :
* @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
@@ -4491,40 +4491,40 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
- const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
+ const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
#if ZSTD_TRACE
cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
#endif
- DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
+ DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
/* params are supposed to be fully validated at this point */
- assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
- if ( (cdict)
- && (cdict->dictContentSize > 0)
- && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
- || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
- || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
- || cdict->compressionLevel == 0)
- && (params->attachDictPref != ZSTD_dictForceLoad) ) {
+ if ( (cdict)
+ && (cdict->dictContentSize > 0)
+ && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+ || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || cdict->compressionLevel == 0)
+ && (params->attachDictPref != ZSTD_dictForceLoad) ) {
return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
}
FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
dictContentSize,
ZSTDcrp_makeClean, zbuff) , "");
- { size_t const dictID = cdict ?
- ZSTD_compress_insertDictionary(
- cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+ { size_t const dictID = cdict ?
+ ZSTD_compress_insertDictionary(
+ cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
cdict->dictContentSize, cdict->dictContentType, dtlm,
cctx->entropyWorkspace)
- : ZSTD_compress_insertDictionary(
- cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+ : ZSTD_compress_insertDictionary(
+ cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
- dictContentType, dtlm, cctx->entropyWorkspace);
+ dictContentType, dtlm, cctx->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= UINT_MAX);
cctx->dictID = (U32)dictID;
@@ -4538,10 +4538,10 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
- const ZSTD_CCtx_params* params,
+ const ZSTD_CCtx_params* params,
unsigned long long pledgedSrcSize)
{
- DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
+ DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
/* compression parameters verification and optimization */
FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
return ZSTD_compressBegin_internal(cctx,
@@ -4562,7 +4562,7 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
return ZSTD_compressBegin_advanced_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
NULL /*cdict*/,
- &cctxParams, pledgedSrcSize);
+ &cctxParams, pledgedSrcSize);
}
size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
@@ -4574,7 +4574,7 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di
}
DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
- &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}
size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
@@ -4597,7 +4597,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
/* special case : empty frame */
if (cctx->stage == ZSTDcs_init) {
- fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
dstCapacity -= fhSize;
op += fhSize;
@@ -4697,7 +4697,7 @@ size_t ZSTD_compress_advanced_internal(
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
- const ZSTD_CCtx_params* params)
+ const ZSTD_CCtx_params* params)
{
DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
@@ -4760,14 +4760,14 @@ size_t ZSTD_estimateCDictSize_advanced(
ZSTD_dictLoadMethod_e dictLoadMethod)
{
DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
- return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
- + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
/* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
* in case we are using DDS with row-hash. */
+ ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
/* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
- + (dictLoadMethod == ZSTD_dlm_byRef ? 0
- : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}
size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
@@ -4780,9 +4780,9 @@ size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support sizeof on NULL */
DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
- /* cdict may be in the workspace */
- return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
- + ZSTD_cwksp_sizeof(&cdict->workspace);
+ /* cdict may be in the workspace */
+ return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
+ + ZSTD_cwksp_sizeof(&cdict->workspace);
}
static size_t ZSTD_initCDict_internal(
@@ -4799,7 +4799,7 @@ static size_t ZSTD_initCDict_internal(
if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
cdict->dictContent = dictBuffer;
} else {
- void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
+ void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
cdict->dictContent = internalBuffer;
ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
@@ -4807,28 +4807,28 @@ static size_t ZSTD_initCDict_internal(
cdict->dictContentSize = dictSize;
cdict->dictContentType = dictContentType;
- cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
-
-
+ cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
+
+
/* Reset the state to no dictionary */
ZSTD_reset_compressedBlockState(&cdict->cBlockState);
- FORWARD_IF_ERROR(ZSTD_reset_matchState(
- &cdict->matchState,
- &cdict->workspace,
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &cdict->matchState,
+ &cdict->workspace,
&params.cParams,
params.useRowMatchFinder,
- ZSTDcrp_makeClean,
- ZSTDirp_reset,
+ ZSTDcrp_makeClean,
+ ZSTDirp_reset,
ZSTD_resetTarget_CDict), "");
/* (Maybe) load the dictionary
- * Skips loading the dictionary if it is < 8 bytes.
+ * Skips loading the dictionary if it is < 8 bytes.
*/
{ params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
params.fParams.contentSizeFlag = 1;
{ size_t const dictID = ZSTD_compress_insertDictionary(
&cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
- &params, cdict->dictContent, cdict->dictContentSize,
- dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
+ &params, cdict->dictContent, cdict->dictContentSize,
+ dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= (size_t)(U32)-1);
cdict->dictID = (U32)dictID;
@@ -4847,26 +4847,26 @@ static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
{
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
- { size_t const workspaceSize =
- ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
- ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
+ { size_t const workspaceSize =
+ ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
- (dictLoadMethod == ZSTD_dlm_byRef ? 0
- : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
- ZSTD_cwksp ws;
- ZSTD_CDict* cdict;
+ ZSTD_cwksp ws;
+ ZSTD_CDict* cdict;
- if (!workspace) {
+ if (!workspace) {
ZSTD_customFree(workspace, customMem);
return NULL;
}
-
+
ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
-
- cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
- assert(cdict != NULL);
- ZSTD_cwksp_move(&cdict->workspace, &ws);
+
+ cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+ assert(cdict != NULL);
+ ZSTD_cwksp_move(&cdict->workspace, &ws);
cdict->customMem = customMem;
cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
cdict->useRowMatchFinder = useRowMatchFinder;
@@ -4945,11 +4945,11 @@ ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionL
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
- ZSTD_dlm_byCopy, ZSTD_dct_auto,
- cParams, ZSTD_defaultCMem);
- if (cdict)
+ ZSTD_dlm_byCopy, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
+ if (cdict)
cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
- return cdict;
+ return cdict;
}
ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
@@ -4967,11 +4967,11 @@ size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support free on NULL */
{ ZSTD_customMem const cMem = cdict->customMem;
- int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
- ZSTD_cwksp_free(&cdict->workspace, cMem);
- if (!cdictInWorkspace) {
+ int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
+ ZSTD_cwksp_free(&cdict->workspace, cMem);
+ if (!cdictInWorkspace) {
ZSTD_customFree(cdict, cMem);
- }
+ }
return 0;
}
}
@@ -4999,24 +4999,24 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
/* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
- size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
- + (dictLoadMethod == ZSTD_dlm_byRef ? 0
- : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
- + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
- + matchStateSize;
- ZSTD_CDict* cdict;
+ size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
+ + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ + matchStateSize;
+ ZSTD_CDict* cdict;
ZSTD_CCtx_params params;
-
+
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
-
- {
- ZSTD_cwksp ws;
+
+ {
+ ZSTD_cwksp ws;
ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
- cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
- if (cdict == NULL) return NULL;
- ZSTD_cwksp_move(&cdict->workspace, &ws);
- }
-
+ cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+ if (cdict == NULL) return NULL;
+ ZSTD_cwksp_move(&cdict->workspace, &ws);
+ }
+
DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
(unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
@@ -5028,7 +5028,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict(
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
- dictLoadMethod, dictContentType,
+ dictLoadMethod, dictContentType,
params) ))
return NULL;
@@ -5065,14 +5065,14 @@ static size_t ZSTD_compressBegin_usingCDict_internal(
{
ZSTD_parameters params;
params.fParams = fParams;
- params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
- || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
- || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+ || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
|| cdict->compressionLevel == 0 ) ?
- ZSTD_getCParamsFromCDict(cdict)
- : ZSTD_getCParams(cdict->compressionLevel,
- pledgedSrcSize,
- cdict->dictContentSize);
+ ZSTD_getCParamsFromCDict(cdict)
+ : ZSTD_getCParams(cdict->compressionLevel,
+ pledgedSrcSize,
+ cdict->dictContentSize);
ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
}
/* Increase window log to fit the entire dictionary and source if the
@@ -5214,14 +5214,14 @@ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
* Assumption 2 : either dict, or cdict, is defined, not both */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
- const ZSTD_CCtx_params* params,
- unsigned long long pledgedSrcSize)
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_internal");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
- assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
- zcs->requestedParams = *params;
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+ zcs->requestedParams = *params;
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if (dict) {
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
@@ -5260,7 +5260,7 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
/* ZSTD_initCStream_advanced() :
* pledgedSrcSize must be exact.
* if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
- * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
+ * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pss)
diff --git a/contrib/libs/zstd/lib/compress/zstd_compress_internal.h b/contrib/libs/zstd/lib/compress/zstd_compress_internal.h
index fcba5e8953..c406e794bd 100644
--- a/contrib/libs/zstd/lib/compress/zstd_compress_internal.h
+++ b/contrib/libs/zstd/lib/compress/zstd_compress_internal.h
@@ -19,7 +19,7 @@
* Dependencies
***************************************/
#include "../common/zstd_internal.h"
-#include "zstd_cwksp.h"
+#include "zstd_cwksp.h"
#ifdef ZSTD_MULTITHREAD
# include "zstdmt_compress.h"
#endif
@@ -207,15 +207,15 @@ typedef struct ZSTD_matchState_t ZSTD_matchState_t;
struct ZSTD_matchState_t {
ZSTD_window_t window; /* State for window round buffer management */
- U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
- * When loadedDictEnd != 0, a dictionary is in use, and still valid.
- * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
- * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
- * When dict referential is copied into active context (i.e. not attached),
- * loadedDictEnd == dictSize, since referential starts from zero.
- */
+ U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
+ * When loadedDictEnd != 0, a dictionary is in use, and still valid.
+ * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
+ * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
+ * When dict referential is copied into active context (i.e. not attached),
+ * loadedDictEnd == dictSize, since referential starts from zero.
+ */
U32 nextToUpdate; /* index from which to continue table update */
- U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
+ U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
@@ -275,12 +275,12 @@ typedef struct {
} ldmParams_t;
typedef struct {
- int collectSequences;
- ZSTD_Sequence* seqStart;
- size_t seqIndex;
- size_t maxSequences;
-} SeqCollector;
-
+ int collectSequences;
+ ZSTD_Sequence* seqStart;
+ size_t seqIndex;
+ size_t maxSequences;
+} SeqCollector;
+
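One useful invariant of the collector above (see ZSTD_copyBlockSequences() earlier in this diff): every input byte is emitted either as a literal or as match content, so summing the collected lengths recovers the source size. A small consumer sketch using ZSTD_Sequence's experimental fields:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* With block delimiters included, litLength + matchLength summed over all
     * collected sequences equals the original srcSize. */
    static size_t totalDecodedSize(const ZSTD_Sequence* seqs, size_t nbSeqs) {
        size_t i, total = 0;
        for (i = 0; i < nbSeqs; i++)
            total += seqs[i].litLength + seqs[i].matchLength;
        return total;
    }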
struct ZSTD_CCtx_params_s {
ZSTD_format_e format;
ZSTD_compressionParameters cParams;
@@ -292,9 +292,9 @@ struct ZSTD_CCtx_params_s {
size_t targetCBlockSize; /* Tries to fit compressed block size to be around targetCBlockSize.
* No target when targetCBlockSize == 0.
* There is no guarantee on compressed block size */
- int srcSizeHint; /* User's best guess of source size.
- * Hint is not valid when srcSizeHint == 0.
- * There is no guarantee that hint is close to actual source size */
+ int srcSizeHint; /* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size */
ZSTD_dictAttachPref_e attachDictPref;
ZSTD_paramSwitch_e literalCompressionMode;
@@ -371,7 +371,7 @@ struct ZSTD_CCtx_s {
U32 dictID;
size_t dictContentSize;
- ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
+ ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
size_t blockSize;
unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
unsigned long long consumedSrcSize;
@@ -380,8 +380,8 @@ struct ZSTD_CCtx_s {
ZSTD_customMem customMem;
ZSTD_threadPool* pool;
size_t staticSize;
- SeqCollector seqCollector;
- int isFirstBlock;
+ SeqCollector seqCollector;
+ int isFirstBlock;
int initialized;
seqStore_t seqStore; /* sequences storage ptrs */
@@ -560,23 +560,23 @@ MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxPa
}
}
-/*! ZSTD_safecopyLiterals() :
- * memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
- * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
- * large copies.
- */
+/*! ZSTD_safecopyLiterals() :
+ * memcpy() function that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
+ * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
+ * large copies.
+ */
static void
ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)
{
- assert(iend > ilimit_w);
- if (ip <= ilimit_w) {
- ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
- op += ilimit_w - ip;
- ip = ilimit_w;
- }
- while (ip < iend) *op++ = *ip++;
-}
-
+ assert(iend > ilimit_w);
+ if (ip <= ilimit_w) {
+ ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
+ op += ilimit_w - ip;
+ ip = ilimit_w;
+ }
+ while (ip < iend) *op++ = *ip++;
+}
+
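A self-contained model of the two-phase strategy above, assuming WILDCOPY_OVERLENGTH is 32 (zstd's margin) and that the destination was provisioned with the same overwrite slack:

    #include <string.h>

    #define WILDCOPY_OVERLENGTH 32  /* assumed margin, as in zstd */

    static void safecopy_model(unsigned char* op, const unsigned char* ip,
                               const unsigned char* const iend) {
        const unsigned char* const ilimit_w = iend - WILDCOPY_OVERLENGTH;
        /* Fast phase: chunked copies that may write a little past the needed
         * length but, thanks to the margin, never read at or beyond iend. */
        while (ip <= ilimit_w) { memcpy(op, ip, 16); op += 16; ip += 16; }
        /* Exact phase: finish the tail one byte at a time. */
        while (ip < iend) *op++ = *ip++;
    }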
#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
#define STORE_REPCODE_1 STORE_REPCODE(1)
#define STORE_REPCODE_2 STORE_REPCODE(2)
@@ -594,7 +594,7 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con
* Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t.
* @offBase_minus1 : Users should use employ macros STORE_REPCODE_X and STORE_OFFSET().
* @matchLength : must be >= MINMATCH
- * Allowed to overread literals up to litLimit.
+ * Allowed to overread literals up to litLimit.
*/
HINT_INLINE UNUSED_ATTR void
ZSTD_storeSeq(seqStore_t* seqStorePtr,
@@ -602,8 +602,8 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
U32 offBase_minus1,
size_t matchLength)
{
- BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
- BYTE const* const litEnd = literals + litLength;
+ BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
+ BYTE const* const litEnd = literals + litLength;
#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
static const BYTE* g_start = NULL;
if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
@@ -616,19 +616,19 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
/* copy Literals */
assert(seqStorePtr->maxNbLit <= 128 KB);
assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
- assert(literals + litLength <= litLimit);
- if (litEnd <= litLimit_w) {
- /* Common case we can use wildcopy.
- * First copy 16 bytes, because literals are likely short.
- */
- assert(WILDCOPY_OVERLENGTH >= 16);
- ZSTD_copy16(seqStorePtr->lit, literals);
- if (litLength > 16) {
- ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
- }
- } else {
- ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
- }
+ assert(literals + litLength <= litLimit);
+ if (litEnd <= litLimit_w) {
+ /* Common case we can use wildcopy.
+ * First copy 16 bytes, because literals are likely short.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(seqStorePtr->lit, literals);
+ if (litLength > 16) {
+ ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
+ }
+ } else {
+ ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
+ }
seqStorePtr->lit += litLength;
/* literal Length */
@@ -1204,37 +1204,37 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
/* Similar to ZSTD_window_enforceMaxDist(),
* but only invalidates dictionary
- * when input progresses beyond window size.
- * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
- * loadedDictEnd uses same referential as window->base
- * maxDist is the window size */
+ * when input progresses beyond window size.
+ * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
+ * loadedDictEnd uses same referential as window->base
+ * maxDist is the window size */
MEM_STATIC void
-ZSTD_checkDictValidity(const ZSTD_window_t* window,
+ZSTD_checkDictValidity(const ZSTD_window_t* window,
const void* blockEnd,
U32 maxDist,
U32* loadedDictEndPtr,
const ZSTD_matchState_t** dictMatchStatePtr)
{
- assert(loadedDictEndPtr != NULL);
- assert(dictMatchStatePtr != NULL);
- { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
- U32 const loadedDictEnd = *loadedDictEndPtr;
- DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
- (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
- assert(blockEndIdx >= loadedDictEnd);
-
- if (blockEndIdx > loadedDictEnd + maxDist) {
- /* On reaching window size, dictionaries are invalidated.
- * For simplification, if window size is reached anywhere within next block,
- * the dictionary is invalidated for the full block.
- */
- DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
- *loadedDictEndPtr = 0;
- *dictMatchStatePtr = NULL;
- } else {
- if (*loadedDictEndPtr != 0) {
- DEBUGLOG(6, "dictionary considered valid for current block");
- } } }
+ assert(loadedDictEndPtr != NULL);
+ assert(dictMatchStatePtr != NULL);
+ { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+ U32 const loadedDictEnd = *loadedDictEndPtr;
+ DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+ assert(blockEndIdx >= loadedDictEnd);
+
+ if (blockEndIdx > loadedDictEnd + maxDist) {
+ /* On reaching window size, dictionaries are invalidated.
+ * For simplification, if window size is reached anywhere within next block,
+ * the dictionary is invalidated for the full block.
+ */
+ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
+ *loadedDictEndPtr = 0;
+ *dictMatchStatePtr = NULL;
+ } else {
+ if (*loadedDictEndPtr != 0) {
+ DEBUGLOG(6, "dictionary considered valid for current block");
+ } } }
}
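A worked instance of the test above: with a 1 MiB window (maxDist = 1<<20) and a dictionary loaded up to index 65536, the dictionary survives until the block-end index passes 65536 + 1048576 = 1114112.

    #include <stdint.h>

    static int dictStillValid(uint32_t blockEndIdx, uint32_t loadedDictEnd,
                              uint32_t maxDist) {
        return blockEndIdx <= loadedDictEnd + maxDist;
    }
    /* dictStillValid(1114112, 65536, 1u << 20) == 1   last valid block end
     * dictStillValid(1114113, 65536, 1u << 20) == 0   -> invalidate        */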
MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
@@ -1296,7 +1296,7 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
* Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
*/
MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
-{
+{
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.lowLimit;
U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
@@ -1306,8 +1306,8 @@ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, u
* valid for the entire block. So this check is sufficient to find the lowest valid match index.
*/
U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
- return matchLowest;
-}
+ return matchLowest;
+}
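A worked form of the clamp above: the lowest usable index is the window's lower bound, raised to (curr - maxDistance) once the current position is more than one full window beyond it (dictionary cases excepted, as the comment notes):

    static unsigned lowestMatchIndex(unsigned curr, unsigned lowestValid,
                                     unsigned windowLog) {
        unsigned const maxDistance = 1u << windowLog;
        return (curr - lowestValid > maxDistance) ? curr - maxDistance
                                                  : lowestValid;
    }
    /* e.g. curr = 5000000, lowestValid = 0, windowLog = 20
     *      -> 5000000 - 1048576 = 3951424 */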
/**
* Returns the lowest allowed match index in the prefix.
@@ -1324,8 +1324,8 @@ MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr,
U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
return matchLowest;
}
-
-
+
+
/* debug functions */
#if (DEBUGLEVEL>=2)
@@ -1399,7 +1399,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
const ZSTD_CDict* cdict,
- const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
+ const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
void ZSTD_resetSeqStore(seqStore_t* ssPtr);
@@ -1414,7 +1414,7 @@ size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
- const ZSTD_CCtx_params* params,
+ const ZSTD_CCtx_params* params,
unsigned long long pledgedSrcSize);
/* ZSTD_compress_advanced_internal() :
@@ -1423,7 +1423,7 @@ size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
- const ZSTD_CCtx_params* params);
+ const ZSTD_CCtx_params* params);
/* ZSTD_writeLastEmptyBlock() :
diff --git a/contrib/libs/zstd/lib/compress/zstd_compress_literals.c b/contrib/libs/zstd/lib/compress/zstd_compress_literals.c
index 38905e42e6..52b0a8059a 100644
--- a/contrib/libs/zstd/lib/compress/zstd_compress_literals.c
+++ b/contrib/libs/zstd/lib/compress/zstd_compress_literals.c
@@ -72,7 +72,7 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
ZSTD_strategy strategy, int disableLiteralCompression,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
- void* entropyWorkspace, size_t entropyWorkspaceSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
const int bmi2,
unsigned suspectUncompressible)
{
@@ -102,13 +102,13 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
{ HUF_repeat repeat = prevHuf->repeatMode;
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
- cLitSize = singleStream ?
- HUF_compress1X_repeat(
- ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+ cLitSize = singleStream ?
+ HUF_compress1X_repeat(
+ ostart+lhSize, dstCapacity-lhSize, src, srcSize,
HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) :
- HUF_compress4X_repeat(
- ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+ HUF_compress4X_repeat(
+ ostart+lhSize, dstCapacity-lhSize, src, srcSize,
HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible);
if (repeat != HUF_repeat_none) {
diff --git a/contrib/libs/zstd/lib/compress/zstd_compress_literals.h b/contrib/libs/zstd/lib/compress/zstd_compress_literals.h
index 55b4c5c04e..9775fb97cb 100644
--- a/contrib/libs/zstd/lib/compress/zstd_compress_literals.h
+++ b/contrib/libs/zstd/lib/compress/zstd_compress_literals.h
@@ -24,7 +24,7 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
ZSTD_strategy strategy, int disableLiteralCompression,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
- void* entropyWorkspace, size_t entropyWorkspaceSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
const int bmi2,
unsigned suspectUncompressible);
diff --git a/contrib/libs/zstd/lib/compress/zstd_compress_sequences.c b/contrib/libs/zstd/lib/compress/zstd_compress_sequences.c
index a8463bbb99..f1e40af2ea 100644
--- a/contrib/libs/zstd/lib/compress/zstd_compress_sequences.c
+++ b/contrib/libs/zstd/lib/compress/zstd_compress_sequences.c
@@ -246,7 +246,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
const FSE_CTable* prevCTable, size_t prevCTableSize,
- void* entropyWorkspace, size_t entropyWorkspaceSize)
+ void* entropyWorkspace, size_t entropyWorkspaceSize)
{
BYTE* op = (BYTE*)dst;
const BYTE* const oend = op + dstCapacity;
diff --git a/contrib/libs/zstd/lib/compress/zstd_compress_sequences.h b/contrib/libs/zstd/lib/compress/zstd_compress_sequences.h
index d0a0118da3..7991364c2f 100644
--- a/contrib/libs/zstd/lib/compress/zstd_compress_sequences.h
+++ b/contrib/libs/zstd/lib/compress/zstd_compress_sequences.h
@@ -35,7 +35,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
const FSE_CTable* prevCTable, size_t prevCTableSize,
- void* entropyWorkspace, size_t entropyWorkspaceSize);
+ void* entropyWorkspace, size_t entropyWorkspaceSize);
size_t ZSTD_encodeSequences(
void* dst, size_t dstCapacity,
diff --git a/contrib/libs/zstd/lib/compress/zstd_cwksp.h b/contrib/libs/zstd/lib/compress/zstd_cwksp.h
index 327c1deb8b..dc3f40c80c 100644
--- a/contrib/libs/zstd/lib/compress/zstd_cwksp.h
+++ b/contrib/libs/zstd/lib/compress/zstd_cwksp.h
@@ -1,54 +1,54 @@
-/*
+/*
* Copyright (c) Yann Collet, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under both the BSD-style license (found in the
- * LICENSE file in the root directory of this source tree) and the GPLv2 (found
- * in the COPYING file in the root directory of this source tree).
- * You may select, at your option, one of the above-listed licenses.
- */
-
-#ifndef ZSTD_CWKSP_H
-#define ZSTD_CWKSP_H
-
-/*-*************************************
-* Dependencies
-***************************************/
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CWKSP_H
+#define ZSTD_CWKSP_H
+
+/*-*************************************
+* Dependencies
+***************************************/
#include "../common/zstd_internal.h"
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/*-*************************************
-* Constants
-***************************************/
-
-/* Since the workspace is effectively its own little malloc implementation /
- * arena, when we run under ASAN, we should similarly insert redzones between
- * each internal element of the workspace, so ASAN will catch overruns that
- * reach outside an object but that stay inside the workspace.
- *
- * This defines the size of that redzone.
- */
-#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
-#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
-#endif
-
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-*************************************
+* Constants
+***************************************/
+
+/* Since the workspace is effectively its own little malloc implementation /
+ * arena, when we run under ASAN, we should similarly insert redzones between
+ * each internal element of the workspace, so ASAN will catch overruns that
+ * reach outside an object but that stay inside the workspace.
+ *
+ * This defines the size of that redzone.
+ */
+#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
+#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
+#endif
+
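A minimal sketch of the redzone idea described above, assuming clang's public ASAN interface; the bump allocator here is illustrative, not ZSTD_cwksp:

    #include <sanitizer/asan_interface.h>
    #include <stddef.h>

    #define REDZONE 128  /* matches ZSTD_CWKSP_ASAN_REDZONE_SIZE above */

    static void* arena_alloc(unsigned char** cursor, size_t bytes) {
        unsigned char* const user = *cursor + REDZONE;
        *cursor = user + bytes + REDZONE;
        __asan_poison_memory_region(user - REDZONE, REDZONE);  /* front guard */
        __asan_poison_memory_region(user + bytes, REDZONE);    /* back guard  */
        return user;  /* an overrun now lands in poisoned guard bytes */
    }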
 /* Align our tables and aligned buffers on 64-byte boundaries */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
-/*-*************************************
-* Structures
-***************************************/
-typedef enum {
- ZSTD_cwksp_alloc_objects,
- ZSTD_cwksp_alloc_buffers,
- ZSTD_cwksp_alloc_aligned
-} ZSTD_cwksp_alloc_phase_e;
-
-/**
+/*-*************************************
+* Structures
+***************************************/
+typedef enum {
+ ZSTD_cwksp_alloc_objects,
+ ZSTD_cwksp_alloc_buffers,
+ ZSTD_cwksp_alloc_aligned
+} ZSTD_cwksp_alloc_phase_e;
+
+/**
* Used to describe whether the workspace is statically allocated (and will not
* necessarily ever be freed), or if it's dynamically allocated and we can
* expect a well-formed caller to free this.
@@ -59,151 +59,151 @@ typedef enum {
} ZSTD_cwksp_static_alloc_e;
/**
- * Zstd fits all its internal datastructures into a single continuous buffer,
- * so that it only needs to perform a single OS allocation (or so that a buffer
- * can be provided to it and it can perform no allocations at all). This buffer
- * is called the workspace.
- *
- * Several optimizations complicate that process of allocating memory ranges
- * from this workspace for each internal datastructure:
- *
- * - These different internal datastructures have different setup requirements:
- *
- * - The static objects need to be cleared once and can then be trivially
- * reused for each compression.
- *
- * - Various buffers don't need to be initialized at all--they are always
- * written into before they're read.
- *
- * - The matchstate tables have a unique requirement that they don't need
- * their memory to be totally cleared, but they do need the memory to have
- * some bound, i.e., a guarantee that all values in the memory they've been
- * allocated is less than some maximum value (which is the starting value
- * for the indices that they will then use for compression). When this
- * guarantee is provided to them, they can use the memory without any setup
- * work. When it can't, they have to clear the area.
- *
- * - These buffers also have different alignment requirements.
- *
- * - We would like to reuse the objects in the workspace for multiple
- * compressions without having to perform any expensive reallocation or
- * reinitialization work.
- *
- * - We would like to be able to efficiently reuse the workspace across
- * multiple compressions **even when the compression parameters change** and
- * we need to resize some of the objects (where possible).
- *
- * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
- * abstraction was created. It works as follows:
- *
- * Workspace Layout:
- *
- * [ ... workspace ... ]
- * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
- *
- * The various objects that live in the workspace are divided into the
- * following categories, and are allocated separately:
- *
- * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
- * so that literally everything fits in a single buffer. Note: if present,
+ * Zstd fits all its internal datastructures into a single continuous buffer,
+ * so that it only needs to perform a single OS allocation (or so that a buffer
+ * can be provided to it and it can perform no allocations at all). This buffer
+ * is called the workspace.
+ *
+ * Several optimizations complicate that process of allocating memory ranges
+ * from this workspace for each internal datastructure:
+ *
+ * - These different internal datastructures have different setup requirements:
+ *
+ * - The static objects need to be cleared once and can then be trivially
+ * reused for each compression.
+ *
+ * - Various buffers don't need to be initialized at all--they are always
+ * written into before they're read.
+ *
+ * - The matchstate tables have a unique requirement that they don't need
+ * their memory to be totally cleared, but they do need the memory to have
+ *   some bound, i.e., a guarantee that all values in the memory they've been
+ *   allocated are less than some maximum value (which is the starting value
+ * for the indices that they will then use for compression). When this
+ * guarantee is provided to them, they can use the memory without any setup
+ * work. When it can't, they have to clear the area.
+ *
+ * - These buffers also have different alignment requirements.
+ *
+ * - We would like to reuse the objects in the workspace for multiple
+ * compressions without having to perform any expensive reallocation or
+ * reinitialization work.
+ *
+ * - We would like to be able to efficiently reuse the workspace across
+ * multiple compressions **even when the compression parameters change** and
+ * we need to resize some of the objects (where possible).
+ *
+ * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
+ * abstraction was created. It works as follows:
+ *
+ * Workspace Layout:
+ *
+ * [ ... workspace ... ]
+ * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ *
+ * The various objects that live in the workspace are divided into the
+ * following categories, and are allocated separately:
+ *
+ * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
+ * so that literally everything fits in a single buffer. Note: if present,
* this must be the first object in the workspace, since ZSTD_customFree{CCtx,
- * CDict}() rely on a pointer comparison to see whether one or two frees are
- * required.
- *
- * - Fixed size objects: these are fixed-size, fixed-count objects that are
- * nonetheless "dynamically" allocated in the workspace so that we can
- * control how they're initialized separately from the broader ZSTD_CCtx.
- * Examples:
- * - Entropy Workspace
- * - 2 x ZSTD_compressedBlockState_t
- * - CDict dictionary contents
- *
- * - Tables: these are any of several different datastructures (hash tables,
- * chain tables, binary trees) that all respect a common format: they are
- * uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
+ * CDict}() rely on a pointer comparison to see whether one or two frees are
+ * required.
+ *
+ * - Fixed size objects: these are fixed-size, fixed-count objects that are
+ * nonetheless "dynamically" allocated in the workspace so that we can
+ * control how they're initialized separately from the broader ZSTD_CCtx.
+ * Examples:
+ * - Entropy Workspace
+ * - 2 x ZSTD_compressedBlockState_t
+ * - CDict dictionary contents
+ *
+ * - Tables: these are any of several different data structures (hash tables,
+ * chain tables, binary trees) that all respect a common format: they are
+ * uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
* Their sizes depend on the cparams. These tables are 64-byte aligned.
- *
- * - Aligned: these buffers are used for various purposes that require 4 byte
+ *
+ * - Aligned: these buffers are used for various purposes that require 4 byte
* alignment, but don't require any initialization before they're used. These
* buffers are each aligned to 64 bytes.
- *
- * - Buffers: these buffers are used for various purposes that don't require
- * any alignment or initialization before they're used. This means they can
- * be moved around at no cost for a new compression.
- *
- * Allocating Memory:
- *
- * The various types of objects must be allocated in order, so they can be
- * correctly packed into the workspace buffer. That order is:
- *
- * 1. Objects
- * 2. Buffers
+ *
+ * - Buffers: these buffers are used for various purposes that don't require
+ * any alignment or initialization before they're used. This means they can
+ * be moved around at no cost for a new compression.
+ *
+ * Allocating Memory:
+ *
+ * The various types of objects must be allocated in order, so they can be
+ * correctly packed into the workspace buffer. That order is:
+ *
+ * 1. Objects
+ * 2. Buffers
* 3. Aligned/Tables
- *
- * Attempts to reserve objects of different types out of order will fail.
- */
-typedef struct {
- void* workspace;
- void* workspaceEnd;
-
- void* objectEnd;
- void* tableEnd;
- void* tableValidEnd;
- void* allocStart;
-
+ *
+ * Attempts to reserve objects of different types out of order will fail.
+ */
+typedef struct {
+ void* workspace;
+ void* workspaceEnd;
+
+ void* objectEnd;
+ void* tableEnd;
+ void* tableValidEnd;
+ void* allocStart;
+
BYTE allocFailed;
- int workspaceOversizedDuration;
- ZSTD_cwksp_alloc_phase_e phase;
+ int workspaceOversizedDuration;
+ ZSTD_cwksp_alloc_phase_e phase;
ZSTD_cwksp_static_alloc_e isStatic;
-} ZSTD_cwksp;
-
-/*-*************************************
-* Functions
-***************************************/
-
-MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
-
-MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
- (void)ws;
- assert(ws->workspace <= ws->objectEnd);
- assert(ws->objectEnd <= ws->tableEnd);
- assert(ws->objectEnd <= ws->tableValidEnd);
- assert(ws->tableEnd <= ws->allocStart);
- assert(ws->tableValidEnd <= ws->allocStart);
- assert(ws->allocStart <= ws->workspaceEnd);
-}
-
-/**
- * Align must be a power of 2.
- */
-MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
- size_t const mask = align - 1;
- assert((align & mask) == 0);
- return (size + mask) & ~mask;
-}
-
-/**
- * Use this to determine how much space in the workspace we will consume to
- * allocate this object. (Normally it should be exactly the size of the object,
- * but under special conditions, like ASAN, where we pad each object, it might
- * be larger.)
- *
- * Since tables aren't currently redzoned, you don't need to call through this
- * to figure out how much space you need for the matchState tables. Everything
- * else is though.
+} ZSTD_cwksp;
+
+/*-*************************************
+* Functions
+***************************************/
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+
+MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
+ (void)ws;
+ assert(ws->workspace <= ws->objectEnd);
+ assert(ws->objectEnd <= ws->tableEnd);
+ assert(ws->objectEnd <= ws->tableValidEnd);
+ assert(ws->tableEnd <= ws->allocStart);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ assert(ws->allocStart <= ws->workspaceEnd);
+}
+
+/**
+ * Align must be a power of 2.
+ */
+MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+ size_t const mask = align - 1;
+ assert((align & mask) == 0);
+ return (size + mask) & ~mask;
+}
+
+/**
+ * Use this to determine how much space in the workspace we will consume to
+ * allocate this object. (Normally it should be exactly the size of the object,
+ * but under special conditions, like ASAN, where we pad each object, it might
+ * be larger.)
+ *
+ * Since tables aren't currently redzoned, you don't need to call through this
+ * to figure out how much space you need for the matchState tables. Everything
+ * else is, though.
*
* Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
- */
-MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
+ */
+MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
if (size == 0)
return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-#else
- return size;
-#endif
-}
-
+ return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+#else
+ return size;
+#endif
+}
+
/**
* Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
* Used to determine the number of bytes required for a given "aligned".
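
A quick illustration of the power-of-2 rounding used by ZSTD_cwksp_align() above (a standalone sketch, not part of the diff; only the names are invented):

    /* Mirrors ZSTD_cwksp_align()'s mask arithmetic in isolation. */
    #include <assert.h>
    #include <stddef.h>

    static size_t align_up(size_t size, size_t const align) {
        size_t const mask = align - 1;
        assert((align & mask) == 0);      /* align must be a power of 2 */
        return (size + mask) & ~mask;     /* round up to a multiple of align */
    }

    int main(void) {
        assert(align_up(1, 64)  == 64);   /* rounds up to the next multiple */
        assert(align_up(64, 64) == 64);   /* already aligned: unchanged */
        assert(align_up(100, 8) == 104);
        return 0;
    }
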
@@ -279,17 +279,17 @@ ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
- assert(phase >= ws->phase);
- if (phase > ws->phase) {
+ assert(phase >= ws->phase);
+ if (phase > ws->phase) {
/* Going from allocating objects to allocating buffers */
- if (ws->phase < ZSTD_cwksp_alloc_buffers &&
- phase >= ZSTD_cwksp_alloc_buffers) {
- ws->tableValidEnd = ws->objectEnd;
- }
+ if (ws->phase < ZSTD_cwksp_alloc_buffers &&
+ phase >= ZSTD_cwksp_alloc_buffers) {
+ ws->tableValidEnd = ws->objectEnd;
+ }
/* Going from allocating buffers to allocating aligneds/tables */
- if (ws->phase < ZSTD_cwksp_alloc_aligned &&
- phase >= ZSTD_cwksp_alloc_aligned) {
+ if (ws->phase < ZSTD_cwksp_alloc_aligned &&
+ phase >= ZSTD_cwksp_alloc_aligned) {
{ /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
size_t const bytesToAlign =
ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
@@ -297,7 +297,7 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase
ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
memory_allocation, "aligned phase - alignment initial allocation failed!");
- }
+ }
{ /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
void* const alloc = ws->objectEnd;
size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
@@ -310,83 +310,83 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase
if (ws->tableValidEnd < ws->tableEnd) {
ws->tableValidEnd = ws->tableEnd;
} } }
- ws->phase = phase;
+ ws->phase = phase;
ZSTD_cwksp_assert_internal_consistency(ws);
- }
+ }
return 0;
-}
-
-/**
- * Returns whether this object/buffer/etc was allocated in this workspace.
- */
+}
+
+/**
+ * Returns whether this object/buffer/etc was allocated in this workspace.
+ */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
- return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
-}
-
-/**
- * Internal function. Do not use directly.
- */
+ return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+}
+
+/**
+ * Internal function. Do not use directly.
+ */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
- void* alloc;
+ void* alloc;
if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
return NULL;
}
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- /* over-reserve space */
+ /* over-reserve space */
bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-#endif
-
+#endif
+
alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
-
+
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
- * either size. */
+ /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
+ * either side. */
if (alloc) {
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
__asan_unpoison_memory_region(alloc, bytes);
}
}
-#endif
-
- return alloc;
-}
-
-/**
- * Reserves and returns unaligned memory.
- */
+#endif
+
+ return alloc;
+}
+
+/**
+ * Reserves and returns unaligned memory.
+ */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
- return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
-}
-
-/**
+ return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
+}
+
+/**
* Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
- */
+ */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
ZSTD_cwksp_alloc_aligned);
assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
return ptr;
-}
-
-/**
+}
+
+/**
* Aligned on 64 bytes. These buffers have the special property that
- * their values remain constrained, allowing us to re-use them without
- * memset()-ing them.
- */
+ * their values remain constrained, allowing us to re-use them without
+ * memset()-ing them.
+ */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
- const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
+ const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
void* alloc;
void* end;
void* top;
-
+
if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
return NULL;
}
@@ -394,236 +394,236 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
end = (BYTE *)alloc + bytes;
top = ws->allocStart;
- DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
- alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
- assert((bytes & (sizeof(U32)-1)) == 0);
- ZSTD_cwksp_assert_internal_consistency(ws);
- assert(end <= top);
- if (end > top) {
- DEBUGLOG(4, "cwksp: table alloc failed!");
- ws->allocFailed = 1;
- return NULL;
- }
- ws->tableEnd = end;
-
+ DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ assert((bytes & (sizeof(U32)-1)) == 0);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(end <= top);
+ if (end > top) {
+ DEBUGLOG(4, "cwksp: table alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->tableEnd = end;
+
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
__asan_unpoison_memory_region(alloc, bytes);
}
-#endif
-
+#endif
+
assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
- return alloc;
-}
-
-/**
- * Aligned on sizeof(void*).
+ return alloc;
+}
+
+/**
+ * Aligned on sizeof(void*).
* Note : should happen only once, at workspace first initialization
- */
+ */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
- void* alloc = ws->objectEnd;
- void* end = (BYTE*)alloc + roundedBytes;
-
+ void* alloc = ws->objectEnd;
+ void* end = (BYTE*)alloc + roundedBytes;
+
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- /* over-reserve space */
- end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-#endif
-
+ /* over-reserve space */
+ end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+#endif
+
DEBUGLOG(4,
- "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
- alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
+ "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
+ alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
assert(bytes % ZSTD_ALIGNOF(void*) == 0);
- ZSTD_cwksp_assert_internal_consistency(ws);
- /* we must be in the first phase, no advance is possible */
- if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ /* we must be in the first phase, no advance is possible */
+ if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
DEBUGLOG(3, "cwksp: object alloc failed!");
- ws->allocFailed = 1;
- return NULL;
- }
- ws->objectEnd = end;
- ws->tableEnd = end;
- ws->tableValidEnd = end;
-
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->objectEnd = end;
+ ws->tableEnd = end;
+ ws->tableValidEnd = end;
+
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
- /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
- * either size. */
+ /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
+ * either side. */
alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
__asan_unpoison_memory_region(alloc, bytes);
}
-#endif
-
- return alloc;
-}
-
+#endif
+
+ return alloc;
+}
+
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
- DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
-
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
+
#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
- /* To validate that the table re-use logic is sound, and that we don't
- * access table space that we haven't cleaned, we re-"poison" the table
- * space every time we mark it dirty. */
- {
- size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
- assert(__msan_test_shadow(ws->objectEnd, size) == -1);
- __msan_poison(ws->objectEnd, size);
- }
-#endif
-
- assert(ws->tableValidEnd >= ws->objectEnd);
- assert(ws->tableValidEnd <= ws->allocStart);
- ws->tableValidEnd = ws->objectEnd;
- ZSTD_cwksp_assert_internal_consistency(ws);
-}
-
-MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
- DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
- assert(ws->tableValidEnd >= ws->objectEnd);
- assert(ws->tableValidEnd <= ws->allocStart);
- if (ws->tableValidEnd < ws->tableEnd) {
- ws->tableValidEnd = ws->tableEnd;
- }
- ZSTD_cwksp_assert_internal_consistency(ws);
-}
-
-/**
- * Zero the part of the allocated tables not already marked clean.
- */
-MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
- DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
- assert(ws->tableValidEnd >= ws->objectEnd);
- assert(ws->tableValidEnd <= ws->allocStart);
- if (ws->tableValidEnd < ws->tableEnd) {
+ /* To validate that the table re-use logic is sound, and that we don't
+ * access table space that we haven't cleaned, we re-"poison" the table
+ * space every time we mark it dirty. */
+ {
+ size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
+ assert(__msan_test_shadow(ws->objectEnd, size) == -1);
+ __msan_poison(ws->objectEnd, size);
+ }
+#endif
+
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ ws->tableValidEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/**
+ * Zero the part of the allocated tables not already marked clean.
+ */
+MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
- }
- ZSTD_cwksp_mark_tables_clean(ws);
-}
-
-/**
- * Invalidates table allocations.
- * All other allocations remain valid.
- */
-MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
- DEBUGLOG(4, "cwksp: clearing tables!");
-
+ }
+ ZSTD_cwksp_mark_tables_clean(ws);
+}
+
+/**
+ * Invalidates table allocations.
+ * All other allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing tables!");
+
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* We don't do this when the workspace is statically allocated, because
* when that is the case, we have no capability to hook into the end of the
* workspace's lifecycle to unpoison the memory.
*/
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
- size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
- __asan_poison_memory_region(ws->objectEnd, size);
- }
-#endif
-
- ws->tableEnd = ws->objectEnd;
- ZSTD_cwksp_assert_internal_consistency(ws);
-}
-
-/**
- * Invalidates all buffer, aligned, and table allocations.
- * Object allocations remain valid.
- */
-MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
- DEBUGLOG(4, "cwksp: clearing!");
-
+ size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
+ __asan_poison_memory_region(ws->objectEnd, size);
+ }
+#endif
+
+ ws->tableEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/**
+ * Invalidates all buffer, aligned, and table allocations.
+ * Object allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing!");
+
#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
- /* To validate that the context re-use logic is sound, and that we don't
- * access stuff that this compression hasn't initialized, we re-"poison"
- * the workspace (or at least the non-static, non-table parts of it)
- * every time we start a new compression. */
- {
- size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
- __msan_poison(ws->tableValidEnd, size);
- }
-#endif
-
+ /* To validate that the context re-use logic is sound, and that we don't
+ * access stuff that this compression hasn't initialized, we re-"poison"
+ * the workspace (or at least the non-static, non-table parts of it)
+ * every time we start a new compression. */
+ {
+ size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
+ __msan_poison(ws->tableValidEnd, size);
+ }
+#endif
+
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* We don't do this when the workspace is statically allocated, because
* when that is the case, we have no capability to hook into the end of the
* workspace's lifecycle to unpoison the memory.
*/
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
- size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
- __asan_poison_memory_region(ws->objectEnd, size);
- }
-#endif
-
- ws->tableEnd = ws->objectEnd;
- ws->allocStart = ws->workspaceEnd;
- ws->allocFailed = 0;
- if (ws->phase > ZSTD_cwksp_alloc_buffers) {
- ws->phase = ZSTD_cwksp_alloc_buffers;
- }
- ZSTD_cwksp_assert_internal_consistency(ws);
-}
-
-/**
- * The provided workspace takes ownership of the buffer [start, start+size).
- * Any existing values in the workspace are ignored (the previously managed
- * buffer, if present, must be separately freed).
- */
+ size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
+ __asan_poison_memory_region(ws->objectEnd, size);
+ }
+#endif
+
+ ws->tableEnd = ws->objectEnd;
+ ws->allocStart = ws->workspaceEnd;
+ ws->allocFailed = 0;
+ if (ws->phase > ZSTD_cwksp_alloc_buffers) {
+ ws->phase = ZSTD_cwksp_alloc_buffers;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/**
+ * The provided workspace takes ownership of the buffer [start, start+size).
+ * Any existing values in the workspace are ignored (the previously managed
+ * buffer, if present, must be separately freed).
+ */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
- DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
- assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
- ws->workspace = start;
- ws->workspaceEnd = (BYTE*)start + size;
- ws->objectEnd = ws->workspace;
- ws->tableValidEnd = ws->objectEnd;
- ws->phase = ZSTD_cwksp_alloc_objects;
+ DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
+ assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
+ ws->workspace = start;
+ ws->workspaceEnd = (BYTE*)start + size;
+ ws->objectEnd = ws->workspace;
+ ws->tableValidEnd = ws->objectEnd;
+ ws->phase = ZSTD_cwksp_alloc_objects;
ws->isStatic = isStatic;
- ZSTD_cwksp_clear(ws);
- ws->workspaceOversizedDuration = 0;
- ZSTD_cwksp_assert_internal_consistency(ws);
-}
-
-MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
+ ZSTD_cwksp_clear(ws);
+ ws->workspaceOversizedDuration = 0;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
void* workspace = ZSTD_customMalloc(size, customMem);
- DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
+ DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
- return 0;
-}
-
-MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
- void *ptr = ws->workspace;
- DEBUGLOG(4, "cwksp: freeing workspace");
+ return 0;
+}
+
+MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
+ void *ptr = ws->workspace;
+ DEBUGLOG(4, "cwksp: freeing workspace");
ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
ZSTD_customFree(ptr, customMem);
-}
-
-/**
- * Moves the management of a workspace from one cwksp to another. The src cwksp
+}
+
+/**
+ * Moves the management of a workspace from one cwksp to another. The src cwksp
* is left in an invalid state (src must be re-init()'ed before it's used again).
- */
-MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
- *dst = *src;
+ */
+MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
+ *dst = *src;
ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
-}
-
-MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
- return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
-}
-
+}
+
+MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+}
+
MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}
-MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
- return ws->allocFailed;
-}
-
-/*-*************************************
-* Functions Checking Free Space
-***************************************/
-
+MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
+ return ws->allocFailed;
+}
+
+/*-*************************************
+* Functions Checking Free Space
+***************************************/
+
/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable limit of the
 * actual amount of space used.
@@ -642,35 +642,35 @@ MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const
}
-MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
- return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
-}
-
-MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
- return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
-}
-
-MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
- return ZSTD_cwksp_check_available(
- ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
-}
-
-MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
- return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
- && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
-}
-
-MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
- ZSTD_cwksp* ws, size_t additionalNeededSpace) {
- if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
- ws->workspaceOversizedDuration++;
- } else {
- ws->workspaceOversizedDuration = 0;
- }
-}
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* ZSTD_CWKSP_H */
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
+}
+
+MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_available(
+ ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
+ && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+}
+
+MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
+ ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
+ ws->workspaceOversizedDuration++;
+ } else {
+ ws->workspaceOversizedDuration = 0;
+ }
+}
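
The pair of helpers above implements the oversized-workspace heuristic: a workspace only counts as wasteful after it has been more than ZSTD_WORKSPACETOOLARGE_FACTOR times too large for more than ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive uses. A hypothetical caller (should_reallocate() is invented for illustration, not a zstd API) might drive it like this:

    /* Hypothetical caller sketch, using only the functions shown above. */
    static int should_reallocate(ZSTD_cwksp* ws, size_t neededSpace) {
        ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);
        return !ZSTD_cwksp_check_available(ws, neededSpace)  /* too small: must grow */
            || ZSTD_cwksp_check_wasteful(ws, neededSpace);   /* long oversized: shrink */
    }
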
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_CWKSP_H */
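
Taken together, the header enforces the allocation order spelled out in its opening comment: objects first, then buffers, then aligned buffers and tables. A minimal usage sketch under that discipline (illustrative sizes, error handling elided; assumes zstd_cwksp.h and its types are in scope):

    #include <stdlib.h>

    static void cwksp_demo(void) {
        void* const mem = malloc((size_t)1 << 20);
        ZSTD_cwksp ws;
        ZSTD_cwksp_init(&ws, mem, (size_t)1 << 20, ZSTD_cwksp_dynamic_alloc);
        (void)ZSTD_cwksp_reserve_object(&ws, 64);    /* phase 1: objects */
        (void)ZSTD_cwksp_reserve_buffer(&ws, 4096);  /* phase 2: buffers */
        (void)ZSTD_cwksp_reserve_table(&ws, 1024);   /* phase 3: aligned/tables */
        /* a further reserve_object() here would fail: phases only advance */
        if (ZSTD_cwksp_reserve_failed(&ws)) { /* workspace too small */ }
        ZSTD_cwksp_clear(&ws);  /* drops buffers/tables, keeps objects */
        free(mem);              /* the caller owns the underlying buffer here */
    }
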
diff --git a/contrib/libs/zstd/lib/compress/zstd_double_fast.c b/contrib/libs/zstd/lib/compress/zstd_double_fast.c
index a666c53671..76933dea26 100644
--- a/contrib/libs/zstd/lib/compress/zstd_double_fast.c
+++ b/contrib/libs/zstd/lib/compress/zstd_double_fast.c
@@ -269,7 +269,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
- /* presumes that, if there is a dictionary, it must be using Attach mode */
+ /* presumes that, if there is a dictionary, it must be using Attach mode */
const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixLowest = base + prefixLowestIndex;
const BYTE* const iend = istart + srcSize;
@@ -544,7 +544,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
const BYTE* const ilimit = iend - 8;
const BYTE* const base = ms->window.base;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
- const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
const U32 dictStartIndex = lowLimit;
const U32 dictLimit = ms->window.dictLimit;
const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
diff --git a/contrib/libs/zstd/lib/compress/zstd_fast.c b/contrib/libs/zstd/lib/compress/zstd_fast.c
index 057e7c6f71..802fc31579 100644
--- a/contrib/libs/zstd/lib/compress/zstd_fast.c
+++ b/contrib/libs/zstd/lib/compress/zstd_fast.c
@@ -8,7 +8,7 @@
* You may select, at your option, one of the above-listed licenses.
*/
-#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
+#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"
@@ -89,7 +89,7 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
*
* This is also the work we do at the beginning to enter the loop initially.
*/
-FORCE_INLINE_TEMPLATE size_t
+FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_noDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
@@ -136,7 +136,7 @@ ZSTD_compressBlock_fast_noDict_generic(
const BYTE* nextStep;
const size_t kStepIncr = (1 << (kSearchStrength - 1));
- DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
ip0 += (ip0 == prefixStart);
{ U32 const curr = (U32)(ip0 - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
@@ -336,7 +336,7 @@ size_t ZSTD_compressBlock_fast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- U32 const mls = ms->cParams.minMatch;
+ U32 const mls = ms->cParams.minMatch;
assert(ms->dictMatchState == NULL);
if (ms->cParams.targetLength > 1) {
switch(mls)
@@ -414,7 +414,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
/* init */
- DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
ip += (dictAndPrefixLength == 0);
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
@@ -528,7 +528,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- U32 const mls = ms->cParams.minMatch;
+ U32 const mls = ms->cParams.minMatch;
assert(ms->dictMatchState != NULL);
switch(mls)
{
@@ -560,7 +560,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
- const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
const U32 dictStartIndex = lowLimit;
const BYTE* const dictStart = dictBase + dictStartIndex;
const U32 dictLimit = ms->window.dictLimit;
@@ -574,7 +574,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
(void)hasStep; /* not currently specialized on whether it's accelerated */
DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
-
+
/* switch to "regular" variant if extDict is invalidated due to maxDistance */
if (prefixStartIndex == dictStartIndex)
return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
@@ -595,12 +595,12 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
& (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
- const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
- size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength);
- ip += rLength;
- anchor = ip;
+ ip += rLength;
+ anchor = ip;
} else {
if ( (matchIndex < dictStartIndex) ||
(MEM_read32(match) != MEM_read32(ip)) ) {
@@ -608,15 +608,15 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
}
- { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
- const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
U32 const offset = curr - matchIndex;
- size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+ size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
- offset_2 = offset_1; offset_1 = offset; /* update offset history */
+ offset_2 = offset_1; offset_1 = offset; /* update offset history */
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
- ip += mLength;
- anchor = ip;
+ ip += mLength;
+ anchor = ip;
} }
if (ip <= ilimit) {
@@ -627,12 +627,12 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
- const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
- { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
+ { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
ip += repLength2;
@@ -659,7 +659,7 @@ size_t ZSTD_compressBlock_fast_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- U32 const mls = ms->cParams.minMatch;
+ U32 const mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
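
The "(intentional underflow)" guard used in the extDict repcode checks above is worth a worked example (values invented for illustration):

    /* With prefixStartIndex = 100, the guard
     *     (U32)((prefixStartIndex-1) - repIndex) >= 3
     * evaluates as follows:
     *   repIndex = 97  -> 99 - 97 = 2          -> rejected (match sits on the
     *                                             dict/prefix seam)
     *   repIndex = 50  -> 99 - 50 = 49         -> accepted (safely in extDict)
     *   repIndex = 150 -> wraps to ~2^32 - 51  -> accepted (in the prefix)
     * i.e. the unsigned wraparound lets a single comparison reject only the
     * three indices straddling the segment boundary. */
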
diff --git a/contrib/libs/zstd/lib/compress/zstd_lazy.c b/contrib/libs/zstd/lib/compress/zstd_lazy.c
index fc88d37246..2e38dcb46d 100644
--- a/contrib/libs/zstd/lib/compress/zstd_lazy.c
+++ b/contrib/libs/zstd/lib/compress/zstd_lazy.c
@@ -662,10 +662,10 @@ size_t ZSTD_HcFindBestMatch(
const BYTE* const dictEnd = dictBase + dictLimit;
const U32 curr = (U32)(ip-base);
const U32 maxDistance = 1U << cParams->windowLog;
- const U32 lowestValid = ms->window.lowLimit;
+ const U32 lowestValid = ms->window.lowLimit;
const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
- const U32 isDictionary = (ms->loadedDictEnd != 0);
- const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
const U32 minChain = curr > chainSize ? curr - chainSize : 0;
U32 nbAttempts = 1U << cParams->searchLog;
size_t ml=4-1;
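
A worked instance of the window clamping in this hunk (illustrative numbers, no dictionary loaded):

    /* windowLog = 20 -> maxDistance = 1 << 20 = 1 048 576
     * curr = 3 000 000, lowestValid = 100 000
     * curr - lowestValid = 2 900 000 > maxDistance, so
     *   lowLimit = curr - maxDistance = 1 951 424
     * only matches inside the last windowSize bytes are searched; with a
     * dictionary loaded (isDictionary != 0), lowLimit stays at lowestValid. */
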
@@ -1441,7 +1441,7 @@ ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_VTABLE)
* Common parser - lazy strategy
*********************************/
typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
-
+
/**
* This table is indexed first by the four ZSTD_dictMode_e values, and then
* by the two searchMethod_e values. NULLs are placed for configurations
@@ -1472,12 +1472,12 @@ ZSTD_selectLazyVTable(ZSTD_matchState_t const* ms, searchMethod_e searchMethod,
}
}
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_lazy_generic(
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_lazy_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
- const searchMethod_e searchMethod, const U32 depth,
+ const searchMethod_e searchMethod, const U32 depth,
ZSTD_dictMode_e const dictMode)
{
const BYTE* const istart = (const BYTE*)src;
@@ -1715,7 +1715,7 @@ _storeSequence:
rep[1] = offset_2 ? offset_2 : savedOffset;
/* Return the last literals size */
- return (size_t)(iend - anchor);
+ return (size_t)(iend - anchor);
}
@@ -1723,56 +1723,56 @@ size_t ZSTD_compressBlock_btlazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
}
size_t ZSTD_compressBlock_greedy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
}
size_t ZSTD_compressBlock_btlazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_greedy_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
}
@@ -1867,7 +1867,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
- const searchMethod_e searchMethod, const U32 depth)
+ const searchMethod_e searchMethod, const U32 depth)
{
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
@@ -2045,7 +2045,7 @@ _storeSequence:
rep[1] = offset_2;
/* Return the last literals size */
- return (size_t)(iend - anchor);
+ return (size_t)(iend - anchor);
}
@@ -2053,7 +2053,7 @@ size_t ZSTD_compressBlock_greedy_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
}
size_t ZSTD_compressBlock_lazy_extDict(
@@ -2061,7 +2061,7 @@ size_t ZSTD_compressBlock_lazy_extDict(
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
}
size_t ZSTD_compressBlock_lazy2_extDict(
@@ -2069,7 +2069,7 @@ size_t ZSTD_compressBlock_lazy2_extDict(
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
}
size_t ZSTD_compressBlock_btlazy2_extDict(
@@ -2077,7 +2077,7 @@ size_t ZSTD_compressBlock_btlazy2_extDict(
void const* src, size_t srcSize)
{
- return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
}
size_t ZSTD_compressBlock_greedy_extDict_row(
diff --git a/contrib/libs/zstd/lib/compress/zstd_ldm.c b/contrib/libs/zstd/lib/compress/zstd_ldm.c
index 232661f3ad..476b45746e 100644
--- a/contrib/libs/zstd/lib/compress/zstd_ldm.c
+++ b/contrib/libs/zstd/lib/compress/zstd_ldm.c
@@ -156,9 +156,9 @@ size_t ZSTD_ldm_getTableSize(ldmParams_t params)
{
size_t const ldmHSize = ((size_t)1) << params.hashLog;
size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
- size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
- size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
- + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
+ size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
+ size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
+ + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
}
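
A worked instance of the table sizing above (parameters invented for illustration):

    /* hashLog = 20, bucketSizeLog = 3:
     *   ldmHSize      = 1 << 20        = 1 048 576 entries
     *   ldmBucketSize = 1 << (20 - 3)  =   131 072 bytes
     *   totalSize     = 131 072 + 1 048 576 * sizeof(ldmEntry_t)
     * ZSTD_cwksp_alloc_size() only adds overhead in sanitized builds. */
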
@@ -710,7 +710,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
rep[i] = rep[i-1];
rep[0] = sequence.offset;
/* Store the sequence */
- ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
+ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
STORE_OFFSET(sequence.offset),
sequence.matchLength);
ip += sequence.matchLength;
diff --git a/contrib/libs/zstd/lib/compress/zstd_opt.c b/contrib/libs/zstd/lib/compress/zstd_opt.c
index eb6f4ee563..1b1ddad428 100644
--- a/contrib/libs/zstd/lib/compress/zstd_opt.c
+++ b/contrib/libs/zstd/lib/compress/zstd_opt.c
@@ -688,21 +688,21 @@ U32 ZSTD_insertBtAndGetAllMatches (
for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
- const BYTE* match;
+ const BYTE* match;
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(curr > matchIndex);
if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
match = base + matchIndex;
- if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
+ if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
} else {
match = dictBase + matchIndex;
- assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
+ assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
if (matchIndex+matchLength >= dictLimit)
- match = base + matchIndex; /* prepare for match[matchLength] read */
+ match = base + matchIndex; /* prepare for match[matchLength] read */
}
if (matchLength > bestLength) {
diff --git a/contrib/libs/zstd/lib/compress/zstdmt_compress.c b/contrib/libs/zstd/lib/compress/zstdmt_compress.c
index 6328a85fee..6bc14b035e 100644
--- a/contrib/libs/zstd/lib/compress/zstdmt_compress.c
+++ b/contrib/libs/zstd/lib/compress/zstdmt_compress.c
@@ -695,7 +695,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
/* init */
if (job->cdict) {
- size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
+ size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
assert(job->firstJob); /* only allowed for first job */
if (ZSTD_isError(initError)) JOB_ERROR(initError);
} else { /* srcStart points at reloaded section */
@@ -711,7 +711,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
ZSTD_dtlm_fast,
NULL, /*cdict*/
- &jobParams, pledgedSrcSize);
+ &jobParams, pledgedSrcSize);
if (ZSTD_isError(initError)) JOB_ERROR(initError);
} }
@@ -977,17 +977,17 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
unsigned jobID;
DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
- /* Copy the mutex/cond out */
- ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
- ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;
-
+ /* Copy the mutex/cond out */
+ ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
+ ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;
+
DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
-
- /* Clear the job description, but keep the mutex/cond */
+
+ /* Clear the job description, but keep the mutex/cond */
ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
- mtctx->jobs[jobID].job_mutex = mutex;
- mtctx->jobs[jobID].job_cond = cond;
+ mtctx->jobs[jobID].job_mutex = mutex;
+ mtctx->jobs[jobID].job_cond = cond;
}
mtctx->inBuff.buffer = g_nullBuffer;
mtctx->inBuff.filled = 0;
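
The copy-out/copy-back dance above preserves live synchronization primitives across a memset of their enclosing struct. In isolation (reduced struct, field names mirror the diff; like the diff itself, this assumes the handles tolerate a structure copy):

    typedef struct {
        ZSTD_pthread_mutex_t job_mutex;
        ZSTD_pthread_cond_t  job_cond;
        /* ...many other fields that must be zeroed between jobs... */
    } Job;

    static void reset_job(Job* job) {
        ZSTD_pthread_mutex_t const mutex = job->job_mutex;  /* copy out */
        ZSTD_pthread_cond_t  const cond  = job->job_cond;
        ZSTD_memset(job, 0, sizeof(*job));                  /* wipe the rest */
        job->job_mutex = mutex;                             /* copy back */
        job->job_cond  = cond;
    }
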
@@ -1149,7 +1149,7 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
/* ===== Multi-threaded compression ===== */
/* ------------------------------------------ */
-static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
+static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
{
unsigned jobLog;
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
@@ -1158,7 +1158,7 @@ static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
* based on cycleLog instead. */
jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3);
} else {
- jobLog = MAX(20, params->cParams.windowLog + 2);
+ jobLog = MAX(20, params->cParams.windowLog + 2);
}
return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
}
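
Plugging representative numbers into the job-size target above (illustrative only):

    /* without LDM, windowLog = 24:
     *   jobLog = MAX(20, 24 + 2) = 26 -> target job size = 1 << 26 = 64 MiB
     *   (then capped at ZSTDMT_JOBLOG_MAX)
     * with LDM the window is often oversized, so the cycleLog-based branch
     * sizes jobs from the match finder's real working set instead. */
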
@@ -1191,21 +1191,21 @@ static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
return ovlog;
}
-static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
+static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
{
- int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
- int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
+ int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
+ int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
assert(0 <= overlapRLog && overlapRLog <= 8);
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* In Long Range Mode, the windowLog is typically oversized.
             * In that case, it's preferable to determine the jobSize
* based on chainLog instead.
* Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
- ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
+ ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
- overlapRLog;
}
assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
- DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
+ DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
return (ovLog==0) ? 0 : (size_t)1 << ovLog;
}
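
And a worked instance of the overlap sizing (illustrative values):

    /* overlapLog = 6, windowLog = 24, no LDM:
     *   overlapRLog = 9 - 6 = 3
     *   ovLog       = 24 - 3 = 21 -> overlap = 1 << 21 = 2 MiB
     * i.e. each job reloads windowSize >> overlapRLog bytes from its
     * predecessor; overlapRLog >= 8 disables the overlap entirely. */
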
@@ -1257,11 +1257,11 @@ size_t ZSTDMT_initCStream_internal(
mtctx->cdict = cdict;
}
- mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
+ mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
mtctx->targetSectionSize = params.jobSize;
if (mtctx->targetSectionSize == 0) {
- mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
+ mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
}
assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
diff --git a/contrib/libs/zstd/lib/decompress/zstd_decompress.c b/contrib/libs/zstd/lib/decompress/zstd_decompress.c
index 9147aaa091..b8bbefd538 100644
--- a/contrib/libs/zstd/lib/decompress/zstd_decompress.c
+++ b/contrib/libs/zstd/lib/decompress/zstd_decompress.c
@@ -229,7 +229,7 @@ size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
static size_t ZSTD_startingInputLength(ZSTD_format_e format)
{
- size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
+ size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
/* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
return startingInputLength;
@@ -610,7 +610,7 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
{
unsigned long long totalDstSize = 0;
- while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
+ while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
U32 const magicNumber = MEM_readLE32(src);
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
@@ -806,10 +806,10 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
***************************************************************/
/** ZSTD_insertBlock() :
- * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+ * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
{
- DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
+ DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
ZSTD_checkContinuity(dctx, blockStart, blockSize);
dctx->previousDstEnd = (const char*)blockStart + blockSize;
return blockSize;
@@ -888,12 +888,12 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
/* check */
RETURN_ERROR_IF(
- remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
+ remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
srcSize_wrong, "");
/* Frame Header */
- { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
- ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
+ ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
srcSize_wrong, "");
@@ -978,7 +978,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
dictSize = ZSTD_DDict_dictSize(ddict);
}
- while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
+ while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
if (ZSTD_isLegacy(src, srcSize)) {
@@ -1410,7 +1410,7 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
for (i=0; i<3; i++) {
U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
- RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
+ RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
dictionary_corrupted, "");
entropy->rep[i] = rep;
} }
@@ -1584,7 +1584,7 @@ size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
- if (dict && dictSize != 0) {
+ if (dict && dictSize != 0) {
dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
dctx->ddict = dctx->ddictLocal;
@@ -1617,14 +1617,14 @@ size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSiz
/* ZSTD_initDStream_usingDict() :
- * return : expected size, aka ZSTD_startingInputLength().
+ * return : expected size, aka ZSTD_startingInputLength().
* this function cannot fail */
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
{
DEBUGLOG(4, "ZSTD_initDStream_usingDict");
FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
- return ZSTD_startingInputLength(zds->format);
+ return ZSTD_startingInputLength(zds->format);
}
/* note : this variant can't fail */
@@ -1641,16 +1641,16 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
- return ZSTD_startingInputLength(dctx->format);
+ return ZSTD_startingInputLength(dctx->format);
}
/* ZSTD_resetDStream() :
- * return : expected size, aka ZSTD_startingInputLength().
+ * return : expected size, aka ZSTD_startingInputLength().
* this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
- return ZSTD_startingInputLength(dctx->format);
+ return ZSTD_startingInputLength(dctx->format);
}
@@ -2012,7 +2012,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
zds->lhSize += remainingInput;
}
input->pos = input->size;
- return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
+ return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
}
assert(ip != NULL);
ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
diff --git a/contrib/libs/zstd/lib/decompress/zstd_decompress_block.c b/contrib/libs/zstd/lib/decompress/zstd_decompress_block.c
index 18fe5e45c4..2e44d30d2f 100644
--- a/contrib/libs/zstd/lib/decompress/zstd_decompress_block.c
+++ b/contrib/libs/zstd/lib/decompress/zstd_decompress_block.c
@@ -120,7 +120,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */
void* dst, size_t dstCapacity, const streaming_operation streaming)
{
- DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
{ const BYTE* const istart = (const BYTE*) src;
@@ -129,7 +129,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
switch(litEncType)
{
case set_repeat:
- DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
+ DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
ZSTD_FALLTHROUGH;
@@ -160,7 +160,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
/* 2 - 2 - 18 - 18 */
lhSize = 5;
litSize = (lhc >> 4) & 0x3FFFF;
- litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
+ litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
break;
}
RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
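
The "2 - 2 - 18 - 18" case above packs two 18-bit fields across 5 little-endian bytes; spelled out (field names from the hunk):

    /* lhc = first 4 header bytes, read little-endian:
     *   bits  0..1  block type        bits  2..3  size format
     *   bits  4..21 litSize  = (lhc >> 4) & 0x3FFFF
     *   bits 22..31 low 10 bits of litCSize
     * istart[4] supplies the high 8 bits:
     *   litCSize = (lhc >> 22) + ((size_t)istart[4] << 10)
     * the '+' acts as a '|' here, since the two parts cannot overlap. */
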
@@ -472,8 +472,8 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
- assert(normalizedCounter[s]>=0);
- symbolNext[s] = (U16)normalizedCounter[s];
+ assert(normalizedCounter[s]>=0);
+ symbolNext[s] = (U16)normalizedCounter[s];
} } }
ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
}
@@ -749,83 +749,83 @@ typedef struct {
size_t prevOffset[ZSTD_REP_NUM];
} seqState_t;
-/*! ZSTD_overlapCopy8() :
- * Copies 8 bytes from ip to op and updates op and ip where ip <= op.
- * If the offset is < 8 then the offset is spread to at least 8 bytes.
- *
- * Precondition: *ip <= *op
- *  Postcondition: *op - *ip >= 8
- */
+/*! ZSTD_overlapCopy8() :
+ * Copies 8 bytes from ip to op and updates op and ip where ip <= op.
+ * If the offset is < 8 then the offset is spread to at least 8 bytes.
+ *
+ * Precondition: *ip <= *op
+ *  Postcondition: *op - *ip >= 8
+ */
HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
- assert(*ip <= *op);
- if (offset < 8) {
- /* close range match, overlap */
- static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
- static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
- int const sub2 = dec64table[offset];
- (*op)[0] = (*ip)[0];
- (*op)[1] = (*ip)[1];
- (*op)[2] = (*ip)[2];
- (*op)[3] = (*ip)[3];
- *ip += dec32table[offset];
- ZSTD_copy4(*op+4, *ip);
- *ip -= sub2;
- } else {
- ZSTD_copy8(*op, *ip);
- }
- *ip += 8;
- *op += 8;
- assert(*op - *ip >= 8);
-}
-
-/*! ZSTD_safecopy() :
- * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
- * and write up to 16 bytes past oend_w (op >= oend_w is allowed).
- * This function is only called in the uncommon case where the sequence is near the end of the block. It
- * should be fast for a single long sequence, but can be slow for several short sequences.
- *
- * @param ovtype controls the overlap detection
- * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
- * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
- * The src buffer must be before the dst buffer.
- */
+ assert(*ip <= *op);
+ if (offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[offset];
+ (*op)[0] = (*ip)[0];
+ (*op)[1] = (*ip)[1];
+ (*op)[2] = (*ip)[2];
+ (*op)[3] = (*ip)[3];
+ *ip += dec32table[offset];
+ ZSTD_copy4(*op+4, *ip);
+ *ip -= sub2;
+ } else {
+ ZSTD_copy8(*op, *ip);
+ }
+ *ip += 8;
+ *op += 8;
+ assert(*op - *ip >= 8);
+}
+
+/*! ZSTD_safecopy() :
+ * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
+ * and write up to 16 bytes past oend_w (op >= oend_w is allowed).
+ * This function is only called in the uncommon case where the sequence is near the end of the block. It
+ * should be fast for a single long sequence, but can be slow for several short sequences.
+ *
+ * @param ovtype controls the overlap detection
+ * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
+ * The src buffer must be before the dst buffer.
+ */
static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
- ptrdiff_t const diff = op - ip;
- BYTE* const oend = op + length;
-
- assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
- (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
-
- if (length < 8) {
- /* Handle short lengths. */
- while (op < oend) *op++ = *ip++;
- return;
- }
- if (ovtype == ZSTD_overlap_src_before_dst) {
- /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
- assert(length >= 8);
- ZSTD_overlapCopy8(&op, &ip, diff);
+ ptrdiff_t const diff = op - ip;
+ BYTE* const oend = op + length;
+
+ assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
+ (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
+
+ if (length < 8) {
+ /* Handle short lengths. */
+ while (op < oend) *op++ = *ip++;
+ return;
+ }
+ if (ovtype == ZSTD_overlap_src_before_dst) {
+ /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
+ assert(length >= 8);
+ ZSTD_overlapCopy8(&op, &ip, diff);
length -= 8;
- assert(op - ip >= 8);
- assert(op <= oend);
- }
-
- if (oend <= oend_w) {
- /* No risk of overwrite. */
- ZSTD_wildcopy(op, ip, length, ovtype);
- return;
- }
- if (op <= oend_w) {
- /* Wildcopy until we get close to the end. */
- assert(oend > oend_w);
- ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
- ip += oend_w - op;
+ assert(op - ip >= 8);
+ assert(op <= oend);
+ }
+
+ if (oend <= oend_w) {
+ /* No risk of overwrite. */
+ ZSTD_wildcopy(op, ip, length, ovtype);
+ return;
+ }
+ if (op <= oend_w) {
+ /* Wildcopy until we get close to the end. */
+ assert(oend > oend_w);
+ ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
+ ip += oend_w - op;
op += oend_w - op;
- }
- /* Handle the leftovers. */
- while (op < oend) *op++ = *ip++;
-}
-
+ }
+ /* Handle the leftovers. */
+ while (op < oend) *op++ = *ip++;
+}
+
/* ZSTD_safecopyDstBeforeSrc():
* This version allows overlap with dst before src, or handles the non-overlap case with dst after src
* Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */
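ZSTD_overlapCopy8() above widens small offsets so later copies can use full 8-byte stores: the first four bytes go byte by byte (they may overlap), the source is then nudged forward by dec32table[offset] so one 4-byte copy is non-overlapping, and finally rewound by dec64table[offset] so the residual distance is at least 8. A standalone sketch of the same trick, with plain memcpy standing in for ZSTD_copy4/ZSTD_copy8:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    static void overlapCopy8(unsigned char** op, const unsigned char** ip, size_t offset)
    {
        assert(*ip <= *op);
        if (offset < 8) {
            static const unsigned dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };  /* added */
            static const int      dec64table[] = { 8, 8, 8, 7, 8, 9, 10, 11 }; /* subtracted */
            int const sub2 = dec64table[offset];
            (*op)[0] = (*ip)[0];
            (*op)[1] = (*ip)[1];
            (*op)[2] = (*ip)[2];
            (*op)[3] = (*ip)[3];
            *ip += dec32table[offset];
            memcpy(*op + 4, *ip, 4);   /* non-overlapping after the nudge */
            *ip -= sub2;
        } else {
            memcpy(*op, *ip, 8);
        }
        *ip += 8; *op += 8;
        assert(*op - *ip >= 8);
    }

    int main(void)
    {
        unsigned char buf[32] = {0};
        unsigned char* op;
        const unsigned char* ip;
        memcpy(buf + 8, "abc", 3);   /* 3 bytes of "history" */
        op = buf + 11;               /* write right after it */
        ip = buf + 8;                /* match source: offset 3 behind op */
        overlapCopy8(&op, &ip, 3);
        printf("%.11s\n", buf + 8);  /* prints "abcabcabcab" */
        return 0;
    }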
@@ -849,16 +849,16 @@ static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length
while (op < oend) *op++ = *ip++;
}
-/* ZSTD_execSequenceEnd():
- * This version handles cases that are near the end of the output buffer. It requires
- * more careful checks to make sure there is no overflow. By separating out these hard
- * and unlikely cases, we can speed up the common cases.
- *
- * NOTE: This function needs to be fast for a single long sequence, but doesn't need
- * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
- */
+/* ZSTD_execSequenceEnd():
+ * This version handles cases that are near the end of the output buffer. It requires
+ * more careful checks to make sure there is no overflow. By separating out these hard
+ * and unlikely cases, we can speed up the common cases.
+ *
+ * NOTE: This function needs to be fast for a single long sequence, but doesn't need
+ * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
+ */
FORCE_NOINLINE
-size_t ZSTD_execSequenceEnd(BYTE* op,
+size_t ZSTD_execSequenceEnd(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
@@ -867,7 +867,7 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
- BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
/* bounds checks : careful of address space overflow in 32-bit mode */
RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
@@ -876,12 +876,12 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
assert(oLitEnd < op + sequenceLength);
/* copy literals */
- ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
- op = oLitEnd;
- *litPtr = iLitEnd;
+ ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
+ op = oLitEnd;
+ *litPtr = iLitEnd;
/* copy Match */
- if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
match = dictEnd - (prefixStart - match);
@@ -897,7 +897,7 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
match = prefixStart;
}
}
- ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
+ ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
return sequenceLength;
}
@@ -1067,27 +1067,27 @@ size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
(MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
- /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
+ /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
assert(op <= oLitEnd /* No overflow */);
assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
assert(oMatchEnd <= oend /* No underflow */);
- assert(iLitEnd <= litLimit /* Literal length is in bounds */);
- assert(oLitEnd <= oend_w /* Can wildcopy literals */);
- assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
-
- /* Copy Literals:
- * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
- * We likely don't need the full 32-byte wildcopy.
- */
- assert(WILDCOPY_OVERLENGTH >= 16);
- ZSTD_copy16(op, (*litPtr));
+ assert(iLitEnd <= litLimit /* Literal length is in bounds */);
+ assert(oLitEnd <= oend_w /* Can wildcopy literals */);
+ assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
+
+ /* Copy Literals:
+ * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
+ * We likely don't need the full 32-byte wildcopy.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(op, (*litPtr));
if (UNLIKELY(sequence.litLength > 16)) {
- ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
- }
+ ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
+ }
op = oLitEnd;
*litPtr = iLitEnd; /* update for next sequence */
- /* Copy Match */
+ /* Copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix -> go into extDict */
RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
@@ -1103,32 +1103,32 @@ size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
sequence.matchLength -= length1;
match = prefixStart;
} }
- /* Match within prefix of 1 or more bytes */
- assert(op <= oMatchEnd);
- assert(oMatchEnd <= oend_w);
- assert(match >= prefixStart);
- assert(sequence.matchLength >= 1);
-
- /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
- * without overlap checking.
- */
+ /* Match within prefix of 1 or more bytes */
+ assert(op <= oMatchEnd);
+ assert(oMatchEnd <= oend_w);
+ assert(match >= prefixStart);
+ assert(sequence.matchLength >= 1);
+
+ /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
+ * without overlap checking.
+ */
if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
- /* We bet on a full wildcopy for matches, since we expect matches to be
- * longer than literals (in general). In silesia, ~10% of matches are longer
- * than 16 bytes.
- */
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
- return sequenceLength;
+ /* We bet on a full wildcopy for matches, since we expect matches to be
+ * longer than literals (in general). In silesia, ~10% of matches are longer
+ * than 16 bytes.
+ */
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+ return sequenceLength;
}
- assert(sequence.offset < WILDCOPY_VECLEN);
+ assert(sequence.offset < WILDCOPY_VECLEN);
- /* Copy 8 bytes and spread the offset to be >= 8. */
- ZSTD_overlapCopy8(&op, &match, sequence.offset);
+ /* Copy 8 bytes and spread the offset to be >= 8. */
+ ZSTD_overlapCopy8(&op, &match, sequence.offset);
- /* If the match length is > 8 bytes, then continue with the wildcopy. */
- if (sequence.matchLength > 8) {
- assert(op < oMatchEnd);
- ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
+ /* If the match length is > 8 bytes, then continue with the wildcopy. */
+ if (sequence.matchLength > 8) {
+ assert(op < oMatchEnd);
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
}
return sequenceLength;
}
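The fast path above leans on ZSTD_wildcopy(), whose contract is the interesting part: it copies in fixed-size chunks and may write up to WILDCOPY_OVERLENGTH bytes past the requested length, so callers must keep that much slack at the end of the output buffer. A minimal sketch of the non-overlapping variant (the chunk size and overlength mirror lib/common/zstd_internal.h, but treat them as illustrative here):

    #include <stddef.h>
    #include <string.h>

    #define WILDCOPY_OVERLENGTH 32   /* mirrors lib/common/zstd_internal.h */

    /* Copy `length` (>= 0) bytes in 16-byte chunks; the final chunk may run
     * past the end, which is the whole point: no tail handling, no branches. */
    static void wildcopy(void* dst, const void* src, ptrdiff_t length)
    {
        unsigned char* op = (unsigned char*)dst;
        const unsigned char* ip = (const unsigned char*)src;
        unsigned char* const oend = op + length;
        do {
            memcpy(op, ip, 16);   /* may write past oend, within the slack */
            op += 16; ip += 16;
        } while (op < oend);
    }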
diff --git a/contrib/libs/zstd/lib/dictBuilder/cover.c b/contrib/libs/zstd/lib/dictBuilder/cover.c
index 0d129c039b..028802a1b0 100644
--- a/contrib/libs/zstd/lib/dictBuilder/cover.c
+++ b/contrib/libs/zstd/lib/dictBuilder/cover.c
@@ -655,8 +655,8 @@ void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLeve
"compared to the source size %u! "
"size(source)/size(dictionary) = %f, but it should be >= "
"10! This may lead to a subpar dictionary! We recommend "
- "training on sources at least 10x, and preferably 100x "
- "the size of the dictionary! \n", (U32)maxDictSize,
+ "training on sources at least 10x, and preferably 100x "
+ "the size of the dictionary! \n", (U32)maxDictSize,
(U32)nbDmers, ratio);
}
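The 10x/100x corpus-to-dictionary ratio in this warning is the main knob users control. A sketch of the corresponding training call, assuming the caller has already concatenated its samples (ZDICT_trainFromBuffer() is the stable entry point in zdict.h):

    #include <stdio.h>
    #include <zdict.h>

    size_t train(void* dictBuf, size_t dictCap,
                 const void* samples, const size_t* sampleSizes, unsigned nbSamples)
    {
        /* samples = concatenated training set; sampleSizes[i] = size of sample i */
        size_t const dictSize = ZDICT_trainFromBuffer(dictBuf, dictCap,
                                                      samples, sampleSizes, nbSamples);
        if (ZDICT_isError(dictSize))
            fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
        return dictSize;
    }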
@@ -936,11 +936,11 @@ void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
}
}
/* Save the dictionary, parameters, and size */
- if (dict) {
- memcpy(best->dict, dict, dictSize);
- best->dictSize = dictSize;
- best->parameters = parameters;
- best->compressedSize = compressedSize;
+ if (dict) {
+ memcpy(best->dict, dict, dictSize);
+ best->dictSize = dictSize;
+ best->parameters = parameters;
+ best->compressedSize = compressedSize;
}
}
if (liveJobs == 0) {
diff --git a/contrib/libs/zstd/lib/dictBuilder/zdict.c b/contrib/libs/zstd/lib/dictBuilder/zdict.c
index 5773af1063..587df6b861 100644
--- a/contrib/libs/zstd/lib/dictBuilder/zdict.c
+++ b/contrib/libs/zstd/lib/dictBuilder/zdict.c
@@ -619,7 +619,7 @@ static void ZDICT_fillNoise(void* buffer, size_t length)
unsigned const prime1 = 2654435761U;
unsigned const prime2 = 2246822519U;
unsigned acc = prime1;
- size_t p=0;
+ size_t p=0;
for (p=0; p<length; p++) {
acc *= prime2;
((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v01.c b/contrib/libs/zstd/lib/legacy/zstd_v01.c
index 542f5ea9e7..23caaef564 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v01.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v01.c
@@ -342,7 +342,7 @@ FORCE_INLINE unsigned FSE_highbit32 (U32 val)
unsigned long r;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
+ return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
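The intrinsic used above, `__builtin_clz(val) ^ 31`, is a branch-free highest-set-bit: for a non-zero 32-bit value, clz is in [0,31], so XOR with 31 equals 31 - clz, i.e. floor(log2(val)). A small self-checking sketch, assuming a 32-bit unsigned int as the surrounding #if does:

    #include <assert.h>
    #include <stdio.h>

    static unsigned highbit32(unsigned val)
    {
        assert(val != 0);   /* __builtin_clz(0) is undefined */
        return __builtin_clz(val) ^ 31;
    }

    int main(void)
    {
        assert(highbit32(1) == 0);
        assert(highbit32(0x80000000u) == 31);
        assert(highbit32(1000) == 9);   /* 2^9 = 512 <= 1000 < 1024 */
        printf("ok\n");
        return 0;
    }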
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v02.c b/contrib/libs/zstd/lib/legacy/zstd_v02.c
index 379854ff74..2f473a7573 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v02.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v02.c
@@ -353,7 +353,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
unsigned long r;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
+ return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
@@ -2891,7 +2891,7 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
- if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+ if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v03.c b/contrib/libs/zstd/lib/legacy/zstd_v03.c
index ad2552f421..6625f4df1c 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v03.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v03.c
@@ -356,7 +356,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
unsigned long r;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
+ return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
@@ -2532,7 +2532,7 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
- if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+ if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v04.c b/contrib/libs/zstd/lib/legacy/zstd_v04.c
index 55c81796f4..8d305c7eae 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v04.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v04.c
@@ -627,7 +627,7 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val)
unsigned long r;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
+ return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
@@ -2657,7 +2657,7 @@ static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
- if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+ if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;
@@ -3039,12 +3039,12 @@ static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
{
/* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
-    size_t litCSize;
-
-    if (srcSize > BLOCKSIZE) return ERROR(corruption_detected);
+    size_t litCSize;
+
+    if (srcSize > BLOCKSIZE) return ERROR(corruption_detected);
/* Decode literals sub-block */
- litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+ litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v05.c b/contrib/libs/zstd/lib/legacy/zstd_v05.c
index 857e765325..795dfb410c 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v05.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v05.c
@@ -756,7 +756,7 @@ MEM_STATIC unsigned BITv05_highbit32 (U32 val)
unsigned long r;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
+ return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v06.c b/contrib/libs/zstd/lib/legacy/zstd_v06.c
index f668614dd9..ead213c484 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v06.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v06.c
@@ -860,7 +860,7 @@ MEM_STATIC unsigned BITv06_highbit32 ( U32 val)
unsigned long r;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
+ return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
diff --git a/contrib/libs/zstd/lib/legacy/zstd_v07.c b/contrib/libs/zstd/lib/legacy/zstd_v07.c
index 38e4a17d9c..189f6ede69 100644
--- a/contrib/libs/zstd/lib/legacy/zstd_v07.c
+++ b/contrib/libs/zstd/lib/legacy/zstd_v07.c
@@ -530,7 +530,7 @@ MEM_STATIC unsigned BITv07_highbit32 (U32 val)
unsigned long r;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
- return __builtin_clz (val) ^ 31;
+ return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
diff --git a/contrib/libs/zstd/lib/zstd.h b/contrib/libs/zstd/lib/zstd.h
index a29ddde89a..a88ae7bf8e 100644
--- a/contrib/libs/zstd/lib/zstd.h
+++ b/contrib/libs/zstd/lib/zstd.h
@@ -15,7 +15,7 @@ extern "C" {
#define ZSTD_H_235446
/* ====== Dependency ======*/
-#include <limits.h> /* INT_MAX */
+#include <limits.h> /* INT_MAX */
#include <stddef.h> /* size_t */
@@ -204,13 +204,13 @@ ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */
/*! ZSTD_compressCCtx() :
- * Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
- * Important : in order to behave similarly to `ZSTD_compress()`,
- * this function compresses at requested compression level,
- * __ignoring any other parameter__ .
- *  If any advanced parameters were set using the advanced API,
- * they will all be reset. Only `compressionLevel` remains.
- */
+ * Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
+ * Important : in order to behave similarly to `ZSTD_compress()`,
+ * this function compresses at requested compression level,
+ * __ignoring any other parameter__ .
+ *  If any advanced parameters were set using the advanced API,
+ * they will all be reset. Only `compressionLevel` remains.
+ */
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -245,7 +245,7 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
* using ZSTD_CCtx_set*() functions.
* Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
* "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
- * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
+ * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
*
* It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
*
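The sticky/non-sticky contrast is easiest to see side by side. A minimal sketch using only stable API calls (message and buffer sizes are arbitrary): ZSTD_compress2() honours parameters set via ZSTD_CCtx_setParameter(), while ZSTD_compressCCtx() resets them and obeys only its level argument.

    #include <stdio.h>
    #include <zstd.h>

    int main(void)
    {
        const char src[] = "sticky parameters demo, sticky parameters demo";
        char dst[256];
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();

        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);

        /* Honours level 19 + checksum. */
        size_t const a = ZSTD_compress2(cctx, dst, sizeof dst, src, sizeof src);

        /* Resets advanced parameters; compresses at level 1, no checksum. */
        size_t const b = ZSTD_compressCCtx(cctx, dst, sizeof dst, src, sizeof src, 1);

        printf("compress2=%zu compressCCtx=%zu\n", a, b);
        ZSTD_freeCCtx(cctx);
        return ZSTD_isError(a) || ZSTD_isError(b);
    }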
@@ -272,11 +272,11 @@ typedef enum {
/* compression parameters
* Note: When compressing with a ZSTD_CDict these parameters are superseded
- * by the parameters used to construct the ZSTD_CDict.
- * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
- ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
- * Note that exact compression parameters are dynamically determined,
- * depending on both compression level and srcSize (when known).
+ * by the parameters used to construct the ZSTD_CDict.
+ * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
+ ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
+ * Note that exact compression parameters are dynamically determined,
+ * depending on both compression level and srcSize (when known).
* Default level is ZSTD_CLEVEL_DEFAULT==3.
* Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
* Note 1 : it's possible to pass a negative compression level.
@@ -284,17 +284,17 @@ typedef enum {
* to default. Setting this will however eventually dynamically impact the compression
* parameters which have not been manually set. The manually set
* ones will 'stick'. */
- /* Advanced compression parameters :
- * It's possible to pin down compression parameters to some specific values.
- * In which case, these values are no longer dynamically selected by the compressor */
+ /* Advanced compression parameters :
+ * It's possible to pin down compression parameters to some specific values.
+ * In which case, these values are no longer dynamically selected by the compressor */
ZSTD_c_windowLog=101, /* Maximum allowed back-reference distance, expressed as power of 2.
- * This will set a memory budget for streaming decompression,
- * with larger values requiring more memory
- * and typically compressing more.
+ * This will set a memory budget for streaming decompression,
+ * with larger values requiring more memory
+ * and typically compressing more.
* Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
* Special: value 0 means "use default windowLog".
* Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
- * requires explicitly allowing such size at streaming decompression stage. */
+ * requires explicitly allowing such size at streaming decompression stage. */
ZSTD_c_hashLog=102, /* Size of the initial probe table, as a power of 2.
* Resulting memory usage is (1 << (hashLog+2)).
* Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
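A sketch of the windowLog note above: a window raised past ZSTD_WINDOWLOG_LIMIT_DEFAULT at compression time must be explicitly allowed on the decompression side, or streaming decompression will refuse the frame. The values here are illustrative; return codes are ignored for brevity.

    #include <zstd.h>

    /* Compression picks a 1 GiB window; the decompressor must raise its
     * default window limit to match, or it errors out on such frames. */
    void configureWindow(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 30);
        ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 30);
    }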
@@ -305,13 +305,13 @@ typedef enum {
* Resulting memory usage is (1 << (chainLog+2)).
* Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
* Larger tables result in better and slower compression.
- * This parameter is useless for "fast" strategy.
+ * This parameter is useless for "fast" strategy.
* It's still useful when using "dfast" strategy,
* in which case it defines a secondary probe table.
* Special: value 0 means "use default chainLog". */
ZSTD_c_searchLog=104, /* Number of search attempts, as a power of 2.
* More attempts result in better and slower compression.
- * This parameter is useless for "fast" and "dFast" strategies.
+ * This parameter is useless for "fast" and "dFast" strategies.
* Special: value 0 means "use default searchLog". */
ZSTD_c_minMatch=105, /* Minimum size of searched matches.
* Note that Zstandard can still find matches of smaller size,
@@ -367,7 +367,7 @@ typedef enum {
ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
* Content size must be known at the beginning of compression.
* This is automatically the case when using ZSTD_compress2(),
- * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
+ * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
ZSTD_c_dictIDFlag=202, /* When applicable, dictionary's ID is written into frame header (default:1) */
@@ -390,7 +390,7 @@ typedef enum {
* Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
* 0 means default, which is dynamically determined based on compression parameters.
* Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is largest.
- * The minimum size is automatically and transparently enforced. */
+ * The minimum size is automatically and transparently enforced. */
ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size.
* The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
* It helps preserve compression ratio, while each job is compressed in parallel.
@@ -413,7 +413,7 @@ typedef enum {
* ZSTD_c_forceAttachDict
* ZSTD_c_literalCompressionMode
* ZSTD_c_targetCBlockSize
- * ZSTD_c_srcSizeHint
+ * ZSTD_c_srcSizeHint
* ZSTD_c_enableDedicatedDictSearch
* ZSTD_c_stableInBuffer
* ZSTD_c_stableOutBuffer
@@ -844,17 +844,17 @@ ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
typedef struct ZSTD_CDict_s ZSTD_CDict;
/*! ZSTD_createCDict() :
- * When compressing multiple messages or blocks using the same dictionary,
- * it's recommended to digest the dictionary only once, since it's a costly operation.
- * ZSTD_createCDict() will create a state from digesting a dictionary.
- * The resulting state can be used for future compression operations with very limited startup cost.
+ * When compressing multiple messages or blocks using the same dictionary,
+ * it's recommended to digest the dictionary only once, since it's a costly operation.
+ * ZSTD_createCDict() will create a state from digesting a dictionary.
+ * The resulting state can be used for future compression operations with very limited startup cost.
* ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
- * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
- * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
- * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
- * in which case the only thing that it transports is the @compressionLevel.
- * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
- * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
+ * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
+ * Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
+ * Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
+ * in which case the only thing that it transports is the @compressionLevel.
+ * This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
+ * expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
int compressionLevel);
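A sketch of the digest-once pattern this comment recommends: create the ZSTD_CDict once, then reuse it across many small messages (and, read-only, across threads). Concatenating all frames into one dst buffer is an arbitrary choice for the sketch.

    #include <zstd.h>

    size_t compressMany(const void* dict, size_t dictSize,
                        const void* msgs[], const size_t msgSizes[], size_t n,
                        void* dst, size_t dstCap)
    {
        ZSTD_CDict* const cdict = ZSTD_createCDict(dict, dictSize, 3);
        ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
        size_t total = 0;
        for (size_t i = 0; i < n; i++) {
            size_t const c = ZSTD_compress_usingCDict(cctx, (char*)dst + total,
                                                      dstCap - total,
                                                      msgs[i], msgSizes[i], cdict);
            if (ZSTD_isError(c)) { total = c; break; }  /* propagate error code */
            total += c;
        }
        ZSTD_freeCCtx(cctx);
        ZSTD_freeCDict(cdict);
        return total;
    }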
@@ -989,7 +989,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
* Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
* It's a CPU consuming operation, with non-negligible impact on latency.
* If there is a need to use the same prefix multiple times, consider loadDictionary instead.
- * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
+ * Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
* Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
const void* prefix, size_t prefixSize);
@@ -1040,7 +1040,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
* Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
* Prefix buffer must remain unmodified up to the end of frame,
* reached when ZSTD_decompressStream() returns 0.
- * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ * Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
* Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
* Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
* A full dictionary is more costly, as it requires building tables.
@@ -1118,8 +1118,8 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
* Some of them might be removed in the future (especially when redundant with existing stable functions)
* ***************************************************************************************/
-#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */
-#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2)
+#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1) /* minimum input size required to query frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN(format) ((format) == ZSTD_f_zstd1 ? 6 : 2)
#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* can be useful for static allocation */
#define ZSTD_SKIPPABLEHEADERSIZE 8
@@ -1167,8 +1167,8 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
/* Advanced parameter bounds */
#define ZSTD_TARGETCBLOCKSIZE_MIN 64
#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
-#define ZSTD_SRCSIZEHINT_MIN 0
-#define ZSTD_SRCSIZEHINT_MAX INT_MAX
+#define ZSTD_SRCSIZEHINT_MIN 0
+#define ZSTD_SRCSIZEHINT_MAX INT_MAX
/* --- Advanced types --- */
@@ -1210,9 +1210,9 @@ typedef struct {
* sequence provider's perspective. For example, ZSTD_compressSequences() does not
* use this 'rep' field at all (as of now).
*/
-} ZSTD_Sequence;
-
-typedef struct {
+} ZSTD_Sequence;
+
+typedef struct {
unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */
unsigned chainLog; /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
unsigned hashLog; /**< dispatch table : larger == faster, more memory */
@@ -1241,12 +1241,12 @@ typedef enum {
typedef enum {
ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
- ZSTD_dlm_byRef = 1 /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
+ ZSTD_dlm_byRef = 1 /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
} ZSTD_dictLoadMethod_e;
typedef enum {
ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */
- ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number.
+ ZSTD_f_zstd1_magicless = 1 /* Variant of zstd frame format, without initial 4-bytes magic number.
* Useful to save 4 bytes per generated frame.
* Decoder cannot automatically recognise this format, requiring this instruction. */
} ZSTD_format_e;
@@ -1269,7 +1269,7 @@ typedef enum {
* to evolve and should be considered only in the context of extremely
* advanced performance tuning.
*
- * Zstd currently supports the use of a CDict in three ways:
+ * Zstd currently supports the use of a CDict in three ways:
*
* - The contents of the CDict can be copied into the working context. This
* means that the compression can search both the dictionary and input
@@ -1285,12 +1285,12 @@ typedef enum {
* working context's tables can be reused). For small inputs, this can be
* faster than copying the CDict's tables.
*
- * - The CDict's tables are not used at all, and instead we use the working
- * context alone to reload the dictionary and use params based on the source
- * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
- * This method is effective when the dictionary sizes are very small relative
- * to the input size, and the input size is fairly large to begin with.
- *
+ * - The CDict's tables are not used at all, and instead we use the working
+ * context alone to reload the dictionary and use params based on the source
+ * size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
+ * This method is effective when the dictionary sizes are very small relative
+ * to the input size, and the input size is fairly large to begin with.
+ *
* Zstd has a simple internal heuristic that selects which strategy to use
* at the beginning of a compression. However, if experimentation shows that
* Zstd is making poor choices, it is possible to override that choice with
@@ -1299,7 +1299,7 @@ typedef enum {
ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
ZSTD_dictForceAttach = 1, /* Never copy the dictionary. */
ZSTD_dictForceCopy = 2, /* Always copy the dictionary. */
- ZSTD_dictForceLoad = 3 /* Always reload the dictionary */
+ ZSTD_dictForceLoad = 3 /* Always reload the dictionary */
} ZSTD_dictAttachPref_e;
typedef enum {
@@ -1308,7 +1308,7 @@ typedef enum {
* levels will be compressed. */
ZSTD_lcm_huffman = 1, /**< Always attempt Huffman compression. Uncompressed literals will still be
* emitted if Huffman compression is not profitable. */
- ZSTD_lcm_uncompressed = 2 /**< Always emit uncompressed literals. */
+ ZSTD_lcm_uncompressed = 2 /**< Always emit uncompressed literals. */
} ZSTD_literalCompressionMode_e;
typedef enum {
@@ -1382,17 +1382,17 @@ typedef enum {
* litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
* simply acts as a block delimiter.
*
- * zc can be used to insert custom compression params.
- * This function invokes ZSTD_compress2
+ * zc can be used to insert custom compression params.
+ * This function invokes ZSTD_compress2
*
* The output of this function can be fed into ZSTD_compressSequences() with CCtx
* setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
* @return : number of sequences generated
- */
+ */
ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize);
-
+
/*! ZSTD_mergeBlockDelimiters() :
* Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
* by merging them into the literals of the next sequence.
@@ -1483,7 +1483,7 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
/*! ZSTD_estimate*() :
* These functions make it possible to estimate memory usage
* of a future {D,C}Ctx, before its creation.
- *
+ *
* ZSTD_estimateCCtxSize() will provide a memory budget large enough
* for any compression level up to the selected one.
* Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
@@ -1491,7 +1491,7 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
* Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
* The estimate will assume the input may be arbitrarily large,
* which is the worst case.
- *
+ *
* When srcSize can be bound by a known and rather "small" value,
* this fact can be used to provide a tighter estimation
* because the CCtx compression context will need less memory.
@@ -1643,8 +1643,8 @@ ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced(
* Create a digested dictionary for compression
* Dictionary content is just referenced, not duplicated.
* As a consequence, `dictBuffer` **must** outlive CDict,
- * and its content must remain unmodified throughout the lifetime of CDict.
- * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
+ * and its content must remain unmodified throughout the lifetime of CDict.
+ * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
/*! ZSTD_getCParams() :
@@ -1671,8 +1671,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
/*! ZSTD_compress_advanced() :
- * Note : this function is now DEPRECATED.
- * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
+ * Note : this function is now DEPRECATED.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
* This prototype will generate compilation warnings. */
ZSTD_DEPRECATED("use ZSTD_compress2")
size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
@@ -1683,7 +1683,7 @@ size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
/*! ZSTD_compress_usingCDict_advanced() :
* Note : this function is now DEPRECATED.
- * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
+ * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
* This prototype will generate compilation warnings. */
ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
@@ -1763,12 +1763,12 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* There is no guarantee on compressed block size (default:0) */
#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
-/* User's best guess of source size.
- * Hint is not valid when srcSizeHint == 0.
- * There is no guarantee that hint is close to actual source size,
- * but compression ratio may regress significantly if the guess considerably underestimates it */
-#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
-
+/* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size,
+ * but compression ratio may regress significantly if the guess considerably underestimates it */
+#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
+
/* Controls whether the new and experimental "dedicated dictionary search
* structure" can be used. This feature is still rough around the edges, be
* prepared for surprising behavior!
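A sketch of feeding that guess through the advanced API; ZSTD_c_srcSizeHint is experimental (it requires ZSTD_STATIC_LINKING_ONLY), so this is illustrative only. The hint tunes parameter selection; a bad guess can hurt ratio but not correctness.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <limits.h>
    #include <zstd.h>

    void hintSrcSize(ZSTD_CCtx* cctx, unsigned long long expectedSrcSize)
    {
        /* clamp to the documented ZSTD_SRCSIZEHINT_MAX (INT_MAX) bound */
        int const hint = expectedSrcSize > INT_MAX ? INT_MAX : (int)expectedSrcSize;
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_srcSizeHint, hint);
    }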
@@ -2203,9 +2203,9 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
- int compressionLevel,
- unsigned long long pledgedSrcSize);
-
+ int compressionLevel,
+ unsigned long long pledgedSrcSize);
+
/*! ZSTD_initCStream_usingDict() :
* This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
@@ -2214,36 +2214,36 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
*
* Creates an internal CDict (incompatible with static CCtx), except if
* dict == NULL or dictSize < 8, in which case no dict is used.
- * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
+ * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
* it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
- const void* dict, size_t dictSize,
- int compressionLevel);
-
+ const void* dict, size_t dictSize,
+ int compressionLevel);
+
/*! ZSTD_initCStream_advanced() :
* This function is DEPRECATED, and is approximately equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- * // Pseudocode: Set each zstd parameter and leave the rest as-is.
- * for ((param, value) : params) {
- * ZSTD_CCtx_setParameter(zcs, param, value);
- * }
+ * // Pseudocode: Set each zstd parameter and leave the rest as-is.
+ * for ((param, value) : params) {
+ * ZSTD_CCtx_setParameter(zcs, param, value);
+ * }
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
*
- * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
- * pledgedSrcSize must be correct.
- * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
+ * pledgedSrcSize must be correct.
+ * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
- const void* dict, size_t dictSize,
- ZSTD_parameters params,
- unsigned long long pledgedSrcSize);
-
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params,
+ unsigned long long pledgedSrcSize);
+
/*! ZSTD_initCStream_usingCDict() :
* This function is DEPRECATED, and equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
@@ -2254,14 +2254,14 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
-
+
/*! ZSTD_initCStream_usingCDict_advanced() :
- * This function is DEPRECATED, and is approximately equivalent to:
+ * This function is DEPRECATED, and is approximately equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- * // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
- * for ((fParam, value) : fParams) {
- * ZSTD_CCtx_setParameter(zcs, fParam, value);
- * }
+ * // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
+ * for ((fParam, value) : fParams) {
+ * ZSTD_CCtx_setParameter(zcs, fParam, value);
+ * }
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
@@ -2272,9 +2272,9 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
- const ZSTD_CDict* cdict,
- ZSTD_frameParameters fParams,
- unsigned long long pledgedSrcSize);
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize);
/*! ZSTD_resetCStream() :
* This function is DEPRECATED, and is equivalent to:
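All of these deprecation notes reduce to the same modern sequence. A sketch of the replacement for ZSTD_initCStream_advanced(), with two arbitrary parameters standing in for the pseudocode's params loop (ZSTD_CStream is an alias of ZSTD_CCtx, so the ZSTD_CCtx_* setters apply directly):

    #include <zstd.h>

    size_t initStream(ZSTD_CStream* zcs,
                      const void* dict, size_t dictSize,
                      unsigned long long pledgedSrcSize)
    {
        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
        ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, 9); /* example params */
        ZSTD_CCtx_setParameter(zcs, ZSTD_c_checksumFlag, 1);
        ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);  /* ZSTD_CONTENTSIZE_UNKNOWN if unknown */
        return ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
    }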
@@ -2340,10 +2340,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
* ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
*
* note: no dictionary will be used if dict == NULL or dictSize < 8
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
-
+
/*!
* This function is deprecated, and is equivalent to:
*
@@ -2351,17 +2351,17 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const vo
* ZSTD_DCtx_refDDict(zds, ddict);
*
* note : ddict is referenced, it must outlive decompression session
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
-
+
/*!
* This function is deprecated, and is equivalent to:
*
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
*
* re-use decompression parameters from previous init; saves dictionary loading
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
@@ -2536,8 +2536,8 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
/*!
Block functions produce and decode raw zstd blocks, without frame metadata.
- Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
-  But users must then handle the metadata needed to regenerate the data, such as compressed and content sizes.
+ Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+  But users must then handle the metadata needed to regenerate the data, such as compressed and content sizes.
A few rules to respect :
- Compressing and decompressing require a context structure
@@ -2548,14 +2548,14 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+ copyCCtx() and copyDCtx() can be used too
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
- + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
- Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
- - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
- ===> In which case, nothing is produced into `dst` !
- + User __must__ test for such outcome and deal directly with uncompressed data
- + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
-      Doing so would mess up the statistics history, leading to potential data corruption.
- + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
+ + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
+ Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
+ - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
+ ===> In which case, nothing is produced into `dst` !
+ + User __must__ test for such outcome and deal directly with uncompressed data
+ + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
+      Doing so would mess up the statistics history, leading to potential data corruption.
+ + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
+ In case of multiple successive blocks, should some of them be uncompressed,
decoder must be informed of their existence in order to follow proper history.
Use ZSTD_insertBlock() for such a case.
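A sketch tying these block rules together: compress one block, store it raw when ZSTD_compressBlock() returns 0, and keep the decoder's history consistent with ZSTD_insertBlock(). The block functions live behind ZSTD_STATIC_LINKING_ONLY; contexts are assumed already initialised with ZSTD_compressBegin()/ZSTD_decompressBegin(), and `tmp`/`dst` sized by the caller.

    #define ZSTD_STATIC_LINKING_ONLY
    #include <string.h>
    #include <zstd.h>

    size_t roundtripBlock(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx,
                          const void* src, size_t srcSize,
                          void* tmp, void* dst, size_t cap)
    {
        size_t const cSize = ZSTD_compressBlock(cctx, tmp, cap, src, srcSize);
        if (ZSTD_isError(cSize)) return cSize;
        if (cSize == 0) {                          /* incompressible: keep it raw */
            memcpy(dst, src, srcSize);
            ZSTD_insertBlock(dctx, dst, srcSize);  /* keep decoder history in sync */
            return srcSize;
        }
        return ZSTD_decompressBlock(dctx, dst, cap, tmp, cSize);
    }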