path: root/contrib/libs/lz4
author     orivej <orivej@yandex-team.ru>  2022-02-10 16:45:01 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:01 +0300
commit     2d37894b1b037cf24231090eda8589bbb44fb6fc (patch)
tree       be835aa92c6248212e705f25388ebafcf84bc7a1 /contrib/libs/lz4
parent     718c552901d703c502ccbefdfc3c9028d608b947 (diff)
download   ydb-2d37894b1b037cf24231090eda8589bbb44fb6fc.tar.gz
Restoring authorship annotation for <orivej@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/lz4')
-rw-r--r--  contrib/libs/lz4/LICENSE                   |   48
-rw-r--r--  contrib/libs/lz4/README.md                 |  268
-rw-r--r--  contrib/libs/lz4/generated/gen.py          |   84
-rw-r--r--  contrib/libs/lz4/generated/lz4_10.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_11.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_12.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_13.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_14.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_15.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_16.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_17.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_18.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_19.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_20.cpp      |    6
-rw-r--r--  contrib/libs/lz4/generated/lz4_ns.h        |   28
-rw-r--r--  contrib/libs/lz4/generated/lz4methods.cpp  |   70
-rw-r--r--  contrib/libs/lz4/generated/ya.make         |   42
-rw-r--r--  contrib/libs/lz4/lz4.c                     | 1926
-rw-r--r--  contrib/libs/lz4/lz4.h                     |  494
-rw-r--r--  contrib/libs/lz4/lz4frame.c                | 1718
-rw-r--r--  contrib/libs/lz4/lz4frame.h                |  462
-rw-r--r--  contrib/libs/lz4/lz4hc.c                   | 1020
-rw-r--r--  contrib/libs/lz4/lz4hc.h                   |  182
-rw-r--r--  contrib/libs/lz4/ya.make                   |   28
24 files changed, 3218 insertions(+), 3218 deletions(-)
diff --git a/contrib/libs/lz4/LICENSE b/contrib/libs/lz4/LICENSE
index b94cf9b342..74c2cdd7d5 100644
--- a/contrib/libs/lz4/LICENSE
+++ b/contrib/libs/lz4/LICENSE
@@ -1,24 +1,24 @@
-LZ4 Library
-Copyright (c) 2011-2016, Yann Collet
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this
- list of conditions and the following disclaimer in the documentation and/or
- other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+LZ4 Library
+Copyright (c) 2011-2016, Yann Collet
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/libs/lz4/README.md b/contrib/libs/lz4/README.md
index db7fa4ff5a..e2af868ff4 100644
--- a/contrib/libs/lz4/README.md
+++ b/contrib/libs/lz4/README.md
@@ -1,137 +1,137 @@
-LZ4 - Library Files
-================================
-
-The `/lib` directory contains many files, but depending on project's objectives,
-not all of them are necessary.
-
-#### Minimal LZ4 build
-
-The minimum required is **`lz4.c`** and **`lz4.h`**,
-which provides the fast compression and decompression algorithms.
-They generate and decode data using the [LZ4 block format].
-
-
-#### High Compression variant
-
-For more compression ratio at the cost of compression speed,
-the High Compression variant called **lz4hc** is available.
-Add files **`lz4hc.c`** and **`lz4hc.h`**.
-This variant also compresses data using the [LZ4 block format],
-and depends on regular `lib/lz4.*` source files.
-
-
-#### Frame support, for interoperability
-
-In order to produce compressed data compatible with `lz4` command line utility,
-it's necessary to use the [official interoperable frame format].
-This format is generated and decoded automatically by the **lz4frame** library.
-Its public API is described in `lib/lz4frame.h`.
-In order to work properly, lz4frame needs all other modules present in `/lib`,
-including, lz4 and lz4hc, and also **xxhash**.
-So it's necessary to include all `*.c` and `*.h` files present in `/lib`.
-
-
-#### Advanced / Experimental API
-
-Definitions which are not guaranteed to remain stable in future versions,
-are protected behind macros, such as `LZ4_STATIC_LINKING_ONLY`.
-As the name strongly implies, these definitions should only be invoked
-in the context of static linking ***only***.
-Otherwise, dependent application may fail on API or ABI break in the future.
-The associated symbols are also not exposed by the dynamic library by default.
-Should they be nonetheless needed, it's possible to force their publication
-by using build macros `LZ4_PUBLISH_STATIC_FUNCTIONS`
-and `LZ4F_PUBLISH_STATIC_FUNCTIONS`.
-
-
-#### Build macros
-
-The following build macro can be selected to adjust source code behavior at compilation time :
-
-- `LZ4_FAST_DEC_LOOP` : this triggers a speed optimized decompression loop, more powerful on modern cpus.
- This loop works great on `x86`, `x64` and `aarch64` cpus, and is automatically enabled for them.
- It's also possible to enable or disable it manually, by passing `LZ4_FAST_DEC_LOOP=1` or `0` to the preprocessor.
- For example, with `gcc` : `-DLZ4_FAST_DEC_LOOP=1`,
- and with `make` : `CPPFLAGS+=-DLZ4_FAST_DEC_LOOP=1 make lz4`.
-
-- `LZ4_DISTANCE_MAX` : control the maximum offset that the compressor will allow.
- Set to 65535 by default, which is the maximum value supported by lz4 format.
- Reducing maximum distance will reduce opportunities for LZ4 to find matches,
+LZ4 - Library Files
+================================
+
+The `/lib` directory contains many files, but depending on project's objectives,
+not all of them are necessary.
+
+#### Minimal LZ4 build
+
+The minimum required is **`lz4.c`** and **`lz4.h`**,
+which provides the fast compression and decompression algorithms.
+They generate and decode data using the [LZ4 block format].
+
+
+#### High Compression variant
+
+For more compression ratio at the cost of compression speed,
+the High Compression variant called **lz4hc** is available.
+Add files **`lz4hc.c`** and **`lz4hc.h`**.
+This variant also compresses data using the [LZ4 block format],
+and depends on regular `lib/lz4.*` source files.
+
+
+#### Frame support, for interoperability
+
+In order to produce compressed data compatible with `lz4` command line utility,
+it's necessary to use the [official interoperable frame format].
+This format is generated and decoded automatically by the **lz4frame** library.
+Its public API is described in `lib/lz4frame.h`.
+In order to work properly, lz4frame needs all other modules present in `/lib`,
+including, lz4 and lz4hc, and also **xxhash**.
+So it's necessary to include all `*.c` and `*.h` files present in `/lib`.
+
+
+#### Advanced / Experimental API
+
+Definitions which are not guaranteed to remain stable in future versions,
+are protected behind macros, such as `LZ4_STATIC_LINKING_ONLY`.
+As the name strongly implies, these definitions should only be invoked
+in the context of static linking ***only***.
+Otherwise, dependent application may fail on API or ABI break in the future.
+The associated symbols are also not exposed by the dynamic library by default.
+Should they be nonetheless needed, it's possible to force their publication
+by using build macros `LZ4_PUBLISH_STATIC_FUNCTIONS`
+and `LZ4F_PUBLISH_STATIC_FUNCTIONS`.
+
+
+#### Build macros
+
+The following build macro can be selected to adjust source code behavior at compilation time :
+
+- `LZ4_FAST_DEC_LOOP` : this triggers a speed optimized decompression loop, more powerful on modern cpus.
+ This loop works great on `x86`, `x64` and `aarch64` cpus, and is automatically enabled for them.
+ It's also possible to enable or disable it manually, by passing `LZ4_FAST_DEC_LOOP=1` or `0` to the preprocessor.
+ For example, with `gcc` : `-DLZ4_FAST_DEC_LOOP=1`,
+ and with `make` : `CPPFLAGS+=-DLZ4_FAST_DEC_LOOP=1 make lz4`.
+
+- `LZ4_DISTANCE_MAX` : control the maximum offset that the compressor will allow.
+ Set to 65535 by default, which is the maximum value supported by lz4 format.
+ Reducing maximum distance will reduce opportunities for LZ4 to find matches,
hence will produce a worse compression ratio.
However, a smaller max distance can allow compatibility with specific decoders using limited memory budget.
- This build macro only influences the compressed output of the compressor.
-
-- `LZ4_DISABLE_DEPRECATE_WARNINGS` : invoking a deprecated function will make the compiler generate a warning.
- This is meant to invite users to update their source code.
- Should this be a problem, it's generally possible to make the compiler ignore these warnings,
- for example with `-Wno-deprecated-declarations` on `gcc`,
- or `_CRT_SECURE_NO_WARNINGS` for Visual Studio.
- This build macro offers another project-specific method
- by defining `LZ4_DISABLE_DEPRECATE_WARNINGS` before including the LZ4 header files.
-
-- `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to <stdlib>'s `malloc`, `calloc` and `free`
- by user-defined functions, which must be called `LZ4_malloc()`, `LZ4_calloc()` and `LZ4_free()`.
- User functions must be available at link time.
-
-- `LZ4_FORCE_SW_BITCOUNT` : by default, the compression algorithm tries to determine lengths
- by using bitcount instructions, generally implemented as fast single instructions in many cpus.
- In case the target cpus doesn't support it, or compiler intrinsic doesn't work, or feature bad performance,
- it's possible to use an optimized software path instead.
- This is achieved by setting this build macros .
- In most cases, it's not expected to be necessary,
- but it can be legitimately considered for less common platforms.
-
-- `LZ4_ALIGN_TEST` : alignment test ensures that the memory area
- passed as argument to become a compression state is suitably aligned.
- This test can be disabled if it proves flaky, by setting this value to 0.
-
-
-#### Amalgamation
-
-lz4 source code can be amalgamated into a single file.
-One can combine all source code into `lz4_all.c` by using following command:
-```
+ This build macro only influences the compressed output of the compressor.
+
+- `LZ4_DISABLE_DEPRECATE_WARNINGS` : invoking a deprecated function will make the compiler generate a warning.
+ This is meant to invite users to update their source code.
+ Should this be a problem, it's generally possible to make the compiler ignore these warnings,
+ for example with `-Wno-deprecated-declarations` on `gcc`,
+ or `_CRT_SECURE_NO_WARNINGS` for Visual Studio.
+ This build macro offers another project-specific method
+ by defining `LZ4_DISABLE_DEPRECATE_WARNINGS` before including the LZ4 header files.
+
+- `LZ4_USER_MEMORY_FUNCTIONS` : replace calls to <stdlib>'s `malloc`, `calloc` and `free`
+ by user-defined functions, which must be called `LZ4_malloc()`, `LZ4_calloc()` and `LZ4_free()`.
+ User functions must be available at link time.
+
+- `LZ4_FORCE_SW_BITCOUNT` : by default, the compression algorithm tries to determine lengths
+ by using bitcount instructions, generally implemented as fast single instructions in many cpus.
+ In case the target cpus doesn't support it, or compiler intrinsic doesn't work, or feature bad performance,
+ it's possible to use an optimized software path instead.
+ This is achieved by setting this build macros .
+ In most cases, it's not expected to be necessary,
+ but it can be legitimately considered for less common platforms.
+
+- `LZ4_ALIGN_TEST` : alignment test ensures that the memory area
+ passed as argument to become a compression state is suitably aligned.
+ This test can be disabled if it proves flaky, by setting this value to 0.
+
+
+#### Amalgamation
+
+lz4 source code can be amalgamated into a single file.
+One can combine all source code into `lz4_all.c` by using following command:
+```
cat lz4.c lz4hc.c lz4frame.c > lz4_all.c
-```
-(`cat` file order is important) then compile `lz4_all.c`.
-All `*.h` files present in `/lib` remain necessary to compile `lz4_all.c`.
-
-
-#### Windows : using MinGW+MSYS to create DLL
-
-DLL can be created using MinGW+MSYS with the `make liblz4` command.
-This command creates `dll\liblz4.dll` and the import library `dll\liblz4.lib`.
-To override the `dlltool` command when cross-compiling on Linux, just set the `DLLTOOL` variable. Example of cross compilation on Linux with mingw-w64 64 bits:
-```
-make BUILD_STATIC=no CC=x86_64-w64-mingw32-gcc DLLTOOL=x86_64-w64-mingw32-dlltool OS=Windows_NT
-```
-The import library is only required with Visual C++.
-The header files `lz4.h`, `lz4hc.h`, `lz4frame.h` and the dynamic library
-`dll\liblz4.dll` are required to compile a project using gcc/MinGW.
-The dynamic library has to be added to linking options.
-It means that if a project that uses LZ4 consists of a single `test-dll.c`
-file it should be linked with `dll\liblz4.dll`. For example:
-```
- $(CC) $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\liblz4.dll
-```
-The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`.
-
-
-#### Miscellaneous
-
-Other files present in the directory are not source code. They are :
-
- - `LICENSE` : contains the BSD license text
- - `Makefile` : `make` script to compile and install lz4 library (static and dynamic)
- - `liblz4.pc.in` : for `pkg-config` (used in `make install`)
- - `README.md` : this file
-
-[official interoperable frame format]: ../doc/lz4_Frame_format.md
-[LZ4 block format]: ../doc/lz4_Block_format.md
-
-
-#### License
-
-All source material within __lib__ directory are BSD 2-Clause licensed.
-See [LICENSE](LICENSE) for details.
-The license is also reminded at the top of each source file.
+```
+(`cat` file order is important) then compile `lz4_all.c`.
+All `*.h` files present in `/lib` remain necessary to compile `lz4_all.c`.
+
+
+#### Windows : using MinGW+MSYS to create DLL
+
+DLL can be created using MinGW+MSYS with the `make liblz4` command.
+This command creates `dll\liblz4.dll` and the import library `dll\liblz4.lib`.
+To override the `dlltool` command when cross-compiling on Linux, just set the `DLLTOOL` variable. Example of cross compilation on Linux with mingw-w64 64 bits:
+```
+make BUILD_STATIC=no CC=x86_64-w64-mingw32-gcc DLLTOOL=x86_64-w64-mingw32-dlltool OS=Windows_NT
+```
+The import library is only required with Visual C++.
+The header files `lz4.h`, `lz4hc.h`, `lz4frame.h` and the dynamic library
+`dll\liblz4.dll` are required to compile a project using gcc/MinGW.
+The dynamic library has to be added to linking options.
+It means that if a project that uses LZ4 consists of a single `test-dll.c`
+file it should be linked with `dll\liblz4.dll`. For example:
+```
+ $(CC) $(CFLAGS) -Iinclude/ test-dll.c -o test-dll dll\liblz4.dll
+```
+The compiled executable will require LZ4 DLL which is available at `dll\liblz4.dll`.
+
+
+#### Miscellaneous
+
+Other files present in the directory are not source code. They are :
+
+ - `LICENSE` : contains the BSD license text
+ - `Makefile` : `make` script to compile and install lz4 library (static and dynamic)
+ - `liblz4.pc.in` : for `pkg-config` (used in `make install`)
+ - `README.md` : this file
+
+[official interoperable frame format]: ../doc/lz4_Frame_format.md
+[LZ4 block format]: ../doc/lz4_Block_format.md
+
+
+#### License
+
+All source material within __lib__ directory are BSD 2-Clause licensed.
+See [LICENSE](LICENSE) for details.
+The license is also reminded at the top of each source file.
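The "Minimal LZ4 build" section of the README above covers the block API exposed by `lz4.c`/`lz4.h`. For reference, a minimal round-trip sketch against that stable public API (`LZ4_compressBound`, `LZ4_compress_default`, `LZ4_decompress_safe`); the input data and buffer handling are illustrative only and are not taken from this repository:
```
/* Minimal sketch of the lz4.h block API described in the README above.
 * Error handling is abbreviated. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "lz4.h"

int main(void)
{
    const char src[] = "lz4 round-trip example, repetitive repetitive repetitive data";
    const int srcSize = (int)sizeof(src);                /* include trailing '\0' */

    const int dstCapacity = LZ4_compressBound(srcSize);  /* worst-case compressed size */
    char* compressed = (char*)malloc((size_t)dstCapacity);
    char* restored   = (char*)malloc((size_t)srcSize);

    const int cSize = LZ4_compress_default(src, compressed, srcSize, dstCapacity);
    if (cSize <= 0) return 1;                             /* 0 means compression failed */

    const int dSize = LZ4_decompress_safe(compressed, restored, cSize, srcSize);
    if (dSize != srcSize || memcmp(src, restored, (size_t)srcSize) != 0) return 1;

    printf("compressed %d -> %d bytes\n", srcSize, cSize);
    free(compressed);
    free(restored);
    return 0;
}
```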
diff --git a/contrib/libs/lz4/generated/gen.py b/contrib/libs/lz4/generated/gen.py
index 6860cf164d..24dec0555c 100644
--- a/contrib/libs/lz4/generated/gen.py
+++ b/contrib/libs/lz4/generated/gen.py
@@ -1,45 +1,45 @@
-import os
-
-lz4 = '''
-#define LZ4_MEMORY_USAGE {i}
-#define LZ4_NAMESPACE lz4_{i}
-#include "lz4_ns.h"
-'''.lstrip()
-
-lz4methods = '''
-#include "iface.h"
-
-%s
-
-extern "C" {
-
-struct TLZ4Methods* LZ4Methods(int memory) {
- switch (memory) {
-%s
- }
-
- return 0;
-}
-
-}
-'''.lstrip()
-
-lz4_namespace = 'namespace lz4_{i} {{ extern struct TLZ4Methods ytbl; }}'
-lz4_case = ' case {i}: return &lz4_{i}::ytbl;'
-
-namespaces = []
-cases = []
-
-os.chdir(os.path.dirname(__file__))
-
+import os
+
+lz4 = '''
+#define LZ4_MEMORY_USAGE {i}
+#define LZ4_NAMESPACE lz4_{i}
+#include "lz4_ns.h"
+'''.lstrip()
+
+lz4methods = '''
+#include "iface.h"
+
+%s
+
+extern "C" {
+
+struct TLZ4Methods* LZ4Methods(int memory) {
+ switch (memory) {
+%s
+ }
+
+ return 0;
+}
+
+}
+'''.lstrip()
+
+lz4_namespace = 'namespace lz4_{i} {{ extern struct TLZ4Methods ytbl; }}'
+lz4_case = ' case {i}: return &lz4_{i}::ytbl;'
+
+namespaces = []
+cases = []
+
+os.chdir(os.path.dirname(__file__))
+
for i in range(10, 21):
- name = 'lz4_{}.cpp'.format(i)
- namespaces.append(lz4_namespace.format(i=i))
- cases.append(lz4_case.format(i=i))
- print ' ' + name
+ name = 'lz4_{}.cpp'.format(i)
+ namespaces.append(lz4_namespace.format(i=i))
+ cases.append(lz4_case.format(i=i))
+ print ' ' + name
- with open(name, 'w') as f:
- f.write(lz4.format(i=i))
+ with open(name, 'w') as f:
+ f.write(lz4.format(i=i))
-with open('lz4methods.cpp', 'w') as f:
- f.write(lz4methods % ('\n'.join(namespaces), '\n'.join(cases)))
+with open('lz4methods.cpp', 'w') as f:
+ f.write(lz4methods % ('\n'.join(namespaces), '\n'.join(cases)))
diff --git a/contrib/libs/lz4/generated/lz4_10.cpp b/contrib/libs/lz4/generated/lz4_10.cpp
index d404aa63a8..77a4528789 100644
--- a/contrib/libs/lz4/generated/lz4_10.cpp
+++ b/contrib/libs/lz4/generated/lz4_10.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 10
-#define LZ4_NAMESPACE lz4_10
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 10
+#define LZ4_NAMESPACE lz4_10
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_11.cpp b/contrib/libs/lz4/generated/lz4_11.cpp
index 8182e1d00c..456c2222ed 100644
--- a/contrib/libs/lz4/generated/lz4_11.cpp
+++ b/contrib/libs/lz4/generated/lz4_11.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 11
-#define LZ4_NAMESPACE lz4_11
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 11
+#define LZ4_NAMESPACE lz4_11
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_12.cpp b/contrib/libs/lz4/generated/lz4_12.cpp
index 882197e693..ccf99bceba 100644
--- a/contrib/libs/lz4/generated/lz4_12.cpp
+++ b/contrib/libs/lz4/generated/lz4_12.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 12
-#define LZ4_NAMESPACE lz4_12
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 12
+#define LZ4_NAMESPACE lz4_12
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_13.cpp b/contrib/libs/lz4/generated/lz4_13.cpp
index 2086aefcf9..41c56deecf 100644
--- a/contrib/libs/lz4/generated/lz4_13.cpp
+++ b/contrib/libs/lz4/generated/lz4_13.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 13
-#define LZ4_NAMESPACE lz4_13
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 13
+#define LZ4_NAMESPACE lz4_13
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_14.cpp b/contrib/libs/lz4/generated/lz4_14.cpp
index 76c2c8dfd8..16b3f15161 100644
--- a/contrib/libs/lz4/generated/lz4_14.cpp
+++ b/contrib/libs/lz4/generated/lz4_14.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 14
-#define LZ4_NAMESPACE lz4_14
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 14
+#define LZ4_NAMESPACE lz4_14
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_15.cpp b/contrib/libs/lz4/generated/lz4_15.cpp
index a230350e8b..e9ef921845 100644
--- a/contrib/libs/lz4/generated/lz4_15.cpp
+++ b/contrib/libs/lz4/generated/lz4_15.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 15
-#define LZ4_NAMESPACE lz4_15
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 15
+#define LZ4_NAMESPACE lz4_15
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_16.cpp b/contrib/libs/lz4/generated/lz4_16.cpp
index e3076fe5eb..9384215c72 100644
--- a/contrib/libs/lz4/generated/lz4_16.cpp
+++ b/contrib/libs/lz4/generated/lz4_16.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 16
-#define LZ4_NAMESPACE lz4_16
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 16
+#define LZ4_NAMESPACE lz4_16
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_17.cpp b/contrib/libs/lz4/generated/lz4_17.cpp
index 3b3c2cfc24..5e0e2d54d0 100644
--- a/contrib/libs/lz4/generated/lz4_17.cpp
+++ b/contrib/libs/lz4/generated/lz4_17.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 17
-#define LZ4_NAMESPACE lz4_17
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 17
+#define LZ4_NAMESPACE lz4_17
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_18.cpp b/contrib/libs/lz4/generated/lz4_18.cpp
index 5d2b859b2a..4440b9ba61 100644
--- a/contrib/libs/lz4/generated/lz4_18.cpp
+++ b/contrib/libs/lz4/generated/lz4_18.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 18
-#define LZ4_NAMESPACE lz4_18
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 18
+#define LZ4_NAMESPACE lz4_18
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_19.cpp b/contrib/libs/lz4/generated/lz4_19.cpp
index 7a765c10dc..59118b5cd5 100644
--- a/contrib/libs/lz4/generated/lz4_19.cpp
+++ b/contrib/libs/lz4/generated/lz4_19.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 19
-#define LZ4_NAMESPACE lz4_19
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 19
+#define LZ4_NAMESPACE lz4_19
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_20.cpp b/contrib/libs/lz4/generated/lz4_20.cpp
index fbb9177454..1e41fb33bc 100644
--- a/contrib/libs/lz4/generated/lz4_20.cpp
+++ b/contrib/libs/lz4/generated/lz4_20.cpp
@@ -1,3 +1,3 @@
-#define LZ4_MEMORY_USAGE 20
-#define LZ4_NAMESPACE lz4_20
-#include "lz4_ns.h"
+#define LZ4_MEMORY_USAGE 20
+#define LZ4_NAMESPACE lz4_20
+#include "lz4_ns.h"
diff --git a/contrib/libs/lz4/generated/lz4_ns.h b/contrib/libs/lz4/generated/lz4_ns.h
index 71f3b71307..2dfbbf8ccc 100644
--- a/contrib/libs/lz4/generated/lz4_ns.h
+++ b/contrib/libs/lz4/generated/lz4_ns.h
@@ -1,19 +1,19 @@
-#pragma once
+#pragma once
-#include "iface.h"
+#include "iface.h"
-#include <stddef.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-
-namespace LZ4_NAMESPACE {
-
-#define ONLY_COMPRESS
-#include "../lz4.c"
-
-struct TLZ4Methods ytbl = {
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+namespace LZ4_NAMESPACE {
+
+#define ONLY_COMPRESS
+#include "../lz4.c"
+
+struct TLZ4Methods ytbl = {
LZ4_compress_default,
};
-}
+}
diff --git a/contrib/libs/lz4/generated/lz4methods.cpp b/contrib/libs/lz4/generated/lz4methods.cpp
index 8ac970bc87..0f94d167a3 100644
--- a/contrib/libs/lz4/generated/lz4methods.cpp
+++ b/contrib/libs/lz4/generated/lz4methods.cpp
@@ -1,35 +1,35 @@
-#include "iface.h"
-
-namespace lz4_10 { extern struct TLZ4Methods ytbl; }
-namespace lz4_11 { extern struct TLZ4Methods ytbl; }
-namespace lz4_12 { extern struct TLZ4Methods ytbl; }
-namespace lz4_13 { extern struct TLZ4Methods ytbl; }
-namespace lz4_14 { extern struct TLZ4Methods ytbl; }
-namespace lz4_15 { extern struct TLZ4Methods ytbl; }
-namespace lz4_16 { extern struct TLZ4Methods ytbl; }
-namespace lz4_17 { extern struct TLZ4Methods ytbl; }
-namespace lz4_18 { extern struct TLZ4Methods ytbl; }
-namespace lz4_19 { extern struct TLZ4Methods ytbl; }
-namespace lz4_20 { extern struct TLZ4Methods ytbl; }
-
-extern "C" {
-
-struct TLZ4Methods* LZ4Methods(int memory) {
- switch (memory) {
- case 10: return &lz4_10::ytbl;
- case 11: return &lz4_11::ytbl;
- case 12: return &lz4_12::ytbl;
- case 13: return &lz4_13::ytbl;
- case 14: return &lz4_14::ytbl;
- case 15: return &lz4_15::ytbl;
- case 16: return &lz4_16::ytbl;
- case 17: return &lz4_17::ytbl;
- case 18: return &lz4_18::ytbl;
- case 19: return &lz4_19::ytbl;
- case 20: return &lz4_20::ytbl;
- }
-
- return 0;
-}
-
-}
+#include "iface.h"
+
+namespace lz4_10 { extern struct TLZ4Methods ytbl; }
+namespace lz4_11 { extern struct TLZ4Methods ytbl; }
+namespace lz4_12 { extern struct TLZ4Methods ytbl; }
+namespace lz4_13 { extern struct TLZ4Methods ytbl; }
+namespace lz4_14 { extern struct TLZ4Methods ytbl; }
+namespace lz4_15 { extern struct TLZ4Methods ytbl; }
+namespace lz4_16 { extern struct TLZ4Methods ytbl; }
+namespace lz4_17 { extern struct TLZ4Methods ytbl; }
+namespace lz4_18 { extern struct TLZ4Methods ytbl; }
+namespace lz4_19 { extern struct TLZ4Methods ytbl; }
+namespace lz4_20 { extern struct TLZ4Methods ytbl; }
+
+extern "C" {
+
+struct TLZ4Methods* LZ4Methods(int memory) {
+ switch (memory) {
+ case 10: return &lz4_10::ytbl;
+ case 11: return &lz4_11::ytbl;
+ case 12: return &lz4_12::ytbl;
+ case 13: return &lz4_13::ytbl;
+ case 14: return &lz4_14::ytbl;
+ case 15: return &lz4_15::ytbl;
+ case 16: return &lz4_16::ytbl;
+ case 17: return &lz4_17::ytbl;
+ case 18: return &lz4_18::ytbl;
+ case 19: return &lz4_19::ytbl;
+ case 20: return &lz4_20::ytbl;
+ }
+
+ return 0;
+}
+
+}
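The generated wrappers above compile `lz4.c` once per `LZ4_MEMORY_USAGE` value (10 through 20), each inside its own namespace, and `LZ4Methods(int memory)` returns the matching `TLZ4Methods` table, or a null pointer for any other value. `iface.h` is not part of this diff, so the field name in the sketch below is an assumption; from `lz4_ns.h` we only know the table stores `LZ4_compress_default` of the corresponding instantiation. A hedged sketch of how a caller might dispatch:
```
/* Sketch only: TLZ4Methods is really declared in iface.h, which this diff does
 * not include. The single "compress" member is an assumption, chosen to match
 * LZ4_compress_default's signature, since lz4_ns.h stores exactly that
 * function pointer in the table. */
#include <stddef.h>

struct TLZ4Methods {
    int (*compress)(const char* src, char* dst, int srcSize, int dstCapacity);
};

struct TLZ4Methods* LZ4Methods(int memory);    /* defined in lz4methods.cpp above */

static int CompressWithMemoryUsage(const char* src, int srcSize,
                                   char* dst, int dstCapacity, int memory)
{
    struct TLZ4Methods* methods = LZ4Methods(memory);  /* NULL unless 10 <= memory <= 20 */
    if (methods == NULL) return 0;
    return methods->compress(src, dst, srcSize, dstCapacity);
}
```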
diff --git a/contrib/libs/lz4/generated/ya.make b/contrib/libs/lz4/generated/ya.make
index be9b26fc75..f37d13bddb 100644
--- a/contrib/libs/lz4/generated/ya.make
+++ b/contrib/libs/lz4/generated/ya.make
@@ -1,5 +1,5 @@
-LIBRARY()
-
+LIBRARY()
+
WITHOUT_LICENSE_TEXTS()
LICENSE(BSD-2-Clause)
@@ -9,22 +9,22 @@ OWNER(
g:contrib
g:cpp-contrib
)
-
-NO_UTIL()
-
-SRCS(
- lz4_10.cpp
- lz4_11.cpp
- lz4_12.cpp
- lz4_13.cpp
- lz4_14.cpp
- lz4_15.cpp
- lz4_16.cpp
- lz4_17.cpp
- lz4_18.cpp
- lz4_19.cpp
- lz4_20.cpp
- lz4methods.cpp
-)
-
-END()
+
+NO_UTIL()
+
+SRCS(
+ lz4_10.cpp
+ lz4_11.cpp
+ lz4_12.cpp
+ lz4_13.cpp
+ lz4_14.cpp
+ lz4_15.cpp
+ lz4_16.cpp
+ lz4_17.cpp
+ lz4_18.cpp
+ lz4_19.cpp
+ lz4_20.cpp
+ lz4methods.cpp
+)
+
+END()
diff --git a/contrib/libs/lz4/lz4.c b/contrib/libs/lz4/lz4.c
index 32a5c8e6af..c864ba73ba 100644
--- a/contrib/libs/lz4/lz4.c
+++ b/contrib/libs/lz4/lz4.c
@@ -1,7 +1,7 @@
/*
LZ4 - Fast LZ compression algorithm
Copyright (C) 2011-present, Yann Collet.
-
+
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
@@ -28,75 +28,75 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- - LZ4 homepage : http://www.lz4.org
- - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
*/
-/*-************************************
-* Tuning parameters
-**************************************/
-/*
+/*-************************************
+* Tuning parameters
+**************************************/
+/*
* LZ4_HEAPMODE :
- * Select how default compression functions will allocate memory for their hash table,
- * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
- */
+ * Select how default compression functions will allocate memory for their hash table,
+ * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ */
#ifndef LZ4_HEAPMODE
# define LZ4_HEAPMODE 0
#endif
-/*
- * LZ4_ACCELERATION_DEFAULT :
- * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
- */
-#define LZ4_ACCELERATION_DEFAULT 1
-/*
- * LZ4_ACCELERATION_MAX :
- * Any "acceleration" value higher than this threshold
- * get treated as LZ4_ACCELERATION_MAX instead (fix #876)
- */
-#define LZ4_ACCELERATION_MAX 65537
-
-
-/*-************************************
-* CPU Feature Detection
-**************************************/
-/* LZ4_FORCE_MEMORY_ACCESS
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The below switch allow to select different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
- * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violate C standard.
+/*
+ * LZ4_ACCELERATION_DEFAULT :
+ * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
+ */
+#define LZ4_ACCELERATION_DEFAULT 1
+/*
+ * LZ4_ACCELERATION_MAX :
+ * Any "acceleration" value higher than this threshold
+ * get treated as LZ4_ACCELERATION_MAX instead (fix #876)
+ */
+#define LZ4_ACCELERATION_MAX 65537
+
+
+/*-************************************
+* CPU Feature Detection
+**************************************/
+/* LZ4_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The below switch allow to select different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violate C standard.
* It can generate buggy code on targets which assembly generation depends on alignment.
- * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
- * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
# if defined(__GNUC__) && \
( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-# define LZ4_FORCE_MEMORY_ACCESS 2
+# define LZ4_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
-# define LZ4_FORCE_MEMORY_ACCESS 1
-# endif
+# define LZ4_FORCE_MEMORY_ACCESS 1
+# endif
#endif
-/*
- * LZ4_FORCE_SW_BITCOUNT
- * Define this parameter if your target system or compiler does not support hardware bit count
- */
+/*
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support hardware bit count
+ */
#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
-# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
-# define LZ4_FORCE_SW_BITCOUNT
+# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
+# define LZ4_FORCE_SW_BITCOUNT
#endif
-/*-************************************
-* Dependency
-**************************************/
+/*-************************************
+* Dependency
+**************************************/
/*
* LZ4_SRC_INCLUDED:
* Amalgamation flag, whether lz4.c is included
@@ -114,17 +114,17 @@
#endif
#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
-#include "lz4.h"
-/* see also "memory routines" below */
+#include "lz4.h"
+/* see also "memory routines" below */
-/*-************************************
-* Compiler Options
-**************************************/
-#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
-# include <intrin.h> /* only present in VS2005+ */
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-#endif /* _MSC_VER */
+/*-************************************
+* Compiler Options
+**************************************/
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
+# include <intrin.h> /* only present in VS2005+ */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif /* _MSC_VER */
#ifndef LZ4_FORCE_INLINE
# ifdef _MSC_VER /* Visual Studio */
@@ -142,7 +142,7 @@
# endif /* _MSC_VER */
#endif /* LZ4_FORCE_INLINE */
-/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
+/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
* gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
* together with a simple 8-byte copy loop as a fall-back path.
* However, this optimization hurts the decompression speed by >30%,
@@ -157,14 +157,14 @@
* of LZ4_wildCopy8 does not affect the compression speed.
*/
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
-# define LZ4_FORCE_O2 __attribute__((optimize("O2")))
-# undef LZ4_FORCE_INLINE
-# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
+# define LZ4_FORCE_O2 __attribute__((optimize("O2")))
+# undef LZ4_FORCE_INLINE
+# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
#else
-# define LZ4_FORCE_O2
+# define LZ4_FORCE_O2
#endif
-#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
# define expect(expr,value) (__builtin_expect ((expr),(value)) )
#else
# define expect(expr,value) (expr)
@@ -177,38 +177,38 @@
#define unlikely(expr) expect((expr) != 0, 0)
#endif
-/* Should the alignment test prove unreliable, for some reason,
- * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
-#ifndef LZ4_ALIGN_TEST /* can be externally provided */
-# define LZ4_ALIGN_TEST 1
-#endif
-
-
-/*-************************************
-* Memory routines
-**************************************/
-#ifdef LZ4_USER_MEMORY_FUNCTIONS
-/* memory management functions can be customized by user project.
- * Below functions must exist somewhere in the Project
- * and be available at link time */
-void* LZ4_malloc(size_t s);
-void* LZ4_calloc(size_t n, size_t s);
-void LZ4_free(void* p);
-# define ALLOC(s) LZ4_malloc(s)
-# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
-# define FREEMEM(p) LZ4_free(p)
-#else
-# include <stdlib.h> /* malloc, calloc, free */
-# define ALLOC(s) malloc(s)
-# define ALLOC_AND_ZERO(s) calloc(1,s)
-# define FREEMEM(p) free(p)
-#endif
-
-#include <string.h> /* memset, memcpy */
+/* Should the alignment test prove unreliable, for some reason,
+ * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
+#ifndef LZ4_ALIGN_TEST /* can be externally provided */
+# define LZ4_ALIGN_TEST 1
+#endif
+
+
+/*-************************************
+* Memory routines
+**************************************/
+#ifdef LZ4_USER_MEMORY_FUNCTIONS
+/* memory management functions can be customized by user project.
+ * Below functions must exist somewhere in the Project
+ * and be available at link time */
+void* LZ4_malloc(size_t s);
+void* LZ4_calloc(size_t n, size_t s);
+void LZ4_free(void* p);
+# define ALLOC(s) LZ4_malloc(s)
+# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
+# define FREEMEM(p) LZ4_free(p)
+#else
+# include <stdlib.h> /* malloc, calloc, free */
+# define ALLOC(s) malloc(s)
+# define ALLOC_AND_ZERO(s) calloc(1,s)
+# define FREEMEM(p) free(p)
+#endif
+
+#include <string.h> /* memset, memcpy */
#define MEM_INIT(p,v,s) memset((p),(v),(s))
-/*-************************************
+/*-************************************
* Common Constants
**************************************/
#define MINMATCH 4
@@ -250,51 +250,51 @@ static const int LZ4_minLength = (MFLIMIT+1);
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
# include <stdio.h>
- static int g_debuglog_enable = 1;
-# define DEBUGLOG(l, ...) { \
- if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
- fprintf(stderr, __FILE__ ": "); \
- fprintf(stderr, __VA_ARGS__); \
- fprintf(stderr, " \n"); \
- } }
+ static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
#else
-# define DEBUGLOG(l, ...) {} /* disabled */
+# define DEBUGLOG(l, ...) {} /* disabled */
#endif
-static int LZ4_isAligned(const void* ptr, size_t alignment)
-{
- return ((size_t)ptr & (alignment -1)) == 0;
-}
+static int LZ4_isAligned(const void* ptr, size_t alignment)
+{
+ return ((size_t)ptr & (alignment -1)) == 0;
+}
+
-
/*-************************************
* Types
-**************************************/
-#include <limits.h>
-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# include <stdint.h>
- typedef uint8_t BYTE;
- typedef uint16_t U16;
- typedef uint32_t U32;
- typedef int32_t S32;
- typedef uint64_t U64;
- typedef uintptr_t uptrval;
+**************************************/
+#include <limits.h>
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef uintptr_t uptrval;
#else
-# if UINT_MAX != 4294967295UL
-# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
-# endif
- typedef unsigned char BYTE;
- typedef unsigned short U16;
- typedef unsigned int U32;
- typedef signed int S32;
- typedef unsigned long long U64;
- typedef size_t uptrval; /* generally true, except OpenVMS-64 */
+# if UINT_MAX != 4294967295UL
+# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
+# endif
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef size_t uptrval; /* generally true, except OpenVMS-64 */
#endif
-#if defined(__x86_64__)
- typedef U64 reg_t; /* 64-bits in x32 mode */
-#else
- typedef size_t reg_t; /* 32-bits in x32 mode */
+#if defined(__x86_64__)
+ typedef U64 reg_t; /* 64-bits in x32 mode */
+#else
+ typedef size_t reg_t; /* 32-bits in x32 mode */
#endif
typedef enum {
@@ -304,153 +304,153 @@ typedef enum {
} limitedOutput_directive;
-/*-************************************
-* Reading and writing into memory
-**************************************/
-
-/**
- * LZ4 relies on memcpy with a constant size being inlined. In freestanding
- * environments, the compiler can't assume the implementation of memcpy() is
- * standard compliant, so it can't apply its specialized memcpy() inlining
- * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
- * memcpy() as if it were standard compliant, so it can inline it in freestanding
- * environments. This is needed when decompressing the Linux Kernel, for example.
- */
-#if defined(__GNUC__) && (__GNUC__ >= 4)
-#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
-#else
-#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
-#endif
-
-static unsigned LZ4_isLittleEndian(void)
-{
- const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
- return one.c[0];
-}
-
-
-#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
-/* lie to the compiler about data alignment; use with caution */
-
-static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
-static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
-static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
-
-static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
-static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
-
-#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
-typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
-
-static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
-static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
-static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
-
-static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
-static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
-
+/*-************************************
+* Reading and writing into memory
+**************************************/
+
+/**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#else
+#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
+#endif
+
+static unsigned LZ4_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
+/* lie to the compiler about data alignment; use with caution */
+
+static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
+static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
+
+static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
+
+static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
+
+static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
+
#else /* safe and portable access using memcpy() */
-
-static U16 LZ4_read16(const void* memPtr)
-{
- U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static U32 LZ4_read32(const void* memPtr)
-{
- U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static reg_t LZ4_read_ARCH(const void* memPtr)
-{
- reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
-}
-
-static void LZ4_write16(void* memPtr, U16 value)
-{
- LZ4_memcpy(memPtr, &value, sizeof(value));
-}
-
-static void LZ4_write32(void* memPtr, U32 value)
-{
- LZ4_memcpy(memPtr, &value, sizeof(value));
-}
-
-#endif /* LZ4_FORCE_MEMORY_ACCESS */
-
-
-#if !defined(ONLY_COMPRESS)
-static U16 LZ4_readLE16(const void* memPtr)
-{
- if (LZ4_isLittleEndian()) {
- return LZ4_read16(memPtr);
- } else {
- const BYTE* p = (const BYTE*)memPtr;
- return (U16)((U16)p[0] + (p[1]<<8));
- }
-}
-#endif /* ONLY_COMPRESS */
-
-static void LZ4_writeLE16(void* memPtr, U16 value)
-{
- if (LZ4_isLittleEndian()) {
- LZ4_write16(memPtr, value);
- } else {
- BYTE* p = (BYTE*)memPtr;
- p[0] = (BYTE) value;
- p[1] = (BYTE)(value>>8);
- }
-}
-
+
+static U16 LZ4_read16(const void* memPtr)
+{
+ U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static U32 LZ4_read32(const void* memPtr)
+{
+ U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static reg_t LZ4_read_ARCH(const void* memPtr)
+{
+ reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static void LZ4_write16(void* memPtr, U16 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+static void LZ4_write32(void* memPtr, U32 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+
+#if !defined(ONLY_COMPRESS)
+static U16 LZ4_readLE16(const void* memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read16(memPtr);
+ } else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)((U16)p[0] + (p[1]<<8));
+ }
+}
+#endif /* ONLY_COMPRESS */
+
+static void LZ4_writeLE16(void* memPtr, U16 value)
+{
+ if (LZ4_isLittleEndian()) {
+ LZ4_write16(memPtr, value);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE) value;
+ p[1] = (BYTE)(value>>8);
+ }
+}
+
/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
-LZ4_FORCE_INLINE
+LZ4_FORCE_INLINE
void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
-{
+{
BYTE* d = (BYTE*)dstPtr;
const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
- do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
-}
-
+ do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
+}
+
static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
#ifndef LZ4_FAST_DEC_LOOP
-# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
# define LZ4_FAST_DEC_LOOP 1
# elif defined(__aarch64__) && !defined(__clang__)
/* On aarch64, we disable this optimization for clang because on certain
- * mobile chipsets, performance is reduced with clang. For information
- * refer to https://github.com/lz4/lz4/pull/707 */
+ * mobile chipsets, performance is reduced with clang. For information
+ * refer to https://github.com/lz4/lz4/pull/707 */
# define LZ4_FAST_DEC_LOOP 1
# else
# define LZ4_FAST_DEC_LOOP 0
# endif
#endif
-#if LZ4_FAST_DEC_LOOP && !defined(ONLY_COMPRESS)
+#if LZ4_FAST_DEC_LOOP && !defined(ONLY_COMPRESS)
-LZ4_FORCE_INLINE void
+LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
-{
- assert(srcPtr + offset == dstPtr);
+{
+ assert(srcPtr + offset == dstPtr);
if (offset < 8) {
- LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
+ LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
dstPtr[0] = srcPtr[0];
dstPtr[1] = srcPtr[1];
dstPtr[2] = srcPtr[2];
dstPtr[3] = srcPtr[3];
srcPtr += inc32table[offset];
- LZ4_memcpy(dstPtr+4, srcPtr, 4);
+ LZ4_memcpy(dstPtr+4, srcPtr, 4);
srcPtr -= dec64table[offset];
dstPtr += 8;
} else {
- LZ4_memcpy(dstPtr, srcPtr, 8);
+ LZ4_memcpy(dstPtr, srcPtr, 8);
dstPtr += 8;
srcPtr += 8;
}
@@ -461,20 +461,20 @@ LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, con
/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
* this version copies two times 16 bytes (instead of one time 32 bytes)
* because it must be compatible with offsets >= 16. */
-LZ4_FORCE_INLINE void
+LZ4_FORCE_INLINE void
LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
{
- BYTE* d = (BYTE*)dstPtr;
- const BYTE* s = (const BYTE*)srcPtr;
- BYTE* const e = (BYTE*)dstEnd;
-
- do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
-}
-
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* const e = (BYTE*)dstEnd;
+
+ do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+}
+
/* LZ4_memcpy_using_offset() presumes :
* - dstEnd >= dstPtr + MINMATCH
* - there is at least 8 bytes available to write after dstEnd */
-LZ4_FORCE_INLINE void
+LZ4_FORCE_INLINE void
LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
{
BYTE v[8];
@@ -483,131 +483,131 @@ LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const si
switch(offset) {
case 1:
- MEM_INIT(v, *srcPtr, 8);
+ MEM_INIT(v, *srcPtr, 8);
break;
case 2:
- LZ4_memcpy(v, srcPtr, 2);
- LZ4_memcpy(&v[2], srcPtr, 2);
- LZ4_memcpy(&v[4], v, 4);
+ LZ4_memcpy(v, srcPtr, 2);
+ LZ4_memcpy(&v[2], srcPtr, 2);
+ LZ4_memcpy(&v[4], v, 4);
break;
case 4:
- LZ4_memcpy(v, srcPtr, 4);
- LZ4_memcpy(&v[4], srcPtr, 4);
+ LZ4_memcpy(v, srcPtr, 4);
+ LZ4_memcpy(&v[4], srcPtr, 4);
break;
default:
LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
return;
}
-
- LZ4_memcpy(dstPtr, v, 8);
+
+ LZ4_memcpy(dstPtr, v, 8);
dstPtr += 8;
while (dstPtr < dstEnd) {
- LZ4_memcpy(dstPtr, v, 8);
+ LZ4_memcpy(dstPtr, v, 8);
dstPtr += 8;
}
}
#endif
-/*-************************************
-* Common functions
-**************************************/
+/*-************************************
+* Common functions
+**************************************/
static unsigned LZ4_NbCommonBytes (reg_t val)
-{
- assert(val != 0);
- if (LZ4_isLittleEndian()) {
- if (sizeof(val) == 8) {
-# if defined(_MSC_VER) && !defined(__clang__) && (_MSC_VER >= 1800) && defined(_M_AMD64) && !defined(LZ4_FORCE_SW_BITCOUNT)
- /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
- return (unsigned)_tzcnt_u64(val) >> 3;
-# elif defined(_MSC_VER) && !defined(__clang__) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r = 0;
- _BitScanForward64(&r, (U64)val);
- return (unsigned)r >> 3;
-# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
- ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
- !defined(LZ4_FORCE_SW_BITCOUNT)
+{
+ assert(val != 0);
+ if (LZ4_isLittleEndian()) {
+ if (sizeof(val) == 8) {
+# if defined(_MSC_VER) && !defined(__clang__) && (_MSC_VER >= 1800) && defined(_M_AMD64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
+ return (unsigned)_tzcnt_u64(val) >> 3;
+# elif defined(_MSC_VER) && !defined(__clang__) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanForward64(&r, (U64)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
return (unsigned)__builtin_ctzll((U64)val) >> 3;
-# else
- const U64 m = 0x0101010101010101ULL;
- val ^= val - 1;
- return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
-# endif
- } else /* 32 bits */ {
-# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
- unsigned long r;
- _BitScanForward(&r, (U32)val);
- return (unsigned)r >> 3;
-# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
- ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
- !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+# else
+ const U64 m = 0x0101010101010101ULL;
+ val ^= val - 1;
+ return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
+# endif
+ } else /* 32 bits */ {
+# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward(&r, (U32)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
return (unsigned)__builtin_ctz((U32)val) >> 3;
-# else
- const U32 m = 0x01010101;
- return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
-# endif
- }
- } else /* Big Endian CPU */ {
- if (sizeof(val)==8) {
-# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
- ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
- !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+# else
+ const U32 m = 0x01010101;
+ return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
+# endif
+ }
+ } else /* Big Endian CPU */ {
+ if (sizeof(val)==8) {
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
return (unsigned)__builtin_clzll((U64)val) >> 3;
-# else
-#if 1
- /* this method is probably faster,
- * but adds a 128 bytes lookup table */
- static const unsigned char ctz7_tab[128] = {
- 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- };
- U64 const mask = 0x0101010101010101ULL;
- U64 const t = (((val >> 8) - mask) | val) & mask;
- return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
-#else
- /* this method doesn't consume memory space like the previous one,
- * but it contains several branches,
- * that may end up slowing execution */
+# else
+#if 1
+ /* this method is probably faster,
+ * but adds a 128 bytes lookup table */
+ static const unsigned char ctz7_tab[128] = {
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ };
+ U64 const mask = 0x0101010101010101ULL;
+ U64 const t = (((val >> 8) - mask) | val) & mask;
+ return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
+#else
+ /* this method doesn't consume memory space like the previous one,
+ * but it contains several branches,
+ * that may end up slowing execution */
static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.
- Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
- Note that this code path is never triggered in 32-bits mode. */
- unsigned r;
+ Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
+ Note that this code path is never triggered in 32-bits mode. */
+ unsigned r;
if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
- if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
- r += (!val);
- return r;
-#endif
-# endif
- } else /* 32 bits */ {
-# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
- ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
- !defined(LZ4_FORCE_SW_BITCOUNT)
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+#endif
+# endif
+ } else /* 32 bits */ {
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
return (unsigned)__builtin_clz((U32)val) >> 3;
-# else
- val >>= 8;
- val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
- (val + 0x00FF0000)) >> 24;
- return (unsigned)val ^ 3;
-# endif
- }
- }
-}
-
-
-#define STEPSIZE sizeof(reg_t)
+# else
+ val >>= 8;
+ val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+ (val + 0x00FF0000)) >> 24;
+ return (unsigned)val ^ 3;
+# endif
+ }
+ }
+}
+
+
+#define STEPSIZE sizeof(reg_t)
LZ4_FORCE_INLINE
unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
- const BYTE* const pStart = pIn;
+ const BYTE* const pStart = pIn;
if (likely(pIn < pInLimit-(STEPSIZE-1))) {
- reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
if (!diff) {
pIn+=STEPSIZE; pMatch+=STEPSIZE;
} else {
@@ -616,31 +616,31 @@ unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
while (likely(pIn < pInLimit-(STEPSIZE-1))) {
reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
- if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
- pIn += LZ4_NbCommonBytes(diff);
- return (unsigned)(pIn - pStart);
- }
-
- if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
- if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
- if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
- return (unsigned)(pIn - pStart);
-}
-
-
-#ifndef LZ4_COMMONDEFS_ONLY
-/*-************************************
-* Local Constants
-**************************************/
-static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
-static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression runs slower on incompressible data */
-
-
-/*-************************************
-* Local Structures and types
-**************************************/
+ if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
+ pIn += LZ4_NbCommonBytes(diff);
+ return (unsigned)(pIn - pStart);
+ }
+
+ if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (unsigned)(pIn - pStart);
+}
+
+
+#ifndef LZ4_COMMONDEFS_ONLY
+/*-************************************
+* Local Constants
+**************************************/
+static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
+static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression runs slower on incompressible data */
+
+
+/*-************************************
+* Local Structures and types
+**************************************/
typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
-
+
/**
* This enum distinguishes several different modes of accessing previous
* content in the stream.
@@ -665,22 +665,22 @@ typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
* ->dictCtx->hashTable.
*/
typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
-typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
-
-
-/*-************************************
-* Local Utils
-**************************************/
-int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
-const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
-int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
-int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
-
-
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+
+/*-************************************
+* Local Utils
+**************************************/
+int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
+const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
+int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
+int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
+
+
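/* Illustration (not from the upstream sources): LZ4_compressBound() is the
 * intended way to size a worst-case destination buffer before a one-shot
 * compression.  A minimal sketch, assuming lz4.h is on the include path;
 * the helper name alloc_dst_for() is hypothetical: */
#include <stdlib.h>
#include "lz4.h"
static char* alloc_dst_for(int srcSize, int* dstCapacity)
{
    *dstCapacity = LZ4_compressBound(srcSize);    /* 0 means srcSize is too large */
    return (*dstCapacity > 0) ? (char*)malloc((size_t)*dstCapacity) : NULL;
}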
/*-************************************
* Internal Definitions used in Tests
**************************************/
-#if defined (__cplusplus) && !defined(ONLY_COMPRESS)
+#if defined (__cplusplus) && !defined(ONLY_COMPRESS)
extern "C" {
#endif
@@ -690,40 +690,40 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
int compressedSize, int maxOutputSize,
const void* dictStart, size_t dictSize);
-#if defined (__cplusplus) && !defined(ONLY_COMPRESS)
+#if defined (__cplusplus) && !defined(ONLY_COMPRESS)
}
#endif
-/*-******************************
-* Compression functions
-********************************/
-LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
+/*-******************************
+* Compression functions
+********************************/
+LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
- if (tableType == byU16)
- return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
- else
- return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
+ if (tableType == byU16)
+ return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
+ else
+ return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}
-LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
-{
- const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
+LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
+{
+ const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
if (LZ4_isLittleEndian()) {
const U64 prime5bytes = 889523592379ULL;
- return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
+ return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
} else {
const U64 prime8bytes = 11400714785074694791ULL;
- return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
+ return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
}
-}
+}
LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
{
- if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
- return LZ4_hash4(LZ4_read32(p), tableType);
+ if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+ return LZ4_hash4(LZ4_read32(p), tableType);
}
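/* Illustration (not from the upstream sources): LZ4_hash4() above is plain
 * multiplicative hashing -- multiply the 4 loaded bytes by Knuth's constant
 * 2654435761 (close to 2^32/phi) and keep the top LZ4_HASHLOG bits.  A
 * standalone sketch, using a 12-bit table purely for illustration: */
#include <stdint.h>
static uint32_t multiplicative_hash12(uint32_t sequence)
{
    return (sequence * 2654435761u) >> (32 - 12);   /* top 12 bits -> 4096 buckets */
}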
-LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
+LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
{
switch (tableType)
{
@@ -735,10 +735,10 @@ LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const ta
}
}
-LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
-{
- switch (tableType)
- {
+LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+{
+ switch (tableType)
+ {
default: /* fallthrough */
case clearedTable: /* fallthrough */
case byPtr: { /* illegal! */ assert(0); return; }
@@ -747,24 +747,24 @@ LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableT
}
}
-LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
void* tableBase, tableType_t const tableType,
const BYTE* srcBase)
{
switch (tableType)
{
case clearedTable: { /* illegal! */ assert(0); return; }
- case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
- case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
- case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
- }
-}
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
+ }
+}
LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
-{
- U32 const h = LZ4_hashPosition(p, tableType);
- LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
-}
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+}
/* LZ4_getIndexOnHash() :
* Index of match position registered in hash table.
@@ -772,8 +772,8 @@ LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_
* Assumption 1 : only valid if tableType == byU32 or byU16.
* Assumption 2 : h is presumed valid (within limits of hash table)
*/
-LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
-{
+LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+{
LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
if (tableType == byU32) {
const U32* const hashTable = (const U32*) tableBase;
@@ -786,10 +786,10 @@ LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_
return hashTable[h];
}
assert(0); return 0; /* forbidden case */
-}
+}
static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
-{
+{
if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
{ const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
@@ -800,9 +800,9 @@ LZ4_getPosition(const BYTE* p,
const void* tableBase, tableType_t tableType,
const BYTE* srcBase)
{
- U32 const h = LZ4_hashPosition(p, tableType);
- return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
-}
+ U32 const h = LZ4_hashPosition(p, tableType);
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+}
LZ4_FORCE_INLINE void
LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
@@ -812,9 +812,9 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
* therefore safe to use no matter what mode we're in. Otherwise, we figure
* out if it's safe to leave as is or whether it needs to be reset.
*/
- if ((tableType_t)cctx->tableType != clearedTable) {
+ if ((tableType_t)cctx->tableType != clearedTable) {
assert(inputSize >= 0);
- if ((tableType_t)cctx->tableType != tableType
+ if ((tableType_t)cctx->tableType != tableType
|| ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
|| ((tableType == byU32) && cctx->currentOffset > 1 GB)
|| tableType == byPtr
@@ -823,7 +823,7 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
cctx->currentOffset = 0;
- cctx->tableType = (U32)clearedTable;
+ cctx->tableType = (U32)clearedTable;
} else {
DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
}
@@ -844,31 +844,31 @@ LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
cctx->dictSize = 0;
}
-/** LZ4_compress_generic() :
- * inlined, to ensure branches are decided at compilation time.
- * Presumed already validated at this stage:
- * - source != NULL
- * - inputSize > 0
- */
-LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
- LZ4_stream_t_internal* const cctx,
- const char* const source,
- char* const dest,
- const int inputSize,
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time.
+ * Presumed already validated at this stage:
+ * - source != NULL
+ * - inputSize > 0
+ */
+LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
+ LZ4_stream_t_internal* const cctx,
+ const char* const source,
+ char* const dest,
+ const int inputSize,
int *inputConsumed, /* only written when outputDirective == fillOutput */
- const int maxOutputSize,
+ const int maxOutputSize,
const limitedOutput_directive outputDirective,
- const tableType_t tableType,
+ const tableType_t tableType,
const dict_directive dictDirective,
- const dictIssue_directive dictIssue,
+ const dictIssue_directive dictIssue,
const int acceleration)
{
int result;
- const BYTE* ip = (const BYTE*) source;
+ const BYTE* ip = (const BYTE*) source;
U32 const startIndex = cctx->currentOffset;
const BYTE* base = (const BYTE*) source - startIndex;
- const BYTE* lowLimit;
+ const BYTE* lowLimit;
const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
const BYTE* const dictionary =
@@ -879,26 +879,26 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
- const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
- const BYTE* anchor = (const BYTE*) source;
- const BYTE* const iend = ip + inputSize;
+ const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
+ const BYTE* anchor = (const BYTE*) source;
+ const BYTE* const iend = ip + inputSize;
const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
- const BYTE* const matchlimit = iend - LASTLITERALS;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
/* the dictCtx currentOffset is indexed on the start of the dictionary,
* while a dictionary in the current context precedes the currentOffset */
- const BYTE* dictBase = !dictionary ? NULL : (dictDirective == usingDictCtx) ?
+ const BYTE* dictBase = !dictionary ? NULL : (dictDirective == usingDictCtx) ?
dictionary + dictSize - dictCtx->currentOffset :
dictionary + dictSize - startIndex;
BYTE* op = (BYTE*) dest;
- BYTE* const olimit = op + maxOutputSize;
+ BYTE* const olimit = op + maxOutputSize;
U32 offset = 0;
U32 forwardH;
- DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
- assert(ip != NULL);
+ DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
+ assert(ip != NULL);
/* If init conditions are not met, we don't have to mark stream
* as having dirty context, since no action was taken yet */
if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
@@ -918,35 +918,35 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
cctx->dictSize += (U32)inputSize;
}
cctx->currentOffset += (U32)inputSize;
- cctx->tableType = (U32)tableType;
+ cctx->tableType = (U32)tableType;
if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
- /* First Byte */
- LZ4_putPosition(ip, cctx->hashTable, tableType, base);
- ip++; forwardH = LZ4_hashPosition(ip, tableType);
+ /* First Byte */
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
- /* Main Loop */
- for ( ; ; ) {
- const BYTE* match;
+ /* Main Loop */
+ for ( ; ; ) {
+ const BYTE* match;
BYTE* token;
const BYTE* filledIp;
- /* Find a match */
+ /* Find a match */
if (tableType == byPtr) {
const BYTE* forwardIp = ip;
int step = 1;
int searchMatchNb = acceleration << LZ4_skipTrigger;
- do {
- U32 const h = forwardH;
- ip = forwardIp;
- forwardIp += step;
- step = (searchMatchNb++ >> LZ4_skipTrigger);
+ do {
+ U32 const h = forwardH;
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
assert(ip < mflimitPlusOne);
- match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
+ match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
forwardH = LZ4_hashPosition(forwardIp, tableType);
LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
@@ -978,10 +978,10 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
match = dictBase + matchIndex;
matchIndex += dictDelta; /* make dictCtx index comparable with current context */
- lowLimit = dictionary;
- } else {
+ lowLimit = dictionary;
+ } else {
match = base + matchIndex;
- lowLimit = (const BYTE*)source;
+ lowLimit = (const BYTE*)source;
}
} else if (dictDirective==usingExtDict) {
if (matchIndex < startIndex) {
@@ -996,7 +996,7 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
} else { /* single continuous memory segment */
match = base + matchIndex;
}
- forwardH = LZ4_hashPosition(forwardIp, tableType);
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex);
@@ -1014,15 +1014,15 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
}
} while(1);
- }
+ }
- /* Catch up */
+ /* Catch up */
filledIp = ip;
while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
- /* Encode Literals */
- { unsigned const litLength = (unsigned)(ip - anchor);
- token = op++;
+ /* Encode Literals */
+ { unsigned const litLength = (unsigned)(ip - anchor);
+ token = op++;
if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
(unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
@@ -1032,17 +1032,17 @@ LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
op--;
goto _last_literals;
}
- if (litLength >= RUN_MASK) {
+ if (litLength >= RUN_MASK) {
int len = (int)(litLength - RUN_MASK);
- *token = (RUN_MASK<<ML_BITS);
- for(; len >= 255 ; len-=255) *op++ = 255;
- *op++ = (BYTE)len;
+ *token = (RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
}
- else *token = (BYTE)(litLength<<ML_BITS);
-
- /* Copy Literals */
+ else *token = (BYTE)(litLength<<ML_BITS);
+
+ /* Copy Literals */
LZ4_wildCopy8(op, anchor, op+litLength);
- op+=litLength;
+ op+=litLength;
DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
(int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
}
@@ -1063,7 +1063,7 @@ _next_match:
goto _last_literals;
}
- /* Encode Offset */
+ /* Encode Offset */
if (maybe_extMem) { /* static test */
DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
@@ -1074,28 +1074,28 @@ _next_match:
LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
}
- /* Encode MatchLength */
- { unsigned matchCode;
-
+ /* Encode MatchLength */
+ { unsigned matchCode;
+
if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
&& (lowLimit==dictionary) /* match within extDict */ ) {
const BYTE* limit = ip + (dictEnd-match);
assert(dictEnd > match);
- if (limit > matchlimit) limit = matchlimit;
- matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
+ if (limit > matchlimit) limit = matchlimit;
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
ip += (size_t)matchCode + MINMATCH;
- if (ip==limit) {
+ if (ip==limit) {
unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
- matchCode += more;
- ip += more;
- }
+ matchCode += more;
+ ip += more;
+ }
DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
- } else {
- matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
+ } else {
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
ip += (size_t)matchCode + MINMATCH;
DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
- }
-
+ }
+
if ((outputDirective) && /* Check output buffer overflow */
(unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
if (outputDirective == fillOutput) {
@@ -1122,32 +1122,32 @@ _next_match:
return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
}
}
- if (matchCode >= ML_MASK) {
- *token += ML_MASK;
- matchCode -= ML_MASK;
- LZ4_write32(op, 0xFFFFFFFF);
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
while (matchCode >= 4*255) {
op+=4;
LZ4_write32(op, 0xFFFFFFFF);
matchCode -= 4*255;
}
- op += matchCode / 255;
- *op++ = (BYTE)(matchCode % 255);
- } else
- *token += (BYTE)(matchCode);
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else
+ *token += (BYTE)(matchCode);
}
/* Ensure we have enough space for the last literals. */
assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
- anchor = ip;
+ anchor = ip;
- /* Test end of chunk */
+ /* Test end of chunk */
if (ip >= mflimitPlusOne) break;
- /* Fill table */
- LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
+ /* Fill table */
+ LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
- /* Test next position */
+ /* Test next position */
if (tableType == byPtr) {
match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
@@ -1198,38 +1198,38 @@ _next_match:
}
}
- /* Prepare next loop */
- forwardH = LZ4_hashPosition(++ip, tableType);
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
- /* Encode Last Literals */
+ /* Encode Last Literals */
{ size_t lastRun = (size_t)(iend - anchor);
if ( (outputDirective) && /* Check output buffer overflow */
(op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
if (outputDirective == fillOutput) {
/* adapt lastRun to fill 'dst' */
assert(olimit >= op);
- lastRun = (size_t)(olimit-op) - 1/*token*/;
- lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/
+ lastRun = (size_t)(olimit-op) - 1/*token*/;
+ lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/
} else {
assert(outputDirective == limitedOutput);
return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
}
}
- DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
- if (lastRun >= RUN_MASK) {
- size_t accumulator = lastRun - RUN_MASK;
- *op++ = RUN_MASK << ML_BITS;
- for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
- *op++ = (BYTE) accumulator;
- } else {
- *op++ = (BYTE)(lastRun<<ML_BITS);
- }
- LZ4_memcpy(op, anchor, lastRun);
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
+ if (lastRun >= RUN_MASK) {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRun<<ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRun);
ip = anchor + lastRun;
- op += lastRun;
+ op += lastRun;
}
if (outputDirective == fillOutput) {
@@ -1237,58 +1237,58 @@ _last_literals:
}
result = (int)(((char*)op) - dest);
assert(result > 0);
- DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
return result;
}
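/* Illustration (not from the upstream sources): the loop above emits standard
 * LZ4 sequences -- a token byte whose high nibble is the literal run length and
 * whose low nibble is matchLength-MINMATCH (15 in either nibble meaning "extra
 * length bytes follow"), then the extra literal-length bytes, the literals, a
 * 2-byte little-endian offset, and finally the extra match-length bytes.
 * Worked example: 20 literals followed by a 19-byte match at offset 64 become
 * token 0xFF, extra literal byte 5 (15+5=20), the 20 literals, offset bytes
 * 0x40 0x00, and extra match byte 0 (15+0+4=19).  A sketch of the token
 * construction only, with a hypothetical helper name: */
#include <stdint.h>
static uint8_t lz4_token(unsigned litLength, unsigned matchLength /* >= 4 */)
{
    unsigned const ll = (litLength       < 15) ? litLength       : 15;
    unsigned const ml = (matchLength - 4 < 15) ? matchLength - 4 : 15;
    return (uint8_t)((ll << 4) | ml);
}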
-/** LZ4_compress_generic() :
- * inlined, to ensure branches are decided at compilation time;
- * takes care of src == (NULL, 0)
- * and forward the rest to LZ4_compress_generic_validated */
-LZ4_FORCE_INLINE int LZ4_compress_generic(
- LZ4_stream_t_internal* const cctx,
- const char* const src,
- char* const dst,
- const int srcSize,
- int *inputConsumed, /* only written when outputDirective == fillOutput */
- const int dstCapacity,
- const limitedOutput_directive outputDirective,
- const tableType_t tableType,
- const dict_directive dictDirective,
- const dictIssue_directive dictIssue,
- const int acceleration)
-{
- DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
- srcSize, dstCapacity);
-
- if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */
- if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
- if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */
- DEBUGLOG(5, "Generating an empty block");
- assert(outputDirective == notLimited || dstCapacity >= 1);
- assert(dst != NULL);
- dst[0] = 0;
- if (outputDirective == fillOutput) {
- assert (inputConsumed != NULL);
- *inputConsumed = 0;
- }
- return 1;
- }
- assert(src != NULL);
-
- return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
- inputConsumed, /* only written into if outputDirective == fillOutput */
- dstCapacity, outputDirective,
- tableType, dictDirective, dictIssue, acceleration);
-}
-
-
-int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
-{
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time;
+ * takes care of src == (NULL, 0)
+ * and forward the rest to LZ4_compress_generic_validated */
+LZ4_FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal* const cctx,
+ const char* const src,
+ char* const dst,
+ const int srcSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int dstCapacity,
+ const limitedOutput_directive outputDirective,
+ const tableType_t tableType,
+ const dict_directive dictDirective,
+ const dictIssue_directive dictIssue,
+ const int acceleration)
+{
+ DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
+ srcSize, dstCapacity);
+
+ if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */
+ if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
+ if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */
+ DEBUGLOG(5, "Generating an empty block");
+ assert(outputDirective == notLimited || dstCapacity >= 1);
+ assert(dst != NULL);
+ dst[0] = 0;
+ if (outputDirective == fillOutput) {
+ assert (inputConsumed != NULL);
+ *inputConsumed = 0;
+ }
+ return 1;
+ }
+ assert(src != NULL);
+
+ return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
+ inputConsumed, /* only written into if outputDirective == fillOutput */
+ dstCapacity, outputDirective,
+ tableType, dictDirective, dictIssue, acceleration);
+}
+
+
+int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
assert(ctx != NULL);
- if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
- if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
if (maxOutputSize >= LZ4_compressBound(inputSize)) {
if (inputSize < LZ4_64Klimit) {
return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
@@ -1317,9 +1317,9 @@ int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int
*/
int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
{
- LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
- if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
- if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+ LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
if (dstCapacity >= LZ4_compressBound(srcSize)) {
if (srcSize < LZ4_64Klimit) {
@@ -1335,7 +1335,7 @@ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst
LZ4_prepareTable(ctx, srcSize, tableType);
return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
}
- } else {
+ } else {
if (srcSize < LZ4_64Klimit) {
const tableType_t tableType = byU16;
LZ4_prepareTable(ctx, srcSize, tableType);
@@ -1349,99 +1349,99 @@ int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst
LZ4_prepareTable(ctx, srcSize, tableType);
return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
}
- }
-}
-
-
-int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+ }
+}
+
+
+int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
int result;
#if (LZ4_HEAPMODE)
LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
if (ctxPtr == NULL) return 0;
#else
- LZ4_stream_t ctx;
+ LZ4_stream_t ctx;
LZ4_stream_t* const ctxPtr = &ctx;
#endif
result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
#if (LZ4_HEAPMODE)
- FREEMEM(ctxPtr);
-#endif
- return result;
-}
-
-
+ FREEMEM(ctxPtr);
+#endif
+ return result;
+}
+
+
int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
-{
+{
return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
-}
-
-
+}
+
+
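/* Illustration (not from the upstream sources): a minimal round-trip through
 * the simplest public entry points.  Assumes lz4.h is on the include path;
 * buffer sizes and the helper name roundtrip_ok() are illustrative only. */
#include <string.h>
#include "lz4.h"
static int roundtrip_ok(const char* src, int srcSize)
{
    char compressed[LZ4_COMPRESSBOUND(4096)];
    char restored[4096];
    if (srcSize <= 0 || srcSize > 4096) return 0;
    {   int const cSize = LZ4_compress_default(src, compressed, srcSize, (int)sizeof(compressed));
        if (cSize <= 0) return 0;                     /* 0 means compression failed */
        {   int const dSize = LZ4_decompress_safe(compressed, restored, cSize, (int)sizeof(restored));
            return (dSize == srcSize) && (memcmp(src, restored, (size_t)srcSize) == 0);
    }   }
}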
/* Note!: This function leaves the stream in an unclean/broken state!
* It is not safe to subsequently use the same state with a _fastReset() or
* _continue() call without resetting it. */
-static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
void* const s = LZ4_initStream(state, sizeof (*state));
assert(s != NULL); (void)s;
-
- if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
- return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
- } else {
+
+ if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
+ } else {
if (*srcSizePtr < LZ4_64Klimit) {
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
} else {
tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
} }
-}
-
-
-int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
-{
+}
+
+
+int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+{
#if (LZ4_HEAPMODE)
LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
if (ctx == NULL) return 0;
#else
- LZ4_stream_t ctxBody;
- LZ4_stream_t* ctx = &ctxBody;
+ LZ4_stream_t ctxBody;
+ LZ4_stream_t* ctx = &ctxBody;
#endif
-
- int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
-
+
+ int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
+
#if (LZ4_HEAPMODE)
- FREEMEM(ctx);
-#endif
- return result;
+ FREEMEM(ctx);
+#endif
+ return result;
}
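/* Illustration (not from the upstream sources): with LZ4_compress_destSize()
 * the destination capacity is the hard limit and *srcSizePtr is rewritten to
 * the number of source bytes actually consumed.  A minimal sketch, assuming
 * lz4.h is available; fill_fixed_block() is a hypothetical helper name: */
#include "lz4.h"
static int fill_fixed_block(const char* src, int srcAvailable,
                            char* dst, int dstSize, int* srcConsumed)
{
    *srcConsumed = srcAvailable;                                   /* in: bytes available */
    return LZ4_compress_destSize(src, dst, srcConsumed, dstSize);  /* out: bytes consumed */
}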
-
-/*-******************************
-* Streaming functions
-********************************/
-
-#if !defined(ONLY_COMPRESS)
-LZ4_stream_t* LZ4_createStream(void)
+
+/*-******************************
+* Streaming functions
+********************************/
+
+#if !defined(ONLY_COMPRESS)
+LZ4_stream_t* LZ4_createStream(void)
{
LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
- LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
+ LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
DEBUGLOG(4, "LZ4_createStream %p", lz4s);
if (lz4s == NULL) return NULL;
LZ4_initStream(lz4s, sizeof(*lz4s));
- return lz4s;
+ return lz4s;
}
-#endif /* ONLY_COMPRESS */
+#endif /* ONLY_COMPRESS */
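/* Illustration (not from the upstream sources): the heap-allocated counterpart
 * to LZ4_initStream().  A minimal create/compress/free sketch, assuming lz4.h
 * is available; with_heap_stream() is a hypothetical helper name: */
#include "lz4.h"
static int with_heap_stream(const char* src, int srcSize, char* dst, int dstCapacity)
{
    LZ4_stream_t* const s = LZ4_createStream();
    if (s == NULL) return 0;
    {   int const written = LZ4_compress_fast_continue(s, src, dst, srcSize, dstCapacity, 1);
        LZ4_freeStream(s);
        return written;
    }
}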
static size_t LZ4_stream_t_alignment(void)
{
-#if LZ4_ALIGN_TEST
- typedef struct { char c; LZ4_stream_t t; } t_a;
- return sizeof(t_a) - sizeof(LZ4_stream_t);
-#else
- return 1; /* effectively disabled */
-#endif
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_stream_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_stream_t);
+#else
+ return 1; /* effectively disabled */
+#endif
}
LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
@@ -1449,41 +1449,41 @@ LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
DEBUGLOG(5, "LZ4_initStream");
if (buffer == NULL) { return NULL; }
if (size < sizeof(LZ4_stream_t)) { return NULL; }
- if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
- MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
+ if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
+ MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
return (LZ4_stream_t*)buffer;
}
-#if !defined(ONLY_COMPRESS)
+#if !defined(ONLY_COMPRESS)
/* resetStream is now deprecated,
* prefer initStream() which is more general */
-void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
-{
+void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
+{
DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
- MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
-}
+ MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
+}
void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
}
-int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
-{
+int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
+{
if (!LZ4_stream) return 0; /* support free on NULL */
DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
- FREEMEM(LZ4_stream);
- return (0);
-}
+ FREEMEM(LZ4_stream);
+ return (0);
+}
-#define HASH_UNIT sizeof(reg_t)
-int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
-{
- LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
+#define HASH_UNIT sizeof(reg_t)
+int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+{
+ LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
const tableType_t tableType = byU32;
- const BYTE* p = (const BYTE*)dictionary;
- const BYTE* const dictEnd = p + dictSize;
- const BYTE* base;
+ const BYTE* p = (const BYTE*)dictionary;
+ const BYTE* const dictEnd = p + dictSize;
+ const BYTE* base;
DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
@@ -1505,19 +1505,19 @@ int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
return 0;
}
- if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
+ if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
base = dictEnd - dict->currentOffset;
- dict->dictionary = p;
- dict->dictSize = (U32)(dictEnd - p);
- dict->tableType = (U32)tableType;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->tableType = (U32)tableType;
- while (p <= dictEnd-HASH_UNIT) {
+ while (p <= dictEnd-HASH_UNIT) {
LZ4_putPosition(p, dict->hashTable, tableType, base);
- p+=3;
- }
+ p+=3;
+ }
return (int)dict->dictSize;
-}
+}
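/* Illustration (not from the upstream sources): a typical dictionary
 * compression sequence -- initialize a stream, load the dictionary once, then
 * compress a block that may reference it.  Assumes lz4.h is available; the
 * dictionary buffer must stay valid while compressing. */
#include "lz4.h"
static int compress_with_dict(const char* dict, int dictSize,
                              const char* src, int srcSize,
                              char* dst, int dstCapacity)
{
    LZ4_stream_t stream;
    if (LZ4_initStream(&stream, sizeof(stream)) == NULL) return 0;
    LZ4_loadDict(&stream, dict, dictSize);
    return LZ4_compress_fast_continue(&stream, src, dst, srcSize, dstCapacity, 1);
}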
void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
@@ -1548,40 +1548,40 @@ void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dict
static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
-{
+{
assert(nextSize >= 0);
if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
- /* rescale hash table */
- U32 const delta = LZ4_dict->currentOffset - 64 KB;
- const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
- int i;
+ /* rescale hash table */
+ U32 const delta = LZ4_dict->currentOffset - 64 KB;
+ const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+ int i;
DEBUGLOG(4, "LZ4_renormDictT");
- for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
- if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
- else LZ4_dict->hashTable[i] -= delta;
+ for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
+ if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
+ else LZ4_dict->hashTable[i] -= delta;
}
- LZ4_dict->currentOffset = 64 KB;
- if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
- LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
- }
-}
+ LZ4_dict->currentOffset = 64 KB;
+ if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
+ LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+ }
+}
int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
const char* source, char* dest,
int inputSize, int maxOutputSize,
int acceleration)
-{
+{
const tableType_t tableType = byU32;
- LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
+ LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */
- if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
- if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
-
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
/* invalidate tiny dictionaries */
if ( (streamPtr->dictSize-1 < 4-1) /* intentional underflow */
&& (dictEnd != (const BYTE*)source) ) {
@@ -1591,26 +1591,26 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
dictEnd = (const BYTE*)source;
}
- /* Check overlapping input/dictionary space */
- { const BYTE* sourceEnd = (const BYTE*) source + inputSize;
- if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
- streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
- if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
- if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
- streamPtr->dictionary = dictEnd - streamPtr->dictSize;
+ /* Check overlapping input/dictionary space */
+ { const BYTE* sourceEnd = (const BYTE*) source + inputSize;
+ if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
+ streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+ if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
+ if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
+ streamPtr->dictionary = dictEnd - streamPtr->dictSize;
}
}
- /* prefix mode : source data follows dictionary */
- if (dictEnd == (const BYTE*)source) {
- if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
+ /* prefix mode : source data follows dictionary */
+ if (dictEnd == (const BYTE*)source) {
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
- else
+ else
return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
- }
+ }
- /* external dictionary mode */
- { int result;
+ /* external dictionary mode */
+ { int result;
if (streamPtr->dictCtx) {
/* We depend here on the fact that dictCtx'es (produced by
* LZ4_loadDict) guarantee that their tables contain no references
@@ -1623,7 +1623,7 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
* cost to copy the dictionary's tables into the active context,
* so that the compression loop is only looking into one table.
*/
- LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
+ LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
} else {
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
@@ -1635,61 +1635,61 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
}
}
- streamPtr->dictionary = (const BYTE*)source;
- streamPtr->dictSize = (U32)inputSize;
- return result;
- }
+ streamPtr->dictionary = (const BYTE*)source;
+ streamPtr->dictSize = (U32)inputSize;
+ return result;
+ }
}
/* Hidden debug function, to force-test external dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
{
- LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
- int result;
+ LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
+ int result;
LZ4_renormDictT(streamPtr, srcSize);
-
+
if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
} else {
result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
}
-
- streamPtr->dictionary = (const BYTE*)source;
+
+ streamPtr->dictionary = (const BYTE*)source;
streamPtr->dictSize = (U32)srcSize;
-
- return result;
-}
-
-
-/*! LZ4_saveDict() :
- * If previously compressed data block is not guaranteed to remain available at its memory location,
- * save it into a safer place (char* safeBuffer).
- * Note : you don't need to call LZ4_loadDict() afterwards,
- * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
- * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
- */
-int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
-{
- LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
- const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
-
+
+ return result;
+}
+
+
+/*! LZ4_saveDict() :
+ * If previously compressed data block is not guaranteed to remain available at its memory location,
+ * save it into a safer place (char* safeBuffer).
+ * Note : you don't need to call LZ4_loadDict() afterwards,
+ * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
+ * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
+ */
+int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
+ const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
+
if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
-
- if (safeBuffer == NULL) assert(dictSize == 0);
- if (dictSize > 0)
- memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
-
- dict->dictionary = (const BYTE*)safeBuffer;
- dict->dictSize = (U32)dictSize;
-
- return dictSize;
-}
-
-
-
+
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0)
+ memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+
+ dict->dictionary = (const BYTE*)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
+
+
+
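/* Illustration (not from the upstream sources): when the buffer holding the
 * just-compressed block is about to be recycled, LZ4_saveDict() copies up to
 * 64 KB of history aside so later blocks can still reference it.  A minimal
 * sketch, assuming lz4.h is available; compress_next_block() is hypothetical: */
#include "lz4.h"
static int compress_next_block(LZ4_stream_t* stream, char dictBuf[65536],
                               const char* src, int srcSize,
                               char* dst, int dstCapacity)
{
    int const written = LZ4_compress_fast_continue(stream, src, dst, srcSize, dstCapacity, 1);
    LZ4_saveDict(stream, dictBuf, 65536);   /* history now lives in dictBuf; src may be reused */
    return written;
}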
/*-*******************************
* Decompression functions
********************************/
@@ -1710,52 +1710,52 @@ typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
*/
typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
LZ4_FORCE_INLINE unsigned
-read_variable_length(const BYTE**ip, const BYTE* lencheck,
- int loop_check, int initial_check,
- variable_length_error* error)
-{
- U32 length = 0;
- U32 s;
- if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
- *error = initial_error;
- return length;
+read_variable_length(const BYTE**ip, const BYTE* lencheck,
+ int loop_check, int initial_check,
+ variable_length_error* error)
+{
+ U32 length = 0;
+ U32 s;
+ if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = initial_error;
+ return length;
}
- do {
- s = **ip;
- (*ip)++;
- length += s;
- if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
- *error = loop_error;
- return length;
- }
- } while (s==255);
+ do {
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = loop_error;
+ return length;
+ }
+ } while (s==255);
- return length;
+ return length;
}
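/* Illustration (not from the upstream sources): the length continuation
 * decoded above is a byte-wise sum -- keep adding bytes while they read 255,
 * then add the final byte that is < 255.  Worked example: a literal run of 525
 * uses token nibble 15 plus extra bytes 255, 255, 0 (15+255+255+0 = 525).
 * A sketch of the matching encoder side, with a hypothetical helper name: */
static unsigned char* write_extra_length(unsigned char* op, unsigned remaining /* = length - 15 */)
{
    while (remaining >= 255) { *op++ = 255; remaining -= 255; }
    *op++ = (unsigned char)remaining;
    return op;
}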
-/*! LZ4_decompress_generic() :
+/*! LZ4_decompress_generic() :
* This generic decompression function covers all use cases.
* It shall be instantiated several times, using different sets of directives.
* Note that it is important for performance that this function really get inlined,
- * in order to remove useless branches during compilation optimization.
- */
+ * in order to remove useless branches during compilation optimization.
+ */
LZ4_FORCE_INLINE int
LZ4_decompress_generic(
const char* const src,
char* const dst,
int srcSize,
int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
-
+
endCondition_directive endOnInput, /* endOnOutputSize, endOnInputSize */
earlyEnd_directive partialDecoding, /* full, partial */
dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
- const BYTE* const dictStart, /* only if dict==usingExtDict */
- const size_t dictSize /* note : = 0 if noDict */
- )
-{
+ const BYTE* const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note : = 0 if noDict */
+ )
+{
if (src == NULL) { return -1; }
-
+
{ const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
@@ -1773,11 +1773,11 @@ LZ4_decompress_generic(
const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
- const BYTE* match;
- size_t offset;
+ const BYTE* match;
+ size_t offset;
unsigned token;
size_t length;
-
+
DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
@@ -1796,8 +1796,8 @@ LZ4_decompress_generic(
if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
DEBUGLOG(6, "skip fast decode loop");
goto safe_decode;
- }
-
+ }
+
/* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */
while (1) {
/* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
@@ -1811,7 +1811,7 @@ LZ4_decompress_generic(
/* decode literal length */
if (length == RUN_MASK) {
variable_length_error error = ok;
- length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
if (error == initial_error) { goto _output_error; }
if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
@@ -1828,22 +1828,22 @@ LZ4_decompress_generic(
* it doesn't know input length, and only relies on end-of-block properties */
}
ip += length; op = cpy;
- } else {
+ } else {
cpy = op+length;
if (endOnInput) { /* LZ4_decompress_safe() */
DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
/* We don't need to check oend, since we check it once for each loop below */
if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
/* Literals can only be 14, but hope compilers optimize if we copy by a register size */
- LZ4_memcpy(op, ip, 16);
+ LZ4_memcpy(op, ip, 16);
} else { /* LZ4_decompress_fast() */
/* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
* it doesn't know input length, and relies on end-of-block properties */
- LZ4_memcpy(op, ip, 8);
- if (length > 8) { LZ4_memcpy(op+8, ip+8, 8); }
+ LZ4_memcpy(op, ip, 8);
+ if (length > 8) { LZ4_memcpy(op+8, ip+8, 8); }
}
ip += length; op = cpy;
- }
+ }
/* get offset */
offset = LZ4_readLE16(ip); ip+=2;
@@ -1854,10 +1854,10 @@ LZ4_decompress_generic(
length = token & ML_MASK;
if (length == ML_MASK) {
- variable_length_error error = ok;
- if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
- length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
- if (error != ok) { goto _output_error; }
+ variable_length_error error = ok;
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
+ if (error != ok) { goto _output_error; }
if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
length += MINMATCH;
if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
@@ -1876,20 +1876,20 @@ LZ4_decompress_generic(
assert(match <= op);
assert(op + 18 <= oend);
- LZ4_memcpy(op, match, 8);
- LZ4_memcpy(op+8, match+8, 8);
- LZ4_memcpy(op+16, match+16, 2);
+ LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op+8, match+8, 8);
+ LZ4_memcpy(op+16, match+16, 2);
op += length;
continue;
} } }
- if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
/* match starting within external dictionary */
if ((dict==usingExtDict) && (match < lowPrefix)) {
if (unlikely(op+length > oend-LASTLITERALS)) {
if (partialDecoding) {
- DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
- length = MIN(length, (size_t)(oend-op));
+ DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
+ length = MIN(length, (size_t)(oend-op));
} else {
goto _output_error; /* end-of-block condition violated */
} }
@@ -1902,14 +1902,14 @@ LZ4_decompress_generic(
/* match stretches into both external dictionary and current block */
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;
- LZ4_memcpy(op, dictEnd - copySize, copySize);
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
BYTE* const endOfMatch = op + restSize;
const BYTE* copyFrom = lowPrefix;
while (op < endOfMatch) { *op++ = *copyFrom++; }
} else {
- LZ4_memcpy(op, lowPrefix, restSize);
+ LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
} }
continue;
@@ -1926,7 +1926,7 @@ LZ4_decompress_generic(
}
op = cpy; /* wildcopy correction */
- }
+ }
safe_decode:
#endif
@@ -1934,7 +1934,7 @@ LZ4_decompress_generic(
while (1) {
token = *ip++;
length = token >> ML_BITS; /* literal length */
-
+
assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
/* A two-stage shortcut for the most common case:
@@ -1950,7 +1950,7 @@ LZ4_decompress_generic(
/* strictly "less than" on input, to re-enter the loop with at least one byte */
&& likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
/* Copy the literals */
- LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
+ LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
op += length; ip += length;
/* The second stage: prepare for match copying, decode full info.
@@ -1965,9 +1965,9 @@ LZ4_decompress_generic(
&& (offset >= 8)
&& (dict==withPrefix64k || match >= lowPrefix) ) {
/* Copy the match. */
- LZ4_memcpy(op + 0, match + 0, 8);
- LZ4_memcpy(op + 8, match + 8, 8);
- LZ4_memcpy(op +16, match +16, 2);
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op +16, match +16, 2);
op += length + MINMATCH;
/* Both stages worked, load the next token. */
continue;
@@ -1981,7 +1981,7 @@ LZ4_decompress_generic(
/* decode literal length */
if (length == RUN_MASK) {
variable_length_error error = ok;
- length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
if (error == initial_error) { goto _output_error; }
if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
@@ -1997,28 +1997,28 @@ LZ4_decompress_generic(
|| ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
{
/* We've either hit the input parsing restriction or the output parsing restriction.
- * In the normal scenario, decoding a full block, it must be the last sequence,
- * otherwise it's an error (invalid input or dimensions).
- * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
+ * In the normal scenario, decoding a full block, it must be the last sequence,
+ * otherwise it's an error (invalid input or dimensions).
+ * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
*/
if (partialDecoding) {
/* Since we are partial decoding we may be in this block because of the output parsing
* restriction, which is not valid since the output buffer is allowed to be undersized.
*/
assert(endOnInput);
- DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
- DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
- DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
- DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
- /* Finishing in the middle of a literals segment,
- * due to lack of input.
+ DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
+ DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
+ DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
+ DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
+ /* Finishing in the middle of a literals segment,
+ * due to lack of input.
*/
- if (ip+length > iend) {
- length = (size_t)(iend-ip);
- cpy = op + length;
- }
- /* Finishing in the middle of a literals segment,
- * due to lack of output space.
+ if (ip+length > iend) {
+ length = (size_t)(iend-ip);
+ cpy = op + length;
+ }
+ /* Finishing in the middle of a literals segment,
+ * due to lack of output space.
*/
if (cpy > oend) {
cpy = oend;
@@ -2033,25 +2033,25 @@ LZ4_decompress_generic(
/* We must be on the last sequence (or invalid) because of the parsing limitations
* so check that we exactly consume the input and don't overrun the output buffer.
*/
- if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
- DEBUGLOG(6, "should have been last run of literals")
- DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
- DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
- goto _output_error;
- }
+ if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
+ DEBUGLOG(6, "should have been last run of literals")
+ DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
+ DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+ goto _output_error;
+ }
}
- memmove(op, ip, length); /* supports overlapping memory regions; only matters for in-place decompression scenarios */
+ memmove(op, ip, length); /* supports overlapping memory regions; only matters for in-place decompression scenarios */
ip += length;
- op += length;
- /* Necessarily EOF when !partialDecoding.
- * When partialDecoding, it is EOF if we've either
- * filled the output buffer or
- * can't proceed with reading an offset for following match.
+ op += length;
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for following match.
*/
- if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
+ if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
break;
}
- } else {
+ } else {
LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */
ip += length; op = cpy;
}
@@ -2066,7 +2066,7 @@ LZ4_decompress_generic(
_copy_match:
if (length == ML_MASK) {
variable_length_error error = ok;
- length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
if (error != ok) goto _output_error;
if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
}
@@ -2087,27 +2087,27 @@ LZ4_decompress_generic(
/* match fits entirely within external dictionary : just copy */
memmove(op, dictEnd - (lowPrefix-match), length);
op += length;
- } else {
+ } else {
/* match stretches into both external dictionary and current block */
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;
- LZ4_memcpy(op, dictEnd - copySize, copySize);
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
BYTE* const endOfMatch = op + restSize;
const BYTE* copyFrom = lowPrefix;
while (op < endOfMatch) *op++ = *copyFrom++;
} else {
- LZ4_memcpy(op, lowPrefix, restSize);
+ LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
} }
continue;
}
assert(match >= lowPrefix);
-
+
/* copy match within block */
cpy = op + length;
-
+
/* partialDecoding : may end anywhere within the block */
assert(op<=oend);
if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
@@ -2117,12 +2117,12 @@ LZ4_decompress_generic(
if (matchEnd > op) { /* overlap copy */
while (op < copyEnd) { *op++ = *match++; }
} else {
- LZ4_memcpy(op, match, mlen);
+ LZ4_memcpy(op, match, mlen);
}
op = copyEnd;
if (op == oend) { break; }
continue;
- }
+ }
if (unlikely(offset<8)) {
LZ4_write32(op, 0); /* silence msan warning when offset==0 */
@@ -2131,10 +2131,10 @@ LZ4_decompress_generic(
op[2] = match[2];
op[3] = match[3];
match += inc32table[offset];
- LZ4_memcpy(op+4, match, 4);
+ LZ4_memcpy(op+4, match, 4);
match -= dec64table[offset];
} else {
- LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op, match, 8);
match += 8;
}
op += 8;
@@ -2149,15 +2149,15 @@ LZ4_decompress_generic(
}
while (op < cpy) { *op++ = *match++; }
} else {
- LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op, match, 8);
if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
}
op = cpy; /* wildcopy correction */
- }
+ }
/* end of decoding */
if (endOnInput) {
- DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
+ DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
} else {
return (int) (((const char*)ip)-src); /* Nb of input bytes read */
@@ -2168,38 +2168,38 @@ LZ4_decompress_generic(
return (int) (-(((const char*)ip)-src))-1;
}
}
-
-
+
+
/*===== Instantiate the API decoding functions. =====*/
-LZ4_FORCE_O2
-int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
-{
+LZ4_FORCE_O2
+int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
+{
return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
endOnInputSize, decode_full_block, noDict,
(BYTE*)dest, NULL, 0);
-}
-
-LZ4_FORCE_O2
+}
+
+LZ4_FORCE_O2
int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
-{
+{
dstCapacity = MIN(targetOutputSize, dstCapacity);
return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
endOnInputSize, partial_decode,
noDict, (BYTE*)dst, NULL, 0);
-}
-
-LZ4_FORCE_O2
-int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
-{
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
+{
return LZ4_decompress_generic(source, dest, 0, originalSize,
endOnOutputSize, decode_full_block, withPrefix64k,
(BYTE*)dest - 64 KB, NULL, 0);
-}
-
+}
+
/*===== Instantiate a few more decoding cases, used more than once. =====*/
-
-LZ4_FORCE_O2 /* Exported, an obsolete API function. */
+
+LZ4_FORCE_O2 /* Exported, an obsolete API function. */
int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
@@ -2215,7 +2215,7 @@ int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int origin
return LZ4_decompress_fast(source, dest, originalSize);
}
-LZ4_FORCE_O2
+LZ4_FORCE_O2
static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
size_t prefixSize)
{
@@ -2224,7 +2224,7 @@ static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, i
(BYTE*)dest-prefixSize, NULL, 0);
}
-LZ4_FORCE_O2
+LZ4_FORCE_O2
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
int compressedSize, int maxOutputSize,
const void* dictStart, size_t dictSize)
@@ -2234,7 +2234,7 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
(BYTE*)dest, (const BYTE*)dictStart, dictSize);
}
-LZ4_FORCE_O2
+LZ4_FORCE_O2
static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
const void* dictStart, size_t dictSize)
{
@@ -2265,38 +2265,38 @@ int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalS
(BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
}
-/*===== streaming decompression functions =====*/
-
-LZ4_streamDecode_t* LZ4_createStreamDecode(void)
-{
+/*===== streaming decompression functions =====*/
+
+LZ4_streamDecode_t* LZ4_createStreamDecode(void)
+{
LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
- return lz4s;
-}
-
-int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
-{
+ return lz4s;
+}
+
+int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
+{
if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
- FREEMEM(LZ4_stream);
- return 0;
-}
-
+ FREEMEM(LZ4_stream);
+ return 0;
+}
+
/*! LZ4_setStreamDecode() :
* Use this function to instruct where to find the dictionary.
* This function is not necessary if previous data is still available where it was decoded.
* Loading a size of 0 is allowed (same effect as no dictionary).
* @return : 1 if OK, 0 if error
- */
-int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
-{
- LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
- lz4sd->prefixSize = (size_t) dictSize;
- lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
- lz4sd->externalDict = NULL;
- lz4sd->extDictSize = 0;
- return 1;
-}
-
+ */
+int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ lz4sd->prefixSize = (size_t) dictSize;
+ lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
+
/*! LZ4_decoderRingBufferSize() :
* when setting a ring buffer for streaming decompression (optional scenario),
* provides the minimum size of this ring buffer
@@ -2316,24 +2316,24 @@ int LZ4_decoderRingBufferSize(int maxBlockSize)
return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
}
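/* Illustrative sketch (not part of the library sources) : sizing a ring buffer
 * for streaming decompression with LZ4_decoderRingBufferSize(), assuming
 * "lz4.h" and <stdlib.h> are included. `maxBlockSize` is a hypothetical
 * application-chosen value; the non-positive check is a defensive assumption.
 */
static char* example_allocDecodeRingBuffer(int maxBlockSize, int* ringSizePtr)
{
    int const ringSize = LZ4_decoderRingBufferSize(maxBlockSize);
    if (ringSize <= 0) return NULL;        /* maxBlockSize outside supported range */
    *ringSizePtr = ringSize;
    /* successive LZ4_decompress_safe_continue() calls can decode into this buffer,
     * wrapping back to the start when fewer than maxBlockSize bytes remain */
    return (char*)malloc((size_t)ringSize);
}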
-/*
-*_continue() :
- These decoding functions allow decompression of multiple blocks in "streaming" mode.
- Previously decoded blocks must still be available at the memory position where they were decoded.
- If it's not possible, save the relevant part of decoded data into a safe buffer,
- and indicate where it stands using LZ4_setStreamDecode()
-*/
-LZ4_FORCE_O2
-int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
-{
- LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
- int result;
-
+/*
+*_continue() :
+ These decoding functions allow decompression of multiple blocks in "streaming" mode.
+ Previously decoded blocks must still be available at the memory position where they were decoded.
+ If it's not possible, save the relevant part of decoded data into a safe buffer,
+ and indicate where it stands using LZ4_setStreamDecode()
+*/
+LZ4_FORCE_O2
+int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
if (lz4sd->prefixSize == 0) {
/* The first call, no dictionary yet. */
assert(lz4sd->extDictSize == 0);
result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
- if (result <= 0) return result;
+ if (result <= 0) return result;
lz4sd->prefixSize = (size_t)result;
lz4sd->prefixEnd = (BYTE*)dest + result;
} else if (lz4sd->prefixEnd == (BYTE*)dest) {
@@ -2348,32 +2348,32 @@ int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const ch
lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
lz4sd->prefixSize += (size_t)result;
- lz4sd->prefixEnd += result;
- } else {
+ lz4sd->prefixEnd += result;
+ } else {
/* The buffer wraps around, or they're switching to another buffer. */
- lz4sd->extDictSize = lz4sd->prefixSize;
- lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0) return result;
+ if (result <= 0) return result;
lz4sd->prefixSize = (size_t)result;
- lz4sd->prefixEnd = (BYTE*)dest + result;
- }
-
- return result;
-}
-
-LZ4_FORCE_O2
-int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
-{
- LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
- int result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ }
+
+ return result;
+}
+
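/* Illustrative sketch (not from the library sources) : decoding a series of
 * dependent blocks with LZ4_decompress_safe_continue(), assuming "lz4.h" is
 * included. Blocks are decoded back-to-back into one contiguous buffer, so
 * previously decoded data stays at the position where it was produced, as the
 * comment above requires. `cBlocks`, `cSizes`, `nbBlocks`, `dst` and
 * `dstCapacity` are hypothetical application-side variables.
 */
static int example_decodeBlockSequence(const char* const* cBlocks, const int* cSizes, int nbBlocks,
                                       char* dst, int dstCapacity)
{
    LZ4_streamDecode_t* const sd = LZ4_createStreamDecode();
    int decodedTotal = 0;
    int i;
    if (sd == NULL) return -1;
    for (i = 0; i < nbBlocks; i++) {
        int const r = LZ4_decompress_safe_continue(sd, cBlocks[i], dst + decodedTotal,
                                                   cSizes[i], dstCapacity - decodedTotal);
        if (r < 0) { LZ4_freeStreamDecode(sd); return r; }   /* malformed input */
        decodedTotal += r;
    }
    LZ4_freeStreamDecode(sd);
    return decodedTotal;
}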
+LZ4_FORCE_O2
+int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
assert(originalSize >= 0);
-
+
if (lz4sd->prefixSize == 0) {
assert(lz4sd->extDictSize == 0);
result = LZ4_decompress_fast(source, dest, originalSize);
- if (result <= 0) return result;
+ if (result <= 0) return result;
lz4sd->prefixSize = (size_t)originalSize;
lz4sd->prefixEnd = (BYTE*)dest + originalSize;
} else if (lz4sd->prefixEnd == (BYTE*)dest) {
@@ -2384,56 +2384,56 @@ int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const ch
lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0) return result;
lz4sd->prefixSize += (size_t)originalSize;
- lz4sd->prefixEnd += originalSize;
- } else {
- lz4sd->extDictSize = lz4sd->prefixSize;
- lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ lz4sd->prefixEnd += originalSize;
+ } else {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
result = LZ4_decompress_fast_extDict(source, dest, originalSize,
lz4sd->externalDict, lz4sd->extDictSize);
- if (result <= 0) return result;
+ if (result <= 0) return result;
lz4sd->prefixSize = (size_t)originalSize;
- lz4sd->prefixEnd = (BYTE*)dest + originalSize;
- }
-
- return result;
-}
-
-
-/*
-Advanced decoding functions :
-*_usingDict() :
- These decoding functions work the same as "_continue" ones,
- the dictionary must be explicitly provided within parameters
-*/
-
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ }
+
+ return result;
+}
+
+
+/*
+Advanced decoding functions :
+*_usingDict() :
+ These decoding functions work the same as "_continue" ones,
+ the dictionary must be explicitly provided within parameters
+*/
+
int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
-{
- if (dictSize==0)
+{
+ if (dictSize==0)
return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
- if (dictStart+dictSize == dest) {
+ if (dictStart+dictSize == dest) {
if (dictSize >= 64 KB - 1) {
return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
}
assert(dictSize >= 0);
return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
- }
+ }
assert(dictSize >= 0);
return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
-}
-
-int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
-{
+}
+
+int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
+{
if (dictSize==0 || dictStart+dictSize == dest)
return LZ4_decompress_fast(source, dest, originalSize);
assert(dictSize >= 0);
return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
-}
-
-
-/*=*************************************************
-* Obsolete Functions
-***************************************************/
-/* obsolete compression functions */
+}
+
+
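/* Illustrative sketch (not from the library sources) : one-shot decompression
 * with an external dictionary, assuming "lz4.h" is included. `dict`/`dictSize`
 * stand for a hypothetical dictionary that was also referenced at compression
 * time; both sides must use identical dictionary content. Decoding is fastest
 * when dst == dict + dictSize (prefix case).
 */
static int example_decodeWithDict(const char* cSrc, int cSize,
                                  char* dst, int dstCapacity,
                                  const char* dict, int dictSize)
{
    /* returns the decompressed size, or a negative value if the input is malformed */
    return LZ4_decompress_safe_usingDict(cSrc, dst, cSize, dstCapacity, dict, dictSize);
}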
+/*=*************************************************
+* Obsolete Functions
+***************************************************/
+/* obsolete compression functions */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
{
return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
@@ -2458,13 +2458,13 @@ int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* d
{
return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
}
-
-/*
+
+/*
These decompression functions are deprecated and should no longer be used.
-They are only provided here for compatibility with older user programs.
-- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
-- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
-*/
+They are only provided here for compatibility with older user programs.
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
+- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
+*/
int LZ4_uncompress (const char* source, char* dest, int outputSize)
{
return LZ4_decompress_fast(source, dest, outputSize);
@@ -2473,29 +2473,29 @@ int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize,
{
return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
}
-
-/* Obsolete Streaming functions */
-
-int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
-
-int LZ4_resetStreamState(void* state, char* inputBuffer)
-{
+
+/* Obsolete Streaming functions */
+
+int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
+
+int LZ4_resetStreamState(void* state, char* inputBuffer)
+{
(void)inputBuffer;
LZ4_resetStream((LZ4_stream_t*)state);
- return 0;
-}
-
-void* LZ4_create (char* inputBuffer)
-{
+ return 0;
+}
+
+void* LZ4_create (char* inputBuffer)
+{
(void)inputBuffer;
return LZ4_createStream();
-}
-
+}
+
char* LZ4_slideInputBuffer (void* state)
-{
+{
/* avoid const char * -> char * conversion warning */
return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
-}
-
-#endif /* ONLY_COMPRESS */
-#endif /* LZ4_COMMONDEFS_ONLY */
+}
+
+#endif /* ONLY_COMPRESS */
+#endif /* LZ4_COMMONDEFS_ONLY */
diff --git a/contrib/libs/lz4/lz4.h b/contrib/libs/lz4/lz4.h
index dd27bb3e88..66b8547bbc 100644
--- a/contrib/libs/lz4/lz4.h
+++ b/contrib/libs/lz4/lz4.h
@@ -1,8 +1,8 @@
/*
- * LZ4 - Fast LZ compression algorithm
- * Header File
+ * LZ4 - Fast LZ compression algorithm
+ * Header File
* Copyright (C) 2011-present, Yann Collet.
-
+
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
@@ -29,41 +29,41 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- - LZ4 homepage : http://www.lz4.org
- - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
*/
-#if defined (__cplusplus) && !defined(LZ4_NAMESPACE)
+#if defined (__cplusplus) && !defined(LZ4_NAMESPACE)
extern "C" {
#endif
-#ifndef LZ4_H_2983827168210
-#define LZ4_H_2983827168210
+#ifndef LZ4_H_2983827168210
+#define LZ4_H_2983827168210
-/* --- Dependency --- */
-#include <stddef.h> /* size_t */
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
-/**
- Introduction
+/**
+ Introduction
LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core,
- scalable with multi-cores CPU. It features an extremely fast decoder, with speed in
- multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
-
- The LZ4 compression library provides in-memory compression and decompression functions.
+  scalable with multi-core CPUs. It features an extremely fast decoder, with speed in
+ multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
+
+ The LZ4 compression library provides in-memory compression and decompression functions.
It gives full buffer control to user.
- Compression can be done in:
- - a single step (described as Simple Functions)
- - a single step, reusing a context (described in Advanced Functions)
- - unbounded multiple steps (described as Streaming compression)
-
+ Compression can be done in:
+ - a single step (described as Simple Functions)
+ - a single step, reusing a context (described in Advanced Functions)
+ - unbounded multiple steps (described as Streaming compression)
+
lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
Decompressing such a compressed block requires additional metadata.
Exact metadata depends on exact decompression function.
For the typical case of LZ4_decompress_safe(),
metadata includes block's compressed size, and maximum bound of decompressed size.
Each application is free to encode and pass such metadata in whichever way it wants.
-
+
lz4.h only handle blocks, it can not generate Frames.
Blocks are different from Frames (doc/lz4_Frame_format.md).
@@ -71,17 +71,17 @@ extern "C" {
Embedding metadata is required for compressed data to be self-contained and portable.
Frame format is delivered through a companion API, declared in lz4frame.h.
The `lz4` CLI can only manage frames.
-*/
-
-/*^***************************************************************
-* Export parameters
-*****************************************************************/
+*/
+
+/*^***************************************************************
+* Export parameters
+*****************************************************************/
/*
-* LZ4_DLL_EXPORT :
-* Enable exporting of functions when building a Windows DLL
+* LZ4_DLL_EXPORT :
+* Enable exporting of functions when building a Windows DLL
* LZ4LIB_VISIBILITY :
* Control library symbols visibility.
-*/
+*/
#ifndef LZ4LIB_VISIBILITY
# if defined(__GNUC__) && (__GNUC__ >= 4)
# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default")))
@@ -89,49 +89,49 @@ extern "C" {
# define LZ4LIB_VISIBILITY
# endif
#endif
-#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
+#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
-#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
+#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
-#else
+#else
# define LZ4LIB_API LZ4LIB_VISIBILITY
-#endif
+#endif
/*------ Version ------*/
-#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
+#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
-#define LZ4_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
-
-#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
-
-#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
-#define LZ4_QUOTE(str) #str
-#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
-#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION)
-
+#define LZ4_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
+
+#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
+
+#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
+#define LZ4_QUOTE(str) #str
+#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
+#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION)
+
LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version */
LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version */
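/* Illustrative sketch (not from the library sources) : comparing the runtime
 * library version against the headers the program was built with, assuming
 * "lz4.h" and <stdio.h> are included. With the macros above,
 * LZ4_VERSION_NUMBER evaluates to 1*100*100 + 9*100 + 3 = 10903 for v1.9.3.
 */
static void example_checkVersion(void)
{
    if (LZ4_versionNumber() != LZ4_VERSION_NUMBER) {
        printf("warning: built against lz4 %d, running %s\n",
               LZ4_VERSION_NUMBER, LZ4_versionString());
    }
}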
-
-
-/*-************************************
-* Tuning parameter
-**************************************/
-/*!
- * LZ4_MEMORY_USAGE :
- * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+
+
+/*-************************************
+* Tuning parameter
+**************************************/
+/*!
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio.
* Reduced memory usage may improve speed, thanks to better cache locality.
- * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
- */
+ * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+ */
#ifndef LZ4_MEMORY_USAGE
# define LZ4_MEMORY_USAGE 14
-#endif
-
-
-/*-************************************
-* Simple Functions
-**************************************/
-/*! LZ4_compress_default() :
+#endif
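/* Illustrative note (not from the library sources), applying the N -> 2^N rule
 * above : building with e.g.
 *     cc -DLZ4_MEMORY_USAGE=16 -c lz4.c
 * raises the internal table from the default 2^14 = 16 KB to 2^16 = 64 KB,
 * generally improving ratio at some cost in cache locality.
 */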
+
+
+/*-************************************
+* Simple Functions
+**************************************/
+/*! LZ4_compress_default() :
* Compresses 'srcSize' bytes from buffer 'src'
* into already allocated 'dst' buffer of size 'dstCapacity'.
* Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
@@ -146,8 +146,8 @@ LZ4LIB_API const char* LZ4_versionString (void); /**< library version string;
* Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
*/
LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);
-
-/*! LZ4_decompress_safe() :
+
+/*! LZ4_decompress_safe() :
* compressedSize : is the exact complete size of the compressed block.
* dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
* @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
@@ -164,30 +164,30 @@ LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int
LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);
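/* Illustrative sketch (not from the library sources) : a minimal compress /
 * decompress round trip with the two simple functions above, assuming "lz4.h",
 * <stdlib.h> and <string.h> are included. It also uses LZ4_compressBound(),
 * declared further below, to size the compressed buffer.
 */
static int example_roundTrip(const char* src, int srcSize)
{
    int ok = 0;
    int const bound = LZ4_compressBound(srcSize);           /* worst-case compressed size */
    char* const compressed  = (char*)malloc((size_t)bound);
    char* const regenerated = (char*)malloc((size_t)srcSize);
    if (compressed && regenerated) {
        int const cSize = LZ4_compress_default(src, compressed, srcSize, bound);
        if (cSize > 0) {
            int const dSize = LZ4_decompress_safe(compressed, regenerated, cSize, srcSize);
            ok = (dSize == srcSize) && (memcmp(src, regenerated, (size_t)srcSize) == 0);
        }
    }
    free(compressed); free(regenerated);
    return ok;
}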
-/*-************************************
-* Advanced Functions
-**************************************/
-#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
-#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
+/*-************************************
+* Advanced Functions
+**************************************/
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
/*! LZ4_compressBound() :
- Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
- This function is primarily useful for memory allocation purposes (destination buffer size).
- Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
+ Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
+ This function is primarily useful for memory allocation purposes (destination buffer size).
+ Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize)
- inputSize : max supported value is LZ4_MAX_INPUT_SIZE
- return : maximum output size in a "worst case" scenario
+ inputSize : max supported value is LZ4_MAX_INPUT_SIZE
+ return : maximum output size in a "worst case" scenario
or 0, if input size is incorrect (too large or negative)
-*/
-LZ4LIB_API int LZ4_compressBound(int inputSize);
+*/
+LZ4LIB_API int LZ4_compressBound(int inputSize);
/*! LZ4_compress_fast() :
Same as LZ4_compress_default(), but allows selection of "acceleration" factor.
- The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
- It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
- An acceleration value of "1" is the same as regular LZ4_compress_default()
- Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
- Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
+    The larger the acceleration value, the faster the algorithm, but also the lower the compression ratio.

+ It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
+ An acceleration value of "1" is the same as regular LZ4_compress_default()
+ Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
+ Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
*/
LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
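/* Illustrative sketch (not from the library sources) : trading ratio for speed
 * with the acceleration parameter described above, assuming "lz4.h" is
 * included. An acceleration of 1 behaves like LZ4_compress_default().
 */
static int example_compressFaster(const char* src, char* dst, int srcSize, int dstCapacity)
{
    int const acceleration = 8;   /* hypothetical value : higher = faster, lower ratio */
    return LZ4_compress_fast(src, dst, srcSize, dstCapacity, acceleration);
}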
@@ -198,7 +198,7 @@ LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int d
* and allocate it on 8-bytes boundaries (using `malloc()` typically).
* Then, provide this buffer as `void* state` to compression function.
*/
-LZ4LIB_API int LZ4_sizeofState(void);
+LZ4LIB_API int LZ4_sizeofState(void);
LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
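/* Illustrative sketch (not from the library sources) : supplying externally
 * allocated state to the compressor, as described above, assuming "lz4.h" and
 * <stdlib.h> are included. malloc() returns memory suitably aligned for any
 * built-in type, which satisfies the 8-byte alignment requirement.
 */
static int example_compressWithExternalState(const char* src, char* dst, int srcSize, int dstCapacity)
{
    void* const state = malloc((size_t)LZ4_sizeofState());
    int cSize = 0;
    if (state != NULL) {
        cSize = LZ4_compress_fast_extState(state, src, dst, srcSize, dstCapacity, 1);
        free(state);
    }
    return cSize;   /* 0 means compression (or allocation) failed */
}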
@@ -213,18 +213,18 @@ LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* d
* New value is necessarily <= input value.
* @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
* or 0 if compression fails.
- *
- * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed un v1.9.2+):
- * the produced compressed content could, in specific circumstances,
- * require to be decompressed into a destination buffer larger
- * by at least 1 byte than the content to decompress.
- * If an application uses `LZ4_compress_destSize()`,
- * it's highly recommended to update liblz4 to v1.9.2 or better.
- * If this can't be done or ensured,
- * the receiving decompression function should provide
- * a dstCapacity which is > decompressedSize, by at least 1 byte.
- * See https://github.com/lz4/lz4/issues/859 for details
- */
+ *
+ *        Note : from v1.8.2 to v1.9.1, this function had a bug (fixed in v1.9.2+):
+ * the produced compressed content could, in specific circumstances,
+ * require to be decompressed into a destination buffer larger
+ * by at least 1 byte than the content to decompress.
+ * If an application uses `LZ4_compress_destSize()`,
+ * it's highly recommended to update liblz4 to v1.9.2 or better.
+ * If this can't be done or ensured,
+ * the receiving decompression function should provide
+ * a dstCapacity which is > decompressedSize, by at least 1 byte.
+ * See https://github.com/lz4/lz4/issues/859 for details
+ */
LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
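/* Illustrative sketch (not from the library sources) : filling a fixed-size
 * destination and learning how much input was consumed, assuming "lz4.h" is
 * included. On return, srcConsumed holds the number of source bytes that fit.
 */
static int example_compressIntoFixedBuffer(const char* src, int srcSize,
                                           char* dst, int dstCapacity)
{
    int srcConsumed = srcSize;   /* in : bytes available ; out : bytes actually consumed */
    int const written = LZ4_compress_destSize(src, dst, &srcConsumed, dstCapacity);
    (void)srcConsumed;           /* a caller would typically advance src by this amount */
    return written;              /* bytes written into dst, or 0 if compression failed */
}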
@@ -232,47 +232,47 @@ LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePt
* Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
* into destination buffer 'dst' of size 'dstCapacity'.
* Up to 'targetOutputSize' bytes will be decoded.
- * The function stops decoding on reaching this objective.
- * This can be useful to boost performance
- * whenever only the beginning of a block is required.
+ * The function stops decoding on reaching this objective.
+ * This can be useful to boost performance
+ * whenever only the beginning of a block is required.
*
- * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
+ * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
* If source stream is detected malformed, function returns a negative result.
*
- * Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
+ * Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
*
- * Note 2 : targetOutputSize must be <= dstCapacity
- *
- * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
+ * Note 2 : targetOutputSize must be <= dstCapacity
+ *
+ * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
* so dstCapacity is kind of redundant.
- * This is because in older versions of this function,
- * decoding operation would still write complete sequences.
- * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
+ * This is because in older versions of this function,
+ * decoding operation would still write complete sequences.
+ * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
* it could write more bytes, though only up to dstCapacity.
* Some "margin" used to be required for this operation to work properly.
- * Thankfully, this is no longer necessary.
- * The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
- *
- * Note 4 : If srcSize is the exact size of the block,
- * then targetOutputSize can be any value,
- * including larger than the block's decompressed size.
- * The function will, at most, generate block's decompressed size.
- *
- * Note 5 : If srcSize is _larger_ than block's compressed size,
- * then targetOutputSize **MUST** be <= block's decompressed size.
- * Otherwise, *silent corruption will occur*.
+ * Thankfully, this is no longer necessary.
+ * The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
+ *
+ * Note 4 : If srcSize is the exact size of the block,
+ * then targetOutputSize can be any value,
+ * including larger than the block's decompressed size.
+ * The function will, at most, generate block's decompressed size.
+ *
+ * Note 5 : If srcSize is _larger_ than block's compressed size,
+ * then targetOutputSize **MUST** be <= block's decompressed size.
+ * Otherwise, *silent corruption will occur*.
*/
LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
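/* Illustrative sketch (not from the library sources) : decoding only the first
 * part of a block, e.g. to inspect a record header, assuming "lz4.h" is
 * included. Per Note 2 above, targetOutputSize must not exceed dstCapacity.
 */
static int example_peekDecompressed(const char* cSrc, int cSize, char* dst, int dstCapacity)
{
    int const wanted = 64;       /* hypothetical : only the first 64 bytes are needed */
    int const target = (wanted < dstCapacity) ? wanted : dstCapacity;
    return LZ4_decompress_safe_partial(cSrc, dst, cSize, target, dstCapacity);
}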
-/*-*********************************************
-* Streaming Compression Functions
-***********************************************/
+/*-*********************************************
+* Streaming Compression Functions
+***********************************************/
typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
-
-LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
-LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr);
-
+
+LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
+LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr);
+
/*! LZ4_resetStream_fast() : v1.9.0+
* Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
* (e.g., LZ4_compress_fast_continue()).
@@ -294,10 +294,10 @@ LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr);
* in the context of streaming compression.
* The *extState* functions perform their own resets.
* Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive.
- */
+ */
LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
-
-/*! LZ4_loadDict() :
+
+/*! LZ4_loadDict() :
* Use this function to reference a static dictionary into LZ4_stream_t.
* The dictionary must remain available during compression.
* LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
@@ -307,10 +307,10 @@ LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
* results are generally better when using Zstandard's Dictionary Builder.
* Loading a size of 0 is allowed, and is the same as reset.
* @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
- */
-LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
-
-/*! LZ4_compress_fast_continue() :
+ */
+LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
+
+/*! LZ4_compress_fast_continue() :
* Compress 'src' content using data from previously compressed blocks, for better compression ratio.
* 'dst' buffer must be already allocated.
* If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
@@ -332,41 +332,41 @@ LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, in
* Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
*
* Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed.
- */
+ */
LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
-
-/*! LZ4_saveDict() :
+
+/*! LZ4_saveDict() :
* If last 64KB data cannot be guaranteed to remain available at its current memory location,
- * save it into a safer place (char* safeBuffer).
+ * save it into a safer place (char* safeBuffer).
* This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
* but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
* @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
- */
+ */
LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize);
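/* Illustrative sketch (not from the library sources) : compressing one chunk
 * of a chain of dependent blocks with the streaming API above, assuming
 * "lz4.h" is included and `stream` was obtained from LZ4_createStream().
 * `writeBlock` is a hypothetical callback storing each compressed block with
 * its size; `safeDict` keeps the last 64 KB alive between calls when the input
 * buffer itself cannot be relied upon to stay in place.
 */
static int example_streamingCompressChunk(LZ4_stream_t* stream,
                                          const char* chunk, int chunkSize,
                                          char* dstBlock, int dstCapacity,
                                          char* safeDict /* >= 64 KB */,
                                          int (*writeBlock)(const char*, int))
{
    int const cSize = LZ4_compress_fast_continue(stream, chunk, dstBlock, chunkSize, dstCapacity, 1);
    if (cSize <= 0) return -1;                        /* dstCapacity too small, or error */
    if (writeBlock(dstBlock, cSize) != 0) return -1;
    /* move the reference window into a stable location before `chunk` is reused */
    LZ4_saveDict(stream, safeDict, 64 * 1024);
    return cSize;
}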
-
-
-/*-**********************************************
-* Streaming Decompression Functions
-* Bufferless synchronous API
-************************************************/
+
+
+/*-**********************************************
+* Streaming Decompression Functions
+* Bufferless synchronous API
+************************************************/
typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */
-
+
/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
* creation / destruction of streaming decompression tracking context.
* A tracking context can be re-used multiple times.
*/
-LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
-LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
-
-/*! LZ4_setStreamDecode() :
+LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
+LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
+
+/*! LZ4_setStreamDecode() :
* An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
* Use this function to start decompression of a new stream of blocks.
* A dictionary can optionally be set. Use NULL or size 0 for a reset order.
* Dictionary is presumed stable : it must remain accessible and unmodified during next decompression.
* @return : 1 if OK, 0 if error
- */
-LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
-
+ */
+LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
+
/*! LZ4_decoderRingBufferSize() : v1.8.2+
* Note : in a ring buffer scenario (optional),
* blocks are presumed decompressed next to each other
@@ -405,25 +405,25 @@ LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
* Whenever these conditions are not possible,
* save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
* then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block.
-*/
+*/
LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity);
-
-
-/*! LZ4_decompress_*_usingDict() :
- * These decoding functions work the same as
- * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue()
- * They are stand-alone, and don't need an LZ4_streamDecode_t structure.
+
+
+/*! LZ4_decompress_*_usingDict() :
+ * These decoding functions work the same as
+ * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue()
+ * They are stand-alone, and don't need an LZ4_streamDecode_t structure.
* Dictionary is presumed stable : it must remain accessible and unmodified during decompression.
* Performance tip : Decompression speed can be substantially increased
* when dst == dictStart + dictSize.
- */
+ */
LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize);
-
+
#endif /* LZ4_H_2983827168210 */
-
+
/*^*************************************
- * !!!!!! STATIC LINKING ONLY !!!!!!
+ * !!!!!! STATIC LINKING ONLY !!!!!!
***************************************/
/*-****************************************************************************
@@ -569,65 +569,65 @@ LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const
#define LZ4_H_98237428734687
/*-************************************************************
- * Private Definitions
+ * Private Definitions
**************************************************************
* Do not use these definitions directly.
* They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
- * Accessing members will expose user code to API and/or ABI break in future versions of the library.
+ * Accessing members will expose user code to API and/or ABI break in future versions of the library.
**************************************************************/
-#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
-#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
-#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
-
-#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# include <stdint.h>
- typedef int8_t LZ4_i8;
- typedef uint8_t LZ4_byte;
- typedef uint16_t LZ4_u16;
- typedef uint32_t LZ4_u32;
-#else
- typedef signed char LZ4_i8;
- typedef unsigned char LZ4_byte;
- typedef unsigned short LZ4_u16;
- typedef unsigned int LZ4_u32;
-#endif
-
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
+
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef int8_t LZ4_i8;
+ typedef uint8_t LZ4_byte;
+ typedef uint16_t LZ4_u16;
+ typedef uint32_t LZ4_u32;
+#else
+ typedef signed char LZ4_i8;
+ typedef unsigned char LZ4_byte;
+ typedef unsigned short LZ4_u16;
+ typedef unsigned int LZ4_u32;
+#endif
+
typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
struct LZ4_stream_t_internal {
- LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
- LZ4_u32 currentOffset;
- LZ4_u32 tableType;
- const LZ4_byte* dictionary;
+ LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
+ LZ4_u32 currentOffset;
+ LZ4_u32 tableType;
+ const LZ4_byte* dictionary;
const LZ4_stream_t_internal* dictCtx;
- LZ4_u32 dictSize;
+ LZ4_u32 dictSize;
};
-
-typedef struct {
- const LZ4_byte* externalDict;
- size_t extDictSize;
- const LZ4_byte* prefixEnd;
- size_t prefixSize;
-} LZ4_streamDecode_t_internal;
-
-
+
+typedef struct {
+ const LZ4_byte* externalDict;
+ size_t extDictSize;
+ const LZ4_byte* prefixEnd;
+ size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+
+
/*! LZ4_stream_t :
- * Do not use below internal definitions directly !
- * Declare or allocate an LZ4_stream_t instead.
+ * Do not use below internal definitions directly !
+ * Declare or allocate an LZ4_stream_t instead.
* LZ4_stream_t can also be created using LZ4_createStream(), which is recommended.
* The structure definition can be convenient for static allocation
* (on stack, or as part of larger structure).
* Init this structure with LZ4_initStream() before first use.
* note : only use this definition in association with static linking !
- * this definition is not API/ABI safe, and may change in future versions.
- */
-#define LZ4_STREAMSIZE 16416 /* static size, for inter-version compatibility */
-#define LZ4_STREAMSIZE_VOIDP (LZ4_STREAMSIZE / sizeof(void*))
-union LZ4_stream_u {
- void* table[LZ4_STREAMSIZE_VOIDP];
- LZ4_stream_t_internal internal_donotuse;
-}; /* previously typedef'd to LZ4_stream_t */
-
-
+ * this definition is not API/ABI safe, and may change in future versions.
+ */
+#define LZ4_STREAMSIZE 16416 /* static size, for inter-version compatibility */
+#define LZ4_STREAMSIZE_VOIDP (LZ4_STREAMSIZE / sizeof(void*))
+union LZ4_stream_u {
+ void* table[LZ4_STREAMSIZE_VOIDP];
+ LZ4_stream_t_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_stream_t */
+
+
/*! LZ4_initStream() : v1.9.0+
* An LZ4_stream_t structure must be initialized at least once.
* This is automatically done when invoking LZ4_createStream(),
@@ -643,7 +643,7 @@ union LZ4_stream_u {
* Note3: Before v1.9.0, use LZ4_resetStream() instead
*/
LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
-
+
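/* Illustrative sketch (not from the library sources) : initializing a
 * statically allocated LZ4_stream_t with LZ4_initStream(), assuming "lz4.h" is
 * included and the program links statically, as the note above requires.
 */
static int example_compressWithStackStream(const char* src, char* dst, int srcSize, int dstCapacity)
{
    LZ4_stream_t ctx;                                           /* on-stack state */
    LZ4_stream_t* const s = LZ4_initStream(&ctx, sizeof(ctx));  /* assumption : NULL if buffer unsuitable */
    if (s == NULL) return 0;
    return LZ4_compress_fast_continue(s, src, dst, srcSize, dstCapacity, 1);
}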
/*! LZ4_streamDecode_t :
* information structure to track an LZ4 stream during decompression.
@@ -651,19 +651,19 @@ LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
* note : only use in association with static linking !
* this definition is not API/ABI safe,
* and may change in a future version !
- */
+ */
#define LZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ )
-#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
-union LZ4_streamDecode_u {
- unsigned long long table[LZ4_STREAMDECODESIZE_U64];
- LZ4_streamDecode_t_internal internal_donotuse;
-} ; /* previously typedef'd to LZ4_streamDecode_t */
-
-
+#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
+union LZ4_streamDecode_u {
+ unsigned long long table[LZ4_STREAMDECODESIZE_U64];
+ LZ4_streamDecode_t_internal internal_donotuse;
+} ; /* previously typedef'd to LZ4_streamDecode_t */
+
+
/*-************************************
-* Obsolete Functions
-**************************************/
+* Obsolete Functions
+**************************************/
/*! Deprecation warnings
*
@@ -681,37 +681,37 @@ union LZ4_streamDecode_u {
# define LZ4_DISABLE_DEPRECATE_WARNINGS
#endif
-#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
-# define LZ4_DEPRECATED(message) /* disable deprecation warnings */
-#else
-# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
-# define LZ4_DEPRECATED(message) [[deprecated(message)]]
-# elif defined(_MSC_VER)
-# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
-# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
-# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
-# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
-# define LZ4_DEPRECATED(message) __attribute__((deprecated))
-# else
-# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
-# define LZ4_DEPRECATED(message) /* disabled */
-# endif
-#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
-
-/*! Obsolete compression functions (since v1.7.3) */
+#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
+# define LZ4_DEPRECATED(message) /* disable deprecation warnings */
+#else
+# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+# define LZ4_DEPRECATED(message) [[deprecated(message)]]
+# elif defined(_MSC_VER)
+# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
+# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
+# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
+# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
+# define LZ4_DEPRECATED(message) __attribute__((deprecated))
+# else
+# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
+# define LZ4_DEPRECATED(message) /* disabled */
+# endif
+#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
+
+/*! Obsolete compression functions (since v1.7.3) */
LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-
-/*! Obsolete decompression functions (since v1.8.0) */
+
+/*! Obsolete decompression functions (since v1.8.0) */
LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
-
-/* Obsolete streaming functions (since v1.7.0)
- * degraded functionality; do not use!
+
+/* Obsolete streaming functions (since v1.7.0)
+ * degraded functionality; do not use!
*
* In order to perform streaming compression, these functions depended on data
* that is no longer tracked in the state. They have been preserved as well as
@@ -724,23 +724,23 @@ LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (ch
LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void);
LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer);
LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state);
-
-/*! Obsolete streaming decoding functions (since v1.7.0) */
+
+/*! Obsolete streaming decoding functions (since v1.7.0) */
LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
-
-/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
+
+/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
* These functions used to be faster than LZ4_decompress_safe(),
- * but this is no longer the case. They are now slower.
+ * but this is no longer the case. They are now slower.
* This is because LZ4_decompress_fast() doesn't know the input size,
- * and therefore must progress more cautiously into the input buffer to not read beyond the end of block.
+ * and therefore must progress more cautiously into the input buffer to not read beyond the end of block.
* On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
* As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
*
* The last remaining LZ4_decompress_fast() specificity is that
* it can decompress a block without knowing its compressed size.
- * Such functionality can be achieved in a more secure manner
- * by employing LZ4_decompress_safe_partial().
+ * Such functionality can be achieved in a more secure manner
+ * by employing LZ4_decompress_safe_partial().
*
* Parameters:
* originalSize : is the uncompressed size to regenerate.
@@ -774,6 +774,6 @@ LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
#endif /* LZ4_H_98237428734687 */
-#if defined (__cplusplus) && !defined(LZ4_NAMESPACE)
+#if defined (__cplusplus) && !defined(LZ4_NAMESPACE)
}
#endif
diff --git a/contrib/libs/lz4/lz4frame.c b/contrib/libs/lz4/lz4frame.c
index aa58dda82e..ec02c92f72 100644
--- a/contrib/libs/lz4/lz4frame.c
+++ b/contrib/libs/lz4/lz4frame.c
@@ -1,4 +1,4 @@
-/*
+/*
* LZ4 auto-framing library
* Copyright (C) 2011-2016, Yann Collet.
*
@@ -31,25 +31,25 @@
* - LZ4 homepage : http://www.lz4.org
* - LZ4 source repository : https://github.com/lz4/lz4
*/
-
-/* LZ4F is a stand-alone API to create LZ4-compressed Frames
+
+/* LZ4F is a stand-alone API to create LZ4-compressed Frames
* in full conformance with specification v1.6.1 .
* This library rely upon memory management capabilities (malloc, free)
* provided either by <stdlib.h>,
* or redirected towards another library of user's choice
* (see Memory Routines below).
*/
-
-
-/*-************************************
-* Compiler Options
-**************************************/
-#ifdef _MSC_VER /* Visual Studio */
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-#endif
-
-
-/*-************************************
+
+
+/*-************************************
+* Compiler Options
+**************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/*-************************************
* Tuning parameters
**************************************/
/*
@@ -63,8 +63,8 @@
/*-************************************
-* Memory routines
-**************************************/
+* Memory routines
+**************************************/
/*
* User may redirect invocations of
* malloc(), calloc() and free()
@@ -72,34 +72,34 @@
* by modifying below section.
*/
#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
-# include <stdlib.h> /* malloc, calloc, free */
+# include <stdlib.h> /* malloc, calloc, free */
# define ALLOC(s) malloc(s)
# define ALLOC_AND_ZERO(s) calloc(1,(s))
# define FREEMEM(p) free(p)
#endif
-#include <string.h> /* memset, memcpy, memmove */
+#include <string.h> /* memset, memcpy, memmove */
#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
# define MEM_INIT(p,v,s) memset((p),(v),(s))
#endif
-
-
-/*-************************************
+
+
+/*-************************************
* Library declarations
-**************************************/
+**************************************/
#define LZ4F_STATIC_LINKING_ONLY
#include "lz4frame.h"
#define LZ4_STATIC_LINKING_ONLY
-#include "lz4.h"
+#include "lz4.h"
#define LZ4_HC_STATIC_LINKING_ONLY
-#include "lz4hc.h"
-#define XXH_STATIC_LINKING_ONLY
-#include "xxhash.h"
-
-
-/*-************************************
+#include "lz4hc.h"
+#define XXH_STATIC_LINKING_ONLY
+#include "xxhash.h"
+
+
+/*-************************************
* Debug
-**************************************/
+**************************************/
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
# include <assert.h>
#else
@@ -107,9 +107,9 @@
# define assert(condition) ((void)0)
# endif
#endif
-
+
#define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
-
+
#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
# include <stdio.h>
static int g_debuglog_enable = 1;
@@ -124,243 +124,243 @@ static int g_debuglog_enable = 1;
#endif
-/*-************************************
-* Basic Types
-**************************************/
-#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-# include <stdint.h>
- typedef uint8_t BYTE;
- typedef uint16_t U16;
- typedef uint32_t U32;
- typedef int32_t S32;
- typedef uint64_t U64;
-#else
- typedef unsigned char BYTE;
- typedef unsigned short U16;
- typedef unsigned int U32;
- typedef signed int S32;
- typedef unsigned long long U64;
-#endif
-
-
-/* unoptimized version; solves endianess & alignment issues */
-static U32 LZ4F_readLE32 (const void* src)
-{
- const BYTE* const srcPtr = (const BYTE*)src;
- U32 value32 = srcPtr[0];
+/*-************************************
+* Basic Types
+**************************************/
+#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+#endif
+
+
+/* unoptimized version; solves endianness & alignment issues */
+static U32 LZ4F_readLE32 (const void* src)
+{
+ const BYTE* const srcPtr = (const BYTE*)src;
+ U32 value32 = srcPtr[0];
value32 += ((U32)srcPtr[1])<< 8;
value32 += ((U32)srcPtr[2])<<16;
- value32 += ((U32)srcPtr[3])<<24;
- return value32;
-}
-
-static void LZ4F_writeLE32 (void* dst, U32 value32)
-{
- BYTE* const dstPtr = (BYTE*)dst;
- dstPtr[0] = (BYTE)value32;
- dstPtr[1] = (BYTE)(value32 >> 8);
- dstPtr[2] = (BYTE)(value32 >> 16);
- dstPtr[3] = (BYTE)(value32 >> 24);
-}
-
-static U64 LZ4F_readLE64 (const void* src)
-{
- const BYTE* const srcPtr = (const BYTE*)src;
- U64 value64 = srcPtr[0];
- value64 += ((U64)srcPtr[1]<<8);
- value64 += ((U64)srcPtr[2]<<16);
- value64 += ((U64)srcPtr[3]<<24);
- value64 += ((U64)srcPtr[4]<<32);
- value64 += ((U64)srcPtr[5]<<40);
- value64 += ((U64)srcPtr[6]<<48);
- value64 += ((U64)srcPtr[7]<<56);
- return value64;
-}
-
-static void LZ4F_writeLE64 (void* dst, U64 value64)
-{
- BYTE* const dstPtr = (BYTE*)dst;
- dstPtr[0] = (BYTE)value64;
- dstPtr[1] = (BYTE)(value64 >> 8);
- dstPtr[2] = (BYTE)(value64 >> 16);
- dstPtr[3] = (BYTE)(value64 >> 24);
- dstPtr[4] = (BYTE)(value64 >> 32);
- dstPtr[5] = (BYTE)(value64 >> 40);
- dstPtr[6] = (BYTE)(value64 >> 48);
- dstPtr[7] = (BYTE)(value64 >> 56);
-}
-
-
-/*-************************************
-* Constants
-**************************************/
+ value32 += ((U32)srcPtr[3])<<24;
+ return value32;
+}
+
+static void LZ4F_writeLE32 (void* dst, U32 value32)
+{
+ BYTE* const dstPtr = (BYTE*)dst;
+ dstPtr[0] = (BYTE)value32;
+ dstPtr[1] = (BYTE)(value32 >> 8);
+ dstPtr[2] = (BYTE)(value32 >> 16);
+ dstPtr[3] = (BYTE)(value32 >> 24);
+}
+
+static U64 LZ4F_readLE64 (const void* src)
+{
+ const BYTE* const srcPtr = (const BYTE*)src;
+ U64 value64 = srcPtr[0];
+ value64 += ((U64)srcPtr[1]<<8);
+ value64 += ((U64)srcPtr[2]<<16);
+ value64 += ((U64)srcPtr[3]<<24);
+ value64 += ((U64)srcPtr[4]<<32);
+ value64 += ((U64)srcPtr[5]<<40);
+ value64 += ((U64)srcPtr[6]<<48);
+ value64 += ((U64)srcPtr[7]<<56);
+ return value64;
+}
+
+static void LZ4F_writeLE64 (void* dst, U64 value64)
+{
+ BYTE* const dstPtr = (BYTE*)dst;
+ dstPtr[0] = (BYTE)value64;
+ dstPtr[1] = (BYTE)(value64 >> 8);
+ dstPtr[2] = (BYTE)(value64 >> 16);
+ dstPtr[3] = (BYTE)(value64 >> 24);
+ dstPtr[4] = (BYTE)(value64 >> 32);
+ dstPtr[5] = (BYTE)(value64 >> 40);
+ dstPtr[6] = (BYTE)(value64 >> 48);
+ dstPtr[7] = (BYTE)(value64 >> 56);
+}
+
+
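/* Illustrative note (not from the library sources) : a worked example of the
 * little-endian helpers above. Reading the byte sequence 04 22 4D 18 gives
 * 0x04 + (0x22<<8) + (0x4D<<16) + (0x18<<24) = 0x184D2204,
 * i.e. LZ4F_MAGICNUMBER as defined below, regardless of host endianness.
 */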
+/*-************************************
+* Constants
+**************************************/
#ifndef LZ4_SRC_INCLUDED /* avoid double definition */
# define KB *(1<<10)
# define MB *(1<<20)
# define GB *(1<<30)
#endif
-
-#define _1BIT 0x01
-#define _2BITS 0x03
-#define _3BITS 0x07
-#define _4BITS 0x0F
-#define _8BITS 0xFF
-
-#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
-#define LZ4F_MAGICNUMBER 0x184D2204U
-#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
-#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
-
+
+#define _1BIT 0x01
+#define _2BITS 0x03
+#define _3BITS 0x07
+#define _4BITS 0x0F
+#define _8BITS 0xFF
+
+#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
+#define LZ4F_MAGICNUMBER 0x184D2204U
+#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
+#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
+
static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */
static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */
static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
-
-
-/*-************************************
-* Structures and local types
-**************************************/
-typedef struct LZ4F_cctx_s
-{
- LZ4F_preferences_t prefs;
- U32 version;
- U32 cStage;
+
+
+/*-************************************
+* Structures and local types
+**************************************/
+typedef struct LZ4F_cctx_s
+{
+ LZ4F_preferences_t prefs;
+ U32 version;
+ U32 cStage;
const LZ4F_CDict* cdict;
- size_t maxBlockSize;
- size_t maxBufferSize;
- BYTE* tmpBuff;
- BYTE* tmpIn;
- size_t tmpInSize;
- U64 totalInSize;
- XXH32_state_t xxh;
- void* lz4CtxPtr;
+ size_t maxBlockSize;
+ size_t maxBufferSize;
+ BYTE* tmpBuff;
+ BYTE* tmpIn;
+ size_t tmpInSize;
+ U64 totalInSize;
+ XXH32_state_t xxh;
+ void* lz4CtxPtr;
U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
-} LZ4F_cctx_t;
-
-
-/*-************************************
-* Error management
-**************************************/
-#define LZ4F_GENERATE_STRING(STRING) #STRING,
-static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };
-
-
-unsigned LZ4F_isError(LZ4F_errorCode_t code)
-{
- return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
-}
-
-const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
-{
- static const char* codeError = "Unspecified error code";
- if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
- return codeError;
-}
-
-LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
-{
- if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
- return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
-}
-
-static LZ4F_errorCode_t err0r(LZ4F_errorCodes code)
-{
+} LZ4F_cctx_t;
+
+
+/*-************************************
+* Error management
+**************************************/
+#define LZ4F_GENERATE_STRING(STRING) #STRING,
+static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };
+
+
+unsigned LZ4F_isError(LZ4F_errorCode_t code)
+{
+ return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
+}
+
+const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
+{
+ static const char* codeError = "Unspecified error code";
+ if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
+ return codeError;
+}
+
+LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
+{
+ if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
+ return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
+}
+
+static LZ4F_errorCode_t err0r(LZ4F_errorCodes code)
+{
/* A compilation error here means sizeof(ptrdiff_t) is not large enough */
LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
- return (LZ4F_errorCode_t)-(ptrdiff_t)code;
-}
-
-unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
-
+ return (LZ4F_errorCode_t)-(ptrdiff_t)code;
+}
+
+unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
+
int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }
-
+
size_t LZ4F_getBlockSize(unsigned blockSizeID)
-{
- static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };
-
- if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
+{
+ static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };
+
+ if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB)
return err0r(LZ4F_ERROR_maxBlockSize_invalid);
blockSizeID -= LZ4F_max64KB;
- return blockSizes[blockSizeID];
-}
-
+ return blockSizes[blockSizeID];
+}
+
/*-************************************
* Private functions
**************************************/
#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
-static BYTE LZ4F_headerChecksum (const void* header, size_t length)
-{
- U32 const xxh = XXH32(header, length, 0);
- return (BYTE)(xxh >> 8);
-}
-
-
-/*-************************************
-* Simple-pass compression functions
-**************************************/
+static BYTE LZ4F_headerChecksum (const void* header, size_t length)
+{
+ U32 const xxh = XXH32(header, length, 0);
+ return (BYTE)(xxh >> 8);
+}
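
Per the LZ4 frame format, the header checksum byte (HC) is the second byte of XXH32 over the frame descriptor, which is exactly what LZ4F_headerChecksum returns; LZ4F_decodeHeader recomputes it the same way before trusting a header. A sketch of that calculation (assumes xxhash.h is reachable; the function name is illustrative):

    #include <stddef.h>
    #include "xxhash.h"

    /* descriptor = header bytes after the 4-byte magic number, excluding HC itself */
    static unsigned char frame_header_checksum(const void* descriptor, size_t length)
    {
        return (unsigned char)(XXH32(descriptor, length, 0) >> 8);
    }
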
+
+
+/*-************************************
+* Simple-pass compression functions
+**************************************/
static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID,
const size_t srcSize)
-{
- LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
- size_t maxBlockSize = 64 KB;
- while (requestedBSID > proposedBSID) {
- if (srcSize <= maxBlockSize)
- return proposedBSID;
- proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
- maxBlockSize <<= 2;
- }
- return requestedBSID;
-}
-
+{
+ LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
+ size_t maxBlockSize = 64 KB;
+ while (requestedBSID > proposedBSID) {
+ if (srcSize <= maxBlockSize)
+ return proposedBSID;
+ proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
+ maxBlockSize <<= 2;
+ }
+ return requestedBSID;
+}
+
/*! LZ4F_compressBound_internal() :
* Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
* prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario.
* @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers.
* When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
- */
+ */
static size_t LZ4F_compressBound_internal(size_t srcSize,
const LZ4F_preferences_t* preferencesPtr,
size_t alreadyBuffered)
-{
- LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
- prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */
+{
+ LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
+ prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */
prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */
- { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
- U32 const flush = prefsPtr->autoFlush | (srcSize==0);
+ { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
+ U32 const flush = prefsPtr->autoFlush | (srcSize==0);
LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
size_t const blockSize = LZ4F_getBlockSize(blockID);
- size_t const maxBuffered = blockSize - 1;
- size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
- size_t const maxSrcSize = srcSize + bufferedSize;
- unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
+ size_t const maxBuffered = blockSize - 1;
+ size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
+ size_t const maxSrcSize = srcSize + bufferedSize;
+ unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
size_t const partialBlockSize = maxSrcSize & (blockSize-1);
- size_t const lastBlockSize = flush ? partialBlockSize : 0;
- unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
-
+ size_t const lastBlockSize = flush ? partialBlockSize : 0;
+ unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
+
size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);
-
+
return ((BHSize + blockCRCSize) * nbBlocks) +
(blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
- }
-}
-
-size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
-{
- LZ4F_preferences_t prefs;
+ }
+}
+
+size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
+{
+ LZ4F_preferences_t prefs;
size_t const headerSize = maxFHSize; /* max header size, including optional fields */
-
- if (preferencesPtr!=NULL) prefs = *preferencesPtr;
+
+ if (preferencesPtr!=NULL) prefs = *preferencesPtr;
else MEM_INIT(&prefs, 0, sizeof(prefs));
- prefs.autoFlush = 1;
-
- return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);
-}
-
-
+ prefs.autoFlush = 1;
+
+ return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);
+}
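
With NULL preferences the bound above reduces to: up to 19 header bytes, one 4-byte block header per (up to) 64 KB block, the raw payload (worst case: stored uncompressed), and a 4-byte end mark. A small check of that arithmetic (the expected value is derived from the code above, not a documented constant):

    #include "lz4frame.h"
    #include <stdio.h>

    int main(void)
    {
        /* 100 KB input, default preferences: two blocks (64 KB + 36 KB) */
        size_t const bound = LZ4F_compressFrameBound(100 * 1024, NULL);
        /* expected: 19 (max header) + 2*4 (block headers) + 102400 (raw worst case)
                     + 4 (end mark) = 102431 */
        printf("LZ4F_compressFrameBound(100 KB) = %zu\n", bound);
        return 0;
    }
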
+
+
/*! LZ4F_compressFrame_usingCDict() :
* Compress srcBuffer using a dictionary, in a single step.
* cdict can be NULL, in which case, no dictionary is used.
@@ -375,49 +375,49 @@ size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
const void* srcBuffer, size_t srcSize,
const LZ4F_CDict* cdict,
const LZ4F_preferences_t* preferencesPtr)
-{
- LZ4F_preferences_t prefs;
- LZ4F_compressOptions_t options;
- BYTE* const dstStart = (BYTE*) dstBuffer;
- BYTE* dstPtr = dstStart;
- BYTE* const dstEnd = dstStart + dstCapacity;
-
- if (preferencesPtr!=NULL)
- prefs = *preferencesPtr;
- else
+{
+ LZ4F_preferences_t prefs;
+ LZ4F_compressOptions_t options;
+ BYTE* const dstStart = (BYTE*) dstBuffer;
+ BYTE* dstPtr = dstStart;
+ BYTE* const dstEnd = dstStart + dstCapacity;
+
+ if (preferencesPtr!=NULL)
+ prefs = *preferencesPtr;
+ else
MEM_INIT(&prefs, 0, sizeof(prefs));
- if (prefs.frameInfo.contentSize != 0)
- prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */
-
- prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
- prefs.autoFlush = 1;
- if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
+ if (prefs.frameInfo.contentSize != 0)
+ prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */
+
+ prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
+ prefs.autoFlush = 1;
+ if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */
-
+
MEM_INIT(&options, 0, sizeof(options));
- options.stableSrc = 1;
-
+ options.stableSrc = 1;
+
if (dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs)) /* condition to guarantee success */
- return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
-
+ return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+
{ size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */
- if (LZ4F_isError(headerSize)) return headerSize;
- dstPtr += headerSize; /* header size */ }
-
+ if (LZ4F_isError(headerSize)) return headerSize;
+ dstPtr += headerSize; /* header size */ }
+
assert(dstEnd >= dstPtr);
{ size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
- if (LZ4F_isError(cSize)) return cSize;
- dstPtr += cSize; }
-
+ if (LZ4F_isError(cSize)) return cSize;
+ dstPtr += cSize; }
+
assert(dstEnd >= dstPtr);
{ size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */
- if (LZ4F_isError(tailSize)) return tailSize;
- dstPtr += tailSize; }
-
+ if (LZ4F_isError(tailSize)) return tailSize;
+ dstPtr += tailSize; }
+
assert(dstEnd >= dstStart);
return (size_t)(dstPtr - dstStart);
}
-
+
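
LZ4F_compressFrame() below wraps this begin/update/end sequence into a single call. A minimal caller sketch using only the public lz4frame.h entry points (allocation strategy and names are illustrative):

    #include "lz4frame.h"
    #include <stdlib.h>

    /* Returns the frame size written into *framePtr (caller frees), or 0 on error. */
    static size_t compress_whole_buffer(const void* src, size_t srcSize, void** framePtr)
    {
        size_t const bound = LZ4F_compressFrameBound(srcSize, NULL);
        void* const frame = malloc(bound);
        if (frame == NULL) return 0;
        {   size_t const frameSize = LZ4F_compressFrame(frame, bound, src, srcSize, NULL);
            if (LZ4F_isError(frameSize)) { free(frame); return 0; }
            *framePtr = frame;
            return frameSize;
        }
    }
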
/*! LZ4F_compressFrame() :
* Compress an entire srcBuffer into a valid LZ4 frame, in a single step.
@@ -468,9 +468,9 @@ size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
}
#endif
return result;
-}
-
-
+}
+
+
/*-***************************************************
* Dictionary compression
*****************************************************/
@@ -521,44 +521,44 @@ void LZ4F_freeCDict(LZ4F_CDict* cdict)
}
-/*-*********************************
-* Advanced compression functions
-***********************************/
-
-/*! LZ4F_createCompressionContext() :
+/*-*********************************
+* Advanced compression functions
+***********************************/
+
+/*! LZ4F_createCompressionContext() :
* The first thing to do is to create a compressionContext object, which will be used in all compression operations.
 * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version number.
* The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
* The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
* If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
* Object can release its memory using LZ4F_freeCompressionContext();
- */
-LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
-{
+ */
+LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
+{
LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOC_AND_ZERO(sizeof(LZ4F_cctx_t));
- if (cctxPtr==NULL) return err0r(LZ4F_ERROR_allocation_failed);
-
- cctxPtr->version = version;
+ if (cctxPtr==NULL) return err0r(LZ4F_ERROR_allocation_failed);
+
+ cctxPtr->version = version;
cctxPtr->cStage = 0; /* Next stage : init stream */
-
- *LZ4F_compressionContextPtr = cctxPtr;
-
- return LZ4F_OK_NoError;
-}
-
-
-LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
-{
+
+ *LZ4F_compressionContextPtr = cctxPtr;
+
+ return LZ4F_OK_NoError;
+}
+
+
+LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
+{
if (cctxPtr != NULL) { /* support free on NULL */
- FREEMEM(cctxPtr->lz4CtxPtr); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
- FREEMEM(cctxPtr->tmpBuff);
- FREEMEM(cctxPtr);
- }
-
- return LZ4F_OK_NoError;
-}
-
-
+ FREEMEM(cctxPtr->lz4CtxPtr); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
+ FREEMEM(cctxPtr->tmpBuff);
+ FREEMEM(cctxPtr);
+ }
+
+ return LZ4F_OK_NoError;
+}
+
+
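
Typical lifecycle around these two functions, as a short sketch (wrapper name is illustrative):

    #include "lz4frame.h"
    #include <stdio.h>

    static int with_compression_context(void)
    {
        LZ4F_cctx* cctx = NULL;
        LZ4F_errorCode_t const err = LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
        if (LZ4F_isError(err)) {
            fprintf(stderr, "cctx creation failed: %s\n", LZ4F_getErrorName(err));
            return -1;
        }
        /* ... drive cctx with LZ4F_compressBegin() / Update() / End() ... */
        LZ4F_freeCompressionContext(cctx);   /* accepts NULL; frees ctx and tmp buffer */
        return 0;
    }
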
/**
* This function prepares the internal LZ4(HC) stream for a new compression,
* resetting the context and attaching the dictionary, if there is one.
@@ -592,28 +592,28 @@ static void LZ4F_initStream(void* ctx,
/*! LZ4F_compressBegin_usingCDict() :
* init streaming compression and writes frame header into dstBuffer.
* dstBuffer must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * @return : number of bytes written into dstBuffer for the header
- * or an error code (can be tested using LZ4F_isError())
- */
+ * @return : number of bytes written into dstBuffer for the header
+ * or an error code (can be tested using LZ4F_isError())
+ */
size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
void* dstBuffer, size_t dstCapacity,
const LZ4F_CDict* cdict,
const LZ4F_preferences_t* preferencesPtr)
-{
- LZ4F_preferences_t prefNull;
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* dstPtr = dstStart;
- BYTE* headerStart;
-
- if (dstCapacity < maxFHSize) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+{
+ LZ4F_preferences_t prefNull;
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+ BYTE* headerStart;
+
+ if (dstCapacity < maxFHSize) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
MEM_INIT(&prefNull, 0, sizeof(prefNull));
- if (preferencesPtr == NULL) preferencesPtr = &prefNull;
- cctxPtr->prefs = *preferencesPtr;
-
+ if (preferencesPtr == NULL) preferencesPtr = &prefNull;
+ cctxPtr->prefs = *preferencesPtr;
+
/* Ctx Management */
{ U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
if (cctxPtr->lz4CtxAlloc < ctxTypeID) {
- FREEMEM(cctxPtr->lz4CtxPtr);
+ FREEMEM(cctxPtr->lz4CtxPtr);
if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
cctxPtr->lz4CtxPtr = LZ4_createStream();
} else {
@@ -633,18 +633,18 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
LZ4_setCompressionLevel((LZ4_streamHC_t *) cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
}
cctxPtr->lz4CtxState = ctxTypeID;
- }
- }
-
- /* Buffer Management */
+ }
+ }
+
+ /* Buffer Management */
if (cctxPtr->prefs.frameInfo.blockSizeID == 0)
cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
- cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);
-
+ cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);
+
{ size_t const requiredBuffSize = preferencesPtr->autoFlush ?
((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
-
+
if (cctxPtr->maxBufferSize < requiredBuffSize) {
cctxPtr->maxBufferSize = 0;
FREEMEM(cctxPtr->tmpBuff);
@@ -652,10 +652,10 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
if (cctxPtr->tmpBuff == NULL) return err0r(LZ4F_ERROR_allocation_failed);
cctxPtr->maxBufferSize = requiredBuffSize;
} }
- cctxPtr->tmpIn = cctxPtr->tmpBuff;
- cctxPtr->tmpInSize = 0;
+ cctxPtr->tmpIn = cctxPtr->tmpBuff;
+ cctxPtr->tmpInSize = 0;
(void)XXH32_reset(&(cctxPtr->xxh), 0);
-
+
/* context init */
cctxPtr->cdict = cdict;
if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
@@ -666,26 +666,26 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
}
- /* Magic Number */
- LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
- dstPtr += 4;
- headerStart = dstPtr;
-
- /* FLG Byte */
- *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
+ /* Magic Number */
+ LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
+ dstPtr += 4;
+ headerStart = dstPtr;
+
+ /* FLG Byte */
+ *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
+ ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5)
+ ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
+ ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3)
+ ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
+ (cctxPtr->prefs.frameInfo.dictID > 0) );
- /* BD Byte */
- *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
- /* Optional Frame content size field */
- if (cctxPtr->prefs.frameInfo.contentSize) {
- LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
- dstPtr += 8;
- cctxPtr->totalInSize = 0;
- }
+ /* BD Byte */
+ *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
+ /* Optional Frame content size field */
+ if (cctxPtr->prefs.frameInfo.contentSize) {
+ LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
+ dstPtr += 8;
+ cctxPtr->totalInSize = 0;
+ }
/* Optional dictionary ID field */
if (cctxPtr->prefs.frameInfo.dictID) {
LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID);
@@ -693,12 +693,12 @@ size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
}
/* Header CRC Byte */
*dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
- dstPtr++;
-
- cctxPtr->cStage = 1; /* header written, now request input data block */
+ dstPtr++;
+
+ cctxPtr->cStage = 1; /* header written, now request input data block */
return (size_t)(dstPtr - dstStart);
}
-
+
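
The two descriptor bytes written above (FLG and BD, frame bytes 4 and 5) pack all frame options; LZ4F_decodeHeader further down unpacks the same layout. A sketch of that unpacking (printing is illustrative):

    #include <stdio.h>

    static void print_frame_descriptor(unsigned char flg, unsigned char bd)
    {
        printf("version=%u blockIndependence=%u blockChecksum=%u contentSizeField=%u "
               "contentChecksum=%u dictIDField=%u blockSizeID=%u\n",
               (flg >> 6) & 3u, (flg >> 5) & 1u, (flg >> 4) & 1u, (flg >> 3) & 1u,
               (flg >> 2) & 1u,  flg       & 1u, (bd  >> 4) & 7u);
    }
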
/*! LZ4F_compressBegin() :
* init streaming compression and writes frame header into dstBuffer.
@@ -713,25 +713,25 @@ size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
{
return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity,
NULL, preferencesPtr);
-}
-
-
+}
+
+
/* LZ4F_compressBound() :
* @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario.
* LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario.
* This function cannot fail.
- */
-size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
-{
- if (preferencesPtr && preferencesPtr->autoFlush) {
- return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
- }
- return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
-}
-
-
+ */
+size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
+{
+ if (preferencesPtr && preferencesPtr->autoFlush) {
+ return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
+ }
+ return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
+}
+
+
typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict);
-
+
/*! LZ4F_makeBlock():
* compress a single block, add header and optional checksum.
@@ -742,29 +742,29 @@ static size_t LZ4F_makeBlock(void* dst,
compressFunc_t compress, void* lz4ctx, int level,
const LZ4F_CDict* cdict,
LZ4F_blockChecksum_t crcFlag)
-{
- BYTE* const cSizePtr = (BYTE*)dst;
+{
+ BYTE* const cSizePtr = (BYTE*)dst;
U32 cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
(int)(srcSize), (int)(srcSize-1),
level, cdict);
- if (cSize == 0) { /* compression failed */
- DEBUGLOG(5, "LZ4F_makeBlock: compression failed, creating a raw block (size %u)", (U32)srcSize);
- cSize = (U32)srcSize;
- LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
+ if (cSize == 0) { /* compression failed */
+ DEBUGLOG(5, "LZ4F_makeBlock: compression failed, creating a raw block (size %u)", (U32)srcSize);
+ cSize = (U32)srcSize;
+ LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
memcpy(cSizePtr+BHSize, src, srcSize);
} else {
LZ4F_writeLE32(cSizePtr, cSize);
- }
+ }
if (crcFlag) {
U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0); /* checksum of compressed data */
LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32);
}
return BHSize + cSize + ((U32)crcFlag)*BFSize;
-}
-
-
+}
+
+
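
Each block produced by LZ4F_makeBlock is framed as a 4-byte little-endian size word (bit 31 set when the payload is stored uncompressed), the payload, and an optional 4-byte XXH32 of the payload. The resulting on-wire size, as a trivial sketch:

    #include <stddef.h>

    /* 4-byte block header + payload + optional 4-byte block checksum */
    static size_t block_wire_size(size_t payloadSize, int blockChecksumEnabled)
    {
        return 4 + payloadSize + (blockChecksumEnabled ? 4u : 0u);
    }
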
static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
-{
+{
int const acceleration = (level < 0) ? -level + 1 : 1;
LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
if (cdict) {
@@ -772,50 +772,50 @@ static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize
} else {
return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
}
-}
-
+}
+
static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
-{
+{
int const acceleration = (level < 0) ? -level + 1 : 1;
(void)cdict; /* init once at beginning of frame */
return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
-}
-
+}
+
static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
-{
+{
LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
if (cdict) {
return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
}
return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level);
-}
-
+}
+
static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
{
(void)level; (void)cdict; /* init once at beginning of frame */
return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
}
-static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level)
-{
- if (level < LZ4HC_CLEVEL_MIN) {
+static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level)
+{
+ if (level < LZ4HC_CLEVEL_MIN) {
if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
return LZ4F_compressBlock_continue;
- }
+ }
if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC;
return LZ4F_compressBlockHC_continue;
-}
-
-static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
-{
- if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
- return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
- return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
-}
-
-typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
-
-/*! LZ4F_compressUpdate() :
+}
+
+static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
+{
+ if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
+ return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
+ return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
+}
+
+typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
+
+/*! LZ4F_compressUpdate() :
* LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
* dstBuffer MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
* LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
@@ -826,108 +826,108 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
void* dstBuffer, size_t dstCapacity,
const void* srcBuffer, size_t srcSize,
const LZ4F_compressOptions_t* compressOptionsPtr)
-{
- LZ4F_compressOptions_t cOptionsNull;
- size_t const blockSize = cctxPtr->maxBlockSize;
- const BYTE* srcPtr = (const BYTE*)srcBuffer;
- const BYTE* const srcEnd = srcPtr + srcSize;
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* dstPtr = dstStart;
- LZ4F_lastBlockStatus lastBlockCompressed = notDone;
- compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
-
+{
+ LZ4F_compressOptions_t cOptionsNull;
+ size_t const blockSize = cctxPtr->maxBlockSize;
+ const BYTE* srcPtr = (const BYTE*)srcBuffer;
+ const BYTE* const srcEnd = srcPtr + srcSize;
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+ LZ4F_lastBlockStatus lastBlockCompressed = notDone;
+ compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
+
DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);
-
- if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
+
+ if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
MEM_INIT(&cOptionsNull, 0, sizeof(cOptionsNull));
- if (compressOptionsPtr == NULL) compressOptionsPtr = &cOptionsNull;
-
- /* complete tmp buffer */
- if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */
- size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
- if (sizeToCopy > srcSize) {
- /* add src to tmpIn buffer */
- memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
- srcPtr = srcEnd;
- cctxPtr->tmpInSize += srcSize;
- /* still needs some CRC */
- } else {
- /* complete tmpIn block and then compress it */
- lastBlockCompressed = fromTmpBuffer;
- memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
- srcPtr += sizeToCopy;
-
+ if (compressOptionsPtr == NULL) compressOptionsPtr = &cOptionsNull;
+
+ /* complete tmp buffer */
+ if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */
+ size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
+ if (sizeToCopy > srcSize) {
+ /* add src to tmpIn buffer */
+ memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
+ srcPtr = srcEnd;
+ cctxPtr->tmpInSize += srcSize;
+ /* still needs some CRC */
+ } else {
+ /* complete tmpIn block and then compress it */
+ lastBlockCompressed = fromTmpBuffer;
+ memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
+ srcPtr += sizeToCopy;
+
dstPtr += LZ4F_makeBlock(dstPtr,
cctxPtr->tmpIn, blockSize,
compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
cctxPtr->cdict,
cctxPtr->prefs.frameInfo.blockChecksumFlag);
-
- if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
- cctxPtr->tmpInSize = 0;
- }
- }
-
- while ((size_t)(srcEnd - srcPtr) >= blockSize) {
+
+ if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
+ cctxPtr->tmpInSize = 0;
+ }
+ }
+
+ while ((size_t)(srcEnd - srcPtr) >= blockSize) {
/* compress full blocks */
- lastBlockCompressed = fromSrcBuffer;
+ lastBlockCompressed = fromSrcBuffer;
dstPtr += LZ4F_makeBlock(dstPtr,
srcPtr, blockSize,
compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
cctxPtr->cdict,
cctxPtr->prefs.frameInfo.blockChecksumFlag);
- srcPtr += blockSize;
- }
-
- if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
- /* compress remaining input < blockSize */
- lastBlockCompressed = fromSrcBuffer;
+ srcPtr += blockSize;
+ }
+
+ if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
+ /* compress remaining input < blockSize */
+ lastBlockCompressed = fromSrcBuffer;
dstPtr += LZ4F_makeBlock(dstPtr,
srcPtr, (size_t)(srcEnd - srcPtr),
compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
cctxPtr->cdict,
cctxPtr->prefs.frameInfo.blockChecksumFlag);
- srcPtr = srcEnd;
- }
-
- /* preserve dictionary if necessary */
- if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
- if (compressOptionsPtr->stableSrc) {
- cctxPtr->tmpIn = cctxPtr->tmpBuff;
- } else {
+ srcPtr = srcEnd;
+ }
+
+ /* preserve dictionary if necessary */
+ if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
+ if (compressOptionsPtr->stableSrc) {
+ cctxPtr->tmpIn = cctxPtr->tmpBuff;
+ } else {
int const realDictSize = LZ4F_localSaveDict(cctxPtr);
- if (realDictSize==0) return err0r(LZ4F_ERROR_GENERIC);
- cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
- }
- }
-
- /* keep tmpIn within limits */
- if ((cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) /* necessarily LZ4F_blockLinked && lastBlockCompressed==fromTmpBuffer */
- && !(cctxPtr->prefs.autoFlush))
- {
+ if (realDictSize==0) return err0r(LZ4F_ERROR_GENERIC);
+ cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ }
+ }
+
+ /* keep tmpIn within limits */
+ if ((cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) /* necessarily LZ4F_blockLinked && lastBlockCompressed==fromTmpBuffer */
+ && !(cctxPtr->prefs.autoFlush))
+ {
int const realDictSize = LZ4F_localSaveDict(cctxPtr);
- cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
- }
-
- /* some input data left, necessarily < blockSize */
- if (srcPtr < srcEnd) {
- /* fill tmp buffer */
+ cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ }
+
+ /* some input data left, necessarily < blockSize */
+ if (srcPtr < srcEnd) {
+ /* fill tmp buffer */
size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
- memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
- cctxPtr->tmpInSize = sizeToCopy;
- }
-
- if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
+ memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
+ cctxPtr->tmpInSize = sizeToCopy;
+ }
+
+ if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
(void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);
-
- cctxPtr->totalInSize += srcSize;
+
+ cctxPtr->totalInSize += srcSize;
return (size_t)(dstPtr - dstStart);
-}
-
-
-/*! LZ4F_flush() :
+}
+
+
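
A sketch of a streaming compressor driving the functions above; read_chunk() stands in for the caller's data source and is not an lz4 API, and error handling is kept minimal:

    #include "lz4frame.h"
    #include <stdio.h>
    #include <stdlib.h>

    extern size_t read_chunk(char* dst, size_t capacity);   /* hypothetical input source */

    static int stream_compress(FILE* out)
    {
        enum { CHUNK = 64 * 1024 };
        static char inBuf[CHUNK];
        size_t const outCap = LZ4F_compressBound(CHUNK, NULL);  /* covers worst case incl. buffered data */
        char* const outBuf = (char*)malloc(outCap);
        LZ4F_cctx* cctx = NULL;
        int ok = 0;

        if (outBuf != NULL
         && !LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION))) {
            size_t r = LZ4F_compressBegin(cctx, outBuf, outCap, NULL);   /* frame header */
            while (!LZ4F_isError(r)) {
                size_t n;
                fwrite(outBuf, 1, r, out);           /* ship header / previous block(s) */
                n = read_chunk(inBuf, CHUNK);
                if (n == 0) break;                   /* input exhausted */
                r = LZ4F_compressUpdate(cctx, outBuf, outCap, inBuf, n, NULL);
            }
            if (!LZ4F_isError(r)) {
                r = LZ4F_compressEnd(cctx, outBuf, outCap, NULL);        /* end mark (+ checksum) */
                if (!LZ4F_isError(r)) { fwrite(outBuf, 1, r, out); ok = 1; }
            }
        }
        LZ4F_freeCompressionContext(cctx);
        free(outBuf);
        return ok ? 0 : -1;
    }
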
+/*! LZ4F_flush() :
* When compressed data must be sent immediately, without waiting for a block to be filled,
 * invoke LZ4F_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
* The result of the function is the number of bytes written into dstBuffer.
@@ -938,21 +938,21 @@ size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
void* dstBuffer, size_t dstCapacity,
const LZ4F_compressOptions_t* compressOptionsPtr)
-{
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* dstPtr = dstStart;
- compressFunc_t compress;
-
- if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */
- if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
+{
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+ compressFunc_t compress;
+
+ if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */
+ if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
if (dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize))
return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- (void)compressOptionsPtr; /* not yet useful */
-
- /* select compression function */
- compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
-
- /* compress tmp buffer */
+ (void)compressOptionsPtr; /* not yet useful */
+
+ /* select compression function */
+ compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
+
+ /* compress tmp buffer */
dstPtr += LZ4F_makeBlock(dstPtr,
cctxPtr->tmpIn, cctxPtr->tmpInSize,
compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
@@ -962,19 +962,19 @@ size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
cctxPtr->tmpIn += cctxPtr->tmpInSize;
- cctxPtr->tmpInSize = 0;
-
- /* keep tmpIn within limits */
- if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */
+ cctxPtr->tmpInSize = 0;
+
+ /* keep tmpIn within limits */
+ if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */
int const realDictSize = LZ4F_localSaveDict(cctxPtr);
- cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
- }
-
+ cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ }
+
return (size_t)(dstPtr - dstStart);
-}
-
-
-/*! LZ4F_compressEnd() :
+}
+
+
+/*! LZ4F_compressEnd() :
* When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
 * It will flush whatever data remained within compressionContext (like LZ4F_flush())
* but also properly finalize the frame, with an endMark and an (optional) checksum.
@@ -986,46 +986,46 @@ size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
void* dstBuffer, size_t dstCapacity,
const LZ4F_compressOptions_t* compressOptionsPtr)
-{
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* dstPtr = dstStart;
-
+{
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+
size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
- DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
- if (LZ4F_isError(flushSize)) return flushSize;
- dstPtr += flushSize;
-
+ DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
+ if (LZ4F_isError(flushSize)) return flushSize;
+ dstPtr += flushSize;
+
assert(flushSize <= dstCapacity);
dstCapacity -= flushSize;
if (dstCapacity < 4) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- LZ4F_writeLE32(dstPtr, 0);
+ LZ4F_writeLE32(dstPtr, 0);
dstPtr += 4; /* endMark */
-
- if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
- U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
+
+ if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
+ U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
if (dstCapacity < 8) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
- DEBUGLOG(5,"Writing 32-bit content checksum");
- LZ4F_writeLE32(dstPtr, xxh);
- dstPtr+=4; /* content Checksum */
- }
-
- cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */
- cctxPtr->maxBufferSize = 0; /* reuse HC context */
-
- if (cctxPtr->prefs.frameInfo.contentSize) {
- if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
- return err0r(LZ4F_ERROR_frameSize_wrong);
- }
-
+ DEBUGLOG(5,"Writing 32-bit content checksum");
+ LZ4F_writeLE32(dstPtr, xxh);
+ dstPtr+=4; /* content Checksum */
+ }
+
+ cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */
+ cctxPtr->maxBufferSize = 0; /* reuse HC context */
+
+ if (cctxPtr->prefs.frameInfo.contentSize) {
+ if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
+ return err0r(LZ4F_ERROR_frameSize_wrong);
+ }
+
return (size_t)(dstPtr - dstStart);
-}
-
-
-/*-***************************************************
-* Frame Decompression
-*****************************************************/
-
+}
+
+
+/*-***************************************************
+* Frame Decompression
+*****************************************************/
+
typedef enum {
dstage_getFrameHeader=0, dstage_storeFrameHeader,
dstage_init,
@@ -1038,71 +1038,71 @@ typedef enum {
dstage_skipSkippable
} dStage_t;
-struct LZ4F_dctx_s {
- LZ4F_frameInfo_t frameInfo;
- U32 version;
+struct LZ4F_dctx_s {
+ LZ4F_frameInfo_t frameInfo;
+ U32 version;
dStage_t dStage;
- U64 frameRemainingSize;
- size_t maxBlockSize;
- size_t maxBufferSize;
- BYTE* tmpIn;
- size_t tmpInSize;
- size_t tmpInTarget;
- BYTE* tmpOutBuffer;
+ U64 frameRemainingSize;
+ size_t maxBlockSize;
+ size_t maxBufferSize;
+ BYTE* tmpIn;
+ size_t tmpInSize;
+ size_t tmpInTarget;
+ BYTE* tmpOutBuffer;
const BYTE* dict;
- size_t dictSize;
- BYTE* tmpOut;
- size_t tmpOutSize;
- size_t tmpOutStart;
- XXH32_state_t xxh;
+ size_t dictSize;
+ BYTE* tmpOut;
+ size_t tmpOutSize;
+ size_t tmpOutStart;
+ XXH32_state_t xxh;
XXH32_state_t blockChecksum;
BYTE header[LZ4F_HEADER_SIZE_MAX];
-}; /* typedef'd to LZ4F_dctx in lz4frame.h */
-
-
-/*! LZ4F_createDecompressionContext() :
+}; /* typedef'd to LZ4F_dctx in lz4frame.h */
+
+
+/*! LZ4F_createDecompressionContext() :
* Create a decompressionContext object, which will track all decompression operations.
* Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
* Object can later be released using LZ4F_freeDecompressionContext().
* @return : if != 0, there was an error during context creation.
*/
-LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
-{
+LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
+{
LZ4F_dctx* const dctx = (LZ4F_dctx*)ALLOC_AND_ZERO(sizeof(LZ4F_dctx));
- if (dctx == NULL) { /* failed allocation */
- *LZ4F_decompressionContextPtr = NULL;
- return err0r(LZ4F_ERROR_allocation_failed);
- }
-
+ if (dctx == NULL) { /* failed allocation */
+ *LZ4F_decompressionContextPtr = NULL;
+ return err0r(LZ4F_ERROR_allocation_failed);
+ }
+
dctx->version = versionNumber;
*LZ4F_decompressionContextPtr = dctx;
- return LZ4F_OK_NoError;
-}
-
+ return LZ4F_OK_NoError;
+}
+
LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
-{
- LZ4F_errorCode_t result = LZ4F_OK_NoError;
+{
+ LZ4F_errorCode_t result = LZ4F_OK_NoError;
if (dctx != NULL) { /* can accept NULL input, like free() */
result = (LZ4F_errorCode_t)dctx->dStage;
FREEMEM(dctx->tmpIn);
FREEMEM(dctx->tmpOutBuffer);
FREEMEM(dctx);
- }
- return result;
-}
-
-
-/*==--- Streaming Decompression operations ---==*/
-
+ }
+ return result;
+}
+
+
+/*==--- Streaming Decompression operations ---==*/
+
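
A sketch of the matching decompression loop around LZ4F_decompress() (defined further below); buffer sizes are arbitrary choices and trailing data after the frame is ignored:

    #include "lz4frame.h"
    #include <stdio.h>

    static int stream_decompress(FILE* in, FILE* out)
    {
        char srcBuf[16 << 10];
        char dstBuf[64 << 10];
        LZ4F_dctx* dctx = NULL;
        size_t ret = 1;        /* LZ4F_decompress() returns 0 once the frame is complete */
        int ok = 1;

        if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION))) return -1;
        while (ret != 0) {
            size_t const readSize = fread(srcBuf, 1, sizeof(srcBuf), in);
            const char* srcPtr = srcBuf;
            const char* const srcEnd = srcBuf + readSize;
            if (readSize == 0) { ok = 0; break; }            /* truncated frame */
            while (srcPtr < srcEnd && ret != 0) {
                size_t dstSize = sizeof(dstBuf);             /* in: capacity, out: bytes produced */
                size_t srcSize = (size_t)(srcEnd - srcPtr);  /* in: available, out: consumed */
                ret = LZ4F_decompress(dctx, dstBuf, &dstSize, srcPtr, &srcSize, NULL);
                if (LZ4F_isError(ret)) { ok = 0; break; }
                fwrite(dstBuf, 1, dstSize, out);
                srcPtr += srcSize;
            }
            if (!ok) break;
        }
        LZ4F_freeDecompressionContext(dctx);
        return ok ? 0 : -1;
    }
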
void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
-{
+{
dctx->dStage = dstage_getFrameHeader;
dctx->dict = NULL;
dctx->dictSize = 0;
-}
-
-
-/*! LZ4F_decodeHeader() :
+}
+
+
+/*! LZ4F_decodeHeader() :
* input : `src` points at the **beginning of the frame**
* output : set internal values of dctx, such as
* dctx->frameInfo and dctx->dStage.
@@ -1111,40 +1111,40 @@ void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
* or an error code (testable with LZ4F_isError())
*/
static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
-{
+{
unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
- size_t frameHeaderSize;
- const BYTE* srcPtr = (const BYTE*)src;
-
- DEBUGLOG(5, "LZ4F_decodeHeader");
- /* need to decode header to get frameInfo */
- if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete); /* minimal frame header size */
+ size_t frameHeaderSize;
+ const BYTE* srcPtr = (const BYTE*)src;
+
+ DEBUGLOG(5, "LZ4F_decodeHeader");
+ /* need to decode header to get frameInfo */
+ if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete); /* minimal frame header size */
MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));
-
- /* special case : skippable frames */
- if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
+
+ /* special case : skippable frames */
+ if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
dctx->frameInfo.frameType = LZ4F_skippableFrame;
if (src == (void*)(dctx->header)) {
dctx->tmpInSize = srcSize;
dctx->tmpInTarget = 8;
dctx->dStage = dstage_storeSFrameSize;
- return srcSize;
- } else {
+ return srcSize;
+ } else {
dctx->dStage = dstage_getSFrameSize;
- return 4;
- }
- }
-
- /* control magic number */
+ return 4;
+ }
+ }
+
+ /* control magic number */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
- DEBUGLOG(4, "frame header error : unknown magic number");
+ if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
+ DEBUGLOG(4, "frame header error : unknown magic number");
return err0r(LZ4F_ERROR_frameType_unknown);
- }
+ }
#endif
dctx->frameInfo.frameType = LZ4F_frame;
-
- /* Flags */
+
+ /* Flags */
{ U32 const FLG = srcPtr[4];
U32 const version = (FLG>>6) & _2BITS;
blockChecksumFlag = (FLG>>4) & _1BIT;
@@ -1156,20 +1156,20 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
if (((FLG>>1)&_1BIT) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */
if (version != 1) return err0r(LZ4F_ERROR_headerVersion_wrong); /* Version Number, only supported value */
}
-
- /* Frame Header Size */
+
+ /* Frame Header Size */
frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
-
- if (srcSize < frameHeaderSize) {
- /* not enough input to fully decode frame header */
+
+ if (srcSize < frameHeaderSize) {
+ /* not enough input to fully decode frame header */
if (srcPtr != dctx->header)
memcpy(dctx->header, srcPtr, srcSize);
dctx->tmpInSize = srcSize;
dctx->tmpInTarget = frameHeaderSize;
dctx->dStage = dstage_storeFrameHeader;
- return srcSize;
- }
-
+ return srcSize;
+ }
+
{ U32 const BD = srcPtr[5];
blockSizeID = (BD>>4) & _3BITS;
/* validate */
@@ -1177,8 +1177,8 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
if (blockSizeID < 4) return err0r(LZ4F_ERROR_maxBlockSize_invalid); /* 4-7 only supported values for the time being */
if (((BD>>0)&_4BITS) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bits */
}
-
- /* check header */
+
+ /* check header */
assert(frameHeaderSize > 5);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
{ BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
@@ -1186,25 +1186,25 @@ static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize
return err0r(LZ4F_ERROR_headerChecksum_invalid);
}
#endif
-
- /* save */
+
+ /* save */
dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
dctx->maxBlockSize = LZ4F_getBlockSize(blockSizeID);
- if (contentSizeFlag)
+ if (contentSizeFlag)
dctx->frameRemainingSize =
dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
if (dictIDFlag)
dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
-
+
dctx->dStage = dstage_init;
-
+
return frameHeaderSize;
}
-
-
+
+
/*! LZ4F_headerSize() :
* @return : size of frame header
* or an error code, which can be tested using LZ4F_isError()
@@ -1233,9 +1233,9 @@ size_t LZ4F_headerSize(const void* src, size_t srcSize)
U32 const dictIDFlag = FLG & _1BIT;
return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
}
-}
-
-/*! LZ4F_getFrameInfo() :
+}
+
+/*! LZ4F_getFrameInfo() :
* This function extracts frame parameters (max blockSize, frame checksum, etc.).
* Usage is optional. Objective is to provide relevant information for allocation purposes.
* This function works in 2 situations :
@@ -1253,16 +1253,16 @@ size_t LZ4F_headerSize(const void* src, size_t srcSize)
LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
LZ4F_frameInfo_t* frameInfoPtr,
const void* srcBuffer, size_t* srcSizePtr)
-{
+{
LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
if (dctx->dStage > dstage_storeFrameHeader) {
- /* frameInfo already decoded */
- size_t o=0, i=0;
- *srcSizePtr = 0;
+ /* frameInfo already decoded */
+ size_t o=0, i=0;
+ *srcSizePtr = 0;
*frameInfoPtr = dctx->frameInfo;
/* returns : recommended nb of bytes for LZ4F_decompress() */
return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL);
- } else {
+ } else {
if (dctx->dStage == dstage_storeFrameHeader) {
/* frame decoding already started, in the middle of header => automatic fail */
*srcSizePtr = 0;
@@ -1274,7 +1274,7 @@ LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
*srcSizePtr=0;
return err0r(LZ4F_ERROR_frameHeader_incomplete);
}
-
+
{ size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
if (LZ4F_isError(decodeResult)) {
*srcSizePtr = 0;
@@ -1285,85 +1285,85 @@ LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
*frameInfoPtr = dctx->frameInfo;
return decodeResult;
} } }
-}
-
-
+}
+
+
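
A short sketch of calling LZ4F_getFrameInfo() to inspect a frame before allocating buffers; frameStart is assumed to hold at least LZ4F_HEADER_SIZE_MAX bytes and dctx to be freshly created or reset (names are illustrative):

    #include "lz4frame.h"

    static unsigned long long query_content_size(LZ4F_dctx* dctx,
                                                  const void* frameStart, size_t frameAvail)
    {
        LZ4F_frameInfo_t info;
        size_t consumed = frameAvail;   /* in: bytes available, out: header bytes read */
        if (LZ4F_isError(LZ4F_getFrameInfo(dctx, &info, frameStart, &consumed)))
            return 0;
        /* info.blockSizeID, checksum flags, etc. are now usable; the remaining frame
           bytes start at (const char*)frameStart + consumed. */
        return info.contentSize;        /* 0 when the producer did not record it */
    }
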
/* LZ4F_updateDict() :
- * only used for LZ4F_blockLinked mode
- * Condition : dstPtr != NULL
- */
+ * only used for LZ4F_blockLinked mode
+ * Condition : dstPtr != NULL
+ */
static void LZ4F_updateDict(LZ4F_dctx* dctx,
const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
unsigned withinTmp)
-{
- assert(dstPtr != NULL);
- if (dctx->dictSize==0) {
- dctx->dict = (const BYTE*)dstPtr; /* priority to prefix mode */
- }
- assert(dctx->dict != NULL);
-
- if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */
+{
+ assert(dstPtr != NULL);
+ if (dctx->dictSize==0) {
+ dctx->dict = (const BYTE*)dstPtr; /* priority to prefix mode */
+ }
+ assert(dctx->dict != NULL);
+
+ if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */
dctx->dictSize += dstSize;
- return;
- }
-
+ return;
+ }
+
assert(dstPtr >= dstBufferStart);
if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) { /* history in dstBuffer becomes large enough to become dictionary */
dctx->dict = (const BYTE*)dstBufferStart;
dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
- return;
- }
-
+ return;
+ }
+
assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */
- /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
- assert(dctx->tmpOutBuffer != NULL);
+ /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
+ assert(dctx->tmpOutBuffer != NULL);
- if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
+ if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
/* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
dctx->dictSize += dstSize;
- return;
- }
-
- if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
+ return;
+ }
+
+ if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
size_t copySize = 64 KB - dctx->tmpOutSize;
const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
if (dctx->tmpOutSize > 64 KB) copySize = 0;
- if (copySize > preserveSize) copySize = preserveSize;
-
+ if (copySize > preserveSize) copySize = preserveSize;
+
memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
-
+
dctx->dict = dctx->tmpOutBuffer;
dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
- return;
- }
-
+ return;
+ }
+
if (dctx->dict == dctx->tmpOutBuffer) { /* copy dst into tmp to complete dict */
if (dctx->dictSize + dstSize > dctx->maxBufferSize) { /* tmp buffer not large enough */
size_t const preserveSize = 64 KB - dstSize;
memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
dctx->dictSize = preserveSize;
- }
+ }
memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
dctx->dictSize += dstSize;
- return;
- }
-
- /* join dict & dest into tmp */
+ return;
+ }
+
+ /* join dict & dest into tmp */
{ size_t preserveSize = 64 KB - dstSize;
if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
dctx->dict = dctx->tmpOutBuffer;
dctx->dictSize = preserveSize + dstSize;
- }
-}
-
-
-
-/*! LZ4F_decompress() :
+ }
+}
+
+
+
+/*! LZ4F_decompress() :
 * Call this function repetitively to regenerate the data compressed within srcBuffer.
* The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
* into dstBuffer of capacity *dstSizePtr.
@@ -1382,71 +1382,71 @@ static void LZ4F_updateDict(LZ4F_dctx* dctx,
* If decompression failed, @return is an error code which can be tested using LZ4F_isError().
*/
size_t LZ4F_decompress(LZ4F_dctx* dctx,
- void* dstBuffer, size_t* dstSizePtr,
- const void* srcBuffer, size_t* srcSizePtr,
- const LZ4F_decompressOptions_t* decompressOptionsPtr)
-{
- LZ4F_decompressOptions_t optionsNull;
- const BYTE* const srcStart = (const BYTE*)srcBuffer;
- const BYTE* const srcEnd = srcStart + *srcSizePtr;
- const BYTE* srcPtr = srcStart;
- BYTE* const dstStart = (BYTE*)dstBuffer;
- BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
- BYTE* dstPtr = dstStart;
- const BYTE* selectedIn = NULL;
- unsigned doAnotherStage = 1;
- size_t nextSrcSizeHint = 1;
-
-
- DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
- srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
- if (dstBuffer == NULL) assert(*dstSizePtr == 0);
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr)
+{
+ LZ4F_decompressOptions_t optionsNull;
+ const BYTE* const srcStart = (const BYTE*)srcBuffer;
+ const BYTE* const srcEnd = srcStart + *srcSizePtr;
+ const BYTE* srcPtr = srcStart;
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
+ BYTE* dstPtr = dstStart;
+ const BYTE* selectedIn = NULL;
+ unsigned doAnotherStage = 1;
+ size_t nextSrcSizeHint = 1;
+
+
+ DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
+ srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
+ if (dstBuffer == NULL) assert(*dstSizePtr == 0);
MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
- if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
- *srcSizePtr = 0;
- *dstSizePtr = 0;
- assert(dctx != NULL);
-
+ if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
+ *srcSizePtr = 0;
+ *dstSizePtr = 0;
+ assert(dctx != NULL);
+
/* behaves as a state machine */
-
- while (doAnotherStage) {
-
+
+ while (doAnotherStage) {
+
switch(dctx->dStage)
- {
-
+ {
+
case dstage_getFrameHeader:
- DEBUGLOG(6, "dstage_getFrameHeader");
- if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */
+ DEBUGLOG(6, "dstage_getFrameHeader");
+ if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */
size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */
- if (LZ4F_isError(hSize)) return hSize;
- srcPtr += hSize;
- break;
- }
+ if (LZ4F_isError(hSize)) return hSize;
+ srcPtr += hSize;
+ break;
+ }
dctx->tmpInSize = 0;
if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */
dctx->tmpInTarget = minFHSize; /* minimum size to decode header */
dctx->dStage = dstage_storeFrameHeader;
/* fall-through */
-
+
case dstage_storeFrameHeader:
- DEBUGLOG(6, "dstage_storeFrameHeader");
+ DEBUGLOG(6, "dstage_storeFrameHeader");
{ size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
dctx->tmpInSize += sizeToCopy;
- srcPtr += sizeToCopy;
+ srcPtr += sizeToCopy;
}
if (dctx->tmpInSize < dctx->tmpInTarget) {
nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */
doAnotherStage = 0; /* not enough src data, ask for some more */
- break;
- }
+ break;
+ }
{ size_t const hSize = LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget); /* will update dStage appropriately */
if (LZ4F_isError(hSize)) return hSize;
}
break;
-
+
case dstage_init:
- DEBUGLOG(6, "dstage_init");
+ DEBUGLOG(6, "dstage_init");
if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
/* internal buffers allocation */
{ size_t const bufferNeeded = dctx->maxBlockSize
@@ -1473,107 +1473,107 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
/* fall-through */
case dstage_getBlockHeader:
- if ((size_t)(srcEnd - srcPtr) >= BHSize) {
- selectedIn = srcPtr;
- srcPtr += BHSize;
- } else {
- /* not enough input to read cBlockSize field */
+ if ((size_t)(srcEnd - srcPtr) >= BHSize) {
+ selectedIn = srcPtr;
+ srcPtr += BHSize;
+ } else {
+ /* not enough input to read cBlockSize field */
dctx->tmpInSize = 0;
dctx->dStage = dstage_storeBlockHeader;
- }
-
+ }
+
if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */
case dstage_storeBlockHeader:
{ size_t const remainingInput = (size_t)(srcEnd - srcPtr);
size_t const wantedData = BHSize - dctx->tmpInSize;
size_t const sizeToCopy = MIN(wantedData, remainingInput);
memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
- srcPtr += sizeToCopy;
+ srcPtr += sizeToCopy;
dctx->tmpInSize += sizeToCopy;
if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */
nextSrcSizeHint = BHSize - dctx->tmpInSize;
- doAnotherStage = 0;
- break;
- }
+ doAnotherStage = 0;
+ break;
+ }
selectedIn = dctx->tmpIn;
} /* if (dctx->dStage == dstage_storeBlockHeader) */
-
+
/* decode block header */
- { U32 const blockHeader = LZ4F_readLE32(selectedIn);
- size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
+ { U32 const blockHeader = LZ4F_readLE32(selectedIn);
+ size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
- if (blockHeader==0) { /* frameEnd signal, no more block */
- DEBUGLOG(5, "end of frame");
+ if (blockHeader==0) { /* frameEnd signal, no more block */
+ DEBUGLOG(5, "end of frame");
dctx->dStage = dstage_getSuffix;
- break;
- }
- if (nextCBlockSize > dctx->maxBlockSize) {
+ break;
+ }
+ if (nextCBlockSize > dctx->maxBlockSize) {
return err0r(LZ4F_ERROR_maxBlockSize_invalid);
- }
- if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
+ }
+ if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
/* next block is uncompressed */
dctx->tmpInTarget = nextCBlockSize;
- DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
+ DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
if (dctx->frameInfo.blockChecksumFlag) {
(void)XXH32_reset(&dctx->blockChecksum, 0);
}
dctx->dStage = dstage_copyDirect;
- break;
- }
+ break;
+ }
/* next block is a compressed block */
dctx->tmpInTarget = nextCBlockSize + crcSize;
dctx->dStage = dstage_getCBlock;
if (dstPtr==dstEnd || srcPtr==srcEnd) {
nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
- doAnotherStage = 0;
- }
- break;
- }
-
- case dstage_copyDirect: /* uncompressed block */
- DEBUGLOG(6, "dstage_copyDirect");
- { size_t sizeToCopy;
- if (dstPtr == NULL) {
- sizeToCopy = 0;
- } else {
- size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
- sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
- memcpy(dstPtr, srcPtr, sizeToCopy);
- if (dctx->frameInfo.blockChecksumFlag) {
- (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
- }
- if (dctx->frameInfo.contentChecksumFlag)
- (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
- if (dctx->frameInfo.contentSize)
- dctx->frameRemainingSize -= sizeToCopy;
-
- /* history management (linked blocks only)*/
- if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
- LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
- } }
-
- srcPtr += sizeToCopy;
- dstPtr += sizeToCopy;
+ doAnotherStage = 0;
+ }
+ break;
+ }
+
+ case dstage_copyDirect: /* uncompressed block */
+ DEBUGLOG(6, "dstage_copyDirect");
+ { size_t sizeToCopy;
+ if (dstPtr == NULL) {
+ sizeToCopy = 0;
+ } else {
+ size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
+ sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
+ memcpy(dstPtr, srcPtr, sizeToCopy);
+ if (dctx->frameInfo.blockChecksumFlag) {
+ (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
+ }
+ if (dctx->frameInfo.contentChecksumFlag)
+ (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
+ if (dctx->frameInfo.contentSize)
+ dctx->frameRemainingSize -= sizeToCopy;
+
+ /* history management (linked blocks only)*/
+ if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
+ LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
+ } }
+
+ srcPtr += sizeToCopy;
+ dstPtr += sizeToCopy;
if (sizeToCopy == dctx->tmpInTarget) { /* all done */
if (dctx->frameInfo.blockChecksumFlag) {
dctx->tmpInSize = 0;
dctx->dStage = dstage_getBlockChecksum;
} else
dctx->dStage = dstage_getBlockHeader; /* new block */
- break;
- }
+ break;
+ }
dctx->tmpInTarget -= sizeToCopy; /* need to copy more */
- }
- nextSrcSizeHint = dctx->tmpInTarget +
- +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
- + BHSize /* next header size */;
- doAnotherStage = 0;
- break;
-
+ }
+ nextSrcSizeHint = dctx->tmpInTarget +
+ +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
+ + BHSize /* next header size */;
+ doAnotherStage = 0;
+ break;
+
/* check block checksum for recently transferred uncompressed block */
case dstage_getBlockChecksum:
- DEBUGLOG(6, "dstage_getBlockChecksum");
+ DEBUGLOG(6, "dstage_getBlockChecksum");
{ const void* crcSrc;
if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
crcSrc = srcPtr;
@@ -1593,12 +1593,12 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
{ U32 const readCRC = LZ4F_readLE32(crcSrc);
U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- DEBUGLOG(6, "compare block checksum");
- if (readCRC != calcCRC) {
- DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
- readCRC, calcCRC);
+ DEBUGLOG(6, "compare block checksum");
+ if (readCRC != calcCRC) {
+ DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
+ readCRC, calcCRC);
return err0r(LZ4F_ERROR_blockChecksum_invalid);
- }
+ }
#else
(void)readCRC;
(void)calcCRC;
@@ -1608,34 +1608,34 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
break;
case dstage_getCBlock:
- DEBUGLOG(6, "dstage_getCBlock");
+ DEBUGLOG(6, "dstage_getCBlock");
if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
dctx->tmpInSize = 0;
dctx->dStage = dstage_storeCBlock;
- break;
- }
+ break;
+ }
/* input large enough to read full block directly */
- selectedIn = srcPtr;
+ selectedIn = srcPtr;
srcPtr += dctx->tmpInTarget;
-
- if (0) /* always jump over next block */
- case dstage_storeCBlock:
+
+ if (0) /* always jump over next block */
+ case dstage_storeCBlock:
{ size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
size_t const inputLeft = (size_t)(srcEnd-srcPtr);
size_t const sizeToCopy = MIN(wantedData, inputLeft);
memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
dctx->tmpInSize += sizeToCopy;
- srcPtr += sizeToCopy;
+ srcPtr += sizeToCopy;
if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */
nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
+ (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
+ BHSize /* next header size */;
doAnotherStage = 0;
- break;
- }
+ break;
+ }
selectedIn = dctx->tmpIn;
- }
-
+ }
+
/* At this stage, input is large enough to decode a block */
if (dctx->frameInfo.blockChecksumFlag) {
dctx->tmpInTarget -= 4;
@@ -1650,12 +1650,12 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
(void)calcBlockCrc;
#endif
} }
-
+
if ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) {
const char* dict = (const char*)dctx->dict;
size_t dictSize = dctx->dictSize;
- int decodedSize;
- assert(dstPtr != NULL);
+ int decodedSize;
+ assert(dstPtr != NULL);
if (dict && dictSize > 1 GB) {
/* the dictSize param is an int, avoid truncation / sign issues */
dict += dictSize - 64 KB;
@@ -1666,23 +1666,23 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
(const char*)selectedIn, (char*)dstPtr,
(int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
dict, (int)dictSize);
- if (decodedSize < 0) return err0r(LZ4F_ERROR_GENERIC); /* decompression failed */
+ if (decodedSize < 0) return err0r(LZ4F_ERROR_GENERIC); /* decompression failed */
if (dctx->frameInfo.contentChecksumFlag)
XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
if (dctx->frameInfo.contentSize)
dctx->frameRemainingSize -= (size_t)decodedSize;
-
- /* dictionary management */
- if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
+
+ /* dictionary management */
+ if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
- }
-
- dstPtr += decodedSize;
+ }
+
+ dstPtr += decodedSize;
dctx->dStage = dstage_getBlockHeader;
- break;
- }
-
- /* not enough place into dst : decode into tmpOut */
+ break;
+ }
+
+ /* not enough place into dst : decode into tmpOut */
/* ensure enough place for tmpOut */
if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
if (dctx->dict == dctx->tmpOutBuffer) {
@@ -1699,12 +1699,12 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
/* Decode block */
{ const char* dict = (const char*)dctx->dict;
size_t dictSize = dctx->dictSize;
- int decodedSize;
+ int decodedSize;
if (dict && dictSize > 1 GB) {
/* the dictSize param is an int, avoid truncation / sign issues */
dict += dictSize - 64 KB;
dictSize = 64 KB;
- }
+ }
decodedSize = LZ4_decompress_safe_usingDict(
(const char*)selectedIn, (char*)dctx->tmpOut,
(int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
@@ -1718,32 +1718,32 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
dctx->tmpOutSize = (size_t)decodedSize;
dctx->tmpOutStart = 0;
dctx->dStage = dstage_flushOut;
- }
+ }
/* fall-through */
-
- case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */
- DEBUGLOG(6, "dstage_flushOut");
- if (dstPtr != NULL) {
- size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
+
+ case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */
+ DEBUGLOG(6, "dstage_flushOut");
+ if (dstPtr != NULL) {
+ size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);
-
- /* dictionary management */
+
+ /* dictionary management */
if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/);
-
+
dctx->tmpOutStart += sizeToCopy;
- dstPtr += sizeToCopy;
- }
- if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
- dctx->dStage = dstage_getBlockHeader; /* get next block */
- break;
- }
- /* could not flush everything : stop there, just request a block header */
- doAnotherStage = 0;
- nextSrcSizeHint = BHSize;
- break;
-
- case dstage_getSuffix:
+ dstPtr += sizeToCopy;
+ }
+ if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
+ dctx->dStage = dstage_getBlockHeader; /* get next block */
+ break;
+ }
+ /* could not flush everything : stop there, just request a block header */
+ doAnotherStage = 0;
+ nextSrcSizeHint = BHSize;
+ break;
+
+ case dstage_getSuffix:
if (dctx->frameRemainingSize)
return err0r(LZ4F_ERROR_frameSize_wrong); /* incorrect frame size decoded */
if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */
@@ -1751,7 +1751,7 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
LZ4F_resetDecompressionContext(dctx);
doAnotherStage = 0;
break;
- }
+ }
if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */
dctx->tmpInSize = 0;
dctx->dStage = dstage_storeSuffix;
@@ -1759,25 +1759,25 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
selectedIn = srcPtr;
srcPtr += 4;
}
-
+
if (dctx->dStage == dstage_storeSuffix) /* can be skipped */
- case dstage_storeSuffix:
+ case dstage_storeSuffix:
{ size_t const remainingInput = (size_t)(srcEnd - srcPtr);
size_t const wantedData = 4 - dctx->tmpInSize;
size_t const sizeToCopy = MIN(wantedData, remainingInput);
memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
- srcPtr += sizeToCopy;
+ srcPtr += sizeToCopy;
dctx->tmpInSize += sizeToCopy;
if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */
nextSrcSizeHint = 4 - dctx->tmpInSize;
- doAnotherStage=0;
- break;
- }
+ doAnotherStage=0;
+ break;
+ }
selectedIn = dctx->tmpIn;
} /* if (dctx->dStage == dstage_storeSuffix) */
-
+
/* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */
- { U32 const readCRC = LZ4F_readLE32(selectedIn);
+ { U32 const readCRC = LZ4F_readLE32(selectedIn);
U32 const resultCRC = XXH32_digest(&(dctx->xxh));
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
if (readCRC != resultCRC)
@@ -1786,97 +1786,97 @@ size_t LZ4F_decompress(LZ4F_dctx* dctx,
(void)readCRC;
(void)resultCRC;
#endif
- nextSrcSizeHint = 0;
+ nextSrcSizeHint = 0;
LZ4F_resetDecompressionContext(dctx);
- doAnotherStage = 0;
- break;
- }
-
- case dstage_getSFrameSize:
- if ((srcEnd - srcPtr) >= 4) {
- selectedIn = srcPtr;
- srcPtr += 4;
- } else {
- /* not enough input to read cBlockSize field */
+ doAnotherStage = 0;
+ break;
+ }
+
+ case dstage_getSFrameSize:
+ if ((srcEnd - srcPtr) >= 4) {
+ selectedIn = srcPtr;
+ srcPtr += 4;
+ } else {
+ /* not enough input to read cBlockSize field */
dctx->tmpInSize = 4;
dctx->tmpInTarget = 8;
dctx->dStage = dstage_storeSFrameSize;
- }
-
+ }
+
if (dctx->dStage == dstage_storeSFrameSize)
- case dstage_storeSFrameSize:
+ case dstage_storeSFrameSize:
{ size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
(size_t)(srcEnd - srcPtr) );
memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
- srcPtr += sizeToCopy;
+ srcPtr += sizeToCopy;
dctx->tmpInSize += sizeToCopy;
if (dctx->tmpInSize < dctx->tmpInTarget) {
/* not enough input to get full sBlockSize; wait for more */
nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize;
- doAnotherStage = 0;
- break;
- }
+ doAnotherStage = 0;
+ break;
+ }
selectedIn = dctx->header + 4;
} /* if (dctx->dStage == dstage_storeSFrameSize) */
-
+
/* case dstage_decodeSFrameSize: */ /* no direct entry */
- { size_t const SFrameSize = LZ4F_readLE32(selectedIn);
+ { size_t const SFrameSize = LZ4F_readLE32(selectedIn);
dctx->frameInfo.contentSize = SFrameSize;
dctx->tmpInTarget = SFrameSize;
dctx->dStage = dstage_skipSkippable;
- break;
- }
-
- case dstage_skipSkippable:
+ break;
+ }
+
+ case dstage_skipSkippable:
{ size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr));
- srcPtr += skipSize;
+ srcPtr += skipSize;
dctx->tmpInTarget -= skipSize;
- doAnotherStage = 0;
+ doAnotherStage = 0;
nextSrcSizeHint = dctx->tmpInTarget;
if (nextSrcSizeHint) break; /* still more to skip */
/* frame fully skipped : prepare context for a new frame */
LZ4F_resetDecompressionContext(dctx);
- break;
- }
+ break;
+ }
} /* switch (dctx->dStage) */
} /* while (doAnotherStage) */
-
+
/* preserve history within tmp whenever necessary */
LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */
&& (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */
- && (dctx->dict != NULL) /* dictionary exists */
+ && (dctx->dict != NULL) /* dictionary exists */
&& (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */
&& ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */
- {
+ {
if (dctx->dStage == dstage_flushOut) {
size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
size_t copySize = 64 KB - dctx->tmpOutSize;
const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
if (dctx->tmpOutSize > 64 KB) copySize = 0;
- if (copySize > preserveSize) copySize = preserveSize;
- assert(dctx->tmpOutBuffer != NULL);
-
- memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
-
+ if (copySize > preserveSize) copySize = preserveSize;
+ assert(dctx->tmpOutBuffer != NULL);
+
+ memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
+
dctx->dict = dctx->tmpOutBuffer;
dctx->dictSize = preserveSize + dctx->tmpOutStart;
- } else {
+ } else {
const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
size_t const newDictSize = MIN(dctx->dictSize, 64 KB);
-
- memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
-
+
+ memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
+
dctx->dict = dctx->tmpOutBuffer;
dctx->dictSize = newDictSize;
dctx->tmpOut = dctx->tmpOutBuffer + newDictSize;
- }
- }
-
+ }
+ }
+
*srcSizePtr = (size_t)(srcPtr - srcStart);
*dstSizePtr = (size_t)(dstPtr - dstStart);
- return nextSrcSizeHint;
-}
+ return nextSrcSizeHint;
+}
/*! LZ4F_decompress_usingDict() :
* Same as LZ4F_decompress(), using a predefined dictionary.
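Editor's note: the decompression hunks above implement the LZ4F_decompress() state machine, whose return value is a size hint for the next call. A minimal caller-side sketch is shown below; the stdio-based I/O, buffer sizes, and the decompress_file name are illustrative assumptions and are not part of this commit.

    /* Minimal sketch of a caller loop for the state machine above.
     * Assumes stdio I/O and fixed-size buffers; not part of this commit. */
    #include "lz4frame.h"
    #include <stdio.h>

    static int decompress_file(FILE* in, FILE* out)
    {
        LZ4F_dctx* dctx = NULL;
        char srcBuf[64 * 1024];
        char dstBuf[256 * 1024];
        size_t ret = 1;   /* non-zero means the frame is not finished yet */

        if (LZ4F_isError(LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION)))
            return -1;

        while (ret != 0) {
            size_t const srcRead = fread(srcBuf, 1, sizeof(srcBuf), in);
            const char* srcPtr = srcBuf;
            const char* const srcEnd = srcBuf + srcRead;
            if (srcRead == 0) break;   /* truncated input */

            /* LZ4F_decompress() may consume only part of the input :
             * keep feeding the unconsumed remainder until it is drained */
            while (srcPtr < srcEnd && ret != 0) {
                size_t dstSize = sizeof(dstBuf);
                size_t srcSize = (size_t)(srcEnd - srcPtr);
                ret = LZ4F_decompress(dctx, dstBuf, &dstSize, srcPtr, &srcSize, NULL);
                if (LZ4F_isError(ret)) { LZ4F_freeDecompressionContext(dctx); return -1; }
                fwrite(dstBuf, 1, dstSize, out);
                srcPtr += srcSize;
            }
        }
        LZ4F_freeDecompressionContext(dctx);
        return 0;
    }

Respecting the returned hint (by reading roughly that many bytes next time) merely skips internal buffering; any srcSize remains valid, as the header comments below state.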
diff --git a/contrib/libs/lz4/lz4frame.h b/contrib/libs/lz4/lz4frame.h
index da959b367f..4573317ef2 100644
--- a/contrib/libs/lz4/lz4frame.h
+++ b/contrib/libs/lz4/lz4frame.h
@@ -1,37 +1,37 @@
-/*
- LZ4 auto-framing library
- Header File
+/*
+ LZ4 auto-framing library
+ Header File
Copyright (C) 2011-2017, Yann Collet.
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - LZ4 source repository : https://github.com/lz4/lz4
- - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
-*/
-
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
/* LZ4F is a stand-alone API able to create and decode LZ4 frames
* conformant with specification v1.6.1 in doc/lz4_Frame_format.md .
* Generated frames are compatible with `lz4` CLI.
@@ -40,18 +40,18 @@
*
* lz4.h is not required when using lz4frame.h,
* except to extract common constant such as LZ4_VERSION_NUMBER.
- * */
-
-#ifndef LZ4F_H_09782039843
-#define LZ4F_H_09782039843
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
-
-/* --- Dependency --- */
-#include <stddef.h> /* size_t */
-
+ * */
+
+#ifndef LZ4F_H_09782039843
+#define LZ4F_H_09782039843
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
+
/**
Introduction
@@ -61,32 +61,32 @@ extern "C" {
of encoding standard metadata alongside LZ4-compressed blocks.
*/
-/*-***************************************************************
+/*-***************************************************************
* Compiler specifics
*****************************************************************/
/* LZ4_DLL_EXPORT :
* Enable exporting of functions when building a Windows DLL
- * LZ4FLIB_VISIBILITY :
+ * LZ4FLIB_VISIBILITY :
* Control library symbols visibility.
*/
-#ifndef LZ4FLIB_VISIBILITY
-# if defined(__GNUC__) && (__GNUC__ >= 4)
-# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default")))
-# else
-# define LZ4FLIB_VISIBILITY
-# endif
-#endif
-#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
-# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY
-#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
-# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY
-#else
-# define LZ4FLIB_API LZ4FLIB_VISIBILITY
-#endif
-
+#ifndef LZ4FLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define LZ4FLIB_VISIBILITY
+# endif
+#endif
+#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
+# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY
+#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
+# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY
+#else
+# define LZ4FLIB_API LZ4FLIB_VISIBILITY
+#endif
+
#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS
# define LZ4F_DEPRECATE(x) x
-#else
+#else
# if defined(_MSC_VER)
# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */
# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6))
@@ -94,85 +94,85 @@ extern "C" {
# else
# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */
# endif
-#endif
-
-
-/*-************************************
+#endif
+
+
+/*-************************************
* Error management
**************************************/
-typedef size_t LZ4F_errorCode_t;
-
+typedef size_t LZ4F_errorCode_t;
+
LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */
LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */
-
-
-/*-************************************
+
+
+/*-************************************
* Frame compression types
- ************************************* */
+ ************************************* */
/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */
#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
-# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
-#else
-# define LZ4F_OBSOLETE_ENUM(x)
-#endif
-
+# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
+#else
+# define LZ4F_OBSOLETE_ENUM(x)
+#endif
+
/* The larger the block size, the (slightly) better the compression ratio,
* though there are diminishing returns.
- * Larger blocks also increase memory usage on both compression and decompression sides.
- */
-typedef enum {
- LZ4F_default=0,
- LZ4F_max64KB=4,
- LZ4F_max256KB=5,
- LZ4F_max1MB=6,
- LZ4F_max4MB=7
- LZ4F_OBSOLETE_ENUM(max64KB)
- LZ4F_OBSOLETE_ENUM(max256KB)
- LZ4F_OBSOLETE_ENUM(max1MB)
- LZ4F_OBSOLETE_ENUM(max4MB)
-} LZ4F_blockSizeID_t;
-
+ * Larger blocks also increase memory usage on both compression and decompression sides.
+ */
+typedef enum {
+ LZ4F_default=0,
+ LZ4F_max64KB=4,
+ LZ4F_max256KB=5,
+ LZ4F_max1MB=6,
+ LZ4F_max4MB=7
+ LZ4F_OBSOLETE_ENUM(max64KB)
+ LZ4F_OBSOLETE_ENUM(max256KB)
+ LZ4F_OBSOLETE_ENUM(max1MB)
+ LZ4F_OBSOLETE_ENUM(max4MB)
+} LZ4F_blockSizeID_t;
+
/* Linked blocks sharply reduce inefficiencies when using small blocks,
* they compress better.
* However, some LZ4 decoders are only compatible with independent blocks */
-typedef enum {
- LZ4F_blockLinked=0,
- LZ4F_blockIndependent
- LZ4F_OBSOLETE_ENUM(blockLinked)
- LZ4F_OBSOLETE_ENUM(blockIndependent)
-} LZ4F_blockMode_t;
-
-typedef enum {
- LZ4F_noContentChecksum=0,
- LZ4F_contentChecksumEnabled
- LZ4F_OBSOLETE_ENUM(noContentChecksum)
- LZ4F_OBSOLETE_ENUM(contentChecksumEnabled)
-} LZ4F_contentChecksum_t;
-
-typedef enum {
+typedef enum {
+ LZ4F_blockLinked=0,
+ LZ4F_blockIndependent
+ LZ4F_OBSOLETE_ENUM(blockLinked)
+ LZ4F_OBSOLETE_ENUM(blockIndependent)
+} LZ4F_blockMode_t;
+
+typedef enum {
+ LZ4F_noContentChecksum=0,
+ LZ4F_contentChecksumEnabled
+ LZ4F_OBSOLETE_ENUM(noContentChecksum)
+ LZ4F_OBSOLETE_ENUM(contentChecksumEnabled)
+} LZ4F_contentChecksum_t;
+
+typedef enum {
LZ4F_noBlockChecksum=0,
LZ4F_blockChecksumEnabled
} LZ4F_blockChecksum_t;
typedef enum {
- LZ4F_frame=0,
- LZ4F_skippableFrame
- LZ4F_OBSOLETE_ENUM(skippableFrame)
-} LZ4F_frameType_t;
-
+ LZ4F_frame=0,
+ LZ4F_skippableFrame
+ LZ4F_OBSOLETE_ENUM(skippableFrame)
+} LZ4F_frameType_t;
+
#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
-typedef LZ4F_blockSizeID_t blockSizeID_t;
-typedef LZ4F_blockMode_t blockMode_t;
-typedef LZ4F_frameType_t frameType_t;
-typedef LZ4F_contentChecksum_t contentChecksum_t;
-#endif
-
+typedef LZ4F_blockSizeID_t blockSizeID_t;
+typedef LZ4F_blockMode_t blockMode_t;
+typedef LZ4F_frameType_t frameType_t;
+typedef LZ4F_contentChecksum_t contentChecksum_t;
+#endif
+
/*! LZ4F_frameInfo_t :
* makes it possible to set or read frame parameters.
* Structure must be first init to 0, using memset() or LZ4F_INIT_FRAMEINFO,
* setting all parameters to default.
* It's then possible to update selectively some parameters */
-typedef struct {
+typedef struct {
LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */
LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */
LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */
@@ -180,29 +180,29 @@ typedef struct {
unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */
unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */
LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */
-} LZ4F_frameInfo_t;
-
-#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */
+} LZ4F_frameInfo_t;
+
+#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */
/*! LZ4F_preferences_t :
* makes it possible to supply advanced compression instructions to streaming interface.
* Structure must be first init to 0, using memset() or LZ4F_INIT_PREFERENCES,
* setting all parameters to default.
* All reserved fields must be set to zero. */
-typedef struct {
- LZ4F_frameInfo_t frameInfo;
+typedef struct {
+ LZ4F_frameInfo_t frameInfo;
int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */
unsigned autoFlush; /* 1: always flush; reduces usage of internal buffers */
unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* v1.8.2+ */
unsigned reserved[3]; /* must be zero for forward compatibility */
-} LZ4F_preferences_t;
-
-#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */
-
+} LZ4F_preferences_t;
+
+#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */
+
-/*-*********************************
-* Simple compression function
-***********************************/
+/*-*********************************
+* Simple compression function
+***********************************/
LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
@@ -211,51 +211,51 @@ LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
* `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences.
* Note : this result is only usable with LZ4F_compressFrame().
* It may also be used with LZ4F_compressUpdate() _if no flush() operation_ is performed.
- */
-LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
-
+ */
+LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
+
/*! LZ4F_compressFrame() :
* Compress an entire srcBuffer into a valid LZ4 frame.
* dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
* The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
- * @return : number of bytes written into dstBuffer.
- * or an error code if it fails (can be tested using LZ4F_isError())
- */
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ */
LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
const void* srcBuffer, size_t srcSize,
const LZ4F_preferences_t* preferencesPtr);
-
-
-/*-***********************************
-* Advanced compression functions
-*************************************/
-typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */
-typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with previous API version */
-
-typedef struct {
- unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */
- unsigned reserved[3];
-} LZ4F_compressOptions_t;
-
+
+
+/*-***********************************
+* Advanced compression functions
+*************************************/
+typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */
+typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with previous API version */
+
+typedef struct {
+ unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */
+ unsigned reserved[3];
+} LZ4F_compressOptions_t;
+
/*--- Resource Management ---*/
-
+
#define LZ4F_VERSION 100 /* This number can be used to check for an incompatible API breaking change */
-LZ4FLIB_API unsigned LZ4F_getVersion(void);
+LZ4FLIB_API unsigned LZ4F_getVersion(void);
/*! LZ4F_createCompressionContext() :
- * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
+ * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
* This is achieved using LZ4F_createCompressionContext(), which takes as argument a version.
- * The version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL.
- * The function will provide a pointer to a fully allocated LZ4F_cctx object.
- * If @return != zero, there was an error during context creation.
- * Object can release its memory using LZ4F_freeCompressionContext();
- */
+ * The version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL.
+ * The function will provide a pointer to a fully allocated LZ4F_cctx object.
+ * If @return != zero, there was an error during context creation.
+ * Object can release its memory using LZ4F_freeCompressionContext();
+ */
LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version);
LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
-
-
+
+
/*---- Compression ----*/
-
+
#define LZ4F_HEADER_SIZE_MIN 7 /* LZ4 Frame header size can vary, depending on selected paramaters */
#define LZ4F_HEADER_SIZE_MAX 19
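Editor's note: the hunk above documents the one-shot pair LZ4F_compressFrameBound() / LZ4F_compressFrame(). A short usage sketch follows; the malloc-based buffer handling and the compress_buffer name are illustrative assumptions, not part of this commit.

    /* One-shot usage sketch for the two functions documented above.
     * Buffer handling and naming are illustrative, not part of this commit. */
    #include "lz4frame.h"
    #include <stdlib.h>

    static void* compress_buffer(const void* src, size_t srcSize, size_t* cSizePtr)
    {
        size_t const bound = LZ4F_compressFrameBound(srcSize, NULL); /* NULL => default preferences */
        void* const dst = malloc(bound);
        if (dst == NULL) return NULL;

        {   size_t const cSize = LZ4F_compressFrame(dst, bound, src, srcSize, NULL);
            if (LZ4F_isError(cSize)) { free(dst); return NULL; }
            *cSizePtr = cSize;   /* a complete LZ4 frame, decodable by LZ4F_decompress() */
        }
        return dst;
    }

Passing NULL preferences follows the header's note that all preferences are then set to default, which also keeps the bound valid for the matching compressFrame call.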
@@ -271,14 +271,14 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
/*! LZ4F_compressBegin() :
* will write the frame header into dstBuffer.
* dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
- * `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default.
- * @return : number of bytes written into dstBuffer for the header
- * or an error code (which can be tested using LZ4F_isError())
- */
+ * `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default.
+ * @return : number of bytes written into dstBuffer for the header
+ * or an error code (which can be tested using LZ4F_isError())
+ */
LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
void* dstBuffer, size_t dstCapacity,
const LZ4F_preferences_t* prefsPtr);
-
+
/*! LZ4F_compressBound() :
* Provides minimum dstCapacity required to guarantee success of
* LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario.
@@ -290,12 +290,12 @@ LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
* @return is always the same for a srcSize and prefsPtr.
* prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario.
* tech details :
- * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
+ * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
* It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd().
* @return doesn't include frame header, as it was already generated by LZ4F_compressBegin().
*/
-LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr);
-
+LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr);
+
/*! LZ4F_compressUpdate() :
* LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
* Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations.
@@ -303,57 +303,57 @@ LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t*
* If this condition is not respected, LZ4F_compress() will fail (result is an errorCode).
* LZ4F_compressUpdate() doesn't guarantee error recovery.
* When an error occurs, compression context must be freed or resized.
- * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
- * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
- * or an error code if it fails (which can be tested using LZ4F_isError())
- */
+ * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
+ * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ */
LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx,
void* dstBuffer, size_t dstCapacity,
const void* srcBuffer, size_t srcSize,
const LZ4F_compressOptions_t* cOptPtr);
-
+
/*! LZ4F_flush() :
* When data must be generated and sent immediately, without waiting for a block to be completely filled,
* it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx.
- * `dstCapacity` must be large enough to ensure the operation will be successful.
- * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default.
+ * `dstCapacity` must be large enough to ensure the operation will be successful.
+ * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default.
* @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx)
- * or an error code if it fails (which can be tested using LZ4F_isError())
+ * or an error code if it fails (which can be tested using LZ4F_isError())
* Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
- */
+ */
LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx,
void* dstBuffer, size_t dstCapacity,
const LZ4F_compressOptions_t* cOptPtr);
-
+
/*! LZ4F_compressEnd() :
* To properly finish an LZ4 frame, invoke LZ4F_compressEnd().
* It will flush whatever data remained within `cctx` (like LZ4_flush())
* and properly finalize the frame, with an endMark and a checksum.
- * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default.
+ * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default.
* @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark),
- * or an error code if it fails (which can be tested using LZ4F_isError())
+ * or an error code if it fails (which can be tested using LZ4F_isError())
* Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
* A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task.
- */
+ */
LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx,
void* dstBuffer, size_t dstCapacity,
const LZ4F_compressOptions_t* cOptPtr);
-
-
-/*-*********************************
-* Decompression functions
-***********************************/
-typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */
-typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */
-
-typedef struct {
+
+
+/*-*********************************
+* Decompression functions
+***********************************/
+typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */
+typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */
+
+typedef struct {
unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified. This optimization skips storage operations in tmp buffers. */
unsigned reserved[3]; /* must be set to zero for forward compatibility */
-} LZ4F_decompressOptions_t;
-
-
-/* Resource management */
-
+} LZ4F_decompressOptions_t;
+
+
+/* Resource management */
+
/*! LZ4F_createDecompressionContext() :
* Create an LZ4F_dctx object, to track all decompression operations.
* The version provided MUST be LZ4F_VERSION.
@@ -362,15 +362,15 @@ typedef struct {
* dctx memory can be released using LZ4F_freeDecompressionContext();
* Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released.
* That is, it should be == 0 if decompression has been completed fully and correctly.
- */
-LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version);
+ */
+LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version);
LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
-
-
+
+
/*-***********************************
* Streaming decompression functions
*************************************/
-
+
#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5
/*! LZ4F_headerSize() : v1.9.0+
@@ -381,8 +381,8 @@ LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
* or an error code, which can be tested using LZ4F_isError()
* note : Frame header size is variable, but is guaranteed to be
* >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes.
- */
-LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);
+ */
+LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);
/*! LZ4F_getFrameInfo() :
* This function extracts frame parameters (max blockSize, dictID, etc.).
@@ -427,31 +427,31 @@ LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);
* note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely.
* note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
*/
-LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
- LZ4F_frameInfo_t* frameInfoPtr,
- const void* srcBuffer, size_t* srcSizePtr);
-
+LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
+ LZ4F_frameInfo_t* frameInfoPtr,
+ const void* srcBuffer, size_t* srcSizePtr);
+
/*! LZ4F_decompress() :
- * Call this function repetitively to regenerate data compressed in `srcBuffer`.
- *
- * The function requires a valid dctx state.
- * It will read up to *srcSizePtr bytes from srcBuffer,
+ * Call this function repetitively to regenerate data compressed in `srcBuffer`.
+ *
+ * The function requires a valid dctx state.
+ * It will read up to *srcSizePtr bytes from srcBuffer,
* and decompress data into dstBuffer, of capacity *dstSizePtr.
- *
+ *
* The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value).
* The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value).
- *
+ *
* The function does not necessarily read all input bytes, so always check value in *srcSizePtr.
* Unconsumed source data must be presented again in subsequent invocations.
- *
+ *
* `dstBuffer` can freely change between each consecutive function invocation.
* `dstBuffer` content will be overwritten.
- *
+ *
* @return : an hint of how many `srcSize` bytes LZ4F_decompress() expects for next call.
* Schematically, it's the size of the current (or remaining) compressed block + header of next block.
* Respecting the hint provides some small speed benefit, because it skips intermediate buffers.
* This is just a hint though, it's always possible to provide any srcSize.
- *
+ *
* When a frame is fully decoded, @return will be 0 (no more data expected).
* When provided with more bytes than necessary to decode a frame,
* LZ4F_decompress() will stop reading exactly at end of current frame, and @return 0.
@@ -461,27 +461,27 @@ LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
* Use LZ4F_resetDecompressionContext() to return to clean state.
*
* After a frame is fully decoded, dctx can be used again to decompress another frame.
- */
-LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx,
- void* dstBuffer, size_t* dstSizePtr,
- const void* srcBuffer, size_t* srcSizePtr,
- const LZ4F_decompressOptions_t* dOptPtr);
-
-
+ */
+LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const LZ4F_decompressOptions_t* dOptPtr);
+
+
/*! LZ4F_resetDecompressionContext() : added in v1.8.0
* In case of an error, the context is left in "undefined" state.
* In which case, it's necessary to reset it, before re-using it.
* This method can also be used to abruptly stop any unfinished decompression,
* and start a new one using same context resources. */
LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */
-
-#if defined (__cplusplus)
-}
-#endif
-
-#endif /* LZ4F_H_09782039843 */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* LZ4F_H_09782039843 */
#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843)
#define LZ4F_H_STATIC_09782039843
@@ -501,9 +501,9 @@ extern "C" {
* Use at your own risk.
*/
#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
-# define LZ4FLIB_STATIC_API LZ4FLIB_API
+# define LZ4FLIB_STATIC_API LZ4FLIB_API
#else
-# define LZ4FLIB_STATIC_API
+# define LZ4FLIB_STATIC_API
#endif
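Editor's note: the lz4frame.h hunks above declare the streaming sequence createCompressionContext -> compressBegin -> compressUpdate -> compressEnd -> freeCompressionContext. A sketch of that call order is given below; the chunk-array interface, error path, and compress_chunks name are illustrative assumptions, not part of this commit.

    /* Streaming call sequence sketched from the declarations above.
     * The chunk source and capacity handling are illustrative assumptions. */
    #include "lz4frame.h"

    static size_t compress_chunks(const char* const* chunks, const size_t* chunkSizes, int nbChunks,
                                  char* dst, size_t dstCapacity)
    {
        LZ4F_cctx* cctx = NULL;
        size_t dstPos = 0;
        int i;

        if (LZ4F_isError(LZ4F_createCompressionContext(&cctx, LZ4F_VERSION)))
            return 0;

        /* frame header */
        {   size_t const r = LZ4F_compressBegin(cctx, dst + dstPos, dstCapacity - dstPos, NULL);
            if (LZ4F_isError(r)) goto _fail;
            dstPos += r;
        }
        /* frame body : each call needs >= LZ4F_compressBound(chunkSize, NULL) of remaining capacity */
        for (i = 0; i < nbChunks; i++) {
            size_t const r = LZ4F_compressUpdate(cctx, dst + dstPos, dstCapacity - dstPos,
                                                 chunks[i], chunkSizes[i], NULL);
            if (LZ4F_isError(r)) goto _fail;
            dstPos += r;
        }
        /* frame footer : endMark + optional content checksum */
        {   size_t const r = LZ4F_compressEnd(cctx, dst + dstPos, dstCapacity - dstPos, NULL);
            if (LZ4F_isError(r)) goto _fail;
            dstPos += r;
        }
        LZ4F_freeCompressionContext(cctx);
        return dstPos;

    _fail:
        LZ4F_freeCompressionContext(cctx);
        return 0;
    }

As the header comments note, a successful LZ4F_compressEnd() leaves the cctx reusable for another frame without reallocating it.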
diff --git a/contrib/libs/lz4/lz4hc.c b/contrib/libs/lz4/lz4hc.c
index 5dc65a8f88..a556d47920 100644
--- a/contrib/libs/lz4/lz4hc.c
+++ b/contrib/libs/lz4/lz4hc.c
@@ -1,78 +1,78 @@
/*
- LZ4 HC - High Compression Mode of LZ4
+ LZ4 HC - High Compression Mode of LZ4
Copyright (C) 2011-2017, Yann Collet.
- BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You can contact the author at :
- - LZ4 source repository : https://github.com/lz4/lz4
- - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
-/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
+/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
-/* *************************************
-* Tuning Parameter
-***************************************/
+/* *************************************
+* Tuning Parameter
+***************************************/
/*! HEAPMODE :
* Select how default compression function will allocate workplace memory,
* in stack (0:fastest), or in heap (1:requires malloc()).
* Since workplace is rather large, heap mode is recommended.
- */
-#ifndef LZ4HC_HEAPMODE
-# define LZ4HC_HEAPMODE 1
+ */
+#ifndef LZ4HC_HEAPMODE
+# define LZ4HC_HEAPMODE 1
#endif
/*=== Dependency ===*/
#define LZ4_HC_STATIC_LINKING_ONLY
-#include "lz4hc.h"
+#include "lz4hc.h"
-/*=== Common definitions ===*/
-#if defined(__GNUC__)
-# pragma GCC diagnostic ignored "-Wunused-function"
+/*=== Common definitions ===*/
+#if defined(__GNUC__)
+# pragma GCC diagnostic ignored "-Wunused-function"
#endif
-#if defined (__clang__)
-# pragma clang diagnostic ignored "-Wunused-function"
+#if defined (__clang__)
+# pragma clang diagnostic ignored "-Wunused-function"
#endif
-#define LZ4_COMMONDEFS_ONLY
+#define LZ4_COMMONDEFS_ONLY
#ifndef LZ4_SRC_INCLUDED
#include "lz4.c" /* LZ4_count, constants, mem */
#endif
-
-/*=== Enums ===*/
-typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
-
-
+
+/*=== Enums ===*/
+typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
+
+
/*=== Constants ===*/
-#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
+#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
#define LZ4_OPT_NUM (1<<12)
@@ -85,16 +85,16 @@ typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor
-static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
+static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
-/**************************************
-* HC Compression
-**************************************/
+/**************************************
+* HC Compression
+**************************************/
static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
-{
- MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
- MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
+{
+ MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
+ MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
}
static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
@@ -107,33 +107,33 @@ static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
startingOffset += 64 KB;
hc4->nextToUpdate = (U32) startingOffset;
hc4->base = start - startingOffset;
- hc4->end = start;
+ hc4->end = start;
hc4->dictBase = start - startingOffset;
hc4->dictLimit = (U32) startingOffset;
hc4->lowLimit = (U32) startingOffset;
-}
+}
-/* Update chains up to ip (excluded) */
+/* Update chains up to ip (excluded) */
LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
-{
- U16* const chainTable = hc4->chainTable;
- U32* const hashTable = hc4->hashTable;
- const BYTE* const base = hc4->base;
- U32 const target = (U32)(ip - base);
- U32 idx = hc4->nextToUpdate;
-
- while (idx < target) {
- U32 const h = LZ4HC_hashPtr(base+idx);
- size_t delta = idx - hashTable[h];
+{
+ U16* const chainTable = hc4->chainTable;
+ U32* const hashTable = hc4->hashTable;
+ const BYTE* const base = hc4->base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = hc4->nextToUpdate;
+
+ while (idx < target) {
+ U32 const h = LZ4HC_hashPtr(base+idx);
+ size_t delta = idx - hashTable[h];
if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
DELTANEXTU16(chainTable, idx) = (U16)delta;
- hashTable[h] = idx;
- idx++;
- }
+ hashTable[h] = idx;
+ idx++;
+ }
- hc4->nextToUpdate = target;
-}
+ hc4->nextToUpdate = target;
+}
/** LZ4HC_countBack() :
* @return : negative value, nb of common bytes before ip/match */
@@ -162,7 +162,7 @@ int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
{
size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
- if (bitsToRotate == 0) return pattern;
+ if (bitsToRotate == 0) return pattern;
return LZ4HC_rotl32(pattern, (int)bitsToRotate);
}
@@ -170,10 +170,10 @@ static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
* pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
static unsigned
LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
-{
+{
const BYTE* const iStart = ip;
- reg_t const pattern = (sizeof(pattern)==8) ?
- (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
+ reg_t const pattern = (sizeof(pattern)==8) ?
+ (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
while (likely(ip < iEnd-(sizeof(pattern)-1))) {
reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
@@ -186,7 +186,7 @@ LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
reg_t patternByte = pattern;
while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
ip++; patternByte >>= 8;
- }
+ }
} else { /* big endian */
U32 bitOffset = (sizeof(pattern)*8) - 8;
while (ip < iEnd) {
@@ -194,10 +194,10 @@ LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
if (*ip != byte) break;
ip ++; bitOffset -= 8;
}
- }
+ }
return (unsigned)(ip - iStart);
-}
+}
/* LZ4HC_reverseCountPattern() :
* pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
@@ -234,30 +234,30 @@ typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
LZ4_FORCE_INLINE int
LZ4HC_InsertAndGetWiderMatch (
- LZ4HC_CCtx_internal* hc4,
- const BYTE* const ip,
- const BYTE* const iLowLimit,
- const BYTE* const iHighLimit,
- int longest,
- const BYTE** matchpos,
- const BYTE** startpos,
+ LZ4HC_CCtx_internal* hc4,
+ const BYTE* const ip,
+ const BYTE* const iLowLimit,
+ const BYTE* const iHighLimit,
+ int longest,
+ const BYTE** matchpos,
+ const BYTE** startpos,
const int maxNbAttempts,
const int patternAnalysis,
const int chainSwap,
const dictCtx_directive dict,
const HCfavor_e favorDecSpeed)
{
- U16* const chainTable = hc4->chainTable;
- U32* const HashTable = hc4->hashTable;
+ U16* const chainTable = hc4->chainTable;
+ U32* const HashTable = hc4->hashTable;
const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
- const BYTE* const base = hc4->base;
- const U32 dictLimit = hc4->dictLimit;
- const BYTE* const lowPrefixPtr = base + dictLimit;
+ const BYTE* const base = hc4->base;
+ const U32 dictLimit = hc4->dictLimit;
+ const BYTE* const lowPrefixPtr = base + dictLimit;
const U32 ipIndex = (U32)(ip - base);
const U32 lowestMatchIndex = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
- const BYTE* const dictBase = hc4->dictBase;
+ const BYTE* const dictBase = hc4->dictBase;
int const lookBackLength = (int)(ip-iLowLimit);
- int nbAttempts = maxNbAttempts;
+ int nbAttempts = maxNbAttempts;
U32 matchChainPos = 0;
U32 const pattern = LZ4_read32(ip);
U32 matchIndex;
@@ -265,15 +265,15 @@ LZ4HC_InsertAndGetWiderMatch (
size_t srcPatternLength = 0;
DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
- /* First Match */
- LZ4HC_Insert(hc4, ip);
- matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+ /* First Match */
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
matchIndex, lowestMatchIndex);
- while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
+ while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
int matchLength=0;
- nbAttempts--;
+ nbAttempts--;
assert(matchIndex < ipIndex);
if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
/* do nothing */
@@ -390,8 +390,8 @@ LZ4HC_InsertAndGetWiderMatch (
if (lookBackLength==0) { /* no back possible */
size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
if ((size_t)longest < maxML) {
- assert(base + matchIndex != ip);
- if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
+ assert(base + matchIndex != ip);
+ if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
assert(maxML < 2 GB);
longest = (int)maxML;
*matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
@@ -411,7 +411,7 @@ LZ4HC_InsertAndGetWiderMatch (
} /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */
if ( dict == usingDictCtxHc
- && nbAttempts > 0
+ && nbAttempts > 0
&& ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
@@ -424,22 +424,22 @@ LZ4HC_InsertAndGetWiderMatch (
int mlt;
int back = 0;
const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
- if (vLimit > iHighLimit) vLimit = iHighLimit;
+ if (vLimit > iHighLimit) vLimit = iHighLimit;
mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0;
- mlt -= back;
+ mlt -= back;
if (mlt > longest) {
longest = mlt;
*matchpos = base + matchIndex + back;
*startpos = ip + back;
} }
-
+
{ U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
dictMatchIndex -= nextOffset;
matchIndex -= nextOffset;
} } }
- return longest;
+ return longest;
}
LZ4_FORCE_INLINE
@@ -461,138 +461,138 @@ int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index tabl
* @return : 0 if ok,
* 1 if buffer issue detected */
LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
- const BYTE** _ip,
- BYTE** _op,
- const BYTE** _anchor,
- int matchLength,
- const BYTE* const match,
+ const BYTE** _ip,
+ BYTE** _op,
+ const BYTE** _anchor,
+ int matchLength,
+ const BYTE* const match,
limitedOutput_directive limit,
- BYTE* oend)
-{
-#define ip (*_ip)
-#define op (*_op)
-#define anchor (*_anchor)
-
+ BYTE* oend)
+{
+#define ip (*_ip)
+#define op (*_op)
+#define anchor (*_anchor)
+
size_t length;
- BYTE* const token = op++;
-
+ BYTE* const token = op++;
+
#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
static const BYTE* start = NULL;
static U32 totalCost = 0;
- U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
- U32 const ll = (U32)(ip - anchor);
+ U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
+ U32 const ll = (U32)(ip - anchor);
U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
- if (start==NULL) start = anchor; /* only works for single segment */
+ if (start==NULL) start = anchor; /* only works for single segment */
/* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
- DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
+ DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
pos,
- (U32)(ip - anchor), matchLength, (U32)(ip-match),
+ (U32)(ip - anchor), matchLength, (U32)(ip-match),
cost, totalCost);
totalCost += cost;
#endif
- /* Encode Literal length */
- length = (size_t)(ip - anchor);
- LZ4_STATIC_ASSERT(notLimited == 0);
- /* Check output limit */
- if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
- DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
- (int)length, (int)(oend - op));
- return 1;
- }
+ /* Encode Literal length */
+ length = (size_t)(ip - anchor);
+ LZ4_STATIC_ASSERT(notLimited == 0);
+ /* Check output limit */
+ if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
+ (int)length, (int)(oend - op));
+ return 1;
+ }
if (length >= RUN_MASK) {
size_t len = length - RUN_MASK;
*token = (RUN_MASK << ML_BITS);
- for(; len >= 255 ; len -= 255) *op++ = 255;
- *op++ = (BYTE)len;
+ for(; len >= 255 ; len -= 255) *op++ = 255;
+ *op++ = (BYTE)len;
} else {
*token = (BYTE)(length << ML_BITS);
}
- /* Copy Literals */
- LZ4_wildCopy8(op, anchor, op + length);
- op += length;
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op + length);
+ op += length;
- /* Encode Offset */
- assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
- LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
+ /* Encode Offset */
+ assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
+ LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
- /* Encode MatchLength */
+ /* Encode MatchLength */
assert(matchLength >= MINMATCH);
length = (size_t)matchLength - MINMATCH;
- if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
- DEBUGLOG(6, "Not enough room to write match length");
- return 1; /* Check output limit */
- }
+ if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
+ DEBUGLOG(6, "Not enough room to write match length");
+ return 1; /* Check output limit */
+ }
if (length >= ML_MASK) {
- *token += ML_MASK;
- length -= ML_MASK;
- for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
- if (length >= 255) { length -= 255; *op++ = 255; }
- *op++ = (BYTE)length;
- } else {
- *token += (BYTE)(length);
- }
-
- /* Prepare next loop */
- ip += matchLength;
- anchor = ip;
-
- return 0;
+ *token += ML_MASK;
+ length -= ML_MASK;
+ for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
+ if (length >= 255) { length -= 255; *op++ = 255; }
+ *op++ = (BYTE)length;
+ } else {
+ *token += (BYTE)(length);
+ }
+
+ /* Prepare next loop */
+ ip += matchLength;
+ anchor = ip;
+
+ return 0;
}
-#undef ip
-#undef op
-#undef anchor
+#undef ip
+#undef op
+#undef anchor
LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
- LZ4HC_CCtx_internal* const ctx,
- const char* const source,
- char* const dest,
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const source,
+ char* const dest,
int* srcSizePtr,
- int const maxOutputSize,
- int maxNbAttempts,
+ int const maxOutputSize,
+ int maxNbAttempts,
const limitedOutput_directive limit,
const dictCtx_directive dict
- )
+ )
{
const int inputSize = *srcSizePtr;
const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */
- const BYTE* ip = (const BYTE*) source;
- const BYTE* anchor = ip;
- const BYTE* const iend = ip + inputSize;
- const BYTE* const mflimit = iend - MFLIMIT;
- const BYTE* const matchlimit = (iend - LASTLITERALS);
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = (iend - LASTLITERALS);
BYTE* optr = (BYTE*) dest;
- BYTE* op = (BYTE*) dest;
+ BYTE* op = (BYTE*) dest;
BYTE* oend = op + maxOutputSize;
int ml0, ml, ml2, ml3;
const BYTE* start0;
const BYTE* ref0;
- const BYTE* ref = NULL;
- const BYTE* start2 = NULL;
- const BYTE* ref2 = NULL;
- const BYTE* start3 = NULL;
- const BYTE* ref3 = NULL;
+ const BYTE* ref = NULL;
+ const BYTE* start2 = NULL;
+ const BYTE* ref2 = NULL;
+ const BYTE* start3 = NULL;
+ const BYTE* ref3 = NULL;
- /* init */
+ /* init */
*srcSizePtr = 0;
if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
- if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+ if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
- /* Main Loop */
+ /* Main Loop */
while (ip <= mflimit) {
ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict);
if (ml<MINMATCH) { ip++; continue; }
- /* saved, in case we would skip too much */
+ /* saved, in case we would skip too much */
start0 = ip; ref0 = ref; ml0 = ml;
-_Search2:
+_Search2:
if (ip+ml <= mflimit) {
ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2,
@@ -604,39 +604,39 @@ _Search2:
if (ml2 == ml) { /* No better match => encode ML1 */
optr = op;
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
- continue;
- }
+ continue;
+ }
if (start0 < ip) { /* first match was skipped at least once */
if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */
ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */
} }
- /* Here, start0==ip */
- if ((start2 - ip) < 3) { /* First Match too small : removed */
- ml = ml2;
- ip = start2;
- ref =ref2;
- goto _Search2;
- }
+ /* Here, start0==ip */
+ if ((start2 - ip) < 3) { /* First Match too small : removed */
+ ml = ml2;
+ ip = start2;
+ ref =ref2;
+ goto _Search2;
+ }
-_Search3:
+_Search3:
/* At this stage, we have :
* ml2 > ml1, and
* ip1+3 <= ip2 (usually < ip1+ml1) */
- if ((start2 - ip) < OPTIMAL_ML) {
- int correction;
- int new_ml = ml;
- if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
- if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
- correction = new_ml - (int)(start2 - ip);
- if (correction > 0) {
- start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- }
- }
- /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ int new_ml = ml;
+ if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
+ if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ correction = new_ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ }
+ /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
if (start2 + ml2 <= mflimit) {
ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
@@ -647,73 +647,73 @@ _Search3:
}
if (ml3 == ml2) { /* No better match => encode ML1 and ML2 */
- /* ip & ref are known; Now for ml */
- if (start2 < ip+ml) ml = (int)(start2 - ip);
- /* Now, encode 2 sequences */
+ /* ip & ref are known; Now for ml */
+ if (start2 < ip+ml) ml = (int)(start2 - ip);
+ /* Now, encode 2 sequences */
optr = op;
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
- ip = start2;
+ ip = start2;
optr = op;
- if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
- ml = ml2;
- ref = ref2;
- goto _dest_overflow;
- }
- continue;
- }
-
- if (start3 < ip+ml+3) { /* Not enough space for match 2 : remove it */
- if (start3 >= (ip+ml)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
- if (start2 < ip+ml) {
- int correction = (int)(ip+ml - start2);
- start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- if (ml2 < MINMATCH) {
- start2 = start3;
- ref2 = ref3;
- ml2 = ml3;
- }
- }
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
+ ml = ml2;
+ ref = ref2;
+ goto _dest_overflow;
+ }
+ continue;
+ }
+
+ if (start3 < ip+ml+3) { /* Not enough space for match 2 : remove it */
+ if (start3 >= (ip+ml)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
+ if (start2 < ip+ml) {
+ int correction = (int)(ip+ml - start2);
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ if (ml2 < MINMATCH) {
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ }
+ }
optr = op;
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
- ip = start3;
- ref = ref3;
- ml = ml3;
-
- start0 = start2;
- ref0 = ref2;
- ml0 = ml2;
- goto _Search2;
- }
-
- start2 = start3;
- ref2 = ref3;
- ml2 = ml3;
- goto _Search3;
- }
-
- /*
+ ip = start3;
+ ref = ref3;
+ ml = ml3;
+
+ start0 = start2;
+ ref0 = ref2;
+ ml0 = ml2;
+ goto _Search2;
+ }
+
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ goto _Search3;
+ }
+
+ /*
* OK, now we have 3 ascending matches;
* let's write the first one ML1.
* ip & ref are known; Now decide ml.
- */
- if (start2 < ip+ml) {
+ */
+ if (start2 < ip+ml) {
if ((start2 - ip) < OPTIMAL_ML) {
- int correction;
- if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
- if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
- correction = ml - (int)(start2 - ip);
- if (correction > 0) {
- start2 += correction;
- ref2 += correction;
- ml2 -= correction;
- }
- } else {
- ml = (int)(start2 - ip);
- }
- }
+ int correction;
+ if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
+ if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ correction = ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ } else {
+ ml = (int)(start2 - ip);
+ }
+ }
optr = op;
if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
@@ -724,24 +724,24 @@ _Search3:
start2 = start3; ref2 = ref3; ml2 = ml3;
/* let's find a new ML3 */
- goto _Search3;
- }
+ goto _Search3;
+ }
_last_literals:
- /* Encode Last Literals */
+ /* Encode Last Literals */
{ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
- size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
- size_t const totalSize = 1 + llAdd + lastRunSize;
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
if (limit && (op + totalSize > oend)) {
- if (limit == limitedOutput) return 0;
+ if (limit == limitedOutput) return 0;
/* adapt lastRunSize to fill 'dest' */
- lastRunSize = (size_t)(oend - op) - 1 /*token*/;
- llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
- lastRunSize -= llAdd;
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
}
- DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
- ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
if (lastRunSize >= RUN_MASK) {
size_t accumulator = lastRunSize - RUN_MASK;
@@ -753,35 +753,35 @@ _last_literals:
}
memcpy(op, anchor, lastRunSize);
op += lastRunSize;
- }
+ }
- /* End */
+ /* End */
*srcSizePtr = (int) (((const char*)ip) - source);
- return (int) (((char*)op)-dest);
+ return (int) (((char*)op)-dest);
_dest_overflow:
if (limit == fillOutput) {
- /* Assumption : ip, anchor, ml and ref must be set correctly */
- size_t const ll = (size_t)(ip - anchor);
- size_t const ll_addbytes = (ll + 240) / 255;
- size_t const ll_totalCost = 1 + ll_addbytes + ll;
- BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
- DEBUGLOG(6, "Last sequence overflowing");
+ /* Assumption : ip, anchor, ml and ref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing");
op = optr; /* restore correct out pointer */
- if (op + ll_totalCost <= maxLitPos) {
- /* ll validated; now adjust match length */
- size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
- size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
- assert(maxMlSize < INT_MAX); assert(ml >= 0);
- if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
- if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
- LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
- } }
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ml >= 0);
+ if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
+ } }
goto _last_literals;
- }
- /* compression failed */
+ }
+ /* compression failed */
return 0;
-}
+}
static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
@@ -790,11 +790,11 @@ static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
int const nbSearches, size_t sufficient_len,
const limitedOutput_directive limit, int const fullUpdate,
const dictCtx_directive dict,
- const HCfavor_e favorDecSpeed);
+ const HCfavor_e favorDecSpeed);
LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
- LZ4HC_CCtx_internal* const ctx,
+ LZ4HC_CCtx_internal* const ctx,
const char* const src,
char* const dst,
int* const srcSizePtr,
@@ -802,12 +802,12 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
int cLevel,
const limitedOutput_directive limit,
const dictCtx_directive dict
- )
-{
+ )
+{
typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
typedef struct {
lz4hc_strat_e strat;
- int nbSearches;
+ int nbSearches;
U32 targetLength;
} cParams_t;
static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
@@ -826,8 +826,8 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
{ lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
};
- DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
- ctx, src, *srcSizePtr, limit);
+ DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ ctx, src, *srcSizePtr, limit);
if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
@@ -847,13 +847,13 @@ LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
assert(cParam.strat == lz4opt);
result = LZ4HC_compress_optimal(ctx,
src, dst, srcSizePtr, dstCapacity,
- cParam.nbSearches, cParam.targetLength, limit,
+ cParam.nbSearches, cParam.targetLength, limit,
cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
dict, favor);
- }
+ }
if (result <= 0) ctx->dirty = 1;
return result;
- }
+ }
}
static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);
@@ -883,7 +883,7 @@ LZ4HC_compress_generic_dictCtx (
int cLevel,
limitedOutput_directive limit
)
-{
+{
const size_t position = (size_t)(ctx->end - ctx->base) - ctx->lowLimit;
assert(ctx->dictCtx != NULL);
if (position >= 64 KB) {
@@ -922,12 +922,12 @@ int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
static size_t LZ4_streamHC_t_alignment(void)
{
-#if LZ4_ALIGN_TEST
- typedef struct { char c; LZ4_streamHC_t t; } t_a;
- return sizeof(t_a) - sizeof(LZ4_streamHC_t);
-#else
- return 1; /* effectively disabled */
-#endif
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_streamHC_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_streamHC_t);
+#else
+ return 1; /* effectively disabled */
+#endif
}
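The char-before-type struct in LZ4_streamHC_t_alignment() is the classic pre-C11 alignment probe: the compiler pads the struct so its second member starts on that member's alignment boundary, so the size difference equals the alignment requirement. The same trick applied to an ordinary type, purely as an assumed illustration rather than lz4 code:

    #include <stdio.h>

    /* The compiler places 't' at an offset equal to double's alignment, so
     * sizeof(align_probe) - sizeof(double) yields that alignment (typically 8). */
    typedef struct { char c; double t; } align_probe;

    int main(void)
    {
        printf("alignment of double = %u bytes\n",
               (unsigned)(sizeof(align_probe) - sizeof(double)));
        return 0;
    }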
/* state is presumed correctly initialized,
@@ -935,17 +935,17 @@ static size_t LZ4_streamHC_t_alignment(void)
int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
- if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
+ if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
LZ4HC_init_internal (ctx, (const BYTE*)src);
if (dstCapacity < LZ4_compressBound(srcSize))
return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
- else
+ else
return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
-}
+}
int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
-{
+{
LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
if (ctx==NULL) return 0; /* init failure */
return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
@@ -953,18 +953,18 @@ int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int src
int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
{
-#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
-#else
- LZ4_streamHC_t state;
- LZ4_streamHC_t* const statePtr = &state;
-#endif
+#else
+ LZ4_streamHC_t state;
+ LZ4_streamHC_t* const statePtr = &state;
+#endif
int const cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
-#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
FREEMEM(statePtr);
-#endif
- return cSize;
-}
+#endif
+ return cSize;
+}
/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
@@ -978,17 +978,17 @@ int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* s
-/**************************************
-* Streaming Functions
-**************************************/
-/* allocation */
+/**************************************
+* Streaming Functions
+**************************************/
+/* allocation */
LZ4_streamHC_t* LZ4_createStreamHC(void)
{
- LZ4_streamHC_t* const state =
- (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
- if (state == NULL) return NULL;
- LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
- return state;
+ LZ4_streamHC_t* const state =
+ (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
+ if (state == NULL) return NULL;
+ LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
+ return state;
}
int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
@@ -1001,25 +1001,25 @@ int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
-{
+{
LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
- /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
- LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
- DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
- /* check conditions */
+ /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
+ LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
+ DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
+ /* check conditions */
if (buffer == NULL) return NULL;
if (size < sizeof(LZ4_streamHC_t)) return NULL;
- if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
- /* init */
- { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
- MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
+ if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
+ /* init */
+ { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
+ MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
return LZ4_streamHCPtr;
-}
+}
/* just a stub */
void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
-{
+{
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
}
@@ -1061,91 +1061,91 @@ int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
const char* dictionary, int dictSize)
{
LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
- DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
+ DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
assert(LZ4_streamHCPtr != NULL);
- if (dictSize > 64 KB) {
+ if (dictSize > 64 KB) {
dictionary += (size_t)dictSize - 64 KB;
- dictSize = 64 KB;
- }
+ dictSize = 64 KB;
+ }
/* need a full initialization, there are bad side-effects when using resetFast() */
{ int const cLevel = ctxPtr->compressionLevel;
LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
}
LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
- ctxPtr->end = (const BYTE*)dictionary + dictSize;
+ ctxPtr->end = (const BYTE*)dictionary + dictSize;
if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
- return dictSize;
-}
+ return dictSize;
+}
void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) {
working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL;
}
-/* compression */
+/* compression */
-static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
-{
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
+{
DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4)
LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */
- /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
- ctxPtr->lowLimit = ctxPtr->dictLimit;
- ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
- ctxPtr->dictBase = ctxPtr->base;
- ctxPtr->base = newBlock - ctxPtr->dictLimit;
- ctxPtr->end = newBlock;
- ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */
+ /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
+ ctxPtr->dictBase = ctxPtr->base;
+ ctxPtr->base = newBlock - ctxPtr->dictLimit;
+ ctxPtr->end = newBlock;
+ ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */
/* cannot reference an extDict and a dictCtx at the same time */
ctxPtr->dictCtx = NULL;
-}
-
-static int
-LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
- const char* src, char* dst,
- int* srcSizePtr, int dstCapacity,
- limitedOutput_directive limit)
-{
+}
+
+static int
+LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* src, char* dst,
+ int* srcSizePtr, int dstCapacity,
+ limitedOutput_directive limit)
+{
LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
- DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
- LZ4_streamHCPtr, src, *srcSizePtr, limit);
+ DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ LZ4_streamHCPtr, src, *srcSizePtr, limit);
assert(ctxPtr != NULL);
- /* auto-init if forgotten */
+ /* auto-init if forgotten */
if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
- /* Check overflow */
- if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
- size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
- if (dictSize > 64 KB) dictSize = 64 KB;
- LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
- }
+ /* Check overflow */
+ if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
+ if (dictSize > 64 KB) dictSize = 64 KB;
+ LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
+ }
- /* Check if blocks follow each other */
+ /* Check if blocks follow each other */
if ((const BYTE*)src != ctxPtr->end)
LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);
- /* Check overlapping input/dictionary space */
+ /* Check overlapping input/dictionary space */
{ const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
- const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
- const BYTE* const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
+ const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
+ const BYTE* const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
- if (sourceEnd > dictEnd) sourceEnd = dictEnd;
- ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
- if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
- } }
+ if (sourceEnd > dictEnd) sourceEnd = dictEnd;
+ ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
+ } }
return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
-}
+}
int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
-{
+{
if (dstCapacity < LZ4_compressBound(srcSize))
return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
- else
+ else
return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited);
-}
+}
int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
{
@@ -1154,73 +1154,73 @@ int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const ch
-/* LZ4_saveDictHC :
- * save history content
- * into a user-provided buffer
- * which is then used to continue compression
- */
-int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
-{
- LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
- int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
- DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
- assert(prefixSize >= 0);
- if (dictSize > 64 KB) dictSize = 64 KB;
- if (dictSize < 4) dictSize = 0;
- if (dictSize > prefixSize) dictSize = prefixSize;
- if (safeBuffer == NULL) assert(dictSize == 0);
- if (dictSize > 0)
- memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
- { U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
- streamPtr->end = (const BYTE*)safeBuffer + dictSize;
- streamPtr->base = streamPtr->end - endIndex;
+/* LZ4_saveDictHC :
+ * save history content
+ * into a user-provided buffer
+ * which is then used to continue compression
+ */
+int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
+{
+ LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
+ int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
+ DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
+ assert(prefixSize >= 0);
+ if (dictSize > 64 KB) dictSize = 64 KB;
+ if (dictSize < 4) dictSize = 0;
+ if (dictSize > prefixSize) dictSize = prefixSize;
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0)
+ memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+ { U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
+ streamPtr->end = (const BYTE*)safeBuffer + dictSize;
+ streamPtr->base = streamPtr->end - endIndex;
streamPtr->dictLimit = endIndex - (U32)dictSize;
streamPtr->lowLimit = endIndex - (U32)dictSize;
- if (streamPtr->nextToUpdate < streamPtr->dictLimit)
- streamPtr->nextToUpdate = streamPtr->dictLimit;
- }
- return dictSize;
-}
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
+ }
+ return dictSize;
+}
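The call pattern LZ4_saveDictHC() supports looks roughly like the sketch below (assumes linkage against lz4hc; buffer sizes are arbitrary): once a block has been compressed and its source buffer is about to be recycled, the last 64 KB of history are parked in a caller-owned buffer so the next LZ4_compress_HC_continue() call can still reference it.

    #include <stdio.h>
    #include <string.h>
    #include "lz4hc.h"

    static LZ4_streamHC_t hcState;            /* ~256 KB, kept off the stack */
    static char history[64 * 1024];           /* stable home for the saved dictionary */

    int main(void)
    {
        char src[4096], dst[LZ4_COMPRESSBOUND(4096)];
        memset(src, 'a', sizeof(src));

        LZ4_initStreamHC(&hcState, sizeof(hcState));
        LZ4_setCompressionLevel(&hcState, LZ4HC_CLEVEL_DEFAULT);

        int c1 = LZ4_compress_HC_continue(&hcState, src, dst,
                                          (int)sizeof(src), (int)sizeof(dst));

        /* src is about to be reused: park the history in 'history' first. */
        int saved = LZ4_saveDictHC(&hcState, history, (int)sizeof(history));

        memset(src, 'b', sizeof(src));        /* reuse the source buffer */
        int c2 = LZ4_compress_HC_continue(&hcState, src, dst,
                                          (int)sizeof(src), (int)sizeof(dst));

        printf("block1=%d, saved=%d, block2=%d\n", c1, saved, c2);
        return 0;
    }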
/***************************************************
-* Deprecated Functions
+* Deprecated Functions
***************************************************/
-/* These functions currently generate deprecation warnings */
+/* These functions currently generate deprecation warnings */
/* Wrappers for deprecated compression functions */
-int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
-int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
-int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
-int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
-int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
-int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
-int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
-int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
-int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
-int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
+int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
+int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
-/* Deprecated streaming functions */
-int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }
+/* Deprecated streaming functions */
+int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }
/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
* @return : 0 on success, !=0 if error */
-int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
+int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
{
LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
if (hc4 == NULL) return 1; /* init failed */
LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
- return 0;
-}
+ return 0;
+}
void* LZ4_createHC (const char* inputBuffer)
-{
+{
LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
- if (hc4 == NULL) return NULL; /* not enough memory */
+ if (hc4 == NULL) return NULL; /* not enough memory */
LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
- return hc4;
+ return hc4;
}
int LZ4_freeHC (void* LZ4HC_Data)
@@ -1231,23 +1231,23 @@ int LZ4_freeHC (void* LZ4HC_Data)
}
int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
-{
+{
return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
-}
+}
int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
-{
+{
return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
-}
-
-char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
-{
+}
+
+char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
+{
LZ4_streamHC_t *ctx = (LZ4_streamHC_t*)LZ4HC_Data;
const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
/* avoid const char * -> char * conversion warning :( */
return (char *)(uptrval)bufferStart;
-}
+}
/* ================================================
@@ -1327,13 +1327,13 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
const dictCtx_directive dict,
const HCfavor_e favorDecSpeed)
{
- int retval = 0;
+ int retval = 0;
#define TRAILING_LITERALS 3
-#ifdef LZ4HC_HEAPMODE
- LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
-#else
+#ifdef LZ4HC_HEAPMODE
+ LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
+#else
LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
-#endif
+#endif
const BYTE* ip = (const BYTE*) source;
const BYTE* anchor = ip;
@@ -1343,13 +1343,13 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
BYTE* op = (BYTE*) dst;
BYTE* opSaved = (BYTE*) dst;
BYTE* oend = op + dstCapacity;
- int ovml = MINMATCH; /* overflow - last sequence */
- const BYTE* ovref = NULL;
+ int ovml = MINMATCH; /* overflow - last sequence */
+ const BYTE* ovref = NULL;
/* init */
-#ifdef LZ4HC_HEAPMODE
- if (opt == NULL) goto _return_label;
-#endif
+#ifdef LZ4HC_HEAPMODE
+ if (opt == NULL) goto _return_label;
+#endif
DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
*srcSizePtr = 0;
if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
@@ -1369,11 +1369,11 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
int const firstML = firstMatch.len;
const BYTE* const matchPos = ip - firstMatch.off;
opSaved = op;
- if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
- ovml = firstML;
- ovref = matchPos;
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = firstML;
+ ovref = matchPos;
goto _dest_overflow;
- }
+ }
continue;
}
@@ -1515,7 +1515,7 @@ static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
best_off = opt[last_match_pos].off;
cur = last_match_pos - best_mlen;
-encode: /* cur, last_match_pos, best_mlen, best_off must be set */
+encode: /* cur, last_match_pos, best_mlen, best_off must be set */
assert(cur < LZ4_OPT_NUM);
assert(last_match_pos >= 1); /* == 1 when only one candidate */
DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
@@ -1545,31 +1545,31 @@ encode: /* cur, last_match_pos, best_mlen, best_off must be set */
assert(ml >= MINMATCH);
assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
opSaved = op;
- if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
- ovml = ml;
- ovref = ip - offset;
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = ml;
+ ovref = ip - offset;
goto _dest_overflow;
- } } }
+ } } }
} /* while (ip <= mflimit) */
-_last_literals:
+_last_literals:
/* Encode Last Literals */
{ size_t lastRunSize = (size_t)(iend - anchor); /* literals */
- size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
- size_t const totalSize = 1 + llAdd + lastRunSize;
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
if (limit && (op + totalSize > oend)) {
- if (limit == limitedOutput) { /* Check output limit */
- retval = 0;
- goto _return_label;
- }
+ if (limit == limitedOutput) { /* Check output limit */
+ retval = 0;
+ goto _return_label;
+ }
/* adapt lastRunSize to fill 'dst' */
- lastRunSize = (size_t)(oend - op) - 1 /*token*/;
- llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
- lastRunSize -= llAdd;
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
}
- DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
- ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
if (lastRunSize >= RUN_MASK) {
size_t accumulator = lastRunSize - RUN_MASK;
@@ -1585,35 +1585,35 @@ _last_literals:
/* End */
*srcSizePtr = (int) (((const char*)ip) - source);
- retval = (int) ((char*)op-dst);
- goto _return_label;
-
-_dest_overflow:
-if (limit == fillOutput) {
- /* Assumption : ip, anchor, ovml and ovref must be set correctly */
- size_t const ll = (size_t)(ip - anchor);
- size_t const ll_addbytes = (ll + 240) / 255;
- size_t const ll_totalCost = 1 + ll_addbytes + ll;
- BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
- DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
- op = opSaved; /* restore correct out pointer */
- if (op + ll_totalCost <= maxLitPos) {
- /* ll validated; now adjust match length */
- size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
- size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
- assert(maxMlSize < INT_MAX); assert(ovml >= 0);
- if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
- if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
- DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
- DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
- LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
- DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
- } }
- goto _last_literals;
-}
-_return_label:
-#ifdef LZ4HC_HEAPMODE
- FREEMEM(opt);
-#endif
- return retval;
-}
+ retval = (int) ((char*)op-dst);
+ goto _return_label;
+
+_dest_overflow:
+if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ovml and ovref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
+ op = opSaved; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ovml >= 0);
+ if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
+ DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
+ DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
+ DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
+ } }
+ goto _last_literals;
+}
+_return_label:
+#ifdef LZ4HC_HEAPMODE
+ FREEMEM(opt);
+#endif
+ return retval;
+}
diff --git a/contrib/libs/lz4/lz4hc.h b/contrib/libs/lz4/lz4hc.h
index adc07ffe5b..3d441fb6fa 100644
--- a/contrib/libs/lz4/lz4hc.h
+++ b/contrib/libs/lz4/lz4hc.h
@@ -28,58 +28,58 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- - LZ4 source repository : https://github.com/lz4/lz4
- - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
-#ifndef LZ4_HC_H_19834876238432
-#define LZ4_HC_H_19834876238432
+#ifndef LZ4_HC_H_19834876238432
+#define LZ4_HC_H_19834876238432
#if defined (__cplusplus)
extern "C" {
#endif
-/* --- Dependency --- */
+/* --- Dependency --- */
/* note : lz4hc requires lz4.h/lz4.c for compilation */
-#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
+#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
-/* --- Useful constants --- */
-#define LZ4HC_CLEVEL_MIN 3
-#define LZ4HC_CLEVEL_DEFAULT 9
+/* --- Useful constants --- */
+#define LZ4HC_CLEVEL_MIN 3
+#define LZ4HC_CLEVEL_DEFAULT 9
#define LZ4HC_CLEVEL_OPT_MIN 10
-#define LZ4HC_CLEVEL_MAX 12
-
-
-/*-************************************
- * Block Compression
- **************************************/
-/*! LZ4_compress_HC() :
+#define LZ4HC_CLEVEL_MAX 12
+
+
+/*-************************************
+ * Block Compression
+ **************************************/
+/*! LZ4_compress_HC() :
* Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm.
- * `dst` must be already allocated.
+ * `dst` must be already allocated.
* Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
* Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
* `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work.
* Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
- * @return : the number of bytes written into 'dst'
- * or 0 if compression fails.
- */
-LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
-
-
-/* Note :
- * Decompression functions are provided within "lz4.h" (BSD license)
- */
-
-
-/*! LZ4_compress_HC_extStateHC() :
+ * @return : the number of bytes written into 'dst'
+ * or 0 if compression fails.
+ */
+LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
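A minimal one-shot use of this entry point might look as follows (a sketch; error handling reduced to a single check):

    #include <stdio.h>
    #include "lz4hc.h"

    int main(void)
    {
        const char src[] = "yet another block of text, text, text to squeeze";
        char dst[LZ4_COMPRESSBOUND(sizeof(src))];   /* worst-case output size */

        int written = LZ4_compress_HC(src, dst, (int)sizeof(src), (int)sizeof(dst),
                                      LZ4HC_CLEVEL_MAX);
        if (written == 0) {
            fprintf(stderr, "compression failed\n");
            return 1;
        }
        printf("%d -> %d bytes\n", (int)sizeof(src), written);
        return 0;
    }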
+
+
+/* Note :
+ * Decompression functions are provided within "lz4.h" (BSD license)
+ */
+
+
+/*! LZ4_compress_HC_extStateHC() :
* Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
- * `state` size is provided by LZ4_sizeofStateHC().
+ * `state` size is provided by LZ4_sizeofStateHC().
* Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly).
- */
-LZ4LIB_API int LZ4_sizeofStateHC(void);
+ */
+LZ4LIB_API int LZ4_sizeofStateHC(void);
LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);
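When the roughly 256 KB of working state should live on the heap or in a caller-managed arena rather than inside the library, the pattern is the following sketch (assumes malloc() returns 8-byte-aligned memory, which it does on common platforms):

    #include <stdio.h>
    #include <stdlib.h>
    #include "lz4hc.h"

    int main(void)
    {
        const char src[] = "externally managed state example, example, example";
        char dst[LZ4_COMPRESSBOUND(sizeof(src))];

        void* state = malloc((size_t)LZ4_sizeofStateHC());  /* caller owns the state */
        if (state == NULL) return 1;

        int written = LZ4_compress_HC_extStateHC(state, src, dst,
                                                 (int)sizeof(src), (int)sizeof(dst),
                                                 LZ4HC_CLEVEL_DEFAULT);
        free(state);
        printf("compressed to %d bytes\n", written);
        return written > 0 ? 0 : 1;
    }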
-
-
+
+
/*! LZ4_compress_HC_destSize() : v1.9.0+
* Will compress as much data as possible from `src`
* to fit into `targetDstSize` budget.
@@ -94,36 +94,36 @@ LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC,
int compressionLevel);
-/*-************************************
- * Streaming Compression
- * Bufferless synchronous API
- **************************************/
- typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */
-
-/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
+/*-************************************
+ * Streaming Compression
+ * Bufferless synchronous API
+ **************************************/
+ typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */
+
+/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
* These functions create and release memory for LZ4 HC streaming state.
* Newly created states are automatically initialized.
* A same state can be used multiple times consecutively,
* starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
- */
-LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
-LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
-
+ */
+LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
+LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
+
/*
These functions compress data in successive blocks of any size,
using previous blocks as dictionary, to improve compression ratio.
- One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
- There is an exception for ring buffers, which can be smaller than 64 KB.
+ One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
+ There is an exception for ring buffers, which can be smaller than 64 KB.
Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue().
-
+
Before starting compression, state must be allocated and properly initialized.
LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT.
-
+
Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream)
or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental).
LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once,
which is automatically the case when state is created using LZ4_createStreamHC().
-
+
After reset, a first "fictional block" can be designated as initial dictionary,
using LZ4_loadDictHC() (Optional).
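Put together, the smallest block-streaming loop described above looks roughly like this sketch (the input is kept in one contiguous, resident buffer so earlier blocks stay readable as dictionary):

    #include <stdio.h>
    #include <string.h>
    #include "lz4hc.h"

    int main(void)
    {
        static char input[8192];                  /* two 4 KB blocks, kept resident */
        char dst[LZ4_COMPRESSBOUND(4096)];
        memset(input, 'x', sizeof(input));

        LZ4_streamHC_t* stream = LZ4_createStreamHC();
        if (stream == NULL) return 1;
        LZ4_resetStreamHC_fast(stream, LZ4HC_CLEVEL_DEFAULT);   /* pick the level */

        for (int block = 0; block < 2; block++) {
            const char* src = input + block * 4096;
            int written = LZ4_compress_HC_continue(stream, src, dst, 4096, (int)sizeof(dst));
            printf("block %d: %d bytes\n", block, written);
            /* dst would normally be flushed here; the next call may reference
             * up to 64 KB of previously compressed 'input' as its dictionary. */
        }

        LZ4_freeStreamHC(stream);
        return 0;
    }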
@@ -158,7 +158,7 @@ LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* diction
LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr,
const char* src, char* dst,
int srcSize, int maxDstSize);
-
+
/*! LZ4_compress_HC_continue_destSize() : v1.9.0+
* Similar to LZ4_compress_HC_continue(),
* but will read as much data as possible from `src`
@@ -182,7 +182,7 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
***********************************************/
/*-******************************************************************
- * PRIVATE DEFINITIONS :
+ * PRIVATE DEFINITIONS :
* Do not use these definitions directly.
* They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
* Declare an `LZ4_streamHC_t` directly, rather than any type below.
@@ -190,43 +190,43 @@ LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, in
********************************************************************/
#define LZ4HC_DICTIONARY_LOGSIZE 16
-#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
-#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
-
-#define LZ4HC_HASH_LOG 15
-#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
-#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
-
-
+#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
+#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
+
+#define LZ4HC_HASH_LOG 15
+#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
+#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
+
+
typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
struct LZ4HC_CCtx_internal
-{
- LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
- LZ4_u16 chainTable[LZ4HC_MAXD];
- const LZ4_byte* end; /* next block here to continue on current prefix */
- const LZ4_byte* base; /* All index relative to this position */
- const LZ4_byte* dictBase; /* alternate base for extDict */
- LZ4_u32 dictLimit; /* below that point, need extDict */
- LZ4_u32 lowLimit; /* below that point, no more dict */
- LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
- short compressionLevel;
- LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
- otherwise, favor compression ratio */
- LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
+{
+ LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
+ LZ4_u16 chainTable[LZ4HC_MAXD];
+ const LZ4_byte* end; /* next block here to continue on current prefix */
+ const LZ4_byte* base; /* All index relative to this position */
+ const LZ4_byte* dictBase; /* alternate base for extDict */
+ LZ4_u32 dictLimit; /* below that point, need extDict */
+ LZ4_u32 lowLimit; /* below that point, no more dict */
+ LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
+ short compressionLevel;
+ LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
+ otherwise, favor compression ratio */
+ LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
const LZ4HC_CCtx_internal* dictCtx;
};
-
-
+
+
/* Do not use these definitions directly !
* Declare or allocate an LZ4_streamHC_t instead.
*/
-#define LZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
-#define LZ4_STREAMHCSIZE_VOIDP (LZ4_STREAMHCSIZE / sizeof(void*))
-union LZ4_streamHC_u {
- void* table[LZ4_STREAMHCSIZE_VOIDP];
- LZ4HC_CCtx_internal internal_donotuse;
+#define LZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
+#define LZ4_STREAMHCSIZE_VOIDP (LZ4_STREAMHCSIZE / sizeof(void*))
+union LZ4_streamHC_u {
+ void* table[LZ4_STREAMHCSIZE_VOIDP];
+ LZ4HC_CCtx_internal internal_donotuse;
}; /* previously typedef'd to LZ4_streamHC_t */
-
+
/* LZ4_streamHC_t :
* This structure allows static allocation of LZ4 HC streaming state.
 * This can be used to allocate statically, on stack, or as part of a larger structure.
@@ -247,12 +247,12 @@ union LZ4_streamHC_u {
LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size);
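Static allocation, as opposed to LZ4_createStreamHC(), might then look like the following sketch (the seed dictionary and buffer sizes are arbitrary):

    #include <stdio.h>
    #include <string.h>
    #include "lz4hc.h"

    /* ~256 KB of state in the data segment instead of heap or stack. */
    static LZ4_streamHC_t g_hcState;

    int main(void)
    {
        static const char dict[] = "a small seed dictionary, dictionary, dictionary";
        char src[256], dst[LZ4_COMPRESSBOUND(256)];
        memset(src, 'd', sizeof(src));

        /* Mandatory before first use of a statically declared state. */
        if (LZ4_initStreamHC(&g_hcState, sizeof(g_hcState)) == NULL) return 1;
        LZ4_setCompressionLevel(&g_hcState, LZ4HC_CLEVEL_OPT_MIN);
        LZ4_loadDictHC(&g_hcState, dict, (int)(sizeof(dict) - 1));   /* optional seed */

        int written = LZ4_compress_HC_continue(&g_hcState, src, dst,
                                               (int)sizeof(src), (int)sizeof(dst));
        printf("compressed to %d bytes\n", written);
        return 0;
    }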
-/*-************************************
-* Deprecated Functions
-**************************************/
-/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
-
-/* deprecated compression functions */
+/*-************************************
+* Deprecated Functions
+**************************************/
+/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
+
+/* deprecated compression functions */
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC (const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
@@ -263,7 +263,7 @@ LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_co
LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
-
+
/* Obsolete streaming functions; degraded functionality; do not use!
*
* In order to perform streaming compression, these functions depended on data
@@ -279,8 +279,8 @@ LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_comp
LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void);
LZ4_DEPRECATED("use LZ4_initStreamHC() instead") LZ4LIB_API int LZ4_resetStreamStateHC(void* state, char* inputBuffer);
-
-
+
+
/* LZ4_resetStreamHC() is now replaced by LZ4_initStreamHC().
* The intention is to emphasize the difference with LZ4_resetStreamHC_fast(),
* which is now the recommended function to start a new stream of blocks,
@@ -296,7 +296,7 @@ LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionL
}
#endif
-#endif /* LZ4_HC_H_19834876238432 */
+#endif /* LZ4_HC_H_19834876238432 */
/*-**************************************************
diff --git a/contrib/libs/lz4/ya.make b/contrib/libs/lz4/ya.make
index 464abc68a2..282dfe3920 100644
--- a/contrib/libs/lz4/ya.make
+++ b/contrib/libs/lz4/ya.make
@@ -1,36 +1,36 @@
-# Generated by devtools/yamaker from nixpkgs 5852a21819542e6809f68ba5a798600e69874e76.
+# Generated by devtools/yamaker from nixpkgs 5852a21819542e6809f68ba5a798600e69874e76.
+
+LIBRARY()
-LIBRARY()
-
OWNER(
orivej
g:cpp-contrib
)
-
-VERSION(1.9.3)
-
-ORIGINAL_SOURCE(https://github.com/lz4/lz4/archive/v1.9.3.tar.gz)
-
+
+VERSION(1.9.3)
+
+ORIGINAL_SOURCE(https://github.com/lz4/lz4/archive/v1.9.3.tar.gz)
+
LICENSE(
BSD-2-Clause AND
BSD-3-Clause
)
-
+
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-PEERDIR(
- contrib/libs/xxhash
-)
+PEERDIR(
+ contrib/libs/xxhash
+)
ADDINCL(
contrib/libs/xxhash
)
NO_RUNTIME()
-
+
SRCS(
lz4.c
- lz4frame.c
+ lz4frame.c
lz4hc.c
)