| field | value | date |
|---|---|---|
| author | Devtools Arcadia <arcadia-devtools@yandex-team.ru> | 2022-02-07 18:08:42 +0300 |
| committer | Devtools Arcadia <arcadia-devtools@mous.vla.yp-c.yandex.net> | 2022-02-07 18:08:42 +0300 |
| commit | 1110808a9d39d4b808aef724c861a2e1a38d2a69 (patch) | |
| tree | e26c9fed0de5d9873cce7e00bc214573dc2195b7 /contrib/libs/lzmasdk | |
| download | ydb-1110808a9d39d4b808aef724c861a2e1a38d2a69.tar.gz | |
intermediate changes
ref:cde9a383711a11544ce7e107a78147fb96cc4029
Diffstat (limited to 'contrib/libs/lzmasdk')
39 files changed, 11323 insertions, 0 deletions
diff --git a/contrib/libs/lzmasdk/.yandex_meta/devtools.copyrights.report b/contrib/libs/lzmasdk/.yandex_meta/devtools.copyrights.report new file mode 100644 index 0000000000..43d5bf79b0 --- /dev/null +++ b/contrib/libs/lzmasdk/.yandex_meta/devtools.copyrights.report @@ -0,0 +1,40 @@ +# File format ($ symbol means the beginning of a line): +# +# $ # this message +# $ # ======================= +# $ # comments (all commentaries should starts with some number of spaces and # symbol) +# ${action} {license id} {license text hash} +# $BELONGS ./ya/make/file/relative/path/1/ya.make ./ya/make/2/ya.make +# ${all_file_action} filename +# $ # user commentaries (many lines) +# $ generated description - files with this license, license text... (some number of lines that starts with some number of spaces, do not modify) +# ${action} {license spdx} {license text hash} +# $BELONGS ./ya/make/file/relative/path/3/ya.make +# ${all_file_action} filename +# $ # user commentaries +# $ generated description +# $ ... +# +# You can modify action, all_file_action and add commentaries +# Available actions: +# keep - keep license in contrib and use in credits +# skip - skip license +# remove - remove all files with this license +# rename - save license text/links into licenses texts file, but not store SPDX into LINCENSE macro. You should store correct license id into devtools.license.spdx.txt file +# +# {all file action} records will be generated when license text contains filename that exists on filesystem (in contrib directory) +# We suppose that that files can contain some license info +# Available all file actions: +# FILE_IGNORE - ignore file (do nothing) +# FILE_INCLUDE - include all file data into licenses text file +# ======================= + +KEEP COPYRIGHT_SERVICE_LABEL 353136eae3b9f17d8f32a58e555936c0 +BELONGS ya.make + Note: matched license text is too long. Read it in the source files. + Scancode info: + Original SPDX id: COPYRIGHT_SERVICE_LABEL + Score : 100.00 + Match type : COPYRIGHT + Files with this license: + 7zVersion.h [14:18] diff --git a/contrib/libs/lzmasdk/.yandex_meta/devtools.licenses.report b/contrib/libs/lzmasdk/.yandex_meta/devtools.licenses.report new file mode 100644 index 0000000000..59efdc65ba --- /dev/null +++ b/contrib/libs/lzmasdk/.yandex_meta/devtools.licenses.report @@ -0,0 +1,352 @@ +# File format ($ symbol means the beginning of a line): +# +# $ # this message +# $ # ======================= +# $ # comments (all commentaries should starts with some number of spaces and # symbol) +# ${action} {license spdx} {license text hash} +# $BELONGS ./ya/make/file/relative/path/1/ya.make ./ya/make/2/ya.make +# ${all_file_action} filename +# $ # user commentaries (many lines) +# $ generated description - files with this license, license text... (some number of lines that starts with some number of spaces, do not modify) +# ${action} {license spdx} {license text hash} +# $BELONGS ./ya/make/file/relative/path/3/ya.make +# ${all_file_action} filename +# $ # user commentaries +# $ generated description +# $ ... +# +# You can modify action, all_file_action and add commentaries +# Available actions: +# keep - keep license in contrib and use in credits +# skip - skip license +# remove - remove all files with this license +# rename - save license text/links into licenses texts file, but not store SPDX into LINCENSE macro. 
You should store correct license id into devtools.license.spdx.txt file +# +# {all file action} records will be generated when license text contains filename that exists on filesystem (in contrib directory) +# We suppose that that files can contain some license info +# Available all file actions: +# FILE_IGNORE - ignore file (do nothing) +# FILE_INCLUDE - include all file data into licenses text file +# ======================= + +KEEP Public-Domain 04636d34057aae671b898fdb26e68510 +BELONGS ya.make + License text: + 2015-04-12 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + LzHash.h [2:2] + +KEEP Public-Domain 0aed83a65c19a3e1b47e908a0fa75ba1 +BELONGS ya.make + License text: + 2013-01-18 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Aes.h [2:2] + Bra.h [2:2] + LzmaLib.h [2:2] + Sha256.h [2:2] + +KEEP Public-Domain 0e7cb61166020e26a2a42381c4a4c38d +BELONGS ya.make + License text: + 2017-04-03 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + 7zStream.c [2:2] + Bra86.c [2:2] + Compiler.h [2:2] + +KEEP Public-Domain 18df2e7d8b1a85aa38e94747108fb527 +BELONGS ya.make + License text: + This code is based on public domain code from Wei Dai's Crypto++ library. 
*/ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 100.00 + Match type : TEXT + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Sha256.c [3:3] + +KEEP Public-Domain 2800998f7603fb93d1739854f8196681 +BELONGS ya.make + License text: + 2018-07-04 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Lzma2Enc.c [2:2] + LzmaDec.c [2:2] + MtCoder.h [2:2] + MtDec.h [2:2] + +KEEP Public-Domain 46591dcb4235f3087ae793dcad791a91 +BELONGS ya.make + License text: + 2018-04-21 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + LzmaDec.h [2:2] + +KEEP Public-Domain 4a79ac86eb0da487f5026379369b4fd4 +BELONGS ya.make + License text: + 2018-08-04 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + 7zTypes.h [2:2] + +KEEP Public-Domain 4d14825ab215779911dd4cd8b6173355 +BELONGS ya.make + License text: + 2019-02-02 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Lzma2Dec.c [2:2] + +KEEP Public-Domain 51cd5f5fb1bdabfcf5236fa2056c1bdf +BELONGS ya.make + License text: + 2017-06-18 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Threads.h [2:2] + +KEEP Public-Domain 5f4e45838ce681ed993f92a94d13384e +BELONGS ya.make + License text: + 2013-11-12 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Precomp.h [2:2] + +KEEP Public-Domain 631cd577186dc088968fe5d6f0cd1aea +BELONGS ya.make + License text: + 2018-02-18 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + CpuArch.h [2:2] + +KEEP Public-Domain 
6f687449edb75bf3332d8fd6ea7d19b3 +BELONGS ya.make + License text: + 2017-06-08 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + AesOpt.c [2:2] + +KEEP Public-Domain 74632f5e094626cb958b43aa0957e93e +BELONGS ya.make + License text: + 2015-03-25 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + RotateDefs.h [2:2] + +KEEP Public-Domain 75e7c3f4b8563147faf0fe41c99da6d3 +BELONGS ya.make + License text: + 2018-02-19 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Alloc.h [2:2] + Lzma2Dec.h [2:2] + +KEEP Public-Domain 770be35f25b6f9a8f738f2fd30dee419 +BELONGS ya.make + License text: + 2018-07-08 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + LzFind.c [2:2] + +KEEP Public-Domain 7fe452df5f3dd08e2ceac635d4020633 +BELONGS ya.make + License text: + 2015-06-13 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + LzmaLib.c [2:2] + +KEEP Public-Domain 91b3246be8d263264473404e339830e4 +BELONGS ya.make + License text: + 2018-04-27 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Alloc.c [2:2] + +KEEP Public-Domain 953f61593389789718f0032a1b336922 +BELONGS ya.make + License text: + 2017-07-27 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Lzma2Enc.h [2:2] + LzmaEnc.h [2:2] + +KEEP Public-Domain 9b93494cce76495f743e046ce3941c9f +BELONGS ya.make + License text: + 2017-04-04 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + 
Files with this license: + Bra.c [2:2] + +KEEP Public-Domain a48d0b47cec78672882da6dbf3ad5267 +BELONGS ya.make + License text: + 2018-02-18: Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + CpuArch.c [2:2] + +KEEP Public-Domain a54a8f74651a577cf383bf0e98c73ace +BELONGS ya.make + License text: + 2017-04-03 : Igor Pavlov : Public domain + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Sha256.c [2:2] + +KEEP Public-Domain b3e18b49d837655739c8df698ec4e6c3 +BELONGS ya.make + License text: + \#define MY_COPYRIGHT_PD "Igor Pavlov : Public domain" + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + 7zVersion.h [17:17] + +KEEP Public-Domain ccc9a71dea8957155d5ddb6f4d87e9ff +BELONGS ya.make + License text: + 2017-01-26 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + BraIA64.c [2:2] + +KEEP Public-Domain df6749ce0784a2967c71af5e751aec05 +BELONGS ya.make + License text: + 2017-06-10 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + LzFind.h [2:2] + +KEEP Public-Domain e69c1a7abd3426fe97be3df39ddadbe0 +BELONGS ya.make + License text: + 2019-01-10: Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + LzmaEnc.c [2:2] + +KEEP Public-Domain f5d444f3ae375c6698ad024852693a7e +BELONGS ya.make + License text: + 2017-01-24 : Igor Pavlov : Public domain */ + Scancode info: + Original SPDX id: LicenseRef-scancode-public-domain + Score : 70.00 + Match type : REFERENCE + Links : http://www.linfo.org/publicdomain.html, https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/public-domain.LICENSE + Files with this license: + Aes.c [2:2] diff --git a/contrib/libs/lzmasdk/.yandex_meta/licenses.list.txt b/contrib/libs/lzmasdk/.yandex_meta/licenses.list.txt new file mode 100644 index 0000000000..aa86f84685 --- /dev/null +++ b/contrib/libs/lzmasdk/.yandex_meta/licenses.list.txt @@ -0,0 +1,110 @@ +====================COPYRIGHT==================== +#undef MY_COPYRIGHT +#undef MY_VERSION_COPYRIGHT_DATE +#define 
MY_AUTHOR_NAME "Igor Pavlov" +#define MY_COPYRIGHT_PD "Igor Pavlov : Public domain" +#define MY_COPYRIGHT_CR "Copyright (c) 1999-2018 Igor Pavlov" + + +====================Public-Domain==================== +#define MY_COPYRIGHT_PD "Igor Pavlov : Public domain" + + +====================Public-Domain==================== +2013-01-18 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2013-11-12 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2015-03-25 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2015-04-12 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2015-06-13 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2017-01-24 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2017-01-26 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2017-04-03 : Igor Pavlov : Public domain + + +====================Public-Domain==================== +2017-04-03 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2017-04-04 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2017-06-08 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2017-06-10 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2017-06-18 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2017-07-27 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2018-02-18 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2018-02-18: Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2018-02-19 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2018-04-21 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2018-04-27 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2018-07-04 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2018-07-08 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2018-08-04 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2019-01-10: Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +2019-02-02 : Igor Pavlov : Public domain */ + + +====================Public-Domain==================== +This code is based on public domain code from Wei Dai's Crypto++ library. */ diff --git a/contrib/libs/lzmasdk/7zStream.c b/contrib/libs/lzmasdk/7zStream.c new file mode 100644 index 0000000000..579741fadc --- /dev/null +++ b/contrib/libs/lzmasdk/7zStream.c @@ -0,0 +1,176 @@ +/* 7zStream.c -- 7z Stream functions
+2017-04-03 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+#include "7zTypes.h"
+
+SRes SeqInStream_Read2(const ISeqInStream *stream, void *buf, size_t size, SRes errorType)
+{
+ while (size != 0)
+ {
+ size_t processed = size;
+ RINOK(ISeqInStream_Read(stream, buf, &processed));
+ if (processed == 0)
+ return errorType;
+ buf = (void *)((Byte *)buf + processed);
+ size -= processed;
+ }
+ return SZ_OK;
+}
+
+SRes SeqInStream_Read(const ISeqInStream *stream, void *buf, size_t size)
+{
+ return SeqInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF);
+}
+
+SRes SeqInStream_ReadByte(const ISeqInStream *stream, Byte *buf)
+{
+ size_t processed = 1;
+ RINOK(ISeqInStream_Read(stream, buf, &processed));
+ return (processed == 1) ? SZ_OK : SZ_ERROR_INPUT_EOF;
+}
+
+
+
+SRes LookInStream_SeekTo(const ILookInStream *stream, UInt64 offset)
+{
+ Int64 t = offset;
+ return ILookInStream_Seek(stream, &t, SZ_SEEK_SET);
+}
+
+SRes LookInStream_LookRead(const ILookInStream *stream, void *buf, size_t *size)
+{
+ const void *lookBuf;
+ if (*size == 0)
+ return SZ_OK;
+ RINOK(ILookInStream_Look(stream, &lookBuf, size));
+ memcpy(buf, lookBuf, *size);
+ return ILookInStream_Skip(stream, *size);
+}
+
+SRes LookInStream_Read2(const ILookInStream *stream, void *buf, size_t size, SRes errorType)
+{
+ while (size != 0)
+ {
+ size_t processed = size;
+ RINOK(ILookInStream_Read(stream, buf, &processed));
+ if (processed == 0)
+ return errorType;
+ buf = (void *)((Byte *)buf + processed);
+ size -= processed;
+ }
+ return SZ_OK;
+}
+
+SRes LookInStream_Read(const ILookInStream *stream, void *buf, size_t size)
+{
+ return LookInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF);
+}
+
+
+
+#define GET_LookToRead2 CLookToRead2 *p = CONTAINER_FROM_VTBL(pp, CLookToRead2, vt);
+
+static SRes LookToRead2_Look_Lookahead(const ILookInStream *pp, const void **buf, size_t *size)
+{
+ SRes res = SZ_OK;
+ GET_LookToRead2
+ size_t size2 = p->size - p->pos;
+ if (size2 == 0 && *size != 0)
+ {
+ p->pos = 0;
+ p->size = 0;
+ size2 = p->bufSize;
+ res = ISeekInStream_Read(p->realStream, p->buf, &size2);
+ p->size = size2;
+ }
+ if (*size > size2)
+ *size = size2;
+ *buf = p->buf + p->pos;
+ return res;
+}
+
+static SRes LookToRead2_Look_Exact(const ILookInStream *pp, const void **buf, size_t *size)
+{
+ SRes res = SZ_OK;
+ GET_LookToRead2
+ size_t size2 = p->size - p->pos;
+ if (size2 == 0 && *size != 0)
+ {
+ p->pos = 0;
+ p->size = 0;
+ if (*size > p->bufSize)
+ *size = p->bufSize;
+ res = ISeekInStream_Read(p->realStream, p->buf, size);
+ size2 = p->size = *size;
+ }
+ if (*size > size2)
+ *size = size2;
+ *buf = p->buf + p->pos;
+ return res;
+}
+
+static SRes LookToRead2_Skip(const ILookInStream *pp, size_t offset)
+{
+ GET_LookToRead2
+ p->pos += offset;
+ return SZ_OK;
+}
+
+static SRes LookToRead2_Read(const ILookInStream *pp, void *buf, size_t *size)
+{
+ GET_LookToRead2
+ size_t rem = p->size - p->pos;
+ if (rem == 0)
+ return ISeekInStream_Read(p->realStream, buf, size);
+ if (rem > *size)
+ rem = *size;
+ memcpy(buf, p->buf + p->pos, rem);
+ p->pos += rem;
+ *size = rem;
+ return SZ_OK;
+}
+
+static SRes LookToRead2_Seek(const ILookInStream *pp, Int64 *pos, ESzSeek origin)
+{
+ GET_LookToRead2
+ p->pos = p->size = 0;
+ return ISeekInStream_Seek(p->realStream, pos, origin);
+}
+
+void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead)
+{
+ p->vt.Look = lookahead ?
+ LookToRead2_Look_Lookahead :
+ LookToRead2_Look_Exact;
+ p->vt.Skip = LookToRead2_Skip;
+ p->vt.Read = LookToRead2_Read;
+ p->vt.Seek = LookToRead2_Seek;
+}
+
+
+
+static SRes SecToLook_Read(const ISeqInStream *pp, void *buf, size_t *size)
+{
+ CSecToLook *p = CONTAINER_FROM_VTBL(pp, CSecToLook, vt);
+ return LookInStream_LookRead(p->realStream, buf, size);
+}
+
+void SecToLook_CreateVTable(CSecToLook *p)
+{
+ p->vt.Read = SecToLook_Read;
+}
+
+static SRes SecToRead_Read(const ISeqInStream *pp, void *buf, size_t *size)
+{
+ CSecToRead *p = CONTAINER_FROM_VTBL(pp, CSecToRead, vt);
+ return ILookInStream_Read(p->realStream, buf, size);
+}
+
+void SecToRead_CreateVTable(CSecToRead *p)
+{
+ p->vt.Read = SecToRead_Read;
+}
diff --git a/contrib/libs/lzmasdk/7zTypes.h b/contrib/libs/lzmasdk/7zTypes.h
new file mode 100644
index 0000000000..593f5aa259
--- /dev/null
+++ b/contrib/libs/lzmasdk/7zTypes.h
@@ -0,0 +1,375 @@
+/* 7zTypes.h -- Basic types
+2018-08-04 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_TYPES_H
+#define __7Z_TYPES_H
+
+#ifdef _WIN32
+/* #include <windows.h> */
+#endif
+
+#include <stddef.h>
+
+#ifndef EXTERN_C_BEGIN
+#ifdef __cplusplus
+#define EXTERN_C_BEGIN extern "C" {
+#define EXTERN_C_END }
+#else
+#define EXTERN_C_BEGIN
+#define EXTERN_C_END
+#endif
+#endif
+
+EXTERN_C_BEGIN
+
+#define SZ_OK 0
+
+#define SZ_ERROR_DATA 1
+#define SZ_ERROR_MEM 2
+#define SZ_ERROR_CRC 3
+#define SZ_ERROR_UNSUPPORTED 4
+#define SZ_ERROR_PARAM 5
+#define SZ_ERROR_INPUT_EOF 6
+#define SZ_ERROR_OUTPUT_EOF 7
+#define SZ_ERROR_READ 8
+#define SZ_ERROR_WRITE 9
+#define SZ_ERROR_PROGRESS 10
+#define SZ_ERROR_FAIL 11
+#define SZ_ERROR_THREAD 12
+
+#define SZ_ERROR_ARCHIVE 16
+#define SZ_ERROR_NO_ARCHIVE 17
+
+typedef int SRes;
+
+
+#ifdef _WIN32
+
+/* typedef DWORD WRes; */
+typedef unsigned WRes;
+#define MY_SRes_HRESULT_FROM_WRes(x) HRESULT_FROM_WIN32(x)
+
+#else
+
+typedef int WRes;
+#define MY__FACILITY_WIN32 7
+#define MY__FACILITY__WRes MY__FACILITY_WIN32
+#define MY_SRes_HRESULT_FROM_WRes(x) ((HRESULT)(x) <= 0 ? ((HRESULT)(x)) : ((HRESULT) (((x) & 0x0000FFFF) | (MY__FACILITY__WRes << 16) | 0x80000000)))
+
+#endif
+
+
+#ifndef RINOK
+#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
+#endif
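RINOK is the SDK's error-propagation idiom: every fallible call returns an SRes, and RINOK returns early from the enclosing function on any non-zero code. A minimal sketch of the convention (ReadTwoBytes and its arguments are illustrative, not SDK code):

/* chains calls that return SRes; any failure is forwarded to the caller */
static SRes ReadTwoBytes(const ISeqInStream *inStream, Byte *header)
{
  RINOK(SeqInStream_ReadByte(inStream, &header[0]));   /* SZ_ERROR_INPUT_EOF on a short read */
  RINOK(SeqInStream_ReadByte(inStream, &header[1]));
  return SZ_OK;
}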
+
+typedef unsigned char Byte;
+typedef short Int16;
+typedef unsigned short UInt16;
+
+#ifdef _LZMA_UINT32_IS_ULONG
+typedef long Int32;
+typedef unsigned long UInt32;
+#else
+typedef int Int32;
+typedef unsigned int UInt32;
+#endif
+
+#ifdef _SZ_NO_INT_64
+
+/* Define _SZ_NO_INT_64 if your compiler doesn't support 64-bit integers.
+   NOTE: some code will work incorrectly in that case! */
+
+typedef long Int64;
+typedef unsigned long UInt64;
+
+#else
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+typedef __int64 Int64;
+typedef unsigned __int64 UInt64;
+#define UINT64_CONST(n) n
+#else
+typedef long long int Int64;
+typedef unsigned long long int UInt64;
+#define UINT64_CONST(n) n ## ULL
+#endif
+
+#endif
+
+#ifdef _LZMA_NO_SYSTEM_SIZE_T
+typedef UInt32 SizeT;
+#else
+typedef size_t SizeT;
+#endif
+
+typedef int BoolInt;
+/* typedef BoolInt Bool; */
+#define True 1
+#define False 0
+
+
+#ifdef _WIN32
+#define MY_STD_CALL __stdcall
+#else
+#define MY_STD_CALL
+#endif
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1300
+#define MY_NO_INLINE __declspec(noinline)
+#else
+#define MY_NO_INLINE
+#endif
+
+#define MY_FORCE_INLINE __forceinline
+
+#define MY_CDECL __cdecl
+#define MY_FAST_CALL __fastcall
+
+#else
+
+#define MY_NO_INLINE
+#define MY_FORCE_INLINE
+#define MY_CDECL
+#define MY_FAST_CALL
+
+/* inline keyword : for C++ / C99 */
+
+/* GCC, clang: */
+/*
+#if defined (__GNUC__) && (__GNUC__ >= 4)
+#define MY_FORCE_INLINE __attribute__((always_inline))
+#define MY_NO_INLINE __attribute__((noinline))
+#endif
+*/
+
+#endif
+
+
+/* The following interfaces use first parameter as pointer to structure */
+
+typedef struct IByteIn IByteIn;
+struct IByteIn
+{
+ Byte (*Read)(const IByteIn *p); /* reads one byte, returns 0 in case of EOF or error */
+};
+#define IByteIn_Read(p) (p)->Read(p)
+
+
+typedef struct IByteOut IByteOut;
+struct IByteOut
+{
+ void (*Write)(const IByteOut *p, Byte b);
+};
+#define IByteOut_Write(p, b) (p)->Write(p, b)
+
+
+typedef struct ISeqInStream ISeqInStream;
+struct ISeqInStream
+{
+ SRes (*Read)(const ISeqInStream *p, void *buf, size_t *size);
+ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
+ (output(*size) < input(*size)) is allowed */
+};
+#define ISeqInStream_Read(p, buf, size) (p)->Read(p, buf, size)
+
+/* it can return SZ_ERROR_INPUT_EOF */
+SRes SeqInStream_Read(const ISeqInStream *stream, void *buf, size_t size);
+SRes SeqInStream_Read2(const ISeqInStream *stream, void *buf, size_t size, SRes errorType);
+SRes SeqInStream_ReadByte(const ISeqInStream *stream, Byte *buf);
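These interfaces are plain C vtables: the function-pointer table sits at the start of the implementing struct, and CONTAINER_FROM_VTBL (defined near the end of this header) recovers the enclosing object inside a callback. A minimal in-memory ISeqInStream as a sketch of the pattern, assuming <string.h> is available for memcpy; CBufInStream and its fields are illustrative, not part of the SDK:

typedef struct
{
  ISeqInStream vt;    /* keep the vtable first so &obj.vt is a valid interface pointer */
  const Byte *data;
  size_t rem;
} CBufInStream;

static SRes BufInStream_Read(const ISeqInStream *pp, void *buf, size_t *size)
{
  CBufInStream *p = CONTAINER_FROM_VTBL(pp, CBufInStream, vt);
  size_t cur = *size;
  if (cur > p->rem)
    cur = p->rem;       /* returning less than requested is allowed */
  memcpy(buf, p->data, cur);
  p->data += cur;
  p->rem -= cur;
  *size = cur;          /* zero bytes out for a non-zero request signals end of stream */
  return SZ_OK;
}

/* usage sketch:
     CBufInStream s; s.vt.Read = BufInStream_Read; s.data = src; s.rem = srcLen;
     SRes res = SeqInStream_Read(&s.vt, dst, dstLen);   // SZ_ERROR_INPUT_EOF if src is short
*/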
+
+
+typedef struct ISeqOutStream ISeqOutStream;
+struct ISeqOutStream
+{
+ size_t (*Write)(const ISeqOutStream *p, const void *buf, size_t size);
+ /* Returns: result - the number of actually written bytes.
+ (result < size) means error */
+};
+#define ISeqOutStream_Write(p, buf, size) (p)->Write(p, buf, size)
+
+typedef enum
+{
+ SZ_SEEK_SET = 0,
+ SZ_SEEK_CUR = 1,
+ SZ_SEEK_END = 2
+} ESzSeek;
+
+
+typedef struct ISeekInStream ISeekInStream;
+struct ISeekInStream
+{
+ SRes (*Read)(const ISeekInStream *p, void *buf, size_t *size); /* same as ISeqInStream::Read */
+ SRes (*Seek)(const ISeekInStream *p, Int64 *pos, ESzSeek origin);
+};
+#define ISeekInStream_Read(p, buf, size) (p)->Read(p, buf, size)
+#define ISeekInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)
+
+
+typedef struct ILookInStream ILookInStream;
+struct ILookInStream
+{
+ SRes (*Look)(const ILookInStream *p, const void **buf, size_t *size);
+ /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.
+ (output(*size) > input(*size)) is not allowed
+ (output(*size) < input(*size)) is allowed */
+ SRes (*Skip)(const ILookInStream *p, size_t offset);
+ /* offset must be <= output(*size) of Look */
+
+ SRes (*Read)(const ILookInStream *p, void *buf, size_t *size);
+  /* reads directly (without a buffer); the same as ISeqInStream::Read */
+ SRes (*Seek)(const ILookInStream *p, Int64 *pos, ESzSeek origin);
+};
+
+#define ILookInStream_Look(p, buf, size) (p)->Look(p, buf, size)
+#define ILookInStream_Skip(p, offset) (p)->Skip(p, offset)
+#define ILookInStream_Read(p, buf, size) (p)->Read(p, buf, size)
+#define ILookInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)
+
+
+SRes LookInStream_LookRead(const ILookInStream *stream, void *buf, size_t *size);
+SRes LookInStream_SeekTo(const ILookInStream *stream, UInt64 offset);
+
+/* reads via ILookInStream::Read */
+SRes LookInStream_Read2(const ILookInStream *stream, void *buf, size_t size, SRes errorType);
+SRes LookInStream_Read(const ILookInStream *stream, void *buf, size_t size);
+
+
+
+typedef struct
+{
+ ILookInStream vt;
+ const ISeekInStream *realStream;
+
+ size_t pos;
+  size_t size;  /* size of the valid data currently held in buf */
+
+ /* the following variables must be set outside */
+ Byte *buf;
+ size_t bufSize;
+} CLookToRead2;
+
+void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead);
+
+#define LookToRead2_Init(p) { (p)->pos = (p)->size = 0; }
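CLookToRead2 adapts a seekable ISeekInStream to this buffered ILookInStream interface; the caller owns the buffer and must wire it in before use. A setup sketch under stated assumptions: OpenLookStream and kLookBufSize are illustrative names, and g_Alloc is the malloc-backed allocator declared in Alloc.h further down in this change:

#define kLookBufSize (1 << 16)   /* illustrative buffer size */

static SRes OpenLookStream(CLookToRead2 *lookStream, const ISeekInStream *seekStream)
{
  lookStream->buf = (Byte *)ISzAlloc_Alloc(&g_Alloc, kLookBufSize);
  if (!lookStream->buf)
    return SZ_ERROR_MEM;
  lookStream->bufSize = kLookBufSize;            /* buf and bufSize must be set by the caller */
  lookStream->realStream = seekStream;
  LookToRead2_CreateVTable(lookStream, True);    /* True selects the lookahead Look variant */
  LookToRead2_Init(lookStream);                  /* resets pos and size */
  return SZ_OK;
}

/* lookStream->vt can then be passed wherever an ILookInStream is expected,
   e.g. LookInStream_SeekTo(&lookStream->vt, 0);
   free the buffer later with ISzAlloc_Free(&g_Alloc, lookStream->buf). */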
+
+
+typedef struct
+{
+ ISeqInStream vt;
+ const ILookInStream *realStream;
+} CSecToLook;
+
+void SecToLook_CreateVTable(CSecToLook *p);
+
+
+
+typedef struct
+{
+ ISeqInStream vt;
+ const ILookInStream *realStream;
+} CSecToRead;
+
+void SecToRead_CreateVTable(CSecToRead *p);
+
+
+typedef struct ICompressProgress ICompressProgress;
+
+struct ICompressProgress
+{
+ SRes (*Progress)(const ICompressProgress *p, UInt64 inSize, UInt64 outSize);
+ /* Returns: result. (result != SZ_OK) means break.
+ Value (UInt64)(Int64)-1 for size means unknown value. */
+};
+#define ICompressProgress_Progress(p, inSize, outSize) (p)->Progress(p, inSize, outSize)
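A progress handler is another one-member vtable; returning anything other than SZ_OK from the callback cancels the operation. A sketch of a simple reporter (the struct, the stdio output and the stored total are illustrative):

#include <stdio.h>   /* assumption: stdio is acceptable in the host application */

typedef struct
{
  ICompressProgress vt;
  UInt64 totalIn;
} CProgressPrinter;

static SRes Progress_Report(const ICompressProgress *pp, UInt64 inSize, UInt64 outSize)
{
  const CProgressPrinter *p = CONTAINER_FROM_VTBL(pp, CProgressPrinter, vt);
  if (outSize != (UInt64)(Int64)-1)   /* (UInt64)(Int64)-1 means the size is unknown */
    printf("processed %llu / %llu in, %llu out\n",
        (unsigned long long)inSize, (unsigned long long)p->totalIn, (unsigned long long)outSize);
  return SZ_OK;                       /* return an SZ_ERROR_* code here to abort */
}

/* usage sketch:
     CProgressPrinter prog = { { Progress_Report }, totalInputSize };
     ...pass &prog.vt wherever an ICompressProgress is expected...
*/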
+
+
+
+typedef struct ISzAlloc ISzAlloc;
+typedef const ISzAlloc * ISzAllocPtr;
+
+struct ISzAlloc
+{
+ void *(*Alloc)(ISzAllocPtr p, size_t size);
+ void (*Free)(ISzAllocPtr p, void *address); /* address can be 0 */
+};
+
+#define ISzAlloc_Alloc(p, size) (p)->Alloc(p, size)
+#define ISzAlloc_Free(p, a) (p)->Free(p, a)
+
+/* deprecated */
+#define IAlloc_Alloc(p, size) ISzAlloc_Alloc(p, size)
+#define IAlloc_Free(p, a) ISzAlloc_Free(p, a)
+
+
+
+
+
+#ifndef MY_offsetof
+ #ifdef offsetof
+ #define MY_offsetof(type, m) offsetof(type, m)
+ /*
+ #define MY_offsetof(type, m) FIELD_OFFSET(type, m)
+ */
+ #else
+ #define MY_offsetof(type, m) ((size_t)&(((type *)0)->m))
+ #endif
+#endif
+
+
+
+#ifndef MY_container_of
+
+/*
+#define MY_container_of(ptr, type, m) container_of(ptr, type, m)
+#define MY_container_of(ptr, type, m) CONTAINING_RECORD(ptr, type, m)
+#define MY_container_of(ptr, type, m) ((type *)((char *)(ptr) - offsetof(type, m)))
+#define MY_container_of(ptr, type, m) (&((type *)0)->m == (ptr), ((type *)(((char *)(ptr)) - MY_offsetof(type, m))))
+*/
+
+/*
+ GCC shows warning: "perhaps the 'offsetof' macro was used incorrectly"
+ GCC 3.4.4 : classes with constructor
+ GCC 4.8.1 : classes with non-public variable members"
+*/
+
+#define MY_container_of(ptr, type, m) ((type *)((char *)(1 ? (ptr) : &((type *)0)->m) - MY_offsetof(type, m)))
+
+
+#endif
+
+#define CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m) ((type *)(ptr))
+
+/*
+#define CONTAINER_FROM_VTBL(ptr, type, m) CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
+*/
+#define CONTAINER_FROM_VTBL(ptr, type, m) MY_container_of(ptr, type, m)
+
+#define CONTAINER_FROM_VTBL_CLS(ptr, type, m) CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
+/*
+#define CONTAINER_FROM_VTBL_CLS(ptr, type, m) CONTAINER_FROM_VTBL(ptr, type, m)
+*/
+
+
+
+#ifdef _WIN32
+
+#define CHAR_PATH_SEPARATOR '\\'
+#define WCHAR_PATH_SEPARATOR L'\\'
+#define STRING_PATH_SEPARATOR "\\"
+#define WSTRING_PATH_SEPARATOR L"\\"
+
+#else
+
+#define CHAR_PATH_SEPARATOR '/'
+#define WCHAR_PATH_SEPARATOR L'/'
+#define STRING_PATH_SEPARATOR "/"
+#define WSTRING_PATH_SEPARATOR L"/"
+
+#endif
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/7zVersion.h b/contrib/libs/lzmasdk/7zVersion.h
new file mode 100644
index 0000000000..0074c64be9
--- /dev/null
+++ b/contrib/libs/lzmasdk/7zVersion.h
@@ -0,0 +1,27 @@
+#define MY_VER_MAJOR 19
+#define MY_VER_MINOR 00
+#define MY_VER_BUILD 0
+#define MY_VERSION_NUMBERS "19.00"
+#define MY_VERSION MY_VERSION_NUMBERS
+
+#ifdef MY_CPU_NAME
+ #define MY_VERSION_CPU MY_VERSION " (" MY_CPU_NAME ")"
+#else
+ #define MY_VERSION_CPU MY_VERSION
+#endif
+
+#define MY_DATE "2019-02-21"
+#undef MY_COPYRIGHT
+#undef MY_VERSION_COPYRIGHT_DATE
+#define MY_AUTHOR_NAME "Igor Pavlov"
+#define MY_COPYRIGHT_PD "Igor Pavlov : Public domain"
+#define MY_COPYRIGHT_CR "Copyright (c) 1999-2018 Igor Pavlov"
+
+#ifdef USE_COPYRIGHT_CR
+ #define MY_COPYRIGHT MY_COPYRIGHT_CR
+#else
+ #define MY_COPYRIGHT MY_COPYRIGHT_PD
+#endif
+
+#define MY_COPYRIGHT_DATE MY_COPYRIGHT " : " MY_DATE
+#define MY_VERSION_COPYRIGHT_DATE MY_VERSION_CPU " : " MY_COPYRIGHT " : " MY_DATE
diff --git a/contrib/libs/lzmasdk/Aes.c b/contrib/libs/lzmasdk/Aes.c
new file mode 100644
index 0000000000..8f7d50ea24
--- /dev/null
+++ b/contrib/libs/lzmasdk/Aes.c
@@ -0,0 +1,306 @@
+/* Aes.c -- AES encryption / decryption
+2017-01-24 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "Aes.h"
+#include "CpuArch.h"
+
+static UInt32 T[256 * 4];
+static const Byte Sbox[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};
+
+void MY_FAST_CALL AesCbc_Encode(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCbc_Decode(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCtr_Code(UInt32 *ivAes, Byte *data, size_t numBlocks);
+
+void MY_FAST_CALL AesCbc_Encode_Intel(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCbc_Decode_Intel(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCtr_Code_Intel(UInt32 *ivAes, Byte *data, size_t numBlocks);
+
+AES_CODE_FUNC g_AesCbc_Encode;
+AES_CODE_FUNC g_AesCbc_Decode;
+AES_CODE_FUNC g_AesCtr_Code;
+
+static UInt32 D[256 * 4];
+static Byte InvS[256];
+
+static const Byte Rcon[11] = { 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 };
+
+#define xtime(x) ((((x) << 1) ^ (((x) & 0x80) != 0 ? 0x1B : 0)) & 0xFF)
+
+#define Ui32(a0, a1, a2, a3) ((UInt32)(a0) | ((UInt32)(a1) << 8) | ((UInt32)(a2) << 16) | ((UInt32)(a3) << 24))
+
+#define gb0(x) ( (x) & 0xFF)
+#define gb1(x) (((x) >> ( 8)) & 0xFF)
+#define gb2(x) (((x) >> (16)) & 0xFF)
+#define gb3(x) (((x) >> (24)))
+
+#define gb(n, x) gb ## n(x)
+
+#define TT(x) (T + (x << 8))
+#define DD(x) (D + (x << 8))
+
+
+void AesGenTables(void)
+{
+ unsigned i;
+ for (i = 0; i < 256; i++)
+ InvS[Sbox[i]] = (Byte)i;
+
+ for (i = 0; i < 256; i++)
+ {
+ {
+ UInt32 a1 = Sbox[i];
+ UInt32 a2 = xtime(a1);
+ UInt32 a3 = a2 ^ a1;
+ TT(0)[i] = Ui32(a2, a1, a1, a3);
+ TT(1)[i] = Ui32(a3, a2, a1, a1);
+ TT(2)[i] = Ui32(a1, a3, a2, a1);
+ TT(3)[i] = Ui32(a1, a1, a3, a2);
+ }
+ {
+ UInt32 a1 = InvS[i];
+ UInt32 a2 = xtime(a1);
+ UInt32 a4 = xtime(a2);
+ UInt32 a8 = xtime(a4);
+ UInt32 a9 = a8 ^ a1;
+ UInt32 aB = a8 ^ a2 ^ a1;
+ UInt32 aD = a8 ^ a4 ^ a1;
+ UInt32 aE = a8 ^ a4 ^ a2;
+ DD(0)[i] = Ui32(aE, a9, aD, aB);
+ DD(1)[i] = Ui32(aB, aE, a9, aD);
+ DD(2)[i] = Ui32(aD, aB, aE, a9);
+ DD(3)[i] = Ui32(a9, aD, aB, aE);
+ }
+ }
+
+ g_AesCbc_Encode = AesCbc_Encode;
+ g_AesCbc_Decode = AesCbc_Decode;
+ g_AesCtr_Code = AesCtr_Code;
+
+ #ifdef MY_CPU_X86_OR_AMD64
+ if (CPU_Is_Aes_Supported())
+ {
+ g_AesCbc_Encode = AesCbc_Encode_Intel;
+ g_AesCbc_Decode = AesCbc_Decode_Intel;
+ g_AesCtr_Code = AesCtr_Code_Intel;
+ }
+ #endif
+}
+
+
+#define HT(i, x, s) TT(x)[gb(x, s[(i + x) & 3])]
+
+#define HT4(m, i, s, p) m[i] = \
+ HT(i, 0, s) ^ \
+ HT(i, 1, s) ^ \
+ HT(i, 2, s) ^ \
+ HT(i, 3, s) ^ w[p + i]
+
+#define HT16(m, s, p) \
+ HT4(m, 0, s, p); \
+ HT4(m, 1, s, p); \
+ HT4(m, 2, s, p); \
+ HT4(m, 3, s, p); \
+
+#define FT(i, x) Sbox[gb(x, m[(i + x) & 3])]
+#define FT4(i) dest[i] = Ui32(FT(i, 0), FT(i, 1), FT(i, 2), FT(i, 3)) ^ w[i];
+
+
+#define HD(i, x, s) DD(x)[gb(x, s[(i - x) & 3])]
+
+#define HD4(m, i, s, p) m[i] = \
+ HD(i, 0, s) ^ \
+ HD(i, 1, s) ^ \
+ HD(i, 2, s) ^ \
+ HD(i, 3, s) ^ w[p + i];
+
+#define HD16(m, s, p) \
+ HD4(m, 0, s, p); \
+ HD4(m, 1, s, p); \
+ HD4(m, 2, s, p); \
+ HD4(m, 3, s, p); \
+
+#define FD(i, x) InvS[gb(x, m[(i - x) & 3])]
+#define FD4(i) dest[i] = Ui32(FD(i, 0), FD(i, 1), FD(i, 2), FD(i, 3)) ^ w[i];
+
+void MY_FAST_CALL Aes_SetKey_Enc(UInt32 *w, const Byte *key, unsigned keySize)
+{
+ unsigned i, wSize;
+ wSize = keySize + 28;
+ keySize /= 4;
+ w[0] = ((UInt32)keySize / 2) + 3;
+ w += 4;
+
+ for (i = 0; i < keySize; i++, key += 4)
+ w[i] = GetUi32(key);
+
+ for (; i < wSize; i++)
+ {
+ UInt32 t = w[(size_t)i - 1];
+ unsigned rem = i % keySize;
+ if (rem == 0)
+ t = Ui32(Sbox[gb1(t)] ^ Rcon[i / keySize], Sbox[gb2(t)], Sbox[gb3(t)], Sbox[gb0(t)]);
+ else if (keySize > 6 && rem == 4)
+ t = Ui32(Sbox[gb0(t)], Sbox[gb1(t)], Sbox[gb2(t)], Sbox[gb3(t)]);
+ w[i] = w[i - keySize] ^ t;
+ }
+}
+
+void MY_FAST_CALL Aes_SetKey_Dec(UInt32 *w, const Byte *key, unsigned keySize)
+{
+ unsigned i, num;
+ Aes_SetKey_Enc(w, key, keySize);
+ num = keySize + 20;
+ w += 8;
+ for (i = 0; i < num; i++)
+ {
+ UInt32 r = w[i];
+ w[i] =
+ DD(0)[Sbox[gb0(r)]] ^
+ DD(1)[Sbox[gb1(r)]] ^
+ DD(2)[Sbox[gb2(r)]] ^
+ DD(3)[Sbox[gb3(r)]];
+ }
+}
+
+/* Aes_Encode and Aes_Decode functions work with little-endian words.
+ src and dest are pointers to 4 UInt32 words.
+ src and dest can point to same block */
+
+static void Aes_Encode(const UInt32 *w, UInt32 *dest, const UInt32 *src)
+{
+ UInt32 s[4];
+ UInt32 m[4];
+ UInt32 numRounds2 = w[0];
+ w += 4;
+ s[0] = src[0] ^ w[0];
+ s[1] = src[1] ^ w[1];
+ s[2] = src[2] ^ w[2];
+ s[3] = src[3] ^ w[3];
+ w += 4;
+ for (;;)
+ {
+ HT16(m, s, 0);
+ if (--numRounds2 == 0)
+ break;
+ HT16(s, m, 4);
+ w += 8;
+ }
+ w += 4;
+ FT4(0); FT4(1); FT4(2); FT4(3);
+}
+
+static void Aes_Decode(const UInt32 *w, UInt32 *dest, const UInt32 *src)
+{
+ UInt32 s[4];
+ UInt32 m[4];
+ UInt32 numRounds2 = w[0];
+ w += 4 + numRounds2 * 8;
+ s[0] = src[0] ^ w[0];
+ s[1] = src[1] ^ w[1];
+ s[2] = src[2] ^ w[2];
+ s[3] = src[3] ^ w[3];
+ for (;;)
+ {
+ w -= 8;
+ HD16(m, s, 4);
+ if (--numRounds2 == 0)
+ break;
+ HD16(s, m, 0);
+ }
+ FD4(0); FD4(1); FD4(2); FD4(3);
+}
+
+void AesCbc_Init(UInt32 *p, const Byte *iv)
+{
+ unsigned i;
+ for (i = 0; i < 4; i++)
+ p[i] = GetUi32(iv + i * 4);
+}
+
+void MY_FAST_CALL AesCbc_Encode(UInt32 *p, Byte *data, size_t numBlocks)
+{
+ for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE)
+ {
+ p[0] ^= GetUi32(data);
+ p[1] ^= GetUi32(data + 4);
+ p[2] ^= GetUi32(data + 8);
+ p[3] ^= GetUi32(data + 12);
+
+ Aes_Encode(p + 4, p, p);
+
+ SetUi32(data, p[0]);
+ SetUi32(data + 4, p[1]);
+ SetUi32(data + 8, p[2]);
+ SetUi32(data + 12, p[3]);
+ }
+}
+
+void MY_FAST_CALL AesCbc_Decode(UInt32 *p, Byte *data, size_t numBlocks)
+{
+ UInt32 in[4], out[4];
+ for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE)
+ {
+ in[0] = GetUi32(data);
+ in[1] = GetUi32(data + 4);
+ in[2] = GetUi32(data + 8);
+ in[3] = GetUi32(data + 12);
+
+ Aes_Decode(p + 4, out, in);
+
+ SetUi32(data, p[0] ^ out[0]);
+ SetUi32(data + 4, p[1] ^ out[1]);
+ SetUi32(data + 8, p[2] ^ out[2]);
+ SetUi32(data + 12, p[3] ^ out[3]);
+
+ p[0] = in[0];
+ p[1] = in[1];
+ p[2] = in[2];
+ p[3] = in[3];
+ }
+}
+
+void MY_FAST_CALL AesCtr_Code(UInt32 *p, Byte *data, size_t numBlocks)
+{
+ for (; numBlocks != 0; numBlocks--)
+ {
+ UInt32 temp[4];
+ unsigned i;
+
+ if (++p[0] == 0)
+ p[1]++;
+
+ Aes_Encode(p + 4, temp, p);
+
+ for (i = 0; i < 4; i++, data += 4)
+ {
+ UInt32 t = temp[i];
+
+ #ifdef MY_CPU_LE_UNALIGN
+ *((UInt32 *)data) ^= t;
+ #else
+ data[0] ^= (t & 0xFF);
+ data[1] ^= ((t >> 8) & 0xFF);
+ data[2] ^= ((t >> 16) & 0xFF);
+ data[3] ^= ((t >> 24));
+ #endif
+ }
+ }
+}
diff --git a/contrib/libs/lzmasdk/Aes.h b/contrib/libs/lzmasdk/Aes.h
new file mode 100644
index 0000000000..381e979d1b
--- /dev/null
+++ b/contrib/libs/lzmasdk/Aes.h
@@ -0,0 +1,38 @@
+/* Aes.h -- AES encryption / decryption
+2013-01-18 : Igor Pavlov : Public domain */
+
+#ifndef __AES_H
+#define __AES_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+#define AES_BLOCK_SIZE 16
+
+/* Call AesGenTables one time before other AES functions */
+void AesGenTables(void);
+
+/* UInt32 pointers must be 16-byte aligned */
+
+/* 16-byte (4 * 32-bit words) blocks: 1 (IV) + 1 (keyMode) + 15 (AES-256 roundKeys) */
+#define AES_NUM_IVMRK_WORDS ((1 + 1 + 15) * 4)
+
+/* aes - 16-byte aligned pointer to keyMode+roundKeys sequence */
+/* keySize = 16 or 24 or 32 (bytes) */
+typedef void (MY_FAST_CALL *AES_SET_KEY_FUNC)(UInt32 *aes, const Byte *key, unsigned keySize);
+void MY_FAST_CALL Aes_SetKey_Enc(UInt32 *aes, const Byte *key, unsigned keySize);
+void MY_FAST_CALL Aes_SetKey_Dec(UInt32 *aes, const Byte *key, unsigned keySize);
+
+/* ivAes - 16-byte aligned pointer to iv+keyMode+roundKeys sequence: UInt32[AES_NUM_IVMRK_WORDS] */
+void AesCbc_Init(UInt32 *ivAes, const Byte *iv); /* iv size is AES_BLOCK_SIZE */
+/* data - 16-byte aligned pointer to data */
+/* numBlocks - the number of 16-byte blocks in data array */
+typedef void (MY_FAST_CALL *AES_CODE_FUNC)(UInt32 *ivAes, Byte *data, size_t numBlocks);
+extern AES_CODE_FUNC g_AesCbc_Encode;
+extern AES_CODE_FUNC g_AesCbc_Decode;
+extern AES_CODE_FUNC g_AesCtr_Code;
+
+EXTERN_C_END
+
+#endif
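Putting the declarations above together, AES-256-CBC encryption is: generate the tables once, build the key schedule behind the 4-word IV slot, then call through the g_AesCbc_Encode pointer (which Aes.c points at the AES-NI implementation when the CPU supports it). A hedged sketch: alignas is a C11 assumption used to satisfy the required 16-byte alignment, the key/iv/data arguments belong to the caller, and data must be a whole number of 16-byte blocks:

#include <stdalign.h>   /* C11; any other way of 16-byte aligning ivAes also works */

static void EncryptCbc256(Byte *data, size_t numBlocks, const Byte key[32], const Byte iv[16])
{
  /* layout per Aes.h: ivAes[0..3] = IV, ivAes[4..7] = key mode, rest = round keys */
  alignas(16) UInt32 ivAes[AES_NUM_IVMRK_WORDS];

  AesGenTables();                       /* in real code, call once at program startup */
  Aes_SetKey_Enc(ivAes + 4, key, 32);   /* keySize may be 16, 24 or 32 bytes */
  AesCbc_Init(ivAes, iv);               /* loads the IV into the first 4 words */
  g_AesCbc_Encode(ivAes, data, numBlocks);
}

/* For g_AesCbc_Decode, build the schedule with Aes_SetKey_Dec instead; g_AesCtr_Code
   reuses the encryption schedule and treats the leading words as the counter block. */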
diff --git a/contrib/libs/lzmasdk/AesOpt.c b/contrib/libs/lzmasdk/AesOpt.c
new file mode 100644
index 0000000000..65936a414f
--- /dev/null
+++ b/contrib/libs/lzmasdk/AesOpt.c
@@ -0,0 +1,188 @@
+/* AesOpt.c -- Intel's AES
+2017-06-08 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "CpuArch.h"
+
+#ifdef MY_CPU_X86_OR_AMD64
+#if (_MSC_VER > 1500) || (_MSC_FULL_VER >= 150030729)
+#define USE_INTEL_AES
+#endif
+#endif
+
+#ifdef USE_INTEL_AES
+
+#if defined(__clang__)
+#define TARGET_AES __attribute__((__target__("aes")))
+#else
+#define TARGET_AES
+#endif
+
+#include <wmmintrin.h>
+
+void TARGET_AES MY_FAST_CALL AesCbc_Encode_Intel(__m128i *p, __m128i *data, size_t numBlocks)
+{
+ __m128i m = *p;
+ for (; numBlocks != 0; numBlocks--, data++)
+ {
+ UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
+ const __m128i *w = p + 3;
+ m = _mm_xor_si128(m, *data);
+ m = _mm_xor_si128(m, p[2]);
+ do
+ {
+ m = _mm_aesenc_si128(m, w[0]);
+ m = _mm_aesenc_si128(m, w[1]);
+ w += 2;
+ }
+ while (--numRounds2 != 0);
+ m = _mm_aesenc_si128(m, w[0]);
+ m = _mm_aesenclast_si128(m, w[1]);
+ *data = m;
+ }
+ *p = m;
+}
+
+#define NUM_WAYS 3
+
+#define AES_OP_W(op, n) { \
+ const __m128i t = w[n]; \
+ m0 = op(m0, t); \
+ m1 = op(m1, t); \
+ m2 = op(m2, t); \
+ }
+
+#define AES_DEC(n) AES_OP_W(_mm_aesdec_si128, n)
+#define AES_DEC_LAST(n) AES_OP_W(_mm_aesdeclast_si128, n)
+#define AES_ENC(n) AES_OP_W(_mm_aesenc_si128, n)
+#define AES_ENC_LAST(n) AES_OP_W(_mm_aesenclast_si128, n)
+
+void TARGET_AES MY_FAST_CALL AesCbc_Decode_Intel(__m128i *p, __m128i *data, size_t numBlocks)
+{
+ __m128i iv = *p;
+ for (; numBlocks >= NUM_WAYS; numBlocks -= NUM_WAYS, data += NUM_WAYS)
+ {
+ UInt32 numRounds2 = *(const UInt32 *)(p + 1);
+ const __m128i *w = p + numRounds2 * 2;
+ __m128i m0, m1, m2;
+ {
+ const __m128i t = w[2];
+ m0 = _mm_xor_si128(t, data[0]);
+ m1 = _mm_xor_si128(t, data[1]);
+ m2 = _mm_xor_si128(t, data[2]);
+ }
+ numRounds2--;
+ do
+ {
+ AES_DEC(1)
+ AES_DEC(0)
+ w -= 2;
+ }
+ while (--numRounds2 != 0);
+ AES_DEC(1)
+ AES_DEC_LAST(0)
+
+ {
+ __m128i t;
+ t = _mm_xor_si128(m0, iv); iv = data[0]; data[0] = t;
+ t = _mm_xor_si128(m1, iv); iv = data[1]; data[1] = t;
+ t = _mm_xor_si128(m2, iv); iv = data[2]; data[2] = t;
+ }
+ }
+ for (; numBlocks != 0; numBlocks--, data++)
+ {
+ UInt32 numRounds2 = *(const UInt32 *)(p + 1);
+ const __m128i *w = p + numRounds2 * 2;
+ __m128i m = _mm_xor_si128(w[2], *data);
+ numRounds2--;
+ do
+ {
+ m = _mm_aesdec_si128(m, w[1]);
+ m = _mm_aesdec_si128(m, w[0]);
+ w -= 2;
+ }
+ while (--numRounds2 != 0);
+ m = _mm_aesdec_si128(m, w[1]);
+ m = _mm_aesdeclast_si128(m, w[0]);
+
+ m = _mm_xor_si128(m, iv);
+ iv = *data;
+ *data = m;
+ }
+ *p = iv;
+}
+
+void TARGET_AES MY_FAST_CALL AesCtr_Code_Intel(__m128i *p, __m128i *data, size_t numBlocks)
+{
+ __m128i ctr = *p;
+ __m128i one = _mm_set_epi64x(1, 0);
+ for (; numBlocks >= NUM_WAYS; numBlocks -= NUM_WAYS, data += NUM_WAYS)
+ {
+ UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
+ const __m128i *w = p;
+ __m128i m0, m1, m2;
+ {
+ const __m128i t = w[2];
+ ctr = _mm_add_epi64(ctr, one); m0 = _mm_xor_si128(ctr, t);
+ ctr = _mm_add_epi64(ctr, one); m1 = _mm_xor_si128(ctr, t);
+ ctr = _mm_add_epi64(ctr, one); m2 = _mm_xor_si128(ctr, t);
+ }
+ w += 3;
+ do
+ {
+ AES_ENC(0)
+ AES_ENC(1)
+ w += 2;
+ }
+ while (--numRounds2 != 0);
+ AES_ENC(0)
+ AES_ENC_LAST(1)
+ data[0] = _mm_xor_si128(data[0], m0);
+ data[1] = _mm_xor_si128(data[1], m1);
+ data[2] = _mm_xor_si128(data[2], m2);
+ }
+ for (; numBlocks != 0; numBlocks--, data++)
+ {
+ UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
+ const __m128i *w = p;
+ __m128i m;
+ ctr = _mm_add_epi64(ctr, one);
+ m = _mm_xor_si128(ctr, p[2]);
+ w += 3;
+ do
+ {
+ m = _mm_aesenc_si128(m, w[0]);
+ m = _mm_aesenc_si128(m, w[1]);
+ w += 2;
+ }
+ while (--numRounds2 != 0);
+ m = _mm_aesenc_si128(m, w[0]);
+ m = _mm_aesenclast_si128(m, w[1]);
+ *data = _mm_xor_si128(*data, m);
+ }
+ *p = ctr;
+}
+
+#else
+
+void MY_FAST_CALL AesCbc_Encode(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCbc_Decode(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCtr_Code(UInt32 *ivAes, Byte *data, size_t numBlocks);
+
+void MY_FAST_CALL AesCbc_Encode_Intel(UInt32 *p, Byte *data, size_t numBlocks)
+{
+ AesCbc_Encode(p, data, numBlocks);
+}
+
+void MY_FAST_CALL AesCbc_Decode_Intel(UInt32 *p, Byte *data, size_t numBlocks)
+{
+ AesCbc_Decode(p, data, numBlocks);
+}
+
+void MY_FAST_CALL AesCtr_Code_Intel(UInt32 *p, Byte *data, size_t numBlocks)
+{
+ AesCtr_Code(p, data, numBlocks);
+}
+
+#endif
diff --git a/contrib/libs/lzmasdk/Alloc.c b/contrib/libs/lzmasdk/Alloc.c
new file mode 100644
index 0000000000..30b499e5ff
--- /dev/null
+++ b/contrib/libs/lzmasdk/Alloc.c
@@ -0,0 +1,455 @@
+/* Alloc.c -- Memory allocation functions
+2018-04-27 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <stdio.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+#include <stdlib.h>
+
+#include "Alloc.h"
+
+/* #define _SZ_ALLOC_DEBUG */
+
+/* use _SZ_ALLOC_DEBUG to debug alloc/free operations */
+#ifdef _SZ_ALLOC_DEBUG
+
+#include <stdio.h>
+int g_allocCount = 0;
+int g_allocCountMid = 0;
+int g_allocCountBig = 0;
+
+
+#define CONVERT_INT_TO_STR(charType, tempSize) \
+ unsigned char temp[tempSize]; unsigned i = 0; \
+ while (val >= 10) { temp[i++] = (unsigned char)('0' + (unsigned)(val % 10)); val /= 10; } \
+ *s++ = (charType)('0' + (unsigned)val); \
+ while (i != 0) { i--; *s++ = temp[i]; } \
+ *s = 0;
+
+static void ConvertUInt64ToString(UInt64 val, char *s)
+{
+ CONVERT_INT_TO_STR(char, 24);
+}
+
+#define GET_HEX_CHAR(t) ((char)(((t < 10) ? ('0' + t) : ('A' + (t - 10)))))
+
+static void ConvertUInt64ToHex(UInt64 val, char *s)
+{
+ UInt64 v = val;
+ unsigned i;
+ for (i = 1;; i++)
+ {
+ v >>= 4;
+ if (v == 0)
+ break;
+ }
+ s[i] = 0;
+ do
+ {
+ unsigned t = (unsigned)(val & 0xF);
+ val >>= 4;
+ s[--i] = GET_HEX_CHAR(t);
+ }
+ while (i);
+}
+
+#define DEBUG_OUT_STREAM stderr
+
+static void Print(const char *s)
+{
+ fputs(s, DEBUG_OUT_STREAM);
+}
+
+static void PrintAligned(const char *s, size_t align)
+{
+ size_t len = strlen(s);
+ for(;;)
+ {
+ fputc(' ', DEBUG_OUT_STREAM);
+ if (len >= align)
+ break;
+ ++len;
+ }
+ Print(s);
+}
+
+static void PrintLn()
+{
+ Print("\n");
+}
+
+static void PrintHex(UInt64 v, size_t align)
+{
+ char s[32];
+ ConvertUInt64ToHex(v, s);
+ PrintAligned(s, align);
+}
+
+static void PrintDec(UInt64 v, size_t align)
+{
+ char s[32];
+ ConvertUInt64ToString(v, s);
+ PrintAligned(s, align);
+}
+
+static void PrintAddr(void *p)
+{
+ PrintHex((UInt64)(size_t)(ptrdiff_t)p, 12);
+}
+
+
+#define PRINT_ALLOC(name, cnt, size, ptr) \
+ Print(name " "); \
+ PrintDec(cnt++, 10); \
+ PrintHex(size, 10); \
+ PrintAddr(ptr); \
+ PrintLn();
+
+#define PRINT_FREE(name, cnt, ptr) if (ptr) { \
+ Print(name " "); \
+ PrintDec(--cnt, 10); \
+ PrintAddr(ptr); \
+ PrintLn(); }
+
+#else
+
+#define PRINT_ALLOC(name, cnt, size, ptr)
+#define PRINT_FREE(name, cnt, ptr)
+#define Print(s)
+#define PrintLn()
+#define PrintHex(v, align)
+#define PrintDec(v, align)
+#define PrintAddr(p)
+
+#endif
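When a build defines _SZ_ALLOC_DEBUG, every MyAlloc/MyFree (and the Mid/Big variants below) logs a running counter, the size and the address to stderr through the PRINT_ALLOC/PRINT_FREE macros above; without it the macros compile away. A sketch of switching it on (the compiler flag spelling is an assumption about the build system):

/* e.g. build this file with -D_SZ_ALLOC_DEBUG, then:
     Byte *p = (Byte *)MyAlloc(1024);   // prints "Alloc  <count> <size> <address>" to stderr
     MyFree(p);                         // prints "Free   <count> <address>"
*/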
+
+
+
+void *MyAlloc(size_t size)
+{
+ if (size == 0)
+ return NULL;
+ #ifdef _SZ_ALLOC_DEBUG
+ {
+ void *p = malloc(size);
+ PRINT_ALLOC("Alloc ", g_allocCount, size, p);
+ return p;
+ }
+ #else
+ return malloc(size);
+ #endif
+}
+
+void MyFree(void *address)
+{
+ PRINT_FREE("Free ", g_allocCount, address);
+
+ free(address);
+}
+
+#ifdef _WIN32
+
+void *MidAlloc(size_t size)
+{
+ if (size == 0)
+ return NULL;
+
+ PRINT_ALLOC("Alloc-Mid", g_allocCountMid, size, NULL);
+
+ return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
+}
+
+void MidFree(void *address)
+{
+ PRINT_FREE("Free-Mid", g_allocCountMid, address);
+
+ if (!address)
+ return;
+ VirtualFree(address, 0, MEM_RELEASE);
+}
+
+#ifndef MEM_LARGE_PAGES
+#undef _7ZIP_LARGE_PAGES
+#endif
+
+#ifdef _7ZIP_LARGE_PAGES
+SIZE_T g_LargePageSize = 0;
+typedef SIZE_T (WINAPI *GetLargePageMinimumP)();
+#endif
+
+void SetLargePageSize()
+{
+ #ifdef _7ZIP_LARGE_PAGES
+ SIZE_T size;
+ GetLargePageMinimumP largePageMinimum = (GetLargePageMinimumP)
+ GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "GetLargePageMinimum");
+ if (!largePageMinimum)
+ return;
+ size = largePageMinimum();
+ if (size == 0 || (size & (size - 1)) != 0)
+ return;
+ g_LargePageSize = size;
+ #endif
+}
+
+
+void *BigAlloc(size_t size)
+{
+ if (size == 0)
+ return NULL;
+
+ PRINT_ALLOC("Alloc-Big", g_allocCountBig, size, NULL);
+
+ #ifdef _7ZIP_LARGE_PAGES
+ {
+ SIZE_T ps = g_LargePageSize;
+ if (ps != 0 && ps <= (1 << 30) && size > (ps / 2))
+ {
+ size_t size2;
+ ps--;
+ size2 = (size + ps) & ~ps;
+ if (size2 >= size)
+ {
+ void *res = VirtualAlloc(NULL, size2, MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
+ if (res)
+ return res;
+ }
+ }
+ }
+ #endif
+
+ return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
+}
+
+void BigFree(void *address)
+{
+ PRINT_FREE("Free-Big", g_allocCountBig, address);
+
+ if (!address)
+ return;
+ VirtualFree(address, 0, MEM_RELEASE);
+}
+
+#endif
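On Windows, BigAlloc can be backed by large pages, but only after SetLargePageSize() has cached the minimum large-page size, and only in builds that define _7ZIP_LARGE_PAGES. A sketch of the intended startup order; acquiring SeLockMemoryPrivilege is an application-side step that this file does not perform:

static void InitLargePages(void)
{
  /* the process must already hold SeLockMemoryPrivilege for MEM_LARGE_PAGES to succeed */
  SetLargePageSize();   /* caches GetLargePageMinimum(); later BigAlloc calls may use large pages */
}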
+
+
+static void *SzAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MyAlloc(size); }
+static void SzFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); MyFree(address); }
+const ISzAlloc g_Alloc = { SzAlloc, SzFree };
+
+static void *SzMidAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MidAlloc(size); }
+static void SzMidFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); MidFree(address); }
+const ISzAlloc g_MidAlloc = { SzMidAlloc, SzMidFree };
+
+static void *SzBigAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return BigAlloc(size); }
+static void SzBigFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); BigFree(address); }
+const ISzAlloc g_BigAlloc = { SzBigAlloc, SzBigFree };
+
+
+/*
+ uintptr_t : <stdint.h> C99 (optional)
+ : unsupported in VS6
+*/
+
+#ifdef _WIN32
+ typedef UINT_PTR UIntPtr;
+#else
+ /*
+ typedef uintptr_t UIntPtr;
+ */
+ typedef ptrdiff_t UIntPtr;
+#endif
+
+
+#define ADJUST_ALLOC_SIZE 0
+/*
+#define ADJUST_ALLOC_SIZE (sizeof(void *) - 1)
+*/
+/*
+ Use (ADJUST_ALLOC_SIZE = (sizeof(void *) - 1)), if
+  MyAlloc() can return an address that is NOT a multiple of sizeof(void *).
+*/
+
+
+/*
+#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((char *)(p) - ((size_t)(UIntPtr)(p) & ((align) - 1))))
+*/
+#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((((UIntPtr)(p)) & ~((UIntPtr)(align) - 1))))
+
+#define MY_ALIGN_PTR_UP_PLUS(p, align) MY_ALIGN_PTR_DOWN(((char *)(p) + (align) + ADJUST_ALLOC_SIZE), align)
+
+
+#if (_POSIX_C_SOURCE >= 200112L) && !defined(_WIN32)
+ #define USE_posix_memalign
+#endif
+
+/*
+ This posix_memalign() is for test purposes only.
+ We also need special Free() function instead of free(),
+ if this posix_memalign() is used.
+*/
+
+/*
+static int posix_memalign(void **ptr, size_t align, size_t size)
+{
+ size_t newSize = size + align;
+ void *p;
+ void *pAligned;
+ *ptr = NULL;
+ if (newSize < size)
+ return 12; // ENOMEM
+ p = MyAlloc(newSize);
+ if (!p)
+ return 12; // ENOMEM
+ pAligned = MY_ALIGN_PTR_UP_PLUS(p, align);
+ ((void **)pAligned)[-1] = p;
+ *ptr = pAligned;
+ return 0;
+}
+*/
+
+/*
+ ALLOC_ALIGN_SIZE >= sizeof(void *)
+ ALLOC_ALIGN_SIZE >= cache_line_size
+*/
+
+#define ALLOC_ALIGN_SIZE ((size_t)1 << 7)
+
+static void *SzAlignedAlloc(ISzAllocPtr pp, size_t size)
+{
+ #ifndef USE_posix_memalign
+
+ void *p;
+ void *pAligned;
+ size_t newSize;
+ UNUSED_VAR(pp);
+
+  /* we could also allocate an additional dummy ALLOC_ALIGN_SIZE bytes after the aligned
+     block to prevent cache-line sharing with other allocated blocks */
+
+ newSize = size + ALLOC_ALIGN_SIZE * 1 + ADJUST_ALLOC_SIZE;
+ if (newSize < size)
+ return NULL;
+
+ p = MyAlloc(newSize);
+
+ if (!p)
+ return NULL;
+ pAligned = MY_ALIGN_PTR_UP_PLUS(p, ALLOC_ALIGN_SIZE);
+
+ Print(" size="); PrintHex(size, 8);
+ Print(" a_size="); PrintHex(newSize, 8);
+ Print(" ptr="); PrintAddr(p);
+ Print(" a_ptr="); PrintAddr(pAligned);
+ PrintLn();
+
+ ((void **)pAligned)[-1] = p;
+
+ return pAligned;
+
+ #else
+
+ void *p;
+ UNUSED_VAR(pp);
+ if (posix_memalign(&p, ALLOC_ALIGN_SIZE, size))
+ return NULL;
+
+ Print(" posix_memalign="); PrintAddr(p);
+ PrintLn();
+
+ return p;
+
+ #endif
+}
+
+
+static void SzAlignedFree(ISzAllocPtr pp, void *address)
+{
+ UNUSED_VAR(pp);
+ #ifndef USE_posix_memalign
+ if (address)
+ MyFree(((void **)address)[-1]);
+ #else
+ free(address);
+ #endif
+}
+
+
+const ISzAlloc g_AlignedAlloc = { SzAlignedAlloc, SzAlignedFree };
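g_AlignedAlloc wraps MyAlloc with the classic over-allocate-and-align trick: the returned pointer is aligned to ALLOC_ALIGN_SIZE (128 bytes here) and the original allocation is stashed just before it so SzAlignedFree can recover it. Usage is the ordinary ISzAlloc protocol; the size below is illustrative:

/* inside application code: */
void *block = ISzAlloc_Alloc(&g_AlignedAlloc, 4096);   /* 128-byte aligned, or NULL on failure */
/* ... block's address is a multiple of ALLOC_ALIGN_SIZE ... */
ISzAlloc_Free(&g_AlignedAlloc, block);                 /* releases the underlying allocation */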
+
+
+
+#define MY_ALIGN_PTR_DOWN_1(p) MY_ALIGN_PTR_DOWN(p, sizeof(void *))
+
+/* we align ptr to support cases where CAlignOffsetAlloc::offset is not a multiple of sizeof(void *) */
+#define REAL_BLOCK_PTR_VAR(p) ((void **)MY_ALIGN_PTR_DOWN_1(p))[-1]
+/*
+#define REAL_BLOCK_PTR_VAR(p) ((void **)(p))[-1]
+*/
+
+static void *AlignOffsetAlloc_Alloc(ISzAllocPtr pp, size_t size)
+{
+ CAlignOffsetAlloc *p = CONTAINER_FROM_VTBL(pp, CAlignOffsetAlloc, vt);
+ void *adr;
+ void *pAligned;
+ size_t newSize;
+ size_t extra;
+ size_t alignSize = (size_t)1 << p->numAlignBits;
+
+ if (alignSize < sizeof(void *))
+ alignSize = sizeof(void *);
+
+ if (p->offset >= alignSize)
+ return NULL;
+
+ /* we could also allocate additional dummy ALLOC_ALIGN_SIZE bytes after the aligned
+ block to prevent cache line sharing with other allocated blocks */
+ extra = p->offset & (sizeof(void *) - 1);
+ newSize = size + alignSize + extra + ADJUST_ALLOC_SIZE;
+ if (newSize < size)
+ return NULL;
+
+ adr = ISzAlloc_Alloc(p->baseAlloc, newSize);
+
+ if (!adr)
+ return NULL;
+
+ pAligned = (char *)MY_ALIGN_PTR_DOWN((char *)adr +
+ alignSize - p->offset + extra + ADJUST_ALLOC_SIZE, alignSize) + p->offset;
+
+ PrintLn();
+ Print("- Aligned: ");
+ Print(" size="); PrintHex(size, 8);
+ Print(" a_size="); PrintHex(newSize, 8);
+ Print(" ptr="); PrintAddr(adr);
+ Print(" a_ptr="); PrintAddr(pAligned);
+ PrintLn();
+
+ REAL_BLOCK_PTR_VAR(pAligned) = adr;
+
+ return pAligned;
+}
+
+
+static void AlignOffsetAlloc_Free(ISzAllocPtr pp, void *address)
+{
+ if (address)
+ {
+ CAlignOffsetAlloc *p = CONTAINER_FROM_VTBL(pp, CAlignOffsetAlloc, vt);
+ PrintLn();
+ Print("- Aligned Free: ");
+ PrintLn();
+ ISzAlloc_Free(p->baseAlloc, REAL_BLOCK_PTR_VAR(address));
+ }
+}
+
+
+void AlignOffsetAlloc_CreateVTable(CAlignOffsetAlloc *p)
+{
+ p->vt.Alloc = AlignOffsetAlloc_Alloc;
+ p->vt.Free = AlignOffsetAlloc_Free;
+}
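
SzAlignedAlloc above over-allocates with MyAlloc(), rounds the result up to an ALLOC_ALIGN_SIZE boundary, and stores the original pointer in the machine word just below the aligned address; SzAlignedFree reads it back via ((void **)address)[-1]. A minimal standalone sketch of the same pattern, assuming only malloc/free (my_aligned_alloc and my_aligned_free are illustrative names, not SDK functions):

#include <stdint.h>
#include <stdlib.h>

/* align must be a power of 2 */
static void *my_aligned_alloc(size_t size, size_t align)
{
  void *raw = malloc(size + align + sizeof(void *));
  uintptr_t a;
  void *aligned;
  if (!raw)
    return NULL;
  a = (uintptr_t)raw + sizeof(void *);
  aligned = (void *)((a + align - 1) & ~(uintptr_t)(align - 1));
  ((void **)aligned)[-1] = raw;     /* stash the pointer malloc() really returned */
  return aligned;
}

static void my_aligned_free(void *aligned)
{
  if (aligned)
    free(((void **)aligned)[-1]);   /* recover the stashed pointer and free it */
}
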
diff --git a/contrib/libs/lzmasdk/Alloc.h b/contrib/libs/lzmasdk/Alloc.h new file mode 100644 index 0000000000..3d796e5eee --- /dev/null +++ b/contrib/libs/lzmasdk/Alloc.h @@ -0,0 +1,51 @@ +/* Alloc.h -- Memory allocation functions
+2018-02-19 : Igor Pavlov : Public domain */
+
+#ifndef __COMMON_ALLOC_H
+#define __COMMON_ALLOC_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+void *MyAlloc(size_t size);
+void MyFree(void *address);
+
+#ifdef _WIN32
+
+void SetLargePageSize();
+
+void *MidAlloc(size_t size);
+void MidFree(void *address);
+void *BigAlloc(size_t size);
+void BigFree(void *address);
+
+#else
+
+#define MidAlloc(size) MyAlloc(size)
+#define MidFree(address) MyFree(address)
+#define BigAlloc(size) MyAlloc(size)
+#define BigFree(address) MyFree(address)
+
+#endif
+
+extern const ISzAlloc g_Alloc;
+extern const ISzAlloc g_BigAlloc;
+extern const ISzAlloc g_MidAlloc;
+extern const ISzAlloc g_AlignedAlloc;
+
+
+typedef struct
+{
+ ISzAlloc vt;
+ ISzAllocPtr baseAlloc;
+ unsigned numAlignBits; /* ((1 << numAlignBits) >= sizeof(void *)) */
+ size_t offset; /* (offset == (k * sizeof(void *)) && offset < (1 << numAlignBits) */
+} CAlignOffsetAlloc;
+
+void AlignOffsetAlloc_CreateVTable(CAlignOffsetAlloc *p);
+
+
+EXTERN_C_END
+
+#endif
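
CAlignOffsetAlloc wraps a base allocator and hands out pointers that sit a fixed offset past a (1 << numAlignBits)-byte boundary, subject to the constraints in the struct comments above. A minimal usage sketch, assuming the ISzAlloc_Alloc/ISzAlloc_Free macros from 7zTypes.h (the same ones Alloc.c uses):

#include "Alloc.h"

void demo_align_offset_alloc(void)
{
  CAlignOffsetAlloc a;
  void *block;
  AlignOffsetAlloc_CreateVTable(&a);
  a.baseAlloc = &g_Alloc;     /* plain MyAlloc/MyFree pair */
  a.numAlignBits = 6;         /* 1 << 6 = 64-byte alignment */
  a.offset = 16;              /* a multiple of sizeof(void *), less than 64 */
  block = ISzAlloc_Alloc(&a.vt, 4096);
  if (block)
  {
    /* here ((size_t)block & 63) == 16 */
    ISzAlloc_Free(&a.vt, block);
  }
}
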
diff --git a/contrib/libs/lzmasdk/Bra.c b/contrib/libs/lzmasdk/Bra.c new file mode 100644 index 0000000000..cbdcb290df --- /dev/null +++ b/contrib/libs/lzmasdk/Bra.c @@ -0,0 +1,230 @@ +/* Bra.c -- Converters for RISC code
+2017-04-04 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "CpuArch.h"
+#include "Bra.h"
+
+SizeT ARM_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ Byte *p;
+ const Byte *lim;
+ size &= ~(size_t)3;
+ ip += 4;
+ p = data;
+ lim = data + size;
+
+ if (encoding)
+
+ for (;;)
+ {
+ for (;;)
+ {
+ if (p >= lim)
+ return p - data;
+ p += 4;
+ if (p[-1] == 0xEB)
+ break;
+ }
+ {
+ UInt32 v = GetUi32(p - 4);
+ v <<= 2;
+ v += ip + (UInt32)(p - data);
+ v >>= 2;
+ v &= 0x00FFFFFF;
+ v |= 0xEB000000;
+ SetUi32(p - 4, v);
+ }
+ }
+
+ for (;;)
+ {
+ for (;;)
+ {
+ if (p >= lim)
+ return p - data;
+ p += 4;
+ if (p[-1] == 0xEB)
+ break;
+ }
+ {
+ UInt32 v = GetUi32(p - 4);
+ v <<= 2;
+ v -= ip + (UInt32)(p - data);
+ v >>= 2;
+ v &= 0x00FFFFFF;
+ v |= 0xEB000000;
+ SetUi32(p - 4, v);
+ }
+ }
+}
+
+
+SizeT ARMT_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ Byte *p;
+ const Byte *lim;
+ size &= ~(size_t)1;
+ p = data;
+ lim = data + size - 4;
+
+ if (encoding)
+
+ for (;;)
+ {
+ UInt32 b1;
+ for (;;)
+ {
+ UInt32 b3;
+ if (p > lim)
+ return p - data;
+ b1 = p[1];
+ b3 = p[3];
+ p += 2;
+ b1 ^= 8;
+ if ((b3 & b1) >= 0xF8)
+ break;
+ }
+ {
+ UInt32 v =
+ ((UInt32)b1 << 19)
+ + (((UInt32)p[1] & 0x7) << 8)
+ + (((UInt32)p[-2] << 11))
+ + (p[0]);
+
+ p += 2;
+ {
+ UInt32 cur = (ip + (UInt32)(p - data)) >> 1;
+ v += cur;
+ }
+
+ p[-4] = (Byte)(v >> 11);
+ p[-3] = (Byte)(0xF0 | ((v >> 19) & 0x7));
+ p[-2] = (Byte)v;
+ p[-1] = (Byte)(0xF8 | (v >> 8));
+ }
+ }
+
+ for (;;)
+ {
+ UInt32 b1;
+ for (;;)
+ {
+ UInt32 b3;
+ if (p > lim)
+ return p - data;
+ b1 = p[1];
+ b3 = p[3];
+ p += 2;
+ b1 ^= 8;
+ if ((b3 & b1) >= 0xF8)
+ break;
+ }
+ {
+ UInt32 v =
+ ((UInt32)b1 << 19)
+ + (((UInt32)p[1] & 0x7) << 8)
+ + (((UInt32)p[-2] << 11))
+ + (p[0]);
+
+ p += 2;
+ {
+ UInt32 cur = (ip + (UInt32)(p - data)) >> 1;
+ v -= cur;
+ }
+
+ /*
+ SetUi16(p - 4, (UInt16)(((v >> 11) & 0x7FF) | 0xF000));
+ SetUi16(p - 2, (UInt16)(v | 0xF800));
+ */
+
+ p[-4] = (Byte)(v >> 11);
+ p[-3] = (Byte)(0xF0 | ((v >> 19) & 0x7));
+ p[-2] = (Byte)v;
+ p[-1] = (Byte)(0xF8 | (v >> 8));
+ }
+ }
+}
+
+
+SizeT PPC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ Byte *p;
+ const Byte *lim;
+ size &= ~(size_t)3;
+ ip -= 4;
+ p = data;
+ lim = data + size;
+
+ for (;;)
+ {
+ for (;;)
+ {
+ if (p >= lim)
+ return p - data;
+ p += 4;
+ /* if ((v & 0xFC000003) == 0x48000001) */
+ if ((p[-4] & 0xFC) == 0x48 && (p[-1] & 3) == 1)
+ break;
+ }
+ {
+ UInt32 v = GetBe32(p - 4);
+ if (encoding)
+ v += ip + (UInt32)(p - data);
+ else
+ v -= ip + (UInt32)(p - data);
+ v &= 0x03FFFFFF;
+ v |= 0x48000000;
+ SetBe32(p - 4, v);
+ }
+ }
+}
+
+
+SizeT SPARC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ Byte *p;
+ const Byte *lim;
+ size &= ~(size_t)3;
+ ip -= 4;
+ p = data;
+ lim = data + size;
+
+ for (;;)
+ {
+ for (;;)
+ {
+ if (p >= lim)
+ return p - data;
+ /*
+ v = GetBe32(p);
+ p += 4;
+ m = v + ((UInt32)5 << 29);
+ m ^= (UInt32)7 << 29;
+ m += (UInt32)1 << 22;
+ if ((m & ((UInt32)0x1FF << 23)) == 0)
+ break;
+ */
+ p += 4;
+ if ((p[-4] == 0x40 && (p[-3] & 0xC0) == 0) ||
+ (p[-4] == 0x7F && (p[-3] >= 0xC0)))
+ break;
+ }
+ {
+ UInt32 v = GetBe32(p - 4);
+ v <<= 2;
+ if (encoding)
+ v += ip + (UInt32)(p - data);
+ else
+ v -= ip + (UInt32)(p - data);
+
+ v &= 0x01FFFFFF;
+ v -= (UInt32)1 << 24;
+ v ^= 0xFF000000;
+ v >>= 2;
+ v |= 0x40000000;
+ SetBe32(p - 4, v);
+ }
+ }
+}
diff --git a/contrib/libs/lzmasdk/Bra.h b/contrib/libs/lzmasdk/Bra.h new file mode 100644 index 0000000000..aba8dce14f --- /dev/null +++ b/contrib/libs/lzmasdk/Bra.h @@ -0,0 +1,64 @@ +/* Bra.h -- Branch converters for executables
+2013-01-18 : Igor Pavlov : Public domain */
+
+#ifndef __BRA_H
+#define __BRA_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+/*
+These functions convert relative addresses to absolute addresses
+in CALL instructions to increase the compression ratio.
+
+ In:
+ data - data buffer
+ size - size of data
+ ip - current virtual Instruction Pointer (IP) value
+ state - state variable for x86 converter
+ encoding - 0 (for decoding), 1 (for encoding)
+
+ Out:
+ state - state variable for x86 converter
+
+ Returns:
+ The number of processed bytes. If you process the data with multiple calls,
+ each next call must start at the first byte after the block of already processed bytes.
+
+ Type Endian Alignment LookAhead
+
+ x86 little 1 4
+ ARMT little 2 2
+ ARM little 4 0
+ PPC big 4 0
+ SPARC big 4 0
+ IA64 little 16 0
+
+ size must be >= Alignment + LookAhead, if it's not last block.
+ If (size < Alignment + LookAhead), converter returns 0.
+
+ Example:
+
+ UInt32 ip = 0;
+ for ()
+ {
+ ; size must be >= Alignment + LookAhead, if it's not last block
+ SizeT processed = Convert(data, size, ip, 1);
+ data += processed;
+ size -= processed;
+ ip += processed;
+ }
+*/
+
+#define x86_Convert_Init(state) { state = 0; }
+SizeT x86_Convert(Byte *data, SizeT size, UInt32 ip, UInt32 *state, int encoding);
+SizeT ARM_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT ARMT_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT PPC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT SPARC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT IA64_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+
+EXTERN_C_END
+
+#endif
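
The streaming loop sketched in the header comment, written out for the x86 (BCJ) converter. read_block and write_block are placeholder I/O routines, not SDK functions; the unprocessed tail of each block is carried into the next call, and ip and state persist across calls:

#include <string.h>
#include "Bra.h"

extern SizeT read_block(Byte *buf, SizeT size);         /* placeholder I/O */
extern void write_block(const Byte *buf, SizeT size);   /* placeholder I/O */

void bcj_encode_stream(void)
{
  Byte buf[1 << 16];
  UInt32 ip = 0;
  UInt32 state;
  SizeT have = 0;
  x86_Convert_Init(state);
  for (;;)
  {
    SizeT got = read_block(buf + have, sizeof(buf) - have);
    SizeT processed;
    have += got;
    processed = x86_Convert(buf, have, ip, &state, 1 /* encode */);
    write_block(buf, processed);
    memmove(buf, buf + processed, have - processed);    /* keep the unprocessed tail */
    have -= processed;
    ip += (UInt32)processed;
    if (got == 0)
    {
      write_block(buf, have);   /* the final tail stays unconverted */
      break;
    }
  }
}
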
diff --git a/contrib/libs/lzmasdk/Bra86.c b/contrib/libs/lzmasdk/Bra86.c new file mode 100644 index 0000000000..a6463c63ba --- /dev/null +++ b/contrib/libs/lzmasdk/Bra86.c @@ -0,0 +1,82 @@ +/* Bra86.c -- Converter for x86 code (BCJ)
+2017-04-03 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "Bra.h"
+
+#define Test86MSByte(b) ((((b) + 1) & 0xFE) == 0)
+
+SizeT x86_Convert(Byte *data, SizeT size, UInt32 ip, UInt32 *state, int encoding)
+{
+ SizeT pos = 0;
+ UInt32 mask = *state & 7;
+ if (size < 5)
+ return 0;
+ size -= 4;
+ ip += 5;
+
+ for (;;)
+ {
+ Byte *p = data + pos;
+ const Byte *limit = data + size;
+ for (; p < limit; p++)
+ if ((*p & 0xFE) == 0xE8)
+ break;
+
+ {
+ SizeT d = (SizeT)(p - data - pos);
+ pos = (SizeT)(p - data);
+ if (p >= limit)
+ {
+ *state = (d > 2 ? 0 : mask >> (unsigned)d);
+ return pos;
+ }
+ if (d > 2)
+ mask = 0;
+ else
+ {
+ mask >>= (unsigned)d;
+ if (mask != 0 && (mask > 4 || mask == 3 || Test86MSByte(p[(size_t)(mask >> 1) + 1])))
+ {
+ mask = (mask >> 1) | 4;
+ pos++;
+ continue;
+ }
+ }
+ }
+
+ if (Test86MSByte(p[4]))
+ {
+ UInt32 v = ((UInt32)p[4] << 24) | ((UInt32)p[3] << 16) | ((UInt32)p[2] << 8) | ((UInt32)p[1]);
+ UInt32 cur = ip + (UInt32)pos;
+ pos += 5;
+ if (encoding)
+ v += cur;
+ else
+ v -= cur;
+ if (mask != 0)
+ {
+ unsigned sh = (mask & 6) << 2;
+ if (Test86MSByte((Byte)(v >> sh)))
+ {
+ v ^= (((UInt32)0x100 << sh) - 1);
+ if (encoding)
+ v += cur;
+ else
+ v -= cur;
+ }
+ mask = 0;
+ }
+ p[1] = (Byte)v;
+ p[2] = (Byte)(v >> 8);
+ p[3] = (Byte)(v >> 16);
+ p[4] = (Byte)(0 - ((v >> 24) & 1));
+ }
+ else
+ {
+ mask = (mask >> 1) | 4;
+ pos++;
+ }
+ }
+}
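
With the same starting ip and a freshly initialized state, a decode pass (encoding = 0) undoes an encode pass (encoding = 1) over the same data. A small self-check sketch; the 0xE8 byte below is an x86 CALL opcode followed by a rel32 operand, the 0x90 bytes are NOP padding:

#include <string.h>
#include "Bra.h"

int bcj_round_trip_ok(void)
{
  Byte original[16] = { 0x90, 0x90, 0xE8, 0x10, 0x00, 0x00, 0x00, 0x90,
                        0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90 };
  Byte buf[16];
  UInt32 state;
  memcpy(buf, original, sizeof(buf));

  x86_Convert_Init(state);
  x86_Convert(buf, sizeof(buf), 0, &state, 1);   /* relative -> absolute */

  x86_Convert_Init(state);
  x86_Convert(buf, sizeof(buf), 0, &state, 0);   /* absolute -> relative */

  return memcmp(buf, original, sizeof(buf)) == 0;
}
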
diff --git a/contrib/libs/lzmasdk/BraIA64.c b/contrib/libs/lzmasdk/BraIA64.c new file mode 100644 index 0000000000..2656907a0b --- /dev/null +++ b/contrib/libs/lzmasdk/BraIA64.c @@ -0,0 +1,53 @@ +/* BraIA64.c -- Converter for IA-64 code
+2017-01-26 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "CpuArch.h"
+#include "Bra.h"
+
+SizeT IA64_Convert(Byte *data, SizeT size, UInt32 ip, int encoding)
+{
+ SizeT i;
+ if (size < 16)
+ return 0;
+ size -= 16;
+ i = 0;
+ do
+ {
+ unsigned m = ((UInt32)0x334B0000 >> (data[i] & 0x1E)) & 3;
+ if (m)
+ {
+ m++;
+ do
+ {
+ Byte *p = data + (i + (size_t)m * 5 - 8);
+ if (((p[3] >> m) & 15) == 5
+ && (((p[-1] | ((UInt32)p[0] << 8)) >> m) & 0x70) == 0)
+ {
+ unsigned raw = GetUi32(p);
+ unsigned v = raw >> m;
+ v = (v & 0xFFFFF) | ((v & (1 << 23)) >> 3);
+
+ v <<= 4;
+ if (encoding)
+ v += ip + (UInt32)i;
+ else
+ v -= ip + (UInt32)i;
+ v >>= 4;
+
+ v &= 0x1FFFFF;
+ v += 0x700000;
+ v &= 0x8FFFFF;
+ raw &= ~((UInt32)0x8FFFFF << m);
+ raw |= (v << m);
+ SetUi32(p, raw);
+ }
+ }
+ while (++m <= 4);
+ }
+ i += 16;
+ }
+ while (i <= size);
+ return i;
+}
diff --git a/contrib/libs/lzmasdk/Compiler.h b/contrib/libs/lzmasdk/Compiler.h new file mode 100644 index 0000000000..c788648cd2 --- /dev/null +++ b/contrib/libs/lzmasdk/Compiler.h @@ -0,0 +1,33 @@ +/* Compiler.h
+2017-04-03 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_COMPILER_H
+#define __7Z_COMPILER_H
+
+#ifdef _MSC_VER
+
+ #ifdef UNDER_CE
+ #define RPC_NO_WINDOWS_H
+ /* #pragma warning(disable : 4115) // '_RPC_ASYNC_STATE' : named type definition in parentheses */
+ #pragma warning(disable : 4201) // nonstandard extension used : nameless struct/union
+ #pragma warning(disable : 4214) // nonstandard extension used : bit field types other than int
+ #endif
+
+ #if _MSC_VER >= 1300
+ #pragma warning(disable : 4996) // This function or variable may be unsafe
+ #else
+ #pragma warning(disable : 4511) // copy constructor could not be generated
+ #pragma warning(disable : 4512) // assignment operator could not be generated
+ #pragma warning(disable : 4514) // unreferenced inline function has been removed
+ #pragma warning(disable : 4702) // unreachable code
+ #pragma warning(disable : 4710) // not inlined
+ #pragma warning(disable : 4714) // function marked as __forceinline not inlined
+ #pragma warning(disable : 4786) // identifier was truncated to '255' characters in the debug information
+ #endif
+
+#endif
+
+#define UNUSED_VAR(x) (void)x;
+/* #define UNUSED_VAR(x) x=x; */
+
+#endif
diff --git a/contrib/libs/lzmasdk/CpuArch.c b/contrib/libs/lzmasdk/CpuArch.c new file mode 100644 index 0000000000..ff1890e7fe --- /dev/null +++ b/contrib/libs/lzmasdk/CpuArch.c @@ -0,0 +1,218 @@ +/* CpuArch.c -- CPU specific code
+2018-02-18: Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "CpuArch.h"
+
+#ifdef MY_CPU_X86_OR_AMD64
+
+#if (defined(_MSC_VER) && !defined(MY_CPU_AMD64)) || defined(__GNUC__)
+#define USE_ASM
+#endif
+
+#if !defined(USE_ASM) && _MSC_VER >= 1500
+#include <intrin.h>
+#endif
+
+#if defined(USE_ASM) && !defined(MY_CPU_AMD64)
+static UInt32 CheckFlag(UInt32 flag)
+{
+ #ifdef _MSC_VER
+ __asm pushfd;
+ __asm pop EAX;
+ __asm mov EDX, EAX;
+ __asm xor EAX, flag;
+ __asm push EAX;
+ __asm popfd;
+ __asm pushfd;
+ __asm pop EAX;
+ __asm xor EAX, EDX;
+ __asm push EDX;
+ __asm popfd;
+ __asm and flag, EAX;
+ #else
+ __asm__ __volatile__ (
+ "pushf\n\t"
+ "pop %%EAX\n\t"
+ "movl %%EAX,%%EDX\n\t"
+ "xorl %0,%%EAX\n\t"
+ "push %%EAX\n\t"
+ "popf\n\t"
+ "pushf\n\t"
+ "pop %%EAX\n\t"
+ "xorl %%EDX,%%EAX\n\t"
+ "push %%EDX\n\t"
+ "popf\n\t"
+ "andl %%EAX, %0\n\t":
+ "=c" (flag) : "c" (flag) :
+ "%eax", "%edx");
+ #endif
+ return flag;
+}
+#define CHECK_CPUID_IS_SUPPORTED if (CheckFlag(1 << 18) == 0 || CheckFlag(1 << 21) == 0) return False;
+#else
+#define CHECK_CPUID_IS_SUPPORTED
+#endif
+
+void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
+{
+ #ifdef USE_ASM
+
+ #ifdef _MSC_VER
+
+ UInt32 a2, b2, c2, d2;
+ __asm xor EBX, EBX;
+ __asm xor ECX, ECX;
+ __asm xor EDX, EDX;
+ __asm mov EAX, function;
+ __asm cpuid;
+ __asm mov a2, EAX;
+ __asm mov b2, EBX;
+ __asm mov c2, ECX;
+ __asm mov d2, EDX;
+
+ *a = a2;
+ *b = b2;
+ *c = c2;
+ *d = d2;
+
+ #else
+
+ __asm__ __volatile__ (
+ #if defined(MY_CPU_AMD64) && defined(__PIC__)
+ "mov %%rbx, %%rdi;"
+ "cpuid;"
+ "xchg %%rbx, %%rdi;"
+ : "=a" (*a) ,
+ "=D" (*b) ,
+ #elif defined(MY_CPU_X86) && defined(__PIC__)
+ "mov %%ebx, %%edi;"
+ "cpuid;"
+ "xchgl %%ebx, %%edi;"
+ : "=a" (*a) ,
+ "=D" (*b) ,
+ #else
+ "cpuid"
+ : "=a" (*a) ,
+ "=b" (*b) ,
+ #endif
+ "=c" (*c) ,
+ "=d" (*d)
+ : "0" (function)) ;
+
+ #endif
+
+ #else
+
+ int CPUInfo[4];
+ __cpuid(CPUInfo, function);
+ *a = CPUInfo[0];
+ *b = CPUInfo[1];
+ *c = CPUInfo[2];
+ *d = CPUInfo[3];
+
+ #endif
+}
+
+BoolInt x86cpuid_CheckAndRead(Cx86cpuid *p)
+{
+ CHECK_CPUID_IS_SUPPORTED
+ MyCPUID(0, &p->maxFunc, &p->vendor[0], &p->vendor[2], &p->vendor[1]);
+ MyCPUID(1, &p->ver, &p->b, &p->c, &p->d);
+ return True;
+}
+
+static const UInt32 kVendors[][3] =
+{
+ { 0x756E6547, 0x49656E69, 0x6C65746E},
+ { 0x68747541, 0x69746E65, 0x444D4163},
+ { 0x746E6543, 0x48727561, 0x736C7561}
+};
+
+int x86cpuid_GetFirm(const Cx86cpuid *p)
+{
+ unsigned i;
+ for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[i]); i++)
+ {
+ const UInt32 *v = kVendors[i];
+ if (v[0] == p->vendor[0] &&
+ v[1] == p->vendor[1] &&
+ v[2] == p->vendor[2])
+ return (int)i;
+ }
+ return -1;
+}
+
+BoolInt CPU_Is_InOrder()
+{
+ Cx86cpuid p;
+ int firm;
+ UInt32 family, model;
+ if (!x86cpuid_CheckAndRead(&p))
+ return True;
+
+ family = x86cpuid_GetFamily(p.ver);
+ model = x86cpuid_GetModel(p.ver);
+
+ firm = x86cpuid_GetFirm(&p);
+
+ switch (firm)
+ {
+ case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && (
+ /* In-Order Atom CPU */
+ model == 0x1C /* 45 nm, N4xx, D4xx, N5xx, D5xx, 230, 330 */
+ || model == 0x26 /* 45 nm, Z6xx */
+ || model == 0x27 /* 32 nm, Z2460 */
+ || model == 0x35 /* 32 nm, Z2760 */
+ || model == 0x36 /* 32 nm, N2xxx, D2xxx */
+ )));
+ case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
+ case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
+ }
+ return True;
+}
+
+#if !defined(MY_CPU_AMD64) && defined(_WIN32)
+#include <windows.h>
+static BoolInt CPU_Sys_Is_SSE_Supported()
+{
+ OSVERSIONINFO vi;
+ vi.dwOSVersionInfoSize = sizeof(vi);
+ if (!GetVersionEx(&vi))
+ return False;
+ return (vi.dwMajorVersion >= 5);
+}
+#define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False;
+#else
+#define CHECK_SYS_SSE_SUPPORT
+#endif
+
+BoolInt CPU_Is_Aes_Supported()
+{
+ Cx86cpuid p;
+ CHECK_SYS_SSE_SUPPORT
+ if (!x86cpuid_CheckAndRead(&p))
+ return False;
+ return (p.c >> 25) & 1;
+}
+
+BoolInt CPU_IsSupported_PageGB()
+{
+ Cx86cpuid cpuid;
+ if (!x86cpuid_CheckAndRead(&cpuid))
+ return False;
+ {
+ UInt32 d[4] = { 0 };
+ MyCPUID(0x80000000, &d[0], &d[1], &d[2], &d[3]);
+ if (d[0] < 0x80000001)
+ return False;
+ }
+ {
+ UInt32 d[4] = { 0 };
+ MyCPUID(0x80000001, &d[0], &d[1], &d[2], &d[3]);
+ return (d[3] >> 26) & 1;
+ }
+}
+
+#endif
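
These helpers are compiled only for x86/x64 targets (MY_CPU_X86_OR_AMD64). A small sketch of querying them; the output formatting is illustrative:

#include <stdio.h>
#include "CpuArch.h"

void print_cpu_features(void)
{
#ifdef MY_CPU_X86_OR_AMD64
  Cx86cpuid info;
  if (x86cpuid_CheckAndRead(&info))
  {
    printf("vendor index: %d\n", x86cpuid_GetFirm(&info));
    printf("family      : %u\n", (unsigned)x86cpuid_GetFamily(info.ver));
    printf("model       : %u\n", (unsigned)x86cpuid_GetModel(info.ver));
  }
  printf("AES-NI      : %d\n", (int)CPU_Is_Aes_Supported());
  printf("1GB pages   : %d\n", (int)CPU_IsSupported_PageGB());
#endif
}
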
diff --git a/contrib/libs/lzmasdk/CpuArch.h b/contrib/libs/lzmasdk/CpuArch.h new file mode 100644 index 0000000000..5f74c1c0cb --- /dev/null +++ b/contrib/libs/lzmasdk/CpuArch.h @@ -0,0 +1,336 @@ +/* CpuArch.h -- CPU specific code
+2018-02-18 : Igor Pavlov : Public domain */
+
+#ifndef __CPU_ARCH_H
+#define __CPU_ARCH_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+/*
+MY_CPU_LE means that CPU is LITTLE ENDIAN.
+MY_CPU_BE means that CPU is BIG ENDIAN.
+If MY_CPU_LE and MY_CPU_BE are not defined, we don't know about ENDIANNESS of platform.
+
+MY_CPU_LE_UNALIGN means that CPU is LITTLE ENDIAN and CPU supports unaligned memory accesses.
+*/
+
+#if defined(_M_X64) \
+ || defined(_M_AMD64) \
+ || defined(__x86_64__) \
+ || defined(__AMD64__) \
+ || defined(__amd64__)
+ #define MY_CPU_AMD64
+ #ifdef __ILP32__
+ #define MY_CPU_NAME "x32"
+ #else
+ #define MY_CPU_NAME "x64"
+ #endif
+ #define MY_CPU_64BIT
+#endif
+
+
+#if defined(_M_IX86) \
+ || defined(__i386__)
+ #define MY_CPU_X86
+ #define MY_CPU_NAME "x86"
+ #define MY_CPU_32BIT
+#endif
+
+
+#if defined(_M_ARM64) \
+ || defined(__AARCH64EL__) \
+ || defined(__AARCH64EB__) \
+ || defined(__aarch64__)
+ #define MY_CPU_ARM64
+ #define MY_CPU_NAME "arm64"
+ #define MY_CPU_64BIT
+#endif
+
+
+#if defined(_M_ARM) \
+ || defined(_M_ARM_NT) \
+ || defined(_M_ARMT) \
+ || defined(__arm__) \
+ || defined(__thumb__) \
+ || defined(__ARMEL__) \
+ || defined(__ARMEB__) \
+ || defined(__THUMBEL__) \
+ || defined(__THUMBEB__)
+ #define MY_CPU_ARM
+ #define MY_CPU_NAME "arm"
+ #define MY_CPU_32BIT
+#endif
+
+
+#if defined(_M_IA64) \
+ || defined(__ia64__)
+ #define MY_CPU_IA64
+ #define MY_CPU_NAME "ia64"
+ #define MY_CPU_64BIT
+#endif
+
+
+#if defined(__mips64) \
+ || defined(__mips64__) \
+ || (defined(__mips) && (__mips == 64 || __mips == 4 || __mips == 3))
+ #define MY_CPU_NAME "mips64"
+ #define MY_CPU_64BIT
+#elif defined(__mips__)
+ #define MY_CPU_NAME "mips"
+ /* #define MY_CPU_32BIT */
+#endif
+
+
+#if defined(__ppc64__) \
+ || defined(__powerpc64__)
+ #ifdef __ILP32__
+ #define MY_CPU_NAME "ppc64-32"
+ #else
+ #define MY_CPU_NAME "ppc64"
+ #endif
+ #define MY_CPU_64BIT
+#elif defined(__ppc__) \
+ || defined(__powerpc__)
+ #define MY_CPU_NAME "ppc"
+ #define MY_CPU_32BIT
+#endif
+
+
+#if defined(__sparc64__)
+ #define MY_CPU_NAME "sparc64"
+ #define MY_CPU_64BIT
+#elif defined(__sparc__)
+ #define MY_CPU_NAME "sparc"
+ /* #define MY_CPU_32BIT */
+#endif
+
+
+#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64)
+#define MY_CPU_X86_OR_AMD64
+#endif
+
+
+#ifdef _WIN32
+
+ #ifdef MY_CPU_ARM
+ #define MY_CPU_ARM_LE
+ #endif
+
+ #ifdef MY_CPU_ARM64
+ #define MY_CPU_ARM64_LE
+ #endif
+
+ #ifdef _M_IA64
+ #define MY_CPU_IA64_LE
+ #endif
+
+#endif
+
+
+#if defined(MY_CPU_X86_OR_AMD64) \
+ || defined(MY_CPU_ARM_LE) \
+ || defined(MY_CPU_ARM64_LE) \
+ || defined(MY_CPU_IA64_LE) \
+ || defined(__LITTLE_ENDIAN__) \
+ || defined(__ARMEL__) \
+ || defined(__THUMBEL__) \
+ || defined(__AARCH64EL__) \
+ || defined(__MIPSEL__) \
+ || defined(__MIPSEL) \
+ || defined(_MIPSEL) \
+ || defined(__BFIN__) \
+ || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
+ #define MY_CPU_LE
+#endif
+
+#if defined(__BIG_ENDIAN__) \
+ || defined(__ARMEB__) \
+ || defined(__THUMBEB__) \
+ || defined(__AARCH64EB__) \
+ || defined(__MIPSEB__) \
+ || defined(__MIPSEB) \
+ || defined(_MIPSEB) \
+ || defined(__m68k__) \
+ || defined(__s390__) \
+ || defined(__s390x__) \
+ || defined(__zarch__) \
+ || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+ #define MY_CPU_BE
+#endif
+
+
+#if defined(MY_CPU_LE) && defined(MY_CPU_BE)
+ #error Stop_Compiling_Bad_Endian
+#endif
+
+
+#if defined(MY_CPU_32BIT) && defined(MY_CPU_64BIT)
+ #error Stop_Compiling_Bad_32_64_BIT
+#endif
+
+
+#ifndef MY_CPU_NAME
+ #ifdef MY_CPU_LE
+ #define MY_CPU_NAME "LE"
+ #elif defined(MY_CPU_BE)
+ #define MY_CPU_NAME "BE"
+ #else
+ /*
+ #define MY_CPU_NAME ""
+ */
+ #endif
+#endif
+
+
+
+
+
+#ifdef MY_CPU_LE
+ #if defined(MY_CPU_X86_OR_AMD64) \
+ || defined(MY_CPU_ARM64) \
+ || defined(__ARM_FEATURE_UNALIGNED)
+ #define MY_CPU_LE_UNALIGN
+ #endif
+#endif
+
+
+#ifdef MY_CPU_LE_UNALIGN
+
+#define GetUi16(p) (*(const UInt16 *)(const void *)(p))
+#define GetUi32(p) (*(const UInt32 *)(const void *)(p))
+#define GetUi64(p) (*(const UInt64 *)(const void *)(p))
+
+#define SetUi16(p, v) { *(UInt16 *)(p) = (v); }
+#define SetUi32(p, v) { *(UInt32 *)(p) = (v); }
+#define SetUi64(p, v) { *(UInt64 *)(p) = (v); }
+
+#else
+
+#define GetUi16(p) ( (UInt16) ( \
+ ((const Byte *)(p))[0] | \
+ ((UInt16)((const Byte *)(p))[1] << 8) ))
+
+#define GetUi32(p) ( \
+ ((const Byte *)(p))[0] | \
+ ((UInt32)((const Byte *)(p))[1] << 8) | \
+ ((UInt32)((const Byte *)(p))[2] << 16) | \
+ ((UInt32)((const Byte *)(p))[3] << 24))
+
+#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))
+
+#define SetUi16(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
+ _ppp_[0] = (Byte)_vvv_; \
+ _ppp_[1] = (Byte)(_vvv_ >> 8); }
+
+#define SetUi32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
+ _ppp_[0] = (Byte)_vvv_; \
+ _ppp_[1] = (Byte)(_vvv_ >> 8); \
+ _ppp_[2] = (Byte)(_vvv_ >> 16); \
+ _ppp_[3] = (Byte)(_vvv_ >> 24); }
+
+#define SetUi64(p, v) { Byte *_ppp2_ = (Byte *)(p); UInt64 _vvv2_ = (v); \
+ SetUi32(_ppp2_ , (UInt32)_vvv2_); \
+ SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)); }
+
+#endif
+
+#ifdef __has_builtin
+ #define MY__has_builtin(x) __has_builtin(x)
+#else
+ #define MY__has_builtin(x) 0
+#endif
+
+#if defined(MY_CPU_LE_UNALIGN) && /* defined(_WIN64) && */ (_MSC_VER >= 1300)
+
+/* Note: we use the bswap instruction, which is not supported on the 386 CPU */
+
+#include <stdlib.h>
+
+#pragma intrinsic(_byteswap_ushort)
+#pragma intrinsic(_byteswap_ulong)
+#pragma intrinsic(_byteswap_uint64)
+
+/* #define GetBe16(p) _byteswap_ushort(*(const UInt16 *)(const Byte *)(p)) */
+#define GetBe32(p) _byteswap_ulong(*(const UInt32 *)(const Byte *)(p))
+#define GetBe64(p) _byteswap_uint64(*(const UInt64 *)(const Byte *)(p))
+
+#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = _byteswap_ulong(v)
+
+#elif defined(MY_CPU_LE_UNALIGN) && ( \
+ (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
+ || (defined(__clang__) && MY__has_builtin(__builtin_bswap16)) )
+
+/* #define GetBe16(p) __builtin_bswap16(*(const UInt16 *)(const Byte *)(p)) */
+#define GetBe32(p) __builtin_bswap32(*(const UInt32 *)(const Byte *)(p))
+#define GetBe64(p) __builtin_bswap64(*(const UInt64 *)(const Byte *)(p))
+
+#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = __builtin_bswap32(v)
+
+#else
+
+#define GetBe32(p) ( \
+ ((UInt32)((const Byte *)(p))[0] << 24) | \
+ ((UInt32)((const Byte *)(p))[1] << 16) | \
+ ((UInt32)((const Byte *)(p))[2] << 8) | \
+ ((const Byte *)(p))[3] )
+
+#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4))
+
+#define SetBe32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
+ _ppp_[0] = (Byte)(_vvv_ >> 24); \
+ _ppp_[1] = (Byte)(_vvv_ >> 16); \
+ _ppp_[2] = (Byte)(_vvv_ >> 8); \
+ _ppp_[3] = (Byte)_vvv_; }
+
+#endif
+
+
+#ifndef GetBe16
+
+#define GetBe16(p) ( (UInt16) ( \
+ ((UInt16)((const Byte *)(p))[0] << 8) | \
+ ((const Byte *)(p))[1] ))
+
+#endif
+
+
+
+#ifdef MY_CPU_X86_OR_AMD64
+
+typedef struct
+{
+ UInt32 maxFunc;
+ UInt32 vendor[3];
+ UInt32 ver;
+ UInt32 b;
+ UInt32 c;
+ UInt32 d;
+} Cx86cpuid;
+
+enum
+{
+ CPU_FIRM_INTEL,
+ CPU_FIRM_AMD,
+ CPU_FIRM_VIA
+};
+
+void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d);
+
+BoolInt x86cpuid_CheckAndRead(Cx86cpuid *p);
+int x86cpuid_GetFirm(const Cx86cpuid *p);
+
+#define x86cpuid_GetFamily(ver) (((ver >> 16) & 0xFF0) | ((ver >> 8) & 0xF))
+#define x86cpuid_GetModel(ver) (((ver >> 12) & 0xF0) | ((ver >> 4) & 0xF))
+#define x86cpuid_GetStepping(ver) (ver & 0xF)
+
+BoolInt CPU_Is_InOrder();
+BoolInt CPU_Is_Aes_Supported();
+BoolInt CPU_IsSupported_PageGB();
+
+#endif
+
+EXTERN_C_END
+
+#endif
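
GetUi32/SetUi32 read and write 32-bit values in little-endian byte order and GetBe32/SetBe32 in big-endian order, whether the fast unaligned/bswap paths or the byte-by-byte fallbacks were selected at compile time. A small round-trip sketch:

#include "CpuArch.h"

int byteorder_helpers_ok(void)
{
  Byte buf[8];
  SetUi32(buf, 0x11223344);        /* little-endian layout: 44 33 22 11 */
  SetBe32(buf + 4, 0x11223344);    /* big-endian layout:    11 22 33 44 */
  return GetUi32(buf) == 0x11223344
      && GetBe32(buf + 4) == 0x11223344
      && buf[0] == 0x44 && buf[4] == 0x11;
}
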
diff --git a/contrib/libs/lzmasdk/LzFind.c b/contrib/libs/lzmasdk/LzFind.c new file mode 100644 index 0000000000..4eefc17dd2 --- /dev/null +++ b/contrib/libs/lzmasdk/LzFind.c @@ -0,0 +1,1127 @@ +/* LzFind.c -- Match finder for LZ algorithms
+2018-07-08 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+#include "LzFind.h"
+#include "LzHash.h"
+
+#define kEmptyHashValue 0
+#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
+#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */
+#define kNormalizeMask (~(UInt32)(kNormalizeStepMin - 1))
+#define kMaxHistorySize ((UInt32)7 << 29)
+
+#define kStartMaxLen 3
+
+static void LzInWindow_Free(CMatchFinder *p, ISzAllocPtr alloc)
+{
+ if (!p->directInput)
+ {
+ ISzAlloc_Free(alloc, p->bufferBase);
+ p->bufferBase = NULL;
+ }
+}
+
+/* (keepSizeBefore + keepSizeAfter + keepSizeReserv) must be < 4G */
+
+static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAllocPtr alloc)
+{
+ UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;
+ if (p->directInput)
+ {
+ p->blockSize = blockSize;
+ return 1;
+ }
+ if (!p->bufferBase || p->blockSize != blockSize)
+ {
+ LzInWindow_Free(p, alloc);
+ p->blockSize = blockSize;
+ p->bufferBase = (Byte *)ISzAlloc_Alloc(alloc, (size_t)blockSize);
+ }
+ return (p->bufferBase != NULL);
+}
+
+Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }
+
+UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; }
+
+void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue)
+{
+ p->posLimit -= subValue;
+ p->pos -= subValue;
+ p->streamPos -= subValue;
+}
+
+static void MatchFinder_ReadBlock(CMatchFinder *p)
+{
+ if (p->streamEndWasReached || p->result != SZ_OK)
+ return;
+
+ /* We use (p->streamPos - p->pos) value. (p->streamPos < p->pos) is allowed. */
+
+ if (p->directInput)
+ {
+ UInt32 curSize = 0xFFFFFFFF - (p->streamPos - p->pos);
+ if (curSize > p->directInputRem)
+ curSize = (UInt32)p->directInputRem;
+ p->directInputRem -= curSize;
+ p->streamPos += curSize;
+ if (p->directInputRem == 0)
+ p->streamEndWasReached = 1;
+ return;
+ }
+
+ for (;;)
+ {
+ Byte *dest = p->buffer + (p->streamPos - p->pos);
+ size_t size = (p->bufferBase + p->blockSize - dest);
+ if (size == 0)
+ return;
+
+ p->result = ISeqInStream_Read(p->stream, dest, &size);
+ if (p->result != SZ_OK)
+ return;
+ if (size == 0)
+ {
+ p->streamEndWasReached = 1;
+ return;
+ }
+ p->streamPos += (UInt32)size;
+ if (p->streamPos - p->pos > p->keepSizeAfter)
+ return;
+ }
+}
+
+void MatchFinder_MoveBlock(CMatchFinder *p)
+{
+ memmove(p->bufferBase,
+ p->buffer - p->keepSizeBefore,
+ (size_t)(p->streamPos - p->pos) + p->keepSizeBefore);
+ p->buffer = p->bufferBase + p->keepSizeBefore;
+}
+
+int MatchFinder_NeedMove(CMatchFinder *p)
+{
+ if (p->directInput)
+ return 0;
+ /* if (p->streamEndWasReached) return 0; */
+ return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);
+}
+
+void MatchFinder_ReadIfRequired(CMatchFinder *p)
+{
+ if (p->streamEndWasReached)
+ return;
+ if (p->keepSizeAfter >= p->streamPos - p->pos)
+ MatchFinder_ReadBlock(p);
+}
+
+static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p)
+{
+ if (MatchFinder_NeedMove(p))
+ MatchFinder_MoveBlock(p);
+ MatchFinder_ReadBlock(p);
+}
+
+static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
+{
+ p->cutValue = 32;
+ p->btMode = 1;
+ p->numHashBytes = 4;
+ p->bigHash = 0;
+}
+
+#define kCrcPoly 0xEDB88320
+
+void MatchFinder_Construct(CMatchFinder *p)
+{
+ unsigned i;
+ p->bufferBase = NULL;
+ p->directInput = 0;
+ p->hash = NULL;
+ p->expectedDataSize = (UInt64)(Int64)-1;
+ MatchFinder_SetDefaultSettings(p);
+
+ for (i = 0; i < 256; i++)
+ {
+ UInt32 r = (UInt32)i;
+ unsigned j;
+ for (j = 0; j < 8; j++)
+ r = (r >> 1) ^ (kCrcPoly & ((UInt32)0 - (r & 1)));
+ p->crc[i] = r;
+ }
+}
+
+static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAllocPtr alloc)
+{
+ ISzAlloc_Free(alloc, p->hash);
+ p->hash = NULL;
+}
+
+void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc)
+{
+ MatchFinder_FreeThisClassMemory(p, alloc);
+ LzInWindow_Free(p, alloc);
+}
+
+static CLzRef* AllocRefs(size_t num, ISzAllocPtr alloc)
+{
+ size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
+ if (sizeInBytes / sizeof(CLzRef) != num)
+ return NULL;
+ return (CLzRef *)ISzAlloc_Alloc(alloc, sizeInBytes);
+}
+
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
+ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+ ISzAllocPtr alloc)
+{
+ UInt32 sizeReserv;
+
+ if (historySize > kMaxHistorySize)
+ {
+ MatchFinder_Free(p, alloc);
+ return 0;
+ }
+
+ sizeReserv = historySize >> 1;
+ if (historySize >= ((UInt32)3 << 30)) sizeReserv = historySize >> 3;
+ else if (historySize >= ((UInt32)2 << 30)) sizeReserv = historySize >> 2;
+
+ sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19);
+
+ p->keepSizeBefore = historySize + keepAddBufferBefore + 1;
+ p->keepSizeAfter = matchMaxLen + keepAddBufferAfter;
+
+ /* we need one additional byte, since we use MoveBlock after pos++ and before the dictionary is used */
+
+ if (LzInWindow_Create(p, sizeReserv, alloc))
+ {
+ UInt32 newCyclicBufferSize = historySize + 1;
+ UInt32 hs;
+ p->matchMaxLen = matchMaxLen;
+ {
+ p->fixedHashSize = 0;
+ if (p->numHashBytes == 2)
+ hs = (1 << 16) - 1;
+ else
+ {
+ hs = historySize;
+ if (hs > p->expectedDataSize)
+ hs = (UInt32)p->expectedDataSize;
+ if (hs != 0)
+ hs--;
+ hs |= (hs >> 1);
+ hs |= (hs >> 2);
+ hs |= (hs >> 4);
+ hs |= (hs >> 8);
+ hs >>= 1;
+ hs |= 0xFFFF; /* don't change it! It's required for Deflate */
+ if (hs > (1 << 24))
+ {
+ if (p->numHashBytes == 3)
+ hs = (1 << 24) - 1;
+ else
+ hs >>= 1;
+ /* if (bigHash) mode is used, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1)) */
+ }
+ }
+ p->hashMask = hs;
+ hs++;
+ if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
+ if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
+ if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size;
+ hs += p->fixedHashSize;
+ }
+
+ {
+ size_t newSize;
+ size_t numSons;
+ p->historySize = historySize;
+ p->hashSizeSum = hs;
+ p->cyclicBufferSize = newCyclicBufferSize;
+
+ numSons = newCyclicBufferSize;
+ if (p->btMode)
+ numSons <<= 1;
+ newSize = hs + numSons;
+
+ if (p->hash && p->numRefs == newSize)
+ return 1;
+
+ MatchFinder_FreeThisClassMemory(p, alloc);
+ p->numRefs = newSize;
+ p->hash = AllocRefs(newSize, alloc);
+
+ if (p->hash)
+ {
+ p->son = p->hash + p->hashSizeSum;
+ return 1;
+ }
+ }
+ }
+
+ MatchFinder_Free(p, alloc);
+ return 0;
+}
+
+static void MatchFinder_SetLimits(CMatchFinder *p)
+{
+ UInt32 limit = kMaxValForNormalize - p->pos;
+ UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos;
+
+ if (limit2 < limit)
+ limit = limit2;
+ limit2 = p->streamPos - p->pos;
+
+ if (limit2 <= p->keepSizeAfter)
+ {
+ if (limit2 > 0)
+ limit2 = 1;
+ }
+ else
+ limit2 -= p->keepSizeAfter;
+
+ if (limit2 < limit)
+ limit = limit2;
+
+ {
+ UInt32 lenLimit = p->streamPos - p->pos;
+ if (lenLimit > p->matchMaxLen)
+ lenLimit = p->matchMaxLen;
+ p->lenLimit = lenLimit;
+ }
+ p->posLimit = p->pos + limit;
+}
+
+
+void MatchFinder_Init_LowHash(CMatchFinder *p)
+{
+ size_t i;
+ CLzRef *items = p->hash;
+ size_t numItems = p->fixedHashSize;
+ for (i = 0; i < numItems; i++)
+ items[i] = kEmptyHashValue;
+}
+
+
+void MatchFinder_Init_HighHash(CMatchFinder *p)
+{
+ size_t i;
+ CLzRef *items = p->hash + p->fixedHashSize;
+ size_t numItems = (size_t)p->hashMask + 1;
+ for (i = 0; i < numItems; i++)
+ items[i] = kEmptyHashValue;
+}
+
+
+void MatchFinder_Init_3(CMatchFinder *p, int readData)
+{
+ p->cyclicBufferPos = 0;
+ p->buffer = p->bufferBase;
+ p->pos =
+ p->streamPos = p->cyclicBufferSize;
+ p->result = SZ_OK;
+ p->streamEndWasReached = 0;
+
+ if (readData)
+ MatchFinder_ReadBlock(p);
+
+ MatchFinder_SetLimits(p);
+}
+
+
+void MatchFinder_Init(CMatchFinder *p)
+{
+ MatchFinder_Init_HighHash(p);
+ MatchFinder_Init_LowHash(p);
+ MatchFinder_Init_3(p, True);
+}
+
+
+static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)
+{
+ return (p->pos - p->historySize - 1) & kNormalizeMask;
+}
+
+void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems)
+{
+ size_t i;
+ for (i = 0; i < numItems; i++)
+ {
+ UInt32 value = items[i];
+ if (value <= subValue)
+ value = kEmptyHashValue;
+ else
+ value -= subValue;
+ items[i] = value;
+ }
+}
+
+static void MatchFinder_Normalize(CMatchFinder *p)
+{
+ UInt32 subValue = MatchFinder_GetSubValue(p);
+ MatchFinder_Normalize3(subValue, p->hash, p->numRefs);
+ MatchFinder_ReduceOffsets(p, subValue);
+}
+
+
+MY_NO_INLINE
+static void MatchFinder_CheckLimits(CMatchFinder *p)
+{
+ if (p->pos == kMaxValForNormalize)
+ MatchFinder_Normalize(p);
+ if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos)
+ MatchFinder_CheckAndMoveAndRead(p);
+ if (p->cyclicBufferPos == p->cyclicBufferSize)
+ p->cyclicBufferPos = 0;
+ MatchFinder_SetLimits(p);
+}
+
+
+/*
+ (lenLimit > maxLen)
+*/
+MY_FORCE_INLINE
+static UInt32 * Hc_GetMatchesSpec(unsigned lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
+ UInt32 *distances, unsigned maxLen)
+{
+ /*
+ son[_cyclicBufferPos] = curMatch;
+ for (;;)
+ {
+ UInt32 delta = pos - curMatch;
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+ return distances;
+ {
+ const Byte *pb = cur - delta;
+ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
+ if (pb[maxLen] == cur[maxLen] && *pb == *cur)
+ {
+ UInt32 len = 0;
+ while (++len != lenLimit)
+ if (pb[len] != cur[len])
+ break;
+ if (maxLen < len)
+ {
+ maxLen = len;
+ *distances++ = len;
+ *distances++ = delta - 1;
+ if (len == lenLimit)
+ return distances;
+ }
+ }
+ }
+ }
+ */
+
+ const Byte *lim = cur + lenLimit;
+ son[_cyclicBufferPos] = curMatch;
+ do
+ {
+ UInt32 delta = pos - curMatch;
+ if (delta >= _cyclicBufferSize)
+ break;
+ {
+ ptrdiff_t diff;
+ curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
+ diff = (ptrdiff_t)0 - delta;
+ if (cur[maxLen] == cur[maxLen + diff])
+ {
+ const Byte *c = cur;
+ while (*c == c[diff])
+ {
+ if (++c == lim)
+ {
+ distances[0] = (UInt32)(lim - cur);
+ distances[1] = delta - 1;
+ return distances + 2;
+ }
+ }
+ {
+ unsigned len = (unsigned)(c - cur);
+ if (maxLen < len)
+ {
+ maxLen = len;
+ distances[0] = (UInt32)len;
+ distances[1] = delta - 1;
+ distances += 2;
+ }
+ }
+ }
+ }
+ }
+ while (--cutValue);
+
+ return distances;
+}
+
+
+MY_FORCE_INLINE
+UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
+ UInt32 *distances, UInt32 maxLen)
+{
+ CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
+ CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
+ unsigned len0 = 0, len1 = 0;
+ for (;;)
+ {
+ UInt32 delta = pos - curMatch;
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+ {
+ *ptr0 = *ptr1 = kEmptyHashValue;
+ return distances;
+ }
+ {
+ CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
+ const Byte *pb = cur - delta;
+ unsigned len = (len0 < len1 ? len0 : len1);
+ UInt32 pair0 = pair[0];
+ if (pb[len] == cur[len])
+ {
+ if (++len != lenLimit && pb[len] == cur[len])
+ while (++len != lenLimit)
+ if (pb[len] != cur[len])
+ break;
+ if (maxLen < len)
+ {
+ maxLen = (UInt32)len;
+ *distances++ = (UInt32)len;
+ *distances++ = delta - 1;
+ if (len == lenLimit)
+ {
+ *ptr1 = pair0;
+ *ptr0 = pair[1];
+ return distances;
+ }
+ }
+ }
+ if (pb[len] < cur[len])
+ {
+ *ptr1 = curMatch;
+ ptr1 = pair + 1;
+ curMatch = *ptr1;
+ len1 = len;
+ }
+ else
+ {
+ *ptr0 = curMatch;
+ ptr0 = pair;
+ curMatch = *ptr0;
+ len0 = len;
+ }
+ }
+ }
+}
+
+static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
+{
+ CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
+ CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
+ unsigned len0 = 0, len1 = 0;
+ for (;;)
+ {
+ UInt32 delta = pos - curMatch;
+ if (cutValue-- == 0 || delta >= _cyclicBufferSize)
+ {
+ *ptr0 = *ptr1 = kEmptyHashValue;
+ return;
+ }
+ {
+ CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
+ const Byte *pb = cur - delta;
+ unsigned len = (len0 < len1 ? len0 : len1);
+ if (pb[len] == cur[len])
+ {
+ while (++len != lenLimit)
+ if (pb[len] != cur[len])
+ break;
+ {
+ if (len == lenLimit)
+ {
+ *ptr1 = pair[0];
+ *ptr0 = pair[1];
+ return;
+ }
+ }
+ }
+ if (pb[len] < cur[len])
+ {
+ *ptr1 = curMatch;
+ ptr1 = pair + 1;
+ curMatch = *ptr1;
+ len1 = len;
+ }
+ else
+ {
+ *ptr0 = curMatch;
+ ptr0 = pair;
+ curMatch = *ptr0;
+ len0 = len;
+ }
+ }
+ }
+}
+
+#define MOVE_POS \
+ ++p->cyclicBufferPos; \
+ p->buffer++; \
+ if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p);
+
+#define MOVE_POS_RET MOVE_POS return (UInt32)offset;
+
+static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; }
+
+#define GET_MATCHES_HEADER2(minLen, ret_op) \
+ unsigned lenLimit; UInt32 hv; const Byte *cur; UInt32 curMatch; \
+ lenLimit = (unsigned)p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \
+ cur = p->buffer;
+
+#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0)
+#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue)
+
+#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue
+
+#define GET_MATCHES_FOOTER(offset, maxLen) \
+ offset = (unsigned)(GetMatchesSpec1((UInt32)lenLimit, curMatch, MF_PARAMS(p), \
+ distances + offset, (UInt32)maxLen) - distances); MOVE_POS_RET;
+
+#define SKIP_FOOTER \
+ SkipMatchesSpec((UInt32)lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS;
+
+#define UPDATE_maxLen { \
+ ptrdiff_t diff = (ptrdiff_t)0 - d2; \
+ const Byte *c = cur + maxLen; \
+ const Byte *lim = cur + lenLimit; \
+ for (; c != lim; c++) if (*(c + diff) != *c) break; \
+ maxLen = (unsigned)(c - cur); }
+
+static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ unsigned offset;
+ GET_MATCHES_HEADER(2)
+ HASH2_CALC;
+ curMatch = p->hash[hv];
+ p->hash[hv] = p->pos;
+ offset = 0;
+ GET_MATCHES_FOOTER(offset, 1)
+}
+
+UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ unsigned offset;
+ GET_MATCHES_HEADER(3)
+ HASH_ZIP_CALC;
+ curMatch = p->hash[hv];
+ p->hash[hv] = p->pos;
+ offset = 0;
+ GET_MATCHES_FOOTER(offset, 2)
+}
+
+static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 h2, d2, pos;
+ unsigned maxLen, offset;
+ UInt32 *hash;
+ GET_MATCHES_HEADER(3)
+
+ HASH3_CALC;
+
+ hash = p->hash;
+ pos = p->pos;
+
+ d2 = pos - hash[h2];
+
+ curMatch = (hash + kFix3HashSize)[hv];
+
+ hash[h2] = pos;
+ (hash + kFix3HashSize)[hv] = pos;
+
+ maxLen = 2;
+ offset = 0;
+
+ if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur)
+ {
+ UPDATE_maxLen
+ distances[0] = (UInt32)maxLen;
+ distances[1] = d2 - 1;
+ offset = 2;
+ if (maxLen == lenLimit)
+ {
+ SkipMatchesSpec((UInt32)lenLimit, curMatch, MF_PARAMS(p));
+ MOVE_POS_RET;
+ }
+ }
+
+ GET_MATCHES_FOOTER(offset, maxLen)
+}
+
+static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 h2, h3, d2, d3, pos;
+ unsigned maxLen, offset;
+ UInt32 *hash;
+ GET_MATCHES_HEADER(4)
+
+ HASH4_CALC;
+
+ hash = p->hash;
+ pos = p->pos;
+
+ d2 = pos - hash [h2];
+ d3 = pos - (hash + kFix3HashSize)[h3];
+
+ curMatch = (hash + kFix4HashSize)[hv];
+
+ hash [h2] = pos;
+ (hash + kFix3HashSize)[h3] = pos;
+ (hash + kFix4HashSize)[hv] = pos;
+
+ maxLen = 0;
+ offset = 0;
+
+ if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur)
+ {
+ maxLen = 2;
+ distances[0] = 2;
+ distances[1] = d2 - 1;
+ offset = 2;
+ }
+
+ if (d2 != d3 && d3 < p->cyclicBufferSize && *(cur - d3) == *cur)
+ {
+ maxLen = 3;
+ distances[(size_t)offset + 1] = d3 - 1;
+ offset += 2;
+ d2 = d3;
+ }
+
+ if (offset != 0)
+ {
+ UPDATE_maxLen
+ distances[(size_t)offset - 2] = (UInt32)maxLen;
+ if (maxLen == lenLimit)
+ {
+ SkipMatchesSpec((UInt32)lenLimit, curMatch, MF_PARAMS(p));
+ MOVE_POS_RET;
+ }
+ }
+
+ if (maxLen < 3)
+ maxLen = 3;
+
+ GET_MATCHES_FOOTER(offset, maxLen)
+}
+
+/*
+static UInt32 Bt5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 h2, h3, h4, d2, d3, d4, maxLen, offset, pos;
+ UInt32 *hash;
+ GET_MATCHES_HEADER(5)
+
+ HASH5_CALC;
+
+ hash = p->hash;
+ pos = p->pos;
+
+ d2 = pos - hash [h2];
+ d3 = pos - (hash + kFix3HashSize)[h3];
+ d4 = pos - (hash + kFix4HashSize)[h4];
+
+ curMatch = (hash + kFix5HashSize)[hv];
+
+ hash [h2] = pos;
+ (hash + kFix3HashSize)[h3] = pos;
+ (hash + kFix4HashSize)[h4] = pos;
+ (hash + kFix5HashSize)[hv] = pos;
+
+ maxLen = 0;
+ offset = 0;
+
+ if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur)
+ {
+ distances[0] = maxLen = 2;
+ distances[1] = d2 - 1;
+ offset = 2;
+ if (*(cur - d2 + 2) == cur[2])
+ distances[0] = maxLen = 3;
+ else if (d3 < p->cyclicBufferSize && *(cur - d3) == *cur)
+ {
+ distances[2] = maxLen = 3;
+ distances[3] = d3 - 1;
+ offset = 4;
+ d2 = d3;
+ }
+ }
+ else if (d3 < p->cyclicBufferSize && *(cur - d3) == *cur)
+ {
+ distances[0] = maxLen = 3;
+ distances[1] = d3 - 1;
+ offset = 2;
+ d2 = d3;
+ }
+
+ if (d2 != d4 && d4 < p->cyclicBufferSize
+ && *(cur - d4) == *cur
+ && *(cur - d4 + 3) == *(cur + 3))
+ {
+ maxLen = 4;
+ distances[(size_t)offset + 1] = d4 - 1;
+ offset += 2;
+ d2 = d4;
+ }
+
+ if (offset != 0)
+ {
+ UPDATE_maxLen
+ distances[(size_t)offset - 2] = maxLen;
+ if (maxLen == lenLimit)
+ {
+ SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));
+ MOVE_POS_RET;
+ }
+ }
+
+ if (maxLen < 4)
+ maxLen = 4;
+
+ GET_MATCHES_FOOTER(offset, maxLen)
+}
+*/
+
+static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 h2, h3, d2, d3, pos;
+ unsigned maxLen, offset;
+ UInt32 *hash;
+ GET_MATCHES_HEADER(4)
+
+ HASH4_CALC;
+
+ hash = p->hash;
+ pos = p->pos;
+
+ d2 = pos - hash [h2];
+ d3 = pos - (hash + kFix3HashSize)[h3];
+ curMatch = (hash + kFix4HashSize)[hv];
+
+ hash [h2] = pos;
+ (hash + kFix3HashSize)[h3] = pos;
+ (hash + kFix4HashSize)[hv] = pos;
+
+ maxLen = 0;
+ offset = 0;
+
+ if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur)
+ {
+ maxLen = 2;
+ distances[0] = 2;
+ distances[1] = d2 - 1;
+ offset = 2;
+ }
+
+ if (d2 != d3 && d3 < p->cyclicBufferSize && *(cur - d3) == *cur)
+ {
+ maxLen = 3;
+ distances[(size_t)offset + 1] = d3 - 1;
+ offset += 2;
+ d2 = d3;
+ }
+
+ if (offset != 0)
+ {
+ UPDATE_maxLen
+ distances[(size_t)offset - 2] = (UInt32)maxLen;
+ if (maxLen == lenLimit)
+ {
+ p->son[p->cyclicBufferPos] = curMatch;
+ MOVE_POS_RET;
+ }
+ }
+
+ if (maxLen < 3)
+ maxLen = 3;
+
+ offset = (unsigned)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
+ distances + offset, maxLen) - (distances));
+ MOVE_POS_RET
+}
+
+/*
+static UInt32 Hc5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ UInt32 h2, h3, h4, d2, d3, d4, maxLen, offset, pos;
+ UInt32 *hash;
+ GET_MATCHES_HEADER(5)
+
+ HASH5_CALC;
+
+ hash = p->hash;
+ pos = p->pos;
+
+ d2 = pos - hash [h2];
+ d3 = pos - (hash + kFix3HashSize)[h3];
+ d4 = pos - (hash + kFix4HashSize)[h4];
+
+ curMatch = (hash + kFix5HashSize)[hv];
+
+ hash [h2] = pos;
+ (hash + kFix3HashSize)[h3] = pos;
+ (hash + kFix4HashSize)[h4] = pos;
+ (hash + kFix5HashSize)[hv] = pos;
+
+ maxLen = 0;
+ offset = 0;
+
+ if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur)
+ {
+ distances[0] = maxLen = 2;
+ distances[1] = d2 - 1;
+ offset = 2;
+ if (*(cur - d2 + 2) == cur[2])
+ distances[0] = maxLen = 3;
+ else if (d3 < p->cyclicBufferSize && *(cur - d3) == *cur)
+ {
+ distances[2] = maxLen = 3;
+ distances[3] = d3 - 1;
+ offset = 4;
+ d2 = d3;
+ }
+ }
+ else if (d3 < p->cyclicBufferSize && *(cur - d3) == *cur)
+ {
+ distances[0] = maxLen = 3;
+ distances[1] = d3 - 1;
+ offset = 2;
+ d2 = d3;
+ }
+
+ if (d2 != d4 && d4 < p->cyclicBufferSize
+ && *(cur - d4) == *cur
+ && *(cur - d4 + 3) == *(cur + 3))
+ {
+ maxLen = 4;
+ distances[(size_t)offset + 1] = d4 - 1;
+ offset += 2;
+ d2 = d4;
+ }
+
+ if (offset != 0)
+ {
+ UPDATE_maxLen
+ distances[(size_t)offset - 2] = maxLen;
+ if (maxLen == lenLimit)
+ {
+ p->son[p->cyclicBufferPos] = curMatch;
+ MOVE_POS_RET;
+ }
+ }
+
+ if (maxLen < 4)
+ maxLen = 4;
+
+ offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
+ distances + offset, maxLen) - (distances));
+ MOVE_POS_RET
+}
+*/
+
+UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
+{
+ unsigned offset;
+ GET_MATCHES_HEADER(3)
+ HASH_ZIP_CALC;
+ curMatch = p->hash[hv];
+ p->hash[hv] = p->pos;
+ offset = (unsigned)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p),
+ distances, 2) - (distances));
+ MOVE_POS_RET
+}
+
+static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ SKIP_HEADER(2)
+ HASH2_CALC;
+ curMatch = p->hash[hv];
+ p->hash[hv] = p->pos;
+ SKIP_FOOTER
+ }
+ while (--num != 0);
+}
+
+void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ SKIP_HEADER(3)
+ HASH_ZIP_CALC;
+ curMatch = p->hash[hv];
+ p->hash[hv] = p->pos;
+ SKIP_FOOTER
+ }
+ while (--num != 0);
+}
+
+static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ UInt32 h2;
+ UInt32 *hash;
+ SKIP_HEADER(3)
+ HASH3_CALC;
+ hash = p->hash;
+ curMatch = (hash + kFix3HashSize)[hv];
+ hash[h2] =
+ (hash + kFix3HashSize)[hv] = p->pos;
+ SKIP_FOOTER
+ }
+ while (--num != 0);
+}
+
+static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ UInt32 h2, h3;
+ UInt32 *hash;
+ SKIP_HEADER(4)
+ HASH4_CALC;
+ hash = p->hash;
+ curMatch = (hash + kFix4HashSize)[hv];
+ hash [h2] =
+ (hash + kFix3HashSize)[h3] =
+ (hash + kFix4HashSize)[hv] = p->pos;
+ SKIP_FOOTER
+ }
+ while (--num != 0);
+}
+
+/*
+static void Bt5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ UInt32 h2, h3, h4;
+ UInt32 *hash;
+ SKIP_HEADER(5)
+ HASH5_CALC;
+ hash = p->hash;
+ curMatch = (hash + kFix5HashSize)[hv];
+ hash [h2] =
+ (hash + kFix3HashSize)[h3] =
+ (hash + kFix4HashSize)[h4] =
+ (hash + kFix5HashSize)[hv] = p->pos;
+ SKIP_FOOTER
+ }
+ while (--num != 0);
+}
+*/
+
+static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ UInt32 h2, h3;
+ UInt32 *hash;
+ SKIP_HEADER(4)
+ HASH4_CALC;
+ hash = p->hash;
+ curMatch = (hash + kFix4HashSize)[hv];
+ hash [h2] =
+ (hash + kFix3HashSize)[h3] =
+ (hash + kFix4HashSize)[hv] = p->pos;
+ p->son[p->cyclicBufferPos] = curMatch;
+ MOVE_POS
+ }
+ while (--num != 0);
+}
+
+/*
+static void Hc5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ UInt32 h2, h3, h4;
+ UInt32 *hash;
+ SKIP_HEADER(5)
+ HASH5_CALC;
+ hash = p->hash;
+ curMatch = (hash + kFix5HashSize)[hv];
+ hash [h2] =
+ (hash + kFix3HashSize)[h3] =
+ (hash + kFix4HashSize)[h4] =
+ (hash + kFix5HashSize)[hv] = p->pos;
+ p->son[p->cyclicBufferPos] = curMatch;
+ MOVE_POS
+ }
+ while (--num != 0);
+}
+*/
+
+void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
+{
+ do
+ {
+ SKIP_HEADER(3)
+ HASH_ZIP_CALC;
+ curMatch = p->hash[hv];
+ p->hash[hv] = p->pos;
+ p->son[p->cyclicBufferPos] = curMatch;
+ MOVE_POS
+ }
+ while (--num != 0);
+}
+
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable)
+{
+ vTable->Init = (Mf_Init_Func)MatchFinder_Init;
+ vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
+ vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
+ if (!p->btMode)
+ {
+ /* if (p->numHashBytes <= 4) */
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
+ }
+ /*
+ else
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Hc5_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Hc5_MatchFinder_Skip;
+ }
+ */
+ }
+ else if (p->numHashBytes == 2)
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
+ }
+ else if (p->numHashBytes == 3)
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
+ }
+ else /* if (p->numHashBytes == 4) */
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
+ }
+ /*
+ else
+ {
+ vTable->GetMatches = (Mf_GetMatches_Func)Bt5_MatchFinder_GetMatches;
+ vTable->Skip = (Mf_Skip_Func)Bt5_MatchFinder_Skip;
+ }
+ */
+}
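
Every GetMatches variant above appends (length, distance - 1) pairs to the distances array, in increasing length order, and returns the number of UInt32 values written. A hedged sketch of consuming that output through the vtable; the buffer size here assumes LZMA's usual match length limit of 273 and should be adjusted to the actual matchMaxLen:

#include <stdio.h>
#include "LzFind.h"

void report_matches(IMatchFinder *vt, CMatchFinder *mf)
{
  UInt32 distances[2 * 273 + 2];
  UInt32 num = vt->GetMatches(mf, distances);   /* count of UInt32 values written */
  UInt32 i;
  for (i = 0; i + 1 < num; i += 2)
  {
    UInt32 len  = distances[i];
    UInt32 dist = distances[i + 1] + 1;         /* stored value is delta - 1 */
    printf("match: len=%u dist=%u\n", (unsigned)len, (unsigned)dist);
  }
}
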
diff --git a/contrib/libs/lzmasdk/LzFind.h b/contrib/libs/lzmasdk/LzFind.h new file mode 100644 index 0000000000..c77added7b --- /dev/null +++ b/contrib/libs/lzmasdk/LzFind.h @@ -0,0 +1,121 @@ +/* LzFind.h -- Match finder for LZ algorithms
+2017-06-10 : Igor Pavlov : Public domain */
+
+#ifndef __LZ_FIND_H
+#define __LZ_FIND_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+typedef UInt32 CLzRef;
+
+typedef struct _CMatchFinder
+{
+ Byte *buffer;
+ UInt32 pos;
+ UInt32 posLimit;
+ UInt32 streamPos;
+ UInt32 lenLimit;
+
+ UInt32 cyclicBufferPos;
+ UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */
+
+ Byte streamEndWasReached;
+ Byte btMode;
+ Byte bigHash;
+ Byte directInput;
+
+ UInt32 matchMaxLen;
+ CLzRef *hash;
+ CLzRef *son;
+ UInt32 hashMask;
+ UInt32 cutValue;
+
+ Byte *bufferBase;
+ ISeqInStream *stream;
+
+ UInt32 blockSize;
+ UInt32 keepSizeBefore;
+ UInt32 keepSizeAfter;
+
+ UInt32 numHashBytes;
+ size_t directInputRem;
+ UInt32 historySize;
+ UInt32 fixedHashSize;
+ UInt32 hashSizeSum;
+ SRes result;
+ UInt32 crc[256];
+ size_t numRefs;
+
+ UInt64 expectedDataSize;
+} CMatchFinder;
+
+#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)
+
+#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos)
+
+#define Inline_MatchFinder_IsFinishedOK(p) \
+ ((p)->streamEndWasReached \
+ && (p)->streamPos == (p)->pos \
+ && (!(p)->directInput || (p)->directInputRem == 0))
+
+int MatchFinder_NeedMove(CMatchFinder *p);
+Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p);
+void MatchFinder_MoveBlock(CMatchFinder *p);
+void MatchFinder_ReadIfRequired(CMatchFinder *p);
+
+void MatchFinder_Construct(CMatchFinder *p);
+
+/* Conditions:
+ historySize <= 3 GB
+ keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB
+*/
+int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
+ UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
+ ISzAllocPtr alloc);
+void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc);
+void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems);
+void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);
+
+UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son,
+ UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue,
+ UInt32 *distances, UInt32 maxLen);
+
+/*
+Conditions:
+ Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func.
+ Mf_GetPointerToCurrentPos_Func's result must be used only before any other function
+*/
+
+typedef void (*Mf_Init_Func)(void *object);
+typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object);
+typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object);
+typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances);
+typedef void (*Mf_Skip_Func)(void *object, UInt32);
+
+typedef struct _IMatchFinder
+{
+ Mf_Init_Func Init;
+ Mf_GetNumAvailableBytes_Func GetNumAvailableBytes;
+ Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos;
+ Mf_GetMatches_Func GetMatches;
+ Mf_Skip_Func Skip;
+} IMatchFinder;
+
+void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);
+
+void MatchFinder_Init_LowHash(CMatchFinder *p);
+void MatchFinder_Init_HighHash(CMatchFinder *p);
+void MatchFinder_Init_3(CMatchFinder *p, int readData);
+void MatchFinder_Init(CMatchFinder *p);
+
+UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
+UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);
+
+void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
+void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num);
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/LzHash.h b/contrib/libs/lzmasdk/LzHash.h new file mode 100644 index 0000000000..2191444072 --- /dev/null +++ b/contrib/libs/lzmasdk/LzHash.h @@ -0,0 +1,57 @@ +/* LzHash.h -- HASH functions for LZ algorithms
+2015-04-12 : Igor Pavlov : Public domain */
+
+#ifndef __LZ_HASH_H
+#define __LZ_HASH_H
+
+#define kHash2Size (1 << 10)
+#define kHash3Size (1 << 16)
+#define kHash4Size (1 << 20)
+
+#define kFix3HashSize (kHash2Size)
+#define kFix4HashSize (kHash2Size + kHash3Size)
+#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
+
+#define HASH2_CALC hv = cur[0] | ((UInt32)cur[1] << 8);
+
+#define HASH3_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ h2 = temp & (kHash2Size - 1); \
+ hv = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }
+
+#define HASH4_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ h2 = temp & (kHash2Size - 1); \
+ temp ^= ((UInt32)cur[2] << 8); \
+ h3 = temp & (kHash3Size - 1); \
+ hv = (temp ^ (p->crc[cur[3]] << 5)) & p->hashMask; }
+
+#define HASH5_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ h2 = temp & (kHash2Size - 1); \
+ temp ^= ((UInt32)cur[2] << 8); \
+ h3 = temp & (kHash3Size - 1); \
+ temp ^= (p->crc[cur[3]] << 5); \
+ h4 = temp & (kHash4Size - 1); \
+ hv = (temp ^ (p->crc[cur[4]] << 3)) & p->hashMask; }
+
+/* #define HASH_ZIP_CALC hv = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */
+#define HASH_ZIP_CALC hv = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
+
+
+#define MT_HASH2_CALC \
+ h2 = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1);
+
+#define MT_HASH3_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ h2 = temp & (kHash2Size - 1); \
+ h3 = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); }
+
+#define MT_HASH4_CALC { \
+ UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
+ h2 = temp & (kHash2Size - 1); \
+ temp ^= ((UInt32)cur[2] << 8); \
+ h3 = temp & (kHash3Size - 1); \
+ h4 = (temp ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); }
+
+#endif
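
The HASHn_CALC macros fold successive input bytes through the CRC table built in MatchFinder_Construct and mask the result with the fixed sub-table sizes and p->hashMask. The 3-byte variant written out as a plain function, with crc and hashMask standing in for the corresponding CMatchFinder fields:

#include "7zTypes.h"
#include "LzHash.h"

void hash3(const Byte *cur, const UInt32 *crc, UInt32 hashMask,
           UInt32 *h2, UInt32 *hv)
{
  UInt32 temp = crc[cur[0]] ^ cur[1];
  *h2 = temp & (kHash2Size - 1);                     /* index into the fixed 2-byte table */
  *hv = (temp ^ ((UInt32)cur[2] << 8)) & hashMask;   /* index into the main hash table */
}
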
diff --git a/contrib/libs/lzmasdk/Lzma2Dec.c b/contrib/libs/lzmasdk/Lzma2Dec.c new file mode 100644 index 0000000000..2e631051ba --- /dev/null +++ b/contrib/libs/lzmasdk/Lzma2Dec.c @@ -0,0 +1,488 @@ +/* Lzma2Dec.c -- LZMA2 Decoder
+2019-02-02 : Igor Pavlov : Public domain */
+
+/* #define SHOW_DEBUG_INFO */
+
+#include "Precomp.h"
+
+#ifdef SHOW_DEBUG_INFO
+#include <stdio.h>
+#endif
+
+#include <string.h>
+
+#include "Lzma2Dec.h"
+
+/*
+00000000 - End of data
+00000001 U U - Uncompressed, reset dic, need reset state and set new prop
+00000010 U U - Uncompressed, no reset
+100uuuuu U U P P - LZMA, no reset
+101uuuuu U U P P - LZMA, reset state
+110uuuuu U U P P S - LZMA, reset state + set new prop
+111uuuuu U U P P S - LZMA, reset state + set new prop, reset dic
+
+ u, U - Unpack Size
+ P - Pack Size
+ S - Props
+*/
+
+#define LZMA2_CONTROL_COPY_RESET_DIC 1
+
+#define LZMA2_IS_UNCOMPRESSED_STATE(p) (((p)->control & (1 << 7)) == 0)
+
+#define LZMA2_LCLP_MAX 4
+#define LZMA2_DIC_SIZE_FROM_PROP(p) (((UInt32)2 | ((p) & 1)) << ((p) / 2 + 11))
+
+#ifdef SHOW_DEBUG_INFO
+#define PRF(x) x
+#else
+#define PRF(x)
+#endif
+
+typedef enum
+{
+ LZMA2_STATE_CONTROL,
+ LZMA2_STATE_UNPACK0,
+ LZMA2_STATE_UNPACK1,
+ LZMA2_STATE_PACK0,
+ LZMA2_STATE_PACK1,
+ LZMA2_STATE_PROP,
+ LZMA2_STATE_DATA,
+ LZMA2_STATE_DATA_CONT,
+ LZMA2_STATE_FINISHED,
+ LZMA2_STATE_ERROR
+} ELzma2State;
+
+static SRes Lzma2Dec_GetOldProps(Byte prop, Byte *props)
+{
+ UInt32 dicSize;
+ if (prop > 40)
+ return SZ_ERROR_UNSUPPORTED;
+ dicSize = (prop == 40) ? 0xFFFFFFFF : LZMA2_DIC_SIZE_FROM_PROP(prop);
+ props[0] = (Byte)LZMA2_LCLP_MAX;
+ props[1] = (Byte)(dicSize);
+ props[2] = (Byte)(dicSize >> 8);
+ props[3] = (Byte)(dicSize >> 16);
+ props[4] = (Byte)(dicSize >> 24);
+ return SZ_OK;
+}
+
+SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc)
+{
+ Byte props[LZMA_PROPS_SIZE];
+ RINOK(Lzma2Dec_GetOldProps(prop, props));
+ return LzmaDec_AllocateProbs(&p->decoder, props, LZMA_PROPS_SIZE, alloc);
+}
+
+SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc)
+{
+ Byte props[LZMA_PROPS_SIZE];
+ RINOK(Lzma2Dec_GetOldProps(prop, props));
+ return LzmaDec_Allocate(&p->decoder, props, LZMA_PROPS_SIZE, alloc);
+}
+
+void Lzma2Dec_Init(CLzma2Dec *p)
+{
+ p->state = LZMA2_STATE_CONTROL;
+ p->needInitLevel = 0xE0;
+ p->isExtraMode = False;
+ p->unpackSize = 0;
+
+ // p->decoder.dicPos = 0; // we can use it instead of full init
+ LzmaDec_Init(&p->decoder);
+}
+
+static ELzma2State Lzma2Dec_UpdateState(CLzma2Dec *p, Byte b)
+{
+ switch (p->state)
+ {
+ case LZMA2_STATE_CONTROL:
+ p->isExtraMode = False;
+ p->control = b;
+ PRF(printf("\n %8X", (unsigned)p->decoder.dicPos));
+ PRF(printf(" %02X", (unsigned)b));
+ if (b == 0)
+ return LZMA2_STATE_FINISHED;
+ if (LZMA2_IS_UNCOMPRESSED_STATE(p))
+ {
+ if (b == LZMA2_CONTROL_COPY_RESET_DIC)
+ p->needInitLevel = 0xC0;
+ else if (b > 2 || p->needInitLevel == 0xE0)
+ return LZMA2_STATE_ERROR;
+ }
+ else
+ {
+ if (b < p->needInitLevel)
+ return LZMA2_STATE_ERROR;
+ p->needInitLevel = 0;
+ p->unpackSize = (UInt32)(b & 0x1F) << 16;
+ }
+ return LZMA2_STATE_UNPACK0;
+
+ case LZMA2_STATE_UNPACK0:
+ p->unpackSize |= (UInt32)b << 8;
+ return LZMA2_STATE_UNPACK1;
+
+ case LZMA2_STATE_UNPACK1:
+ p->unpackSize |= (UInt32)b;
+ p->unpackSize++;
+ PRF(printf(" %7u", (unsigned)p->unpackSize));
+ return LZMA2_IS_UNCOMPRESSED_STATE(p) ? LZMA2_STATE_DATA : LZMA2_STATE_PACK0;
+
+ case LZMA2_STATE_PACK0:
+ p->packSize = (UInt32)b << 8;
+ return LZMA2_STATE_PACK1;
+
+ case LZMA2_STATE_PACK1:
+ p->packSize |= (UInt32)b;
+ p->packSize++;
+ // if (p->packSize < 5) return LZMA2_STATE_ERROR;
+ PRF(printf(" %5u", (unsigned)p->packSize));
+ return (p->control & 0x40) ? LZMA2_STATE_PROP : LZMA2_STATE_DATA;
+
+ case LZMA2_STATE_PROP:
+ {
+ unsigned lc, lp;
+ if (b >= (9 * 5 * 5))
+ return LZMA2_STATE_ERROR;
+ lc = b % 9;
+ b /= 9;
+ p->decoder.prop.pb = (Byte)(b / 5);
+ lp = b % 5;
+ if (lc + lp > LZMA2_LCLP_MAX)
+ return LZMA2_STATE_ERROR;
+ p->decoder.prop.lc = (Byte)lc;
+ p->decoder.prop.lp = (Byte)lp;
+ return LZMA2_STATE_DATA;
+ }
+ }
+ return LZMA2_STATE_ERROR;
+}
+
+static void LzmaDec_UpdateWithUncompressed(CLzmaDec *p, const Byte *src, SizeT size)
+{
+ memcpy(p->dic + p->dicPos, src, size);
+ p->dicPos += size;
+ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= size)
+ p->checkDicSize = p->prop.dicSize;
+ p->processedPos += (UInt32)size;
+}
+
+void LzmaDec_InitDicAndState(CLzmaDec *p, BoolInt initDic, BoolInt initState);
+
+
+SRes Lzma2Dec_DecodeToDic(CLzma2Dec *p, SizeT dicLimit,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+ SizeT inSize = *srcLen;
+ *srcLen = 0;
+ *status = LZMA_STATUS_NOT_SPECIFIED;
+
+ while (p->state != LZMA2_STATE_ERROR)
+ {
+ SizeT dicPos;
+
+ if (p->state == LZMA2_STATE_FINISHED)
+ {
+ *status = LZMA_STATUS_FINISHED_WITH_MARK;
+ return SZ_OK;
+ }
+
+ dicPos = p->decoder.dicPos;
+
+ if (dicPos == dicLimit && finishMode == LZMA_FINISH_ANY)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_OK;
+ }
+
+ if (p->state != LZMA2_STATE_DATA && p->state != LZMA2_STATE_DATA_CONT)
+ {
+ if (*srcLen == inSize)
+ {
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+ (*srcLen)++;
+ p->state = Lzma2Dec_UpdateState(p, *src++);
+ if (dicPos == dicLimit && p->state != LZMA2_STATE_FINISHED)
+ break;
+ continue;
+ }
+
+ {
+ SizeT inCur = inSize - *srcLen;
+ SizeT outCur = dicLimit - dicPos;
+ ELzmaFinishMode curFinishMode = LZMA_FINISH_ANY;
+
+ if (outCur >= p->unpackSize)
+ {
+ outCur = (SizeT)p->unpackSize;
+ curFinishMode = LZMA_FINISH_END;
+ }
+
+ if (LZMA2_IS_UNCOMPRESSED_STATE(p))
+ {
+ if (inCur == 0)
+ {
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+
+ if (p->state == LZMA2_STATE_DATA)
+ {
+ BoolInt initDic = (p->control == LZMA2_CONTROL_COPY_RESET_DIC);
+ LzmaDec_InitDicAndState(&p->decoder, initDic, False);
+ }
+
+ if (inCur > outCur)
+ inCur = outCur;
+ if (inCur == 0)
+ break;
+
+ LzmaDec_UpdateWithUncompressed(&p->decoder, src, inCur);
+
+ src += inCur;
+ *srcLen += inCur;
+ p->unpackSize -= (UInt32)inCur;
+ p->state = (p->unpackSize == 0) ? LZMA2_STATE_CONTROL : LZMA2_STATE_DATA_CONT;
+ }
+ else
+ {
+ SRes res;
+
+ if (p->state == LZMA2_STATE_DATA)
+ {
+ BoolInt initDic = (p->control >= 0xE0);
+ BoolInt initState = (p->control >= 0xA0);
+ LzmaDec_InitDicAndState(&p->decoder, initDic, initState);
+ p->state = LZMA2_STATE_DATA_CONT;
+ }
+
+ if (inCur > p->packSize)
+ inCur = (SizeT)p->packSize;
+
+ res = LzmaDec_DecodeToDic(&p->decoder, dicPos + outCur, src, &inCur, curFinishMode, status);
+
+ src += inCur;
+ *srcLen += inCur;
+ p->packSize -= (UInt32)inCur;
+ outCur = p->decoder.dicPos - dicPos;
+ p->unpackSize -= (UInt32)outCur;
+
+ if (res != 0)
+ break;
+
+ if (*status == LZMA_STATUS_NEEDS_MORE_INPUT)
+ {
+ if (p->packSize == 0)
+ break;
+ return SZ_OK;
+ }
+
+ if (inCur == 0 && outCur == 0)
+ {
+ if (*status != LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
+ || p->unpackSize != 0
+ || p->packSize != 0)
+ break;
+ p->state = LZMA2_STATE_CONTROL;
+ }
+
+ *status = LZMA_STATUS_NOT_SPECIFIED;
+ }
+ }
+ }
+
+ *status = LZMA_STATUS_NOT_SPECIFIED;
+ p->state = LZMA2_STATE_ERROR;
+ return SZ_ERROR_DATA;
+}
+
+
+
+
+ELzma2ParseStatus Lzma2Dec_Parse(CLzma2Dec *p,
+ SizeT outSize,
+ const Byte *src, SizeT *srcLen,
+ int checkFinishBlock)
+{
+ SizeT inSize = *srcLen;
+ *srcLen = 0;
+
+ while (p->state != LZMA2_STATE_ERROR)
+ {
+ if (p->state == LZMA2_STATE_FINISHED)
+ return (ELzma2ParseStatus)LZMA_STATUS_FINISHED_WITH_MARK;
+
+ if (outSize == 0 && !checkFinishBlock)
+ return (ELzma2ParseStatus)LZMA_STATUS_NOT_FINISHED;
+
+ if (p->state != LZMA2_STATE_DATA && p->state != LZMA2_STATE_DATA_CONT)
+ {
+ if (*srcLen == inSize)
+ return (ELzma2ParseStatus)LZMA_STATUS_NEEDS_MORE_INPUT;
+ (*srcLen)++;
+
+ p->state = Lzma2Dec_UpdateState(p, *src++);
+
+ if (p->state == LZMA2_STATE_UNPACK0)
+ {
+ // if (p->decoder.dicPos != 0)
+ if (p->control == LZMA2_CONTROL_COPY_RESET_DIC || p->control >= 0xE0)
+ return LZMA2_PARSE_STATUS_NEW_BLOCK;
+ // if (outSize == 0) return LZMA_STATUS_NOT_FINISHED;
+ }
+
+ // The following code can be commented.
+      // It's not a big problem if we read additional input bytes.
+ // It will be stopped later in LZMA2_STATE_DATA / LZMA2_STATE_DATA_CONT state.
+
+ if (outSize == 0 && p->state != LZMA2_STATE_FINISHED)
+ {
+        // checkFinishBlock is true, so we expect that the block must be finished.
+        // We can return LZMA_STATUS_NOT_SPECIFIED or LZMA_STATUS_NOT_FINISHED here
+ // break;
+ return (ELzma2ParseStatus)LZMA_STATUS_NOT_FINISHED;
+ }
+
+ if (p->state == LZMA2_STATE_DATA)
+ return LZMA2_PARSE_STATUS_NEW_CHUNK;
+
+ continue;
+ }
+
+ if (outSize == 0)
+ return (ELzma2ParseStatus)LZMA_STATUS_NOT_FINISHED;
+
+ {
+ SizeT inCur = inSize - *srcLen;
+
+ if (LZMA2_IS_UNCOMPRESSED_STATE(p))
+ {
+ if (inCur == 0)
+ return (ELzma2ParseStatus)LZMA_STATUS_NEEDS_MORE_INPUT;
+ if (inCur > p->unpackSize)
+ inCur = p->unpackSize;
+ if (inCur > outSize)
+ inCur = outSize;
+ p->decoder.dicPos += inCur;
+ src += inCur;
+ *srcLen += inCur;
+ outSize -= inCur;
+ p->unpackSize -= (UInt32)inCur;
+ p->state = (p->unpackSize == 0) ? LZMA2_STATE_CONTROL : LZMA2_STATE_DATA_CONT;
+ }
+ else
+ {
+ p->isExtraMode = True;
+
+ if (inCur == 0)
+ {
+ if (p->packSize != 0)
+ return (ELzma2ParseStatus)LZMA_STATUS_NEEDS_MORE_INPUT;
+ }
+ else if (p->state == LZMA2_STATE_DATA)
+ {
+ p->state = LZMA2_STATE_DATA_CONT;
+ if (*src != 0)
+ {
+          // the first byte of an LZMA chunk must be zero
+ *srcLen += 1;
+ p->packSize--;
+ break;
+ }
+ }
+
+ if (inCur > p->packSize)
+ inCur = (SizeT)p->packSize;
+
+ src += inCur;
+ *srcLen += inCur;
+ p->packSize -= (UInt32)inCur;
+
+ if (p->packSize == 0)
+ {
+ SizeT rem = outSize;
+ if (rem > p->unpackSize)
+ rem = p->unpackSize;
+ p->decoder.dicPos += rem;
+ p->unpackSize -= (UInt32)rem;
+ outSize -= rem;
+ if (p->unpackSize == 0)
+ p->state = LZMA2_STATE_CONTROL;
+ }
+ }
+ }
+ }
+
+ p->state = LZMA2_STATE_ERROR;
+ return (ELzma2ParseStatus)LZMA_STATUS_NOT_SPECIFIED;
+}
+
+
+
+
+SRes Lzma2Dec_DecodeToBuf(CLzma2Dec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+ SizeT outSize = *destLen, inSize = *srcLen;
+ *srcLen = *destLen = 0;
+
+ for (;;)
+ {
+ SizeT inCur = inSize, outCur, dicPos;
+ ELzmaFinishMode curFinishMode;
+ SRes res;
+
+ if (p->decoder.dicPos == p->decoder.dicBufSize)
+ p->decoder.dicPos = 0;
+ dicPos = p->decoder.dicPos;
+ curFinishMode = LZMA_FINISH_ANY;
+ outCur = p->decoder.dicBufSize - dicPos;
+
+ if (outCur >= outSize)
+ {
+ outCur = outSize;
+ curFinishMode = finishMode;
+ }
+
+ res = Lzma2Dec_DecodeToDic(p, dicPos + outCur, src, &inCur, curFinishMode, status);
+
+ src += inCur;
+ inSize -= inCur;
+ *srcLen += inCur;
+ outCur = p->decoder.dicPos - dicPos;
+ memcpy(dest, p->decoder.dic + dicPos, outCur);
+ dest += outCur;
+ outSize -= outCur;
+ *destLen += outCur;
+ if (res != 0)
+ return res;
+ if (outCur == 0 || outSize == 0)
+ return SZ_OK;
+ }
+}
+
+
+SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ Byte prop, ELzmaFinishMode finishMode, ELzmaStatus *status, ISzAllocPtr alloc)
+{
+ CLzma2Dec p;
+ SRes res;
+ SizeT outSize = *destLen, inSize = *srcLen;
+ *destLen = *srcLen = 0;
+ *status = LZMA_STATUS_NOT_SPECIFIED;
+ Lzma2Dec_Construct(&p);
+ RINOK(Lzma2Dec_AllocateProbs(&p, prop, alloc));
+ p.decoder.dic = dest;
+ p.decoder.dicBufSize = outSize;
+ Lzma2Dec_Init(&p);
+ *srcLen = inSize;
+ res = Lzma2Dec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
+ *destLen = p.decoder.dicPos;
+ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
+ res = SZ_ERROR_INPUT_EOF;
+ Lzma2Dec_FreeProbs(&p, alloc);
+ return res;
+}
diff --git a/contrib/libs/lzmasdk/Lzma2Dec.h b/contrib/libs/lzmasdk/Lzma2Dec.h new file mode 100644 index 0000000000..da50387250 --- /dev/null +++ b/contrib/libs/lzmasdk/Lzma2Dec.h @@ -0,0 +1,120 @@ +/* Lzma2Dec.h -- LZMA2 Decoder
+2018-02-19 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA2_DEC_H
+#define __LZMA2_DEC_H
+
+#include "LzmaDec.h"
+
+EXTERN_C_BEGIN
+
+/* ---------- State Interface ---------- */
+
+typedef struct
+{
+ unsigned state;
+ Byte control;
+ Byte needInitLevel;
+ Byte isExtraMode;
+ Byte _pad_;
+ UInt32 packSize;
+ UInt32 unpackSize;
+ CLzmaDec decoder;
+} CLzma2Dec;
+
+#define Lzma2Dec_Construct(p) LzmaDec_Construct(&(p)->decoder)
+#define Lzma2Dec_FreeProbs(p, alloc) LzmaDec_FreeProbs(&(p)->decoder, alloc)
+#define Lzma2Dec_Free(p, alloc) LzmaDec_Free(&(p)->decoder, alloc)
+
+SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc);
+SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc);
+void Lzma2Dec_Init(CLzma2Dec *p);
+
+/*
+finishMode:
+  It has meaning only if the decoding reaches the output limit (*destLen or dicLimit).
+ LZMA_FINISH_ANY - use smallest number of input bytes
+ LZMA_FINISH_END - read EndOfStream marker after decoding
+
+Returns:
+ SZ_OK
+ status:
+ LZMA_STATUS_FINISHED_WITH_MARK
+ LZMA_STATUS_NOT_FINISHED
+ LZMA_STATUS_NEEDS_MORE_INPUT
+ SZ_ERROR_DATA - Data error
+*/
+
+SRes Lzma2Dec_DecodeToDic(CLzma2Dec *p, SizeT dicLimit,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+
+SRes Lzma2Dec_DecodeToBuf(CLzma2Dec *p, Byte *dest, SizeT *destLen,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
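+
+/*
+Example (illustrative sketch, not part of the SDK sources): a streaming decode loop
+built on the state interface above. propByte, alloc and the FillInput()/WriteOutput()
+helpers are hypothetical placeholders for the caller's container parsing and I/O.
+
+static SRes DecodeLzma2StreamExample(Byte propByte, ISzAllocPtr alloc)
+{
+  Byte inBuf[1 << 14], outBuf[1 << 14];
+  CLzma2Dec dec;
+  Lzma2Dec_Construct(&dec);
+  RINOK(Lzma2Dec_Allocate(&dec, propByte, alloc));
+  Lzma2Dec_Init(&dec);
+  for (;;)
+  {
+    SizeT inLen = FillInput(inBuf, sizeof(inBuf));   // 0 means no more input
+    SizeT inPos = 0;
+    if (inLen == 0)
+      break;
+    while (inPos < inLen)
+    {
+      SizeT srcLen = inLen - inPos;
+      SizeT destLen = sizeof(outBuf);
+      ELzmaStatus status;
+      SRes res = Lzma2Dec_DecodeToBuf(&dec, outBuf, &destLen,
+          inBuf + inPos, &srcLen, LZMA_FINISH_ANY, &status);
+      if (res != SZ_OK)
+        { Lzma2Dec_Free(&dec, alloc); return res; }
+      inPos += srcLen;
+      WriteOutput(outBuf, destLen);
+      if (status == LZMA_STATUS_FINISHED_WITH_MARK)
+        { Lzma2Dec_Free(&dec, alloc); return SZ_OK; }
+    }
+  }
+  Lzma2Dec_Free(&dec, alloc);
+  return SZ_ERROR_INPUT_EOF;                         // input ended before the end mark
+}
+*/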
+
+
+/* ---------- LZMA2 block and chunk parsing ---------- */
+
+/*
+Lzma2Dec_Parse() parses the compressed data stream up to the next independent block or the next chunk of data.
+It can return an LZMA_STATUS_* code or an LZMA2_PARSE_STATUS_* code:
+  - LZMA2_PARSE_STATUS_NEW_BLOCK - there is a new block, and 1 additional byte (the control byte of the next block header) was read from the input.
+  - LZMA2_PARSE_STATUS_NEW_CHUNK - there is a new chunk, and only the LZMA2 header of the new chunk was read.
+               CLzma2Dec::unpackSize contains the unpack size of that chunk.
+*/
+
+typedef enum
+{
+/*
+ LZMA_STATUS_NOT_SPECIFIED // data error
+ LZMA_STATUS_FINISHED_WITH_MARK
+ LZMA_STATUS_NOT_FINISHED //
+ LZMA_STATUS_NEEDS_MORE_INPUT
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK // unused
+*/
+ LZMA2_PARSE_STATUS_NEW_BLOCK = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK + 1,
+ LZMA2_PARSE_STATUS_NEW_CHUNK
+} ELzma2ParseStatus;
+
+ELzma2ParseStatus Lzma2Dec_Parse(CLzma2Dec *p,
+ SizeT outSize, // output size
+ const Byte *src, SizeT *srcLen,
+    int checkFinishBlock  // set (checkFinishBlock = 1) if it must read the full input data when decoder.dicPos reaches the blockMax position.
+ );
+
+/*
+The LZMA2 parser doesn't decode LZMA chunks, so the full input LZMA chunk
+  must be read to decode any part of that chunk.
+
+Lzma2Dec_GetUnpackExtra() returns the value that shows the
+  maximum possible number of output bytes that can be output by the decoder
+  at the current input position.
+*/
+
+#define Lzma2Dec_GetUnpackExtra(p) ((p)->isExtraMode ? (p)->unpackSize : 0)
+
+
+/* ---------- One Call Interface ---------- */
+
+/*
+finishMode:
+  It has meaning only if the decoding reaches the output limit (*destLen).
+ LZMA_FINISH_ANY - use smallest number of input bytes
+ LZMA_FINISH_END - read EndOfStream marker after decoding
+
+Returns:
+ SZ_OK
+ status:
+ LZMA_STATUS_FINISHED_WITH_MARK
+ LZMA_STATUS_NOT_FINISHED
+ SZ_ERROR_DATA - Data error
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
+*/
+
+SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ Byte prop, ELzmaFinishMode finishMode, ELzmaStatus *status, ISzAllocPtr alloc);
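+
+/*
+Example (illustrative sketch, not part of the SDK sources): one-call decoding of an
+LZMA2 stream held completely in memory. It assumes the standard ISzAlloc interface
+from 7zTypes.h and <stdlib.h>; prop is the dictionary-size property byte taken from
+the surrounding container format.
+
+static void *ExampleAlloc(ISzAllocPtr p, size_t size) { (void)p; return malloc(size); }
+static void ExampleFree(ISzAllocPtr p, void *addr) { (void)p; free(addr); }
+static const ISzAlloc g_ExampleAlloc = { ExampleAlloc, ExampleFree };
+
+static SRes DecodeWholeLzma2Buf(Byte *dest, SizeT destSize,
+    const Byte *src, SizeT srcSize, Byte prop)
+{
+  SizeT destLen = destSize;
+  SizeT srcLen = srcSize;
+  ELzmaStatus status;
+  SRes res = Lzma2Decode(dest, &destLen, src, &srcLen,
+      prop, LZMA_FINISH_END, &status, &g_ExampleAlloc);
+  // on success, destLen holds the number of bytes actually produced in dest
+  return res;
+}
+*/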
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/Lzma2Enc.c b/contrib/libs/lzmasdk/Lzma2Enc.c new file mode 100644 index 0000000000..d541477527 --- /dev/null +++ b/contrib/libs/lzmasdk/Lzma2Enc.c @@ -0,0 +1,803 @@ +/* Lzma2Enc.c -- LZMA2 Encoder
+2018-07-04 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+/* #define _7ZIP_ST */
+
+#include "Lzma2Enc.h"
+
+#ifndef _7ZIP_ST
+#include "MtCoder.h"
+#else
+#define MTCODER__THREADS_MAX 1
+#endif
+
+#define LZMA2_CONTROL_LZMA (1 << 7)
+#define LZMA2_CONTROL_COPY_NO_RESET 2
+#define LZMA2_CONTROL_COPY_RESET_DIC 1
+#define LZMA2_CONTROL_EOF 0
+
+#define LZMA2_LCLP_MAX 4
+
+#define LZMA2_DIC_SIZE_FROM_PROP(p) (((UInt32)2 | ((p) & 1)) << ((p) / 2 + 11))
+
+#define LZMA2_PACK_SIZE_MAX (1 << 16)
+#define LZMA2_COPY_CHUNK_SIZE LZMA2_PACK_SIZE_MAX
+#define LZMA2_UNPACK_SIZE_MAX (1 << 21)
+#define LZMA2_KEEP_WINDOW_SIZE LZMA2_UNPACK_SIZE_MAX
+
+#define LZMA2_CHUNK_SIZE_COMPRESSED_MAX ((1 << 16) + 16)
+
+
+#define PRF(x) /* x */
+
+
+/* ---------- CLimitedSeqInStream ---------- */
+
+typedef struct
+{
+ ISeqInStream vt;
+ ISeqInStream *realStream;
+ UInt64 limit;
+ UInt64 processed;
+ int finished;
+} CLimitedSeqInStream;
+
+static void LimitedSeqInStream_Init(CLimitedSeqInStream *p)
+{
+ p->limit = (UInt64)(Int64)-1;
+ p->processed = 0;
+ p->finished = 0;
+}
+
+static SRes LimitedSeqInStream_Read(const ISeqInStream *pp, void *data, size_t *size)
+{
+ CLimitedSeqInStream *p = CONTAINER_FROM_VTBL(pp, CLimitedSeqInStream, vt);
+ size_t size2 = *size;
+ SRes res = SZ_OK;
+
+ if (p->limit != (UInt64)(Int64)-1)
+ {
+ UInt64 rem = p->limit - p->processed;
+ if (size2 > rem)
+ size2 = (size_t)rem;
+ }
+ if (size2 != 0)
+ {
+ res = ISeqInStream_Read(p->realStream, data, &size2);
+ p->finished = (size2 == 0 ? 1 : 0);
+ p->processed += size2;
+ }
+ *size = size2;
+ return res;
+}
+
+
+/* ---------- CLzma2EncInt ---------- */
+
+typedef struct
+{
+ CLzmaEncHandle enc;
+ Byte propsAreSet;
+ Byte propsByte;
+ Byte needInitState;
+ Byte needInitProp;
+ UInt64 srcPos;
+} CLzma2EncInt;
+
+
+static SRes Lzma2EncInt_InitStream(CLzma2EncInt *p, const CLzma2EncProps *props)
+{
+ if (!p->propsAreSet)
+ {
+ SizeT propsSize = LZMA_PROPS_SIZE;
+ Byte propsEncoded[LZMA_PROPS_SIZE];
+ RINOK(LzmaEnc_SetProps(p->enc, &props->lzmaProps));
+ RINOK(LzmaEnc_WriteProperties(p->enc, propsEncoded, &propsSize));
+ p->propsByte = propsEncoded[0];
+ p->propsAreSet = True;
+ }
+ return SZ_OK;
+}
+
+static void Lzma2EncInt_InitBlock(CLzma2EncInt *p)
+{
+ p->srcPos = 0;
+ p->needInitState = True;
+ p->needInitProp = True;
+}
+
+
+SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, ISeqInStream *inStream, UInt32 keepWindowSize,
+ ISzAllocPtr alloc, ISzAllocPtr allocBig);
+SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
+ UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig);
+SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
+ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize);
+const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp);
+void LzmaEnc_Finish(CLzmaEncHandle pp);
+void LzmaEnc_SaveState(CLzmaEncHandle pp);
+void LzmaEnc_RestoreState(CLzmaEncHandle pp);
+
+/*
+UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp);
+*/
+
+static SRes Lzma2EncInt_EncodeSubblock(CLzma2EncInt *p, Byte *outBuf,
+ size_t *packSizeRes, ISeqOutStream *outStream)
+{
+ size_t packSizeLimit = *packSizeRes;
+ size_t packSize = packSizeLimit;
+ UInt32 unpackSize = LZMA2_UNPACK_SIZE_MAX;
+ unsigned lzHeaderSize = 5 + (p->needInitProp ? 1 : 0);
+ BoolInt useCopyBlock;
+ SRes res;
+
+ *packSizeRes = 0;
+ if (packSize < lzHeaderSize)
+ return SZ_ERROR_OUTPUT_EOF;
+ packSize -= lzHeaderSize;
+
+ LzmaEnc_SaveState(p->enc);
+ res = LzmaEnc_CodeOneMemBlock(p->enc, p->needInitState,
+ outBuf + lzHeaderSize, &packSize, LZMA2_PACK_SIZE_MAX, &unpackSize);
+
+ PRF(printf("\npackSize = %7d unpackSize = %7d ", packSize, unpackSize));
+
+ if (unpackSize == 0)
+ return res;
+
+ if (res == SZ_OK)
+ useCopyBlock = (packSize + 2 >= unpackSize || packSize > (1 << 16));
+ else
+ {
+ if (res != SZ_ERROR_OUTPUT_EOF)
+ return res;
+ res = SZ_OK;
+ useCopyBlock = True;
+ }
+
+ if (useCopyBlock)
+ {
+ size_t destPos = 0;
+ PRF(printf("################# COPY "));
+
+ while (unpackSize > 0)
+ {
+ UInt32 u = (unpackSize < LZMA2_COPY_CHUNK_SIZE) ? unpackSize : LZMA2_COPY_CHUNK_SIZE;
+ if (packSizeLimit - destPos < u + 3)
+ return SZ_ERROR_OUTPUT_EOF;
+ outBuf[destPos++] = (Byte)(p->srcPos == 0 ? LZMA2_CONTROL_COPY_RESET_DIC : LZMA2_CONTROL_COPY_NO_RESET);
+ outBuf[destPos++] = (Byte)((u - 1) >> 8);
+ outBuf[destPos++] = (Byte)(u - 1);
+ memcpy(outBuf + destPos, LzmaEnc_GetCurBuf(p->enc) - unpackSize, u);
+ unpackSize -= u;
+ destPos += u;
+ p->srcPos += u;
+
+ if (outStream)
+ {
+ *packSizeRes += destPos;
+ if (ISeqOutStream_Write(outStream, outBuf, destPos) != destPos)
+ return SZ_ERROR_WRITE;
+ destPos = 0;
+ }
+ else
+ *packSizeRes = destPos;
+ /* needInitState = True; */
+ }
+
+ LzmaEnc_RestoreState(p->enc);
+ return SZ_OK;
+ }
+
+ {
+ size_t destPos = 0;
+ UInt32 u = unpackSize - 1;
+ UInt32 pm = (UInt32)(packSize - 1);
+ unsigned mode = (p->srcPos == 0) ? 3 : (p->needInitState ? (p->needInitProp ? 2 : 1) : 0);
+
+ PRF(printf(" "));
+
+ outBuf[destPos++] = (Byte)(LZMA2_CONTROL_LZMA | (mode << 5) | ((u >> 16) & 0x1F));
+ outBuf[destPos++] = (Byte)(u >> 8);
+ outBuf[destPos++] = (Byte)u;
+ outBuf[destPos++] = (Byte)(pm >> 8);
+ outBuf[destPos++] = (Byte)pm;
+
+ if (p->needInitProp)
+ outBuf[destPos++] = p->propsByte;
+
+ p->needInitProp = False;
+ p->needInitState = False;
+ destPos += packSize;
+ p->srcPos += unpackSize;
+
+ if (outStream)
+ if (ISeqOutStream_Write(outStream, outBuf, destPos) != destPos)
+ return SZ_ERROR_WRITE;
+
+ *packSizeRes = destPos;
+ return SZ_OK;
+ }
+}
+
+
+/* ---------- Lzma2 Props ---------- */
+
+void Lzma2EncProps_Init(CLzma2EncProps *p)
+{
+ LzmaEncProps_Init(&p->lzmaProps);
+ p->blockSize = LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO;
+ p->numBlockThreads_Reduced = -1;
+ p->numBlockThreads_Max = -1;
+ p->numTotalThreads = -1;
+}
+
+void Lzma2EncProps_Normalize(CLzma2EncProps *p)
+{
+ UInt64 fileSize;
+ int t1, t1n, t2, t2r, t3;
+ {
+ CLzmaEncProps lzmaProps = p->lzmaProps;
+ LzmaEncProps_Normalize(&lzmaProps);
+ t1n = lzmaProps.numThreads;
+ }
+
+ t1 = p->lzmaProps.numThreads;
+ t2 = p->numBlockThreads_Max;
+ t3 = p->numTotalThreads;
+
+ if (t2 > MTCODER__THREADS_MAX)
+ t2 = MTCODER__THREADS_MAX;
+
+ if (t3 <= 0)
+ {
+ if (t2 <= 0)
+ t2 = 1;
+ t3 = t1n * t2;
+ }
+ else if (t2 <= 0)
+ {
+ t2 = t3 / t1n;
+ if (t2 == 0)
+ {
+ t1 = 1;
+ t2 = t3;
+ }
+ if (t2 > MTCODER__THREADS_MAX)
+ t2 = MTCODER__THREADS_MAX;
+ }
+ else if (t1 <= 0)
+ {
+ t1 = t3 / t2;
+ if (t1 == 0)
+ t1 = 1;
+ }
+ else
+ t3 = t1n * t2;
+
+ p->lzmaProps.numThreads = t1;
+
+ t2r = t2;
+
+ fileSize = p->lzmaProps.reduceSize;
+
+ if ( p->blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID
+ && p->blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO
+ && (p->blockSize < fileSize || fileSize == (UInt64)(Int64)-1))
+ p->lzmaProps.reduceSize = p->blockSize;
+
+ LzmaEncProps_Normalize(&p->lzmaProps);
+
+ p->lzmaProps.reduceSize = fileSize;
+
+ t1 = p->lzmaProps.numThreads;
+
+ if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID)
+ {
+ t2r = t2 = 1;
+ t3 = t1;
+ }
+ else if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO && t2 <= 1)
+ {
+ /* if there is no block multi-threading, we use SOLID block */
+ p->blockSize = LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID;
+ }
+ else
+ {
+ if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO)
+ {
+ const UInt32 kMinSize = (UInt32)1 << 20;
+ const UInt32 kMaxSize = (UInt32)1 << 28;
+ const UInt32 dictSize = p->lzmaProps.dictSize;
+ UInt64 blockSize = (UInt64)dictSize << 2;
+ if (blockSize < kMinSize) blockSize = kMinSize;
+ if (blockSize > kMaxSize) blockSize = kMaxSize;
+ if (blockSize < dictSize) blockSize = dictSize;
+ blockSize += (kMinSize - 1);
+ blockSize &= ~(UInt64)(kMinSize - 1);
+ p->blockSize = blockSize;
+ }
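+    /* For example, an 8 MiB dictionary gives dictSize << 2 = 32 MiB, which is within
+       [kMinSize, kMaxSize] and already a multiple of kMinSize, so the auto block size
+       becomes 32 MiB. */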
+
+ if (t2 > 1 && fileSize != (UInt64)(Int64)-1)
+ {
+ UInt64 numBlocks = fileSize / p->blockSize;
+ if (numBlocks * p->blockSize != fileSize)
+ numBlocks++;
+ if (numBlocks < (unsigned)t2)
+ {
+ t2r = (unsigned)numBlocks;
+ if (t2r == 0)
+ t2r = 1;
+ t3 = t1 * t2r;
+ }
+ }
+ }
+
+ p->numBlockThreads_Max = t2;
+ p->numBlockThreads_Reduced = t2r;
+ p->numTotalThreads = t3;
+}
+
+
+static SRes Progress(ICompressProgress *p, UInt64 inSize, UInt64 outSize)
+{
+ return (p && ICompressProgress_Progress(p, inSize, outSize) != SZ_OK) ? SZ_ERROR_PROGRESS : SZ_OK;
+}
+
+
+/* ---------- Lzma2 ---------- */
+
+typedef struct
+{
+ Byte propEncoded;
+ CLzma2EncProps props;
+ UInt64 expectedDataSize;
+
+ Byte *tempBufLzma;
+
+ ISzAllocPtr alloc;
+ ISzAllocPtr allocBig;
+
+ CLzma2EncInt coders[MTCODER__THREADS_MAX];
+
+ #ifndef _7ZIP_ST
+
+ ISeqOutStream *outStream;
+ Byte *outBuf;
+ size_t outBuf_Rem; /* remainder in outBuf */
+
+ size_t outBufSize; /* size of allocated outBufs[i] */
+ size_t outBufsDataSizes[MTCODER__BLOCKS_MAX];
+ BoolInt mtCoder_WasConstructed;
+ CMtCoder mtCoder;
+ Byte *outBufs[MTCODER__BLOCKS_MAX];
+
+ #endif
+
+} CLzma2Enc;
+
+
+
+CLzma2EncHandle Lzma2Enc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ CLzma2Enc *p = (CLzma2Enc *)ISzAlloc_Alloc(alloc, sizeof(CLzma2Enc));
+ if (!p)
+ return NULL;
+ Lzma2EncProps_Init(&p->props);
+ Lzma2EncProps_Normalize(&p->props);
+ p->expectedDataSize = (UInt64)(Int64)-1;
+ p->tempBufLzma = NULL;
+ p->alloc = alloc;
+ p->allocBig = allocBig;
+ {
+ unsigned i;
+ for (i = 0; i < MTCODER__THREADS_MAX; i++)
+ p->coders[i].enc = NULL;
+ }
+
+ #ifndef _7ZIP_ST
+ p->mtCoder_WasConstructed = False;
+ {
+ unsigned i;
+ for (i = 0; i < MTCODER__BLOCKS_MAX; i++)
+ p->outBufs[i] = NULL;
+ p->outBufSize = 0;
+ }
+ #endif
+
+ return p;
+}
+
+
+#ifndef _7ZIP_ST
+
+static void Lzma2Enc_FreeOutBufs(CLzma2Enc *p)
+{
+ unsigned i;
+ for (i = 0; i < MTCODER__BLOCKS_MAX; i++)
+ if (p->outBufs[i])
+ {
+ ISzAlloc_Free(p->alloc, p->outBufs[i]);
+ p->outBufs[i] = NULL;
+ }
+ p->outBufSize = 0;
+}
+
+#endif
+
+
+void Lzma2Enc_Destroy(CLzma2EncHandle pp)
+{
+ CLzma2Enc *p = (CLzma2Enc *)pp;
+ unsigned i;
+ for (i = 0; i < MTCODER__THREADS_MAX; i++)
+ {
+ CLzma2EncInt *t = &p->coders[i];
+ if (t->enc)
+ {
+ LzmaEnc_Destroy(t->enc, p->alloc, p->allocBig);
+ t->enc = NULL;
+ }
+ }
+
+
+ #ifndef _7ZIP_ST
+ if (p->mtCoder_WasConstructed)
+ {
+ MtCoder_Destruct(&p->mtCoder);
+ p->mtCoder_WasConstructed = False;
+ }
+ Lzma2Enc_FreeOutBufs(p);
+ #endif
+
+ ISzAlloc_Free(p->alloc, p->tempBufLzma);
+ p->tempBufLzma = NULL;
+
+ ISzAlloc_Free(p->alloc, pp);
+}
+
+
+SRes Lzma2Enc_SetProps(CLzma2EncHandle pp, const CLzma2EncProps *props)
+{
+ CLzma2Enc *p = (CLzma2Enc *)pp;
+ CLzmaEncProps lzmaProps = props->lzmaProps;
+ LzmaEncProps_Normalize(&lzmaProps);
+ if (lzmaProps.lc + lzmaProps.lp > LZMA2_LCLP_MAX)
+ return SZ_ERROR_PARAM;
+ p->props = *props;
+ Lzma2EncProps_Normalize(&p->props);
+ return SZ_OK;
+}
+
+
+void Lzma2Enc_SetDataSize(CLzmaEncHandle pp, UInt64 expectedDataSize)
+{
+ CLzma2Enc *p = (CLzma2Enc *)pp;
+  p->expectedDataSize = expectedDataSize;
+}
+
+
+Byte Lzma2Enc_WriteProperties(CLzma2EncHandle pp)
+{
+ CLzma2Enc *p = (CLzma2Enc *)pp;
+ unsigned i;
+ UInt32 dicSize = LzmaEncProps_GetDictSize(&p->props.lzmaProps);
+ for (i = 0; i < 40; i++)
+ if (dicSize <= LZMA2_DIC_SIZE_FROM_PROP(i))
+ break;
+ return (Byte)i;
+}
+
+
+static SRes Lzma2Enc_EncodeMt1(
+ CLzma2Enc *me,
+ CLzma2EncInt *p,
+ ISeqOutStream *outStream,
+ Byte *outBuf, size_t *outBufSize,
+ ISeqInStream *inStream,
+ const Byte *inData, size_t inDataSize,
+ int finished,
+ ICompressProgress *progress)
+{
+ UInt64 unpackTotal = 0;
+ UInt64 packTotal = 0;
+ size_t outLim = 0;
+ CLimitedSeqInStream limitedInStream;
+
+ if (outBuf)
+ {
+ outLim = *outBufSize;
+ *outBufSize = 0;
+ }
+
+ if (!p->enc)
+ {
+ p->propsAreSet = False;
+ p->enc = LzmaEnc_Create(me->alloc);
+ if (!p->enc)
+ return SZ_ERROR_MEM;
+ }
+
+ limitedInStream.realStream = inStream;
+ if (inStream)
+ {
+ limitedInStream.vt.Read = LimitedSeqInStream_Read;
+ }
+
+ if (!outBuf)
+ {
+ // outStream version works only in one thread. So we use CLzma2Enc::tempBufLzma
+ if (!me->tempBufLzma)
+ {
+ me->tempBufLzma = (Byte *)ISzAlloc_Alloc(me->alloc, LZMA2_CHUNK_SIZE_COMPRESSED_MAX);
+ if (!me->tempBufLzma)
+ return SZ_ERROR_MEM;
+ }
+ }
+
+ RINOK(Lzma2EncInt_InitStream(p, &me->props));
+
+ for (;;)
+ {
+ SRes res = SZ_OK;
+ size_t inSizeCur = 0;
+
+ Lzma2EncInt_InitBlock(p);
+
+ LimitedSeqInStream_Init(&limitedInStream);
+ limitedInStream.limit = me->props.blockSize;
+
+ if (inStream)
+ {
+ UInt64 expected = (UInt64)(Int64)-1;
+ // inStream version works only in one thread. So we use CLzma2Enc::expectedDataSize
+ if (me->expectedDataSize != (UInt64)(Int64)-1
+ && me->expectedDataSize >= unpackTotal)
+ expected = me->expectedDataSize - unpackTotal;
+ if (me->props.blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID
+ && expected > me->props.blockSize)
+ expected = (size_t)me->props.blockSize;
+
+ LzmaEnc_SetDataSize(p->enc, expected);
+
+ RINOK(LzmaEnc_PrepareForLzma2(p->enc,
+ &limitedInStream.vt,
+ LZMA2_KEEP_WINDOW_SIZE,
+ me->alloc,
+ me->allocBig));
+ }
+ else
+ {
+ inSizeCur = inDataSize - (size_t)unpackTotal;
+ if (me->props.blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID
+ && inSizeCur > me->props.blockSize)
+ inSizeCur = (size_t)me->props.blockSize;
+
+ // LzmaEnc_SetDataSize(p->enc, inSizeCur);
+
+ RINOK(LzmaEnc_MemPrepare(p->enc,
+ inData + (size_t)unpackTotal, inSizeCur,
+ LZMA2_KEEP_WINDOW_SIZE,
+ me->alloc,
+ me->allocBig));
+ }
+
+ for (;;)
+ {
+ size_t packSize = LZMA2_CHUNK_SIZE_COMPRESSED_MAX;
+ if (outBuf)
+ packSize = outLim - (size_t)packTotal;
+
+ res = Lzma2EncInt_EncodeSubblock(p,
+ outBuf ? outBuf + (size_t)packTotal : me->tempBufLzma, &packSize,
+ outBuf ? NULL : outStream);
+
+ if (res != SZ_OK)
+ break;
+
+ packTotal += packSize;
+ if (outBuf)
+ *outBufSize = (size_t)packTotal;
+
+ res = Progress(progress, unpackTotal + p->srcPos, packTotal);
+ if (res != SZ_OK)
+ break;
+
+ /*
+ if (LzmaEnc_GetNumAvailableBytes(p->enc) == 0)
+ break;
+ */
+
+ if (packSize == 0)
+ break;
+ }
+
+ LzmaEnc_Finish(p->enc);
+
+ unpackTotal += p->srcPos;
+
+ RINOK(res);
+
+ if (p->srcPos != (inStream ? limitedInStream.processed : inSizeCur))
+ return SZ_ERROR_FAIL;
+
+ if (inStream ? limitedInStream.finished : (unpackTotal == inDataSize))
+ {
+ if (finished)
+ {
+ if (outBuf)
+ {
+ size_t destPos = *outBufSize;
+ if (destPos >= outLim)
+ return SZ_ERROR_OUTPUT_EOF;
+ outBuf[destPos] = 0;
+ *outBufSize = destPos + 1;
+ }
+ else
+ {
+ Byte b = 0;
+ if (ISeqOutStream_Write(outStream, &b, 1) != 1)
+ return SZ_ERROR_WRITE;
+ }
+ }
+ return SZ_OK;
+ }
+ }
+}
+
+
+
+#ifndef _7ZIP_ST
+
+static SRes Lzma2Enc_MtCallback_Code(void *pp, unsigned coderIndex, unsigned outBufIndex,
+ const Byte *src, size_t srcSize, int finished)
+{
+ CLzma2Enc *me = (CLzma2Enc *)pp;
+ size_t destSize = me->outBufSize;
+ SRes res;
+ CMtProgressThunk progressThunk;
+
+ Byte *dest = me->outBufs[outBufIndex];
+
+ me->outBufsDataSizes[outBufIndex] = 0;
+
+ if (!dest)
+ {
+ dest = (Byte *)ISzAlloc_Alloc(me->alloc, me->outBufSize);
+ if (!dest)
+ return SZ_ERROR_MEM;
+ me->outBufs[outBufIndex] = dest;
+ }
+
+ MtProgressThunk_CreateVTable(&progressThunk);
+ progressThunk.mtProgress = &me->mtCoder.mtProgress;
+ progressThunk.inSize = 0;
+ progressThunk.outSize = 0;
+
+ res = Lzma2Enc_EncodeMt1(me,
+ &me->coders[coderIndex],
+ NULL, dest, &destSize,
+ NULL, src, srcSize,
+ finished,
+ &progressThunk.vt);
+
+ me->outBufsDataSizes[outBufIndex] = destSize;
+
+ return res;
+}
+
+
+static SRes Lzma2Enc_MtCallback_Write(void *pp, unsigned outBufIndex)
+{
+ CLzma2Enc *me = (CLzma2Enc *)pp;
+ size_t size = me->outBufsDataSizes[outBufIndex];
+ const Byte *data = me->outBufs[outBufIndex];
+
+ if (me->outStream)
+ return ISeqOutStream_Write(me->outStream, data, size) == size ? SZ_OK : SZ_ERROR_WRITE;
+
+ if (size > me->outBuf_Rem)
+ return SZ_ERROR_OUTPUT_EOF;
+ memcpy(me->outBuf, data, size);
+ me->outBuf_Rem -= size;
+ me->outBuf += size;
+ return SZ_OK;
+}
+
+#endif
+
+
+
+SRes Lzma2Enc_Encode2(CLzma2EncHandle pp,
+ ISeqOutStream *outStream,
+ Byte *outBuf, size_t *outBufSize,
+ ISeqInStream *inStream,
+ const Byte *inData, size_t inDataSize,
+ ICompressProgress *progress)
+{
+ CLzma2Enc *p = (CLzma2Enc *)pp;
+
+ if (inStream && inData)
+ return SZ_ERROR_PARAM;
+
+ if (outStream && outBuf)
+ return SZ_ERROR_PARAM;
+
+ {
+ unsigned i;
+ for (i = 0; i < MTCODER__THREADS_MAX; i++)
+ p->coders[i].propsAreSet = False;
+ }
+
+ #ifndef _7ZIP_ST
+
+ if (p->props.numBlockThreads_Reduced > 1)
+ {
+ IMtCoderCallback2 vt;
+
+ if (!p->mtCoder_WasConstructed)
+ {
+ p->mtCoder_WasConstructed = True;
+ MtCoder_Construct(&p->mtCoder);
+ }
+
+ vt.Code = Lzma2Enc_MtCallback_Code;
+ vt.Write = Lzma2Enc_MtCallback_Write;
+
+ p->outStream = outStream;
+ p->outBuf = NULL;
+ p->outBuf_Rem = 0;
+ if (!outStream)
+ {
+ p->outBuf = outBuf;
+ p->outBuf_Rem = *outBufSize;
+ *outBufSize = 0;
+ }
+
+ p->mtCoder.allocBig = p->allocBig;
+ p->mtCoder.progress = progress;
+ p->mtCoder.inStream = inStream;
+ p->mtCoder.inData = inData;
+ p->mtCoder.inDataSize = inDataSize;
+ p->mtCoder.mtCallback = &vt;
+ p->mtCoder.mtCallbackObject = p;
+
+ p->mtCoder.blockSize = (size_t)p->props.blockSize;
+ if (p->mtCoder.blockSize != p->props.blockSize)
+ return SZ_ERROR_PARAM; /* SZ_ERROR_MEM */
+
+ {
+ size_t destBlockSize = p->mtCoder.blockSize + (p->mtCoder.blockSize >> 10) + 16;
+ if (destBlockSize < p->mtCoder.blockSize)
+ return SZ_ERROR_PARAM;
+ if (p->outBufSize != destBlockSize)
+ Lzma2Enc_FreeOutBufs(p);
+ p->outBufSize = destBlockSize;
+ }
+
+ p->mtCoder.numThreadsMax = p->props.numBlockThreads_Max;
+ p->mtCoder.expectedDataSize = p->expectedDataSize;
+
+ {
+ SRes res = MtCoder_Code(&p->mtCoder);
+ if (!outStream)
+ *outBufSize = p->outBuf - outBuf;
+ return res;
+ }
+ }
+
+ #endif
+
+
+ return Lzma2Enc_EncodeMt1(p,
+ &p->coders[0],
+ outStream, outBuf, outBufSize,
+ inStream, inData, inDataSize,
+ True, /* finished */
+ progress);
+}
diff --git a/contrib/libs/lzmasdk/Lzma2Enc.h b/contrib/libs/lzmasdk/Lzma2Enc.h new file mode 100644 index 0000000000..65f2dd145d --- /dev/null +++ b/contrib/libs/lzmasdk/Lzma2Enc.h @@ -0,0 +1,55 @@ +/* Lzma2Enc.h -- LZMA2 Encoder
+2017-07-27 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA2_ENC_H
+#define __LZMA2_ENC_H
+
+#include "LzmaEnc.h"
+
+EXTERN_C_BEGIN
+
+#define LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO 0
+#define LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID ((UInt64)(Int64)-1)
+
+typedef struct
+{
+ CLzmaEncProps lzmaProps;
+ UInt64 blockSize;
+ int numBlockThreads_Reduced;
+ int numBlockThreads_Max;
+ int numTotalThreads;
+} CLzma2EncProps;
+
+void Lzma2EncProps_Init(CLzma2EncProps *p);
+void Lzma2EncProps_Normalize(CLzma2EncProps *p);
+
+/* ---------- CLzmaEnc2Handle Interface ---------- */
+
+/* Lzma2Enc_* functions can return the following exit codes:
+SRes:
+ SZ_OK - OK
+ SZ_ERROR_MEM - Memory allocation error
+  SZ_ERROR_PARAM - Incorrect parameter in props
+ SZ_ERROR_WRITE - ISeqOutStream write callback error
+ SZ_ERROR_OUTPUT_EOF - output buffer overflow - version with (Byte *) output
+ SZ_ERROR_PROGRESS - some break from progress callback
+ SZ_ERROR_THREAD - error in multithreading functions (only for Mt version)
+*/
+
+typedef void * CLzma2EncHandle;
+
+CLzma2EncHandle Lzma2Enc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig);
+void Lzma2Enc_Destroy(CLzma2EncHandle p);
+SRes Lzma2Enc_SetProps(CLzma2EncHandle p, const CLzma2EncProps *props);
+void Lzma2Enc_SetDataSize(CLzma2EncHandle p, UInt64 expectedDataSize);
+Byte Lzma2Enc_WriteProperties(CLzma2EncHandle p);
+SRes Lzma2Enc_Encode2(CLzma2EncHandle p,
+ ISeqOutStream *outStream,
+ Byte *outBuf, size_t *outBufSize,
+ ISeqInStream *inStream,
+ const Byte *inData, size_t inDataSize,
+ ICompressProgress *progress);
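+
+/*
+Example (illustrative sketch, not part of the SDK sources): in-memory LZMA2 compression
+with the handle interface above. It assumes the standard ISzAlloc interface from
+7zTypes.h and <stdlib.h>; error handling is reduced to early returns. On entry *destLen
+is the capacity of dest; on return it is the number of bytes written.
+
+static void *ExampleAlloc(ISzAllocPtr p, size_t size) { (void)p; return malloc(size); }
+static void ExampleFree(ISzAllocPtr p, void *addr) { (void)p; free(addr); }
+static const ISzAlloc g_ExampleAlloc = { ExampleAlloc, ExampleFree };
+
+static SRes EncodeWholeLzma2Buf(Byte *dest, size_t *destLen,
+    const Byte *src, size_t srcLen, Byte *propByte)
+{
+  CLzma2EncProps props;
+  SRes res;
+  CLzma2EncHandle enc = Lzma2Enc_Create(&g_ExampleAlloc, &g_ExampleAlloc);
+  if (!enc)
+    return SZ_ERROR_MEM;
+  Lzma2EncProps_Init(&props);
+  props.lzmaProps.level = 5;                       // any CLzmaEncProps settings can be adjusted here
+  res = Lzma2Enc_SetProps(enc, &props);
+  if (res == SZ_OK)
+  {
+    *propByte = Lzma2Enc_WriteProperties(enc);     // dictionary-size property byte for the decoder
+    Lzma2Enc_SetDataSize(enc, srcLen);
+    res = Lzma2Enc_Encode2(enc, NULL, dest, destLen, NULL, src, srcLen, NULL);
+  }
+  Lzma2Enc_Destroy(enc);
+  return res;
+}
+*/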
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/LzmaDec.c b/contrib/libs/lzmasdk/LzmaDec.c new file mode 100644 index 0000000000..4d1576419f --- /dev/null +++ b/contrib/libs/lzmasdk/LzmaDec.c @@ -0,0 +1,1185 @@ +/* LzmaDec.c -- LZMA Decoder
+2018-07-04 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+/* #include "CpuArch.h" */
+#include "LzmaDec.h"
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+#define RC_INIT_SIZE 5
+
+#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); }
+
+#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound)
+#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
+#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits));
+#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \
+ { UPDATE_0(p); i = (i + i); A0; } else \
+ { UPDATE_1(p); i = (i + i) + 1; A1; }
+
+#define TREE_GET_BIT(probs, i) { GET_BIT2(probs + i, i, ;, ;); }
+
+#define REV_BIT(p, i, A0, A1) IF_BIT_0(p + i) \
+ { UPDATE_0(p + i); A0; } else \
+ { UPDATE_1(p + i); A1; }
+#define REV_BIT_VAR( p, i, m) REV_BIT(p, i, i += m; m += m, m += m; i += m; )
+#define REV_BIT_CONST(p, i, m) REV_BIT(p, i, i += m; , i += m * 2; )
+#define REV_BIT_LAST( p, i, m) REV_BIT(p, i, i -= m , ; )
+
+#define TREE_DECODE(probs, limit, i) \
+ { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }
+
+/* #define _LZMA_SIZE_OPT */
+
+#ifdef _LZMA_SIZE_OPT
+#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i)
+#else
+#define TREE_6_DECODE(probs, i) \
+ { i = 1; \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ TREE_GET_BIT(probs, i); \
+ i -= 0x40; }
+#endif
+
+#define NORMAL_LITER_DEC TREE_GET_BIT(prob, symbol)
+#define MATCHED_LITER_DEC \
+ matchByte += matchByte; \
+ bit = offs; \
+ offs &= matchByte; \
+ probLit = prob + (offs + bit + symbol); \
+ GET_BIT2(probLit, symbol, offs ^= bit; , ;)
+
+
+
+#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }
+
+#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound)
+#define UPDATE_0_CHECK range = bound;
+#define UPDATE_1_CHECK range -= bound; code -= bound;
+#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \
+ { UPDATE_0_CHECK; i = (i + i); A0; } else \
+ { UPDATE_1_CHECK; i = (i + i) + 1; A1; }
+#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;)
+#define TREE_DECODE_CHECK(probs, limit, i) \
+ { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }
+
+
+#define REV_BIT_CHECK(p, i, m) IF_BIT_0_CHECK(p + i) \
+ { UPDATE_0_CHECK; i += m; m += m; } else \
+ { UPDATE_1_CHECK; m += m; i += m; }
+
+
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenLow 0
+#define LenHigh (LenLow + 2 * (kNumPosStatesMax << kLenNumLowBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
+
+#define LenChoice LenLow
+#define LenChoice2 (LenLow + (1 << kLenNumLowBits))
+
+#define kNumStates 12
+#define kNumStates2 16
+#define kNumLitStates 7
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols * 2 + kLenNumHighSymbols)
+
+/* External ASM code needs the same CLzmaProb array layout. So don't change it. */
+
+/* (probs_1664) is faster and better for code size on some platforms */
+/*
+#ifdef MY_CPU_X86_OR_AMD64
+*/
+#define kStartOffset 1664
+#define GET_PROBS p->probs_1664
+/*
+#define GET_PROBS p->probs + kStartOffset
+#else
+#define kStartOffset 0
+#define GET_PROBS p->probs
+#endif
+*/
+
+#define SpecPos (-kStartOffset)
+#define IsRep0Long (SpecPos + kNumFullDistances)
+#define RepLenCoder (IsRep0Long + (kNumStates2 << kNumPosBitsMax))
+#define LenCoder (RepLenCoder + kNumLenProbs)
+#define IsMatch (LenCoder + kNumLenProbs)
+#define Align (IsMatch + (kNumStates2 << kNumPosBitsMax))
+#define IsRep (Align + kAlignTableSize)
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define PosSlot (IsRepG2 + kNumStates)
+#define Literal (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define NUM_BASE_PROBS (Literal + kStartOffset)
+
+#if Align != 0 && kStartOffset != 0
+ #error Stop_Compiling_Bad_LZMA_kAlign
+#endif
+
+#if NUM_BASE_PROBS != 1984
+ #error Stop_Compiling_Bad_LZMA_PROBS
+#endif
+
+
+#define LZMA_LIT_SIZE 0x300
+
+#define LzmaProps_GetNumProbs(p) (NUM_BASE_PROBS + ((UInt32)LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))
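+/* For example, with the default props lc = 3, lp = 0 this is
+   1984 + (0x300 << 3) = 8128 probability slots, i.e. about 16 KiB with the
+   default 16-bit CLzmaProb. */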
+
+
+#define CALC_POS_STATE(processedPos, pbMask) (((processedPos) & (pbMask)) << 4)
+#define COMBINED_PS_STATE (posState + state)
+#define GET_LEN_STATE (posState)
+
+#define LZMA_DIC_MIN (1 << 12)
+
+/*
+p->remainLen : shows status of LZMA decoder:
+ < kMatchSpecLenStart : normal remain
+ = kMatchSpecLenStart : finished
+ = kMatchSpecLenStart + 1 : need init range coder
+ = kMatchSpecLenStart + 2 : need init range coder and state
+*/
+
+/* ---------- LZMA_DECODE_REAL ---------- */
+/*
+LzmaDec_DecodeReal_3() can be implemented in an external ASM file.
+3 is the code-compatibility version of that function, checked at link time.
+*/
+
+#define LZMA_DECODE_REAL LzmaDec_DecodeReal_3
+
+/*
+LZMA_DECODE_REAL()
+In:
+ RangeCoder is normalized
+ if (p->dicPos == limit)
+ {
+ LzmaDec_TryDummy() was called before to exclude LITERAL and MATCH-REP cases.
+    So the first symbol can only be MATCH-NON-REP. And if that MATCH-NON-REP symbol
+    is not the END_OF_PAYLOAD_MARKER, then the function returns an error code.
+ }
+
+Processing:
+  the first LZMA symbol will be decoded in any case.
+  All checks for limits are at the end of the main loop.
+  It will decode new LZMA symbols while (p->buf < bufLimit && dicPos < limit),
+  and the RangeCoder is still without the last normalization when (p->buf < bufLimit) is being checked.
+
+Out:
+ RangeCoder is normalized
+ Result:
+ SZ_OK - OK
+ SZ_ERROR_DATA - Error
+ p->remainLen:
+ < kMatchSpecLenStart : normal remain
+ = kMatchSpecLenStart : finished
+*/
+
+
+#ifdef _LZMA_DEC_OPT
+
+int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit);
+
+#else
+
+static
+int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
+{
+ CLzmaProb *probs = GET_PROBS;
+ unsigned state = (unsigned)p->state;
+ UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];
+ unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;
+ unsigned lc = p->prop.lc;
+ unsigned lpMask = ((unsigned)0x100 << p->prop.lp) - ((unsigned)0x100 >> lc);
+
+ Byte *dic = p->dic;
+ SizeT dicBufSize = p->dicBufSize;
+ SizeT dicPos = p->dicPos;
+
+ UInt32 processedPos = p->processedPos;
+ UInt32 checkDicSize = p->checkDicSize;
+ unsigned len = 0;
+
+ const Byte *buf = p->buf;
+ UInt32 range = p->range;
+ UInt32 code = p->code;
+
+ do
+ {
+ CLzmaProb *prob;
+ UInt32 bound;
+ unsigned ttt;
+ unsigned posState = CALC_POS_STATE(processedPos, pbMask);
+
+ prob = probs + IsMatch + COMBINED_PS_STATE;
+ IF_BIT_0(prob)
+ {
+ unsigned symbol;
+ UPDATE_0(prob);
+ prob = probs + Literal;
+ if (processedPos != 0 || checkDicSize != 0)
+ prob += (UInt32)3 * ((((processedPos << 8) + dic[(dicPos == 0 ? dicBufSize : dicPos) - 1]) & lpMask) << lc);
+ processedPos++;
+
+ if (state < kNumLitStates)
+ {
+ state -= (state < 4) ? state : 3;
+ symbol = 1;
+ #ifdef _LZMA_SIZE_OPT
+ do { NORMAL_LITER_DEC } while (symbol < 0x100);
+ #else
+ NORMAL_LITER_DEC
+ NORMAL_LITER_DEC
+ NORMAL_LITER_DEC
+ NORMAL_LITER_DEC
+ NORMAL_LITER_DEC
+ NORMAL_LITER_DEC
+ NORMAL_LITER_DEC
+ NORMAL_LITER_DEC
+ #endif
+ }
+ else
+ {
+ unsigned matchByte = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
+ unsigned offs = 0x100;
+ state -= (state < 10) ? 3 : 6;
+ symbol = 1;
+ #ifdef _LZMA_SIZE_OPT
+ do
+ {
+ unsigned bit;
+ CLzmaProb *probLit;
+ MATCHED_LITER_DEC
+ }
+ while (symbol < 0x100);
+ #else
+ {
+ unsigned bit;
+ CLzmaProb *probLit;
+ MATCHED_LITER_DEC
+ MATCHED_LITER_DEC
+ MATCHED_LITER_DEC
+ MATCHED_LITER_DEC
+ MATCHED_LITER_DEC
+ MATCHED_LITER_DEC
+ MATCHED_LITER_DEC
+ MATCHED_LITER_DEC
+ }
+ #endif
+ }
+
+ dic[dicPos++] = (Byte)symbol;
+ continue;
+ }
+
+ {
+ UPDATE_1(prob);
+ prob = probs + IsRep + state;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ state += kNumStates;
+ prob = probs + LenCoder;
+ }
+ else
+ {
+ UPDATE_1(prob);
+ /*
+ // that case was checked before with kBadRepCode
+ if (checkDicSize == 0 && processedPos == 0)
+ return SZ_ERROR_DATA;
+ */
+ prob = probs + IsRepG0 + state;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ prob = probs + IsRep0Long + COMBINED_PS_STATE;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ dic[dicPos] = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
+ dicPos++;
+ processedPos++;
+ state = state < kNumLitStates ? 9 : 11;
+ continue;
+ }
+ UPDATE_1(prob);
+ }
+ else
+ {
+ UInt32 distance;
+ UPDATE_1(prob);
+ prob = probs + IsRepG1 + state;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ distance = rep1;
+ }
+ else
+ {
+ UPDATE_1(prob);
+ prob = probs + IsRepG2 + state;
+ IF_BIT_0(prob)
+ {
+ UPDATE_0(prob);
+ distance = rep2;
+ }
+ else
+ {
+ UPDATE_1(prob);
+ distance = rep3;
+ rep3 = rep2;
+ }
+ rep2 = rep1;
+ }
+ rep1 = rep0;
+ rep0 = distance;
+ }
+ state = state < kNumLitStates ? 8 : 11;
+ prob = probs + RepLenCoder;
+ }
+
+ #ifdef _LZMA_SIZE_OPT
+ {
+ unsigned lim, offset;
+ CLzmaProb *probLen = prob + LenChoice;
+ IF_BIT_0(probLen)
+ {
+ UPDATE_0(probLen);
+ probLen = prob + LenLow + GET_LEN_STATE;
+ offset = 0;
+ lim = (1 << kLenNumLowBits);
+ }
+ else
+ {
+ UPDATE_1(probLen);
+ probLen = prob + LenChoice2;
+ IF_BIT_0(probLen)
+ {
+ UPDATE_0(probLen);
+ probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
+ offset = kLenNumLowSymbols;
+ lim = (1 << kLenNumLowBits);
+ }
+ else
+ {
+ UPDATE_1(probLen);
+ probLen = prob + LenHigh;
+ offset = kLenNumLowSymbols * 2;
+ lim = (1 << kLenNumHighBits);
+ }
+ }
+ TREE_DECODE(probLen, lim, len);
+ len += offset;
+ }
+ #else
+ {
+ CLzmaProb *probLen = prob + LenChoice;
+ IF_BIT_0(probLen)
+ {
+ UPDATE_0(probLen);
+ probLen = prob + LenLow + GET_LEN_STATE;
+ len = 1;
+ TREE_GET_BIT(probLen, len);
+ TREE_GET_BIT(probLen, len);
+ TREE_GET_BIT(probLen, len);
+ len -= 8;
+ }
+ else
+ {
+ UPDATE_1(probLen);
+ probLen = prob + LenChoice2;
+ IF_BIT_0(probLen)
+ {
+ UPDATE_0(probLen);
+ probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
+ len = 1;
+ TREE_GET_BIT(probLen, len);
+ TREE_GET_BIT(probLen, len);
+ TREE_GET_BIT(probLen, len);
+ }
+ else
+ {
+ UPDATE_1(probLen);
+ probLen = prob + LenHigh;
+ TREE_DECODE(probLen, (1 << kLenNumHighBits), len);
+ len += kLenNumLowSymbols * 2;
+ }
+ }
+ }
+ #endif
+
+ if (state >= kNumStates)
+ {
+ UInt32 distance;
+ prob = probs + PosSlot +
+ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits);
+ TREE_6_DECODE(prob, distance);
+ if (distance >= kStartPosModelIndex)
+ {
+ unsigned posSlot = (unsigned)distance;
+ unsigned numDirectBits = (unsigned)(((distance >> 1) - 1));
+ distance = (2 | (distance & 1));
+ if (posSlot < kEndPosModelIndex)
+ {
+ distance <<= numDirectBits;
+ prob = probs + SpecPos;
+ {
+ UInt32 m = 1;
+ distance++;
+ do
+ {
+ REV_BIT_VAR(prob, distance, m);
+ }
+ while (--numDirectBits);
+ distance -= m;
+ }
+ }
+ else
+ {
+ numDirectBits -= kNumAlignBits;
+ do
+ {
+ NORMALIZE
+ range >>= 1;
+
+ {
+ UInt32 t;
+ code -= range;
+ t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */
+ distance = (distance << 1) + (t + 1);
+ code += range & t;
+ }
+ /*
+ distance <<= 1;
+ if (code >= range)
+ {
+ code -= range;
+ distance |= 1;
+ }
+ */
+ }
+ while (--numDirectBits);
+ prob = probs + Align;
+ distance <<= kNumAlignBits;
+ {
+ unsigned i = 1;
+ REV_BIT_CONST(prob, i, 1);
+ REV_BIT_CONST(prob, i, 2);
+ REV_BIT_CONST(prob, i, 4);
+ REV_BIT_LAST (prob, i, 8);
+ distance |= i;
+ }
+ if (distance == (UInt32)0xFFFFFFFF)
+ {
+ len = kMatchSpecLenStart;
+ state -= kNumStates;
+ break;
+ }
+ }
+ }
+
+ rep3 = rep2;
+ rep2 = rep1;
+ rep1 = rep0;
+ rep0 = distance + 1;
+ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;
+ if (distance >= (checkDicSize == 0 ? processedPos: checkDicSize))
+ {
+ p->dicPos = dicPos;
+ return SZ_ERROR_DATA;
+ }
+ }
+
+ len += kMatchMinLen;
+
+ {
+ SizeT rem;
+ unsigned curLen;
+ SizeT pos;
+
+ if ((rem = limit - dicPos) == 0)
+ {
+ p->dicPos = dicPos;
+ return SZ_ERROR_DATA;
+ }
+
+ curLen = ((rem < len) ? (unsigned)rem : len);
+ pos = dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0);
+
+ processedPos += (UInt32)curLen;
+
+ len -= curLen;
+ if (curLen <= dicBufSize - pos)
+ {
+ Byte *dest = dic + dicPos;
+ ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos;
+ const Byte *lim = dest + curLen;
+ dicPos += (SizeT)curLen;
+ do
+ *(dest) = (Byte)*(dest + src);
+ while (++dest != lim);
+ }
+ else
+ {
+ do
+ {
+ dic[dicPos++] = dic[pos];
+ if (++pos == dicBufSize)
+ pos = 0;
+ }
+ while (--curLen != 0);
+ }
+ }
+ }
+ }
+ while (dicPos < limit && buf < bufLimit);
+
+ NORMALIZE;
+
+ p->buf = buf;
+ p->range = range;
+ p->code = code;
+ p->remainLen = (UInt32)len;
+ p->dicPos = dicPos;
+ p->processedPos = processedPos;
+ p->reps[0] = rep0;
+ p->reps[1] = rep1;
+ p->reps[2] = rep2;
+ p->reps[3] = rep3;
+ p->state = (UInt32)state;
+
+ return SZ_OK;
+}
+#endif
+
+static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)
+{
+ if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart)
+ {
+ Byte *dic = p->dic;
+ SizeT dicPos = p->dicPos;
+ SizeT dicBufSize = p->dicBufSize;
+ unsigned len = (unsigned)p->remainLen;
+ SizeT rep0 = p->reps[0]; /* we use SizeT to avoid the BUG of VC14 for AMD64 */
+ SizeT rem = limit - dicPos;
+ if (rem < len)
+ len = (unsigned)(rem);
+
+ if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len)
+ p->checkDicSize = p->prop.dicSize;
+
+ p->processedPos += (UInt32)len;
+ p->remainLen -= (UInt32)len;
+ while (len != 0)
+ {
+ len--;
+ dic[dicPos] = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)];
+ dicPos++;
+ }
+ p->dicPos = dicPos;
+ }
+}
+
+
+#define kRange0 0xFFFFFFFF
+#define kBound0 ((kRange0 >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1))
+#define kBadRepCode (kBound0 + (((kRange0 - kBound0) >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1)))
+#if kBadRepCode != (0xC0000000 - 0x400)
+ #error Stop_Compiling_Bad_LZMA_Check
+#endif
+
+static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)
+{
+ do
+ {
+ SizeT limit2 = limit;
+ if (p->checkDicSize == 0)
+ {
+ UInt32 rem = p->prop.dicSize - p->processedPos;
+ if (limit - p->dicPos > rem)
+ limit2 = p->dicPos + rem;
+
+ if (p->processedPos == 0)
+ if (p->code >= kBadRepCode)
+ return SZ_ERROR_DATA;
+ }
+
+ RINOK(LZMA_DECODE_REAL(p, limit2, bufLimit));
+
+ if (p->checkDicSize == 0 && p->processedPos >= p->prop.dicSize)
+ p->checkDicSize = p->prop.dicSize;
+
+ LzmaDec_WriteRem(p, limit);
+ }
+ while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);
+
+ return 0;
+}
+
+typedef enum
+{
+ DUMMY_ERROR, /* unexpected end of input stream */
+ DUMMY_LIT,
+ DUMMY_MATCH,
+ DUMMY_REP
+} ELzmaDummy;
+
+static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize)
+{
+ UInt32 range = p->range;
+ UInt32 code = p->code;
+ const Byte *bufLimit = buf + inSize;
+ const CLzmaProb *probs = GET_PROBS;
+ unsigned state = (unsigned)p->state;
+ ELzmaDummy res;
+
+ {
+ const CLzmaProb *prob;
+ UInt32 bound;
+ unsigned ttt;
+ unsigned posState = CALC_POS_STATE(p->processedPos, (1 << p->prop.pb) - 1);
+
+ prob = probs + IsMatch + COMBINED_PS_STATE;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK
+
+ /* if (bufLimit - buf >= 7) return DUMMY_LIT; */
+
+ prob = probs + Literal;
+ if (p->checkDicSize != 0 || p->processedPos != 0)
+ prob += ((UInt32)LZMA_LIT_SIZE *
+ ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) +
+ (p->dic[(p->dicPos == 0 ? p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc))));
+
+ if (state < kNumLitStates)
+ {
+ unsigned symbol = 1;
+ do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100);
+ }
+ else
+ {
+ unsigned matchByte = p->dic[p->dicPos - p->reps[0] +
+ (p->dicPos < p->reps[0] ? p->dicBufSize : 0)];
+ unsigned offs = 0x100;
+ unsigned symbol = 1;
+ do
+ {
+ unsigned bit;
+ const CLzmaProb *probLit;
+ matchByte += matchByte;
+ bit = offs;
+ offs &= matchByte;
+ probLit = prob + (offs + bit + symbol);
+ GET_BIT2_CHECK(probLit, symbol, offs ^= bit; , ; )
+ }
+ while (symbol < 0x100);
+ }
+ res = DUMMY_LIT;
+ }
+ else
+ {
+ unsigned len;
+ UPDATE_1_CHECK;
+
+ prob = probs + IsRep + state;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ state = 0;
+ prob = probs + LenCoder;
+ res = DUMMY_MATCH;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ res = DUMMY_REP;
+ prob = probs + IsRepG0 + state;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ prob = probs + IsRep0Long + COMBINED_PS_STATE;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ NORMALIZE_CHECK;
+ return DUMMY_REP;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ }
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ prob = probs + IsRepG1 + state;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ prob = probs + IsRepG2 + state;
+ IF_BIT_0_CHECK(prob)
+ {
+ UPDATE_0_CHECK;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ }
+ }
+ }
+ state = kNumStates;
+ prob = probs + RepLenCoder;
+ }
+ {
+ unsigned limit, offset;
+ const CLzmaProb *probLen = prob + LenChoice;
+ IF_BIT_0_CHECK(probLen)
+ {
+ UPDATE_0_CHECK;
+ probLen = prob + LenLow + GET_LEN_STATE;
+ offset = 0;
+ limit = 1 << kLenNumLowBits;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ probLen = prob + LenChoice2;
+ IF_BIT_0_CHECK(probLen)
+ {
+ UPDATE_0_CHECK;
+ probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);
+ offset = kLenNumLowSymbols;
+ limit = 1 << kLenNumLowBits;
+ }
+ else
+ {
+ UPDATE_1_CHECK;
+ probLen = prob + LenHigh;
+ offset = kLenNumLowSymbols * 2;
+ limit = 1 << kLenNumHighBits;
+ }
+ }
+ TREE_DECODE_CHECK(probLen, limit, len);
+ len += offset;
+ }
+
+ if (state < 4)
+ {
+ unsigned posSlot;
+ prob = probs + PosSlot +
+ ((len < kNumLenToPosStates - 1 ? len : kNumLenToPosStates - 1) <<
+ kNumPosSlotBits);
+ TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);
+ if (posSlot >= kStartPosModelIndex)
+ {
+ unsigned numDirectBits = ((posSlot >> 1) - 1);
+
+ /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */
+
+ if (posSlot < kEndPosModelIndex)
+ {
+ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits);
+ }
+ else
+ {
+ numDirectBits -= kNumAlignBits;
+ do
+ {
+ NORMALIZE_CHECK
+ range >>= 1;
+ code -= range & (((code - range) >> 31) - 1);
+ /* if (code >= range) code -= range; */
+ }
+ while (--numDirectBits);
+ prob = probs + Align;
+ numDirectBits = kNumAlignBits;
+ }
+ {
+ unsigned i = 1;
+ unsigned m = 1;
+ do
+ {
+ REV_BIT_CHECK(prob, i, m);
+ }
+ while (--numDirectBits);
+ }
+ }
+ }
+ }
+ }
+ NORMALIZE_CHECK;
+ return res;
+}
+
+
+void LzmaDec_InitDicAndState(CLzmaDec *p, BoolInt initDic, BoolInt initState)
+{
+ p->remainLen = kMatchSpecLenStart + 1;
+ p->tempBufSize = 0;
+
+ if (initDic)
+ {
+ p->processedPos = 0;
+ p->checkDicSize = 0;
+ p->remainLen = kMatchSpecLenStart + 2;
+ }
+ if (initState)
+ p->remainLen = kMatchSpecLenStart + 2;
+}
+
+void LzmaDec_Init(CLzmaDec *p)
+{
+ p->dicPos = 0;
+ LzmaDec_InitDicAndState(p, True, True);
+}
+
+
+SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,
+ ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+ SizeT inSize = *srcLen;
+ (*srcLen) = 0;
+
+ *status = LZMA_STATUS_NOT_SPECIFIED;
+
+ if (p->remainLen > kMatchSpecLenStart)
+ {
+ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)
+ p->tempBuf[p->tempBufSize++] = *src++;
+ if (p->tempBufSize != 0 && p->tempBuf[0] != 0)
+ return SZ_ERROR_DATA;
+ if (p->tempBufSize < RC_INIT_SIZE)
+ {
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+ p->code =
+ ((UInt32)p->tempBuf[1] << 24)
+ | ((UInt32)p->tempBuf[2] << 16)
+ | ((UInt32)p->tempBuf[3] << 8)
+ | ((UInt32)p->tempBuf[4]);
+ p->range = 0xFFFFFFFF;
+ p->tempBufSize = 0;
+
+ if (p->remainLen > kMatchSpecLenStart + 1)
+ {
+ SizeT numProbs = LzmaProps_GetNumProbs(&p->prop);
+ SizeT i;
+ CLzmaProb *probs = p->probs;
+ for (i = 0; i < numProbs; i++)
+ probs[i] = kBitModelTotal >> 1;
+ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;
+ p->state = 0;
+ }
+
+ p->remainLen = 0;
+ }
+
+ LzmaDec_WriteRem(p, dicLimit);
+
+ while (p->remainLen != kMatchSpecLenStart)
+ {
+ int checkEndMarkNow = 0;
+
+ if (p->dicPos >= dicLimit)
+ {
+ if (p->remainLen == 0 && p->code == 0)
+ {
+ *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK;
+ return SZ_OK;
+ }
+ if (finishMode == LZMA_FINISH_ANY)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_OK;
+ }
+ if (p->remainLen != 0)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_ERROR_DATA;
+ }
+ checkEndMarkNow = 1;
+ }
+
+ if (p->tempBufSize == 0)
+ {
+ SizeT processed;
+ const Byte *bufLimit;
+ if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
+ {
+ int dummyRes = LzmaDec_TryDummy(p, src, inSize);
+ if (dummyRes == DUMMY_ERROR)
+ {
+ memcpy(p->tempBuf, src, inSize);
+ p->tempBufSize = (unsigned)inSize;
+ (*srcLen) += inSize;
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_ERROR_DATA;
+ }
+ bufLimit = src;
+ }
+ else
+ bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX;
+ p->buf = src;
+ if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0)
+ return SZ_ERROR_DATA;
+ processed = (SizeT)(p->buf - src);
+ (*srcLen) += processed;
+ src += processed;
+ inSize -= processed;
+ }
+ else
+ {
+ unsigned rem = p->tempBufSize, lookAhead = 0;
+ while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize)
+ p->tempBuf[rem++] = src[lookAhead++];
+ p->tempBufSize = rem;
+ if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow)
+ {
+ int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, (SizeT)rem);
+ if (dummyRes == DUMMY_ERROR)
+ {
+ (*srcLen) += (SizeT)lookAhead;
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;
+ return SZ_OK;
+ }
+ if (checkEndMarkNow && dummyRes != DUMMY_MATCH)
+ {
+ *status = LZMA_STATUS_NOT_FINISHED;
+ return SZ_ERROR_DATA;
+ }
+ }
+ p->buf = p->tempBuf;
+ if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0)
+ return SZ_ERROR_DATA;
+
+ {
+ unsigned kkk = (unsigned)(p->buf - p->tempBuf);
+ if (rem < kkk)
+ return SZ_ERROR_FAIL; /* some internal error */
+ rem -= kkk;
+ if (lookAhead < rem)
+ return SZ_ERROR_FAIL; /* some internal error */
+ lookAhead -= rem;
+ }
+ (*srcLen) += (SizeT)lookAhead;
+ src += lookAhead;
+ inSize -= (SizeT)lookAhead;
+ p->tempBufSize = 0;
+ }
+ }
+
+ if (p->code != 0)
+ return SZ_ERROR_DATA;
+ *status = LZMA_STATUS_FINISHED_WITH_MARK;
+ return SZ_OK;
+}
+
+
+SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)
+{
+ SizeT outSize = *destLen;
+ SizeT inSize = *srcLen;
+ *srcLen = *destLen = 0;
+ for (;;)
+ {
+ SizeT inSizeCur = inSize, outSizeCur, dicPos;
+ ELzmaFinishMode curFinishMode;
+ SRes res;
+ if (p->dicPos == p->dicBufSize)
+ p->dicPos = 0;
+ dicPos = p->dicPos;
+ if (outSize > p->dicBufSize - dicPos)
+ {
+ outSizeCur = p->dicBufSize;
+ curFinishMode = LZMA_FINISH_ANY;
+ }
+ else
+ {
+ outSizeCur = dicPos + outSize;
+ curFinishMode = finishMode;
+ }
+
+ res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status);
+ src += inSizeCur;
+ inSize -= inSizeCur;
+ *srcLen += inSizeCur;
+ outSizeCur = p->dicPos - dicPos;
+ memcpy(dest, p->dic + dicPos, outSizeCur);
+ dest += outSizeCur;
+ outSize -= outSizeCur;
+ *destLen += outSizeCur;
+ if (res != 0)
+ return res;
+ if (outSizeCur == 0 || outSize == 0)
+ return SZ_OK;
+ }
+}
+
+void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc)
+{
+ ISzAlloc_Free(alloc, p->probs);
+ p->probs = NULL;
+}
+
+static void LzmaDec_FreeDict(CLzmaDec *p, ISzAllocPtr alloc)
+{
+ ISzAlloc_Free(alloc, p->dic);
+ p->dic = NULL;
+}
+
+void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc)
+{
+ LzmaDec_FreeProbs(p, alloc);
+ LzmaDec_FreeDict(p, alloc);
+}
+
+SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size)
+{
+ UInt32 dicSize;
+ Byte d;
+
+ if (size < LZMA_PROPS_SIZE)
+ return SZ_ERROR_UNSUPPORTED;
+ else
+ dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24);
+
+ if (dicSize < LZMA_DIC_MIN)
+ dicSize = LZMA_DIC_MIN;
+ p->dicSize = dicSize;
+
+ d = data[0];
+ if (d >= (9 * 5 * 5))
+ return SZ_ERROR_UNSUPPORTED;
+
+ p->lc = (Byte)(d % 9);
+ d /= 9;
+ p->pb = (Byte)(d / 5);
+ p->lp = (Byte)(d % 5);
+
+ return SZ_OK;
+}
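+
+/* Worked example (note added for illustration; not part of the original source):
+   for the common 5-byte properties header 5D 00 00 10 00:
+     d = 0x5D = 93;  lc = 93 % 9 = 3;  93 / 9 = 10;  pb = 10 / 5 = 2;  lp = 10 % 5 = 0;
+     dicSize = 0x00100000 (1 MiB), read little-endian from data[1..4]. */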
+
+static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAllocPtr alloc)
+{
+ UInt32 numProbs = LzmaProps_GetNumProbs(propNew);
+ if (!p->probs || numProbs != p->numProbs)
+ {
+ LzmaDec_FreeProbs(p, alloc);
+ p->probs = (CLzmaProb *)ISzAlloc_Alloc(alloc, numProbs * sizeof(CLzmaProb));
+ if (!p->probs)
+ return SZ_ERROR_MEM;
+ p->probs_1664 = p->probs + 1664;
+ p->numProbs = numProbs;
+ }
+ return SZ_OK;
+}
+
+SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc)
+{
+ CLzmaProps propNew;
+ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
+ p->prop = propNew;
+ return SZ_OK;
+}
+
+SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc)
+{
+ CLzmaProps propNew;
+ SizeT dicBufSize;
+ RINOK(LzmaProps_Decode(&propNew, props, propsSize));
+ RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc));
+
+ {
+ UInt32 dictSize = propNew.dicSize;
+ SizeT mask = ((UInt32)1 << 12) - 1;
+ if (dictSize >= ((UInt32)1 << 30)) mask = ((UInt32)1 << 22) - 1;
+ else if (dictSize >= ((UInt32)1 << 22)) mask = ((UInt32)1 << 20) - 1;
+ dicBufSize = ((SizeT)dictSize + mask) & ~mask;
+ if (dicBufSize < dictSize)
+ dicBufSize = dictSize;
+ }
+
+ if (!p->dic || dicBufSize != p->dicBufSize)
+ {
+ LzmaDec_FreeDict(p, alloc);
+ p->dic = (Byte *)ISzAlloc_Alloc(alloc, dicBufSize);
+ if (!p->dic)
+ {
+ LzmaDec_FreeProbs(p, alloc);
+ return SZ_ERROR_MEM;
+ }
+ }
+ p->dicBufSize = dicBufSize;
+ p->prop = propNew;
+ return SZ_OK;
+}
+
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
+ ELzmaStatus *status, ISzAllocPtr alloc)
+{
+ CLzmaDec p;
+ SRes res;
+ SizeT outSize = *destLen, inSize = *srcLen;
+ *destLen = *srcLen = 0;
+ *status = LZMA_STATUS_NOT_SPECIFIED;
+ if (inSize < RC_INIT_SIZE)
+ return SZ_ERROR_INPUT_EOF;
+ LzmaDec_Construct(&p);
+ RINOK(LzmaDec_AllocateProbs(&p, propData, propSize, alloc));
+ p.dic = dest;
+ p.dicBufSize = outSize;
+ LzmaDec_Init(&p);
+ *srcLen = inSize;
+ res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status);
+ *destLen = p.dicPos;
+ if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT)
+ res = SZ_ERROR_INPUT_EOF;
+ LzmaDec_FreeProbs(&p, alloc);
+ return res;
+}
diff --git a/contrib/libs/lzmasdk/LzmaDec.h b/contrib/libs/lzmasdk/LzmaDec.h new file mode 100644 index 0000000000..28ce60c3ea --- /dev/null +++ b/contrib/libs/lzmasdk/LzmaDec.h @@ -0,0 +1,234 @@ +/* LzmaDec.h -- LZMA Decoder
+2018-04-21 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA_DEC_H
+#define __LZMA_DEC_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+/* #define _LZMA_PROB32 */
+/* _LZMA_PROB32 can increase the speed on some CPUs,
+ but memory usage for CLzmaDec::probs will be doubled in that case */
+
+typedef
+#ifdef _LZMA_PROB32
+ UInt32
+#else
+ UInt16
+#endif
+ CLzmaProb;
+
+
+/* ---------- LZMA Properties ---------- */
+
+#define LZMA_PROPS_SIZE 5
+
+typedef struct _CLzmaProps
+{
+ Byte lc;
+ Byte lp;
+ Byte pb;
+ Byte _pad_;
+ UInt32 dicSize;
+} CLzmaProps;
+
+/* LzmaProps_Decode - decodes properties
+Returns:
+ SZ_OK
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+*/
+
+SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size);
+
+
+/* ---------- LZMA Decoder state ---------- */
+
+/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for the worst case.
+   Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160 bits = 20 bytes; */
+
+#define LZMA_REQUIRED_INPUT_MAX 20
+
+typedef struct
+{
+ /* Don't change this structure. ASM code can use it. */
+ CLzmaProps prop;
+ CLzmaProb *probs;
+ CLzmaProb *probs_1664;
+ Byte *dic;
+ SizeT dicBufSize;
+ SizeT dicPos;
+ const Byte *buf;
+ UInt32 range;
+ UInt32 code;
+ UInt32 processedPos;
+ UInt32 checkDicSize;
+ UInt32 reps[4];
+ UInt32 state;
+ UInt32 remainLen;
+
+ UInt32 numProbs;
+ unsigned tempBufSize;
+ Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];
+} CLzmaDec;
+
+#define LzmaDec_Construct(p) { (p)->dic = NULL; (p)->probs = NULL; }
+
+void LzmaDec_Init(CLzmaDec *p);
+
+/* There are two types of LZMA streams:
+   - Stream with end mark. The end mark adds about 6 bytes to the compressed size.
+   - Stream without end mark. You must know the exact uncompressed size to decompress such a stream. */
+
+typedef enum
+{
+ LZMA_FINISH_ANY, /* finish at any point */
+ LZMA_FINISH_END /* block must be finished at the end */
+} ELzmaFinishMode;
+
+/* ELzmaFinishMode has meaning only if the decoding reaches the output limit.
+
+   Use LZMA_FINISH_END when you know that the current output buffer
+   covers the last bytes of the block. In all other cases use LZMA_FINISH_ANY.
+
+   If the LZMA decoder sees the end marker before reaching the output limit, it returns SZ_OK,
+   and the output value of destLen will be less than the output buffer size limit.
+   You can also check the status result.
+
+   You can use multiple checks to test data integrity after full decompression:
+   1) Check the return code and the "status" variable.
+   2) Check that output (destLen) == uncompressedSize, if you know the real uncompressedSize.
+   3) Check that output (srcLen) == compressedSize, if you know the real compressedSize.
+      You must use the correct finish mode in that case. */
+
+typedef enum
+{
+ LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */
+ LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */
+ LZMA_STATUS_NOT_FINISHED, /* stream was not finished */
+ LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */
+} ELzmaStatus;
+
+/* ELzmaStatus is used only as output value for function call */
+
+
+/* ---------- Interfaces ---------- */
+
+/* There are 3 levels of interfaces:
+ 1) Dictionary Interface
+ 2) Buffer Interface
+ 3) One Call Interface
+   You can select any of these interfaces, but don't mix functions from different
+   groups for the same object. */
+
+
+/* There are two ways to allocate state for the Dictionary Interface:
+   1) LzmaDec_Allocate / LzmaDec_Free
+   2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs
+   You can use variant 2 if you set the dictionary buffer manually.
+   For the Buffer Interface you must always use variant 1.
+
+LzmaDec_Allocate* can return:
+ SZ_OK
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+*/
+
+SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc);
+void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc);
+
+SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc);
+void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc);
+
+/* ---------- Dictionary Interface ---------- */
+
+/* Use this interface if you want to eliminate the overhead of copying data from the
+   dictionary to some other external buffer.
+   You must work with CLzmaDec members directly in this interface.
+
+ STEPS:
+ LzmaDec_Construct()
+ LzmaDec_Allocate()
+ for (each new stream)
+ {
+ LzmaDec_Init()
+ while (it needs more decompression)
+ {
+ LzmaDec_DecodeToDic()
+ use data from CLzmaDec::dic and update CLzmaDec::dicPos
+ }
+ }
+ LzmaDec_Free()
+*/
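+
+/* A minimal sketch of the STEPS above (illustrative only, not part of the original header).
+   It assumes propsHeader points at the 5-byte properties, inPtr/inAvail describe the
+   compressed input already in memory, and g_Alloc is the ISzAlloc from Alloc.h:
+
+     CLzmaDec dec;
+     LzmaDec_Construct(&dec);
+     if (LzmaDec_Allocate(&dec, propsHeader, LZMA_PROPS_SIZE, &g_Alloc) != SZ_OK)
+       return;
+     LzmaDec_Init(&dec);
+     for (;;)
+     {
+       ELzmaStatus status;
+       SizeT srcLen = inAvail;
+       if (dec.dicPos == dec.dicBufSize)
+         dec.dicPos = 0;   // caller must reset dicPos when the dictionary buffer is full
+       if (LzmaDec_DecodeToDic(&dec, dec.dicBufSize, inPtr, &srcLen,
+           LZMA_FINISH_ANY, &status) != SZ_OK)
+         break;            // data error
+       // use the newly decoded bytes in dec.dic (dec.dicPos has advanced)
+       inPtr += srcLen; inAvail -= srcLen;
+       if (status == LZMA_STATUS_FINISHED_WITH_MARK
+           || status == LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK)
+         break;
+       // a complete loop would also handle LZMA_STATUS_NEEDS_MORE_INPUT by reading more input
+     }
+     LzmaDec_Free(&dec, &g_Alloc);
+*/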
+
+/* LzmaDec_DecodeToDic
+
+   Decodes into the internal dictionary buffer (CLzmaDec::dic).
+   You must manually update CLzmaDec::dicPos when it reaches CLzmaDec::dicBufSize.
+
+finishMode:
+   It has meaning only if the decoding reaches the output limit (dicLimit).
+ LZMA_FINISH_ANY - Decode just dicLimit bytes.
+ LZMA_FINISH_END - Stream must be finished after dicLimit.
+
+Returns:
+ SZ_OK
+ status:
+ LZMA_STATUS_FINISHED_WITH_MARK
+ LZMA_STATUS_NOT_FINISHED
+ LZMA_STATUS_NEEDS_MORE_INPUT
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
+ SZ_ERROR_DATA - Data error
+*/
+
+SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
+
+
+/* ---------- Buffer Interface ---------- */
+
+/* It's a zlib-like interface.
+   See the LzmaDec_DecodeToDic description for information about the STEPS and return results,
+   but use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic; you don't need
+   to work with CLzmaDec members manually.
+
+finishMode:
+   It has meaning only if the decoding reaches the output limit (*destLen).
+ LZMA_FINISH_ANY - Decode just destLen bytes.
+ LZMA_FINISH_END - Stream must be finished after (*destLen).
+*/
+
+SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen,
+ const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status);
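+
+/* A minimal sketch (illustrative only, not part of the original header). It assumes a CLzmaDec
+   "dec" that was set up with LzmaDec_Allocate / LzmaDec_Init, and caller-provided inBuf/inSize
+   and outBuf/outSize:
+
+     ELzmaStatus status;
+     SizeT destLen = outSize;   // on return: number of bytes written to outBuf
+     SizeT srcLen = inSize;     // on return: number of compressed bytes consumed
+     SRes res = LzmaDec_DecodeToBuf(&dec, outBuf, &destLen, inBuf, &srcLen,
+                                    LZMA_FINISH_END, &status);
+     // check res and status as described for LzmaDec_DecodeToDic
+*/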
+
+
+/* ---------- One Call Interface ---------- */
+
+/* LzmaDecode
+
+finishMode:
+   It has meaning only if the decoding reaches the output limit (*destLen).
+ LZMA_FINISH_ANY - Decode just destLen bytes.
+ LZMA_FINISH_END - Stream must be finished after (*destLen).
+
+Returns:
+ SZ_OK
+ status:
+ LZMA_STATUS_FINISHED_WITH_MARK
+ LZMA_STATUS_NOT_FINISHED
+ LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK
+ SZ_ERROR_DATA - Data error
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+ SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
+*/
+
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+ const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
+ ELzmaStatus *status, ISzAllocPtr alloc);
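+
+/* A minimal sketch (illustrative only, not part of the original header). It assumes propsData
+   points at the 5-byte properties, srcData/srcSize describe the raw compressed stream,
+   dest/destSize is the output buffer, and g_Alloc is the ISzAlloc from Alloc.h:
+
+     ELzmaStatus status;
+     SizeT destLen = destSize;
+     SizeT srcLen = srcSize;
+     SRes res = LzmaDecode(dest, &destLen, srcData, &srcLen,
+                           propsData, LZMA_PROPS_SIZE, LZMA_FINISH_END, &status, &g_Alloc);
+*/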
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/LzmaEnc.c b/contrib/libs/lzmasdk/LzmaEnc.c new file mode 100644 index 0000000000..a5a087f556 --- /dev/null +++ b/contrib/libs/lzmasdk/LzmaEnc.c @@ -0,0 +1,2984 @@ +/* LzmaEnc.c -- LZMA Encoder
+2019-01-10: Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+/* #define SHOW_STAT */
+/* #define SHOW_STAT2 */
+
+#if defined(SHOW_STAT) || defined(SHOW_STAT2)
+#include <stdio.h>
+#endif
+
+#include "LzmaEnc.h"
+
+#include "LzFind.h"
+#ifndef _7ZIP_ST
+#include "LzFindMt.h"
+#endif
+
+#ifdef SHOW_STAT
+static unsigned g_STAT_OFFSET = 0;
+#endif
+
+#define kLzmaMaxHistorySize ((UInt32)3 << 29)
+/* #define kLzmaMaxHistorySize ((UInt32)7 << 29) */
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+#define kProbInitValue (kBitModelTotal >> 1)
+
+#define kNumMoveReducingBits 4
+#define kNumBitPriceShiftBits 4
+#define kBitPrice (1 << kNumBitPriceShiftBits)
+
+#define REP_LEN_COUNT 64
+
+void LzmaEncProps_Init(CLzmaEncProps *p)
+{
+ p->level = 5;
+ p->dictSize = p->mc = 0;
+ p->reduceSize = (UInt64)(Int64)-1;
+ p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
+ p->writeEndMark = 0;
+}
+
+void LzmaEncProps_Normalize(CLzmaEncProps *p)
+{
+ int level = p->level;
+ if (level < 0) level = 5;
+ p->level = level;
+
+ if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level <= 7 ? (1 << 25) : (1 << 26)));
+ if (p->dictSize > p->reduceSize)
+ {
+ unsigned i;
+ UInt32 reduceSize = (UInt32)p->reduceSize;
+ for (i = 11; i <= 30; i++)
+ {
+ if (reduceSize <= ((UInt32)2 << i)) { p->dictSize = ((UInt32)2 << i); break; }
+ if (reduceSize <= ((UInt32)3 << i)) { p->dictSize = ((UInt32)3 << i); break; }
+ }
+ }
+
+ if (p->lc < 0) p->lc = 3;
+ if (p->lp < 0) p->lp = 0;
+ if (p->pb < 0) p->pb = 2;
+
+ if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
+ if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
+ if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
+ if (p->numHashBytes < 0) p->numHashBytes = 4;
+ if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
+
+ if (p->numThreads < 0)
+ p->numThreads =
+ #ifndef _7ZIP_ST
+ ((p->btMode && p->algo) ? 2 : 1);
+ #else
+ 1;
+ #endif
+}
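+
+/* Worked example (note added for illustration; not part of the original source):
+   after LzmaEncProps_Init() (level = 5, everything else left at "auto"), Normalize() yields
+   dictSize = 1 << 24 (16 MiB), lc = 3, lp = 0, pb = 2, algo = 1, fb = 32, btMode = 1,
+   numHashBytes = 4, mc = 32, numThreads = 2 (or 1 when _7ZIP_ST is defined). */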
+
+UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
+{
+ CLzmaEncProps props = *props2;
+ LzmaEncProps_Normalize(&props);
+ return props.dictSize;
+}
+
+#if (_MSC_VER >= 1400)
+/* BSR code is fast for some new CPUs */
+/* #define LZMA_LOG_BSR */
+#endif
+
+#ifdef LZMA_LOG_BSR
+
+#define kDicLogSizeMaxCompress 32
+
+#define BSR2_RET(pos, res) { unsigned long zz; _BitScanReverse(&zz, (pos)); res = (zz + zz) + ((pos >> (zz - 1)) & 1); }
+
+static unsigned GetPosSlot1(UInt32 pos)
+{
+ unsigned res;
+ BSR2_RET(pos, res);
+ return res;
+}
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
+#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); }
+
+#else
+
+#define kNumLogBits (9 + sizeof(size_t) / 2)
+/* #define kNumLogBits (11 + sizeof(size_t) / 8 * 3) */
+
+#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7)
+
+static void LzmaEnc_FastPosInit(Byte *g_FastPos)
+{
+ unsigned slot;
+ g_FastPos[0] = 0;
+ g_FastPos[1] = 1;
+ g_FastPos += 2;
+
+ for (slot = 2; slot < kNumLogBits * 2; slot++)
+ {
+ size_t k = ((size_t)1 << ((slot >> 1) - 1));
+ size_t j;
+ for (j = 0; j < k; j++)
+ g_FastPos[j] = (Byte)slot;
+ g_FastPos += k;
+ }
+}
+
+/* we can use ((limit - pos) >> 31) only if (pos < ((UInt32)1 << 31)) */
+/*
+#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \
+ (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
+ res = p->g_FastPos[pos >> zz] + (zz * 2); }
+*/
+
+/*
+#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \
+ (0 - (((((UInt32)1 << (kNumLogBits)) - 1) - (pos >> 6)) >> 31))); \
+ res = p->g_FastPos[pos >> zz] + (zz * 2); }
+*/
+
+#define BSR2_RET(pos, res) { unsigned zz = (pos < (1 << (kNumLogBits + 6))) ? 6 : 6 + kNumLogBits - 1; \
+ res = p->g_FastPos[pos >> zz] + (zz * 2); }
+
+/*
+#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
+ p->g_FastPos[pos >> 6] + 12 : \
+ p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
+*/
+
+#define GetPosSlot1(pos) p->g_FastPos[pos]
+#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); }
+#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos & (kNumFullDistances - 1)]; else BSR2_RET(pos, res); }
+
+#endif
+
+
+#define LZMA_NUM_REPS 4
+
+typedef UInt16 CState;
+typedef UInt16 CExtra;
+
+typedef struct
+{
+ UInt32 price;
+ CState state;
+ CExtra extra;
+ // 0 : normal
+ // 1 : LIT : MATCH
+ // > 1 : MATCH (extra-1) : LIT : REP0 (len)
+ UInt32 len;
+ UInt32 dist;
+ UInt32 reps[LZMA_NUM_REPS];
+} COptimal;
+
+
+// 18.06
+#define kNumOpts (1 << 11)
+#define kPackReserve (kNumOpts * 8)
+// #define kNumOpts (1 << 12)
+// #define kPackReserve (1 + kNumOpts * 2)
+
+#define kNumLenToPosStates 4
+#define kNumPosSlotBits 6
+#define kDicLogSizeMin 0
+#define kDicLogSizeMax 32
+#define kDistTableSizeMax (kDicLogSizeMax * 2)
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+#define kAlignMask (kAlignTableSize - 1)
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+typedef
+#ifdef _LZMA_PROB32
+ UInt32
+#else
+ UInt16
+#endif
+ CLzmaProb;
+
+#define LZMA_PB_MAX 4
+#define LZMA_LC_MAX 8
+#define LZMA_LP_MAX 4
+
+#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+#define kLenNumSymbolsTotal (kLenNumLowSymbols * 2 + kLenNumHighSymbols)
+
+#define LZMA_MATCH_LEN_MIN 2
+#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1)
+
+#define kNumStates 12
+
+
+typedef struct
+{
+ CLzmaProb low[LZMA_NUM_PB_STATES_MAX << (kLenNumLowBits + 1)];
+ CLzmaProb high[kLenNumHighSymbols];
+} CLenEnc;
+
+
+typedef struct
+{
+ unsigned tableSize;
+ UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal];
+ // UInt32 prices1[LZMA_NUM_PB_STATES_MAX][kLenNumLowSymbols * 2];
+ // UInt32 prices2[kLenNumSymbolsTotal];
+} CLenPriceEnc;
+
+#define GET_PRICE_LEN(p, posState, len) \
+ ((p)->prices[posState][(size_t)(len) - LZMA_MATCH_LEN_MIN])
+
+/*
+#define GET_PRICE_LEN(p, posState, len) \
+ ((p)->prices2[(size_t)(len) - 2] + ((p)->prices1[posState][((len) - 2) & (kLenNumLowSymbols * 2 - 1)] & (((len) - 2 - kLenNumLowSymbols * 2) >> 9)))
+*/
+
+typedef struct
+{
+ UInt32 range;
+ unsigned cache;
+ UInt64 low;
+ UInt64 cacheSize;
+ Byte *buf;
+ Byte *bufLim;
+ Byte *bufBase;
+ ISeqOutStream *outStream;
+ UInt64 processed;
+ SRes res;
+} CRangeEnc;
+
+
+typedef struct
+{
+ CLzmaProb *litProbs;
+
+ unsigned state;
+ UInt32 reps[LZMA_NUM_REPS];
+
+ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
+ CLzmaProb isRep[kNumStates];
+ CLzmaProb isRepG0[kNumStates];
+ CLzmaProb isRepG1[kNumStates];
+ CLzmaProb isRepG2[kNumStates];
+ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
+ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
+
+ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
+ CLzmaProb posEncoders[kNumFullDistances];
+
+ CLenEnc lenProbs;
+ CLenEnc repLenProbs;
+
+} CSaveState;
+
+
+typedef UInt32 CProbPrice;
+
+
+typedef struct
+{
+ void *matchFinderObj;
+ IMatchFinder matchFinder;
+
+ unsigned optCur;
+ unsigned optEnd;
+
+ unsigned longestMatchLen;
+ unsigned numPairs;
+ UInt32 numAvail;
+
+ unsigned state;
+ unsigned numFastBytes;
+ unsigned additionalOffset;
+ UInt32 reps[LZMA_NUM_REPS];
+ unsigned lpMask, pbMask;
+ CLzmaProb *litProbs;
+ CRangeEnc rc;
+
+ UInt32 backRes;
+
+ unsigned lc, lp, pb;
+ unsigned lclp;
+
+ BoolInt fastMode;
+ BoolInt writeEndMark;
+ BoolInt finished;
+ BoolInt multiThread;
+ BoolInt needInit;
+ // BoolInt _maxMode;
+
+ UInt64 nowPos64;
+
+ unsigned matchPriceCount;
+ // unsigned alignPriceCount;
+ int repLenEncCounter;
+
+ unsigned distTableSize;
+
+ UInt32 dictSize;
+ SRes result;
+
+ #ifndef _7ZIP_ST
+ BoolInt mtMode;
+ // begin of CMatchFinderMt is used in LZ thread
+ CMatchFinderMt matchFinderMt;
+ // end of CMatchFinderMt is used in BT and HASH threads
+ #endif
+
+ CMatchFinder matchFinderBase;
+
+ #ifndef _7ZIP_ST
+ Byte pad[128];
+ #endif
+
+ // LZ thread
+ CProbPrice ProbPrices[kBitModelTotal >> kNumMoveReducingBits];
+
+ UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1];
+
+ UInt32 alignPrices[kAlignTableSize];
+ UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax];
+ UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances];
+
+ CLzmaProb posAlignEncoder[1 << kNumAlignBits];
+ CLzmaProb isRep[kNumStates];
+ CLzmaProb isRepG0[kNumStates];
+ CLzmaProb isRepG1[kNumStates];
+ CLzmaProb isRepG2[kNumStates];
+ CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX];
+ CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX];
+ CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits];
+ CLzmaProb posEncoders[kNumFullDistances];
+
+ CLenEnc lenProbs;
+ CLenEnc repLenProbs;
+
+ #ifndef LZMA_LOG_BSR
+ Byte g_FastPos[1 << kNumLogBits];
+ #endif
+
+ CLenPriceEnc lenEnc;
+ CLenPriceEnc repLenEnc;
+
+ COptimal opt[kNumOpts];
+
+ CSaveState saveState;
+
+ #ifndef _7ZIP_ST
+ Byte pad2[128];
+ #endif
+} CLzmaEnc;
+
+
+
+#define COPY_ARR(dest, src, arr) memcpy(dest->arr, src->arr, sizeof(src->arr));
+
+void LzmaEnc_SaveState(CLzmaEncHandle pp)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ CSaveState *dest = &p->saveState;
+
+ dest->state = p->state;
+
+ dest->lenProbs = p->lenProbs;
+ dest->repLenProbs = p->repLenProbs;
+
+ COPY_ARR(dest, p, reps);
+
+ COPY_ARR(dest, p, posAlignEncoder);
+ COPY_ARR(dest, p, isRep);
+ COPY_ARR(dest, p, isRepG0);
+ COPY_ARR(dest, p, isRepG1);
+ COPY_ARR(dest, p, isRepG2);
+ COPY_ARR(dest, p, isMatch);
+ COPY_ARR(dest, p, isRep0Long);
+ COPY_ARR(dest, p, posSlotEncoder);
+ COPY_ARR(dest, p, posEncoders);
+
+ memcpy(dest->litProbs, p->litProbs, ((UInt32)0x300 << p->lclp) * sizeof(CLzmaProb));
+}
+
+
+void LzmaEnc_RestoreState(CLzmaEncHandle pp)
+{
+ CLzmaEnc *dest = (CLzmaEnc *)pp;
+ const CSaveState *p = &dest->saveState;
+
+ dest->state = p->state;
+
+ dest->lenProbs = p->lenProbs;
+ dest->repLenProbs = p->repLenProbs;
+
+ COPY_ARR(dest, p, reps);
+
+ COPY_ARR(dest, p, posAlignEncoder);
+ COPY_ARR(dest, p, isRep);
+ COPY_ARR(dest, p, isRepG0);
+ COPY_ARR(dest, p, isRepG1);
+ COPY_ARR(dest, p, isRepG2);
+ COPY_ARR(dest, p, isMatch);
+ COPY_ARR(dest, p, isRep0Long);
+ COPY_ARR(dest, p, posSlotEncoder);
+ COPY_ARR(dest, p, posEncoders);
+
+ memcpy(dest->litProbs, p->litProbs, ((UInt32)0x300 << dest->lclp) * sizeof(CLzmaProb));
+}
+
+
+
+SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ CLzmaEncProps props = *props2;
+ LzmaEncProps_Normalize(&props);
+
+ if (props.lc > LZMA_LC_MAX
+ || props.lp > LZMA_LP_MAX
+ || props.pb > LZMA_PB_MAX
+ || props.dictSize > ((UInt64)1 << kDicLogSizeMaxCompress)
+ || props.dictSize > kLzmaMaxHistorySize)
+ return SZ_ERROR_PARAM;
+
+ p->dictSize = props.dictSize;
+ {
+ unsigned fb = props.fb;
+ if (fb < 5)
+ fb = 5;
+ if (fb > LZMA_MATCH_LEN_MAX)
+ fb = LZMA_MATCH_LEN_MAX;
+ p->numFastBytes = fb;
+ }
+ p->lc = props.lc;
+ p->lp = props.lp;
+ p->pb = props.pb;
+ p->fastMode = (props.algo == 0);
+ // p->_maxMode = True;
+ p->matchFinderBase.btMode = (Byte)(props.btMode ? 1 : 0);
+ {
+ unsigned numHashBytes = 4;
+ if (props.btMode)
+ {
+ if (props.numHashBytes < 2)
+ numHashBytes = 2;
+ else if (props.numHashBytes < 4)
+ numHashBytes = props.numHashBytes;
+ }
+ p->matchFinderBase.numHashBytes = numHashBytes;
+ }
+
+ p->matchFinderBase.cutValue = props.mc;
+
+ p->writeEndMark = props.writeEndMark;
+
+ #ifndef _7ZIP_ST
+ /*
+ if (newMultiThread != _multiThread)
+ {
+ ReleaseMatchFinder();
+ _multiThread = newMultiThread;
+ }
+ */
+ p->multiThread = (props.numThreads > 1);
+ #endif
+
+ return SZ_OK;
+}
+
+
+void LzmaEnc_SetDataSize(CLzmaEncHandle pp, UInt64 expectedDataSize)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ p->matchFinderBase.expectedDataSize = expectedDataSize;
+}
+
+
+#define kState_Start 0
+#define kState_LitAfterMatch 4
+#define kState_LitAfterRep 5
+#define kState_MatchAfterLit 7
+#define kState_RepAfterLit 8
+
+static const Byte kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5};
+static const Byte kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10};
+static const Byte kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11};
+static const Byte kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11};
+
+#define IsLitState(s) ((s) < 7)
+#define GetLenToPosState2(len) (((len) < kNumLenToPosStates - 1) ? (len) : kNumLenToPosStates - 1)
+#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1)
+
+#define kInfinityPrice (1 << 30)
+
+static void RangeEnc_Construct(CRangeEnc *p)
+{
+ p->outStream = NULL;
+ p->bufBase = NULL;
+}
+
+#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize)
+#define RangeEnc_GetProcessed_sizet(p) ((size_t)(p)->processed + ((p)->buf - (p)->bufBase) + (size_t)(p)->cacheSize)
+
+#define RC_BUF_SIZE (1 << 16)
+
+static int RangeEnc_Alloc(CRangeEnc *p, ISzAllocPtr alloc)
+{
+ if (!p->bufBase)
+ {
+ p->bufBase = (Byte *)ISzAlloc_Alloc(alloc, RC_BUF_SIZE);
+ if (!p->bufBase)
+ return 0;
+ p->bufLim = p->bufBase + RC_BUF_SIZE;
+ }
+ return 1;
+}
+
+static void RangeEnc_Free(CRangeEnc *p, ISzAllocPtr alloc)
+{
+ ISzAlloc_Free(alloc, p->bufBase);
+ p->bufBase = 0;
+}
+
+static void RangeEnc_Init(CRangeEnc *p)
+{
+ /* Stream.Init(); */
+ p->range = 0xFFFFFFFF;
+ p->cache = 0;
+ p->low = 0;
+ p->cacheSize = 0;
+
+ p->buf = p->bufBase;
+
+ p->processed = 0;
+ p->res = SZ_OK;
+}
+
+MY_NO_INLINE static void RangeEnc_FlushStream(CRangeEnc *p)
+{
+ size_t num;
+ if (p->res != SZ_OK)
+ return;
+ num = p->buf - p->bufBase;
+ if (num != ISeqOutStream_Write(p->outStream, p->bufBase, num))
+ p->res = SZ_ERROR_WRITE;
+ p->processed += num;
+ p->buf = p->bufBase;
+}
+
+MY_NO_INLINE static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
+{
+ UInt32 low = (UInt32)p->low;
+ unsigned high = (unsigned)(p->low >> 32);
+ p->low = (UInt32)(low << 8);
+ if (low < (UInt32)0xFF000000 || high != 0)
+ {
+ {
+ Byte *buf = p->buf;
+ *buf++ = (Byte)(p->cache + high);
+ p->cache = (unsigned)(low >> 24);
+ p->buf = buf;
+ if (buf == p->bufLim)
+ RangeEnc_FlushStream(p);
+ if (p->cacheSize == 0)
+ return;
+ }
+ high += 0xFF;
+ for (;;)
+ {
+ Byte *buf = p->buf;
+ *buf++ = (Byte)(high);
+ p->buf = buf;
+ if (buf == p->bufLim)
+ RangeEnc_FlushStream(p);
+ if (--p->cacheSize == 0)
+ return;
+ }
+ }
+ p->cacheSize++;
+}
+
+static void RangeEnc_FlushData(CRangeEnc *p)
+{
+ int i;
+ for (i = 0; i < 5; i++)
+ RangeEnc_ShiftLow(p);
+}
+
+#define RC_NORM(p) if (range < kTopValue) { range <<= 8; RangeEnc_ShiftLow(p); }
+
+#define RC_BIT_PRE(p, prob) \
+ ttt = *(prob); \
+ newBound = (range >> kNumBitModelTotalBits) * ttt;
+
+// #define _LZMA_ENC_USE_BRANCH
+
+#ifdef _LZMA_ENC_USE_BRANCH
+
+#define RC_BIT(p, prob, bit) { \
+ RC_BIT_PRE(p, prob) \
+ if (bit == 0) { range = newBound; ttt += (kBitModelTotal - ttt) >> kNumMoveBits; } \
+ else { (p)->low += newBound; range -= newBound; ttt -= ttt >> kNumMoveBits; } \
+ *(prob) = (CLzmaProb)ttt; \
+ RC_NORM(p) \
+ }
+
+#else
+
+#define RC_BIT(p, prob, bit) { \
+ UInt32 mask; \
+ RC_BIT_PRE(p, prob) \
+ mask = 0 - (UInt32)bit; \
+ range &= mask; \
+ mask &= newBound; \
+ range -= mask; \
+ (p)->low += mask; \
+ mask = (UInt32)bit - 1; \
+ range += newBound & mask; \
+ mask &= (kBitModelTotal - ((1 << kNumMoveBits) - 1)); \
+ mask += ((1 << kNumMoveBits) - 1); \
+ ttt += (Int32)(mask - ttt) >> kNumMoveBits; \
+ *(prob) = (CLzmaProb)ttt; \
+ RC_NORM(p) \
+ }
+
+#endif
+
+
+
+
+#define RC_BIT_0_BASE(p, prob) \
+ range = newBound; *(prob) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits));
+
+#define RC_BIT_1_BASE(p, prob) \
+ range -= newBound; (p)->low += newBound; *(prob) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits)); \
+
+#define RC_BIT_0(p, prob) \
+ RC_BIT_0_BASE(p, prob) \
+ RC_NORM(p)
+
+#define RC_BIT_1(p, prob) \
+ RC_BIT_1_BASE(p, prob) \
+ RC_NORM(p)
+
+static void RangeEnc_EncodeBit_0(CRangeEnc *p, CLzmaProb *prob)
+{
+ UInt32 range, ttt, newBound;
+ range = p->range;
+ RC_BIT_PRE(p, prob)
+ RC_BIT_0(p, prob)
+ p->range = range;
+}
+
+static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 sym)
+{
+ UInt32 range = p->range;
+ sym |= 0x100;
+ do
+ {
+ UInt32 ttt, newBound;
+ // RangeEnc_EncodeBit(p, probs + (sym >> 8), (sym >> 7) & 1);
+ CLzmaProb *prob = probs + (sym >> 8);
+ UInt32 bit = (sym >> 7) & 1;
+ sym <<= 1;
+ RC_BIT(p, prob, bit);
+ }
+ while (sym < 0x10000);
+ p->range = range;
+}
+
+static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 sym, UInt32 matchByte)
+{
+ UInt32 range = p->range;
+ UInt32 offs = 0x100;
+ sym |= 0x100;
+ do
+ {
+ UInt32 ttt, newBound;
+ CLzmaProb *prob;
+ UInt32 bit;
+ matchByte <<= 1;
+ // RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (sym >> 8)), (sym >> 7) & 1);
+ prob = probs + (offs + (matchByte & offs) + (sym >> 8));
+ bit = (sym >> 7) & 1;
+ sym <<= 1;
+ offs &= ~(matchByte ^ sym);
+ RC_BIT(p, prob, bit);
+ }
+ while (sym < 0x10000);
+ p->range = range;
+}
+
+
+
+static void LzmaEnc_InitPriceTables(CProbPrice *ProbPrices)
+{
+ UInt32 i;
+ for (i = 0; i < (kBitModelTotal >> kNumMoveReducingBits); i++)
+ {
+ const unsigned kCyclesBits = kNumBitPriceShiftBits;
+ UInt32 w = (i << kNumMoveReducingBits) + (1 << (kNumMoveReducingBits - 1));
+ unsigned bitCount = 0;
+ unsigned j;
+ for (j = 0; j < kCyclesBits; j++)
+ {
+ w = w * w;
+ bitCount <<= 1;
+ while (w >= ((UInt32)1 << 16))
+ {
+ w >>= 1;
+ bitCount++;
+ }
+ }
+ ProbPrices[i] = (CProbPrice)((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount);
+ // printf("\n%3d: %5d", i, ProbPrices[i]);
+ }
+}
+
+
+#define GET_PRICE(prob, bit) \
+ p->ProbPrices[((prob) ^ (unsigned)(((-(int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
+
+#define GET_PRICEa(prob, bit) \
+ ProbPrices[((prob) ^ (unsigned)((-((int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits];
+
+#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits]
+#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
+
+#define GET_PRICEa_0(prob) ProbPrices[(prob) >> kNumMoveReducingBits]
+#define GET_PRICEa_1(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits]
+
+
+static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 sym, const CProbPrice *ProbPrices)
+{
+ UInt32 price = 0;
+ sym |= 0x100;
+ do
+ {
+ unsigned bit = sym & 1;
+ sym >>= 1;
+ price += GET_PRICEa(probs[sym], bit);
+ }
+ while (sym >= 2);
+ return price;
+}
+
+
+static UInt32 LitEnc_Matched_GetPrice(const CLzmaProb *probs, UInt32 sym, UInt32 matchByte, const CProbPrice *ProbPrices)
+{
+ UInt32 price = 0;
+ UInt32 offs = 0x100;
+ sym |= 0x100;
+ do
+ {
+ matchByte <<= 1;
+ price += GET_PRICEa(probs[offs + (matchByte & offs) + (sym >> 8)], (sym >> 7) & 1);
+ sym <<= 1;
+ offs &= ~(matchByte ^ sym);
+ }
+ while (sym < 0x10000);
+ return price;
+}
+
+
+static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, unsigned numBits, unsigned sym)
+{
+ UInt32 range = rc->range;
+ unsigned m = 1;
+ do
+ {
+ UInt32 ttt, newBound;
+ unsigned bit = sym & 1;
+ // RangeEnc_EncodeBit(rc, probs + m, bit);
+ sym >>= 1;
+ RC_BIT(rc, probs + m, bit);
+ m = (m << 1) | bit;
+ }
+ while (--numBits);
+ rc->range = range;
+}
+
+
+
+static void LenEnc_Init(CLenEnc *p)
+{
+ unsigned i;
+ for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << (kLenNumLowBits + 1)); i++)
+ p->low[i] = kProbInitValue;
+ for (i = 0; i < kLenNumHighSymbols; i++)
+ p->high[i] = kProbInitValue;
+}
+
+static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, unsigned sym, unsigned posState)
+{
+ UInt32 range, ttt, newBound;
+ CLzmaProb *probs = p->low;
+ range = rc->range;
+ RC_BIT_PRE(rc, probs);
+ if (sym >= kLenNumLowSymbols)
+ {
+ RC_BIT_1(rc, probs);
+ probs += kLenNumLowSymbols;
+ RC_BIT_PRE(rc, probs);
+ if (sym >= kLenNumLowSymbols * 2)
+ {
+ RC_BIT_1(rc, probs);
+ rc->range = range;
+ // RcTree_Encode(rc, p->high, kLenNumHighBits, sym - kLenNumLowSymbols * 2);
+ LitEnc_Encode(rc, p->high, sym - kLenNumLowSymbols * 2);
+ return;
+ }
+ sym -= kLenNumLowSymbols;
+ }
+
+ // RcTree_Encode(rc, probs + (posState << kLenNumLowBits), kLenNumLowBits, sym);
+ {
+ unsigned m;
+ unsigned bit;
+ RC_BIT_0(rc, probs);
+ probs += (posState << (1 + kLenNumLowBits));
+ bit = (sym >> 2) ; RC_BIT(rc, probs + 1, bit); m = (1 << 1) + bit;
+ bit = (sym >> 1) & 1; RC_BIT(rc, probs + m, bit); m = (m << 1) + bit;
+ bit = sym & 1; RC_BIT(rc, probs + m, bit);
+ rc->range = range;
+ }
+}
+
+static void SetPrices_3(const CLzmaProb *probs, UInt32 startPrice, UInt32 *prices, const CProbPrice *ProbPrices)
+{
+ unsigned i;
+ for (i = 0; i < 8; i += 2)
+ {
+ UInt32 price = startPrice;
+ UInt32 prob;
+ price += GET_PRICEa(probs[1 ], (i >> 2));
+ price += GET_PRICEa(probs[2 + (i >> 2)], (i >> 1) & 1);
+ prob = probs[4 + (i >> 1)];
+ prices[i ] = price + GET_PRICEa_0(prob);
+ prices[i + 1] = price + GET_PRICEa_1(prob);
+ }
+}
+
+
+MY_NO_INLINE static void MY_FAST_CALL LenPriceEnc_UpdateTables(
+ CLenPriceEnc *p,
+ unsigned numPosStates,
+ const CLenEnc *enc,
+ const CProbPrice *ProbPrices)
+{
+ UInt32 b;
+
+ {
+ unsigned prob = enc->low[0];
+ UInt32 a, c;
+ unsigned posState;
+ b = GET_PRICEa_1(prob);
+ a = GET_PRICEa_0(prob);
+ c = b + GET_PRICEa_0(enc->low[kLenNumLowSymbols]);
+ for (posState = 0; posState < numPosStates; posState++)
+ {
+ UInt32 *prices = p->prices[posState];
+ const CLzmaProb *probs = enc->low + (posState << (1 + kLenNumLowBits));
+ SetPrices_3(probs, a, prices, ProbPrices);
+ SetPrices_3(probs + kLenNumLowSymbols, c, prices + kLenNumLowSymbols, ProbPrices);
+ }
+ }
+
+ /*
+ {
+ unsigned i;
+ UInt32 b;
+ a = GET_PRICEa_0(enc->low[0]);
+ for (i = 0; i < kLenNumLowSymbols; i++)
+ p->prices2[i] = a;
+ a = GET_PRICEa_1(enc->low[0]);
+ b = a + GET_PRICEa_0(enc->low[kLenNumLowSymbols]);
+ for (i = kLenNumLowSymbols; i < kLenNumLowSymbols * 2; i++)
+ p->prices2[i] = b;
+ a += GET_PRICEa_1(enc->low[kLenNumLowSymbols]);
+ }
+ */
+
+ // p->counter = numSymbols;
+ // p->counter = 64;
+
+ {
+ unsigned i = p->tableSize;
+
+ if (i > kLenNumLowSymbols * 2)
+ {
+ const CLzmaProb *probs = enc->high;
+ UInt32 *prices = p->prices[0] + kLenNumLowSymbols * 2;
+ i -= kLenNumLowSymbols * 2 - 1;
+ i >>= 1;
+ b += GET_PRICEa_1(enc->low[kLenNumLowSymbols]);
+ do
+ {
+ /*
+ p->prices2[i] = a +
+ // RcTree_GetPrice(enc->high, kLenNumHighBits, i - kLenNumLowSymbols * 2, ProbPrices);
+ LitEnc_GetPrice(probs, i - kLenNumLowSymbols * 2, ProbPrices);
+ */
+ // UInt32 price = a + RcTree_GetPrice(probs, kLenNumHighBits - 1, sym, ProbPrices);
+ unsigned sym = --i + (1 << (kLenNumHighBits - 1));
+ UInt32 price = b;
+ do
+ {
+ unsigned bit = sym & 1;
+ sym >>= 1;
+ price += GET_PRICEa(probs[sym], bit);
+ }
+ while (sym >= 2);
+
+ {
+ unsigned prob = probs[(size_t)i + (1 << (kLenNumHighBits - 1))];
+ prices[(size_t)i * 2 ] = price + GET_PRICEa_0(prob);
+ prices[(size_t)i * 2 + 1] = price + GET_PRICEa_1(prob);
+ }
+ }
+ while (i);
+
+ {
+ unsigned posState;
+ size_t num = (p->tableSize - kLenNumLowSymbols * 2) * sizeof(p->prices[0][0]);
+ for (posState = 1; posState < numPosStates; posState++)
+ memcpy(p->prices[posState] + kLenNumLowSymbols * 2, p->prices[0] + kLenNumLowSymbols * 2, num);
+ }
+ }
+ }
+}
+
+/*
+ #ifdef SHOW_STAT
+ g_STAT_OFFSET += num;
+ printf("\n MovePos %u", num);
+ #endif
+*/
+
+#define MOVE_POS(p, num) { \
+ p->additionalOffset += (num); \
+ p->matchFinder.Skip(p->matchFinderObj, (UInt32)(num)); }
+
+
+static unsigned ReadMatchDistances(CLzmaEnc *p, unsigned *numPairsRes)
+{
+ unsigned numPairs;
+
+ p->additionalOffset++;
+ p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
+ numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches);
+ *numPairsRes = numPairs;
+
+ #ifdef SHOW_STAT
+ printf("\n i = %u numPairs = %u ", g_STAT_OFFSET, numPairs / 2);
+ g_STAT_OFFSET++;
+ {
+ unsigned i;
+ for (i = 0; i < numPairs; i += 2)
+ printf("%2u %6u | ", p->matches[i], p->matches[i + 1]);
+ }
+ #endif
+
+ if (numPairs == 0)
+ return 0;
+ {
+ unsigned len = p->matches[(size_t)numPairs - 2];
+ if (len != p->numFastBytes)
+ return len;
+ {
+ UInt32 numAvail = p->numAvail;
+ if (numAvail > LZMA_MATCH_LEN_MAX)
+ numAvail = LZMA_MATCH_LEN_MAX;
+ {
+ const Byte *p1 = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+ const Byte *p2 = p1 + len;
+ ptrdiff_t dif = (ptrdiff_t)-1 - p->matches[(size_t)numPairs - 1];
+ const Byte *lim = p1 + numAvail;
+ for (; p2 != lim && *p2 == p2[dif]; p2++)
+ {}
+ return (unsigned)(p2 - p1);
+ }
+ }
+ }
+}
+
+#define MARK_LIT ((UInt32)(Int32)-1)
+
+#define MakeAs_Lit(p) { (p)->dist = MARK_LIT; (p)->extra = 0; }
+#define MakeAs_ShortRep(p) { (p)->dist = 0; (p)->extra = 0; }
+#define IsShortRep(p) ((p)->dist == 0)
+
+
+#define GetPrice_ShortRep(p, state, posState) \
+ ( GET_PRICE_0(p->isRepG0[state]) + GET_PRICE_0(p->isRep0Long[state][posState]))
+
+#define GetPrice_Rep_0(p, state, posState) ( \
+ GET_PRICE_1(p->isMatch[state][posState]) \
+ + GET_PRICE_1(p->isRep0Long[state][posState])) \
+ + GET_PRICE_1(p->isRep[state]) \
+ + GET_PRICE_0(p->isRepG0[state])
+
+MY_FORCE_INLINE
+static UInt32 GetPrice_PureRep(const CLzmaEnc *p, unsigned repIndex, size_t state, size_t posState)
+{
+ UInt32 price;
+ UInt32 prob = p->isRepG0[state];
+ if (repIndex == 0)
+ {
+ price = GET_PRICE_0(prob);
+ price += GET_PRICE_1(p->isRep0Long[state][posState]);
+ }
+ else
+ {
+ price = GET_PRICE_1(prob);
+ prob = p->isRepG1[state];
+ if (repIndex == 1)
+ price += GET_PRICE_0(prob);
+ else
+ {
+ price += GET_PRICE_1(prob);
+ price += GET_PRICE(p->isRepG2[state], repIndex - 2);
+ }
+ }
+ return price;
+}
+
+
+static unsigned Backward(CLzmaEnc *p, unsigned cur)
+{
+ unsigned wr = cur + 1;
+ p->optEnd = wr;
+
+ for (;;)
+ {
+ UInt32 dist = p->opt[cur].dist;
+ unsigned len = (unsigned)p->opt[cur].len;
+ unsigned extra = (unsigned)p->opt[cur].extra;
+ cur -= len;
+
+ if (extra)
+ {
+ wr--;
+ p->opt[wr].len = (UInt32)len;
+ cur -= extra;
+ len = extra;
+ if (extra == 1)
+ {
+ p->opt[wr].dist = dist;
+ dist = MARK_LIT;
+ }
+ else
+ {
+ p->opt[wr].dist = 0;
+ len--;
+ wr--;
+ p->opt[wr].dist = MARK_LIT;
+ p->opt[wr].len = 1;
+ }
+ }
+
+ if (cur == 0)
+ {
+ p->backRes = dist;
+ p->optCur = wr;
+ return len;
+ }
+
+ wr--;
+ p->opt[wr].dist = dist;
+ p->opt[wr].len = (UInt32)len;
+ }
+}
+
+
+
+#define LIT_PROBS(pos, prevByte) \
+ (p->litProbs + (UInt32)3 * (((((pos) << 8) + (prevByte)) & p->lpMask) << p->lc))
+
+
+static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
+{
+ unsigned last, cur;
+ UInt32 reps[LZMA_NUM_REPS];
+ unsigned repLens[LZMA_NUM_REPS];
+ UInt32 *matches;
+
+ {
+ UInt32 numAvail;
+ unsigned numPairs, mainLen, repMaxIndex, i, posState;
+ UInt32 matchPrice, repMatchPrice;
+ const Byte *data;
+ Byte curByte, matchByte;
+
+ p->optCur = p->optEnd = 0;
+
+ if (p->additionalOffset == 0)
+ mainLen = ReadMatchDistances(p, &numPairs);
+ else
+ {
+ mainLen = p->longestMatchLen;
+ numPairs = p->numPairs;
+ }
+
+ numAvail = p->numAvail;
+ if (numAvail < 2)
+ {
+ p->backRes = MARK_LIT;
+ return 1;
+ }
+ if (numAvail > LZMA_MATCH_LEN_MAX)
+ numAvail = LZMA_MATCH_LEN_MAX;
+
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+ repMaxIndex = 0;
+
+ for (i = 0; i < LZMA_NUM_REPS; i++)
+ {
+ unsigned len;
+ const Byte *data2;
+ reps[i] = p->reps[i];
+ data2 = data - reps[i];
+ if (data[0] != data2[0] || data[1] != data2[1])
+ {
+ repLens[i] = 0;
+ continue;
+ }
+ for (len = 2; len < numAvail && data[len] == data2[len]; len++)
+ {}
+ repLens[i] = len;
+ if (len > repLens[repMaxIndex])
+ repMaxIndex = i;
+ }
+
+ if (repLens[repMaxIndex] >= p->numFastBytes)
+ {
+ unsigned len;
+ p->backRes = (UInt32)repMaxIndex;
+ len = repLens[repMaxIndex];
+ MOVE_POS(p, len - 1)
+ return len;
+ }
+
+ matches = p->matches;
+
+ if (mainLen >= p->numFastBytes)
+ {
+ p->backRes = matches[(size_t)numPairs - 1] + LZMA_NUM_REPS;
+ MOVE_POS(p, mainLen - 1)
+ return mainLen;
+ }
+
+ curByte = *data;
+ matchByte = *(data - reps[0]);
+
+ last = repLens[repMaxIndex];
+ if (last <= mainLen)
+ last = mainLen;
+
+ if (last < 2 && curByte != matchByte)
+ {
+ p->backRes = MARK_LIT;
+ return 1;
+ }
+
+ p->opt[0].state = (CState)p->state;
+
+ posState = (position & p->pbMask);
+
+ {
+ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
+ p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) +
+ (!IsLitState(p->state) ?
+ LitEnc_Matched_GetPrice(probs, curByte, matchByte, p->ProbPrices) :
+ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
+ }
+
+ MakeAs_Lit(&p->opt[1]);
+
+ matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]);
+ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]);
+
+ // 18.06
+ if (matchByte == curByte && repLens[0] == 0)
+ {
+ UInt32 shortRepPrice = repMatchPrice + GetPrice_ShortRep(p, p->state, posState);
+ if (shortRepPrice < p->opt[1].price)
+ {
+ p->opt[1].price = shortRepPrice;
+ MakeAs_ShortRep(&p->opt[1]);
+ }
+ if (last < 2)
+ {
+ p->backRes = p->opt[1].dist;
+ return 1;
+ }
+ }
+
+ p->opt[1].len = 1;
+
+ p->opt[0].reps[0] = reps[0];
+ p->opt[0].reps[1] = reps[1];
+ p->opt[0].reps[2] = reps[2];
+ p->opt[0].reps[3] = reps[3];
+
+ // ---------- REP ----------
+
+ for (i = 0; i < LZMA_NUM_REPS; i++)
+ {
+ unsigned repLen = repLens[i];
+ UInt32 price;
+ if (repLen < 2)
+ continue;
+ price = repMatchPrice + GetPrice_PureRep(p, i, p->state, posState);
+ do
+ {
+ UInt32 price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState, repLen);
+ COptimal *opt = &p->opt[repLen];
+ if (price2 < opt->price)
+ {
+ opt->price = price2;
+ opt->len = (UInt32)repLen;
+ opt->dist = (UInt32)i;
+ opt->extra = 0;
+ }
+ }
+ while (--repLen >= 2);
+ }
+
+
+ // ---------- MATCH ----------
+ {
+ unsigned len = repLens[0] + 1;
+ if (len <= mainLen)
+ {
+ unsigned offs = 0;
+ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]);
+
+ if (len < 2)
+ len = 2;
+ else
+ while (len > matches[offs])
+ offs += 2;
+
+ for (; ; len++)
+ {
+ COptimal *opt;
+ UInt32 dist = matches[(size_t)offs + 1];
+ UInt32 price = normalMatchPrice + GET_PRICE_LEN(&p->lenEnc, posState, len);
+ unsigned lenToPosState = GetLenToPosState(len);
+
+ if (dist < kNumFullDistances)
+ price += p->distancesPrices[lenToPosState][dist & (kNumFullDistances - 1)];
+ else
+ {
+ unsigned slot;
+ GetPosSlot2(dist, slot);
+ price += p->alignPrices[dist & kAlignMask];
+ price += p->posSlotPrices[lenToPosState][slot];
+ }
+
+ opt = &p->opt[len];
+
+ if (price < opt->price)
+ {
+ opt->price = price;
+ opt->len = (UInt32)len;
+ opt->dist = dist + LZMA_NUM_REPS;
+ opt->extra = 0;
+ }
+
+ if (len == matches[offs])
+ {
+ offs += 2;
+ if (offs == numPairs)
+ break;
+ }
+ }
+ }
+ }
+
+
+ cur = 0;
+
+ #ifdef SHOW_STAT2
+ /* if (position >= 0) */
+ {
+ unsigned i;
+ printf("\n pos = %4X", position);
+ for (i = cur; i <= last; i++)
+ printf("\nprice[%4X] = %u", position - cur + i, p->opt[i].price);
+ }
+ #endif
+ }
+
+
+
+ // ---------- Optimal Parsing ----------
+
+ for (;;)
+ {
+ unsigned numAvail;
+ UInt32 numAvailFull;
+ unsigned newLen, numPairs, prev, state, posState, startLen;
+ UInt32 litPrice, matchPrice, repMatchPrice;
+ BoolInt nextIsLit;
+ Byte curByte, matchByte;
+ const Byte *data;
+ COptimal *curOpt, *nextOpt;
+
+ if (++cur == last)
+ break;
+
+ // 18.06
+ if (cur >= kNumOpts - 64)
+ {
+ unsigned j, best;
+ UInt32 price = p->opt[cur].price;
+ best = cur;
+ for (j = cur + 1; j <= last; j++)
+ {
+ UInt32 price2 = p->opt[j].price;
+ if (price >= price2)
+ {
+ price = price2;
+ best = j;
+ }
+ }
+ {
+ unsigned delta = best - cur;
+ if (delta != 0)
+ {
+ MOVE_POS(p, delta);
+ }
+ }
+ cur = best;
+ break;
+ }
+
+ newLen = ReadMatchDistances(p, &numPairs);
+
+ if (newLen >= p->numFastBytes)
+ {
+ p->numPairs = numPairs;
+ p->longestMatchLen = newLen;
+ break;
+ }
+
+ curOpt = &p->opt[cur];
+
+ position++;
+
+ // we need that check here, if skip_items in p->opt are possible
+ /*
+ if (curOpt->price >= kInfinityPrice)
+ continue;
+ */
+
+ prev = cur - curOpt->len;
+
+ if (curOpt->len == 1)
+ {
+ state = (unsigned)p->opt[prev].state;
+ if (IsShortRep(curOpt))
+ state = kShortRepNextStates[state];
+ else
+ state = kLiteralNextStates[state];
+ }
+ else
+ {
+ const COptimal *prevOpt;
+ UInt32 b0;
+ UInt32 dist = curOpt->dist;
+
+ if (curOpt->extra)
+ {
+ prev -= (unsigned)curOpt->extra;
+ state = kState_RepAfterLit;
+ if (curOpt->extra == 1)
+ state = (dist < LZMA_NUM_REPS ? kState_RepAfterLit : kState_MatchAfterLit);
+ }
+ else
+ {
+ state = (unsigned)p->opt[prev].state;
+ if (dist < LZMA_NUM_REPS)
+ state = kRepNextStates[state];
+ else
+ state = kMatchNextStates[state];
+ }
+
+ prevOpt = &p->opt[prev];
+ b0 = prevOpt->reps[0];
+
+ if (dist < LZMA_NUM_REPS)
+ {
+ if (dist == 0)
+ {
+ reps[0] = b0;
+ reps[1] = prevOpt->reps[1];
+ reps[2] = prevOpt->reps[2];
+ reps[3] = prevOpt->reps[3];
+ }
+ else
+ {
+ reps[1] = b0;
+ b0 = prevOpt->reps[1];
+ if (dist == 1)
+ {
+ reps[0] = b0;
+ reps[2] = prevOpt->reps[2];
+ reps[3] = prevOpt->reps[3];
+ }
+ else
+ {
+ reps[2] = b0;
+ reps[0] = prevOpt->reps[dist];
+ reps[3] = prevOpt->reps[dist ^ 1];
+ }
+ }
+ }
+ else
+ {
+ reps[0] = (dist - LZMA_NUM_REPS + 1);
+ reps[1] = b0;
+ reps[2] = prevOpt->reps[1];
+ reps[3] = prevOpt->reps[2];
+ }
+ }
+
+ curOpt->state = (CState)state;
+ curOpt->reps[0] = reps[0];
+ curOpt->reps[1] = reps[1];
+ curOpt->reps[2] = reps[2];
+ curOpt->reps[3] = reps[3];
+
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+ curByte = *data;
+ matchByte = *(data - reps[0]);
+
+ posState = (position & p->pbMask);
+
+ /*
+ The order of Price checks:
+ < LIT
+ <= SHORT_REP
+ < LIT : REP_0
+ < REP [ : LIT : REP_0 ]
+ < MATCH [ : LIT : REP_0 ]
+ */
+
+ {
+ UInt32 curPrice = curOpt->price;
+ unsigned prob = p->isMatch[state][posState];
+ matchPrice = curPrice + GET_PRICE_1(prob);
+ litPrice = curPrice + GET_PRICE_0(prob);
+ }
+
+ nextOpt = &p->opt[(size_t)cur + 1];
+ nextIsLit = False;
+
+ // here we can allow skip_items in p->opt, if we don't check (nextOpt->price < kInfinityPrice)
+ // 18.new.06
+ if ((nextOpt->price < kInfinityPrice
+ // && !IsLitState(state)
+ && matchByte == curByte)
+ || litPrice > nextOpt->price
+ )
+ litPrice = 0;
+ else
+ {
+ const CLzmaProb *probs = LIT_PROBS(position, *(data - 1));
+ litPrice += (!IsLitState(state) ?
+ LitEnc_Matched_GetPrice(probs, curByte, matchByte, p->ProbPrices) :
+ LitEnc_GetPrice(probs, curByte, p->ProbPrices));
+
+ if (litPrice < nextOpt->price)
+ {
+ nextOpt->price = litPrice;
+ nextOpt->len = 1;
+ MakeAs_Lit(nextOpt);
+ nextIsLit = True;
+ }
+ }
+
+ repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]);
+
+ numAvailFull = p->numAvail;
+ {
+ unsigned temp = kNumOpts - 1 - cur;
+ if (numAvailFull > temp)
+ numAvailFull = (UInt32)temp;
+ }
+
+ // 18.06
+ // ---------- SHORT_REP ----------
+ if (IsLitState(state)) // 18.new
+ if (matchByte == curByte)
+ if (repMatchPrice < nextOpt->price) // 18.new
+ // if (numAvailFull < 2 || data[1] != *(data - reps[0] + 1))
+ if (
+ // nextOpt->price >= kInfinityPrice ||
+ nextOpt->len < 2 // we can check nextOpt->len, if skip items are not allowed in p->opt
+ || (nextOpt->dist != 0
+ // && nextOpt->extra <= 1 // 17.old
+ )
+ )
+ {
+ UInt32 shortRepPrice = repMatchPrice + GetPrice_ShortRep(p, state, posState);
+ // if (shortRepPrice <= nextOpt->price) // 17.old
+ if (shortRepPrice < nextOpt->price) // 18.new
+ {
+ nextOpt->price = shortRepPrice;
+ nextOpt->len = 1;
+ MakeAs_ShortRep(nextOpt);
+ nextIsLit = False;
+ }
+ }
+
+ if (numAvailFull < 2)
+ continue;
+ numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes);
+
+ // numAvail <= p->numFastBytes
+
+ // ---------- LIT : REP_0 ----------
+
+ if (!nextIsLit
+ && litPrice != 0 // 18.new
+ && matchByte != curByte
+ && numAvailFull > 2)
+ {
+ const Byte *data2 = data - reps[0];
+ if (data[1] == data2[1] && data[2] == data2[2])
+ {
+ unsigned len;
+ unsigned limit = p->numFastBytes + 1;
+ if (limit > numAvailFull)
+ limit = numAvailFull;
+ for (len = 3; len < limit && data[len] == data2[len]; len++)
+ {}
+
+ {
+ unsigned state2 = kLiteralNextStates[state];
+ unsigned posState2 = (position + 1) & p->pbMask;
+ UInt32 price = litPrice + GetPrice_Rep_0(p, state2, posState2);
+ {
+ unsigned offset = cur + len;
+
+ if (last < offset)
+ last = offset;
+
+ // do
+ {
+ UInt32 price2;
+ COptimal *opt;
+ len--;
+ // price2 = price + GetPrice_Len_Rep_0(p, len, state2, posState2);
+ price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len);
+
+ opt = &p->opt[offset];
+ // offset--;
+ if (price2 < opt->price)
+ {
+ opt->price = price2;
+ opt->len = (UInt32)len;
+ opt->dist = 0;
+ opt->extra = 1;
+ }
+ }
+ // while (len >= 3);
+ }
+ }
+ }
+ }
+
+ startLen = 2; /* speed optimization */
+
+ {
+ // ---------- REP ----------
+ unsigned repIndex = 0; // 17.old
+ // unsigned repIndex = IsLitState(state) ? 0 : 1; // 18.notused
+ for (; repIndex < LZMA_NUM_REPS; repIndex++)
+ {
+ unsigned len;
+ UInt32 price;
+ const Byte *data2 = data - reps[repIndex];
+ if (data[0] != data2[0] || data[1] != data2[1])
+ continue;
+
+ for (len = 2; len < numAvail && data[len] == data2[len]; len++)
+ {}
+
+ // if (len < startLen) continue; // 18.new: speed optimization
+
+ {
+ unsigned offset = cur + len;
+ if (last < offset)
+ last = offset;
+ }
+ {
+ unsigned len2 = len;
+ price = repMatchPrice + GetPrice_PureRep(p, repIndex, state, posState);
+ do
+ {
+ UInt32 price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState, len2);
+ COptimal *opt = &p->opt[cur + len2];
+ if (price2 < opt->price)
+ {
+ opt->price = price2;
+ opt->len = (UInt32)len2;
+ opt->dist = (UInt32)repIndex;
+ opt->extra = 0;
+ }
+ }
+ while (--len2 >= 2);
+ }
+
+ if (repIndex == 0) startLen = len + 1; // 17.old
+ // startLen = len + 1; // 18.new
+
+ /* if (_maxMode) */
+ {
+ // ---------- REP : LIT : REP_0 ----------
+ // numFastBytes + 1 + numFastBytes
+
+ unsigned len2 = len + 1;
+ unsigned limit = len2 + p->numFastBytes;
+ if (limit > numAvailFull)
+ limit = numAvailFull;
+
+ len2 += 2;
+ if (len2 <= limit)
+ if (data[len2 - 2] == data2[len2 - 2])
+ if (data[len2 - 1] == data2[len2 - 1])
+ {
+ unsigned state2 = kRepNextStates[state];
+ unsigned posState2 = (position + len) & p->pbMask;
+ price += GET_PRICE_LEN(&p->repLenEnc, posState, len)
+ + GET_PRICE_0(p->isMatch[state2][posState2])
+ + LitEnc_Matched_GetPrice(LIT_PROBS(position + len, data[(size_t)len - 1]),
+ data[len], data2[len], p->ProbPrices);
+
+ // state2 = kLiteralNextStates[state2];
+ state2 = kState_LitAfterRep;
+ posState2 = (posState2 + 1) & p->pbMask;
+
+
+ price += GetPrice_Rep_0(p, state2, posState2);
+
+ for (; len2 < limit && data[len2] == data2[len2]; len2++)
+ {}
+
+ len2 -= len;
+ // if (len2 >= 3)
+ {
+ {
+ unsigned offset = cur + len + len2;
+
+ if (last < offset)
+ last = offset;
+ // do
+ {
+ UInt32 price2;
+ COptimal *opt;
+ len2--;
+ // price2 = price + GetPrice_Len_Rep_0(p, len2, state2, posState2);
+ price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len2);
+
+ opt = &p->opt[offset];
+ // offset--;
+ if (price2 < opt->price)
+ {
+ opt->price = price2;
+ opt->len = (UInt32)len2;
+ opt->extra = (CExtra)(len + 1);
+ opt->dist = (UInt32)repIndex;
+ }
+ }
+ // while (len2 >= 3);
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ // ---------- MATCH ----------
+ /* for (unsigned len = 2; len <= newLen; len++) */
+ if (newLen > numAvail)
+ {
+ newLen = numAvail;
+ for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2);
+ matches[numPairs] = (UInt32)newLen;
+ numPairs += 2;
+ }
+
+ // startLen = 2; /* speed optimization */
+
+ if (newLen >= startLen)
+ {
+ UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]);
+ UInt32 dist;
+ unsigned offs, posSlot, len;
+
+ {
+ unsigned offset = cur + newLen;
+ if (last < offset)
+ last = offset;
+ }
+
+ offs = 0;
+ while (startLen > matches[offs])
+ offs += 2;
+ dist = matches[(size_t)offs + 1];
+
+ // if (dist >= kNumFullDistances)
+ GetPosSlot2(dist, posSlot);
+
+ for (len = /*2*/ startLen; ; len++)
+ {
+ UInt32 price = normalMatchPrice + GET_PRICE_LEN(&p->lenEnc, posState, len);
+ {
+ COptimal *opt;
+ unsigned lenNorm = len - 2;
+ lenNorm = GetLenToPosState2(lenNorm);
+ if (dist < kNumFullDistances)
+ price += p->distancesPrices[lenNorm][dist & (kNumFullDistances - 1)];
+ else
+ price += p->posSlotPrices[lenNorm][posSlot] + p->alignPrices[dist & kAlignMask];
+
+ opt = &p->opt[cur + len];
+ if (price < opt->price)
+ {
+ opt->price = price;
+ opt->len = (UInt32)len;
+ opt->dist = dist + LZMA_NUM_REPS;
+ opt->extra = 0;
+ }
+ }
+
+ if (len == matches[offs])
+ {
+ // if (p->_maxMode) {
+ // MATCH : LIT : REP_0
+
+ const Byte *data2 = data - dist - 1;
+ unsigned len2 = len + 1;
+ unsigned limit = len2 + p->numFastBytes;
+ if (limit > numAvailFull)
+ limit = numAvailFull;
+
+ len2 += 2;
+ if (len2 <= limit)
+ if (data[len2 - 2] == data2[len2 - 2])
+ if (data[len2 - 1] == data2[len2 - 1])
+ {
+ for (; len2 < limit && data[len2] == data2[len2]; len2++)
+ {}
+
+ len2 -= len;
+
+ // if (len2 >= 3)
+ {
+ unsigned state2 = kMatchNextStates[state];
+ unsigned posState2 = (position + len) & p->pbMask;
+ unsigned offset;
+ price += GET_PRICE_0(p->isMatch[state2][posState2]);
+ price += LitEnc_Matched_GetPrice(LIT_PROBS(position + len, data[(size_t)len - 1]),
+ data[len], data2[len], p->ProbPrices);
+
+ // state2 = kLiteralNextStates[state2];
+ state2 = kState_LitAfterMatch;
+
+ posState2 = (posState2 + 1) & p->pbMask;
+ price += GetPrice_Rep_0(p, state2, posState2);
+
+ offset = cur + len + len2;
+
+ if (last < offset)
+ last = offset;
+ // do
+ {
+ UInt32 price2;
+ COptimal *opt;
+ len2--;
+ // price2 = price + GetPrice_Len_Rep_0(p, len2, state2, posState2);
+ price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len2);
+ opt = &p->opt[offset];
+ // offset--;
+ if (price2 < opt->price)
+ {
+ opt->price = price2;
+ opt->len = (UInt32)len2;
+ opt->extra = (CExtra)(len + 1);
+ opt->dist = dist + LZMA_NUM_REPS;
+ }
+ }
+ // while (len2 >= 3);
+ }
+
+ }
+
+ offs += 2;
+ if (offs == numPairs)
+ break;
+ dist = matches[(size_t)offs + 1];
+ // if (dist >= kNumFullDistances)
+ GetPosSlot2(dist, posSlot);
+ }
+ }
+ }
+ }
+
+ do
+ p->opt[last].price = kInfinityPrice;
+ while (--last);
+
+ return Backward(p, cur);
+}
+
+
+
+#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist))
+
+
+
+static unsigned GetOptimumFast(CLzmaEnc *p)
+{
+ UInt32 numAvail, mainDist;
+ unsigned mainLen, numPairs, repIndex, repLen, i;
+ const Byte *data;
+
+ if (p->additionalOffset == 0)
+ mainLen = ReadMatchDistances(p, &numPairs);
+ else
+ {
+ mainLen = p->longestMatchLen;
+ numPairs = p->numPairs;
+ }
+
+ numAvail = p->numAvail;
+ p->backRes = MARK_LIT;
+ if (numAvail < 2)
+ return 1;
+ // if (mainLen < 2 && p->state == 0) return 1; // 18.06.notused
+ if (numAvail > LZMA_MATCH_LEN_MAX)
+ numAvail = LZMA_MATCH_LEN_MAX;
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+ repLen = repIndex = 0;
+
+ for (i = 0; i < LZMA_NUM_REPS; i++)
+ {
+ unsigned len;
+ const Byte *data2 = data - p->reps[i];
+ if (data[0] != data2[0] || data[1] != data2[1])
+ continue;
+ for (len = 2; len < numAvail && data[len] == data2[len]; len++)
+ {}
+ if (len >= p->numFastBytes)
+ {
+ p->backRes = (UInt32)i;
+ MOVE_POS(p, len - 1)
+ return len;
+ }
+ if (len > repLen)
+ {
+ repIndex = i;
+ repLen = len;
+ }
+ }
+
+ if (mainLen >= p->numFastBytes)
+ {
+ p->backRes = p->matches[(size_t)numPairs - 1] + LZMA_NUM_REPS;
+ MOVE_POS(p, mainLen - 1)
+ return mainLen;
+ }
+
+ mainDist = 0; /* for GCC */
+
+ if (mainLen >= 2)
+ {
+ mainDist = p->matches[(size_t)numPairs - 1];
+ while (numPairs > 2)
+ {
+ UInt32 dist2;
+ if (mainLen != p->matches[(size_t)numPairs - 4] + 1)
+ break;
+ dist2 = p->matches[(size_t)numPairs - 3];
+ if (!ChangePair(dist2, mainDist))
+ break;
+ numPairs -= 2;
+ mainLen--;
+ mainDist = dist2;
+ }
+ if (mainLen == 2 && mainDist >= 0x80)
+ mainLen = 1;
+ }
+
+ if (repLen >= 2)
+ if ( repLen + 1 >= mainLen
+ || (repLen + 2 >= mainLen && mainDist >= (1 << 9))
+ || (repLen + 3 >= mainLen && mainDist >= (1 << 15)))
+ {
+ p->backRes = (UInt32)repIndex;
+ MOVE_POS(p, repLen - 1)
+ return repLen;
+ }
+
+ if (mainLen < 2 || numAvail <= 2)
+ return 1;
+
+ {
+ unsigned len1 = ReadMatchDistances(p, &p->numPairs);
+ p->longestMatchLen = len1;
+
+ if (len1 >= 2)
+ {
+ UInt32 newDist = p->matches[(size_t)p->numPairs - 1];
+ if ( (len1 >= mainLen && newDist < mainDist)
+ || (len1 == mainLen + 1 && !ChangePair(mainDist, newDist))
+ || (len1 > mainLen + 1)
+ || (len1 + 1 >= mainLen && mainLen >= 3 && ChangePair(newDist, mainDist)))
+ return 1;
+ }
+ }
+
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1;
+
+ for (i = 0; i < LZMA_NUM_REPS; i++)
+ {
+ unsigned len, limit;
+ const Byte *data2 = data - p->reps[i];
+ if (data[0] != data2[0] || data[1] != data2[1])
+ continue;
+ limit = mainLen - 1;
+ for (len = 2;; len++)
+ {
+ if (len >= limit)
+ return 1;
+ if (data[len] != data2[len])
+ break;
+ }
+ }
+
+ p->backRes = mainDist + LZMA_NUM_REPS;
+ if (mainLen != 2)
+ {
+ MOVE_POS(p, mainLen - 2)
+ }
+ return mainLen;
+}
+
+
+
+
+static void WriteEndMarker(CLzmaEnc *p, unsigned posState)
+{
+ UInt32 range;
+ range = p->rc.range;
+ {
+ UInt32 ttt, newBound;
+ CLzmaProb *prob = &p->isMatch[p->state][posState];
+ RC_BIT_PRE(&p->rc, prob)
+ RC_BIT_1(&p->rc, prob)
+ prob = &p->isRep[p->state];
+ RC_BIT_PRE(&p->rc, prob)
+ RC_BIT_0(&p->rc, prob)
+ }
+ p->state = kMatchNextStates[p->state];
+
+ p->rc.range = range;
+ LenEnc_Encode(&p->lenProbs, &p->rc, 0, posState);
+ range = p->rc.range;
+
+ {
+ // RcTree_Encode_PosSlot(&p->rc, p->posSlotEncoder[0], (1 << kNumPosSlotBits) - 1);
+ CLzmaProb *probs = p->posSlotEncoder[0];
+ unsigned m = 1;
+ do
+ {
+ UInt32 ttt, newBound;
+ RC_BIT_PRE(p, probs + m)
+ RC_BIT_1(&p->rc, probs + m);
+ m = (m << 1) + 1;
+ }
+ while (m < (1 << kNumPosSlotBits));
+ }
+ {
+ // RangeEnc_EncodeDirectBits(&p->rc, ((UInt32)1 << (30 - kNumAlignBits)) - 1, 30 - kNumAlignBits); UInt32 range = p->range;
+ unsigned numBits = 30 - kNumAlignBits;
+ do
+ {
+ range >>= 1;
+ p->rc.low += range;
+ RC_NORM(&p->rc)
+ }
+ while (--numBits);
+ }
+
+ {
+ // RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask);
+ CLzmaProb *probs = p->posAlignEncoder;
+ unsigned m = 1;
+ do
+ {
+ UInt32 ttt, newBound;
+ RC_BIT_PRE(p, probs + m)
+ RC_BIT_1(&p->rc, probs + m);
+ m = (m << 1) + 1;
+ }
+ while (m < kAlignTableSize);
+ }
+ p->rc.range = range;
+}
+
+
+static SRes CheckErrors(CLzmaEnc *p)
+{
+ if (p->result != SZ_OK)
+ return p->result;
+ if (p->rc.res != SZ_OK)
+ p->result = SZ_ERROR_WRITE;
+ if (p->matchFinderBase.result != SZ_OK)
+ p->result = SZ_ERROR_READ;
+ if (p->result != SZ_OK)
+ p->finished = True;
+ return p->result;
+}
+
+
+MY_NO_INLINE static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
+{
+ /* ReleaseMFStream(); */
+ p->finished = True;
+ if (p->writeEndMark)
+ WriteEndMarker(p, nowPos & p->pbMask);
+ RangeEnc_FlushData(&p->rc);
+ RangeEnc_FlushStream(&p->rc);
+ return CheckErrors(p);
+}
+
+
+MY_NO_INLINE static void FillAlignPrices(CLzmaEnc *p)
+{
+ unsigned i;
+ const CProbPrice *ProbPrices = p->ProbPrices;
+ const CLzmaProb *probs = p->posAlignEncoder;
+ // p->alignPriceCount = 0;
+ for (i = 0; i < kAlignTableSize / 2; i++)
+ {
+ UInt32 price = 0;
+ unsigned sym = i;
+ unsigned m = 1;
+ unsigned bit;
+ UInt32 prob;
+ bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit;
+ bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit;
+ bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit;
+ prob = probs[m];
+ p->alignPrices[i ] = price + GET_PRICEa_0(prob);
+ p->alignPrices[i + 8] = price + GET_PRICEa_1(prob);
+ // p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices);
+ }
+}
+
+
+MY_NO_INLINE static void FillDistancesPrices(CLzmaEnc *p)
+{
+ // int y; for (y = 0; y < 100; y++) {
+
+ UInt32 tempPrices[kNumFullDistances];
+ unsigned i, lps;
+
+ const CProbPrice *ProbPrices = p->ProbPrices;
+ p->matchPriceCount = 0;
+
+ for (i = kStartPosModelIndex / 2; i < kNumFullDistances / 2; i++)
+ {
+ unsigned posSlot = GetPosSlot1(i);
+ unsigned footerBits = (posSlot >> 1) - 1;
+ unsigned base = ((2 | (posSlot & 1)) << footerBits);
+ const CLzmaProb *probs = p->posEncoders + (size_t)base * 2;
+ // tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base, footerBits, i - base, p->ProbPrices);
+ UInt32 price = 0;
+ unsigned m = 1;
+ unsigned sym = i;
+ unsigned offset = (unsigned)1 << footerBits;
+ base += i;
+
+ if (footerBits)
+ do
+ {
+ unsigned bit = sym & 1;
+ sym >>= 1;
+ price += GET_PRICEa(probs[m], bit);
+ m = (m << 1) + bit;
+ }
+ while (--footerBits);
+
+ {
+ unsigned prob = probs[m];
+ tempPrices[base ] = price + GET_PRICEa_0(prob);
+ tempPrices[base + offset] = price + GET_PRICEa_1(prob);
+ }
+ }
+
+ for (lps = 0; lps < kNumLenToPosStates; lps++)
+ {
+ unsigned slot;
+ unsigned distTableSize2 = (p->distTableSize + 1) >> 1;
+ UInt32 *posSlotPrices = p->posSlotPrices[lps];
+ const CLzmaProb *probs = p->posSlotEncoder[lps];
+
+ for (slot = 0; slot < distTableSize2; slot++)
+ {
+ // posSlotPrices[slot] = RcTree_GetPrice(encoder, kNumPosSlotBits, slot, p->ProbPrices);
+ UInt32 price;
+ unsigned bit;
+ unsigned sym = slot + (1 << (kNumPosSlotBits - 1));
+ unsigned prob;
+ bit = sym & 1; sym >>= 1; price = GET_PRICEa(probs[sym], bit);
+ bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
+ bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
+ bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
+ bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit);
+ prob = probs[(size_t)slot + (1 << (kNumPosSlotBits - 1))];
+ posSlotPrices[(size_t)slot * 2 ] = price + GET_PRICEa_0(prob);
+ posSlotPrices[(size_t)slot * 2 + 1] = price + GET_PRICEa_1(prob);
+ }
+
+ {
+ UInt32 delta = ((UInt32)((kEndPosModelIndex / 2 - 1) - kNumAlignBits) << kNumBitPriceShiftBits);
+ for (slot = kEndPosModelIndex / 2; slot < distTableSize2; slot++)
+ {
+ posSlotPrices[(size_t)slot * 2 ] += delta;
+ posSlotPrices[(size_t)slot * 2 + 1] += delta;
+ delta += ((UInt32)1 << kNumBitPriceShiftBits);
+ }
+ }
+
+ {
+ UInt32 *dp = p->distancesPrices[lps];
+
+ dp[0] = posSlotPrices[0];
+ dp[1] = posSlotPrices[1];
+ dp[2] = posSlotPrices[2];
+ dp[3] = posSlotPrices[3];
+
+ for (i = 4; i < kNumFullDistances; i += 2)
+ {
+ UInt32 slotPrice = posSlotPrices[GetPosSlot1(i)];
+ dp[i ] = slotPrice + tempPrices[i];
+ dp[i + 1] = slotPrice + tempPrices[i + 1];
+ }
+ }
+ }
+ // }
+}
+
+
+
+void LzmaEnc_Construct(CLzmaEnc *p)
+{
+ RangeEnc_Construct(&p->rc);
+ MatchFinder_Construct(&p->matchFinderBase);
+
+ #ifndef _7ZIP_ST
+ MatchFinderMt_Construct(&p->matchFinderMt);
+ p->matchFinderMt.MatchFinder = &p->matchFinderBase;
+ #endif
+
+ {
+ CLzmaEncProps props;
+ LzmaEncProps_Init(&props);
+ LzmaEnc_SetProps(p, &props);
+ }
+
+ #ifndef LZMA_LOG_BSR
+ LzmaEnc_FastPosInit(p->g_FastPos);
+ #endif
+
+ LzmaEnc_InitPriceTables(p->ProbPrices);
+ p->litProbs = NULL;
+ p->saveState.litProbs = NULL;
+
+}
+
+CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc)
+{
+ void *p;
+ p = ISzAlloc_Alloc(alloc, sizeof(CLzmaEnc));
+ if (p)
+ LzmaEnc_Construct((CLzmaEnc *)p);
+ return p;
+}
+
+void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAllocPtr alloc)
+{
+ ISzAlloc_Free(alloc, p->litProbs);
+ ISzAlloc_Free(alloc, p->saveState.litProbs);
+ p->litProbs = NULL;
+ p->saveState.litProbs = NULL;
+}
+
+void LzmaEnc_Destruct(CLzmaEnc *p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ #ifndef _7ZIP_ST
+ MatchFinderMt_Destruct(&p->matchFinderMt, allocBig);
+ #endif
+
+ MatchFinder_Free(&p->matchFinderBase, allocBig);
+ LzmaEnc_FreeLits(p, alloc);
+ RangeEnc_Free(&p->rc, alloc);
+}
+
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig);
+ ISzAlloc_Free(alloc, p);
+}
+
+
+SRes LzmaEnc_CodeOneBlock(CLzmaEncHandle pp, UInt32 maxPackSize, UInt32 maxUnpackSize)
+{
+ CLzmaEnc *p = (CLzmaEnc *) pp;
+ UInt32 nowPos32, startPos32;
+ if (p->needInit)
+ {
+ p->matchFinder.Init(p->matchFinderObj);
+ p->needInit = 0;
+ }
+
+ if (p->finished)
+ return p->result;
+ RINOK(CheckErrors(p));
+
+ nowPos32 = (UInt32)p->nowPos64;
+ startPos32 = nowPos32;
+
+ if (p->nowPos64 == 0)
+ {
+ unsigned numPairs;
+ Byte curByte;
+ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
+ return Flush(p, nowPos32);
+ ReadMatchDistances(p, &numPairs);
+ RangeEnc_EncodeBit_0(&p->rc, &p->isMatch[kState_Start][0]);
+ // p->state = kLiteralNextStates[p->state];
+ curByte = *(p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset);
+ LitEnc_Encode(&p->rc, p->litProbs, curByte);
+ p->additionalOffset--;
+ nowPos32++;
+ }
+
+ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0)
+
+ for (;;)
+ {
+ UInt32 dist;
+ unsigned len, posState;
+ UInt32 range, ttt, newBound;
+ CLzmaProb *probs;
+
+ if (p->fastMode)
+ len = GetOptimumFast(p);
+ else
+ {
+ unsigned oci = p->optCur;
+ if (p->optEnd == oci)
+ len = GetOptimum(p, nowPos32);
+ else
+ {
+ const COptimal *opt = &p->opt[oci];
+ len = opt->len;
+ p->backRes = opt->dist;
+ p->optCur = oci + 1;
+ }
+ }
+
+ posState = (unsigned)nowPos32 & p->pbMask;
+ range = p->rc.range;
+ probs = &p->isMatch[p->state][posState];
+
+ RC_BIT_PRE(&p->rc, probs)
+
+ dist = p->backRes;
+
+ #ifdef SHOW_STAT2
+ printf("\n pos = %6X, len = %3u pos = %6u", nowPos32, len, dist);
+ #endif
+
+ if (dist == MARK_LIT)
+ {
+ Byte curByte;
+ const Byte *data;
+ unsigned state;
+
+ RC_BIT_0(&p->rc, probs);
+ p->rc.range = range;
+ data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
+ probs = LIT_PROBS(nowPos32, *(data - 1));
+ curByte = *data;
+ state = p->state;
+ p->state = kLiteralNextStates[state];
+ if (IsLitState(state))
+ LitEnc_Encode(&p->rc, probs, curByte);
+ else
+ LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0]));
+ }
+ else
+ {
+ RC_BIT_1(&p->rc, probs);
+ probs = &p->isRep[p->state];
+ RC_BIT_PRE(&p->rc, probs)
+
+ if (dist < LZMA_NUM_REPS)
+ {
+ RC_BIT_1(&p->rc, probs);
+ probs = &p->isRepG0[p->state];
+ RC_BIT_PRE(&p->rc, probs)
+ if (dist == 0)
+ {
+ RC_BIT_0(&p->rc, probs);
+ probs = &p->isRep0Long[p->state][posState];
+ RC_BIT_PRE(&p->rc, probs)
+ if (len != 1)
+ {
+ RC_BIT_1_BASE(&p->rc, probs);
+ }
+ else
+ {
+ RC_BIT_0_BASE(&p->rc, probs);
+ p->state = kShortRepNextStates[p->state];
+ }
+ }
+ else
+ {
+ RC_BIT_1(&p->rc, probs);
+ probs = &p->isRepG1[p->state];
+ RC_BIT_PRE(&p->rc, probs)
+ if (dist == 1)
+ {
+ RC_BIT_0_BASE(&p->rc, probs);
+ dist = p->reps[1];
+ }
+ else
+ {
+ RC_BIT_1(&p->rc, probs);
+ probs = &p->isRepG2[p->state];
+ RC_BIT_PRE(&p->rc, probs)
+ if (dist == 2)
+ {
+ RC_BIT_0_BASE(&p->rc, probs);
+ dist = p->reps[2];
+ }
+ else
+ {
+ RC_BIT_1_BASE(&p->rc, probs);
+ dist = p->reps[3];
+ p->reps[3] = p->reps[2];
+ }
+ p->reps[2] = p->reps[1];
+ }
+ p->reps[1] = p->reps[0];
+ p->reps[0] = dist;
+ }
+
+ RC_NORM(&p->rc)
+
+ p->rc.range = range;
+
+ if (len != 1)
+ {
+ LenEnc_Encode(&p->repLenProbs, &p->rc, len - LZMA_MATCH_LEN_MIN, posState);
+ --p->repLenEncCounter;
+ p->state = kRepNextStates[p->state];
+ }
+ }
+ else
+ {
+ unsigned posSlot;
+ RC_BIT_0(&p->rc, probs);
+ p->rc.range = range;
+ p->state = kMatchNextStates[p->state];
+
+ LenEnc_Encode(&p->lenProbs, &p->rc, len - LZMA_MATCH_LEN_MIN, posState);
+ // --p->lenEnc.counter;
+
+ dist -= LZMA_NUM_REPS;
+ p->reps[3] = p->reps[2];
+ p->reps[2] = p->reps[1];
+ p->reps[1] = p->reps[0];
+ p->reps[0] = dist + 1;
+
+ p->matchPriceCount++;
+ GetPosSlot(dist, posSlot);
+ // RcTree_Encode_PosSlot(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], posSlot);
+ {
+ UInt32 sym = (UInt32)posSlot + (1 << kNumPosSlotBits);
+ range = p->rc.range;
+ probs = p->posSlotEncoder[GetLenToPosState(len)];
+ do
+ {
+ CLzmaProb *prob = probs + (sym >> kNumPosSlotBits);
+ UInt32 bit = (sym >> (kNumPosSlotBits - 1)) & 1;
+ sym <<= 1;
+ RC_BIT(&p->rc, prob, bit);
+ }
+ while (sym < (1 << kNumPosSlotBits * 2));
+ p->rc.range = range;
+ }
+
+ if (dist >= kStartPosModelIndex)
+ {
+ unsigned footerBits = ((posSlot >> 1) - 1);
+
+ if (dist < kNumFullDistances)
+ {
+ unsigned base = ((2 | (posSlot & 1)) << footerBits);
+ RcTree_ReverseEncode(&p->rc, p->posEncoders + base, footerBits, (unsigned)(dist /* - base */));
+ }
+ else
+ {
+ UInt32 pos2 = (dist | 0xF) << (32 - footerBits);
+ range = p->rc.range;
+ // RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits);
+ /*
+ do
+ {
+ range >>= 1;
+ p->rc.low += range & (0 - ((dist >> --footerBits) & 1));
+ RC_NORM(&p->rc)
+ }
+ while (footerBits > kNumAlignBits);
+ */
+ do
+ {
+ range >>= 1;
+ p->rc.low += range & (0 - (pos2 >> 31));
+ pos2 += pos2;
+ RC_NORM(&p->rc)
+ }
+ while (pos2 != 0xF0000000);
+
+
+ // RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask);
+
+ {
+ unsigned m = 1;
+ unsigned bit;
+ bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); m = (m << 1) + bit;
+ bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); m = (m << 1) + bit;
+ bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); m = (m << 1) + bit;
+ bit = dist & 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit);
+ p->rc.range = range;
+ // p->alignPriceCount++;
+ }
+ }
+ }
+ }
+ }
+
+ nowPos32 += (UInt32)len;
+ p->additionalOffset -= len;
+
+ if (p->additionalOffset == 0)
+ {
+ UInt32 processed;
+
+ if (!p->fastMode)
+ {
+ /*
+ if (p->alignPriceCount >= 16) // kAlignTableSize
+ FillAlignPrices(p);
+ if (p->matchPriceCount >= 128)
+ FillDistancesPrices(p);
+ if (p->lenEnc.counter <= 0)
+ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, &p->lenProbs, p->ProbPrices);
+ */
+ if (p->matchPriceCount >= 64)
+ {
+ FillAlignPrices(p);
+ // { int y; for (y = 0; y < 100; y++) {
+ FillDistancesPrices(p);
+ // }}
+ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, &p->lenProbs, p->ProbPrices);
+ }
+ if (p->repLenEncCounter <= 0)
+ {
+ p->repLenEncCounter = REP_LEN_COUNT;
+ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, &p->repLenProbs, p->ProbPrices);
+ }
+ }
+
+ if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0)
+ break;
+ processed = nowPos32 - startPos32;
+
+ if (maxPackSize)
+ {
+ if (processed + kNumOpts + 300 >= maxUnpackSize
+ || RangeEnc_GetProcessed_sizet(&p->rc) + kPackReserve >= maxPackSize)
+ break;
+ }
+ else if (processed >= (1 << 17))
+ {
+ p->nowPos64 += nowPos32 - startPos32;
+ return CheckErrors(p);
+ }
+ }
+ }
+
+ p->nowPos64 += nowPos32 - startPos32;
+ return Flush(p, nowPos32);
+}
+
+
+
+#define kBigHashDicLimit ((UInt32)1 << 24)
+
+static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ UInt32 beforeSize = kNumOpts;
+ if (!RangeEnc_Alloc(&p->rc, alloc))
+ return SZ_ERROR_MEM;
+
+ #ifndef _7ZIP_ST
+ p->mtMode = (p->multiThread && !p->fastMode && (p->matchFinderBase.btMode != 0));
+ #endif
+
+ {
+ unsigned lclp = p->lc + p->lp;
+ if (!p->litProbs || !p->saveState.litProbs || p->lclp != lclp)
+ {
+ LzmaEnc_FreeLits(p, alloc);
+ p->litProbs = (CLzmaProb *)ISzAlloc_Alloc(alloc, ((UInt32)0x300 << lclp) * sizeof(CLzmaProb));
+ p->saveState.litProbs = (CLzmaProb *)ISzAlloc_Alloc(alloc, ((UInt32)0x300 << lclp) * sizeof(CLzmaProb));
+ if (!p->litProbs || !p->saveState.litProbs)
+ {
+ LzmaEnc_FreeLits(p, alloc);
+ return SZ_ERROR_MEM;
+ }
+ p->lclp = lclp;
+ }
+ }
+
+ p->matchFinderBase.bigHash = (Byte)(p->dictSize > kBigHashDicLimit ? 1 : 0);
+
+ if (beforeSize + p->dictSize < keepWindowSize)
+ beforeSize = keepWindowSize - p->dictSize;
+
+ #ifndef _7ZIP_ST
+ if (p->mtMode)
+ {
+ RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes,
+ LZMA_MATCH_LEN_MAX
+ + 1 /* 18.04 */
+ , allocBig));
+ p->matchFinderObj = &p->matchFinderMt;
+ p->matchFinderBase.bigHash = (Byte)(
+ (p->dictSize > kBigHashDicLimit && p->matchFinderBase.hashMask >= 0xFFFFFF) ? 1 : 0);
+ MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder);
+ }
+ else
+ #endif
+ {
+ if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig))
+ return SZ_ERROR_MEM;
+ p->matchFinderObj = &p->matchFinderBase;
+ MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder);
+ }
+
+ return SZ_OK;
+}
+
+void LzmaEnc_Init(CLzmaEnc *p)
+{
+ unsigned i;
+ p->state = 0;
+ p->reps[0] =
+ p->reps[1] =
+ p->reps[2] =
+ p->reps[3] = 1;
+
+ RangeEnc_Init(&p->rc);
+
+ for (i = 0; i < (1 << kNumAlignBits); i++)
+ p->posAlignEncoder[i] = kProbInitValue;
+
+ for (i = 0; i < kNumStates; i++)
+ {
+ unsigned j;
+ for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++)
+ {
+ p->isMatch[i][j] = kProbInitValue;
+ p->isRep0Long[i][j] = kProbInitValue;
+ }
+ p->isRep[i] = kProbInitValue;
+ p->isRepG0[i] = kProbInitValue;
+ p->isRepG1[i] = kProbInitValue;
+ p->isRepG2[i] = kProbInitValue;
+ }
+
+ {
+ for (i = 0; i < kNumLenToPosStates; i++)
+ {
+ CLzmaProb *probs = p->posSlotEncoder[i];
+ unsigned j;
+ for (j = 0; j < (1 << kNumPosSlotBits); j++)
+ probs[j] = kProbInitValue;
+ }
+ }
+ {
+ for (i = 0; i < kNumFullDistances; i++)
+ p->posEncoders[i] = kProbInitValue;
+ }
+
+ {
+ UInt32 num = (UInt32)0x300 << (p->lp + p->lc);
+ UInt32 k;
+ CLzmaProb *probs = p->litProbs;
+ for (k = 0; k < num; k++)
+ probs[k] = kProbInitValue;
+ }
+
+
+ LenEnc_Init(&p->lenProbs);
+ LenEnc_Init(&p->repLenProbs);
+
+ p->optEnd = 0;
+ p->optCur = 0;
+
+ {
+ for (i = 0; i < kNumOpts; i++)
+ p->opt[i].price = kInfinityPrice;
+ }
+
+ p->additionalOffset = 0;
+
+ p->pbMask = (1 << p->pb) - 1;
+ p->lpMask = ((UInt32)0x100 << p->lp) - ((unsigned)0x100 >> p->lc);
+}
+
+
+void LzmaEnc_InitPrices(CLzmaEnc *p)
+{
+ if (!p->fastMode)
+ {
+ FillDistancesPrices(p);
+ FillAlignPrices(p);
+ }
+
+ p->lenEnc.tableSize =
+ p->repLenEnc.tableSize =
+ p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN;
+
+ p->repLenEncCounter = REP_LEN_COUNT;
+
+ LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, &p->lenProbs, p->ProbPrices);
+ LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, &p->repLenProbs, p->ProbPrices);
+}
+
+static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ unsigned i;
+ for (i = kEndPosModelIndex / 2; i < kDicLogSizeMax; i++)
+ if (p->dictSize <= ((UInt32)1 << i))
+ break;
+ p->distTableSize = i * 2;
+
+ p->finished = False;
+ p->result = SZ_OK;
+ RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig));
+ LzmaEnc_Init(p);
+ LzmaEnc_InitPrices(p);
+ p->nowPos64 = 0;
+ return SZ_OK;
+}
+
+SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
+ ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ p->matchFinderBase.stream = inStream;
+ p->needInit = 1;
+ p->rc.outStream = outStream;
+ return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig);
+}
+
+SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
+ ISeqInStream *inStream, UInt32 keepWindowSize,
+ ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ p->matchFinderBase.stream = inStream;
+ p->needInit = 1;
+ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
+}
+
+static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
+{
+ p->matchFinderBase.directInput = 1;
+ p->matchFinderBase.bufferBase = (Byte *)src;
+ p->matchFinderBase.directInputRem = srcLen;
+}
+
+SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
+ UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ LzmaEnc_SetInputBuf(p, src, srcLen);
+ p->needInit = 1;
+
+ LzmaEnc_SetDataSize(pp, srcLen);
+ return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig);
+}
+
+void LzmaEnc_Finish(CLzmaEncHandle pp)
+{
+ #ifndef _7ZIP_ST
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ if (p->mtMode)
+ MatchFinderMt_ReleaseStream(&p->matchFinderMt);
+ #else
+ UNUSED_VAR(pp);
+ #endif
+}
+
+
+typedef struct
+{
+ ISeqOutStream vt;
+ Byte *data;
+ SizeT rem;
+ BoolInt overflow;
+} CLzmaEnc_SeqOutStreamBuf;
+
+static size_t SeqOutStreamBuf_Write(const ISeqOutStream *pp, const void *data, size_t size)
+{
+ CLzmaEnc_SeqOutStreamBuf *p = CONTAINER_FROM_VTBL(pp, CLzmaEnc_SeqOutStreamBuf, vt);
+ if (p->rem < size)
+ {
+ size = p->rem;
+ p->overflow = True;
+ }
+ memcpy(p->data, data, size);
+ p->rem -= size;
+ p->data += size;
+ return size;
+}
+
+
+UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
+{
+ const CLzmaEnc *p = (CLzmaEnc *)pp;
+ return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
+}
+
+
+const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
+{
+ const CLzmaEnc *p = (CLzmaEnc *)pp;
+ return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset;
+}
+
+
+SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
+ Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ UInt64 nowPos64;
+ SRes res;
+ CLzmaEnc_SeqOutStreamBuf outStream;
+
+ outStream.vt.Write = SeqOutStreamBuf_Write;
+ outStream.data = dest;
+ outStream.rem = *destLen;
+ outStream.overflow = False;
+
+ p->writeEndMark = False;
+ p->finished = False;
+ p->result = SZ_OK;
+
+ if (reInit)
+ LzmaEnc_Init(p);
+ LzmaEnc_InitPrices(p);
+
+ nowPos64 = p->nowPos64;
+ RangeEnc_Init(&p->rc);
+ p->rc.outStream = &outStream.vt;
+
+ if (desiredPackSize == 0)
+ return SZ_ERROR_OUTPUT_EOF;
+
+ res = LzmaEnc_CodeOneBlock(p, desiredPackSize, *unpackSize);
+
+ *unpackSize = (UInt32)(p->nowPos64 - nowPos64);
+ *destLen -= outStream.rem;
+ if (outStream.overflow)
+ return SZ_ERROR_OUTPUT_EOF;
+
+ return res;
+}
+
+
+static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
+{
+ SRes res = SZ_OK;
+
+ #ifndef _7ZIP_ST
+ Byte allocaDummy[0x300];
+ allocaDummy[0] = 0;
+ allocaDummy[1] = allocaDummy[0];
+ #endif
+
+ for (;;)
+ {
+ res = LzmaEnc_CodeOneBlock(p, 0, 0);
+ if (res != SZ_OK || p->finished)
+ break;
+ if (progress)
+ {
+ res = ICompressProgress_Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc));
+ if (res != SZ_OK)
+ {
+ res = SZ_ERROR_PROGRESS;
+ break;
+ }
+ }
+ }
+
+ LzmaEnc_Finish(p);
+
+ /*
+ if (res == SZ_OK && !Inline_MatchFinder_IsFinishedOK(&p->matchFinderBase))
+ res = SZ_ERROR_FAIL;
+ }
+ */
+
+ return res;
+}
+
+
+SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
+ ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig));
+ return LzmaEnc_Encode2((CLzmaEnc *)pp, progress);
+}
+
+
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ unsigned i;
+ UInt32 dictSize = p->dictSize;
+ if (*size < LZMA_PROPS_SIZE)
+ return SZ_ERROR_PARAM;
+ *size = LZMA_PROPS_SIZE;
+ props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc);
+
+ if (dictSize >= ((UInt32)1 << 22))
+ {
+ UInt32 kDictMask = ((UInt32)1 << 20) - 1;
+ if (dictSize < (UInt32)0xFFFFFFFF - kDictMask)
+ dictSize = (dictSize + kDictMask) & ~kDictMask;
+ }
+ else for (i = 11; i <= 30; i++)
+ {
+ if (dictSize <= ((UInt32)2 << i)) { dictSize = (2 << i); break; }
+ if (dictSize <= ((UInt32)3 << i)) { dictSize = (3 << i); break; }
+ }
+
+ for (i = 0; i < 4; i++)
+ props[1 + i] = (Byte)(dictSize >> (8 * i));
+ return SZ_OK;
+}
+
+
+unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle pp)
+{
+ return ((CLzmaEnc *)pp)->writeEndMark;
+}
+
+
+SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ int writeEndMark, ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ SRes res;
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+
+ CLzmaEnc_SeqOutStreamBuf outStream;
+
+ outStream.vt.Write = SeqOutStreamBuf_Write;
+ outStream.data = dest;
+ outStream.rem = *destLen;
+ outStream.overflow = False;
+
+ p->writeEndMark = writeEndMark;
+ p->rc.outStream = &outStream.vt;
+
+ res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig);
+
+ if (res == SZ_OK)
+ {
+ res = LzmaEnc_Encode2(p, progress);
+ if (res == SZ_OK && p->nowPos64 != srcLen)
+ res = SZ_ERROR_FAIL;
+ }
+
+ *destLen -= outStream.rem;
+ if (outStream.overflow)
+ return SZ_ERROR_OUTPUT_EOF;
+ return res;
+}
+
+
+SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
+ ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
+{
+ CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc);
+ SRes res;
+ if (!p)
+ return SZ_ERROR_MEM;
+
+ res = LzmaEnc_SetProps(p, props);
+ if (res == SZ_OK)
+ {
+ res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize);
+ if (res == SZ_OK)
+ res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen,
+ writeEndMark, progress, alloc, allocBig);
+ }
+
+ LzmaEnc_Destroy(p, alloc, allocBig);
+ return res;
+}
+
+BoolInt LzmaEnc_IsFinished(CLzmaEncHandle pp)
+{
+ CLzmaEnc *p = (CLzmaEnc *)pp;
+ return p->finished;
+}
+
\ No newline at end of file diff --git a/contrib/libs/lzmasdk/LzmaEnc.h b/contrib/libs/lzmasdk/LzmaEnc.h new file mode 100644 index 0000000000..b7d9dd2055 --- /dev/null +++ b/contrib/libs/lzmasdk/LzmaEnc.h @@ -0,0 +1,83 @@ +/* LzmaEnc.h -- LZMA Encoder
+2017-07-27 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA_ENC_H
+#define __LZMA_ENC_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+#define LZMA_PROPS_SIZE 5
+
+typedef struct _CLzmaEncProps
+{
+ int level; /* 0 <= level <= 9 */
+ UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version
+ (1 << 12) <= dictSize <= (3 << 29) for 64-bit version
+ default = (1 << 24) */
+ int lc; /* 0 <= lc <= 8, default = 3 */
+ int lp; /* 0 <= lp <= 4, default = 0 */
+ int pb; /* 0 <= pb <= 4, default = 2 */
+ int algo; /* 0 - fast, 1 - normal, default = 1 */
+ int fb; /* 5 <= fb <= 273, default = 32 */
+ int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */
+ int numHashBytes; /* 2, 3 or 4, default = 4 */
+ UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */
+ unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */
+ int numThreads; /* 1 or 2, default = 2 */
+
+ UInt64 reduceSize; /* estimated size of data that will be compressed. default = (UInt64)(Int64)-1.
+ Encoder uses this value to reduce dictionary size */
+} CLzmaEncProps;
+
+void LzmaEncProps_Init(CLzmaEncProps *p);
+void LzmaEncProps_Normalize(CLzmaEncProps *p);
+UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2);
+
+
+/* ---------- CLzmaEncHandle Interface ---------- */
+
+/* LzmaEnc* functions can return the following exit codes:
+SRes:
+ SZ_OK - OK
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_PARAM - Incorrect parameter in props
+ SZ_ERROR_WRITE - ISeqOutStream write callback error
+ SZ_ERROR_OUTPUT_EOF - output buffer overflow - version with (Byte *) output
+ SZ_ERROR_PROGRESS - the progress callback requested a break
+ SZ_ERROR_THREAD - error in multithreading functions (only for Mt version)
+*/
+
+typedef void * CLzmaEncHandle;
+
+CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc);
+void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig);
+
+SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props);
+void LzmaEnc_SetDataSize(CLzmaEncHandle p, UInt64 expectedDataSiize);
+SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size);
+unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle p);
+
+SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream,
+ ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
+SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ int writeEndMark, ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
+
+
+/* ---------- One Call Interface ---------- */
+
+SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
+ const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
+ ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig);
+
+EXTERN_C_END
+
+/* ---------- Streaming Interface ---------- */
+
+SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ISzAllocPtr alloc, ISzAllocPtr allocBig);
+SRes LzmaEnc_CodeOneBlock(CLzmaEncHandle pp, UInt32 maxPackSize, UInt32 maxUnpackSize);
+BoolInt LzmaEnc_IsFinished(CLzmaEncHandle pp);
+void LzmaEnc_Finish(CLzmaEncHandle pp);
+
+#endif
diff --git a/contrib/libs/lzmasdk/LzmaLib.c b/contrib/libs/lzmasdk/LzmaLib.c new file mode 100644 index 0000000000..c10cf1a0f2 --- /dev/null +++ b/contrib/libs/lzmasdk/LzmaLib.c @@ -0,0 +1,40 @@ +/* LzmaLib.c -- LZMA library wrapper
+2015-06-13 : Igor Pavlov : Public domain */
+
+#include "Alloc.h"
+#include "LzmaDec.h"
+#include "LzmaEnc.h"
+#include "LzmaLib.h"
+
+MY_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
+ unsigned char *outProps, size_t *outPropsSize,
+ int level, /* 0 <= level <= 9, default = 5 */
+ unsigned dictSize, /* use (1 << N) or (3 << N). 4 KB < dictSize <= 128 MB */
+ int lc, /* 0 <= lc <= 8, default = 3 */
+ int lp, /* 0 <= lp <= 4, default = 0 */
+ int pb, /* 0 <= pb <= 4, default = 2 */
+ int fb, /* 5 <= fb <= 273, default = 32 */
+ int numThreads /* 1 or 2, default = 2 */
+)
+{
+ CLzmaEncProps props;
+ LzmaEncProps_Init(&props);
+ props.level = level;
+ props.dictSize = dictSize;
+ props.lc = lc;
+ props.lp = lp;
+ props.pb = pb;
+ props.fb = fb;
+ props.numThreads = numThreads;
+
+ return LzmaEncode(dest, destLen, src, srcLen, &props, outProps, outPropsSize, 0,
+ NULL, &g_Alloc, &g_Alloc);
+}
+
+
+MY_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t *srcLen,
+ const unsigned char *props, size_t propsSize)
+{
+ ELzmaStatus status;
+ return LzmaDecode(dest, destLen, src, srcLen, props, (unsigned)propsSize, LZMA_FINISH_ANY, &status, &g_Alloc);
+}
diff --git a/contrib/libs/lzmasdk/LzmaLib.h b/contrib/libs/lzmasdk/LzmaLib.h new file mode 100644 index 0000000000..5c35e53654 --- /dev/null +++ b/contrib/libs/lzmasdk/LzmaLib.h @@ -0,0 +1,131 @@ +/* LzmaLib.h -- LZMA library interface
+2013-01-18 : Igor Pavlov : Public domain */
+
+#ifndef __LZMA_LIB_H
+#define __LZMA_LIB_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+#define MY_STDAPI int MY_STD_CALL
+
+#define LZMA_PROPS_SIZE 5
+
+/*
+RAM requirements for LZMA:
+ for compression: (dictSize * 11.5 + 6 MB) + state_size
+ for decompression: dictSize + state_size
+ state_size = (4 + (1.5 << (lc + lp))) KB
+ by default (lc=3, lp=0), state_size = 16 KB.
+
+LZMA properties (5 bytes) format
+ Offset Size Description
+ 0 1 lc, lp and pb in encoded form.
+ 1 4 dictSize (little endian).
+*/
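+
+/*
+Worked example (added note): with the defaults lc=3, lp=0, pb=2, byte 0 is
+(pb * 5 + lp) * 9 + lc = (2 * 5 + 0) * 9 + 3 = 93 = 0x5D, the familiar first
+byte of most .lzma streams; bytes 1..4 hold dictSize in little-endian order
+(see LzmaEnc_WriteProperties in LzmaEnc.c).
+*/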
+
+/*
+LzmaCompress
+------------
+
+outPropsSize -
+ In: the pointer to the size of outProps buffer; *outPropsSize = LZMA_PROPS_SIZE = 5.
+ Out: the pointer to the size of written properties in outProps buffer; *outPropsSize = LZMA_PROPS_SIZE = 5.
+
+ LZMA Encoder will use default values for any parameter, if it is
+ -1 for any of: level, lc, lp, pb, fb, numThreads
+ 0 for dictSize
+
+level - compression level: 0 <= level <= 9;
+
+ level dictSize algo fb
+ 0: 16 KB 0 32
+ 1: 64 KB 0 32
+ 2: 256 KB 0 32
+ 3: 1 MB 0 32
+ 4: 4 MB 0 32
+ 5: 16 MB 1 32
+ 6: 32 MB 1 32
+ 7+: 64 MB 1 64
+
+ The default value for "level" is 5.
+
+ algo = 0 means fast method
+ algo = 1 means normal method
+
+dictSize - The dictionary size in bytes. The maximum value is
+ 128 MB = (1 << 27) bytes for 32-bit version
+ 1 GB = (1 << 30) bytes for 64-bit version
+ The default value is 16 MB = (1 << 24) bytes.
+ It's recommended to use a dictionary that is larger than 4 KB and
+ whose size can be calculated as (1 << N) or (3 << N).
+
+lc - The number of literal context bits (high bits of previous literal).
+ It can be in the range from 0 to 8. The default value is 3.
+ Sometimes lc=4 gives a gain for big files.
+
+lp - The number of literal pos bits (low bits of current position for literals).
+ It can be in the range from 0 to 4. The default value is 0.
+ The lp switch is intended for periodical data when the period is equal to 2^lp.
+ For example, for 32-bit (4 bytes) periodical data you can use lp=2. Often it's
+ better to set lc=0, if you change the lp switch.
+
+pb - The number of pos bits (low bits of current position).
+ It can be in the range from 0 to 4. The default value is 2.
+ The pb switch is intended for periodical data when the period is equal to 2^pb.
+
+fb - Word size (the number of fast bytes).
+ It can be in the range from 5 to 273. The default value is 32.
+ Usually, a bigger number gives a slightly better compression ratio and
+ a slower compression process.
+
+numThreads - The number of threads. 1 or 2. The default value is 2.
+ Fast mode (algo = 0) can use only 1 thread.
+
+Out:
+ destLen - processed output size
+Returns:
+ SZ_OK - OK
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_PARAM - Incorrect parameter
+ SZ_ERROR_OUTPUT_EOF - output buffer overflow
+ SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version)
+*/
+
+MY_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen,
+ unsigned char *outProps, size_t *outPropsSize, /* *outPropsSize must be = 5 */
+ int level, /* 0 <= level <= 9, default = 5 */
+ unsigned dictSize, /* default = (1 << 24) */
+ int lc, /* 0 <= lc <= 8, default = 3 */
+ int lp, /* 0 <= lp <= 4, default = 0 */
+ int pb, /* 0 <= pb <= 4, default = 2 */
+ int fb, /* 5 <= fb <= 273, default = 32 */
+ int numThreads /* 1 or 2, default = 2 */
+ );
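+
+/*
+Usage sketch (added note, not from the original header; the output buffer
+bound is a rough assumption): compress with default settings by passing -1,
+or 0 for dictSize, where the comments above say defaults apply.
+
+  unsigned char props[LZMA_PROPS_SIZE];
+  size_t propsSize = LZMA_PROPS_SIZE;
+  size_t destLen = srcLen + srcLen / 3 + 128;  // generous bound for incompressible input
+  int res = LzmaCompress(dest, &destLen, src, srcLen,
+      props, &propsSize,
+      5,               // level
+      0,               // dictSize: 0 selects the default (1 << 24)
+      -1, -1, -1, -1,  // lc, lp, pb, fb: defaults
+      1);              // numThreads: this build defines _7ZIP_ST, so single-threaded
+*/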
+
+/*
+LzmaUncompress
+--------------
+In:
+ dest - output data
+ destLen - output data size
+ src - input data
+ srcLen - input data size
+Out:
+ destLen - processed output size
+ srcLen - processed input size
+Returns:
+ SZ_OK - OK
+ SZ_ERROR_DATA - Data error
+ SZ_ERROR_MEM - Memory allocation error
+ SZ_ERROR_UNSUPPORTED - Unsupported properties
+ SZ_ERROR_INPUT_EOF - it needs more bytes in input buffer (src)
+*/
+
+MY_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, SizeT *srcLen,
+ const unsigned char *props, size_t propsSize);
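+
+/*
+Usage sketch (added note): decompressing a buffer produced by LzmaCompress.
+The raw stream holds neither the 5 props bytes nor the uncompressed size, so
+the caller must carry both (knownUncompressedSize is a placeholder here).
+
+  size_t outLen = knownUncompressedSize;
+  size_t inLen = compressedLen;      // on return: number of input bytes consumed
+  int res = LzmaUncompress(out, &outLen, compressed, &inLen,
+      props, LZMA_PROPS_SIZE);
+*/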
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/MtCoder.h b/contrib/libs/lzmasdk/MtCoder.h new file mode 100644 index 0000000000..603329d367 --- /dev/null +++ b/contrib/libs/lzmasdk/MtCoder.h @@ -0,0 +1,141 @@ +/* MtCoder.h -- Multi-thread Coder
+2018-07-04 : Igor Pavlov : Public domain */
+
+#ifndef __MT_CODER_H
+#define __MT_CODER_H
+
+#include "MtDec.h"
+
+EXTERN_C_BEGIN
+
+/*
+ if ( defined MTCODER__USE_WRITE_THREAD) : main thread writes all data blocks to output stream
+ if (not defined MTCODER__USE_WRITE_THREAD) : any coder thread can write data blocks to output stream
+*/
+/* #define MTCODER__USE_WRITE_THREAD */
+
+#ifndef _7ZIP_ST
+ #define MTCODER__GET_NUM_BLOCKS_FROM_THREADS(numThreads) ((numThreads) + (numThreads) / 8 + 1)
+ #define MTCODER__THREADS_MAX 64
+ #define MTCODER__BLOCKS_MAX (MTCODER__GET_NUM_BLOCKS_FROM_THREADS(MTCODER__THREADS_MAX) + 3)
+#else
+ #define MTCODER__THREADS_MAX 1
+ #define MTCODER__BLOCKS_MAX 1
+#endif
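+
+/*
+Worked example (added note): in the multi-threaded build, with the maximum of
+64 threads, MTCODER__GET_NUM_BLOCKS_FROM_THREADS(64) = 64 + 64/8 + 1 = 73, so
+MTCODER__BLOCKS_MAX = 73 + 3 = 76. This particular build passes -D_7ZIP_ST=1
+(see ya.make), so the single-thread values of 1 and 1 are what get compiled.
+*/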
+
+
+#ifndef _7ZIP_ST
+
+
+typedef struct
+{
+ ICompressProgress vt;
+ CMtProgress *mtProgress;
+ UInt64 inSize;
+ UInt64 outSize;
+} CMtProgressThunk;
+
+void MtProgressThunk_CreateVTable(CMtProgressThunk *p);
+
+#define MtProgressThunk_Init(p) { (p)->inSize = 0; (p)->outSize = 0; }
+
+
+struct _CMtCoder;
+
+
+typedef struct
+{
+ struct _CMtCoder *mtCoder;
+ unsigned index;
+ int stop;
+ Byte *inBuf;
+
+ CAutoResetEvent startEvent;
+ CThread thread;
+} CMtCoderThread;
+
+
+typedef struct
+{
+ SRes (*Code)(void *p, unsigned coderIndex, unsigned outBufIndex,
+ const Byte *src, size_t srcSize, int finished);
+ SRes (*Write)(void *p, unsigned outBufIndex);
+} IMtCoderCallback2;
+
+
+typedef struct
+{
+ SRes res;
+ unsigned bufIndex;
+ BoolInt finished;
+} CMtCoderBlock;
+
+
+typedef struct _CMtCoder
+{
+ /* input variables */
+
+ size_t blockSize; /* size of input block */
+ unsigned numThreadsMax;
+ UInt64 expectedDataSize;
+
+ ISeqInStream *inStream;
+ const Byte *inData;
+ size_t inDataSize;
+
+ ICompressProgress *progress;
+ ISzAllocPtr allocBig;
+
+ IMtCoderCallback2 *mtCallback;
+ void *mtCallbackObject;
+
+
+ /* internal variables */
+
+ size_t allocatedBufsSize;
+
+ CAutoResetEvent readEvent;
+ CSemaphore blocksSemaphore;
+
+ BoolInt stopReading;
+ SRes readRes;
+
+ #ifdef MTCODER__USE_WRITE_THREAD
+ CAutoResetEvent writeEvents[MTCODER__BLOCKS_MAX];
+ #else
+ CAutoResetEvent finishedEvent;
+ SRes writeRes;
+ unsigned writeIndex;
+ Byte ReadyBlocks[MTCODER__BLOCKS_MAX];
+ LONG numFinishedThreads;
+ #endif
+
+ unsigned numStartedThreadsLimit;
+ unsigned numStartedThreads;
+
+ unsigned numBlocksMax;
+ unsigned blockIndex;
+ UInt64 readProcessed;
+
+ CCriticalSection cs;
+
+ unsigned freeBlockHead;
+ unsigned freeBlockList[MTCODER__BLOCKS_MAX];
+
+ CMtProgress mtProgress;
+ CMtCoderBlock blocks[MTCODER__BLOCKS_MAX];
+ CMtCoderThread threads[MTCODER__THREADS_MAX];
+} CMtCoder;
+
+
+void MtCoder_Construct(CMtCoder *p);
+void MtCoder_Destruct(CMtCoder *p);
+SRes MtCoder_Code(CMtCoder *p);
+
+
+#endif
+
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/MtDec.h b/contrib/libs/lzmasdk/MtDec.h new file mode 100644 index 0000000000..9864cc8741 --- /dev/null +++ b/contrib/libs/lzmasdk/MtDec.h @@ -0,0 +1,201 @@ +/* MtDec.h -- Multi-thread Decoder
+2018-07-04 : Igor Pavlov : Public domain */
+
+#ifndef __MT_DEC_H
+#define __MT_DEC_H
+
+#include "7zTypes.h"
+
+#ifndef _7ZIP_ST
+#include "Threads.h"
+#endif
+
+EXTERN_C_BEGIN
+
+#ifndef _7ZIP_ST
+
+#ifndef _7ZIP_ST
+ #define MTDEC__THREADS_MAX 32
+#else
+ #define MTDEC__THREADS_MAX 1
+#endif
+
+
+typedef struct
+{
+ ICompressProgress *progress;
+ SRes res;
+ UInt64 totalInSize;
+ UInt64 totalOutSize;
+ CCriticalSection cs;
+} CMtProgress;
+
+void MtProgress_Init(CMtProgress *p, ICompressProgress *progress);
+SRes MtProgress_Progress_ST(CMtProgress *p);
+SRes MtProgress_ProgressAdd(CMtProgress *p, UInt64 inSize, UInt64 outSize);
+SRes MtProgress_GetError(CMtProgress *p);
+void MtProgress_SetError(CMtProgress *p, SRes res);
+
+struct _CMtDec;
+
+typedef struct
+{
+ struct _CMtDec *mtDec;
+ unsigned index;
+ void *inBuf;
+
+ size_t inDataSize_Start; // size of input data in start block
+ UInt64 inDataSize; // total size of input data in all blocks
+
+ CThread thread;
+ CAutoResetEvent canRead;
+ CAutoResetEvent canWrite;
+ void *allocaPtr;
+} CMtDecThread;
+
+void MtDecThread_FreeInBufs(CMtDecThread *t);
+
+
+typedef enum
+{
+ MTDEC_PARSE_CONTINUE, // continue this block with more input data
+ MTDEC_PARSE_OVERFLOW, // MT buffers overflow, need to switch to single-thread
+ MTDEC_PARSE_NEW, // new block
+ MTDEC_PARSE_END // end of block threading. But we still can return to threading after Write(&needContinue)
+} EMtDecParseState;
+
+typedef struct
+{
+ // in
+ int startCall;
+ const Byte *src;
+ size_t srcSize;
+ // in : (srcSize == 0) is allowed
+ // out : it's allowed to return less than was actually used ?
+ int srcFinished;
+
+ // out
+ EMtDecParseState state;
+ BoolInt canCreateNewThread;
+ UInt64 outPos; // check it (size_t)
+} CMtDecCallbackInfo;
+
+
+typedef struct
+{
+ void (*Parse)(void *p, unsigned coderIndex, CMtDecCallbackInfo *ci);
+
+ // PreCode() and Code():
+ // (SRes_return_result != SZ_OK) means stop decoding, no need for more blocks
+ SRes (*PreCode)(void *p, unsigned coderIndex);
+ SRes (*Code)(void *p, unsigned coderIndex,
+ const Byte *src, size_t srcSize, int srcFinished,
+ UInt64 *inCodePos, UInt64 *outCodePos, int *stop);
+ // stop - means stop another Code calls
+
+
+ /* Write() must be called, if Parse() was called
+ set (needWrite) if
+ {
+ && (was not interrupted by progress)
+ && (was not interrupted in previous block)
+ }
+
+ out:
+ if (*needContinue), the decoder still needs to continue decoding with a new iteration,
+ even after MTDEC_PARSE_END
+ if (*canRecode), we didn't flush the current block data, so we can still decode the current block later.
+ */
+ SRes (*Write)(void *p, unsigned coderIndex,
+ BoolInt needWriteToStream,
+ const Byte *src, size_t srcSize,
+ // int srcFinished,
+ BoolInt *needContinue,
+ BoolInt *canRecode);
+} IMtDecCallback;
+
+
+
+typedef struct _CMtDec
+{
+ /* input variables */
+
+ size_t inBufSize; /* size of input block */
+ unsigned numThreadsMax;
+ // size_t inBlockMax;
+ unsigned numThreadsMax_2;
+
+ ISeqInStream *inStream;
+ // const Byte *inData;
+ // size_t inDataSize;
+
+ ICompressProgress *progress;
+ ISzAllocPtr alloc;
+
+ IMtDecCallback *mtCallback;
+ void *mtCallbackObject;
+
+
+ /* internal variables */
+
+ size_t allocatedBufsSize;
+
+ BoolInt exitThread;
+ WRes exitThreadWRes;
+
+ UInt64 blockIndex;
+ BoolInt isAllocError;
+ BoolInt overflow;
+ SRes threadingErrorSRes;
+
+ BoolInt needContinue;
+
+ // CAutoResetEvent finishedEvent;
+
+ SRes readRes;
+ SRes codeRes;
+
+ BoolInt wasInterrupted;
+
+ unsigned numStartedThreads_Limit;
+ unsigned numStartedThreads;
+
+ Byte *crossBlock;
+ size_t crossStart;
+ size_t crossEnd;
+ UInt64 readProcessed;
+ BoolInt readWasFinished;
+ UInt64 inProcessed;
+
+ unsigned filledThreadStart;
+ unsigned numFilledThreads;
+
+ #ifndef _7ZIP_ST
+ BoolInt needInterrupt;
+ UInt64 interruptIndex;
+ CMtProgress mtProgress;
+ CMtDecThread threads[MTDEC__THREADS_MAX];
+ #endif
+} CMtDec;
+
+
+void MtDec_Construct(CMtDec *p);
+void MtDec_Destruct(CMtDec *p);
+
+/*
+MtDec_Code() returns:
+ SZ_OK - in most cases
+ MY_SRes_HRESULT_FROM_WRes(WRes_error) - in case of unexpected error in threading function
+*/
+
+SRes MtDec_Code(CMtDec *p);
+Byte *MtDec_GetCrossBuff(CMtDec *p);
+
+int MtDec_PrepareRead(CMtDec *p);
+const Byte *MtDec_Read(CMtDec *p, size_t *inLim);
+
+#endif
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/Precomp.h b/contrib/libs/lzmasdk/Precomp.h new file mode 100644 index 0000000000..edb5814439 --- /dev/null +++ b/contrib/libs/lzmasdk/Precomp.h @@ -0,0 +1,10 @@ +/* Precomp.h -- StdAfx
+2013-11-12 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_PRECOMP_H
+#define __7Z_PRECOMP_H
+
+#include "Compiler.h"
+/* #include "7zTypes.h" */
+
+#endif
diff --git a/contrib/libs/lzmasdk/RotateDefs.h b/contrib/libs/lzmasdk/RotateDefs.h new file mode 100644 index 0000000000..6c790e791e --- /dev/null +++ b/contrib/libs/lzmasdk/RotateDefs.h @@ -0,0 +1,30 @@ +/* RotateDefs.h -- Rotate functions
+2015-03-25 : Igor Pavlov : Public domain */
+
+#ifndef __ROTATE_DEFS_H
+#define __ROTATE_DEFS_H
+
+#ifdef _MSC_VER
+
+#include <stdlib.h>
+
+/* don't use _rotl with MINGW. It can insert a slow call to a function. */
+
+/* #if (_MSC_VER >= 1200) */
+#pragma intrinsic(_rotl)
+#pragma intrinsic(_rotr)
+/* #endif */
+
+#define rotlFixed(x, n) _rotl((x), (n))
+#define rotrFixed(x, n) _rotr((x), (n))
+
+#else
+
+/* modern compilers can translate these macros to fast rotate instructions. */
+
+#define rotlFixed(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
+#define rotrFixed(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
+
+#endif
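+
+/* Example (added note): for 32-bit values, rotlFixed(0x80000001u, 1) == 0x00000003
+ and rotrFixed(0x00000001u, 1) == 0x80000000. */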
+
+#endif
diff --git a/contrib/libs/lzmasdk/Sha256.c b/contrib/libs/lzmasdk/Sha256.c new file mode 100644 index 0000000000..90994e5abb --- /dev/null +++ b/contrib/libs/lzmasdk/Sha256.c @@ -0,0 +1,248 @@ +/* Crypto/Sha256.c -- SHA-256 Hash
+2017-04-03 : Igor Pavlov : Public domain
+This code is based on public domain code from Wei Dai's Crypto++ library. */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+#include "CpuArch.h"
+#include "RotateDefs.h"
+#include "Sha256.h"
+
+/* define it for speed optimization */
+#ifndef _SFX
+#define _SHA256_UNROLL
+#define _SHA256_UNROLL2
+#endif
+
+/* #define _SHA256_UNROLL2 */
+
+void Sha256_Init(CSha256 *p)
+{
+ p->state[0] = 0x6a09e667;
+ p->state[1] = 0xbb67ae85;
+ p->state[2] = 0x3c6ef372;
+ p->state[3] = 0xa54ff53a;
+ p->state[4] = 0x510e527f;
+ p->state[5] = 0x9b05688c;
+ p->state[6] = 0x1f83d9ab;
+ p->state[7] = 0x5be0cd19;
+ p->count = 0;
+}
+
+#define S0(x) (rotrFixed(x, 2) ^ rotrFixed(x,13) ^ rotrFixed(x, 22))
+#define S1(x) (rotrFixed(x, 6) ^ rotrFixed(x,11) ^ rotrFixed(x, 25))
+#define s0(x) (rotrFixed(x, 7) ^ rotrFixed(x,18) ^ (x >> 3))
+#define s1(x) (rotrFixed(x,17) ^ rotrFixed(x,19) ^ (x >> 10))
+
+#define blk0(i) (W[i])
+#define blk2(i) (W[i] += s1(W[((i)-2)&15]) + W[((i)-7)&15] + s0(W[((i)-15)&15]))
+
+#define Ch(x,y,z) (z^(x&(y^z)))
+#define Maj(x,y,z) ((x&y)|(z&(x|y)))
+
+#ifdef _SHA256_UNROLL2
+
+#define R(a,b,c,d,e,f,g,h, i) \
+ h += S1(e) + Ch(e,f,g) + K[(i)+(size_t)(j)] + (j ? blk2(i) : blk0(i)); \
+ d += h; \
+ h += S0(a) + Maj(a, b, c)
+
+#define RX_8(i) \
+ R(a,b,c,d,e,f,g,h, i); \
+ R(h,a,b,c,d,e,f,g, i+1); \
+ R(g,h,a,b,c,d,e,f, i+2); \
+ R(f,g,h,a,b,c,d,e, i+3); \
+ R(e,f,g,h,a,b,c,d, i+4); \
+ R(d,e,f,g,h,a,b,c, i+5); \
+ R(c,d,e,f,g,h,a,b, i+6); \
+ R(b,c,d,e,f,g,h,a, i+7)
+
+#define RX_16 RX_8(0); RX_8(8);
+
+#else
+
+#define a(i) T[(0-(i))&7]
+#define b(i) T[(1-(i))&7]
+#define c(i) T[(2-(i))&7]
+#define d(i) T[(3-(i))&7]
+#define e(i) T[(4-(i))&7]
+#define f(i) T[(5-(i))&7]
+#define g(i) T[(6-(i))&7]
+#define h(i) T[(7-(i))&7]
+
+#define R(i) \
+ h(i) += S1(e(i)) + Ch(e(i),f(i),g(i)) + K[(i)+(size_t)(j)] + (j ? blk2(i) : blk0(i)); \
+ d(i) += h(i); \
+ h(i) += S0(a(i)) + Maj(a(i), b(i), c(i)) \
+
+#ifdef _SHA256_UNROLL
+
+#define RX_8(i) R(i+0); R(i+1); R(i+2); R(i+3); R(i+4); R(i+5); R(i+6); R(i+7);
+#define RX_16 RX_8(0); RX_8(8);
+
+#else
+
+#define RX_16 unsigned i; for (i = 0; i < 16; i++) { R(i); }
+
+#endif
+
+#endif
+
+static const UInt32 K[64] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+static void Sha256_WriteByteBlock(CSha256 *p)
+{
+ UInt32 W[16];
+ unsigned j;
+ UInt32 *state;
+
+ #ifdef _SHA256_UNROLL2
+ UInt32 a,b,c,d,e,f,g,h;
+ #else
+ UInt32 T[8];
+ #endif
+
+ for (j = 0; j < 16; j += 4)
+ {
+ const Byte *ccc = p->buffer + j * 4;
+ W[j ] = GetBe32(ccc);
+ W[j + 1] = GetBe32(ccc + 4);
+ W[j + 2] = GetBe32(ccc + 8);
+ W[j + 3] = GetBe32(ccc + 12);
+ }
+
+ state = p->state;
+
+ #ifdef _SHA256_UNROLL2
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ f = state[5];
+ g = state[6];
+ h = state[7];
+ #else
+ for (j = 0; j < 8; j++)
+ T[j] = state[j];
+ #endif
+
+ for (j = 0; j < 64; j += 16)
+ {
+ RX_16
+ }
+
+ #ifdef _SHA256_UNROLL2
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ state[5] += f;
+ state[6] += g;
+ state[7] += h;
+ #else
+ for (j = 0; j < 8; j++)
+ state[j] += T[j];
+ #endif
+
+ /* Wipe variables */
+ /* memset(W, 0, sizeof(W)); */
+ /* memset(T, 0, sizeof(T)); */
+}
+
+#undef S0
+#undef S1
+#undef s0
+#undef s1
+
+void Sha256_Update(CSha256 *p, const Byte *data, size_t size)
+{
+ if (size == 0)
+ return;
+
+ {
+ unsigned pos = (unsigned)p->count & 0x3F;
+ unsigned num;
+
+ p->count += size;
+
+ num = 64 - pos;
+ if (num > size)
+ {
+ memcpy(p->buffer + pos, data, size);
+ return;
+ }
+
+ size -= num;
+ memcpy(p->buffer + pos, data, num);
+ data += num;
+ }
+
+ for (;;)
+ {
+ Sha256_WriteByteBlock(p);
+ if (size < 64)
+ break;
+ size -= 64;
+ memcpy(p->buffer, data, 64);
+ data += 64;
+ }
+
+ if (size != 0)
+ memcpy(p->buffer, data, size);
+}
+
+void Sha256_Final(CSha256 *p, Byte *digest)
+{
+ unsigned pos = (unsigned)p->count & 0x3F;
+ unsigned i;
+
+ p->buffer[pos++] = 0x80;
+
+ while (pos != (64 - 8))
+ {
+ pos &= 0x3F;
+ if (pos == 0)
+ Sha256_WriteByteBlock(p);
+ p->buffer[pos++] = 0;
+ }
+
+ {
+ UInt64 numBits = (p->count << 3);
+ SetBe32(p->buffer + 64 - 8, (UInt32)(numBits >> 32));
+ SetBe32(p->buffer + 64 - 4, (UInt32)(numBits));
+ }
+
+ Sha256_WriteByteBlock(p);
+
+ for (i = 0; i < 8; i += 2)
+ {
+ UInt32 v0 = p->state[i];
+ UInt32 v1 = p->state[i + 1];
+ SetBe32(digest , v0);
+ SetBe32(digest + 4, v1);
+ digest += 8;
+ }
+
+ Sha256_Init(p);
+}
diff --git a/contrib/libs/lzmasdk/Sha256.h b/contrib/libs/lzmasdk/Sha256.h new file mode 100644 index 0000000000..7f17ccf9c9 --- /dev/null +++ b/contrib/libs/lzmasdk/Sha256.h @@ -0,0 +1,26 @@ +/* Sha256.h -- SHA-256 Hash
+2013-01-18 : Igor Pavlov : Public domain */
+
+#ifndef __CRYPTO_SHA256_H
+#define __CRYPTO_SHA256_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+#define SHA256_DIGEST_SIZE 32
+
+typedef struct
+{
+ UInt32 state[8];
+ UInt64 count;
+ Byte buffer[64];
+} CSha256;
+
+void Sha256_Init(CSha256 *p);
+void Sha256_Update(CSha256 *p, const Byte *data, size_t size);
+void Sha256_Final(CSha256 *p, Byte *digest);
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/Threads.h b/contrib/libs/lzmasdk/Threads.h new file mode 100644 index 0000000000..f913241aea --- /dev/null +++ b/contrib/libs/lzmasdk/Threads.h @@ -0,0 +1,68 @@ +/* Threads.h -- multithreading library
+2017-06-18 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_THREADS_H
+#define __7Z_THREADS_H
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+WRes HandlePtr_Close(HANDLE *h);
+WRes Handle_WaitObject(HANDLE h);
+
+typedef HANDLE CThread;
+#define Thread_Construct(p) *(p) = NULL
+#define Thread_WasCreated(p) (*(p) != NULL)
+#define Thread_Close(p) HandlePtr_Close(p)
+#define Thread_Wait(p) Handle_WaitObject(*(p))
+
+typedef
+#ifdef UNDER_CE
+ DWORD
+#else
+ unsigned
+#endif
+ THREAD_FUNC_RET_TYPE;
+
+#define THREAD_FUNC_CALL_TYPE MY_STD_CALL
+#define THREAD_FUNC_DECL THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE
+typedef THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE * THREAD_FUNC_TYPE)(void *);
+WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param);
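+
+/*
+Usage sketch (added note): starting and joining a worker thread with these
+wrappers (Windows build; WorkerFunc and arg are placeholders).
+
+  static THREAD_FUNC_DECL WorkerFunc(void *param)
+  {
+    // ... do the work on param ...
+    return 0;
+  }
+
+  CThread t;
+  Thread_Construct(&t);
+  if (Thread_Create(&t, WorkerFunc, arg) == 0)   // WRes 0 means success
+  {
+    Thread_Wait(&t);
+    Thread_Close(&t);
+  }
+*/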
+
+typedef HANDLE CEvent;
+typedef CEvent CAutoResetEvent;
+typedef CEvent CManualResetEvent;
+#define Event_Construct(p) *(p) = NULL
+#define Event_IsCreated(p) (*(p) != NULL)
+#define Event_Close(p) HandlePtr_Close(p)
+#define Event_Wait(p) Handle_WaitObject(*(p))
+WRes Event_Set(CEvent *p);
+WRes Event_Reset(CEvent *p);
+WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled);
+WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p);
+WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled);
+WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p);
+
+typedef HANDLE CSemaphore;
+#define Semaphore_Construct(p) *(p) = NULL
+#define Semaphore_IsCreated(p) (*(p) != NULL)
+#define Semaphore_Close(p) HandlePtr_Close(p)
+#define Semaphore_Wait(p) Handle_WaitObject(*(p))
+WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
+WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
+WRes Semaphore_Release1(CSemaphore *p);
+
+typedef CRITICAL_SECTION CCriticalSection;
+WRes CriticalSection_Init(CCriticalSection *p);
+#define CriticalSection_Delete(p) DeleteCriticalSection(p)
+#define CriticalSection_Enter(p) EnterCriticalSection(p)
+#define CriticalSection_Leave(p) LeaveCriticalSection(p)
+
+EXTERN_C_END
+
+#endif
diff --git a/contrib/libs/lzmasdk/ya.make new file mode 100644 index 0000000000..db0a55788d --- /dev/null +++ b/contrib/libs/lzmasdk/ya.make @@ -0,0 +1,37 @@ +LIBRARY()
+
+LICENSE(Public-Domain)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+OWNER(
+ g:contrib
+ g:cpp-contrib
+)
+
+# https://www.7-zip.org/sdk.html
+VERSION(19.00)
+
+CFLAGS(-D_7ZIP_ST=1)
+
+NO_UTIL()
+
+SRCS(
+ 7zStream.c
+ Aes.c
+ AesOpt.c
+ Alloc.c
+ Bra.c
+ Bra86.c
+ BraIA64.c
+ CpuArch.c
+ LzFind.c
+ Lzma2Dec.c
+ Lzma2Enc.c
+ LzmaDec.c
+ LzmaEnc.c
+ LzmaLib.c
+ Sha256.c
+)
+
+END()