author     thegeorg <thegeorg@yandex-team.ru>    2022-05-17 12:11:49 +0300
committer  thegeorg <thegeorg@yandex-team.ru>    2022-05-17 12:11:49 +0300
commit     2037874aa0fb0efca88322b14290deab89fccbd4 (patch)
tree       8a9d856da3ea564b9e06914a56f7f4dacb0e75f7
parent     7c645e66a7bdae9d6c54d50bf87259c4ffc33e5b (diff)
download   ydb-2037874aa0fb0efca88322b14290deab89fccbd4.tar.gz
Update contrib/libs/snappy to 1.1.9
ref:8e094c2e0f44b866d354257c6a902b6d4394b8f0
-rw-r--r--  contrib/libs/snappy/.yandex_meta/devtools.licenses.report  |    8
-rw-r--r--  contrib/libs/snappy/CONTRIBUTING.md                        |   20
-rw-r--r--  contrib/libs/snappy/NEWS                                   |    6
-rw-r--r--  contrib/libs/snappy/README.md                              |   54
-rw-r--r--  contrib/libs/snappy/config-linux.h                         |   16
-rw-r--r--  contrib/libs/snappy/snappy-internal.h                      |  134
-rw-r--r--  contrib/libs/snappy/snappy-sinksource.cc                   |   35
-rw-r--r--  contrib/libs/snappy/snappy-sinksource.h                    |   22
-rw-r--r--  contrib/libs/snappy/snappy-stubs-internal.cc               |    2
-rw-r--r--  contrib/libs/snappy/snappy-stubs-internal.h                |  484
-rw-r--r--  contrib/libs/snappy/snappy-stubs-public.h                  |   16
-rw-r--r--  contrib/libs/snappy/snappy.cc                              | 1292
-rw-r--r--  contrib/libs/snappy/snappy.h                               |    7
13 files changed, 1309 insertions, 787 deletions
diff --git a/contrib/libs/snappy/.yandex_meta/devtools.licenses.report b/contrib/libs/snappy/.yandex_meta/devtools.licenses.report
index 42acfd0bf5..7fd62f702f 100644
--- a/contrib/libs/snappy/.yandex_meta/devtools.licenses.report
+++ b/contrib/libs/snappy/.yandex_meta/devtools.licenses.report
@@ -96,7 +96,7 @@ BELONGS ya.make
Match type : NOTICE
Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/generic-cla.LICENSE
Files with this license:
- CONTRIBUTING.md [6:6]
+ CONTRIBUTING.md [26:26]
KEEP BSD-3-Clause 6aa235708ac9f5dd8e5c6ac415fc5837
BELONGS ya.make
@@ -143,7 +143,7 @@ BELONGS ya.make
Match type : NOTICE
Links : http://www.apache.org/licenses/, http://www.apache.org/licenses/LICENSE-2.0, https://spdx.org/licenses/Apache-2.0
Files with this license:
- NEWS [178:178]
+ NEWS [184:184]
SKIP LicenseRef-scancode-unknown-license-reference bfebd3ac57e8aa2b8b978019ed709cd1
BELONGS ya.make
@@ -156,7 +156,7 @@ BELONGS ya.make
Match type : INTRO
Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/unknown-license-reference.LICENSE
Files with this license:
- README.md [23:23]
+ README.md [25:25]
SKIP LicenseRef-scancode-generic-cla d72fcd21b18e44b666a94e6225ed43eb
BELONGS ya.make
@@ -169,7 +169,7 @@ BELONGS ya.make
Match type : NOTICE
Links : https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data/licenses/generic-cla.LICENSE
Files with this license:
- CONTRIBUTING.md [8:9]
+ CONTRIBUTING.md [28:29]
KEEP BSD-3-Clause f8141230e736a81272884d33c51c5ad4
BELONGS ya.make
diff --git a/contrib/libs/snappy/CONTRIBUTING.md b/contrib/libs/snappy/CONTRIBUTING.md
index c7b84516c2..d0ce551527 100644
--- a/contrib/libs/snappy/CONTRIBUTING.md
+++ b/contrib/libs/snappy/CONTRIBUTING.md
@@ -3,6 +3,26 @@
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
+## Project Goals
+
+In addition to the aims listed at the top of the [README](README.md) Snappy
+explicitly supports the following:
+
+1. C++11
+2. Clang (gcc and MSVC are best-effort).
+3. Low level optimizations (e.g. assembly or equivalent intrinsics) for:
+ 1. [x86](https://en.wikipedia.org/wiki/X86)
+ 2. [x86-64](https://en.wikipedia.org/wiki/X86-64)
+ 3. ARMv7 (32-bit)
+ 4. ARMv8 (AArch64)
+4. Supports only the Snappy compression scheme as described in
+ [format_description.txt](format_description.txt).
+5. CMake for building
+
+Changes adding features or dependencies outside of the core area of focus listed
+above might not be accepted. If in doubt post a message to the
+[Snappy discussion mailing list](https://groups.google.com/g/snappy-compression).
+
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
diff --git a/contrib/libs/snappy/NEWS b/contrib/libs/snappy/NEWS
index 98048dbdd8..931a5e13fd 100644
--- a/contrib/libs/snappy/NEWS
+++ b/contrib/libs/snappy/NEWS
@@ -1,3 +1,9 @@
+Snappy v1.1.9, May 4th 2021:
+
+ * Performance improvements.
+
+ * Google Test and Google Benchmark are now bundled in third_party/.
+
Snappy v1.1.8, January 15th 2020:
* Small performance improvements.
diff --git a/contrib/libs/snappy/README.md b/contrib/libs/snappy/README.md
index cef4017492..7917d1bf05 100644
--- a/contrib/libs/snappy/README.md
+++ b/contrib/libs/snappy/README.md
@@ -1,5 +1,7 @@
Snappy, a fast compressor/decompressor.
+[![Build Status](https://travis-ci.org/google/snappy.svg?branch=master)](https://travis-ci.org/google/snappy)
+[![Build status](https://ci.appveyor.com/api/projects/status/t9nubcqkwo8rw8yn/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb)
Introduction
============
@@ -69,6 +71,7 @@ You need the CMake version specified in [CMakeLists.txt](./CMakeLists.txt)
or later to build:
```bash
+git submodule update --init
mkdir build
cd build && cmake ../ && make
```
@@ -107,42 +110,31 @@ information.
Tests and benchmarks
====================
-When you compile Snappy, snappy_unittest is compiled in addition to the
-library itself. You do not need it to use the compressor from your own library,
-but it contains several useful components for Snappy development.
+When you compile Snappy, the following binaries are compiled in addition to the
+library itself. You do not need them to use the compressor from your own
+library, but they are useful for Snappy development.
-First of all, it contains unit tests, verifying correctness on your machine in
-various scenarios. If you want to change or optimize Snappy, please run the
-tests to verify you have not broken anything. Note that if you have the
-Google Test library installed, unit test behavior (especially failures) will be
-significantly more user-friendly. You can find Google Test at
+* `snappy_benchmark` contains microbenchmarks used to tune compression and
+ decompression performance.
+* `snappy_unittests` contains unit tests, verifying correctness on your machine
+ in various scenarios.
+* `snappy_test_tool` can benchmark Snappy against a few other compression
+ libraries (zlib, LZO, LZF, and QuickLZ), if they were detected at configure
+ time. To benchmark using a given file, give the compression algorithm you want
+ to test Snappy against (e.g. --zlib) and then a list of one or more file names
+ on the command line.
- https://github.com/google/googletest
+If you want to change or optimize Snappy, please run the tests and benchmarks to
+verify you have not broken anything.
-You probably also want the gflags library for handling of command-line flags;
-you can find it at
-
- https://gflags.github.io/gflags/
-
-In addition to the unit tests, snappy contains microbenchmarks used to
-tune compression and decompression performance. These are automatically run
-before the unit tests, but you can disable them using the flag
---run_microbenchmarks=false if you have gflags installed (otherwise you will
-need to edit the source).
-
-Finally, snappy can benchmark Snappy against a few other compression libraries
-(zlib, LZO, LZF, and QuickLZ), if they were detected at configure time.
-To benchmark using a given file, give the compression algorithm you want to test
-Snappy against (e.g. --zlib) and then a list of one or more file names on the
-command line. The testdata/ directory contains the files used by the
-microbenchmark, which should provide a reasonably balanced starting point for
-benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
-are used to verify correctness in the presence of corrupted data in the unit
-test.)
+The testdata/ directory contains the files used by the microbenchmarks, which
+should provide a reasonably balanced starting point for benchmarking. (Note that
+baddata[1-3].snappy are not intended as benchmarks; they are used to verify
+correctness in the presence of corrupted data in the unit test.)
Contact
=======
-Snappy is distributed through GitHub. For the latest version, a bug tracker,
-and other information, see https://github.com/google/snappy.
+Snappy is distributed through GitHub. For the latest version and other
+information, see https://github.com/google/snappy.
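
The README points out that none of the test binaries are needed to use the compressor from your own code. As a minimal sketch of that, the round trip below uses the std::string overloads of snappy::Compress() and snappy::Uncompress() from snappy.h (upstream signatures; this vendored copy may substitute TString for std::string):

```cpp
#include <cassert>
#include <string>

#include "snappy.h"

int main() {
  const std::string original(1000, 'a');  // Highly compressible input.

  // Compress() returns the size of the compressed representation.
  std::string compressed;
  size_t compressed_size =
      snappy::Compress(original.data(), original.size(), &compressed);
  assert(compressed_size == compressed.size());

  // Uncompress() returns false if the input is corrupt.
  std::string restored;
  bool ok =
      snappy::Uncompress(compressed.data(), compressed.size(), &restored);
  assert(ok && restored == original);
  return 0;
}
```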
diff --git a/contrib/libs/snappy/config-linux.h b/contrib/libs/snappy/config-linux.h
index f1a066fb97..d540685562 100644
--- a/contrib/libs/snappy/config-linux.h
+++ b/contrib/libs/snappy/config-linux.h
@@ -1,35 +1,29 @@
#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_CMAKE_CONFIG_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_CMAKE_CONFIG_H_
+/* Define to 1 if the compiler supports __attribute__((always_inline)). */
+/* #undef HAVE_ATTRIBUTE_ALWAYS_INLINE */
+
/* Define to 1 if the compiler supports __builtin_ctz and friends. */
#define HAVE_BUILTIN_CTZ 1
/* Define to 1 if the compiler supports __builtin_expect. */
#define HAVE_BUILTIN_EXPECT 1
-/* Define to 1 if you have the <byteswap.h> header file. */
-#define HAVE_BYTESWAP_H 1
-
/* Define to 1 if you have a definition for mmap() in <sys/mman.h>. */
#define HAVE_FUNC_MMAP 1
/* Define to 1 if you have a definition for sysconf() in <unistd.h>. */
#define HAVE_FUNC_SYSCONF 1
-/* Define to 1 to use the gflags package for command-line parsing. */
-/* #undef HAVE_GFLAGS */
-
-/* Define to 1 if you have Google Test. */
-/* #undef HAVE_GTEST */
-
/* Define to 1 if you have the `lzo2' library (-llzo2). */
/* #undef HAVE_LIBLZO2 */
/* Define to 1 if you have the `z' library (-lz). */
/* #undef HAVE_LIBZ */
-/* Define to 1 if you have the <sys/endian.h> header file. */
-/* #undef HAVE_SYS_ENDIAN_H */
+/* Define to 1 if you have the `lz4' library (-llz4). */
+/* #undef HAVE_LIBLZ4 */
/* Define to 1 if you have the <sys/mman.h> header file. */
#define HAVE_SYS_MMAN_H 1
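
config-linux.h only flips feature macros; the sources then pick between an intrinsic and a portable fallback at compile time. A hedged illustration of that gating pattern follows; CountTrailingZeros32 is a made-up helper for the example, not a snappy symbol:

```cpp
#include <cassert>
#include <cstdint>

#if defined(HAVE_BUILTIN_CTZ)
// With the macro set, lean on the compiler intrinsic (a single tzcnt/bsf).
inline int CountTrailingZeros32(uint32_t x) {
  assert(x != 0);
  return __builtin_ctz(x);
}
#else
// Portable fallback: scan bit by bit from the least significant end.
inline int CountTrailingZeros32(uint32_t x) {
  assert(x != 0);
  int n = 0;
  while ((x & 1u) == 0) {
    x >>= 1;
    ++n;
  }
  return n;
}
#endif
```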
diff --git a/contrib/libs/snappy/snappy-internal.h b/contrib/libs/snappy/snappy-internal.h
index 1e1c307fef..720ccd8282 100644
--- a/contrib/libs/snappy/snappy-internal.h
+++ b/contrib/libs/snappy/snappy-internal.h
@@ -46,16 +46,16 @@ class WorkingMemory {
// Allocates and clears a hash table using memory in "*this",
// stores the number of buckets in "*table_size" and returns a pointer to
// the base of the hash table.
- uint16* GetHashTable(size_t fragment_size, int* table_size) const;
+ uint16_t* GetHashTable(size_t fragment_size, int* table_size) const;
char* GetScratchInput() const { return input_; }
char* GetScratchOutput() const { return output_; }
private:
- char* mem_; // the allocated memory, never nullptr
- size_t size_; // the size of the allocated memory, never 0
- uint16* table_; // the pointer to the hashtable
- char* input_; // the pointer to the input scratch buffer
- char* output_; // the pointer to the output scratch buffer
+ char* mem_; // the allocated memory, never nullptr
+ size_t size_; // the size of the allocated memory, never 0
+ uint16_t* table_; // the pointer to the hashtable
+ char* input_; // the pointer to the input scratch buffer
+ char* output_; // the pointer to the output scratch buffer
// No copying
WorkingMemory(const WorkingMemory&);
@@ -76,7 +76,7 @@ class WorkingMemory {
char* CompressFragment(const char* input,
size_t input_length,
char* op,
- uint16* table,
+ uint16_t* table,
const int table_size);
// Find the largest n such that
@@ -89,12 +89,18 @@ char* CompressFragment(const char* input,
// Does not read *(s1 + (s2_limit - s2)) or beyond.
// Requires that s2_limit >= s2.
//
+// In addition populate *data with the next 5 bytes from the end of the match.
+// This is only done if 8 bytes are available (s2_limit - s2 >= 8). The point is
+// that on some arch's this can be done faster in this routine than subsequent
+// loading from s2 + n.
+//
// Separate implementation for 64-bit, little-endian cpus.
#if !defined(SNAPPY_IS_BIG_ENDIAN) && \
- (defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM))
+ (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || defined(ARCH_ARM))
static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
const char* s2,
- const char* s2_limit) {
+ const char* s2_limit,
+ uint64_t* data) {
assert(s2_limit >= s2);
size_t matched = 0;
@@ -103,12 +109,71 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
// uncommon code paths that determine, without extra effort, whether the match
// length is less than 8. In short, we are hoping to avoid a conditional
// branch, and perhaps get better code layout from the C++ compiler.
- if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
- uint64 a1 = UNALIGNED_LOAD64(s1);
- uint64 a2 = UNALIGNED_LOAD64(s2);
- if (a1 != a2) {
- return std::pair<size_t, bool>(Bits::FindLSBSetNonZero64(a1 ^ a2) >> 3,
- true);
+ if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
+ uint64_t a1 = UNALIGNED_LOAD64(s1);
+ uint64_t a2 = UNALIGNED_LOAD64(s2);
+ if (SNAPPY_PREDICT_TRUE(a1 != a2)) {
+ // This code is critical for performance. The reason is that it determines
+ // how much to advance `ip` (s2). This obviously depends on both the loads
+ // from the `candidate` (s1) and `ip`. Furthermore the next `candidate`
+ // depends on the advanced `ip` calculated here through a load, hash and
+ // new candidate hash lookup (a lot of cycles). This makes s1 (ie.
+ // `candidate`) the variable that limits throughput. This is the reason we
+ // go through hoops to have this function update `data` for the next iter.
+ // The straightforward code would use *data, given by
+ //
+ // *data = UNALIGNED_LOAD64(s2 + matched_bytes) (Latency of 5 cycles),
+ //
+ // as input for the hash table lookup to find next candidate. However
+ // this forces the load on the data dependency chain of s1, because
+ // matched_bytes directly depends on s1. However matched_bytes is 0..7, so
+ // we can also calculate *data by
+ //
+ // *data = AlignRight(UNALIGNED_LOAD64(s2), UNALIGNED_LOAD64(s2 + 8),
+ // matched_bytes);
+ //
+ // The loads do not depend on s1 anymore and are thus off the bottleneck.
+ // The straightforward implementation on x86_64 would be to use
+ //
+ // shrd rax, rdx, cl (cl being matched_bytes * 8)
+ //
+ // unfortunately shrd with a variable shift has a 4 cycle latency. So this
+ // only wins 1 cycle. The BMI2 shrx instruction is a 1 cycle variable
+ // shift instruction but can only shift 64 bits. If we focus on just
+ // obtaining the least significant 4 bytes, we can obtain this by
+ //
+ // *data = ConditionalMove(matched_bytes < 4, UNALIGNED_LOAD64(s2),
+ // UNALIGNED_LOAD64(s2 + 4) >> ((matched_bytes & 3) * 8);
+ //
+ // Written like above this is not a big win, the conditional move would be
+ // a cmp followed by a cmov (2 cycles) followed by a shift (1 cycle).
+ // However matched_bytes < 4 is equal to
+ // static_cast<uint32_t>(xorval) != 0. Written that way, the conditional
+ // move (2 cycles) can execute in parallel with FindLSBSetNonZero64
+ // (tzcnt), which takes 3 cycles.
+ uint64_t xorval = a1 ^ a2;
+ int shift = Bits::FindLSBSetNonZero64(xorval);
+ size_t matched_bytes = shift >> 3;
+#ifndef __x86_64__
+ *data = UNALIGNED_LOAD64(s2 + matched_bytes);
+#else
+ // Ideally this would just be
+ //
+ // a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+ //
+ // However clang correctly infers that the above statement participates on
+ // a critical data dependency chain and thus, unfortunately, refuses to
+ // use a conditional move (it's tuned to cut data dependencies). In this
+ // case there is a longer parallel chain anyway AND this will be fairly
+ // unpredictable.
+ uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
+ asm("testl %k2, %k2\n\t"
+ "cmovzq %1, %0\n\t"
+ : "+r"(a2)
+ : "r"(a3), "r"(xorval));
+ *data = a2 >> (shift & (3 * 8));
+#endif
+ return std::pair<size_t, bool>(matched_bytes, true);
} else {
matched = 8;
s2 += 8;
@@ -119,14 +184,27 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
// time until we find a 64-bit block that doesn't match; then we find
// the first non-matching bit and use that to calculate the total
// length of the match.
- while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
- if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
+ while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
+ uint64_t a1 = UNALIGNED_LOAD64(s1 + matched);
+ uint64_t a2 = UNALIGNED_LOAD64(s2);
+ if (a1 == a2) {
s2 += 8;
matched += 8;
} else {
- uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
- int matching_bits = Bits::FindLSBSetNonZero64(x);
- matched += matching_bits >> 3;
+ uint64_t xorval = a1 ^ a2;
+ int shift = Bits::FindLSBSetNonZero64(xorval);
+ size_t matched_bytes = shift >> 3;
+#ifndef __x86_64__
+ *data = UNALIGNED_LOAD64(s2 + matched_bytes);
+#else
+ uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
+ asm("testl %k2, %k2\n\t"
+ "cmovzq %1, %0\n\t"
+ : "+r"(a2)
+ : "r"(a3), "r"(xorval));
+ *data = a2 >> (shift & (3 * 8));
+#endif
+ matched += matched_bytes;
assert(matched >= 8);
return std::pair<size_t, bool>(matched, false);
}
@@ -136,6 +214,9 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
++s2;
++matched;
} else {
+ if (s2 <= s2_limit - 8) {
+ *data = UNALIGNED_LOAD64(s2);
+ }
return std::pair<size_t, bool>(matched, matched < 8);
}
}
@@ -144,7 +225,8 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
#else
static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
const char* s2,
- const char* s2_limit) {
+ const char* s2_limit,
+ uint64_t* data) {
// Implementation based on the x86-64 version, above.
assert(s2_limit >= s2);
int matched = 0;
@@ -155,15 +237,17 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
matched += 4;
}
if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
- uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+ uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
int matching_bits = Bits::FindLSBSetNonZero(x);
matched += matching_bits >> 3;
+ s2 += matching_bits >> 3;
} else {
while ((s2 < s2_limit) && (s1[matched] == *s2)) {
++s2;
++matched;
}
}
+ if (s2 <= s2_limit - 8) *data = LittleEndian::Load64(s2);
return std::pair<size_t, bool>(matched, matched < 8);
}
#endif
@@ -190,7 +274,8 @@ static const int kMaximumTagLength = 5; // COPY_4_BYTE_OFFSET plus the actual o
// because of efficiency reasons:
// (1) Extracting a byte is faster than a bit-field
// (2) It properly aligns copy offset so we do not need a <<8
-static const uint16 char_table[256] = {
+static constexpr uint16_t char_table[256] = {
+ // clang-format off
0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
@@ -222,7 +307,8 @@ static const uint16 char_table[256] = {
0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
- 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+ 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040,
+ // clang-format on
};
} // end namespace internal
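
The core of the reworked FindMatchLength fast path is turning "how many leading bytes agree?" into one XOR plus a count-trailing-zeros, and then fetching the next bytes for hashing off the critical dependency chain. Below is a portable sketch of just the matched-bytes arithmetic (little-endian host, GCC/Clang builtin, without the x86-64 cmov trick from the diff); the names are local to the example:

```cpp
#include <cstdint>
#include <cstring>

// Unaligned 64-bit load via memcpy, as in snappy-stubs-internal.h.
static inline uint64_t Load64(const char* p) {
  uint64_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

// Returns how many of the first 8 bytes of s1 and s2 agree (0..8).
// On a little-endian host, the index of the lowest differing bit divided by 8
// is the number of equal leading bytes -- the same arithmetic the diff does
// with Bits::FindLSBSetNonZero64(xorval) >> 3.
static inline size_t MatchedBytesWithin8(const char* s1, const char* s2) {
  uint64_t a1 = Load64(s1);
  uint64_t a2 = Load64(s2);
  if (a1 == a2) return 8;
  uint64_t xorval = a1 ^ a2;
  int shift = __builtin_ctzll(xorval);  // Lowest set bit of the difference.
  return static_cast<size_t>(shift >> 3);
}
```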
diff --git a/contrib/libs/snappy/snappy-sinksource.cc b/contrib/libs/snappy/snappy-sinksource.cc
index 369a13215b..8214964a7e 100644
--- a/contrib/libs/snappy/snappy-sinksource.cc
+++ b/contrib/libs/snappy/snappy-sinksource.cc
@@ -26,23 +26,31 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <string.h>
+#include <stddef.h>
+#include <cstring>
#include "snappy-sinksource.h"
namespace snappy {
-Source::~Source() { }
+Source::~Source() = default;
-Sink::~Sink() { }
+Sink::~Sink() = default;
char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)length;
+
return scratch;
}
char* Sink::GetAppendBufferVariable(
size_t min_size, size_t desired_size_hint, char* scratch,
size_t scratch_size, size_t* allocated_size) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)min_size;
+ (void)desired_size_hint;
+
*allocated_size = scratch_size;
return scratch;
}
@@ -55,7 +63,7 @@ void Sink::AppendAndTakeOwnership(
(*deleter)(deleter_arg, bytes, n);
}
-ByteArraySource::~ByteArraySource() { }
+ByteArraySource::~ByteArraySource() = default;
size_t ByteArraySource::Available() const { return left_; }
@@ -74,22 +82,26 @@ UncheckedByteArraySink::~UncheckedByteArraySink() { }
void UncheckedByteArraySink::Append(const char* data, size_t n) {
// Do no copying if the caller filled in the result of GetAppendBuffer()
if (data != dest_) {
- memcpy(dest_, data, n);
+ std::memcpy(dest_, data, n);
}
dest_ += n;
}
char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)len;
+ (void)scratch;
+
return dest_;
}
void UncheckedByteArraySink::AppendAndTakeOwnership(
- char* data, size_t n,
+ char* bytes, size_t n,
void (*deleter)(void*, const char*, size_t),
void *deleter_arg) {
- if (data != dest_) {
- memcpy(dest_, data, n);
- (*deleter)(deleter_arg, data, n);
+ if (bytes != dest_) {
+ std::memcpy(dest_, bytes, n);
+ (*deleter)(deleter_arg, bytes, n);
}
dest_ += n;
}
@@ -97,6 +109,11 @@ void UncheckedByteArraySink::AppendAndTakeOwnership(
char* UncheckedByteArraySink::GetAppendBufferVariable(
size_t min_size, size_t desired_size_hint, char* scratch,
size_t scratch_size, size_t* allocated_size) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)min_size;
+ (void)scratch;
+ (void)scratch_size;
+
*allocated_size = desired_size_hint;
return dest_;
}
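
ByteArraySource and UncheckedByteArraySink are the concrete Source/Sink pair shipped with the library. A short sketch of driving compression through them, assuming the Source/Sink overload of snappy::Compress() and snappy::MaxCompressedLength() declared in snappy.h:

```cpp
#include <string>

#include "snappy-sinksource.h"
#include "snappy.h"

// Compresses `input` through the Source/Sink interface and returns the result.
std::string CompressWithSinkSource(const std::string& input) {
  std::string output;
  // UncheckedByteArraySink does no bounds checking, so the destination must
  // be at least MaxCompressedLength(input.size()) bytes.
  output.resize(snappy::MaxCompressedLength(input.size()));

  snappy::ByteArraySource source(input.data(), input.size());
  snappy::UncheckedByteArraySink sink(&output[0]);
  size_t compressed_size = snappy::Compress(&source, &sink);

  output.resize(compressed_size);  // Trim to the bytes actually written.
  return output;
}
```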
diff --git a/contrib/libs/snappy/snappy-sinksource.h b/contrib/libs/snappy/snappy-sinksource.h
index 8afcdaaa2c..3c74e1bb6e 100644
--- a/contrib/libs/snappy/snappy-sinksource.h
+++ b/contrib/libs/snappy/snappy-sinksource.h
@@ -146,10 +146,10 @@ class Source {
class ByteArraySource : public Source {
public:
ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
- virtual ~ByteArraySource();
- virtual size_t Available() const;
- virtual const char* Peek(size_t* len);
- virtual void Skip(size_t n);
+ ~ByteArraySource() override;
+ size_t Available() const override;
+ const char* Peek(size_t* len) override;
+ void Skip(size_t n) override;
private:
const char* ptr_;
size_t left_;
@@ -159,15 +159,15 @@ class ByteArraySource : public Source {
class UncheckedByteArraySink : public Sink {
public:
explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
- virtual ~UncheckedByteArraySink();
- virtual void Append(const char* data, size_t n);
- virtual char* GetAppendBuffer(size_t len, char* scratch);
- virtual char* GetAppendBufferVariable(
+ ~UncheckedByteArraySink() override;
+ void Append(const char* data, size_t n) override;
+ char* GetAppendBuffer(size_t len, char* scratch) override;
+ char* GetAppendBufferVariable(
size_t min_size, size_t desired_size_hint, char* scratch,
- size_t scratch_size, size_t* allocated_size);
- virtual void AppendAndTakeOwnership(
+ size_t scratch_size, size_t* allocated_size) override;
+ void AppendAndTakeOwnership(
char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
- void *deleter_arg);
+ void *deleter_arg) override;
// Return the current output pointer so that a caller can see how
// many bytes were produced.
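
The override-style declarations above also show the shape of a user-defined Sink: only Append() is pure virtual, while the buffer hooks have sensible defaults. A minimal sketch of a Sink that accumulates output into a std::string (the class name is invented for the example):

```cpp
#include <cstddef>
#include <string>

#include "snappy-sinksource.h"

// Minimal Sink: every appended chunk is copied onto the end of a std::string.
class StringSink : public snappy::Sink {
 public:
  explicit StringSink(std::string* dest) : dest_(dest) {}
  ~StringSink() override = default;

  void Append(const char* bytes, size_t n) override {
    dest_->append(bytes, n);
  }

 private:
  std::string* dest_;  // Not owned.
};
```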
diff --git a/contrib/libs/snappy/snappy-stubs-internal.cc b/contrib/libs/snappy/snappy-stubs-internal.cc
index 66ed2e9039..0bc8c2d344 100644
--- a/contrib/libs/snappy/snappy-stubs-internal.cc
+++ b/contrib/libs/snappy/snappy-stubs-internal.cc
@@ -33,7 +33,7 @@
namespace snappy {
-void Varint::Append32(std::string* s, uint32 value) {
+void Varint::Append32(std::string* s, uint32_t value) {
char buf[Varint::kMax32];
const char* p = Varint::Encode32(buf, value);
s->append(buf, p - buf);
diff --git a/contrib/libs/snappy/snappy-stubs-internal.h b/contrib/libs/snappy/snappy-stubs-internal.h
index 4854689d17..c2a838f38f 100644
--- a/contrib/libs/snappy/snappy-stubs-internal.h
+++ b/contrib/libs/snappy/snappy-stubs-internal.h
@@ -35,11 +35,13 @@
#include "config.h"
#endif
-#include <string>
+#include <stdint.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <string>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
@@ -67,19 +69,11 @@
#include "snappy-stubs-public.h"
-#if defined(__x86_64__)
-
-// Enable 64-bit optimized versions of some routines.
-#define ARCH_K8 1
-
-#elif defined(__ppc64__)
-
+// Used to enable 64-bit optimized versions of some routines.
+#if defined(__PPC64__) || defined(__powerpc64__)
#define ARCH_PPC 1
-
-#elif defined(__aarch64__)
-
+#elif defined(__aarch64__) || defined(_M_ARM64)
#define ARCH_ARM 1
-
#endif
// Needed by OS X, among others.
@@ -93,7 +87,7 @@
#ifdef ARRAYSIZE
#undef ARRAYSIZE
#endif
-#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+#define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}
// Static prediction hints.
#ifdef HAVE_BUILTIN_EXPECT
@@ -104,212 +98,66 @@
#define SNAPPY_PREDICT_TRUE(x) x
#endif
-// This is only used for recomputing the tag byte table used during
-// decompression; for simplicity we just remove it from the open-source
-// version (anyone who wants to regenerate it can just do the call
-// themselves within main()).
-#define DEFINE_bool(flag_name, default_value, description) \
- bool FLAGS_ ## flag_name = default_value
-#define DECLARE_bool(flag_name) \
- extern bool FLAGS_ ## flag_name
-
-namespace snappy {
-
-static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
-static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
-
-// Potentially unaligned loads and stores.
-
-// x86, PowerPC, and ARM64 can simply do these loads and stores native.
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__aarch64__)
-
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
-#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
-
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
-#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
-
-// ARMv7 and newer support native unaligned accesses, but only of 16-bit
-// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
-// do an unaligned read and rotate the words around a bit, or do the reads very
-// slowly (trip through kernel mode). There's no simple #define that says just
-// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
-// sub-architectures.
-//
-// This is a mess, but there's not much we can do about it.
-//
-// To further complicate matters, only LDR instructions (single reads) are
-// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
-// explicitly tell the compiler that these accesses can be unaligned, it can and
-// will combine accesses. On armcc, the way to signal this is done by accessing
-// through the type (uint32 __packed *), but GCC has no such attribute
-// (it ignores __attribute__((packed)) on individual variables). However,
-// we can tell it that a _struct_ is unaligned, which has the same effect,
-// so we do that.
-
-#elif defined(__arm__) && \
- !defined(__ARM_ARCH_4__) && \
- !defined(__ARM_ARCH_4T__) && \
- !defined(__ARM_ARCH_5__) && \
- !defined(__ARM_ARCH_5T__) && \
- !defined(__ARM_ARCH_5TE__) && \
- !defined(__ARM_ARCH_5TEJ__) && \
- !defined(__ARM_ARCH_6__) && \
- !defined(__ARM_ARCH_6J__) && \
- !defined(__ARM_ARCH_6K__) && \
- !defined(__ARM_ARCH_6Z__) && \
- !defined(__ARM_ARCH_6ZK__) && \
- !defined(__ARM_ARCH_6T2__)
-
-#if __GNUC__
-#define ATTRIBUTE_PACKED __attribute__((__packed__))
+// Inlining hints.
+#ifdef HAVE_ATTRIBUTE_ALWAYS_INLINE
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
#else
-#define ATTRIBUTE_PACKED
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
#endif
-namespace base {
-namespace internal {
-
-struct Unaligned16Struct {
- uint16 value;
- uint8 dummy; // To make the size non-power-of-two.
-} ATTRIBUTE_PACKED;
-
-struct Unaligned32Struct {
- uint32 value;
- uint8 dummy; // To make the size non-power-of-two.
-} ATTRIBUTE_PACKED;
-
-} // namespace internal
-} // namespace base
-
-#define UNALIGNED_LOAD16(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
-#define UNALIGNED_LOAD32(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
-
-#define UNALIGNED_STORE16(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
- (_val))
-#define UNALIGNED_STORE32(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
- (_val))
-
-// TODO: NEON supports unaligned 64-bit loads and stores.
-// See if that would be more efficient on platforms supporting it,
-// at least for copies.
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
-}
+// Stubbed version of ABSL_FLAG.
+//
+// In the open source version, flags can only be changed at compile time.
+#define SNAPPY_FLAG(flag_type, flag_name, default_value, help) \
+ flag_type FLAGS_ ## flag_name = default_value
-#else
+namespace snappy {
-// These functions are provided for architectures that don't support
-// unaligned loads and stores.
+// Stubbed version of absl::GetFlag().
+template <typename T>
+inline T GetFlag(T flag) { return flag; }
-inline uint16 UNALIGNED_LOAD16(const void *p) {
- uint16 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
+static const uint32_t kuint32max = std::numeric_limits<uint32_t>::max();
+static const int64_t kint64max = std::numeric_limits<int64_t>::max();
-inline uint32 UNALIGNED_LOAD32(const void *p) {
- uint32 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
+// Potentially unaligned loads and stores.
-inline void UNALIGNED_STORE16(void *p, uint16 v) {
- memcpy(p, &v, sizeof v);
+inline uint16_t UNALIGNED_LOAD16(const void *p) {
+ // Compiles to a single movzx/ldrh on clang/gcc/msvc.
+ uint16_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
}
-inline void UNALIGNED_STORE32(void *p, uint32 v) {
- memcpy(p, &v, sizeof v);
+inline uint32_t UNALIGNED_LOAD32(const void *p) {
+ // Compiles to a single mov/ldr on clang/gcc/msvc.
+ uint32_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
}
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
+inline uint64_t UNALIGNED_LOAD64(const void *p) {
+ // Compiles to a single mov/ldr on clang/gcc/msvc.
+ uint64_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
}
-#endif
-
-// The following guarantees declaration of the byte swap functions.
-#if defined(SNAPPY_IS_BIG_ENDIAN)
-
-#ifdef HAVE_SYS_BYTEORDER_H
-#include <sys/byteorder.h>
-#endif
-
-#ifdef HAVE_SYS_ENDIAN_H
-#include <sys/endian.h>
-#endif
-
-#ifdef _MSC_VER
-#include <stdlib.h>
-#define bswap_16(x) _byteswap_ushort(x)
-#define bswap_32(x) _byteswap_ulong(x)
-#define bswap_64(x) _byteswap_uint64(x)
-
-#elif defined(__APPLE__)
-// Mac OS X / Darwin features
-#include <libkern/OSByteOrder.h>
-#define bswap_16(x) OSSwapInt16(x)
-#define bswap_32(x) OSSwapInt32(x)
-#define bswap_64(x) OSSwapInt64(x)
-
-#elif defined(HAVE_BYTESWAP_H)
-#include <byteswap.h>
-
-#elif defined(bswap32)
-// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
-#define bswap_16(x) bswap16(x)
-#define bswap_32(x) bswap32(x)
-#define bswap_64(x) bswap64(x)
-
-#elif defined(BSWAP_64)
-// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
-#define bswap_16(x) BSWAP_16(x)
-#define bswap_32(x) BSWAP_32(x)
-#define bswap_64(x) BSWAP_64(x)
-
-#else
-
-inline uint16 bswap_16(uint16 x) {
- return (x << 8) | (x >> 8);
+inline void UNALIGNED_STORE16(void *p, uint16_t v) {
+ // Compiles to a single mov/strh on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
}
-inline uint32 bswap_32(uint32 x) {
- x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
- return (x >> 16) | (x << 16);
+inline void UNALIGNED_STORE32(void *p, uint32_t v) {
+ // Compiles to a single mov/str on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
}
-inline uint64 bswap_64(uint64 x) {
- x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
- x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
- return (x >> 32) | (x << 32);
+inline void UNALIGNED_STORE64(void *p, uint64_t v) {
+ // Compiles to a single mov/str on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
}
-#endif
-
-#endif // defined(SNAPPY_IS_BIG_ENDIAN)
-
// Convert to little-endian storage, opposite of network format.
// Convert x from host to little endian: x = LittleEndian.FromHost(x);
// convert x from little endian to host: x = LittleEndian.ToHost(x);
@@ -321,44 +169,77 @@ inline uint64 bswap_64(uint64 x) {
// x = LittleEndian.Load16(p);
class LittleEndian {
public:
- // Conversion functions.
-#if defined(SNAPPY_IS_BIG_ENDIAN)
-
- static uint16 FromHost16(uint16 x) { return bswap_16(x); }
- static uint16 ToHost16(uint16 x) { return bswap_16(x); }
-
- static uint32 FromHost32(uint32 x) { return bswap_32(x); }
- static uint32 ToHost32(uint32 x) { return bswap_32(x); }
-
- static bool IsLittleEndian() { return false; }
+ // Functions to do unaligned loads and stores in little-endian order.
+ static inline uint16_t Load16(const void *ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
-#else // !defined(SNAPPY_IS_BIG_ENDIAN)
+ // Compiles to a single mov/str on recent clang and gcc.
+ return (static_cast<uint16_t>(buffer[0])) |
+ (static_cast<uint16_t>(buffer[1]) << 8);
+ }
- static uint16 FromHost16(uint16 x) { return x; }
- static uint16 ToHost16(uint16 x) { return x; }
+ static inline uint32_t Load32(const void *ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
- static uint32 FromHost32(uint32 x) { return x; }
- static uint32 ToHost32(uint32 x) { return x; }
+ // Compiles to a single mov/str on recent clang and gcc.
+ return (static_cast<uint32_t>(buffer[0])) |
+ (static_cast<uint32_t>(buffer[1]) << 8) |
+ (static_cast<uint32_t>(buffer[2]) << 16) |
+ (static_cast<uint32_t>(buffer[3]) << 24);
+ }
- static bool IsLittleEndian() { return true; }
+ static inline uint64_t Load64(const void *ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+
+ // Compiles to a single mov/str on recent clang and gcc.
+ return (static_cast<uint64_t>(buffer[0])) |
+ (static_cast<uint64_t>(buffer[1]) << 8) |
+ (static_cast<uint64_t>(buffer[2]) << 16) |
+ (static_cast<uint64_t>(buffer[3]) << 24) |
+ (static_cast<uint64_t>(buffer[4]) << 32) |
+ (static_cast<uint64_t>(buffer[5]) << 40) |
+ (static_cast<uint64_t>(buffer[6]) << 48) |
+ (static_cast<uint64_t>(buffer[7]) << 56);
+ }
-#endif // !defined(SNAPPY_IS_BIG_ENDIAN)
+ static inline void Store16(void *dst, uint16_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
- // Functions to do unaligned loads and stores in little-endian order.
- static uint16 Load16(const void *p) {
- return ToHost16(UNALIGNED_LOAD16(p));
+ // Compiles to a single mov/str on recent clang and gcc.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
}
- static void Store16(void *p, uint16 v) {
- UNALIGNED_STORE16(p, FromHost16(v));
+ static void Store32(void *dst, uint32_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+ // Compiles to a single mov/str on recent clang and gcc.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
}
- static uint32 Load32(const void *p) {
- return ToHost32(UNALIGNED_LOAD32(p));
+ static void Store64(void* dst, uint64_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+ // Compiles to a single mov/str on recent clang and gcc.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+ buffer[4] = static_cast<uint8_t>(value >> 32);
+ buffer[5] = static_cast<uint8_t>(value >> 40);
+ buffer[6] = static_cast<uint8_t>(value >> 48);
+ buffer[7] = static_cast<uint8_t>(value >> 56);
}
- static void Store32(void *p, uint32 v) {
- UNALIGNED_STORE32(p, FromHost32(v));
+ static inline constexpr bool IsLittleEndian() {
+#if defined(SNAPPY_IS_BIG_ENDIAN)
+ return false;
+#else
+ return true;
+#endif // defined(SNAPPY_IS_BIG_ENDIAN)
}
};
@@ -366,19 +247,17 @@ class LittleEndian {
class Bits {
public:
// Return floor(log2(n)) for positive integer n.
- static int Log2FloorNonZero(uint32 n);
+ static int Log2FloorNonZero(uint32_t n);
// Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
- static int Log2Floor(uint32 n);
+ static int Log2Floor(uint32_t n);
// Return the first set least / most significant bit, 0-indexed. Returns an
// undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
// that it's 0-indexed.
- static int FindLSBSetNonZero(uint32 n);
+ static int FindLSBSetNonZero(uint32_t n);
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
- static int FindLSBSetNonZero64(uint64 n);
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ static int FindLSBSetNonZero64(uint64_t n);
private:
// No copying
@@ -386,9 +265,9 @@ class Bits {
void operator=(const Bits&);
};
-#ifdef HAVE_BUILTIN_CTZ
+#if defined(HAVE_BUILTIN_CTZ)
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
assert(n != 0);
// (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
// represents subtraction in base 2 and observes that there's no carry.
@@ -399,66 +278,52 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
return 31 ^ __builtin_clz(n);
}
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
}
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
assert(n != 0);
return __builtin_ctz(n);
}
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
- assert(n != 0);
- return __builtin_ctzll(n);
-}
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
#elif defined(_MSC_VER)
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
unsigned long where;
_BitScanReverse(&where, n);
return static_cast<int>(where);
}
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
unsigned long where;
if (_BitScanReverse(&where, n))
return static_cast<int>(where);
return -1;
}
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
unsigned long where;
if (_BitScanForward(&where, n))
return static_cast<int>(where);
return 32;
}
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
- assert(n != 0);
- unsigned long where;
- if (_BitScanForward64(&where, n))
- return static_cast<int>(where);
- return 64;
-}
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
#else // Portable versions.
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
assert(n != 0);
int log = 0;
- uint32 value = n;
+ uint32_t value = n;
for (int i = 4; i >= 0; --i) {
int shift = (1 << i);
- uint32 x = value >> shift;
+ uint32_t x = value >> shift;
if (x != 0) {
value = x;
log += shift;
@@ -468,16 +333,16 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
return log;
}
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
}
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
assert(n != 0);
int rc = 31;
for (int i = 4, shift = 1 << 4; i >= 0; --i) {
- const uint32 x = n << shift;
+ const uint32_t x = n << shift;
if (x != 0) {
n = x;
rc -= shift;
@@ -487,27 +352,48 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
return rc;
}
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+#endif // End portable versions.
+
+#if defined(HAVE_BUILTIN_CTZ)
+
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+ assert(n != 0);
+ return __builtin_ctzll(n);
+}
+
+#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
+// _BitScanForward64() is only available on x64 and ARM64.
+
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+ assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+ unsigned long where;
+ if (_BitScanForward64(&where, n))
+ return static_cast<int>(where);
+ return 64;
+}
+
+#else // Portable version.
+
// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
assert(n != 0);
- const uint32 bottombits = static_cast<uint32>(n);
+ const uint32_t bottombits = static_cast<uint32_t>(n);
if (bottombits == 0) {
- // Bottom bits are zero, so scan in top bits
- return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+ // Bottom bits are zero, so scan the top bits.
+ return 32 + FindLSBSetNonZero(static_cast<uint32_t>(n >> 32));
} else {
return FindLSBSetNonZero(bottombits);
}
}
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-#endif // End portable versions.
+#endif // End portable version.
// Variable-length integer encoding.
class Varint {
public:
- // Maximum lengths of varint encoding of uint32.
+ // Maximum lengths of varint encoding of uint32_t.
static const int kMax32 = 5;
// Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
@@ -516,23 +402,23 @@ class Varint {
// past the last byte of the varint32. Else returns NULL. On success,
// "result <= limit".
static const char* Parse32WithLimit(const char* ptr, const char* limit,
- uint32* OUTPUT);
+ uint32_t* OUTPUT);
// REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
// EFFECTS Encodes "v" into "ptr" and returns a pointer to the
// byte just past the last encoded byte.
- static char* Encode32(char* ptr, uint32 v);
+ static char* Encode32(char* ptr, uint32_t v);
// EFFECTS Appends the varint representation of "value" to "*s".
- static void Append32(std::string* s, uint32 value);
+ static void Append32(std::string* s, uint32_t value);
};
inline const char* Varint::Parse32WithLimit(const char* p,
const char* l,
- uint32* OUTPUT) {
+ uint32_t* OUTPUT) {
const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
- uint32 b, result;
+ uint32_t b, result;
if (ptr >= limit) return NULL;
b = *(ptr++); result = b & 127; if (b < 128) goto done;
if (ptr >= limit) return NULL;
@@ -549,30 +435,30 @@ inline const char* Varint::Parse32WithLimit(const char* p,
return reinterpret_cast<const char*>(ptr);
}
-inline char* Varint::Encode32(char* sptr, uint32 v) {
+inline char* Varint::Encode32(char* sptr, uint32_t v) {
// Operate on characters as unsigneds
- unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
- static const int B = 128;
- if (v < (1<<7)) {
- *(ptr++) = v;
- } else if (v < (1<<14)) {
- *(ptr++) = v | B;
- *(ptr++) = v>>7;
- } else if (v < (1<<21)) {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = v>>14;
- } else if (v < (1<<28)) {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = v>>21;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(sptr);
+ static const uint8_t B = 128;
+ if (v < (1 << 7)) {
+ *(ptr++) = static_cast<uint8_t>(v);
+ } else if (v < (1 << 14)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 7);
+ } else if (v < (1 << 21)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 14);
+ } else if (v < (1 << 28)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 21);
} else {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = (v>>21) | B;
- *(ptr++) = v>>28;
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v>>7) | B);
+ *(ptr++) = static_cast<uint8_t>((v>>14) | B);
+ *(ptr++) = static_cast<uint8_t>((v>>21) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 28);
}
return reinterpret_cast<char*>(ptr);
}
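
The Varint rewrite keeps the classic base-128 layout: seven payload bits per byte, high bit set on every byte except the last, at most five bytes for a uint32_t. A self-contained sketch of that encoding and its inverse, mirroring what Encode32/Parse32WithLimit do (function names are local to the example):

```cpp
#include <cassert>
#include <cstdint>

// Writes v as a little-endian base-128 varint; returns the byte count (1..5).
int EncodeVarint32(uint32_t v, uint8_t out[5]) {
  int n = 0;
  while (v >= 0x80) {
    out[n++] = static_cast<uint8_t>(v | 0x80);  // 7 payload bits + continuation.
    v >>= 7;
  }
  out[n++] = static_cast<uint8_t>(v);  // Last byte has the high bit clear.
  return n;
}

// Reads the varint written by EncodeVarint32; returns bytes consumed, 0 on error.
int DecodeVarint32(const uint8_t* in, uint32_t* result) {
  uint32_t value = 0;
  for (int i = 0; i < 5; ++i) {
    value |= static_cast<uint32_t>(in[i] & 0x7F) << (7 * i);
    if ((in[i] & 0x80) == 0) {
      *result = value;
      return i + 1;
    }
  }
  return 0;  // Malformed: continuation bit set on all five bytes.
}

int main() {
  uint8_t buf[5];
  int len = EncodeVarint32(300, buf);  // 300 encodes as 0xAC 0x02.
  uint32_t decoded = 0;
  assert(DecodeVarint32(buf, &decoded) == len && decoded == 300);
  return 0;
}
```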
diff --git a/contrib/libs/snappy/snappy-stubs-public.h b/contrib/libs/snappy/snappy-stubs-public.h
index 357c4b2e4b..ce6edb89af 100644
--- a/contrib/libs/snappy/snappy-stubs-public.h
+++ b/contrib/libs/snappy/snappy-stubs-public.h
@@ -36,32 +36,20 @@
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#include <cstddef>
-#include <cstdint>
-#include <string>
-
#include "config.h"
-
+
#if defined(HAVE_SYS_UIO_H)
#include <sys/uio.h>
#endif // HAVE_SYS_UIO_H
#define SNAPPY_MAJOR 1
#define SNAPPY_MINOR 1
-#define SNAPPY_PATCHLEVEL 8
+#define SNAPPY_PATCHLEVEL 9
#define SNAPPY_VERSION \
((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
namespace snappy {
-using int8 = std::int8_t;
-using uint8 = std::uint8_t;
-using int16 = std::int16_t;
-using uint16 = std::uint16_t;
-using int32 = std::int32_t;
-using uint32 = std::uint32_t;
-using int64 = std::int64_t;
-using uint64 = std::uint64_t;
-
#if !defined(HAVE_SYS_UIO_H)
// Windows does not have an iovec type, yet the concept is universally useful.
// It is simple to define it ourselves, so we put it inside our own namespace.
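
SNAPPY_VERSION packs major/minor/patch into a single integer, one byte per component, so 1.1.9 becomes 0x010109. A throwaway snippet unpacking it (macros duplicated here so the example stands alone):

```cpp
#include <cstdio>

#define SNAPPY_MAJOR 1
#define SNAPPY_MINOR 1
#define SNAPPY_PATCHLEVEL 9
#define SNAPPY_VERSION \
  ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)

int main() {
  // Prints "1.1.9".
  std::printf("%d.%d.%d\n", (SNAPPY_VERSION >> 16) & 0xFF,
              (SNAPPY_VERSION >> 8) & 0xFF, SNAPPY_VERSION & 0xFF);
  return 0;
}
```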
diff --git a/contrib/libs/snappy/snappy.cc b/contrib/libs/snappy/snappy.cc
index 9351b0f21e..dcc26d59be 100644
--- a/contrib/libs/snappy/snappy.cc
+++ b/contrib/libs/snappy/snappy.cc
@@ -26,9 +26,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "snappy.h"
#include "snappy-internal.h"
#include "snappy-sinksource.h"
+#include "snappy.h"
#if !defined(SNAPPY_HAVE_SSSE3)
// __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD
@@ -68,35 +68,92 @@
#include <immintrin.h>
#endif
-#include <stdio.h>
-
#include <algorithm>
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
#include <string>
+#include <utility>
#include <vector>
#include <util/generic/string.h>
namespace snappy {
+namespace {
+
+// The amount of slop bytes writers are using for unconditional copies.
+constexpr int kSlopBytes = 64;
+
+using internal::char_table;
using internal::COPY_1_BYTE_OFFSET;
using internal::COPY_2_BYTE_OFFSET;
-using internal::LITERAL;
-using internal::char_table;
+using internal::COPY_4_BYTE_OFFSET;
using internal::kMaximumTagLength;
+using internal::LITERAL;
+
+// We translate the information encoded in a tag through a lookup table to a
+// format that requires fewer instructions to decode. Effectively we store
+// the length minus the tag part of the offset. The lowest significant byte
+// thus stores the length. While total length - offset is given by
+// entry - ExtractOffset(type). The nice thing is that the subtraction
+// immediately sets the flags for the necessary check that offset >= length.
+// This folds the cmp with sub. We engineer the long literals and copy-4 to
+// always fail this check, so their presence doesn't affect the fast path.
+// To prevent literals from triggering the guard against offset < length (offset
+// does not apply to literals) the table is giving them a spurious offset of
+// 256.
+inline constexpr int16_t MakeEntry(int16_t len, int16_t offset) {
+ return len - (offset << 8);
+}
+
+inline constexpr int16_t LengthMinusOffset(int data, int type) {
+ return type == 3 ? 0xFF // copy-4 (or type == 3)
+ : type == 2 ? MakeEntry(data + 1, 0) // copy-2
+ : type == 1 ? MakeEntry((data & 7) + 4, data >> 3) // copy-1
+ : data < 60 ? MakeEntry(data + 1, 1) // note spurious offset.
+ : 0xFF; // long literal
+}
+
+inline constexpr int16_t LengthMinusOffset(uint8_t tag) {
+ return LengthMinusOffset(tag >> 2, tag & 3);
+}
+
+template <size_t... Ints>
+struct index_sequence {};
+
+template <std::size_t N, size_t... Is>
+struct make_index_sequence : make_index_sequence<N - 1, N - 1, Is...> {};
+
+template <size_t... Is>
+struct make_index_sequence<0, Is...> : index_sequence<Is...> {};
+
+template <size_t... seq>
+constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) {
+ return std::array<int16_t, 256>{LengthMinusOffset(seq)...};
+}
+
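
MakeTable above builds the 256-entry length_minus_offset table entirely at compile time using a hand-rolled index_sequence, since C++11 lacks std::make_index_sequence. The same trick in isolation, applied to a throwaway table of squares (all names below are specific to this sketch):

```cpp
#include <array>
#include <cstddef>
#include <cstdint>

template <size_t... Ints>
struct demo_index_sequence {};

template <size_t N, size_t... Is>
struct demo_make_index_sequence
    : demo_make_index_sequence<N - 1, N - 1, Is...> {};

template <size_t... Is>
struct demo_make_index_sequence<0, Is...> : demo_index_sequence<Is...> {};

constexpr int16_t Square(size_t i) { return static_cast<int16_t>(i * i); }

// Expanding the pack calls the generator once per entry, so the whole array
// is a constant expression -- exactly how length_minus_offset is built.
template <size_t... seq>
constexpr std::array<int16_t, sizeof...(seq)> MakeSquares(
    demo_index_sequence<seq...>) {
  return std::array<int16_t, sizeof...(seq)>{{Square(seq)...}};
}

constexpr std::array<int16_t, 16> kSquares =
    MakeSquares(demo_make_index_sequence<16>{});
// kSquares[5] == 25; with C++14 or later this can be checked by static_assert.
```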
+// We maximally co-locate the two tables so that only one register needs to be
+// reserved for the table address.
+struct {
+ alignas(64) const std::array<int16_t, 256> length_minus_offset;
+ uint32_t extract_masks[4]; // Used for extracting offset based on tag type.
+} table = {MakeTable(make_index_sequence<256>{}), {0, 0xFF, 0xFFFF, 0}};
// Any hash function will produce a valid compressed bitstream, but a good
// hash function reduces the number of collisions and thus yields better
// compression for compressible input, and more speed for incompressible
// input. Of course, it doesn't hurt if the hash function is reasonably fast
// either, as it gets called a lot.
-static inline uint32 HashBytes(uint32 bytes, int shift) {
- uint32 kMul = 0x1e35a7bd;
- return (bytes * kMul) >> shift;
-}
-static inline uint32 Hash(const char* p, int shift) {
- return HashBytes(UNALIGNED_LOAD32(p), shift);
+inline uint32_t HashBytes(uint32_t bytes, uint32_t mask) {
+ constexpr uint32_t kMagic = 0x1e35a7bd;
+ return ((kMagic * bytes) >> (32 - kMaxHashTableBits)) & mask;
}
-size_t MaxCompressedLength(size_t source_len) {
+} // namespace
+
+size_t MaxCompressedLength(size_t source_bytes) {
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
@@ -117,24 +174,34 @@ size_t MaxCompressedLength(size_t source_len) {
// I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
- return 32 + source_len + source_len/6;
+ return 32 + source_bytes + source_bytes / 6;
}
namespace {
void UnalignedCopy64(const void* src, void* dst) {
char tmp[8];
- memcpy(tmp, src, 8);
- memcpy(dst, tmp, 8);
+ std::memcpy(tmp, src, 8);
+ std::memcpy(dst, tmp, 8);
}
void UnalignedCopy128(const void* src, void* dst) {
- // memcpy gets vectorized when the appropriate compiler options are used.
- // For example, x86 compilers targeting SSE2+ will optimize to an SSE2 load
- // and store.
+ // std::memcpy() gets vectorized when the appropriate compiler options are
+ // used. For example, x86 compilers targeting SSE2+ will optimize to an SSE2
+ // load and store.
char tmp[16];
- memcpy(tmp, src, 16);
- memcpy(dst, tmp, 16);
+ std::memcpy(tmp, src, 16);
+ std::memcpy(dst, tmp, 16);
+}
+
+template <bool use_16bytes_chunk>
+inline void ConditionalUnalignedCopy128(const char* src, char* dst) {
+ if (use_16bytes_chunk) {
+ UnalignedCopy128(src, dst);
+ } else {
+ UnalignedCopy64(src, dst);
+ UnalignedCopy64(src + 8, dst + 8);
+ }
}
// Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) a byte at a time. Used
@@ -146,7 +213,8 @@ void UnalignedCopy128(const void* src, void* dst) {
// After IncrementalCopySlow(src, op, op_limit), the result will have eleven
// copies of "ab"
// ababababababababababab
-// Note that this does not match the semantics of either memcpy() or memmove().
+// Note that this does not match the semantics of either std::memcpy() or
+// std::memmove().
inline char* IncrementalCopySlow(const char* src, char* op,
char* const op_limit) {
// TODO: Remove pragma when LLVM is aware this
@@ -163,37 +231,171 @@ inline char* IncrementalCopySlow(const char* src, char* op,
#if SNAPPY_HAVE_SSSE3
-// This is a table of shuffle control masks that can be used as the source
+// Computes the bytes for shuffle control mask (please read comments on
+// 'pattern_generation_masks' as well) for the given index_offset and
+// pattern_size. For example, when the 'offset' is 6, it will generate a
+// repeating pattern of size 6. So, the first 16 byte indexes will correspond to
+// the pattern-bytes {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3} and the
+// next 16 byte indexes will correspond to the pattern-bytes {4, 5, 0, 1, 2, 3,
+// 4, 5, 0, 1, 2, 3, 4, 5, 0, 1}. These byte index sequences are generated by
+// calling MakePatternMaskBytes(0, 6, index_sequence<16>()) and
+// MakePatternMaskBytes(16, 6, index_sequence<16>()) respectively.
+template <size_t... indexes>
+inline constexpr std::array<char, sizeof...(indexes)> MakePatternMaskBytes(
+ int index_offset, int pattern_size, index_sequence<indexes...>) {
+ return {static_cast<char>((index_offset + indexes) % pattern_size)...};
+}
+
+// Computes the shuffle control mask bytes array for given pattern-sizes and
+// returns an array.
+template <size_t... pattern_sizes_minus_one>
+inline constexpr std::array<std::array<char, sizeof(__m128i)>,
+ sizeof...(pattern_sizes_minus_one)>
+MakePatternMaskBytesTable(int index_offset,
+ index_sequence<pattern_sizes_minus_one...>) {
+ return {MakePatternMaskBytes(
+ index_offset, pattern_sizes_minus_one + 1,
+ make_index_sequence</*indexes=*/sizeof(__m128i)>())...};
+}
+
+// This is an array of shuffle control masks that can be used as the source
// operand for PSHUFB to permute the contents of the destination XMM register
// into a repeating byte pattern.
-alignas(16) const char pshufb_fill_patterns[7][16] = {
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1},
- {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0},
- {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
- {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0},
- {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},
- {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1},
-};
+alignas(16) constexpr std::array<std::array<char, sizeof(__m128i)>,
+ 16> pattern_generation_masks =
+ MakePatternMaskBytesTable(
+ /*index_offset=*/0,
+ /*pattern_sizes_minus_one=*/make_index_sequence<16>());
+
+// Similar to 'pattern_generation_masks', this table is used to "rotate" the
+// pattern so that we can copy the *next 16 bytes* consistent with the pattern.
+// Basically, pattern_reshuffle_masks is a continuation of
+// pattern_generation_masks. It follows that, pattern_reshuffle_masks is same as
+// pattern_generation_masks for offsets 1, 2, 4, 8 and 16.
+alignas(16) constexpr std::array<std::array<char, sizeof(__m128i)>,
+ 16> pattern_reshuffle_masks =
+ MakePatternMaskBytesTable(
+ /*index_offset=*/16,
+ /*pattern_sizes_minus_one=*/make_index_sequence<16>());
+
+SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+static inline __m128i LoadPattern(const char* src, const size_t pattern_size) {
+ __m128i generation_mask = _mm_load_si128(reinterpret_cast<const __m128i*>(
+ pattern_generation_masks[pattern_size - 1].data()));
+ // Uninitialized bytes are masked out by the shuffle mask.
+ // TODO: remove annotation and macro defs once MSan is fixed.
+ SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size);
+ return _mm_shuffle_epi8(
+ _mm_loadu_si128(reinterpret_cast<const __m128i*>(src)), generation_mask);
+}
+
+SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+static inline std::pair<__m128i /* pattern */, __m128i /* reshuffle_mask */>
+LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) {
+ __m128i pattern = LoadPattern(src, pattern_size);
+
+ // This mask will generate the next 16 bytes in-place. Doing so enables us to
+ // write data by at most 4 _mm_storeu_si128.
+ //
+ // For example, suppose pattern is: abcdefabcdefabcd
+ // Shuffling with this mask will generate: efabcdefabcdefab
+ // Shuffling again will generate: cdefabcdefabcdef
+ __m128i reshuffle_mask = _mm_load_si128(reinterpret_cast<const __m128i*>(
+ pattern_reshuffle_masks[pattern_size - 1].data()));
+ return {pattern, reshuffle_mask};
+}
#endif // SNAPPY_HAVE_SSSE3
-// Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) but faster than
+// Fallback for when we need to copy while extending the pattern, for example
+// copying 10 bytes from 3 positions back abc -> abcabcabcabca.
+//
+// REQUIRES: [dst - offset, dst + 64) is a valid address range.
+SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
+#if SNAPPY_HAVE_SSSE3
+ if (SNAPPY_PREDICT_TRUE(offset <= 16)) {
+ switch (offset) {
+ case 0:
+ return false;
+ case 1: {
+ std::memset(dst, dst[-1], 64);
+ return true;
+ }
+ case 2:
+ case 4:
+ case 8:
+ case 16: {
+ __m128i pattern = LoadPattern(dst - offset, offset);
+ for (int i = 0; i < 4; i++) {
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst + 16 * i), pattern);
+ }
+ return true;
+ }
+ default: {
+ auto pattern_and_reshuffle_mask =
+ LoadPatternAndReshuffleMask(dst - offset, offset);
+ __m128i pattern = pattern_and_reshuffle_mask.first;
+ __m128i reshuffle_mask = pattern_and_reshuffle_mask.second;
+ for (int i = 0; i < 4; i++) {
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst + 16 * i), pattern);
+ pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
+ }
+ return true;
+ }
+ }
+ }
+#else
+ if (SNAPPY_PREDICT_TRUE(offset < 16)) {
+ if (SNAPPY_PREDICT_FALSE(offset == 0)) return false;
+ // Extend the pattern to the first 16 bytes.
+ for (int i = 0; i < 16; i++) dst[i] = dst[i - offset];
+ // Find a multiple of pattern >= 16.
+ static std::array<uint8_t, 16> pattern_sizes = []() {
+ std::array<uint8_t, 16> res;
+ for (int i = 1; i < 16; i++) res[i] = (16 / i + 1) * i;
+ return res;
+ }();
+ offset = pattern_sizes[offset];
+ for (int i = 1; i < 4; i++) {
+ std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
+ }
+ return true;
+ }
+#endif // SNAPPY_HAVE_SSSE3
+
+ // Very rare.
+ for (int i = 0; i < 4; i++) {
+ std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
+ }
+ return true;
+}
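// Worked example for the non-SSE path above: for offset == 3 the byte loop
// writes dst[0..15], and pattern_sizes[3] == (16 / 3 + 1) * 3 == 18, a
// multiple of 3 that is >= 16. Each later 16-byte memcpy therefore reads from
// 18 bytes back: the source never overlaps its destination, and because 18 is
// a multiple of 3 the copied bytes continue the 3-byte pattern.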
+
+// Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than
// IncrementalCopySlow. buf_limit is the address past the end of the writable
// region of the buffer.
inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
char* const buf_limit) {
+#if SNAPPY_HAVE_SSSE3
+ constexpr int big_pattern_size_lower_bound = 16;
+#else
+ constexpr int big_pattern_size_lower_bound = 8;
+#endif
+
// Terminology:
//
// slop = buf_limit - op
// pat = op - src
- // len = limit - op
+ // len = op_limit - op
assert(src < op);
- assert(op <= op_limit);
+ assert(op < op_limit);
assert(op_limit <= buf_limit);
- // NOTE: The compressor always emits 4 <= len <= 64. It is ok to assume that
- // to optimize this function but we have to also handle other cases in case
- // the input does not satisfy these conditions.
+ // NOTE: The copy tags use 3 or 6 bits to store the copy length, so len <= 64.
+ assert(op_limit - op <= 64);
+ // NOTE: In practice the compressor always emits len >= 4, so it is ok to
+ // assume that to optimize this function, but this is not guaranteed by the
+ // compression format, so we have to also handle len < 4 in case the input
+ // does not satisfy these conditions.
size_t pattern_size = op - src;
// The cases are split into different branches to allow the branch predictor,
@@ -217,11 +419,13 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
// input. In general if we always predict len <= 16 it would be an ok
// prediction.
//
- // In order to be fast we want a pattern >= 8 bytes and an unrolled loop
- // copying 2x 8 bytes at a time.
+ // In order to be fast we want a pattern >= 16 bytes (or 8 bytes in non-SSE)
+ // and an unrolled loop copying 1x 16 bytes (or 2x 8 bytes in non-SSE) at a
+ // time.
- // Handle the uncommon case where pattern is less than 8 bytes.
- if (SNAPPY_PREDICT_FALSE(pattern_size < 8)) {
+ // Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE)
+ // bytes.
+ if (pattern_size < big_pattern_size_lower_bound) {
#if SNAPPY_HAVE_SSSE3
    // Load the first eight bytes into a 128-bit XMM register, then use PSHUFB
// to permute the register's contents in-place into a repeating sequence of
@@ -235,25 +439,58 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
// The non-SSE fallback implementation suffers from store-forwarding stalls
// because its loads and stores partly overlap. By expanding the pattern
// in-place, we avoid the penalty.
- if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 16)) {
- const __m128i shuffle_mask = _mm_load_si128(
- reinterpret_cast<const __m128i*>(pshufb_fill_patterns)
- + pattern_size - 1);
- const __m128i pattern = _mm_shuffle_epi8(
- _mm_loadl_epi64(reinterpret_cast<const __m128i*>(src)), shuffle_mask);
- // Uninitialized bytes are masked out by the shuffle mask.
- // TODO: remove annotation and macro defs once MSan is fixed.
- SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(&pattern, sizeof(pattern));
- pattern_size *= 16 / pattern_size;
- char* op_end = std::min(op_limit, buf_limit - 15);
- while (op < op_end) {
- _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
- op += pattern_size;
+
+ // Typically, the op_limit is the gating factor so try to simplify the loop
+ // based on that.
+ if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
+ auto pattern_and_reshuffle_mask =
+ LoadPatternAndReshuffleMask(src, pattern_size);
+ __m128i pattern = pattern_and_reshuffle_mask.first;
+ __m128i reshuffle_mask = pattern_and_reshuffle_mask.second;
+
+ // There is at least one, and at most four 16-byte blocks. Writing four
+ // conditionals instead of a loop allows FDO to layout the code with
+ // respect to the actual probabilities of each length.
+ // TODO: Replace with loop with trip count hint.
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
+
+ if (op + 16 < op_limit) {
+ pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 16), pattern);
}
- if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
+ if (op + 32 < op_limit) {
+ pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 32), pattern);
+ }
+ if (op + 48 < op_limit) {
+ pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(op + 48), pattern);
+ }
+ return op_limit;
}
- return IncrementalCopySlow(src, op, op_limit);
-#else // !SNAPPY_HAVE_SSSE3
+ char* const op_end = buf_limit - 15;
+ if (SNAPPY_PREDICT_TRUE(op < op_end)) {
+ auto pattern_and_reshuffle_mask =
+ LoadPatternAndReshuffleMask(src, pattern_size);
+ __m128i pattern = pattern_and_reshuffle_mask.first;
+ __m128i reshuffle_mask = pattern_and_reshuffle_mask.second;
+
+ // This code path is relatively cold however so we save code size
+ // by avoiding unrolling and vectorizing.
+ //
+      // TODO: Remove pragma when cold regions don't get
+ // vectorized or unrolled.
+#ifdef __clang__
+#pragma clang loop unroll(disable)
+#endif
+ do {
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
+ pattern = _mm_shuffle_epi8(pattern, reshuffle_mask);
+ op += 16;
+ } while (SNAPPY_PREDICT_TRUE(op < op_end));
+ }
+ return IncrementalCopySlow(op - pattern_size, op, op_limit);
+#else // !SNAPPY_HAVE_SSSE3
// If plenty of buffer space remains, expand the pattern to at least 8
// bytes. The way the following loop is written, we need 8 bytes of buffer
// space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
@@ -272,34 +509,30 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
}
#endif // SNAPPY_HAVE_SSSE3
}
- assert(pattern_size >= 8);
+ assert(pattern_size >= big_pattern_size_lower_bound);
+ constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16;
- // Copy 2x 8 bytes at a time. Because op - src can be < 16, a single
- // UnalignedCopy128 might overwrite data in op. UnalignedCopy64 is safe
- // because expanding the pattern to at least 8 bytes guarantees that
- // op - src >= 8.
+ // Copy 1x 16 bytes (or 2x 8 bytes in non-SSE) at a time. Because op - src can
+ // be < 16 in non-SSE, a single UnalignedCopy128 might overwrite data in op.
+ // UnalignedCopy64 is safe because expanding the pattern to at least 8 bytes
+ // guarantees that op - src >= 8.
//
// Typically, the op_limit is the gating factor so try to simplify the loop
// based on that.
- if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 16)) {
+ if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
// There is at least one, and at most four 16-byte blocks. Writing four
// conditionals instead of a loop allows FDO to layout the code with respect
// to the actual probabilities of each length.
// TODO: Replace with loop with trip count hint.
- UnalignedCopy64(src, op);
- UnalignedCopy64(src + 8, op + 8);
-
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
if (op + 16 < op_limit) {
- UnalignedCopy64(src + 16, op + 16);
- UnalignedCopy64(src + 24, op + 24);
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 16, op + 16);
}
if (op + 32 < op_limit) {
- UnalignedCopy64(src + 32, op + 32);
- UnalignedCopy64(src + 40, op + 40);
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 32, op + 32);
}
if (op + 48 < op_limit) {
- UnalignedCopy64(src + 48, op + 48);
- UnalignedCopy64(src + 56, op + 56);
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 48, op + 48);
}
return op_limit;
}
@@ -313,12 +546,10 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
#ifdef __clang__
#pragma clang loop unroll(disable)
#endif
- for (char *op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
- UnalignedCopy64(src, op);
- UnalignedCopy64(src + 8, op + 8);
+ for (char* op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
}
- if (op >= op_limit)
- return op_limit;
+ if (op >= op_limit) return op_limit;
// We only take this branch if we didn't have enough slop and we can do a
// single 8 byte copy.
@@ -333,11 +564,9 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
} // namespace
template <bool allow_fast_path>
-static inline char* EmitLiteral(char* op,
- const char* literal,
- int len) {
+static inline char* EmitLiteral(char* op, const char* literal, int len) {
// The vast majority of copies are below 16 bytes, for which a
- // call to memcpy is overkill. This fast path can sometimes
+ // call to std::memcpy() is overkill. This fast path can sometimes
// copy up to 15 bytes too much, but that is okay in the
// main loop, since we have a bit to go on for both sides:
//
@@ -346,7 +575,7 @@ static inline char* EmitLiteral(char* op,
// if not, allow_fast_path = false.
// - The output will always have 32 spare bytes (see
// MaxCompressedLength).
- assert(len > 0); // Zero-length literals are disallowed
+ assert(len > 0); // Zero-length literals are disallowed
int n = len - 1;
if (allow_fast_path && len <= 16) {
// Fits in tag byte
@@ -367,11 +596,11 @@ static inline char* EmitLiteral(char* op,
// Encode in upcoming bytes.
// Write 4 bytes, though we may care about only 1 of them. The output buffer
// is guaranteed to have at least 3 more spaces left as 'len >= 61' holds
- // here and there is a memcpy of size 'len' below.
+ // here and there is a std::memcpy() of size 'len' below.
LittleEndian::Store32(op, n);
op += count;
}
- memcpy(op, literal, len);
+ std::memcpy(op, literal, len);
return op + len;
}
@@ -382,15 +611,22 @@ static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
assert(offset < 65536);
assert(len_less_than_12 == (len < 12));
- if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) {
- // offset fits in 11 bits. The 3 highest go in the top of the first byte,
- // and the rest go in the second byte.
- *op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
- *op++ = offset & 0xff;
+ if (len_less_than_12) {
+ uint32_t u = (len << 2) + (offset << 8);
+ uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0);
+ uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2);
+ // It turns out that offset < 2048 is a difficult to predict branch.
+ // `perf record` shows this is the highest percentage of branch misses in
+ // benchmarks. This code produces branch free code, the data dependency
+ // chain that bottlenecks the throughput is so long that a few extra
+ // instructions are completely free (IPC << 6 because of data deps).
+ u += offset < 2048 ? copy1 : copy2;
+ LittleEndian::Store32(op, u);
+ op += offset < 2048 ? 2 : 3;
} else {
// Write 4 bytes, though we only care about 3 of them. The output buffer
// is required to have some slack, so the extra byte won't overrun it.
- uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
+ uint32_t u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
LittleEndian::Store32(op, u);
op += 3;
}
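  // Worked example of the branch-free encoding above, using the standard tag
  // values (LITERAL == 0, COPY_1_BYTE_OFFSET == 1, COPY_2_BYTE_OFFSET == 2):
  // for len == 7 and offset == 100 we get u = (7 << 2) + (100 << 8) and add
  // copy1, so the two stored bytes are 0x0d == 1 | ((7 - 4) << 2) and
  // 0x64 == 100, i.e. exactly the classic copy-1 tag. For offset >= 2048
  // adding copy2 instead yields the usual 3-byte copy-2 encoding.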
@@ -429,7 +665,7 @@ static inline char* EmitCopy(char* op, size_t offset, size_t len) {
}
bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
- uint32 v = 0;
+ uint32_t v = 0;
const char* limit = start + n;
if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
*result = v;
@@ -440,7 +676,7 @@ bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
}
namespace {
-uint32 CalculateTableSize(uint32 input_size) {
+uint32_t CalculateTableSize(uint32_t input_size) {
static_assert(
kMaxHashTableSize >= kMinHashTableSize,
"kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
@@ -463,7 +699,7 @@ WorkingMemory::WorkingMemory(size_t input_size) {
size_ = table_size * sizeof(*table_) + max_fragment_size +
MaxCompressedLength(max_fragment_size);
mem_ = std::allocator<char>().allocate(size_);
- table_ = reinterpret_cast<uint16*>(mem_);
+ table_ = reinterpret_cast<uint16_t*>(mem_);
input_ = mem_ + table_size * sizeof(*table_);
output_ = input_ + max_fragment_size;
}
@@ -472,8 +708,8 @@ WorkingMemory::~WorkingMemory() {
std::allocator<char>().deallocate(mem_, size_);
}
-uint16* WorkingMemory::GetHashTable(size_t fragment_size,
- int* table_size) const {
+uint16_t* WorkingMemory::GetHashTable(size_t fragment_size,
+ int* table_size) const {
const size_t htsize = CalculateTableSize(fragment_size);
memset(table_, 0, htsize * sizeof(*table_));
*table_size = htsize;
@@ -481,49 +717,6 @@ uint16* WorkingMemory::GetHashTable(size_t fragment_size,
}
} // end namespace internal
-// For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
-// equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
-// empirically found that overlapping loads such as
-// UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
-// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
-//
-// We have different versions for 64- and 32-bit; ideally we would avoid the
-// two functions and just inline the UNALIGNED_LOAD64 call into
-// GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
-// enough to avoid loading the value multiple times then. For 64-bit, the load
-// is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
-// done at GetUint32AtOffset() time.
-
-#ifdef ARCH_K8
-
-typedef uint64 EightBytesReference;
-
-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
- return UNALIGNED_LOAD64(ptr);
-}
-
-static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
- assert(offset >= 0);
- assert(offset <= 4);
- return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
-}
-
-#else
-
-typedef const char* EightBytesReference;
-
-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
- return ptr;
-}
-
-static inline uint32 GetUint32AtOffset(const char* v, int offset) {
- assert(offset >= 0);
- assert(offset <= 4);
- return UNALIGNED_LOAD32(v + offset);
-}
-
-#endif
-
// Flat array compression that does not emit the "uncompressed length"
// prefix. Compresses "input" string to the "*op" buffer.
//
@@ -536,29 +729,25 @@ static inline uint32 GetUint32AtOffset(const char* v, int offset) {
// Returns an "end" pointer into "op" buffer.
// "end - op" is the compressed size of "input".
namespace internal {
-char* CompressFragment(const char* input,
- size_t input_size,
- char* op,
- uint16* table,
- const int table_size) {
+char* CompressFragment(const char* input, size_t input_size, char* op,
+ uint16_t* table, const int table_size) {
// "ip" is the input pointer, and "op" is the output pointer.
const char* ip = input;
assert(input_size <= kBlockSize);
assert((table_size & (table_size - 1)) == 0); // table must be power of two
- const int shift = 32 - Bits::Log2Floor(table_size);
- assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
+ const uint32_t mask = table_size - 1;
const char* ip_end = input + input_size;
const char* base_ip = ip;
- // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
- // [next_emit, ip_end) after the main loop.
- const char* next_emit = ip;
const size_t kInputMarginBytes = 15;
if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
const char* ip_limit = input + input_size - kInputMarginBytes;
- for (uint32 next_hash = Hash(++ip, shift); ; ) {
- assert(next_emit < ip);
+ for (uint32_t preload = LittleEndian::Load32(ip + 1);;) {
+ // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+ // [next_emit, ip_end) after the main loop.
+ const char* next_emit = ip++;
+ uint64_t data = LittleEndian::Load64(ip);
// The body of this loop calls EmitLiteral once and then EmitCopy one or
// more times. (The exception is that when we're close to exhausting
// the input we goto emit_remainder.)
@@ -584,28 +773,60 @@ char* CompressFragment(const char* input,
// The "skip" variable keeps track of how many bytes there are since the
// last match; dividing it by 32 (ie. right-shifting by five) gives the
// number of bytes to move ahead for each iteration.
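      // For example, while skip is below 64 each probe advances ip by one
      // byte (skip >> 5 == 1); once skip reaches 64 the stride becomes 2,
      // then 3 at 96, and so on, so the scan accelerates on incompressible
      // input.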
- uint32 skip = 32;
+ uint32_t skip = 32;
- const char* next_ip = ip;
const char* candidate;
- do {
- ip = next_ip;
- uint32 hash = next_hash;
- assert(hash == Hash(ip, shift));
- uint32 bytes_between_hash_lookups = skip >> 5;
+ if (ip_limit - ip >= 16) {
+ auto delta = ip - base_ip;
+ for (int j = 0; j < 4; ++j) {
+ for (int k = 0; k < 4; ++k) {
+ int i = 4 * j + k;
+ // These for-loops are meant to be unrolled. So we can freely
+ // special case the first iteration to use the value already
+ // loaded in preload.
+ uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data);
+ assert(dword == LittleEndian::Load32(ip + i));
+ uint32_t hash = HashBytes(dword, mask);
+ candidate = base_ip + table[hash];
+ assert(candidate >= base_ip);
+ assert(candidate < ip + i);
+ table[hash] = delta + i;
+ if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) {
+ *op = LITERAL | (i << 2);
+ UnalignedCopy128(next_emit, op + 1);
+ ip += i;
+ op = op + i + 2;
+ goto emit_match;
+ }
+ data >>= 8;
+ }
+ data = LittleEndian::Load64(ip + 4 * j + 4);
+ }
+ ip += 16;
+ skip += 16;
+ }
+ while (true) {
+ assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
+ uint32_t hash = HashBytes(data, mask);
+ uint32_t bytes_between_hash_lookups = skip >> 5;
skip += bytes_between_hash_lookups;
- next_ip = ip + bytes_between_hash_lookups;
+ const char* next_ip = ip + bytes_between_hash_lookups;
if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
+ ip = next_emit;
goto emit_remainder;
}
- next_hash = Hash(next_ip, shift);
candidate = base_ip + table[hash];
assert(candidate >= base_ip);
assert(candidate < ip);
table[hash] = ip - base_ip;
- } while (SNAPPY_PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
- UNALIGNED_LOAD32(candidate)));
+ if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
+ LittleEndian::Load32(candidate))) {
+ break;
+ }
+ data = LittleEndian::Load32(next_ip);
+ ip = next_ip;
+ }
// Step 2: A 4-byte match has been found. We'll later see if more
// than 4 bytes match. But, prior to the match, input
@@ -621,15 +842,13 @@ char* CompressFragment(const char* input,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can exit
// this loop via goto if we get close to exhausting the input.
- EightBytesReference input_bytes;
- uint32 candidate_bytes = 0;
-
+ emit_match:
do {
// We have a 4-byte match at ip, and no need to emit any
// "literal bytes" prior to ip.
const char* base = ip;
std::pair<size_t, bool> p =
- FindMatchLength(candidate + 4, ip + 4, ip_end);
+ FindMatchLength(candidate + 4, ip + 4, ip_end, &data);
size_t matched = 4 + p.first;
ip += matched;
size_t offset = base - candidate;
@@ -639,32 +858,40 @@ char* CompressFragment(const char* input,
} else {
op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
}
- next_emit = ip;
if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
goto emit_remainder;
}
+ // Expect 5 bytes to match
+ assert((data & 0xFFFFFFFFFF) ==
+ (LittleEndian::Load64(ip) & 0xFFFFFFFFFF));
// We are now looking for a 4-byte match again. We read
// table[Hash(ip, shift)] for that. To improve compression,
- // we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)].
- input_bytes = GetEightBytesAt(ip - 1);
- uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
- table[prev_hash] = ip - base_ip - 1;
- uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
- candidate = base_ip + table[cur_hash];
- candidate_bytes = UNALIGNED_LOAD32(candidate);
- table[cur_hash] = ip - base_ip;
- } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
-
- next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
- ++ip;
+ // we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)].
+ table[HashBytes(LittleEndian::Load32(ip - 1), mask)] = ip - base_ip - 1;
+ uint32_t hash = HashBytes(data, mask);
+ candidate = base_ip + table[hash];
+ table[hash] = ip - base_ip;
+ // Measurements on the benchmarks have shown the following probabilities
+ // for the loop to exit (ie. avg. number of iterations is reciprocal).
+ // BM_Flat/6 txt1 p = 0.3-0.4
+ // BM_Flat/7 txt2 p = 0.35
+ // BM_Flat/8 txt3 p = 0.3-0.4
+ // BM_Flat/9 txt3 p = 0.34-0.4
+ // BM_Flat/10 pb p = 0.4
+ // BM_Flat/11 gaviota p = 0.1
+ // BM_Flat/12 cp p = 0.5
+ // BM_Flat/13 c p = 0.3
+ } while (static_cast<uint32_t>(data) == LittleEndian::Load32(candidate));
+ // Because the least significant 5 bytes matched, we can utilize data
+ // for the next iteration.
+ preload = data >> 8;
}
}
- emit_remainder:
+emit_remainder:
// Emit the remaining bytes as a literal
- if (next_emit < ip_end) {
- op = EmitLiteral</*allow_fast_path=*/false>(op, next_emit,
- ip_end - next_emit);
+ if (ip < ip_end) {
+ op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip);
}
return op;
@@ -673,7 +900,12 @@ char* CompressFragment(const char* input,
// Called back at every compression call to trace parameters and sizes.
static inline void Report(const char *algorithm, size_t compressed_size,
- size_t uncompressed_size) {}
+ size_t uncompressed_size) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)algorithm;
+ (void)compressed_size;
+ (void)uncompressed_size;
+}
// Signature of output types needed by decompression code.
// The decompression code is templatized on a type that obeys this
@@ -685,12 +917,28 @@ static inline void Report(const char *algorithm, size_t compressed_size,
// // Called before decompression
// void SetExpectedLength(size_t length);
//
+// // For performance a writer may choose to donate the cursor variable to the
+// // decompression function. The decompression will inject it in all its
+// // function calls to the writer. Keeping the important output cursor as a
+// // function local stack variable allows the compiler to keep it in
+// // register, which greatly aids performance by avoiding loads and stores of
+// // this variable in the fast path loop iterations.
+// T GetOutputPtr() const;
+//
+// // At end of decompression the loop donates the ownership of the cursor
+// // variable back to the writer by calling this function.
+// void SetOutputPtr(T op);
+//
// // Called after decompression
// bool CheckLength() const;
//
// // Called repeatedly during decompression
-// bool Append(const char* ip, size_t length);
-// bool AppendFromSelf(uint32 offset, size_t length);
+// // Each function gets a pointer to the op (output pointer) that the writer
+// // can use and update. Note it's important that these functions get fully
+// // inlined so that no actual address of the local variable needs to be
+// // taken.
+// bool Append(const char* ip, size_t length, T* op);
+// bool AppendFromSelf(uint32_t offset, size_t length, T* op);
//
// // The rules for how TryFastAppend differs from Append are somewhat
// // convoluted:
@@ -712,25 +960,25 @@ static inline void Report(const char *algorithm, size_t compressed_size,
// // as it is unlikely that one would implement a fast path accepting
// // this much data.
// //
-// bool TryFastAppend(const char* ip, size_t available, size_t length);
+// bool TryFastAppend(const char* ip, size_t available, size_t length, T* op);
// };
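// // (In this file SnappyArrayWriter and SnappyScatteredWriter implement this
// // contract with T = char*, SnappyIOVecWriter accepts but ignores the
// // donated pointer, and SnappyDecompressionValidator uses T = size_t to
// // merely count bytes.)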
-static inline uint32 ExtractLowBytes(uint32 v, int n) {
+static inline uint32_t ExtractLowBytes(uint32_t v, int n) {
assert(n >= 0);
assert(n <= 4);
#if SNAPPY_HAVE_BMI2
return _bzhi_u32(v, 8 * n);
#else
- // This needs to be wider than uint32 otherwise `mask << 32` will be
+ // This needs to be wider than uint32_t otherwise `mask << 32` will be
// undefined.
- uint64 mask = 0xffffffff;
+ uint64_t mask = 0xffffffff;
return v & ~(mask << (8 * n));
#endif
}
-static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
+static inline bool LeftShiftOverflows(uint8_t value, uint32_t shift) {
assert(shift < 32);
- static const uint8 masks[] = {
+ static const uint8_t masks[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
@@ -738,15 +986,194 @@ static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
return (value & masks[shift]) != 0;
}
+inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)dst;
+ return offset != 0;
+}
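// The ptrdiff_t overload above, like the MemCopy/MemMove overloads below, is
// the "writing to nothing" flavor used when the output is only being counted
// (see SnappyDecompressionValidator further down): nothing is copied, and the
// only thing that can fail is the invalid zero offset.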
+
+void MemCopy(char* dst, const uint8_t* src, size_t size) {
+ std::memcpy(dst, src, size);
+}
+
+void MemCopy(ptrdiff_t dst, const uint8_t* src, size_t size) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)dst;
+ (void)src;
+ (void)size;
+}
+
+void MemMove(char* dst, const void* src, size_t size) {
+ std::memmove(dst, src, size);
+}
+
+void MemMove(ptrdiff_t dst, const void* src, size_t size) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)dst;
+ (void)src;
+ (void)size;
+}
+
+SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+size_t AdvanceToNextTag(const uint8_t** ip_p, size_t* tag) {
+ const uint8_t*& ip = *ip_p;
+ // This section is crucial for the throughput of the decompression loop.
+ // The latency of an iteration is fundamentally constrained by the
+ // following data chain on ip.
+ // ip -> c = Load(ip) -> ip1 = ip + 1 + (c & 3) -> ip = ip1 or ip2
+ // ip2 = ip + 2 + (c >> 2)
+ // This amounts to 8 cycles.
+ // 5 (load) + 1 (c & 3) + 1 (lea ip1, [ip + (c & 3) + 1]) + 1 (cmov)
+ size_t literal_len = *tag >> 2;
+ size_t tag_type = *tag;
+ bool is_literal;
+#if defined(__GNUC__) && defined(__x86_64__)
+ // TODO clang misses the fact that the (c & 3) already correctly
+ // sets the zero flag.
+ asm("and $3, %k[tag_type]\n\t"
+ : [tag_type] "+r"(tag_type), "=@ccz"(is_literal));
+#else
+ tag_type &= 3;
+ is_literal = (tag_type == 0);
+#endif
+ // TODO
+  // This code is subtle. Loading the values first and then cmov has less
+  // latency than cmov ip and then load. However, clang would move the loads
+  // in an optimization phase; volatile prevents this transformation.
+ // Note that we have enough slop bytes (64) that the loads are always valid.
+ size_t tag_literal =
+ static_cast<const volatile uint8_t*>(ip)[1 + literal_len];
+ size_t tag_copy = static_cast<const volatile uint8_t*>(ip)[tag_type];
+ *tag = is_literal ? tag_literal : tag_copy;
+ const uint8_t* ip_copy = ip + 1 + tag_type;
+ const uint8_t* ip_literal = ip + 2 + literal_len;
+ ip = is_literal ? ip_literal : ip_copy;
+#if defined(__GNUC__) && defined(__x86_64__)
+  // TODO Clang is "optimizing" away the zero-extension (a totally free
+  // operation); this means that after the cmov of tag, it emits another movzb
+  // tag, byte(tag). It really matters as it's on the core chain. This dummy
+  // asm persuades clang to do the zero-extension at the load (it's automatic),
+  // removing the expensive movzb.
+ asm("" ::"r"(tag_copy));
+#endif
+ return tag_type;
+}
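// Worked example of the pointer bookkeeping above (the low two bits of a tag
// byte select its type): on entry ip points just past the current tag byte.
// For tag == 0x08 the type is LITERAL and literal_len == 2, so a 3-byte
// literal follows, the next tag byte is ip[1 + 2] and ip advances by 2 + 2.
// For tag == 0xee the type is COPY_2_BYTE_OFFSET (2), so the next tag byte is
// ip[2] and ip advances by 1 + 2.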
+
+// Extract the offset for copy-1 and copy-2 returns 0 for literals or copy-4.
+inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) {
+ return val & table.extract_masks[tag_type];
+};
+
+// Core decompression loop, when there is enough data available.
+// Decompresses the input buffer [ip, ip_limit) into the output buffer
+// [op, op_limit_min_slop). It returns when we are too close to the end of the
+// input buffer, when we exceed op_limit_min_slop, or when an exceptional tag
+// is encountered (a literal of length > 60 or a copy-4).
+// Returns {ip, op} at the points it stopped decoding.
+// TODO This function probably does not need to be inlined, as it
+// should decode large chunks at a time. This allows runtime dispatch to
+// implementations based on CPU capability (BMI2 / perhaps 32 / 64 byte memcpy).
+template <typename T>
+std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
+ const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base,
+ ptrdiff_t op_limit_min_slop) {
+ // We unroll the inner loop twice so we need twice the spare room.
+ op_limit_min_slop -= kSlopBytes;
+ if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) {
+ const uint8_t* const ip_limit_min_slop = ip_limit - 2 * kSlopBytes - 1;
+ ip++;
+ // ip points just past the tag and we are touching at maximum kSlopBytes
+ // in an iteration.
+ size_t tag = ip[-1];
+ do {
+      // The throughput is limited by instructions; unrolling the inner loop
+      // twice reduces the number of instructions checking limits and also
+      // leads to fewer movs.
+ for (int i = 0; i < 2; i++) {
+ const uint8_t* old_ip = ip;
+ assert(tag == ip[-1]);
+ // For literals tag_type = 0, hence we will always obtain 0 from
+ // ExtractLowBytes. For literals offset will thus be kLiteralOffset.
+ ptrdiff_t len_min_offset = table.length_minus_offset[tag];
+ size_t tag_type = AdvanceToNextTag(&ip, &tag);
+ uint32_t next = LittleEndian::Load32(old_ip);
+ size_t len = len_min_offset & 0xFF;
+ len_min_offset -= ExtractOffset(next, tag_type);
+ if (SNAPPY_PREDICT_FALSE(len_min_offset > 0)) {
+ if (SNAPPY_PREDICT_FALSE(len & 0x80)) {
+ // Exceptional case (long literal or copy 4).
+ // Actually doing the copy here is negatively impacting the main
+ // loop due to compiler incorrectly allocating a register for
+ // this fallback. Hence we just break.
+ break_loop:
+ ip = old_ip;
+ goto exit;
+ }
+ // Only copy-1 or copy-2 tags can get here.
+ assert(tag_type == 1 || tag_type == 2);
+ std::ptrdiff_t delta = op + len_min_offset - len;
+ // Guard against copies before the buffer start.
+ if (SNAPPY_PREDICT_FALSE(delta < 0 ||
+ !Copy64BytesWithPatternExtension(
+ op_base + op, len - len_min_offset))) {
+ goto break_loop;
+ }
+ op += len;
+ continue;
+ }
+ std::ptrdiff_t delta = op + len_min_offset - len;
+ if (SNAPPY_PREDICT_FALSE(delta < 0)) {
+#if defined(__GNUC__) && defined(__x86_64__)
+ // TODO
+ // When validating, both code path reduced to `op += len`. Ie. this
+ // becomes effectively
+ //
+ // if (delta < 0) if (tag_type != 0) goto break_loop;
+ // op += len;
+ //
+ // The compiler interchanges the predictable and almost always false
+ // first if-statement with the completely unpredictable second
+ // if-statement, putting an unpredictable branch on every iteration.
+ // This empty asm is worth almost 2x, which I think qualifies for an
+ // award for the most load-bearing empty statement.
+ asm("");
+#endif
+
+          // Due to the spurious offset that literals have, this will trigger
+          // at the start of a block when op is still smaller than 256.
+ if (tag_type != 0) goto break_loop;
+ MemCopy(op_base + op, old_ip, 64);
+ op += len;
+ continue;
+ }
+
+ // For copies we need to copy from op_base + delta, for literals
+ // we need to copy from ip instead of from the stream.
+ const void* from =
+ tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip;
+ MemMove(op_base + op, from, 64);
+ op += len;
+ }
+ } while (ip < ip_limit_min_slop && op < op_limit_min_slop);
+ exit:
+ ip--;
+ assert(ip <= ip_limit);
+ }
+ return {ip, op};
+}
+
// Helper class for decompression
class SnappyDecompressor {
private:
- Source* reader_; // Underlying source of bytes to decompress
- const char* ip_; // Points to next buffered byte
- const char* ip_limit_; // Points just past buffered bytes
- uint32 peeked_; // Bytes peeked from reader (need to skip)
- bool eof_; // Hit end of input without an error?
- char scratch_[kMaximumTagLength]; // See RefillTag().
+ Source* reader_; // Underlying source of bytes to decompress
+ const char* ip_; // Points to next buffered byte
+ const char* ip_limit_; // Points just past buffered bytes
+ // If ip < ip_limit_min_maxtaglen_ it's safe to read kMaxTagLength from
+ // buffer.
+ const char* ip_limit_min_maxtaglen_;
+ uint32_t peeked_; // Bytes peeked from reader (need to skip)
+ bool eof_; // Hit end of input without an error?
+ char scratch_[kMaximumTagLength]; // See RefillTag().
// Ensure that all of the tag metadata for the next tag is available
// in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
@@ -755,14 +1182,14 @@ class SnappyDecompressor {
// Returns true on success, false on error or end of input.
bool RefillTag();
+ void ResetLimit(const char* ip) {
+ ip_limit_min_maxtaglen_ =
+ ip_limit_ - std::min<ptrdiff_t>(ip_limit_ - ip, kMaximumTagLength - 1);
+ }
+
public:
explicit SnappyDecompressor(Source* reader)
- : reader_(reader),
- ip_(NULL),
- ip_limit_(NULL),
- peeked_(0),
- eof_(false) {
- }
+ : reader_(reader), ip_(NULL), ip_limit_(NULL), peeked_(0), eof_(false) {}
~SnappyDecompressor() {
// Advance past any bytes we peeked at from the reader
@@ -770,18 +1197,16 @@ class SnappyDecompressor {
}
// Returns true iff we have hit the end of the input without an error.
- bool eof() const {
- return eof_;
- }
+ bool eof() const { return eof_; }
// Read the uncompressed length stored at the start of the compressed data.
// On success, stores the length in *result and returns true.
// On failure, returns false.
- bool ReadUncompressedLength(uint32* result) {
- assert(ip_ == NULL); // Must not have read anything yet
+ bool ReadUncompressedLength(uint32_t* result) {
+ assert(ip_ == NULL); // Must not have read anything yet
// Length is encoded in 1..5 bytes
*result = 0;
- uint32 shift = 0;
+ uint32_t shift = 0;
while (true) {
if (shift >= 32) return false;
size_t n;
@@ -789,8 +1214,8 @@ class SnappyDecompressor {
if (n == 0) return false;
const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
reader_->Skip(1);
- uint32 val = c & 0x7f;
- if (LeftShiftOverflows(static_cast<uint8>(val), shift)) return false;
+ uint32_t val = c & 0x7f;
+ if (LeftShiftOverflows(static_cast<uint8_t>(val), shift)) return false;
*result |= val << shift;
if (c < 128) {
break;
@@ -806,38 +1231,44 @@ class SnappyDecompressor {
#if defined(__GNUC__) && defined(__x86_64__)
__attribute__((aligned(32)))
#endif
- void DecompressAllTags(Writer* writer) {
- // In x86, pad the function body to start 16 bytes later. This function has
- // a couple of hotspots that are highly sensitive to alignment: we have
- // observed regressions by more than 20% in some metrics just by moving the
- // exact same code to a different position in the benchmark binary.
- //
- // Putting this code on a 32-byte-aligned boundary + 16 bytes makes us hit
- // the "lucky" case consistently. Unfortunately, this is a very brittle
- // workaround, and future differences in code generation may reintroduce
- // this regression. If you experience a big, difficult to explain, benchmark
- // performance regression here, first try removing this hack.
-#if defined(__GNUC__) && defined(__x86_64__)
- // Two 8-byte "NOP DWORD ptr [EAX + EAX*1 + 00000000H]" instructions.
- asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
- asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
-#endif
-
+ void
+ DecompressAllTags(Writer* writer) {
const char* ip = ip_;
+ ResetLimit(ip);
+ auto op = writer->GetOutputPtr();
// We could have put this refill fragment only at the beginning of the loop.
// However, duplicating it at the end of each branch gives the compiler more
// scope to optimize the <ip_limit_ - ip> expression based on the local
// context, which overall increases speed.
- #define MAYBE_REFILL() \
- if (ip_limit_ - ip < kMaximumTagLength) { \
- ip_ = ip; \
- if (!RefillTag()) return; \
- ip = ip_; \
- }
-
+#define MAYBE_REFILL() \
+ if (SNAPPY_PREDICT_FALSE(ip >= ip_limit_min_maxtaglen_)) { \
+ ip_ = ip; \
+ if (SNAPPY_PREDICT_FALSE(!RefillTag())) goto exit; \
+ ip = ip_; \
+ ResetLimit(ip); \
+ } \
+ preload = static_cast<uint8_t>(*ip)
+
+ // At the start of the for loop below the least significant byte of preload
+ // contains the tag.
+ uint32_t preload;
MAYBE_REFILL();
- for ( ;; ) {
- const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
+ for (;;) {
+ {
+ ptrdiff_t op_limit_min_slop;
+ auto op_base = writer->GetBase(&op_limit_min_slop);
+ if (op_base) {
+ auto res =
+ DecompressBranchless(reinterpret_cast<const uint8_t*>(ip),
+ reinterpret_cast<const uint8_t*>(ip_limit_),
+ op - op_base, op_base, op_limit_min_slop);
+ ip = reinterpret_cast<const char*>(res.first);
+ op = op_base + res.second;
+ MAYBE_REFILL();
+ }
+ }
+ const uint8_t c = static_cast<uint8_t>(preload);
+ ip++;
// Ratio of iterations that have LITERAL vs non-LITERAL for different
// inputs.
@@ -853,12 +1284,13 @@ class SnappyDecompressor {
// bin 24% 76%
if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
size_t literal_length = (c >> 2) + 1u;
- if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
+ if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length, &op)) {
assert(literal_length < 61);
ip += literal_length;
// NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
// will not return true unless there's already at least five spare
// bytes in addition to the literal.
+ preload = static_cast<uint8_t>(*ip);
continue;
}
if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
@@ -872,48 +1304,79 @@ class SnappyDecompressor {
size_t avail = ip_limit_ - ip;
while (avail < literal_length) {
- if (!writer->Append(ip, avail)) return;
+ if (!writer->Append(ip, avail, &op)) goto exit;
literal_length -= avail;
reader_->Skip(peeked_);
size_t n;
ip = reader_->Peek(&n);
avail = n;
peeked_ = avail;
- if (avail == 0) return; // Premature end of input
+ if (avail == 0) goto exit;
ip_limit_ = ip + avail;
+ ResetLimit(ip);
}
- if (!writer->Append(ip, literal_length)) {
- return;
- }
+ if (!writer->Append(ip, literal_length, &op)) goto exit;
ip += literal_length;
MAYBE_REFILL();
} else {
- const size_t entry = char_table[c];
- const size_t trailer =
- ExtractLowBytes(LittleEndian::Load32(ip), entry >> 11);
- const size_t length = entry & 0xff;
- ip += entry >> 11;
-
- // copy_offset/256 is encoded in bits 8..10. By just fetching
- // those bits, we get copy_offset (since the bit-field starts at
- // bit 8).
- const size_t copy_offset = entry & 0x700;
- if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
- return;
+ if (SNAPPY_PREDICT_FALSE((c & 3) == COPY_4_BYTE_OFFSET)) {
+ const size_t copy_offset = LittleEndian::Load32(ip);
+ const size_t length = (c >> 2) + 1;
+ ip += 4;
+
+ if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
+ } else {
+ const ptrdiff_t entry = table.length_minus_offset[c];
+ preload = LittleEndian::Load32(ip);
+ const uint32_t trailer = ExtractLowBytes(preload, c & 3);
+ const uint32_t length = entry & 0xff;
+ assert(length > 0);
+
+ // copy_offset/256 is encoded in bits 8..10. By just fetching
+ // those bits, we get copy_offset (since the bit-field starts at
+ // bit 8).
+ const uint32_t copy_offset = trailer - entry + length;
+ if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
+
+ ip += (c & 3);
+ // By using the result of the previous load we reduce the critical
+ // dependency chain of ip to 4 cycles.
+ preload >>= (c & 3) * 8;
+ if (ip < ip_limit_min_maxtaglen_) continue;
}
MAYBE_REFILL();
}
}
-
#undef MAYBE_REFILL
+ exit:
+ writer->SetOutputPtr(op);
}
};
+constexpr uint32_t CalculateNeeded(uint8_t tag) {
+ return ((tag & 3) == 0 && tag >= (60 * 4))
+ ? (tag >> 2) - 58
+ : (0x05030201 >> ((tag * 8) & 31)) & 0xFF;
+}
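// Worked examples of the expression above: a copy-1 tag (tag & 3 == 1) gives
// (tag * 8) & 31 == 8, so (0x05030201 >> 8) & 0xFF == 2 (the tag plus a
// 1-byte offset); a copy-4 tag (tag & 3 == 3) gives 5; a long-literal tag
// with length code 62 (tag == 62 * 4) takes the first branch and gives
// 62 - 58 == 4, i.e. the tag plus 3 extra length bytes.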
+
+#if __cplusplus >= 201402L
+constexpr bool VerifyCalculateNeeded() {
+  for (int i = 0; i < 256; i++) {
+ if (CalculateNeeded(i) != (char_table[i] >> 11) + 1) return false;
+ }
+ return true;
+}
+
+// Make sure CalculateNeeded is correct by verifying it against the established
+// table encoding the number of added bytes needed.
+static_assert(VerifyCalculateNeeded(), "");
+#endif // c++14
+
bool SnappyDecompressor::RefillTag() {
const char* ip = ip_;
if (ip == ip_limit_) {
// Fetch a new fragment from the reader
- reader_->Skip(peeked_); // All peeked bytes are used up
+ reader_->Skip(peeked_); // All peeked bytes are used up
size_t n;
ip = reader_->Peek(&n);
peeked_ = n;
@@ -925,26 +1388,31 @@ bool SnappyDecompressor::RefillTag() {
// Read the tag character
assert(ip < ip_limit_);
const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
- const uint32 entry = char_table[c];
- const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
+ // At this point make sure that the data for the next tag is consecutive.
+ // For copy 1 this means the next 2 bytes (tag and 1 byte offset)
+ // For copy 2 the next 3 bytes (tag and 2 byte offset)
+ // For copy 4 the next 5 bytes (tag and 4 byte offset)
+  // For all small literals we only need 1 byte, but for literals 60...63 the
+ // length is encoded in 1...4 extra bytes.
+ const uint32_t needed = CalculateNeeded(c);
assert(needed <= sizeof(scratch_));
// Read more bytes from reader if needed
- uint32 nbuf = ip_limit_ - ip;
+ uint32_t nbuf = ip_limit_ - ip;
if (nbuf < needed) {
// Stitch together bytes from ip and reader to form the word
// contents. We store the needed bytes in "scratch_". They
// will be consumed immediately by the caller since we do not
// read more than we need.
- memmove(scratch_, ip, nbuf);
+ std::memmove(scratch_, ip, nbuf);
reader_->Skip(peeked_); // All peeked bytes are used up
peeked_ = 0;
while (nbuf < needed) {
size_t length;
const char* src = reader_->Peek(&length);
if (length == 0) return false;
- uint32 to_add = std::min<uint32>(needed - nbuf, length);
- memcpy(scratch_ + nbuf, src, to_add);
+ uint32_t to_add = std::min<uint32_t>(needed - nbuf, length);
+ std::memcpy(scratch_ + nbuf, src, to_add);
nbuf += to_add;
reader_->Skip(to_add);
}
@@ -954,7 +1422,7 @@ bool SnappyDecompressor::RefillTag() {
} else if (nbuf < kMaximumTagLength) {
// Have enough bytes, but move into scratch_ so that we do not
// read past end of input
- memmove(scratch_, ip, nbuf);
+ std::memmove(scratch_, ip, nbuf);
reader_->Skip(peeked_); // All peeked bytes are used up
peeked_ = 0;
ip_ = scratch_;
@@ -970,7 +1438,7 @@ template <typename Writer>
static bool InternalUncompress(Source* r, Writer* writer) {
// Read the uncompressed length from the front of the compressed input
SnappyDecompressor decompressor(r);
- uint32 uncompressed_len = 0;
+ uint32_t uncompressed_len = 0;
if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
return InternalUncompressAllTags(&decompressor, writer, r->Available(),
@@ -979,9 +1447,8 @@ static bool InternalUncompress(Source* r, Writer* writer) {
template <typename Writer>
static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
- Writer* writer,
- uint32 compressed_len,
- uint32 uncompressed_len) {
+ Writer* writer, uint32_t compressed_len,
+ uint32_t uncompressed_len) {
Report("snappy_uncompress", compressed_len, uncompressed_len);
writer->SetExpectedLength(uncompressed_len);
@@ -992,7 +1459,7 @@ static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
return (decompressor->eof() && writer->CheckLength());
}
-bool GetUncompressedLength(Source* source, uint32* result) {
+bool GetUncompressedLength(Source* source, uint32_t* result) {
SnappyDecompressor decompressor(source);
return decompressor.ReadUncompressedLength(result);
}
@@ -1003,7 +1470,7 @@ size_t Compress(Source* reader, Sink* writer) {
const size_t uncompressed_size = N;
char ulength[Varint::kMax32];
char* p = Varint::Encode32(ulength, N);
- writer->Append(ulength, p-ulength);
+ writer->Append(ulength, p - ulength);
written += (p - ulength);
internal::WorkingMemory wmem(N);
@@ -1023,13 +1490,13 @@ size_t Compress(Source* reader, Sink* writer) {
fragment_size = num_to_read;
} else {
char* scratch = wmem.GetScratchInput();
- memcpy(scratch, fragment, bytes_read);
+ std::memcpy(scratch, fragment, bytes_read);
reader->Skip(bytes_read);
while (bytes_read < num_to_read) {
fragment = reader->Peek(&fragment_size);
size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
- memcpy(scratch + bytes_read, fragment, n);
+ std::memcpy(scratch + bytes_read, fragment, n);
bytes_read += n;
reader->Skip(n);
}
@@ -1041,7 +1508,7 @@ size_t Compress(Source* reader, Sink* writer) {
// Get encoding table for compression
int table_size;
- uint16* table = wmem.GetHashTable(num_to_read, &table_size);
+ uint16_t* table = wmem.GetHashTable(num_to_read, &table_size);
// Compress input_fragment and append to dest
const int max_output = MaxCompressedLength(num_to_read);
@@ -1116,17 +1583,14 @@ class SnappyIOVecWriter {
: nullptr),
curr_iov_remaining_(iov_count ? iov->iov_len : 0),
total_written_(0),
- output_limit_(-1) {}
-
- inline void SetExpectedLength(size_t len) {
- output_limit_ = len;
+ output_limit_(-1) {
}
- inline bool CheckLength() const {
- return total_written_ == output_limit_;
- }
+ inline void SetExpectedLength(size_t len) { output_limit_ = len; }
- inline bool Append(const char* ip, size_t len) {
+ inline bool CheckLength() const { return total_written_ == output_limit_; }
+
+ inline bool Append(const char* ip, size_t len, char**) {
if (total_written_ + len > output_limit_) {
return false;
}
@@ -1134,6 +1598,13 @@ class SnappyIOVecWriter {
return AppendNoCheck(ip, len);
}
+ char* GetOutputPtr() { return nullptr; }
+ char* GetBase(ptrdiff_t*) { return nullptr; }
+ void SetOutputPtr(char* op) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)op;
+ }
+
inline bool AppendNoCheck(const char* ip, size_t len) {
while (len > 0) {
if (curr_iov_remaining_ == 0) {
@@ -1147,7 +1618,7 @@ class SnappyIOVecWriter {
}
const size_t to_write = std::min(len, curr_iov_remaining_);
- memcpy(curr_iov_output_, ip, to_write);
+ std::memcpy(curr_iov_output_, ip, to_write);
curr_iov_output_ += to_write;
curr_iov_remaining_ -= to_write;
total_written_ += to_write;
@@ -1158,7 +1629,8 @@ class SnappyIOVecWriter {
return true;
}
- inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len,
+ char**) {
const size_t space_left = output_limit_ - total_written_;
if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
curr_iov_remaining_ >= 16) {
@@ -1173,7 +1645,7 @@ class SnappyIOVecWriter {
return false;
}
- inline bool AppendFromSelf(size_t offset, size_t len) {
+ inline bool AppendFromSelf(size_t offset, size_t len, char**) {
// See SnappyArrayWriter::AppendFromSelf for an explanation of
// the "offset - 1u" trick.
if (offset - 1u >= total_written_) {
@@ -1229,6 +1701,7 @@ class SnappyIOVecWriter {
if (to_copy > len) {
to_copy = len;
}
+ assert(to_copy > 0);
IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset),
curr_iov_output_, curr_iov_output_ + to_copy,
@@ -1271,59 +1744,74 @@ class SnappyArrayWriter {
char* base_;
char* op_;
char* op_limit_;
+ // If op < op_limit_min_slop_ then it's safe to unconditionally write
+ // kSlopBytes starting at op.
+ char* op_limit_min_slop_;
public:
inline explicit SnappyArrayWriter(char* dst)
: base_(dst),
op_(dst),
- op_limit_(dst) {
- }
+ op_limit_(dst),
+ op_limit_min_slop_(dst) {} // Safe default see invariant.
inline void SetExpectedLength(size_t len) {
op_limit_ = op_ + len;
+ // Prevent pointer from being past the buffer.
+ op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, len);
}
- inline bool CheckLength() const {
- return op_ == op_limit_;
+ inline bool CheckLength() const { return op_ == op_limit_; }
+
+ char* GetOutputPtr() { return op_; }
+ char* GetBase(ptrdiff_t* op_limit_min_slop) {
+ *op_limit_min_slop = op_limit_min_slop_ - base_;
+ return base_;
}
+ void SetOutputPtr(char* op) { op_ = op; }
- inline bool Append(const char* ip, size_t len) {
- char* op = op_;
+ inline bool Append(const char* ip, size_t len, char** op_p) {
+ char* op = *op_p;
const size_t space_left = op_limit_ - op;
- if (space_left < len) {
- return false;
- }
- memcpy(op, ip, len);
- op_ = op + len;
+ if (space_left < len) return false;
+ std::memcpy(op, ip, len);
+ *op_p = op + len;
return true;
}
- inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
- char* op = op_;
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len,
+ char** op_p) {
+ char* op = *op_p;
const size_t space_left = op_limit_ - op;
if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
// Fast path, used for the majority (about 95%) of invocations.
UnalignedCopy128(ip, op);
- op_ = op + len;
+ *op_p = op + len;
return true;
} else {
return false;
}
}
- inline bool AppendFromSelf(size_t offset, size_t len) {
- char* const op_end = op_ + len;
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
+ assert(len > 0);
+ char* const op = *op_p;
+ assert(op >= base_);
+ char* const op_end = op + len;
// Check if we try to append from before the start of the buffer.
- // Normally this would just be a check for "produced < offset",
- // but "produced <= offset - 1u" is equivalent for every case
- // except the one where offset==0, where the right side will wrap around
- // to a very big number. This is convenient, as offset==0 is another
- // invalid case that we also want to catch, so that we do not go
- // into an infinite loop.
- if (Produced() <= offset - 1u || op_end > op_limit_) return false;
- op_ = IncrementalCopy(op_ - offset, op_, op_end, op_limit_);
+ if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - base_) < offset))
+ return false;
+ if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
+ op >= op_limit_min_slop_ || offset < len)) {
+ if (op_end > op_limit_ || offset == 0) return false;
+ *op_p = IncrementalCopy(op - offset, op, op_end, op_limit_);
+ return true;
+ }
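    // Fast path: at this point offset >= len, len <= kSlopBytes and
    // op < op_limit_min_slop_, so a single fixed-size memmove copies the whole
    // match plus slop bytes that stay inside the output buffer and are
    // overwritten by subsequent appends.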
+ std::memmove(op, op - offset, kSlopBytes);
+ *op_p = op_end;
return true;
}
inline size_t Produced() const {
@@ -1333,8 +1821,9 @@ class SnappyArrayWriter {
inline void Flush() {}
};
-bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
- ByteArraySource reader(compressed, n);
+bool RawUncompress(const char* compressed, size_t compressed_length,
+ char* uncompressed) {
+ ByteArraySource reader(compressed, compressed_length);
return RawUncompress(&reader, uncompressed);
}
@@ -1343,9 +1832,10 @@ bool RawUncompress(Source* compressed, char* uncompressed) {
return InternalUncompress(compressed, &output);
}
-bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
+bool Uncompress(const char* compressed, size_t compressed_length,
+ std::string* uncompressed) {
size_t ulength;
- if (!GetUncompressedLength(compressed, n, &ulength)) {
+ if (!GetUncompressedLength(compressed, compressed_length, &ulength)) {
return false;
}
// On 32-bit builds: max_size() < kuint32max. Check for that instead
@@ -1354,7 +1844,8 @@ bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
return false;
}
STLStringResizeUninitialized(uncompressed, ulength);
- return RawUncompress(compressed, n, string_as_array(uncompressed));
+ return RawUncompress(compressed, compressed_length,
+ string_as_array(uncompressed));
}
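// A typical round trip through the public API declared in snappy.h
// (illustrative only, error handling elided; `input` is any std::string of
// uncompressed bytes):
//
//   std::string compressed, restored;
//   snappy::Compress(input.data(), input.size(), &compressed);
//   bool ok = snappy::Uncompress(compressed.data(), compressed.size(),
//                                &restored);
//   assert(ok && restored == input);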
bool Uncompress(const char* compressed, size_t n, TString* uncompressed) {
@@ -1378,32 +1869,44 @@ class SnappyDecompressionValidator {
size_t produced_;
public:
- inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
- inline void SetExpectedLength(size_t len) {
- expected_ = len;
- }
- inline bool CheckLength() const {
- return expected_ == produced_;
+ inline SnappyDecompressionValidator() : expected_(0), produced_(0) {}
+ inline void SetExpectedLength(size_t len) { expected_ = len; }
+ size_t GetOutputPtr() { return produced_; }
+ size_t GetBase(ptrdiff_t* op_limit_min_slop) {
+ *op_limit_min_slop = std::numeric_limits<ptrdiff_t>::max() - kSlopBytes + 1;
+ return 1;
}
- inline bool Append(const char* ip, size_t len) {
- produced_ += len;
- return produced_ <= expected_;
+ void SetOutputPtr(size_t op) { produced_ = op; }
+ inline bool CheckLength() const { return expected_ == produced_; }
+ inline bool Append(const char* ip, size_t len, size_t* produced) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)ip;
+
+ *produced += len;
+ return *produced <= expected_;
}
- inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
+ inline bool TryFastAppend(const char* ip, size_t available, size_t length,
+ size_t* produced) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)ip;
+ (void)available;
+ (void)length;
+ (void)produced;
+
return false;
}
- inline bool AppendFromSelf(size_t offset, size_t len) {
+ inline bool AppendFromSelf(size_t offset, size_t len, size_t* produced) {
// See SnappyArrayWriter::AppendFromSelf for an explanation of
// the "offset - 1u" trick.
- if (produced_ <= offset - 1u) return false;
- produced_ += len;
- return produced_ <= expected_;
+ if (*produced <= offset - 1u) return false;
+ *produced += len;
+ return *produced <= expected_;
}
inline void Flush() {}
};
-bool IsValidCompressedBuffer(const char* compressed, size_t n) {
- ByteArraySource reader(compressed, n);
+bool IsValidCompressedBuffer(const char* compressed, size_t compressed_length) {
+ ByteArraySource reader(compressed, compressed_length);
SnappyDecompressionValidator writer;
return InternalUncompress(&reader, &writer);
}
@@ -1413,9 +1916,7 @@ bool IsValidCompressed(Source* compressed) {
return InternalUncompress(compressed, &writer);
}
-void RawCompress(const char* input,
- size_t input_length,
- char* compressed,
+void RawCompress(const char* input, size_t input_length, char* compressed,
size_t* compressed_length) {
ByteArraySource reader(input, input_length);
UncheckedByteArraySink writer(compressed);
@@ -1470,13 +1971,14 @@ class SnappyScatteredWriter {
size_t full_size_;
// Pointer into current output block
- char* op_base_; // Base of output block
- char* op_ptr_; // Pointer to next unfilled byte in block
- char* op_limit_; // Pointer just past block
+ char* op_base_; // Base of output block
+ char* op_ptr_; // Pointer to next unfilled byte in block
+ char* op_limit_; // Pointer just past block
+ // If op < op_limit_min_slop_ then it's safe to unconditionally write
+ // kSlopBytes starting at op.
+ char* op_limit_min_slop_;
- inline size_t Size() const {
- return full_size_ + (op_ptr_ - op_base_);
- }
+ inline size_t Size() const { return full_size_ + (op_ptr_ - op_base_); }
bool SlowAppend(const char* ip, size_t len);
bool SlowAppendFromSelf(size_t offset, size_t len);
@@ -1487,60 +1989,79 @@ class SnappyScatteredWriter {
full_size_(0),
op_base_(NULL),
op_ptr_(NULL),
- op_limit_(NULL) {
+ op_limit_(NULL),
+ op_limit_min_slop_(NULL) {}
+ char* GetOutputPtr() { return op_ptr_; }
+ char* GetBase(ptrdiff_t* op_limit_min_slop) {
+ *op_limit_min_slop = op_limit_min_slop_ - op_base_;
+ return op_base_;
}
+ void SetOutputPtr(char* op) { op_ptr_ = op; }
inline void SetExpectedLength(size_t len) {
assert(blocks_.empty());
expected_ = len;
}
- inline bool CheckLength() const {
- return Size() == expected_;
- }
+ inline bool CheckLength() const { return Size() == expected_; }
// Return the number of bytes actually uncompressed so far
- inline size_t Produced() const {
- return Size();
- }
+ inline size_t Produced() const { return Size(); }
- inline bool Append(const char* ip, size_t len) {
- size_t avail = op_limit_ - op_ptr_;
+ inline bool Append(const char* ip, size_t len, char** op_p) {
+ char* op = *op_p;
+ size_t avail = op_limit_ - op;
if (len <= avail) {
// Fast path
- memcpy(op_ptr_, ip, len);
- op_ptr_ += len;
+ std::memcpy(op, ip, len);
+ *op_p = op + len;
return true;
} else {
- return SlowAppend(ip, len);
+ op_ptr_ = op;
+ bool res = SlowAppend(ip, len);
+ *op_p = op_ptr_;
+ return res;
}
}
- inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
- char* op = op_ptr_;
+ inline bool TryFastAppend(const char* ip, size_t available, size_t length,
+ char** op_p) {
+ char* op = *op_p;
const int space_left = op_limit_ - op;
if (length <= 16 && available >= 16 + kMaximumTagLength &&
space_left >= 16) {
// Fast path, used for the majority (about 95%) of invocations.
UnalignedCopy128(ip, op);
- op_ptr_ = op + length;
+ *op_p = op + length;
return true;
} else {
return false;
}
}
- inline bool AppendFromSelf(size_t offset, size_t len) {
- char* const op_end = op_ptr_ + len;
- // See SnappyArrayWriter::AppendFromSelf for an explanation of
- // the "offset - 1u" trick.
- if (SNAPPY_PREDICT_TRUE(offset - 1u < op_ptr_ - op_base_ &&
- op_end <= op_limit_)) {
- // Fast path: src and dst in current block.
- op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_);
+ inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
+ char* op = *op_p;
+ assert(op >= op_base_);
+ // Check if we try to append from before the start of the buffer.
+ if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
+ static_cast<size_t>(op - op_base_) < offset ||
+ op >= op_limit_min_slop_ || offset < len)) {
+ if (offset == 0) return false;
+ if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - op_base_) < offset ||
+ op + len > op_limit_)) {
+ op_ptr_ = op;
+ bool res = SlowAppendFromSelf(offset, len);
+ *op_p = op_ptr_;
+ return res;
+ }
+ *op_p = IncrementalCopy(op - offset, op, op + len, op_limit_);
return true;
}
- return SlowAppendFromSelf(offset, len);
+ // Fast path
+ char* const op_end = op + len;
+ std::memmove(op, op - offset, kSlopBytes);
+ *op_p = op_end;
+ return true;
}
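
The fast path above relies on an overcopy: when offset >= len and len <= kSlopBytes, a single fixed-size std::memmove copies at least the len bytes that matter, and the bytes written past op + len are scratch that later appends overwrite; they stay inside the block because op < op_limit_min_slop_ was already checked. A standalone sketch of that trick, with kSlop as an assumed constant and CopyFromSelfFast as an illustrative helper:

    #include <cstddef>
    #include <cstring>

    constexpr size_t kSlop = 64;  // illustrative stand-in for kSlopBytes

    // Preconditions (mirroring the guards above): offset >= len, len <= kSlop,
    // offset <= bytes already produced in this block, op + kSlop <= op_limit.
    char* CopyFromSelfFast(char* op, size_t offset, size_t len) {
      // memmove, not memcpy: when offset < kSlop the ranges overlap, but only
      // the first len (<= offset) destination bytes are meaningful.
      std::memmove(op, op - offset, kSlop);
      return op + len;  // advance past the bytes that are actually valid
    }
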
// Called at the end of the decompress. We ask the allocator
@@ -1548,12 +2069,12 @@ class SnappyScatteredWriter {
inline void Flush() { allocator_.Flush(Produced()); }
};
-template<typename Allocator>
+template <typename Allocator>
bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
size_t avail = op_limit_ - op_ptr_;
while (len > avail) {
// Completely fill this block
- memcpy(op_ptr_, ip, avail);
+ std::memcpy(op_ptr_, ip, avail);
op_ptr_ += avail;
assert(op_limit_ - op_ptr_ == 0);
full_size_ += (op_ptr_ - op_base_);
@@ -1561,25 +2082,25 @@ bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
ip += avail;
// Bounds check
- if (full_size_ + len > expected_) {
- return false;
- }
+ if (full_size_ + len > expected_) return false;
// Make new block
size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
op_base_ = allocator_.Allocate(bsize);
op_ptr_ = op_base_;
op_limit_ = op_base_ + bsize;
+ op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, bsize);
+
blocks_.push_back(op_base_);
avail = bsize;
}
- memcpy(op_ptr_, ip, len);
+ std::memcpy(op_ptr_, ip, len);
op_ptr_ += len;
return true;
}

-template<typename Allocator>
+template <typename Allocator>
bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
size_t len) {
// Overflow check
@@ -1594,18 +2115,26 @@ bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
// nice if we do not rely on that, since we can get better compression if we
// allow cross-block copies and thus might want to change the compressor in
// the future.
+  // TODO: Replace this with a properly optimized path. This is not
+  // triggered right now, but it is slow enough that it would regress
+  // performance unacceptably if it were.
size_t src = cur - offset;
+ char* op = op_ptr_;
while (len-- > 0) {
- char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)];
- Append(&c, 1);
+ char c = blocks_[src >> kBlockLog][src & (kBlockSize - 1)];
+ if (!Append(&c, 1, &op)) {
+ op_ptr_ = op;
+ return false;
+ }
src++;
}
+ op_ptr_ = op;
return true;
}
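
The byte-by-byte fallback above addresses the logical output stream across blocks by splitting a flat position into a block index and an offset within that block, which works because every block except the last has size kBlockSize, a power of two. A sketch of that index decomposition with assumed constants (ByteAt is illustrative):

    #include <cstddef>
    #include <vector>

    constexpr int kBlockLogSketch = 16;                        // illustrative
    constexpr size_t kBlockSizeSketch = 1u << kBlockLogSketch;

    // Returns the byte at flat position pos in a chain of fixed-size blocks.
    char ByteAt(const std::vector<char*>& blocks, size_t pos) {
      const size_t block = pos >> kBlockLogSketch;         // which block
      const size_t within = pos & (kBlockSizeSketch - 1);  // offset inside it
      return blocks[block][within];
    }
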
class SnappySinkAllocator {
public:
- explicit SnappySinkAllocator(Sink* dest): dest_(dest) {}
+ explicit SnappySinkAllocator(Sink* dest) : dest_(dest) {}
~SnappySinkAllocator() {}
char* Allocate(int size) {
@@ -1621,10 +2150,9 @@ class SnappySinkAllocator {
// to the blocks.
void Flush(size_t size) {
size_t size_written = 0;
- size_t block_size;
- for (int i = 0; i < blocks_.size(); ++i) {
- block_size = std::min<size_t>(blocks_[i].size, size - size_written);
- dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
+ for (Datablock& block : blocks_) {
+ size_t block_size = std::min<size_t>(block.size, size - size_written);
+ dest_->AppendAndTakeOwnership(block.data, block_size,
&SnappySinkAllocator::Deleter, NULL);
size_written += block_size;
}
@@ -1639,6 +2167,10 @@ class SnappySinkAllocator {
};
static void Deleter(void* arg, const char* bytes, size_t size) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)arg;
+ (void)size;
+
delete[] bytes;
}
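
Flush hands each block to the Sink via AppendAndTakeOwnership, passing this Deleter so the block can be released once the sink is done with the bytes (the base-class implementation copies and deletes immediately). A sketch of the same handoff pattern against the Sink interface, assuming the vendored headers and with ReleaseBlock/HandOff as hypothetical names:

    #include <cstddef>
    #include "snappy-sinksource.h"

    static void ReleaseBlock(void* /*arg*/, const char* bytes, size_t /*size*/) {
      delete[] bytes;  // matches the new[] that allocated the block
    }

    void HandOff(snappy::Sink* dest, char* block, size_t n) {
      // The sink either keeps the buffer and calls ReleaseBlock later, or
      // copies the bytes and invokes the deleter right away.
      dest->AppendAndTakeOwnership(block, n, &ReleaseBlock, nullptr);
    }
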
@@ -1658,15 +2190,15 @@ size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
bool Uncompress(Source* compressed, Sink* uncompressed) {
// Read the uncompressed length from the front of the compressed input
SnappyDecompressor decompressor(compressed);
- uint32 uncompressed_len = 0;
+ uint32_t uncompressed_len = 0;
if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
return false;
}
char c;
size_t allocated_size;
- char* buf = uncompressed->GetAppendBufferVariable(
- 1, uncompressed_len, &c, 1, &allocated_size);
+ char* buf = uncompressed->GetAppendBufferVariable(1, uncompressed_len, &c, 1,
+ &allocated_size);
const size_t compressed_len = compressed->Available();
// If we can get a flat buffer, then use it, otherwise do block by block
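
This Source/Sink overload of Uncompress first asks the sink for a flat append buffer sized to the uncompressed length and only falls back to the scattered, block-by-block writer when it cannot get one. A caller-side sketch of the flat-buffer case using the byte-array source/sink helpers; UncompressToBuffer is an illustrative wrapper, and the include paths assume the vendored layout:

    #include <stdint.h>
    #include <string>
    #include "snappy.h"
    #include "snappy-sinksource.h"

    bool UncompressToBuffer(const std::string& compressed, std::string* out) {
      uint32_t n = 0;
      snappy::ByteArraySource probe(compressed.data(), compressed.size());
      if (!snappy::GetUncompressedLength(&probe, &n)) return false;
      out->resize(n);
      // Recreate the source: the length probe consumed part of it.
      snappy::ByteArraySource src(compressed.data(), compressed.size());
      snappy::UncheckedByteArraySink sink(&(*out)[0]);
      return snappy::Uncompress(&src, &sink);
    }
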
diff --git a/contrib/libs/snappy/snappy.h b/contrib/libs/snappy/snappy.h
index 9a3bc3fa64..1be786609f 100644
--- a/contrib/libs/snappy/snappy.h
+++ b/contrib/libs/snappy/snappy.h
@@ -39,9 +39,10 @@
#ifndef THIRD_PARTY_SNAPPY_SNAPPY_H__
#define THIRD_PARTY_SNAPPY_SNAPPY_H__
-#include <cstddef>
-#include <string>
+#include <stddef.h>
+#include <stdint.h>
+#include <string>
#include <util/generic/fwd.h>
#include "snappy-stubs-public.h"
@@ -65,7 +66,7 @@ namespace snappy {
// Also note that this leaves "*source" in a state that is unsuitable for
// further operations, such as RawUncompress(). You will need to rewind
// or recreate the source yourself before attempting any further calls.
- bool GetUncompressedLength(Source* source, uint32* result);
+ bool GetUncompressedLength(Source* source, uint32_t* result);
// ------------------------------------------------------------------------
// Higher-level string based routines (should be sufficient for most users)