author     Anton Samokhvalov <pg83@yandex.ru>  2022-02-10 16:45:17 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:17 +0300
commit     d3a398281c6fd1d3672036cb2d63f842d2cb28c5 (patch)
tree       dd4bd3ca0f36b817e96812825ffaf10d645803f2 /contrib/libs/snappy/snappy.cc
parent     72cb13b4aff9bc9cf22e49251bc8fd143f82538f (diff)
download   ydb-d3a398281c6fd1d3672036cb2d63f842d2cb28c5.tar.gz
Restoring authorship annotation for Anton Samokhvalov <pg83@yandex.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/snappy/snappy.cc')
-rw-r--r--  contrib/libs/snappy/snappy.cc  1374
1 file changed, 687 insertions, 687 deletions
diff --git a/contrib/libs/snappy/snappy.cc b/contrib/libs/snappy/snappy.cc
index 27e491c043..9351b0f21e 100644
--- a/contrib/libs/snappy/snappy.cc
+++ b/contrib/libs/snappy/snappy.cc
@@ -1,35 +1,35 @@
-// Copyright 2005 Google Inc. All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "snappy.h"
-#include "snappy-internal.h"
-#include "snappy-sinksource.h"
-
+// Copyright 2005 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "snappy.h"
+#include "snappy-internal.h"
+#include "snappy-sinksource.h"
+
#if !defined(SNAPPY_HAVE_SSSE3)
// __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD
// support between SSE2 and AVX (so SSSE3 instructions require AVX support), and
@@ -68,60 +68,60 @@
#include <immintrin.h>
#endif
-#include <stdio.h>
-
-#include <algorithm>
+#include <stdio.h>
+
+#include <algorithm>
#include <string>
#include <vector>
#include <util/generic/string.h>
-
-namespace snappy {
-
+
+namespace snappy {
+
using internal::COPY_1_BYTE_OFFSET;
using internal::COPY_2_BYTE_OFFSET;
using internal::LITERAL;
using internal::char_table;
using internal::kMaximumTagLength;
-// Any hash function will produce a valid compressed bitstream, but a good
-// hash function reduces the number of collisions and thus yields better
-// compression for compressible input, and more speed for incompressible
-// input. Of course, it doesn't hurt if the hash function is reasonably fast
-// either, as it gets called a lot.
-static inline uint32 HashBytes(uint32 bytes, int shift) {
- uint32 kMul = 0x1e35a7bd;
- return (bytes * kMul) >> shift;
-}
-static inline uint32 Hash(const char* p, int shift) {
- return HashBytes(UNALIGNED_LOAD32(p), shift);
-}
-
-size_t MaxCompressedLength(size_t source_len) {
- // Compressed data can be defined as:
- // compressed := item* literal*
- // item := literal* copy
- //
- // The trailing literal sequence has a space blowup of at most 62/60
- // since a literal of length 60 needs one tag byte + one extra byte
- // for length information.
- //
- // Item blowup is trickier to measure. Suppose the "copy" op copies
- // 4 bytes of data. Because of a special check in the encoding code,
- // we produce a 4-byte copy only if the offset is < 65536. Therefore
- // the copy op takes 3 bytes to encode, and this type of item leads
- // to at most the 62/60 blowup for representing literals.
- //
- // Suppose the "copy" op copies 5 bytes of data. If the offset is big
- // enough, it will take 5 bytes to encode the copy op. Therefore the
- // worst case here is a one-byte literal followed by a five-byte copy.
- // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
- //
- // This last factor dominates the blowup, so the final estimate is:
- return 32 + source_len + source_len/6;
-}
-
+// Any hash function will produce a valid compressed bitstream, but a good
+// hash function reduces the number of collisions and thus yields better
+// compression for compressible input, and more speed for incompressible
+// input. Of course, it doesn't hurt if the hash function is reasonably fast
+// either, as it gets called a lot.
+static inline uint32 HashBytes(uint32 bytes, int shift) {
+ uint32 kMul = 0x1e35a7bd;
+ return (bytes * kMul) >> shift;
+}
+static inline uint32 Hash(const char* p, int shift) {
+ return HashBytes(UNALIGNED_LOAD32(p), shift);
+}
+
+size_t MaxCompressedLength(size_t source_len) {
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ return 32 + source_len + source_len/6;
+}
+
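The worst-case arithmetic in the comment above can be checked numerically. The following standalone sketch (not part of the diff) reproduces the same bound and evaluates it for a 1 MiB input:

    #include <cstddef>
    #include <cstdio>

    // Same bound as MaxCompressedLength above: 32 bytes of slack plus a
    // worst-case 7/6 expansion (one-byte literal followed by a five-byte
    // copy for every 6 input bytes).
    static size_t MaxCompressedLengthSketch(size_t source_len) {
      return 32 + source_len + source_len / 6;
    }

    int main() {
      // For a 1 MiB input: 32 + 1048576 + 174762 = 1223370 bytes.
      std::printf("%zu\n", MaxCompressedLengthSketch(1u << 20));
    }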
namespace {
-
+
void UnalignedCopy64(const void* src, void* dst) {
char tmp[8];
memcpy(tmp, src, 8);
@@ -145,7 +145,7 @@ void UnalignedCopy128(const void* src, void* dst) {
// op_limit == op + 20
// After IncrementalCopySlow(src, op, op_limit), the result will have eleven
// copies of "ab"
-// ababababababababababab
+// ababababababababababab
// Note that this does not match the semantics of either memcpy() or memmove().
inline char* IncrementalCopySlow(const char* src, char* op,
char* const op_limit) {
@@ -156,13 +156,13 @@ inline char* IncrementalCopySlow(const char* src, char* op,
#pragma clang loop unroll(disable)
#endif
while (op < op_limit) {
- *op++ = *src++;
+ *op++ = *src++;
}
return op_limit;
-}
-
+}
+
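The comment above IncrementalCopySlow (the "ab" repeated eleven times example) describes a self-overlapping copy in which earlier output bytes become later input bytes. A minimal standalone sketch of that behaviour, using only the byte loop shown in the diff:

    #include <cstdio>

    // Overlapping forward copy: when src precedes op by less than the copy
    // length, the pattern between src and op is repeated. This matches the
    // semantics of neither memcpy() nor memmove().
    static char* IncrementalCopySlowSketch(const char* src, char* op,
                                           char* const op_limit) {
      while (op < op_limit) *op++ = *src++;
      return op_limit;
    }

    int main() {
      char buf[32] = "ab";                       // 2-byte pattern already emitted
      IncrementalCopySlowSketch(buf, buf + 2, buf + 22);
      std::printf("%.22s\n", buf);               // prints "ab" repeated 11 times
    }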
#if SNAPPY_HAVE_SSSE3
-
+
// This is a table of shuffle control masks that can be used as the source
// operand for PSHUFB to permute the contents of the destination XMM register
// into a repeating byte pattern.
@@ -175,9 +175,9 @@ alignas(16) const char pshufb_fill_patterns[7][16] = {
{0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1},
};
-
+
#endif // SNAPPY_HAVE_SSSE3
-
+
// Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) but faster than
// IncrementalCopySlow. buf_limit is the address past the end of the writable
// region of the buffer.
@@ -194,7 +194,7 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
// NOTE: The compressor always emits 4 <= len <= 64. It is ok to assume that
// to optimize this function but we have to also handle other cases in case
// the input does not satisfy these conditions.
-
+
size_t pattern_size = op - src;
// The cases are split into different branches to allow the branch predictor,
// FDO, and static prediction hints to work better. For each input we list the
@@ -286,7 +286,7 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
// conditionals instead of a loop allows FDO to layout the code with respect
// to the actual probabilities of each length.
// TODO: Replace with loop with trip count hint.
- UnalignedCopy64(src, op);
+ UnalignedCopy64(src, op);
UnalignedCopy64(src + 8, op + 8);
if (op + 16 < op_limit) {
@@ -302,7 +302,7 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
UnalignedCopy64(src + 56, op + 56);
}
return op_limit;
- }
+ }
// Fall back to doing as much as we can with the available slop in the
// buffer. This code path is relatively cold however so we save code size by
@@ -314,7 +314,7 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
#pragma clang loop unroll(disable)
#endif
for (char *op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
- UnalignedCopy64(src, op);
+ UnalignedCopy64(src, op);
UnalignedCopy64(src + 8, op + 8);
}
if (op >= op_limit)
@@ -324,17 +324,17 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
// single 8 byte copy.
if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) {
UnalignedCopy64(src, op);
- src += 8;
- op += 8;
- }
+ src += 8;
+ op += 8;
+ }
return IncrementalCopySlow(src, op, op_limit);
-}
-
+}
+
} // namespace
template <bool allow_fast_path>
-static inline char* EmitLiteral(char* op,
- const char* literal,
+static inline char* EmitLiteral(char* op,
+ const char* literal,
int len) {
// The vast majority of copies are below 16 bytes, for which a
// call to memcpy is overkill. This fast path can sometimes
@@ -356,13 +356,13 @@ static inline char* EmitLiteral(char* op,
return op + len;
}
- if (n < 60) {
- // Fits in tag byte
- *op++ = LITERAL | (n << 2);
- } else {
+ if (n < 60) {
+ // Fits in tag byte
+ *op++ = LITERAL | (n << 2);
+ } else {
int count = (Bits::Log2Floor(n) >> 3) + 1;
- assert(count >= 1);
- assert(count <= 4);
+ assert(count >= 1);
+ assert(count <= 4);
*op++ = LITERAL | ((59 + count) << 2);
// Encode in upcoming bytes.
// Write 4 bytes, though we may care about only 1 of them. The output buffer
@@ -370,33 +370,33 @@ static inline char* EmitLiteral(char* op,
// here and there is a memcpy of size 'len' below.
LittleEndian::Store32(op, n);
op += count;
- }
- memcpy(op, literal, len);
- return op + len;
-}
-
+ }
+ memcpy(op, literal, len);
+ return op + len;
+}
+
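For the short-literal branch above, the tag byte keeps LITERAL (00) in the two low bits and, assuming n is len - 1 as in upstream snappy, the length minus one in the upper six bits. A hypothetical helper illustrating just that packing (the name is illustrative, not part of the file):

    #include <cstdint>
    #include <cassert>

    // Mirrors the "fits in tag byte" branch of EmitLiteral above, assuming
    // n == len - 1: literals of length 1..60 store (len - 1) in the upper
    // six bits of the tag, with the two low bits 00 (LITERAL).
    static uint8_t ShortLiteralTag(int len) {
      assert(len >= 1 && len <= 60);
      return static_cast<uint8_t>((len - 1) << 2);  // LITERAL == 0
    }

    int main() {
      // A 5-byte literal: tag 0x10, then the 5 payload bytes follow verbatim.
      return ShortLiteralTag(5) == 0x10 ? 0 : 1;
    }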
template <bool len_less_than_12>
static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
assert(len <= 64);
assert(len >= 4);
assert(offset < 65536);
assert(len_less_than_12 == (len < 12));
-
+
if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) {
// offset fits in 11 bits. The 3 highest go in the top of the first byte,
// and the rest go in the second byte.
*op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
- *op++ = offset & 0xff;
- } else {
+ *op++ = offset & 0xff;
+ } else {
// Write 4 bytes, though we only care about 3 of them. The output buffer
// is required to have some slack, so the extra byte won't overrun it.
uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
LittleEndian::Store32(op, u);
op += 3;
- }
- return op;
-}
-
+ }
+ return op;
+}
+
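The 1-byte-offset branch above packs a copy of length 4..11 with an offset below 2048 into two bytes: three high offset bits in the top of the tag, the remaining eight in the next byte. A sketch of that layout, assuming COPY_1_BYTE_OFFSET == 1 as in upstream snappy-internal.h:

    #include <cstdint>
    #include <cstddef>
    #include <cassert>

    // Two-byte copy encoding (tag type 01): length 4..11, offset < 2048.
    static void EncodeCopy1(uint8_t out[2], size_t offset, size_t len) {
      assert(len >= 4 && len < 12 && offset < 2048);
      out[0] = static_cast<uint8_t>(0x01 + ((len - 4) << 2) +
                                    ((offset >> 3) & 0xe0));
      out[1] = static_cast<uint8_t>(offset & 0xff);
    }

    int main() {
      uint8_t tag[2];
      EncodeCopy1(tag, /*offset=*/300, /*len=*/7);
      // offset 300: bits 8..10 (001) land in bits 5..7 of tag[0], the low
      // 8 bits (0x2C) in tag[1]; length 7 stores (7 - 4) = 3 in bits 2..4.
      return (tag[0] == 0x2d && tag[1] == 0x2c) ? 0 : 1;
    }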
template <bool len_less_than_12>
static inline char* EmitCopy(char* op, size_t offset, size_t len) {
assert(len_less_than_12 == (len < 12));
@@ -405,7 +405,7 @@ static inline char* EmitCopy(char* op, size_t offset, size_t len) {
} else {
// A special case for len <= 64 might help, but so far measurements suggest
// it's in the noise.
-
+
// Emit 64 byte copies but make sure to keep at least four bytes reserved.
while (SNAPPY_PREDICT_FALSE(len >= 68)) {
op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 64);
@@ -425,20 +425,20 @@ static inline char* EmitCopy(char* op, size_t offset, size_t len) {
op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len);
}
return op;
- }
-}
-
-bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
- uint32 v = 0;
- const char* limit = start + n;
- if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
- *result = v;
- return true;
- } else {
- return false;
- }
-}
-
+ }
+}
+
+bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
+ uint32 v = 0;
+ const char* limit = start + n;
+ if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
+ *result = v;
+ return true;
+ } else {
+ return false;
+ }
+}
+
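GetUncompressedLength above parses the length prefix with Varint::Parse32WithLimit; the prefix itself is a little-endian base-128 varint (1..5 bytes, 7 payload bits per byte, high bit marking continuation). A small encoding sketch, independent of the Varint helpers:

    #include <cstdint>
    #include <cstddef>

    // Little-endian base-128 varint: 7 payload bits per byte, high bit set
    // on every byte except the last.
    static size_t EncodeVarint32(uint8_t* out, uint32_t v) {
      size_t n = 0;
      while (v >= 0x80) {
        out[n++] = static_cast<uint8_t>(v) | 0x80;
        v >>= 7;
      }
      out[n++] = static_cast<uint8_t>(v);
      return n;
    }

    int main() {
      uint8_t buf[5];
      // 300 = 0b10'0101100 encodes as 0xAC 0x02.
      return (EncodeVarint32(buf, 300) == 2 &&
              buf[0] == 0xAC && buf[1] == 0x02) ? 0 : 1;
    }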
namespace {
uint32 CalculateTableSize(uint32 input_size) {
static_assert(
@@ -446,16 +446,16 @@ uint32 CalculateTableSize(uint32 input_size) {
"kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
if (input_size > kMaxHashTableSize) {
return kMaxHashTableSize;
- }
+ }
if (input_size < kMinHashTableSize) {
return kMinHashTableSize;
- }
+ }
// This is equivalent to Log2Ceiling(input_size), assuming input_size > 1.
// 2 << Log2Floor(x - 1) is equivalent to 1 << (1 + Log2Floor(x - 1)).
return 2u << Bits::Log2Floor(input_size - 1);
}
} // namespace
-
+
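The expression 2u << Bits::Log2Floor(input_size - 1) above rounds input_size up to the next power of two (for input_size > 1), giving the hash-table size once the min/max clamps have been applied. A quick standalone check of that identity, with a plain loop standing in for Bits::Log2Floor:

    #include <cstdint>

    // Log2Floor as a plain loop; snappy's Bits::Log2Floor uses an intrinsic.
    static int Log2FloorSketch(uint32_t n) {
      int r = -1;
      while (n) { ++r; n >>= 1; }
      return r;
    }

    // Round up to the next power of two, as CalculateTableSize does.
    static uint32_t RoundUpPow2(uint32_t x) {   // requires x > 1
      return 2u << Log2FloorSketch(x - 1);
    }

    int main() {
      // 1000 -> 1024, 1024 -> 1024, 1025 -> 2048.
      return (RoundUpPow2(1000) == 1024 && RoundUpPow2(1024) == 1024 &&
              RoundUpPow2(1025) == 2048) ? 0 : 1;
    }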
namespace internal {
WorkingMemory::WorkingMemory(size_t input_size) {
const size_t max_fragment_size = std::min(input_size, kBlockSize);
@@ -476,225 +476,225 @@ uint16* WorkingMemory::GetHashTable(size_t fragment_size,
int* table_size) const {
const size_t htsize = CalculateTableSize(fragment_size);
memset(table_, 0, htsize * sizeof(*table_));
- *table_size = htsize;
+ *table_size = htsize;
return table_;
-}
-} // end namespace internal
-
-// For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
-// equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
-// empirically found that overlapping loads such as
-// UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
-// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
-//
-// We have different versions for 64- and 32-bit; ideally we would avoid the
-// two functions and just inline the UNALIGNED_LOAD64 call into
-// GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
-// enough to avoid loading the value multiple times then. For 64-bit, the load
-// is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
-// done at GetUint32AtOffset() time.
-
-#ifdef ARCH_K8
-
-typedef uint64 EightBytesReference;
-
-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
- return UNALIGNED_LOAD64(ptr);
-}
-
-static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
+}
+} // end namespace internal
+
+// For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
+// equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
+// empirically found that overlapping loads such as
+// UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
+// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
+//
+// We have different versions for 64- and 32-bit; ideally we would avoid the
+// two functions and just inline the UNALIGNED_LOAD64 call into
+// GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
+// enough to avoid loading the value multiple times then. For 64-bit, the load
+// is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
+// done at GetUint32AtOffset() time.
+
+#ifdef ARCH_K8
+
+typedef uint64 EightBytesReference;
+
+static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+ return UNALIGNED_LOAD64(ptr);
+}
+
+static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
assert(offset >= 0);
assert(offset <= 4);
- return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
-}
-
-#else
-
-typedef const char* EightBytesReference;
-
-static inline EightBytesReference GetEightBytesAt(const char* ptr) {
- return ptr;
-}
-
-static inline uint32 GetUint32AtOffset(const char* v, int offset) {
+ return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
+}
+
+#else
+
+typedef const char* EightBytesReference;
+
+static inline EightBytesReference GetEightBytesAt(const char* ptr) {
+ return ptr;
+}
+
+static inline uint32 GetUint32AtOffset(const char* v, int offset) {
assert(offset >= 0);
assert(offset <= 4);
- return UNALIGNED_LOAD32(v + offset);
-}
-
-#endif
-
-// Flat array compression that does not emit the "uncompressed length"
-// prefix. Compresses "input" string to the "*op" buffer.
-//
-// REQUIRES: "input" is at most "kBlockSize" bytes long.
-// REQUIRES: "op" points to an array of memory that is at least
-// "MaxCompressedLength(input.size())" in size.
-// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
-// REQUIRES: "table_size" is a power of two
-//
-// Returns an "end" pointer into "op" buffer.
-// "end - op" is the compressed size of "input".
-namespace internal {
-char* CompressFragment(const char* input,
- size_t input_size,
- char* op,
- uint16* table,
- const int table_size) {
- // "ip" is the input pointer, and "op" is the output pointer.
- const char* ip = input;
+ return UNALIGNED_LOAD32(v + offset);
+}
+
+#endif
+
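The invariant stated in the comment above, GetUint32AtOffset(GetEightBytesAt(p), offset) == UNALIGNED_LOAD32(p + offset) for 0 <= offset <= 4, can be exercised directly. A little-endian-only sketch that uses memcpy for the unaligned loads, mirroring the ARCH_K8 branch:

    #include <cstdint>
    #include <cstring>

    // Unaligned loads via memcpy; assumes a little-endian host.
    static uint64_t Load64(const char* p) { uint64_t v; std::memcpy(&v, p, 8); return v; }
    static uint32_t Load32(const char* p) { uint32_t v; std::memcpy(&v, p, 4); return v; }

    // One 64-bit load, then shifts select any of the five overlapping
    // 32-bit windows without touching memory again.
    static uint32_t GetUint32AtOffsetSketch(uint64_t v, int offset) {
      return static_cast<uint32_t>(v >> (8 * offset));
    }

    int main() {
      const char data[9] = "ABCDEFGH";
      const uint64_t eight = Load64(data);
      for (int off = 0; off <= 4; ++off) {
        if (GetUint32AtOffsetSketch(eight, off) != Load32(data + off)) return 1;
      }
      return 0;
    }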
+// Flat array compression that does not emit the "uncompressed length"
+// prefix. Compresses "input" string to the "*op" buffer.
+//
+// REQUIRES: "input" is at most "kBlockSize" bytes long.
+// REQUIRES: "op" points to an array of memory that is at least
+// "MaxCompressedLength(input.size())" in size.
+// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+// REQUIRES: "table_size" is a power of two
+//
+// Returns an "end" pointer into "op" buffer.
+// "end - op" is the compressed size of "input".
+namespace internal {
+char* CompressFragment(const char* input,
+ size_t input_size,
+ char* op,
+ uint16* table,
+ const int table_size) {
+ // "ip" is the input pointer, and "op" is the output pointer.
+ const char* ip = input;
assert(input_size <= kBlockSize);
assert((table_size & (table_size - 1)) == 0); // table must be power of two
- const int shift = 32 - Bits::Log2Floor(table_size);
+ const int shift = 32 - Bits::Log2Floor(table_size);
assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
- const char* ip_end = input + input_size;
- const char* base_ip = ip;
- // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
- // [next_emit, ip_end) after the main loop.
- const char* next_emit = ip;
-
- const size_t kInputMarginBytes = 15;
+ const char* ip_end = input + input_size;
+ const char* base_ip = ip;
+ // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+ // [next_emit, ip_end) after the main loop.
+ const char* next_emit = ip;
+
+ const size_t kInputMarginBytes = 15;
if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
- const char* ip_limit = input + input_size - kInputMarginBytes;
-
- for (uint32 next_hash = Hash(++ip, shift); ; ) {
+ const char* ip_limit = input + input_size - kInputMarginBytes;
+
+ for (uint32 next_hash = Hash(++ip, shift); ; ) {
assert(next_emit < ip);
- // The body of this loop calls EmitLiteral once and then EmitCopy one or
- // more times. (The exception is that when we're close to exhausting
- // the input we goto emit_remainder.)
- //
- // In the first iteration of this loop we're just starting, so
- // there's nothing to copy, so calling EmitLiteral once is
- // necessary. And we only start a new iteration when the
- // current iteration has determined that a call to EmitLiteral will
- // precede the next call to EmitCopy (if any).
- //
- // Step 1: Scan forward in the input looking for a 4-byte-long match.
- // If we get close to exhausting the input then goto emit_remainder.
- //
- // Heuristic match skipping: If 32 bytes are scanned with no matches
- // found, start looking only at every other byte. If 32 more bytes are
+ // The body of this loop calls EmitLiteral once and then EmitCopy one or
+ // more times. (The exception is that when we're close to exhausting
+ // the input we goto emit_remainder.)
+ //
+ // In the first iteration of this loop we're just starting, so
+ // there's nothing to copy, so calling EmitLiteral once is
+ // necessary. And we only start a new iteration when the
+ // current iteration has determined that a call to EmitLiteral will
+ // precede the next call to EmitCopy (if any).
+ //
+ // Step 1: Scan forward in the input looking for a 4-byte-long match.
+ // If we get close to exhausting the input then goto emit_remainder.
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match is
// found, immediately go back to looking at every byte. This is a small
// loss (~5% performance, ~0.1% density) for compressible data due to more
- // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
- // win since the compressor quickly "realizes" the data is incompressible
- // and doesn't bother looking for matches everywhere.
- //
- // The "skip" variable keeps track of how many bytes there are since the
- // last match; dividing it by 32 (ie. right-shifting by five) gives the
- // number of bytes to move ahead for each iteration.
- uint32 skip = 32;
-
- const char* next_ip = ip;
- const char* candidate;
- do {
- ip = next_ip;
- uint32 hash = next_hash;
+ // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
+ // win since the compressor quickly "realizes" the data is incompressible
+ // and doesn't bother looking for matches everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since the
+ // last match; dividing it by 32 (ie. right-shifting by five) gives the
+ // number of bytes to move ahead for each iteration.
+ uint32 skip = 32;
+
+ const char* next_ip = ip;
+ const char* candidate;
+ do {
+ ip = next_ip;
+ uint32 hash = next_hash;
assert(hash == Hash(ip, shift));
uint32 bytes_between_hash_lookups = skip >> 5;
skip += bytes_between_hash_lookups;
- next_ip = ip + bytes_between_hash_lookups;
+ next_ip = ip + bytes_between_hash_lookups;
if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
- goto emit_remainder;
- }
- next_hash = Hash(next_ip, shift);
- candidate = base_ip + table[hash];
+ goto emit_remainder;
+ }
+ next_hash = Hash(next_ip, shift);
+ candidate = base_ip + table[hash];
assert(candidate >= base_ip);
assert(candidate < ip);
-
- table[hash] = ip - base_ip;
+
+ table[hash] = ip - base_ip;
} while (SNAPPY_PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
UNALIGNED_LOAD32(candidate)));
-
- // Step 2: A 4-byte match has been found. We'll later see if more
- // than 4 bytes match. But, prior to the match, input
- // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
+
+ // Step 2: A 4-byte match has been found. We'll later see if more
+ // than 4 bytes match. But, prior to the match, input
+ // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
assert(next_emit + 16 <= ip_end);
op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit);
-
- // Step 3: Call EmitCopy, and then see if another EmitCopy could
- // be our next move. Repeat until we find no match for the
- // input immediately after what was consumed by the last EmitCopy call.
- //
- // If we exit this loop normally then we need to call EmitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can exit
- // this loop via goto if we get close to exhausting the input.
- EightBytesReference input_bytes;
- uint32 candidate_bytes = 0;
-
- do {
- // We have a 4-byte match at ip, and no need to emit any
- // "literal bytes" prior to ip.
- const char* base = ip;
+
+ // Step 3: Call EmitCopy, and then see if another EmitCopy could
+ // be our next move. Repeat until we find no match for the
+ // input immediately after what was consumed by the last EmitCopy call.
+ //
+ // If we exit this loop normally then we need to call EmitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can exit
+ // this loop via goto if we get close to exhausting the input.
+ EightBytesReference input_bytes;
+ uint32 candidate_bytes = 0;
+
+ do {
+ // We have a 4-byte match at ip, and no need to emit any
+ // "literal bytes" prior to ip.
+ const char* base = ip;
std::pair<size_t, bool> p =
FindMatchLength(candidate + 4, ip + 4, ip_end);
size_t matched = 4 + p.first;
- ip += matched;
- size_t offset = base - candidate;
+ ip += matched;
+ size_t offset = base - candidate;
assert(0 == memcmp(base, candidate, matched));
if (p.second) {
op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched);
} else {
op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
}
- next_emit = ip;
+ next_emit = ip;
if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
- goto emit_remainder;
- }
+ goto emit_remainder;
+ }
// We are now looking for a 4-byte match again. We read
// table[Hash(ip, shift)] for that. To improve compression,
// we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)].
input_bytes = GetEightBytesAt(ip - 1);
- uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
- table[prev_hash] = ip - base_ip - 1;
- uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
- candidate = base_ip + table[cur_hash];
- candidate_bytes = UNALIGNED_LOAD32(candidate);
- table[cur_hash] = ip - base_ip;
- } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
-
- next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
- ++ip;
- }
- }
-
- emit_remainder:
- // Emit the remaining bytes as a literal
- if (next_emit < ip_end) {
+ uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
+ table[prev_hash] = ip - base_ip - 1;
+ uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
+ candidate = base_ip + table[cur_hash];
+ candidate_bytes = UNALIGNED_LOAD32(candidate);
+ table[cur_hash] = ip - base_ip;
+ } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
+
+ next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
+ ++ip;
+ }
+ }
+
+ emit_remainder:
+ // Emit the remaining bytes as a literal
+ if (next_emit < ip_end) {
op = EmitLiteral</*allow_fast_path=*/false>(op, next_emit,
ip_end - next_emit);
- }
-
- return op;
-}
-} // end namespace internal
-
+ }
+
+ return op;
+}
+} // end namespace internal
+
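The match-skipping heuristic described inside CompressFragment (probe every byte for the first 32 misses, then every other byte, and so on) is easiest to see in isolation. A rough sketch of just the stepping schedule, with a hypothetical Probe() standing in for the hash-table lookup:

    #include <cstdint>
    #include <cstddef>

    // Stand-in for the hash-table probe in CompressFragment; always misses.
    static bool Probe(const char*) { return false; }

    // Scan [ip, ip_limit) with the skip schedule above: the stride is
    // skip >> 5, and skip grows by that stride after each miss, so the
    // scan slowly widens on incompressible data.
    static const char* ScanWithSkipping(const char* ip, const char* ip_limit) {
      uint32_t skip = 32;
      while (true) {
        const uint32_t bytes_between_hash_lookups = skip >> 5;
        skip += bytes_between_hash_lookups;
        const char* next_ip = ip + bytes_between_hash_lookups;
        if (next_ip > ip_limit) return nullptr;  // ran out of input, no match
        if (Probe(ip)) return ip;                // would emit a copy here
        ip = next_ip;
      }
    }

    int main() {
      char buf[4096] = {};
      return ScanWithSkipping(buf, buf + sizeof(buf)) == nullptr ? 0 : 1;
    }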
// Called back at every compression call to trace parameters and sizes.
static inline void Report(const char *algorithm, size_t compressed_size,
size_t uncompressed_size) {}
-// Signature of output types needed by decompression code.
-// The decompression code is templatized on a type that obeys this
-// signature so that we do not pay virtual function call overhead in
-// the middle of a tight decompression loop.
-//
-// class DecompressionWriter {
-// public:
-// // Called before decompression
-// void SetExpectedLength(size_t length);
-//
-// // Called after decompression
-// bool CheckLength() const;
-//
-// // Called repeatedly during decompression
-// bool Append(const char* ip, size_t length);
-// bool AppendFromSelf(uint32 offset, size_t length);
-//
+// Signature of output types needed by decompression code.
+// The decompression code is templatized on a type that obeys this
+// signature so that we do not pay virtual function call overhead in
+// the middle of a tight decompression loop.
+//
+// class DecompressionWriter {
+// public:
+// // Called before decompression
+// void SetExpectedLength(size_t length);
+//
+// // Called after decompression
+// bool CheckLength() const;
+//
+// // Called repeatedly during decompression
+// bool Append(const char* ip, size_t length);
+// bool AppendFromSelf(uint32 offset, size_t length);
+//
// // The rules for how TryFastAppend differs from Append are somewhat
// // convoluted:
-// //
+// //
// // - TryFastAppend is allowed to decline (return false) at any
// // time, for any reason -- just "return false" would be
// // a perfectly legal implementation of TryFastAppend.
@@ -711,10 +711,10 @@ static inline void Report(const char *algorithm, size_t compressed_size,
// // decoded fully. In practice, this should not be a big problem,
// // as it is unlikely that one would implement a fast path accepting
// // this much data.
-// //
-// bool TryFastAppend(const char* ip, size_t available, size_t length);
-// };
-
+// //
+// bool TryFastAppend(const char* ip, size_t available, size_t length);
+// };
+
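To make the signatures in the comment block above concrete, here is a minimal writer that satisfies them by discarding data and only counting bytes. It is close in spirit to the SnappyDecompressionValidator defined later in this file; the class name is illustrative:

    #include <cstddef>
    #include <cstdint>

    // Illustrative Writer: tracks how much output was produced, keeps nothing.
    class CountingWriter {
     public:
      void SetExpectedLength(size_t length) { expected_ = length; }
      bool CheckLength() const { return produced_ == expected_; }
      bool Append(const char* /*ip*/, size_t length) {
        produced_ += length;
        return produced_ <= expected_;
      }
      bool AppendFromSelf(uint32_t /*offset*/, size_t length) {
        produced_ += length;
        return produced_ <= expected_;
      }
      // Always declining the fast path is a legal implementation.
      bool TryFastAppend(const char*, size_t, size_t) { return false; }
      void Flush() {}

     private:
      size_t expected_ = 0;
      size_t produced_ = 0;
    };

    int main() {
      CountingWriter w;
      w.SetExpectedLength(10);
      w.Append(nullptr, 6);
      w.AppendFromSelf(3, 4);
      return w.CheckLength() ? 0 : 1;
    }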
static inline uint32 ExtractLowBytes(uint32 v, int n) {
assert(n >= 0);
assert(n <= 4);
@@ -726,8 +726,8 @@ static inline uint32 ExtractLowBytes(uint32 v, int n) {
uint64 mask = 0xffffffff;
return v & ~(mask << (8 * n));
#endif
-}
-
+}
+
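ExtractLowBytes above keeps only the low n bytes of a 32-bit value (0 <= n <= 4); the portable branch widens the mask to 64 bits so that n == 4 does not shift a 32-bit mask by 32. A standalone sketch of the same computation:

    #include <cstdint>

    // Keep only the low n bytes of v, for 0 <= n <= 4.
    static uint32_t ExtractLowBytesSketch(uint32_t v, int n) {
      const uint64_t mask = 0xffffffff;
      return v & static_cast<uint32_t>(~(mask << (8 * n)));
    }

    int main() {
      // 0x11223344 with n = 2 keeps the two low bytes: 0x3344.
      return ExtractLowBytesSketch(0x11223344u, 2) == 0x3344u ? 0 : 1;
    }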
static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
assert(shift < 32);
static const uint8 masks[] = {
@@ -736,77 +736,77 @@ static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe};
return (value & masks[shift]) != 0;
-}
-
-// Helper class for decompression
-class SnappyDecompressor {
- private:
- Source* reader_; // Underlying source of bytes to decompress
- const char* ip_; // Points to next buffered byte
- const char* ip_limit_; // Points just past buffered bytes
- uint32 peeked_; // Bytes peeked from reader (need to skip)
- bool eof_; // Hit end of input without an error?
+}
+
+// Helper class for decompression
+class SnappyDecompressor {
+ private:
+ Source* reader_; // Underlying source of bytes to decompress
+ const char* ip_; // Points to next buffered byte
+ const char* ip_limit_; // Points just past buffered bytes
+ uint32 peeked_; // Bytes peeked from reader (need to skip)
+ bool eof_; // Hit end of input without an error?
char scratch_[kMaximumTagLength]; // See RefillTag().
-
- // Ensure that all of the tag metadata for the next tag is available
- // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
- // if (ip_limit_ - ip_ < 5).
- //
- // Returns true on success, false on error or end of input.
- bool RefillTag();
-
- public:
- explicit SnappyDecompressor(Source* reader)
- : reader_(reader),
- ip_(NULL),
- ip_limit_(NULL),
- peeked_(0),
- eof_(false) {
- }
-
- ~SnappyDecompressor() {
- // Advance past any bytes we peeked at from the reader
- reader_->Skip(peeked_);
- }
-
- // Returns true iff we have hit the end of the input without an error.
- bool eof() const {
- return eof_;
- }
-
- // Read the uncompressed length stored at the start of the compressed data.
+
+ // Ensure that all of the tag metadata for the next tag is available
+ // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
+ // if (ip_limit_ - ip_ < 5).
+ //
+ // Returns true on success, false on error or end of input.
+ bool RefillTag();
+
+ public:
+ explicit SnappyDecompressor(Source* reader)
+ : reader_(reader),
+ ip_(NULL),
+ ip_limit_(NULL),
+ peeked_(0),
+ eof_(false) {
+ }
+
+ ~SnappyDecompressor() {
+ // Advance past any bytes we peeked at from the reader
+ reader_->Skip(peeked_);
+ }
+
+ // Returns true iff we have hit the end of the input without an error.
+ bool eof() const {
+ return eof_;
+ }
+
+ // Read the uncompressed length stored at the start of the compressed data.
// On success, stores the length in *result and returns true.
- // On failure, returns false.
- bool ReadUncompressedLength(uint32* result) {
+ // On failure, returns false.
+ bool ReadUncompressedLength(uint32* result) {
assert(ip_ == NULL); // Must not have read anything yet
- // Length is encoded in 1..5 bytes
- *result = 0;
- uint32 shift = 0;
- while (true) {
- if (shift >= 32) return false;
- size_t n;
- const char* ip = reader_->Peek(&n);
- if (n == 0) return false;
- const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
- reader_->Skip(1);
+ // Length is encoded in 1..5 bytes
+ *result = 0;
+ uint32 shift = 0;
+ while (true) {
+ if (shift >= 32) return false;
+ size_t n;
+ const char* ip = reader_->Peek(&n);
+ if (n == 0) return false;
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ reader_->Skip(1);
uint32 val = c & 0x7f;
if (LeftShiftOverflows(static_cast<uint8>(val), shift)) return false;
*result |= val << shift;
- if (c < 128) {
- break;
- }
- shift += 7;
- }
- return true;
- }
-
- // Process the next item found in the input.
- // Returns true if successful, false on error or end of input.
- template <class Writer>
+ if (c < 128) {
+ break;
+ }
+ shift += 7;
+ }
+ return true;
+ }
+
+ // Process the next item found in the input.
+ // Returns true if successful, false on error or end of input.
+ template <class Writer>
#if defined(__GNUC__) && defined(__x86_64__)
__attribute__((aligned(32)))
#endif
- void DecompressAllTags(Writer* writer) {
+ void DecompressAllTags(Writer* writer) {
// In x86, pad the function body to start 16 bytes later. This function has
// a couple of hotspots that are highly sensitive to alignment: we have
// observed regressions by more than 20% in some metrics just by moving the
@@ -823,22 +823,22 @@ class SnappyDecompressor {
asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
#endif
- const char* ip = ip_;
- // We could have put this refill fragment only at the beginning of the loop.
- // However, duplicating it at the end of each branch gives the compiler more
- // scope to optimize the <ip_limit_ - ip> expression based on the local
- // context, which overall increases speed.
- #define MAYBE_REFILL() \
+ const char* ip = ip_;
+ // We could have put this refill fragment only at the beginning of the loop.
+ // However, duplicating it at the end of each branch gives the compiler more
+ // scope to optimize the <ip_limit_ - ip> expression based on the local
+ // context, which overall increases speed.
+ #define MAYBE_REFILL() \
if (ip_limit_ - ip < kMaximumTagLength) { \
- ip_ = ip; \
- if (!RefillTag()) return; \
- ip = ip_; \
- }
-
- MAYBE_REFILL();
- for ( ;; ) {
- const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
-
+ ip_ = ip; \
+ if (!RefillTag()) return; \
+ ip = ip_; \
+ }
+
+ MAYBE_REFILL();
+ for ( ;; ) {
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
+
// Ratio of iterations that have LITERAL vs non-LITERAL for different
// inputs.
//
@@ -852,202 +852,202 @@ class SnappyDecompressor {
// pb 24% 76%
// bin 24% 76%
if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
- size_t literal_length = (c >> 2) + 1u;
- if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
+ size_t literal_length = (c >> 2) + 1u;
+ if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
assert(literal_length < 61);
- ip += literal_length;
+ ip += literal_length;
// NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
// will not return true unless there's already at least five spare
// bytes in addition to the literal.
- continue;
- }
+ continue;
+ }
if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
- // Long literal.
- const size_t literal_length_length = literal_length - 60;
- literal_length =
+ // Long literal.
+ const size_t literal_length_length = literal_length - 60;
+ literal_length =
ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) +
1;
- ip += literal_length_length;
- }
-
- size_t avail = ip_limit_ - ip;
- while (avail < literal_length) {
- if (!writer->Append(ip, avail)) return;
- literal_length -= avail;
- reader_->Skip(peeked_);
- size_t n;
- ip = reader_->Peek(&n);
- avail = n;
- peeked_ = avail;
- if (avail == 0) return; // Premature end of input
- ip_limit_ = ip + avail;
- }
- if (!writer->Append(ip, literal_length)) {
- return;
- }
- ip += literal_length;
- MAYBE_REFILL();
- } else {
+ ip += literal_length_length;
+ }
+
+ size_t avail = ip_limit_ - ip;
+ while (avail < literal_length) {
+ if (!writer->Append(ip, avail)) return;
+ literal_length -= avail;
+ reader_->Skip(peeked_);
+ size_t n;
+ ip = reader_->Peek(&n);
+ avail = n;
+ peeked_ = avail;
+ if (avail == 0) return; // Premature end of input
+ ip_limit_ = ip + avail;
+ }
+ if (!writer->Append(ip, literal_length)) {
+ return;
+ }
+ ip += literal_length;
+ MAYBE_REFILL();
+ } else {
const size_t entry = char_table[c];
const size_t trailer =
ExtractLowBytes(LittleEndian::Load32(ip), entry >> 11);
const size_t length = entry & 0xff;
- ip += entry >> 11;
-
- // copy_offset/256 is encoded in bits 8..10. By just fetching
- // those bits, we get copy_offset (since the bit-field starts at
- // bit 8).
+ ip += entry >> 11;
+
+ // copy_offset/256 is encoded in bits 8..10. By just fetching
+ // those bits, we get copy_offset (since the bit-field starts at
+ // bit 8).
const size_t copy_offset = entry & 0x700;
- if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
- return;
- }
- MAYBE_REFILL();
- }
- }
-
-#undef MAYBE_REFILL
- }
-};
-
-bool SnappyDecompressor::RefillTag() {
- const char* ip = ip_;
- if (ip == ip_limit_) {
- // Fetch a new fragment from the reader
- reader_->Skip(peeked_); // All peeked bytes are used up
- size_t n;
- ip = reader_->Peek(&n);
- peeked_ = n;
+ if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
+ return;
+ }
+ MAYBE_REFILL();
+ }
+ }
+
+#undef MAYBE_REFILL
+ }
+};
+
+bool SnappyDecompressor::RefillTag() {
+ const char* ip = ip_;
+ if (ip == ip_limit_) {
+ // Fetch a new fragment from the reader
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ size_t n;
+ ip = reader_->Peek(&n);
+ peeked_ = n;
eof_ = (n == 0);
if (eof_) return false;
- ip_limit_ = ip + n;
- }
-
- // Read the tag character
+ ip_limit_ = ip + n;
+ }
+
+ // Read the tag character
assert(ip < ip_limit_);
- const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
- const uint32 entry = char_table[c];
- const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ const uint32 entry = char_table[c];
+ const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
assert(needed <= sizeof(scratch_));
-
- // Read more bytes from reader if needed
- uint32 nbuf = ip_limit_ - ip;
- if (nbuf < needed) {
- // Stitch together bytes from ip and reader to form the word
- // contents. We store the needed bytes in "scratch_". They
- // will be consumed immediately by the caller since we do not
- // read more than we need.
- memmove(scratch_, ip, nbuf);
- reader_->Skip(peeked_); // All peeked bytes are used up
- peeked_ = 0;
- while (nbuf < needed) {
- size_t length;
- const char* src = reader_->Peek(&length);
- if (length == 0) return false;
+
+ // Read more bytes from reader if needed
+ uint32 nbuf = ip_limit_ - ip;
+ if (nbuf < needed) {
+ // Stitch together bytes from ip and reader to form the word
+ // contents. We store the needed bytes in "scratch_". They
+ // will be consumed immediately by the caller since we do not
+ // read more than we need.
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ while (nbuf < needed) {
+ size_t length;
+ const char* src = reader_->Peek(&length);
+ if (length == 0) return false;
uint32 to_add = std::min<uint32>(needed - nbuf, length);
- memcpy(scratch_ + nbuf, src, to_add);
- nbuf += to_add;
- reader_->Skip(to_add);
- }
+ memcpy(scratch_ + nbuf, src, to_add);
+ nbuf += to_add;
+ reader_->Skip(to_add);
+ }
assert(nbuf == needed);
- ip_ = scratch_;
- ip_limit_ = scratch_ + needed;
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + needed;
} else if (nbuf < kMaximumTagLength) {
- // Have enough bytes, but move into scratch_ so that we do not
- // read past end of input
- memmove(scratch_, ip, nbuf);
- reader_->Skip(peeked_); // All peeked bytes are used up
- peeked_ = 0;
- ip_ = scratch_;
- ip_limit_ = scratch_ + nbuf;
- } else {
- // Pass pointer to buffer returned by reader_.
- ip_ = ip;
- }
- return true;
-}
-
-template <typename Writer>
+ // Have enough bytes, but move into scratch_ so that we do not
+ // read past end of input
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + nbuf;
+ } else {
+ // Pass pointer to buffer returned by reader_.
+ ip_ = ip;
+ }
+ return true;
+}
+
+template <typename Writer>
static bool InternalUncompress(Source* r, Writer* writer) {
- // Read the uncompressed length from the front of the compressed input
- SnappyDecompressor decompressor(r);
- uint32 uncompressed_len = 0;
- if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
+ // Read the uncompressed length from the front of the compressed input
+ SnappyDecompressor decompressor(r);
+ uint32 uncompressed_len = 0;
+ if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
return InternalUncompressAllTags(&decompressor, writer, r->Available(),
uncompressed_len);
-}
-
-template <typename Writer>
-static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
- Writer* writer,
+}
+
+template <typename Writer>
+static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
+ Writer* writer,
uint32 compressed_len,
uint32 uncompressed_len) {
Report("snappy_uncompress", compressed_len, uncompressed_len);
-
- writer->SetExpectedLength(uncompressed_len);
-
- // Process the entire input
- decompressor->DecompressAllTags(writer);
+
+ writer->SetExpectedLength(uncompressed_len);
+
+ // Process the entire input
+ decompressor->DecompressAllTags(writer);
writer->Flush();
- return (decompressor->eof() && writer->CheckLength());
-}
-
-bool GetUncompressedLength(Source* source, uint32* result) {
- SnappyDecompressor decompressor(source);
- return decompressor.ReadUncompressedLength(result);
-}
-
-size_t Compress(Source* reader, Sink* writer) {
- size_t written = 0;
- size_t N = reader->Available();
+ return (decompressor->eof() && writer->CheckLength());
+}
+
+bool GetUncompressedLength(Source* source, uint32* result) {
+ SnappyDecompressor decompressor(source);
+ return decompressor.ReadUncompressedLength(result);
+}
+
+size_t Compress(Source* reader, Sink* writer) {
+ size_t written = 0;
+ size_t N = reader->Available();
const size_t uncompressed_size = N;
- char ulength[Varint::kMax32];
- char* p = Varint::Encode32(ulength, N);
- writer->Append(ulength, p-ulength);
- written += (p - ulength);
-
+ char ulength[Varint::kMax32];
+ char* p = Varint::Encode32(ulength, N);
+ writer->Append(ulength, p-ulength);
+ written += (p - ulength);
+
internal::WorkingMemory wmem(N);
-
- while (N > 0) {
- // Get next block to compress (without copying if possible)
- size_t fragment_size;
- const char* fragment = reader->Peek(&fragment_size);
+
+ while (N > 0) {
+ // Get next block to compress (without copying if possible)
+ size_t fragment_size;
+ const char* fragment = reader->Peek(&fragment_size);
assert(fragment_size != 0); // premature end of input
const size_t num_to_read = std::min(N, kBlockSize);
- size_t bytes_read = fragment_size;
-
- size_t pending_advance = 0;
- if (bytes_read >= num_to_read) {
- // Buffer returned by reader is large enough
- pending_advance = num_to_read;
- fragment_size = num_to_read;
- } else {
+ size_t bytes_read = fragment_size;
+
+ size_t pending_advance = 0;
+ if (bytes_read >= num_to_read) {
+ // Buffer returned by reader is large enough
+ pending_advance = num_to_read;
+ fragment_size = num_to_read;
+ } else {
char* scratch = wmem.GetScratchInput();
- memcpy(scratch, fragment, bytes_read);
- reader->Skip(bytes_read);
-
- while (bytes_read < num_to_read) {
- fragment = reader->Peek(&fragment_size);
+ memcpy(scratch, fragment, bytes_read);
+ reader->Skip(bytes_read);
+
+ while (bytes_read < num_to_read) {
+ fragment = reader->Peek(&fragment_size);
size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
- memcpy(scratch + bytes_read, fragment, n);
- bytes_read += n;
- reader->Skip(n);
- }
+ memcpy(scratch + bytes_read, fragment, n);
+ bytes_read += n;
+ reader->Skip(n);
+ }
assert(bytes_read == num_to_read);
- fragment = scratch;
- fragment_size = num_to_read;
- }
+ fragment = scratch;
+ fragment_size = num_to_read;
+ }
assert(fragment_size == num_to_read);
-
- // Get encoding table for compression
- int table_size;
- uint16* table = wmem.GetHashTable(num_to_read, &table_size);
-
- // Compress input_fragment and append to dest
- const int max_output = MaxCompressedLength(num_to_read);
-
- // Need a scratch buffer for the output, in case the byte sink doesn't
- // have room for us directly.
+
+ // Get encoding table for compression
+ int table_size;
+ uint16* table = wmem.GetHashTable(num_to_read, &table_size);
+
+ // Compress input_fragment and append to dest
+ const int max_output = MaxCompressedLength(num_to_read);
+
+ // Need a scratch buffer for the output, in case the byte sink doesn't
+ // have room for us directly.
// Since we encode kBlockSize regions followed by a region
// which is <= kBlockSize in length, a previously allocated
@@ -1055,19 +1055,19 @@ size_t Compress(Source* reader, Sink* writer) {
char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput());
char* end = internal::CompressFragment(fragment, fragment_size, dest, table,
table_size);
- writer->Append(dest, end - dest);
- written += (end - dest);
-
- N -= num_to_read;
- reader->Skip(pending_advance);
- }
-
+ writer->Append(dest, end - dest);
+ written += (end - dest);
+
+ N -= num_to_read;
+ reader->Skip(pending_advance);
+ }
+
Report("snappy_compress", written, uncompressed_size);
-
- return written;
-}
-
-// -----------------------------------------------------------------------
+
+ return written;
+}
+
+// -----------------------------------------------------------------------
// IOVec interfaces
// -----------------------------------------------------------------------
@@ -1260,60 +1260,60 @@ bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
}
// -----------------------------------------------------------------------
-// Flat array interfaces
-// -----------------------------------------------------------------------
-
-// A type that writes to a flat array.
-// Note that this is not a "ByteSink", but a type that matches the
-// Writer template argument to SnappyDecompressor::DecompressAllTags().
-class SnappyArrayWriter {
- private:
- char* base_;
- char* op_;
- char* op_limit_;
-
- public:
- inline explicit SnappyArrayWriter(char* dst)
- : base_(dst),
+// Flat array interfaces
+// -----------------------------------------------------------------------
+
+// A type that writes to a flat array.
+// Note that this is not a "ByteSink", but a type that matches the
+// Writer template argument to SnappyDecompressor::DecompressAllTags().
+class SnappyArrayWriter {
+ private:
+ char* base_;
+ char* op_;
+ char* op_limit_;
+
+ public:
+ inline explicit SnappyArrayWriter(char* dst)
+ : base_(dst),
op_(dst),
op_limit_(dst) {
- }
-
- inline void SetExpectedLength(size_t len) {
- op_limit_ = op_ + len;
- }
-
- inline bool CheckLength() const {
- return op_ == op_limit_;
- }
-
- inline bool Append(const char* ip, size_t len) {
- char* op = op_;
- const size_t space_left = op_limit_ - op;
- if (space_left < len) {
- return false;
- }
- memcpy(op, ip, len);
- op_ = op + len;
- return true;
- }
-
- inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
- char* op = op_;
- const size_t space_left = op_limit_ - op;
+ }
+
+ inline void SetExpectedLength(size_t len) {
+ op_limit_ = op_ + len;
+ }
+
+ inline bool CheckLength() const {
+ return op_ == op_limit_;
+ }
+
+ inline bool Append(const char* ip, size_t len) {
+ char* op = op_;
+ const size_t space_left = op_limit_ - op;
+ if (space_left < len) {
+ return false;
+ }
+ memcpy(op, ip, len);
+ op_ = op + len;
+ return true;
+ }
+
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+ char* op = op_;
+ const size_t space_left = op_limit_ - op;
if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
- // Fast path, used for the majority (about 95%) of invocations.
+ // Fast path, used for the majority (about 95%) of invocations.
UnalignedCopy128(ip, op);
- op_ = op + len;
- return true;
- } else {
- return false;
- }
- }
-
- inline bool AppendFromSelf(size_t offset, size_t len) {
+ op_ = op + len;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ inline bool AppendFromSelf(size_t offset, size_t len) {
char* const op_end = op_ + len;
-
+
// Check if we try to append from before the start of the buffer.
// Normally this would just be a check for "produced < offset",
// but "produced <= offset - 1u" is equivalent for every case
@@ -1323,40 +1323,40 @@ class SnappyArrayWriter {
// into an infinite loop.
if (Produced() <= offset - 1u || op_end > op_limit_) return false;
op_ = IncrementalCopy(op_ - offset, op_, op_end, op_limit_);
-
- return true;
- }
+
+ return true;
+ }
inline size_t Produced() const {
assert(op_ >= base_);
return op_ - base_;
}
inline void Flush() {}
-};
-
-bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
- ByteArraySource reader(compressed, n);
- return RawUncompress(&reader, uncompressed);
-}
-
-bool RawUncompress(Source* compressed, char* uncompressed) {
- SnappyArrayWriter output(uncompressed);
+};
+
+bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
+ ByteArraySource reader(compressed, n);
+ return RawUncompress(&reader, uncompressed);
+}
+
+bool RawUncompress(Source* compressed, char* uncompressed) {
+ SnappyArrayWriter output(uncompressed);
return InternalUncompress(compressed, &output);
-}
-
+}
+
bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
- size_t ulength;
- if (!GetUncompressedLength(compressed, n, &ulength)) {
- return false;
- }
+ size_t ulength;
+ if (!GetUncompressedLength(compressed, n, &ulength)) {
+ return false;
+ }
// On 32-bit builds: max_size() < kuint32max. Check for that instead
// of crashing (e.g., consider externally specified compressed data).
if (ulength > uncompressed->max_size()) {
- return false;
- }
- STLStringResizeUninitialized(uncompressed, ulength);
- return RawUncompress(compressed, n, string_as_array(uncompressed));
-}
-
+ return false;
+ }
+ STLStringResizeUninitialized(uncompressed, ulength);
+ return RawUncompress(compressed, n, string_as_array(uncompressed));
+}
+
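The std::string overloads above, together with the Compress overload defined further down in this file, form the usual round-trip API. A short usage sketch against the public snappy.h declarations:

    #include <string>
    #include <cassert>
    #include "snappy.h"

    int main() {
      const std::string input(1000, 'x');        // highly compressible input
      std::string compressed, restored;
      snappy::Compress(input.data(), input.size(), &compressed);
      assert(snappy::IsValidCompressedBuffer(compressed.data(),
                                             compressed.size()));
      const bool ok = snappy::Uncompress(compressed.data(), compressed.size(),
                                         &restored);
      return (ok && restored == input) ? 0 : 1;
    }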
bool Uncompress(const char* compressed, size_t n, TString* uncompressed) {
size_t ulength;
if (!GetUncompressedLength(compressed, n, &ulength)) {
@@ -1371,72 +1371,72 @@ bool Uncompress(const char* compressed, size_t n, TString* uncompressed) {
return RawUncompress(compressed, n, uncompressed->begin());
}
-// A Writer that drops everything on the floor and just does validation
-class SnappyDecompressionValidator {
- private:
- size_t expected_;
- size_t produced_;
-
- public:
+// A Writer that drops everything on the floor and just does validation
+class SnappyDecompressionValidator {
+ private:
+ size_t expected_;
+ size_t produced_;
+
+ public:
inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
- inline void SetExpectedLength(size_t len) {
- expected_ = len;
- }
- inline bool CheckLength() const {
- return expected_ == produced_;
- }
- inline bool Append(const char* ip, size_t len) {
- produced_ += len;
- return produced_ <= expected_;
- }
- inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
- return false;
- }
- inline bool AppendFromSelf(size_t offset, size_t len) {
+ inline void SetExpectedLength(size_t len) {
+ expected_ = len;
+ }
+ inline bool CheckLength() const {
+ return expected_ == produced_;
+ }
+ inline bool Append(const char* ip, size_t len) {
+ produced_ += len;
+ return produced_ <= expected_;
+ }
+ inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
+ return false;
+ }
+ inline bool AppendFromSelf(size_t offset, size_t len) {
// See SnappyArrayWriter::AppendFromSelf for an explanation of
// the "offset - 1u" trick.
if (produced_ <= offset - 1u) return false;
- produced_ += len;
- return produced_ <= expected_;
- }
+ produced_ += len;
+ return produced_ <= expected_;
+ }
inline void Flush() {}
-};
-
-bool IsValidCompressedBuffer(const char* compressed, size_t n) {
- ByteArraySource reader(compressed, n);
- SnappyDecompressionValidator writer;
+};
+
+bool IsValidCompressedBuffer(const char* compressed, size_t n) {
+ ByteArraySource reader(compressed, n);
+ SnappyDecompressionValidator writer;
return InternalUncompress(&reader, &writer);
-}
-
+}
+
bool IsValidCompressed(Source* compressed) {
SnappyDecompressionValidator writer;
return InternalUncompress(compressed, &writer);
}
-void RawCompress(const char* input,
- size_t input_length,
- char* compressed,
- size_t* compressed_length) {
- ByteArraySource reader(input, input_length);
- UncheckedByteArraySink writer(compressed);
- Compress(&reader, &writer);
-
- // Compute how many bytes were added
- *compressed_length = (writer.CurrentDestination() - compressed);
-}
-
+void RawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length) {
+ ByteArraySource reader(input, input_length);
+ UncheckedByteArraySink writer(compressed);
+ Compress(&reader, &writer);
+
+ // Compute how many bytes were added
+ *compressed_length = (writer.CurrentDestination() - compressed);
+}
+
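For the raw-buffer path above, the caller sizes the destination with MaxCompressedLength and reads the actual size back from *compressed_length. A usage sketch of that contract, again against the public snappy.h API:

    #include <vector>
    #include <cstddef>
    #include "snappy.h"

    int main() {
      const char input[] = "hello hello hello hello";
      const size_t input_len = sizeof(input) - 1;

      // The output buffer must hold at least MaxCompressedLength(input_len).
      std::vector<char> compressed(snappy::MaxCompressedLength(input_len));
      size_t compressed_len = 0;
      snappy::RawCompress(input, input_len, compressed.data(), &compressed_len);

      size_t ulen = 0;
      if (!snappy::GetUncompressedLength(compressed.data(), compressed_len,
                                         &ulen)) {
        return 1;
      }
      std::vector<char> restored(ulen);
      return snappy::RawUncompress(compressed.data(), compressed_len,
                                   restored.data()) ? 0 : 1;
    }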
size_t Compress(const char* input, size_t input_length,
std::string* compressed) {
- // Pre-grow the buffer to the max length of the compressed output
+ // Pre-grow the buffer to the max length of the compressed output
STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length));
-
- size_t compressed_length;
- RawCompress(input, input_length, string_as_array(compressed),
- &compressed_length);
- compressed->resize(compressed_length);
- return compressed_length;
-}
-
+
+ size_t compressed_length;
+ RawCompress(input, input_length, string_as_array(compressed),
+ &compressed_length);
+ compressed->resize(compressed_length);
+ return compressed_length;
+}
+
size_t Compress(const char* input, size_t input_length,
TString* compressed) {
// Pre-grow the buffer to the max length of the compressed output
@@ -1452,14 +1452,14 @@ size_t Compress(const char* input, size_t input_length,
// -----------------------------------------------------------------------
// Sink interface
// -----------------------------------------------------------------------
-
+
// A type that decompresses into a Sink. The template parameter
// Allocator must export one method "char* Allocate(int size);", which
// allocates a buffer of "size" and appends that to the destination.
template <typename Allocator>
class SnappyScatteredWriter {
Allocator allocator_;
-
+
// We need random access into the data generated so far. Therefore
// we keep track of all of the generated data as an array of blocks.
// All of the blocks except the last have length kBlockSize.