author    thegeorg <thegeorg@yandex-team.ru>  2022-05-17 12:11:49 +0300
committer thegeorg <thegeorg@yandex-team.ru>  2022-05-17 12:11:49 +0300
commit    2037874aa0fb0efca88322b14290deab89fccbd4 (patch)
tree      8a9d856da3ea564b9e06914a56f7f4dacb0e75f7 /contrib/libs/snappy/snappy-stubs-internal.h
parent    7c645e66a7bdae9d6c54d50bf87259c4ffc33e5b (diff)
download  ydb-2037874aa0fb0efca88322b14290deab89fccbd4.tar.gz
Update contrib/libs/snappy to 1.1.9
ref:8e094c2e0f44b866d354257c6a902b6d4394b8f0
Diffstat (limited to 'contrib/libs/snappy/snappy-stubs-internal.h')
-rw-r--r--  contrib/libs/snappy/snappy-stubs-internal.h | 484
1 file changed, 185 insertions(+), 299 deletions(-)
diff --git a/contrib/libs/snappy/snappy-stubs-internal.h b/contrib/libs/snappy/snappy-stubs-internal.h
index 4854689d17..c2a838f38f 100644
--- a/contrib/libs/snappy/snappy-stubs-internal.h
+++ b/contrib/libs/snappy/snappy-stubs-internal.h
@@ -35,11 +35,13 @@
#include "config.h"
#endif
-#include <string>
+#include <stdint.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <string>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
@@ -67,19 +69,11 @@
#include "snappy-stubs-public.h"
-#if defined(__x86_64__)
-
-// Enable 64-bit optimized versions of some routines.
-#define ARCH_K8 1
-
-#elif defined(__ppc64__)
-
+// Used to enable 64-bit optimized versions of some routines.
+#if defined(__PPC64__) || defined(__powerpc64__)
#define ARCH_PPC 1
-
-#elif defined(__aarch64__)
-
+#elif defined(__aarch64__) || defined(_M_ARM64)
#define ARCH_ARM 1
-
#endif
// Needed by OS X, among others.
@@ -93,7 +87,7 @@
#ifdef ARRAYSIZE
#undef ARRAYSIZE
#endif
-#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+#define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}
// Static prediction hints.
#ifdef HAVE_BUILTIN_EXPECT
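(The new ARRAYSIZE above uses C++11 brace initialization: int{expr} is ill-formed if the conversion narrows, and the result is a plain int rather than size_t, which avoids signed/unsigned warnings when compared against int loop indices. A minimal standalone sketch of the idiom — the macro name here is illustrative, not snappy's:)

    // int{...} rejects, at compile time, any element count that does not
    // fit in int, and yields int rather than size_t.
    #define MY_ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}

    int main() {
      char buf[16];
      static_assert(MY_ARRAYSIZE(buf) == 16, "element count");
      return 0;
    }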
@@ -104,212 +98,66 @@
#define SNAPPY_PREDICT_TRUE(x) x
#endif
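(Only the fallback branch of the prediction-hint macros is visible in this hunk; when HAVE_BUILTIN_EXPECT is defined, the true branch presumably wraps __builtin_expect. A hedged sketch assuming a GCC/Clang toolchain — the macro name is illustrative:)

    #if defined(__GNUC__)
    #define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))  // branch likely taken
    #else
    #define PREDICT_TRUE(x) (x)  // no-op fallback, as in the hunk above
    #endif

    int clamp_nonnegative(int v) {
      if (PREDICT_TRUE(v >= 0)) return v;  // hot path laid out first
      return 0;
    }

    int main() { return clamp_nonnegative(-5); }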
-// This is only used for recomputing the tag byte table used during
-// decompression; for simplicity we just remove it from the open-source
-// version (anyone who wants to regenerate it can just do the call
-// themselves within main()).
-#define DEFINE_bool(flag_name, default_value, description) \
- bool FLAGS_ ## flag_name = default_value
-#define DECLARE_bool(flag_name) \
- extern bool FLAGS_ ## flag_name
-
-namespace snappy {
-
-static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
-static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
-
-// Potentially unaligned loads and stores.
-
-// x86, PowerPC, and ARM64 can simply do these loads and stores native.
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__aarch64__)
-
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
-#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
-
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
-#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
-
-// ARMv7 and newer support native unaligned accesses, but only of 16-bit
-// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
-// do an unaligned read and rotate the words around a bit, or do the reads very
-// slowly (trip through kernel mode). There's no simple #define that says just
-// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
-// sub-architectures.
-//
-// This is a mess, but there's not much we can do about it.
-//
-// To further complicate matters, only LDR instructions (single reads) are
-// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
-// explicitly tell the compiler that these accesses can be unaligned, it can and
-// will combine accesses. On armcc, the way to signal this is done by accessing
-// through the type (uint32 __packed *), but GCC has no such attribute
-// (it ignores __attribute__((packed)) on individual variables). However,
-// we can tell it that a _struct_ is unaligned, which has the same effect,
-// so we do that.
-
-#elif defined(__arm__) && \
- !defined(__ARM_ARCH_4__) && \
- !defined(__ARM_ARCH_4T__) && \
- !defined(__ARM_ARCH_5__) && \
- !defined(__ARM_ARCH_5T__) && \
- !defined(__ARM_ARCH_5TE__) && \
- !defined(__ARM_ARCH_5TEJ__) && \
- !defined(__ARM_ARCH_6__) && \
- !defined(__ARM_ARCH_6J__) && \
- !defined(__ARM_ARCH_6K__) && \
- !defined(__ARM_ARCH_6Z__) && \
- !defined(__ARM_ARCH_6ZK__) && \
- !defined(__ARM_ARCH_6T2__)
-
-#if __GNUC__
-#define ATTRIBUTE_PACKED __attribute__((__packed__))
+// Inlining hints.
+#ifdef HAVE_ATTRIBUTE_ALWAYS_INLINE
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
#else
-#define ATTRIBUTE_PACKED
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
#endif
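(always_inline is a GCC/Clang attribute that forces inlining of a function the compiler might otherwise skip; when the configure check fails, the macro expands to nothing and the compiler's own heuristics apply. An illustrative standalone equivalent:)

    #if defined(__GNUC__) || defined(__clang__)
    #define MY_ALWAYS_INLINE __attribute__((always_inline))
    #else
    #define MY_ALWAYS_INLINE  // expands to nothing, like the fallback above
    #endif

    // The attribute must accompany the inline keyword on GCC.
    MY_ALWAYS_INLINE inline int add_one(int x) { return x + 1; }

    int main() { return add_one(41) == 42 ? 0 : 1; }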
-namespace base {
-namespace internal {
-
-struct Unaligned16Struct {
- uint16 value;
- uint8 dummy; // To make the size non-power-of-two.
-} ATTRIBUTE_PACKED;
-
-struct Unaligned32Struct {
- uint32 value;
- uint8 dummy; // To make the size non-power-of-two.
-} ATTRIBUTE_PACKED;
-
-} // namespace internal
-} // namespace base
-
-#define UNALIGNED_LOAD16(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
-#define UNALIGNED_LOAD32(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
-
-#define UNALIGNED_STORE16(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
- (_val))
-#define UNALIGNED_STORE32(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
- (_val))
-
-// TODO: NEON supports unaligned 64-bit loads and stores.
-// See if that would be more efficient on platforms supporting it,
-// at least for copies.
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
-}
+// Stubbed version of ABSL_FLAG.
+//
+// In the open source version, flags can only be changed at compile time.
+#define SNAPPY_FLAG(flag_type, flag_name, default_value, help) \
+ flag_type FLAGS_ ## flag_name = default_value
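(The stub turns each flag into a plain global initialized at compile time, and the GetFlag() stub below reads it by value. A self-contained usage sketch — the flag name is hypothetical:)

    #define MY_FLAG(flag_type, flag_name, default_value, help) \
      flag_type FLAGS_##flag_name = default_value

    template <typename T>
    T GetFlag(T flag) { return flag; }

    // Expands to: bool FLAGS_dump_tables = false;
    MY_FLAG(bool, dump_tables, false, "illustrative help text");

    int main() { return GetFlag(FLAGS_dump_tables) ? 1 : 0; }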
-#else
+namespace snappy {
-// These functions are provided for architectures that don't support
-// unaligned loads and stores.
+// Stubbed version of absl::GetFlag().
+template <typename T>
+inline T GetFlag(T flag) { return flag; }
-inline uint16 UNALIGNED_LOAD16(const void *p) {
- uint16 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
+static const uint32_t kuint32max = std::numeric_limits<uint32_t>::max();
+static const int64_t kint64max = std::numeric_limits<int64_t>::max();
-inline uint32 UNALIGNED_LOAD32(const void *p) {
- uint32 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
+// Potentially unaligned loads and stores.
-inline void UNALIGNED_STORE16(void *p, uint16 v) {
- memcpy(p, &v, sizeof v);
+inline uint16_t UNALIGNED_LOAD16(const void *p) {
+ // Compiles to a single movzx/ldrh on clang/gcc/msvc.
+ uint16_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
}
-inline void UNALIGNED_STORE32(void *p, uint32 v) {
- memcpy(p, &v, sizeof v);
+inline uint32_t UNALIGNED_LOAD32(const void *p) {
+ // Compiles to a single mov/ldr on clang/gcc/msvc.
+ uint32_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
}
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
+inline uint64_t UNALIGNED_LOAD64(const void *p) {
+ // Compiles to a single mov/ldr on clang/gcc/msvc.
+ uint64_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
}
-#endif
-
-// The following guarantees declaration of the byte swap functions.
-#if defined(SNAPPY_IS_BIG_ENDIAN)
-
-#ifdef HAVE_SYS_BYTEORDER_H
-#include <sys/byteorder.h>
-#endif
-
-#ifdef HAVE_SYS_ENDIAN_H
-#include <sys/endian.h>
-#endif
-
-#ifdef _MSC_VER
-#include <stdlib.h>
-#define bswap_16(x) _byteswap_ushort(x)
-#define bswap_32(x) _byteswap_ulong(x)
-#define bswap_64(x) _byteswap_uint64(x)
-
-#elif defined(__APPLE__)
-// Mac OS X / Darwin features
-#include <libkern/OSByteOrder.h>
-#define bswap_16(x) OSSwapInt16(x)
-#define bswap_32(x) OSSwapInt32(x)
-#define bswap_64(x) OSSwapInt64(x)
-
-#elif defined(HAVE_BYTESWAP_H)
-#include <byteswap.h>
-
-#elif defined(bswap32)
-// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
-#define bswap_16(x) bswap16(x)
-#define bswap_32(x) bswap32(x)
-#define bswap_64(x) bswap64(x)
-
-#elif defined(BSWAP_64)
-// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
-#define bswap_16(x) BSWAP_16(x)
-#define bswap_32(x) BSWAP_32(x)
-#define bswap_64(x) BSWAP_64(x)
-
-#else
-
-inline uint16 bswap_16(uint16 x) {
- return (x << 8) | (x >> 8);
+inline void UNALIGNED_STORE16(void *p, uint16_t v) {
+ // Compiles to a single mov/strh on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
}
-inline uint32 bswap_32(uint32 x) {
- x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
- return (x >> 16) | (x << 16);
+inline void UNALIGNED_STORE32(void *p, uint32_t v) {
+ // Compiles to a single mov/str on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
}
-inline uint64 bswap_64(uint64 x) {
- x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
- x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
- return (x >> 32) | (x << 32);
+inline void UNALIGNED_STORE64(void *p, uint64_t v) {
+ // Compiles to a single mov/str on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
}
-#endif
-
-#endif // defined(SNAPPY_IS_BIG_ENDIAN)
-
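(The replacement code standardizes on the memcpy idiom for all unaligned accesses: unlike the old reinterpret_cast dereferences, memcpy has defined behavior at any alignment, and mainstream compilers lower a fixed-size memcpy to a single load or store. A minimal sketch — the helper name is illustrative:)

    #include <cstdint>
    #include <cstring>

    // Well-defined for any alignment of p; typically compiles to one
    // 32-bit load on x86-64 and AArch64 with clang/gcc at -O1 and above.
    inline uint32_t load_u32_unaligned(const void* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));
      return v;
    }

    int main() {
      unsigned char buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      // Offset 1 is misaligned for uint32_t, but the read is still valid.
      return load_u32_unaligned(buf + 1) != 0 ? 0 : 1;
    }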
// Convert to little-endian storage, opposite of network format.
// Convert x from host to little endian: x = LittleEndian.FromHost(x);
// convert x from little endian to host: x = LittleEndian.ToHost(x);
@@ -321,44 +169,77 @@ inline uint64 bswap_64(uint64 x) {
// x = LittleEndian.Load16(p);
class LittleEndian {
public:
- // Conversion functions.
-#if defined(SNAPPY_IS_BIG_ENDIAN)
-
- static uint16 FromHost16(uint16 x) { return bswap_16(x); }
- static uint16 ToHost16(uint16 x) { return bswap_16(x); }
-
- static uint32 FromHost32(uint32 x) { return bswap_32(x); }
- static uint32 ToHost32(uint32 x) { return bswap_32(x); }
-
- static bool IsLittleEndian() { return false; }
+ // Functions to do unaligned loads and stores in little-endian order.
+ static inline uint16_t Load16(const void *ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
-#else // !defined(SNAPPY_IS_BIG_ENDIAN)
+ // Compiles to a single mov/str on recent clang and gcc.
+ return (static_cast<uint16_t>(buffer[0])) |
+ (static_cast<uint16_t>(buffer[1]) << 8);
+ }
- static uint16 FromHost16(uint16 x) { return x; }
- static uint16 ToHost16(uint16 x) { return x; }
+ static inline uint32_t Load32(const void *ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
- static uint32 FromHost32(uint32 x) { return x; }
- static uint32 ToHost32(uint32 x) { return x; }
+ // Compiles to a single mov/str on recent clang and gcc.
+ return (static_cast<uint32_t>(buffer[0])) |
+ (static_cast<uint32_t>(buffer[1]) << 8) |
+ (static_cast<uint32_t>(buffer[2]) << 16) |
+ (static_cast<uint32_t>(buffer[3]) << 24);
+ }
- static bool IsLittleEndian() { return true; }
+ static inline uint64_t Load64(const void *ptr) {
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+
+ // Compiles to a single mov/str on recent clang and gcc.
+ return (static_cast<uint64_t>(buffer[0])) |
+ (static_cast<uint64_t>(buffer[1]) << 8) |
+ (static_cast<uint64_t>(buffer[2]) << 16) |
+ (static_cast<uint64_t>(buffer[3]) << 24) |
+ (static_cast<uint64_t>(buffer[4]) << 32) |
+ (static_cast<uint64_t>(buffer[5]) << 40) |
+ (static_cast<uint64_t>(buffer[6]) << 48) |
+ (static_cast<uint64_t>(buffer[7]) << 56);
+ }
-#endif // !defined(SNAPPY_IS_BIG_ENDIAN)
+ static inline void Store16(void *dst, uint16_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
- // Functions to do unaligned loads and stores in little-endian order.
- static uint16 Load16(const void *p) {
- return ToHost16(UNALIGNED_LOAD16(p));
+ // Compiles to a single mov/str on recent clang and gcc.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
}
- static void Store16(void *p, uint16 v) {
- UNALIGNED_STORE16(p, FromHost16(v));
+ static void Store32(void *dst, uint32_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+ // Compiles to a single mov/str on recent clang and gcc.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
}
- static uint32 Load32(const void *p) {
- return ToHost32(UNALIGNED_LOAD32(p));
+ static void Store64(void* dst, uint64_t value) {
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+
+ // Compiles to a single mov/str on recent clang and gcc.
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+ buffer[4] = static_cast<uint8_t>(value >> 32);
+ buffer[5] = static_cast<uint8_t>(value >> 40);
+ buffer[6] = static_cast<uint8_t>(value >> 48);
+ buffer[7] = static_cast<uint8_t>(value >> 56);
}
- static void Store32(void *p, uint32 v) {
- UNALIGNED_STORE32(p, FromHost32(v));
+ static inline constexpr bool IsLittleEndian() {
+#if defined(SNAPPY_IS_BIG_ENDIAN)
+ return false;
+#else
+ return true;
+#endif // defined(SNAPPY_IS_BIG_ENDIAN)
}
};
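(The byte-wise shifts above are what make the old bswap machinery removable: assembling a value from explicit shifts always treats buffer[0] as the least significant byte, so the same code is correct on big- and little-endian hosts, and on little-endian targets compilers typically collapse it into a single load. A standalone check — the helper name is illustrative:)

    #include <cstdint>

    inline uint32_t le_load32(const uint8_t* b) {
      return static_cast<uint32_t>(b[0]) |
             (static_cast<uint32_t>(b[1]) << 8) |
             (static_cast<uint32_t>(b[2]) << 16) |
             (static_cast<uint32_t>(b[3]) << 24);
    }

    int main() {
      // 0x12345678 stored little-endian; holds on any host endianness.
      const uint8_t wire[4] = {0x78, 0x56, 0x34, 0x12};
      return le_load32(wire) == 0x12345678u ? 0 : 1;
    }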
@@ -366,19 +247,17 @@ class LittleEndian {
class Bits {
public:
// Return floor(log2(n)) for positive integer n.
- static int Log2FloorNonZero(uint32 n);
+ static int Log2FloorNonZero(uint32_t n);
// Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
- static int Log2Floor(uint32 n);
+ static int Log2Floor(uint32_t n);
// Return the first set least / most significant bit, 0-indexed. Returns an
// undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
// that it's 0-indexed.
- static int FindLSBSetNonZero(uint32 n);
+ static int FindLSBSetNonZero(uint32_t n);
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
- static int FindLSBSetNonZero64(uint64 n);
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ static int FindLSBSetNonZero64(uint64_t n);
private:
// No copying
@@ -386,9 +265,9 @@ class Bits {
void operator=(const Bits&);
};
-#ifdef HAVE_BUILTIN_CTZ
+#if defined(HAVE_BUILTIN_CTZ)
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
assert(n != 0);
// (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
// represents subtraction in base 2 and observes that there's no carry.
@@ -399,66 +278,52 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
return 31 ^ __builtin_clz(n);
}
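(Concretely: 31 is 0b11111, so for x in [0, 31] the XOR only clears bits and equals 31 - x with no borrow; for n = 32, __builtin_clz(n) is 26 and 31 ^ 26 = 5 = floor(log2(32)). A quick check, assuming a GCC/Clang toolchain:)

    #include <cassert>

    int main() {
      for (int x = 0; x <= 31; ++x)
        assert((31 ^ x) == (31 - x));            // no-borrow subtraction
      assert((31 ^ __builtin_clz(32u)) == 5);    // floor(log2(32)) == 5
      return 0;
    }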
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
}
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
assert(n != 0);
return __builtin_ctz(n);
}
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
- assert(n != 0);
- return __builtin_ctzll(n);
-}
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
#elif defined(_MSC_VER)
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
unsigned long where;
_BitScanReverse(&where, n);
return static_cast<int>(where);
}
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
unsigned long where;
if (_BitScanReverse(&where, n))
return static_cast<int>(where);
return -1;
}
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
unsigned long where;
if (_BitScanForward(&where, n))
return static_cast<int>(where);
return 32;
}
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
- assert(n != 0);
- unsigned long where;
- if (_BitScanForward64(&where, n))
- return static_cast<int>(where);
- return 64;
-}
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
#else // Portable versions.
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
assert(n != 0);
int log = 0;
- uint32 value = n;
+ uint32_t value = n;
for (int i = 4; i >= 0; --i) {
int shift = (1 << i);
- uint32 x = value >> shift;
+ uint32_t x = value >> shift;
if (x != 0) {
value = x;
log += shift;
@@ -468,16 +333,16 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
return log;
}
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
}
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
assert(n != 0);
int rc = 31;
for (int i = 4, shift = 1 << 4; i >= 0; --i) {
- const uint32 x = n << shift;
+ const uint32_t x = n << shift;
if (x != 0) {
n = x;
rc -= shift;
@@ -487,27 +352,48 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
return rc;
}
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+#endif // End portable versions.
+
+#if defined(HAVE_BUILTIN_CTZ)
+
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+ assert(n != 0);
+ return __builtin_ctzll(n);
+}
+
+#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
+// _BitScanForward64() is only available on x64 and ARM64.
+
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+ assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+ unsigned long where;
+ if (_BitScanForward64(&where, n))
+ return static_cast<int>(where);
+ return 64;
+}
+
+#else // Portable version.
+
// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
assert(n != 0);
- const uint32 bottombits = static_cast<uint32>(n);
+ const uint32_t bottombits = static_cast<uint32_t>(n);
if (bottombits == 0) {
- // Bottom bits are zero, so scan in top bits
- return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+ // Bottom bits are zero, so scan the top bits.
+ return 32 + FindLSBSetNonZero(static_cast<uint32_t>(n >> 32));
} else {
return FindLSBSetNonZero(bottombits);
}
}
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-#endif // End portable versions.
+#endif // End portable version.
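(The portable fallback simply splits the 64-bit word in half: if the low 32 bits are all zero, the first set bit lies in the high half, offset by 32. A standalone sketch using a naive 32-bit scan — names are illustrative, not snappy's:)

    #include <cassert>
    #include <cstdint>

    inline int lsb_set_nonzero32(uint32_t n) {
      int rc = 0;                      // precondition: n != 0
      while ((n & 1) == 0) { n >>= 1; ++rc; }
      return rc;
    }

    inline int lsb_set_nonzero64(uint64_t n) {
      const uint32_t lo = static_cast<uint32_t>(n);
      return lo ? lsb_set_nonzero32(lo)
                : 32 + lsb_set_nonzero32(static_cast<uint32_t>(n >> 32));
    }

    int main() {
      assert(lsb_set_nonzero64(uint64_t{1} << 40) == 40);
      assert(lsb_set_nonzero64(0x10u) == 4);
      return 0;
    }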
// Variable-length integer encoding.
class Varint {
public:
- // Maximum lengths of varint encoding of uint32.
+ // Maximum lengths of varint encoding of uint32_t.
static const int kMax32 = 5;
// Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
@@ -516,23 +402,23 @@ class Varint {
// past the last byte of the varint32. Else returns NULL. On success,
// "result <= limit".
static const char* Parse32WithLimit(const char* ptr, const char* limit,
- uint32* OUTPUT);
+ uint32_t* OUTPUT);
// REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
// EFFECTS Encodes "v" into "ptr" and returns a pointer to the
// byte just past the last encoded byte.
- static char* Encode32(char* ptr, uint32 v);
+ static char* Encode32(char* ptr, uint32_t v);
// EFFECTS Appends the varint representation of "value" to "*s".
- static void Append32(std::string* s, uint32 value);
+ static void Append32(std::string* s, uint32_t value);
};
inline const char* Varint::Parse32WithLimit(const char* p,
const char* l,
- uint32* OUTPUT) {
+ uint32_t* OUTPUT) {
const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
- uint32 b, result;
+ uint32_t b, result;
if (ptr >= limit) return NULL;
b = *(ptr++); result = b & 127; if (b < 128) goto done;
if (ptr >= limit) return NULL;
@@ -549,30 +435,30 @@ inline const char* Varint::Parse32WithLimit(const char* p,
return reinterpret_cast<const char*>(ptr);
}
-inline char* Varint::Encode32(char* sptr, uint32 v) {
+inline char* Varint::Encode32(char* sptr, uint32_t v) {
// Operate on characters as unsigneds
- unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
- static const int B = 128;
- if (v < (1<<7)) {
- *(ptr++) = v;
- } else if (v < (1<<14)) {
- *(ptr++) = v | B;
- *(ptr++) = v>>7;
- } else if (v < (1<<21)) {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = v>>14;
- } else if (v < (1<<28)) {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = v>>21;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(sptr);
+ static const uint8_t B = 128;
+ if (v < (1 << 7)) {
+ *(ptr++) = static_cast<uint8_t>(v);
+ } else if (v < (1 << 14)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 7);
+ } else if (v < (1 << 21)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 14);
+ } else if (v < (1 << 28)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 21);
} else {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = (v>>21) | B;
- *(ptr++) = v>>28;
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v>>7) | B);
+ *(ptr++) = static_cast<uint8_t>((v>>14) | B);
+ *(ptr++) = static_cast<uint8_t>((v>>21) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 28);
}
return reinterpret_cast<char*>(ptr);
}
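(As a sanity check on the encoding above: each byte carries 7 payload bits and the high bit flags continuation, so values below 128 take one byte and a full uint32_t takes at most five. A hedged round-trip sketch — a standalone reimplementation, not the snappy API:)

    #include <cassert>
    #include <cstdint>

    inline char* encode32(char* p, uint32_t v) {
      uint8_t* ptr = reinterpret_cast<uint8_t*>(p);
      while (v >= 128) {
        *ptr++ = static_cast<uint8_t>(v | 128);  // 7 bits + continuation flag
        v >>= 7;
      }
      *ptr++ = static_cast<uint8_t>(v);          // final byte: high bit clear
      return reinterpret_cast<char*>(ptr);
    }

    inline const char* decode32(const char* p, uint32_t* out) {
      const uint8_t* ptr = reinterpret_cast<const uint8_t*>(p);
      uint32_t result = 0;
      for (int shift = 0; shift <= 28; shift += 7) {
        const uint32_t b = *ptr++;
        result |= (b & 127) << shift;
        if (b < 128) {
          *out = result;
          return reinterpret_cast<const char*>(ptr);
        }
      }
      return nullptr;                            // malformed: > 5 bytes
    }

    int main() {
      char buf[5];
      char* end = encode32(buf, 300);            // 300 -> two bytes
      uint32_t v = 0;
      assert(end - buf == 2);
      assert(decode32(buf, &v) == end && v == 300);
      return 0;
    }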