path: root/contrib/libs/snappy/snappy-stubs-internal.h
author     thegeorg <thegeorg@yandex-team.ru>            2022-02-10 16:45:12 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:45:12 +0300
commit     49116032d905455a7b1c994e4a696afc885c1e71 (patch)
tree       be835aa92c6248212e705f25388ebafcf84bc7a1 /contrib/libs/snappy/snappy-stubs-internal.h
parent     4e839db24a3bbc9f1c610c43d6faaaa99824dcca (diff)
Restoring authorship annotation for <thegeorg@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/snappy/snappy-stubs-internal.h')
-rw-r--r--  contrib/libs/snappy/snappy-stubs-internal.h  500
1 file changed, 250 insertions, 250 deletions
diff --git a/contrib/libs/snappy/snappy-stubs-internal.h b/contrib/libs/snappy/snappy-stubs-internal.h
index 128553b328..4854689d17 100644
--- a/contrib/libs/snappy/snappy-stubs-internal.h
+++ b/contrib/libs/snappy/snappy-stubs-internal.h
@@ -28,43 +28,43 @@
//
// Various stubs for the open-source version of Snappy.
-#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-#ifdef HAVE_CONFIG_H
-#include "config.h"
+#ifdef HAVE_CONFIG_H
+#include "config.h"
#endif
-#include <string>
+#include <string>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
-#ifdef HAVE_SYS_MMAN_H
-#include <sys/mman.h>
-#endif
-
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-
-#if defined(_MSC_VER)
-#include <intrin.h>
-#endif // defined(_MSC_VER)
-
-#ifndef __has_feature
-#define __has_feature(x) 0
-#endif
-
-#if __has_feature(memory_sanitizer)
-#include <sanitizer/msan_interface.h>
-#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
- __msan_unpoison((address), (size))
-#else
-#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
-#endif // __has_feature(memory_sanitizer)
-
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif // defined(_MSC_VER)
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ __msan_unpoison((address), (size))
+#else
+#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
+#endif // __has_feature(memory_sanitizer)
+
#include "snappy-stubs-public.h"
#if defined(__x86_64__)
@@ -72,14 +72,14 @@
// Enable 64-bit optimized versions of some routines.
#define ARCH_K8 1
-#elif defined(__ppc64__)
-
-#define ARCH_PPC 1
-
-#elif defined(__aarch64__)
-
-#define ARCH_ARM 1
-
+#elif defined(__ppc64__)
+
+#define ARCH_PPC 1
+
+#elif defined(__aarch64__)
+
+#define ARCH_ARM 1
+
#endif
// Needed by OS X, among others.
@@ -95,14 +95,14 @@
#endif
#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
-// Static prediction hints.
-#ifdef HAVE_BUILTIN_EXPECT
-#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
-#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
-#else
-#define SNAPPY_PREDICT_FALSE(x) x
-#define SNAPPY_PREDICT_TRUE(x) x
-#endif
+// Static prediction hints.
+#ifdef HAVE_BUILTIN_EXPECT
+#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#else
+#define SNAPPY_PREDICT_FALSE(x) x
+#define SNAPPY_PREDICT_TRUE(x) x
+#endif
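// A minimal usage sketch for the prediction hints above (names and the
// condition are illustrative, not from this header):
inline bool CheckInput(const char* ip, size_t input_size) {
  // Mark the error branch cold so the compiler lays out the hot path
  // as straight-line code; without __builtin_expect this is a plain if.
  if (SNAPPY_PREDICT_FALSE(input_size == 0)) {
    return false;
  }
  return SNAPPY_PREDICT_TRUE(*ip != 0);
}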
// This is only used for recomputing the tag byte table used during
// decompression; for simplicity we just remove it from the open-source
@@ -120,10 +120,10 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
// Potentially unaligned loads and stores.
-// x86, PowerPC, and ARM64 can simply do these loads and stores native.
+// x86, PowerPC, and ARM64 can simply do these loads and stores native.
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__aarch64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
+ defined(__aarch64__)
#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
@@ -141,19 +141,19 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
// sub-architectures.
//
// This is a mess, but there's not much we can do about it.
-//
-// To further complicate matters, only LDR instructions (single reads) are
-// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
-// explicitly tell the compiler that these accesses can be unaligned, it can and
-// will combine accesses. On armcc, the way to signal this is done by accessing
-// through the type (uint32 __packed *), but GCC has no such attribute
-// (it ignores __attribute__((packed)) on individual variables). However,
-// we can tell it that a _struct_ is unaligned, which has the same effect,
-// so we do that.
+//
+// To further complicate matters, only LDR instructions (single reads) are
+// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
+// explicitly tell the compiler that these accesses can be unaligned, it can and
+// will combine accesses. On armcc, the way to signal this is done by accessing
+// through the type (uint32 __packed *), but GCC has no such attribute
+// (it ignores __attribute__((packed)) on individual variables). However,
+// we can tell it that a _struct_ is unaligned, which has the same effect,
+// so we do that.
#elif defined(__arm__) && \
- !defined(__ARM_ARCH_4__) && \
- !defined(__ARM_ARCH_4T__) && \
+ !defined(__ARM_ARCH_4__) && \
+ !defined(__ARM_ARCH_4T__) && \
!defined(__ARM_ARCH_5__) && \
!defined(__ARM_ARCH_5T__) && \
!defined(__ARM_ARCH_5TE__) && \
@@ -165,41 +165,41 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
!defined(__ARM_ARCH_6ZK__) && \
!defined(__ARM_ARCH_6T2__)
-#if __GNUC__
-#define ATTRIBUTE_PACKED __attribute__((__packed__))
-#else
-#define ATTRIBUTE_PACKED
-#endif
-
-namespace base {
-namespace internal {
-
-struct Unaligned16Struct {
- uint16 value;
- uint8 dummy; // To make the size non-power-of-two.
-} ATTRIBUTE_PACKED;
-
-struct Unaligned32Struct {
- uint32 value;
- uint8 dummy; // To make the size non-power-of-two.
-} ATTRIBUTE_PACKED;
-
-} // namespace internal
-} // namespace base
-
-#define UNALIGNED_LOAD16(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
-#define UNALIGNED_LOAD32(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
-
-#define UNALIGNED_STORE16(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
- (_val))
-#define UNALIGNED_STORE32(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
- (_val))
-
-// TODO: NEON supports unaligned 64-bit loads and stores.
+#if __GNUC__
+#define ATTRIBUTE_PACKED __attribute__((__packed__))
+#else
+#define ATTRIBUTE_PACKED
+#endif
+
+namespace base {
+namespace internal {
+
+struct Unaligned16Struct {
+ uint16 value;
+ uint8 dummy; // To make the size non-power-of-two.
+} ATTRIBUTE_PACKED;
+
+struct Unaligned32Struct {
+ uint32 value;
+ uint8 dummy; // To make the size non-power-of-two.
+} ATTRIBUTE_PACKED;
+
+} // namespace internal
+} // namespace base
+
+#define UNALIGNED_LOAD16(_p) \
+ ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
+#define UNALIGNED_LOAD32(_p) \
+ ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
+
+#define UNALIGNED_STORE16(_p, _val) \
+ ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
+ (_val))
+#define UNALIGNED_STORE32(_p, _val) \
+ ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
+ (_val))
+
+// TODO: NEON supports unaligned 64-bit loads and stores.
// See if that would be more efficient on platforms supporting it,
// at least for copies.
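// The packed-struct idiom above as a standalone sketch (illustrative
// names; assumes GCC, where __attribute__((__packed__)) on the struct
// tells the compiler the pointee may be misaligned, so it emits byte
// loads or an unaligned-capable LDR instead of combining accesses into
// LDRD/LDM):
struct UnalignedU32Sketch {
  uint32 value;
} __attribute__((__packed__));

inline uint32 LoadU32Sketch(const char* p) {
  return reinterpret_cast<const UnalignedU32Sketch*>(p)->value;
}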
@@ -250,66 +250,66 @@ inline void UNALIGNED_STORE64(void *p, uint64 v) {
#endif
-// The following guarantees declaration of the byte swap functions.
-#if defined(SNAPPY_IS_BIG_ENDIAN)
-
-#ifdef HAVE_SYS_BYTEORDER_H
-#include <sys/byteorder.h>
-#endif
-
-#ifdef HAVE_SYS_ENDIAN_H
-#include <sys/endian.h>
-#endif
-
-#ifdef _MSC_VER
-#include <stdlib.h>
-#define bswap_16(x) _byteswap_ushort(x)
-#define bswap_32(x) _byteswap_ulong(x)
-#define bswap_64(x) _byteswap_uint64(x)
-
-#elif defined(__APPLE__)
-// Mac OS X / Darwin features
-#include <libkern/OSByteOrder.h>
-#define bswap_16(x) OSSwapInt16(x)
-#define bswap_32(x) OSSwapInt32(x)
-#define bswap_64(x) OSSwapInt64(x)
-
-#elif defined(HAVE_BYTESWAP_H)
-#include <byteswap.h>
-
-#elif defined(bswap32)
-// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
-#define bswap_16(x) bswap16(x)
-#define bswap_32(x) bswap32(x)
-#define bswap_64(x) bswap64(x)
-
-#elif defined(BSWAP_64)
-// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
-#define bswap_16(x) BSWAP_16(x)
-#define bswap_32(x) BSWAP_32(x)
-#define bswap_64(x) BSWAP_64(x)
-
-#else
-
-inline uint16 bswap_16(uint16 x) {
- return (x << 8) | (x >> 8);
+// The following guarantees declaration of the byte swap functions.
+#if defined(SNAPPY_IS_BIG_ENDIAN)
+
+#ifdef HAVE_SYS_BYTEORDER_H
+#include <sys/byteorder.h>
+#endif
+
+#ifdef HAVE_SYS_ENDIAN_H
+#include <sys/endian.h>
+#endif
+
+#ifdef _MSC_VER
+#include <stdlib.h>
+#define bswap_16(x) _byteswap_ushort(x)
+#define bswap_32(x) _byteswap_ulong(x)
+#define bswap_64(x) _byteswap_uint64(x)
+
+#elif defined(__APPLE__)
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#define bswap_16(x) OSSwapInt16(x)
+#define bswap_32(x) OSSwapInt32(x)
+#define bswap_64(x) OSSwapInt64(x)
+
+#elif defined(HAVE_BYTESWAP_H)
+#include <byteswap.h>
+
+#elif defined(bswap32)
+// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
+#define bswap_16(x) bswap16(x)
+#define bswap_32(x) bswap32(x)
+#define bswap_64(x) bswap64(x)
+
+#elif defined(BSWAP_64)
+// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
+#define bswap_16(x) BSWAP_16(x)
+#define bswap_32(x) BSWAP_32(x)
+#define bswap_64(x) BSWAP_64(x)
+
+#else
+
+inline uint16 bswap_16(uint16 x) {
+ return (x << 8) | (x >> 8);
+}
+
+inline uint32 bswap_32(uint32 x) {
+ x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
+ return (x >> 16) | (x << 16);
+}
+
+inline uint64 bswap_64(uint64 x) {
+ x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
+ x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
+ return (x >> 32) | (x << 32);
}
-inline uint32 bswap_32(uint32 x) {
- x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
- return (x >> 16) | (x << 16);
-}
-
-inline uint64 bswap_64(uint64 x) {
- x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
- x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
- return (x >> 32) | (x << 32);
-}
-
-#endif
-
-#endif // defined(SNAPPY_IS_BIG_ENDIAN)
-
+#endif
+
+#endif // defined(SNAPPY_IS_BIG_ENDIAN)
+
// Convert to little-endian storage, opposite of network format.
// Convert x from host to little endian: x = LittleEndian.FromHost(x);
// convert x from little endian to host: x = LittleEndian.ToHost(x);
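// A quick worked check of the byte-swap fallbacks above (a sketch;
// bswap_* only exists on big-endian builds, and which definition is in
// play depends on the platform branches). For the portable bswap_32,
// the first line swaps the bytes within each 16-bit half
// (0x12345678 -> 0x34127856) and the final rotate swaps the halves
// (-> 0x78563412).
#if defined(SNAPPY_IS_BIG_ENDIAN)
inline void BswapSanityCheck() {
  assert(bswap_16(0x1234) == 0x3412);
  assert(bswap_32(0x12345678) == 0x78563412);
  assert(bswap_64(0x0102030405060708ULL) == 0x0807060504030201ULL);
}
#endif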
@@ -322,28 +322,28 @@ inline uint64 bswap_64(uint64 x) {
class LittleEndian {
public:
// Conversion functions.
-#if defined(SNAPPY_IS_BIG_ENDIAN)
+#if defined(SNAPPY_IS_BIG_ENDIAN)
+
+ static uint16 FromHost16(uint16 x) { return bswap_16(x); }
+ static uint16 ToHost16(uint16 x) { return bswap_16(x); }
- static uint16 FromHost16(uint16 x) { return bswap_16(x); }
- static uint16 ToHost16(uint16 x) { return bswap_16(x); }
+ static uint32 FromHost32(uint32 x) { return bswap_32(x); }
+ static uint32 ToHost32(uint32 x) { return bswap_32(x); }
- static uint32 FromHost32(uint32 x) { return bswap_32(x); }
- static uint32 ToHost32(uint32 x) { return bswap_32(x); }
-
static bool IsLittleEndian() { return false; }
-#else // !defined(SNAPPY_IS_BIG_ENDIAN)
-
- static uint16 FromHost16(uint16 x) { return x; }
- static uint16 ToHost16(uint16 x) { return x; }
-
- static uint32 FromHost32(uint32 x) { return x; }
- static uint32 ToHost32(uint32 x) { return x; }
-
- static bool IsLittleEndian() { return true; }
-
-#endif // !defined(SNAPPY_IS_BIG_ENDIAN)
-
+#else // !defined(SNAPPY_IS_BIG_ENDIAN)
+
+ static uint16 FromHost16(uint16 x) { return x; }
+ static uint16 ToHost16(uint16 x) { return x; }
+
+ static uint32 FromHost32(uint32 x) { return x; }
+ static uint32 ToHost32(uint32 x) { return x; }
+
+ static bool IsLittleEndian() { return true; }
+
+#endif // !defined(SNAPPY_IS_BIG_ENDIAN)
+
// Functions to do unaligned loads and stores in little-endian order.
static uint16 Load16(const void *p) {
return ToHost16(UNALIGNED_LOAD16(p));
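// A minimal sketch of Load16 above: {0x34, 0x12} is the little-endian
// encoding of 0x1234, so the assertion holds on either byte order
// (function name is illustrative):
inline void LittleEndianLoadSketch() {
  const unsigned char buf[2] = {0x34, 0x12};
  assert(LittleEndian::Load16(buf) == 0x1234);
}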
@@ -365,9 +365,9 @@ class LittleEndian {
// Some bit-manipulation functions.
class Bits {
public:
- // Return floor(log2(n)) for positive integer n.
- static int Log2FloorNonZero(uint32 n);
-
+ // Return floor(log2(n)) for positive integer n.
+ static int Log2FloorNonZero(uint32 n);
+
// Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
static int Log2Floor(uint32 n);
@@ -375,85 +375,85 @@ class Bits {
// undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
// that it's 0-indexed.
static int FindLSBSetNonZero(uint32 n);
-
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
static int FindLSBSetNonZero64(uint64 n);
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
private:
- // No copying
- Bits(const Bits&);
- void operator=(const Bits&);
+ // No copying
+ Bits(const Bits&);
+ void operator=(const Bits&);
};
#ifdef HAVE_BUILTIN_CTZ
-inline int Bits::Log2FloorNonZero(uint32 n) {
- assert(n != 0);
- // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
- // represents subtraction in base 2 and observes that there's no carry.
- //
- // GCC and Clang represent __builtin_clz on x86 as 31 ^ _bit_scan_reverse(x).
- // Using "31 ^" here instead of "31 -" allows the optimizer to strip the
- // function body down to _bit_scan_reverse(x).
- return 31 ^ __builtin_clz(n);
-}
-
+inline int Bits::Log2FloorNonZero(uint32 n) {
+ assert(n != 0);
+ // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
+ // represents subtraction in base 2 and observes that there's no carry.
+ //
+ // GCC and Clang represent __builtin_clz on x86 as 31 ^ _bit_scan_reverse(x).
+ // Using "31 ^" here instead of "31 -" allows the optimizer to strip the
+ // function body down to _bit_scan_reverse(x).
+ return 31 ^ __builtin_clz(n);
+}
+
inline int Bits::Log2Floor(uint32 n) {
- return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
+ return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
}
inline int Bits::FindLSBSetNonZero(uint32 n) {
- assert(n != 0);
+ assert(n != 0);
return __builtin_ctz(n);
}
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
inline int Bits::FindLSBSetNonZero64(uint64 n) {
- assert(n != 0);
+ assert(n != 0);
return __builtin_ctzll(n);
}
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
-#elif defined(_MSC_VER)
-
-inline int Bits::Log2FloorNonZero(uint32 n) {
- assert(n != 0);
- unsigned long where;
- _BitScanReverse(&where, n);
- return static_cast<int>(where);
-}
-
-inline int Bits::Log2Floor(uint32 n) {
- unsigned long where;
- if (_BitScanReverse(&where, n))
- return static_cast<int>(where);
- return -1;
-}
-
-inline int Bits::FindLSBSetNonZero(uint32 n) {
- assert(n != 0);
- unsigned long where;
- if (_BitScanForward(&where, n))
- return static_cast<int>(where);
- return 32;
-}
-
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
- assert(n != 0);
- unsigned long where;
- if (_BitScanForward64(&where, n))
- return static_cast<int>(where);
- return 64;
-}
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
+#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+
+#elif defined(_MSC_VER)
+
+inline int Bits::Log2FloorNonZero(uint32 n) {
+ assert(n != 0);
+ unsigned long where;
+ _BitScanReverse(&where, n);
+ return static_cast<int>(where);
+}
+
+inline int Bits::Log2Floor(uint32 n) {
+ unsigned long where;
+ if (_BitScanReverse(&where, n))
+ return static_cast<int>(where);
+ return -1;
+}
+
+inline int Bits::FindLSBSetNonZero(uint32 n) {
+ assert(n != 0);
+ unsigned long where;
+ if (_BitScanForward(&where, n))
+ return static_cast<int>(where);
+ return 32;
+}
+
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+inline int Bits::FindLSBSetNonZero64(uint64 n) {
+ assert(n != 0);
+ unsigned long where;
+ if (_BitScanForward64(&where, n))
+ return static_cast<int>(where);
+ return 64;
+}
+#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+
#else // Portable versions.
-inline int Bits::Log2FloorNonZero(uint32 n) {
- assert(n != 0);
-
+inline int Bits::Log2FloorNonZero(uint32 n) {
+ assert(n != 0);
+
int log = 0;
uint32 value = n;
for (int i = 4; i >= 0; --i) {
@@ -468,13 +468,13 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
return log;
}
-inline int Bits::Log2Floor(uint32 n) {
- return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
-}
-
+inline int Bits::Log2Floor(uint32 n) {
+ return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
+}
+
inline int Bits::FindLSBSetNonZero(uint32 n) {
- assert(n != 0);
-
+ assert(n != 0);
+
int rc = 31;
for (int i = 4, shift = 1 << 4; i >= 0; --i) {
const uint32 x = n << shift;
@@ -487,11 +487,11 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
return rc;
}
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
inline int Bits::FindLSBSetNonZero64(uint64 n) {
- assert(n != 0);
-
+ assert(n != 0);
+
const uint32 bottombits = static_cast<uint32>(n);
if (bottombits == 0) {
// Bottom bits are zero, so scan in top bits
@@ -500,7 +500,7 @@ inline int Bits::FindLSBSetNonZero64(uint64 n) {
return FindLSBSetNonZero(bottombits);
}
}
-#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+#endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
#endif // End portable versions.
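// Worked checks for the Bits routines above (a sketch; which
// implementation runs depends on HAVE_BUILTIN_CTZ / _MSC_VER). For the
// "31 ^ __builtin_clz" trick: __builtin_clz(32) == 26 and
// 31 ^ 26 == 31 - 26 == 5 == floor(log2(32)); XOR matches subtraction
// because 31 is 0b11111, so no bit position can borrow. For the 64-bit
// split: 0x100000000 has an all-zero low word, so the scan moves to
// the top word and the answer is 32.
inline void BitsSanityCheck() {
  assert(Bits::Log2FloorNonZero(32) == 5);
  assert(Bits::Log2Floor(0) == -1);
  assert(Bits::FindLSBSetNonZero(0x50) == 4);  // 0x50 == 0b1010000
#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
  assert(Bits::FindLSBSetNonZero64(0x100000000ULL) == 32);
#endif
}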
@@ -524,7 +524,7 @@ class Varint {
static char* Encode32(char* ptr, uint32 v);
// EFFECTS Appends the varint representation of "value" to "*s".
- static void Append32(std::string* s, uint32 value);
+ static void Append32(std::string* s, uint32 value);
};
inline const char* Varint::Parse32WithLimit(const char* p,
@@ -577,12 +577,12 @@ inline char* Varint::Encode32(char* sptr, uint32 v) {
return reinterpret_cast<char*>(ptr);
}
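// A worked sketch of the varint format Encode32 emits: 300 is
// 0b100101100, so the low seven bits (0x2C) go out first with the
// continuation bit set (0xAC), then the remaining bits (0x02). Assumes
// the class's elided kMax32 constant (the 5-byte maximum).
inline void VarintEncodeSketch() {
  char buf[Varint::kMax32];
  char* end = Varint::Encode32(buf, 300);
  assert(end - buf == 2);
  assert((buf[0] & 0xFF) == 0xAC);
  assert(buf[1] == 0x02);
}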
-// If you know the internal layout of the std::string in use, you can
+// If you know the internal layout of the std::string in use, you can
// replace this function with one that resizes the string without
// filling the new space with zeros (if applicable) --
// it will be non-portable but faster.
-inline void STLStringResizeUninitialized(std::string* s, size_t new_size) {
- s->resize(new_size);
+inline void STLStringResizeUninitialized(std::string* s, size_t new_size) {
+ s->resize(new_size);
}
// Return a mutable char* pointing to a string's internal buffer,
@@ -597,10 +597,10 @@ inline void STLStringResizeUninitialized(std::string* s, size_t new_size) {
// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
// proposes this as the method. It will officially be part of the standard
// for C++0x. This should already work on all current implementations.
-inline char* string_as_array(std::string* str) {
- return str->empty() ? NULL : &*str->begin();
+inline char* string_as_array(std::string* str) {
+ return str->empty() ? NULL : &*str->begin();
}
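// A sketch of the pattern these two helpers enable: size the string's
// buffer up front, then let C-style code fill it in place (function
// name is illustrative, not part of this header):
inline void FillStringSketch(const char* src, size_t n, std::string* out) {
  STLStringResizeUninitialized(out, n);
  if (n > 0) memcpy(string_as_array(out), src, n);
}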
} // namespace snappy
-#endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_