author     vitalyisaev <vitalyisaev@ydb.tech>    2023-11-14 09:58:56 +0300
committer  vitalyisaev <vitalyisaev@ydb.tech>    2023-11-14 10:20:20 +0300
commit     c2b2dfd9827a400a8495e172a56343462e3ceb82 (patch)
tree       cd4e4f597d01bede4c82dffeb2d780d0a9046bd0 /contrib/restricted/murmurhash
parent     d4ae8f119e67808cb0cf776ba6e0cf95296f2df7 (diff)
download   ydb-c2b2dfd9827a400a8495e172a56343462e3ceb82.tar.gz
YQ Connector: move tests from yql to ydb (OSS)
Move the folder with Connector tests from the yql folder to the ydb folder (kept in sync with github).
Diffstat (limited to 'contrib/restricted/murmurhash')
-rw-r--r-- | contrib/restricted/murmurhash/MurmurHash1.cpp | 174
-rw-r--r-- | contrib/restricted/murmurhash/MurmurHash1.h   |  44
-rw-r--r-- | contrib/restricted/murmurhash/MurmurHash2.cpp | 545
-rw-r--r-- | contrib/restricted/murmurhash/MurmurHash2.h   |  49
-rw-r--r-- | contrib/restricted/murmurhash/MurmurHash3.cpp | 340
-rw-r--r-- | contrib/restricted/murmurhash/MurmurHash3.h   |  47
-rw-r--r-- | contrib/restricted/murmurhash/README.md       |  16
-rw-r--r-- | contrib/restricted/murmurhash/ya.make         |  25
8 files changed, 1240 insertions, 0 deletions
diff --git a/contrib/restricted/murmurhash/MurmurHash1.cpp b/contrib/restricted/murmurhash/MurmurHash1.cpp
new file mode 100644
index 0000000000..07edf2bd43
--- /dev/null
+++ b/contrib/restricted/murmurhash/MurmurHash1.cpp
@@ -0,0 +1,174 @@
+//-----------------------------------------------------------------------------
+// MurmurHash was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - This code makes a few assumptions about how your machine behaves -
+
+// 1. We can read a 4-byte value from any address without crashing
+// 2. sizeof(int) == 4
+
+// And it has a few limitations -
+
+// 1. It will not work incrementally.
+// 2. It will not produce the same results on little-endian and big-endian
+//    machines.
+
+#include "MurmurHash1.h"
+
+//-----------------------------------------------------------------------------
+
+uint32_t MurmurHash1 ( const void * key, size_t len, uint32_t seed )
+{
+  const unsigned int m = 0xc6a4a793;
+
+  const int r = 16;
+
+  unsigned int h = seed ^ (len * m);
+
+  //----------
+
+  const unsigned char * data = (const unsigned char *)key;
+
+  while(len >= 4)
+  {
+    unsigned int k = *(unsigned int *)data;
+
+    h += k;
+    h *= m;
+    h ^= h >> 16;
+
+    data += 4;
+    len -= 4;
+  }
+
+  //----------
+
+  switch(len)
+  {
+  case 3:
+    h += data[2] << 16;
+  case 2:
+    h += data[1] << 8;
+  case 1:
+    h += data[0];
+    h *= m;
+    h ^= h >> r;
+  };
+
+  //----------
+
+  h *= m;
+  h ^= h >> 10;
+  h *= m;
+  h ^= h >> 17;
+
+  return h;
+}
+
+//-----------------------------------------------------------------------------
+// MurmurHash1Aligned, by Austin Appleby
+
+// Same algorithm as MurmurHash1, but only does aligned reads - should be safer
+// on certain platforms.
+
+// Performance should be equal to or better than the simple version.
+
+unsigned int MurmurHash1Aligned ( const void * key, size_t len, unsigned int seed )
+{
+  const unsigned int m = 0xc6a4a793;
+  const int r = 16;
+
+  const unsigned char * data = (const unsigned char *)key;
+
+  unsigned int h = seed ^ (len * m);
+
+  size_t align = (uint64_t)data & 3;
+
+  if(align && (len >= 4))
+  {
+    // Pre-load the temp registers
+
+    unsigned int t = 0, d = 0;
+
+    switch(align)
+    {
+      case 1: t |= data[2] << 16;
+      case 2: t |= data[1] << 8;
+      case 3: t |= data[0];
+    }
+
+    t <<= (8 * align);
+
+    data += 4-align;
+    len -= 4-align;
+
+    int sl = 8 * (4-align);
+    int sr = 8 * align;
+
+    // Mix
+
+    while(len >= 4)
+    {
+      d = *(unsigned int *)data;
+      t = (t >> sr) | (d << sl);
+      h += t;
+      h *= m;
+      h ^= h >> r;
+      t = d;
+
+      data += 4;
+      len -= 4;
+    }
+
+    // Handle leftover data in temp registers
+
+    size_t pack = len < align ? len : align;
+
+    d = 0;
+
+    switch(pack)
+    {
+    case 3: d |= data[2] << 16;
+    case 2: d |= data[1] << 8;
+    case 1: d |= data[0];
+    case 0: h += (t >> sr) | (d << sl);
+            h *= m;
+            h ^= h >> r;
+    }
+
+    data += pack;
+    len -= pack;
+  }
+  else
+  {
+    while(len >= 4)
+    {
+      h += *(unsigned int *)data;
+      h *= m;
+      h ^= h >> r;
+
+      data += 4;
+      len -= 4;
+    }
+  }
+
+  //----------
+  // Handle tail bytes
+
+  switch(len)
+  {
+  case 3: h += data[2] << 16;
+  case 2: h += data[1] << 8;
+  case 1: h += data[0];
+          h *= m;
+          h ^= h >> r;
+  };
+
+  h *= m;
+  h ^= h >> 10;
+  h *= m;
+  h ^= h >> 17;
+
+  return h;
+}
diff --git a/contrib/restricted/murmurhash/MurmurHash1.h b/contrib/restricted/murmurhash/MurmurHash1.h
new file mode 100644
index 0000000000..cf6f5b7f9d
--- /dev/null
+++ b/contrib/restricted/murmurhash/MurmurHash1.h
@@ -0,0 +1,44 @@
+//-----------------------------------------------------------------------------
+// MurmurHash1 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+#ifndef _MURMURHASH1_H_
+#define _MURMURHASH1_H_
+
+#include <stddef.h>
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER) && (_MSC_VER < 1600)
+
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+typedef unsigned __int64 uint64_t;
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#include <stdint.h>
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+uint32_t MurmurHash1 ( const void * key, size_t len, uint32_t seed );
+uint32_t MurmurHash1Aligned ( const void * key, size_t len, uint32_t seed );
+
+#ifdef __cplusplus
+}
+#endif
+
+//-----------------------------------------------------------------------------
+
+#endif // _MURMURHASH1_H_
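For reference, a minimal caller of the two entry points declared in MurmurHash1.h might look like the sketch below; it is not part of the commit, and the input string and seed are arbitrary illustration values. Per the comments above, the aligned variant computes the same hash while avoiding unaligned loads:

```cpp
#include <cstdio>
#include <cstring>

#include "MurmurHash1.h"

int main()
{
    const char key[] = "hello murmur";   // arbitrary example input
    const uint32_t seed = 42;            // arbitrary example seed

    // Plain variant: relies on unaligned 4-byte reads being safe.
    uint32_t h_plain = MurmurHash1(key, strlen(key), seed);

    // Aligned variant: same algorithm, but only aligned reads, for
    // platforms where unaligned access traps.
    uint32_t h_aligned = MurmurHash1Aligned(key, strlen(key), seed);

    printf("MurmurHash1:        %08x\n", h_plain);
    printf("MurmurHash1Aligned: %08x\n", h_aligned);
    return 0;
}
```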
diff --git a/contrib/restricted/murmurhash/MurmurHash2.cpp b/contrib/restricted/murmurhash/MurmurHash2.cpp
new file mode 100644
index 0000000000..aceda398a2
--- /dev/null
+++ b/contrib/restricted/murmurhash/MurmurHash2.cpp
@@ -0,0 +1,545 @@
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - This code makes a few assumptions about how your machine behaves -
+
+// 1. We can read a 4-byte value from any address without crashing
+// 2. sizeof(int) == 4
+
+// And it has a few limitations -
+
+// 1. It will not work incrementally.
+// 2. It will not produce the same results on little-endian and big-endian
+//    machines.
+
+#include "MurmurHash2.h"
+#include <string.h>
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER)
+
+#define FORCE_INLINE __forceinline
+
+#define BIG_CONSTANT(x) (x)
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#define FORCE_INLINE inline __attribute__((always_inline))
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+// Block read - if your platform needs to do endian-swapping or can only
+// handle aligned reads, do the conversion here
+
+FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
+{
+  uint32_t res;
+  memcpy(&res, p + i, sizeof(res));
+  return res;
+}
+
+FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
+{
+  uint64_t res;
+  memcpy(&res, p + i, sizeof(res));
+  return res;
+}
+
+uint32_t MurmurHash2 ( const void * key, size_t len, uint32_t seed )
+{
+  // 'm' and 'r' are mixing constants generated offline.
+  // They're not really 'magic', they just happen to work well.
+
+  const uint32_t m = 0x5bd1e995;
+  const int r = 24;
+
+  // Initialize the hash to a 'random' value
+
+  uint32_t h = seed ^ len;
+
+  // Mix 4 bytes at a time into the hash
+
+  const unsigned char * data = (const unsigned char *)key;
+
+  while(len >= 4)
+  {
+    uint32_t k = getblock32((const uint32_t*)data, 0);
+
+    k *= m;
+    k ^= k >> r;
+    k *= m;
+
+    h *= m;
+    h ^= k;
+
+    data += 4;
+    len -= 4;
+  }
+
+  // Handle the last few bytes of the input array
+
+  switch(len)
+  {
+  case 3: h ^= data[2] << 16;
+  case 2: h ^= data[1] << 8;
+  case 1: h ^= data[0];
+          h *= m;
+  };
+
+  // Do a few final mixes of the hash to ensure the last few
+  // bytes are well-incorporated.
+
+  h ^= h >> 13;
+  h *= m;
+  h ^= h >> 15;
+
+  return h;
+}
+
+//-----------------------------------------------------------------------------
+// MurmurHash2, 64-bit versions, by Austin Appleby
+
+// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
+// and endian-ness issues if used across multiple platforms.
+
+// 64-bit hash for 64-bit platforms
+
+uint64_t MurmurHash64A ( const void * key, size_t len, uint64_t seed )
+{
+  const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
+  const int r = 47;
+
+  uint64_t h = seed ^ (len * m);
+
+  const uint64_t * data = (const uint64_t *)key;
+  const uint64_t * end = data + (len/8);
+
+  while(data != end)
+  {
+    uint64_t k = getblock64(data, 0);
+    data++;
+
+    k *= m;
+    k ^= k >> r;
+    k *= m;
+
+    h ^= k;
+    h *= m;
+  }
+
+  const unsigned char * data2 = (const unsigned char*)data;
+
+  switch(len & 7)
+  {
+  case 7: h ^= uint64_t(data2[6]) << 48;
+  case 6: h ^= uint64_t(data2[5]) << 40;
+  case 5: h ^= uint64_t(data2[4]) << 32;
+  case 4: h ^= uint64_t(data2[3]) << 24;
+  case 3: h ^= uint64_t(data2[2]) << 16;
+  case 2: h ^= uint64_t(data2[1]) << 8;
+  case 1: h ^= uint64_t(data2[0]);
+          h *= m;
+  };
+
+  h ^= h >> r;
+  h *= m;
+  h ^= h >> r;
+
+  return h;
+}
+
+
+// 64-bit hash for 32-bit platforms
+
+uint64_t MurmurHash64B ( const void * key, size_t len, uint64_t seed )
+{
+  const uint32_t m = 0x5bd1e995;
+  const int r = 24;
+
+  uint32_t h1 = uint32_t(seed) ^ len;
+  uint32_t h2 = uint32_t(seed >> 32);
+
+  const uint32_t * data = (const uint32_t *)key;
+
+  while(len >= 8)
+  {
+    uint32_t k1 = *data++;
+    k1 *= m; k1 ^= k1 >> r; k1 *= m;
+    h1 *= m; h1 ^= k1;
+    len -= 4;
+
+    uint32_t k2 = *data++;
+    k2 *= m; k2 ^= k2 >> r; k2 *= m;
+    h2 *= m; h2 ^= k2;
+    len -= 4;
+  }
+
+  if(len >= 4)
+  {
+    uint32_t k1 = *data++;
+    k1 *= m; k1 ^= k1 >> r; k1 *= m;
+    h1 *= m; h1 ^= k1;
+    len -= 4;
+  }
+
+  switch(len)
+  {
+  case 3: h2 ^= ((unsigned char*)data)[2] << 16;
+  case 2: h2 ^= ((unsigned char*)data)[1] << 8;
+  case 1: h2 ^= ((unsigned char*)data)[0];
+          h2 *= m;
+  };
+
+  h1 ^= h2 >> 18; h1 *= m;
+  h2 ^= h1 >> 22; h2 *= m;
+  h1 ^= h2 >> 17; h1 *= m;
+  h2 ^= h1 >> 19; h2 *= m;
+
+  uint64_t h = h1;
+
+  h = (h << 32) | h2;
+
+  return h;
+}
+
+//-----------------------------------------------------------------------------
+// MurmurHash2A, by Austin Appleby
+
+// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
+// construction. Bulk speed should be identical to Murmur2, small-key speed
+// will be 10%-20% slower due to the added overhead at the end of the hash.
+
+// This variant fixes a minor issue where null keys were more likely to
+// collide with each other than expected, and also makes the function
+// more amenable to incremental implementations.
+
+#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
+
+uint32_t MurmurHash2A ( const void * key, size_t len, uint32_t seed )
+{
+  const uint32_t m = 0x5bd1e995;
+  const int r = 24;
+  uint32_t l = len;
+
+  const unsigned char * data = (const unsigned char *)key;
+
+  uint32_t h = seed;
+
+  while(len >= 4)
+  {
+    uint32_t k = *(uint32_t*)data;
+
+    mmix(h,k);
+
+    data += 4;
+    len -= 4;
+  }
+
+  uint32_t t = 0;
+
+  switch(len)
+  {
+  case 3: t ^= data[2] << 16;
+  case 2: t ^= data[1] << 8;
+  case 1: t ^= data[0];
+  };
+
+  mmix(h,t);
+  mmix(h,l);
+
+  h ^= h >> 13;
+  h *= m;
+  h ^= h >> 15;
+
+  return h;
+}
+
+//-----------------------------------------------------------------------------
+// CMurmurHash2A, by Austin Appleby
+
+// This is a sample implementation of MurmurHash2A designed to work
+// incrementally.
+
+// Usage -
+
+// CMurmurHash2A hasher
+// hasher.Begin(seed);
+// hasher.Add(data1,size1);
+// hasher.Add(data2,size2);
+// ...
+// hasher.Add(dataN,sizeN);
+// uint32_t hash = hasher.End()
+
+class CMurmurHash2A
+{
+public:
+
+  void Begin ( uint32_t seed = 0 )
+  {
+    m_hash  = seed;
+    m_tail  = 0;
+    m_count = 0;
+    m_size  = 0;
+  }
+
+  void Add ( const unsigned char * data, size_t len )
+  {
+    m_size += len;
+
+    MixTail(data,len);
+
+    while(len >= 4)
+    {
+      uint32_t k = *(uint32_t*)data;
+
+      mmix(m_hash,k);
+
+      data += 4;
+      len -= 4;
+    }
+
+    MixTail(data,len);
+  }
+
+  uint32_t End ( void )
+  {
+    mmix(m_hash,m_tail);
+    mmix(m_hash,m_size);
+
+    m_hash ^= m_hash >> 13;
+    m_hash *= m;
+    m_hash ^= m_hash >> 15;
+
+    return m_hash;
+  }
+
+private:
+
+  static const uint32_t m = 0x5bd1e995;
+  static const int r = 24;
+
+  void MixTail ( const unsigned char * & data, size_t & len )
+  {
+    while( len && ((len<4) || m_count) )
+    {
+      m_tail |= (*data++) << (m_count * 8);
+
+      m_count++;
+      len--;
+
+      if(m_count == 4)
+      {
+        mmix(m_hash,m_tail);
+        m_tail = 0;
+        m_count = 0;
+      }
+    }
+  }
+
+  uint32_t m_hash;
+  uint32_t m_tail;
+  uint32_t m_count;
+  uint32_t m_size;
+};
+
+//-----------------------------------------------------------------------------
+// MurmurHashNeutral2, by Austin Appleby
+
+// Same as MurmurHash2, but endian- and alignment-neutral.
+// Half the speed though, alas.
+
+uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed )
+{
+  const uint32_t m = 0x5bd1e995;
+  const int r = 24;
+
+  uint32_t h = seed ^ len;
+
+  const unsigned char * data = (const unsigned char *)key;
+
+  while(len >= 4)
+  {
+    uint32_t k;
+
+    k  = data[0];
+    k |= data[1] << 8;
+    k |= data[2] << 16;
+    k |= data[3] << 24;
+
+    k *= m;
+    k ^= k >> r;
+    k *= m;
+
+    h *= m;
+    h ^= k;
+
+    data += 4;
+    len -= 4;
+  }
+
+  switch(len)
+  {
+  case 3: h ^= data[2] << 16;
+  case 2: h ^= data[1] << 8;
+  case 1: h ^= data[0];
+          h *= m;
+  };
+
+  h ^= h >> 13;
+  h *= m;
+  h ^= h >> 15;
+
+  return h;
+}
+
+//-----------------------------------------------------------------------------
+// MurmurHashAligned2, by Austin Appleby
+
+// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
+// on certain platforms.
+
+// Performance will be lower than MurmurHash2
+
+#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
+
+
+uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed )
+{
+  const uint32_t m = 0x5bd1e995;
+  const int r = 24;
+
+  const unsigned char * data = (const unsigned char *)key;
+
+  uint32_t h = seed ^ len;
+
+  size_t align = (uint64_t)data & 3;
+
+  if(align && (len >= 4))
+  {
+    // Pre-load the temp registers
+
+    uint32_t t = 0, d = 0;
+
+    switch(align)
+    {
+      case 1: t |= data[2] << 16;
+      case 2: t |= data[1] << 8;
+      case 3: t |= data[0];
+    }
+
+    t <<= (8 * align);
+
+    data += 4-align;
+    len -= 4-align;
+
+    int sl = 8 * (4-align);
+    int sr = 8 * align;
+
+    // Mix
+
+    while(len >= 4)
+    {
+      d = *(uint32_t *)data;
+      t = (t >> sr) | (d << sl);
+
+      uint32_t k = t;
+
+      MIX(h,k,m);
+
+      t = d;
+
+      data += 4;
+      len -= 4;
+    }
+
+    // Handle leftover data in temp registers
+
+    d = 0;
+
+    if(len >= align)
+    {
+      switch(align)
+      {
+      case 3: d |= data[2] << 16;
+      case 2: d |= data[1] << 8;
+      case 1: d |= data[0];
+      }
+
+      uint32_t k = (t >> sr) | (d << sl);
+      MIX(h,k,m);
+
+      data += align;
+      len -= align;
+
+      //----------
+      // Handle tail bytes
+
+      switch(len)
+      {
+      case 3: h ^= data[2] << 16;
+      case 2: h ^= data[1] << 8;
+      case 1: h ^= data[0];
+              h *= m;
+      };
+    }
+    else
+    {
+      switch(len)
+      {
+      case 3: d |= data[2] << 16;
+      case 2: d |= data[1] << 8;
+      case 1: d |= data[0];
+      case 0: h ^= (t >> sr) | (d << sl);
+              h *= m;
+      }
+    }
+
+    h ^= h >> 13;
+    h *= m;
+    h ^= h >> 15;
+
+    return h;
+  }
+  else
+  {
+    while(len >= 4)
+    {
+      uint32_t k = *(uint32_t *)data;
+
+      MIX(h,k,m);
+
+      data += 4;
+      len -= 4;
+    }
+
+    //----------
+    // Handle tail bytes
+
+    switch(len)
+    {
+    case 3: h ^= data[2] << 16;
+    case 2: h ^= data[1] << 8;
+    case 1: h ^= data[0];
+            h *= m;
+    };
+
+    h ^= h >> 13;
+    h *= m;
+    h ^= h >> 15;
+
+    return h;
+  }
+}
+
+//-----------------------------------------------------------------------------
+
diff --git a/contrib/restricted/murmurhash/MurmurHash2.h b/contrib/restricted/murmurhash/MurmurHash2.h
new file mode 100644
index 0000000000..017475923b
--- /dev/null
+++ b/contrib/restricted/murmurhash/MurmurHash2.h
@@ -0,0 +1,49 @@
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+#ifndef _MURMURHASH2_H_
+#define _MURMURHASH2_H_
+
+#include <stddef.h>
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER) && (_MSC_VER < 1600)
+
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+typedef unsigned __int64 uint64_t;
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#include <stdint.h>
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+uint32_t MurmurHash2        ( const void * key, size_t len, uint32_t seed );
+uint64_t MurmurHash64A      ( const void * key, size_t len, uint64_t seed );
+uint64_t MurmurHash64B      ( const void * key, size_t len, uint64_t seed );
+uint32_t MurmurHash2A       ( const void * key, size_t len, uint32_t seed );
+uint32_t MurmurHashNeutral2 ( const void * key, size_t len, uint32_t seed );
+uint32_t MurmurHashAligned2 ( const void * key, size_t len, uint32_t seed );
+
+#ifdef __cplusplus
+}
+#endif
+
+//-----------------------------------------------------------------------------
+
+#endif // _MURMURHASH2_H_
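The MurmurHash2 family exposes several trade-offs: a fast 32-bit path, an endian- and alignment-neutral variant, and two 64-bit outputs. A minimal comparison sketch, assuming a little-endian host; it is not part of the commit, and the key and seed values are arbitrary:

```cpp
#include <cstdio>
#include <cstring>

#include "MurmurHash2.h"

int main()
{
    const char key[] = "example key";    // arbitrary example input
    const size_t len = strlen(key);

    // Fast path: the result depends on byte order and unaligned reads.
    uint32_t h32 = MurmurHash2(key, len, 0);

    // Portable variant: assembles each 4-byte block explicitly, so it
    // gives the same answer on any platform; on little-endian hosts it
    // should match MurmurHash2.
    uint32_t hn = MurmurHashNeutral2(key, len, 0);

    // 64-bit output tuned for 64-bit platforms.
    uint64_t h64 = MurmurHash64A(key, len, 0);

    printf("MurmurHash2:        %08x\n", h32);
    printf("MurmurHashNeutral2: %08x\n", hn);
    printf("MurmurHash64A:      %016llx\n", (unsigned long long)h64);
    return 0;
}
```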
diff --git a/contrib/restricted/murmurhash/MurmurHash3.cpp b/contrib/restricted/murmurhash/MurmurHash3.cpp
new file mode 100644
index 0000000000..ec90e0fec1
--- /dev/null
+++ b/contrib/restricted/murmurhash/MurmurHash3.cpp
@@ -0,0 +1,340 @@
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - The x86 and x64 versions do _not_ produce the same results, as the
+// algorithms are optimized for their respective platforms. You can still
+// compile and run any of them on any platform, but your performance with the
+// non-native version will be less than optimal.
+
+#include "MurmurHash3.h"
+#include <string.h>
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER)
+
+#define FORCE_INLINE __forceinline
+
+#include <stdlib.h>
+
+#define ROTL32(x,y) _rotl(x,y)
+#define ROTL64(x,y) _rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x)
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#define FORCE_INLINE inline __attribute__((always_inline))
+
+inline uint32_t rotl32 ( uint32_t x, int8_t r )
+{
+  return (x << r) | (x >> (32 - r));
+}
+
+inline uint64_t rotl64 ( uint64_t x, int8_t r )
+{
+  return (x << r) | (x >> (64 - r));
+}
+
+#define ROTL32(x,y) rotl32(x,y)
+#define ROTL64(x,y) rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+// Block read - if your platform needs to do endian-swapping or can only
+// handle aligned reads, do the conversion here
+
+FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
+{
+  uint32_t res;
+  memcpy(&res, p + i, sizeof(res));
+  return res;
+}
+
+FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
+{
+  uint64_t res;
+  memcpy(&res, p + i, sizeof(res));
+  return res;
+}
+
+//-----------------------------------------------------------------------------
+// Finalization mix - force all bits of a hash block to avalanche
+
+FORCE_INLINE uint32_t fmix32 ( uint32_t h )
+{
+  h ^= h >> 16;
+  h *= 0x85ebca6b;
+  h ^= h >> 13;
+  h *= 0xc2b2ae35;
+  h ^= h >> 16;
+
+  return h;
+}
+
+//----------
+
+FORCE_INLINE uint64_t fmix64 ( uint64_t k )
+{
+  k ^= k >> 33;
+  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
+  k ^= k >> 33;
+  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
+  k ^= k >> 33;
+
+  return k;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_32 ( const void * key, size_t len,
+                          uint32_t seed, void * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
+  const int nblocks = len / 4;
+
+  uint32_t h1 = seed;
+
+  const uint32_t c1 = 0xcc9e2d51;
+  const uint32_t c2 = 0x1b873593;
+
+  //----------
+  // body
+
+  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
+
+  for(int i = -nblocks; i; i++)
+  {
+    uint32_t k1 = getblock32(blocks,i);
+
+    k1 *= c1;
+    k1 = ROTL32(k1,15);
+    k1 *= c2;
+
+    h1 ^= k1;
+    h1 = ROTL32(h1,13);
+    h1 = h1*5+0xe6546b64;
+  }
+
+  //----------
+  // tail
+
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
+
+  uint32_t k1 = 0;
+
+  switch(len & 3)
+  {
+  case 3: k1 ^= tail[2] << 16;
+  case 2: k1 ^= tail[1] << 8;
+  case 1: k1 ^= tail[0];
+          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+  };
+
+  //----------
+  // finalization
+
+  h1 ^= len;
+
+  h1 = fmix32(h1);
+
+  *(uint32_t*)out = h1;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_128 ( const void * key, const size_t len,
+                           uint32_t seed, void * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
+  const int nblocks = len / 16;
+
+  uint32_t h1 = seed;
+  uint32_t h2 = seed;
+  uint32_t h3 = seed;
+  uint32_t h4 = seed;
+
+  const uint32_t c1 = 0x239b961b;
+  const uint32_t c2 = 0xab0e9789;
+  const uint32_t c3 = 0x38b34ae5;
+  const uint32_t c4 = 0xa1e38b93;
+
+  //----------
+  // body
+
+  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);
+
+  for(int i = -nblocks; i; i++)
+  {
+    uint32_t k1 = getblock32(blocks,i*4+0);
+    uint32_t k2 = getblock32(blocks,i*4+1);
+    uint32_t k3 = getblock32(blocks,i*4+2);
+    uint32_t k4 = getblock32(blocks,i*4+3);
+
+    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+
+    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;
+
+    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+
+    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;
+
+    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+
+    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;
+
+    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+
+    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
+  }
+
+  //----------
+  // tail
+
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
+
+  uint32_t k1 = 0;
+  uint32_t k2 = 0;
+  uint32_t k3 = 0;
+  uint32_t k4 = 0;
+
+  switch(len & 15)
+  {
+  case 15: k4 ^= tail[14] << 16;
+  case 14: k4 ^= tail[13] << 8;
+  case 13: k4 ^= tail[12] << 0;
+           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+
+  case 12: k3 ^= tail[11] << 24;
+  case 11: k3 ^= tail[10] << 16;
+  case 10: k3 ^= tail[ 9] << 8;
+  case  9: k3 ^= tail[ 8] << 0;
+           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+
+  case  8: k2 ^= tail[ 7] << 24;
+  case  7: k2 ^= tail[ 6] << 16;
+  case  6: k2 ^= tail[ 5] << 8;
+  case  5: k2 ^= tail[ 4] << 0;
+           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+
+  case  4: k1 ^= tail[ 3] << 24;
+  case  3: k1 ^= tail[ 2] << 16;
+  case  2: k1 ^= tail[ 1] << 8;
+  case  1: k1 ^= tail[ 0] << 0;
+           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+  };
+
+  //----------
+  // finalization
+
+  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
+
+  h1 += h2; h1 += h3; h1 += h4;
+  h2 += h1; h3 += h1; h4 += h1;
+
+  h1 = fmix32(h1);
+  h2 = fmix32(h2);
+  h3 = fmix32(h3);
+  h4 = fmix32(h4);
+
+  h1 += h2; h1 += h3; h1 += h4;
+  h2 += h1; h3 += h1; h4 += h1;
+
+  ((uint32_t*)out)[0] = h1;
+  ((uint32_t*)out)[1] = h2;
+  ((uint32_t*)out)[2] = h3;
+  ((uint32_t*)out)[3] = h4;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x64_128 ( const void * key, const size_t len,
+                           const uint32_t seed, void * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
+  const int nblocks = len / 16;
+
+  uint64_t h1 = seed;
+  uint64_t h2 = seed;
+
+  const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
+  const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
+
+  //----------
+  // body
+
+  const uint64_t * blocks = (const uint64_t *)(data);
+
+  for(int i = 0; i < nblocks; i++)
+  {
+    uint64_t k1 = getblock64(blocks,i*2+0);
+    uint64_t k2 = getblock64(blocks,i*2+1);
+
+    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
+
+    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;
+
+    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+
+    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
+  }
+
+  //----------
+  // tail
+
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
+
+  uint64_t k1 = 0;
+  uint64_t k2 = 0;
+
+  switch(len & 15)
+  {
+  case 15: k2 ^= ((uint64_t)tail[14]) << 48;
+  case 14: k2 ^= ((uint64_t)tail[13]) << 40;
+  case 13: k2 ^= ((uint64_t)tail[12]) << 32;
+  case 12: k2 ^= ((uint64_t)tail[11]) << 24;
+  case 11: k2 ^= ((uint64_t)tail[10]) << 16;
+  case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
+  case  9: k2 ^= ((uint64_t)tail[ 8]) << 0;
+           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+
+  case  8: k1 ^= ((uint64_t)tail[ 7]) << 56;
+  case  7: k1 ^= ((uint64_t)tail[ 6]) << 48;
+  case  6: k1 ^= ((uint64_t)tail[ 5]) << 40;
+  case  5: k1 ^= ((uint64_t)tail[ 4]) << 32;
+  case  4: k1 ^= ((uint64_t)tail[ 3]) << 24;
+  case  3: k1 ^= ((uint64_t)tail[ 2]) << 16;
+  case  2: k1 ^= ((uint64_t)tail[ 1]) << 8;
+  case  1: k1 ^= ((uint64_t)tail[ 0]) << 0;
+           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
+  };
+
+  //----------
+  // finalization
+
+  h1 ^= len; h2 ^= len;
+
+  h1 += h2;
+  h2 += h1;
+
+  h1 = fmix64(h1);
+  h2 = fmix64(h2);
+
+  h1 += h2;
+  h2 += h1;
+
+  ((uint64_t*)out)[0] = h1;
+  ((uint64_t*)out)[1] = h2;
+}
+
+//-----------------------------------------------------------------------------
+
diff --git a/contrib/restricted/murmurhash/MurmurHash3.h b/contrib/restricted/murmurhash/MurmurHash3.h
new file mode 100644
index 0000000000..920a49da3c
--- /dev/null
+++ b/contrib/restricted/murmurhash/MurmurHash3.h
@@ -0,0 +1,47 @@
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+#ifndef _MURMURHASH3_H_
+#define _MURMURHASH3_H_
+
+#include <stddef.h>
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER) && (_MSC_VER < 1600)
+
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+typedef unsigned __int64 uint64_t;
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#include <stdint.h>
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void MurmurHash3_x86_32 ( const void * key, size_t len, uint32_t seed, void * out );
+
+void MurmurHash3_x86_128 ( const void * key, size_t len, uint32_t seed, void * out );
+
+void MurmurHash3_x64_128 ( const void * key, size_t len, uint32_t seed, void * out );
+
+#ifdef __cplusplus
+}
+#endif
+
+//-----------------------------------------------------------------------------
+
+#endif // _MURMURHASH3_H_
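Unlike the earlier versions, the MurmurHash3 functions return their result through an out pointer: 4 bytes for the x86 32-bit variant, 16 bytes for the 128-bit variants. A minimal caller sketch, not part of the commit, with arbitrary example inputs:

```cpp
#include <cstdio>
#include <cstring>

#include "MurmurHash3.h"

int main()
{
    const char key[] = "example key";    // arbitrary example input
    const size_t len = strlen(key);

    // 32-bit result, written through the out pointer.
    uint32_t h32 = 0;
    MurmurHash3_x86_32(key, len, 0, &h32);

    // 128-bit result: the x64 variant fills two 64-bit words.
    uint64_t h128[2] = {0, 0};
    MurmurHash3_x64_128(key, len, 0, h128);

    printf("x86_32:  %08x\n", h32);
    printf("x64_128: %016llx%016llx\n",
           (unsigned long long)h128[0], (unsigned long long)h128[1]);
    return 0;
}
```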
diff --git a/contrib/restricted/murmurhash/README.md b/contrib/restricted/murmurhash/README.md
new file mode 100644
index 0000000000..f648b06b55
--- /dev/null
+++ b/contrib/restricted/murmurhash/README.md
@@ -0,0 +1,16 @@
+## [SMHasher](https://github.com/aappleby/smhasher/wiki)
+
+This is the home for the [MurmurHash](https://github.com/aappleby/smhasher/tree/master/src) family of hash functions along with the [SMHasher](https://github.com/aappleby/smhasher/tree/master/src) test suite used to verify them. SMHasher is released under the MIT license. All MurmurHash versions are public domain software, and the author disclaims all copyright to their code.
+
+SMHasher is a test suite designed to test the distribution, collision, and performance properties of non-cryptographic hash functions - it aims to be the [DieHarder](http://www.phy.duke.edu/~rgb/General/dieharder.php) of hash testing, and does a pretty good job of finding flaws with a number of popular hashes.
+
+The SMHasher suite also includes [MurmurHash3](https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp), which is the latest version in the series of MurmurHash functions - the new version is faster, more robust, and its variants can produce 32- and 128-bit hash values efficiently on both x86 and x64 platforms.
+
+## Updates
+
+### 1/8/2016
+
+Woo, we're on Github! I've been putting off the migration for a few, uh, years or so, but hopefully Github won't be shutting down any time soon so I don't have to move things again. MurmurHash is still used all over the place, SMHasher is still the de facto hash function test suite, and there are all sorts of interesting new hash functions out there that improve bulk hashing speed and use new shiny hardware instructions for AES and whatnot. Interesting times. :)
+
+I've copied the few wiki pages from code.google.com/p/smhasher to this wiki, though I haven't reformatted them to Markdown yet. The MurmurHash page on Wikipedia should also be linking here now as well. Feel free to send me pull requests.
diff --git a/contrib/restricted/murmurhash/ya.make b/contrib/restricted/murmurhash/ya.make
new file mode 100644
index 0000000000..fa37eaf473
--- /dev/null
+++ b/contrib/restricted/murmurhash/ya.make
@@ -0,0 +1,25 @@
+# Generated by devtools/yamaker from nixpkgs 22.05.
+
+LIBRARY()
+
+LICENSE(Public-Domain)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(2016-01-09)
+
+ORIGINAL_SOURCE(https://github.com/aappleby/smhasher/archive/61a0530f28277f2e850bfc39600ce61d02b518de.tar.gz)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+NO_UTIL()
+
+SRCS(
+    MurmurHash1.cpp
+    MurmurHash2.cpp
+    MurmurHash3.cpp
+)
+
+END()
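For context, other modules in the ydb tree would typically consume this library through a PEERDIR in their own ya.make. A hypothetical consumer sketch (the source file name is invented for illustration and is not part of this commit):

```
LIBRARY()

PEERDIR(
    contrib/restricted/murmurhash
)

SRCS(
    my_hashing_code.cpp
)

END()
```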