author      Oliver Wolff <oliver.wolff@theqtcompany.com>    2016-03-24 12:38:18 +0100
committer   Oliver Wolff <oliver.wolff@qt.io>               2016-04-25 05:57:38 +0000
commit      e12ba07322cd61c5cf50c25ed8d1f08f6b1ff879 (patch)
tree        d31a44c9f123ed764a00eff7b4fff656a07d54ab /src/3rdparty/angle/src/third_party/murmurhash/MurmurHash3.cpp
parent      d3dcc6f610b97be7cbfbb0a65988e5940568c825 (diff)
Update ANGLE to chromium/2651
Change-Id: I1cd32b780b1a0b913fab870e155ae1f4f9ac40d7
Reviewed-by: Maurice Kalinowski <maurice.kalinowski@qt.io>
Diffstat (limited to 'src/3rdparty/angle/src/third_party/murmurhash/MurmurHash3.cpp')
-rw-r--r--    src/3rdparty/angle/src/third_party/murmurhash/MurmurHash3.cpp    71
1 file changed, 35 insertions(+), 36 deletions(-)
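
The substantive change in the first hunk below is upstream's cleanup of the GCC path: __attribute__((always_inline)) is only reliably honored when the function is also declared inline, so the new macro adds the keyword instead of suppressing the resulting -Wattributes warning with a pragma. The remaining hunks rename the overloaded getblock/fmix helpers to width-suffixed getblock32/getblock64 and fmix32/fmix64, and switch the x64 tail loads to C-style casts. A minimal sketch of the corrected macro in isolation (the __forceinline branch mirrors the upstream MSVC definition):

#include <stdint.h>

#if defined(_MSC_VER)
#define FORCE_INLINE __forceinline
#else
// GCC/Clang honor always_inline together with the inline keyword,
// which makes the old "-Wattributes" pragma unnecessary.
#define FORCE_INLINE inline __attribute__((always_inline))
#endif

FORCE_INLINE uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}
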
diff --git a/src/3rdparty/angle/src/third_party/murmurhash/MurmurHash3.cpp b/src/3rdparty/angle/src/third_party/murmurhash/MurmurHash3.cpp
index a599c26502..aa7982d3ee 100644
--- a/src/3rdparty/angle/src/third_party/murmurhash/MurmurHash3.cpp
+++ b/src/3rdparty/angle/src/third_party/murmurhash/MurmurHash3.cpp
@@ -29,9 +29,7 @@
#else // defined(_MSC_VER)
-// Ignore GCC force inline warnings
-#pragma GCC diagnostic ignored "-Wattributes"
-#define FORCE_INLINE __attribute__((always_inline))
+#define FORCE_INLINE inline __attribute__((always_inline))
inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
@@ -54,12 +52,12 @@ inline uint64_t rotl64 ( uint64_t x, int8_t r )
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
-FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
+FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
{
return p[i];
}
-FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
+FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
{
return p[i];
}
@@ -67,7 +65,7 @@ FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche
-FORCE_INLINE uint32_t fmix ( uint32_t h )
+FORCE_INLINE uint32_t fmix32 ( uint32_t h )
{
h ^= h >> 16;
h *= 0x85ebca6b;
@@ -80,7 +78,7 @@ FORCE_INLINE uint32_t fmix ( uint32_t h )
//----------
-FORCE_INLINE uint64_t fmix ( uint64_t k )
+FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
k ^= k >> 33;
k *= BIG_CONSTANT(0xff51afd7ed558ccd);
@@ -111,7 +109,7 @@ void MurmurHash3_x86_32 ( const void * key, int len,
for(int i = -nblocks; i; i++)
{
- uint32_t k1 = getblock(blocks,i);
+ uint32_t k1 = getblock32(blocks,i);
k1 *= c1;
k1 = ROTL32(k1,15);
@@ -142,7 +140,7 @@ void MurmurHash3_x86_32 ( const void * key, int len,
h1 ^= len;
- h1 = fmix(h1);
+ h1 = fmix32(h1);
*(uint32_t*)out = h1;
}
@@ -172,10 +170,10 @@ void MurmurHash3_x86_128 ( const void * key, const int len,
for(int i = -nblocks; i; i++)
{
- uint32_t k1 = getblock(blocks,i*4+0);
- uint32_t k2 = getblock(blocks,i*4+1);
- uint32_t k3 = getblock(blocks,i*4+2);
- uint32_t k4 = getblock(blocks,i*4+3);
+ uint32_t k1 = getblock32(blocks,i*4+0);
+ uint32_t k2 = getblock32(blocks,i*4+1);
+ uint32_t k3 = getblock32(blocks,i*4+2);
+ uint32_t k4 = getblock32(blocks,i*4+3);
k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
@@ -238,10 +236,10 @@ void MurmurHash3_x86_128 ( const void * key, const int len,
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
- h1 = fmix(h1);
- h2 = fmix(h2);
- h3 = fmix(h3);
- h4 = fmix(h4);
+ h1 = fmix32(h1);
+ h2 = fmix32(h2);
+ h3 = fmix32(h3);
+ h4 = fmix32(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
@@ -273,8 +271,8 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
for(int i = 0; i < nblocks; i++)
{
- uint64_t k1 = getblock(blocks,i*2+0);
- uint64_t k2 = getblock(blocks,i*2+1);
+ uint64_t k1 = getblock64(blocks,i*2+0);
+ uint64_t k2 = getblock64(blocks,i*2+1);
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
@@ -295,23 +293,23 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
switch(len & 15)
{
- case 15: k2 ^= uint64_t(tail[14]) << 48;
- case 14: k2 ^= uint64_t(tail[13]) << 40;
- case 13: k2 ^= uint64_t(tail[12]) << 32;
- case 12: k2 ^= uint64_t(tail[11]) << 24;
- case 11: k2 ^= uint64_t(tail[10]) << 16;
- case 10: k2 ^= uint64_t(tail[ 9]) << 8;
- case 9: k2 ^= uint64_t(tail[ 8]) << 0;
+ case 15: k2 ^= ((uint64_t)tail[14]) << 48;
+ case 14: k2 ^= ((uint64_t)tail[13]) << 40;
+ case 13: k2 ^= ((uint64_t)tail[12]) << 32;
+ case 12: k2 ^= ((uint64_t)tail[11]) << 24;
+ case 11: k2 ^= ((uint64_t)tail[10]) << 16;
+ case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
+ case 9: k2 ^= ((uint64_t)tail[ 8]) << 0;
k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
- case 8: k1 ^= uint64_t(tail[ 7]) << 56;
- case 7: k1 ^= uint64_t(tail[ 6]) << 48;
- case 6: k1 ^= uint64_t(tail[ 5]) << 40;
- case 5: k1 ^= uint64_t(tail[ 4]) << 32;
- case 4: k1 ^= uint64_t(tail[ 3]) << 24;
- case 3: k1 ^= uint64_t(tail[ 2]) << 16;
- case 2: k1 ^= uint64_t(tail[ 1]) << 8;
- case 1: k1 ^= uint64_t(tail[ 0]) << 0;
+ case 8: k1 ^= ((uint64_t)tail[ 7]) << 56;
+ case 7: k1 ^= ((uint64_t)tail[ 6]) << 48;
+ case 6: k1 ^= ((uint64_t)tail[ 5]) << 40;
+ case 5: k1 ^= ((uint64_t)tail[ 4]) << 32;
+ case 4: k1 ^= ((uint64_t)tail[ 3]) << 24;
+ case 3: k1 ^= ((uint64_t)tail[ 2]) << 16;
+ case 2: k1 ^= ((uint64_t)tail[ 1]) << 8;
+ case 1: k1 ^= ((uint64_t)tail[ 0]) << 0;
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
};
@@ -323,8 +321,8 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
h1 += h2;
h2 += h1;
- h1 = fmix(h1);
- h2 = fmix(h2);
+ h1 = fmix64(h1);
+ h2 = fmix64(h2);
h1 += h2;
h2 += h1;
@@ -334,3 +332,4 @@ void MurmurHash3_x64_128 ( const void * key, const int len,
}
//-----------------------------------------------------------------------------
+
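
For reference, a minimal caller of the 32-bit variant touched by this patch; this is a sketch assuming the upstream MurmurHash3.h declarations, not part of the change itself. MurmurHash3_x86_32 writes a 4-byte result through its out pointer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "MurmurHash3.h"

int main ( void )
{
  const char * key = "hello";
  const uint32_t seed = 42;  // any fixed seed; same seed + key yields the same hash
  uint32_t out = 0;          // MurmurHash3_x86_32 writes 4 bytes here

  MurmurHash3_x86_32 ( key, (int)strlen(key), seed, &out );

  printf ( "0x%08x\n", out );
  return 0;
}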