mirror of https://github.com/justinethier/cyclone.git
synced 2025-05-21 14:49:17 +02:00
289 lines · 6.5 KiB · C
/*
 * Copyright 2012-2015 Samy Al Bahra
 * Copyright 2011-2014 AppNexus, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_HT_HASH_H
|
|
#define CK_HT_HASH_H

/*
 * This is the Murmur hash written by Austin Appleby.
 */

#include <ck_stdint.h>
#include <string.h>	/* memcpy: unaligned-safe block loads */

//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)
|
|
|
|
#define FORCE_INLINE __forceinline
|
|
|
|
#include <stdlib.h>
|
|
|
|
#define ROTL32(x,y) _rotl(x,y)
|
|
#define ROTL64(x,y) _rotl64(x,y)
|
|
|
|
#define BIG_CONSTANT(x) (x)
|
|
|
|
// Other compilers
|
|
|
|
#else // defined(_MSC_VER)
|
|
|
|
#define FORCE_INLINE inline __attribute__((always_inline))
|
|
|
|
static inline uint32_t rotl32(uint32_t x, int8_t r)
|
|
{
|
|
return (x << r) | (x >> (32 - r));
|
|
}
|
|
|
|
/*
 * Rotate x left by r bits (64-bit variant).
 *
 * Shift counts are masked to 0-63: the original `x >> (64 - r)` is a
 * shift by 64 when r == 0, which is undefined behavior.  Results for
 * r in 1..63 are identical; r == 0 now returns x unchanged.
 */
static inline uint64_t rotl64(uint64_t x, int8_t r)
{

	return (x << (r & 63)) | (x >> ((64 - r) & 63));
}
|
|
|
|
#define ROTL32(x,y) rotl32(x,y)
|
|
#define ROTL64(x,y) rotl64(x,y)
|
|
|
|
#define BIG_CONSTANT(x) (x##LLU)
|
|
|
|
#endif // !defined(_MSC_VER)
|
|
|
|
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

/*
 * Fetch the i-th 32-bit block of the key.
 *
 * This is a plain native-endian indexed load; per the note above, a port
 * that needs byte swapping or alignment handling would do it here.
 */
FORCE_INLINE static uint32_t getblock(const uint32_t * p, int i)
{

	return *(p + i);
}

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

/*
 * MurmurHash3 32-bit finalizer: two xorshift-multiply rounds followed by
 * a final xorshift.  Each round is written as one fused assignment; the
 * sequence of operations (and therefore the result) is identical to the
 * step-by-step form.
 */
FORCE_INLINE static uint32_t fmix(uint32_t h)
{

	h = (h ^ (h >> 16)) * 0x85ebca6b;
	h = (h ^ (h >> 13)) * 0xc2b2ae35;
	h ^= h >> 16;

	return h;
}

//-----------------------------------------------------------------------------

/*
 * MurmurHash3, x86 32-bit variant (Austin Appleby, public domain).
 *
 * Hashes `len` bytes starting at `key` with the given `seed` and stores
 * the 32-bit result through `out`.  `key` may have any alignment: the
 * 4-byte body blocks are loaded with memcpy rather than by dereferencing
 * a casted uint32_t pointer, which would be a misaligned access (and a
 * strict-aliasing violation) on strict platforms.  The memcpy load reads
 * the same native-endian bytes, so the hash value is unchanged.
 */
static inline void MurmurHash3_x86_32(const void *key, int len,
    uint32_t seed, uint32_t * out)
{
	const uint8_t *data = (const uint8_t *)key;
	const int nblocks = len / 4;

	uint32_t h1 = seed;

	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;

	//----------
	// body: consume full 4-byte blocks in order

	for (int i = 0; i < nblocks; i++) {
		uint32_t k1;

		memcpy(&k1, data + (size_t)i * 4, sizeof(k1));

		k1 *= c1;
		k1 = ROTL32(k1, 15);
		k1 *= c2;

		h1 ^= k1;
		h1 = ROTL32(h1, 13);
		h1 = h1 * 5 + 0xe6546b64;
	}

	//----------
	// tail: mix in the remaining 1-3 bytes (cases fall through)

	const uint8_t *tail = data + nblocks * 4;

	uint32_t k1 = 0;

	switch (len & 3) {
	case 3:
		k1 ^= tail[2] << 16;
		/* fallthrough */
	case 2:
		k1 ^= tail[1] << 8;
		/* fallthrough */
	case 1:
		k1 ^= tail[0];
		k1 *= c1;
		k1 = ROTL32(k1, 15);
		k1 *= c2;
		h1 ^= k1;
	};

	//----------
	// finalization: fold in the length, then avalanche

	h1 ^= len;

	h1 = fmix(h1);

	*out = h1;
}
|
|
|
|
/*
 * MurmurHash2, 64-bit variant for 64-bit platforms (Austin Appleby).
 *
 * Hashes `len` bytes starting at `key` with `seed` and returns the
 * 64-bit hash.  `key` may have any alignment: unaligned 8-byte chunks
 * are loaded with memcpy.  Per the note at the top of this file, the
 * result differs from the x86-oriented variants below/above.
 */
static inline uint64_t MurmurHash64A(const void *key, int len, uint64_t seed)
{
	const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
	const int r = 47;

	/* Standard MurmurHash2 initialization: perturb the seed with the
	 * key length so equal prefixes of different lengths diverge. */
	uint64_t h = seed ^ (len * m);

	const uint64_t *data = (const uint64_t *)key;
	const uint64_t *end = data + (len / 8);

	/* Body: consume the input in 64-bit chunks. */
	while (data != end) {
		uint64_t k;

		/* Fast path: direct load when 8-byte aligned; otherwise
		 * memcpy the chunk to avoid a misaligned access. */
		if (!((uintptr_t) data & 0x7))
			k = *data++;
		else {
			memcpy(&k, data, sizeof(k));
			data++;
		}

		k *= m;
		k ^= k >> r;
		k *= m;

		h ^= k;
		h *= m;
	}

	/* Tail: mix in the remaining 1-7 bytes (cases fall through). */
	const unsigned char *data2 = (const unsigned char *)data;

	switch (len & 7) {
	case 7:
		h ^= (uint64_t) (data2[6]) << 48;
		/* fallthrough */
	case 6:
		h ^= (uint64_t) (data2[5]) << 40;
		/* fallthrough */
	case 5:
		h ^= (uint64_t) (data2[4]) << 32;
		/* fallthrough */
	case 4:
		h ^= (uint64_t) (data2[3]) << 24;
		/* fallthrough */
	case 3:
		h ^= (uint64_t) (data2[2]) << 16;
		/* fallthrough */
	case 2:
		h ^= (uint64_t) (data2[1]) << 8;
		/* fallthrough */
	case 1:
		h ^= (uint64_t) (data2[0]);
		h *= m;
	};

	/* Finalization: force avalanche of the last processed bits. */
	h ^= h >> r;
	h *= m;
	h ^= h >> r;

	return h;
}

// 64-bit hash for 32-bit platforms

/*
 * MurmurHash2, 64-bit result built from two 32-bit states — intended for
 * 32-bit platforms (Austin Appleby).
 *
 * Hashes `len` bytes starting at `key` with `seed` and returns the
 * 64-bit hash.  `key` may now have any alignment: each 32-bit word is
 * loaded with memcpy instead of dereferencing a casted uint32_t pointer,
 * which was a misaligned access (undefined behavior on strict-alignment
 * targets) and a strict-aliasing violation.  MurmurHash64A above already
 * guards unaligned input this way; this brings the two into agreement.
 * The memcpy load reads the same native-endian bytes, so hash values are
 * unchanged for all inputs.
 */
static inline uint64_t MurmurHash64B(const void *key, int len, uint64_t seed)
{
	const uint32_t m = 0x5bd1e995;
	const int r = 24;

	/* Split the 64-bit seed across the two 32-bit states; fold the
	 * length into the first (standard MurmurHash2 initialization). */
	uint32_t h1 = (uint32_t) (seed) ^ len;
	uint32_t h2 = (uint32_t) (seed >> 32);

	const unsigned char *data = (const unsigned char *)key;

	/* Body: consume 8 bytes per iteration, one word per state. */
	while (len >= 8) {
		uint32_t k1;
		uint32_t k2;

		memcpy(&k1, data, sizeof(k1));
		data += sizeof(k1);
		k1 *= m;
		k1 ^= k1 >> r;
		k1 *= m;
		h1 *= m;
		h1 ^= k1;
		len -= 4;

		memcpy(&k2, data, sizeof(k2));
		data += sizeof(k2);
		k2 *= m;
		k2 ^= k2 >> r;
		k2 *= m;
		h2 *= m;
		h2 ^= k2;
		len -= 4;
	}

	/* One leftover full word goes into h1. */
	if (len >= 4) {
		uint32_t k1;

		memcpy(&k1, data, sizeof(k1));
		data += sizeof(k1);
		k1 *= m;
		k1 ^= k1 >> r;
		k1 *= m;
		h1 *= m;
		h1 ^= k1;
		len -= 4;
	}

	/* Tail: mix the remaining 1-3 bytes into h2 (cases fall through). */
	switch (len) {
	case 3:
		h2 ^= data[2] << 16;
		/* fallthrough */
	case 2:
		h2 ^= data[1] << 8;
		/* fallthrough */
	case 1:
		h2 ^= data[0];
		h2 *= m;
	};

	/* Finalization: cross-mix the two halves until fully avalanched. */
	h1 ^= h2 >> 18;
	h1 *= m;
	h2 ^= h1 >> 22;
	h2 *= m;
	h1 ^= h2 >> 17;
	h1 *= m;
	h2 ^= h1 >> 19;
	h2 *= m;

	uint64_t h = h1;

	h = (h << 32) | h2;

	return h;
}
|
|
|
|
#endif /* CK_HT_HASH_H */
|