Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

hash: add explicit u32 and u64 versions of hash

The 32-bit version is more efficient (and apparently gives better hash
results than the 64-bit version), so users who are only hashing a 32-bit
quantity can now opt to use the 32-bit version explicitly, rather than
promoting to a long.
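
As an illustration (not part of this patch; EXAMPLE_HASH_BITS, example_table and
example_bucket below are invented names), a caller that hashes a 32-bit key into a
small table can now call hash_32() on the key directly instead of casting it up and
calling hash_long():

#include <linux/hash.h>
#include <linux/list.h>

#define EXAMPLE_HASH_BITS 8

static struct hlist_head example_table[1 << EXAMPLE_HASH_BITS];

static struct hlist_head *example_bucket(u32 key)
{
        /* Previously: hash_long((unsigned long)key, EXAMPLE_HASH_BITS) */
        return &example_table[hash_32(key, EXAMPLE_HASH_BITS)];
}

hash_long() keeps working on either word size; after this patch it simply expands to
hash_32() or hash_64() to match BITS_PER_LONG.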

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Matthew Wilcox and committed by Linus Torvalds
4e701482 d9ae90ac

+28 -16
include/linux/hash.h
 #ifndef _LINUX_HASH_H
 #define _LINUX_HASH_H
-/* Fast hashing routine for a long.
+/* Fast hashing routine for ints, longs and pointers.
    (C) 2002 William Lee Irwin III, IBM */
 
 /*
···
  * them can use shifts and additions instead of multiplications for
  * machines where multiplications are slow.
  */
-#if BITS_PER_LONG == 32
+
+#include <asm/types.h>
+
 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
-#elif BITS_PER_LONG == 64
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
 #else
-#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#error Wordsize not 32 or 64
 #endif
 
-static inline unsigned long hash_long(unsigned long val, unsigned int bits)
+static inline u64 hash_64(u64 val, unsigned int bits)
 {
-        unsigned long hash = val;
+        u64 hash = val;
 
-#if BITS_PER_LONG == 64
         /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
-        unsigned long n = hash;
+        u64 n = hash;
         n <<= 18;
         hash -= n;
         n <<= 33;
···
         hash += n;
         n <<= 2;
         hash += n;
-#else
-        /* On some cpus multiply is faster, on others gcc will do shifts */
-        hash *= GOLDEN_RATIO_PRIME;
-#endif
 
         /* High bits are more random, so use them. */
-        return hash >> (BITS_PER_LONG - bits);
+        return hash >> (64 - bits);
 }
-
+
+static inline u32 hash_32(u32 val, unsigned int bits)
+{
+        /* On some cpus multiply is faster, on others gcc will do shifts */
+        u32 hash = val * GOLDEN_RATIO_PRIME_32;
+
+        /* High bits are more random, so use them. */
+        return hash >> (32 - bits);
+}
+
 static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
 {
         return hash_long((unsigned long)ptr, bits);
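
A note on the shift/add chain kept in hash_64(): per the in-file comment, gcc will not
break a 64-bit multiply by this bit-sparse prime into shifts on its own, so the chain is
written out by hand; arithmetically it is the same as multiplying by
GOLDEN_RATIO_PRIME_64 modulo 2^64. A standalone userspace sketch (not kernel code;
the function and variable names here are illustrative) checking that equivalence:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

/* Same shift/add sequence as hash_64() in the patch above. */
static uint64_t hash_64_by_shifts(uint64_t val)
{
        uint64_t hash = val;
        uint64_t n = hash;

        n <<= 18; hash -= n;
        n <<= 33; hash -= n;
        n <<= 3;  hash += n;
        n <<= 3;  hash -= n;
        n <<= 4;  hash += n;
        n <<= 2;  hash += n;
        return hash;
}

int main(void)
{
        uint64_t val = 0x0123456789abcdefULL;

        /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 is the prime,
         * so the chain equals a plain multiply (mod 2^64). */
        assert(hash_64_by_shifts(val) == val * GOLDEN_RATIO_PRIME_64);
        printf("ok\n");
        return 0;
}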