/*
 * rapidhash V3 - Very fast, high quality, platform-independent hashing algorithm.
 *
 * Based on 'wyhash', by Wang Yi <godspeed_china@yeah.net>
 *
 * Copyright (C) 2025 Nicolas De Carli
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * You can contact the author at:
 * - rapidhash source repository: https://github.com/Nicoshev/rapidhash
 */

#pragma once

/*
 * Includes.
 */
#include <stdint.h>
#include <string.h>
#if defined(_MSC_VER)
#  include <intrin.h>
#  if defined(_M_X64) && !defined(_M_ARM64EC)
#    pragma intrinsic(_umul128)
#  endif
#endif

/*
 * C/C++ macros.
 */

#ifdef _MSC_VER
#  define RAPIDHASH_ALWAYS_INLINE __forceinline
#elif defined(__GNUC__)
#  define RAPIDHASH_ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#  define RAPIDHASH_ALWAYS_INLINE inline
#endif

#ifdef __cplusplus
#  define RAPIDHASH_NOEXCEPT noexcept
#  define RAPIDHASH_CONSTEXPR constexpr
#  ifndef RAPIDHASH_INLINE
#    define RAPIDHASH_INLINE RAPIDHASH_ALWAYS_INLINE
#  endif
#  if __cplusplus >= 201402L && !defined(_MSC_VER)
#    define RAPIDHASH_INLINE_CONSTEXPR RAPIDHASH_ALWAYS_INLINE constexpr
#  else
#    define RAPIDHASH_INLINE_CONSTEXPR RAPIDHASH_ALWAYS_INLINE
#  endif
#else
#  define RAPIDHASH_NOEXCEPT
#  define RAPIDHASH_CONSTEXPR static const
#  ifndef RAPIDHASH_INLINE
#    define RAPIDHASH_INLINE static RAPIDHASH_ALWAYS_INLINE
#  endif
#  define RAPIDHASH_INLINE_CONSTEXPR RAPIDHASH_INLINE
#endif

/*
 * Unrolled macro.
 * Improves large input speed, but increases code size and worsens small input speed.
 *
 * RAPIDHASH_COMPACT:  Normal behavior, processes the input in 112-byte blocks.
 * RAPIDHASH_UNROLLED: Unrolls the main loop to process 224-byte blocks per iteration.
 */
#ifndef RAPIDHASH_UNROLLED
#  define RAPIDHASH_COMPACT
#elif defined(RAPIDHASH_COMPACT)
#  error "cannot define RAPIDHASH_COMPACT and RAPIDHASH_UNROLLED simultaneously."
#endif

/*
 * Protection macro, alters behavior of the rapid_mum multiplication function.
 *
 * RAPIDHASH_FAST:      Normal behavior, max speed.
 * RAPIDHASH_PROTECTED: Extra protection against entropy loss.
 */
#ifndef RAPIDHASH_PROTECTED
#  define RAPIDHASH_FAST
#elif defined(RAPIDHASH_FAST)
#  error "cannot define RAPIDHASH_PROTECTED and RAPIDHASH_FAST simultaneously."
#endif

/*
 * Likely and unlikely macros.
 */
#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
#  define _likely_(x)   __builtin_expect(x,1)
#  define _unlikely_(x) __builtin_expect(x,0)
#else
#  define _likely_(x)   (x)
#  define _unlikely_(x) (x)
#endif

/*
 * Endianness macros.
 */
#ifndef RAPIDHASH_LITTLE_ENDIAN
#  if defined(_WIN32) || defined(__LITTLE_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define RAPIDHASH_LITTLE_ENDIAN
#  elif defined(__BIG_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define RAPIDHASH_BIG_ENDIAN
#  else
#    warning "could not determine endianness! Falling back to little endian."
#    define RAPIDHASH_LITTLE_ENDIAN
#  endif
#endif

/*
 * Default secret parameters.
 */
RAPIDHASH_CONSTEXPR uint64_t rapid_secret[8] = {
    0x2d358dccaa6c78a5ull,
    0x8bb84b93962eacc9ull,
    0x4b33a62ed433d4a3ull,
    0x4d5a2da51de1aa47ull,
    0xa0761d6478bd642full,
    0xe7037ed1a0b428dbull,
    0x90ed1765281c388cull,
    0xaaaaaaaaaaaaaaaaull};
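
/*
 * Illustrative sketch only: the *_internal functions accept a custom secret
 * array in place of rapid_secret. The default constants above were chosen for
 * statistical quality, so the replacement values below are placeholders, not
 * a vetted configuration.
 *
 *     static const uint64_t my_secret[8] = {
 *         0x9e3779b97f4a7c15ull, 0xbf58476d1ce4e5b9ull,
 *         0x94d049bb133111ebull, 0x2545f4914f6cdd1dull,
 *         0xd6e8feb86659fd93ull, 0xa5a5a5a5a5a5a5a5ull,
 *         0xc2b2ae3d27d4eb4full, 0x165667b19e3779f9ull};
 *     uint64_t h = rapidhash_internal(buf, len, 0, my_secret);
 */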

/*
 * 64*64 -> 128bit multiply function.
 *
 * @param A Address of 64-bit number.
 * @param B Address of 64-bit number.
 *
 * Calculates 128-bit C = *A * *B.
 *
 * When RAPIDHASH_FAST is defined:
 * Overwrites A contents with C's low 64 bits.
 * Overwrites B contents with C's high 64 bits.
 *
 * When RAPIDHASH_PROTECTED is defined:
 * Xors and overwrites A contents with C's low 64 bits.
 * Xors and overwrites B contents with C's high 64 bits.
 */
RAPIDHASH_INLINE_CONSTEXPR void rapid_mum(uint64_t *A, uint64_t *B) RAPIDHASH_NOEXCEPT {
#if defined(__SIZEOF_INT128__)
    __uint128_t r = *A; r *= *B;
#  ifdef RAPIDHASH_PROTECTED
    *A ^= (uint64_t)r; *B ^= (uint64_t)(r >> 64);
#  else
    *A = (uint64_t)r; *B = (uint64_t)(r >> 64);
#  endif
#elif defined(_MSC_VER) && (defined(_WIN64) || defined(_M_HYBRID_CHPE_ARM64))
#  if defined(_M_X64)
#    ifdef RAPIDHASH_PROTECTED
    uint64_t a, b;
    a = _umul128(*A, *B, &b);
    *A ^= a; *B ^= b;
#    else
    *A = _umul128(*A, *B, B);
#    endif
#  else
#    ifdef RAPIDHASH_PROTECTED
    uint64_t a, b;
    b = __umulh(*A, *B);
    a = *A * *B;
    *A ^= a; *B ^= b;
#    else
    uint64_t c = __umulh(*A, *B);
    *A = *A * *B;
    *B = c;
#    endif
#  endif
#else
    uint64_t ha = *A >> 32, hb = *B >> 32, la = (uint32_t)*A, lb = (uint32_t)*B;
    uint64_t rh = ha * hb, rm0 = ha * lb, rm1 = hb * la, rl = la * lb, t = rl + (rm0 << 32), c = t < rl;
    uint64_t lo = t + (rm1 << 32);
    c += lo < t;
    uint64_t hi = rh + (rm0 >> 32) + (rm1 >> 32) + c;
#  ifdef RAPIDHASH_PROTECTED
    *A ^= lo; *B ^= hi;
#  else
    *A = lo; *B = hi;
#  endif
#endif
}

/*
 * Multiply and xor mix function.
 *
 * @param A 64-bit number.
 * @param B 64-bit number.
 *
 * Calculates 128-bit C = A * B.
 * Returns 64-bit xor between high and low 64 bits of C.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapid_mix(uint64_t A, uint64_t B) RAPIDHASH_NOEXCEPT { rapid_mum(&A, &B); return A ^ B; }
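
/*
 * Example (illustrative; assumes RAPIDHASH_FAST mode, where rapid_mum
 * overwrites rather than xors its operands):
 *
 *     uint64_t lo = x, hi = y;
 *     rapid_mum(&lo, &hi);   // lo = low 64 bits of x*y, hi = high 64 bits
 *     uint64_t m = lo ^ hi;  // same value as rapid_mix(x, y)
 */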

/*
 * Read functions.
 */
#ifdef RAPIDHASH_LITTLE_ENDIAN
RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return v; }
RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return v; }
#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return __builtin_bswap64(v); }
RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return __builtin_bswap32(v); }
#elif defined(_MSC_VER)
RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return _byteswap_uint64(v); }
RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return _byteswap_ulong(v); }
#else
RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT {
    uint64_t v; memcpy(&v, p, 8);
    return (((v >> 56) & 0xff) | ((v >> 40) & 0xff00) | ((v >> 24) & 0xff0000) | ((v >> 8) & 0xff000000) |
            ((v << 8) & 0xff00000000) | ((v << 24) & 0xff0000000000) | ((v << 40) & 0xff000000000000) | ((v << 56) & 0xff00000000000000));
}
RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT {
    uint32_t v; memcpy(&v, p, 4);
    return (((v >> 24) & 0xff) | ((v >> 8) & 0xff00) | ((v << 8) & 0xff0000) | ((v << 24) & 0xff000000));
}
#endif
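
/*
 * Both readers perform unaligned loads and always return the little-endian
 * interpretation of the bytes, regardless of host endianness. For example,
 * given the bytes {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
 * rapid_read64 yields 0x0807060504030201 on every platform.
 */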

/*
 * rapidhash main function.
 *
 * @param key Buffer to be hashed.
 * @param len @key length, in bytes.
 * @param seed 64-bit seed used to alter the hash result predictably.
 * @param secret Array of eight 64-bit secrets used to alter the hash result predictably.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhash_internal(const void *key, size_t len, uint64_t seed, const uint64_t *secret) RAPIDHASH_NOEXCEPT {
    const uint8_t *p = (const uint8_t *)key;
    seed ^= rapid_mix(seed ^ secret[2], secret[1]);
    uint64_t a = 0, b = 0;
    size_t i = len;
    if (_likely_(len <= 16)) {
        if (len >= 4) {
            seed ^= len;
            if (len >= 8) {
                const uint8_t *plast = p + len - 8;
                a = rapid_read64(p);
                b = rapid_read64(plast);
            } else {
                const uint8_t *plast = p + len - 4;
                a = rapid_read32(p);
                b = rapid_read32(plast);
            }
        } else if (len > 0) {
            a = (((uint64_t)p[0]) << 45) | p[len - 1];
            b = p[len >> 1];
        } else
            a = b = 0;
    } else {
        if (len > 112) {
            uint64_t see1 = seed, see2 = seed;
            uint64_t see3 = seed, see4 = seed;
            uint64_t see5 = seed, see6 = seed;
#ifdef RAPIDHASH_COMPACT
            do {
                seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed);
                see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1);
                see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2);
                see3 = rapid_mix(rapid_read64(p + 48) ^ secret[3], rapid_read64(p + 56) ^ see3);
                see4 = rapid_mix(rapid_read64(p + 64) ^ secret[4], rapid_read64(p + 72) ^ see4);
                see5 = rapid_mix(rapid_read64(p + 80) ^ secret[5], rapid_read64(p + 88) ^ see5);
                see6 = rapid_mix(rapid_read64(p + 96) ^ secret[6], rapid_read64(p + 104) ^ see6);
                p += 112;
                i -= 112;
            } while (i > 112);
#else
            while (i > 224) {
                seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed);
                see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1);
                see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2);
                see3 = rapid_mix(rapid_read64(p + 48) ^ secret[3], rapid_read64(p + 56) ^ see3);
                see4 = rapid_mix(rapid_read64(p + 64) ^ secret[4], rapid_read64(p + 72) ^ see4);
                see5 = rapid_mix(rapid_read64(p + 80) ^ secret[5], rapid_read64(p + 88) ^ see5);
                see6 = rapid_mix(rapid_read64(p + 96) ^ secret[6], rapid_read64(p + 104) ^ see6);
                seed = rapid_mix(rapid_read64(p + 112) ^ secret[0], rapid_read64(p + 120) ^ seed);
                see1 = rapid_mix(rapid_read64(p + 128) ^ secret[1], rapid_read64(p + 136) ^ see1);
                see2 = rapid_mix(rapid_read64(p + 144) ^ secret[2], rapid_read64(p + 152) ^ see2);
                see3 = rapid_mix(rapid_read64(p + 160) ^ secret[3], rapid_read64(p + 168) ^ see3);
                see4 = rapid_mix(rapid_read64(p + 176) ^ secret[4], rapid_read64(p + 184) ^ see4);
                see5 = rapid_mix(rapid_read64(p + 192) ^ secret[5], rapid_read64(p + 200) ^ see5);
                see6 = rapid_mix(rapid_read64(p + 208) ^ secret[6], rapid_read64(p + 216) ^ see6);
                p += 224;
                i -= 224;
            }
            if (i > 112) {
                seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed);
                see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1);
                see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2);
                see3 = rapid_mix(rapid_read64(p + 48) ^ secret[3], rapid_read64(p + 56) ^ see3);
                see4 = rapid_mix(rapid_read64(p + 64) ^ secret[4], rapid_read64(p + 72) ^ see4);
                see5 = rapid_mix(rapid_read64(p + 80) ^ secret[5], rapid_read64(p + 88) ^ see5);
                see6 = rapid_mix(rapid_read64(p + 96) ^ secret[6], rapid_read64(p + 104) ^ see6);
                p += 112;
                i -= 112;
            }
#endif
            seed ^= see1;
            see2 ^= see3;
            see4 ^= see5;
            seed ^= see6;
            see2 ^= see4;
            seed ^= see2;
        }
        if (i > 16) {
            seed = rapid_mix(rapid_read64(p) ^ secret[2], rapid_read64(p + 8) ^ seed);
            if (i > 32) {
                seed = rapid_mix(rapid_read64(p + 16) ^ secret[2], rapid_read64(p + 24) ^ seed);
                if (i > 48) {
                    seed = rapid_mix(rapid_read64(p + 32) ^ secret[1], rapid_read64(p + 40) ^ seed);
                    if (i > 64) {
                        seed = rapid_mix(rapid_read64(p + 48) ^ secret[1], rapid_read64(p + 56) ^ seed);
                        if (i > 80) {
                            seed = rapid_mix(rapid_read64(p + 64) ^ secret[2], rapid_read64(p + 72) ^ seed);
                            if (i > 96) {
                                seed = rapid_mix(rapid_read64(p + 80) ^ secret[1], rapid_read64(p + 88) ^ seed);
                            }
                        }
                    }
                }
            }
        }
        a = rapid_read64(p + i - 16) ^ i;
        b = rapid_read64(p + i - 8);
    }
    a ^= secret[1];
    b ^= seed;
    rapid_mum(&a, &b);
    return rapid_mix(a ^ secret[7], b ^ secret[1] ^ i);
}

/*
 * rapidhashMicro main function.
 *
 * @param key Buffer to be hashed.
 * @param len @key length, in bytes.
 * @param seed 64-bit seed used to alter the hash result predictably.
 * @param secret Array of eight 64-bit secrets used to alter the hash result predictably.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashMicro_internal(const void *key, size_t len, uint64_t seed, const uint64_t *secret) RAPIDHASH_NOEXCEPT {
    const uint8_t *p = (const uint8_t *)key;
    seed ^= rapid_mix(seed ^ secret[2], secret[1]);
    uint64_t a = 0, b = 0;
    size_t i = len;
    if (_likely_(len <= 16)) {
        if (len >= 4) {
            seed ^= len;
            if (len >= 8) {
                const uint8_t *plast = p + len - 8;
                a = rapid_read64(p);
                b = rapid_read64(plast);
            } else {
                const uint8_t *plast = p + len - 4;
                a = rapid_read32(p);
                b = rapid_read32(plast);
            }
        } else if (len > 0) {
            a = (((uint64_t)p[0]) << 45) | p[len - 1];
            b = p[len >> 1];
        } else
            a = b = 0;
    } else {
        if (i > 80) {
            uint64_t see1 = seed, see2 = seed;
            uint64_t see3 = seed, see4 = seed;
            do {
                seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed);
                see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1);
                see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2);
                see3 = rapid_mix(rapid_read64(p + 48) ^ secret[3], rapid_read64(p + 56) ^ see3);
                see4 = rapid_mix(rapid_read64(p + 64) ^ secret[4], rapid_read64(p + 72) ^ see4);
                p += 80;
                i -= 80;
            } while (i > 80);
            seed ^= see1;
            see2 ^= see3;
            seed ^= see4;
            seed ^= see2;
        }
        if (i > 16) {
            seed = rapid_mix(rapid_read64(p) ^ secret[2], rapid_read64(p + 8) ^ seed);
            if (i > 32) {
                seed = rapid_mix(rapid_read64(p + 16) ^ secret[2], rapid_read64(p + 24) ^ seed);
                if (i > 48) {
                    seed = rapid_mix(rapid_read64(p + 32) ^ secret[1], rapid_read64(p + 40) ^ seed);
                    if (i > 64) {
                        seed = rapid_mix(rapid_read64(p + 48) ^ secret[1], rapid_read64(p + 56) ^ seed);
                    }
                }
            }
        }
        a = rapid_read64(p + i - 16) ^ i;
        b = rapid_read64(p + i - 8);
    }
    a ^= secret[1];
    b ^= seed;
    rapid_mum(&a, &b);
    return rapid_mix(a ^ secret[7], b ^ secret[1] ^ i);
}

/*
 * rapidhashNano main function.
 *
 * @param key Buffer to be hashed.
 * @param len @key length, in bytes.
 * @param seed 64-bit seed used to alter the hash result predictably.
 * @param secret Array of eight 64-bit secrets used to alter the hash result predictably.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashNano_internal(const void *key, size_t len, uint64_t seed, const uint64_t *secret) RAPIDHASH_NOEXCEPT {
    const uint8_t *p = (const uint8_t *)key;
    seed ^= rapid_mix(seed ^ secret[2], secret[1]);
    uint64_t a = 0, b = 0;
    size_t i = len;
    if (_likely_(len <= 16)) {
        if (len >= 4) {
            seed ^= len;
            if (len >= 8) {
                const uint8_t *plast = p + len - 8;
                a = rapid_read64(p);
                b = rapid_read64(plast);
            } else {
                const uint8_t *plast = p + len - 4;
                a = rapid_read32(p);
                b = rapid_read32(plast);
            }
        } else if (len > 0) {
            a = (((uint64_t)p[0]) << 45) | p[len - 1];
            b = p[len >> 1];
        } else
            a = b = 0;
    } else {
        if (i > 48) {
            uint64_t see1 = seed, see2 = seed;
            do {
                seed = rapid_mix(rapid_read64(p) ^ secret[0], rapid_read64(p + 8) ^ seed);
                see1 = rapid_mix(rapid_read64(p + 16) ^ secret[1], rapid_read64(p + 24) ^ see1);
                see2 = rapid_mix(rapid_read64(p + 32) ^ secret[2], rapid_read64(p + 40) ^ see2);
                p += 48;
                i -= 48;
            } while (i > 48);
            seed ^= see1;
            seed ^= see2;
        }
        if (i > 16) {
            seed = rapid_mix(rapid_read64(p) ^ secret[2], rapid_read64(p + 8) ^ seed);
            if (i > 32) {
                seed = rapid_mix(rapid_read64(p + 16) ^ secret[2], rapid_read64(p + 24) ^ seed);
            }
        }
        a = rapid_read64(p + i - 16) ^ i;
        b = rapid_read64(p + i - 8);
    }
    a ^= secret[1];
    b ^= seed;
    rapid_mum(&a, &b);
    return rapid_mix(a ^ secret[7], b ^ secret[1] ^ i);
}

/*
 * rapidhash seeded hash function.
 *
 * @param key Buffer to be hashed.
 * @param len @key length, in bytes.
 * @param seed 64-bit seed used to alter the hash result predictably.
 *
 * Calls rapidhash_internal using provided parameters and default secrets.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhash_withSeed(const void *key, size_t len, uint64_t seed) RAPIDHASH_NOEXCEPT {
    return rapidhash_internal(key, len, seed, rapid_secret);
}
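
/*
 * Example (illustrative; the seed value is arbitrary):
 *
 *     const char key[] = "example";
 *     uint64_t h = rapidhash_withSeed(key, sizeof(key) - 1, 0x1234567890abcdefull);
 */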

/*
 * rapidhash general purpose hash function.
 *
 * @param key Buffer to be hashed.
 * @param len @key length, in bytes.
 *
 * Calls rapidhash_withSeed using provided parameters and the default seed.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhash(const void *key, size_t len) RAPIDHASH_NOEXCEPT {
    return rapidhash_withSeed(key, len, 0);
}
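
/*
 * Example (illustrative):
 *
 *     const char *msg = "hello world";
 *     uint64_t h = rapidhash(msg, strlen(msg));
 */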

/*
 * rapidhashMicro seeded hash function.
 *
 * Designed for HPC and server applications, where cache misses impose a noticeable performance penalty.
 * Clang-18+ compiles it to ~140 instructions without stack usage, both on x86-64 and aarch64.
 * Faster for sizes up to 512 bytes, and just 15%-20% slower for inputs above 1 KiB.
 *
 * @param key Buffer to be hashed.
 * @param len @key length, in bytes.
 * @param seed 64-bit seed used to alter the hash result predictably.
 *
 * Calls rapidhashMicro_internal using provided parameters and default secrets.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashMicro_withSeed(const void *key, size_t len, uint64_t seed) RAPIDHASH_NOEXCEPT {
    return rapidhashMicro_internal(key, len, seed, rapid_secret);
}

/*
 * rapidhashMicro hash function.
 *
 * @param key Buffer to be hashed.
 * @param len @key length, in bytes.
 *
 * Calls rapidhashMicro_withSeed using provided parameters and the default seed.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashMicro(const void *key, size_t len) RAPIDHASH_NOEXCEPT {
    return rapidhashMicro_withSeed(key, len, 0);
}
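
/*
 * Example (illustrative):
 *
 *     uint8_t packet[256] = {0};
 *     uint64_t h = rapidhashMicro(packet, sizeof(packet));
 */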

/*
 * rapidhashNano seeded hash function.
 *
 * @param key Buffer to be hashed.
 * @param len @key length, in bytes.
 * @param seed 64-bit seed used to alter the hash result predictably.
 *
 * Calls rapidhashNano_internal using provided parameters and default secrets.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashNano_withSeed(const void *key, size_t len, uint64_t seed) RAPIDHASH_NOEXCEPT {
    return rapidhashNano_internal(key, len, seed, rapid_secret);
}

/*
 * rapidhashNano hash function.
 *
 * Designed for mobile and embedded applications, where keeping a small code size is a top priority.
 * Clang-18+ compiles it to fewer than 100 instructions without stack usage, both on x86-64 and aarch64.
 * The fastest variant for sizes up to 48 bytes, but may be considerably slower for larger inputs.
 *
 * @param key Buffer to be hashed.
 * @param len @key length, in bytes.
 *
 * Calls rapidhashNano_withSeed using provided parameters and the default seed.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE_CONSTEXPR uint64_t rapidhashNano(const void *key, size_t len) RAPIDHASH_NOEXCEPT {
    return rapidhashNano_withSeed(key, len, 0);
}
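
/*
 * Example (illustrative; suited to short keys):
 *
 *     const char id[] = "user:42";
 *     uint64_t h = rapidhashNano(id, sizeof(id) - 1);
 */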