/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
	return ht->p.mutex_is_held();
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 __hashfn(const struct rhashtable *ht, const void *key,
		    u32 len, u32 hsize)
{
	u32 h;

	h = ht->p.hashfn(key, len, ht->p.hash_rnd);

	return h & (hsize - 1);
}

/**
 * rhashtable_hashfn - compute hash for key of given length
 * @ht: hash table to compute for
 * @key: pointer to key
 * @len: length of key
 *
 * Computes the hash value using the hash function provided in the 'hashfn'
 * of struct rhashtable_params. The returned value is guaranteed to be
 * smaller than the number of buckets in the hash table.
 */
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return __hashfn(ht, key, len, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_hashfn);

static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
{
	if (unlikely(!ht->p.key_len)) {
		u32 h;

		h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);

		return h & (hsize - 1);
	}

	return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
}
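/* Usage sketch for rhashtable_hashfn() (illustrative only; 'ht' is assumed
 * to be an already initialized table hashing a fixed-length u32 key):
 *
 *	u32 key = 42, bkt;
 *
 *	rcu_read_lock();
 *	bkt = rhashtable_hashfn(ht, &key, sizeof(key));
 *	[... bkt is now a valid bucket index, i.e. bkt < table size ...]
 *	rcu_read_unlock();
 *
 * The RCU read-side lock is needed because the current bucket table is
 * dereferenced to obtain the table size.
 */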
/**
 * rhashtable_obj_hashfn - compute hash for hashed object
 * @ht: hash table to compute for
 * @ptr: pointer to hashed object
 *
 * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
 * on whether the hash table is set up to work with a fixed length key. The
 * returned value is guaranteed to be smaller than the number of buckets in
 * the hash table.
 */
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return obj_hashfn(ht, ptr, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct rhash_head *he, u32 hsize)
{
	return obj_hashfn(ht, rht_obj(ht, he), hsize);
}

static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
{
	struct bucket_table *tbl;
	size_t size;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, flags);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	return tbl;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	kvfree(tbl);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return ht->nelems > (new_size / 4 * 3);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return ht->nelems < (new_size * 3 / 10);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl, size_t n)
{
	struct rhash_head *he, *p, *next;
	unsigned int h;

	/* Old bucket empty, no work needed. */
	p = rht_dereference(old_tbl->buckets[n], ht);
	if (!p)
		return;

	/* Advance p one or more times until it points at the last node
	 * of the leading run, i.e. the last node that hashes to the same
	 * new bucket as the chain head. The node following p then hashes
	 * to a different new bucket.
	 */
	h = head_hashfn(ht, p, new_tbl->size);
	rht_for_each(he, p->next, ht) {
		if (head_hashfn(ht, he, new_tbl->size) != h)
			break;
		p = he;
	}
	RCU_INIT_POINTER(old_tbl->buckets[n], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	next = NULL;
	if (he) {
		rht_for_each(he, he->next, ht) {
			if (head_hashfn(ht, he, new_tbl->size) == h) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket
	 */
	RCU_INIT_POINTER(p->next, next);
}
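/* Unzip illustration (hypothetical chain; A/B mark the new bucket each
 * entry hashes to once the table has doubled):
 *
 *	old bucket:   A1 -> A2 -> B1 -> A3
 *	new bucket a: A1 -> A2 -> B1 -> A3	(zipped)
 *	new bucket b:             B1 -> A3	(zipped)
 *
 * One pass advances past the leading run (A1, A2), sets the old bucket
 * head to B1 and links A2 directly to A3. Repeated passes, separated by
 * grace periods, empty the old bucket and fully separate the chains.
 */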
/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 * @flags: allocation flags
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int i, h;
	bool complete;

	ASSERT_RHT_MUTEX(ht);

	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
		return 0;

	new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
	if (new_tbl == NULL)
		return -ENOMEM;

	ht->shift++;

	/* For each new bucket, search the corresponding old bucket
	 * for the first entry that hashes to the new bucket, and
	 * link the new bucket to that entry. Since all the entries
	 * which will end up in the new bucket appear in the same
	 * old bucket, this constructs an entirely valid new hash
	 * table, but with multiple buckets "zipped" together into a
	 * single imprecise chain.
	 */
	for (i = 0; i < new_tbl->size; i++) {
		h = i & (old_tbl->size - 1);
		rht_for_each(he, old_tbl->buckets[h], ht) {
			if (head_hashfn(ht, he, new_tbl->size) == i) {
				RCU_INIT_POINTER(new_tbl->buckets[i], he);
				break;
			}
		}
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	do {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (i = 0; i < old_tbl->size; i++) {
			hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
			if (old_tbl->buckets[i] != NULL)
				complete = false;
		}
	} while (!complete);

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
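/* Usage sketch (illustrative only; 'ht_mutex' is a hypothetical mutex
 * serializing all mutations on 'ht'):
 *
 *	int err;
 *
 *	mutex_lock(&ht_mutex);
 *	err = rhashtable_expand(ht, GFP_KERNEL);
 *	mutex_unlock(&ht_mutex);
 *
 * Expansion is normally triggered automatically from rhashtable_insert()
 * via the grow_decision callback; a direct call like the above is only
 * needed for explicit, caller-driven resizing.
 */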
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 * @flags: allocation flags
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
{
	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	unsigned int i;

	ASSERT_RHT_MUTEX(ht);

	if (tbl->size <= HASH_MIN_SIZE)
		return 0;

	ntbl = bucket_table_alloc(tbl->size / 2, flags);
	if (ntbl == NULL)
		return -ENOMEM;

	ht->shift--;

	/* Link each bucket in the new table to the first bucket
	 * in the old table that contains entries which will hash
	 * to the new bucket.
	 */
	for (i = 0; i < ntbl->size; i++) {
		ntbl->buckets[i] = tbl->buckets[i];

		/* Walk to the end of the chain and append the chain
		 * of the corresponding upper-half bucket of the old
		 * table, whose entries also hash to this new bucket.
		 */
		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
		     pprev = &rht_dereference(*pprev, ht)->next)
			;
		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, ntbl);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

/**
 * rhashtable_insert - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @flags: allocation flags (table expansion)
 *
 * Will automatically grow the table via rhashtable_expand() if the
 * grow_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
		       gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	u32 hash;

	ASSERT_RHT_MUTEX(ht);

	hash = head_hashfn(ht, obj, tbl->size);
	RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
	rcu_assign_pointer(tbl->buckets[hash], obj);
	ht->nelems++;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);

/**
 * rhashtable_remove_pprev - remove object from hash table given previous element
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @pprev: pointer to previous element
 * @flags: allocation flags (table shrinking)
 *
 * Identical to rhashtable_remove() but the caller is already aware of the
 * element in front of the element to be deleted. This is particularly
 * useful for deletion when combined with walking or lookup.
 */
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
			     struct rhash_head __rcu **pprev, gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	RCU_INIT_POINTER(*pprev, obj->next);
	ht->nelems--;

	if (ht->p.shrink_decision &&
	    ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
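/* Usage sketch (illustrative only; 'my_mutex' and 'obj' are hypothetical,
 * with 'obj' embedding a struct rhash_head member named 'node'):
 *
 *	mutex_lock(&my_mutex);
 *	rhashtable_insert(ht, &obj->node, GFP_KERNEL);
 *	[...]
 *	rhashtable_remove(ht, &obj->node, GFP_KERNEL);
 *	mutex_unlock(&my_mutex);
 *
 * Both calls may resize the table and must therefore be able to call
 * synchronize_rcu(), i.e. they must not run inside an RCU read-side
 * critical section.
 */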
/**
 * rhashtable_remove - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @flags: allocation flags (table shrinking)
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
		       gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	u32 h;

	ASSERT_RHT_MUTEX(ht);

	h = head_hashfn(ht, obj, tbl->size);

	pprev = &tbl->buckets[h];
	rht_for_each(he, tbl->buckets[h], ht) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rhashtable_remove_pprev(ht, he, pprev, flags);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht: hash table
 * @key: pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 */
void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;
	u32 h;

	BUG_ON(!ht->p.key_len);

	h = __hashfn(ht, key, ht->p.key_len, tbl->size);
	rht_for_each_rcu(he, tbl->buckets[h], ht) {
		if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
			   ht->p.key_len))
			continue;
		return (void *) he - ht->p.head_offset;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht: hash table
 * @hash: hash value of desired entry
 * @compare: compare function, must return true on match
 * @arg: argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;

	if (unlikely(hash >= tbl->size))
		return NULL;

	rht_for_each_rcu(he, tbl->buckets[hash], ht) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		return (void *) he - ht->p.head_offset;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
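/* Usage sketch for rhashtable_lookup_compare() (illustrative only;
 * 'struct my_obj' and 'my_cmp' are hypothetical):
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		struct my_obj *obj = ptr;
 *
 *		return obj->portid == *(u32 *) arg;
 *	}
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_compare(ht, hash, my_cmp, &portid);
 *	rcu_read_unlock();
 *
 * This is the variant to use when the key alone cannot identify an entry,
 * e.g. when matching on multiple fields.
 */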
static size_t rounded_hashtable_size(unsigned int nelem)
{
	return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int key;
 *	void * my_member;
 *	struct rhash_head node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = arch_fast_hash,
 *	.mutex_is_held = &my_mutex_is_held,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = arch_fast_hash,
 *	.obj_hashfn = my_hash_fn,
 *	.mutex_is_held = &my_mutex_is_held,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params->nelem_hint);

	tbl = bucket_table_alloc(size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	memset(ht, 0, sizeof(*ht));
	ht->shift = ilog2(tbl->size);
	memcpy(&ht->p, params, sizeof(*params));
	RCU_INIT_POINTER(ht->tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
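/* Usage sketch (illustrative only, building on Configuration Example 1
 * above):
 *
 *	struct rhashtable ht;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 */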
/**
 * rhashtable_destroy - destroy hash table
 * @ht: the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(const struct rhashtable *ht)
{
	bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

static int test_mutex_is_held(void)
{
	return 1;
}

struct test_obj {
	void *ptr;
	int value;
	struct rhash_head node;
};

static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void test_bucket_stats(struct rhashtable *ht,
			      struct bucket_table *tbl,
			      bool quiet)
{
	unsigned int cnt, i, total = 0;
	struct test_obj *obj;

	for (i = 0; i < tbl->size; i++) {
		cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		if (!quiet)
			pr_cont("\n [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
		total, ht->nelems, TEST_ENTRIES);
}

static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj, *next;
	int err;
	unsigned int i;

	/*
	 * Insertion Test:
	 * Insert TEST_ENTRIES into table with all keys even numbers
	 */
	pr_info(" Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		struct test_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node, GFP_KERNEL);
	}

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	test_bucket_stats(ht, tbl, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info(" Table expansion iteration %u...\n", i);
		rhashtable_expand(ht, GFP_KERNEL);

		rcu_read_lock();
		pr_info(" Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info(" Table shrinkage iteration %u...\n", i);
		rhashtable_shrink(ht, GFP_KERNEL);

		rcu_read_lock();
		pr_info(" Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	pr_info(" Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node, GFP_KERNEL);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
			kfree(obj);

	return err;
}

static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = arch_fast_hash,
		.mutex_is_held = &test_mutex_is_held,
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */