Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rhashtable: Drop gfp_flags arg in insert/remove functions

Reallocation is only required for shrinking and expanding, and both rely
on a mutex for synchronization, and callers of rhashtable_init() are in
non-atomic context. Therefore, there is no reason to continue passing
allocation hints through the API.

Instead, use GFP_KERNEL and add __GFP_NOWARN | __GFP_NORETRY to allow
for a silent fallback to vzalloc() without the OOM killer jumping in, as
pointed out by Eric Dumazet and Eric W. Biederman.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Thomas Graf and committed by
David S. Miller
6eba8224 64bb7e99

+26 -33
+5 -5
include/linux/rhashtable.h
··· 99 99 u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); 100 100 u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); 101 101 102 - void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); 103 - bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); 102 + void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); 103 + bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); 104 104 void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, 105 - struct rhash_head __rcu **pprev, gfp_t flags); 105 + struct rhash_head __rcu **pprev); 106 106 107 107 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); 108 108 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); 109 109 110 - int rhashtable_expand(struct rhashtable *ht, gfp_t flags); 111 - int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); 110 + int rhashtable_expand(struct rhashtable *ht); 111 + int rhashtable_shrink(struct rhashtable *ht); 112 112 113 113 void *rhashtable_lookup(const struct rhashtable *ht, const void *key); 114 114 void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+17 -24
lib/rhashtable.c
··· 107 107 return obj_hashfn(ht, rht_obj(ht, he), hsize); 108 108 } 109 109 110 - static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags) 110 + static struct bucket_table *bucket_table_alloc(size_t nbuckets) 111 111 { 112 112 struct bucket_table *tbl; 113 113 size_t size; 114 114 115 115 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); 116 - tbl = kzalloc(size, flags); 116 + tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 117 117 if (tbl == NULL) 118 118 tbl = vzalloc(size); 119 119 ··· 200 200 /** 201 201 * rhashtable_expand - Expand hash table while allowing concurrent lookups 202 202 * @ht: the hash table to expand 203 - * @flags: allocation flags 204 203 * 205 204 * A secondary bucket array is allocated and the hash entries are migrated 206 205 * while keeping them on both lists until the end of the RCU grace period. ··· 210 211 * The caller must ensure that no concurrent table mutations take place. 211 212 * It is however valid to have concurrent lookups if they are RCU protected. 212 213 */ 213 - int rhashtable_expand(struct rhashtable *ht, gfp_t flags) 214 + int rhashtable_expand(struct rhashtable *ht) 214 215 { 215 216 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); 216 217 struct rhash_head *he; ··· 222 223 if (ht->p.max_shift && ht->shift >= ht->p.max_shift) 223 224 return 0; 224 225 225 - new_tbl = bucket_table_alloc(old_tbl->size * 2, flags); 226 + new_tbl = bucket_table_alloc(old_tbl->size * 2); 226 227 if (new_tbl == NULL) 227 228 return -ENOMEM; 228 229 ··· 280 281 /** 281 282 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups 282 283 * @ht: the hash table to shrink 283 - * @flags: allocation flags 284 284 * 285 285 * This function may only be called in a context where it is safe to call 286 286 * synchronize_rcu(), e.g. not within a rcu_read_lock() section. ··· 287 289 * The caller must ensure that no concurrent table mutations take place. 
288 290 * It is however valid to have concurrent lookups if they are RCU protected. 289 291 */ 290 - int rhashtable_shrink(struct rhashtable *ht, gfp_t flags) 292 + int rhashtable_shrink(struct rhashtable *ht) 291 293 { 292 294 struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht); 293 295 struct rhash_head __rcu **pprev; ··· 298 300 if (ht->shift <= ht->p.min_shift) 299 301 return 0; 300 302 301 - ntbl = bucket_table_alloc(tbl->size / 2, flags); 303 + ntbl = bucket_table_alloc(tbl->size / 2); 302 304 if (ntbl == NULL) 303 305 return -ENOMEM; 304 306 ··· 339 341 * rhashtable_insert - insert object into hash hash table 340 342 * @ht: hash table 341 343 * @obj: pointer to hash head inside object 342 - * @flags: allocation flags (table expansion) 343 344 * 344 345 * Will automatically grow the table via rhashtable_expand() if the the 345 346 * grow_decision function specified at rhashtable_init() returns true. ··· 346 349 * The caller must ensure that no concurrent table mutations occur. It is 347 350 * however valid to have concurrent lookups if they are RCU protected. 348 351 */ 349 - void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, 350 - gfp_t flags) 352 + void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) 351 353 { 352 354 struct bucket_table *tbl = rht_dereference(ht->tbl, ht); 353 355 u32 hash; ··· 359 363 ht->nelems++; 360 364 361 365 if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) 362 - rhashtable_expand(ht, flags); 366 + rhashtable_expand(ht); 363 367 } 364 368 EXPORT_SYMBOL_GPL(rhashtable_insert); 365 369 ··· 368 372 * @ht: hash table 369 373 * @obj: pointer to hash head inside object 370 374 * @pprev: pointer to previous element 371 - * @flags: allocation flags (table expansion) 372 375 * 373 376 * Identical to rhashtable_remove() but caller is alreayd aware of the element 374 377 * in front of the element to be deleted. 
This is in particular useful for 375 378 * deletion when combined with walking or lookup. 376 379 */ 377 380 void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, 378 - struct rhash_head __rcu **pprev, gfp_t flags) 381 + struct rhash_head __rcu **pprev) 379 382 { 380 383 struct bucket_table *tbl = rht_dereference(ht->tbl, ht); 381 384 ··· 385 390 386 391 if (ht->p.shrink_decision && 387 392 ht->p.shrink_decision(ht, tbl->size)) 388 - rhashtable_shrink(ht, flags); 393 + rhashtable_shrink(ht); 389 394 } 390 395 EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); 391 396 ··· 393 398 * rhashtable_remove - remove object from hash table 394 399 * @ht: hash table 395 400 * @obj: pointer to hash head inside object 396 - * @flags: allocation flags (table expansion) 397 401 * 398 402 * Since the hash chain is single linked, the removal operation needs to 399 403 * walk the bucket chain upon removal. The removal operation is thus ··· 404 410 * The caller must ensure that no concurrent table mutations occur. It is 405 411 * however valid to have concurrent lookups if they are RCU protected. 
406 412 */ 407 - bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj, 408 - gfp_t flags) 413 + bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) 409 414 { 410 415 struct bucket_table *tbl = rht_dereference(ht->tbl, ht); 411 416 struct rhash_head __rcu **pprev; ··· 422 429 continue; 423 430 } 424 431 425 - rhashtable_remove_pprev(ht, he, pprev, flags); 432 + rhashtable_remove_pprev(ht, he, pprev); 426 433 return true; 427 434 } 428 435 ··· 569 576 if (params->nelem_hint) 570 577 size = rounded_hashtable_size(params); 571 578 572 - tbl = bucket_table_alloc(size, GFP_KERNEL); 579 + tbl = bucket_table_alloc(size); 573 580 if (tbl == NULL) 574 581 return -ENOMEM; 575 582 ··· 706 713 obj->ptr = TEST_PTR; 707 714 obj->value = i * 2; 708 715 709 - rhashtable_insert(ht, &obj->node, GFP_KERNEL); 716 + rhashtable_insert(ht, &obj->node); 710 717 } 711 718 712 719 rcu_read_lock(); ··· 717 724 718 725 for (i = 0; i < TEST_NEXPANDS; i++) { 719 726 pr_info(" Table expansion iteration %u...\n", i); 720 - rhashtable_expand(ht, GFP_KERNEL); 727 + rhashtable_expand(ht); 721 728 722 729 rcu_read_lock(); 723 730 pr_info(" Verifying lookups...\n"); ··· 727 734 728 735 for (i = 0; i < TEST_NEXPANDS; i++) { 729 736 pr_info(" Table shrinkage iteration %u...\n", i); 730 - rhashtable_shrink(ht, GFP_KERNEL); 737 + rhashtable_shrink(ht); 731 738 732 739 rcu_read_lock(); 733 740 pr_info(" Verifying lookups...\n"); ··· 742 749 obj = rhashtable_lookup(ht, &key); 743 750 BUG_ON(!obj); 744 751 745 - rhashtable_remove(ht, &obj->node, GFP_KERNEL); 752 + rhashtable_remove(ht, &obj->node); 746 753 kfree(obj); 747 754 } 748 755
+2 -2
net/netfilter/nft_hash.c
··· 65 65 if (set->flags & NFT_SET_MAP) 66 66 nft_data_copy(he->data, &elem->data); 67 67 68 - rhashtable_insert(priv, &he->node, GFP_KERNEL); 68 + rhashtable_insert(priv, &he->node); 69 69 70 70 return 0; 71 71 } ··· 88 88 pprev = elem->cookie; 89 89 he = rht_dereference((*pprev), priv); 90 90 91 - rhashtable_remove_pprev(priv, he, pprev, GFP_KERNEL); 91 + rhashtable_remove_pprev(priv, he, pprev); 92 92 93 93 synchronize_rcu(); 94 94 kfree(he);
+2 -2
net/netlink/af_netlink.c
··· 1092 1092 1093 1093 nlk_sk(sk)->portid = portid; 1094 1094 sock_hold(sk); 1095 - rhashtable_insert(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL); 1095 + rhashtable_insert(&table->hash, &nlk_sk(sk)->node); 1096 1096 err = 0; 1097 1097 err: 1098 1098 mutex_unlock(&nl_sk_hash_lock); ··· 1105 1105 1106 1106 mutex_lock(&nl_sk_hash_lock); 1107 1107 table = &nl_table[sk->sk_protocol]; 1108 - if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL)) { 1108 + if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) { 1109 1109 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); 1110 1110 __sock_put(sk); 1111 1111 }