Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'rhashtable-cleanups'

Herbert Xu says:

====================
rhashtable hash cleanups

This is a rebase on top of the nested lock annotation fix.

Nothing to see here, just a bunch of simple clean-ups before
I move onto something more substantial (hopefully).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+16 -27
lib/rhashtable.c
··· 63 63 64 64 static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash) 65 65 { 66 - return hash & (tbl->size - 1); 67 - } 68 - 69 - static u32 obj_raw_hashfn(struct rhashtable *ht, 70 - const struct bucket_table *tbl, const void *ptr) 71 - { 72 - u32 hash; 73 - 74 - if (unlikely(!ht->p.key_len)) 75 - hash = ht->p.obj_hashfn(ptr, tbl->hash_rnd); 76 - else 77 - hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, 78 - tbl->hash_rnd); 79 - 80 - return hash >> HASH_RESERVED_SPACE; 66 + return (hash >> HASH_RESERVED_SPACE) & (tbl->size - 1); 81 67 } 82 68 83 69 static u32 key_hashfn(struct rhashtable *ht, const struct bucket_table *tbl, 84 - const void *key, u32 len) 70 + const void *key) 85 71 { 86 - return ht->p.hashfn(key, len, tbl->hash_rnd) >> HASH_RESERVED_SPACE; 72 + return rht_bucket_index(tbl, ht->p.hashfn(key, ht->p.key_len, 73 + tbl->hash_rnd)); 87 74 } 88 75 89 76 static u32 head_hashfn(struct rhashtable *ht, 90 77 const struct bucket_table *tbl, 91 78 const struct rhash_head *he) 92 79 { 93 - return rht_bucket_index(tbl, obj_raw_hashfn(ht, tbl, rht_obj(ht, he))); 80 + const char *ptr = rht_obj(ht, he); 81 + 82 + return likely(ht->p.key_len) ? 
83 + key_hashfn(ht, tbl, ptr + ht->p.key_offset) : 84 + rht_bucket_index(tbl, ht->p.obj_hashfn(ptr, tbl->hash_rnd)); 94 85 } 95 86 96 87 #ifdef CONFIG_PROVE_LOCKING ··· 393 402 rcu_read_lock(); 394 403 395 404 old_tbl = rht_dereference_rcu(ht->tbl, ht); 396 - hash = obj_raw_hashfn(ht, old_tbl, rht_obj(ht, obj)); 405 + hash = head_hashfn(ht, old_tbl, obj); 397 406 398 407 spin_lock_bh(bucket_lock(old_tbl, hash)); 399 408 ··· 405 414 */ 406 415 tbl = rht_dereference_rcu(ht->future_tbl, ht); 407 416 if (tbl != old_tbl) { 408 - hash = obj_raw_hashfn(ht, tbl, rht_obj(ht, obj)); 417 + hash = head_hashfn(ht, tbl, obj); 409 418 spin_lock_nested(bucket_lock(tbl, hash), RHT_LOCK_NESTED); 410 419 } 411 420 ··· 418 427 419 428 no_resize_running = tbl == old_tbl; 420 429 421 - hash = rht_bucket_index(tbl, hash); 422 430 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); 423 431 424 432 if (rht_is_a_nulls(head)) ··· 433 443 434 444 exit: 435 445 if (tbl != old_tbl) { 436 - hash = obj_raw_hashfn(ht, tbl, rht_obj(ht, obj)); 446 + hash = head_hashfn(ht, tbl, obj); 437 447 spin_unlock(bucket_lock(tbl, hash)); 438 448 } 439 449 440 - hash = obj_raw_hashfn(ht, old_tbl, rht_obj(ht, obj)); 450 + hash = head_hashfn(ht, old_tbl, obj); 441 451 spin_unlock_bh(bucket_lock(old_tbl, hash)); 442 452 443 453 rcu_read_unlock(); ··· 476 486 unsigned hash; 477 487 bool ret = false; 478 488 479 - hash = obj_raw_hashfn(ht, tbl, rht_obj(ht, obj)); 489 + hash = head_hashfn(ht, tbl, obj); 480 490 lock = bucket_lock(tbl, hash); 481 - hash = rht_bucket_index(tbl, hash); 482 491 483 492 spin_lock_bh(lock); 484 493 ··· 609 620 rcu_read_lock(); 610 621 611 622 tbl = rht_dereference_rcu(ht->tbl, ht); 612 - hash = key_hashfn(ht, tbl, key, ht->p.key_len); 623 + hash = key_hashfn(ht, tbl, key); 613 624 restart: 614 - rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { 625 + rht_for_each_rcu(he, tbl, hash) { 615 626 if (!compare(rht_obj(ht, he), arg)) 616 627 continue; 617 628 rcu_read_unlock();