Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sunrpc: Switch to using hash list instead of a single list

Switch to using hlist_head/hlist_node for cache_head in cache_detail;
it makes it possible to remove a cache_head entry directly from cache_detail.

v8: using a hash list (hlist), not a plain list head

Signed-off-by: Kinglong Mee <kinglongmee@gmail.com>
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>

authored by

Kinglong Mee and committed by
J. Bruce Fields
129e5824 c8c081b7

+33 -31
+2 -2
include/linux/sunrpc/cache.h
··· 46 46 * 47 47 */ 48 48 struct cache_head { 49 - struct cache_head * next; 49 + struct hlist_node cache_list; 50 50 time_t expiry_time; /* After time time, don't use the data */ 51 51 time_t last_refresh; /* If CACHE_PENDING, this is when upcall 52 52 * was sent, else this is when update was received ··· 73 73 struct cache_detail { 74 74 struct module * owner; 75 75 int hash_size; 76 - struct cache_head ** hash_table; 76 + struct hlist_head * hash_table; 77 77 rwlock_t hash_lock; 78 78 79 79 atomic_t inuse; /* active user-space update or lookup */
+31 -29
net/sunrpc/cache.c
··· 44 44 static void cache_init(struct cache_head *h) 45 45 { 46 46 time_t now = seconds_since_boot(); 47 - h->next = NULL; 47 + INIT_HLIST_NODE(&h->cache_list); 48 48 h->flags = 0; 49 49 kref_init(&h->ref); 50 50 h->expiry_time = now + CACHE_NEW_EXPIRY; ··· 54 54 struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, 55 55 struct cache_head *key, int hash) 56 56 { 57 - struct cache_head **head, **hp; 58 - struct cache_head *new = NULL, *freeme = NULL; 57 + struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL; 58 + struct hlist_head *head; 59 59 60 60 head = &detail->hash_table[hash]; 61 61 62 62 read_lock(&detail->hash_lock); 63 63 64 - for (hp=head; *hp != NULL ; hp = &(*hp)->next) { 65 - struct cache_head *tmp = *hp; 64 + hlist_for_each_entry(tmp, head, cache_list) { 66 65 if (detail->match(tmp, key)) { 67 66 if (cache_is_expired(detail, tmp)) 68 67 /* This entry is expired, we will discard it. */ ··· 87 88 write_lock(&detail->hash_lock); 88 89 89 90 /* check if entry appeared while we slept */ 90 - for (hp=head; *hp != NULL ; hp = &(*hp)->next) { 91 - struct cache_head *tmp = *hp; 91 + hlist_for_each_entry(tmp, head, cache_list) { 92 92 if (detail->match(tmp, key)) { 93 93 if (cache_is_expired(detail, tmp)) { 94 - *hp = tmp->next; 95 - tmp->next = NULL; 94 + hlist_del_init(&tmp->cache_list); 96 95 detail->entries --; 97 96 freeme = tmp; 98 97 break; ··· 101 104 return tmp; 102 105 } 103 106 } 104 - new->next = *head; 105 - *head = new; 107 + 108 + hlist_add_head(&new->cache_list, head); 106 109 detail->entries++; 107 110 cache_get(new); 108 111 write_unlock(&detail->hash_lock); ··· 140 143 * If 'old' is not VALID, we update it directly, 141 144 * otherwise we need to replace it 142 145 */ 143 - struct cache_head **head; 144 146 struct cache_head *tmp; 145 147 146 148 if (!test_bit(CACHE_VALID, &old->flags)) { ··· 164 168 } 165 169 cache_init(tmp); 166 170 detail->init(tmp, old); 167 - head = &detail->hash_table[hash]; 168 171 169 172 
write_lock(&detail->hash_lock); 170 173 if (test_bit(CACHE_NEGATIVE, &new->flags)) 171 174 set_bit(CACHE_NEGATIVE, &tmp->flags); 172 175 else 173 176 detail->update(tmp, new); 174 - tmp->next = *head; 175 - *head = tmp; 177 + hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]); 176 178 detail->entries++; 177 179 cache_get(tmp); 178 180 cache_fresh_locked(tmp, new->expiry_time); ··· 410 416 /* find a non-empty bucket in the table */ 411 417 while (current_detail && 412 418 current_index < current_detail->hash_size && 413 - current_detail->hash_table[current_index] == NULL) 419 + hlist_empty(&current_detail->hash_table[current_index])) 414 420 current_index++; 415 421 416 422 /* find a cleanable entry in the bucket and clean it, or set to next bucket */ 417 423 418 424 if (current_detail && current_index < current_detail->hash_size) { 419 - struct cache_head *ch, **cp; 425 + struct cache_head *ch = NULL; 420 426 struct cache_detail *d; 427 + struct hlist_head *head; 428 + struct hlist_node *tmp; 421 429 422 430 write_lock(&current_detail->hash_lock); 423 431 424 432 /* Ok, now to clean this strand */ 425 433 426 - cp = & current_detail->hash_table[current_index]; 427 - for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) { 434 + head = &current_detail->hash_table[current_index]; 435 + hlist_for_each_entry_safe(ch, tmp, head, cache_list) { 428 436 if (current_detail->nextcheck > ch->expiry_time) 429 437 current_detail->nextcheck = ch->expiry_time+1; 430 438 if (!cache_is_expired(current_detail, ch)) 431 439 continue; 432 440 433 - *cp = ch->next; 434 - ch->next = NULL; 441 + hlist_del_init(&ch->cache_list); 435 442 current_detail->entries--; 436 443 rv = 1; 437 444 break; ··· 1279 1284 hash = n >> 32; 1280 1285 entry = n & ((1LL<<32) - 1); 1281 1286 1282 - for (ch=cd->hash_table[hash]; ch; ch=ch->next) 1287 + hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list) 1283 1288 if (!entry--) 1284 1289 return ch; 1285 1290 n &= ~((1LL<<32) - 1); ··· 1287 1292 
hash++; 1288 1293 n += 1LL<<32; 1289 1294 } while(hash < cd->hash_size && 1290 - cd->hash_table[hash]==NULL); 1295 + hlist_empty(&cd->hash_table[hash])); 1291 1296 if (hash >= cd->hash_size) 1292 1297 return NULL; 1293 1298 *pos = n+1; 1294 - return cd->hash_table[hash]; 1299 + return hlist_entry_safe(cd->hash_table[hash].first, 1300 + struct cache_head, cache_list); 1295 1301 } 1296 1302 EXPORT_SYMBOL_GPL(cache_seq_start); 1297 1303 ··· 1304 1308 1305 1309 if (p == SEQ_START_TOKEN) 1306 1310 hash = 0; 1307 - else if (ch->next == NULL) { 1311 + else if (ch->cache_list.next == NULL) { 1308 1312 hash++; 1309 1313 *pos += 1LL<<32; 1310 1314 } else { 1311 1315 ++*pos; 1312 - return ch->next; 1316 + return hlist_entry_safe(ch->cache_list.next, 1317 + struct cache_head, cache_list); 1313 1318 } 1314 1319 *pos &= ~((1LL<<32) - 1); 1315 1320 while (hash < cd->hash_size && 1316 - cd->hash_table[hash] == NULL) { 1321 + hlist_empty(&cd->hash_table[hash])) { 1317 1322 hash++; 1318 1323 *pos += 1LL<<32; 1319 1324 } 1320 1325 if (hash >= cd->hash_size) 1321 1326 return NULL; 1322 1327 ++*pos; 1323 - return cd->hash_table[hash]; 1328 + return hlist_entry_safe(cd->hash_table[hash].first, 1329 + struct cache_head, cache_list); 1324 1330 } 1325 1331 EXPORT_SYMBOL_GPL(cache_seq_next); 1326 1332 ··· 1664 1666 struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net) 1665 1667 { 1666 1668 struct cache_detail *cd; 1669 + int i; 1667 1670 1668 1671 cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL); 1669 1672 if (cd == NULL) 1670 1673 return ERR_PTR(-ENOMEM); 1671 1674 1672 - cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *), 1675 + cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head), 1673 1676 GFP_KERNEL); 1674 1677 if (cd->hash_table == NULL) { 1675 1678 kfree(cd); 1676 1679 return ERR_PTR(-ENOMEM); 1677 1680 } 1681 + 1682 + for (i = 0; i < cd->hash_size; i++) 1683 + INIT_HLIST_HEAD(&cd->hash_table[i]); 1678 1684 
cd->net = net; 1679 1685 return cd; 1680 1686 }