inetpeer: reduce stack usage

On 64bit arches, we use 752 bytes of stack when cleanup_once() is called
from inet_getpeer(): the 376 byte frame of inet_getpeer() plus the
376 byte frame of unlink_from_pool(), each holding its own AVL lookup
stack.

Let's share the AVL lookup stack to save ~376 bytes.
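
The pattern, as a minimal userspace sketch (illustration only, not the
kernel code; PEER_MAXDEPTH is the array bound used by inetpeer.c, the
function names are made up for the example):

	#include <stdio.h>

	#define PEER_MAXDEPTH 40	/* AVL depth bound, as in inetpeer.c */

	/* Before: the callee reserves its own copy of the walk stack on
	 * top of the one its caller already holds, so two ~320 byte
	 * arrays are live at once on the same call path.
	 */
	static void callee_own_copy(void)
	{
		void *stack[PEER_MAXDEPTH];

		stack[0] = NULL;
		printf("callee adds %zu bytes of stack\n", sizeof(stack));
	}

	/* After: the callee borrows the caller's array, so only one
	 * copy exists no matter how deep the calls go.
	 */
	static void callee_shared(void *stack[PEER_MAXDEPTH])
	{
		stack[0] = NULL;
	}

	int main(void)
	{
		void *stack[PEER_MAXDEPTH];	/* single, caller-owned copy */

		callee_own_copy();	/* array duplicated here */
		callee_shared(stack);	/* nothing duplicated here */
		return 0;
	}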

Before patch:

# objdump -d net/ipv4/inetpeer.o | scripts/checkstack.pl

0x000006c3 unlink_from_pool [inetpeer.o]: 376
0x00000721 unlink_from_pool [inetpeer.o]: 376
0x00000cb1 inet_getpeer [inetpeer.o]: 376
0x00000e6d inet_getpeer [inetpeer.o]: 376
0x0004 inet_initpeers [inetpeer.o]: 112
# size net/ipv4/inetpeer.o
   text    data     bss     dec     hex filename
   5320     432      21    5773    168d net/ipv4/inetpeer.o

After patch:

# objdump -d net/ipv4/inetpeer.o | scripts/checkstack.pl
0x00000c11 inet_getpeer [inetpeer.o]: 376
0x00000dcd inet_getpeer [inetpeer.o]: 376
0x00000ab9 peer_check_expire [inetpeer.o]: 328
0x00000b7f peer_check_expire [inetpeer.o]: 328
0x0004 inet_initpeers [inetpeer.o]: 112
# size net/ipv4/inetpeer.o
   text    data     bss     dec     hex filename
   5163     432      21    5616    15f0 net/ipv4/inetpeer.o
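
The two reports line up with the change: inet_getpeer() keeps its
376 bytes frame because it already held an AVL stack for its own
lookup; unlink_from_pool() disappears from the listing now that its
duplicate array is gone (its frame presumably falls below the
checkstack.pl reporting cutoff); and peer_check_expire() appears at
328 bytes because it now declares the array it hands down to
cleanup_once().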

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Scot Doyle <lkml@scotdoyle.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
Reviewed-by: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

---
 net/ipv4/inetpeer.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -354,7 +354,8 @@
 }
 
 /* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
 	int do_free;
 
@@ -369,7 +370,6 @@
 	 * We use refcnt=-1 to alert lockless readers this entry is deleted.
 	 */
 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
-		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 		struct inet_peer __rcu ***stackptr, ***delp;
 		if (lookup(&p->daddr, stack, base) != p)
 			BUG();
@@ -422,7 +422,7 @@
 }
 
 /* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
 	struct inet_peer *p = NULL;
 
@@ -454,7 +454,7 @@
 		 * happen because of entry limits in route cache. */
 		return -1;
 
-	unlink_from_pool(p, peer_to_base(p));
+	unlink_from_pool(p, peer_to_base(p), stack);
 	return 0;
 }
 
@@ -524,7 +524,7 @@
 
 	if (base->total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
-		cleanup_once(0);
+		cleanup_once(0, stack);
 
 	return p;
 }
@@ -540,6 +540,7 @@
 {
 	unsigned long now = jiffies;
 	int ttl, total;
+	struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 
 	total = compute_total();
 	if (total >= inet_peer_threshold)
@@ -549,7 +550,7 @@
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
 					total / inet_peer_threshold * HZ;
-	while (!cleanup_once(ttl)) {
+	while (!cleanup_once(ttl, stack)) {
 		if (jiffies != now)
 			break;
 	}
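
As a back-of-the-envelope check on the 376 and 328 byte figures
(assuming 8 byte pointers and the PEER_MAXDEPTH of 40 from inetpeer.c;
struct inet_peer is left opaque since only pointer sizes matter):

	#include <stdio.h>

	#define PEER_MAXDEPTH 40	/* assumed: inetpeer.c value */

	struct inet_peer;		/* opaque: we only size pointers */

	int main(void)
	{
		/* the shared walk stack, minus the kernel-only __rcu
		 * annotation (a sparse qualifier, irrelevant here)
		 */
		struct inet_peer **stack[PEER_MAXDEPTH];

		stack[0] = NULL;
		/* 40 * 8 = 320 bytes; the rest of what checkstack.pl
		 * reports is other locals plus frame overhead
		 */
		printf("walk stack array: %zu bytes\n", sizeof(stack));
		return 0;
	}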