Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: indirect call helpers for ipv4/ipv6 dst_check functions

This patch avoids the indirect call for the common case:
ip6_dst_check and ipv4_dst_check

Signed-off-by: Brian Vazquez <brianvv@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Brian Vazquez; committed by Jakub Kicinski.
Commit: bbd807df (parent: f67fbeae)

+34 -9
+6 -1
include/net/dst.h
··· 459 459 ip6_input, ip_local_deliver, skb); 460 460 } 461 461 462 + INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *, 463 + u32)); 464 + INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, 465 + u32)); 462 466 static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie) 463 467 { 464 468 if (dst->obsolete) 465 - dst = dst->ops->check(dst, cookie); 469 + dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, 470 + ipv4_dst_check, dst, cookie); 466 471 return dst; 467 472 } 468 473
+10 -2
net/core/sock.c
··· 526 526 } 527 527 EXPORT_SYMBOL(__sk_receive_skb); 528 528 529 + INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *, 530 + u32)); 531 + INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, 532 + u32)); 529 533 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 530 534 { 531 535 struct dst_entry *dst = __sk_dst_get(sk); 532 536 533 - if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 537 + if (dst && dst->obsolete && 538 + INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check, 539 + dst, cookie) == NULL) { 534 540 sk_tx_queue_clear(sk); 535 541 sk->sk_dst_pending_confirm = 0; 536 542 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); ··· 552 546 { 553 547 struct dst_entry *dst = sk_dst_get(sk); 554 548 555 - if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 549 + if (dst && dst->obsolete && 550 + INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check, 551 + dst, cookie) == NULL) { 556 552 sk_dst_reset(sk); 557 553 dst_release(dst); 558 554 return NULL;
+5 -2
net/ipv4/route.c
··· 133 133 * Interface to generic destination cache. 134 134 */ 135 135 136 - static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); 136 + INDIRECT_CALLABLE_SCOPE 137 + struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie); 137 138 static unsigned int ipv4_default_advmss(const struct dst_entry *dst); 138 139 INDIRECT_CALLABLE_SCOPE 139 140 unsigned int ipv4_mtu(const struct dst_entry *dst); ··· 1189 1188 } 1190 1189 EXPORT_SYMBOL_GPL(ipv4_sk_redirect); 1191 1190 1192 - static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) 1191 + INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst, 1192 + u32 cookie) 1193 1193 { 1194 1194 struct rtable *rt = (struct rtable *) dst; 1195 1195 ··· 1206 1204 return NULL; 1207 1205 return dst; 1208 1206 } 1207 + EXPORT_SYMBOL(ipv4_dst_check); 1209 1208 1210 1209 static void ipv4_send_dest_unreach(struct sk_buff *skb) 1211 1210 {
+4 -1
net/ipv4/tcp_ipv4.c
··· 1649 1649 return mss; 1650 1650 } 1651 1651 1652 + INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, 1653 + u32)); 1652 1654 /* The socket must have it's spinlock held when we get 1653 1655 * here, unless it is a TCP_LISTEN socket. 1654 1656 * ··· 1670 1668 sk_mark_napi_id(sk, skb); 1671 1669 if (dst) { 1672 1670 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || 1673 - !dst->ops->check(dst, 0)) { 1671 + !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check, 1672 + dst, 0)) { 1674 1673 dst_release(dst); 1675 1674 sk->sk_rx_dst = NULL; 1676 1675 }
+5 -2
net/ipv6/route.c
··· 81 81 RT6_NUD_SUCCEED = 1 82 82 }; 83 83 84 - static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 84 + INDIRECT_CALLABLE_SCOPE 85 + struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); 85 86 static unsigned int ip6_default_advmss(const struct dst_entry *dst); 86 87 INDIRECT_CALLABLE_SCOPE 87 88 unsigned int ip6_mtu(const struct dst_entry *dst); ··· 2613 2612 return NULL; 2614 2613 } 2615 2614 2616 - static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) 2615 + INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst, 2616 + u32 cookie) 2617 2617 { 2618 2618 struct dst_entry *dst_ret; 2619 2619 struct fib6_info *from; ··· 2644 2642 2645 2643 return dst_ret; 2646 2644 } 2645 + EXPORT_SYMBOL(ip6_dst_check); 2647 2646 2648 2647 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) 2649 2648 {
+4 -1
net/ipv6/tcp_ipv6.c
··· 1420 1420 return NULL; 1421 1421 } 1422 1422 1423 + INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, 1424 + u32)); 1423 1425 /* The socket must have it's spinlock held when we get 1424 1426 * here, unless it is a TCP_LISTEN socket. 1425 1427 * ··· 1475 1473 sk_mark_napi_id(sk, skb); 1476 1474 if (dst) { 1477 1475 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || 1478 - dst->ops->check(dst, np->rx_dst_cookie) == NULL) { 1476 + INDIRECT_CALL_1(dst->ops->check, ip6_dst_check, 1477 + dst, np->rx_dst_cookie) == NULL) { 1479 1478 dst_release(dst); 1480 1479 sk->sk_rx_dst = NULL; 1481 1480 }