Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: move netdev_max_backlog to net_hotdata

netdev_max_backlog is used in rx fast path.

Move it to net_hotdata for better cache locality.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20240306160031.874438-6-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Eric Dumazet and committed by
Jakub Kicinski
edbc666c 0b91fa4b

+14 -10
-1
include/linux/netdevice.h
··· 4793 4793 const struct pcpu_sw_netstats __percpu *netstats); 4794 4794 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); 4795 4795 4796 - extern int netdev_max_backlog; 4797 4796 extern int dev_rx_weight; 4798 4797 extern int dev_tx_weight; 4799 4798
+1
include/net/hotdata.h
··· 12 12 int netdev_budget; 13 13 int netdev_budget_usecs; 14 14 int tstamp_prequeue; 15 + int max_backlog; 15 16 }; 16 17 17 18 extern struct net_hotdata net_hotdata;
+3 -5
net/core/dev.c
··· 4404 4404 * Receiver routines 4405 4405 *************************************************************************/ 4406 4406 4407 - int netdev_max_backlog __read_mostly = 1000; 4408 - EXPORT_SYMBOL(netdev_max_backlog); 4409 - 4410 4407 unsigned int sysctl_skb_defer_max __read_mostly = 64; 4411 4408 int weight_p __read_mostly = 64; /* old backlog weight */ 4412 4409 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */ ··· 4710 4713 struct softnet_data *sd; 4711 4714 unsigned int old_flow, new_flow; 4712 4715 4713 - if (qlen < (READ_ONCE(netdev_max_backlog) >> 1)) 4716 + if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1)) 4714 4717 return false; 4715 4718 4716 4719 sd = this_cpu_ptr(&softnet_data); ··· 4758 4761 if (!netif_running(skb->dev)) 4759 4762 goto drop; 4760 4763 qlen = skb_queue_len(&sd->input_pkt_queue); 4761 - if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) { 4764 + if (qlen <= READ_ONCE(net_hotdata.max_backlog) && 4765 + !skb_flow_limit(skb, qlen)) { 4762 4766 if (qlen) { 4763 4767 enqueue: 4764 4768 __skb_queue_tail(&sd->input_pkt_queue, skb);
+2 -1
net/core/gro_cells.c
··· 3 3 #include <linux/slab.h> 4 4 #include <linux/netdevice.h> 5 5 #include <net/gro_cells.h> 6 + #include <net/hotdata.h> 6 7 7 8 struct gro_cell { 8 9 struct sk_buff_head napi_skbs; ··· 27 26 28 27 cell = this_cpu_ptr(gcells->cells); 29 28 30 - if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) { 29 + if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) { 31 30 drop: 32 31 dev_core_stats_rx_dropped_inc(dev); 33 32 kfree_skb(skb);
+2
net/core/hotdata.c
··· 15 15 .netdev_budget_usecs = 2 * USEC_PER_SEC / HZ, 16 16 17 17 .tstamp_prequeue = 1, 18 + .max_backlog = 1000, 18 19 }; 20 + EXPORT_SYMBOL(net_hotdata);
+1 -1
net/core/sysctl_net_core.c
··· 440 440 }, 441 441 { 442 442 .procname = "netdev_max_backlog", 443 - .data = &netdev_max_backlog, 443 + .data = &net_hotdata.max_backlog, 444 444 .maxlen = sizeof(int), 445 445 .mode = 0644, 446 446 .proc_handler = proc_dointvec
+3 -1
net/xfrm/espintcp.c
··· 10 10 #if IS_ENABLED(CONFIG_IPV6) 11 11 #include <net/ipv6_stubs.h> 12 12 #endif 13 + #include <net/hotdata.h> 13 14 14 15 static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb, 15 16 struct sock *sk) ··· 170 169 { 171 170 struct espintcp_ctx *ctx = espintcp_getctx(sk); 172 171 173 - if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog)) 172 + if (skb_queue_len(&ctx->out_queue) >= 173 + READ_ONCE(net_hotdata.max_backlog)) 174 174 return -ENOBUFS; 175 175 176 176 __skb_queue_tail(&ctx->out_queue, skb);
+2 -1
net/xfrm/xfrm_input.c
··· 21 21 #include <net/ip_tunnels.h> 22 22 #include <net/ip6_tunnel.h> 23 23 #include <net/dst_metadata.h> 24 + #include <net/hotdata.h> 24 25 25 26 #include "xfrm_inout.h" 26 27 ··· 765 764 766 765 trans = this_cpu_ptr(&xfrm_trans_tasklet); 767 766 768 - if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog)) 767 + if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog)) 769 768 return -ENOBUFS; 770 769 771 770 BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));