Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: introduce struct net_hotdata

Instead of spreading networking critical fields
all over the place, add a custom net_hotdata
structure so that we can precisely control its layout.

In this first patch, move:

- gro_normal_batch used in rx (GRO stack)
- offload_base used in rx and tx (GRO and TSO stacks)

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20240306160031.874438-2-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Eric Dumazet and committed by
Jakub Kicinski
2658b5a8 d3423ed9

+37 -16
-1
include/linux/netdevice.h
··· 4796 4796 extern int netdev_max_backlog; 4797 4797 extern int dev_rx_weight; 4798 4798 extern int dev_tx_weight; 4799 - extern int gro_normal_batch; 4800 4799 4801 4800 enum { 4802 4801 NESTED_SYNC_IMM_BIT,
+2 -3
include/net/gro.h
··· 9 9 #include <net/ip6_checksum.h> 10 10 #include <linux/skbuff.h> 11 11 #include <net/udp.h> 12 + #include <net/hotdata.h> 12 13 13 14 struct napi_gro_cb { 14 15 union { ··· 447 446 { 448 447 list_add_tail(&skb->list, &napi->rx_list); 449 448 napi->rx_count += segs; 450 - if (napi->rx_count >= READ_ONCE(gro_normal_batch)) 449 + if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch)) 451 450 gro_normal_list(napi); 452 451 } 453 452 ··· 493 492 } 494 493 #endif 495 494 } 496 - 497 - extern struct list_head offload_base; 498 495 499 496 #endif /* _NET_IPV6_GRO_H */
+15
include/net/hotdata.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + #ifndef _NET_HOTDATA_H 3 + #define _NET_HOTDATA_H 4 + 5 + #include <linux/types.h> 6 + 7 + /* Read mostly data used in network fast paths. */ 8 + struct net_hotdata { 9 + struct list_head offload_base; 10 + int gro_normal_batch; 11 + }; 12 + 13 + extern struct net_hotdata net_hotdata; 14 + 15 + #endif /* _NET_HOTDATA_H */
+1
net/core/Makefile
··· 18 18 obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o 19 19 20 20 obj-y += net-sysfs.o 21 + obj-y += hotdata.o 21 22 obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o 22 23 obj-$(CONFIG_PROC_FS) += net-procfs.o 23 24 obj-$(CONFIG_NET_PKTGEN) += pktgen.o
+6 -9
net/core/gro.c
··· 10 10 #define GRO_MAX_HEAD (MAX_HEADER + 128) 11 11 12 12 static DEFINE_SPINLOCK(offload_lock); 13 - struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base); 14 - /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ 15 - int gro_normal_batch __read_mostly = 8; 16 13 17 14 /** 18 15 * dev_add_offload - register offload handlers ··· 28 31 struct packet_offload *elem; 29 32 30 33 spin_lock(&offload_lock); 31 - list_for_each_entry(elem, &offload_base, list) { 34 + list_for_each_entry(elem, &net_hotdata.offload_base, list) { 32 35 if (po->priority < elem->priority) 33 36 break; 34 37 } ··· 52 55 */ 53 56 static void __dev_remove_offload(struct packet_offload *po) 54 57 { 55 - struct list_head *head = &offload_base; 58 + struct list_head *head = &net_hotdata.offload_base; 56 59 struct packet_offload *po1; 57 60 58 61 spin_lock(&offload_lock); ··· 232 235 233 236 static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) 234 237 { 238 + struct list_head *head = &net_hotdata.offload_base; 235 239 struct packet_offload *ptype; 236 240 __be16 type = skb->protocol; 237 - struct list_head *head = &offload_base; 238 241 int err = -ENOENT; 239 242 240 243 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)); ··· 441 444 { 442 445 u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); 443 446 struct gro_list *gro_list = &napi->gro_hash[bucket]; 444 - struct list_head *head = &offload_base; 447 + struct list_head *head = &net_hotdata.offload_base; 445 448 struct packet_offload *ptype; 446 449 __be16 type = skb->protocol; 447 450 struct sk_buff *pp = NULL; ··· 547 550 548 551 struct packet_offload *gro_find_receive_by_type(__be16 type) 549 552 { 550 - struct list_head *offload_head = &offload_base; 553 + struct list_head *offload_head = &net_hotdata.offload_base; 551 554 struct packet_offload *ptype; 552 555 553 556 list_for_each_entry_rcu(ptype, offload_head, list) { ··· 561 564 562 565 struct packet_offload 
*gro_find_complete_by_type(__be16 type) 563 566 { 564 - struct list_head *offload_head = &offload_base; 567 + struct list_head *offload_head = &net_hotdata.offload_base; 565 568 struct packet_offload *ptype; 566 569 567 570 list_for_each_entry_rcu(ptype, offload_head, list) {
+2 -2
net/core/gso.c
··· 17 17 struct packet_offload *ptype; 18 18 19 19 rcu_read_lock(); 20 - list_for_each_entry_rcu(ptype, &offload_base, list) { 20 + list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) { 21 21 if (ptype->type == type && ptype->callbacks.gso_segment) { 22 22 segs = ptype->callbacks.gso_segment(skb, features); 23 23 break; ··· 48 48 __skb_pull(skb, vlan_depth); 49 49 50 50 rcu_read_lock(); 51 - list_for_each_entry_rcu(ptype, &offload_base, list) { 51 + list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) { 52 52 if (ptype->type == type && ptype->callbacks.gso_segment) { 53 53 segs = ptype->callbacks.gso_segment(skb, features); 54 54 break;
+9
net/core/hotdata.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + #include <net/hotdata.h> 3 + #include <linux/cache.h> 4 + #include <linux/list.h> 5 + 6 + struct net_hotdata net_hotdata __cacheline_aligned = { 7 + .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base), 8 + .gro_normal_batch = 8, 9 + };
+2 -1
net/core/sysctl_net_core.c
··· 23 23 #include <net/net_ratelimit.h> 24 24 #include <net/busy_poll.h> 25 25 #include <net/pkt_sched.h> 26 + #include <net/hotdata.h> 26 27 27 28 #include "dev.h" 28 29 ··· 633 632 }, 634 633 { 635 634 .procname = "gro_normal_batch", 636 - .data = &gro_normal_batch, 635 + .data = &net_hotdata.gro_normal_batch, 637 636 .maxlen = sizeof(unsigned int), 638 637 .mode = 0644, 639 638 .proc_handler = proc_dointvec_minmax,