Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: softnet_data: Make xmit per task.

Softirq is preemptible on PREEMPT_RT. Without a per-CPU lock in
local_bh_disable() there is no guarantee that only one device is
transmitting at a time.
With preemption and multiple senders it is possible that the per-CPU
`recursion' counter gets incremented by different threads and exceeds
XMIT_RECURSION_LIMIT leading to a false positive recursion alert.
The `more' member is subject to similar problems if set by one thread
for one driver and wrongly used by another driver within another thread.

Instead of adding a lock to protect the per-CPU variable it is simpler
to make xmit per-task. Sending and receiving skbs always happens
in thread context anyway.

Having a lock to protect the per-CPU counter would block/serialize two
sending threads needlessly. It would also require a recursive lock to
ensure that the owner can increment the counter further.

Make the softnet_data.xmit a task_struct member on PREEMPT_RT. Add
needed wrapper.

Cc: Ben Segall <bsegall@google.com>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-9-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Sebastian Andrzej Siewior and committed by
Jakub Kicinski
ecefbc09 c67ef53a

+80 -12
+31 -11
include/linux/netdevice.h
··· 43 43 44 44 #include <linux/netdev_features.h> 45 45 #include <linux/neighbour.h> 46 + #include <linux/netdevice_xmit.h> 46 47 #include <uapi/linux/netdevice.h> 47 48 #include <uapi/linux/if_bonding.h> 48 49 #include <uapi/linux/pkt_cls.h> ··· 3224 3223 struct sk_buff_head xfrm_backlog; 3225 3224 #endif 3226 3225 /* written and read only by owning cpu: */ 3227 - struct { 3228 - u16 recursion; 3229 - u8 more; 3230 - #ifdef CONFIG_NET_EGRESS 3231 - u8 skip_txqueue; 3232 - #endif 3233 - } xmit; 3226 + struct netdev_xmit xmit; 3234 3227 #ifdef CONFIG_RPS 3235 3228 /* input_queue_head should be written by cpu owning this struct, 3236 3229 * and only read by other cpus. Worth using a cache line. ··· 3252 3257 3253 3258 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 3254 3259 3260 + #ifndef CONFIG_PREEMPT_RT 3255 3261 static inline int dev_recursion_level(void) 3256 3262 { 3257 3263 return this_cpu_read(softnet_data.xmit.recursion); 3258 3264 } 3265 + #else 3266 + static inline int dev_recursion_level(void) 3267 + { 3268 + return current->net_xmit.recursion; 3269 + } 3270 + 3271 + #endif 3259 3272 3260 3273 void __netif_schedule(struct Qdisc *q); 3261 3274 void netif_schedule_queue(struct netdev_queue *txq); ··· 4875 4872 return hwtstamps->hwtstamp; 4876 4873 } 4877 4874 4878 - static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, 4879 - struct sk_buff *skb, struct net_device *dev, 4880 - bool more) 4875 + #ifndef CONFIG_PREEMPT_RT 4876 + static inline void netdev_xmit_set_more(bool more) 4881 4877 { 4882 4878 __this_cpu_write(softnet_data.xmit.more, more); 4883 - return ops->ndo_start_xmit(skb, dev); 4884 4879 } 4885 4880 4886 4881 static inline bool netdev_xmit_more(void) 4887 4882 { 4888 4883 return __this_cpu_read(softnet_data.xmit.more); 4884 + } 4885 + #else 4886 + static inline void netdev_xmit_set_more(bool more) 4887 + { 4888 + current->net_xmit.more = more; 4889 + } 4890 + 4891 + static inline bool 
netdev_xmit_more(void) 4892 + { 4893 + return current->net_xmit.more; 4894 + } 4895 + #endif 4896 + 4897 + static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, 4898 + struct sk_buff *skb, struct net_device *dev, 4899 + bool more) 4900 + { 4901 + netdev_xmit_set_more(more); 4902 + return ops->ndo_start_xmit(skb, dev); 4889 4903 } 4890 4904 4891 4905 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
+13
include/linux/netdevice_xmit.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + #ifndef _LINUX_NETDEVICE_XMIT_H 3 + #define _LINUX_NETDEVICE_XMIT_H 4 + 5 + struct netdev_xmit { 6 + u16 recursion; 7 + u8 more; 8 + #ifdef CONFIG_NET_EGRESS 9 + u8 skip_txqueue; 10 + #endif 11 + }; 12 + 13 + #endif
+4 -1
include/linux/sched.h
··· 36 36 #include <linux/signal_types.h> 37 37 #include <linux/syscall_user_dispatch_types.h> 38 38 #include <linux/mm_types_task.h> 39 + #include <linux/netdevice_xmit.h> 39 40 #include <linux/task_io_accounting.h> 40 41 #include <linux/posix-timers_types.h> 41 42 #include <linux/restart_block.h> ··· 976 975 /* delay due to memory thrashing */ 977 976 unsigned in_thrashing:1; 978 977 #endif 979 - 978 + #ifdef CONFIG_PREEMPT_RT 979 + struct netdev_xmit net_xmit; 980 + #endif 980 981 unsigned long atomic_flags; /* Flags requiring atomic access. */ 981 982 982 983 struct restart_block restart_block;
+14
net/core/dev.c
··· 3940 3940 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); 3941 3941 } 3942 3942 3943 + #ifndef CONFIG_PREEMPT_RT 3943 3944 static bool netdev_xmit_txqueue_skipped(void) 3944 3945 { 3945 3946 return __this_cpu_read(softnet_data.xmit.skip_txqueue); ··· 3951 3950 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); 3952 3951 } 3953 3952 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 3953 + 3954 + #else 3955 + static bool netdev_xmit_txqueue_skipped(void) 3956 + { 3957 + return current->net_xmit.skip_txqueue; 3958 + } 3959 + 3960 + void netdev_xmit_skip_txqueue(bool skip) 3961 + { 3962 + current->net_xmit.skip_txqueue = skip; 3963 + } 3964 + EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); 3965 + #endif 3954 3966 #endif /* CONFIG_NET_EGRESS */ 3955 3967 3956 3968 #ifdef CONFIG_NET_XGRESS
+18
net/core/dev.h
··· 150 150 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu); 151 151 152 152 #define XMIT_RECURSION_LIMIT 8 153 + 154 + #ifndef CONFIG_PREEMPT_RT 153 155 static inline bool dev_xmit_recursion(void) 154 156 { 155 157 return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > ··· 167 165 { 168 166 __this_cpu_dec(softnet_data.xmit.recursion); 169 167 } 168 + #else 169 + static inline bool dev_xmit_recursion(void) 170 + { 171 + return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT); 172 + } 173 + 174 + static inline void dev_xmit_recursion_inc(void) 175 + { 176 + current->net_xmit.recursion++; 177 + } 178 + 179 + static inline void dev_xmit_recursion_dec(void) 180 + { 181 + current->net_xmit.recursion--; 182 + } 183 + #endif 170 184 171 185 int dev_set_hwtstamp_phylib(struct net_device *dev, 172 186 struct kernel_hwtstamp_config *cfg,