Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: Add rfs_is_needed() helper

Add a helper to check whether RFS is needed or not. This makes the code a
bit cleaner and allows the next patch to have MPTCP use this helper to
decide whether or not to iterate over the subflows.

tun_flow_update() was calling sock_rps_record_flow_hash() regardless of
the state of rfs_needed. This was not really a bug as sock_flow_table
simply ends up being NULL and thus everything will be fine.
This commit here thus also implicitly makes tun_flow_update() respect
the state of rfs_needed.

Suggested-by: Matthieu Baerts <matttbe@kernel.org>
Signed-off-by: Christoph Paasch <cpaasch@openai.com>
Acked-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20250902-net-next-mptcp-misc-feat-6-18-v2-3-fa02bb3188b1@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Christoph Paasch and committed by
Jakub Kicinski
92932491 3fff72f8

+56 -29
+56 -29
include/net/rps.h
··· 85 85 WRITE_ONCE(table->ents[index], val); 86 86 } 87 87 88 - #endif /* CONFIG_RPS */ 89 - 90 - static inline void sock_rps_record_flow_hash(__u32 hash) 88 + static inline void _sock_rps_record_flow_hash(__u32 hash) 91 89 { 92 - #ifdef CONFIG_RPS 93 90 struct rps_sock_flow_table *sock_flow_table; 94 91 95 92 if (!hash) ··· 96 99 if (sock_flow_table) 97 100 rps_record_sock_flow(sock_flow_table, hash); 98 101 rcu_read_unlock(); 99 - #endif 100 102 } 101 103 102 - static inline void sock_rps_record_flow(const struct sock *sk) 104 + static inline void _sock_rps_record_flow(const struct sock *sk) 103 105 { 104 - #ifdef CONFIG_RPS 105 - if (static_branch_unlikely(&rfs_needed)) { 106 - /* Reading sk->sk_rxhash might incur an expensive cache line 107 - * miss. 108 - * 109 - * TCP_ESTABLISHED does cover almost all states where RFS 110 - * might be useful, and is cheaper [1] than testing : 111 - * IPv4: inet_sk(sk)->inet_daddr 112 - * IPv6: ipv6_addr_any(&sk->sk_v6_daddr) 113 - * OR an additional socket flag 114 - * [1] : sk_state and sk_prot are in the same cache line. 106 + /* Reading sk->sk_rxhash might incur an expensive cache line 107 + * miss. 108 + * 109 + * TCP_ESTABLISHED does cover almost all states where RFS 110 + * might be useful, and is cheaper [1] than testing : 111 + * IPv4: inet_sk(sk)->inet_daddr 112 + * IPv6: ipv6_addr_any(&sk->sk_v6_daddr) 113 + * OR an additional socket flag 114 + * [1] : sk_state and sk_prot are in the same cache line. 115 + */ 116 + if (sk->sk_state == TCP_ESTABLISHED) { 117 + /* This READ_ONCE() is paired with the WRITE_ONCE() 118 + * from sock_rps_save_rxhash() and sock_rps_reset_rxhash(). 115 119 */ 116 - if (sk->sk_state == TCP_ESTABLISHED) { 117 - /* This READ_ONCE() is paired with the WRITE_ONCE() 118 - * from sock_rps_save_rxhash() and sock_rps_reset_rxhash(). 
119 - */ 120 - sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash)); 121 - } 120 + _sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash)); 122 121 } 123 - #endif 124 122 } 125 123 126 - static inline void sock_rps_delete_flow(const struct sock *sk) 124 + static inline void _sock_rps_delete_flow(const struct sock *sk) 127 125 { 128 - #ifdef CONFIG_RPS 129 126 struct rps_sock_flow_table *table; 130 127 u32 hash, index; 131 - 132 - if (!static_branch_unlikely(&rfs_needed)) 133 - return; 134 128 135 129 hash = READ_ONCE(sk->sk_rxhash); 136 130 if (!hash) ··· 135 147 WRITE_ONCE(table->ents[index], RPS_NO_CPU); 136 148 } 137 149 rcu_read_unlock(); 150 + } 151 + #endif /* CONFIG_RPS */ 152 + 153 + static inline bool rfs_is_needed(void) 154 + { 155 + #ifdef CONFIG_RPS 156 + return static_branch_unlikely(&rfs_needed); 157 + #else 158 + return false; 159 + #endif 160 + } 161 + 162 + static inline void sock_rps_record_flow_hash(__u32 hash) 163 + { 164 + #ifdef CONFIG_RPS 165 + if (!rfs_is_needed()) 166 + return; 167 + 168 + _sock_rps_record_flow_hash(hash); 169 + #endif 170 + } 171 + 172 + static inline void sock_rps_record_flow(const struct sock *sk) 173 + { 174 + #ifdef CONFIG_RPS 175 + if (!rfs_is_needed()) 176 + return; 177 + 178 + _sock_rps_record_flow(sk); 179 + #endif 180 + } 181 + 182 + static inline void sock_rps_delete_flow(const struct sock *sk) 183 + { 184 + #ifdef CONFIG_RPS 185 + if (!rfs_is_needed()) 186 + return; 187 + 188 + _sock_rps_delete_flow(sk); 138 189 #endif 139 190 } 140 191