Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'rfs-lockless-annotate'

Eric Dumazet says:

====================
rfs: annotate lockless accesses

rfs runs without locks held, so we should annotate
reads and writes to shared variables.

It should prevent compilers from forcing writes
in the following situation:

if (var != val)
var = val;

A compiler could indeed simply avoid the conditional:

var = val;

This matters if var is shared between many cpus.

v2: aligns one closing bracket (Simon)
adds Fixes: tags (Jakub)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+22 -9
+5 -2
include/linux/netdevice.h
··· 768 768 /* We only give a hint, preemption can change CPU under us */ 769 769 val |= raw_smp_processor_id(); 770 770 771 - if (table->ents[index] != val) 772 - table->ents[index] = val; 771 + /* The following WRITE_ONCE() is paired with the READ_ONCE() 772 + * here, and another one in get_rps_cpu(). 773 + */ 774 + if (READ_ONCE(table->ents[index]) != val) 775 + WRITE_ONCE(table->ents[index], val); 773 776 } 774 777 } 775 778
+13 -5
include/net/sock.h
··· 1152 1152 * OR an additional socket flag 1153 1153 * [1] : sk_state and sk_prot are in the same cache line. 1154 1154 */ 1155 - if (sk->sk_state == TCP_ESTABLISHED) 1156 - sock_rps_record_flow_hash(sk->sk_rxhash); 1155 + if (sk->sk_state == TCP_ESTABLISHED) { 1156 + /* This READ_ONCE() is paired with the WRITE_ONCE() 1157 + * from sock_rps_save_rxhash() and sock_rps_reset_rxhash(). 1158 + */ 1159 + sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash)); 1160 + } 1157 1161 } 1158 1162 #endif 1159 1163 } ··· 1166 1162 const struct sk_buff *skb) 1167 1163 { 1168 1164 #ifdef CONFIG_RPS 1169 - if (unlikely(sk->sk_rxhash != skb->hash)) 1170 - sk->sk_rxhash = skb->hash; 1165 + /* The following WRITE_ONCE() is paired with the READ_ONCE() 1166 + * here, and another one in sock_rps_record_flow(). 1167 + */ 1168 + if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash)) 1169 + WRITE_ONCE(sk->sk_rxhash, skb->hash); 1171 1170 #endif 1172 1171 } 1173 1172 1174 1173 static inline void sock_rps_reset_rxhash(struct sock *sk) 1175 1174 { 1176 1175 #ifdef CONFIG_RPS 1177 - sk->sk_rxhash = 0; 1176 + /* Paired with READ_ONCE() in sock_rps_record_flow() */ 1177 + WRITE_ONCE(sk->sk_rxhash, 0); 1178 1178 #endif 1179 1179 } 1180 1180
+4 -2
net/core/dev.c
··· 4471 4471 u32 next_cpu; 4472 4472 u32 ident; 4473 4473 4474 - /* First check into global flow table if there is a match */ 4475 - ident = sock_flow_table->ents[hash & sock_flow_table->mask]; 4474 + /* First check into global flow table if there is a match. 4475 + * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow(). 4476 + */ 4477 + ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]); 4476 4478 if ((ident ^ hash) & ~rps_cpu_mask) 4477 4479 goto try_rps; 4478 4480