Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Consistently use bpf_rcu_lock_held() everywhere

We have many places which open-code what is now the bpf_rcu_lock_held()
macro, so replace all those places with a clean and short macro invocation.
For that, move bpf_rcu_lock_held() macro into include/linux/bpf.h.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/bpf/20251014201403.4104511-1-andrii@kernel.org

authored by

Andrii Nakryiko and committed by
Daniel Borkmann
48a97ffc 39e9d5f6

+14 -25
+3
include/linux/bpf.h
··· 2381 2381 bool bpf_jit_bypass_spec_v1(void); 2382 2382 bool bpf_jit_bypass_spec_v4(void); 2383 2383 2384 + #define bpf_rcu_lock_held() \ 2385 + (rcu_read_lock_held() || rcu_read_lock_trace_held() || rcu_read_lock_bh_held()) 2386 + 2384 2387 #ifdef CONFIG_BPF_SYSCALL 2385 2388 DECLARE_PER_CPU(int, bpf_prog_active); 2386 2389 extern struct mutex bpf_stats_enabled_mutex;
-3
include/linux/bpf_local_storage.h
··· 18 18 19 19 #define BPF_LOCAL_STORAGE_CACHE_SIZE 16 20 20 21 - #define bpf_rcu_lock_held() \ 22 - (rcu_read_lock_held() || rcu_read_lock_trace_held() || \ 23 - rcu_read_lock_bh_held()) 24 21 struct bpf_local_storage_map_bucket { 25 22 struct hlist_head list; 26 23 raw_spinlock_t lock;
+7 -14
kernel/bpf/hashtab.c
··· 657 657 struct htab_elem *l; 658 658 u32 hash, key_size; 659 659 660 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 661 - !rcu_read_lock_bh_held()); 660 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 662 661 663 662 key_size = map->key_size; 664 663 ··· 1085 1086 /* unknown flags */ 1086 1087 return -EINVAL; 1087 1088 1088 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1089 - !rcu_read_lock_bh_held()); 1089 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 1090 1090 1091 1091 key_size = map->key_size; 1092 1092 ··· 1192 1194 /* unknown flags */ 1193 1195 return -EINVAL; 1194 1196 1195 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1196 - !rcu_read_lock_bh_held()); 1197 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 1197 1198 1198 1199 key_size = map->key_size; 1199 1200 ··· 1260 1263 /* unknown flags */ 1261 1264 return -EINVAL; 1262 1265 1263 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1264 - !rcu_read_lock_bh_held()); 1266 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 1265 1267 1266 1268 key_size = map->key_size; 1267 1269 ··· 1322 1326 /* unknown flags */ 1323 1327 return -EINVAL; 1324 1328 1325 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1326 - !rcu_read_lock_bh_held()); 1329 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 1327 1330 1328 1331 key_size = map->key_size; 1329 1332 ··· 1399 1404 u32 hash, key_size; 1400 1405 int ret; 1401 1406 1402 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1403 - !rcu_read_lock_bh_held()); 1407 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 1404 1408 1405 1409 key_size = map->key_size; 1406 1410 ··· 1434 1440 u32 hash, key_size; 1435 1441 int ret; 1436 1442 1437 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 1438 - !rcu_read_lock_bh_held()); 1443 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 1439 1444 1440 1445 key_size = map->key_size; 1441 1446
+4 -8
kernel/bpf/helpers.c
··· 42 42 */ 43 43 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) 44 44 { 45 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 46 - !rcu_read_lock_bh_held()); 45 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 47 46 return (unsigned long) map->ops->map_lookup_elem(map, key); 48 47 } 49 48 ··· 58 59 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key, 59 60 void *, value, u64, flags) 60 61 { 61 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 62 - !rcu_read_lock_bh_held()); 62 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 63 63 return map->ops->map_update_elem(map, key, value, flags); 64 64 } 65 65 ··· 75 77 76 78 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) 77 79 { 78 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 79 - !rcu_read_lock_bh_held()); 80 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 80 81 return map->ops->map_delete_elem(map, key); 81 82 } 82 83 ··· 131 134 132 135 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) 133 136 { 134 - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && 135 - !rcu_read_lock_bh_held()); 137 + WARN_ON_ONCE(!bpf_rcu_lock_held()); 136 138 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); 137 139 } 138 140