Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Remove smap argument from bpf_selem_free()

Since selem already saves a pointer to smap, use it instead of an
additional argument in bpf_selem_free(). This requires moving the
SDATA(selem)->smap assignment from bpf_selem_link_map() to
bpf_selem_alloc() since bpf_selem_free() may be called without the
selem being linked to smap in bpf_local_storage_update().

Signed-off-by: Amery Hung <ameryhung@gmail.com>
Reviewed-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20251114201329.3275875-3-ameryhung@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Amery Hung and committed by Alexei Starovoitov.
Commit e76a33e1 (parent 0e854e55)

+11 -11
-1
include/linux/bpf_local_storage.h
···
 		     bool swap_uptrs, gfp_t gfp_flags);

 void bpf_selem_free(struct bpf_local_storage_elem *selem,
-		    struct bpf_local_storage_map *smap,
 		    bool reuse_now);

 int
+10 -9
kernel/bpf/bpf_local_storage.c
···
 	}

 	if (selem) {
+		RCU_INIT_POINTER(SDATA(selem)->smap, smap);
+
 		if (value) {
 			/* No need to call check_and_init_map_value as memory is zero init */
 			copy_map_value(&smap->map, SDATA(selem)->data, value);
···
 }

 void bpf_selem_free(struct bpf_local_storage_elem *selem,
-		    struct bpf_local_storage_map *smap,
 		    bool reuse_now)
 {
+	struct bpf_local_storage_map *smap;
+
+	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
+
 	if (!smap->bpf_ma) {
 		/* Only task storage has uptrs and task storage
 		 * has moved to bpf_mem_alloc. Meaning smap->bpf_ma == true
···
 static void bpf_selem_free_list(struct hlist_head *list, bool reuse_now)
 {
 	struct bpf_local_storage_elem *selem;
-	struct bpf_local_storage_map *smap;
 	struct hlist_node *n;

 	/* The "_safe" iteration is needed.
···
 	 * but bpf_selem_free will use the selem->rcu_head
 	 * which is union-ized with the selem->free_node.
 	 */
-	hlist_for_each_entry_safe(selem, n, list, free_node) {
-		smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
-		bpf_selem_free(selem, smap, reuse_now);
-	}
+	hlist_for_each_entry_safe(selem, n, list, free_node)
+		bpf_selem_free(selem, reuse_now);
 }

 /* local_storage->lock must be held and selem->local_storage == local_storage.
···
 	unsigned long flags;

 	raw_spin_lock_irqsave(&b->lock, flags);
-	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
 	hlist_add_head_rcu(&selem->map_node, &b->list);
 	raw_spin_unlock_irqrestore(&b->lock, flags);
 }
···
 	err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
 	if (err) {
-		bpf_selem_free(selem, smap, true);
+		bpf_selem_free(selem, true);
 		mem_uncharge(smap, owner, smap->elem_size);
 		return ERR_PTR(err);
 	}
···
 	bpf_selem_free_list(&old_selem_free_list, false);
 	if (alloc_selem) {
 		mem_uncharge(smap, owner, smap->elem_size);
-		bpf_selem_free(alloc_selem, smap, true);
+		bpf_selem_free(alloc_selem, true);
 	}
 	return err ? ERR_PTR(err) : SDATA(selem);
 }
+1 -1
net/core/bpf_sk_storage.c
···
 	} else {
 		ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
 		if (ret) {
-			bpf_selem_free(copy_selem, smap, true);
+			bpf_selem_free(copy_selem, true);
 			atomic_sub(smap->elem_size,
 				   &newsk->sk_omem_alloc);
 			bpf_map_put(map);