Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add test to verify freeing the special fields in pcpu maps

Add test to verify that updating [lru_,]percpu_hash maps decrements
refcount when BPF_KPTR_REF objects are involved.

The tests perform the following steps:
- Call update_elem() to insert an initial value.
- Use bpf_refcount_acquire() to increment the refcount.
- Store the node pointer in the map value.
- Add the node to a linked list.
- Probe-read the refcount and verify it is *2*.
- Call update_elem() again to trigger the refcount decrement.
- Probe-read the refcount and verify it is *1*.

Signed-off-by: Leon Hwang <leon.hwang@linux.dev>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20251105151407.12723-3-leon.hwang@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Leon Hwang; committed by Alexei Starovoitov.
c1cbf0d2 6af6e49a

+116
+56
tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
··· 44 44 ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval"); 45 45 refcounted_kptr__destroy(skel); 46 46 } 47 + 48 + void test_percpu_hash_refcounted_kptr_refcount_leak(void) 49 + { 50 + struct refcounted_kptr *skel; 51 + int cpu_nr, fd, err, key = 0; 52 + struct bpf_map *map; 53 + size_t values_sz; 54 + u64 *values; 55 + LIBBPF_OPTS(bpf_test_run_opts, opts, 56 + .data_in = &pkt_v4, 57 + .data_size_in = sizeof(pkt_v4), 58 + .repeat = 1, 59 + ); 60 + 61 + cpu_nr = libbpf_num_possible_cpus(); 62 + if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus")) 63 + return; 64 + 65 + values = calloc(cpu_nr, sizeof(u64)); 66 + if (!ASSERT_OK_PTR(values, "calloc values")) 67 + return; 68 + 69 + skel = refcounted_kptr__open_and_load(); 70 + if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) { 71 + free(values); 72 + return; 73 + } 74 + 75 + values_sz = cpu_nr * sizeof(u64); 76 + memset(values, 0, values_sz); 77 + 78 + map = skel->maps.percpu_hash; 79 + err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0); 80 + if (!ASSERT_OK(err, "bpf_map__update_elem")) 81 + goto out; 82 + 83 + fd = bpf_program__fd(skel->progs.percpu_hash_refcount_leak); 84 + err = bpf_prog_test_run_opts(fd, &opts); 85 + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) 86 + goto out; 87 + if (!ASSERT_EQ(opts.retval, 2, "opts.retval")) 88 + goto out; 89 + 90 + err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0); 91 + if (!ASSERT_OK(err, "bpf_map__update_elem")) 92 + goto out; 93 + 94 + fd = bpf_program__fd(skel->progs.check_percpu_hash_refcount); 95 + err = bpf_prog_test_run_opts(fd, &opts); 96 + ASSERT_OK(err, "bpf_prog_test_run_opts"); 97 + ASSERT_EQ(opts.retval, 1, "opts.retval"); 98 + 99 + out: 100 + refcounted_kptr__destroy(skel); 101 + free(values); 102 + }
+60
tools/testing/selftests/bpf/progs/refcounted_kptr.c
··· 568 568 return 0; 569 569 } 570 570 571 + private(kptr_ref) u64 ref; 572 + 573 + static int probe_read_refcount(void) 574 + { 575 + u32 refcount; 576 + 577 + bpf_probe_read_kernel(&refcount, sizeof(refcount), (void *) ref); 578 + return refcount; 579 + } 580 + 581 + static int __insert_in_list(struct bpf_list_head *head, struct bpf_spin_lock *lock, 582 + struct node_data __kptr **node) 583 + { 584 + struct node_data *node_new, *node_ref, *node_old; 585 + 586 + node_new = bpf_obj_new(typeof(*node_new)); 587 + if (!node_new) 588 + return -1; 589 + 590 + node_ref = bpf_refcount_acquire(node_new); 591 + node_old = bpf_kptr_xchg(node, node_new); 592 + if (node_old) { 593 + bpf_obj_drop(node_old); 594 + bpf_obj_drop(node_ref); 595 + return -2; 596 + } 597 + 598 + bpf_spin_lock(lock); 599 + bpf_list_push_front(head, &node_ref->l); 600 + ref = (u64)(void *) &node_ref->ref; 601 + bpf_spin_unlock(lock); 602 + return probe_read_refcount(); 603 + } 604 + 605 + struct { 606 + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 607 + __type(key, int); 608 + __type(value, struct map_value); 609 + __uint(max_entries, 1); 610 + } percpu_hash SEC(".maps"); 611 + 612 + SEC("tc") 613 + int percpu_hash_refcount_leak(void *ctx) 614 + { 615 + struct map_value *v; 616 + int key = 0; 617 + 618 + v = bpf_map_lookup_elem(&percpu_hash, &key); 619 + if (!v) 620 + return 0; 621 + 622 + return __insert_in_list(&head, &lock, &v->node); 623 + } 624 + 625 + SEC("tc") 626 + int check_percpu_hash_refcount(void *ctx) 627 + { 628 + return probe_read_refcount(); 629 + } 630 + 571 631 char _license[] SEC("license") = "GPL";