Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests: bpf: add bpf_cpumask_populate selftests

Add selftests for the bpf_cpumask_populate helper that sets a
bpf_cpumask to a bit pattern provided by a BPF program.

Signed-off-by: Emil Tsalapatis (Meta) <emil@etsalapatis.com>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20250309230427.26603-3-emil@etsalapatis.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Emil Tsalapatis and committed by Alexei Starovoitov.
918ba263 950ad93d

+161
+3
tools/testing/selftests/bpf/prog_tests/cpumask.c
··· 25 25 "test_global_mask_nested_deep_rcu", 26 26 "test_global_mask_nested_deep_array_rcu", 27 27 "test_cpumask_weight", 28 + "test_populate_reject_small_mask", 29 + "test_populate_reject_unaligned", 30 + "test_populate", 28 31 }; 29 32 30 33 static void verify_success(const char *prog_name)
+1
tools/testing/selftests/bpf/progs/cpumask_common.h
··· 61 61 u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, 62 62 const struct cpumask *src2) __ksym __weak; 63 63 u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym __weak; 64 + int bpf_cpumask_populate(struct cpumask *cpumask, void *src, size_t src__sz) __ksym __weak; 64 65 65 66 void bpf_rcu_read_lock(void) __ksym __weak; 66 67 void bpf_rcu_read_unlock(void) __ksym __weak;
+38
tools/testing/selftests/bpf/progs/cpumask_failure.c
··· 222 222 223 223 return 0; 224 224 } 225 + 226 + SEC("tp_btf/task_newtask") 227 + __failure __msg("type=scalar expected=fp") 228 + int BPF_PROG(test_populate_invalid_destination, struct task_struct *task, u64 clone_flags) 229 + { 230 + struct bpf_cpumask *invalid = (struct bpf_cpumask *)0x123456; 231 + u64 bits; 232 + int ret; 233 + 234 + ret = bpf_cpumask_populate((struct cpumask *)invalid, &bits, sizeof(bits)); 235 + if (!ret) 236 + err = 2; 237 + 238 + return 0; 239 + } 240 + 241 + SEC("tp_btf/task_newtask") 242 + __failure __msg("leads to invalid memory access") 243 + int BPF_PROG(test_populate_invalid_source, struct task_struct *task, u64 clone_flags) 244 + { 245 + void *garbage = (void *)0x123456; 246 + struct bpf_cpumask *local; 247 + int ret; 248 + 249 + local = create_cpumask(); 250 + if (!local) { 251 + err = 1; 252 + return 0; 253 + } 254 + 255 + ret = bpf_cpumask_populate((struct cpumask *)local, garbage, 8); 256 + if (!ret) 257 + err = 2; 258 + 259 + bpf_cpumask_release(local); 260 + 261 + return 0; 262 + }
+119
tools/testing/selftests/bpf/progs/cpumask_success.c
··· 770 770 bpf_cpumask_release(mask2); 771 771 return 0; 772 772 } 773 + 774 + SEC("tp_btf/task_newtask") 775 + int BPF_PROG(test_populate_reject_small_mask, struct task_struct *task, u64 clone_flags) 776 + { 777 + struct bpf_cpumask *local; 778 + u8 toofewbits; 779 + int ret; 780 + 781 + if (!is_test_task()) 782 + return 0; 783 + 784 + local = create_cpumask(); 785 + if (!local) 786 + return 0; 787 + 788 + /* The kfunc should prevent this operation */ 789 + ret = bpf_cpumask_populate((struct cpumask *)local, &toofewbits, sizeof(toofewbits)); 790 + if (ret != -EACCES) 791 + err = 2; 792 + 793 + bpf_cpumask_release(local); 794 + 795 + return 0; 796 + } 797 + 798 + /* Mask is guaranteed to be large enough for bpf_cpumask_t. */ 799 + #define CPUMASK_TEST_MASKLEN (sizeof(cpumask_t)) 800 + 801 + /* Add an extra word for the test_populate_reject_unaligned test. */ 802 + u64 bits[CPUMASK_TEST_MASKLEN / 8 + 1]; 803 + extern bool CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS __kconfig __weak; 804 + 805 + SEC("tp_btf/task_newtask") 806 + int BPF_PROG(test_populate_reject_unaligned, struct task_struct *task, u64 clone_flags) 807 + { 808 + struct bpf_cpumask *mask; 809 + char *src; 810 + int ret; 811 + 812 + if (!is_test_task()) 813 + return 0; 814 + 815 + /* Skip if unaligned accesses are fine for this arch. */ 816 + if (CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 817 + return 0; 818 + 819 + mask = bpf_cpumask_create(); 820 + if (!mask) { 821 + err = 1; 822 + return 0; 823 + } 824 + 825 + /* Misalign the source array by a byte. 
*/ 826 + src = &((char *)bits)[1]; 827 + 828 + ret = bpf_cpumask_populate((struct cpumask *)mask, src, CPUMASK_TEST_MASKLEN); 829 + if (ret != -EINVAL) 830 + err = 2; 831 + 832 + bpf_cpumask_release(mask); 833 + 834 + return 0; 835 + } 836 + 837 + 838 + SEC("tp_btf/task_newtask") 839 + int BPF_PROG(test_populate, struct task_struct *task, u64 clone_flags) 840 + { 841 + struct bpf_cpumask *mask; 842 + bool bit; 843 + int ret; 844 + int i; 845 + 846 + if (!is_test_task()) 847 + return 0; 848 + 849 + /* Set only odd bits. */ 850 + __builtin_memset(bits, 0xaa, CPUMASK_TEST_MASKLEN); 851 + 852 + mask = bpf_cpumask_create(); 853 + if (!mask) { 854 + err = 1; 855 + return 0; 856 + } 857 + 858 + /* Pass the entire bits array, the kfunc will only copy the valid bits. */ 859 + ret = bpf_cpumask_populate((struct cpumask *)mask, bits, CPUMASK_TEST_MASKLEN); 860 + if (ret) { 861 + err = 2; 862 + goto out; 863 + } 864 + 865 + /* 866 + * Test is there to appease the verifier. We cannot directly 867 + * access NR_CPUS, the upper bound for nr_cpus, so we infer 868 + * it from the size of cpumask_t. 869 + */ 870 + if (nr_cpus < 0 || nr_cpus >= CPUMASK_TEST_MASKLEN * 8) { 871 + err = 3; 872 + goto out; 873 + } 874 + 875 + bpf_for(i, 0, nr_cpus) { 876 + /* Odd-numbered bits should be set, even ones unset. */ 877 + bit = bpf_cpumask_test_cpu(i, (const struct cpumask *)mask); 878 + if (bit == (i % 2 != 0)) 879 + continue; 880 + 881 + err = 4; 882 + break; 883 + } 884 + 885 + out: 886 + bpf_cpumask_release(mask); 887 + 888 + return 0; 889 + } 890 + 891 + #undef CPUMASK_TEST_MASKLEN