Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Cope with 512 bytes limit with bpf_global_percpu_ma

In the previous patch, the maximum data size for bpf_global_percpu_ma
is 512 bytes. This breaks selftest test_bpf_ma. The test is adjusted
in two aspects:
- Since the maximum allowed data size for bpf_global_percpu_ma is
512, remove all tests beyond that, namely sizes 1024, 2048 and 4096.
- Previously the percpu data size was bucket_size - 8 in order to
avoid a percpu allocation spilling into the next bucket. This patch
removes that data size adjustment, thanks to Patch 1.

Also, a better way to generate BTF type is used than adding
a member to the value struct.

Acked-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20231222031807.1292853-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Yonghong Song and committed by
Alexei Starovoitov
21f5a801 5c1a3765

+46 -40
+13 -7
tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c
··· 14 14 struct test_bpf_ma *skel; 15 15 struct bpf_program *prog; 16 16 struct btf *btf; 17 - int i, err; 17 + int i, err, id; 18 + char tname[32]; 18 19 19 20 skel = test_bpf_ma__open(); 20 21 if (!ASSERT_OK_PTR(skel, "open")) ··· 26 25 goto out; 27 26 28 27 for (i = 0; i < ARRAY_SIZE(skel->rodata->data_sizes); i++) { 29 - char name[32]; 30 - int id; 31 - 32 - snprintf(name, sizeof(name), "bin_data_%u", skel->rodata->data_sizes[i]); 33 - id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT); 34 - if (!ASSERT_GT(id, 0, "bin_data")) 28 + snprintf(tname, sizeof(tname), "bin_data_%u", skel->rodata->data_sizes[i]); 29 + id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT); 30 + if (!ASSERT_GT(id, 0, tname)) 35 31 goto out; 36 32 skel->rodata->data_btf_ids[i] = id; 33 + } 34 + 35 + for (i = 0; i < ARRAY_SIZE(skel->rodata->percpu_data_sizes); i++) { 36 + snprintf(tname, sizeof(tname), "percpu_bin_data_%u", skel->rodata->percpu_data_sizes[i]); 37 + id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT); 38 + if (!ASSERT_GT(id, 0, tname)) 39 + goto out; 40 + skel->rodata->percpu_data_btf_ids[i] = id; 37 41 } 38 42 39 43 prog = bpf_object__find_program_by_name(skel->obj, name);
+33 -33
tools/testing/selftests/bpf/progs/test_bpf_ma.c
··· 20 20 const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096}; 21 21 const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {}; 22 22 23 + const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512}; 24 + const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(data_sizes)] = {}; 25 + 23 26 int err = 0; 24 27 u32 pid = 0; 25 28 ··· 30 27 struct bin_data_##_size { \ 31 28 char data[_size - sizeof(void *)]; \ 32 29 }; \ 30 + /* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */ \ 31 + struct bin_data_##_size *__bin_data_##_size; \ 33 32 struct map_value_##_size { \ 34 33 struct bin_data_##_size __kptr * data; \ 35 - /* To emit BTF info for bin_data_xx */ \ 36 - struct bin_data_##_size not_used; \ 37 34 }; \ 38 35 struct { \ 39 36 __uint(type, BPF_MAP_TYPE_ARRAY); \ ··· 43 40 } array_##_size SEC(".maps") 44 41 45 42 #define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \ 43 + struct percpu_bin_data_##_size { \ 44 + char data[_size]; \ 45 + }; \ 46 + struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \ 46 47 struct map_value_percpu_##_size { \ 47 - struct bin_data_##_size __percpu_kptr * data; \ 48 + struct percpu_bin_data_##_size __percpu_kptr * data; \ 48 49 }; \ 49 50 struct { \ 50 51 __uint(type, BPF_MAP_TYPE_ARRAY); \ ··· 121 114 return; 122 115 } 123 116 /* per-cpu allocator may not be able to refill in time */ 124 - new = bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL); 117 + new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL); 125 118 if (!new) 126 119 continue; 127 120 ··· 186 179 DEFINE_ARRAY_WITH_KPTR(2048); 187 180 DEFINE_ARRAY_WITH_KPTR(4096); 188 181 189 - /* per-cpu kptr doesn't support bin_data_8 which is a zero-sized array */ 182 + DEFINE_ARRAY_WITH_PERCPU_KPTR(8); 190 183 DEFINE_ARRAY_WITH_PERCPU_KPTR(16); 191 184 DEFINE_ARRAY_WITH_PERCPU_KPTR(32); 192 185 DEFINE_ARRAY_WITH_PERCPU_KPTR(64); ··· 195 188 DEFINE_ARRAY_WITH_PERCPU_KPTR(192); 196 189 
DEFINE_ARRAY_WITH_PERCPU_KPTR(256); 197 190 DEFINE_ARRAY_WITH_PERCPU_KPTR(512); 198 - DEFINE_ARRAY_WITH_PERCPU_KPTR(1024); 199 - DEFINE_ARRAY_WITH_PERCPU_KPTR(2048); 200 - DEFINE_ARRAY_WITH_PERCPU_KPTR(4096); 201 191 202 192 SEC("?fentry/" SYS_PREFIX "sys_nanosleep") 203 193 int test_batch_alloc_free(void *ctx) ··· 250 246 if ((u32)bpf_get_current_pid_tgid() != pid) 251 247 return 0; 252 248 253 - /* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling, 254 - * then free 128 16-bytes per-cpu objects in batch to trigger freeing. 249 + /* Alloc 128 8-bytes per-cpu objects in batch to trigger refilling, 250 + * then free 128 8-bytes per-cpu objects in batch to trigger freeing. 255 251 */ 256 - CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 0); 257 - CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 1); 258 - CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 2); 259 - CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 3); 260 - CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 4); 261 - CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 5); 262 - CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 6); 263 - CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 7); 264 - CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 8); 265 - CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 9); 266 - CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 10); 252 + CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0); 253 + CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1); 254 + CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2); 255 + CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3); 256 + CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4); 257 + CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5); 258 + CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6); 259 + CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7); 260 + CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8); 267 261 268 262 return 0; 269 263 } ··· 272 270 if ((u32)bpf_get_current_pid_tgid() != pid) 273 271 return 0; 274 272 275 - /* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling, 273 + /* Alloc 128 8-bytes per-cpu objects in batch to trigger refilling, 276 274 * then free these object through map free. 
277 275 */ 278 - CALL_BATCH_PERCPU_ALLOC(16, 128, 0); 279 - CALL_BATCH_PERCPU_ALLOC(32, 128, 1); 280 - CALL_BATCH_PERCPU_ALLOC(64, 128, 2); 281 - CALL_BATCH_PERCPU_ALLOC(96, 128, 3); 282 - CALL_BATCH_PERCPU_ALLOC(128, 128, 4); 283 - CALL_BATCH_PERCPU_ALLOC(192, 128, 5); 284 - CALL_BATCH_PERCPU_ALLOC(256, 128, 6); 285 - CALL_BATCH_PERCPU_ALLOC(512, 64, 7); 286 - CALL_BATCH_PERCPU_ALLOC(1024, 32, 8); 287 - CALL_BATCH_PERCPU_ALLOC(2048, 16, 9); 288 - CALL_BATCH_PERCPU_ALLOC(4096, 8, 10); 276 + CALL_BATCH_PERCPU_ALLOC(8, 128, 0); 277 + CALL_BATCH_PERCPU_ALLOC(16, 128, 1); 278 + CALL_BATCH_PERCPU_ALLOC(32, 128, 2); 279 + CALL_BATCH_PERCPU_ALLOC(64, 128, 3); 280 + CALL_BATCH_PERCPU_ALLOC(96, 128, 4); 281 + CALL_BATCH_PERCPU_ALLOC(128, 128, 5); 282 + CALL_BATCH_PERCPU_ALLOC(192, 128, 6); 283 + CALL_BATCH_PERCPU_ALLOC(256, 128, 7); 284 + CALL_BATCH_PERCPU_ALLOC(512, 64, 8); 289 285 290 286 return 0; 291 287 }