Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tools/bpf: selftests: Add bpf_iter selftests

The added test includes the following subtests:
- test verifier change for btf_id_or_null
- test load/create_iter/read for
ipv6_route/netlink/bpf_map/task/task_file
- test anon bpf iterator
- test anon bpf iterator reading one char at a time
- test file bpf iterator
- test overflow (single bpf program output does not overflow)
- test overflow (single bpf program output overflows)
- test bpf prog returning 1

The ipv6_route test exercises the following verifier change
- access fields in the variable length array of the structure.

The netlink load test exercises the following verifier change
- put a btf_id ptr value on the stack and make it accessible to
tracing/iter programs.

The anon bpf iterator also tests link auto attach through skeleton.

$ test_progs -n 2
#2/1 btf_id_or_null:OK
#2/2 ipv6_route:OK
#2/3 netlink:OK
#2/4 bpf_map:OK
#2/5 task:OK
#2/6 task_file:OK
#2/7 anon:OK
#2/8 anon-read-one-char:OK
#2/9 file:OK
#2/10 overflow:OK
#2/11 overflow-e2big:OK
#2/12 prog-ret-1:OK
#2 bpf_iter:OK
Summary: 1/12 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200509175923.2477637-1-yhs@fb.com

authored by

Yonghong Song and committed by
Alexei Starovoitov
6879c042 acf61631

+509
+409
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2020 Facebook */ 3 + #include <test_progs.h> 4 + #include "bpf_iter_ipv6_route.skel.h" 5 + #include "bpf_iter_netlink.skel.h" 6 + #include "bpf_iter_bpf_map.skel.h" 7 + #include "bpf_iter_task.skel.h" 8 + #include "bpf_iter_task_file.skel.h" 9 + #include "bpf_iter_test_kern1.skel.h" 10 + #include "bpf_iter_test_kern2.skel.h" 11 + #include "bpf_iter_test_kern3.skel.h" 12 + #include "bpf_iter_test_kern4.skel.h" 13 + 14 + static int duration; 15 + 16 + static void test_btf_id_or_null(void) 17 + { 18 + struct bpf_iter_test_kern3 *skel; 19 + 20 + skel = bpf_iter_test_kern3__open_and_load(); 21 + if (CHECK(skel, "bpf_iter_test_kern3__open_and_load", 22 + "skeleton open_and_load unexpectedly succeeded\n")) { 23 + bpf_iter_test_kern3__destroy(skel); 24 + return; 25 + } 26 + } 27 + 28 + static void do_dummy_read(struct bpf_program *prog) 29 + { 30 + struct bpf_link *link; 31 + char buf[16] = {}; 32 + int iter_fd, len; 33 + 34 + link = bpf_program__attach_iter(prog, NULL); 35 + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 36 + return; 37 + 38 + iter_fd = bpf_iter_create(bpf_link__fd(link)); 39 + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 40 + goto free_link; 41 + 42 + /* not check contents, but ensure read() ends without error */ 43 + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 44 + ; 45 + CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)); 46 + 47 + close(iter_fd); 48 + 49 + free_link: 50 + bpf_link__destroy(link); 51 + } 52 + 53 + static void test_ipv6_route(void) 54 + { 55 + struct bpf_iter_ipv6_route *skel; 56 + 57 + skel = bpf_iter_ipv6_route__open_and_load(); 58 + if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load", 59 + "skeleton open_and_load failed\n")) 60 + return; 61 + 62 + do_dummy_read(skel->progs.dump_ipv6_route); 63 + 64 + bpf_iter_ipv6_route__destroy(skel); 65 + } 66 + 67 + static void test_netlink(void) 68 + { 69 + struct 
bpf_iter_netlink *skel; 70 + 71 + skel = bpf_iter_netlink__open_and_load(); 72 + if (CHECK(!skel, "bpf_iter_netlink__open_and_load", 73 + "skeleton open_and_load failed\n")) 74 + return; 75 + 76 + do_dummy_read(skel->progs.dump_netlink); 77 + 78 + bpf_iter_netlink__destroy(skel); 79 + } 80 + 81 + static void test_bpf_map(void) 82 + { 83 + struct bpf_iter_bpf_map *skel; 84 + 85 + skel = bpf_iter_bpf_map__open_and_load(); 86 + if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load", 87 + "skeleton open_and_load failed\n")) 88 + return; 89 + 90 + do_dummy_read(skel->progs.dump_bpf_map); 91 + 92 + bpf_iter_bpf_map__destroy(skel); 93 + } 94 + 95 + static void test_task(void) 96 + { 97 + struct bpf_iter_task *skel; 98 + 99 + skel = bpf_iter_task__open_and_load(); 100 + if (CHECK(!skel, "bpf_iter_task__open_and_load", 101 + "skeleton open_and_load failed\n")) 102 + return; 103 + 104 + do_dummy_read(skel->progs.dump_task); 105 + 106 + bpf_iter_task__destroy(skel); 107 + } 108 + 109 + static void test_task_file(void) 110 + { 111 + struct bpf_iter_task_file *skel; 112 + 113 + skel = bpf_iter_task_file__open_and_load(); 114 + if (CHECK(!skel, "bpf_iter_task_file__open_and_load", 115 + "skeleton open_and_load failed\n")) 116 + return; 117 + 118 + do_dummy_read(skel->progs.dump_task_file); 119 + 120 + bpf_iter_task_file__destroy(skel); 121 + } 122 + 123 + /* The expected string is less than 16 bytes */ 124 + static int do_read_with_fd(int iter_fd, const char *expected, 125 + bool read_one_char) 126 + { 127 + int err = -1, len, read_buf_len, start; 128 + char buf[16] = {}; 129 + 130 + read_buf_len = read_one_char ? 1 : 16; 131 + start = 0; 132 + while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) { 133 + start += len; 134 + if (CHECK(start >= 16, "read", "read len %d\n", len)) 135 + return -1; 136 + read_buf_len = read_one_char ? 
1 : 16 - start; 137 + } 138 + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 139 + return -1; 140 + 141 + err = strcmp(buf, expected); 142 + if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n", 143 + buf, expected)) 144 + return -1; 145 + 146 + return 0; 147 + } 148 + 149 + static void test_anon_iter(bool read_one_char) 150 + { 151 + struct bpf_iter_test_kern1 *skel; 152 + struct bpf_link *link; 153 + int iter_fd, err; 154 + 155 + skel = bpf_iter_test_kern1__open_and_load(); 156 + if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load", 157 + "skeleton open_and_load failed\n")) 158 + return; 159 + 160 + err = bpf_iter_test_kern1__attach(skel); 161 + if (CHECK(err, "bpf_iter_test_kern1__attach", 162 + "skeleton attach failed\n")) { 163 + goto out; 164 + } 165 + 166 + link = skel->links.dump_task; 167 + iter_fd = bpf_iter_create(bpf_link__fd(link)); 168 + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 169 + goto out; 170 + 171 + do_read_with_fd(iter_fd, "abcd", read_one_char); 172 + close(iter_fd); 173 + 174 + out: 175 + bpf_iter_test_kern1__destroy(skel); 176 + } 177 + 178 + static int do_read(const char *path, const char *expected) 179 + { 180 + int err, iter_fd; 181 + 182 + iter_fd = open(path, O_RDONLY); 183 + if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n", 184 + path, strerror(errno))) 185 + return -1; 186 + 187 + err = do_read_with_fd(iter_fd, expected, false); 188 + close(iter_fd); 189 + return err; 190 + } 191 + 192 + static void test_file_iter(void) 193 + { 194 + const char *path = "/sys/fs/bpf/bpf_iter_test1"; 195 + struct bpf_iter_test_kern1 *skel1; 196 + struct bpf_iter_test_kern2 *skel2; 197 + struct bpf_link *link; 198 + int err; 199 + 200 + skel1 = bpf_iter_test_kern1__open_and_load(); 201 + if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load", 202 + "skeleton open_and_load failed\n")) 203 + return; 204 + 205 + link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); 206 + if 
(CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 207 + goto out; 208 + 209 + /* unlink this path if it exists. */ 210 + unlink(path); 211 + 212 + err = bpf_link__pin(link, path); 213 + if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err)) 214 + goto free_link; 215 + 216 + err = do_read(path, "abcd"); 217 + if (err) 218 + goto unlink_path; 219 + 220 + /* file based iterator seems working fine. Let us a link update 221 + * of the underlying link and `cat` the iterator again, its content 222 + * should change. 223 + */ 224 + skel2 = bpf_iter_test_kern2__open_and_load(); 225 + if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load", 226 + "skeleton open_and_load failed\n")) 227 + goto unlink_path; 228 + 229 + err = bpf_link__update_program(link, skel2->progs.dump_task); 230 + if (CHECK(err, "update_prog", "update_prog failed\n")) 231 + goto destroy_skel2; 232 + 233 + do_read(path, "ABCD"); 234 + 235 + destroy_skel2: 236 + bpf_iter_test_kern2__destroy(skel2); 237 + unlink_path: 238 + unlink(path); 239 + free_link: 240 + bpf_link__destroy(link); 241 + out: 242 + bpf_iter_test_kern1__destroy(skel1); 243 + } 244 + 245 + static void test_overflow(bool test_e2big_overflow, bool ret1) 246 + { 247 + __u32 map_info_len, total_read_len, expected_read_len; 248 + int err, iter_fd, map1_fd, map2_fd, len; 249 + struct bpf_map_info map_info = {}; 250 + struct bpf_iter_test_kern4 *skel; 251 + struct bpf_link *link; 252 + __u32 page_size; 253 + char *buf; 254 + 255 + skel = bpf_iter_test_kern4__open(); 256 + if (CHECK(!skel, "bpf_iter_test_kern4__open", 257 + "skeleton open failed\n")) 258 + return; 259 + 260 + /* create two maps: bpf program will only do bpf_seq_write 261 + * for these two maps. The goal is one map output almost 262 + * fills seq_file buffer and then the other will trigger 263 + * overflow and needs restart. 
264 + */ 265 + map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); 266 + if (CHECK(map1_fd < 0, "bpf_create_map", 267 + "map_creation failed: %s\n", strerror(errno))) 268 + goto out; 269 + map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); 270 + if (CHECK(map2_fd < 0, "bpf_create_map", 271 + "map_creation failed: %s\n", strerror(errno))) 272 + goto free_map1; 273 + 274 + /* bpf_seq_printf kernel buffer is one page, so one map 275 + * bpf_seq_write will mostly fill it, and the other map 276 + * will partially fill and then trigger overflow and need 277 + * bpf_seq_read restart. 278 + */ 279 + page_size = sysconf(_SC_PAGE_SIZE); 280 + 281 + if (test_e2big_overflow) { 282 + skel->rodata->print_len = (page_size + 8) / 8; 283 + expected_read_len = 2 * (page_size + 8); 284 + } else if (!ret1) { 285 + skel->rodata->print_len = (page_size - 8) / 8; 286 + expected_read_len = 2 * (page_size - 8); 287 + } else { 288 + skel->rodata->print_len = 1; 289 + expected_read_len = 2 * 8; 290 + } 291 + skel->rodata->ret1 = ret1; 292 + 293 + if (CHECK(bpf_iter_test_kern4__load(skel), 294 + "bpf_iter_test_kern4__load", "skeleton load failed\n")) 295 + goto free_map2; 296 + 297 + /* setup filtering map_id in bpf program */ 298 + map_info_len = sizeof(map_info); 299 + err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len); 300 + if (CHECK(err, "get_map_info", "get map info failed: %s\n", 301 + strerror(errno))) 302 + goto free_map2; 303 + skel->bss->map1_id = map_info.id; 304 + 305 + err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len); 306 + if (CHECK(err, "get_map_info", "get map info failed: %s\n", 307 + strerror(errno))) 308 + goto free_map2; 309 + skel->bss->map2_id = map_info.id; 310 + 311 + link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL); 312 + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 313 + goto free_map2; 314 + 315 + iter_fd = bpf_iter_create(bpf_link__fd(link)); 316 + if (CHECK(iter_fd < 0, "create_iter", 
"create_iter failed\n")) 317 + goto free_link; 318 + 319 + buf = malloc(expected_read_len); 320 + if (!buf) 321 + goto close_iter; 322 + 323 + /* do read */ 324 + total_read_len = 0; 325 + if (test_e2big_overflow) { 326 + while ((len = read(iter_fd, buf, expected_read_len)) > 0) 327 + total_read_len += len; 328 + 329 + CHECK(len != -1 || errno != E2BIG, "read", 330 + "expected ret -1, errno E2BIG, but get ret %d, error %s\n", 331 + len, strerror(errno)); 332 + goto free_buf; 333 + } else if (!ret1) { 334 + while ((len = read(iter_fd, buf, expected_read_len)) > 0) 335 + total_read_len += len; 336 + 337 + if (CHECK(len < 0, "read", "read failed: %s\n", 338 + strerror(errno))) 339 + goto free_buf; 340 + } else { 341 + do { 342 + len = read(iter_fd, buf, expected_read_len); 343 + if (len > 0) 344 + total_read_len += len; 345 + } while (len > 0 || len == -EAGAIN); 346 + 347 + if (CHECK(len < 0, "read", "read failed: %s\n", 348 + strerror(errno))) 349 + goto free_buf; 350 + } 351 + 352 + if (CHECK(total_read_len != expected_read_len, "read", 353 + "total len %u, expected len %u\n", total_read_len, 354 + expected_read_len)) 355 + goto free_buf; 356 + 357 + if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed", 358 + "expected 1 actual %d\n", skel->bss->map1_accessed)) 359 + goto free_buf; 360 + 361 + if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed", 362 + "expected 2 actual %d\n", skel->bss->map2_accessed)) 363 + goto free_buf; 364 + 365 + CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2, 366 + "map2_seqnum", "two different seqnum %lld %lld\n", 367 + skel->bss->map2_seqnum1, skel->bss->map2_seqnum2); 368 + 369 + free_buf: 370 + free(buf); 371 + close_iter: 372 + close(iter_fd); 373 + free_link: 374 + bpf_link__destroy(link); 375 + free_map2: 376 + close(map2_fd); 377 + free_map1: 378 + close(map1_fd); 379 + out: 380 + bpf_iter_test_kern4__destroy(skel); 381 + } 382 + 383 + void test_bpf_iter(void) 384 + { 385 + if (test__start_subtest("btf_id_or_null")) 
386 + test_btf_id_or_null(); 387 + if (test__start_subtest("ipv6_route")) 388 + test_ipv6_route(); 389 + if (test__start_subtest("netlink")) 390 + test_netlink(); 391 + if (test__start_subtest("bpf_map")) 392 + test_bpf_map(); 393 + if (test__start_subtest("task")) 394 + test_task(); 395 + if (test__start_subtest("task_file")) 396 + test_task_file(); 397 + if (test__start_subtest("anon")) 398 + test_anon_iter(false); 399 + if (test__start_subtest("anon-read-one-char")) 400 + test_anon_iter(true); 401 + if (test__start_subtest("file")) 402 + test_file_iter(); 403 + if (test__start_subtest("overflow")) 404 + test_overflow(false, false); 405 + if (test__start_subtest("overflow-e2big")) 406 + test_overflow(true, false); 407 + if (test__start_subtest("prog-ret-1")) 408 + test_overflow(false, true); 409 + }
+4
tools/testing/selftests/bpf/progs/bpf_iter_test_kern1.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2020 Facebook */ 3 + #define START_CHAR 'a' 4 + #include "bpf_iter_test_kern_common.h"
+4
tools/testing/selftests/bpf/progs/bpf_iter_test_kern2.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2020 Facebook */ 3 + #define START_CHAR 'A' 4 + #include "bpf_iter_test_kern_common.h"
+18
tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2020 Facebook */ 3 + #include "vmlinux.h" 4 + #include <bpf/bpf_helpers.h> 5 + 6 + char _license[] SEC("license") = "GPL"; 7 + 8 + SEC("iter/task") 9 + int dump_task(struct bpf_iter__task *ctx) 10 + { 11 + struct seq_file *seq = ctx->meta->seq; 12 + struct task_struct *task = ctx->task; 13 + int tgid; 14 + 15 + tgid = task->tgid; 16 + bpf_seq_write(seq, &tgid, sizeof(tgid)); 17 + return 0; 18 + }
+52
tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2020 Facebook */ 3 + #include "vmlinux.h" 4 + #include <bpf/bpf_helpers.h> 5 + 6 + char _license[] SEC("license") = "GPL"; 7 + 8 + __u32 map1_id = 0, map2_id = 0; 9 + __u32 map1_accessed = 0, map2_accessed = 0; 10 + __u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0; 11 + 12 + static volatile const __u32 print_len; 13 + static volatile const __u32 ret1; 14 + 15 + SEC("iter/bpf_map") 16 + int dump_bpf_map(struct bpf_iter__bpf_map *ctx) 17 + { 18 + struct seq_file *seq = ctx->meta->seq; 19 + struct bpf_map *map = ctx->map; 20 + __u64 seq_num; 21 + int i, ret = 0; 22 + 23 + if (map == (void *)0) 24 + return 0; 25 + 26 + /* only dump map1_id and map2_id */ 27 + if (map->id != map1_id && map->id != map2_id) 28 + return 0; 29 + 30 + seq_num = ctx->meta->seq_num; 31 + if (map->id == map1_id) { 32 + map1_seqnum = seq_num; 33 + map1_accessed++; 34 + } 35 + 36 + if (map->id == map2_id) { 37 + if (map2_accessed == 0) { 38 + map2_seqnum1 = seq_num; 39 + if (ret1) 40 + ret = 1; 41 + } else { 42 + map2_seqnum2 = seq_num; 43 + } 44 + map2_accessed++; 45 + } 46 + 47 + /* fill seq_file buffer */ 48 + for (i = 0; i < print_len; i++) 49 + bpf_seq_write(seq, &seq_num, sizeof(seq_num)); 50 + 51 + return ret; 52 + }
+22
tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2020 Facebook */ 3 + #include "vmlinux.h" 4 + #include <bpf/bpf_helpers.h> 5 + 6 + char _license[] SEC("license") = "GPL"; 7 + int count = 0; 8 + 9 + SEC("iter/task") 10 + int dump_task(struct bpf_iter__task *ctx) 11 + { 12 + struct seq_file *seq = ctx->meta->seq; 13 + char c; 14 + 15 + if (count < 4) { 16 + c = START_CHAR + count; 17 + bpf_seq_write(seq, &c, sizeof(c)); 18 + count++; 19 + } 20 + 21 + return 0; 22 + }