Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Migrate from bpf_prog_test_run_xattr

bpf_prog_test_run_xattr is being deprecated in favor of the OPTS-based
bpf_prog_test_run_opts.
Since we end up unable to use CHECK_ATTR, replace its usages with ASSERT_* calls.
Also, prog_run_xattr is now prog_run_opts.

Signed-off-by: Delyan Kratunov <delyank@fb.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220202235423.1097270-3-delyank@fb.com

authored by

Delyan Kratunov and committed by
Andrii Nakryiko
39316183 04fcb5f9

+218 -283
+13 -27
tools/testing/selftests/bpf/prog_tests/check_mtu.c
··· 79 79 struct bpf_program *prog, 80 80 __u32 mtu_expect) 81 81 { 82 - const char *prog_name = bpf_program__name(prog); 83 82 int retval_expect = XDP_PASS; 84 83 __u32 mtu_result = 0; 85 84 char buf[256] = {}; 86 - int err; 87 - struct bpf_prog_test_run_attr tattr = { 85 + int err, prog_fd = bpf_program__fd(prog); 86 + LIBBPF_OPTS(bpf_test_run_opts, topts, 88 87 .repeat = 1, 89 88 .data_in = &pkt_v4, 90 89 .data_size_in = sizeof(pkt_v4), 91 90 .data_out = buf, 92 91 .data_size_out = sizeof(buf), 93 - .prog_fd = bpf_program__fd(prog), 94 - }; 92 + ); 95 93 96 - err = bpf_prog_test_run_xattr(&tattr); 97 - CHECK_ATTR(err != 0, "bpf_prog_test_run", 98 - "prog_name:%s (err %d errno %d retval %d)\n", 99 - prog_name, err, errno, tattr.retval); 100 - 101 - CHECK(tattr.retval != retval_expect, "retval", 102 - "progname:%s unexpected retval=%d expected=%d\n", 103 - prog_name, tattr.retval, retval_expect); 94 + err = bpf_prog_test_run_opts(prog_fd, &topts); 95 + ASSERT_OK(err, "test_run"); 96 + ASSERT_EQ(topts.retval, retval_expect, "retval"); 104 97 105 98 /* Extract MTU that BPF-prog got */ 106 99 mtu_result = skel->bss->global_bpf_mtu_xdp; ··· 132 139 struct bpf_program *prog, 133 140 __u32 mtu_expect) 134 141 { 135 - const char *prog_name = bpf_program__name(prog); 136 142 int retval_expect = BPF_OK; 137 143 __u32 mtu_result = 0; 138 144 char buf[256] = {}; 139 - int err; 140 - struct bpf_prog_test_run_attr tattr = { 141 - .repeat = 1, 145 + int err, prog_fd = bpf_program__fd(prog); 146 + LIBBPF_OPTS(bpf_test_run_opts, topts, 142 147 .data_in = &pkt_v4, 143 148 .data_size_in = sizeof(pkt_v4), 144 149 .data_out = buf, 145 150 .data_size_out = sizeof(buf), 146 - .prog_fd = bpf_program__fd(prog), 147 - }; 151 + .repeat = 1, 152 + ); 148 153 149 - err = bpf_prog_test_run_xattr(&tattr); 150 - CHECK_ATTR(err != 0, "bpf_prog_test_run", 151 - "prog_name:%s (err %d errno %d retval %d)\n", 152 - prog_name, err, errno, tattr.retval); 153 - 154 - CHECK(tattr.retval != 
retval_expect, "retval", 155 - "progname:%s unexpected retval=%d expected=%d\n", 156 - prog_name, tattr.retval, retval_expect); 154 + err = bpf_prog_test_run_opts(prog_fd, &topts); 155 + ASSERT_OK(err, "test_run"); 156 + ASSERT_EQ(topts.retval, retval_expect, "retval"); 157 157 158 158 /* Extract MTU that BPF-prog got */ 159 159 mtu_result = skel->bss->global_bpf_mtu_tc;
+5 -5
tools/testing/selftests/bpf/prog_tests/cls_redirect.c
··· 161 161 } 162 162 } 163 163 164 - static bool was_decapsulated(struct bpf_prog_test_run_attr *tattr) 164 + static bool was_decapsulated(struct bpf_test_run_opts *tattr) 165 165 { 166 166 return tattr->data_size_out < tattr->data_size_in; 167 167 } ··· 367 367 368 368 static void test_cls_redirect_common(struct bpf_program *prog) 369 369 { 370 - struct bpf_prog_test_run_attr tattr = {}; 370 + LIBBPF_OPTS(bpf_test_run_opts, tattr); 371 371 int families[] = { AF_INET, AF_INET6 }; 372 372 struct sockaddr_storage ss; 373 373 struct sockaddr *addr; 374 374 socklen_t slen; 375 - int i, j, err; 375 + int i, j, err, prog_fd; 376 376 int servers[__NR_KIND][ARRAY_SIZE(families)] = {}; 377 377 int conns[__NR_KIND][ARRAY_SIZE(families)] = {}; 378 378 struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)]; ··· 394 394 goto cleanup; 395 395 } 396 396 397 - tattr.prog_fd = bpf_program__fd(prog); 397 + prog_fd = bpf_program__fd(prog); 398 398 for (i = 0; i < ARRAY_SIZE(tests); i++) { 399 399 struct test_cfg *test = &tests[i]; 400 400 ··· 415 415 if (CHECK_FAIL(!tattr.data_size_in)) 416 416 continue; 417 417 418 - err = bpf_prog_test_run_xattr(&tattr); 418 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 419 419 if (CHECK_FAIL(err)) 420 420 continue; 421 421
+12 -15
tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
··· 26 26 static void test_dummy_init_ret_value(void) 27 27 { 28 28 __u64 args[1] = {0}; 29 - struct bpf_prog_test_run_attr attr = { 30 - .ctx_size_in = sizeof(args), 29 + LIBBPF_OPTS(bpf_test_run_opts, attr, 31 30 .ctx_in = args, 32 - }; 31 + .ctx_size_in = sizeof(args), 32 + ); 33 33 struct dummy_st_ops *skel; 34 34 int fd, err; 35 35 ··· 38 38 return; 39 39 40 40 fd = bpf_program__fd(skel->progs.test_1); 41 - attr.prog_fd = fd; 42 - err = bpf_prog_test_run_xattr(&attr); 41 + err = bpf_prog_test_run_opts(fd, &attr); 43 42 ASSERT_OK(err, "test_run"); 44 43 ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret"); 45 44 ··· 52 53 .val = exp_retval, 53 54 }; 54 55 __u64 args[1] = {(unsigned long)&in_state}; 55 - struct bpf_prog_test_run_attr attr = { 56 - .ctx_size_in = sizeof(args), 56 + LIBBPF_OPTS(bpf_test_run_opts, attr, 57 57 .ctx_in = args, 58 - }; 58 + .ctx_size_in = sizeof(args), 59 + ); 59 60 struct dummy_st_ops *skel; 60 61 int fd, err; 61 62 ··· 64 65 return; 65 66 66 67 fd = bpf_program__fd(skel->progs.test_1); 67 - attr.prog_fd = fd; 68 - err = bpf_prog_test_run_xattr(&attr); 68 + err = bpf_prog_test_run_opts(fd, &attr); 69 69 ASSERT_OK(err, "test_run"); 70 70 ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret"); 71 71 ASSERT_EQ(attr.retval, exp_retval, "test_ret"); ··· 75 77 static void test_dummy_multiple_args(void) 76 78 { 77 79 __u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; 78 - struct bpf_prog_test_run_attr attr = { 79 - .ctx_size_in = sizeof(args), 80 + LIBBPF_OPTS(bpf_test_run_opts, attr, 80 81 .ctx_in = args, 81 - }; 82 + .ctx_size_in = sizeof(args), 83 + ); 82 84 struct dummy_st_ops *skel; 83 85 int fd, err; 84 86 size_t i; ··· 89 91 return; 90 92 91 93 fd = bpf_program__fd(skel->progs.test_2); 92 - attr.prog_fd = fd; 93 - err = bpf_prog_test_run_xattr(&attr); 94 + err = bpf_prog_test_run_opts(fd, &attr); 94 95 ASSERT_OK(err, "test_run"); 95 96 for (i = 0; i < ARRAY_SIZE(args); i++) { 96 97 snprintf(name, sizeof(name), "arg %zu", i);
+14 -17
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
··· 13 13 #endif 14 14 15 15 #define CHECK_FLOW_KEYS(desc, got, expected) \ 16 - CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \ 16 + _CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \ 17 17 desc, \ 18 + topts.duration, \ 18 19 "nhoff=%u/%u " \ 19 20 "thoff=%u/%u " \ 20 21 "addr_proto=0x%x/0x%x " \ ··· 488 487 /* Keep in sync with 'flags' from eth_get_headlen. */ 489 488 __u32 eth_get_headlen_flags = 490 489 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG; 491 - struct bpf_prog_test_run_attr tattr = {}; 490 + LIBBPF_OPTS(bpf_test_run_opts, topts); 492 491 struct bpf_flow_keys flow_keys = {}; 493 492 __u32 key = (__u32)(tests[i].keys.sport) << 16 | 494 493 tests[i].keys.dport; ··· 504 503 CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); 505 504 506 505 err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); 507 - CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err); 506 + ASSERT_OK(err, "bpf_map_lookup_elem"); 508 507 509 - CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err); 510 508 CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); 511 509 512 510 err = bpf_map_delete_elem(keys_fd, &key); 513 - CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err); 511 + ASSERT_OK(err, "bpf_map_delete_elem"); 514 512 } 515 513 } 516 514 ··· 573 573 574 574 for (i = 0; i < ARRAY_SIZE(tests); i++) { 575 575 struct bpf_flow_keys flow_keys; 576 - struct bpf_prog_test_run_attr tattr = { 577 - .prog_fd = prog_fd, 576 + LIBBPF_OPTS(bpf_test_run_opts, topts, 578 577 .data_in = &tests[i].pkt, 579 578 .data_size_in = sizeof(tests[i].pkt), 580 579 .data_out = &flow_keys, 581 - }; 580 + ); 582 581 static struct bpf_flow_keys ctx = {}; 583 582 584 583 if (tests[i].flags) { 585 - tattr.ctx_in = &ctx; 586 - tattr.ctx_size_in = sizeof(ctx); 584 + topts.ctx_in = &ctx; 585 + topts.ctx_size_in = sizeof(ctx); 587 586 ctx.flags = tests[i].flags; 588 587 } 589 588 590 - err = bpf_prog_test_run_xattr(&tattr); 591 - CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) 
|| 592 - err || tattr.retval != 1, 593 - tests[i].name, 594 - "err %d errno %d retval %d duration %d size %u/%zu\n", 595 - err, errno, tattr.retval, tattr.duration, 596 - tattr.data_size_out, sizeof(flow_keys)); 589 + err = bpf_prog_test_run_opts(prog_fd, &topts); 590 + ASSERT_OK(err, "test_run"); 591 + ASSERT_EQ(topts.retval, 1, "test_run retval"); 592 + ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), 593 + "test_run data_size_out"); 597 594 CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); 598 595 } 599 596
+7 -9
tools/testing/selftests/bpf/prog_tests/kfree_skb.c
··· 53 53 void serial_test_kfree_skb(void) 54 54 { 55 55 struct __sk_buff skb = {}; 56 - struct bpf_prog_test_run_attr tattr = { 56 + LIBBPF_OPTS(bpf_test_run_opts, topts, 57 57 .data_in = &pkt_v6, 58 58 .data_size_in = sizeof(pkt_v6), 59 59 .ctx_in = &skb, 60 60 .ctx_size_in = sizeof(skb), 61 - }; 61 + ); 62 62 struct kfree_skb *skel = NULL; 63 63 struct bpf_link *link; 64 64 struct bpf_object *obj; 65 65 struct perf_buffer *pb = NULL; 66 - int err; 66 + int err, prog_fd; 67 67 bool passed = false; 68 68 __u32 duration = 0; 69 69 const int zero = 0; 70 70 bool test_ok[2]; 71 71 72 72 err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, 73 - &obj, &tattr.prog_fd); 73 + &obj, &prog_fd); 74 74 if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) 75 75 return; 76 76 ··· 100 100 goto close_prog; 101 101 102 102 memcpy(skb.cb, &cb, sizeof(cb)); 103 - err = bpf_prog_test_run_xattr(&tattr); 104 - duration = tattr.duration; 105 - CHECK(err || tattr.retval, "ipv6", 106 - "err %d errno %d retval %d duration %d\n", 107 - err, errno, tattr.retval, duration); 103 + err = bpf_prog_test_run_opts(prog_fd, &topts); 104 + ASSERT_OK(err, "ipv6 test_run"); 105 + ASSERT_OK(topts.retval, "ipv6 test_run retval"); 108 106 109 107 /* read perf buffer */ 110 108 err = perf_buffer__poll(pb, 100);
+77
tools/testing/selftests/bpf/prog_tests/prog_run_opts.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <test_progs.h> 3 + #include <network_helpers.h> 4 + 5 + #include "test_pkt_access.skel.h" 6 + 7 + static const __u32 duration; 8 + 9 + static void check_run_cnt(int prog_fd, __u64 run_cnt) 10 + { 11 + struct bpf_prog_info info = {}; 12 + __u32 info_len = sizeof(info); 13 + int err; 14 + 15 + err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); 16 + if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd)) 17 + return; 18 + 19 + CHECK(run_cnt != info.run_cnt, "run_cnt", 20 + "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt); 21 + } 22 + 23 + void test_prog_run_opts(void) 24 + { 25 + struct test_pkt_access *skel; 26 + int err, stats_fd = -1, prog_fd; 27 + char buf[10] = {}; 28 + __u64 run_cnt = 0; 29 + 30 + LIBBPF_OPTS(bpf_test_run_opts, topts, 31 + .repeat = 1, 32 + .data_in = &pkt_v4, 33 + .data_size_in = sizeof(pkt_v4), 34 + .data_out = buf, 35 + .data_size_out = 5, 36 + ); 37 + 38 + stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME); 39 + if (!ASSERT_GE(stats_fd, 0, "enable_stats good fd")) 40 + return; 41 + 42 + skel = test_pkt_access__open_and_load(); 43 + if (!ASSERT_OK_PTR(skel, "open_and_load")) 44 + goto cleanup; 45 + 46 + prog_fd = bpf_program__fd(skel->progs.test_pkt_access); 47 + 48 + err = bpf_prog_test_run_opts(prog_fd, &topts); 49 + ASSERT_EQ(errno, ENOSPC, "test_run errno"); 50 + ASSERT_ERR(err, "test_run"); 51 + ASSERT_OK(topts.retval, "test_run retval"); 52 + 53 + ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), "test_run data_size_out"); 54 + ASSERT_EQ(buf[5], 0, "overflow, BPF_PROG_TEST_RUN ignored size hint"); 55 + 56 + run_cnt += topts.repeat; 57 + check_run_cnt(prog_fd, run_cnt); 58 + 59 + topts.data_out = NULL; 60 + topts.data_size_out = 0; 61 + topts.repeat = 2; 62 + errno = 0; 63 + 64 + err = bpf_prog_test_run_opts(prog_fd, &topts); 65 + ASSERT_OK(errno, "run_no_output errno"); 66 + ASSERT_OK(err, "run_no_output err"); 67 + 
ASSERT_OK(topts.retval, "run_no_output retval"); 68 + 69 + run_cnt += topts.repeat; 70 + check_run_cnt(prog_fd, run_cnt); 71 + 72 + cleanup: 73 + if (skel) 74 + test_pkt_access__destroy(skel); 75 + if (stats_fd >= 0) 76 + close(stats_fd); 77 + }
-83
tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include <test_progs.h> 3 - #include <network_helpers.h> 4 - 5 - #include "test_pkt_access.skel.h" 6 - 7 - static const __u32 duration; 8 - 9 - static void check_run_cnt(int prog_fd, __u64 run_cnt) 10 - { 11 - struct bpf_prog_info info = {}; 12 - __u32 info_len = sizeof(info); 13 - int err; 14 - 15 - err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); 16 - if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd)) 17 - return; 18 - 19 - CHECK(run_cnt != info.run_cnt, "run_cnt", 20 - "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt); 21 - } 22 - 23 - void test_prog_run_xattr(void) 24 - { 25 - struct test_pkt_access *skel; 26 - int err, stats_fd = -1; 27 - char buf[10] = {}; 28 - __u64 run_cnt = 0; 29 - 30 - struct bpf_prog_test_run_attr tattr = { 31 - .repeat = 1, 32 - .data_in = &pkt_v4, 33 - .data_size_in = sizeof(pkt_v4), 34 - .data_out = buf, 35 - .data_size_out = 5, 36 - }; 37 - 38 - stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME); 39 - if (CHECK_ATTR(stats_fd < 0, "enable_stats", "failed %d\n", errno)) 40 - return; 41 - 42 - skel = test_pkt_access__open_and_load(); 43 - if (CHECK_ATTR(!skel, "open_and_load", "failed\n")) 44 - goto cleanup; 45 - 46 - tattr.prog_fd = bpf_program__fd(skel->progs.test_pkt_access); 47 - 48 - err = bpf_prog_test_run_xattr(&tattr); 49 - CHECK_ATTR(err >= 0 || errno != ENOSPC || tattr.retval, "run", 50 - "err %d errno %d retval %d\n", err, errno, tattr.retval); 51 - 52 - CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out", 53 - "incorrect output size, want %zu have %u\n", 54 - sizeof(pkt_v4), tattr.data_size_out); 55 - 56 - CHECK_ATTR(buf[5] != 0, "overflow", 57 - "BPF_PROG_TEST_RUN ignored size hint\n"); 58 - 59 - run_cnt += tattr.repeat; 60 - check_run_cnt(tattr.prog_fd, run_cnt); 61 - 62 - tattr.data_out = NULL; 63 - tattr.data_size_out = 0; 64 - tattr.repeat = 2; 65 - errno = 0; 66 - 67 - err = 
bpf_prog_test_run_xattr(&tattr); 68 - CHECK_ATTR(err || errno || tattr.retval, "run_no_output", 69 - "err %d errno %d retval %d\n", err, errno, tattr.retval); 70 - 71 - tattr.data_size_out = 1; 72 - err = bpf_prog_test_run_xattr(&tattr); 73 - CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err); 74 - 75 - run_cnt += tattr.repeat; 76 - check_run_cnt(tattr.prog_fd, run_cnt); 77 - 78 - cleanup: 79 - if (skel) 80 - test_pkt_access__destroy(skel); 81 - if (stats_fd >= 0) 82 - close(stats_fd); 83 - }
+27 -37
tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
··· 5 5 #include "bpf/libbpf_internal.h" 6 6 #include "test_raw_tp_test_run.skel.h" 7 7 8 - static int duration; 9 - 10 8 void test_raw_tp_test_run(void) 11 9 { 12 - struct bpf_prog_test_run_attr test_attr = {}; 13 10 int comm_fd = -1, err, nr_online, i, prog_fd; 14 11 __u64 args[2] = {0x1234ULL, 0x5678ULL}; 15 12 int expected_retval = 0x1234 + 0x5678; 16 13 struct test_raw_tp_test_run *skel; 17 14 char buf[] = "new_name"; 18 15 bool *online = NULL; 19 - DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, 20 - .ctx_in = args, 21 - .ctx_size_in = sizeof(args), 22 - .flags = BPF_F_TEST_RUN_ON_CPU, 23 - ); 16 + LIBBPF_OPTS(bpf_test_run_opts, opts, 17 + .ctx_in = args, 18 + .ctx_size_in = sizeof(args), 19 + .flags = BPF_F_TEST_RUN_ON_CPU, 20 + ); 24 21 25 22 err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online, 26 23 &nr_online); 27 - if (CHECK(err, "parse_cpu_mask_file", "err %d\n", err)) 24 + if (!ASSERT_OK(err, "parse_cpu_mask_file")) 28 25 return; 29 26 30 27 skel = test_raw_tp_test_run__open_and_load(); 31 - if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) 28 + if (!ASSERT_OK_PTR(skel, "skel_open")) 32 29 goto cleanup; 33 30 34 31 err = test_raw_tp_test_run__attach(skel); 35 - if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) 32 + if (!ASSERT_OK(err, "skel_attach")) 36 33 goto cleanup; 37 34 38 35 comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); 39 - if (CHECK(comm_fd < 0, "open /proc/self/comm", "err %d\n", errno)) 36 + if (!ASSERT_GE(comm_fd, 0, "open /proc/self/comm")) 40 37 goto cleanup; 41 38 42 39 err = write(comm_fd, buf, sizeof(buf)); 43 - CHECK(err < 0, "task rename", "err %d", errno); 40 + ASSERT_GE(err, 0, "task rename"); 44 41 45 - CHECK(skel->bss->count == 0, "check_count", "didn't increase\n"); 46 - CHECK(skel->data->on_cpu != 0xffffffff, "check_on_cpu", "got wrong value\n"); 42 + ASSERT_NEQ(skel->bss->count, 0, "check_count"); 43 + ASSERT_EQ(skel->data->on_cpu, 0xffffffff, "check_on_cpu"); 47 44 48 45 prog_fd 
= bpf_program__fd(skel->progs.rename); 49 - test_attr.prog_fd = prog_fd; 50 - test_attr.ctx_in = args; 51 - test_attr.ctx_size_in = sizeof(__u64); 46 + opts.ctx_in = args; 47 + opts.ctx_size_in = sizeof(__u64); 52 48 53 - err = bpf_prog_test_run_xattr(&test_attr); 54 - CHECK(err == 0, "test_run", "should fail for too small ctx\n"); 49 + err = bpf_prog_test_run_opts(prog_fd, &opts); 50 + ASSERT_NEQ(err, 0, "test_run should fail for too small ctx"); 55 51 56 - test_attr.ctx_size_in = sizeof(args); 57 - err = bpf_prog_test_run_xattr(&test_attr); 58 - CHECK(err < 0, "test_run", "err %d\n", errno); 59 - CHECK(test_attr.retval != expected_retval, "check_retval", 60 - "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval); 52 + opts.ctx_size_in = sizeof(args); 53 + err = bpf_prog_test_run_opts(prog_fd, &opts); 54 + ASSERT_OK(err, "test_run"); 55 + ASSERT_EQ(opts.retval, expected_retval, "check_retval"); 61 56 62 57 for (i = 0; i < nr_online; i++) { 63 58 if (!online[i]) ··· 61 66 opts.cpu = i; 62 67 opts.retval = 0; 63 68 err = bpf_prog_test_run_opts(prog_fd, &opts); 64 - CHECK(err < 0, "test_run_opts", "err %d\n", errno); 65 - CHECK(skel->data->on_cpu != i, "check_on_cpu", 66 - "expect %d got %d\n", i, skel->data->on_cpu); 67 - CHECK(opts.retval != expected_retval, 68 - "check_retval", "expect 0x%x, got 0x%x\n", 69 - expected_retval, opts.retval); 69 + ASSERT_OK(err, "test_run_opts"); 70 + ASSERT_EQ(skel->data->on_cpu, i, "check_on_cpu"); 71 + ASSERT_EQ(opts.retval, expected_retval, "check_retval"); 70 72 } 71 73 72 74 /* invalid cpu ID should fail with ENXIO */ 73 75 opts.cpu = 0xffffffff; 74 76 err = bpf_prog_test_run_opts(prog_fd, &opts); 75 - CHECK(err >= 0 || errno != ENXIO, 76 - "test_run_opts_fail", 77 - "should failed with ENXIO\n"); 77 + ASSERT_EQ(errno, ENXIO, "test_run_opts should fail with ENXIO"); 78 + ASSERT_ERR(err, "test_run_opts_fail"); 78 79 79 80 /* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU should fail with EINVAL */ 80 81 opts.cpu = 1; 81 82 
opts.flags = 0; 82 83 err = bpf_prog_test_run_opts(prog_fd, &opts); 83 - CHECK(err >= 0 || errno != EINVAL, 84 - "test_run_opts_fail", 85 - "should failed with EINVAL\n"); 84 + ASSERT_EQ(errno, EINVAL, "test_run_opts should fail with EINVAL"); 85 + ASSERT_ERR(err, "test_run_opts_fail"); 86 86 87 87 cleanup: 88 88 close(comm_fd);
+28 -53
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
··· 20 20 .gso_size = 10, 21 21 .hwtstamp = 11, 22 22 }; 23 - struct bpf_prog_test_run_attr tattr = { 23 + LIBBPF_OPTS(bpf_test_run_opts, tattr, 24 24 .data_in = &pkt_v4, 25 25 .data_size_in = sizeof(pkt_v4), 26 26 .ctx_in = &skb, 27 27 .ctx_size_in = sizeof(skb), 28 28 .ctx_out = &skb, 29 29 .ctx_size_out = sizeof(skb), 30 - }; 30 + ); 31 31 struct bpf_object *obj; 32 - int err; 33 - int i; 32 + int err, prog_fd, i; 34 33 35 - err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj, 36 - &tattr.prog_fd); 37 - if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) 34 + err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, 35 + &obj, &prog_fd); 36 + if (!ASSERT_OK(err, "load")) 38 37 return; 39 38 40 39 /* ctx_in != NULL, ctx_size_in == 0 */ 41 40 42 41 tattr.ctx_size_in = 0; 43 - err = bpf_prog_test_run_xattr(&tattr); 44 - CHECK_ATTR(err == 0, "ctx_size_in", "err %d errno %d\n", err, errno); 42 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 43 + ASSERT_NEQ(err, 0, "ctx_size_in"); 45 44 tattr.ctx_size_in = sizeof(skb); 46 45 47 46 /* ctx_out != NULL, ctx_size_out == 0 */ 48 47 49 48 tattr.ctx_size_out = 0; 50 - err = bpf_prog_test_run_xattr(&tattr); 51 - CHECK_ATTR(err == 0, "ctx_size_out", "err %d errno %d\n", err, errno); 49 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 50 + ASSERT_NEQ(err, 0, "ctx_size_out"); 52 51 tattr.ctx_size_out = sizeof(skb); 53 52 54 53 /* non-zero [len, tc_index] fields should be rejected*/ 55 54 56 55 skb.len = 1; 57 - err = bpf_prog_test_run_xattr(&tattr); 58 - CHECK_ATTR(err == 0, "len", "err %d errno %d\n", err, errno); 56 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 57 + ASSERT_NEQ(err, 0, "len"); 59 58 skb.len = 0; 60 59 61 60 skb.tc_index = 1; 62 - err = bpf_prog_test_run_xattr(&tattr); 63 - CHECK_ATTR(err == 0, "tc_index", "err %d errno %d\n", err, errno); 61 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 62 + ASSERT_NEQ(err, 0, "tc_index"); 64 63 skb.tc_index = 0; 65 64 66 65 
/* non-zero [hash, sk] fields should be rejected */ 67 66 68 67 skb.hash = 1; 69 - err = bpf_prog_test_run_xattr(&tattr); 70 - CHECK_ATTR(err == 0, "hash", "err %d errno %d\n", err, errno); 68 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 69 + ASSERT_NEQ(err, 0, "hash"); 71 70 skb.hash = 0; 72 71 73 72 skb.sk = (struct bpf_sock *)1; 74 - err = bpf_prog_test_run_xattr(&tattr); 75 - CHECK_ATTR(err == 0, "sk", "err %d errno %d\n", err, errno); 73 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 74 + ASSERT_NEQ(err, 0, "sk"); 76 75 skb.sk = 0; 77 76 78 - err = bpf_prog_test_run_xattr(&tattr); 79 - CHECK_ATTR(err != 0 || tattr.retval, 80 - "run", 81 - "err %d errno %d retval %d\n", 82 - err, errno, tattr.retval); 83 - 84 - CHECK_ATTR(tattr.ctx_size_out != sizeof(skb), 85 - "ctx_size_out", 86 - "incorrect output size, want %zu have %u\n", 87 - sizeof(skb), tattr.ctx_size_out); 77 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 78 + ASSERT_OK(err, "test_run"); 79 + ASSERT_OK(tattr.retval, "test_run retval"); 80 + ASSERT_EQ(tattr.ctx_size_out, sizeof(skb), "ctx_size_out"); 88 81 89 82 for (i = 0; i < 5; i++) 90 - CHECK_ATTR(skb.cb[i] != i + 2, 91 - "ctx_out_cb", 92 - "skb->cb[i] == %d, expected %d\n", 93 - skb.cb[i], i + 2); 94 - CHECK_ATTR(skb.priority != 7, 95 - "ctx_out_priority", 96 - "skb->priority == %d, expected %d\n", 97 - skb.priority, 7); 98 - CHECK_ATTR(skb.ifindex != 1, 99 - "ctx_out_ifindex", 100 - "skb->ifindex == %d, expected %d\n", 101 - skb.ifindex, 1); 102 - CHECK_ATTR(skb.ingress_ifindex != 11, 103 - "ctx_out_ingress_ifindex", 104 - "skb->ingress_ifindex == %d, expected %d\n", 105 - skb.ingress_ifindex, 11); 106 - CHECK_ATTR(skb.tstamp != 8, 107 - "ctx_out_tstamp", 108 - "skb->tstamp == %lld, expected %d\n", 109 - skb.tstamp, 8); 110 - CHECK_ATTR(skb.mark != 10, 111 - "ctx_out_mark", 112 - "skb->mark == %u, expected %d\n", 113 - skb.mark, 10); 83 + ASSERT_EQ(skb.cb[i], i + 2, "ctx_out_cb"); 84 + ASSERT_EQ(skb.priority, 7, "ctx_out_priority"); 85 + 
ASSERT_EQ(skb.ifindex, 1, "ctx_out_ifindex"); 86 + ASSERT_EQ(skb.ingress_ifindex, 11, "ctx_out_ingress_ifindex"); 87 + ASSERT_EQ(skb.tstamp, 8, "ctx_out_tstamp"); 88 + ASSERT_EQ(skb.mark, 10, "ctx_out_mark"); 114 89 115 90 bpf_object__close(obj); 116 91 }
+8 -8
tools/testing/selftests/bpf/prog_tests/skb_helpers.c
··· 9 9 .gso_segs = 8, 10 10 .gso_size = 10, 11 11 }; 12 - struct bpf_prog_test_run_attr tattr = { 12 + LIBBPF_OPTS(bpf_test_run_opts, topts, 13 13 .data_in = &pkt_v4, 14 14 .data_size_in = sizeof(pkt_v4), 15 15 .ctx_in = &skb, 16 16 .ctx_size_in = sizeof(skb), 17 17 .ctx_out = &skb, 18 18 .ctx_size_out = sizeof(skb), 19 - }; 19 + ); 20 20 struct bpf_object *obj; 21 - int err; 21 + int err, prog_fd; 22 22 23 - err = bpf_prog_test_load("./test_skb_helpers.o", BPF_PROG_TYPE_SCHED_CLS, &obj, 24 - &tattr.prog_fd); 25 - if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) 23 + err = bpf_prog_test_load("./test_skb_helpers.o", 24 + BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 25 + if (!ASSERT_OK(err, "load")) 26 26 return; 27 - err = bpf_prog_test_run_xattr(&tattr); 28 - CHECK_ATTR(err, "len", "err %d errno %d\n", err, errno); 27 + err = bpf_prog_test_run_opts(prog_fd, &topts); 28 + ASSERT_OK(err, "test_run"); 29 29 bpf_object__close(obj); 30 30 }
+9 -11
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
··· 140 140 141 141 static void test_sockmap_update(enum bpf_map_type map_type) 142 142 { 143 - struct bpf_prog_test_run_attr tattr; 144 143 int err, prog, src, duration = 0; 145 144 struct test_sockmap_update *skel; 146 145 struct bpf_map *dst_map; 147 146 const __u32 zero = 0; 148 147 char dummy[14] = {0}; 148 + LIBBPF_OPTS(bpf_test_run_opts, topts, 149 + .data_in = dummy, 150 + .data_size_in = sizeof(dummy), 151 + .repeat = 1, 152 + ); 149 153 __s64 sk; 150 154 151 155 sk = connected_socket_v4(); ··· 171 167 if (CHECK(err, "update_elem(src)", "errno=%u\n", errno)) 172 168 goto out; 173 169 174 - tattr = (struct bpf_prog_test_run_attr){ 175 - .prog_fd = prog, 176 - .repeat = 1, 177 - .data_in = dummy, 178 - .data_size_in = sizeof(dummy), 179 - }; 180 - 181 - err = bpf_prog_test_run_xattr(&tattr); 182 - if (CHECK_ATTR(err || !tattr.retval, "bpf_prog_test_run", 183 - "errno=%u retval=%u\n", errno, tattr.retval)) 170 + err = bpf_prog_test_run_opts(prog, &topts); 171 + if (!ASSERT_OK(err, "test_run")) 172 + goto out; 173 + if (!ASSERT_NEQ(topts.retval, 0, "test_run retval")) 184 174 goto out; 185 175 186 176 compare_cookies(skel->maps.src, dst_map);
+5 -5
tools/testing/selftests/bpf/prog_tests/syscall.c
··· 20 20 .log_buf = (uintptr_t) verifier_log, 21 21 .log_size = sizeof(verifier_log), 22 22 }; 23 - struct bpf_prog_test_run_attr tattr = { 23 + LIBBPF_OPTS(bpf_test_run_opts, tattr, 24 24 .ctx_in = &ctx, 25 25 .ctx_size_in = sizeof(ctx), 26 - }; 26 + ); 27 27 struct syscall *skel = NULL; 28 28 __u64 key = 12, value = 0; 29 - int err; 29 + int err, prog_fd; 30 30 31 31 skel = syscall__open_and_load(); 32 32 if (!ASSERT_OK_PTR(skel, "skel_load")) 33 33 goto cleanup; 34 34 35 - tattr.prog_fd = bpf_program__fd(skel->progs.bpf_prog); 36 - err = bpf_prog_test_run_xattr(&tattr); 35 + prog_fd = bpf_program__fd(skel->progs.bpf_prog); 36 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 37 37 ASSERT_EQ(err, 0, "err"); 38 38 ASSERT_EQ(tattr.retval, 1, "retval"); 39 39 ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd");
+7 -7
tools/testing/selftests/bpf/prog_tests/test_profiler.c
··· 8 8 9 9 static int sanity_run(struct bpf_program *prog) 10 10 { 11 - struct bpf_prog_test_run_attr test_attr = {}; 11 + LIBBPF_OPTS(bpf_test_run_opts, test_attr); 12 12 __u64 args[] = {1, 2, 3}; 13 - __u32 duration = 0; 14 13 int err, prog_fd; 15 14 16 15 prog_fd = bpf_program__fd(prog); 17 - test_attr.prog_fd = prog_fd; 18 16 test_attr.ctx_in = args; 19 17 test_attr.ctx_size_in = sizeof(args); 20 - err = bpf_prog_test_run_xattr(&test_attr); 21 - if (CHECK(err || test_attr.retval, "test_run", 22 - "err %d errno %d retval %d duration %d\n", 23 - err, errno, test_attr.retval, duration)) 18 + err = bpf_prog_test_run_opts(prog_fd, &test_attr); 19 + if (!ASSERT_OK(err, "test_run")) 24 20 return -1; 21 + 22 + if (!ASSERT_OK(test_attr.retval, "test_run retval")) 23 + return -1; 24 + 25 25 return 0; 26 26 } 27 27
+6 -6
tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
··· 78 78 int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/; 79 79 struct bpf_object *obj; 80 80 int err, cnt, i; 81 - int max_grow; 81 + int max_grow, prog_fd; 82 82 83 - struct bpf_prog_test_run_attr tattr = { 83 + LIBBPF_OPTS(bpf_test_run_opts, tattr, 84 84 .repeat = 1, 85 85 .data_in = &buf, 86 86 .data_out = &buf, 87 87 .data_size_in = 0, /* Per test */ 88 88 .data_size_out = 0, /* Per test */ 89 - }; 89 + ); 90 90 91 - err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &tattr.prog_fd); 91 + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 92 92 if (ASSERT_OK(err, "test_xdp_adjust_tail_grow")) 93 93 return; 94 94 ··· 97 97 tattr.data_size_in = 64; /* Determine test case via pkt size */ 98 98 tattr.data_size_out = 128; /* Limit copy_size */ 99 99 /* Kernel side alloc packet memory area that is zero init */ 100 - err = bpf_prog_test_run_xattr(&tattr); 100 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 101 101 102 102 ASSERT_EQ(errno, ENOSPC, "case-64 errno"); /* Due limit copy_size in bpf_test_finish */ 103 103 ASSERT_EQ(tattr.retval, XDP_TX, "case-64 retval"); ··· 115 115 memset(buf, 2, sizeof(buf)); 116 116 tattr.data_size_in = 128; /* Determine test case via pkt size */ 117 117 tattr.data_size_out = sizeof(buf); /* Copy everything */ 118 - err = bpf_prog_test_run_xattr(&tattr); 118 + err = bpf_prog_test_run_opts(prog_fd, &tattr); 119 119 120 120 max_grow = 4096 - XDP_PACKET_HEADROOM - tailroom; /* 3520 */ 121 121 ASSERT_OK(err, "case-128");