Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Add type cast unit tests

Three tests are added. One is from John Fastabend ([1]) which tests
tracing style access for xdp program from the kernel ctx.
Another is a tc test to test both kernel ctx tracing style access
and explicit non-ctx type cast. The third one is for negative tests
including two tests, a tp_bpf test where the bpf_rdonly_cast()
returns an untrusted ptr which cannot be used as a helper argument,
and a tracepoint test where the kernel ctx is a u64.

Also added the test to DENYLIST.s390x since s390 does not currently
support calling kernel functions in JIT mode.

[1] https://lore.kernel.org/bpf/20221109215242.1279993-1-john.fastabend@gmail.com/

Signed-off-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/r/20221120195442.3114844-1-yhs@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Yonghong Song and committed by
Alexei Starovoitov
58d84bee a35b9af4

+198
+1
tools/testing/selftests/bpf/DENYLIST.s390x
··· 71 71 trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?) 72 72 tracing_struct # failed to auto-attach: -524 (trampoline) 73 73 trampoline_count # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22 (trampoline) 74 + type_cast # JIT does not support calling kernel function 74 75 unpriv_bpf_disabled # fentry 75 76 user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?) 76 77 verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
+114
tools/testing/selftests/bpf/prog_tests/type_cast.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ 3 + #include <test_progs.h> 4 + #include <network_helpers.h> 5 + #include "type_cast.skel.h" 6 + 7 + static void test_xdp(void) 8 + { 9 + struct type_cast *skel; 10 + int err, prog_fd; 11 + char buf[128]; 12 + 13 + LIBBPF_OPTS(bpf_test_run_opts, topts, 14 + .data_in = &pkt_v4, 15 + .data_size_in = sizeof(pkt_v4), 16 + .data_out = buf, 17 + .data_size_out = sizeof(buf), 18 + .repeat = 1, 19 + ); 20 + 21 + skel = type_cast__open(); 22 + if (!ASSERT_OK_PTR(skel, "skel_open")) 23 + return; 24 + 25 + bpf_program__set_autoload(skel->progs.md_xdp, true); 26 + err = type_cast__load(skel); 27 + if (!ASSERT_OK(err, "skel_load")) 28 + goto out; 29 + 30 + prog_fd = bpf_program__fd(skel->progs.md_xdp); 31 + err = bpf_prog_test_run_opts(prog_fd, &topts); 32 + ASSERT_OK(err, "test_run"); 33 + ASSERT_EQ(topts.retval, XDP_PASS, "xdp test_run retval"); 34 + 35 + ASSERT_EQ(skel->bss->ifindex, 1, "xdp_md ifindex"); 36 + ASSERT_EQ(skel->bss->ifindex, skel->bss->ingress_ifindex, "xdp_md ingress_ifindex"); 37 + ASSERT_STREQ(skel->bss->name, "lo", "xdp_md name"); 38 + ASSERT_NEQ(skel->bss->inum, 0, "xdp_md inum"); 39 + 40 + out: 41 + type_cast__destroy(skel); 42 + } 43 + 44 + static void test_tc(void) 45 + { 46 + struct type_cast *skel; 47 + int err, prog_fd; 48 + 49 + LIBBPF_OPTS(bpf_test_run_opts, topts, 50 + .data_in = &pkt_v4, 51 + .data_size_in = sizeof(pkt_v4), 52 + .repeat = 1, 53 + ); 54 + 55 + skel = type_cast__open(); 56 + if (!ASSERT_OK_PTR(skel, "skel_open")) 57 + return; 58 + 59 + bpf_program__set_autoload(skel->progs.md_skb, true); 60 + err = type_cast__load(skel); 61 + if (!ASSERT_OK(err, "skel_load")) 62 + goto out; 63 + 64 + prog_fd = bpf_program__fd(skel->progs.md_skb); 65 + err = bpf_prog_test_run_opts(prog_fd, &topts); 66 + ASSERT_OK(err, "test_run"); 67 + ASSERT_EQ(topts.retval, 0, "tc test_run retval"); 68 + 69 + ASSERT_EQ(skel->bss->meta_len, 0, "skb 
meta_len"); 70 + ASSERT_EQ(skel->bss->frag0_len, 0, "skb frag0_len"); 71 + ASSERT_NEQ(skel->bss->kskb_len, 0, "skb len"); 72 + ASSERT_NEQ(skel->bss->kskb2_len, 0, "skb2 len"); 73 + ASSERT_EQ(skel->bss->kskb_len, skel->bss->kskb2_len, "skb len compare"); 74 + 75 + out: 76 + type_cast__destroy(skel); 77 + } 78 + 79 + static const char * const negative_tests[] = { 80 + "untrusted_ptr", 81 + "kctx_u64", 82 + }; 83 + 84 + static void test_negative(void) 85 + { 86 + struct bpf_program *prog; 87 + struct type_cast *skel; 88 + int i, err; 89 + 90 + for (i = 0; i < ARRAY_SIZE(negative_tests); i++) { 91 + skel = type_cast__open(); 92 + if (!ASSERT_OK_PTR(skel, "skel_open")) 93 + return; 94 + 95 + prog = bpf_object__find_program_by_name(skel->obj, negative_tests[i]); 96 + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) 97 + goto out; 98 + bpf_program__set_autoload(prog, true); 99 + err = type_cast__load(skel); 100 + ASSERT_ERR(err, "skel_load"); 101 + out: 102 + type_cast__destroy(skel); 103 + } 104 + } 105 + 106 + void test_type_cast(void) 107 + { 108 + if (test__start_subtest("xdp")) 109 + test_xdp(); 110 + if (test__start_subtest("tc")) 111 + test_tc(); 112 + if (test__start_subtest("negative")) 113 + test_negative(); 114 + }
+83
tools/testing/selftests/bpf/progs/type_cast.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

/* BPF-side programs exercising the bpf_cast_to_kern_ctx() and
 * bpf_rdonly_cast() kfuncs.  md_xdp/md_skb are positive tests;
 * untrusted_ptr/kctx_u64 are expected to be rejected by the verifier
 * (see prog_tests/type_cast.c).  All programs use SEC("?...") so the
 * userspace driver can autoload them one at a time.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

/* Task-local storage map; only used by the untrusted_ptr negative test
 * as a helper that requires a trusted task pointer argument.
 */
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} enter_id SEC(".maps");

#define IFNAMSIZ 16

/* Results exported to userspace through .bss for the assertions. */
int ifindex, ingress_ifindex;
char name[IFNAMSIZ];
unsigned int inum;
unsigned int meta_len, frag0_len, kskb_len, kskb2_len;

/* kfunc prototypes resolved against kernel BTF at load time. */
void *bpf_cast_to_kern_ctx(void *) __ksym;
void *bpf_rdonly_cast(void *, __u32) __ksym;

/* Positive: cast the xdp_md user ctx to the kernel struct xdp_buff and
 * do tracing-style (BTF) pointer walks from it.
 */
SEC("?xdp")
int md_xdp(struct xdp_md *ctx)
{
	struct xdp_buff *kctx = bpf_cast_to_kern_ctx(ctx);
	struct net_device *dev;

	dev = kctx->rxq->dev;
	ifindex = dev->ifindex;
	inum = dev->nd_net.net->ns.inum;
	__builtin_memcpy(name, dev->name, IFNAMSIZ);
	/* Also read via the plain user ctx for a cross-check. */
	ingress_ifindex = ctx->ingress_ifindex;
	return XDP_PASS;
}

/* Positive: kernel-ctx cast plus an explicit non-ctx bpf_rdonly_cast(). */
SEC("?tc")
int md_skb(struct __sk_buff *skb)
{
	struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb);
	struct skb_shared_info *shared_info;
	struct sk_buff *kskb2;

	kskb_len = kskb->len;

	/* Simulate the following kernel macro:
	 * #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
	 */
	shared_info = bpf_rdonly_cast(kskb->head + kskb->end,
		bpf_core_type_id_kernel(struct skb_shared_info));
	meta_len = shared_info->meta_len;
	frag0_len = shared_info->frag_list->len;

	/* kskb2 should be equal to kskb */
	kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff));
	kskb2_len = kskb2->len;
	return 0;
}

/* Negative: bpf_rdonly_cast() yields an untrusted pointer, which must
 * not be accepted as a helper argument (bpf_task_storage_get() here).
 */
SEC("?tp_btf/sys_enter")
int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
{
	struct task_struct *task, *task_dup;
	long *ptr;

	task = bpf_get_current_task_btf();
	task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct));
	(void)bpf_task_storage_get(&enter_id, task_dup, 0, 0);
	return 0;
}

/* Negative: for a plain tracepoint the kernel ctx is a u64, so casting
 * it must be rejected by the verifier.
 */
SEC("?tracepoint/syscalls/sys_enter_nanosleep")
int kctx_u64(void *ctx)
{
	u64 *kctx = bpf_rdonly_cast(ctx, bpf_core_type_id_kernel(u64));

	(void)kctx;
	return 0;
}

char _license[] SEC("license") = "GPL";