Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Additional test for CO-RE in the kernel.

Add a test where the randmap() function is appended to three different bpf
programs. This checks the struct bpf_core_relo replication logic and the
offset adjustment in the gen loader part of libbpf.

The fourth bpf program has 360 CO-RE relocations from vmlinux, bpf_testmod,
and a non-existing type. It tests the candidate cache logic.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211201181040.23337-16-alexei.starovoitov@gmail.com

authored by

Alexei Starovoitov and committed by
Andrii Nakryiko
26b367e3 650c9dbd

+119 -1
+1 -1
tools/testing/selftests/bpf/Makefile
··· 326 326 327 327 LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \ 328 328 test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \ 329 - map_ptr_kern.c 329 + map_ptr_kern.c core_kern.c 330 330 # Generate both light skeleton and libbpf skeleton for these 331 331 LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test_subprog.c 332 332 SKEL_BLACKLIST += $$(LSKELS)
+14
tools/testing/selftests/bpf/prog_tests/core_kern.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2021 Facebook */ 3 + 4 + #include "test_progs.h" 5 + #include "core_kern.lskel.h" 6 + 7 + void test_core_kern_lskel(void) 8 + { 9 + struct core_kern_lskel *skel; 10 + 11 + skel = core_kern_lskel__open_and_load(); 12 + ASSERT_OK_PTR(skel, "open_and_load"); 13 + core_kern_lskel__destroy(skel); 14 + }
+104
tools/testing/selftests/bpf/progs/core_kern.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

#define ATTR __always_inline
#include "test_jhash.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 256);
} array1 SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 256);
} array2 SEC(".maps");

/* __noinline keeps this a separate subprogram, so it is appended to every
 * program that calls it; the dev->mtu access is a CO-RE relocation that
 * must be replicated (with adjusted offsets) into each copy.
 */
static __noinline int randmap(int v, const struct net_device *dev)
{
	struct bpf_map *map = (struct bpf_map *)&array1;
	int key = bpf_get_prandom_u32() & 0xff;
	int *val;

	/* pick one of the two arrays pseudo-randomly */
	if (bpf_get_prandom_u32() & 1)
		map = (struct bpf_map *)&array2;

	val = bpf_map_lookup_elem(map, &key);
	if (val)
		*val = bpf_get_prandom_u32() + v + dev->mtu;

	return 0;
}

/* Three differently-typed programs all calling the same subprog: this is
 * what exercises bpf_core_relo replication in libbpf's gen loader.
 */
SEC("tp_btf/xdp_devmap_xmit")
int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device
	     *from_dev, const struct net_device *to_dev, int sent, int drops,
	     int err)
{
	return randmap(from_dev->ifindex, from_dev);
}

SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	return randmap(dev->ifindex + skb->len, dev);
}

SEC("fexit/eth_type_trans")
int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	return randmap(dev->ifindex + skb->len, dev);
}

/* Never set at run time; guards dead code below so the verifier prunes it
 * while the compiler still emits its CO-RE relocations.
 */
volatile const int never;

struct __sk_bUfF /* it will not exist in vmlinux */ {
	int len;
} __attribute__((preserve_access_index));

struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ {
	size_t len;
} __attribute__((preserve_access_index));

SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	void *ptr;
	int ret = 0, nh_off, i = 0;

	nh_off = 14;

	/* pragma unroll doesn't work on large loops */
	/* Each expansion of C carries several CO-RE relocations; unrolled
	 * 90 times below to stress the candidate cache (the commit message
	 * counts 360 relocations total for this program).
	 */
#define C do { \
	ptr = data + i; \
	if (ptr + nh_off > data_end) \
		break; \
	ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
	if (never) { \
		/* below is a dead code with unresolvable CO-RE relo */ \
		i += ((struct __sk_bUfF *)ctx)->len; \
		/* this CO-RE relo may or may not resolve
		 * depending on whether bpf_testmod is loaded.
		 */ \
		i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \
	} \
	} while (0);
#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
	C30;C30;C30; /* 90 calls */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";