Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

samples, bpf: Refactor kprobe, tail call kern progs map definition

Because the previous two commits replaced the bpf_load implementation of
the user programs with libbpf, the corresponding kernel programs' map
definitions can be replaced with the new BTF-defined map syntax.

This commit only updates the samples that use the libbpf API for loading
bpf programs, not those that use bpf_load.

Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20200516040608.1377876-6-danieltimlee@gmail.com

authored by

Daniel T. Lee and committed by
Daniel Borkmann
59929cd1 14846dda

+93 -91
+6 -6
samples/bpf/sampleip_kern.c
··· 13 13 14 14 #define MAX_IPS 8192 15 15 16 - struct bpf_map_def SEC("maps") ip_map = { 17 - .type = BPF_MAP_TYPE_HASH, 18 - .key_size = sizeof(u64), 19 - .value_size = sizeof(u32), 20 - .max_entries = MAX_IPS, 21 - }; 16 + struct { 17 + __uint(type, BPF_MAP_TYPE_HASH); 18 + __type(key, u64); 19 + __type(value, u32); 20 + __uint(max_entries, MAX_IPS); 21 + } ip_map SEC(".maps"); 22 22 23 23 SEC("perf_event") 24 24 int do_sample(struct bpf_perf_event_data *ctx)
+18 -18
samples/bpf/sockex3_kern.c
··· 19 19 20 20 #define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F 21 21 22 - struct bpf_map_def SEC("maps") jmp_table = { 23 - .type = BPF_MAP_TYPE_PROG_ARRAY, 24 - .key_size = sizeof(u32), 25 - .value_size = sizeof(u32), 26 - .max_entries = 8, 27 - }; 22 + struct { 23 + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); 24 + __uint(key_size, sizeof(u32)); 25 + __uint(value_size, sizeof(u32)); 26 + __uint(max_entries, 8); 27 + } jmp_table SEC(".maps"); 28 28 29 29 #define PARSE_VLAN 1 30 30 #define PARSE_MPLS 2 ··· 92 92 struct flow_key_record flow; 93 93 }; 94 94 95 - struct bpf_map_def SEC("maps") percpu_map = { 96 - .type = BPF_MAP_TYPE_ARRAY, 97 - .key_size = sizeof(__u32), 98 - .value_size = sizeof(struct globals), 99 - .max_entries = 32, 100 - }; 95 + struct { 96 + __uint(type, BPF_MAP_TYPE_ARRAY); 97 + __type(key, __u32); 98 + __type(value, struct globals); 99 + __uint(max_entries, 32); 100 + } percpu_map SEC(".maps"); 101 101 102 102 /* user poor man's per_cpu until native support is ready */ 103 103 static struct globals *this_cpu_globals(void) ··· 113 113 __u64 bytes; 114 114 }; 115 115 116 - struct bpf_map_def SEC("maps") hash_map = { 117 - .type = BPF_MAP_TYPE_HASH, 118 - .key_size = sizeof(struct flow_key_record), 119 - .value_size = sizeof(struct pair), 120 - .max_entries = 1024, 121 - }; 116 + struct { 117 + __uint(type, BPF_MAP_TYPE_HASH); 118 + __type(key, struct flow_key_record); 119 + __type(value, struct pair); 120 + __uint(max_entries, 1024); 121 + } hash_map SEC(".maps"); 122 122 123 123 static void update_stats(struct __sk_buff *skb, struct globals *g) 124 124 {
+12 -12
samples/bpf/trace_event_kern.c
··· 18 18 u32 userstack; 19 19 }; 20 20 21 - struct bpf_map_def SEC("maps") counts = { 22 - .type = BPF_MAP_TYPE_HASH, 23 - .key_size = sizeof(struct key_t), 24 - .value_size = sizeof(u64), 25 - .max_entries = 10000, 26 - }; 21 + struct { 22 + __uint(type, BPF_MAP_TYPE_HASH); 23 + __type(key, struct key_t); 24 + __type(value, u64); 25 + __uint(max_entries, 10000); 26 + } counts SEC(".maps"); 27 27 28 - struct bpf_map_def SEC("maps") stackmap = { 29 - .type = BPF_MAP_TYPE_STACK_TRACE, 30 - .key_size = sizeof(u32), 31 - .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64), 32 - .max_entries = 10000, 33 - }; 28 + struct { 29 + __uint(type, BPF_MAP_TYPE_STACK_TRACE); 30 + __uint(key_size, sizeof(u32)); 31 + __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64)); 32 + __uint(max_entries, 10000); 33 + } stackmap SEC(".maps"); 34 34 35 35 #define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP) 36 36 #define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
+12 -12
samples/bpf/tracex2_kern.c
··· 12 12 #include <bpf/bpf_tracing.h> 13 13 #include "trace_common.h" 14 14 15 - struct bpf_map_def SEC("maps") my_map = { 16 - .type = BPF_MAP_TYPE_HASH, 17 - .key_size = sizeof(long), 18 - .value_size = sizeof(long), 19 - .max_entries = 1024, 20 - }; 15 + struct { 16 + __uint(type, BPF_MAP_TYPE_HASH); 17 + __type(key, long); 18 + __type(value, long); 19 + __uint(max_entries, 1024); 20 + } my_map SEC(".maps"); 21 21 22 22 /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe 23 23 * example will no longer be meaningful ··· 71 71 u64 index; 72 72 }; 73 73 74 - struct bpf_map_def SEC("maps") my_hist_map = { 75 - .type = BPF_MAP_TYPE_PERCPU_HASH, 76 - .key_size = sizeof(struct hist_key), 77 - .value_size = sizeof(long), 78 - .max_entries = 1024, 79 - }; 74 + struct { 75 + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); 76 + __uint(key_size, sizeof(struct hist_key)); 77 + __uint(value_size, sizeof(long)); 78 + __uint(max_entries, 1024); 79 + } my_hist_map SEC(".maps"); 80 80 81 81 SEC("kprobe/" SYSCALL(sys_write)) 82 82 int bpf_prog3(struct pt_regs *ctx)
+12 -12
samples/bpf/tracex3_kern.c
··· 11 11 #include <bpf/bpf_helpers.h> 12 12 #include <bpf/bpf_tracing.h> 13 13 14 - struct bpf_map_def SEC("maps") my_map = { 15 - .type = BPF_MAP_TYPE_HASH, 16 - .key_size = sizeof(long), 17 - .value_size = sizeof(u64), 18 - .max_entries = 4096, 19 - }; 14 + struct { 15 + __uint(type, BPF_MAP_TYPE_HASH); 16 + __type(key, long); 17 + __type(value, u64); 18 + __uint(max_entries, 4096); 19 + } my_map SEC(".maps"); 20 20 21 21 /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe 22 22 * example will no longer be meaningful ··· 42 42 43 43 #define SLOTS 100 44 44 45 - struct bpf_map_def SEC("maps") lat_map = { 46 - .type = BPF_MAP_TYPE_PERCPU_ARRAY, 47 - .key_size = sizeof(u32), 48 - .value_size = sizeof(u64), 49 - .max_entries = SLOTS, 50 - }; 45 + struct { 46 + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 47 + __uint(key_size, sizeof(u32)); 48 + __uint(value_size, sizeof(u64)); 49 + __uint(max_entries, SLOTS); 50 + } lat_map SEC(".maps"); 51 51 52 52 SEC("kprobe/blk_account_io_completion") 53 53 int bpf_prog2(struct pt_regs *ctx)
+6 -6
samples/bpf/tracex4_kern.c
··· 15 15 u64 ip; 16 16 }; 17 17 18 - struct bpf_map_def SEC("maps") my_map = { 19 - .type = BPF_MAP_TYPE_HASH, 20 - .key_size = sizeof(long), 21 - .value_size = sizeof(struct pair), 22 - .max_entries = 1000000, 23 - }; 18 + struct { 19 + __uint(type, BPF_MAP_TYPE_HASH); 20 + __type(key, long); 21 + __type(value, struct pair); 22 + __uint(max_entries, 1000000); 23 + } my_map SEC(".maps"); 24 24 25 25 /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe 26 26 * example will no longer be meaningful
+7 -7
samples/bpf/tracex5_kern.c
··· 15 15 16 16 #define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F 17 17 18 - struct bpf_map_def SEC("maps") progs = { 19 - .type = BPF_MAP_TYPE_PROG_ARRAY, 20 - .key_size = sizeof(u32), 21 - .value_size = sizeof(u32), 18 + struct { 19 + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); 20 + __uint(key_size, sizeof(u32)); 21 + __uint(value_size, sizeof(u32)); 22 22 #ifdef __mips__ 23 - .max_entries = 6000, /* MIPS n64 syscalls start at 5000 */ 23 + __uint(max_entries, 6000); /* MIPS n64 syscalls start at 5000 */ 24 24 #else 25 - .max_entries = 1024, 25 + __uint(max_entries, 1024); 26 26 #endif 27 - }; 27 + } progs SEC(".maps"); 28 28 29 29 SEC("kprobe/__seccomp_filter") 30 30 int bpf_prog1(struct pt_regs *ctx)
+20 -18
samples/bpf/tracex6_kern.c
··· 3 3 #include <uapi/linux/bpf.h> 4 4 #include <bpf/bpf_helpers.h> 5 5 6 - struct bpf_map_def SEC("maps") counters = { 7 - .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, 8 - .key_size = sizeof(int), 9 - .value_size = sizeof(u32), 10 - .max_entries = 64, 11 - }; 12 - struct bpf_map_def SEC("maps") values = { 13 - .type = BPF_MAP_TYPE_HASH, 14 - .key_size = sizeof(int), 15 - .value_size = sizeof(u64), 16 - .max_entries = 64, 17 - }; 18 - struct bpf_map_def SEC("maps") values2 = { 19 - .type = BPF_MAP_TYPE_HASH, 20 - .key_size = sizeof(int), 21 - .value_size = sizeof(struct bpf_perf_event_value), 22 - .max_entries = 64, 23 - }; 6 + struct { 7 + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); 8 + __uint(key_size, sizeof(int)); 9 + __uint(value_size, sizeof(u32)); 10 + __uint(max_entries, 64); 11 + } counters SEC(".maps"); 12 + 13 + struct { 14 + __uint(type, BPF_MAP_TYPE_HASH); 15 + __type(key, int); 16 + __type(value, u64); 17 + __uint(max_entries, 64); 18 + } values SEC(".maps"); 19 + 20 + struct { 21 + __uint(type, BPF_MAP_TYPE_HASH); 22 + __type(key, int); 23 + __type(value, struct bpf_perf_event_value); 24 + __uint(max_entries, 64); 25 + } values2 SEC(".maps"); 24 26 25 27 SEC("kprobe/htab_map_get_next_key") 26 28 int bpf_prog1(struct pt_regs *ctx)