Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libbpf: Add BPF_PROG_BIND_MAP syscall and use it on .rodata section

The patch adds a simple wrapper bpf_prog_bind_map around the syscall.
When libbpf tries to load a program, it will probe the kernel for
the support of this syscall and unconditionally bind .rodata section
to the program.

Signed-off-by: YiFei Zhu <zhuyifei@google.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Cc: YiFei Zhu <zhuyifei1999@gmail.com>
Link: https://lore.kernel.org/bpf/20200915234543.3220146-4-sdf@google.com

Authored by YiFei Zhu, committed by Alexei Starovoitov
5d23328d ef15314a

+94
+16
tools/lib/bpf/bpf.c
··· 872 872 873 873 return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr)); 874 874 } 875 + 876 + int bpf_prog_bind_map(int prog_fd, int map_fd, 877 + const struct bpf_prog_bind_opts *opts) 878 + { 879 + union bpf_attr attr; 880 + 881 + if (!OPTS_VALID(opts, bpf_prog_bind_opts)) 882 + return -EINVAL; 883 + 884 + memset(&attr, 0, sizeof(attr)); 885 + attr.prog_bind_map.prog_fd = prog_fd; 886 + attr.prog_bind_map.map_fd = map_fd; 887 + attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0); 888 + 889 + return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr)); 890 + }
+8
tools/lib/bpf/bpf.h
··· 243 243 enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */ 244 244 LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type); 245 245 246 + struct bpf_prog_bind_opts { 247 + size_t sz; /* size of this struct for forward/backward compatibility */ 248 + __u32 flags; 249 + }; 250 + #define bpf_prog_bind_opts__last_field flags 251 + 252 + LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd, 253 + const struct bpf_prog_bind_opts *opts); 246 254 #ifdef __cplusplus 247 255 } /* extern "C" */ 248 256 #endif
+69
tools/lib/bpf/libbpf.c
··· 174 174 FEAT_EXP_ATTACH_TYPE, 175 175 /* bpf_probe_read_{kernel,user}[_str] helpers */ 176 176 FEAT_PROBE_READ_KERN, 177 + /* BPF_PROG_BIND_MAP is supported */ 178 + FEAT_PROG_BIND_MAP, 177 179 __FEAT_CNT, 178 180 }; 179 181 ··· 411 409 struct extern_desc *externs; 412 410 int nr_extern; 413 411 int kconfig_map_idx; 412 + int rodata_map_idx; 414 413 415 414 bool loaded; 416 415 bool has_subcalls; ··· 1073 1070 obj->efile.bss_shndx = -1; 1074 1071 obj->efile.st_ops_shndx = -1; 1075 1072 obj->kconfig_map_idx = -1; 1073 + obj->rodata_map_idx = -1; 1076 1074 1077 1075 obj->kern_version = get_kernel_version(); 1078 1076 obj->loaded = false; ··· 1432 1428 obj->efile.rodata->d_size); 1433 1429 if (err) 1434 1430 return err; 1431 + 1432 + obj->rodata_map_idx = obj->nr_maps - 1; 1435 1433 } 1436 1434 if (obj->efile.bss_shndx >= 0) { 1437 1435 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, ··· 3900 3894 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); 3901 3895 } 3902 3896 3897 + static int probe_prog_bind_map(void) 3898 + { 3899 + struct bpf_load_program_attr prg_attr; 3900 + struct bpf_create_map_attr map_attr; 3901 + char *cp, errmsg[STRERR_BUFSIZE]; 3902 + struct bpf_insn insns[] = { 3903 + BPF_MOV64_IMM(BPF_REG_0, 0), 3904 + BPF_EXIT_INSN(), 3905 + }; 3906 + int ret, map, prog; 3907 + 3908 + memset(&map_attr, 0, sizeof(map_attr)); 3909 + map_attr.map_type = BPF_MAP_TYPE_ARRAY; 3910 + map_attr.key_size = sizeof(int); 3911 + map_attr.value_size = 32; 3912 + map_attr.max_entries = 1; 3913 + 3914 + map = bpf_create_map_xattr(&map_attr); 3915 + if (map < 0) { 3916 + ret = -errno; 3917 + cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); 3918 + pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", 3919 + __func__, cp, -ret); 3920 + return ret; 3921 + } 3922 + 3923 + memset(&prg_attr, 0, sizeof(prg_attr)); 3924 + prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; 3925 + prg_attr.insns = insns; 3926 + prg_attr.insns_cnt = ARRAY_SIZE(insns); 3927 + prg_attr.license = "GPL"; 3928 + 3929 + prog = bpf_load_program_xattr(&prg_attr, NULL, 0); 3930 + if (prog < 0) { 3931 + close(map); 3932 + return 0; 3933 + } 3934 + 3935 + ret = bpf_prog_bind_map(prog, map, NULL); 3936 + 3937 + close(map); 3938 + close(prog); 3939 + 3940 + return ret >= 0; 3941 + } 3942 + 3903 3943 enum kern_feature_result { 3904 3944 FEAT_UNKNOWN = 0, 3905 3945 FEAT_SUPPORTED = 1, ··· 3986 3934 }, 3987 3935 [FEAT_PROBE_READ_KERN] = { 3988 3936 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel, 3937 + }, 3938 + [FEAT_PROG_BIND_MAP] = { 3939 + "BPF_PROG_BIND_MAP support", probe_prog_bind_map, 3989 3940 } 3990 3941 }; 3991 3942 ··· 6523 6468 if (ret >= 0) { 6524 6469 if (log_buf && load_attr.log_level) 6525 6470 pr_debug("verifier log:\n%s", log_buf); 6471 + 6472 + if (prog->obj->rodata_map_idx >= 0 && 6473 + kernel_supports(FEAT_PROG_BIND_MAP)) { 6474 + struct bpf_map *rodata_map = 6475 + &prog->obj->maps[prog->obj->rodata_map_idx]; 6476 + 6477 + if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) { 6478 + cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 6479 + pr_warn("prog '%s': failed to bind .rodata map: %s\n", 6480 + prog->name, cp); 6481 + /* Don't fail hard if can't bind rodata. */ 6482 + } 6483 + } 6484 + 6526 6485 *pfd = ret; 6527 6486 ret = 0; 6528 6487 goto out;
+1
tools/lib/bpf/libbpf.map
··· 302 302 303 303 LIBBPF_0.2.0 { 304 304 global: 305 + bpf_prog_bind_map; 305 306 bpf_program__section_name; 306 307 perf_buffer__buffer_cnt; 307 308 perf_buffer__buffer_fd;