Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2020-03-27

The following pull-request contains BPF updates for your *net* tree.

We've added 3 non-merge commits during the last 4 day(s) which contain
a total of 4 files changed, 25 insertions(+), 20 deletions(-).

The main changes are:

1) Explicitly memset the bpf_attr structure on bpf() syscall to avoid
having to rely on the compiler to do so. Issues have been noticed on
some compilers with padding and other oddities where the request was
then unexpectedly rejected, from Greg Kroah-Hartman.

2) Sanitize the bpf_struct_ops TCP congestion control name in order to
avoid problematic characters such as whitespaces, from Martin KaFai Lau.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+25 -20
+1
include/linux/bpf.h
··· 160 160 } 161 161 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, 162 162 bool lock_src); 163 + int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); 163 164 164 165 struct bpf_offload_dev; 165 166 struct bpf_offloaded_map;
+2 -1
kernel/bpf/btf.c
··· 4564 4564 union bpf_attr __user *uattr) 4565 4565 { 4566 4566 struct bpf_btf_info __user *uinfo; 4567 - struct bpf_btf_info info = {}; 4567 + struct bpf_btf_info info; 4568 4568 u32 info_copy, btf_copy; 4569 4569 void __user *ubtf; 4570 4570 u32 uinfo_len; ··· 4573 4573 uinfo_len = attr->info.info_len; 4574 4574 4575 4575 info_copy = min_t(u32, uinfo_len, sizeof(info)); 4576 + memset(&info, 0, sizeof(info)); 4576 4577 if (copy_from_user(&info, uinfo, info_copy)) 4577 4578 return -EFAULT; 4578 4579
+20 -14
kernel/bpf/syscall.c
··· 696 696 offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ 697 697 sizeof(attr->CMD##_LAST_FIELD)) != NULL 698 698 699 - /* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes. 700 - * Return 0 on success and < 0 on error. 699 + /* dst and src must have at least "size" number of bytes. 700 + * Return strlen on success and < 0 on error. 701 701 */ 702 - static int bpf_obj_name_cpy(char *dst, const char *src) 702 + int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size) 703 703 { 704 - const char *end = src + BPF_OBJ_NAME_LEN; 704 + const char *end = src + size; 705 + const char *orig_src = src; 705 706 706 - memset(dst, 0, BPF_OBJ_NAME_LEN); 707 + memset(dst, 0, size); 707 708 /* Copy all isalnum(), '_' and '.' chars. */ 708 709 while (src < end && *src) { 709 710 if (!isalnum(*src) && ··· 713 712 *dst++ = *src++; 714 713 } 715 714 716 - /* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */ 715 + /* No '\0' found in "size" number of bytes */ 717 716 if (src == end) 718 717 return -EINVAL; 719 718 720 - return 0; 719 + return src - orig_src; 721 720 } 722 721 723 722 int map_check_no_btf(const struct bpf_map *map, ··· 811 810 if (IS_ERR(map)) 812 811 return PTR_ERR(map); 813 812 814 - err = bpf_obj_name_cpy(map->name, attr->map_name); 815 - if (err) 813 + err = bpf_obj_name_cpy(map->name, attr->map_name, 814 + sizeof(attr->map_name)); 815 + if (err < 0) 816 816 goto free_map; 817 817 818 818 atomic64_set(&map->refcnt, 1); ··· 2100 2098 goto free_prog; 2101 2099 2102 2100 prog->aux->load_time = ktime_get_boottime_ns(); 2103 - err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name); 2104 - if (err) 2101 + err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name, 2102 + sizeof(attr->prog_name)); 2103 + if (err < 0) 2105 2104 goto free_prog; 2106 2105 2107 2106 /* run eBPF verifier */ ··· 2795 2792 union bpf_attr __user *uattr) 2796 2793 { 2797 2794 struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); 2798 - struct bpf_prog_info info = {}; 2795 + struct bpf_prog_info info; 2799 2796 u32 info_len = attr->info.info_len; 2800 2797 struct bpf_prog_stats stats; 2801 2798 char __user *uinsns; ··· 2807 2804 return err; 2808 2805 info_len = min_t(u32, sizeof(info), info_len); 2809 2806 2807 + memset(&info, 0, sizeof(info)); 2810 2808 if (copy_from_user(&info, uinfo, info_len)) 2811 2809 return -EFAULT; 2812 2810 ··· 3071 3067 union bpf_attr __user *uattr) 3072 3068 { 3073 3069 struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); 3074 - struct bpf_map_info info = {}; 3070 + struct bpf_map_info info; 3075 3071 u32 info_len = attr->info.info_len; 3076 3072 int err; 3077 3073 ··· 3080 3076 return err; 3081 3077 info_len = min_t(u32, sizeof(info), info_len); 3082 3078 3079 + memset(&info, 0, sizeof(info)); 3083 3080 info.type = map->map_type; 3084 3081 info.id = map->id; 3085 3082 info.key_size = map->key_size; ··· 3364 3359 3365 3360 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) 3366 3361 { 3367 - union bpf_attr attr = {}; 3362 + union bpf_attr attr; 3368 3363 int err; 3369 3364 3370 3365 if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) ··· 3376 3371 size = min_t(u32, size, sizeof(attr)); 3377 3372 3378 3373 /* copy attributes from user space, may be less than sizeof(bpf_attr) */ 3374 + memset(&attr, 0, sizeof(attr)); 3379 3375 if (copy_from_user(&attr, uattr, size) != 0) 3380 3376 return -EFAULT; 3381 3377
+2 -5
net/ipv4/bpf_tcp_ca.c
··· 184 184 { 185 185 const struct tcp_congestion_ops *utcp_ca; 186 186 struct tcp_congestion_ops *tcp_ca; 187 - size_t tcp_ca_name_len; 188 187 int prog_fd; 189 188 u32 moff; 190 189 ··· 198 199 tcp_ca->flags = utcp_ca->flags; 199 200 return 1; 200 201 case offsetof(struct tcp_congestion_ops, name): 201 - tcp_ca_name_len = strnlen(utcp_ca->name, sizeof(utcp_ca->name)); 202 - if (!tcp_ca_name_len || 203 - tcp_ca_name_len == sizeof(utcp_ca->name)) 202 + if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name, 203 + sizeof(tcp_ca->name)) <= 0) 204 204 return -EINVAL; 205 205 if (tcp_ca_find(utcp_ca->name)) 206 206 return -EEXIST; 207 - memcpy(tcp_ca->name, utcp_ca->name, sizeof(tcp_ca->name)); 208 207 return 1; 209 208 } 210 209