Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Add uptr support in the map_value of the task local storage.

This patch adds uptr support in the map_value of the task local storage.

struct map_value {
struct user_data __uptr *uptr;
};

struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
__type(key, int);
__type(value, struct map_value);
} datamap SEC(".maps");

A new bpf_obj_pin_uptrs() is added to pin the user page and
also store the kernel address back into the uptr for the
bpf prog to use later. It currently does not support
a uptr pointing to a user struct that spans two pages.
It also excludes PageHighMem support to keep it simple.
As of now, the 32-bit bpf jit is missing other, more crucial bpf
features. For example, many important bpf features now depend on
bpf kfuncs, but so far only one 32-bit arch (x86-32) supports
them — support that was added by me as an example when kfuncs
were first introduced to bpf.

A uptr can only be stored into the task local storage through the
update_elem syscall. This means a uptr will not be considered
if it is provided by a bpf prog through
bpf_task_storage_get(BPF_LOCAL_STORAGE_GET_F_CREATE).
This is enforced by calling
bpf_local_storage_update() with swap_uptrs==true only from
bpf_pid_task_storage_update_elem(); all other callers pass
swap_uptrs==false.

The swap_uptrs==true flag propagates down to bpf_selem_alloc(). This
is the only case where bpf_selem_alloc() takes the uptr value when
initializing the newly allocated selem. bpf_obj_swap_uptrs() is added
to swap the uptr between SDATA(selem)->data and the user-provided
map_value in "void *value". After the swap, SDATA(selem)->data
owns the uptr and the user-space-provided map_value holds NULL
in the uptr field.

bpf_obj_unpin_uptrs() is called when map->ops->map_update_elem()
returns an error. If map->ops->map_update_elem() has already reached
a state where the local storage has taken ownership of the uptr,
bpf_obj_unpin_uptrs() is a no-op because the uptr is NULL by
then. A __bpf_obj_unpin_uptrs() helper is added to make this
error-path unpin easier, so that it does not have to check
whether map->record is NULL.

BPF_F_LOCK is not supported when the map_value has uptr.
This can be revisited later if there is a use case. A similar
swap_uptrs idea can be considered.

The final piece is calling unpin_user_page() in bpf_obj_free_fields().
The earlier patch has ensured that, when needed, an RCU grace period
has passed before bpf_obj_free_fields() is called.

Cc: linux-mm@kvack.org
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Link: https://lore.kernel.org/r/20241023234759.860539-7-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Martin KaFai Lau and committed by
Alexei Starovoitov
ba512b00 9bac675e

+131 -7
+20
include/linux/bpf.h
··· 424 424 case BPF_KPTR_UNREF: 425 425 case BPF_KPTR_REF: 426 426 case BPF_KPTR_PERCPU: 427 + case BPF_UPTR: 427 428 break; 428 429 default: 429 430 WARN_ON_ONCE(1); ··· 511 510 static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src) 512 511 { 513 512 bpf_obj_memcpy(map->record, dst, src, map->value_size, true); 513 + } 514 + 515 + static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src) 516 + { 517 + unsigned long *src_uptr, *dst_uptr; 518 + const struct btf_field *field; 519 + int i; 520 + 521 + if (!btf_record_has_field(rec, BPF_UPTR)) 522 + return; 523 + 524 + for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { 525 + if (field->type != BPF_UPTR) 526 + continue; 527 + 528 + src_uptr = src + field->offset; 529 + dst_uptr = dst + field->offset; 530 + swap(*src_uptr, *dst_uptr); 531 + } 514 532 } 515 533 516 534 static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
+5 -2
kernel/bpf/bpf_local_storage.c
··· 99 99 } 100 100 101 101 if (selem) { 102 - if (value) 102 + if (value) { 103 + /* No need to call check_and_init_map_value as memory is zero init */ 103 104 copy_map_value(&smap->map, SDATA(selem)->data, value); 104 - /* No need to call check_and_init_map_value as memory is zero init */ 105 + if (swap_uptrs) 106 + bpf_obj_swap_uptrs(smap->map.record, SDATA(selem)->data, value); 107 + } 105 108 return selem; 106 109 } 107 110
+4 -1
kernel/bpf/bpf_task_storage.c
··· 129 129 struct pid *pid; 130 130 int fd, err; 131 131 132 + if ((map_flags & BPF_F_LOCK) && btf_record_has_field(map->record, BPF_UPTR)) 133 + return -EOPNOTSUPP; 134 + 132 135 fd = *(int *)key; 133 136 pid = pidfd_get_pid(fd, &f_flags); 134 137 if (IS_ERR(pid)) ··· 150 147 bpf_task_storage_lock(); 151 148 sdata = bpf_local_storage_update( 152 149 task, (struct bpf_local_storage_map *)map, value, map_flags, 153 - false, GFP_ATOMIC); 150 + true, GFP_ATOMIC); 154 151 bpf_task_storage_unlock(); 155 152 156 153 err = PTR_ERR_OR_ZERO(sdata);
+102 -4
kernel/bpf/syscall.c
··· 155 155 synchronize_rcu(); 156 156 } 157 157 158 + static void unpin_uptr_kaddr(void *kaddr) 159 + { 160 + if (kaddr) 161 + unpin_user_page(virt_to_page(kaddr)); 162 + } 163 + 164 + static void __bpf_obj_unpin_uptrs(struct btf_record *rec, u32 cnt, void *obj) 165 + { 166 + const struct btf_field *field; 167 + void **uptr_addr; 168 + int i; 169 + 170 + for (i = 0, field = rec->fields; i < cnt; i++, field++) { 171 + if (field->type != BPF_UPTR) 172 + continue; 173 + 174 + uptr_addr = obj + field->offset; 175 + unpin_uptr_kaddr(*uptr_addr); 176 + } 177 + } 178 + 179 + static void bpf_obj_unpin_uptrs(struct btf_record *rec, void *obj) 180 + { 181 + if (!btf_record_has_field(rec, BPF_UPTR)) 182 + return; 183 + 184 + __bpf_obj_unpin_uptrs(rec, rec->cnt, obj); 185 + } 186 + 187 + static int bpf_obj_pin_uptrs(struct btf_record *rec, void *obj) 188 + { 189 + const struct btf_field *field; 190 + const struct btf_type *t; 191 + unsigned long start, end; 192 + struct page *page; 193 + void **uptr_addr; 194 + int i, err; 195 + 196 + if (!btf_record_has_field(rec, BPF_UPTR)) 197 + return 0; 198 + 199 + for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { 200 + if (field->type != BPF_UPTR) 201 + continue; 202 + 203 + uptr_addr = obj + field->offset; 204 + start = *(unsigned long *)uptr_addr; 205 + if (!start) 206 + continue; 207 + 208 + t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); 209 + /* t->size was checked for zero before */ 210 + if (check_add_overflow(start, t->size - 1, &end)) { 211 + err = -EFAULT; 212 + goto unpin_all; 213 + } 214 + 215 + /* The uptr's struct cannot span across two pages */ 216 + if ((start & PAGE_MASK) != (end & PAGE_MASK)) { 217 + err = -EOPNOTSUPP; 218 + goto unpin_all; 219 + } 220 + 221 + err = pin_user_pages_fast(start, 1, FOLL_LONGTERM | FOLL_WRITE, &page); 222 + if (err != 1) 223 + goto unpin_all; 224 + 225 + if (PageHighMem(page)) { 226 + err = -EOPNOTSUPP; 227 + unpin_user_page(page); 228 + goto unpin_all; 229 + } 230 + 
231 + *uptr_addr = page_address(page) + offset_in_page(start); 232 + } 233 + 234 + return 0; 235 + 236 + unpin_all: 237 + __bpf_obj_unpin_uptrs(rec, i, obj); 238 + return err; 239 + } 240 + 158 241 static int bpf_map_update_value(struct bpf_map *map, struct file *map_file, 159 242 void *key, void *value, __u64 flags) 160 243 { ··· 282 199 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { 283 200 err = map->ops->map_push_elem(map, value, flags); 284 201 } else { 285 - rcu_read_lock(); 286 - err = map->ops->map_update_elem(map, key, value, flags); 287 - rcu_read_unlock(); 202 + err = bpf_obj_pin_uptrs(map->record, value); 203 + if (!err) { 204 + rcu_read_lock(); 205 + err = map->ops->map_update_elem(map, key, value, flags); 206 + rcu_read_unlock(); 207 + if (err) 208 + bpf_obj_unpin_uptrs(map->record, value); 209 + } 288 210 } 289 211 bpf_enable_instrumentation(); 290 212 ··· 804 716 field->kptr.dtor(xchgd_field); 805 717 } 806 718 break; 719 + case BPF_UPTR: 720 + /* The caller ensured that no one is using the uptr */ 721 + unpin_uptr_kaddr(*(void **)field_ptr); 722 + break; 807 723 case BPF_LIST_HEAD: 808 724 if (WARN_ON_ONCE(rec->spin_lock_off < 0)) 809 725 continue; ··· 1199 1107 1200 1108 map->record = btf_parse_fields(btf, value_type, 1201 1109 BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD | 1202 - BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE, 1110 + BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE | BPF_UPTR, 1203 1111 map->value_size); 1204 1112 if (!IS_ERR_OR_NULL(map->record)) { 1205 1113 int i; ··· 1251 1159 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && 1252 1160 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && 1253 1161 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { 1162 + ret = -EOPNOTSUPP; 1163 + goto free_map_tab; 1164 + } 1165 + break; 1166 + case BPF_UPTR: 1167 + if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) { 1254 1168 ret = -EOPNOTSUPP; 1255 1169 goto free_map_tab; 1256 1170 }