Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'introduce-kfuncs-for-memory-reads-into-dynptrs'

Mykyta Yatsenko says:

====================
Introduce kfuncs for memory reads into dynptrs

From: Mykyta Yatsenko <yatsenko@meta.com>

This patch adds new kfuncs that enable reading variable-length
user or kernel data directly into dynptrs.
These kfuncs provide a way to perform dynamically-sized reads
while maintaining memory safety. Unlike existing
`bpf_probe_read_{user|kernel}` APIs, which are limited to constant-sized
reads, these new kfuncs allow for more flexible data access.

v4 -> v5
* Fix pointer annotations, use __user where necessary, cast where needed

v3 -> v4
* Added pid filtering in selftests

v2 -> v3
* Add KF_TRUSTED_ARGS for kfuncs that take pointer to task_struct
as an argument
* Remove checks for non-NULL task where they were not necessary
* Added comments on constants used in selftests, etc.

v1 -> v2
* Rename helper functions to use the "user_str" suffix instead of
"user_data_str"

====================

Link: https://patch.msgid.link/20250512205348.191079-1-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

+462 -12
+14
include/linux/bpf.h
··· 1349 1349 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len); 1350 1350 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); 1351 1351 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); 1352 + int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, 1353 + void *src, u32 len, u64 flags); 1354 + void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, 1355 + void *buffer__opt, u32 buffer__szk); 1356 + 1357 + static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) 1358 + { 1359 + u32 size = __bpf_dynptr_size(ptr); 1360 + 1361 + if (len > size || offset > size - len) 1362 + return -E2BIG; 1363 + 1364 + return 0; 1365 + } 1352 1366 1353 1367 #ifdef CONFIG_BPF_JIT 1354 1368 int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+10 -12
kernel/bpf/helpers.c
··· 1714 1714 memset(ptr, 0, sizeof(*ptr)); 1715 1715 } 1716 1716 1717 - static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) 1718 - { 1719 - u32 size = __bpf_dynptr_size(ptr); 1720 - 1721 - if (len > size || offset > size - len) 1722 - return -E2BIG; 1723 - 1724 - return 0; 1725 - } 1726 - 1727 1717 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) 1728 1718 { 1729 1719 int err; ··· 1800 1810 .arg5_type = ARG_ANYTHING, 1801 1811 }; 1802 1812 1803 - static int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src, 1804 - u32 len, u64 flags) 1813 + int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src, 1814 + u32 len, u64 flags) 1805 1815 { 1806 1816 enum bpf_dynptr_type type; 1807 1817 int err; ··· 3378 3388 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) 3379 3389 BTF_ID_FLAGS(func, bpf_local_irq_save) 3380 3390 BTF_ID_FLAGS(func, bpf_local_irq_restore) 3391 + BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr) 3392 + BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr) 3393 + BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr) 3394 + BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr) 3395 + BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE) 3396 + BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE) 3397 + BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) 3398 + BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) 3381 3399 BTF_KFUNCS_END(common_btf_ids) 3382 3400 3383 3401 static const struct btf_kfunc_id_set common_kfunc_set = {
+194
kernel/trace/bpf_trace.c
··· 3466 3466 3467 3467 late_initcall(bpf_kprobe_multi_kfuncs_init); 3468 3468 3469 + typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk); 3470 + 3471 + /* 3472 + * The __always_inline is to make sure the compiler doesn't 3473 + * generate indirect calls into callbacks, which is expensive, 3474 + * on some kernel configurations. This allows compiler to put 3475 + * direct calls into all the specific callback implementations 3476 + * (copy_user_data_sleepable, copy_user_data_nofault, and so on) 3477 + */ 3478 + static __always_inline int __bpf_dynptr_copy_str(struct bpf_dynptr *dptr, u32 doff, u32 size, 3479 + const void *unsafe_src, 3480 + copy_fn_t str_copy_fn, 3481 + struct task_struct *tsk) 3482 + { 3483 + struct bpf_dynptr_kern *dst; 3484 + u32 chunk_sz, off; 3485 + void *dst_slice; 3486 + int cnt, err; 3487 + char buf[256]; 3488 + 3489 + dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size); 3490 + if (likely(dst_slice)) 3491 + return str_copy_fn(dst_slice, unsafe_src, size, tsk); 3492 + 3493 + dst = (struct bpf_dynptr_kern *)dptr; 3494 + if (bpf_dynptr_check_off_len(dst, doff, size)) 3495 + return -E2BIG; 3496 + 3497 + for (off = 0; off < size; off += chunk_sz - 1) { 3498 + chunk_sz = min_t(u32, sizeof(buf), size - off); 3499 + /* Expect str_copy_fn to return count of copied bytes, including 3500 + * zero terminator. Next iteration increment off by chunk_sz - 1 to 3501 + * overwrite NUL. 
3502 + */ 3503 + cnt = str_copy_fn(buf, unsafe_src + off, chunk_sz, tsk); 3504 + if (cnt < 0) 3505 + return cnt; 3506 + err = __bpf_dynptr_write(dst, doff + off, buf, cnt, 0); 3507 + if (err) 3508 + return err; 3509 + if (cnt < chunk_sz || chunk_sz == 1) /* we are done */ 3510 + return off + cnt; 3511 + } 3512 + return off; 3513 + } 3514 + 3515 + static __always_inline int __bpf_dynptr_copy(const struct bpf_dynptr *dptr, u32 doff, 3516 + u32 size, const void *unsafe_src, 3517 + copy_fn_t copy_fn, struct task_struct *tsk) 3518 + { 3519 + struct bpf_dynptr_kern *dst; 3520 + void *dst_slice; 3521 + char buf[256]; 3522 + u32 off, chunk_sz; 3523 + int err; 3524 + 3525 + dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size); 3526 + if (likely(dst_slice)) 3527 + return copy_fn(dst_slice, unsafe_src, size, tsk); 3528 + 3529 + dst = (struct bpf_dynptr_kern *)dptr; 3530 + if (bpf_dynptr_check_off_len(dst, doff, size)) 3531 + return -E2BIG; 3532 + 3533 + for (off = 0; off < size; off += chunk_sz) { 3534 + chunk_sz = min_t(u32, sizeof(buf), size - off); 3535 + err = copy_fn(buf, unsafe_src + off, chunk_sz, tsk); 3536 + if (err) 3537 + return err; 3538 + err = __bpf_dynptr_write(dst, doff + off, buf, chunk_sz, 0); 3539 + if (err) 3540 + return err; 3541 + } 3542 + return 0; 3543 + } 3544 + 3545 + static __always_inline int copy_user_data_nofault(void *dst, const void *unsafe_src, 3546 + u32 size, struct task_struct *tsk) 3547 + { 3548 + return copy_from_user_nofault(dst, (const void __user *)unsafe_src, size); 3549 + } 3550 + 3551 + static __always_inline int copy_user_data_sleepable(void *dst, const void *unsafe_src, 3552 + u32 size, struct task_struct *tsk) 3553 + { 3554 + int ret; 3555 + 3556 + if (!tsk) /* Read from the current task */ 3557 + return copy_from_user(dst, (const void __user *)unsafe_src, size); 3558 + 3559 + ret = access_process_vm(tsk, (unsigned long)unsafe_src, dst, size, 0); 3560 + if (ret != size) 3561 + return -EFAULT; 3562 + return 0; 3563 + } 3564 + 
3565 + static __always_inline int copy_kernel_data_nofault(void *dst, const void *unsafe_src, 3566 + u32 size, struct task_struct *tsk) 3567 + { 3568 + return copy_from_kernel_nofault(dst, unsafe_src, size); 3569 + } 3570 + 3571 + static __always_inline int copy_user_str_nofault(void *dst, const void *unsafe_src, 3572 + u32 size, struct task_struct *tsk) 3573 + { 3574 + return strncpy_from_user_nofault(dst, (const void __user *)unsafe_src, size); 3575 + } 3576 + 3577 + static __always_inline int copy_user_str_sleepable(void *dst, const void *unsafe_src, 3578 + u32 size, struct task_struct *tsk) 3579 + { 3580 + int ret; 3581 + 3582 + if (unlikely(size == 0)) 3583 + return 0; 3584 + 3585 + if (tsk) { 3586 + ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_src, dst, size, 0); 3587 + } else { 3588 + ret = strncpy_from_user(dst, (const void __user *)unsafe_src, size - 1); 3589 + /* strncpy_from_user does not guarantee NUL termination */ 3590 + if (ret >= 0) 3591 + ((char *)dst)[ret] = '\0'; 3592 + } 3593 + 3594 + if (ret < 0) 3595 + return ret; 3596 + return ret + 1; 3597 + } 3598 + 3599 + static __always_inline int copy_kernel_str_nofault(void *dst, const void *unsafe_src, 3600 + u32 size, struct task_struct *tsk) 3601 + { 3602 + return strncpy_from_kernel_nofault(dst, unsafe_src, size); 3603 + } 3604 + 3469 3605 __bpf_kfunc_start_defs(); 3470 3606 3471 3607 __bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, ··· 3611 3475 return -EINVAL; 3612 3476 3613 3477 return bpf_send_signal_common(sig, type, task, value); 3478 + } 3479 + 3480 + __bpf_kfunc int bpf_probe_read_user_dynptr(struct bpf_dynptr *dptr, u32 off, 3481 + u32 size, const void __user *unsafe_ptr__ign) 3482 + { 3483 + return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign, 3484 + copy_user_data_nofault, NULL); 3485 + } 3486 + 3487 + __bpf_kfunc int bpf_probe_read_kernel_dynptr(struct bpf_dynptr *dptr, u32 off, 3488 + u32 size, const void 
*unsafe_ptr__ign) 3489 + { 3490 + return __bpf_dynptr_copy(dptr, off, size, unsafe_ptr__ign, 3491 + copy_kernel_data_nofault, NULL); 3492 + } 3493 + 3494 + __bpf_kfunc int bpf_probe_read_user_str_dynptr(struct bpf_dynptr *dptr, u32 off, 3495 + u32 size, const void __user *unsafe_ptr__ign) 3496 + { 3497 + return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign, 3498 + copy_user_str_nofault, NULL); 3499 + } 3500 + 3501 + __bpf_kfunc int bpf_probe_read_kernel_str_dynptr(struct bpf_dynptr *dptr, u32 off, 3502 + u32 size, const void *unsafe_ptr__ign) 3503 + { 3504 + return __bpf_dynptr_copy_str(dptr, off, size, unsafe_ptr__ign, 3505 + copy_kernel_str_nofault, NULL); 3506 + } 3507 + 3508 + __bpf_kfunc int bpf_copy_from_user_dynptr(struct bpf_dynptr *dptr, u32 off, 3509 + u32 size, const void __user *unsafe_ptr__ign) 3510 + { 3511 + return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign, 3512 + copy_user_data_sleepable, NULL); 3513 + } 3514 + 3515 + __bpf_kfunc int bpf_copy_from_user_str_dynptr(struct bpf_dynptr *dptr, u32 off, 3516 + u32 size, const void __user *unsafe_ptr__ign) 3517 + { 3518 + return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign, 3519 + copy_user_str_sleepable, NULL); 3520 + } 3521 + 3522 + __bpf_kfunc int bpf_copy_from_user_task_dynptr(struct bpf_dynptr *dptr, u32 off, 3523 + u32 size, const void __user *unsafe_ptr__ign, 3524 + struct task_struct *tsk) 3525 + { 3526 + return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign, 3527 + copy_user_data_sleepable, tsk); 3528 + } 3529 + 3530 + __bpf_kfunc int bpf_copy_from_user_task_str_dynptr(struct bpf_dynptr *dptr, u32 off, 3531 + u32 size, const void __user *unsafe_ptr__ign, 3532 + struct task_struct *tsk) 3533 + { 3534 + return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign, 3535 + copy_user_str_sleepable, tsk); 3614 3536 } 3615 3537 3616 3538 __bpf_kfunc_end_defs();
+1
tools/testing/selftests/bpf/DENYLIST
··· 1 1 # TEMPORARY 2 2 # Alphabetical order 3 + dynptr/test_probe_read_user_str_dynptr # disabled until https://patchwork.kernel.org/project/linux-mm/patch/20250422131449.57177-1-mykyta.yatsenko5@gmail.com/ makes it into the bpf-next 3 4 get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge 4 5 stacktrace_build_id 5 6 stacktrace_build_id_nmi
+13
tools/testing/selftests/bpf/prog_tests/dynptr.c
··· 33 33 {"test_dynptr_skb_no_buff", SETUP_SKB_PROG}, 34 34 {"test_dynptr_skb_strcmp", SETUP_SKB_PROG}, 35 35 {"test_dynptr_skb_tp_btf", SETUP_SKB_PROG_TP}, 36 + {"test_probe_read_user_dynptr", SETUP_XDP_PROG}, 37 + {"test_probe_read_kernel_dynptr", SETUP_XDP_PROG}, 38 + {"test_probe_read_user_str_dynptr", SETUP_XDP_PROG}, 39 + {"test_probe_read_kernel_str_dynptr", SETUP_XDP_PROG}, 40 + {"test_copy_from_user_dynptr", SETUP_SYSCALL_SLEEP}, 41 + {"test_copy_from_user_str_dynptr", SETUP_SYSCALL_SLEEP}, 42 + {"test_copy_from_user_task_dynptr", SETUP_SYSCALL_SLEEP}, 43 + {"test_copy_from_user_task_str_dynptr", SETUP_SYSCALL_SLEEP}, 36 44 }; 37 45 38 46 static void verify_success(const char *prog_name, enum test_setup_type setup_type) 39 47 { 48 + char user_data[384] = {[0 ... 382] = 'a', '\0'}; 40 49 struct dynptr_success *skel; 41 50 struct bpf_program *prog; 42 51 struct bpf_link *link; ··· 66 57 err = dynptr_success__load(skel); 67 58 if (!ASSERT_OK(err, "dynptr_success__load")) 68 59 goto cleanup; 60 + 61 + skel->bss->user_ptr = user_data; 62 + skel->data->test_len[0] = sizeof(user_data); 63 + memcpy(skel->bss->expected_str, user_data, sizeof(user_data)); 69 64 70 65 switch (setup_type) { 71 66 case SETUP_SYSCALL_SLEEP:
+230
tools/testing/selftests/bpf/progs/dynptr_success.c
··· 680 680 bpf_ringbuf_discard_dynptr(&ptr_buf, 0); 681 681 return XDP_DROP; 682 682 } 683 + 684 + void *user_ptr; 685 + /* Contains the copy of the data pointed by user_ptr. 686 + * Size 384 to make it not fit into a single kernel chunk when copying 687 + * but less than the maximum bpf stack size (512). 688 + */ 689 + char expected_str[384]; 690 + __u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257}; 691 + 692 + typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off, 693 + u32 size, const void *unsafe_ptr); 694 + 695 + /* Returns the offset just before the end of the maximum sized xdp fragment. 696 + * Any write larger than 32 bytes will be split between 2 fragments. 697 + */ 698 + __u32 xdp_near_frag_end_offset(void) 699 + { 700 + const __u32 headroom = 256; 701 + const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof(struct skb_shared_info); 702 + 703 + /* 32 bytes before the approximate end of the fragment */ 704 + return max_frag_size - 32; 705 + } 706 + 707 + /* Use __always_inline on test_dynptr_probe[_str][_xdp]() and callbacks 708 + * of type bpf_read_dynptr_fn_t to prevent compiler from generating 709 + * indirect calls that make program fail to load with "unknown opcode" error. 
710 + */ 711 + static __always_inline void test_dynptr_probe(void *ptr, bpf_read_dynptr_fn_t bpf_read_dynptr_fn) 712 + { 713 + char buf[sizeof(expected_str)]; 714 + struct bpf_dynptr ptr_buf; 715 + int i; 716 + 717 + if (bpf_get_current_pid_tgid() >> 32 != pid) 718 + return; 719 + 720 + err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf); 721 + 722 + bpf_for(i, 0, ARRAY_SIZE(test_len)) { 723 + __u32 len = test_len[i]; 724 + 725 + err = err ?: bpf_read_dynptr_fn(&ptr_buf, 0, test_len[i], ptr); 726 + if (len > sizeof(buf)) 727 + break; 728 + err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0); 729 + 730 + if (err || bpf_memcmp(expected_str, buf, len)) 731 + err = 1; 732 + 733 + /* Reset buffer and dynptr */ 734 + __builtin_memset(buf, 0, sizeof(buf)); 735 + err = err ?: bpf_dynptr_write(&ptr_buf, 0, buf, len, 0); 736 + } 737 + bpf_ringbuf_discard_dynptr(&ptr_buf, 0); 738 + } 739 + 740 + static __always_inline void test_dynptr_probe_str(void *ptr, 741 + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) 742 + { 743 + char buf[sizeof(expected_str)]; 744 + struct bpf_dynptr ptr_buf; 745 + __u32 cnt, i; 746 + 747 + if (bpf_get_current_pid_tgid() >> 32 != pid) 748 + return; 749 + 750 + bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf); 751 + 752 + bpf_for(i, 0, ARRAY_SIZE(test_len)) { 753 + __u32 len = test_len[i]; 754 + 755 + cnt = bpf_read_dynptr_fn(&ptr_buf, 0, len, ptr); 756 + if (cnt != len) 757 + err = 1; 758 + 759 + if (len > sizeof(buf)) 760 + continue; 761 + err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0); 762 + if (!len) 763 + continue; 764 + if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0') 765 + err = 1; 766 + } 767 + bpf_ringbuf_discard_dynptr(&ptr_buf, 0); 768 + } 769 + 770 + static __always_inline void test_dynptr_probe_xdp(struct xdp_md *xdp, void *ptr, 771 + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) 772 + { 773 + struct bpf_dynptr ptr_xdp; 774 + char buf[sizeof(expected_str)]; 775 + __u32 off, i; 776 
+ 777 + if (bpf_get_current_pid_tgid() >> 32 != pid) 778 + return; 779 + 780 + off = xdp_near_frag_end_offset(); 781 + err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); 782 + 783 + bpf_for(i, 0, ARRAY_SIZE(test_len)) { 784 + __u32 len = test_len[i]; 785 + 786 + err = err ?: bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr); 787 + if (len > sizeof(buf)) 788 + continue; 789 + err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0); 790 + if (err || bpf_memcmp(expected_str, buf, len)) 791 + err = 1; 792 + /* Reset buffer and dynptr */ 793 + __builtin_memset(buf, 0, sizeof(buf)); 794 + err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0); 795 + } 796 + } 797 + 798 + static __always_inline void test_dynptr_probe_str_xdp(struct xdp_md *xdp, void *ptr, 799 + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) 800 + { 801 + struct bpf_dynptr ptr_xdp; 802 + char buf[sizeof(expected_str)]; 803 + __u32 cnt, off, i; 804 + 805 + if (bpf_get_current_pid_tgid() >> 32 != pid) 806 + return; 807 + 808 + off = xdp_near_frag_end_offset(); 809 + err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); 810 + if (err) 811 + return; 812 + 813 + bpf_for(i, 0, ARRAY_SIZE(test_len)) { 814 + __u32 len = test_len[i]; 815 + 816 + cnt = bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr); 817 + if (cnt != len) 818 + err = 1; 819 + 820 + if (len > sizeof(buf)) 821 + continue; 822 + err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0); 823 + 824 + if (!len) 825 + continue; 826 + if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0') 827 + err = 1; 828 + 829 + __builtin_memset(buf, 0, sizeof(buf)); 830 + err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0); 831 + } 832 + } 833 + 834 + SEC("xdp") 835 + int test_probe_read_user_dynptr(struct xdp_md *xdp) 836 + { 837 + test_dynptr_probe(user_ptr, bpf_probe_read_user_dynptr); 838 + if (!err) 839 + test_dynptr_probe_xdp(xdp, user_ptr, bpf_probe_read_user_dynptr); 840 + return XDP_PASS; 841 + } 842 + 843 + SEC("xdp") 844 + int 
test_probe_read_kernel_dynptr(struct xdp_md *xdp) 845 + { 846 + test_dynptr_probe(expected_str, bpf_probe_read_kernel_dynptr); 847 + if (!err) 848 + test_dynptr_probe_xdp(xdp, expected_str, bpf_probe_read_kernel_dynptr); 849 + return XDP_PASS; 850 + } 851 + 852 + SEC("xdp") 853 + int test_probe_read_user_str_dynptr(struct xdp_md *xdp) 854 + { 855 + test_dynptr_probe_str(user_ptr, bpf_probe_read_user_str_dynptr); 856 + if (!err) 857 + test_dynptr_probe_str_xdp(xdp, user_ptr, bpf_probe_read_user_str_dynptr); 858 + return XDP_PASS; 859 + } 860 + 861 + SEC("xdp") 862 + int test_probe_read_kernel_str_dynptr(struct xdp_md *xdp) 863 + { 864 + test_dynptr_probe_str(expected_str, bpf_probe_read_kernel_str_dynptr); 865 + if (!err) 866 + test_dynptr_probe_str_xdp(xdp, expected_str, bpf_probe_read_kernel_str_dynptr); 867 + return XDP_PASS; 868 + } 869 + 870 + SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") 871 + int test_copy_from_user_dynptr(void *ctx) 872 + { 873 + test_dynptr_probe(user_ptr, bpf_copy_from_user_dynptr); 874 + return 0; 875 + } 876 + 877 + SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") 878 + int test_copy_from_user_str_dynptr(void *ctx) 879 + { 880 + test_dynptr_probe_str(user_ptr, bpf_copy_from_user_str_dynptr); 881 + return 0; 882 + } 883 + 884 + static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u32 off, 885 + u32 size, const void *unsafe_ptr) 886 + { 887 + struct task_struct *task = bpf_get_current_task_btf(); 888 + 889 + return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task); 890 + } 891 + 892 + static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u32 off, 893 + u32 size, const void *unsafe_ptr) 894 + { 895 + struct task_struct *task = bpf_get_current_task_btf(); 896 + 897 + return bpf_copy_from_user_task_str_dynptr(dptr, off, size, unsafe_ptr, task); 898 + } 899 + 900 + SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") 901 + int test_copy_from_user_task_dynptr(void *ctx) 902 + { 903 + test_dynptr_probe(user_ptr, 
bpf_copy_data_from_user_task); 904 + return 0; 905 + } 906 + 907 + SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") 908 + int test_copy_from_user_task_str_dynptr(void *ctx) 909 + { 910 + test_dynptr_probe_str(user_ptr, bpf_copy_data_from_user_task_str); 911 + return 0; 912 + }