Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: widen dynptr size/offset to 64 bit

Dynptr currently caps size and offset at 24 bits, which isn’t sufficient
for file-backed use cases; even 32 bits can be limiting. Refactor dynptr
helpers/kfuncs to use 64-bit size and offset, ensuring consistency
across the APIs.

This change does not affect the internals of xdp, skb, or other dynptr
types, which continue to behave as before. It also does not break
binary compatibility.

The widening enables large-file access support via dynptr, implemented
in the next patches.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20251026203853.135105-3-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Mykyta Yatsenko and committed by Alexei Starovoitov.
Commit: 531b87d8 (parent: a61a257f)

+86 -86
+10 -10
include/linux/bpf.h
··· 1387 1387 BPF_DYNPTR_TYPE_SKB_META, 1388 1388 }; 1389 1389 1390 - int bpf_dynptr_check_size(u32 size); 1391 - u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr); 1392 - const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len); 1393 - void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); 1390 + int bpf_dynptr_check_size(u64 size); 1391 + u64 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr); 1392 + const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u64 len); 1393 + void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u64 len); 1394 1394 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); 1395 - int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, 1396 - void *src, u32 len, u64 flags); 1397 - void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, 1398 - void *buffer__opt, u32 buffer__szk); 1395 + int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset, 1396 + void *src, u64 len, u64 flags); 1397 + void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset, 1398 + void *buffer__opt, u64 buffer__szk); 1399 1399 1400 - static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) 1400 + static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u64 offset, u64 len) 1401 1401 { 1402 - u32 size = __bpf_dynptr_size(ptr); 1402 + u64 size = __bpf_dynptr_size(ptr); 1403 1403 1404 1404 if (len > size || offset > size - len) 1405 1405 return -E2BIG;
+4 -4
include/uapi/linux/bpf.h
··· 5618 5618 * Return 5619 5619 * *sk* if casting is valid, or **NULL** otherwise. 5620 5620 * 5621 - * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr) 5621 + * long bpf_dynptr_from_mem(void *data, u64 size, u64 flags, struct bpf_dynptr *ptr) 5622 5622 * Description 5623 5623 * Get a dynptr to local memory *data*. 5624 5624 * ··· 5661 5661 * Return 5662 5662 * Nothing. Always succeeds. 5663 5663 * 5664 - * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags) 5664 + * long bpf_dynptr_read(void *dst, u64 len, const struct bpf_dynptr *src, u64 offset, u64 flags) 5665 5665 * Description 5666 5666 * Read *len* bytes from *src* into *dst*, starting from *offset* 5667 5667 * into *src*. ··· 5671 5671 * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if 5672 5672 * *flags* is not 0. 5673 5673 * 5674 - * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags) 5674 + * long bpf_dynptr_write(const struct bpf_dynptr *dst, u64 offset, void *src, u64 len, u64 flags) 5675 5675 * Description 5676 5676 * Write *len* bytes from *src* into *dst*, starting from *offset* 5677 5677 * into *dst*. ··· 5692 5692 * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs, 5693 5693 * other errors correspond to errors returned by **bpf_skb_store_bytes**\ (). 5694 5694 * 5695 - * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len) 5695 + * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u64 offset, u64 len) 5696 5696 * Description 5697 5697 * Get a pointer to the underlying dynptr data. 5698 5698 *
+33 -33
kernel/bpf/helpers.c
··· 1684 1684 return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT; 1685 1685 } 1686 1686 1687 - u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) 1687 + u64 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) 1688 1688 { 1689 1689 return ptr->size & DYNPTR_SIZE_MASK; 1690 1690 } 1691 1691 1692 - static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size) 1692 + static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u64 new_size) 1693 1693 { 1694 1694 u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK; 1695 1695 1696 - ptr->size = new_size | metadata; 1696 + ptr->size = (u32)new_size | metadata; 1697 1697 } 1698 1698 1699 - int bpf_dynptr_check_size(u32 size) 1699 + int bpf_dynptr_check_size(u64 size) 1700 1700 { 1701 1701 return size > DYNPTR_MAX_SIZE ? -E2BIG : 0; 1702 1702 } ··· 1715 1715 memset(ptr, 0, sizeof(*ptr)); 1716 1716 } 1717 1717 1718 - BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) 1718 + BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u64, size, u64, flags, struct bpf_dynptr_kern *, ptr) 1719 1719 { 1720 1720 int err; 1721 1721 ··· 1750 1750 .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE, 1751 1751 }; 1752 1752 1753 - static int __bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr_kern *src, 1754 - u32 offset, u64 flags) 1753 + static int __bpf_dynptr_read(void *dst, u64 len, const struct bpf_dynptr_kern *src, 1754 + u64 offset, u64 flags) 1755 1755 { 1756 1756 enum bpf_dynptr_type type; 1757 1757 int err; ··· 1787 1787 } 1788 1788 } 1789 1789 1790 - BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src, 1791 - u32, offset, u64, flags) 1790 + BPF_CALL_5(bpf_dynptr_read, void *, dst, u64, len, const struct bpf_dynptr_kern *, src, 1791 + u64, offset, u64, flags) 1792 1792 { 1793 1793 return __bpf_dynptr_read(dst, len, src, offset, flags); 1794 1794 } ··· 1804 1804 .arg5_type = ARG_ANYTHING, 1805 1805 
}; 1806 1806 1807 - int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src, 1808 - u32 len, u64 flags) 1807 + int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset, void *src, 1808 + u64 len, u64 flags) 1809 1809 { 1810 1810 enum bpf_dynptr_type type; 1811 1811 int err; ··· 1848 1848 } 1849 1849 } 1850 1850 1851 - BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src, 1852 - u32, len, u64, flags) 1851 + BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u64, offset, void *, src, 1852 + u64, len, u64, flags) 1853 1853 { 1854 1854 return __bpf_dynptr_write(dst, offset, src, len, flags); 1855 1855 } ··· 1865 1865 .arg5_type = ARG_ANYTHING, 1866 1866 }; 1867 1867 1868 - BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) 1868 + BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u64, offset, u64, len) 1869 1869 { 1870 1870 enum bpf_dynptr_type type; 1871 1871 int err; ··· 2680 2680 * provided buffer, with its contents containing the data, if unable to obtain 2681 2681 * direct pointer) 2682 2682 */ 2683 - __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, 2684 - void *buffer__opt, u32 buffer__szk) 2683 + __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u64 offset, 2684 + void *buffer__opt, u64 buffer__szk) 2685 2685 { 2686 2686 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2687 2687 enum bpf_dynptr_type type; 2688 - u32 len = buffer__szk; 2688 + u64 len = buffer__szk; 2689 2689 int err; 2690 2690 2691 2691 if (!ptr->data) ··· 2767 2767 * provided buffer, with its contents containing the data, if unable to obtain 2768 2768 * direct pointer) 2769 2769 */ 2770 - __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, 2771 - void *buffer__opt, u32 buffer__szk) 2770 + __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset, 2771 + 
void *buffer__opt, u64 buffer__szk) 2772 2772 { 2773 2773 const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2774 2774 ··· 2800 2800 return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk); 2801 2801 } 2802 2802 2803 - __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) 2803 + __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u64 start, u64 end) 2804 2804 { 2805 2805 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2806 - u32 size; 2806 + u64 size; 2807 2807 2808 2808 if (!ptr->data || start > end) 2809 2809 return -EINVAL; ··· 2836 2836 return __bpf_dynptr_is_rdonly(ptr); 2837 2837 } 2838 2838 2839 - __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p) 2839 + __bpf_kfunc u64 bpf_dynptr_size(const struct bpf_dynptr *p) 2840 2840 { 2841 2841 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2842 2842 ··· 2873 2873 * Copies data from source dynptr to destination dynptr. 2874 2874 * Returns 0 on success; negative error, otherwise. 
2875 2875 */ 2876 - __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u32 dst_off, 2877 - struct bpf_dynptr *src_ptr, u32 src_off, u32 size) 2876 + __bpf_kfunc int bpf_dynptr_copy(struct bpf_dynptr *dst_ptr, u64 dst_off, 2877 + struct bpf_dynptr *src_ptr, u64 src_off, u64 size) 2878 2878 { 2879 2879 struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dst_ptr; 2880 2880 struct bpf_dynptr_kern *src = (struct bpf_dynptr_kern *)src_ptr; 2881 2881 void *src_slice, *dst_slice; 2882 2882 char buf[256]; 2883 - u32 off; 2883 + u64 off; 2884 2884 2885 2885 src_slice = bpf_dynptr_slice(src_ptr, src_off, NULL, size); 2886 2886 dst_slice = bpf_dynptr_slice_rdwr(dst_ptr, dst_off, NULL, size); ··· 2902 2902 2903 2903 off = 0; 2904 2904 while (off < size) { 2905 - u32 chunk_sz = min_t(u32, sizeof(buf), size - off); 2905 + u64 chunk_sz = min_t(u64, sizeof(buf), size - off); 2906 2906 int err; 2907 2907 2908 2908 err = __bpf_dynptr_read(buf, chunk_sz, src, src_off + off, 0); ··· 2928 2928 * at @offset with the constant byte @val. 2929 2929 * Returns 0 on success; negative error, otherwise. 
2930 2930 */ 2931 - __bpf_kfunc int bpf_dynptr_memset(struct bpf_dynptr *p, u32 offset, u32 size, u8 val) 2932 - { 2931 + __bpf_kfunc int bpf_dynptr_memset(struct bpf_dynptr *p, u64 offset, u64 size, u8 val) 2932 + { 2933 2933 struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; 2934 - u32 chunk_sz, write_off; 2934 + u64 chunk_sz, write_off; 2935 2935 char buf[256]; 2936 2936 void* slice; 2937 2937 int err; ··· 2950 2950 return err; 2951 2951 2952 2952 /* Non-linear data under the dynptr, write from a local buffer */ 2953 - chunk_sz = min_t(u32, sizeof(buf), size); 2953 + chunk_sz = min_t(u64, sizeof(buf), size); 2954 2954 memset(buf, val, chunk_sz); 2955 2955 2956 2956 for (write_off = 0; write_off < size; write_off += chunk_sz) { 2957 - chunk_sz = min_t(u32, sizeof(buf), size - write_off); 2957 + chunk_sz = min_t(u64, sizeof(buf), size - write_off); 2958 2958 err = __bpf_dynptr_write(ptr, offset + write_off, buf, chunk_sz, 0); 2959 2959 if (err) 2960 2960 return err; ··· 4469 4469 /* Get a pointer to dynptr data up to len bytes for read only access. If 4470 4470 * the dynptr doesn't have continuous data up to len bytes, return NULL. 4471 4471 */ 4472 - const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len) 4472 + const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u64 len) 4473 4473 { 4474 4474 const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr; 4475 4475 ··· 4480 4480 * the dynptr doesn't have continuous data up to len bytes, or the dynptr 4481 4481 * is read only, return NULL. 4482 4482 */ 4483 - void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len) 4483 + void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u64 len) 4484 4484 { 4485 4485 if (__bpf_dynptr_is_rdonly(ptr)) 4486 4486 return NULL;
+23 -23
kernel/trace/bpf_trace.c
··· 3372 3372 * direct calls into all the specific callback implementations 3373 3373 * (copy_user_data_sleepable, copy_user_data_nofault, and so on) 3374 3374 */ 3375 - static __always_inline int __bpf_dynptr_copy_str(struct bpf_dynptr *dptr, u32 doff, u32 size, 3375 + static __always_inline int __bpf_dynptr_copy_str(struct bpf_dynptr *dptr, u64 doff, u64 size, 3376 3376 const void *unsafe_src, 3377 3377 copy_fn_t str_copy_fn, 3378 3378 struct task_struct *tsk) 3379 3379 { 3380 3380 struct bpf_dynptr_kern *dst; 3381 - u32 chunk_sz, off; 3381 + u64 chunk_sz, off; 3382 3382 void *dst_slice; 3383 3383 int cnt, err; 3384 3384 char buf[256]; ··· 3392 3392 return -E2BIG; 3393 3393 3394 3394 for (off = 0; off < size; off += chunk_sz - 1) { 3395 - chunk_sz = min_t(u32, sizeof(buf), size - off); 3395 + chunk_sz = min_t(u64, sizeof(buf), size - off); 3396 3396 /* Expect str_copy_fn to return count of copied bytes, including 3397 3397 * zero terminator. Next iteration increment off by chunk_sz - 1 to 3398 3398 * overwrite NUL. 
··· 3409 3409 return off; 3410 3410 } 3411 3411 3412 - static __always_inline int __bpf_dynptr_copy(const struct bpf_dynptr *dptr, u32 doff, 3413 - u32 size, const void *unsafe_src, 3412 + static __always_inline int __bpf_dynptr_copy(const struct bpf_dynptr *dptr, u64 doff, 3413 + u64 size, const void *unsafe_src, 3414 3414 copy_fn_t copy_fn, struct task_struct *tsk) 3415 3415 { 3416 3416 struct bpf_dynptr_kern *dst; 3417 3417 void *dst_slice; 3418 3418 char buf[256]; 3419 - u32 off, chunk_sz; 3419 + u64 off, chunk_sz; 3420 3420 int err; 3421 3421 3422 3422 dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size); ··· 3428 3428 return -E2BIG; 3429 3429 3430 3430 for (off = 0; off < size; off += chunk_sz) { 3431 - chunk_sz = min_t(u32, sizeof(buf), size - off); 3431 + chunk_sz = min_t(u64, sizeof(buf), size - off); 3432 3432 err = copy_fn(buf, unsafe_src + off, chunk_sz, tsk); 3433 3433 if (err) 3434 3434 return err; ··· 3514 3514 return bpf_send_signal_common(sig, type, task, value); 3515 3515 } 3516 3516 3517 - __bpf_kfunc int bpf_probe_read_user_dynptr(struct bpf_dynptr *dptr, u32 off, 3518 - u32 size, const void __user *unsafe_ptr__ign) 3517 + __bpf_kfunc int bpf_probe_read_user_dynptr(struct bpf_dynptr *dptr, u64 off, 3518 + u64 size, const void __user *unsafe_ptr__ign) 3519 3519 { 3520 3520 return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign, 3521 3521 copy_user_data_nofault, NULL); 3522 3522 } 3523 3523 3524 - __bpf_kfunc int bpf_probe_read_kernel_dynptr(struct bpf_dynptr *dptr, u32 off, 3525 - u32 size, const void *unsafe_ptr__ign) 3524 + __bpf_kfunc int bpf_probe_read_kernel_dynptr(struct bpf_dynptr *dptr, u64 off, 3525 + u64 size, const void *unsafe_ptr__ign) 3526 3526 { 3527 3527 return __bpf_dynptr_copy(dptr, off, size, unsafe_ptr__ign, 3528 3528 copy_kernel_data_nofault, NULL); 3529 3529 } 3530 3530 3531 - __bpf_kfunc int bpf_probe_read_user_str_dynptr(struct bpf_dynptr *dptr, u32 off, 3532 - u32 size, const void __user 
*unsafe_ptr__ign) 3531 + __bpf_kfunc int bpf_probe_read_user_str_dynptr(struct bpf_dynptr *dptr, u64 off, 3532 + u64 size, const void __user *unsafe_ptr__ign) 3533 3533 { 3534 3534 return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign, 3535 3535 copy_user_str_nofault, NULL); 3536 3536 } 3537 3537 3538 - __bpf_kfunc int bpf_probe_read_kernel_str_dynptr(struct bpf_dynptr *dptr, u32 off, 3539 - u32 size, const void *unsafe_ptr__ign) 3538 + __bpf_kfunc int bpf_probe_read_kernel_str_dynptr(struct bpf_dynptr *dptr, u64 off, 3539 + u64 size, const void *unsafe_ptr__ign) 3540 3540 { 3541 3541 return __bpf_dynptr_copy_str(dptr, off, size, unsafe_ptr__ign, 3542 3542 copy_kernel_str_nofault, NULL); 3543 3543 } 3544 3544 3545 - __bpf_kfunc int bpf_copy_from_user_dynptr(struct bpf_dynptr *dptr, u32 off, 3546 - u32 size, const void __user *unsafe_ptr__ign) 3545 + __bpf_kfunc int bpf_copy_from_user_dynptr(struct bpf_dynptr *dptr, u64 off, 3546 + u64 size, const void __user *unsafe_ptr__ign) 3547 3547 { 3548 3548 return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign, 3549 3549 copy_user_data_sleepable, NULL); 3550 3550 } 3551 3551 3552 - __bpf_kfunc int bpf_copy_from_user_str_dynptr(struct bpf_dynptr *dptr, u32 off, 3553 - u32 size, const void __user *unsafe_ptr__ign) 3552 + __bpf_kfunc int bpf_copy_from_user_str_dynptr(struct bpf_dynptr *dptr, u64 off, 3553 + u64 size, const void __user *unsafe_ptr__ign) 3554 3554 { 3555 3555 return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign, 3556 3556 copy_user_str_sleepable, NULL); 3557 3557 } 3558 3558 3559 - __bpf_kfunc int bpf_copy_from_user_task_dynptr(struct bpf_dynptr *dptr, u32 off, 3560 - u32 size, const void __user *unsafe_ptr__ign, 3559 + __bpf_kfunc int bpf_copy_from_user_task_dynptr(struct bpf_dynptr *dptr, u64 off, 3560 + u64 size, const void __user *unsafe_ptr__ign, 3561 3561 struct task_struct *tsk) 3562 3562 { 3563 3563 return __bpf_dynptr_copy(dptr, off, size, 
(const void *)unsafe_ptr__ign, 3564 3564 copy_user_data_sleepable, tsk); 3565 3565 } 3566 3566 3567 - __bpf_kfunc int bpf_copy_from_user_task_str_dynptr(struct bpf_dynptr *dptr, u32 off, 3568 - u32 size, const void __user *unsafe_ptr__ign, 3567 + __bpf_kfunc int bpf_copy_from_user_task_str_dynptr(struct bpf_dynptr *dptr, u64 off, 3568 + u64 size, const void __user *unsafe_ptr__ign, 3569 3569 struct task_struct *tsk) 3570 3570 { 3571 3571 return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign,
+4 -4
tools/include/uapi/linux/bpf.h
··· 5618 5618 * Return 5619 5619 * *sk* if casting is valid, or **NULL** otherwise. 5620 5620 * 5621 - * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr) 5621 + * long bpf_dynptr_from_mem(void *data, u64 size, u64 flags, struct bpf_dynptr *ptr) 5622 5622 * Description 5623 5623 * Get a dynptr to local memory *data*. 5624 5624 * ··· 5661 5661 * Return 5662 5662 * Nothing. Always succeeds. 5663 5663 * 5664 - * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags) 5664 + * long bpf_dynptr_read(void *dst, u64 len, const struct bpf_dynptr *src, u64 offset, u64 flags) 5665 5665 * Description 5666 5666 * Read *len* bytes from *src* into *dst*, starting from *offset* 5667 5667 * into *src*. ··· 5671 5671 * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if 5672 5672 * *flags* is not 0. 5673 5673 * 5674 - * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags) 5674 + * long bpf_dynptr_write(const struct bpf_dynptr *dst, u64 offset, void *src, u64 len, u64 flags) 5675 5675 * Description 5676 5676 * Write *len* bytes from *src* into *dst*, starting from *offset* 5677 5677 * into *dst*. ··· 5692 5692 * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs, 5693 5693 * other errors correspond to errors returned by **bpf_skb_store_bytes**\ (). 5694 5694 * 5695 - * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len) 5695 + * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u64 offset, u64 len) 5696 5696 * Description 5697 5697 * Get a pointer to the underlying dynptr data. 5698 5698 *
+6 -6
tools/testing/selftests/bpf/bpf_kfuncs.h
··· 28 28 * Either a direct pointer to the dynptr data or a pointer to the user-provided 29 29 * buffer if unable to obtain a direct pointer 30 30 */ 31 - extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset, 32 - void *buffer, __u32 buffer__szk) __ksym __weak; 31 + extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u64 offset, 32 + void *buffer, __u64 buffer__szk) __ksym __weak; 33 33 34 34 /* Description 35 35 * Obtain a read-write pointer to the dynptr's data ··· 37 37 * Either a direct pointer to the dynptr data or a pointer to the user-provided 38 38 * buffer if unable to obtain a direct pointer 39 39 */ 40 - extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset, 41 - void *buffer, __u32 buffer__szk) __ksym __weak; 40 + extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u64 offset, void *buffer, 41 + __u64 buffer__szk) __ksym __weak; 42 42 43 - extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym __weak; 43 + extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u64 start, __u64 end) __ksym __weak; 44 44 extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym __weak; 45 45 extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym __weak; 46 - extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym __weak; 46 + extern __u64 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym __weak; 47 47 extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym __weak; 48 48 49 49 /* Description
+6 -6
tools/testing/selftests/bpf/progs/dynptr_success.c
··· 914 914 char expected_str[384]; 915 915 __u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257}; 916 916 917 - typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off, 918 - u32 size, const void *unsafe_ptr); 917 + typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u64 off, 918 + u64 size, const void *unsafe_ptr); 919 919 920 920 /* Returns the offset just before the end of the maximum sized xdp fragment. 921 921 * Any write larger than 32 bytes will be split between 2 fragments. ··· 1106 1106 return 0; 1107 1107 } 1108 1108 1109 - static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u32 off, 1110 - u32 size, const void *unsafe_ptr) 1109 + static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u64 off, 1110 + u64 size, const void *unsafe_ptr) 1111 1111 { 1112 1112 struct task_struct *task = bpf_get_current_task_btf(); 1113 1113 1114 1114 return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task); 1115 1115 } 1116 1116 1117 - static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u32 off, 1118 - u32 size, const void *unsafe_ptr) 1117 + static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u64 off, 1118 + u64 size, const void *unsafe_ptr) 1119 1119 { 1120 1120 struct task_struct *task = bpf_get_current_task_btf(); 1121 1121