Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64/sve: ptrace and ELF coredump support

This patch defines and implements a new regset NT_ARM_SVE, which
describes a thread's SVE register state. This allows a debugger to
manipulate the SVE state, as well as being included in ELF
coredumps for post-mortem debugging.

Because the regset size and layout are dependent on the thread's
current vector length, it is not possible to define a C struct to
describe the regset contents as is done for existing regsets.
Instead, and for the same reasons, NT_ARM_SVE is based on the
freeform variable-layout approach used for the SVE signal frame.

Additionally, to reduce debug overhead when debugging threads that
might or might not have live SVE register state, NT_ARM_SVE may be
presented in one of two different formats: the old struct
user_fpsimd_state format is embedded for describing the state of a
thread with no live SVE state, whereas a new variable-layout
structure is embedded for describing live SVE state. This avoids a
debugger needing to poll NT_PRFPREG in addition to NT_ARM_SVE, and
allows existing userspace code to handle the non-SVE case without
too much modification.

For this to work, NT_ARM_SVE is defined with a fixed-format header
of type struct user_sve_header, which the recipient can use to
figure out the content, size and layout of the rest of the regset.
Accessor macros are defined to allow the vector-length-dependent
parts of the regset to be manipulated.

Signed-off-by: Alan Hayward <alan.hayward@arm.com>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Alex Bennée <alex.bennee@linaro.org>
Cc: Okamoto Takayuki <tokamoto@jp.fujitsu.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>

authored by

Dave Martin and committed by
Will Deacon
43d4da2c fdfa976c

+482 -9
+11 -1
arch/arm64/include/asm/fpsimd.h
··· 38 38 __uint128_t vregs[32]; 39 39 u32 fpsr; 40 40 u32 fpcr; 41 + /* 42 + * For ptrace compatibility, pad to next 128-bit 43 + * boundary here if extending this struct. 44 + */ 41 45 }; 42 46 }; 43 47 /* the id of the last cpu to have restored this state */ 44 48 unsigned int cpu; 45 49 }; 46 - 47 50 48 51 #if defined(__KERNEL__) && defined(CONFIG_COMPAT) 49 52 /* Masks for extracting the FPSR and FPCR from the FPSCR */ ··· 91 88 92 89 extern void sve_alloc(struct task_struct *task); 93 90 extern void fpsimd_release_task(struct task_struct *task); 91 + extern void fpsimd_sync_to_sve(struct task_struct *task); 92 + extern void sve_sync_to_fpsimd(struct task_struct *task); 93 + extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task); 94 + 94 95 extern int sve_set_vector_length(struct task_struct *task, 95 96 unsigned long vl, unsigned long flags); 96 97 ··· 111 104 112 105 static inline void sve_alloc(struct task_struct *task) { } 113 106 static inline void fpsimd_release_task(struct task_struct *task) { } 107 + static inline void sve_sync_to_fpsimd(struct task_struct *task) { } 108 + static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { } 109 + 114 110 static inline void sve_init_vq_map(void) { } 115 111 static inline void sve_update_vq_map(void) { } 116 112 static inline int sve_verify_vq_map(void) { return 0; }
+138
arch/arm64/include/uapi/asm/ptrace.h
··· 22 22 #include <linux/types.h> 23 23 24 24 #include <asm/hwcap.h> 25 + #include <asm/sigcontext.h> 25 26 26 27 27 28 /* ··· 63 62 64 63 #ifndef __ASSEMBLY__ 65 64 65 + #include <linux/prctl.h> 66 + 66 67 /* 67 68 * User structures for general purpose, floating point and debug registers. 68 69 */ ··· 91 88 __u32 pad; 92 89 } dbg_regs[16]; 93 90 }; 91 + 92 + /* SVE/FP/SIMD state (NT_ARM_SVE) */ 93 + 94 + struct user_sve_header { 95 + __u32 size; /* total meaningful regset content in bytes */ 96 + __u32 max_size; /* maxmium possible size for this thread */ 97 + __u16 vl; /* current vector length */ 98 + __u16 max_vl; /* maximum possible vector length */ 99 + __u16 flags; 100 + __u16 __reserved; 101 + }; 102 + 103 + /* Definitions for user_sve_header.flags: */ 104 + #define SVE_PT_REGS_MASK (1 << 0) 105 + 106 + #define SVE_PT_REGS_FPSIMD 0 107 + #define SVE_PT_REGS_SVE SVE_PT_REGS_MASK 108 + 109 + /* 110 + * Common SVE_PT_* flags: 111 + * These must be kept in sync with prctl interface in <linux/ptrace.h> 112 + */ 113 + #define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16) 114 + #define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16) 115 + 116 + 117 + /* 118 + * The remainder of the SVE state follows struct user_sve_header. The 119 + * total size of the SVE state (including header) depends on the 120 + * metadata in the header: SVE_PT_SIZE(vq, flags) gives the total size 121 + * of the state in bytes, including the header. 122 + * 123 + * Refer to <asm/sigcontext.h> for details of how to pass the correct 124 + * "vq" argument to these macros. 125 + */ 126 + 127 + /* Offset from the start of struct user_sve_header to the register data */ 128 + #define SVE_PT_REGS_OFFSET \ 129 + ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \ 130 + / SVE_VQ_BYTES * SVE_VQ_BYTES) 131 + 132 + /* 133 + * The register data content and layout depends on the value of the 134 + * flags field. 
135 + */ 136 + 137 + /* 138 + * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD case: 139 + * 140 + * The payload starts at offset SVE_PT_FPSIMD_OFFSET, and is of type 141 + * struct user_fpsimd_state. Additional data might be appended in the 142 + * future: use SVE_PT_FPSIMD_SIZE(vq, flags) to compute the total size. 143 + * SVE_PT_FPSIMD_SIZE(vq, flags) will never be less than 144 + * sizeof(struct user_fpsimd_state). 145 + */ 146 + 147 + #define SVE_PT_FPSIMD_OFFSET SVE_PT_REGS_OFFSET 148 + 149 + #define SVE_PT_FPSIMD_SIZE(vq, flags) (sizeof(struct user_fpsimd_state)) 150 + 151 + /* 152 + * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE case: 153 + * 154 + * The payload starts at offset SVE_PT_SVE_OFFSET, and is of size 155 + * SVE_PT_SVE_SIZE(vq, flags). 156 + * 157 + * Additional macros describe the contents and layout of the payload. 158 + * For each, SVE_PT_SVE_x_OFFSET(args) is the start offset relative to 159 + * the start of struct user_sve_header, and SVE_PT_SVE_x_SIZE(args) is 160 + * the size in bytes: 161 + * 162 + * x type description 163 + * - ---- ----------- 164 + * ZREGS \ 165 + * ZREG | 166 + * PREGS | refer to <asm/sigcontext.h> 167 + * PREG | 168 + * FFR / 169 + * 170 + * FPSR uint32_t FPSR 171 + * FPCR uint32_t FPCR 172 + * 173 + * Additional data might be appended in the future. 
174 + */ 175 + 176 + #define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq) 177 + #define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq) 178 + #define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq) 179 + #define SVE_PT_SVE_FPSR_SIZE sizeof(__u32) 180 + #define SVE_PT_SVE_FPCR_SIZE sizeof(__u32) 181 + 182 + #define __SVE_SIG_TO_PT(offset) \ 183 + ((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET) 184 + 185 + #define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET 186 + 187 + #define SVE_PT_SVE_ZREGS_OFFSET \ 188 + __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET) 189 + #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \ 190 + __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n)) 191 + #define SVE_PT_SVE_ZREGS_SIZE(vq) \ 192 + (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET) 193 + 194 + #define SVE_PT_SVE_PREGS_OFFSET(vq) \ 195 + __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq)) 196 + #define SVE_PT_SVE_PREG_OFFSET(vq, n) \ 197 + __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n)) 198 + #define SVE_PT_SVE_PREGS_SIZE(vq) \ 199 + (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \ 200 + SVE_PT_SVE_PREGS_OFFSET(vq)) 201 + 202 + #define SVE_PT_SVE_FFR_OFFSET(vq) \ 203 + __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq)) 204 + 205 + #define SVE_PT_SVE_FPSR_OFFSET(vq) \ 206 + ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \ 207 + (SVE_VQ_BYTES - 1)) \ 208 + / SVE_VQ_BYTES * SVE_VQ_BYTES) 209 + #define SVE_PT_SVE_FPCR_OFFSET(vq) \ 210 + (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE) 211 + 212 + /* 213 + * Any future extension appended after FPCR must be aligned to the next 214 + * 128-bit boundary. 215 + */ 216 + 217 + #define SVE_PT_SVE_SIZE(vq, flags) \ 218 + ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \ 219 + - SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \ 220 + / SVE_VQ_BYTES * SVE_VQ_BYTES) 221 + 222 + #define SVE_PT_SIZE(vq, flags) \ 223 + (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? 
\ 224 + SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \ 225 + : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags)) 94 226 95 227 #endif /* __ASSEMBLY__ */ 96 228
+60
arch/arm64/kernel/fpsimd.c
··· 428 428 BUG_ON(!task->thread.sve_state); 429 429 } 430 430 431 + 432 + /* 433 + * Ensure that task->thread.sve_state is up to date with respect to 434 + * the user task, irrespective of when SVE is in use or not. 435 + * 436 + * This should only be called by ptrace. task must be non-runnable. 437 + * task->thread.sve_state must point to at least sve_state_size(task) 438 + * bytes of allocated kernel memory. 439 + */ 440 + void fpsimd_sync_to_sve(struct task_struct *task) 441 + { 442 + if (!test_tsk_thread_flag(task, TIF_SVE)) 443 + fpsimd_to_sve(task); 444 + } 445 + 446 + /* 447 + * Ensure that task->thread.fpsimd_state is up to date with respect to 448 + * the user task, irrespective of whether SVE is in use or not. 449 + * 450 + * This should only be called by ptrace. task must be non-runnable. 451 + * task->thread.sve_state must point to at least sve_state_size(task) 452 + * bytes of allocated kernel memory. 453 + */ 454 + void sve_sync_to_fpsimd(struct task_struct *task) 455 + { 456 + if (test_tsk_thread_flag(task, TIF_SVE)) 457 + sve_to_fpsimd(task); 458 + } 459 + 460 + /* 461 + * Ensure that task->thread.sve_state is up to date with respect to 462 + * the task->thread.fpsimd_state. 463 + * 464 + * This should only be called by ptrace to merge new FPSIMD register 465 + * values into a task for which SVE is currently active. 466 + * task must be non-runnable. 467 + * task->thread.sve_state must point to at least sve_state_size(task) 468 + * bytes of allocated kernel memory. 469 + * task->thread.fpsimd_state must already have been initialised with 470 + * the new FPSIMD register values to be merged in. 
471 + */ 472 + void sve_sync_from_fpsimd_zeropad(struct task_struct *task) 473 + { 474 + unsigned int vq; 475 + void *sst = task->thread.sve_state; 476 + struct fpsimd_state const *fst = &task->thread.fpsimd_state; 477 + unsigned int i; 478 + 479 + if (!test_tsk_thread_flag(task, TIF_SVE)) 480 + return; 481 + 482 + vq = sve_vq_from_vl(task->thread.sve_vl); 483 + 484 + memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); 485 + 486 + for (i = 0; i < 32; ++i) 487 + memcpy(ZREG(sst, vq, i), &fst->vregs[i], 488 + sizeof(fst->vregs[i])); 489 + } 490 + 431 491 int sve_set_vector_length(struct task_struct *task, 432 492 unsigned long vl, unsigned long flags) 433 493 {
+272 -8
arch/arm64/kernel/ptrace.c
··· 32 32 #include <linux/security.h> 33 33 #include <linux/init.h> 34 34 #include <linux/signal.h> 35 + #include <linux/string.h> 35 36 #include <linux/uaccess.h> 36 37 #include <linux/perf_event.h> 37 38 #include <linux/hw_breakpoint.h> ··· 41 40 #include <linux/elf.h> 42 41 43 42 #include <asm/compat.h> 43 + #include <asm/cpufeature.h> 44 44 #include <asm/debug-monitors.h> 45 45 #include <asm/pgtable.h> 46 46 #include <asm/stacktrace.h> ··· 620 618 /* 621 619 * TODO: update fp accessors for lazy context switching (sync/flush hwstate) 622 620 */ 621 + static int __fpr_get(struct task_struct *target, 622 + const struct user_regset *regset, 623 + unsigned int pos, unsigned int count, 624 + void *kbuf, void __user *ubuf, unsigned int start_pos) 625 + { 626 + struct user_fpsimd_state *uregs; 627 + 628 + sve_sync_to_fpsimd(target); 629 + 630 + uregs = &target->thread.fpsimd_state.user_fpsimd; 631 + 632 + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 633 + start_pos, start_pos + sizeof(*uregs)); 634 + } 635 + 623 636 static int fpr_get(struct task_struct *target, const struct user_regset *regset, 624 637 unsigned int pos, unsigned int count, 625 638 void *kbuf, void __user *ubuf) 626 639 { 627 - struct user_fpsimd_state *uregs; 628 - uregs = &target->thread.fpsimd_state.user_fpsimd; 629 - 630 640 if (target == current) 631 641 fpsimd_preserve_current_state(); 632 642 633 - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1); 643 + return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0); 644 + } 645 + 646 + static int __fpr_set(struct task_struct *target, 647 + const struct user_regset *regset, 648 + unsigned int pos, unsigned int count, 649 + const void *kbuf, const void __user *ubuf, 650 + unsigned int start_pos) 651 + { 652 + int ret; 653 + struct user_fpsimd_state newstate; 654 + 655 + /* 656 + * Ensure target->thread.fpsimd_state is up to date, so that a 657 + * short copyin can't resurrect stale data. 
658 + */ 659 + sve_sync_to_fpsimd(target); 660 + 661 + newstate = target->thread.fpsimd_state.user_fpsimd; 662 + 663 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 664 + start_pos, start_pos + sizeof(newstate)); 665 + if (ret) 666 + return ret; 667 + 668 + target->thread.fpsimd_state.user_fpsimd = newstate; 669 + 670 + return ret; 634 671 } 635 672 636 673 static int fpr_set(struct task_struct *target, const struct user_regset *regset, ··· 677 636 const void *kbuf, const void __user *ubuf) 678 637 { 679 638 int ret; 680 - struct user_fpsimd_state newstate = 681 - target->thread.fpsimd_state.user_fpsimd; 682 639 683 - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); 640 + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0); 684 641 if (ret) 685 642 return ret; 686 643 687 - target->thread.fpsimd_state.user_fpsimd = newstate; 644 + sve_sync_from_fpsimd_zeropad(target); 688 645 fpsimd_flush_task_state(target); 646 + 689 647 return ret; 690 648 } 691 649 ··· 742 702 return ret; 743 703 } 744 704 705 + #ifdef CONFIG_ARM64_SVE 706 + 707 + static void sve_init_header_from_task(struct user_sve_header *header, 708 + struct task_struct *target) 709 + { 710 + unsigned int vq; 711 + 712 + memset(header, 0, sizeof(*header)); 713 + 714 + header->flags = test_tsk_thread_flag(target, TIF_SVE) ? 
715 + SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD; 716 + if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT)) 717 + header->flags |= SVE_PT_VL_INHERIT; 718 + 719 + header->vl = target->thread.sve_vl; 720 + vq = sve_vq_from_vl(header->vl); 721 + 722 + header->max_vl = sve_max_vl; 723 + if (WARN_ON(!sve_vl_valid(sve_max_vl))) 724 + header->max_vl = header->vl; 725 + 726 + header->size = SVE_PT_SIZE(vq, header->flags); 727 + header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl), 728 + SVE_PT_REGS_SVE); 729 + } 730 + 731 + static unsigned int sve_size_from_header(struct user_sve_header const *header) 732 + { 733 + return ALIGN(header->size, SVE_VQ_BYTES); 734 + } 735 + 736 + static unsigned int sve_get_size(struct task_struct *target, 737 + const struct user_regset *regset) 738 + { 739 + struct user_sve_header header; 740 + 741 + if (!system_supports_sve()) 742 + return 0; 743 + 744 + sve_init_header_from_task(&header, target); 745 + return sve_size_from_header(&header); 746 + } 747 + 748 + static int sve_get(struct task_struct *target, 749 + const struct user_regset *regset, 750 + unsigned int pos, unsigned int count, 751 + void *kbuf, void __user *ubuf) 752 + { 753 + int ret; 754 + struct user_sve_header header; 755 + unsigned int vq; 756 + unsigned long start, end; 757 + 758 + if (!system_supports_sve()) 759 + return -EINVAL; 760 + 761 + /* Header */ 762 + sve_init_header_from_task(&header, target); 763 + vq = sve_vq_from_vl(header.vl); 764 + 765 + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header, 766 + 0, sizeof(header)); 767 + if (ret) 768 + return ret; 769 + 770 + if (target == current) 771 + fpsimd_preserve_current_state(); 772 + 773 + /* Registers: FPSIMD-only case */ 774 + 775 + BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); 776 + if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) 777 + return __fpr_get(target, regset, pos, count, kbuf, ubuf, 778 + SVE_PT_FPSIMD_OFFSET); 779 + 780 + /* Otherwise: full SVE case */ 781 + 782 + 
BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); 783 + start = SVE_PT_SVE_OFFSET; 784 + end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); 785 + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 786 + target->thread.sve_state, 787 + start, end); 788 + if (ret) 789 + return ret; 790 + 791 + start = end; 792 + end = SVE_PT_SVE_FPSR_OFFSET(vq); 793 + ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 794 + start, end); 795 + if (ret) 796 + return ret; 797 + 798 + /* 799 + * Copy fpsr, and fpcr which must follow contiguously in 800 + * struct fpsimd_state: 801 + */ 802 + start = end; 803 + end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 804 + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 805 + &target->thread.fpsimd_state.fpsr, 806 + start, end); 807 + if (ret) 808 + return ret; 809 + 810 + start = end; 811 + end = sve_size_from_header(&header); 812 + return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 813 + start, end); 814 + } 815 + 816 + static int sve_set(struct task_struct *target, 817 + const struct user_regset *regset, 818 + unsigned int pos, unsigned int count, 819 + const void *kbuf, const void __user *ubuf) 820 + { 821 + int ret; 822 + struct user_sve_header header; 823 + unsigned int vq; 824 + unsigned long start, end; 825 + 826 + if (!system_supports_sve()) 827 + return -EINVAL; 828 + 829 + /* Header */ 830 + if (count < sizeof(header)) 831 + return -EINVAL; 832 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header, 833 + 0, sizeof(header)); 834 + if (ret) 835 + goto out; 836 + 837 + /* 838 + * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by 839 + * sve_set_vector_length(), which will also validate them for us: 840 + */ 841 + ret = sve_set_vector_length(target, header.vl, 842 + ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16); 843 + if (ret) 844 + goto out; 845 + 846 + /* Actual VL set may be less than the user asked for: */ 847 + vq = sve_vq_from_vl(target->thread.sve_vl); 848 
+ 849 + /* Registers: FPSIMD-only case */ 850 + 851 + BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header)); 852 + if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) { 853 + ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 854 + SVE_PT_FPSIMD_OFFSET); 855 + clear_tsk_thread_flag(target, TIF_SVE); 856 + goto out; 857 + } 858 + 859 + /* Otherwise: full SVE case */ 860 + 861 + /* 862 + * If setting a different VL from the requested VL and there is 863 + * register data, the data layout will be wrong: don't even 864 + * try to set the registers in this case. 865 + */ 866 + if (count && vq != sve_vq_from_vl(header.vl)) { 867 + ret = -EIO; 868 + goto out; 869 + } 870 + 871 + sve_alloc(target); 872 + 873 + /* 874 + * Ensure target->thread.sve_state is up to date with target's 875 + * FPSIMD regs, so that a short copyin leaves trailing registers 876 + * unmodified. 877 + */ 878 + fpsimd_sync_to_sve(target); 879 + set_tsk_thread_flag(target, TIF_SVE); 880 + 881 + BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); 882 + start = SVE_PT_SVE_OFFSET; 883 + end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq); 884 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 885 + target->thread.sve_state, 886 + start, end); 887 + if (ret) 888 + goto out; 889 + 890 + start = end; 891 + end = SVE_PT_SVE_FPSR_OFFSET(vq); 892 + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 893 + start, end); 894 + if (ret) 895 + goto out; 896 + 897 + /* 898 + * Copy fpsr, and fpcr which must follow contiguously in 899 + * struct fpsimd_state: 900 + */ 901 + start = end; 902 + end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 903 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 904 + &target->thread.fpsimd_state.fpsr, 905 + start, end); 906 + 907 + out: 908 + fpsimd_flush_task_state(target); 909 + return ret; 910 + } 911 + 912 + #endif /* CONFIG_ARM64_SVE */ 913 + 745 914 enum aarch64_regset { 746 915 REGSET_GPR, 747 916 REGSET_FPR, ··· 960 711 
REGSET_HW_WATCH, 961 712 #endif 962 713 REGSET_SYSTEM_CALL, 714 + #ifdef CONFIG_ARM64_SVE 715 + REGSET_SVE, 716 + #endif 963 717 }; 964 718 965 719 static const struct user_regset aarch64_regsets[] = { ··· 1020 768 .get = system_call_get, 1021 769 .set = system_call_set, 1022 770 }, 771 + #ifdef CONFIG_ARM64_SVE 772 + [REGSET_SVE] = { /* Scalable Vector Extension */ 773 + .core_note_type = NT_ARM_SVE, 774 + .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE), 775 + SVE_VQ_BYTES), 776 + .size = SVE_VQ_BYTES, 777 + .align = SVE_VQ_BYTES, 778 + .get = sve_get, 779 + .set = sve_set, 780 + .get_size = sve_get_size, 781 + }, 782 + #endif 1023 783 }; 1024 784 1025 785 static const struct user_regset_view user_aarch64_view = {
+1
include/uapi/linux/elf.h
··· 416 416 #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ 417 417 #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ 418 418 #define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ 419 + #define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */ 419 420 #define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */ 420 421 #define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */ 421 422 #define NT_METAG_TLS 0x502 /* Metag TLS pointer */