Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64/sve: Disentangle <uapi/asm/ptrace.h> from <uapi/asm/sigcontext.h>

Currently, <uapi/asm/sigcontext.h> provides common definitions for
describing SVE context structures that are also used by the ptrace
definitions in <uapi/asm/ptrace.h>.

For this reason, a #include of <asm/sigcontext.h> was added in
ptrace.h, but it turns out that this can interact badly with
userspace code that tries to include ptrace.h on top of the libc
headers (which may provide their own shadow definitions for
sigcontext.h).

To make the headers easier for userspace to consume, this patch
bounces the common definitions into an __SVE_* namespace and moves
them to a backend header <uapi/asm/sve_context.h> that can be
included by the other headers as appropriate. This should allow
ptrace.h to be used alongside libc's sigcontext.h (if any) without
ill effects.

This should make the situation unambiguous: <asm/sigcontext.h> is
the header to include for the sigframe-specific definitions, while
<asm/ptrace.h> is the header to include for ptrace-specific
definitions.

To avoid conflicting with existing usage, <asm/sigcontext.h>
remains the canonical way to get the common definitions for
SVE_VQ_MIN, sve_vq_from_vl() etc., both in userspace and in the
kernel: relying on these being defined as a side effect of
including just <asm/ptrace.h> was never intended to be safe.

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>

authored by

Dave Martin and committed by
Will Deacon
9966a05c ee1b465b

+99 -49
+18 -21
arch/arm64/include/uapi/asm/ptrace.h
··· 23 23 #include <linux/types.h> 24 24 25 25 #include <asm/hwcap.h> 26 - #include <asm/sigcontext.h> 26 + #include <asm/sve_context.h> 27 27 28 28 29 29 /* ··· 130 130 */ 131 131 132 132 /* Offset from the start of struct user_sve_header to the register data */ 133 - #define SVE_PT_REGS_OFFSET \ 134 - ((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1)) \ 135 - / SVE_VQ_BYTES * SVE_VQ_BYTES) 133 + #define SVE_PT_REGS_OFFSET \ 134 + ((sizeof(struct user_sve_header) + (__SVE_VQ_BYTES - 1)) \ 135 + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) 136 136 137 137 /* 138 138 * The register data content and layout depends on the value of the ··· 178 178 * Additional data might be appended in the future. 179 179 */ 180 180 181 - #define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq) 182 - #define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq) 183 - #define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq) 181 + #define SVE_PT_SVE_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) 182 + #define SVE_PT_SVE_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) 183 + #define SVE_PT_SVE_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) 184 184 #define SVE_PT_SVE_FPSR_SIZE sizeof(__u32) 185 185 #define SVE_PT_SVE_FPCR_SIZE sizeof(__u32) 186 - 187 - #define __SVE_SIG_TO_PT(offset) \ 188 - ((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET) 189 186 190 187 #define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET 191 188 192 189 #define SVE_PT_SVE_ZREGS_OFFSET \ 193 - __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET) 190 + (SVE_PT_REGS_OFFSET + __SVE_ZREGS_OFFSET) 194 191 #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \ 195 - __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n)) 192 + (SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n)) 196 193 #define SVE_PT_SVE_ZREGS_SIZE(vq) \ 197 - (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET) 194 + (SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET) 198 195 199 196 #define SVE_PT_SVE_PREGS_OFFSET(vq) \ 200 - __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq)) 197 + (SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq)) 201 198 
#define SVE_PT_SVE_PREG_OFFSET(vq, n) \ 202 - __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n)) 199 + (SVE_PT_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n)) 203 200 #define SVE_PT_SVE_PREGS_SIZE(vq) \ 204 - (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \ 201 + (SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \ 205 202 SVE_PT_SVE_PREGS_OFFSET(vq)) 206 203 207 204 #define SVE_PT_SVE_FFR_OFFSET(vq) \ 208 - __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq)) 205 + (SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq)) 209 206 210 207 #define SVE_PT_SVE_FPSR_OFFSET(vq) \ 211 208 ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \ 212 - (SVE_VQ_BYTES - 1)) \ 213 - / SVE_VQ_BYTES * SVE_VQ_BYTES) 209 + (__SVE_VQ_BYTES - 1)) \ 210 + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) 214 211 #define SVE_PT_SVE_FPCR_OFFSET(vq) \ 215 212 (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE) 216 213 ··· 218 221 219 222 #define SVE_PT_SVE_SIZE(vq, flags) \ 220 223 ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \ 221 - - SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \ 222 - / SVE_VQ_BYTES * SVE_VQ_BYTES) 224 + - SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1)) \ 225 + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) 223 226 224 227 #define SVE_PT_SIZE(vq, flags) \ 225 228 (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \
+28 -28
arch/arm64/include/uapi/asm/sigcontext.h
··· 130 130 131 131 #endif /* !__ASSEMBLY__ */ 132 132 133 + #include <asm/sve_context.h> 134 + 133 135 /* 134 136 * The SVE architecture leaves space for future expansion of the 135 137 * vector length beyond its initial architectural limit of 2048 bits ··· 140 138 * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ 141 139 * terminology. 142 140 */ 143 - #define SVE_VQ_BYTES 16 /* number of bytes per quadword */ 141 + #define SVE_VQ_BYTES __SVE_VQ_BYTES /* bytes per quadword */ 144 142 145 - #define SVE_VQ_MIN 1 146 - #define SVE_VQ_MAX 512 143 + #define SVE_VQ_MIN __SVE_VQ_MIN 144 + #define SVE_VQ_MAX __SVE_VQ_MAX 147 145 148 - #define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES) 149 - #define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES) 146 + #define SVE_VL_MIN __SVE_VL_MIN 147 + #define SVE_VL_MAX __SVE_VL_MAX 150 148 151 - #define SVE_NUM_ZREGS 32 152 - #define SVE_NUM_PREGS 16 149 + #define SVE_NUM_ZREGS __SVE_NUM_ZREGS 150 + #define SVE_NUM_PREGS __SVE_NUM_PREGS 153 151 154 - #define sve_vl_valid(vl) \ 155 - ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX) 156 - #define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES) 157 - #define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES) 152 + #define sve_vl_valid(vl) __sve_vl_valid(vl) 153 + #define sve_vq_from_vl(vl) __sve_vq_from_vl(vl) 154 + #define sve_vl_from_vq(vq) __sve_vl_from_vq(vq) 158 155 159 156 /* 160 157 * If the SVE registers are currently live for the thread at signal delivery, ··· 206 205 * Additional data might be appended in the future. 
207 206 */ 208 207 209 - #define SVE_SIG_ZREG_SIZE(vq) ((__u32)(vq) * SVE_VQ_BYTES) 210 - #define SVE_SIG_PREG_SIZE(vq) ((__u32)(vq) * (SVE_VQ_BYTES / 8)) 211 - #define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq) 208 + #define SVE_SIG_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq) 209 + #define SVE_SIG_PREG_SIZE(vq) __SVE_PREG_SIZE(vq) 210 + #define SVE_SIG_FFR_SIZE(vq) __SVE_FFR_SIZE(vq) 212 211 213 212 #define SVE_SIG_REGS_OFFSET \ 214 - ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \ 215 - / SVE_VQ_BYTES * SVE_VQ_BYTES) 213 + ((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1)) \ 214 + / __SVE_VQ_BYTES * __SVE_VQ_BYTES) 216 215 217 - #define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET 216 + #define SVE_SIG_ZREGS_OFFSET \ 217 + (SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET) 218 218 #define SVE_SIG_ZREG_OFFSET(vq, n) \ 219 - (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n)) 220 - #define SVE_SIG_ZREGS_SIZE(vq) \ 221 - (SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET) 219 + (SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n)) 220 + #define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq) 222 221 223 222 #define SVE_SIG_PREGS_OFFSET(vq) \ 224 - (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq)) 223 + (SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq)) 225 224 #define SVE_SIG_PREG_OFFSET(vq, n) \ 226 - (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n)) 227 - #define SVE_SIG_PREGS_SIZE(vq) \ 228 - (SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq)) 225 + (SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n)) 226 + #define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq) 229 227 230 228 #define SVE_SIG_FFR_OFFSET(vq) \ 231 - (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq)) 229 + (SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq)) 232 230 233 231 #define SVE_SIG_REGS_SIZE(vq) \ 234 - (SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET) 232 + (__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq)) 235 233 236 - #define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + 
SVE_SIG_REGS_SIZE(vq)) 237 - 234 + #define SVE_SIG_CONTEXT_SIZE(vq) \ 235 + (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq)) 238 236 239 237 #endif /* _UAPI__ASM_SIGCONTEXT_H */
+53
arch/arm64/include/uapi/asm/sve_context.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + /* Copyright (C) 2017-2018 ARM Limited */ 3 + 4 + /* 5 + * For use by other UAPI headers only. 6 + * Do not make direct use of header or its definitions. 7 + */ 8 + 9 + #ifndef _UAPI__ASM_SVE_CONTEXT_H 10 + #define _UAPI__ASM_SVE_CONTEXT_H 11 + 12 + #include <linux/types.h> 13 + 14 + #define __SVE_VQ_BYTES 16 /* number of bytes per quadword */ 15 + 16 + #define __SVE_VQ_MIN 1 17 + #define __SVE_VQ_MAX 512 18 + 19 + #define __SVE_VL_MIN (__SVE_VQ_MIN * __SVE_VQ_BYTES) 20 + #define __SVE_VL_MAX (__SVE_VQ_MAX * __SVE_VQ_BYTES) 21 + 22 + #define __SVE_NUM_ZREGS 32 23 + #define __SVE_NUM_PREGS 16 24 + 25 + #define __sve_vl_valid(vl) \ 26 + ((vl) % __SVE_VQ_BYTES == 0 && \ 27 + (vl) >= __SVE_VL_MIN && \ 28 + (vl) <= __SVE_VL_MAX) 29 + 30 + #define __sve_vq_from_vl(vl) ((vl) / __SVE_VQ_BYTES) 31 + #define __sve_vl_from_vq(vq) ((vq) * __SVE_VQ_BYTES) 32 + 33 + #define __SVE_ZREG_SIZE(vq) ((__u32)(vq) * __SVE_VQ_BYTES) 34 + #define __SVE_PREG_SIZE(vq) ((__u32)(vq) * (__SVE_VQ_BYTES / 8)) 35 + #define __SVE_FFR_SIZE(vq) __SVE_PREG_SIZE(vq) 36 + 37 + #define __SVE_ZREGS_OFFSET 0 38 + #define __SVE_ZREG_OFFSET(vq, n) \ 39 + (__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n)) 40 + #define __SVE_ZREGS_SIZE(vq) \ 41 + (__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET) 42 + 43 + #define __SVE_PREGS_OFFSET(vq) \ 44 + (__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq)) 45 + #define __SVE_PREG_OFFSET(vq, n) \ 46 + (__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n)) 47 + #define __SVE_PREGS_SIZE(vq) \ 48 + (__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq)) 49 + 50 + #define __SVE_FFR_OFFSET(vq) \ 51 + (__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq)) 52 + 53 + #endif /* ! _UAPI__ASM_SVE_CONTEXT_H */