Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Avoid breakage caused by .altmacro in fpsimd save/restore macros

Alternate macro mode is not a property of a macro definition, but a
gas runtime state that alters the way macros are expanded for ever
after (until .noaltmacro is seen).

This means that subsequent assembly code that calls other macros can
break if fpsimdmacros.h is included.

Since these instruction sequences are simple (if dull -- but in a
good way), this patch solves the problem by simply expanding the
.irp loops. The pre-existing fpsimd_{save,restore} macros weren't
rolled with .irp anyway and the sequences affected are short, so
this change restores consistency at little cost.

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Authored by Dave P Martin and committed by Catalin Marinas
6917c857 a1c76574

+32 -11
+32 -11
arch/arm64/include/asm/fpsimdmacros.h
··· 76 76 fpsimd_restore_fpcr x\tmpnr, \state 77 77 .endm 78 78 79 - .altmacro 80 79 .macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2 81 80 mrs x\tmpnr1, fpsr 82 81 str w\numnr, [\state, #8] ··· 85 86 add \state, \state, x\numnr, lsl #4 86 87 sub x\tmpnr1, x\tmpnr1, x\numnr, lsl #1 87 88 br x\tmpnr1 88 - .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0 89 - .irp qb, %(qa + 1) 90 - stp q\qa, q\qb, [\state, # -16 * \qa - 16] 91 - .endr 92 - .endr 89 + stp q30, q31, [\state, #-16 * 30 - 16] 90 + stp q28, q29, [\state, #-16 * 28 - 16] 91 + stp q26, q27, [\state, #-16 * 26 - 16] 92 + stp q24, q25, [\state, #-16 * 24 - 16] 93 + stp q22, q23, [\state, #-16 * 22 - 16] 94 + stp q20, q21, [\state, #-16 * 20 - 16] 95 + stp q18, q19, [\state, #-16 * 18 - 16] 96 + stp q16, q17, [\state, #-16 * 16 - 16] 97 + stp q14, q15, [\state, #-16 * 14 - 16] 98 + stp q12, q13, [\state, #-16 * 12 - 16] 99 + stp q10, q11, [\state, #-16 * 10 - 16] 100 + stp q8, q9, [\state, #-16 * 8 - 16] 101 + stp q6, q7, [\state, #-16 * 6 - 16] 102 + stp q4, q5, [\state, #-16 * 4 - 16] 103 + stp q2, q3, [\state, #-16 * 2 - 16] 104 + stp q0, q1, [\state, #-16 * 0 - 16] 93 105 0: 94 106 .endm 95 107 ··· 113 103 add \state, \state, x\tmpnr2, lsl #4 114 104 sub x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1 115 105 br x\tmpnr1 116 - .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0 117 - .irp qb, %(qa + 1) 118 - ldp q\qa, q\qb, [\state, # -16 * \qa - 16] 119 - .endr 120 - .endr 106 + ldp q30, q31, [\state, #-16 * 30 - 16] 107 + ldp q28, q29, [\state, #-16 * 28 - 16] 108 + ldp q26, q27, [\state, #-16 * 26 - 16] 109 + ldp q24, q25, [\state, #-16 * 24 - 16] 110 + ldp q22, q23, [\state, #-16 * 22 - 16] 111 + ldp q20, q21, [\state, #-16 * 20 - 16] 112 + ldp q18, q19, [\state, #-16 * 18 - 16] 113 + ldp q16, q17, [\state, #-16 * 16 - 16] 114 + ldp q14, q15, [\state, #-16 * 14 - 16] 115 + ldp q12, q13, [\state, #-16 * 12 - 16] 116 + ldp q10, q11, [\state, #-16 * 10 - 16] 117 + ldp q8, q9, [\state, #-16 * 8 - 16] 118 + ldp q6, q7, [\state, #-16 * 6 - 16] 119 + ldp q4, q5, [\state, #-16 * 4 - 16] 120 + ldp q2, q3, [\state, #-16 * 2 - 16] 121 + ldp q0, q1, [\state, #-16 * 0 - 16] 121 122 0: 122 123 .endm