[ARM] vfp: make fpexc bit names less verbose

Use the abbreviated FPEXC bit names instead of the long verbose
names for the FPEXC bits.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Russell King; committed by Russell King. Commits: 228adef1, 21d1ca04.

+14 -14
+6 -6
arch/arm/vfp/vfphw.S
···
 74  74
 75  75 	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
 76  76 	DBGSTR1	"fpexc %08x", r1
 77     -	tst	r1, #FPEXC_ENABLE
     77 +	tst	r1, #FPEXC_EN
 78  78 	bne	look_for_VFP_exceptions	@ VFP is already enabled
 79  79
 80  80 	DBGSTR1	"enable %x", r10
 81  81 	ldr	r3, last_VFP_context_address
 82     -	orr	r1, r1, #FPEXC_ENABLE	@ user FPEXC has the enable bit set
     82 +	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
 83  83 	ldr	r4, [r3, r11, lsl #2]	@ last_VFP_context pointer
 84     -	bic	r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled
     84 +	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
 85  85 	cmp	r4, r10
 86  86 	beq	check_for_exception	@ we are returning to the same
 87  87 					@ process, so the registers are
···
124 124 	VFPFMXR	FPSCR, r5		@ restore status
125 125
126 126 check_for_exception:
127     -	tst	r1, #FPEXC_EXCEPTION
    127 +	tst	r1, #FPEXC_EX
128 128 	bne	process_exception	@ might as well handle the pending
129 129 					@ exception before retrying branch
130 130 					@ out before setting an FPEXC that
···
136 136
137 137
138 138 look_for_VFP_exceptions:
139     -	tst	r1, #FPEXC_EXCEPTION
    139 +	tst	r1, #FPEXC_EX
140 140 	bne	process_exception
141 141 	VFPFMRX	r5, FPSCR
142     -	tst	r5, #FPSCR_IXE	@ IXE doesn't set FPEXC_EXCEPTION !
    142 +	tst	r5, #FPSCR_IXE	@ IXE doesn't set FPEXC_EX !
143 143 	bne	process_exception
144 144
145 145 	@ Fall into hand on to next handler - appropriate coproc instr
+6 -6
arch/arm/vfp/vfpmodule.c
···
 53  53  * case the thread migrates to a different CPU. The
 54  54  * restoring is done lazily.
 55  55  */
 56     - if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
     56 + if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
 57  57 	vfp_save_state(last_VFP_context[cpu], fpexc);
 58  58 	last_VFP_context[cpu]->hard.cpu = cpu;
 59  59 }
···
 70  70  * Always disable VFP so we can lazily save/restore the
 71  71  * old state.
 72  72  */
 73     - fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
     73 + fmxr(FPEXC, fpexc & ~FPEXC_EN);
 74  74 return NOTIFY_DONE;
 75  75 }
 76  76
···
 81  81  */
 82  82 memset(vfp, 0, sizeof(union vfp_state));
 83  83
 84     - vfp->hard.fpexc = FPEXC_ENABLE;
     84 + vfp->hard.fpexc = FPEXC_EN;
 85  85 vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
 86  86
 87  87 /*
 88  88  * Disable VFP to ensure we initialise it first.
 89  89  */
 90     - fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
     90 + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 91  91 }
 92  92
 93  93 /* flush and release case: Per-thread VFP cleanup. */
···
229 229 /*
230 230  * Enable access to the VFP so we can handle the bounce.
231 231  */
232     - fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
    232 + fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
233 233
234 234 orig_fpscr = fpscr = fmrx(FPSCR);
235 235
···
248 248 /*
249 249  * Modify fpscr to indicate the number of iterations remaining
250 250  */
251     - if (fpexc & FPEXC_EXCEPTION) {
    251 + if (fpexc & FPEXC_EX) {
252 252 	u32 len;
253 253
254 254 	len = fpexc + (1 << FPEXC_LENGTH_BIT);
+2 -2
include/asm-arm/vfp.h
···
 26  26 #define FPSID_REV_MASK		(0xF << FPSID_REV_BIT)
 27  27
 28  28 /* FPEXC bits */
 29     - #define FPEXC_EXCEPTION		(1<<31)
 30     - #define FPEXC_ENABLE		(1<<30)
     29 + #define FPEXC_EX		(1 << 31)
     30 + #define FPEXC_EN		(1 << 30)
 31  31
 32  32 /* FPSCR bits */
 33  33 #define FPSCR_DEFAULT_NAN	(1<<25)