@@ -74,14 +74,14 @@
 
 	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
 	DBGSTR1	"fpexc %08x", r1
-	tst	r1, #FPEXC_ENABLE
+	tst	r1, #FPEXC_EN
 	bne	look_for_VFP_exceptions	@ VFP is already enabled
 
 	DBGSTR1	"enable %x", r10
 	ldr	r3, last_VFP_context_address
-	orr	r1, r1, #FPEXC_ENABLE	@ user FPEXC has the enable bit set
+	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
 	ldr	r4, [r3, r11, lsl #2]	@ last_VFP_context pointer
-	bic	r5, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled
+	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
 	cmp	r4, r10
 	beq	check_for_exception	@ we are returning to the same
 					@ process, so the registers are
@@ -124,7 +124,7 @@
 	VFPFMXR	FPSCR, r5		@ restore status
 
 check_for_exception:
-	tst	r1, #FPEXC_EXCEPTION
+	tst	r1, #FPEXC_EX
 	bne	process_exception	@ might as well handle the pending
 					@ exception before retrying branch
 					@ out before setting an FPEXC that
@@ -136,10 +136,10 @@
 
 
 look_for_VFP_exceptions:
-	tst	r1, #FPEXC_EXCEPTION
+	tst	r1, #FPEXC_EX
 	bne	process_exception
 	VFPFMRX	r5, FPSCR
-	tst	r5, #FPSCR_IXE		@ IXE doesn't set FPEXC_EXCEPTION !
+	tst	r5, #FPSCR_IXE		@ IXE doesn't set FPEXC_EX !
 	bne	process_exception
 
 	@ Fall into hand on to next handler - appropriate coproc instr
 arch/arm/vfp/vfpmodule.c | 6 +++---  (+6 −6)
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -53,7 +53,7 @@
 	 * case the thread migrates to a different CPU. The
 	 * restoring is done lazily.
 	 */
-	if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
+	if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
 		vfp_save_state(last_VFP_context[cpu], fpexc);
 		last_VFP_context[cpu]->hard.cpu = cpu;
 	}
@@ -70,7 +70,7 @@
 	 * Always disable VFP so we can lazily save/restore the
 	 * old state.
 	 */
-	fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
+	fmxr(FPEXC, fpexc & ~FPEXC_EN);
 	return NOTIFY_DONE;
 }
 
@@ -81,13 +81,13 @@
 	 */
 	memset(vfp, 0, sizeof(union vfp_state));
 
-	vfp->hard.fpexc = FPEXC_ENABLE;
+	vfp->hard.fpexc = FPEXC_EN;
 	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
 
 	/*
 	 * Disable VFP to ensure we initialise it first.
 	 */
-	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
+	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 }
 
 /* flush and release case: Per-thread VFP cleanup. */
@@ -229,7 +229,7 @@
 	/*
 	 * Enable access to the VFP so we can handle the bounce.
 	 */
-	fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
+	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));
 
 	orig_fpscr = fpscr = fmrx(FPSCR);
 
@@ -248,7 +248,7 @@
 	/*
 	 * Modify fpscr to indicate the number of iterations remaining
 	 */
-	if (fpexc & FPEXC_EXCEPTION) {
+	if (fpexc & FPEXC_EX) {
 		u32 len;
 
 		len = fpexc + (1 << FPEXC_LENGTH_BIT);
···5353 * case the thread migrates to a different CPU. The5454 * restoring is done lazily.5555 */5656- if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {5656+ if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {5757 vfp_save_state(last_VFP_context[cpu], fpexc);5858 last_VFP_context[cpu]->hard.cpu = cpu;5959 }···7070 * Always disable VFP so we can lazily save/restore the7171 * old state.7272 */7373- fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);7373+ fmxr(FPEXC, fpexc & ~FPEXC_EN);7474 return NOTIFY_DONE;7575 }7676···8181 */8282 memset(vfp, 0, sizeof(union vfp_state));83838484- vfp->hard.fpexc = FPEXC_ENABLE;8484+ vfp->hard.fpexc = FPEXC_EN;8585 vfp->hard.fpscr = FPSCR_ROUND_NEAREST;86868787 /*8888 * Disable VFP to ensure we initialise it first.8989 */9090- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);9090+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);9191 }92929393 /* flush and release case: Per-thread VFP cleanup. */···229229 /*230230 * Enable access to the VFP so we can handle the bounce.231231 */232232- fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));232232+ fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));233233234234 orig_fpscr = fpscr = fmrx(FPSCR);235235···248248 /*249249 * Modify fpscr to indicate the number of iterations remaining250250 */251251- if (fpexc & FPEXC_EXCEPTION) {251251+ if (fpexc & FPEXC_EX) {252252 u32 len;253253254254 len = fpexc + (1 << FPEXC_LENGTH_BIT);