Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

- Refactor VFP code and convert to C code (Ard Biesheuvel)

- Fix hardware breakpoint single-stepping using bpf_overflow_handler

- Make SMP stop calls asynchronous, allowing panic from IRQ context to
work

- Fix for kernel-doc warnings for locomo

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
Revert part of ae1f8d793a19 ("ARM: 9304/1: add prototype for function called only from asm")
ARM: 9318/1: locomo: move kernel-doc to prevent warnings
ARM: 9317/1: kexec: Make smp stop calls asynchronous
ARM: 9316/1: hw_breakpoint: fix single-stepping when using bpf_overflow_handler
ARM: entry: Make asm coproc dispatch code NWFPE only
ARM: iwmmxt: Use undef hook to enable coprocessor for task
ARM: entry: Disregard Thumb undef exception in coproc dispatch
ARM: vfp: Use undef hook for handling VFP exceptions
ARM: kernel: Get rid of thread_info::used_cp[] array
ARM: vfp: Reimplement VFP exception entry in C code
ARM: vfp: Remove workaround for Feroceon CPUs
ARM: vfp: Record VFP bounces as perf emulation faults

+314 -555
+15 -13
arch/arm/common/locomo.c
··· 350 350 } 351 351 #endif 352 352 353 - 354 - /** 355 - * locomo_probe - probe for a single LoCoMo chip. 356 - * @phys_addr: physical address of device. 357 - * 358 - * Probe for a LoCoMo chip. This must be called 359 - * before any other locomo-specific code. 360 - * 361 - * Returns: 362 - * %-ENODEV device not found. 363 - * %-EBUSY physical address already marked in-use. 364 - * %0 successful. 365 - */ 366 353 static int 367 354 __locomo_probe(struct device *me, struct resource *mem, int irq) 368 355 { ··· 466 479 kfree(lchip); 467 480 } 468 481 482 + /** 483 + * locomo_probe - probe for a single LoCoMo chip. 484 + * @dev: platform device 485 + * 486 + * Probe for a LoCoMo chip. This must be called 487 + * before any other locomo-specific code. 488 + * 489 + * Returns: 490 + * * %-EINVAL - device's IORESOURCE_MEM not found 491 + * * %-ENXIO - could not allocate an IRQ for the device 492 + * * %-ENODEV - device not found. 493 + * * %-EBUSY - physical address already marked in-use. 494 + * * %-ENOMEM - could not allocate or iomap memory. 495 + * * %0 - successful. 496 + */ 469 497 static int locomo_probe(struct platform_device *dev) 470 498 { 471 499 struct resource *mem;
+16 -1
arch/arm/include/asm/thread_info.h
··· 40 40 DECLARE_PER_CPU(struct task_struct *, __entry_task); 41 41 42 42 #include <asm/types.h> 43 + #include <asm/traps.h> 43 44 44 45 struct cpu_context_save { 45 46 __u32 r4; ··· 67 66 __u32 cpu_domain; /* cpu domain */ 68 67 struct cpu_context_save cpu_context; /* cpu context */ 69 68 __u32 abi_syscall; /* ABI type and syscall nr */ 70 - __u8 used_cp[16]; /* thread used copro */ 71 69 unsigned long tp_value[2]; /* TLS registers */ 72 70 union fp_state fpstate __attribute__((aligned(8))); 73 71 union vfp_state vfpstate; ··· 104 104 extern void iwmmxt_task_restore(struct thread_info *, void *); 105 105 extern void iwmmxt_task_release(struct thread_info *); 106 106 extern void iwmmxt_task_switch(struct thread_info *); 107 + 108 + extern int iwmmxt_undef_handler(struct pt_regs *, u32); 109 + 110 + static inline void register_iwmmxt_undef_handler(void) 111 + { 112 + static struct undef_hook iwmmxt_undef_hook = { 113 + .instr_mask = 0x0c000e00, 114 + .instr_val = 0x0c000000, 115 + .cpsr_mask = MODE_MASK | PSR_T_BIT, 116 + .cpsr_val = USR_MODE, 117 + .fn = iwmmxt_undef_handler, 118 + }; 119 + 120 + register_undef_hook(&iwmmxt_undef_hook); 121 + } 107 122 108 123 extern void vfp_sync_hwstate(struct thread_info *); 109 124 extern void vfp_flush_hwstate(struct thread_info *);
-1
arch/arm/include/asm/vfp.h
··· 102 102 103 103 #ifndef __ASSEMBLY__ 104 104 void vfp_disable(void); 105 - void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs); 106 105 #endif 107 106 108 107 #endif /* __ASM_VFP_H */
-1
arch/arm/kernel/asm-offsets.c
··· 47 47 DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain)); 48 48 DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context)); 49 49 DEFINE(TI_ABI_SYSCALL, offsetof(struct thread_info, abi_syscall)); 50 - DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp)); 51 50 DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); 52 51 DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); 53 52 #ifdef CONFIG_VFP
+11 -243
arch/arm/kernel/entry-armv.S
··· 446 446 __und_usr: 447 447 usr_entry uaccess=0 448 448 449 - mov r2, r4 450 - mov r3, r5 451 - 452 - @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the 453 - @ faulting instruction depending on Thumb mode. 454 - @ r3 = regs->ARM_cpsr 455 - @ 456 - @ The emulation code returns using r9 if it has emulated the 457 - @ instruction, or the more conventional lr if we are to treat 458 - @ this as a real undefined instruction 459 - @ 460 - badr r9, ret_from_exception 461 - 462 449 @ IRQs must be enabled before attempting to read the instruction from 463 450 @ user space since that could cause a page/translation fault if the 464 451 @ page table was modified by another CPU. 465 452 enable_irq 466 453 467 - tst r3, #PSR_T_BIT @ Thumb mode? 468 - bne __und_usr_thumb 469 - sub r4, r2, #4 @ ARM instr at LR - 4 470 - 1: ldrt r0, [r4] 471 - ARM_BE8(rev r0, r0) @ little endian instruction 472 - 454 + tst r5, #PSR_T_BIT @ Thumb mode? 455 + mov r1, #2 @ set insn size to 2 for Thumb 456 + bne 0f @ handle as Thumb undef exception 457 + #ifdef CONFIG_FPE_NWFPE 458 + adr r9, ret_from_exception 459 + bl call_fpe @ returns via R9 on success 460 + #endif 461 + mov r1, #4 @ set insn size to 4 for ARM 462 + 0: mov r0, sp 473 463 uaccess_disable ip 474 - 475 - @ r0 = 32-bit ARM instruction which caused the exception 476 - @ r2 = PC value for the following instruction (:= regs->ARM_pc) 477 - @ r4 = PC value for the faulting instruction 478 - @ lr = 32-bit undefined instruction function 479 - badr lr, __und_usr_fault_32 480 - b call_fpe 481 - 482 - __und_usr_thumb: 483 - @ Thumb instruction 484 - sub r4, r2, #2 @ First half of thumb instr at LR - 2 485 - #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 486 - /* 487 - * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms 488 - * can never be supported in a single kernel, this code is not applicable at 489 - * all when __LINUX_ARM_ARCH__ < 6. 
This allows simplifying assumptions to be 490 - * made about .arch directives. 491 - */ 492 - #if __LINUX_ARM_ARCH__ < 7 493 - /* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */ 494 - ldr_va r5, cpu_architecture 495 - cmp r5, #CPU_ARCH_ARMv7 496 - blo __und_usr_fault_16 @ 16bit undefined instruction 497 - /* 498 - * The following code won't get run unless the running CPU really is v7, so 499 - * coding round the lack of ldrht on older arches is pointless. Temporarily 500 - * override the assembler target arch with the minimum required instead: 501 - */ 502 - .arch armv6t2 503 - #endif 504 - 2: ldrht r5, [r4] 505 - ARM_BE8(rev16 r5, r5) @ little endian instruction 506 - cmp r5, #0xe800 @ 32bit instruction if xx != 0 507 - blo __und_usr_fault_16_pan @ 16bit undefined instruction 508 - 3: ldrht r0, [r2] 509 - ARM_BE8(rev16 r0, r0) @ little endian instruction 510 - uaccess_disable ip 511 - add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 512 - str r2, [sp, #S_PC] @ it's a 2x16bit instr, update 513 - orr r0, r0, r5, lsl #16 514 - badr lr, __und_usr_fault_32 515 - @ r0 = the two 16-bit Thumb instructions which caused the exception 516 - @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc) 517 - @ r4 = PC value for the first 16-bit Thumb instruction 518 - @ lr = 32bit undefined instruction function 519 - 520 - #if __LINUX_ARM_ARCH__ < 7 521 - /* If the target arch was overridden, change it back: */ 522 - #ifdef CONFIG_CPU_32v6K 523 - .arch armv6k 524 - #else 525 - .arch armv6 526 - #endif 527 - #endif /* __LINUX_ARM_ARCH__ < 7 */ 528 - #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */ 529 - b __und_usr_fault_16 530 - #endif 464 + bl __und_fault 465 + b ret_from_exception 531 466 UNWIND(.fnend) 532 467 ENDPROC(__und_usr) 533 - 534 - /* 535 - * The out of line fixup for the ldrt instructions above. 
536 - */ 537 - .pushsection .text.fixup, "ax" 538 - .align 2 539 - 4: str r4, [sp, #S_PC] @ retry current instruction 540 - ret r9 541 - .popsection 542 - .pushsection __ex_table,"a" 543 - .long 1b, 4b 544 - #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 545 - .long 2b, 4b 546 - .long 3b, 4b 547 - #endif 548 - .popsection 549 - 550 - /* 551 - * Check whether the instruction is a co-processor instruction. 552 - * If yes, we need to call the relevant co-processor handler. 553 - * 554 - * Note that we don't do a full check here for the co-processor 555 - * instructions; all instructions with bit 27 set are well 556 - * defined. The only instructions that should fault are the 557 - * co-processor instructions. However, we have to watch out 558 - * for the ARM6/ARM7 SWI bug. 559 - * 560 - * NEON is a special case that has to be handled here. Not all 561 - * NEON instructions are co-processor instructions, so we have 562 - * to make a special case of checking for them. Plus, there's 563 - * five groups of them, so we have a table of mask/opcode pairs 564 - * to check against, and if any match then we branch off into the 565 - * NEON handler code. 566 - * 567 - * Emulators may wish to make use of the following registers: 568 - * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) 569 - * r2 = PC value to resume execution after successful emulation 570 - * r9 = normal "successful" return address 571 - * r10 = this threads thread_info structure 572 - * lr = unrecognised instruction return address 573 - * IRQs enabled, FIQs enabled. 
574 - */ 575 - @ 576 - @ Fall-through from Thumb-2 __und_usr 577 - @ 578 - #ifdef CONFIG_NEON 579 - get_thread_info r10 @ get current thread 580 - adr r6, .LCneon_thumb_opcodes 581 - b 2f 582 - #endif 583 - call_fpe: 584 - get_thread_info r10 @ get current thread 585 - #ifdef CONFIG_NEON 586 - adr r6, .LCneon_arm_opcodes 587 - 2: ldr r5, [r6], #4 @ mask value 588 - ldr r7, [r6], #4 @ opcode bits matching in mask 589 - cmp r5, #0 @ end mask? 590 - beq 1f 591 - and r8, r0, r5 592 - cmp r8, r7 @ NEON instruction? 593 - bne 2b 594 - mov r7, #1 595 - strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used 596 - strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used 597 - b do_vfp @ let VFP handler handle this 598 - 1: 599 - #endif 600 - tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 601 - tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 602 - reteq lr 603 - and r8, r0, #0x00000f00 @ mask out CP number 604 - mov r7, #1 605 - add r6, r10, r8, lsr #8 @ add used_cp[] array offset first 606 - strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[] 607 - #ifdef CONFIG_IWMMXT 608 - @ Test if we need to give access to iWMMXt coprocessors 609 - ldr r5, [r10, #TI_FLAGS] 610 - rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only 611 - movscs r7, r5, lsr #(TIF_USING_IWMMXT + 1) 612 - bcs iwmmxt_task_enable 613 - #endif 614 - ARM( add pc, pc, r8, lsr #6 ) 615 - THUMB( lsr r8, r8, #6 ) 616 - THUMB( add pc, r8 ) 617 - nop 618 - 619 - ret.w lr @ CP#0 620 - W(b) do_fpe @ CP#1 (FPE) 621 - W(b) do_fpe @ CP#2 (FPE) 622 - ret.w lr @ CP#3 623 - ret.w lr @ CP#4 624 - ret.w lr @ CP#5 625 - ret.w lr @ CP#6 626 - ret.w lr @ CP#7 627 - ret.w lr @ CP#8 628 - ret.w lr @ CP#9 629 - #ifdef CONFIG_VFP 630 - W(b) do_vfp @ CP#10 (VFP) 631 - W(b) do_vfp @ CP#11 (VFP) 632 - #else 633 - ret.w lr @ CP#10 (VFP) 634 - ret.w lr @ CP#11 (VFP) 635 - #endif 636 - ret.w lr @ CP#12 637 - ret.w lr @ CP#13 638 - ret.w lr @ CP#14 (Debug) 639 - ret.w lr @ CP#15 (Control) 640 - 641 - #ifdef CONFIG_NEON 642 - .align 6 
643 - 644 - .LCneon_arm_opcodes: 645 - .word 0xfe000000 @ mask 646 - .word 0xf2000000 @ opcode 647 - 648 - .word 0xff100000 @ mask 649 - .word 0xf4000000 @ opcode 650 - 651 - .word 0x00000000 @ mask 652 - .word 0x00000000 @ opcode 653 - 654 - .LCneon_thumb_opcodes: 655 - .word 0xef000000 @ mask 656 - .word 0xef000000 @ opcode 657 - 658 - .word 0xff100000 @ mask 659 - .word 0xf9000000 @ opcode 660 - 661 - .word 0x00000000 @ mask 662 - .word 0x00000000 @ opcode 663 - #endif 664 - 665 - do_fpe: 666 - add r10, r10, #TI_FPSTATE @ r10 = workspace 667 - ldr_va pc, fp_enter, tmp=r4 @ Call FP module USR entry point 668 - 669 - /* 670 - * The FP module is called with these registers set: 671 - * r0 = instruction 672 - * r2 = PC+4 673 - * r9 = normal "successful" return address 674 - * r10 = FP workspace 675 - * lr = unrecognised FP instruction return address 676 - */ 677 - 678 - .pushsection .data 679 - .align 2 680 - ENTRY(fp_enter) 681 - .word no_fp 682 - .popsection 683 - 684 - ENTRY(no_fp) 685 - ret lr 686 - ENDPROC(no_fp) 687 - 688 - __und_usr_fault_32: 689 - mov r1, #4 690 - b 1f 691 - __und_usr_fault_16_pan: 692 - uaccess_disable ip 693 - __und_usr_fault_16: 694 - mov r1, #2 695 - 1: mov r0, sp 696 - badr lr, ret_from_exception 697 - b __und_fault 698 - ENDPROC(__und_usr_fault_32) 699 - ENDPROC(__und_usr_fault_16) 700 468 701 469 .align 5 702 470 __pabt_usr:
+14 -4
arch/arm/kernel/iwmmxt.S
··· 58 58 .text 59 59 .arm 60 60 61 + ENTRY(iwmmxt_undef_handler) 62 + push {r9, r10, lr} 63 + get_thread_info r10 64 + mov r9, pc 65 + b iwmmxt_task_enable 66 + mov r0, #0 67 + pop {r9, r10, pc} 68 + ENDPROC(iwmmxt_undef_handler) 69 + 61 70 /* 62 71 * Lazy switching of Concan coprocessor context 63 72 * 73 + * r0 = struct pt_regs pointer 64 74 * r10 = struct thread_info pointer 65 75 * r9 = ret_from_exception 66 76 * lr = undefined instr exit ··· 94 84 PJ4(mcr p15, 0, r2, c1, c0, 2) 95 85 96 86 ldr r3, =concan_owner 97 - add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area 98 - ldr r2, [sp, #60] @ current task pc value 87 + ldr r2, [r0, #S_PC] @ current task pc value 99 88 ldr r1, [r3] @ get current Concan owner 100 - str r0, [r3] @ this task now owns Concan regs 101 89 sub r2, r2, #4 @ adjust pc back 102 - str r2, [sp, #60] 90 + str r2, [r0, #S_PC] 91 + add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area 92 + str r0, [r3] @ this task now owns Concan regs 103 93 104 94 mrc p15, 0, r2, c2, c0, 0 105 95 mov r2, r2 @ cpwait
+13 -1
arch/arm/kernel/machine_kexec.c
··· 94 94 } 95 95 } 96 96 97 + static DEFINE_PER_CPU(call_single_data_t, cpu_stop_csd) = 98 + CSD_INIT(machine_crash_nonpanic_core, NULL); 99 + 97 100 void crash_smp_send_stop(void) 98 101 { 99 102 static int cpus_stopped; 100 103 unsigned long msecs; 104 + call_single_data_t *csd; 105 + int cpu, this_cpu = raw_smp_processor_id(); 101 106 102 107 if (cpus_stopped) 103 108 return; 104 109 105 110 atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 106 - smp_call_function(machine_crash_nonpanic_core, NULL, false); 111 + for_each_online_cpu(cpu) { 112 + if (cpu == this_cpu) 113 + continue; 114 + 115 + csd = &per_cpu(cpu_stop_csd, cpu); 116 + smp_call_function_single_async(cpu, csd); 117 + } 118 + 107 119 msecs = 1000; /* Wait at most a second for the other cpus to stop */ 108 120 while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { 109 121 mdelay(1);
+1
arch/arm/kernel/pj4-cp0.c
··· 126 126 pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers); 127 127 elf_hwcap |= HWCAP_IWMMXT; 128 128 thread_register_notifier(&iwmmxt_notifier_block); 129 + register_iwmmxt_undef_handler(); 129 130 #endif 130 131 131 132 return 0;
-1
arch/arm/kernel/process.c
··· 222 222 223 223 flush_ptrace_hw_breakpoint(tsk); 224 224 225 - memset(thread->used_cp, 0, sizeof(thread->used_cp)); 226 225 memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); 227 226 memset(&thread->fpstate, 0, sizeof(union fp_state)); 228 227
-2
arch/arm/kernel/ptrace.c
··· 584 584 { 585 585 struct thread_info *thread = task_thread_info(target); 586 586 587 - thread->used_cp[1] = thread->used_cp[2] = 1; 588 - 589 587 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 590 588 &thread->fpstate, 591 589 0, sizeof(struct user_fp));
+1
arch/arm/kernel/xscale-cp0.c
··· 166 166 pr_info("XScale iWMMXt coprocessor detected.\n"); 167 167 elf_hwcap |= HWCAP_IWMMXT; 168 168 thread_register_notifier(&iwmmxt_notifier_block); 169 + register_iwmmxt_undef_handler(); 169 170 #endif 170 171 } else { 171 172 pr_info("XScale DSP coprocessor detected.\n");
+4
arch/arm/mm/proc-feroceon.S
··· 56 56 movne r2, r2, lsr #2 @ turned into # of sets 57 57 sub r2, r2, #(1 << 5) 58 58 stmia r1, {r2, r3} 59 + #ifdef CONFIG_VFP 60 + mov r1, #1 @ disable quirky VFP 61 + str_l r1, VFP_arch_feroceon, r2 62 + #endif 59 63 ret lr 60 64 61 65 /*
+77
arch/arm/nwfpe/entry.S
··· 7 7 Direct questions, comments to Scott Bambrough <scottb@netwinder.org> 8 8 9 9 */ 10 + #include <linux/linkage.h> 10 11 #include <asm/assembler.h> 11 12 #include <asm/opcodes.h> 12 13 ··· 105 104 @ plain LDR instruction. Weird, but it seems harmless. 106 105 .pushsection .text.fixup,"ax" 107 106 .align 2 107 + .Lrep: str r4, [sp, #S_PC] @ retry current instruction 108 108 .Lfix: ret r9 @ let the user eat segfaults 109 109 .popsection 110 110 ··· 113 111 .align 3 114 112 .long .Lx1, .Lfix 115 113 .popsection 114 + 115 + @ 116 + @ Check whether the instruction is a co-processor instruction. 117 + @ If yes, we need to call the relevant co-processor handler. 118 + @ Only FPE instructions are dispatched here, everything else 119 + @ is handled by undef hooks. 120 + @ 121 + @ Emulators may wish to make use of the following registers: 122 + @ r4 = PC value to resume execution after successful emulation 123 + @ r9 = normal "successful" return address 124 + @ lr = unrecognised instruction return address 125 + @ IRQs enabled, FIQs enabled. 
126 + @ 127 + ENTRY(call_fpe) 128 + mov r2, r4 129 + sub r4, r4, #4 @ ARM instruction at user PC - 4 130 + USERL( .Lrep, ldrt r0, [r4]) @ load opcode from user space 131 + ARM_BE8(rev r0, r0) @ little endian instruction 132 + 133 + uaccess_disable ip 134 + 135 + get_thread_info r10 @ get current thread 136 + tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 137 + reteq lr 138 + and r8, r0, #0x00000f00 @ mask out CP number 139 + #ifdef CONFIG_IWMMXT 140 + @ Test if we need to give access to iWMMXt coprocessors 141 + ldr r5, [r10, #TI_FLAGS] 142 + rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only 143 + movscs r7, r5, lsr #(TIF_USING_IWMMXT + 1) 144 + movcs r0, sp @ pass struct pt_regs 145 + bcs iwmmxt_task_enable 146 + #endif 147 + add pc, pc, r8, lsr #6 148 + nop 149 + 150 + ret lr @ CP#0 151 + b do_fpe @ CP#1 (FPE) 152 + b do_fpe @ CP#2 (FPE) 153 + ret lr @ CP#3 154 + ret lr @ CP#4 155 + ret lr @ CP#5 156 + ret lr @ CP#6 157 + ret lr @ CP#7 158 + ret lr @ CP#8 159 + ret lr @ CP#9 160 + ret lr @ CP#10 (VFP) 161 + ret lr @ CP#11 (VFP) 162 + ret lr @ CP#12 163 + ret lr @ CP#13 164 + ret lr @ CP#14 (Debug) 165 + ret lr @ CP#15 (Control) 166 + 167 + do_fpe: 168 + add r10, r10, #TI_FPSTATE @ r10 = workspace 169 + ldr_va pc, fp_enter, tmp=r4 @ Call FP module USR entry point 170 + 171 + @ 172 + @ The FP module is called with these registers set: 173 + @ r0 = instruction 174 + @ r2 = PC+4 175 + @ r9 = normal "successful" return address 176 + @ r10 = FP workspace 177 + @ lr = unrecognised FP instruction return address 178 + @ 179 + 180 + .pushsection .data 181 + .align 2 182 + ENTRY(fp_enter) 183 + .word no_fp 184 + .popsection 185 + 186 + no_fp: 187 + ret lr 188 + ENDPROC(no_fp)
+1 -1
arch/arm/vfp/Makefile
··· 8 8 # ccflags-y := -DDEBUG 9 9 # asflags-y := -DDEBUG 10 10 11 - obj-y += vfpmodule.o entry.o vfphw.o vfpsingle.o vfpdouble.o 11 + obj-y += vfpmodule.o vfphw.o vfpsingle.o vfpdouble.o
-31
arch/arm/vfp/entry.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * linux/arch/arm/vfp/entry.S 4 - * 5 - * Copyright (C) 2004 ARM Limited. 6 - * Written by Deep Blue Solutions Limited. 7 - */ 8 - #include <linux/init.h> 9 - #include <linux/linkage.h> 10 - #include <asm/thread_info.h> 11 - #include <asm/vfpmacros.h> 12 - #include <asm/assembler.h> 13 - #include <asm/asm-offsets.h> 14 - 15 - @ VFP entry point. 16 - @ 17 - @ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) 18 - @ r2 = PC value to resume execution after successful emulation 19 - @ r9 = normal "successful" return address 20 - @ r10 = this threads thread_info structure 21 - @ lr = unrecognised instruction return address 22 - @ IRQs enabled. 23 - @ 24 - ENTRY(do_vfp) 25 - mov r1, r10 26 - str lr, [sp, #-8]! 27 - add r3, sp, #4 28 - str r9, [r3] 29 - bl vfp_entry 30 - ldr pc, [sp], #8 31 - ENDPROC(do_vfp)
+1
arch/arm/vfp/vfp.h
··· 375 375 }; 376 376 377 377 asmlinkage void vfp_save_state(void *location, u32 fpexc); 378 + asmlinkage u32 vfp_load_state(const void *location);
+14 -194
arch/arm/vfp/vfphw.S
··· 4 4 * 5 5 * Copyright (C) 2004 ARM Limited. 6 6 * Written by Deep Blue Solutions Limited. 7 - * 8 - * This code is called from the kernel's undefined instruction trap. 9 - * r1 holds the thread_info pointer 10 - * r3 holds the return address for successful handling. 11 - * lr holds the return address for unrecognised instructions. 12 - * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h) 13 7 */ 14 8 #include <linux/init.h> 15 9 #include <linux/linkage.h> ··· 12 18 #include <linux/kern_levels.h> 13 19 #include <asm/assembler.h> 14 20 #include <asm/asm-offsets.h> 15 - 16 - .macro DBGSTR, str 17 - #ifdef DEBUG 18 - stmfd sp!, {r0-r3, ip, lr} 19 - ldr r0, =1f 20 - bl _printk 21 - ldmfd sp!, {r0-r3, ip, lr} 22 - 23 - .pushsection .rodata, "a" 24 - 1: .ascii KERN_DEBUG "VFP: \str\n" 25 - .byte 0 26 - .previous 27 - #endif 28 - .endm 29 21 30 22 .macro DBGSTR1, str, arg 31 23 #ifdef DEBUG ··· 28 48 #endif 29 49 .endm 30 50 31 - .macro DBGSTR3, str, arg1, arg2, arg3 32 - #ifdef DEBUG 33 - stmfd sp!, {r0-r3, ip, lr} 34 - mov r3, \arg3 35 - mov r2, \arg2 36 - mov r1, \arg1 37 - ldr r0, =1f 38 - bl _printk 39 - ldmfd sp!, {r0-r3, ip, lr} 40 - 41 - .pushsection .rodata, "a" 42 - 1: .ascii KERN_DEBUG "VFP: \str\n" 43 - .byte 0 44 - .previous 45 - #endif 46 - .endm 47 - 48 - 49 - @ VFP hardware support entry point. 50 - @ 51 - @ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) 52 - @ r1 = thread_info pointer 53 - @ r2 = PC value to resume execution after successful emulation 54 - @ r3 = normal "successful" return address 55 - @ lr = unrecognised instruction return address 56 - @ IRQs enabled. 57 - ENTRY(vfp_support_entry) 58 - ldr r11, [r1, #TI_CPU] @ CPU number 59 - add r10, r1, #TI_VFPSTATE @ r10 = workspace 60 - 61 - DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 62 - 63 - .fpu vfpv2 64 - VFPFMRX r1, FPEXC @ Is the VFP enabled? 
65 - DBGSTR1 "fpexc %08x", r1 66 - tst r1, #FPEXC_EN 67 - bne look_for_VFP_exceptions @ VFP is already enabled 68 - 69 - DBGSTR1 "enable %x", r10 70 - ldr r9, vfp_current_hw_state_address 71 - orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set 72 - ldr r4, [r9, r11, lsl #2] @ vfp_current_hw_state pointer 73 - bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled 74 - cmp r4, r10 @ this thread owns the hw context? 75 - #ifndef CONFIG_SMP 76 - @ For UP, checking that this thread owns the hw context is 77 - @ sufficient to determine that the hardware state is valid. 78 - beq vfp_hw_state_valid 79 - 80 - @ On UP, we lazily save the VFP context. As a different 81 - @ thread wants ownership of the VFP hardware, save the old 82 - @ state if there was a previous (valid) owner. 83 - 84 - VFPFMXR FPEXC, r5 @ enable VFP, disable any pending 85 - @ exceptions, so we can get at the 86 - @ rest of it 87 - 88 - DBGSTR1 "save old state %p", r4 89 - cmp r4, #0 @ if the vfp_current_hw_state is NULL 90 - beq vfp_reload_hw @ then the hw state needs reloading 91 - VFPFSTMIA r4, r5 @ save the working registers 92 - VFPFMRX r5, FPSCR @ current status 93 - #ifndef CONFIG_CPU_FEROCEON 94 - tst r1, #FPEXC_EX @ is there additional state to save? 95 - beq 1f 96 - VFPFMRX r6, FPINST @ FPINST (only if FPEXC.EX is set) 97 - tst r1, #FPEXC_FP2V @ is there an FPINST2 to read? 98 - beq 1f 99 - VFPFMRX r8, FPINST2 @ FPINST2 if needed (and present) 100 - 1: 101 - #endif 102 - stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 103 - vfp_reload_hw: 104 - 105 - #else 106 - @ For SMP, if this thread does not own the hw context, then we 107 - @ need to reload it. No need to save the old state as on SMP, 108 - @ we always save the state when we switch away from a thread. 109 - bne vfp_reload_hw 110 - 111 - @ This thread has ownership of the current hardware context. 
112 - @ However, it may have been migrated to another CPU, in which 113 - @ case the saved state is newer than the hardware context. 114 - @ Check this by looking at the CPU number which the state was 115 - @ last loaded onto. 116 - ldr ip, [r10, #VFP_CPU] 117 - teq ip, r11 118 - beq vfp_hw_state_valid 119 - 120 - vfp_reload_hw: 121 - @ We're loading this threads state into the VFP hardware. Update 122 - @ the CPU number which contains the most up to date VFP context. 123 - str r11, [r10, #VFP_CPU] 124 - 125 - VFPFMXR FPEXC, r5 @ enable VFP, disable any pending 126 - @ exceptions, so we can get at the 127 - @ rest of it 128 - #endif 129 - 130 - DBGSTR1 "load state %p", r10 131 - str r10, [r9, r11, lsl #2] @ update the vfp_current_hw_state pointer 51 + ENTRY(vfp_load_state) 52 + @ Load the current VFP state 53 + @ r0 - load location 54 + @ returns FPEXC 55 + DBGSTR1 "load VFP state %p", r0 132 56 @ Load the saved state back into the VFP 133 - VFPFLDMIA r10, r5 @ reload the working registers while 57 + VFPFLDMIA r0, r1 @ reload the working registers while 134 58 @ FPEXC is in a safe state 135 - ldmia r10, {r1, r5, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2 136 - #ifndef CONFIG_CPU_FEROCEON 137 - tst r1, #FPEXC_EX @ is there additional state to restore? 59 + ldmia r0, {r0-r3} @ load FPEXC, FPSCR, FPINST, FPINST2 60 + tst r0, #FPEXC_EX @ is there additional state to restore? 138 61 beq 1f 139 - VFPFMXR FPINST, r6 @ restore FPINST (only if FPEXC.EX is set) 140 - tst r1, #FPEXC_FP2V @ is there an FPINST2 to write? 62 + VFPFMXR FPINST, r2 @ restore FPINST (only if FPEXC.EX is set) 63 + tst r0, #FPEXC_FP2V @ is there an FPINST2 to write? 
141 64 beq 1f 142 - VFPFMXR FPINST2, r8 @ FPINST2 if needed (and present) 65 + VFPFMXR FPINST2, r3 @ FPINST2 if needed (and present) 143 66 1: 144 - #endif 145 - VFPFMXR FPSCR, r5 @ restore status 146 - 147 - @ The context stored in the VFP hardware is up to date with this thread 148 - vfp_hw_state_valid: 149 - tst r1, #FPEXC_EX 150 - bne process_exception @ might as well handle the pending 151 - @ exception before retrying branch 152 - @ out before setting an FPEXC that 153 - @ stops us reading stuff 154 - VFPFMXR FPEXC, r1 @ Restore FPEXC last 155 - mov sp, r3 @ we think we have handled things 156 - pop {lr} 157 - sub r2, r2, #4 @ Retry current instruction - if Thumb 158 - str r2, [sp, #S_PC] @ mode it's two 16-bit instructions, 159 - @ else it's one 32-bit instruction, so 160 - @ always subtract 4 from the following 161 - @ instruction address. 162 - 163 - local_bh_enable_and_ret: 164 - adr r0, . 165 - mov r1, #SOFTIRQ_DISABLE_OFFSET 166 - b __local_bh_enable_ip @ tail call 167 - 168 - look_for_VFP_exceptions: 169 - @ Check for synchronous or asynchronous exception 170 - tst r1, #FPEXC_EX | FPEXC_DEX 171 - bne process_exception 172 - @ On some implementations of the VFP subarch 1, setting FPSCR.IXE 173 - @ causes all the CDP instructions to be bounced synchronously without 174 - @ setting the FPEXC.EX bit 175 - VFPFMRX r5, FPSCR 176 - tst r5, #FPSCR_IXE 177 - bne process_exception 178 - 179 - tst r5, #FPSCR_LENGTH_MASK 180 - beq skip 181 - orr r1, r1, #FPEXC_DEX 182 - b process_exception 183 - skip: 184 - 185 - @ Fall into hand on to next handler - appropriate coproc instr 186 - @ not recognised by VFP 187 - 188 - DBGSTR "not VFP" 189 - b local_bh_enable_and_ret 190 - 191 - process_exception: 192 - DBGSTR "bounce" 193 - mov sp, r3 @ setup for a return to the user code. 
194 - pop {lr} 195 - mov r2, sp @ nothing stacked - regdump is at TOS 196 - 197 - @ Now call the C code to package up the bounce to the support code 198 - @ r0 holds the trigger instruction 199 - @ r1 holds the FPEXC value 200 - @ r2 pointer to register dump 201 - b VFP_bounce @ we have handled this - the support 202 - @ code will raise an exception if 203 - @ required. If not, the user code will 204 - @ retry the faulted instruction 205 - ENDPROC(vfp_support_entry) 67 + VFPFMXR FPSCR, r1 @ restore status 68 + ret lr 69 + ENDPROC(vfp_load_state) 206 70 207 71 ENTRY(vfp_save_state) 208 72 @ Save the current VFP state ··· 65 241 stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 66 242 ret lr 67 243 ENDPROC(vfp_save_state) 68 - 69 - .align 70 - vfp_current_hw_state_address: 71 - .word vfp_current_hw_state 72 244 73 245 .macro tbl_branch, base, tmp, shift 74 246 #ifdef CONFIG_THUMB2_KERNEL
+146 -62
arch/arm/vfp/vfpmodule.c
··· 18 18 #include <linux/uaccess.h> 19 19 #include <linux/user.h> 20 20 #include <linux/export.h> 21 + #include <linux/perf_event.h> 21 22 22 23 #include <asm/cp15.h> 23 24 #include <asm/cputype.h> ··· 31 30 #include "vfpinstr.h" 32 31 #include "vfp.h" 33 32 34 - /* 35 - * Our undef handlers (in entry.S) 36 - */ 37 - asmlinkage void vfp_support_entry(u32, void *, u32, u32); 38 - 39 33 static bool have_vfp __ro_after_init; 40 34 41 35 /* ··· 38 42 * Used in startup: set to non-zero if VFP checks fail 39 43 * After startup, holds VFP architecture 40 44 */ 41 - static unsigned int __initdata VFP_arch; 45 + static unsigned int VFP_arch; 46 + 47 + #ifdef CONFIG_CPU_FEROCEON 48 + extern unsigned int VFP_arch_feroceon __alias(VFP_arch); 49 + #endif 42 50 43 51 /* 44 52 * The pointer to the vfpstate structure of the thread which currently ··· 314 314 * emulate it. 315 315 */ 316 316 } 317 + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc); 317 318 return exceptions & ~VFP_NAN_FLAG; 318 319 } 319 320 320 321 /* 321 322 * Package up a bounce condition. 322 323 */ 323 - void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) 324 + static void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) 324 325 { 325 326 u32 fpscr, orig_fpscr, fpsid, exceptions; 326 327 ··· 357 356 } 358 357 359 358 if (fpexc & FPEXC_EX) { 360 - #ifndef CONFIG_CPU_FEROCEON 361 359 /* 362 360 * Asynchronous exception. The instruction is read from FPINST 363 361 * and the interrupted instruction has to be restarted. 364 362 */ 365 363 trigger = fmrx(FPINST); 366 364 regs->ARM_pc -= 4; 367 - #endif 368 365 } else if (!(fpexc & FPEXC_DEX)) { 369 366 /* 370 367 * Illegal combination of bits. It can be caused by an ··· 370 371 * on VFP subarch 1. 371 372 */ 372 373 vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs); 373 - goto exit; 374 + return; 374 375 } 375 376 376 377 /* ··· 401 402 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. 
402 403 */ 403 404 if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V)) 404 - goto exit; 405 + return; 405 406 406 407 /* 407 408 * The barrier() here prevents fpinst2 being read ··· 414 415 exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs); 415 416 if (exceptions) 416 417 vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); 417 - exit: 418 - local_bh_enable(); 419 418 } 420 419 421 420 static void vfp_enable(void *unused) ··· 642 645 return 0; 643 646 } 644 647 645 - /* 646 - * Entered with: 647 - * 648 - * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb) 649 - * r1 = thread_info pointer 650 - * r2 = PC value to resume execution after successful emulation 651 - * r3 = normal "successful" return address 652 - * lr = unrecognised instruction return address 653 - */ 654 - asmlinkage void vfp_entry(u32 trigger, struct thread_info *ti, u32 resume_pc, 655 - u32 resume_return_address) 656 - { 657 - if (unlikely(!have_vfp)) 658 - return; 659 - 660 - local_bh_disable(); 661 - vfp_support_entry(trigger, ti, resume_pc, resume_return_address); 662 - } 663 - 664 - #ifdef CONFIG_KERNEL_MODE_NEON 665 - 666 648 static int vfp_kmode_exception(struct pt_regs *regs, unsigned int instr) 667 649 { 668 650 /* ··· 664 688 return 1; 665 689 } 666 690 667 - static struct undef_hook vfp_kmode_exception_hook[] = {{ 691 + /* 692 + * vfp_support_entry - Handle VFP exception 693 + * 694 + * @regs: pt_regs structure holding the register state at exception entry 695 + * @trigger: The opcode of the instruction that triggered the exception 696 + * 697 + * Returns 0 if the exception was handled, or an error code otherwise. 
698 + */ 699 + static int vfp_support_entry(struct pt_regs *regs, u32 trigger) 700 + { 701 + struct thread_info *ti = current_thread_info(); 702 + u32 fpexc; 703 + 704 + if (unlikely(!have_vfp)) 705 + return -ENODEV; 706 + 707 + if (!user_mode(regs)) 708 + return vfp_kmode_exception(regs, trigger); 709 + 710 + local_bh_disable(); 711 + fpexc = fmrx(FPEXC); 712 + 713 + /* 714 + * If the VFP unit was not enabled yet, we have to check whether the 715 + * VFP state in the CPU's registers is the most recent VFP state 716 + * associated with the process. On UP systems, we don't save the VFP 717 + * state eagerly on a context switch, so we may need to save the 718 + * VFP state to memory first, as it may belong to another process. 719 + */ 720 + if (!(fpexc & FPEXC_EN)) { 721 + /* 722 + * Enable the VFP unit but mask the FP exception flag for the 723 + * time being, so we can access all the registers. 724 + */ 725 + fpexc |= FPEXC_EN; 726 + fmxr(FPEXC, fpexc & ~FPEXC_EX); 727 + 728 + /* 729 + * Check whether or not the VFP state in the CPU's registers is 730 + * the most recent VFP state associated with this task. On SMP, 731 + * migration may result in multiple CPUs holding VFP states 732 + * that belong to the same task, but only the most recent one 733 + * is valid. 734 + */ 735 + if (!vfp_state_in_hw(ti->cpu, ti)) { 736 + if (!IS_ENABLED(CONFIG_SMP) && 737 + vfp_current_hw_state[ti->cpu] != NULL) { 738 + /* 739 + * This CPU is currently holding the most 740 + * recent VFP state associated with another 741 + * task, and we must save that to memory first. 742 + */ 743 + vfp_save_state(vfp_current_hw_state[ti->cpu], 744 + fpexc); 745 + } 746 + 747 + /* 748 + * We can now proceed with loading the task's VFP state 749 + * from memory into the CPU registers. 
750 + */ 751 + fpexc = vfp_load_state(&ti->vfpstate); 752 + vfp_current_hw_state[ti->cpu] = &ti->vfpstate; 753 + #ifdef CONFIG_SMP 754 + /* 755 + * Record that this CPU is now the one holding the most 756 + * recent VFP state of the task. 757 + */ 758 + ti->vfpstate.hard.cpu = ti->cpu; 759 + #endif 760 + } 761 + 762 + if (fpexc & FPEXC_EX) 763 + /* 764 + * Might as well handle the pending exception before 765 + * retrying branch out before setting an FPEXC that 766 + * stops us reading stuff. 767 + */ 768 + goto bounce; 769 + 770 + /* 771 + * No FP exception is pending: just enable the VFP and 772 + * replay the instruction that trapped. 773 + */ 774 + fmxr(FPEXC, fpexc); 775 + } else { 776 + /* Check for synchronous or asynchronous exceptions */ 777 + if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) { 778 + u32 fpscr = fmrx(FPSCR); 779 + 780 + /* 781 + * On some implementations of the VFP subarch 1, 782 + * setting FPSCR.IXE causes all the CDP instructions to 783 + * be bounced synchronously without setting the 784 + * FPEXC.EX bit 785 + */ 786 + if (!(fpscr & FPSCR_IXE)) { 787 + if (!(fpscr & FPSCR_LENGTH_MASK)) { 788 + pr_debug("not VFP\n"); 789 + local_bh_enable(); 790 + return -ENOEXEC; 791 + } 792 + fpexc |= FPEXC_DEX; 793 + } 794 + } 795 + bounce: regs->ARM_pc += 4; 796 + VFP_bounce(trigger, fpexc, regs); 797 + } 798 + 799 + local_bh_enable(); 800 + return 0; 801 + } 802 + 803 + static struct undef_hook neon_support_hook[] = {{ 668 804 .instr_mask = 0xfe000000, 669 805 .instr_val = 0xf2000000, 670 - .cpsr_mask = MODE_MASK | PSR_T_BIT, 671 - .cpsr_val = SVC_MODE, 672 - .fn = vfp_kmode_exception, 806 + .cpsr_mask = PSR_T_BIT, 807 + .cpsr_val = 0, 808 + .fn = vfp_support_entry, 673 809 }, { 674 810 .instr_mask = 0xff100000, 675 811 .instr_val = 0xf4000000, 676 - .cpsr_mask = MODE_MASK | PSR_T_BIT, 677 - .cpsr_val = SVC_MODE, 678 - .fn = vfp_kmode_exception, 812 + .cpsr_mask = PSR_T_BIT, 813 + .cpsr_val = 0, 814 + .fn = vfp_support_entry, 679 815 }, { 680 816 .instr_mask 
= 0xef000000, 681 817 .instr_val = 0xef000000, 682 - .cpsr_mask = MODE_MASK | PSR_T_BIT, 683 - .cpsr_val = SVC_MODE | PSR_T_BIT, 684 - .fn = vfp_kmode_exception, 818 + .cpsr_mask = PSR_T_BIT, 819 + .cpsr_val = PSR_T_BIT, 820 + .fn = vfp_support_entry, 685 821 }, { 686 822 .instr_mask = 0xff100000, 687 823 .instr_val = 0xf9000000, 688 - .cpsr_mask = MODE_MASK | PSR_T_BIT, 689 - .cpsr_val = SVC_MODE | PSR_T_BIT, 690 - .fn = vfp_kmode_exception, 691 - }, { 692 - .instr_mask = 0x0c000e00, 693 - .instr_val = 0x0c000a00, 694 - .cpsr_mask = MODE_MASK, 695 - .cpsr_val = SVC_MODE, 696 - .fn = vfp_kmode_exception, 824 + .cpsr_mask = PSR_T_BIT, 825 + .cpsr_val = PSR_T_BIT, 826 + .fn = vfp_support_entry, 697 827 }}; 698 828 699 - static int __init vfp_kmode_exception_hook_init(void) 700 - { 701 - int i; 829 + static struct undef_hook vfp_support_hook = { 830 + .instr_mask = 0x0c000e00, 831 + .instr_val = 0x0c000a00, 832 + .fn = vfp_support_entry, 833 + }; 702 834 703 - for (i = 0; i < ARRAY_SIZE(vfp_kmode_exception_hook); i++) 704 - register_undef_hook(&vfp_kmode_exception_hook[i]); 705 - return 0; 706 - } 707 - subsys_initcall(vfp_kmode_exception_hook_init); 835 + #ifdef CONFIG_KERNEL_MODE_NEON 708 836 709 837 /* 710 838 * Kernel-side NEON support functions ··· 913 833 * for NEON if the hardware has the MVFR registers. 914 834 */ 915 835 if (IS_ENABLED(CONFIG_NEON) && 916 - (fmrx(MVFR1) & 0x000fff00) == 0x00011100) 836 + (fmrx(MVFR1) & 0x000fff00) == 0x00011100) { 917 837 elf_hwcap |= HWCAP_NEON; 838 + for (int i = 0; i < ARRAY_SIZE(neon_support_hook); i++) 839 + register_undef_hook(&neon_support_hook[i]); 840 + } 918 841 919 842 if (IS_ENABLED(CONFIG_VFPv3)) { 920 843 u32 mvfr0 = fmrx(MVFR0); ··· 986 903 987 904 have_vfp = true; 988 905 906 + register_undef_hook(&vfp_support_hook); 989 907 thread_register_notifier(&vfp_notifier_block); 990 908 vfp_pm_init(); 991 909