[PATCH] Remove general use functions from head.S

As well as the interrupt vectors and initialization code, head.S
contains several asm functions which are used during runtime. This
patch moves these to misc.S, a more sensible location for random asm
support code. The functions moved are:
disable_kernel_fp
giveup_fpu
disable_kernel_altivec
giveup_altivec
__setup_cpu_power3 (empty function)

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>

Authored by David Gibson and committed by Paul Mackerras
0ab20002 c59c464a

+98 -95
arch/ppc64/kernel/head.S
··· 1542 1542 .align 8 1543 1543 copy_to_here: 1544 1544 1545 - /* 1546 - * disable_kernel_fp() 1547 - * Disable the FPU. 1548 - */ 1549 - _GLOBAL(disable_kernel_fp) 1550 - mfmsr r3 1551 - rldicl r0,r3,(63-MSR_FP_LG),1 1552 - rldicl r3,r0,(MSR_FP_LG+1),0 1553 - mtmsrd r3 /* disable use of fpu now */ 1554 - isync 1555 - blr 1556 - 1557 - /* 1558 - * giveup_fpu(tsk) 1559 - * Disable FP for the task given as the argument, 1560 - * and save the floating-point registers in its thread_struct. 1561 - * Enables the FPU for use in the kernel on return. 1562 - */ 1563 - _GLOBAL(giveup_fpu) 1564 - mfmsr r5 1565 - ori r5,r5,MSR_FP 1566 - mtmsrd r5 /* enable use of fpu now */ 1567 - isync 1568 - cmpdi 0,r3,0 1569 - beqlr- /* if no previous owner, done */ 1570 - addi r3,r3,THREAD /* want THREAD of task */ 1571 - ld r5,PT_REGS(r3) 1572 - cmpdi 0,r5,0 1573 - SAVE_32FPRS(0, r3) 1574 - mffs fr0 1575 - stfd fr0,THREAD_FPSCR(r3) 1576 - beq 1f 1577 - ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1578 - li r3,MSR_FP|MSR_FE0|MSR_FE1 1579 - andc r4,r4,r3 /* disable FP for previous task */ 1580 - std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1581 - 1: 1582 - #ifndef CONFIG_SMP 1583 - li r5,0 1584 - ld r4,last_task_used_math@got(r2) 1585 - std r5,0(r4) 1586 - #endif /* CONFIG_SMP */ 1587 - blr 1588 - 1589 - #ifdef CONFIG_ALTIVEC 1590 - /* 1591 - * disable_kernel_altivec() 1592 - * Disable the VMX. 1593 - */ 1594 - _GLOBAL(disable_kernel_altivec) 1595 - mfmsr r3 1596 - rldicl r0,r3,(63-MSR_VEC_LG),1 1597 - rldicl r3,r0,(MSR_VEC_LG+1),0 1598 - mtmsrd r3 /* disable use of VMX now */ 1599 - isync 1600 - blr 1601 - 1602 - /* 1603 - * giveup_altivec(tsk) 1604 - * Disable VMX for the task given as the argument, 1605 - * and save the vector registers in its thread_struct. 1606 - * Enables the VMX for use in the kernel on return. 
1607 - */ 1608 - _GLOBAL(giveup_altivec) 1609 - mfmsr r5 1610 - oris r5,r5,MSR_VEC@h 1611 - mtmsrd r5 /* enable use of VMX now */ 1612 - isync 1613 - cmpdi 0,r3,0 1614 - beqlr- /* if no previous owner, done */ 1615 - addi r3,r3,THREAD /* want THREAD of task */ 1616 - ld r5,PT_REGS(r3) 1617 - cmpdi 0,r5,0 1618 - SAVE_32VRS(0,r4,r3) 1619 - mfvscr vr0 1620 - li r4,THREAD_VSCR 1621 - stvx vr0,r4,r3 1622 - beq 1f 1623 - ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1624 - lis r3,MSR_VEC@h 1625 - andc r4,r4,r3 /* disable FP for previous task */ 1626 - std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 1627 - 1: 1628 - #ifndef CONFIG_SMP 1629 - li r5,0 1630 - ld r4,last_task_used_altivec@got(r2) 1631 - std r5,0(r4) 1632 - #endif /* CONFIG_SMP */ 1633 - blr 1634 - 1635 - #endif /* CONFIG_ALTIVEC */ 1636 - 1637 1545 #ifdef CONFIG_SMP 1638 1546 #ifdef CONFIG_PPC_PMAC 1639 1547 /* ··· 1891 1983 #endif 1892 1984 1893 1985 bl .start_kernel 1894 - 1895 - _GLOBAL(__setup_cpu_power3) 1896 - blr 1897 1986 1898 1987 _GLOBAL(hmt_init) 1899 1988 #ifdef CONFIG_HMT
+98
arch/ppc64/kernel/misc.S
··· 680 680 ld r30,-16(r1) 681 681 blr 682 682 683 + /* 684 + * disable_kernel_fp() 685 + * Disable the FPU. 686 + */ 687 + _GLOBAL(disable_kernel_fp) 688 + mfmsr r3 689 + rldicl r0,r3,(63-MSR_FP_LG),1 690 + rldicl r3,r0,(MSR_FP_LG+1),0 691 + mtmsrd r3 /* disable use of fpu now */ 692 + isync 693 + blr 694 + 695 + /* 696 + * giveup_fpu(tsk) 697 + * Disable FP for the task given as the argument, 698 + * and save the floating-point registers in its thread_struct. 699 + * Enables the FPU for use in the kernel on return. 700 + */ 701 + _GLOBAL(giveup_fpu) 702 + mfmsr r5 703 + ori r5,r5,MSR_FP 704 + mtmsrd r5 /* enable use of fpu now */ 705 + isync 706 + cmpdi 0,r3,0 707 + beqlr- /* if no previous owner, done */ 708 + addi r3,r3,THREAD /* want THREAD of task */ 709 + ld r5,PT_REGS(r3) 710 + cmpdi 0,r5,0 711 + SAVE_32FPRS(0, r3) 712 + mffs fr0 713 + stfd fr0,THREAD_FPSCR(r3) 714 + beq 1f 715 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 716 + li r3,MSR_FP|MSR_FE0|MSR_FE1 717 + andc r4,r4,r3 /* disable FP for previous task */ 718 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 719 + 1: 720 + #ifndef CONFIG_SMP 721 + li r5,0 722 + ld r4,last_task_used_math@got(r2) 723 + std r5,0(r4) 724 + #endif /* CONFIG_SMP */ 725 + blr 726 + 727 + #ifdef CONFIG_ALTIVEC 728 + 729 + #if 0 /* this has no callers for now */ 730 + /* 731 + * disable_kernel_altivec() 732 + * Disable the VMX. 733 + */ 734 + _GLOBAL(disable_kernel_altivec) 735 + mfmsr r3 736 + rldicl r0,r3,(63-MSR_VEC_LG),1 737 + rldicl r3,r0,(MSR_VEC_LG+1),0 738 + mtmsrd r3 /* disable use of VMX now */ 739 + isync 740 + blr 741 + #endif /* 0 */ 742 + 743 + /* 744 + * giveup_altivec(tsk) 745 + * Disable VMX for the task given as the argument, 746 + * and save the vector registers in its thread_struct. 747 + * Enables the VMX for use in the kernel on return. 
748 + */ 749 + _GLOBAL(giveup_altivec) 750 + mfmsr r5 751 + oris r5,r5,MSR_VEC@h 752 + mtmsrd r5 /* enable use of VMX now */ 753 + isync 754 + cmpdi 0,r3,0 755 + beqlr- /* if no previous owner, done */ 756 + addi r3,r3,THREAD /* want THREAD of task */ 757 + ld r5,PT_REGS(r3) 758 + cmpdi 0,r5,0 759 + SAVE_32VRS(0,r4,r3) 760 + mfvscr vr0 761 + li r4,THREAD_VSCR 762 + stvx vr0,r4,r3 763 + beq 1f 764 + ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) 765 + lis r3,MSR_VEC@h 766 + andc r4,r4,r3 /* disable FP for previous task */ 767 + std r4,_MSR-STACK_FRAME_OVERHEAD(r5) 768 + 1: 769 + #ifndef CONFIG_SMP 770 + li r5,0 771 + ld r4,last_task_used_altivec@got(r2) 772 + std r5,0(r4) 773 + #endif /* CONFIG_SMP */ 774 + blr 775 + 776 + #endif /* CONFIG_ALTIVEC */ 777 + 778 + _GLOBAL(__setup_cpu_power3) 779 + blr 780 + 683 781 /* kexec_wait(phys_cpu) 684 782 * 685 783 * wait for the flag to change, indicating this kernel is going away but