Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

powerpc: No need to use dot symbols when branching to a function

binutils is smart enough to know that a branch to a function
descriptor is actually a branch to the function's text address.

Alan tells me that binutils has been doing this for 9 years.
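For background: on 64-bit PowerPC with the ELFv1 ABI, a function symbol such
as foo does not name code. It names a three-doubleword function descriptor in
the .opd section (entry address, TOC base, environment pointer), while the
dot symbol .foo names the actual text. The sketch below is only an
illustration of that convention, modelled on the kernel's old _GLOBAL()
macro; the names foo and caller are made up:

	.section ".opd","aw"	# function descriptors live in .opd
	.align 3
	.globl foo
foo:				# `foo' names the descriptor, not the code
	.quad .foo		# doubleword 0: entry (text) address
	.quad .TOC.@tocbase	# doubleword 1: TOC base for the callee
	.quad 0			# doubleword 2: environment pointer (unused)

	.section ".text"
	.globl .foo
.foo:				# `.foo' names the code itself
	blr

caller:
	bl	foo		# the linker sees foo is in .opd and
	nop			# redirects the branch to .foo

Since the linker already redirects branches through the descriptor to the
entry point, spelling the target as .foo by hand buys nothing, which is why
this patch drops the dots.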

Signed-off-by: Anton Blanchard <anton@samba.org>

+271 -271
+2 -2
arch/powerpc/boot/util.S
@@ -45,7 +45,7 @@
 	mfspr	r4,SPRN_PVR
 	srwi	r4,r4,16
 	cmpwi	0,r4,1		/* 601 ? */
-	bne	.udelay_not_601
+	bne	.Ludelay_not_601
 00:	li	r0,86	/* Instructions / microsecond? */
 	mtctr	r0
 10:	addi	r0,r0,0		/* NOP */
@@ -54,7 +54,7 @@
 	bne	00b
 	blr
 
-.udelay_not_601:
+.Ludelay_not_601:
 	mulli	r4,r3,1000	/* nanoseconds */
 	/* Change r4 to be the number of ticks using:
 	 * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns
+2 -2
arch/powerpc/include/asm/context_tracking.h
@@ -2,9 +2,9 @@
 #define _ASM_POWERPC_CONTEXT_TRACKING_H
 
 #ifdef CONFIG_CONTEXT_TRACKING
-#define SCHEDULE_USER bl	.schedule_user
+#define SCHEDULE_USER bl	schedule_user
 #else
-#define SCHEDULE_USER bl	.schedule
+#define SCHEDULE_USER bl	schedule
 #endif
 
 #endif
+3 -3
arch/powerpc/include/asm/exception-64e.h
@@ -174,10 +174,10 @@
 	mtlr	r16;
 #define TLB_MISS_STATS_D(name)			\
 	addi	r9,r13,MMSTAT_DSTATS+name;	\
-	bl	.tlb_stat_inc;
+	bl	tlb_stat_inc;
 #define TLB_MISS_STATS_I(name)			\
 	addi	r9,r13,MMSTAT_ISTATS+name;	\
-	bl	.tlb_stat_inc;
+	bl	tlb_stat_inc;
 #define TLB_MISS_STATS_X(name)			\
 	ld	r8,PACA_EXTLB+EX_TLB_ESR(r13);	\
 	cmpdi	cr2,r8,-1;			\
@@ -185,7 +185,7 @@
 	addi	r9,r13,MMSTAT_DSTATS+name;	\
 	b	62f;				\
 61:	addi	r9,r13,MMSTAT_ISTATS+name;	\
-62:	bl	.tlb_stat_inc;
+62:	bl	tlb_stat_inc;
 #define TLB_MISS_STATS_SAVE_INFO		\
 	std	r14,EX_TLB_ESR(r12); /* save ESR */
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED		\
+1 -1
arch/powerpc/include/asm/exception-64s.h
@@ -517,7 +517,7 @@
 #define DISABLE_INTS	RECONCILE_IRQ_STATE(r10,r11)
 
 #define ADD_NVGPRS				\
-	bl	.save_nvgprs
+	bl	save_nvgprs
 
 #define RUNLATCH_ON				\
 BEGIN_FTR_SECTION				\
+2 -2
arch/powerpc/include/asm/irqflags.h
@@ -36,8 +36,8 @@
  * have to call a C function so call a wrapper that saves all the
  * C-clobbered registers.
  */
-#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
-#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
+#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
+#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
 
 /*
  * This is used by assembly code to soft-disable interrupts first and
+1 -1
arch/powerpc/include/asm/ppc_asm.h
@@ -57,7 +57,7 @@
 	LDX_BE	r10,0,r10;	/* get log write index */	\
 	cmpd	cr1,r11,r10;					\
 	beq+	cr1,33f;					\
-	bl	.accumulate_stolen_time;			\
+	bl	accumulate_stolen_time;				\
 	ld	r12,_MSR(r1);					\
 	andi.	r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \
 33:								\
+14 -14
arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -94,12 +94,12 @@
 _GLOBAL(__setup_cpu_e6500)
 	mflr	r6
 #ifdef CONFIG_PPC64
-	bl	.setup_altivec_ivors
+	bl	setup_altivec_ivors
 	/* Touch IVOR42 only if the CPU supports E.HV category */
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_lrat_ivor
+	bl	setup_lrat_ivor
 1:
 #endif
 	bl	setup_pw20_idle
@@ -164,15 +164,15 @@
 #ifdef CONFIG_PPC_BOOK3E_64
 _GLOBAL(__restore_cpu_e6500)
 	mflr	r5
-	bl	.setup_altivec_ivors
+	bl	setup_altivec_ivors
 	/* Touch IVOR42 only if the CPU supports E.HV category */
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_lrat_ivor
+	bl	setup_lrat_ivor
 1:
-	bl	.setup_pw20_idle
-	bl	.setup_altivec_idle
+	bl	setup_pw20_idle
+	bl	setup_altivec_idle
 	bl	__restore_cpu_e5500
 	mtlr	r5
 	blr
@@ -181,9 +181,9 @@
 	mflr	r4
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
-	bl	.__setup_base_ivors
-	bl	.setup_perfmon_ivor
-	bl	.setup_doorbell_ivors
+	bl	__setup_base_ivors
+	bl	setup_perfmon_ivor
+	bl	setup_doorbell_ivors
 	/*
 	 * We only want to touch IVOR38-41 if we're running on hardware
 	 * that supports category E.HV. The architectural way to determine
@@ -192,7 +192,7 @@
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_ehv_ivors
+	bl	setup_ehv_ivors
 1:
 	mtlr	r4
 	blr
@@ -201,9 +201,9 @@
 	mflr	r5
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
-	bl	.__setup_base_ivors
-	bl	.setup_perfmon_ivor
-	bl	.setup_doorbell_ivors
+	bl	__setup_base_ivors
+	bl	setup_perfmon_ivor
+	bl	setup_doorbell_ivors
 	/*
 	 * We only want to touch IVOR38-41 if we're running on hardware
 	 * that supports category E.HV. The architectural way to determine
@@ -212,7 +212,7 @@
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_ehv_ivors
+	bl	setup_ehv_ivors
 	b	2f
 1:
 	ld	r10,CPU_SPEC_FEATURES(r4)
+43 -43
arch/powerpc/kernel/entry_64.S
@@ -106,7 +106,7 @@
 	LDX_BE	r10,0,r10	/* get log write index */
 	cmpd	cr1,r11,r10
 	beq+	cr1,33f
-	bl	.accumulate_stolen_time
+	bl	accumulate_stolen_time
 	REST_GPR(0,r1)
 	REST_4GPRS(3,r1)
 	REST_2GPRS(7,r1)
@@ -143,7 +143,7 @@
 	std	r10,SOFTE(r1)
 
 #ifdef SHOW_SYSCALLS
-	bl	.do_show_syscall
+	bl	do_show_syscall
 	REST_GPR(0,r1)
 	REST_4GPRS(3,r1)
 	REST_2GPRS(7,r1)
@@ -181,7 +181,7 @@
 syscall_exit:
 	std	r3,RESULT(r1)
 #ifdef SHOW_SYSCALLS
-	bl	.do_show_syscall_exit
+	bl	do_show_syscall_exit
 	ld	r3,RESULT(r1)
 #endif
 	CURRENT_THREAD_INFO(r12, r1)
@@ -248,9 +248,9 @@
 
 /* Traced system call support */
 syscall_dotrace:
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_syscall_trace_enter
+	bl	do_syscall_trace_enter
 	/*
 	 * Restore argument registers possibly just changed.
 	 * We use the return value of do_syscall_trace_enter
@@ -308,7 +308,7 @@
 4:	/* Anything else left to do? */
 	SET_DEFAULT_THREAD_PPR(r3, r10)	/* Set thread.ppr = 3 */
 	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-	beq	.ret_from_except_lite
+	beq	ret_from_except_lite
 
 	/* Re-enable interrupts */
 #ifdef CONFIG_PPC_BOOK3E
@@ -319,10 +319,10 @@
 	mtmsrd	r10,1
 #endif /* CONFIG_PPC_BOOK3E */
 
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_syscall_trace_leave
-	b	.ret_from_except
+	bl	do_syscall_trace_leave
+	b	ret_from_except
 
 /* Save non-volatile GPRs, if not already saved. */
 _GLOBAL(save_nvgprs)
@@ -345,38 +345,38 @@
  */
 
 _GLOBAL(ppc_fork)
-	bl	.save_nvgprs
-	bl	.sys_fork
+	bl	save_nvgprs
+	bl	sys_fork
 	b	syscall_exit
 
 _GLOBAL(ppc_vfork)
-	bl	.save_nvgprs
-	bl	.sys_vfork
+	bl	save_nvgprs
+	bl	sys_vfork
 	b	syscall_exit
 
 _GLOBAL(ppc_clone)
-	bl	.save_nvgprs
-	bl	.sys_clone
+	bl	save_nvgprs
+	bl	sys_clone
 	b	syscall_exit
 
 _GLOBAL(ppc32_swapcontext)
-	bl	.save_nvgprs
-	bl	.compat_sys_swapcontext
+	bl	save_nvgprs
+	bl	compat_sys_swapcontext
 	b	syscall_exit
 
 _GLOBAL(ppc64_swapcontext)
-	bl	.save_nvgprs
-	bl	.sys_swapcontext
+	bl	save_nvgprs
+	bl	sys_swapcontext
 	b	syscall_exit
 
 _GLOBAL(ret_from_fork)
-	bl	.schedule_tail
+	bl	schedule_tail
 	REST_NVGPRS(r1)
 	li	r3,0
 	b	syscall_exit
 
 _GLOBAL(ret_from_kernel_thread)
-	bl	.schedule_tail
+	bl	schedule_tail
 	REST_NVGPRS(r1)
 	ld	r14, 0(r14)
 	mtlr	r14
@@ -611,7 +611,7 @@
 _GLOBAL(ret_from_except)
 	ld	r11,_TRAP(r1)
 	andi.	r0,r11,1
-	bne	.ret_from_except_lite
+	bne	ret_from_except_lite
 	REST_NVGPRS(r1)
 
 _GLOBAL(ret_from_except_lite)
@@ -661,23 +661,23 @@
 #endif
 1:	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	2f
-	bl	.restore_interrupts
+	bl	restore_interrupts
 	SCHEDULE_USER
-	b	.ret_from_except_lite
+	b	ret_from_except_lite
 2:
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
 	bne	3f		/* only restore TM if nothing else to do */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.restore_tm_state
+	bl	restore_tm_state
 	b	restore
 3:
 #endif
-	bl	.save_nvgprs
-	bl	.restore_interrupts
+	bl	save_nvgprs
+	bl	restore_interrupts
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_notify_resume
-	b	.ret_from_except
+	bl	do_notify_resume
+	b	ret_from_except
 
 resume_kernel:
 	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
@@ -730,7 +730,7 @@
 	 * sure we are soft-disabled first and reconcile irq state.
 	 */
 	RECONCILE_IRQ_STATE(r3,r4)
-1:	bl	.preempt_schedule_irq
+1:	bl	preempt_schedule_irq
 
 	/* Re-test flags and eventually loop */
 	CURRENT_THREAD_INFO(r9, r1)
@@ -792,7 +792,7 @@
 	 */
 do_restore:
 #ifdef CONFIG_PPC_BOOK3E
-	b	.exception_return_book3e
+	b	exception_return_book3e
 #else
 	/*
 	 * Clear the reservation. If we know the CPU tracks the address of
@@ -907,7 +907,7 @@
 	 *
 	 * Still, this might be useful for things like hash_page
 	 */
-	bl	.__check_irq_replay
+	bl	__check_irq_replay
 	cmpwi	cr0,r3,0
 	beq	restore_no_replay
 
@@ -928,13 +928,13 @@
 	cmpwi	cr0,r3,0x500
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	.do_IRQ
-	b	.ret_from_except
+	bl	do_IRQ
+	b	ret_from_except
 1:	cmpwi	cr0,r3,0x900
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	.timer_interrupt
-	b	.ret_from_except
+	bl	timer_interrupt
+	b	ret_from_except
 #ifdef CONFIG_PPC_DOORBELL
 1:
 #ifdef CONFIG_PPC_BOOK3E
@@ -948,14 +948,14 @@
 #endif /* CONFIG_PPC_BOOK3E */
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	.doorbell_exception
-	b	.ret_from_except
+	bl	doorbell_exception
+	b	ret_from_except
 #endif /* CONFIG_PPC_DOORBELL */
-1:	b	.ret_from_except /* What else to do here ? */
+1:	b	ret_from_except /* What else to do here ? */
 
 unrecov_restore:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
+	bl	unrecoverable_exception
 	b	unrecov_restore
 
 #ifdef CONFIG_PPC_RTAS
@@ -1238,7 +1238,7 @@
 	ld	r11, 112(r1)
 	addi	r3, r11, 16
 
-	bl	.prepare_ftrace_return
+	bl	prepare_ftrace_return
 	nop
 
 	ld	r0, 128(r1)
@@ -1254,7 +1254,7 @@
 	mr	r31, r1
 	stdu	r1, -112(r1)
 
-	bl	.ftrace_return_to_handler
+	bl	ftrace_return_to_handler
 	nop
 
 	/* return value has real return address */
@@ -1284,7 +1284,7 @@
 	 */
 	ld	r2, PACATOC(r13)
 
-	bl	.ftrace_return_to_handler
+	bl	ftrace_return_to_handler
 	nop
 
 	/* return value has real return address */
+64 -64
arch/powerpc/kernel/exceptions-64e.S
@@ -499,7 +499,7 @@
 	CHECK_NAPPING();					\
 	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
 	bl	hdlr;						\
-	b	.ret_from_except_lite;
+	b	ret_from_except_lite;
 
 /* This value is used to mark exception frames on the stack. */
 	.section	".toc","aw"
@@ -550,11 +550,11 @@
 	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x100)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
+	bl	unknown_exception
 	b	ret_from_crit_except
 
 /* Machine Check Interrupt */
@@ -562,11 +562,11 @@
 	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
 			    PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_MC(0x000)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.machine_check_exception
+	bl	machine_check_exception
 	b	ret_from_mc_except
 
 /* Data Storage Interrupt */
@@ -612,9 +612,9 @@
 	std	r14,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r14,PACA_EXGEN+EX_R14(r13)
-	bl	.save_nvgprs
-	bl	.program_check_exception
-	b	.ret_from_except
+	bl	save_nvgprs
+	bl	program_check_exception
+	b	ret_from_except
 
 /* Floating Point Unavailable Interrupt */
 	START_EXCEPTION(fp_unavailable);
@@ -625,13 +625,13 @@
 	ld	r12,_MSR(r1)
 	andi.	r0,r12,MSR_PR;
 	beq-	1f
-	bl	.load_up_fpu
+	bl	load_up_fpu
 	b	fast_exception_return
 1:	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_fp_unavailable_exception
-	b	.ret_from_except
+	bl	kernel_fp_unavailable_exception
+	b	ret_from_except
 
 /* Altivec Unavailable Interrupt */
 	START_EXCEPTION(altivec_unavailable);
@@ -644,16 +644,16 @@
 	ld	r12,_MSR(r1)
 	andi.	r0,r12,MSR_PR;
 	beq-	1f
-	bl	.load_up_altivec
+	bl	load_up_altivec
 	b	fast_exception_return
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.altivec_unavailable_exception
-	b	.ret_from_except
+	bl	altivec_unavailable_exception
+	b	ret_from_except
 
 /* AltiVec Assist */
 	START_EXCEPTION(altivec_assist);
@@ -662,16 +662,16 @@
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x220)
 	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-	bl	.altivec_assist_exception
+	bl	altivec_assist_exception
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #else
-	bl	.unknown_exception
+	bl	unknown_exception
 #endif
-	b	.ret_from_except
+	b	ret_from_except
 
 
 /* Decrementer Interrupt */
@@ -687,14 +687,14 @@
 	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x9f0)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_BOOKE_WDT
-	bl	.WatchdogException
+	bl	WatchdogException
 #else
-	bl	.unknown_exception
+	bl	unknown_exception
 #endif
 	b	ret_from_crit_except
 
@@ -712,10 +712,10 @@
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0xf20)
 	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* Debug exception as a critical interrupt*/
 	START_EXCEPTION(debug_crit);
@@ -774,9 +774,9 @@
 	mr	r4,r14
 	ld	r14,PACA_EXCRIT+EX_R14(r13)
 	ld	r15,PACA_EXCRIT+EX_R15(r13)
-	bl	.save_nvgprs
-	bl	.DebugException
-	b	.ret_from_except
+	bl	save_nvgprs
+	bl	DebugException
+	b	ret_from_except
 
 kernel_dbg_exc:
 	b	.	/* NYI */
@@ -839,9 +839,9 @@
 	mr	r4,r14
 	ld	r14,PACA_EXDBG+EX_R14(r13)
 	ld	r15,PACA_EXDBG+EX_R15(r13)
-	bl	.save_nvgprs
-	bl	.DebugException
-	b	.ret_from_except
+	bl	save_nvgprs
+	bl	DebugException
+	b	ret_from_except
 
 	START_EXCEPTION(perfmon);
 	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
@@ -850,8 +850,8 @@
 	INTS_DISABLE
 	CHECK_NAPPING()
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.performance_monitor_exception
-	b	.ret_from_except_lite
+	bl	performance_monitor_exception
+	b	ret_from_except_lite
 
 /* Doorbell interrupt */
 	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
@@ -862,11 +862,11 @@
 	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x2a0)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
+	bl	unknown_exception
 	b	ret_from_crit_except
 
 /*
@@ -878,21 +878,21 @@
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x2c0)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* Guest Doorbell critical Interrupt */
 	START_EXCEPTION(guest_doorbell_crit);
 	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x2e0)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
+	bl	unknown_exception
 	b	ret_from_crit_except
 
 /* Hypervisor call */
@@ -901,10 +901,10 @@
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x310)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* Embedded Hypervisor priviledged */
 	START_EXCEPTION(ehpriv);
@@ -912,10 +912,10 @@
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x320)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* LRAT Error interrupt */
 	START_EXCEPTION(lrat_error);
@@ -1014,16 +1014,16 @@
 	mr	r5,r15
 	ld	r14,PACA_EXGEN+EX_R14(r13)
 	ld	r15,PACA_EXGEN+EX_R15(r13)
-	bl	.do_page_fault
+	bl	do_page_fault
 	cmpdi	r3,0
 	bne-	1f
-	b	.ret_from_except_lite
-1:	bl	.save_nvgprs
+	b	ret_from_except_lite
+1:	bl	save_nvgprs
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r4,_DAR(r1)
-	bl	.bad_page_fault
-	b	.ret_from_except
+	bl	bad_page_fault
+	b	ret_from_except
 
 /*
  * Alignment exception doesn't fit entirely in the 0x100 bytes so it
@@ -1035,10 +1035,10 @@
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r14,PACA_EXGEN+EX_R14(r13)
 	ld	r15,PACA_EXGEN+EX_R15(r13)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.alignment_exception
-	b	.ret_from_except
+	bl	alignment_exception
+	b	ret_from_except
 
 /*
  * We branch here from entry_64.S for the last stage of the exception
@@ -1172,7 +1172,7 @@
 	std	r12,0(r11)
 	ld	r2,PACATOC(r13)
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_bad_stack
+	bl	kernel_bad_stack
 	b	1b
 
 /*
@@ -1521,13 +1521,13 @@
 	 * and always use AS 0, so we just set it up to match our link
 	 * address and never use 0 based addresses.
 	 */
-	bl	.initial_tlb_book3e
+	bl	initial_tlb_book3e
 
 	/* Init global core bits */
-	bl	.init_core_book3e
+	bl	init_core_book3e
 
 	/* Init per-thread bits */
-	bl	.init_thread_book3e
+	bl	init_thread_book3e
 
 	/* Return to common init code */
 	tovirt(r28,r28)
@@ -1548,7 +1548,7 @@
  */
 _GLOBAL(book3e_secondary_core_init_tlb_set)
 	li	r4,1
-	b	.generic_secondary_smp_init
+	b	generic_secondary_smp_init
 
 _GLOBAL(book3e_secondary_core_init)
 	mflr	r28
@@ -1558,18 +1558,18 @@
 	bne	2f
 
 	/* Setup TLB for this core */
-	bl	.initial_tlb_book3e
+	bl	initial_tlb_book3e
 
 	/* We can return from the above running at a different
 	 * address, so recalculate r2 (TOC)
 	 */
-	bl	.relative_toc
+	bl	relative_toc
 
 	/* Init global core bits */
-2:	bl	.init_core_book3e
+2:	bl	init_core_book3e
 
 	/* Init per-thread bits */
-3:	bl	.init_thread_book3e
+3:	bl	init_thread_book3e
 
 	/* Return to common init code at proper virtual address.
 	 *
+70 -70
arch/powerpc/kernel/exceptions-64s.S
@@ -132,12 +132,12 @@
 #endif
 
 	beq	cr1,2f
-	b	.power7_wakeup_noloss
-2:	b	.power7_wakeup_loss
+	b	power7_wakeup_noloss
+2:	b	power7_wakeup_loss
 
 	/* Fast Sleep wakeup on PowerNV */
 8:	GET_PACA(r13)
-	b	.power7_wakeup_tb_loss
+	b	power7_wakeup_tb_loss
 
 9:
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
@@ -211,7 +211,7 @@
 #endif /* __DISABLED__ */
 	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
+	b	slb_miss_realmode
 #else
 	/*
 	 * We can't just use a direct branch to .slb_miss_realmode
@@ -243,7 +243,7 @@
 #endif /* __DISABLED__ */
 	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
+	b	slb_miss_realmode
 #else
 	mfctr	r11
 	ld	r10,PACAKBASE(r13)
@@ -829,7 +829,7 @@
 	mfspr	r3,SPRN_DAR
 	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
+	b	slb_miss_realmode
 #else
 	/*
 	 * We can't just use a direct branch to .slb_miss_realmode
@@ -854,7 +854,7 @@
 	mfspr	r3,SPRN_SRR0	/* SRR0 is faulting address */
 	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
+	b	slb_miss_realmode
 #else
 	mfctr	r11
 	ld	r10,PACAKBASE(r13)
@@ -966,7 +966,7 @@
 	b	system_call_common
 
 ppc64_runlatch_on_trampoline:
-	b	.__ppc64_runlatch_on
+	b	__ppc64_runlatch_on
 
 /*
  * Here we have detected that the kernel stack pointer is bad.
@@ -1025,7 +1025,7 @@
 	std	r12,RESULT(r1)
 	std	r11,STACK_FRAME_OVERHEAD-16(r1)
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_bad_stack
+	bl	kernel_bad_stack
 	b	1b
 
 /*
@@ -1046,7 +1046,7 @@
 	ld	r3,PACA_EXGEN+EX_DAR(r13)
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	li	r5,0x300
-	b	.do_hash_page		/* Try to handle as hpte fault */
+	b	do_hash_page		/* Try to handle as hpte fault */
 
 	.align	7
 	.globl	h_data_storage_common
@@ -1056,11 +1056,11 @@
 	mfspr	r10,SPRN_HDSISR
 	stw	r10,PACA_EXGEN+EX_DSISR(r13)
 	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 	.align	7
 	.globl	instruction_access_common
@@ -1071,6 +1071,6 @@
 	ld	r3,_NIP(r1)
 	andis.	r4,r12,0x5820
 	li	r5,0x400
-	b	.do_hash_page		/* Try to handle as hpte fault */
+	b	do_hash_page		/* Try to handle as hpte fault */
 
 	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
@@ -1088,7 +1088,7 @@
 	stw	r9,PACA_EXGEN+EX_CCR(r13)
 	std	r10,PACA_EXGEN+EX_LR(r13)
 	std	r11,PACA_EXGEN+EX_SRR0(r13)
-	bl	.slb_allocate_user
+	bl	slb_allocate_user
 
 	ld	r10,PACA_EXGEN+EX_LR(r13)
 	ld	r3,PACA_EXGEN+EX_R3(r13)
@@ -1131,9 +1131,9 @@
 unrecov_user_slb:
 	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
 	DISABLE_INTS
-	bl	.save_nvgprs
+	bl	save_nvgprs
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
+	bl	unrecoverable_exception
 	b	1b
 
 #endif /* __DISABLED__ */
@@ -1158,10 +1158,10 @@
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.machine_check_exception
-	b	.ret_from_except
+	bl	machine_check_exception
+	b	ret_from_except
 
 	.align	7
 	.globl	alignment_common
@@ -1175,31 +1175,31 @@
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.alignment_exception
-	b	.ret_from_except
+	bl	alignment_exception
+	b	ret_from_except
 
 	.align	7
 	.globl	program_check_common
 program_check_common:
 	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.program_check_exception
-	b	.ret_from_except
+	bl	program_check_exception
+	b	ret_from_except
 
 	.align	7
 	.globl	fp_unavailable_common
 fp_unavailable_common:
 	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
 	bne	1f			/* if from user, just load it up */
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_fp_unavailable_exception
+	bl	kernel_fp_unavailable_exception
 	BUG_OPCODE
 1:
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1211,15 +1211,15 @@
 	bne-	2f
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-	bl	.load_up_fpu
+	bl	load_up_fpu
 	b	fast_exception_return
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2:	/* User process was in a transaction */
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.fp_unavailable_tm
-	b	.ret_from_except
+	bl	fp_unavailable_tm
+	b	ret_from_except
 #endif
 	.align	7
 	.globl	altivec_unavailable_common
@@ -1237,24 +1237,24 @@
 	bne-	2f
 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
 #endif
-	bl	.load_up_altivec
+	bl	load_up_altivec
 	b	fast_exception_return
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2:	/* User process was in a transaction */
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.altivec_unavailable_tm
-	b	.ret_from_except
+	bl	altivec_unavailable_tm
+	b	ret_from_except
 #endif
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.altivec_unavailable_exception
-	b	.ret_from_except
+	bl	altivec_unavailable_exception
+	b	ret_from_except
 
 	.align	7
 	.globl	vsx_unavailable_common
@@ -1272,23 +1272,23 @@
 	bne-	2f
 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
 #endif
-	b	.load_up_vsx
+	b	load_up_vsx
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2:	/* User process was in a transaction */
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.vsx_unavailable_tm
-	b	.ret_from_except
+	bl	vsx_unavailable_tm
+	b	ret_from_except
 #endif
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.vsx_unavailable_exception
-	b	.ret_from_except
+	bl	vsx_unavailable_exception
+	b	ret_from_except
 
 	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
 	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
@@ -1386,9 +1386,9 @@
 machine_check_handle_early:
 	std	r0,GPR0(r1)		/* Save r0 */
 	EXCEPTION_PROLOG_COMMON_3(0x200)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.machine_check_early
+	bl	machine_check_early
 	ld	r12,_MSR(r1)
 #ifdef CONFIG_PPC_P7_NAP
 	/*
@@ -1408,11 +1408,11 @@
 	/* Supervisor state loss */
 	li	r0,1
 	stb	r0,PACA_NAPSTATELOST(r13)
-3:	bl	.machine_check_queue_event
+3:	bl	machine_check_queue_event
 	MACHINE_CHECK_HANDLER_WINDUP
 	GET_PACA(r13)
 	ld	r1,PACAR1(r13)
-	b	.power7_enter_nap_mode
+	b	power7_enter_nap_mode
 4:
 #endif
 	/*
@@ -1444,7 +1444,7 @@
 	andi.	r11,r12,MSR_RI
 	bne	2f
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
+	bl	unrecoverable_exception
 	b	1b
 2:
 	/*
@@ -1452,7 +1452,7 @@
 	 * Queue up the MCE event so that we can log it later, while
 	 * returning from kernel or opal call.
 	 */
-	bl	.machine_check_queue_event
+	bl	machine_check_queue_event
 	MACHINE_CHECK_HANDLER_WINDUP
 	rfid
 9:
@@ -1477,7 +1477,7 @@
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
 
-	bl	.slb_allocate_realmode
+	bl	slb_allocate_realmode
 
 	/* All done -- return from exception. */
 
@@ -1517,9 +1517,9 @@
 unrecov_slb:
 	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
 	DISABLE_INTS
-	bl	.save_nvgprs
+	bl	save_nvgprs
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
+	bl	unrecoverable_exception
 	b	1b
 
 
@@ -1573,7 +1573,7 @@
 	 *
 	 * at return r3 = 0 for success, 1 for page fault, negative for error
 	 */
-	bl	.hash_page		/* build HPTE if possible */
+	bl	hash_page		/* build HPTE if possible */
 	cmpdi	r3,0			/* see if hash_page succeeded */
 
 	/* Success */
@@ -1587,35 +1587,35 @@
 11:	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_page_fault
+	bl	do_page_fault
 	cmpdi	r3,0
 	beq+	12f
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	lwz	r4,_DAR(r1)
-	bl	.bad_page_fault
-	b	.ret_from_except
+	bl	bad_page_fault
+	b	ret_from_except
 
 /* We have a data breakpoint exception - handle it */
 handle_dabr_fault:
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_break
-12:	b	.ret_from_except_lite
+	bl	do_break
+12:	b	ret_from_except_lite
 
 
 /* We have a page fault that hash_page could handle but HV refused
  * the PTE insertion
  */
-13:	bl	.save_nvgprs
+13:	bl	save_nvgprs
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r4,_DAR(r1)
-	bl	.low_hash_fault
-	b	.ret_from_except
+	bl	low_hash_fault
+	b	ret_from_except
 
 /*
  * We come here as a result of a DSI at a point where we don't want
@@ -1624,16 +1624,16 @@
  * were soft-disabled. We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
-77:	bl	.save_nvgprs
+77:	bl	save_nvgprs
 	mr	r4,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	li	r5,SIGSEGV
-	bl	.bad_page_fault
-	b	.ret_from_except
+	bl	bad_page_fault
+	b	ret_from_except
 
 /* here we have a segment miss */
 do_ste_alloc:
-	bl	.ste_allocate		/* try to insert stab entry */
+	bl	ste_allocate		/* try to insert stab entry */
 	cmpdi	r3,0
 	bne-	handle_page_fault
 	b	fast_exception_return
+33 -33
arch/powerpc/kernel/head_64.S
@@ -70,7 +70,7 @@
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
 	FIXUP_ENDIAN
-	b	.__start_initialization_multiplatform
+	b	__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 
 	/* Catch branch to 0 in real mode */
@@ -186,16 +186,16 @@
 	mr	r24,r3
 
 	/* turn on 64-bit mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	.relative_toc
+	bl	relative_toc
 	tovirt(r2,r2)
 
 #ifdef CONFIG_PPC_BOOK3E
 	/* Book3E initialization */
 	mr	r3,r24
-	bl	.book3e_secondary_thread_init
+	bl	book3e_secondary_thread_init
 #endif
 	b	generic_secondary_common_init
 
@@ -214,17 +214,17 @@
 	mr	r25,r4
 
 	/* turn on 64-bit mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	.relative_toc
+	bl	relative_toc
 	tovirt(r2,r2)
 
 #ifdef CONFIG_PPC_BOOK3E
 	/* Book3E initialization */
 	mr	r3,r24
 	mr	r4,r25
-	bl	.book3e_secondary_core_init
+	bl	book3e_secondary_core_init
 #endif
 
 generic_secondary_common_init:
@@ -236,7 +236,7 @@
 	ld	r13,0(r13)		/* Get base vaddr of paca array	 */
 #ifndef CONFIG_SMP
 	addi	r13,r13,PACA_SIZE	/* know r13 if used accidentally */
-	b	.kexec_wait		/* wait for next kernel if !SMP	 */
+	b	kexec_wait		/* wait for next kernel if !SMP	 */
 #else
 	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address	 */
 	lwz	r7,0(r7)		/* also the max paca allocated	 */
@@ -250,7 +250,7 @@
 	blt	1b
 
 	mr	r3,r24			/* not found, copy phys to r3	 */
-	b	.kexec_wait		/* next kernel might do better	 */
+	b	kexec_wait		/* next kernel might do better	 */
 
 2:	SET_PACA(r13)
 #ifdef CONFIG_PPC_BOOK3E
@@ -326,10 +326,10 @@
  */
 _GLOBAL(__start_initialization_multiplatform)
 	/* Make sure we are running in 64 bits mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 	/* Get TOC pointer (current runtime address) */
-	bl	.relative_toc
+	bl	relative_toc
 
 	/* find out where we are now */
 	bcl	20,31,$+4
@@ -342,7 +342,7 @@
 	 */
 	cmpldi	cr0,r5,0
 	beq	1f
-	b	.__boot_from_prom		/* yes -> prom */
+	b	__boot_from_prom		/* yes -> prom */
 1:
 	/* Save parameters */
 	mr	r31,r3
@@ -354,8 +354,8 @@
 #endif
 
 #ifdef CONFIG_PPC_BOOK3E
-	bl	.start_initialization_book3e
-	b	.__after_prom_start
+	bl	start_initialization_book3e
+	b	__after_prom_start
 #else
 	/* Setup some critical 970 SPRs before switching MMU off */
 	mfspr	r0,SPRN_PVR
@@ -368,12 +368,12 @@
 	beq	1f
 	cmpwi	r0,0x45			/* 970GX */
 	bne	2f
-1:	bl	.__cpu_preinit_ppc970
+1:	bl	__cpu_preinit_ppc970
 2:
 
 	/* Switch off MMU if not already off */
-	bl	.__mmu_off
-	b	.__after_prom_start
+	bl	__mmu_off
+	b	__after_prom_start
 #endif /* CONFIG_PPC_BOOK3E */
 
 _INIT_STATIC(__boot_from_prom)
@@ -395,7 +395,7 @@
 #ifdef CONFIG_RELOCATABLE
 	/* Relocate code for where we are now */
 	mr	r3,r26
-	bl	.relocate
+	bl	relocate
 #endif
 
 	/* Restore parameters */
@@ -407,7 +407,7 @@
 
 	/* Do all of the interaction with OF client interface */
 	mr	r8,r26
-	bl	.prom_init
+	bl	prom_init
 #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */
 
 	/* We never return. We also hit that trap if trying to boot
@@ -424,7 +424,7 @@
 	bne	1f
 	add	r25,r25,r26
 1:	mr	r3,r25
-	bl	.relocate
+	bl	relocate
 #endif
 
 /*
@@ -464,7 +464,7 @@
 	lis	r5,(copy_to_here - _stext)@ha
 	addi	r5,r5,(copy_to_here - _stext)@l	/* # bytes of memory to copy */
 
-	bl	.copy_and_flush		/* copy the first n bytes	 */
+	bl	copy_and_flush		/* copy the first n bytes	 */
 					/* this includes the code being	 */
 					/* executed here.		 */
 	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
@@ -478,9 +478,9 @@
 4:	/* Now copy the rest of the kernel up to _end */
 	addis	r5,r26,(p_end - _stext)@ha
 	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
-5:	bl	.copy_and_flush		/* copy the rest */
+5:	bl	copy_and_flush		/* copy the rest */
 
-9:	b	.start_here_multiplatform
+9:	b	start_here_multiplatform
 
 /*
  * Copy routine used to copy the kernel to start at physical address 0
@@ -544,7 +544,7 @@
 
 _GLOBAL(pmac_secondary_start)
 	/* turn on 64-bit mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 	li	r0,0
 	mfspr	r3,SPRN_HID4
@@ -556,11 +556,11 @@
 	slbia
 
 	/* get TOC pointer (real address) */
-	bl	.relative_toc
+	bl	relative_toc
 	tovirt(r2,r2)
 
 	/* Copy some CPU settings from CPU 0 */
-	bl	.__restore_cpu_ppc970
+	bl	__restore_cpu_ppc970
 
 	/* pSeries do that early though I don't think we really need it */
 	mfmsr	r3
@@ -619,7 +619,7 @@
 	std	r14,PACAKSAVE(r13)
 
 	/* Do early setup for that CPU (stab, slb, hash table pointer) */
-	bl	.early_setup_secondary
+	bl	early_setup_secondary
 
 	/*
 	 * setup the new stack pointer, but *don't* use this until
@@ -656,7 +656,7 @@
 	ld	r2,PACATOC(r13)
 	li	r3,0
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
-	bl	.start_secondary
+	bl	start_secondary
 	b	.
 /*
  * Reset stack pointer and call start_secondary
@@ -667,7 +667,7 @@
 	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
 	li	r3,0
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
-	bl	.start_secondary
+	bl	start_secondary
 	b	.
 #endif
 
@@ -717,7 +717,7 @@
  */
 _INIT_STATIC(start_here_multiplatform)
 	/* set up the TOC */
-	bl	.relative_toc
+	bl	relative_toc
 	tovirt(r2,r2)
 
 	/* Clear out the BSS. It may have been done in prom_init,
@@ -776,7 +776,7 @@
 
 	/* Restore parameters passed from prom_init/kexec */
 	mr	r3,r31
-	bl	.early_setup		/* also sets r13 and SPRG_PACA */
+	bl	early_setup		/* also sets r13 and SPRG_PACA */
 
 	LOAD_REG_ADDR(r3, .start_here_common)
 	ld	r4,PACAKMSR(r13)
@@ -794,7 +794,7 @@
 	ld	r2,PACATOC(r13)
 
 	/* Do more system initializations in virtual mode */
-	bl	.setup_system
+	bl	setup_system
 
 	/* Mark interrupts soft and hard disabled (they might be enabled
 	 * in the PACA when doing hotplug)
@@ -805,7 +805,7 @@
 	stb	r0,PACAIRQHAPPENED(r13)
 
 	/* Generic kernel entry */
-	bl	.start_kernel
+	bl	start_kernel
 
 	/* Not reached */
 	BUG_OPCODE
+1 -1
arch/powerpc/kernel/idle_book3e.S
@@ -43,7 +43,7 @@
 	 */
 #ifdef CONFIG_TRACE_IRQFLAGS
 	stdu    r1,-128(r1)
-	bl	.trace_hardirqs_on
+	bl	trace_hardirqs_on
 	addi    r1,r1,128
 #endif
 	li	r0,1
+1 -1
arch/powerpc/kernel/idle_power4.S
@@ -46,7 +46,7 @@
 	mflr	r0
 	std	r0,16(r1)
 	stdu    r1,-128(r1)
-	bl	.trace_hardirqs_on
+	bl	trace_hardirqs_on
 	addi	r1,r1,128
 	ld	r0,16(r1)
 	mtlr	r0
+2 -2
arch/powerpc/kernel/idle_power7.S
@@ -58,7 +58,7 @@
 	/* Make sure FPU, VSX etc... are flushed as we may lose
 	 * state when going to nap mode
 	 */
-	bl	.discard_lazy_cpu_state
+	bl	discard_lazy_cpu_state
 #endif /* CONFIG_SMP */
 
 	/* Hard disable interrupts */
@@ -168,7 +168,7 @@
 _GLOBAL(power7_wakeup_noloss)
 	lbz	r0,PACA_NAPSTATELOST(r13)
 	cmpwi	r0,0
-	bne	.power7_wakeup_loss
+	bne	power7_wakeup_loss
 	ld	r1,PACAR1(r13)
 	ld	r4,_MSR(r1)
 	ld	r5,_NIP(r1)
+5 -5
arch/powerpc/kernel/misc_64.S
@@ -34,7 +34,7 @@
 	std	r0,16(r1)
 	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
 	mr	r1,r3
-	bl	.__do_softirq
+	bl	__do_softirq
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
@@ -45,7 +45,7 @@
 	std	r0,16(r1)
 	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
 	mr	r1,r4
-	bl	.__do_irq
+	bl	__do_irq
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
@@ -506,7 +506,7 @@
 	stb	r4,PACAKEXECSTATE(r13)
 	SYNC
 
-	b	.kexec_wait
+	b	kexec_wait
 
 /*
  * switch to real mode (turn mmu off)
@@ -576,7 +576,7 @@
 
 	/* copy dest pages, flush whole dest image */
 	mr	r3,r29
-	bl	.kexec_copy_flush	/* (image) */
+	bl	kexec_copy_flush	/* (image) */
 
 	/* turn off mmu */
 	bl	real_mode
@@ -586,7 +586,7 @@
 	mr	r4,r30		/* start, aka phys mem offset */
 	li	r5,0x100
 	li	r6,0
-	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
+	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
 1:	/* assume normal blr return */
 
 	/* release other cpus to the new kernel secondary start at 0x60 */
+1 -1
arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -171,7 +171,7 @@
 #endif /* CONFIG_SMP */
 
 	/* Jump to partition switch code */
-	bl	.kvmppc_hv_entry_trampoline
+	bl	kvmppc_hv_entry_trampoline
 	nop
 
 /*
+3 -3
arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1647,7 +1647,7 @@
 	/* Search the hash table. */
 	mr	r3, r9			/* vcpu pointer */
 	li	r7, 1			/* data fault */
-	bl	.kvmppc_hpte_hv_fault
+	bl	kvmppc_hpte_hv_fault
 	ld	r9, HSTATE_KVM_VCPU(r13)
 	ld	r10, VCPU_PC(r9)
 	ld	r11, VCPU_MSR(r9)
@@ -1721,7 +1721,7 @@
 	mr	r4, r10
 	mr	r6, r11
 	li	r7, 0			/* instruction fault */
-	bl	.kvmppc_hpte_hv_fault
+	bl	kvmppc_hpte_hv_fault
 	ld	r9, HSTATE_KVM_VCPU(r13)
 	ld	r10, VCPU_PC(r9)
 	ld	r11, VCPU_MSR(r9)
@@ -2099,7 +2099,7 @@
 /* Try to handle a machine check in real mode */
 machine_check_realmode:
 	mr	r3, r9		/* get vcpu pointer */
-	bl	.kvmppc_realmode_machine_check
+	bl	kvmppc_realmode_machine_check
 	nop
 	cmpdi	r3, 0		/* continue exiting from guest? */
 	ld	r9, HSTATE_KVM_VCPU(r13)
+1 -1
arch/powerpc/lib/copypage_64.S
@@ -20,7 +20,7 @@
 BEGIN_FTR_SECTION
 	lis	r5,PAGE_SIZE@h
 FTR_SECTION_ELSE
-	b	.copypage_power7
+	b	copypage_power7
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
 	ori	r5,r5,PAGE_SIZE@l
 BEGIN_FTR_SECTION
+2 -2
arch/powerpc/lib/copypage_power7.S
@@ -60,7 +60,7 @@
 	std	r4,56(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_copy
+	bl	enter_vmx_copy
 	cmpwi	r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
 	ld	r3,STACKFRAMESIZE+48(r1)
@@ -103,7 +103,7 @@
 	addi	r3,r3,128
 	bdnz	1b
 
-	b	.exit_vmx_copy		/* tail call optimise */
+	b	exit_vmx_copy		/* tail call optimise */
 
 #else
 	li	r0,(PAGE_SIZE/128)
+4 -4
arch/powerpc/lib/copyuser_power7.S
@@ -66,7 +66,7 @@
 	ld	r15,STK_REG(R15)(r1)
 	ld	r14,STK_REG(R14)(r1)
 .Ldo_err3:
-	bl	.exit_vmx_usercopy
+	bl	exit_vmx_usercopy
 	ld	r0,STACKFRAMESIZE+16(r1)
 	mtlr	r0
 	b	.Lexit
@@ -295,7 +295,7 @@
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_usercopy
+	bl	enter_vmx_usercopy
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
 	ld	r3,STACKFRAMESIZE+48(r1)
@@ -514,7 +514,7 @@
 err3;	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	.exit_vmx_usercopy	/* tail call optimise */
+	b	exit_vmx_usercopy	/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -717,5 +717,5 @@
 err3;	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	.exit_vmx_usercopy	/* tail call optimise */
+	b	exit_vmx_usercopy	/* tail call optimise */
 #endif /* CONFiG_ALTIVEC */
+4 -4
arch/powerpc/lib/hweight_64.S
@@ -24,7 +24,7 @@
 
 _GLOBAL(__arch_hweight8)
 BEGIN_FTR_SECTION
-	b	.__sw_hweight8
+	b	__sw_hweight8
 	nop
 	nop
 FTR_SECTION_ELSE
@@ -35,7 +35,7 @@
 
 _GLOBAL(__arch_hweight16)
 BEGIN_FTR_SECTION
-	b	.__sw_hweight16
+	b	__sw_hweight16
 	nop
 	nop
 	nop
@@ -57,7 +57,7 @@
 
 _GLOBAL(__arch_hweight32)
 BEGIN_FTR_SECTION
-	b	.__sw_hweight32
+	b	__sw_hweight32
 	nop
 	nop
 	nop
@@ -82,7 +82,7 @@
 
 _GLOBAL(__arch_hweight64)
 BEGIN_FTR_SECTION
-	b	.__sw_hweight64
+	b	__sw_hweight64
 	nop
 	nop
 	nop
+2 -2
arch/powerpc/lib/mem_64.S
@@ -79,8 +79,8 @@
 
 _GLOBAL(memmove)
 	cmplw	0,r3,r4
-	bgt	.backwards_memcpy
-	b	.memcpy
+	bgt	backwards_memcpy
+	b	memcpy
 
 _GLOBAL(backwards_memcpy)
 	rlwinm.	r7,r5,32-3,3,31		/* r0 = r5 >> 3 */
+3 -3
arch/powerpc/lib/memcpy_power7.S
@@ -230,7 +230,7 @@
 	std	r5,64(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_copy
+	bl	enter_vmx_copy
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
 	ld	r3,STACKFRAMESIZE+48(r1)
@@ -448,7 +448,7 @@
 
 15:	addi	r1,r1,STACKFRAMESIZE
 	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
+	b	exit_vmx_copy		/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -652,5 +652,5 @@
 
 15:	addi	r1,r1,STACKFRAMESIZE
 	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
+	b	exit_vmx_copy		/* tail call optimise */
 #endif /* CONFiG_ALTIVEC */
+4 -4
arch/powerpc/mm/hash_low_64.S
@@ -159,7 +159,7 @@
 BEGIN_FTR_SECTION
 	mr	r4,r30
 	mr	r5,r7
-	bl	.hash_page_do_lazy_icache
+	bl	hash_page_do_lazy_icache
 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 
 	/* At this point, r3 contains new PP bits, save them in
@@ -471,7 +471,7 @@
 BEGIN_FTR_SECTION
 	mr	r4,r30
 	mr	r5,r7
-	bl	.hash_page_do_lazy_icache
+	bl	hash_page_do_lazy_icache
 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 
 	/* At this point, r3 contains new PP bits, save them in
@@ -588,7 +588,7 @@
 	li	r6,MMU_PAGE_64K		/* psize */
 	ld	r7,STK_PARAM(R9)(r1)	/* ssize */
 	ld	r8,STK_PARAM(R8)(r1)	/* local */
-	bl	.flush_hash_page
+	bl	flush_hash_page
 	/* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
 	lis	r0,_PAGE_HPTE_SUB@h
 	ori	r0,r0,_PAGE_HPTE_SUB@l
@@ -812,7 +812,7 @@
 BEGIN_FTR_SECTION
 	mr	r4,r30
 	mr	r5,r7
-	bl	.hash_page_do_lazy_icache
+	bl	hash_page_do_lazy_icache
 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 
 	/* At this point, r3 contains new PP bits, save them in
+1 -1
arch/powerpc/platforms/pasemi/powersave.S
@@ -66,7 +66,7 @@
 	std	r3, 48(r1)
 
 	/* Only do power savings when in astate 0 */
-	bl	.check_astate
+	bl	check_astate
 	cmpwi	r3,0
 	bne	1f
 
+2 -2
arch/powerpc/platforms/pseries/hvCall.S
@@ -49,7 +49,7 @@
 	std	r0,16(r1);					\
 	addi	r4,r1,STK_PARAM(FIRST_REG);			\
 	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
-	bl	.__trace_hcall_entry;				\
+	bl	__trace_hcall_entry;				\
 	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
 	ld	r0,16(r1);					\
 	ld	r3,STK_PARAM(R3)(r1);				\
@@ -83,7 +83,7 @@
 	mr	r3,r6;						\
 	std	r0,16(r1);					\
 	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
-	bl	.__trace_hcall_exit;				\
+	bl	__trace_hcall_exit;				\
 	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
 	ld	r0,16(r1);					\
 	ld	r3,STK_PARAM(R3)(r1);				\