Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge remote-tracking branch 'anton/abiv2' into next

This series adds support for building the powerpc 64-bit
LE kernel using the new ABI v2. We already supported
running ABI v2 userspace programs, but this adds support
for building the kernel itself using the new ABI.

+904 -682
+8 -3
arch/powerpc/Makefile
··· 113 113 endif 114 114 endif 115 115 116 - CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc 117 - CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) 116 + CFLAGS-$(CONFIG_PPC64) := -mtraceback=no 117 + ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) 118 + CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,-mcall-aixdesc) 119 + AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) 120 + else 121 + CFLAGS-$(CONFIG_PPC64) += -mcall-aixdesc 122 + endif 118 123 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc) 119 124 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) 120 125 CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) ··· 156 151 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell) 157 152 158 153 KBUILD_CPPFLAGS += -Iarch/$(ARCH) 159 - KBUILD_AFLAGS += -Iarch/$(ARCH) 154 + KBUILD_AFLAGS += -Iarch/$(ARCH) $(AFLAGS-y) 160 155 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y) 161 156 CPP = $(CC) -E $(KBUILD_CFLAGS) 162 157
+2 -2
arch/powerpc/boot/util.S
··· 45 45 mfspr r4,SPRN_PVR 46 46 srwi r4,r4,16 47 47 cmpwi 0,r4,1 /* 601 ? */ 48 - bne .udelay_not_601 48 + bne .Ludelay_not_601 49 49 00: li r0,86 /* Instructions / microsecond? */ 50 50 mtctr r0 51 51 10: addi r0,r0,0 /* NOP */ ··· 54 54 bne 00b 55 55 blr 56 56 57 - .udelay_not_601: 57 + .Ludelay_not_601: 58 58 mulli r4,r3,1000 /* nanoseconds */ 59 59 /* Change r4 to be the number of ticks using: 60 60 * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns
+36 -4
arch/powerpc/include/asm/code-patching.h
··· 42 42 } while (0) 43 43 #endif 44 44 45 + #define OP_RT_RA_MASK 0xffff0000UL 46 + #define LIS_R2 0x3c020000UL 47 + #define ADDIS_R2_R12 0x3c4c0000UL 48 + #define ADDI_R2_R2 0x38420000UL 49 + 45 50 static inline unsigned long ppc_function_entry(void *func) 46 51 { 47 - #ifdef CONFIG_PPC64 52 + #if defined(CONFIG_PPC64) 53 + #if defined(_CALL_ELF) && _CALL_ELF == 2 54 + u32 *insn = func; 55 + 48 56 /* 49 - * On PPC64 the function pointer actually points to the function's 50 - * descriptor. The first entry in the descriptor is the address 51 - * of the function text. 57 + * A PPC64 ABIv2 function may have a local and a global entry 58 + * point. We need to use the local entry point when patching 59 + * functions, so identify and step over the global entry point 60 + * sequence. 61 + * 62 + * The global entry point sequence is always of the form: 63 + * 64 + * addis r2,r12,XXXX 65 + * addi r2,r2,XXXX 66 + * 67 + * A linker optimisation may convert the addis to lis: 68 + * 69 + * lis r2,XXXX 70 + * addi r2,r2,XXXX 71 + */ 72 + if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) || 73 + ((*insn & OP_RT_RA_MASK) == LIS_R2)) && 74 + ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2)) 75 + return (unsigned long)(insn + 2); 76 + else 77 + return (unsigned long)func; 78 + #else 79 + /* 80 + * On PPC64 ABIv1 the function pointer actually points to the 81 + * function's descriptor. The first entry in the descriptor is the 82 + * address of the function text. 52 83 */ 53 84 return ((func_descr_t *)func)->entry; 85 + #endif 54 86 #else 55 87 return (unsigned long)func; 56 88 #endif
+2 -2
arch/powerpc/include/asm/context_tracking.h
··· 2 2 #define _ASM_POWERPC_CONTEXT_TRACKING_H 3 3 4 4 #ifdef CONFIG_CONTEXT_TRACKING 5 - #define SCHEDULE_USER bl .schedule_user 5 + #define SCHEDULE_USER bl schedule_user 6 6 #else 7 - #define SCHEDULE_USER bl .schedule 7 + #define SCHEDULE_USER bl schedule 8 8 #endif 9 9 10 10 #endif
+3 -3
arch/powerpc/include/asm/exception-64e.h
··· 174 174 mtlr r16; 175 175 #define TLB_MISS_STATS_D(name) \ 176 176 addi r9,r13,MMSTAT_DSTATS+name; \ 177 - bl .tlb_stat_inc; 177 + bl tlb_stat_inc; 178 178 #define TLB_MISS_STATS_I(name) \ 179 179 addi r9,r13,MMSTAT_ISTATS+name; \ 180 - bl .tlb_stat_inc; 180 + bl tlb_stat_inc; 181 181 #define TLB_MISS_STATS_X(name) \ 182 182 ld r8,PACA_EXTLB+EX_TLB_ESR(r13); \ 183 183 cmpdi cr2,r8,-1; \ ··· 185 185 addi r9,r13,MMSTAT_DSTATS+name; \ 186 186 b 62f; \ 187 187 61: addi r9,r13,MMSTAT_ISTATS+name; \ 188 - 62: bl .tlb_stat_inc; 188 + 62: bl tlb_stat_inc; 189 189 #define TLB_MISS_STATS_SAVE_INFO \ 190 190 std r14,EX_TLB_ESR(r12); /* save ESR */ 191 191 #define TLB_MISS_STATS_SAVE_INFO_BOLTED \
+1 -1
arch/powerpc/include/asm/exception-64s.h
··· 517 517 #define DISABLE_INTS RECONCILE_IRQ_STATE(r10,r11) 518 518 519 519 #define ADD_NVGPRS \ 520 - bl .save_nvgprs 520 + bl save_nvgprs 521 521 522 522 #define RUNLATCH_ON \ 523 523 BEGIN_FTR_SECTION \
+2
arch/powerpc/include/asm/ftrace.h
··· 61 61 #endif 62 62 63 63 #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__) 64 + #if !defined(_CALL_ELF) || _CALL_ELF != 2 64 65 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME 65 66 static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) 66 67 { ··· 73 72 */ 74 73 return !strcmp(sym + 4, name + 3); 75 74 } 75 + #endif 76 76 #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */ 77 77 78 78 #endif /* _ASM_POWERPC_FTRACE */
+4 -4
arch/powerpc/include/asm/irqflags.h
··· 20 20 */ 21 21 #define TRACE_WITH_FRAME_BUFFER(func) \ 22 22 mflr r0; \ 23 - stdu r1, -32(r1); \ 23 + stdu r1, -STACK_FRAME_OVERHEAD(r1); \ 24 24 std r0, 16(r1); \ 25 - stdu r1, -32(r1); \ 25 + stdu r1, -STACK_FRAME_OVERHEAD(r1); \ 26 26 bl func; \ 27 27 ld r1, 0(r1); \ 28 28 ld r1, 0(r1); ··· 36 36 * have to call a C function so call a wrapper that saves all the 37 37 * C-clobbered registers. 38 38 */ 39 - #define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) 40 - #define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) 39 + #define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on) 40 + #define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off) 41 41 42 42 /* 43 43 * This is used by assembly code to soft-disable interrupts first and
+3 -2
arch/powerpc/include/asm/kprobes.h
··· 30 30 #include <linux/ptrace.h> 31 31 #include <linux/percpu.h> 32 32 #include <asm/probes.h> 33 + #include <asm/code-patching.h> 33 34 34 35 #define __ARCH_WANT_KPROBES_INSN_SLOT 35 36 ··· 57 56 if ((colon = strchr(name, ':')) != NULL) { \ 58 57 colon++; \ 59 58 if (*colon != '\0' && *colon != '.') \ 60 - addr = *(kprobe_opcode_t **)addr; \ 59 + addr = (kprobe_opcode_t *)ppc_function_entry(addr); \ 61 60 } else if (name[0] != '.') \ 62 - addr = *(kprobe_opcode_t **)addr; \ 61 + addr = (kprobe_opcode_t *)ppc_function_entry(addr); \ 63 62 } else { \ 64 63 char dot_name[KSYM_NAME_LEN]; \ 65 64 dot_name[0] = '.'; \
+2
arch/powerpc/include/asm/linkage.h
··· 2 2 #define _ASM_POWERPC_LINKAGE_H 3 3 4 4 #ifdef CONFIG_PPC64 5 + #if !defined(_CALL_ELF) || _CALL_ELF != 2 5 6 #define cond_syscall(x) \ 6 7 asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \ 7 8 "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n") 8 9 #define SYSCALL_ALIAS(alias, name) \ 9 10 asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \ 10 11 "\t.globl ." #alias "\n\t.set ." #alias ", ." #name) 12 + #endif 11 13 #endif 12 14 13 15 #endif /* _ASM_POWERPC_LINKAGE_H */
+4
arch/powerpc/include/asm/module.h
··· 35 35 #ifdef __powerpc64__ 36 36 unsigned int stubs_section; /* Index of stubs section in module */ 37 37 unsigned int toc_section; /* What section is the TOC? */ 38 + bool toc_fixed; /* Have we fixed up .TOC.? */ 38 39 #ifdef CONFIG_DYNAMIC_FTRACE 39 40 unsigned long toc; 40 41 unsigned long tramp; ··· 78 77 # endif /* MODULE */ 79 78 #endif 80 79 80 + bool is_module_trampoline(u32 *insns); 81 + int module_trampoline_target(struct module *mod, u32 *trampoline, 82 + unsigned long *target); 81 83 82 84 struct exception_table_entry; 83 85 void sort_ex_table(struct exception_table_entry *start,
+40 -36
arch/powerpc/include/asm/ppc_asm.h
··· 57 57 LDX_BE r10,0,r10; /* get log write index */ \ 58 58 cmpd cr1,r11,r10; \ 59 59 beq+ cr1,33f; \ 60 - bl .accumulate_stolen_time; \ 60 + bl accumulate_stolen_time; \ 61 61 ld r12,_MSR(r1); \ 62 62 andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \ 63 63 33: \ ··· 189 189 #define __STK_REG(i) (112 + ((i)-14)*8) 190 190 #define STK_REG(i) __STK_REG(__REG_##i) 191 191 192 + #if defined(_CALL_ELF) && _CALL_ELF == 2 193 + #define STK_GOT 24 194 + #define __STK_PARAM(i) (32 + ((i)-3)*8) 195 + #else 196 + #define STK_GOT 40 192 197 #define __STK_PARAM(i) (48 + ((i)-3)*8) 198 + #endif 193 199 #define STK_PARAM(i) __STK_PARAM(__REG_##i) 200 + 201 + #if defined(_CALL_ELF) && _CALL_ELF == 2 202 + 203 + #define _GLOBAL(name) \ 204 + .section ".text"; \ 205 + .align 2 ; \ 206 + .type name,@function; \ 207 + .globl name; \ 208 + name: 209 + 210 + #define _GLOBAL_TOC(name) \ 211 + .section ".text"; \ 212 + .align 2 ; \ 213 + .type name,@function; \ 214 + .globl name; \ 215 + name: \ 216 + 0: addis r2,r12,(.TOC.-0b)@ha; \ 217 + addi r2,r2,(.TOC.-0b)@l; \ 218 + .localentry name,.-name 219 + 220 + #define _KPROBE(name) \ 221 + .section ".kprobes.text","a"; \ 222 + .align 2 ; \ 223 + .type name,@function; \ 224 + .globl name; \ 225 + name: 226 + 227 + #define DOTSYM(a) a 228 + 229 + #else 194 230 195 231 #define XGLUE(a,b) a##b 196 232 #define GLUE(a,b) XGLUE(a,b) ··· 245 209 .type GLUE(.,name),@function; \ 246 210 GLUE(.,name): 247 211 248 - #define _INIT_GLOBAL(name) \ 249 - __REF; \ 250 - .align 2 ; \ 251 - .globl name; \ 252 - .globl GLUE(.,name); \ 253 - .section ".opd","aw"; \ 254 - name: \ 255 - .quad GLUE(.,name); \ 256 - .quad .TOC.@tocbase; \ 257 - .quad 0; \ 258 - .previous; \ 259 - .type GLUE(.,name),@function; \ 260 - GLUE(.,name): 212 + #define _GLOBAL_TOC(name) _GLOBAL(name) 261 213 262 214 #define _KPROBE(name) \ 263 215 .section ".kprobes.text","a"; \ ··· 261 237 .type GLUE(.,name),@function; \ 262 238 GLUE(.,name): 263 239 264 - #define 
_STATIC(name) \ 265 - .section ".text"; \ 266 - .align 2 ; \ 267 - .section ".opd","aw"; \ 268 - name: \ 269 - .quad GLUE(.,name); \ 270 - .quad .TOC.@tocbase; \ 271 - .quad 0; \ 272 - .previous; \ 273 - .type GLUE(.,name),@function; \ 274 - GLUE(.,name): 240 + #define DOTSYM(a) GLUE(.,a) 275 241 276 - #define _INIT_STATIC(name) \ 277 - __REF; \ 278 - .align 2 ; \ 279 - .section ".opd","aw"; \ 280 - name: \ 281 - .quad GLUE(.,name); \ 282 - .quad .TOC.@tocbase; \ 283 - .quad 0; \ 284 - .previous; \ 285 - .type GLUE(.,name),@function; \ 286 - GLUE(.,name): 242 + #endif 287 243 288 244 #else /* 32-bit */ 289 245
+2
arch/powerpc/include/asm/sections.h
··· 39 39 (unsigned long)_stext < end; 40 40 } 41 41 42 + #if !defined(_CALL_ELF) || _CALL_ELF != 2 42 43 #undef dereference_function_descriptor 43 44 static inline void *dereference_function_descriptor(void *ptr) 44 45 { ··· 50 49 ptr = p; 51 50 return ptr; 52 51 } 52 + #endif 53 53 54 54 #endif 55 55
+3 -3
arch/powerpc/include/asm/systbl.h
··· 62 62 SYSCALL(ni_syscall) 63 63 SYSCALL_SPU(setpgid) 64 64 SYSCALL(ni_syscall) 65 - SYSX(sys_ni_syscall,sys_olduname, sys_olduname) 65 + SYSX(sys_ni_syscall,sys_olduname,sys_olduname) 66 66 SYSCALL_SPU(umask) 67 67 SYSCALL_SPU(chroot) 68 68 COMPAT_SYS(ustat) ··· 258 258 COMPAT_SYS_SPU(utimes) 259 259 COMPAT_SYS_SPU(statfs64) 260 260 COMPAT_SYS_SPU(fstatfs64) 261 - SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64) 261 + SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64) 262 262 PPC_SYS_SPU(rtas) 263 263 OLDSYS(debug_setcontext) 264 264 SYSCALL(ni_syscall) ··· 295 295 SYSCALL_SPU(mknodat) 296 296 SYSCALL_SPU(fchownat) 297 297 COMPAT_SYS_SPU(futimesat) 298 - SYSX_SPU(sys_newfstatat, sys_fstatat64, sys_fstatat64) 298 + SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64) 299 299 SYSCALL_SPU(unlinkat) 300 300 SYSCALL_SPU(renameat) 301 301 SYSCALL_SPU(linkat)
+9 -1
arch/powerpc/include/uapi/asm/elf.h
··· 291 291 #define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */ 292 292 #define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */ 293 293 #define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */ 294 + #define R_PPC64_TLSGD 107 295 + #define R_PPC64_TLSLD 108 296 + #define R_PPC64_TOCSAVE 109 297 + 298 + #define R_PPC64_REL16 249 299 + #define R_PPC64_REL16_LO 250 300 + #define R_PPC64_REL16_HI 251 301 + #define R_PPC64_REL16_HA 252 294 302 295 303 /* Keep this the last entry. */ 296 - #define R_PPC64_NUM 107 304 + #define R_PPC64_NUM 253 297 305 298 306 /* There's actually a third entry here, but it's unused */ 299 307 struct ppc64_opd_entry
+14 -14
arch/powerpc/kernel/cpu_setup_fsl_booke.S
··· 94 94 _GLOBAL(__setup_cpu_e6500) 95 95 mflr r6 96 96 #ifdef CONFIG_PPC64 97 - bl .setup_altivec_ivors 97 + bl setup_altivec_ivors 98 98 /* Touch IVOR42 only if the CPU supports E.HV category */ 99 99 mfspr r10,SPRN_MMUCFG 100 100 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE 101 101 beq 1f 102 - bl .setup_lrat_ivor 102 + bl setup_lrat_ivor 103 103 1: 104 104 #endif 105 105 bl setup_pw20_idle ··· 164 164 #ifdef CONFIG_PPC_BOOK3E_64 165 165 _GLOBAL(__restore_cpu_e6500) 166 166 mflr r5 167 - bl .setup_altivec_ivors 167 + bl setup_altivec_ivors 168 168 /* Touch IVOR42 only if the CPU supports E.HV category */ 169 169 mfspr r10,SPRN_MMUCFG 170 170 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE 171 171 beq 1f 172 - bl .setup_lrat_ivor 172 + bl setup_lrat_ivor 173 173 1: 174 - bl .setup_pw20_idle 175 - bl .setup_altivec_idle 174 + bl setup_pw20_idle 175 + bl setup_altivec_idle 176 176 bl __restore_cpu_e5500 177 177 mtlr r5 178 178 blr ··· 181 181 mflr r4 182 182 bl __e500_icache_setup 183 183 bl __e500_dcache_setup 184 - bl .__setup_base_ivors 185 - bl .setup_perfmon_ivor 186 - bl .setup_doorbell_ivors 184 + bl __setup_base_ivors 185 + bl setup_perfmon_ivor 186 + bl setup_doorbell_ivors 187 187 /* 188 188 * We only want to touch IVOR38-41 if we're running on hardware 189 189 * that supports category E.HV. The architectural way to determine ··· 192 192 mfspr r10,SPRN_MMUCFG 193 193 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE 194 194 beq 1f 195 - bl .setup_ehv_ivors 195 + bl setup_ehv_ivors 196 196 1: 197 197 mtlr r4 198 198 blr ··· 201 201 mflr r5 202 202 bl __e500_icache_setup 203 203 bl __e500_dcache_setup 204 - bl .__setup_base_ivors 205 - bl .setup_perfmon_ivor 206 - bl .setup_doorbell_ivors 204 + bl __setup_base_ivors 205 + bl setup_perfmon_ivor 206 + bl setup_doorbell_ivors 207 207 /* 208 208 * We only want to touch IVOR38-41 if we're running on hardware 209 209 * that supports category E.HV. The architectural way to determine ··· 212 212 mfspr r10,SPRN_MMUCFG 213 213 rlwinm. 
r10,r10,0,MMUCFG_LPIDSIZE 214 214 beq 1f 215 - bl .setup_ehv_ivors 215 + bl setup_ehv_ivors 216 216 b 2f 217 217 1: 218 218 ld r10,CPU_SPEC_FEATURES(r4)
+58 -59
arch/powerpc/kernel/entry_64.S
··· 39 39 * System calls. 40 40 */ 41 41 .section ".toc","aw" 42 - .SYS_CALL_TABLE: 43 - .tc .sys_call_table[TC],.sys_call_table 42 + SYS_CALL_TABLE: 43 + .tc sys_call_table[TC],sys_call_table 44 44 45 45 /* This value is used to mark exception frames on the stack. */ 46 46 exception_marker: ··· 106 106 LDX_BE r10,0,r10 /* get log write index */ 107 107 cmpd cr1,r11,r10 108 108 beq+ cr1,33f 109 - bl .accumulate_stolen_time 109 + bl accumulate_stolen_time 110 110 REST_GPR(0,r1) 111 111 REST_4GPRS(3,r1) 112 112 REST_2GPRS(7,r1) ··· 143 143 std r10,SOFTE(r1) 144 144 145 145 #ifdef SHOW_SYSCALLS 146 - bl .do_show_syscall 146 + bl do_show_syscall 147 147 REST_GPR(0,r1) 148 148 REST_4GPRS(3,r1) 149 149 REST_2GPRS(7,r1) ··· 162 162 * Need to vector to 32 Bit or default sys_call_table here, 163 163 * based on caller's run-mode / personality. 164 164 */ 165 - ld r11,.SYS_CALL_TABLE@toc(2) 165 + ld r11,SYS_CALL_TABLE@toc(2) 166 166 andi. r10,r10,_TIF_32BIT 167 167 beq 15f 168 168 addi r11,r11,8 /* use 32-bit syscall entries */ ··· 174 174 clrldi r8,r8,32 175 175 15: 176 176 slwi r0,r0,4 177 - ldx r10,r11,r0 /* Fetch system call handler [ptr] */ 178 - mtctr r10 177 + ldx r12,r11,r0 /* Fetch system call handler [ptr] */ 178 + mtctr r12 179 179 bctrl /* Call handler */ 180 180 181 181 syscall_exit: 182 182 std r3,RESULT(r1) 183 183 #ifdef SHOW_SYSCALLS 184 - bl .do_show_syscall_exit 184 + bl do_show_syscall_exit 185 185 ld r3,RESULT(r1) 186 186 #endif 187 187 CURRENT_THREAD_INFO(r12, r1) ··· 248 248 249 249 /* Traced system call support */ 250 250 syscall_dotrace: 251 - bl .save_nvgprs 251 + bl save_nvgprs 252 252 addi r3,r1,STACK_FRAME_OVERHEAD 253 - bl .do_syscall_trace_enter 253 + bl do_syscall_trace_enter 254 254 /* 255 255 * Restore argument registers possibly just changed. 256 256 * We use the return value of do_syscall_trace_enter ··· 308 308 4: /* Anything else left to do? */ 309 309 SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ 310 310 andi. 
r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) 311 - beq .ret_from_except_lite 311 + beq ret_from_except_lite 312 312 313 313 /* Re-enable interrupts */ 314 314 #ifdef CONFIG_PPC_BOOK3E ··· 319 319 mtmsrd r10,1 320 320 #endif /* CONFIG_PPC_BOOK3E */ 321 321 322 - bl .save_nvgprs 322 + bl save_nvgprs 323 323 addi r3,r1,STACK_FRAME_OVERHEAD 324 - bl .do_syscall_trace_leave 325 - b .ret_from_except 324 + bl do_syscall_trace_leave 325 + b ret_from_except 326 326 327 327 /* Save non-volatile GPRs, if not already saved. */ 328 328 _GLOBAL(save_nvgprs) ··· 345 345 */ 346 346 347 347 _GLOBAL(ppc_fork) 348 - bl .save_nvgprs 349 - bl .sys_fork 348 + bl save_nvgprs 349 + bl sys_fork 350 350 b syscall_exit 351 351 352 352 _GLOBAL(ppc_vfork) 353 - bl .save_nvgprs 354 - bl .sys_vfork 353 + bl save_nvgprs 354 + bl sys_vfork 355 355 b syscall_exit 356 356 357 357 _GLOBAL(ppc_clone) 358 - bl .save_nvgprs 359 - bl .sys_clone 358 + bl save_nvgprs 359 + bl sys_clone 360 360 b syscall_exit 361 361 362 362 _GLOBAL(ppc32_swapcontext) 363 - bl .save_nvgprs 364 - bl .compat_sys_swapcontext 363 + bl save_nvgprs 364 + bl compat_sys_swapcontext 365 365 b syscall_exit 366 366 367 367 _GLOBAL(ppc64_swapcontext) 368 - bl .save_nvgprs 369 - bl .sys_swapcontext 368 + bl save_nvgprs 369 + bl sys_swapcontext 370 370 b syscall_exit 371 371 372 372 _GLOBAL(ret_from_fork) 373 - bl .schedule_tail 373 + bl schedule_tail 374 374 REST_NVGPRS(r1) 375 375 li r3,0 376 376 b syscall_exit 377 377 378 378 _GLOBAL(ret_from_kernel_thread) 379 - bl .schedule_tail 379 + bl schedule_tail 380 380 REST_NVGPRS(r1) 381 - ld r14, 0(r14) 382 381 mtlr r14 383 382 mr r3,r15 383 + #if defined(_CALL_ELF) && _CALL_ELF == 2 384 + mr r12,r14 385 + #endif 384 386 blrl 385 387 li r3,0 386 388 b syscall_exit ··· 613 611 _GLOBAL(ret_from_except) 614 612 ld r11,_TRAP(r1) 615 613 andi. 
r0,r11,1 616 - bne .ret_from_except_lite 614 + bne ret_from_except_lite 617 615 REST_NVGPRS(r1) 618 616 619 617 _GLOBAL(ret_from_except_lite) ··· 663 661 #endif 664 662 1: andi. r0,r4,_TIF_NEED_RESCHED 665 663 beq 2f 666 - bl .restore_interrupts 664 + bl restore_interrupts 667 665 SCHEDULE_USER 668 - b .ret_from_except_lite 666 + b ret_from_except_lite 669 667 2: 670 668 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 671 669 andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM 672 670 bne 3f /* only restore TM if nothing else to do */ 673 671 addi r3,r1,STACK_FRAME_OVERHEAD 674 - bl .restore_tm_state 672 + bl restore_tm_state 675 673 b restore 676 674 3: 677 675 #endif 678 - bl .save_nvgprs 679 - bl .restore_interrupts 676 + bl save_nvgprs 677 + bl restore_interrupts 680 678 addi r3,r1,STACK_FRAME_OVERHEAD 681 - bl .do_notify_resume 682 - b .ret_from_except 679 + bl do_notify_resume 680 + b ret_from_except 683 681 684 682 resume_kernel: 685 683 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ ··· 732 730 * sure we are soft-disabled first and reconcile irq state. 733 731 */ 734 732 RECONCILE_IRQ_STATE(r3,r4) 735 - 1: bl .preempt_schedule_irq 733 + 1: bl preempt_schedule_irq 736 734 737 735 /* Re-test flags and eventually loop */ 738 736 CURRENT_THREAD_INFO(r9, r1) ··· 794 792 */ 795 793 do_restore: 796 794 #ifdef CONFIG_PPC_BOOK3E 797 - b .exception_return_book3e 795 + b exception_return_book3e 798 796 #else 799 797 /* 800 798 * Clear the reservation. 
If we know the CPU tracks the address of ··· 909 907 * 910 908 * Still, this might be useful for things like hash_page 911 909 */ 912 - bl .__check_irq_replay 910 + bl __check_irq_replay 913 911 cmpwi cr0,r3,0 914 912 beq restore_no_replay 915 913 ··· 930 928 cmpwi cr0,r3,0x500 931 929 bne 1f 932 930 addi r3,r1,STACK_FRAME_OVERHEAD; 933 - bl .do_IRQ 934 - b .ret_from_except 931 + bl do_IRQ 932 + b ret_from_except 935 933 1: cmpwi cr0,r3,0x900 936 934 bne 1f 937 935 addi r3,r1,STACK_FRAME_OVERHEAD; 938 - bl .timer_interrupt 939 - b .ret_from_except 936 + bl timer_interrupt 937 + b ret_from_except 940 938 #ifdef CONFIG_PPC_DOORBELL 941 939 1: 942 940 #ifdef CONFIG_PPC_BOOK3E ··· 950 948 #endif /* CONFIG_PPC_BOOK3E */ 951 949 bne 1f 952 950 addi r3,r1,STACK_FRAME_OVERHEAD; 953 - bl .doorbell_exception 954 - b .ret_from_except 951 + bl doorbell_exception 952 + b ret_from_except 955 953 #endif /* CONFIG_PPC_DOORBELL */ 956 - 1: b .ret_from_except /* What else to do here ? */ 954 + 1: b ret_from_except /* What else to do here ? */ 957 955 958 956 unrecov_restore: 959 957 addi r3,r1,STACK_FRAME_OVERHEAD 960 - bl .unrecoverable_exception 958 + bl unrecoverable_exception 961 959 b unrecov_restore 962 960 963 961 #ifdef CONFIG_PPC_RTAS ··· 1023 1021 std r6,PACASAVEDMSR(r13) 1024 1022 1025 1023 /* Setup our real return addr */ 1026 - LOAD_REG_ADDR(r4,.rtas_return_loc) 1024 + LOAD_REG_ADDR(r4,rtas_return_loc) 1027 1025 clrldi r4,r4,2 /* convert to realmode address */ 1028 1026 mtlr r4 1029 1027 ··· 1047 1045 rfid 1048 1046 b . /* prevent speculative execution */ 1049 1047 1050 - _STATIC(rtas_return_loc) 1048 + rtas_return_loc: 1051 1049 FIXUP_ENDIAN 1052 1050 1053 1051 /* relocation is off at this point */ ··· 1056 1054 1057 1055 bcl 20,31,$+4 1058 1056 0: mflr r3 1059 - ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */ 1057 + ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */ 1060 1058 1061 1059 mfmsr r6 1062 1060 li r0,MSR_RI ··· 1073 1071 b . 
/* prevent speculative execution */ 1074 1072 1075 1073 .align 3 1076 - 1: .llong .rtas_restore_regs 1074 + 1: .llong rtas_restore_regs 1077 1075 1078 - _STATIC(rtas_restore_regs) 1076 + rtas_restore_regs: 1079 1077 /* relocation is on at this point */ 1080 1078 REST_GPR(2, r1) /* Restore the TOC */ 1081 1079 REST_GPR(13, r1) /* Restore paca */ ··· 1175 1173 _GLOBAL(_mcount) 1176 1174 blr 1177 1175 1178 - _GLOBAL(ftrace_caller) 1176 + _GLOBAL_TOC(ftrace_caller) 1179 1177 /* Taken from output of objdump from lib64/glibc */ 1180 1178 mflr r3 1181 1179 ld r11, 0(r1) ··· 1199 1197 _GLOBAL(ftrace_stub) 1200 1198 blr 1201 1199 #else 1202 - _GLOBAL(mcount) 1203 - blr 1204 - 1205 - _GLOBAL(_mcount) 1200 + _GLOBAL_TOC(_mcount) 1206 1201 /* Taken from output of objdump from lib64/glibc */ 1207 1202 mflr r3 1208 1203 ld r11, 0(r1) ··· 1237 1238 ld r11, 112(r1) 1238 1239 addi r3, r11, 16 1239 1240 1240 - bl .prepare_ftrace_return 1241 + bl prepare_ftrace_return 1241 1242 nop 1242 1243 1243 1244 ld r0, 128(r1) ··· 1253 1254 mr r31, r1 1254 1255 stdu r1, -112(r1) 1255 1256 1256 - bl .ftrace_return_to_handler 1257 + bl ftrace_return_to_handler 1257 1258 nop 1258 1259 1259 1260 /* return value has real return address */ ··· 1283 1284 */ 1284 1285 ld r2, PACATOC(r13) 1285 1286 1286 - bl .ftrace_return_to_handler 1287 + bl ftrace_return_to_handler 1287 1288 nop 1288 1289 1289 1290 /* return value has real return address */
+70 -70
arch/powerpc/kernel/exceptions-64e.S
··· 499 499 CHECK_NAPPING(); \ 500 500 addi r3,r1,STACK_FRAME_OVERHEAD; \ 501 501 bl hdlr; \ 502 - b .ret_from_except_lite; 502 + b ret_from_except_lite; 503 503 504 504 /* This value is used to mark exception frames on the stack. */ 505 505 .section ".toc","aw" ··· 550 550 CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, 551 551 PROLOG_ADDITION_NONE) 552 552 EXCEPTION_COMMON_CRIT(0x100) 553 - bl .save_nvgprs 553 + bl save_nvgprs 554 554 bl special_reg_save 555 555 CHECK_NAPPING(); 556 556 addi r3,r1,STACK_FRAME_OVERHEAD 557 - bl .unknown_exception 557 + bl unknown_exception 558 558 b ret_from_crit_except 559 559 560 560 /* Machine Check Interrupt */ ··· 562 562 MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK, 563 563 PROLOG_ADDITION_NONE) 564 564 EXCEPTION_COMMON_MC(0x000) 565 - bl .save_nvgprs 565 + bl save_nvgprs 566 566 bl special_reg_save 567 567 CHECK_NAPPING(); 568 568 addi r3,r1,STACK_FRAME_OVERHEAD 569 - bl .machine_check_exception 569 + bl machine_check_exception 570 570 b ret_from_mc_except 571 571 572 572 /* Data Storage Interrupt */ ··· 591 591 592 592 /* External Input Interrupt */ 593 593 MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, 594 - external_input, .do_IRQ, ACK_NONE) 594 + external_input, do_IRQ, ACK_NONE) 595 595 596 596 /* Alignment */ 597 597 START_EXCEPTION(alignment); ··· 612 612 std r14,_DSISR(r1) 613 613 addi r3,r1,STACK_FRAME_OVERHEAD 614 614 ld r14,PACA_EXGEN+EX_R14(r13) 615 - bl .save_nvgprs 616 - bl .program_check_exception 617 - b .ret_from_except 615 + bl save_nvgprs 616 + bl program_check_exception 617 + b ret_from_except 618 618 619 619 /* Floating Point Unavailable Interrupt */ 620 620 START_EXCEPTION(fp_unavailable); ··· 625 625 ld r12,_MSR(r1) 626 626 andi. 
r0,r12,MSR_PR; 627 627 beq- 1f 628 - bl .load_up_fpu 628 + bl load_up_fpu 629 629 b fast_exception_return 630 630 1: INTS_DISABLE 631 - bl .save_nvgprs 631 + bl save_nvgprs 632 632 addi r3,r1,STACK_FRAME_OVERHEAD 633 - bl .kernel_fp_unavailable_exception 634 - b .ret_from_except 633 + bl kernel_fp_unavailable_exception 634 + b ret_from_except 635 635 636 636 /* Altivec Unavailable Interrupt */ 637 637 START_EXCEPTION(altivec_unavailable); ··· 644 644 ld r12,_MSR(r1) 645 645 andi. r0,r12,MSR_PR; 646 646 beq- 1f 647 - bl .load_up_altivec 647 + bl load_up_altivec 648 648 b fast_exception_return 649 649 1: 650 650 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 651 651 #endif 652 652 INTS_DISABLE 653 - bl .save_nvgprs 653 + bl save_nvgprs 654 654 addi r3,r1,STACK_FRAME_OVERHEAD 655 - bl .altivec_unavailable_exception 656 - b .ret_from_except 655 + bl altivec_unavailable_exception 656 + b ret_from_except 657 657 658 658 /* AltiVec Assist */ 659 659 START_EXCEPTION(altivec_assist); ··· 662 662 PROLOG_ADDITION_NONE) 663 663 EXCEPTION_COMMON(0x220) 664 664 INTS_DISABLE 665 - bl .save_nvgprs 665 + bl save_nvgprs 666 666 addi r3,r1,STACK_FRAME_OVERHEAD 667 667 #ifdef CONFIG_ALTIVEC 668 668 BEGIN_FTR_SECTION 669 - bl .altivec_assist_exception 669 + bl altivec_assist_exception 670 670 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 671 671 #else 672 - bl .unknown_exception 672 + bl unknown_exception 673 673 #endif 674 - b .ret_from_except 674 + b ret_from_except 675 675 676 676 677 677 /* Decrementer Interrupt */ 678 678 MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, 679 - decrementer, .timer_interrupt, ACK_DEC) 679 + decrementer, timer_interrupt, ACK_DEC) 680 680 681 681 /* Fixed Interval Timer Interrupt */ 682 682 MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, 683 - fixed_interval, .unknown_exception, ACK_FIT) 683 + fixed_interval, unknown_exception, ACK_FIT) 684 684 685 685 /* Watchdog Timer Interrupt */ 686 686 START_EXCEPTION(watchdog); 687 687 CRIT_EXCEPTION_PROLOG(0x9f0, 
BOOKE_INTERRUPT_WATCHDOG, 688 688 PROLOG_ADDITION_NONE) 689 689 EXCEPTION_COMMON_CRIT(0x9f0) 690 - bl .save_nvgprs 690 + bl save_nvgprs 691 691 bl special_reg_save 692 692 CHECK_NAPPING(); 693 693 addi r3,r1,STACK_FRAME_OVERHEAD 694 694 #ifdef CONFIG_BOOKE_WDT 695 - bl .WatchdogException 695 + bl WatchdogException 696 696 #else 697 - bl .unknown_exception 697 + bl unknown_exception 698 698 #endif 699 699 b ret_from_crit_except 700 700 ··· 712 712 PROLOG_ADDITION_NONE) 713 713 EXCEPTION_COMMON(0xf20) 714 714 INTS_DISABLE 715 - bl .save_nvgprs 715 + bl save_nvgprs 716 716 addi r3,r1,STACK_FRAME_OVERHEAD 717 - bl .unknown_exception 718 - b .ret_from_except 717 + bl unknown_exception 718 + b ret_from_except 719 719 720 720 /* Debug exception as a critical interrupt*/ 721 721 START_EXCEPTION(debug_crit); ··· 774 774 mr r4,r14 775 775 ld r14,PACA_EXCRIT+EX_R14(r13) 776 776 ld r15,PACA_EXCRIT+EX_R15(r13) 777 - bl .save_nvgprs 778 - bl .DebugException 779 - b .ret_from_except 777 + bl save_nvgprs 778 + bl DebugException 779 + b ret_from_except 780 780 781 781 kernel_dbg_exc: 782 782 b . 
/* NYI */ ··· 839 839 mr r4,r14 840 840 ld r14,PACA_EXDBG+EX_R14(r13) 841 841 ld r15,PACA_EXDBG+EX_R15(r13) 842 - bl .save_nvgprs 843 - bl .DebugException 844 - b .ret_from_except 842 + bl save_nvgprs 843 + bl DebugException 844 + b ret_from_except 845 845 846 846 START_EXCEPTION(perfmon); 847 847 NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, ··· 850 850 INTS_DISABLE 851 851 CHECK_NAPPING() 852 852 addi r3,r1,STACK_FRAME_OVERHEAD 853 - bl .performance_monitor_exception 854 - b .ret_from_except_lite 853 + bl performance_monitor_exception 854 + b ret_from_except_lite 855 855 856 856 /* Doorbell interrupt */ 857 857 MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, 858 - doorbell, .doorbell_exception, ACK_NONE) 858 + doorbell, doorbell_exception, ACK_NONE) 859 859 860 860 /* Doorbell critical Interrupt */ 861 861 START_EXCEPTION(doorbell_crit); 862 862 CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, 863 863 PROLOG_ADDITION_NONE) 864 864 EXCEPTION_COMMON_CRIT(0x2a0) 865 - bl .save_nvgprs 865 + bl save_nvgprs 866 866 bl special_reg_save 867 867 CHECK_NAPPING(); 868 868 addi r3,r1,STACK_FRAME_OVERHEAD 869 - bl .unknown_exception 869 + bl unknown_exception 870 870 b ret_from_crit_except 871 871 872 872 /* ··· 878 878 PROLOG_ADDITION_NONE) 879 879 EXCEPTION_COMMON(0x2c0) 880 880 addi r3,r1,STACK_FRAME_OVERHEAD 881 - bl .save_nvgprs 881 + bl save_nvgprs 882 882 INTS_RESTORE_HARD 883 - bl .unknown_exception 884 - b .ret_from_except 883 + bl unknown_exception 884 + b ret_from_except 885 885 886 886 /* Guest Doorbell critical Interrupt */ 887 887 START_EXCEPTION(guest_doorbell_crit); 888 888 CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, 889 889 PROLOG_ADDITION_NONE) 890 890 EXCEPTION_COMMON_CRIT(0x2e0) 891 - bl .save_nvgprs 891 + bl save_nvgprs 892 892 bl special_reg_save 893 893 CHECK_NAPPING(); 894 894 addi r3,r1,STACK_FRAME_OVERHEAD 895 - bl .unknown_exception 895 + bl unknown_exception 896 896 b ret_from_crit_except 
897 897 898 898 /* Hypervisor call */ ··· 901 901 PROLOG_ADDITION_NONE) 902 902 EXCEPTION_COMMON(0x310) 903 903 addi r3,r1,STACK_FRAME_OVERHEAD 904 - bl .save_nvgprs 904 + bl save_nvgprs 905 905 INTS_RESTORE_HARD 906 - bl .unknown_exception 907 - b .ret_from_except 906 + bl unknown_exception 907 + b ret_from_except 908 908 909 909 /* Embedded Hypervisor priviledged */ 910 910 START_EXCEPTION(ehpriv); ··· 912 912 PROLOG_ADDITION_NONE) 913 913 EXCEPTION_COMMON(0x320) 914 914 addi r3,r1,STACK_FRAME_OVERHEAD 915 - bl .save_nvgprs 915 + bl save_nvgprs 916 916 INTS_RESTORE_HARD 917 - bl .unknown_exception 918 - b .ret_from_except 917 + bl unknown_exception 918 + b ret_from_except 919 919 920 920 /* LRAT Error interrupt */ 921 921 START_EXCEPTION(lrat_error); ··· 1014 1014 mr r5,r15 1015 1015 ld r14,PACA_EXGEN+EX_R14(r13) 1016 1016 ld r15,PACA_EXGEN+EX_R15(r13) 1017 - bl .do_page_fault 1017 + bl do_page_fault 1018 1018 cmpdi r3,0 1019 1019 bne- 1f 1020 - b .ret_from_except_lite 1021 - 1: bl .save_nvgprs 1020 + b ret_from_except_lite 1021 + 1: bl save_nvgprs 1022 1022 mr r5,r3 1023 1023 addi r3,r1,STACK_FRAME_OVERHEAD 1024 1024 ld r4,_DAR(r1) 1025 - bl .bad_page_fault 1026 - b .ret_from_except 1025 + bl bad_page_fault 1026 + b ret_from_except 1027 1027 1028 1028 /* 1029 1029 * Alignment exception doesn't fit entirely in the 0x100 bytes so it ··· 1035 1035 addi r3,r1,STACK_FRAME_OVERHEAD 1036 1036 ld r14,PACA_EXGEN+EX_R14(r13) 1037 1037 ld r15,PACA_EXGEN+EX_R15(r13) 1038 - bl .save_nvgprs 1038 + bl save_nvgprs 1039 1039 INTS_RESTORE_HARD 1040 - bl .alignment_exception 1041 - b .ret_from_except 1040 + bl alignment_exception 1041 + b ret_from_except 1042 1042 1043 1043 /* 1044 1044 * We branch here from entry_64.S for the last stage of the exception ··· 1172 1172 std r12,0(r11) 1173 1173 ld r2,PACATOC(r13) 1174 1174 1: addi r3,r1,STACK_FRAME_OVERHEAD 1175 - bl .kernel_bad_stack 1175 + bl kernel_bad_stack 1176 1176 b 1b 1177 1177 1178 1178 /* ··· 1521 1521 * and always use AS 
0, so we just set it up to match our link 1522 1522 * address and never use 0 based addresses. 1523 1523 */ 1524 - bl .initial_tlb_book3e 1524 + bl initial_tlb_book3e 1525 1525 1526 1526 /* Init global core bits */ 1527 - bl .init_core_book3e 1527 + bl init_core_book3e 1528 1528 1529 1529 /* Init per-thread bits */ 1530 - bl .init_thread_book3e 1530 + bl init_thread_book3e 1531 1531 1532 1532 /* Return to common init code */ 1533 1533 tovirt(r28,r28) ··· 1548 1548 */ 1549 1549 _GLOBAL(book3e_secondary_core_init_tlb_set) 1550 1550 li r4,1 1551 - b .generic_secondary_smp_init 1551 + b generic_secondary_smp_init 1552 1552 1553 1553 _GLOBAL(book3e_secondary_core_init) 1554 1554 mflr r28 ··· 1558 1558 bne 2f 1559 1559 1560 1560 /* Setup TLB for this core */ 1561 - bl .initial_tlb_book3e 1561 + bl initial_tlb_book3e 1562 1562 1563 1563 /* We can return from the above running at a different 1564 1564 * address, so recalculate r2 (TOC) 1565 1565 */ 1566 - bl .relative_toc 1566 + bl relative_toc 1567 1567 1568 1568 /* Init global core bits */ 1569 - 2: bl .init_core_book3e 1569 + 2: bl init_core_book3e 1570 1570 1571 1571 /* Init per-thread bits */ 1572 - 3: bl .init_thread_book3e 1572 + 3: bl init_thread_book3e 1573 1573 1574 1574 /* Return to common init code at proper virtual address. 1575 1575 * ··· 1596 1596 mflr r28 1597 1597 b 3b 1598 1598 1599 - _STATIC(init_core_book3e) 1599 + init_core_book3e: 1600 1600 /* Establish the interrupt vector base */ 1601 1601 LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e) 1602 1602 mtspr SPRN_IVPR,r3 1603 1603 sync 1604 1604 blr 1605 1605 1606 - _STATIC(init_thread_book3e) 1606 + init_thread_book3e: 1607 1607 lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h 1608 1608 mtspr SPRN_EPCR,r3 1609 1609
+103 -103
arch/powerpc/kernel/exceptions-64s.S
··· 132 132 #endif 133 133 134 134 beq cr1,2f 135 - b .power7_wakeup_noloss 136 - 2: b .power7_wakeup_loss 135 + b power7_wakeup_noloss 136 + 2: b power7_wakeup_loss 137 137 138 138 /* Fast Sleep wakeup on PowerNV */ 139 139 8: GET_PACA(r13) 140 - b .power7_wakeup_tb_loss 140 + b power7_wakeup_tb_loss 141 141 142 142 9: 143 143 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) ··· 211 211 #endif /* __DISABLED__ */ 212 212 mfspr r12,SPRN_SRR1 213 213 #ifndef CONFIG_RELOCATABLE 214 - b .slb_miss_realmode 214 + b slb_miss_realmode 215 215 #else 216 216 /* 217 - * We can't just use a direct branch to .slb_miss_realmode 217 + * We can't just use a direct branch to slb_miss_realmode 218 218 * because the distance from here to there depends on where 219 219 * the kernel ends up being put. 220 220 */ 221 221 mfctr r11 222 222 ld r10,PACAKBASE(r13) 223 - LOAD_HANDLER(r10, .slb_miss_realmode) 223 + LOAD_HANDLER(r10, slb_miss_realmode) 224 224 mtctr r10 225 225 bctr 226 226 #endif ··· 243 243 #endif /* __DISABLED__ */ 244 244 mfspr r12,SPRN_SRR1 245 245 #ifndef CONFIG_RELOCATABLE 246 - b .slb_miss_realmode 246 + b slb_miss_realmode 247 247 #else 248 248 mfctr r11 249 249 ld r10,PACAKBASE(r13) 250 - LOAD_HANDLER(r10, .slb_miss_realmode) 250 + LOAD_HANDLER(r10, slb_miss_realmode) 251 251 mtctr r10 252 252 bctr 253 253 #endif ··· 524 524 std r12,PACA_EXSLB+EX_R12(r13) 525 525 GET_SCRATCH0(r10) 526 526 std r10,PACA_EXSLB+EX_R13(r13) 527 - EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) 527 + EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD) 528 528 529 529 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) 530 530 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) ··· 769 769 770 770 /*** Common interrupt handlers ***/ 771 771 772 - STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) 772 + STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception) 773 773 774 774 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) 775 - 
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) 776 - STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) 775 + STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt) 776 + STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt) 777 777 #ifdef CONFIG_PPC_DOORBELL 778 - STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception) 778 + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception) 779 779 #else 780 - STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception) 780 + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception) 781 781 #endif 782 - STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) 783 - STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) 784 - STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) 785 - STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt) 786 - STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) 782 + STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception) 783 + STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception) 784 + STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception) 785 + STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt) 786 + STD_EXCEPTION_COMMON(0xe60, hmi_exception, unknown_exception) 787 787 #ifdef CONFIG_PPC_DOORBELL 788 - STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) 788 + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception) 789 789 #else 790 - STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception) 790 + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception) 791 791 #endif 792 - STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) 793 - STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) 794 - STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception) 792 + STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, 
performance_monitor_exception) 793 + STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception) 794 + STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception) 795 795 #ifdef CONFIG_ALTIVEC 796 - STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) 796 + STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception) 797 797 #else 798 - STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) 798 + STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception) 799 799 #endif 800 800 #ifdef CONFIG_CBE_RAS 801 - STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) 802 - STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) 803 - STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) 801 + STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception) 802 + STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception) 803 + STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception) 804 804 #endif /* CONFIG_CBE_RAS */ 805 805 806 806 /* ··· 829 829 mfspr r3,SPRN_DAR 830 830 mfspr r12,SPRN_SRR1 831 831 #ifndef CONFIG_RELOCATABLE 832 - b .slb_miss_realmode 832 + b slb_miss_realmode 833 833 #else 834 834 /* 835 - * We can't just use a direct branch to .slb_miss_realmode 835 + * We can't just use a direct branch to slb_miss_realmode 836 836 * because the distance from here to there depends on where 837 837 * the kernel ends up being put. 
838 838 */ 839 839 mfctr r11 840 840 ld r10,PACAKBASE(r13) 841 - LOAD_HANDLER(r10, .slb_miss_realmode) 841 + LOAD_HANDLER(r10, slb_miss_realmode) 842 842 mtctr r10 843 843 bctr 844 844 #endif ··· 854 854 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ 855 855 mfspr r12,SPRN_SRR1 856 856 #ifndef CONFIG_RELOCATABLE 857 - b .slb_miss_realmode 857 + b slb_miss_realmode 858 858 #else 859 859 mfctr r11 860 860 ld r10,PACAKBASE(r13) 861 - LOAD_HANDLER(r10, .slb_miss_realmode) 861 + LOAD_HANDLER(r10, slb_miss_realmode) 862 862 mtctr r10 863 863 bctr 864 864 #endif ··· 966 966 b system_call_common 967 967 968 968 ppc64_runlatch_on_trampoline: 969 - b .__ppc64_runlatch_on 969 + b __ppc64_runlatch_on 970 970 971 971 /* 972 972 * Here we have detected that the kernel stack pointer is bad. ··· 1025 1025 std r12,RESULT(r1) 1026 1026 std r11,STACK_FRAME_OVERHEAD-16(r1) 1027 1027 1: addi r3,r1,STACK_FRAME_OVERHEAD 1028 - bl .kernel_bad_stack 1028 + bl kernel_bad_stack 1029 1029 b 1b 1030 1030 1031 1031 /* ··· 1046 1046 ld r3,PACA_EXGEN+EX_DAR(r13) 1047 1047 lwz r4,PACA_EXGEN+EX_DSISR(r13) 1048 1048 li r5,0x300 1049 - b .do_hash_page /* Try to handle as hpte fault */ 1049 + b do_hash_page /* Try to handle as hpte fault */ 1050 1050 1051 1051 .align 7 1052 1052 .globl h_data_storage_common ··· 1056 1056 mfspr r10,SPRN_HDSISR 1057 1057 stw r10,PACA_EXGEN+EX_DSISR(r13) 1058 1058 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) 1059 - bl .save_nvgprs 1059 + bl save_nvgprs 1060 1060 DISABLE_INTS 1061 1061 addi r3,r1,STACK_FRAME_OVERHEAD 1062 - bl .unknown_exception 1063 - b .ret_from_except 1062 + bl unknown_exception 1063 + b ret_from_except 1064 1064 1065 1065 .align 7 1066 1066 .globl instruction_access_common ··· 1071 1071 ld r3,_NIP(r1) 1072 1072 andis. 
r4,r12,0x5820 1073 1073 li r5,0x400 1074 - b .do_hash_page /* Try to handle as hpte fault */ 1074 + b do_hash_page /* Try to handle as hpte fault */ 1075 1075 1076 - STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) 1076 + STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception) 1077 1077 1078 1078 /* 1079 1079 * Here is the common SLB miss user that is used when going to virtual ··· 1088 1088 stw r9,PACA_EXGEN+EX_CCR(r13) 1089 1089 std r10,PACA_EXGEN+EX_LR(r13) 1090 1090 std r11,PACA_EXGEN+EX_SRR0(r13) 1091 - bl .slb_allocate_user 1091 + bl slb_allocate_user 1092 1092 1093 1093 ld r10,PACA_EXGEN+EX_LR(r13) 1094 1094 ld r3,PACA_EXGEN+EX_R3(r13) ··· 1131 1131 unrecov_user_slb: 1132 1132 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) 1133 1133 DISABLE_INTS 1134 - bl .save_nvgprs 1134 + bl save_nvgprs 1135 1135 1: addi r3,r1,STACK_FRAME_OVERHEAD 1136 - bl .unrecoverable_exception 1136 + bl unrecoverable_exception 1137 1137 b 1b 1138 1138 1139 1139 #endif /* __DISABLED__ */ ··· 1158 1158 lwz r4,PACA_EXGEN+EX_DSISR(r13) 1159 1159 std r3,_DAR(r1) 1160 1160 std r4,_DSISR(r1) 1161 - bl .save_nvgprs 1161 + bl save_nvgprs 1162 1162 addi r3,r1,STACK_FRAME_OVERHEAD 1163 - bl .machine_check_exception 1164 - b .ret_from_except 1163 + bl machine_check_exception 1164 + b ret_from_except 1165 1165 1166 1166 .align 7 1167 1167 .globl alignment_common ··· 1175 1175 lwz r4,PACA_EXGEN+EX_DSISR(r13) 1176 1176 std r3,_DAR(r1) 1177 1177 std r4,_DSISR(r1) 1178 - bl .save_nvgprs 1178 + bl save_nvgprs 1179 1179 DISABLE_INTS 1180 1180 addi r3,r1,STACK_FRAME_OVERHEAD 1181 - bl .alignment_exception 1182 - b .ret_from_except 1181 + bl alignment_exception 1182 + b ret_from_except 1183 1183 1184 1184 .align 7 1185 1185 .globl program_check_common 1186 1186 program_check_common: 1187 1187 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) 1188 - bl .save_nvgprs 1188 + bl save_nvgprs 1189 1189 DISABLE_INTS 1190 1190 addi r3,r1,STACK_FRAME_OVERHEAD 1191 - bl .program_check_exception 1192 - 
b .ret_from_except 1191 + bl program_check_exception 1192 + b ret_from_except 1193 1193 1194 1194 .align 7 1195 1195 .globl fp_unavailable_common 1196 1196 fp_unavailable_common: 1197 1197 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) 1198 1198 bne 1f /* if from user, just load it up */ 1199 - bl .save_nvgprs 1199 + bl save_nvgprs 1200 1200 DISABLE_INTS 1201 1201 addi r3,r1,STACK_FRAME_OVERHEAD 1202 - bl .kernel_fp_unavailable_exception 1202 + bl kernel_fp_unavailable_exception 1203 1203 BUG_OPCODE 1204 1204 1: 1205 1205 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM ··· 1211 1211 bne- 2f 1212 1212 END_FTR_SECTION_IFSET(CPU_FTR_TM) 1213 1213 #endif 1214 - bl .load_up_fpu 1214 + bl load_up_fpu 1215 1215 b fast_exception_return 1216 1216 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1217 1217 2: /* User process was in a transaction */ 1218 - bl .save_nvgprs 1218 + bl save_nvgprs 1219 1219 DISABLE_INTS 1220 1220 addi r3,r1,STACK_FRAME_OVERHEAD 1221 - bl .fp_unavailable_tm 1222 - b .ret_from_except 1221 + bl fp_unavailable_tm 1222 + b ret_from_except 1223 1223 #endif 1224 1224 .align 7 1225 1225 .globl altivec_unavailable_common ··· 1237 1237 bne- 2f 1238 1238 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 1239 1239 #endif 1240 - bl .load_up_altivec 1240 + bl load_up_altivec 1241 1241 b fast_exception_return 1242 1242 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1243 1243 2: /* User process was in a transaction */ 1244 - bl .save_nvgprs 1244 + bl save_nvgprs 1245 1245 DISABLE_INTS 1246 1246 addi r3,r1,STACK_FRAME_OVERHEAD 1247 - bl .altivec_unavailable_tm 1248 - b .ret_from_except 1247 + bl altivec_unavailable_tm 1248 + b ret_from_except 1249 1249 #endif 1250 1250 1: 1251 1251 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 1252 1252 #endif 1253 - bl .save_nvgprs 1253 + bl save_nvgprs 1254 1254 DISABLE_INTS 1255 1255 addi r3,r1,STACK_FRAME_OVERHEAD 1256 - bl .altivec_unavailable_exception 1257 - b .ret_from_except 1256 + bl altivec_unavailable_exception 1257 + b ret_from_except 1258 1258 1259 1259 .align 7 
1260 1260 .globl vsx_unavailable_common ··· 1272 1272 bne- 2f 1273 1273 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) 1274 1274 #endif 1275 - b .load_up_vsx 1275 + b load_up_vsx 1276 1276 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1277 1277 2: /* User process was in a transaction */ 1278 - bl .save_nvgprs 1278 + bl save_nvgprs 1279 1279 DISABLE_INTS 1280 1280 addi r3,r1,STACK_FRAME_OVERHEAD 1281 - bl .vsx_unavailable_tm 1282 - b .ret_from_except 1281 + bl vsx_unavailable_tm 1282 + b ret_from_except 1283 1283 #endif 1284 1284 1: 1285 1285 END_FTR_SECTION_IFSET(CPU_FTR_VSX) 1286 1286 #endif 1287 - bl .save_nvgprs 1287 + bl save_nvgprs 1288 1288 DISABLE_INTS 1289 1289 addi r3,r1,STACK_FRAME_OVERHEAD 1290 - bl .vsx_unavailable_exception 1291 - b .ret_from_except 1290 + bl vsx_unavailable_exception 1291 + b ret_from_except 1292 1292 1293 - STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) 1294 - STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) 1293 + STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception) 1294 + STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception) 1295 1295 1296 1296 .align 7 1297 1297 .globl __end_handlers ··· 1386 1386 machine_check_handle_early: 1387 1387 std r0,GPR0(r1) /* Save r0 */ 1388 1388 EXCEPTION_PROLOG_COMMON_3(0x200) 1389 - bl .save_nvgprs 1389 + bl save_nvgprs 1390 1390 addi r3,r1,STACK_FRAME_OVERHEAD 1391 - bl .machine_check_early 1391 + bl machine_check_early 1392 1392 ld r12,_MSR(r1) 1393 1393 #ifdef CONFIG_PPC_P7_NAP 1394 1394 /* ··· 1408 1408 /* Supervisor state loss */ 1409 1409 li r0,1 1410 1410 stb r0,PACA_NAPSTATELOST(r13) 1411 - 3: bl .machine_check_queue_event 1411 + 3: bl machine_check_queue_event 1412 1412 MACHINE_CHECK_HANDLER_WINDUP 1413 1413 GET_PACA(r13) 1414 1414 ld r1,PACAR1(r13) 1415 - b .power7_enter_nap_mode 1415 + b power7_enter_nap_mode 1416 1416 4: 1417 1417 #endif 1418 1418 /* ··· 1444 1444 
andi. r11,r12,MSR_RI 1445 1445 bne 2f 1446 1446 1: addi r3,r1,STACK_FRAME_OVERHEAD 1447 - bl .unrecoverable_exception 1447 + bl unrecoverable_exception 1448 1448 b 1b 1449 1449 2: 1450 1450 /* ··· 1452 1452 * Queue up the MCE event so that we can log it later, while 1453 1453 * returning from kernel or opal call. 1454 1454 */ 1455 - bl .machine_check_queue_event 1455 + bl machine_check_queue_event 1456 1456 MACHINE_CHECK_HANDLER_WINDUP 1457 1457 rfid 1458 1458 9: ··· 1468 1468 * r3 is saved in paca->slb_r3 1469 1469 * We assume we aren't going to take any exceptions during this procedure. 1470 1470 */ 1471 - _GLOBAL(slb_miss_realmode) 1471 + slb_miss_realmode: 1472 1472 mflr r10 1473 1473 #ifdef CONFIG_RELOCATABLE 1474 1474 mtctr r11 ··· 1477 1477 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1478 1478 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ 1479 1479 1480 - bl .slb_allocate_realmode 1480 + bl slb_allocate_realmode 1481 1481 1482 1482 /* All done -- return from exception. 
*/ 1483 1483 ··· 1517 1517 unrecov_slb: 1518 1518 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) 1519 1519 DISABLE_INTS 1520 - bl .save_nvgprs 1520 + bl save_nvgprs 1521 1521 1: addi r3,r1,STACK_FRAME_OVERHEAD 1522 - bl .unrecoverable_exception 1522 + bl unrecoverable_exception 1523 1523 b 1b 1524 1524 1525 1525 ··· 1536 1536 * Hash table stuff 1537 1537 */ 1538 1538 .align 7 1539 - _STATIC(do_hash_page) 1539 + do_hash_page: 1540 1540 std r3,_DAR(r1) 1541 1541 std r4,_DSISR(r1) 1542 1542 ··· 1573 1573 * 1574 1574 * at return r3 = 0 for success, 1 for page fault, negative for error 1575 1575 */ 1576 - bl .hash_page /* build HPTE if possible */ 1576 + bl hash_page /* build HPTE if possible */ 1577 1577 cmpdi r3,0 /* see if hash_page succeeded */ 1578 1578 1579 1579 /* Success */ ··· 1587 1587 11: ld r4,_DAR(r1) 1588 1588 ld r5,_DSISR(r1) 1589 1589 addi r3,r1,STACK_FRAME_OVERHEAD 1590 - bl .do_page_fault 1590 + bl do_page_fault 1591 1591 cmpdi r3,0 1592 1592 beq+ 12f 1593 - bl .save_nvgprs 1593 + bl save_nvgprs 1594 1594 mr r5,r3 1595 1595 addi r3,r1,STACK_FRAME_OVERHEAD 1596 1596 lwz r4,_DAR(r1) 1597 - bl .bad_page_fault 1598 - b .ret_from_except 1597 + bl bad_page_fault 1598 + b ret_from_except 1599 1599 1600 1600 /* We have a data breakpoint exception - handle it */ 1601 1601 handle_dabr_fault: 1602 - bl .save_nvgprs 1602 + bl save_nvgprs 1603 1603 ld r4,_DAR(r1) 1604 1604 ld r5,_DSISR(r1) 1605 1605 addi r3,r1,STACK_FRAME_OVERHEAD 1606 - bl .do_break 1607 - 12: b .ret_from_except_lite 1606 + bl do_break 1607 + 12: b ret_from_except_lite 1608 1608 1609 1609 1610 1610 /* We have a page fault that hash_page could handle but HV refused 1611 1611 * the PTE insertion 1612 1612 */ 1613 - 13: bl .save_nvgprs 1613 + 13: bl save_nvgprs 1614 1614 mr r5,r3 1615 1615 addi r3,r1,STACK_FRAME_OVERHEAD 1616 1616 ld r4,_DAR(r1) 1617 - bl .low_hash_fault 1618 - b .ret_from_except 1617 + bl low_hash_fault 1618 + b ret_from_except 1619 1619 1620 1620 /* 1621 1621 * We come here as a 
result of a DSI at a point where we don't want ··· 1624 1624 * were soft-disabled. We want to invoke the exception handler for 1625 1625 * the access, or panic if there isn't a handler. 1626 1626 */ 1627 - 77: bl .save_nvgprs 1627 + 77: bl save_nvgprs 1628 1628 mr r4,r3 1629 1629 addi r3,r1,STACK_FRAME_OVERHEAD 1630 1630 li r5,SIGSEGV 1631 - bl .bad_page_fault 1632 - b .ret_from_except 1631 + bl bad_page_fault 1632 + b ret_from_except 1633 1633 1634 1634 /* here we have a segment miss */ 1635 1635 do_ste_alloc: 1636 - bl .ste_allocate /* try to insert stab entry */ 1636 + bl ste_allocate /* try to insert stab entry */ 1637 1637 cmpdi r3,0 1638 1638 bne- handle_page_fault 1639 1639 b fast_exception_return ··· 1646 1646 * We assume (DAR >> 60) == 0xc. 1647 1647 */ 1648 1648 .align 7 1649 - _GLOBAL(do_stab_bolted) 1649 + do_stab_bolted: 1650 1650 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ 1651 1651 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ 1652 1652 mfspr r11,SPRN_DAR /* ea */
+39 -98
arch/powerpc/kernel/ftrace.c
··· 105 105 struct dyn_ftrace *rec, unsigned long addr) 106 106 { 107 107 unsigned int op; 108 - unsigned int jmp[5]; 109 108 unsigned long ptr; 110 109 unsigned long ip = rec->ip; 111 - unsigned long tramp; 112 - int offset; 110 + void *tramp; 113 111 114 112 /* read where this goes */ 115 113 if (probe_kernel_read(&op, (void *)ip, sizeof(int))) ··· 120 122 } 121 123 122 124 /* lets find where the pointer goes */ 123 - tramp = find_bl_target(ip, op); 125 + tramp = (void *)find_bl_target(ip, op); 124 126 125 - /* 126 - * On PPC64 the trampoline looks like: 127 - * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high> 128 - * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low> 129 - * Where the bytes 2,3,6 and 7 make up the 32bit offset 130 - * to the TOC that holds the pointer. 131 - * to jump to. 132 - * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1) 133 - * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12) 134 - * The actually address is 32 bytes from the offset 135 - * into the TOC. 136 - * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12) 137 - */ 127 + pr_devel("ip:%lx jumps to %p", ip, tramp); 138 128 139 - pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); 140 - 141 - /* Find where the trampoline jumps to */ 142 - if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { 143 - printk(KERN_ERR "Failed to read %lx\n", tramp); 144 - return -EFAULT; 145 - } 146 - 147 - pr_devel(" %08x %08x", jmp[0], jmp[1]); 148 - 149 - /* verify that this is what we expect it to be */ 150 - if (((jmp[0] & 0xffff0000) != 0x3d820000) || 151 - ((jmp[1] & 0xffff0000) != 0x398c0000) || 152 - (jmp[2] != 0xf8410028) || 153 - (jmp[3] != 0xe96c0020) || 154 - (jmp[4] != 0xe84c0028)) { 129 + if (!is_module_trampoline(tramp)) { 155 130 printk(KERN_ERR "Not a trampoline\n"); 156 131 return -EINVAL; 157 132 } 158 133 159 - /* The bottom half is signed extended */ 160 - offset = ((unsigned)((unsigned short)jmp[0]) << 16) + 161 - (int)((short)jmp[1]); 162 - 163 - pr_devel(" %x ", offset); 164 - 165 - /* get the address this 
jumps too */ 166 - tramp = mod->arch.toc + offset + 32; 167 - pr_devel("toc: %lx", tramp); 168 - 169 - if (probe_kernel_read(jmp, (void *)tramp, 8)) { 170 - printk(KERN_ERR "Failed to read %lx\n", tramp); 134 + if (module_trampoline_target(mod, tramp, &ptr)) { 135 + printk(KERN_ERR "Failed to get trampoline target\n"); 171 136 return -EFAULT; 172 137 } 173 138 174 - pr_devel(" %08x %08x\n", jmp[0], jmp[1]); 175 - 176 - #ifdef __LITTLE_ENDIAN__ 177 - ptr = ((unsigned long)jmp[1] << 32) + jmp[0]; 178 - #else 179 - ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; 180 - #endif 139 + pr_devel("trampoline target %lx", ptr); 181 140 182 141 /* This should match what was called */ 183 142 if (ptr != ppc_function_entry((void *)addr)) { 184 - printk(KERN_ERR "addr does not match %lx\n", ptr); 143 + printk(KERN_ERR "addr %lx does not match expected %lx\n", 144 + ptr, ppc_function_entry((void *)addr)); 185 145 return -EINVAL; 186 146 } 187 147 188 148 /* 189 - * We want to nop the line, but the next line is 190 - * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1) 191 - * This needs to be turned to a nop too. 192 - */ 193 - if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) 194 - return -EFAULT; 195 - 196 - if (op != 0xe8410028) { 197 - printk(KERN_ERR "Next line is not ld! (%08x)\n", op); 198 - return -EINVAL; 199 - } 200 - 201 - /* 202 - * Milton Miller pointed out that we can not blindly do nops. 203 - * If a task was preempted when calling a trace function, 204 - * the nops will remove the way to restore the TOC in r2 205 - * and the r2 TOC will get corrupted. 206 - */ 207 - 208 - /* 209 - * Replace: 210 - * bl <tramp> <==== will be replaced with "b 1f" 211 - * ld r2,40(r1) 212 - * 1: 149 + * Our original call site looks like: 150 + * 151 + * bl <tramp> 152 + * ld r2,XX(r1) 153 + * 154 + * Milton Miller pointed out that we can not simply nop the branch. 
155 + * If a task was preempted when calling a trace function, the nops 156 + * will remove the way to restore the TOC in r2 and the r2 TOC will 157 + * get corrupted. 158 + * 159 + * Use a b +8 to jump over the load. 213 160 */ 214 161 op = 0x48000008; /* b +8 */ 215 162 ··· 292 349 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 293 350 { 294 351 unsigned int op[2]; 295 - unsigned long ip = rec->ip; 352 + void *ip = (void *)rec->ip; 296 353 297 354 /* read where this goes */ 298 - if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2)) 355 + if (probe_kernel_read(op, ip, sizeof(op))) 299 356 return -EFAULT; 300 357 301 358 /* 302 - * It should be pointing to two nops or 303 - * b +8; ld r2,40(r1) 359 + * We expect to see: 360 + * 361 + * b +8 362 + * ld r2,XX(r1) 363 + * 364 + * The load offset is different depending on the ABI. For simplicity 365 + * just mask it out when doing the compare. 304 366 */ 305 - if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) && 306 - ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) { 307 - printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]); 367 + if ((op[0] != 0x48000008) || ((op[1] & 0xffff00000) != 0xe8410000)) { 368 + printk(KERN_ERR "Unexpected call sequence: %x %x\n", 369 + op[0], op[1]); 308 370 return -EINVAL; 309 371 } 310 372 ··· 319 371 return -EINVAL; 320 372 } 321 373 322 - /* create the branch to the trampoline */ 323 - op[0] = create_branch((unsigned int *)ip, 324 - rec->arch.mod->arch.tramp, BRANCH_SET_LINK); 325 - if (!op[0]) { 326 - printk(KERN_ERR "REL24 out of range!\n"); 374 + /* Ensure branch is within 24 bits */ 375 + if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { 376 + printk(KERN_ERR "Branch out of range"); 327 377 return -EINVAL; 328 378 } 329 379 330 - /* ld r2,40(r1) */ 331 - op[1] = 0xe8410028; 332 - 333 - pr_devel("write to %lx\n", rec->ip); 334 - 335 - if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) 336 - return -EPERM; 337 - 
338 - flush_icache_range(ip, ip + 8); 380 + if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { 381 + printk(KERN_ERR "REL24 out of range!\n"); 382 + return -EINVAL; 383 + } 339 384 340 385 return 0; 341 386 }
+59 -58
arch/powerpc/kernel/head_64.S
··· 70 70 /* NOP this out unconditionally */ 71 71 BEGIN_FTR_SECTION 72 72 FIXUP_ENDIAN 73 - b .__start_initialization_multiplatform 73 + b __start_initialization_multiplatform 74 74 END_FTR_SECTION(0, 1) 75 75 76 76 /* Catch branch to 0 in real mode */ 77 77 trap 78 78 79 - /* Secondary processors spin on this value until it becomes nonzero. 80 - * When it does it contains the real address of the descriptor 81 - * of the function that the cpu should jump to to continue 82 - * initialization. 79 + /* Secondary processors spin on this value until it becomes non-zero. 80 + * When non-zero, it contains the real address of the function the cpu 81 + * should jump to. 83 82 */ 84 83 .balign 8 85 84 .globl __secondary_hold_spinloop ··· 139 140 tovirt(r26,r26) 140 141 #endif 141 142 /* All secondary cpus wait here until told to start. */ 142 - 100: ld r4,__secondary_hold_spinloop-_stext(r26) 143 - cmpdi 0,r4,0 143 + 100: ld r12,__secondary_hold_spinloop-_stext(r26) 144 + cmpdi 0,r12,0 144 145 beq 100b 145 146 146 147 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) 147 148 #ifdef CONFIG_PPC_BOOK3E 148 - tovirt(r4,r4) 149 + tovirt(r12,r12) 149 150 #endif 150 - ld r4,0(r4) /* deref function descriptor */ 151 - mtctr r4 151 + mtctr r12 152 152 mr r3,r24 153 153 /* 154 154 * it may be the case that other platforms have r4 right to ··· 184 186 mr r24,r3 185 187 186 188 /* turn on 64-bit mode */ 187 - bl .enable_64b_mode 189 + bl enable_64b_mode 188 190 189 191 /* get a valid TOC pointer, wherever we're mapped at */ 190 - bl .relative_toc 192 + bl relative_toc 191 193 tovirt(r2,r2) 192 194 193 195 #ifdef CONFIG_PPC_BOOK3E 194 196 /* Book3E initialization */ 195 197 mr r3,r24 196 - bl .book3e_secondary_thread_init 198 + bl book3e_secondary_thread_init 197 199 #endif 198 200 b generic_secondary_common_init 199 201 ··· 212 214 mr r25,r4 213 215 214 216 /* turn on 64-bit mode */ 215 - bl .enable_64b_mode 217 + bl enable_64b_mode 216 218 217 219 /* get a valid TOC pointer, wherever 
we're mapped at */ 218 - bl .relative_toc 220 + bl relative_toc 219 221 tovirt(r2,r2) 220 222 221 223 #ifdef CONFIG_PPC_BOOK3E 222 224 /* Book3E initialization */ 223 225 mr r3,r24 224 226 mr r4,r25 225 - bl .book3e_secondary_core_init 227 + bl book3e_secondary_core_init 226 228 #endif 227 229 228 230 generic_secondary_common_init: ··· 234 236 ld r13,0(r13) /* Get base vaddr of paca array */ 235 237 #ifndef CONFIG_SMP 236 238 addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ 237 - b .kexec_wait /* wait for next kernel if !SMP */ 239 + b kexec_wait /* wait for next kernel if !SMP */ 238 240 #else 239 241 LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ 240 242 lwz r7,0(r7) /* also the max paca allocated */ ··· 248 250 blt 1b 249 251 250 252 mr r3,r24 /* not found, copy phys to r3 */ 251 - b .kexec_wait /* next kernel might do better */ 253 + b kexec_wait /* next kernel might do better */ 252 254 253 255 2: SET_PACA(r13) 254 256 #ifdef CONFIG_PPC_BOOK3E ··· 262 264 /* See if we need to call a cpu state restore handler */ 263 265 LOAD_REG_ADDR(r23, cur_cpu_spec) 264 266 ld r23,0(r23) 265 - ld r23,CPU_SPEC_RESTORE(r23) 266 - cmpdi 0,r23,0 267 + ld r12,CPU_SPEC_RESTORE(r23) 268 + cmpdi 0,r12,0 267 269 beq 3f 268 - ld r23,0(r23) 269 - mtctr r23 270 + #if !defined(_CALL_ELF) || _CALL_ELF != 2 271 + ld r12,0(r12) 272 + #endif 273 + mtctr r12 270 274 bctrl 271 275 272 276 3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */ ··· 299 299 * Assumes we're mapped EA == RA if the MMU is on. 300 300 */ 301 301 #ifdef CONFIG_PPC_BOOK3S 302 - _STATIC(__mmu_off) 302 + __mmu_off: 303 303 mfmsr r3 304 304 andi. 
r0,r3,MSR_IR|MSR_DR 305 305 beqlr ··· 324 324 * DT block, r4 is a physical pointer to the kernel itself 325 325 * 326 326 */ 327 - _GLOBAL(__start_initialization_multiplatform) 327 + __start_initialization_multiplatform: 328 328 /* Make sure we are running in 64 bits mode */ 329 - bl .enable_64b_mode 329 + bl enable_64b_mode 330 330 331 331 /* Get TOC pointer (current runtime address) */ 332 - bl .relative_toc 332 + bl relative_toc 333 333 334 334 /* find out where we are now */ 335 335 bcl 20,31,$+4 ··· 342 342 */ 343 343 cmpldi cr0,r5,0 344 344 beq 1f 345 - b .__boot_from_prom /* yes -> prom */ 345 + b __boot_from_prom /* yes -> prom */ 346 346 1: 347 347 /* Save parameters */ 348 348 mr r31,r3 ··· 354 354 #endif 355 355 356 356 #ifdef CONFIG_PPC_BOOK3E 357 - bl .start_initialization_book3e 358 - b .__after_prom_start 357 + bl start_initialization_book3e 358 + b __after_prom_start 359 359 #else 360 360 /* Setup some critical 970 SPRs before switching MMU off */ 361 361 mfspr r0,SPRN_PVR ··· 368 368 beq 1f 369 369 cmpwi r0,0x45 /* 970GX */ 370 370 bne 2f 371 - 1: bl .__cpu_preinit_ppc970 371 + 1: bl __cpu_preinit_ppc970 372 372 2: 373 373 374 374 /* Switch off MMU if not already off */ 375 - bl .__mmu_off 376 - b .__after_prom_start 375 + bl __mmu_off 376 + b __after_prom_start 377 377 #endif /* CONFIG_PPC_BOOK3E */ 378 378 379 - _INIT_STATIC(__boot_from_prom) 379 + __boot_from_prom: 380 380 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE 381 381 /* Save parameters */ 382 382 mr r31,r3 ··· 395 395 #ifdef CONFIG_RELOCATABLE 396 396 /* Relocate code for where we are now */ 397 397 mr r3,r26 398 - bl .relocate 398 + bl relocate 399 399 #endif 400 400 401 401 /* Restore parameters */ ··· 407 407 408 408 /* Do all of the interaction with OF client interface */ 409 409 mr r8,r26 410 - bl .prom_init 410 + bl prom_init 411 411 #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ 412 412 413 413 /* We never return. 
We also hit that trap if trying to boot 414 414 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ 415 415 trap 416 416 417 - _STATIC(__after_prom_start) 417 + __after_prom_start: 418 418 #ifdef CONFIG_RELOCATABLE 419 419 /* process relocations for the final address of the kernel */ 420 420 lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ ··· 424 424 bne 1f 425 425 add r25,r25,r26 426 426 1: mr r3,r25 427 - bl .relocate 427 + bl relocate 428 428 #endif 429 429 430 430 /* ··· 464 464 lis r5,(copy_to_here - _stext)@ha 465 465 addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ 466 466 467 - bl .copy_and_flush /* copy the first n bytes */ 467 + bl copy_and_flush /* copy the first n bytes */ 468 468 /* this includes the code being */ 469 469 /* executed here. */ 470 470 addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ 471 - addi r8,r8,(4f - _stext)@l /* that we just made */ 472 - mtctr r8 471 + addi r12,r8,(4f - _stext)@l /* that we just made */ 472 + mtctr r12 473 473 bctr 474 474 475 475 .balign 8 ··· 478 478 4: /* Now copy the rest of the kernel up to _end */ 479 479 addis r5,r26,(p_end - _stext)@ha 480 480 ld r5,(p_end - _stext)@l(r5) /* get _end */ 481 - 5: bl .copy_and_flush /* copy the rest */ 481 + 5: bl copy_and_flush /* copy the rest */ 482 482 483 - 9: b .start_here_multiplatform 483 + 9: b start_here_multiplatform 484 484 485 485 /* 486 486 * Copy routine used to copy the kernel to start at physical address 0 ··· 544 544 545 545 _GLOBAL(pmac_secondary_start) 546 546 /* turn on 64-bit mode */ 547 - bl .enable_64b_mode 547 + bl enable_64b_mode 548 548 549 549 li r0,0 550 550 mfspr r3,SPRN_HID4 ··· 556 556 slbia 557 557 558 558 /* get TOC pointer (real address) */ 559 - bl .relative_toc 559 + bl relative_toc 560 560 tovirt(r2,r2) 561 561 562 562 /* Copy some CPU settings from CPU 0 */ 563 - bl .__restore_cpu_ppc970 563 + bl __restore_cpu_ppc970 564 564 565 565 /* pSeries do that early though I don't 
think we really need it */ 566 566 mfmsr r3 ··· 619 619 std r14,PACAKSAVE(r13) 620 620 621 621 /* Do early setup for that CPU (stab, slb, hash table pointer) */ 622 - bl .early_setup_secondary 622 + bl early_setup_secondary 623 623 624 624 /* 625 625 * setup the new stack pointer, but *don't* use this until ··· 639 639 stb r0,PACAIRQHAPPENED(r13) 640 640 641 641 /* enable MMU and jump to start_secondary */ 642 - LOAD_REG_ADDR(r3, .start_secondary_prolog) 642 + LOAD_REG_ADDR(r3, start_secondary_prolog) 643 643 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) 644 644 645 645 mtspr SPRN_SRR0,r3 ··· 652 652 * zero the stack back-chain pointer and get the TOC virtual address 653 653 * before going into C code. 654 654 */ 655 - _GLOBAL(start_secondary_prolog) 655 + start_secondary_prolog: 656 656 ld r2,PACATOC(r13) 657 657 li r3,0 658 658 std r3,0(r1) /* Zero the stack frame pointer */ 659 - bl .start_secondary 659 + bl start_secondary 660 660 b . 661 661 /* 662 662 * Reset stack pointer and call start_secondary ··· 667 667 ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ 668 668 li r3,0 669 669 std r3,0(r1) /* Zero the stack frame pointer */ 670 - bl .start_secondary 670 + bl start_secondary 671 671 b . 672 672 #endif 673 673 674 674 /* 675 675 * This subroutine clobbers r11 and r12 676 676 */ 677 - _GLOBAL(enable_64b_mode) 677 + enable_64b_mode: 678 678 mfmsr r11 /* grab the current MSR */ 679 679 #ifdef CONFIG_PPC_BOOK3E 680 680 oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ ··· 715 715 /* 716 716 * This is where the main kernel code starts. 717 717 */ 718 - _INIT_STATIC(start_here_multiplatform) 718 + start_here_multiplatform: 719 719 /* set up the TOC */ 720 - bl .relative_toc 720 + bl relative_toc 721 721 tovirt(r2,r2) 722 722 723 723 /* Clear out the BSS. 
It may have been done in prom_init, ··· 776 776 777 777 /* Restore parameters passed from prom_init/kexec */ 778 778 mr r3,r31 779 - bl .early_setup /* also sets r13 and SPRG_PACA */ 779 + bl early_setup /* also sets r13 and SPRG_PACA */ 780 780 781 - LOAD_REG_ADDR(r3, .start_here_common) 781 + LOAD_REG_ADDR(r3, start_here_common) 782 782 ld r4,PACAKMSR(r13) 783 783 mtspr SPRN_SRR0,r3 784 784 mtspr SPRN_SRR1,r4 ··· 786 786 b . /* prevent speculative execution */ 787 787 788 788 /* This is where all platforms converge execution */ 789 - _INIT_GLOBAL(start_here_common) 789 + 790 + start_here_common: 790 791 /* relocation is on at this point */ 791 792 std r1,PACAKSAVE(r13) 792 793 ··· 795 794 ld r2,PACATOC(r13) 796 795 797 796 /* Do more system initializations in virtual mode */ 798 - bl .setup_system 797 + bl setup_system 799 798 800 799 /* Mark interrupts soft and hard disabled (they might be enabled 801 800 * in the PACA when doing hotplug) ··· 806 805 stb r0,PACAIRQHAPPENED(r13) 807 806 808 807 /* Generic kernel entry */ 809 - bl .start_kernel 808 + bl start_kernel 810 809 811 810 /* Not reached */ 812 811 BUG_OPCODE
+1 -1
arch/powerpc/kernel/idle_book3e.S
··· 43 43 */ 44 44 #ifdef CONFIG_TRACE_IRQFLAGS 45 45 stdu r1,-128(r1) 46 - bl .trace_hardirqs_on 46 + bl trace_hardirqs_on 47 47 addi r1,r1,128 48 48 #endif 49 49 li r0,1
+1 -1
arch/powerpc/kernel/idle_power4.S
··· 46 46 mflr r0 47 47 std r0,16(r1) 48 48 stdu r1,-128(r1) 49 - bl .trace_hardirqs_on 49 + bl trace_hardirqs_on 50 50 addi r1,r1,128 51 51 ld r0,16(r1) 52 52 mtlr r0
+2 -2
arch/powerpc/kernel/idle_power7.S
··· 58 58 /* Make sure FPU, VSX etc... are flushed as we may lose 59 59 * state when going to nap mode 60 60 */ 61 - bl .discard_lazy_cpu_state 61 + bl discard_lazy_cpu_state 62 62 #endif /* CONFIG_SMP */ 63 63 64 64 /* Hard disable interrupts */ ··· 168 168 _GLOBAL(power7_wakeup_noloss) 169 169 lbz r0,PACA_NAPSTATELOST(r13) 170 170 cmpwi r0,0 171 - bne .power7_wakeup_loss 171 + bne power7_wakeup_loss 172 172 ld r1,PACAR1(r13) 173 173 ld r4,_MSR(r1) 174 174 ld r5,_NIP(r1)
+39 -7
arch/powerpc/kernel/misc_64.S
··· 34 34 std r0,16(r1) 35 35 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) 36 36 mr r1,r3 37 - bl .__do_softirq 37 + bl __do_softirq 38 38 ld r1,0(r1) 39 39 ld r0,16(r1) 40 40 mtlr r0 ··· 45 45 std r0,16(r1) 46 46 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) 47 47 mr r1,r4 48 - bl .__do_irq 48 + bl __do_irq 49 49 ld r1,0(r1) 50 50 ld r0,16(r1) 51 51 mtlr r0 ··· 506 506 stb r4,PACAKEXECSTATE(r13) 507 507 SYNC 508 508 509 - b .kexec_wait 509 + b kexec_wait 510 510 511 511 /* 512 512 * switch to real mode (turn mmu off) ··· 576 576 577 577 /* copy dest pages, flush whole dest image */ 578 578 mr r3,r29 579 - bl .kexec_copy_flush /* (image) */ 579 + bl kexec_copy_flush /* (image) */ 580 580 581 581 /* turn off mmu */ 582 582 bl real_mode ··· 586 586 mr r4,r30 /* start, aka phys mem offset */ 587 587 li r5,0x100 588 588 li r6,0 589 - bl .copy_and_flush /* (dest, src, copy limit, start offset) */ 589 + bl copy_and_flush /* (dest, src, copy limit, start offset) */ 590 590 1: /* assume normal blr return */ 591 591 592 592 /* release other cpus to the new kernel secondary start at 0x60 */ ··· 595 595 stw r6,kexec_flag-1b(5) 596 596 597 597 /* clear out hardware hash page table and tlb */ 598 - ld r5,0(r27) /* deref function descriptor */ 599 - mtctr r5 598 + #if !defined(_CALL_ELF) || _CALL_ELF != 2 599 + ld r12,0(r27) /* deref function descriptor */ 600 + #else 601 + mr r12,r27 602 + #endif 603 + mtctr r12 600 604 bctrl /* ppc_md.hpte_clear_all(void); */ 601 605 602 606 /* ··· 634 630 li r5,0 635 631 blr /* image->start(physid, image->start, 0); */ 636 632 #endif /* CONFIG_KEXEC */ 633 + 634 + #ifdef CONFIG_MODULES 635 + #if defined(_CALL_ELF) && _CALL_ELF == 2 636 + 637 + #ifdef CONFIG_MODVERSIONS 638 + .weak __crc_TOC. 639 + .section "___kcrctab+TOC.","a" 640 + .globl __kcrctab_TOC. 641 + __kcrctab_TOC.: 642 + .llong __crc_TOC. 643 + #endif 644 + 645 + /* 646 + * Export a fake .TOC. since both modpost and depmod will complain otherwise. 
647 + * Both modpost and depmod strip the leading . so we do the same here. 648 + */ 649 + .section "__ksymtab_strings","a" 650 + __kstrtab_TOC.: 651 + .asciz "TOC." 652 + 653 + .section "___ksymtab+TOC.","a" 654 + /* This symbol name is important: it's used by modpost to find exported syms */ 655 + .globl __ksymtab_TOC. 656 + __ksymtab_TOC.: 657 + .llong 0 /* .value */ 658 + .llong __kstrtab_TOC. 659 + #endif /* ELFv2 */ 660 + #endif /* MODULES */
+225 -54
arch/powerpc/kernel/module_64.c
··· 22 22 #include <linux/vmalloc.h> 23 23 #include <linux/ftrace.h> 24 24 #include <linux/bug.h> 25 + #include <linux/uaccess.h> 25 26 #include <asm/module.h> 26 27 #include <asm/firmware.h> 27 28 #include <asm/code-patching.h> ··· 42 41 #define DEBUGP(fmt , ...) 43 42 #endif 44 43 44 + #if defined(_CALL_ELF) && _CALL_ELF == 2 45 + #define R2_STACK_OFFSET 24 46 + 47 + /* An address is simply the address of the function. */ 48 + typedef unsigned long func_desc_t; 49 + 50 + static func_desc_t func_desc(unsigned long addr) 51 + { 52 + return addr; 53 + } 54 + static unsigned long func_addr(unsigned long addr) 55 + { 56 + return addr; 57 + } 58 + static unsigned long stub_func_addr(func_desc_t func) 59 + { 60 + return func; 61 + } 62 + 63 + /* PowerPC64 specific values for the Elf64_Sym st_other field. */ 64 + #define STO_PPC64_LOCAL_BIT 5 65 + #define STO_PPC64_LOCAL_MASK (7 << STO_PPC64_LOCAL_BIT) 66 + #define PPC64_LOCAL_ENTRY_OFFSET(other) \ 67 + (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2) 68 + 69 + static unsigned int local_entry_offset(const Elf64_Sym *sym) 70 + { 71 + /* sym->st_other indicates offset to local entry point 72 + * (otherwise it will assume r12 is the address of the start 73 + * of function and try to derive r2 from it). */ 74 + return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other); 75 + } 76 + #else 77 + #define R2_STACK_OFFSET 40 78 + 79 + /* An address is address of the OPD entry, which contains address of fn. 
*/ 80 + typedef struct ppc64_opd_entry func_desc_t; 81 + 82 + static func_desc_t func_desc(unsigned long addr) 83 + { 84 + return *(struct ppc64_opd_entry *)addr; 85 + } 86 + static unsigned long func_addr(unsigned long addr) 87 + { 88 + return func_desc(addr).funcaddr; 89 + } 90 + static unsigned long stub_func_addr(func_desc_t func) 91 + { 92 + return func.funcaddr; 93 + } 94 + static unsigned int local_entry_offset(const Elf64_Sym *sym) 95 + { 96 + return 0; 97 + } 98 + #endif 99 + 45 100 /* Like PPC32, we need little trampolines to do > 24-bit jumps (into 46 101 the kernel itself). But on PPC64, these need to be used for every 47 102 jump, actually, to reset r2 (TOC+0x8000). */ 48 103 struct ppc64_stub_entry 49 104 { 50 - /* 28 byte jump instruction sequence (7 instructions) */ 51 - unsigned char jump[28]; 52 - unsigned char unused[4]; 105 + /* 28 byte jump instruction sequence (7 instructions). We only 106 + * need 6 instructions on ABIv2 but we always allocate 7 so 107 + * so we don't have to modify the trampoline load instruction. */ 108 + u32 jump[7]; 109 + u32 unused; 53 110 /* Data for the above code */ 54 - struct ppc64_opd_entry opd; 111 + func_desc_t funcdata; 55 112 }; 56 113 57 - /* We use a stub to fix up r2 (TOC ptr) and to jump to the (external) 58 - function which may be more than 24-bits away. We could simply 59 - patch the new r2 value and function pointer into the stub, but it's 60 - significantly shorter to put these values at the end of the stub 61 - code, and patch the stub address (32-bits relative to the TOC ptr, 62 - r2) into the stub. */ 63 - static struct ppc64_stub_entry ppc64_stub = 64 - { .jump = { 65 - #ifdef __LITTLE_ENDIAN__ 66 - 0x00, 0x00, 0x82, 0x3d, /* addis r12,r2, <high> */ 67 - 0x00, 0x00, 0x8c, 0x39, /* addi r12,r12, <low> */ 114 + /* 115 + * PPC64 uses 24 bit jumps, but we need to jump into other modules or 116 + * the kernel which may be further. So we jump to a stub. 
117 + * 118 + * For ELFv1 we need to use this to set up the new r2 value (aka TOC 119 + * pointer). For ELFv2 it's the callee's responsibility to set up the 120 + * new r2, but for both we need to save the old r2. 121 + * 122 + * We could simply patch the new r2 value and function pointer into 123 + * the stub, but it's significantly shorter to put these values at the 124 + * end of the stub code, and patch the stub address (32-bits relative 125 + * to the TOC ptr, r2) into the stub. 126 + */ 127 + 128 + static u32 ppc64_stub_insns[] = { 129 + 0x3d620000, /* addis r11,r2, <high> */ 130 + 0x396b0000, /* addi r11,r11, <low> */ 68 131 /* Save current r2 value in magic place on the stack. */ 69 - 0x28, 0x00, 0x41, 0xf8, /* std r2,40(r1) */ 70 - 0x20, 0x00, 0x6c, 0xe9, /* ld r11,32(r12) */ 71 - 0x28, 0x00, 0x4c, 0xe8, /* ld r2,40(r12) */ 72 - 0xa6, 0x03, 0x69, 0x7d, /* mtctr r11 */ 73 - 0x20, 0x04, 0x80, 0x4e /* bctr */ 74 - #else 75 - 0x3d, 0x82, 0x00, 0x00, /* addis r12,r2, <high> */ 76 - 0x39, 0x8c, 0x00, 0x00, /* addi r12,r12, <low> */ 77 - /* Save current r2 value in magic place on the stack. 
*/ 78 - 0xf8, 0x41, 0x00, 0x28, /* std r2,40(r1) */ 79 - 0xe9, 0x6c, 0x00, 0x20, /* ld r11,32(r12) */ 80 - 0xe8, 0x4c, 0x00, 0x28, /* ld r2,40(r12) */ 81 - 0x7d, 0x69, 0x03, 0xa6, /* mtctr r11 */ 82 - 0x4e, 0x80, 0x04, 0x20 /* bctr */ 132 + 0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */ 133 + 0xe98b0020, /* ld r12,32(r11) */ 134 + #if !defined(_CALL_ELF) || _CALL_ELF != 2 135 + /* Set up new r2 from function descriptor */ 136 + 0xe84b0026, /* ld r2,40(r11) */ 83 137 #endif 84 - } }; 138 + 0x7d8903a6, /* mtctr r12 */ 139 + 0x4e800420 /* bctr */ 140 + }; 141 + 142 + #ifdef CONFIG_DYNAMIC_FTRACE 143 + 144 + static u32 ppc64_stub_mask[] = { 145 + 0xffff0000, 146 + 0xffff0000, 147 + 0xffffffff, 148 + 0xffffffff, 149 + #if !defined(_CALL_ELF) || _CALL_ELF != 2 150 + 0xffffffff, 151 + #endif 152 + 0xffffffff, 153 + 0xffffffff 154 + }; 155 + 156 + bool is_module_trampoline(u32 *p) 157 + { 158 + unsigned int i; 159 + u32 insns[ARRAY_SIZE(ppc64_stub_insns)]; 160 + 161 + BUILD_BUG_ON(sizeof(ppc64_stub_insns) != sizeof(ppc64_stub_mask)); 162 + 163 + if (probe_kernel_read(insns, p, sizeof(insns))) 164 + return -EFAULT; 165 + 166 + for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) { 167 + u32 insna = insns[i]; 168 + u32 insnb = ppc64_stub_insns[i]; 169 + u32 mask = ppc64_stub_mask[i]; 170 + 171 + if ((insna & mask) != (insnb & mask)) 172 + return false; 173 + } 174 + 175 + return true; 176 + } 177 + 178 + int module_trampoline_target(struct module *mod, u32 *trampoline, 179 + unsigned long *target) 180 + { 181 + u32 buf[2]; 182 + u16 upper, lower; 183 + long offset; 184 + void *toc_entry; 185 + 186 + if (probe_kernel_read(buf, trampoline, sizeof(buf))) 187 + return -EFAULT; 188 + 189 + upper = buf[0] & 0xffff; 190 + lower = buf[1] & 0xffff; 191 + 192 + /* perform the addis/addi, both signed */ 193 + offset = ((short)upper << 16) + (short)lower; 194 + 195 + /* 196 + * Now get the address this trampoline jumps to. 
This 197 + * is always 32 bytes into our trampoline stub. 198 + */ 199 + toc_entry = (void *)mod->arch.toc + offset + 32; 200 + 201 + if (probe_kernel_read(target, toc_entry, sizeof(*target))) 202 + return -EFAULT; 203 + 204 + return 0; 205 + } 206 + 207 + #endif 85 208 86 209 /* Count how many different 24-bit relocations (different symbol, 87 210 different addend) */ ··· 308 183 return relocs * sizeof(struct ppc64_stub_entry); 309 184 } 310 185 186 + /* Still needed for ELFv2, for .TOC. */ 311 187 static void dedotify_versions(struct modversion_info *vers, 312 188 unsigned long size) 313 189 { ··· 319 193 memmove(vers->name, vers->name+1, strlen(vers->name)); 320 194 } 321 195 322 - /* Undefined symbols which refer to .funcname, hack to funcname */ 196 + /* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */ 323 197 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) 324 198 { 325 199 unsigned int i; ··· 331 205 memmove(name, name+1, strlen(name)); 332 206 } 333 207 } 208 + } 209 + 210 + static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs, 211 + const char *strtab, 212 + unsigned int symindex) 213 + { 214 + unsigned int i, numsyms; 215 + Elf64_Sym *syms; 216 + 217 + syms = (Elf64_Sym *)sechdrs[symindex].sh_addr; 218 + numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym); 219 + 220 + for (i = 1; i < numsyms; i++) { 221 + if (syms[i].st_shndx == SHN_UNDEF 222 + && strcmp(strtab + syms[i].st_name, "TOC.") == 0) 223 + return &syms[i]; 224 + } 225 + return NULL; 334 226 } 335 227 336 228 int module_frob_arch_sections(Elf64_Ehdr *hdr, ··· 415 271 /* Patch stub to reference function and correct r2 value. 
*/ 416 272 static inline int create_stub(Elf64_Shdr *sechdrs, 417 273 struct ppc64_stub_entry *entry, 418 - struct ppc64_opd_entry *opd, 274 + unsigned long addr, 419 275 struct module *me) 420 276 { 421 - Elf64_Half *loc1, *loc2; 422 277 long reladdr; 423 278 424 - *entry = ppc64_stub; 425 - 426 - #ifdef __LITTLE_ENDIAN__ 427 - loc1 = (Elf64_Half *)&entry->jump[0]; 428 - loc2 = (Elf64_Half *)&entry->jump[4]; 429 - #else 430 - loc1 = (Elf64_Half *)&entry->jump[2]; 431 - loc2 = (Elf64_Half *)&entry->jump[6]; 432 - #endif 279 + memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns)); 433 280 434 281 /* Stub uses address relative to r2. */ 435 282 reladdr = (unsigned long)entry - my_r2(sechdrs, me); ··· 431 296 } 432 297 DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr); 433 298 434 - *loc1 = PPC_HA(reladdr); 435 - *loc2 = PPC_LO(reladdr); 436 - entry->opd.funcaddr = opd->funcaddr; 437 - entry->opd.r2 = opd->r2; 299 + entry->jump[0] |= PPC_HA(reladdr); 300 + entry->jump[1] |= PPC_LO(reladdr); 301 + entry->funcdata = func_desc(addr); 438 302 return 1; 439 303 } 440 304 441 - /* Create stub to jump to function described in this OPD: we need the 305 + /* Create stub to jump to function described in this OPD/ptr: we need the 442 306 stub to set up the TOC ptr (r2) for the function. */ 443 307 static unsigned long stub_for_addr(Elf64_Shdr *sechdrs, 444 - unsigned long opdaddr, 308 + unsigned long addr, 445 309 struct module *me) 446 310 { 447 311 struct ppc64_stub_entry *stubs; 448 - struct ppc64_opd_entry *opd = (void *)opdaddr; 449 312 unsigned int i, num_stubs; 450 313 451 314 num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs); 452 315 453 316 /* Find this stub, or if that fails, the next avail. 
entry */ 454 317 stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr; 455 - for (i = 0; stubs[i].opd.funcaddr; i++) { 318 + for (i = 0; stub_func_addr(stubs[i].funcdata); i++) { 456 319 BUG_ON(i >= num_stubs); 457 320 458 - if (stubs[i].opd.funcaddr == opd->funcaddr) 321 + if (stub_func_addr(stubs[i].funcdata) == func_addr(addr)) 459 322 return (unsigned long)&stubs[i]; 460 323 } 461 324 462 - if (!create_stub(sechdrs, &stubs[i], opd, me)) 325 + if (!create_stub(sechdrs, &stubs[i], addr, me)) 463 326 return 0; 464 327 465 328 return (unsigned long)&stubs[i]; ··· 472 339 me->name, *instruction); 473 340 return 0; 474 341 } 475 - *instruction = 0xe8410028; /* ld r2,40(r1) */ 342 + /* ld r2,R2_STACK_OFFSET(r1) */ 343 + *instruction = 0xe8410000 | R2_STACK_OFFSET; 476 344 return 1; 477 345 } 478 346 ··· 491 357 492 358 DEBUGP("Applying ADD relocate section %u to %u\n", relsec, 493 359 sechdrs[relsec].sh_info); 360 + 361 + /* First time we're called, we can fix up .TOC. */ 362 + if (!me->arch.toc_fixed) { 363 + sym = find_dot_toc(sechdrs, strtab, symindex); 364 + /* It's theoretically possible that a module doesn't want a 365 + * .TOC. so don't fail it just for that. */ 366 + if (sym) 367 + sym->st_value = my_r2(sechdrs, me); 368 + me->arch.toc_fixed = true; 369 + } 370 + 494 371 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { 495 372 /* This is where to make the change */ 496 373 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr ··· 598 453 return -ENOENT; 599 454 if (!restore_r2((u32 *)location + 1, me)) 600 455 return -ENOEXEC; 601 - } 456 + } else 457 + value += local_entry_offset(sym); 602 458 603 459 /* Convert value to relative */ 604 460 value -= (unsigned long)location; ··· 618 472 case R_PPC64_REL64: 619 473 /* 64 bits relative (used by features fixups) */ 620 474 *location = value - (unsigned long)location; 475 + break; 476 + 477 + case R_PPC64_TOCSAVE: 478 + /* 479 + * Marker reloc indicates we don't have to save r2. 
480 + * That would only save us one instruction, so ignore 481 + * it. 482 + */ 483 + break; 484 + 485 + case R_PPC64_REL16_HA: 486 + /* Subtract location pointer */ 487 + value -= (unsigned long)location; 488 + value = ((value + 0x8000) >> 16); 489 + *((uint16_t *) location) 490 + = (*((uint16_t *) location) & ~0xffff) 491 + | (value & 0xffff); 492 + break; 493 + 494 + case R_PPC64_REL16_LO: 495 + /* Subtract location pointer */ 496 + value -= (unsigned long)location; 497 + *((uint16_t *) location) 498 + = (*((uint16_t *) location) & ~0xffff) 499 + | (value & 0xffff); 621 500 break; 622 501 623 502 default:
+5 -12
arch/powerpc/kernel/process.c
··· 54 54 #ifdef CONFIG_PPC64 55 55 #include <asm/firmware.h> 56 56 #endif 57 + #include <asm/code-patching.h> 57 58 #include <linux/kprobes.h> 58 59 #include <linux/kdebug.h> 59 60 ··· 1109 1108 struct thread_info *ti = (void *)task_stack_page(p); 1110 1109 memset(childregs, 0, sizeof(struct pt_regs)); 1111 1110 childregs->gpr[1] = sp + sizeof(struct pt_regs); 1112 - childregs->gpr[14] = usp; /* function */ 1111 + /* function */ 1112 + if (usp) 1113 + childregs->gpr[14] = ppc_function_entry((void *)usp); 1113 1114 #ifdef CONFIG_PPC64 1114 1115 clear_tsk_thread_flag(p, TIF_32BIT); 1115 1116 childregs->softe = 1; ··· 1190 1187 if (cpu_has_feature(CPU_FTR_HAS_PPR)) 1191 1188 p->thread.ppr = INIT_PPR; 1192 1189 #endif 1193 - /* 1194 - * The PPC64 ABI makes use of a TOC to contain function 1195 - * pointers. The function (ret_from_except) is actually a pointer 1196 - * to the TOC entry. The first entry is a pointer to the actual 1197 - * function. 1198 - */ 1199 - #ifdef CONFIG_PPC64 1200 - kregs->nip = *((unsigned long *)f); 1201 - #else 1202 - kregs->nip = (unsigned long)f; 1203 - #endif 1190 + kregs->nip = ppc_function_entry(f); 1204 1191 return 0; 1205 1192 } 1206 1193
+1 -1
arch/powerpc/kernel/prom_init_check.sh
··· 23 23 reloc_got2 kernstart_addr memstart_addr linux_banner _stext 24 24 opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry 25 25 boot_command_line __prom_init_toc_start __prom_init_toc_end 26 - btext_setup_display" 26 + btext_setup_display TOC." 27 27 28 28 NM="$1" 29 29 OBJ="$2"
+1 -1
arch/powerpc/kernel/setup_64.c
··· 341 341 342 342 ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop 343 343 - PHYSICAL_START); 344 - *ptr = __pa(generic_secondary_smp_init); 344 + *ptr = ppc_function_entry(generic_secondary_smp_init); 345 345 346 346 /* And wait a bit for them to catch up */ 347 347 for (i = 0; i < 100000; i++) {
+11 -7
arch/powerpc/kernel/systbl.S
··· 17 17 #include <asm/ppc_asm.h> 18 18 19 19 #ifdef CONFIG_PPC64 20 - #define SYSCALL(func) .llong .sys_##func,.sys_##func 21 - #define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func 22 - #define PPC_SYS(func) .llong .ppc_##func,.ppc_##func 23 - #define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall 24 - #define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func 25 - #define SYSX(f, f3264, f32) .llong .f,.f3264 20 + #define SYSCALL(func) .llong DOTSYM(sys_##func),DOTSYM(sys_##func) 21 + #define COMPAT_SYS(func) .llong DOTSYM(sys_##func),DOTSYM(compat_sys_##func) 22 + #define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) 23 + #define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) 24 + #define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) 25 + #define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) 26 26 #else 27 27 #define SYSCALL(func) .long sys_##func 28 28 #define COMPAT_SYS(func) .long sys_##func ··· 36 36 #define PPC_SYS_SPU(func) PPC_SYS(func) 37 37 #define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) 38 38 39 + .section .rodata,"a" 40 + 39 41 #ifdef CONFIG_PPC64 40 42 #define sys_sigpending sys_ni_syscall 41 43 #define sys_old_getrlimit sys_ni_syscall ··· 45 43 .p2align 3 46 44 #endif 47 45 48 - _GLOBAL(sys_call_table) 46 + .globl sys_call_table 47 + sys_call_table: 48 + 49 49 #include <asm/systbl.h>
+6 -7
arch/powerpc/kernel/tm.S
··· 42 42 /* Stack frame offsets for local variables. */ 43 43 #define TM_FRAME_L0 TM_FRAME_SIZE-16 44 44 #define TM_FRAME_L1 TM_FRAME_SIZE-8 45 - #define STACK_PARAM(x) (48+((x)*8)) 46 45 47 46 48 47 /* In order to access the TM SPRs, TM must be enabled. So, do so: */ ··· 108 109 mflr r0 109 110 stw r6, 8(r1) 110 111 std r0, 16(r1) 111 - std r2, 40(r1) 112 + std r2, STK_GOT(r1) 112 113 stdu r1, -TM_FRAME_SIZE(r1) 113 114 114 115 /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */ 115 116 116 - std r3, STACK_PARAM(0)(r1) 117 + std r3, STK_PARAM(R3)(r1) 117 118 SAVE_NVGPRS(r1) 118 119 119 120 /* We need to setup MSR for VSX register save instructions. Here we ··· 209 210 /* Now get some more GPRS free */ 210 211 std r7, GPR7(r1) /* Temporary stash */ 211 212 std r12, GPR12(r1) /* '' '' '' */ 212 - ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ 213 + ld r12, STK_PARAM(R3)(r1) /* Param 0, thread_struct * */ 213 214 214 215 std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */ 215 216 ··· 296 297 ld r0, 16(r1) 297 298 mtcr r4 298 299 mtlr r0 299 - ld r2, 40(r1) 300 + ld r2, STK_GOT(r1) 300 301 301 302 /* Load system default DSCR */ 302 303 ld r4, DSCR_DEFAULT@toc(r2) ··· 319 320 mflr r0 320 321 stw r5, 8(r1) 321 322 std r0, 16(r1) 322 - std r2, 40(r1) 323 + std r2, STK_GOT(r1) 323 324 stdu r1, -TM_FRAME_SIZE(r1) 324 325 325 326 /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. ··· 477 478 ld r0, 16(r1) 478 479 mtcr r4 479 480 mtlr r0 480 - ld r2, 40(r1) 481 + ld r2, STK_GOT(r1) 481 482 482 483 /* Load system default DSCR */ 483 484 ld r4, DSCR_DEFAULT@toc(r2)
+1 -1
arch/powerpc/kvm/book3s_hv_interrupts.S
··· 171 171 #endif /* CONFIG_SMP */ 172 172 173 173 /* Jump to partition switch code */ 174 - bl .kvmppc_hv_entry_trampoline 174 + bl kvmppc_hv_entry_trampoline 175 175 nop 176 176 177 177 /*
+17 -17
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 1658 1658 /* Search the hash table. */ 1659 1659 mr r3, r9 /* vcpu pointer */ 1660 1660 li r7, 1 /* data fault */ 1661 - bl .kvmppc_hpte_hv_fault 1661 + bl kvmppc_hpte_hv_fault 1662 1662 ld r9, HSTATE_KVM_VCPU(r13) 1663 1663 ld r10, VCPU_PC(r9) 1664 1664 ld r11, VCPU_MSR(r9) ··· 1732 1732 mr r4, r10 1733 1733 mr r6, r11 1734 1734 li r7, 0 /* instruction fault */ 1735 - bl .kvmppc_hpte_hv_fault 1735 + bl kvmppc_hpte_hv_fault 1736 1736 ld r9, HSTATE_KVM_VCPU(r13) 1737 1737 ld r10, VCPU_PC(r9) 1738 1738 ld r11, VCPU_MSR(r9) ··· 1806 1806 .globl hcall_real_table 1807 1807 hcall_real_table: 1808 1808 .long 0 /* 0 - unused */ 1809 - .long .kvmppc_h_remove - hcall_real_table 1810 - .long .kvmppc_h_enter - hcall_real_table 1811 - .long .kvmppc_h_read - hcall_real_table 1809 + .long DOTSYM(kvmppc_h_remove) - hcall_real_table 1810 + .long DOTSYM(kvmppc_h_enter) - hcall_real_table 1811 + .long DOTSYM(kvmppc_h_read) - hcall_real_table 1812 1812 .long 0 /* 0x10 - H_CLEAR_MOD */ 1813 1813 .long 0 /* 0x14 - H_CLEAR_REF */ 1814 - .long .kvmppc_h_protect - hcall_real_table 1815 - .long .kvmppc_h_get_tce - hcall_real_table 1816 - .long .kvmppc_h_put_tce - hcall_real_table 1814 + .long DOTSYM(kvmppc_h_protect) - hcall_real_table 1815 + .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table 1816 + .long DOTSYM(kvmppc_h_put_tce) - hcall_real_table 1817 1817 .long 0 /* 0x24 - H_SET_SPRG0 */ 1818 - .long .kvmppc_h_set_dabr - hcall_real_table 1818 + .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table 1819 1819 .long 0 /* 0x2c */ 1820 1820 .long 0 /* 0x30 */ 1821 1821 .long 0 /* 0x34 */ ··· 1831 1831 .long 0 /* 0x5c */ 1832 1832 .long 0 /* 0x60 */ 1833 1833 #ifdef CONFIG_KVM_XICS 1834 - .long .kvmppc_rm_h_eoi - hcall_real_table 1835 - .long .kvmppc_rm_h_cppr - hcall_real_table 1836 - .long .kvmppc_rm_h_ipi - hcall_real_table 1834 + .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table 1835 + .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table 1836 + .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table 
1837 1837 .long 0 /* 0x70 - H_IPOLL */ 1838 - .long .kvmppc_rm_h_xirr - hcall_real_table 1838 + .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table 1839 1839 #else 1840 1840 .long 0 /* 0x64 - H_EOI */ 1841 1841 .long 0 /* 0x68 - H_CPPR */ ··· 1869 1869 .long 0 /* 0xd4 */ 1870 1870 .long 0 /* 0xd8 */ 1871 1871 .long 0 /* 0xdc */ 1872 - .long .kvmppc_h_cede - hcall_real_table 1872 + .long DOTSYM(kvmppc_h_cede) - hcall_real_table 1873 1873 .long 0 /* 0xe4 */ 1874 1874 .long 0 /* 0xe8 */ 1875 1875 .long 0 /* 0xec */ ··· 1886 1886 .long 0 /* 0x118 */ 1887 1887 .long 0 /* 0x11c */ 1888 1888 .long 0 /* 0x120 */ 1889 - .long .kvmppc_h_bulk_remove - hcall_real_table 1889 + .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table 1890 1890 .long 0 /* 0x128 */ 1891 1891 .long 0 /* 0x12c */ 1892 1892 .long 0 /* 0x130 */ 1893 - .long .kvmppc_h_set_xdabr - hcall_real_table 1893 + .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table 1894 1894 hcall_real_table_end: 1895 1895 1896 1896 ignore_hdec: ··· 2115 2115 /* Try to handle a machine check in real mode */ 2116 2116 machine_check_realmode: 2117 2117 mr r3, r9 /* get vcpu pointer */ 2118 - bl .kvmppc_realmode_machine_check 2118 + bl kvmppc_realmode_machine_check 2119 2119 nop 2120 2120 cmpdi r3, 0 /* continue exiting from guest? */ 2121 2121 ld r9, HSTATE_KVM_VCPU(r13)
+1 -1
arch/powerpc/lib/copypage_64.S
··· 20 20 BEGIN_FTR_SECTION 21 21 lis r5,PAGE_SIZE@h 22 22 FTR_SECTION_ELSE 23 - b .copypage_power7 23 + b copypage_power7 24 24 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) 25 25 ori r5,r5,PAGE_SIZE@l 26 26 BEGIN_FTR_SECTION
+6 -6
arch/powerpc/lib/copypage_power7.S
··· 56 56 57 57 #ifdef CONFIG_ALTIVEC 58 58 mflr r0 59 - std r3,48(r1) 60 - std r4,56(r1) 59 + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 60 + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) 61 61 std r0,16(r1) 62 62 stdu r1,-STACKFRAMESIZE(r1) 63 - bl .enter_vmx_copy 63 + bl enter_vmx_copy 64 64 cmpwi r3,0 65 65 ld r0,STACKFRAMESIZE+16(r1) 66 - ld r3,STACKFRAMESIZE+48(r1) 67 - ld r4,STACKFRAMESIZE+56(r1) 66 + ld r3,STK_REG(R31)(r1) 67 + ld r4,STK_REG(R30)(r1) 68 68 mtlr r0 69 69 70 70 li r0,(PAGE_SIZE/128) ··· 103 103 addi r3,r3,128 104 104 bdnz 1b 105 105 106 - b .exit_vmx_copy /* tail call optimise */ 106 + b exit_vmx_copy /* tail call optimise */ 107 107 108 108 #else 109 109 li r0,(PAGE_SIZE/128)
+1 -1
arch/powerpc/lib/copyuser_64.S
··· 18 18 #endif 19 19 20 20 .align 7 21 - _GLOBAL(__copy_tofrom_user) 21 + _GLOBAL_TOC(__copy_tofrom_user) 22 22 BEGIN_FTR_SECTION 23 23 nop 24 24 FTR_SECTION_ELSE
+16 -16
arch/powerpc/lib/copyuser_power7.S
··· 66 66 ld r15,STK_REG(R15)(r1) 67 67 ld r14,STK_REG(R14)(r1) 68 68 .Ldo_err3: 69 - bl .exit_vmx_usercopy 69 + bl exit_vmx_usercopy 70 70 ld r0,STACKFRAMESIZE+16(r1) 71 71 mtlr r0 72 72 b .Lexit ··· 85 85 .Lexit: 86 86 addi r1,r1,STACKFRAMESIZE 87 87 .Ldo_err1: 88 - ld r3,48(r1) 89 - ld r4,56(r1) 90 - ld r5,64(r1) 88 + ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 89 + ld r4,-STACKFRAMESIZE+STK_REG(R30)(r1) 90 + ld r5,-STACKFRAMESIZE+STK_REG(R29)(r1) 91 91 b __copy_tofrom_user_base 92 92 93 93 ··· 96 96 cmpldi r5,16 97 97 cmpldi cr1,r5,4096 98 98 99 - std r3,48(r1) 100 - std r4,56(r1) 101 - std r5,64(r1) 99 + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 100 + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) 101 + std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) 102 102 103 103 blt .Lshort_copy 104 104 bgt cr1,.Lvmx_copy 105 105 #else 106 106 cmpldi r5,16 107 107 108 - std r3,48(r1) 109 - std r4,56(r1) 110 - std r5,64(r1) 108 + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 109 + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) 110 + std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) 111 111 112 112 blt .Lshort_copy 113 113 #endif ··· 295 295 mflr r0 296 296 std r0,16(r1) 297 297 stdu r1,-STACKFRAMESIZE(r1) 298 - bl .enter_vmx_usercopy 298 + bl enter_vmx_usercopy 299 299 cmpwi cr1,r3,0 300 300 ld r0,STACKFRAMESIZE+16(r1) 301 - ld r3,STACKFRAMESIZE+48(r1) 302 - ld r4,STACKFRAMESIZE+56(r1) 303 - ld r5,STACKFRAMESIZE+64(r1) 301 + ld r3,STK_REG(R31)(r1) 302 + ld r4,STK_REG(R30)(r1) 303 + ld r5,STK_REG(R29)(r1) 304 304 mtlr r0 305 305 306 306 /* ··· 514 514 err3; stb r0,0(r3) 515 515 516 516 15: addi r1,r1,STACKFRAMESIZE 517 - b .exit_vmx_usercopy /* tail call optimise */ 517 + b exit_vmx_usercopy /* tail call optimise */ 518 518 519 519 .Lvmx_unaligned_copy: 520 520 /* Get the destination 16B aligned */ ··· 717 717 err3; stb r0,0(r3) 718 718 719 719 15: addi r1,r1,STACKFRAMESIZE 720 - b .exit_vmx_usercopy /* tail call optimise */ 720 + b exit_vmx_usercopy /* tail call optimise */ 721 721 #endif /* CONFiG_ALTIVEC */
+4 -4
arch/powerpc/lib/hweight_64.S
··· 24 24 25 25 _GLOBAL(__arch_hweight8) 26 26 BEGIN_FTR_SECTION 27 - b .__sw_hweight8 27 + b __sw_hweight8 28 28 nop 29 29 nop 30 30 FTR_SECTION_ELSE ··· 35 35 36 36 _GLOBAL(__arch_hweight16) 37 37 BEGIN_FTR_SECTION 38 - b .__sw_hweight16 38 + b __sw_hweight16 39 39 nop 40 40 nop 41 41 nop ··· 57 57 58 58 _GLOBAL(__arch_hweight32) 59 59 BEGIN_FTR_SECTION 60 - b .__sw_hweight32 60 + b __sw_hweight32 61 61 nop 62 62 nop 63 63 nop ··· 82 82 83 83 _GLOBAL(__arch_hweight64) 84 84 BEGIN_FTR_SECTION 85 - b .__sw_hweight64 85 + b __sw_hweight64 86 86 nop 87 87 nop 88 88 nop
+2 -2
arch/powerpc/lib/mem_64.S
··· 79 79 80 80 _GLOBAL(memmove) 81 81 cmplw 0,r3,r4 82 - bgt .backwards_memcpy 83 - b .memcpy 82 + bgt backwards_memcpy 83 + b memcpy 84 84 85 85 _GLOBAL(backwards_memcpy) 86 86 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
+5 -5
arch/powerpc/lib/memcpy_64.S
··· 10 10 #include <asm/ppc_asm.h> 11 11 12 12 .align 7 13 - _GLOBAL(memcpy) 13 + _GLOBAL_TOC(memcpy) 14 14 BEGIN_FTR_SECTION 15 15 #ifdef __LITTLE_ENDIAN__ 16 16 cmpdi cr7,r5,0 17 17 #else 18 - std r3,48(r1) /* save destination pointer for return value */ 18 + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* save destination pointer for return value */ 19 19 #endif 20 20 FTR_SECTION_ELSE 21 21 #ifndef SELFTEST ··· 88 88 2: bf cr7*4+3,3f 89 89 lbz r9,8(r4) 90 90 stb r9,0(r3) 91 - 3: ld r3,48(r1) /* return dest pointer */ 91 + 3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ 92 92 blr 93 93 94 94 .Lsrc_unaligned: ··· 171 171 2: bf cr7*4+3,3f 172 172 rotldi r9,r9,8 173 173 stb r9,0(r3) 174 - 3: ld r3,48(r1) /* return dest pointer */ 174 + 3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ 175 175 blr 176 176 177 177 .Ldst_unaligned: ··· 216 216 3: bf cr7*4+3,4f 217 217 lbz r0,0(r4) 218 218 stb r0,0(r3) 219 - 4: ld r3,48(r1) /* return dest pointer */ 219 + 4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ 220 220 blr 221 221 #endif
+13 -13
arch/powerpc/lib/memcpy_power7.S
··· 33 33 cmpldi r5,16 34 34 cmpldi cr1,r5,4096 35 35 36 - std r3,48(r1) 36 + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 37 37 38 38 blt .Lshort_copy 39 39 bgt cr1,.Lvmx_copy 40 40 #else 41 41 cmpldi r5,16 42 42 43 - std r3,48(r1) 43 + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 44 44 45 45 blt .Lshort_copy 46 46 #endif ··· 216 216 lbz r0,0(r4) 217 217 stb r0,0(r3) 218 218 219 - 15: ld r3,48(r1) 219 + 15: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 220 220 blr 221 221 222 222 .Lunwind_stack_nonvmx_copy: ··· 226 226 #ifdef CONFIG_ALTIVEC 227 227 .Lvmx_copy: 228 228 mflr r0 229 - std r4,56(r1) 230 - std r5,64(r1) 229 + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) 230 + std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) 231 231 std r0,16(r1) 232 232 stdu r1,-STACKFRAMESIZE(r1) 233 - bl .enter_vmx_copy 233 + bl enter_vmx_copy 234 234 cmpwi cr1,r3,0 235 235 ld r0,STACKFRAMESIZE+16(r1) 236 - ld r3,STACKFRAMESIZE+48(r1) 237 - ld r4,STACKFRAMESIZE+56(r1) 238 - ld r5,STACKFRAMESIZE+64(r1) 236 + ld r3,STK_REG(R31)(r1) 237 + ld r4,STK_REG(R30)(r1) 238 + ld r5,STK_REG(R29)(r1) 239 239 mtlr r0 240 240 241 241 /* ··· 447 447 stb r0,0(r3) 448 448 449 449 15: addi r1,r1,STACKFRAMESIZE 450 - ld r3,48(r1) 451 - b .exit_vmx_copy /* tail call optimise */ 450 + ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 451 + b exit_vmx_copy /* tail call optimise */ 452 452 453 453 .Lvmx_unaligned_copy: 454 454 /* Get the destination 16B aligned */ ··· 651 651 stb r0,0(r3) 652 652 653 653 15: addi r1,r1,STACKFRAMESIZE 654 - ld r3,48(r1) 655 - b .exit_vmx_copy /* tail call optimise */ 654 + ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 655 + b exit_vmx_copy /* tail call optimise */ 656 656 #endif /* CONFiG_ALTIVEC */
+28 -16
arch/powerpc/mm/hash_low_64.S
··· 159 159 BEGIN_FTR_SECTION 160 160 mr r4,r30 161 161 mr r5,r7 162 - bl .hash_page_do_lazy_icache 162 + bl hash_page_do_lazy_icache 163 163 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) 164 164 165 165 /* At this point, r3 contains new PP bits, save them in ··· 201 201 li r8,MMU_PAGE_4K /* page size */ 202 202 li r9,MMU_PAGE_4K /* actual page size */ 203 203 ld r10,STK_PARAM(R9)(r1) /* segment size */ 204 - _GLOBAL(htab_call_hpte_insert1) 204 + .globl htab_call_hpte_insert1 205 + htab_call_hpte_insert1: 205 206 bl . /* Patched by htab_finish_init() */ 206 207 cmpdi 0,r3,0 207 208 bge htab_pte_insert_ok /* Insertion successful */ ··· 226 225 li r8,MMU_PAGE_4K /* page size */ 227 226 li r9,MMU_PAGE_4K /* actual page size */ 228 227 ld r10,STK_PARAM(R9)(r1) /* segment size */ 229 - _GLOBAL(htab_call_hpte_insert2) 228 + .globl htab_call_hpte_insert2 229 + htab_call_hpte_insert2: 230 230 bl . /* Patched by htab_finish_init() */ 231 231 cmpdi 0,r3,0 232 232 bge+ htab_pte_insert_ok /* Insertion successful */ ··· 244 242 2: and r0,r5,r27 245 243 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 246 244 /* Call ppc_md.hpte_remove */ 247 - _GLOBAL(htab_call_hpte_remove) 245 + .globl htab_call_hpte_remove 246 + htab_call_hpte_remove: 248 247 bl . /* Patched by htab_finish_init() */ 249 248 250 249 /* Try all again */ ··· 299 296 li r7,MMU_PAGE_4K /* actual page size */ 300 297 ld r8,STK_PARAM(R9)(r1) /* segment size */ 301 298 ld r9,STK_PARAM(R8)(r1) /* get "local" param */ 302 - _GLOBAL(htab_call_hpte_updatepp) 299 + .globl htab_call_hpte_updatepp 300 + htab_call_hpte_updatepp: 303 301 bl . 
/* Patched by htab_finish_init() */ 304 302 305 303 /* if we failed because typically the HPTE wasn't really here ··· 475 471 BEGIN_FTR_SECTION 476 472 mr r4,r30 477 473 mr r5,r7 478 - bl .hash_page_do_lazy_icache 474 + bl hash_page_do_lazy_icache 479 475 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) 480 476 481 477 /* At this point, r3 contains new PP bits, save them in ··· 530 526 li r8,MMU_PAGE_4K /* page size */ 531 527 li r9,MMU_PAGE_4K /* actual page size */ 532 528 ld r10,STK_PARAM(R9)(r1) /* segment size */ 533 - _GLOBAL(htab_call_hpte_insert1) 529 + .globl htab_call_hpte_insert1 530 + htab_call_hpte_insert1: 534 531 bl . /* patched by htab_finish_init() */ 535 532 cmpdi 0,r3,0 536 533 bge htab_pte_insert_ok /* Insertion successful */ ··· 559 554 li r8,MMU_PAGE_4K /* page size */ 560 555 li r9,MMU_PAGE_4K /* actual page size */ 561 556 ld r10,STK_PARAM(R9)(r1) /* segment size */ 562 - _GLOBAL(htab_call_hpte_insert2) 557 + .globl htab_call_hpte_insert2 558 + htab_call_hpte_insert2: 563 559 bl . /* patched by htab_finish_init() */ 564 560 cmpdi 0,r3,0 565 561 bge+ htab_pte_insert_ok /* Insertion successful */ ··· 577 571 2: and r0,r5,r27 578 572 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 579 573 /* Call ppc_md.hpte_remove */ 580 - _GLOBAL(htab_call_hpte_remove) 574 + .globl htab_call_hpte_remove 575 + htab_call_hpte_remove: 581 576 bl . 
/* patched by htab_finish_init() */ 582 577 583 578 /* Try all again */ ··· 595 588 li r6,MMU_PAGE_64K /* psize */ 596 589 ld r7,STK_PARAM(R9)(r1) /* ssize */ 597 590 ld r8,STK_PARAM(R8)(r1) /* local */ 598 - bl .flush_hash_page 591 + bl flush_hash_page 599 592 /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ 600 593 lis r0,_PAGE_HPTE_SUB@h 601 594 ori r0,r0,_PAGE_HPTE_SUB@l ··· 667 660 li r7,MMU_PAGE_4K /* actual page size */ 668 661 ld r8,STK_PARAM(R9)(r1) /* segment size */ 669 662 ld r9,STK_PARAM(R8)(r1) /* get "local" param */ 670 - _GLOBAL(htab_call_hpte_updatepp) 663 + .globl htab_call_hpte_updatepp 664 + htab_call_hpte_updatepp: 671 665 bl . /* patched by htab_finish_init() */ 672 666 673 667 /* if we failed because typically the HPTE wasn't really here ··· 820 812 BEGIN_FTR_SECTION 821 813 mr r4,r30 822 814 mr r5,r7 823 - bl .hash_page_do_lazy_icache 815 + bl hash_page_do_lazy_icache 824 816 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) 825 817 826 818 /* At this point, r3 contains new PP bits, save them in ··· 865 857 li r8,MMU_PAGE_64K 866 858 li r9,MMU_PAGE_64K /* actual page size */ 867 859 ld r10,STK_PARAM(R9)(r1) /* segment size */ 868 - _GLOBAL(ht64_call_hpte_insert1) 860 + .globl ht64_call_hpte_insert1 861 + ht64_call_hpte_insert1: 869 862 bl . /* patched by htab_finish_init() */ 870 863 cmpdi 0,r3,0 871 864 bge ht64_pte_insert_ok /* Insertion successful */ ··· 890 881 li r8,MMU_PAGE_64K 891 882 li r9,MMU_PAGE_64K /* actual page size */ 892 883 ld r10,STK_PARAM(R9)(r1) /* segment size */ 893 - _GLOBAL(ht64_call_hpte_insert2) 884 + .globl ht64_call_hpte_insert2 885 + ht64_call_hpte_insert2: 894 886 bl . 
/* patched by htab_finish_init() */ 895 887 cmpdi 0,r3,0 896 888 bge+ ht64_pte_insert_ok /* Insertion successful */ ··· 908 898 2: and r0,r5,r27 909 899 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 910 900 /* Call ppc_md.hpte_remove */ 911 - _GLOBAL(ht64_call_hpte_remove) 901 + .globl ht64_call_hpte_remove 902 + ht64_call_hpte_remove: 912 903 bl . /* patched by htab_finish_init() */ 913 904 914 905 /* Try all again */ ··· 963 952 li r7,MMU_PAGE_64K /* actual page size */ 964 953 ld r8,STK_PARAM(R9)(r1) /* segment size */ 965 954 ld r9,STK_PARAM(R8)(r1) /* get "local" param */ 966 - _GLOBAL(ht64_call_hpte_updatepp) 955 + .globl ht64_call_hpte_updatepp 956 + ht64_call_hpte_updatepp: 967 957 bl . /* patched by htab_finish_init() */ 968 958 969 959 /* if we failed because typically the HPTE wasn't really here
+16 -20
arch/powerpc/mm/hash_utils_64.c
··· 622 622 } 623 623 #endif /* CONFIG_MEMORY_HOTPLUG */ 624 624 625 - #define FUNCTION_TEXT(A) ((*(unsigned long *)(A))) 625 + extern u32 htab_call_hpte_insert1[]; 626 + extern u32 htab_call_hpte_insert2[]; 627 + extern u32 htab_call_hpte_remove[]; 628 + extern u32 htab_call_hpte_updatepp[]; 629 + extern u32 ht64_call_hpte_insert1[]; 630 + extern u32 ht64_call_hpte_insert2[]; 631 + extern u32 ht64_call_hpte_remove[]; 632 + extern u32 ht64_call_hpte_updatepp[]; 626 633 627 634 static void __init htab_finish_init(void) 628 635 { 629 - extern unsigned int *htab_call_hpte_insert1; 630 - extern unsigned int *htab_call_hpte_insert2; 631 - extern unsigned int *htab_call_hpte_remove; 632 - extern unsigned int *htab_call_hpte_updatepp; 633 - 634 636 #ifdef CONFIG_PPC_HAS_HASH_64K 635 - extern unsigned int *ht64_call_hpte_insert1; 636 - extern unsigned int *ht64_call_hpte_insert2; 637 - extern unsigned int *ht64_call_hpte_remove; 638 - extern unsigned int *ht64_call_hpte_updatepp; 639 - 640 637 patch_branch(ht64_call_hpte_insert1, 641 - FUNCTION_TEXT(ppc_md.hpte_insert), 638 + ppc_function_entry(ppc_md.hpte_insert), 642 639 BRANCH_SET_LINK); 643 640 patch_branch(ht64_call_hpte_insert2, 644 - FUNCTION_TEXT(ppc_md.hpte_insert), 641 + ppc_function_entry(ppc_md.hpte_insert), 645 642 BRANCH_SET_LINK); 646 643 patch_branch(ht64_call_hpte_remove, 647 - FUNCTION_TEXT(ppc_md.hpte_remove), 644 + ppc_function_entry(ppc_md.hpte_remove), 648 645 BRANCH_SET_LINK); 649 646 patch_branch(ht64_call_hpte_updatepp, 650 - FUNCTION_TEXT(ppc_md.hpte_updatepp), 647 + ppc_function_entry(ppc_md.hpte_updatepp), 651 648 BRANCH_SET_LINK); 652 - 653 649 #endif /* CONFIG_PPC_HAS_HASH_64K */ 654 650 655 651 patch_branch(htab_call_hpte_insert1, 656 - FUNCTION_TEXT(ppc_md.hpte_insert), 652 + ppc_function_entry(ppc_md.hpte_insert), 657 653 BRANCH_SET_LINK); 658 654 patch_branch(htab_call_hpte_insert2, 659 - FUNCTION_TEXT(ppc_md.hpte_insert), 655 + ppc_function_entry(ppc_md.hpte_insert), 660 656 
BRANCH_SET_LINK); 661 657 patch_branch(htab_call_hpte_remove, 662 - FUNCTION_TEXT(ppc_md.hpte_remove), 658 + ppc_function_entry(ppc_md.hpte_remove), 663 659 BRANCH_SET_LINK); 664 660 patch_branch(htab_call_hpte_updatepp, 665 - FUNCTION_TEXT(ppc_md.hpte_updatepp), 661 + ppc_function_entry(ppc_md.hpte_updatepp), 666 662 BRANCH_SET_LINK); 667 663 } 668 664
+6 -6
arch/powerpc/mm/slb.c
··· 256 256 patch_instruction(insn_addr, insn); 257 257 } 258 258 259 + extern u32 slb_compare_rr_to_size[]; 260 + extern u32 slb_miss_kernel_load_linear[]; 261 + extern u32 slb_miss_kernel_load_io[]; 262 + extern u32 slb_compare_rr_to_size[]; 263 + extern u32 slb_miss_kernel_load_vmemmap[]; 264 + 259 265 void slb_set_size(u16 size) 260 266 { 261 - extern unsigned int *slb_compare_rr_to_size; 262 - 263 267 if (mmu_slb_size == size) 264 268 return; 265 269 ··· 276 272 unsigned long linear_llp, vmalloc_llp, io_llp; 277 273 unsigned long lflags, vflags; 278 274 static int slb_encoding_inited; 279 - extern unsigned int *slb_miss_kernel_load_linear; 280 - extern unsigned int *slb_miss_kernel_load_io; 281 - extern unsigned int *slb_compare_rr_to_size; 282 275 #ifdef CONFIG_SPARSEMEM_VMEMMAP 283 - extern unsigned int *slb_miss_kernel_load_vmemmap; 284 276 unsigned long vmemmap_llp; 285 277 #endif 286 278
+8 -4
arch/powerpc/mm/slb_low.S
··· 59 59 /* Linear mapping encoding bits, the "li" instruction below will 60 60 * be patched by the kernel at boot 61 61 */ 62 - _GLOBAL(slb_miss_kernel_load_linear) 62 + .globl slb_miss_kernel_load_linear 63 + slb_miss_kernel_load_linear: 63 64 li r11,0 64 65 /* 65 66 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 ··· 80 79 /* Check virtual memmap region. To be patches at kernel boot */ 81 80 cmpldi cr0,r9,0xf 82 81 bne 1f 83 - _GLOBAL(slb_miss_kernel_load_vmemmap) 82 + .globl slb_miss_kernel_load_vmemmap 83 + slb_miss_kernel_load_vmemmap: 84 84 li r11,0 85 85 b 6f 86 86 1: ··· 97 95 b 6f 98 96 5: 99 97 /* IO mapping */ 100 - _GLOBAL(slb_miss_kernel_load_io) 98 + .globl slb_miss_kernel_load_io 99 + slb_miss_kernel_load_io: 101 100 li r11,0 102 101 6: 103 102 /* ··· 253 250 7: ld r10,PACASTABRR(r13) 254 251 addi r10,r10,1 255 252 /* This gets soft patched on boot. */ 256 - _GLOBAL(slb_compare_rr_to_size) 253 + .globl slb_compare_rr_to_size 254 + slb_compare_rr_to_size: 257 255 cmpldi r10,0 258 256 259 257 blt+ 4f
+2 -1
arch/powerpc/platforms/85xx/smp.c
··· 27 27 #include <asm/cacheflush.h> 28 28 #include <asm/dbell.h> 29 29 #include <asm/fsl_guts.h> 30 + #include <asm/code-patching.h> 30 31 31 32 #include <sysdev/fsl_soc.h> 32 33 #include <sysdev/mpic.h> ··· 268 267 flush_spin_table(spin_table); 269 268 out_be32(&spin_table->pir, hw_cpu); 270 269 out_be64((u64 *)(&spin_table->addr_h), 271 - __pa((u64)*((unsigned long long *)generic_secondary_smp_init))); 270 + __pa(ppc_function_entry(generic_secondary_smp_init))); 272 271 flush_spin_table(spin_table); 273 272 #endif 274 273
+3 -2
arch/powerpc/platforms/cell/smp.c
··· 40 40 #include <asm/firmware.h> 41 41 #include <asm/rtas.h> 42 42 #include <asm/cputhreads.h> 43 + #include <asm/code-patching.h> 43 44 44 45 #include "interrupt.h" 45 46 #include <asm/udbg.h> ··· 71 70 static inline int smp_startup_cpu(unsigned int lcpu) 72 71 { 73 72 int status; 74 - unsigned long start_here = __pa((u32)*((unsigned long *) 75 - generic_secondary_smp_init)); 73 + unsigned long start_here = 74 + __pa(ppc_function_entry(generic_secondary_smp_init)); 76 75 unsigned int pcpu; 77 76 int start_cpu; 78 77
+1 -1
arch/powerpc/platforms/pasemi/powersave.S
··· 66 66 std r3, 48(r1) 67 67 68 68 /* Only do power savings when in astate 0 */ 69 - bl .check_astate 69 + bl check_astate 70 70 cmpwi r3,0 71 71 bne 1f 72 72
+2
arch/powerpc/platforms/powernv/opal-takeover.S
··· 21 21 _GLOBAL(opal_query_takeover) 22 22 mfcr r0 23 23 stw r0,8(r1) 24 + stdu r1,-STACKFRAMESIZE(r1) 24 25 std r3,STK_PARAM(R3)(r1) 25 26 std r4,STK_PARAM(R4)(r1) 26 27 li r3,H_HAL_TAKEOVER 27 28 li r4,H_HAL_TAKEOVER_QUERY_MAGIC 28 29 HVSC 30 + addi r1,r1,STACKFRAMESIZE 29 31 ld r10,STK_PARAM(R3)(r1) 30 32 std r4,0(r10) 31 33 ld r10,STK_PARAM(R4)(r1)
+2 -2
arch/powerpc/platforms/powernv/opal-wrappers.S
··· 32 32 std r12,PACASAVEDMSR(r13); \ 33 33 andc r12,r12,r0; \ 34 34 mtmsrd r12,1; \ 35 - LOAD_REG_ADDR(r0,.opal_return); \ 35 + LOAD_REG_ADDR(r0,opal_return); \ 36 36 mtlr r0; \ 37 37 li r0,MSR_DR|MSR_IR|MSR_LE;\ 38 38 andc r12,r12,r0; \ ··· 44 44 mtspr SPRN_HSRR0,r12; \ 45 45 hrfid 46 46 47 - _STATIC(opal_return) 47 + opal_return: 48 48 /* 49 49 * Fixup endian on OPAL return... we should be able to simplify 50 50 * this by instead converting the below trampoline to a set of
+3 -2
arch/powerpc/platforms/powernv/smp.c
··· 31 31 #include <asm/xics.h> 32 32 #include <asm/opal.h> 33 33 #include <asm/runlatch.h> 34 + #include <asm/code-patching.h> 34 35 35 36 #include "powernv.h" 36 37 ··· 51 50 int pnv_smp_kick_cpu(int nr) 52 51 { 53 52 unsigned int pcpu = get_hard_smp_processor_id(nr); 54 - unsigned long start_here = __pa(*((unsigned long *) 55 - generic_secondary_smp_init)); 53 + unsigned long start_here = 54 + __pa(ppc_function_entry(generic_secondary_smp_init)); 56 55 long rc; 57 56 58 57 BUG_ON(nr < 0 || nr >= NR_CPUS);
+2 -2
arch/powerpc/platforms/pseries/hvCall.S
··· 49 49 std r0,16(r1); \ 50 50 addi r4,r1,STK_PARAM(FIRST_REG); \ 51 51 stdu r1,-STACK_FRAME_OVERHEAD(r1); \ 52 - bl .__trace_hcall_entry; \ 52 + bl __trace_hcall_entry; \ 53 53 addi r1,r1,STACK_FRAME_OVERHEAD; \ 54 54 ld r0,16(r1); \ 55 55 ld r3,STK_PARAM(R3)(r1); \ ··· 83 83 mr r3,r6; \ 84 84 std r0,16(r1); \ 85 85 stdu r1,-STACK_FRAME_OVERHEAD(r1); \ 86 - bl .__trace_hcall_exit; \ 86 + bl __trace_hcall_exit; \ 87 87 addi r1,r1,STACK_FRAME_OVERHEAD; \ 88 88 ld r0,16(r1); \ 89 89 ld r3,STK_PARAM(R3)(r1); \
+3 -2
arch/powerpc/platforms/pseries/smp.c
··· 44 44 #include <asm/xics.h> 45 45 #include <asm/dbell.h> 46 46 #include <asm/plpar_wrappers.h> 47 + #include <asm/code-patching.h> 47 48 48 49 #include "pseries.h" 49 50 #include "offline_states.h" ··· 97 96 static inline int smp_startup_cpu(unsigned int lcpu) 98 97 { 99 98 int status; 100 - unsigned long start_here = __pa((u32)*((unsigned long *) 101 - generic_secondary_smp_init)); 99 + unsigned long start_here = 100 + __pa(ppc_function_entry(generic_secondary_smp_init)); 102 101 unsigned int pcpu; 103 102 int start_cpu; 104 103
+2 -1
arch/powerpc/platforms/wsp/scom_smp.c
··· 20 20 #include <asm/reg_a2.h> 21 21 #include <asm/scom.h> 22 22 #include <asm/udbg.h> 23 + #include <asm/code-patching.h> 23 24 24 25 #include "wsp.h" 25 26 ··· 406 405 goto fail; 407 406 } 408 407 409 - start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init 408 + start_here = ppc_function_entry(core_setup ? generic_secondary_smp_init 410 409 : generic_secondary_thread_init); 411 410 pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here); 412 411
+4 -1
tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
··· 46 46 #define R20 r20 47 47 #define R21 r21 48 48 #define R22 r22 49 + #define R29 r29 50 + #define R30 r30 51 + #define R31 r31 49 52 50 53 #define STACKFRAMESIZE 256 51 - #define STK_PARAM(i) (48 + ((i)-3)*8) 52 54 #define STK_REG(i) (112 + ((i)-14)*8) 53 55 54 56 #define _GLOBAL(A) FUNC_START(test_ ## A) 57 + #define _GLOBAL_TOC(A) _GLOBAL(A) 55 58 56 59 #define PPC_MTOCRF(A, B) mtocrf A, B 57 60