git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Thumb-2: Implementation of the unified start-up and exceptions code

This patch implements the ARM/Thumb-2 unified kernel start-up and
exception handling code.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

+262 -119
+11
arch/arm/include/asm/assembler.h
···
 #endif
 #endif
 	.endm
+
+#ifdef CONFIG_THUMB2_KERNEL
+	.macro	setmode, mode, reg
+	mov	\reg, #\mode
+	msr	cpsr_c, \reg
+	.endm
+#else
+	.macro	setmode, mode, reg
+	msr	cpsr_c, #\mode
+	.endm
+#endif
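The new `setmode` macro exists because `msr cpsr_c, #imm` is an ARM-only encoding: the Thumb-2 `msr` takes a register operand, so that variant stages the mode mask in a caller-supplied scratch register. Usage, as at the call sites that head.S and head-nommu.S gain later in this patch:

	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9	@ enter SVC mode, IRQs/FIQs masked; clobbers r9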
+1
arch/arm/include/asm/futex.h
···
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrt	%0, [%3]\n"
 	"	teq	%0, %1\n"
+	"	it	eq	@ explicit IT needed for the 2b label\n"
 	"2:	streqt	%2, [%3]\n"
 	"3:\n"
 	"	.section __ex_table,\"a\"\n"
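Background for this one-line fix: Thumb-2 only executes an instruction conditionally inside an IT (If-Then) block. The unified assembler usually synthesises the IT itself, but here it would land between the `2:` label and the `streqt`, so the `__ex_table` fixup for `2b` would point at the IT rather than at the faulting store; writing `it eq` before the label avoids that. A minimal sketch of the IT rule itself, with hypothetical registers:

	cmp	r0, #0
	ite	eq		@ Then/Else block: next insn if EQ, the one after if NE
	moveq	r1, #1
	movne	r1, #0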
+97 -66
arch/arm/kernel/entry-armv.S
···
 @
 @ routine called with r0 = irq number, r1 = struct pt_regs *
 @
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	asm_do_IRQ

 #ifdef CONFIG_SMP
···
 */
 	test_for_ipi r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_IPI

 #ifdef CONFIG_LOCAL_TIMERS
 	test_for_ltirq r0, r6, r5, lr
 	movne	r0, sp
-	adrne	lr, 1b
+	adrne	lr, BSYM(1b)
 	bne	do_local_timer
 #endif
 #endif
···
 */
 	.macro	inv_entry, reason
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - lr}
+ ARM(	stmib	sp, {r1 - lr}		)
+ THUMB(	stmia	sp, {r0 - r12}		)
+ THUMB(	str	sp, [sp, #S_SP]		)
+ THUMB(	str	lr, [sp, #S_LR]		)
 	mov	r1, #\reason
 	.endm
···
 	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole)
+	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+#ifdef CONFIG_THUMB2_KERNEL
+ SPFIX(	str	r0, [sp]	)	@ temporarily saved
+ SPFIX(	mov	r0, sp		)
+ SPFIX(	tst	r0, #4		)	@ test original stack alignment
+ SPFIX(	ldr	r0, [sp]	)	@ restored
+#else
 SPFIX(	tst	sp, #4		)
-SPFIX(	bicne	sp, sp, #4	)
-	stmib	sp, {r1 - r12}
+#endif
+ SPFIX(	subeq	sp, sp, #4	)
+	stmia	sp, {r1 - r12}

 	ldmia	r0, {r1 - r3}
-	add	r5, sp, #S_SP		@ here for interlock avoidance
+	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r4, #-1			@  ""  ""      ""       ""
-	add	r0, sp, #(S_FRAME_SIZE + \stack_hole)
-SPFIX(	addne	r0, r0, #4	)
-	str	r1, [sp]		@ save the "real" r0 copied
+	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ SPFIX(	addeq	r0, r0, #4	)
+	str	r1, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack

 	mov	r1, lr
···
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
···
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 #endif
-	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
-	msr	spsr_cxsf, r0
+	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
 #ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r0, #PSR_I_BIT
+	tst	r4, #PSR_I_BIT
 	bleq	trace_hardirqs_on
 #endif
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	svc_exit r4				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__irq_svc)
···
 	@ r0 - instruction
 	@
 	ldr	r0, [r2, #-4]
-	adr	r9, 1f
+	adr	r9, BSYM(1f)
 	bl	call_fpe

 	mov	r0, sp				@ struct pt_regs *regs
···
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
-	msr	spsr_cxsf, lr
-	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
+	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
+	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__und_svc)
···
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r0, [sp, #S_PSR]
-	msr	spsr_cxsf, r0
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	ldr	r2, [sp, #S_PSR]
+	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__pabt_svc)
···
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
-	stmib	sp, {r1 - r12}
+ ARM(	stmib	sp, {r1 - r12}	)
+ THUMB(	stmia	sp, {r0 - r12}	)

 	ldmia	r0, {r1 - r3}
 	add	r0, sp, #S_PC		@ here for interlock avoidance
···
 	@ Also, separately save sp_usr and lr_usr
 	@
 	stmia	r0, {r2 - r4}
-	stmdb	r0, {sp, lr}^
+ ARM(	stmdb	r0, {sp, lr}^			)
+ THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

 	@
 	@ Enable the alignment trap while in kernel mode
···
 	@
 	enable_irq
 	mov	r2, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_DataAbort
 UNWIND(.fnend		)
 ENDPROC(__dabt_usr)
···
 	ldr	r0, [tsk, #TI_PREEMPT]
 	str	r8, [tsk, #TI_PREEMPT]
 	teq	r0, r7
-	strne	r0, [r0, -r0]
+ ARM(	strne	r0, [r0, -r0]	)
+ THUMB(	movne	r0, #0		)
+ THUMB(	strne	r0, [r0]	)
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
···
 	@
 	@ r0 - instruction
 	@
-	adr	r9, ret_from_exception
-	adr	lr, __und_usr_unknown
+	adr	r9, BSYM(ret_from_exception)
+	adr	lr, BSYM(__und_usr_unknown)
 	tst	r3, #PSR_T_BIT			@ Thumb mode?
+	itet	eq				@ explicit IT needed for the 1f label
 	subeq	r4, r2, #4			@ ARM instr at LR - 4
 	subne	r4, r2, #2			@ Thumb instr at LR - 2
 1:	ldreqt	r0, [r4]
···
 	beq	call_fpe
 	@ Thumb instruction
 #if __LINUX_ARM_ARCH__ >= 7
-2:	ldrht	r5, [r4], #2
+2:
+ ARM(	ldrht	r5, [r4], #2	)
+ THUMB(	ldrht	r5, [r4]	)
+ THUMB(	add	r4, r4, #2	)
 	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
 	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
 	blo	__und_usr_unknown
···
 	moveq	pc, lr
 	get_thread_info r10			@ get current thread
 	and	r8, r0, #0x00000f00		@ mask out CP number
+ THUMB(	lsr	r8, r8, #8		)
 	mov	r7, #1
 	add	r6, r10, #TI_USED_CP
-	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
+ ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
+ THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
 	@ Test if we need to give access to iWMMXt coprocessors
 	ldr	r5, [r10, #TI_FLAGS]
···
 	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 	bcs	iwmmxt_task_enable
 #endif
-	add	pc, pc, r8, lsr #6
-	mov	r0, r0
+ ARM(	add	pc, pc, r8, lsr #6	)
+ THUMB(	lsl	r8, r8, #2		)
+ THUMB(	add	pc, r8			)
+	nop

-	mov	pc, lr				@ CP#0
-	b	do_fpe				@ CP#1 (FPE)
-	b	do_fpe				@ CP#2 (FPE)
-	mov	pc, lr				@ CP#3
+	W(mov)	pc, lr				@ CP#0
+	W(b)	do_fpe				@ CP#1 (FPE)
+	W(b)	do_fpe				@ CP#2 (FPE)
+	W(mov)	pc, lr				@ CP#3
 #ifdef CONFIG_CRUNCH
 	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
 	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
 #else
-	mov	pc, lr				@ CP#4
-	mov	pc, lr				@ CP#5
-	mov	pc, lr				@ CP#6
+	W(mov)	pc, lr				@ CP#4
+	W(mov)	pc, lr				@ CP#5
+	W(mov)	pc, lr				@ CP#6
 #endif
-	mov	pc, lr				@ CP#7
-	mov	pc, lr				@ CP#8
-	mov	pc, lr				@ CP#9
+	W(mov)	pc, lr				@ CP#7
+	W(mov)	pc, lr				@ CP#8
+	W(mov)	pc, lr				@ CP#9
 #ifdef CONFIG_VFP
-	b	do_vfp				@ CP#10 (VFP)
-	b	do_vfp				@ CP#11 (VFP)
+	W(b)	do_vfp				@ CP#10 (VFP)
+	W(b)	do_vfp				@ CP#11 (VFP)
 #else
-	mov	pc, lr				@ CP#10 (VFP)
-	mov	pc, lr				@ CP#11 (VFP)
+	W(mov)	pc, lr				@ CP#10 (VFP)
+	W(mov)	pc, lr				@ CP#11 (VFP)
 #endif
-	mov	pc, lr				@ CP#12
-	mov	pc, lr				@ CP#13
-	mov	pc, lr				@ CP#14 (Debug)
-	mov	pc, lr				@ CP#15 (Control)
+	W(mov)	pc, lr				@ CP#12
+	W(mov)	pc, lr				@ CP#13
+	W(mov)	pc, lr				@ CP#14 (Debug)
+	W(mov)	pc, lr				@ CP#15 (Control)

 #ifdef CONFIG_NEON
 	.align	6
···
 __und_usr_unknown:
 	enable_irq
 	mov	r0, sp
-	adr	lr, ret_from_exception
+	adr	lr, BSYM(ret_from_exception)
 	b	do_undefinstr
 ENDPROC(__und_usr_unknown)
···
 UNWIND(.cantunwind	)
 	add	ip, r1, #TI_CPU_SAVE
 	ldr	r3, [r2, #TI_TP_VALUE]
-	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
···
 	ldr	r0, =thread_notify_head
 	mov	r1, #THREAD_NOTIFY_SWITCH
 	bl	atomic_notifier_call_chain
+ THUMB(	mov	ip, r4			   )
 	mov	r0, r5
-	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
 ENDPROC(__switch_to)
···
 * if your compiled code is not going to use the new instructions for other
 * purpose.
 */
+ THUMB(	.arm	)

 	.macro	usr_ret, reg
 #ifdef CONFIG_ARM_THUMB
···
 	.globl	__kuser_helper_end
 __kuser_helper_end:

+ THUMB(	.thumb	)

 /*
 * Vector stubs.
···
 	@ Prepare for SVC32 mode.  IRQs remain disabled.
 	@
 	mrs	r0, cpsr
-	eor	r0, r0, #(\mode ^ SVC_MODE)
+	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
 	msr	spsr_cxsf, r0

 	@
 	@ the branch table must immediately follow this code
 	@
 	and	lr, lr, #0x0f
+ THUMB(	adr	r0, 1f			)
+ THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 	mov	r0, sp
-	ldr	lr, [pc, lr, lsl #2]
+ ARM(	ldr	lr, [pc, lr, lsl #2]	)
 	movs	pc, lr			@ branch to handler in SVC mode
 ENDPROC(vector_\name)
···
 	.globl	__vectors_start
 __vectors_start:
-	swi	SYS_ERROR0
-	b	vector_und + stubs_offset
-	ldr	pc, .LCvswi + stubs_offset
-	b	vector_pabt + stubs_offset
-	b	vector_dabt + stubs_offset
-	b	vector_addrexcptn + stubs_offset
-	b	vector_irq + stubs_offset
-	b	vector_fiq + stubs_offset
+ ARM(	swi	SYS_ERROR0	)
+ THUMB(	svc	#0		)
+ THUMB(	nop			)
+	W(b)	vector_und + stubs_offset
+	W(ldr)	pc, .LCvswi + stubs_offset
+	W(b)	vector_pabt + stubs_offset
+	W(b)	vector_dabt + stubs_offset
+	W(b)	vector_addrexcptn + stubs_offset
+	W(b)	vector_irq + stubs_offset
+	W(b)	vector_fiq + stubs_offset

 	.globl	__vectors_end
 __vectors_end:
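The `ARM()`, `THUMB()`, `W()` and `BSYM()` helpers used throughout this file come from asm/unified.h, which is not part of this diff. A sketch of their assumed definitions: `ARM()`/`THUMB()` keep a line only in the matching build, `W()` forces the 32-bit wide encoding so table entries such as the CP dispatch table and the vector page keep a fixed 4-byte stride, and `BSYM()` sets bit 0 on an address taken with `adr` so that a later `mov pc`/`movs pc` resumes in Thumb state:

#ifdef CONFIG_THUMB2_KERNEL
#define ARM(x...)
#define THUMB(x...)	x
#define W(instr)	instr.w		/* wide (32-bit) Thumb-2 encoding */
#define BSYM(sym)	sym + 1		/* bit 0 set: symbol is Thumb code */
#else
#define ARM(x...)	x
#define THUMB(x...)
#define W(instr)	instr
#define BSYM(sym)	sym
#endif

The `THUMB( .arm )`/`THUMB( .thumb )` bracket around the kuser helpers keeps that page ARM code even in a Thumb-2 kernel, since user space relies on the documented ARM entry points.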
+8 -20
arch/arm/kernel/entry-common.S
···
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr

-	@ fast_restore_user_regs
-	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
+	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
···
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr

-	@ slow_restore_user_regs
-	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
-	ldr	lr, [sp, #S_PC]!		@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
+	restore_user_regs fast = 0, offset = 0
 ENDPROC(ret_to_user)
···
 ENTRY(vector_swi)
 	sub	sp, sp, #S_FRAME_SIZE
 	stmia	sp, {r0 - r12}			@ Calling r0 - r12
-	add	r8, sp, #S_PC
-	stmdb	r8, {sp, lr}^			@ Calling sp, lr
+ ARM(	add	r8, sp, #S_PC		)
+ ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
+ THUMB(	mov	r8, sp			)
+ THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
 	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
 	str	lr, [sp, #S_PC]			@ Save calling PC
 	str	r8, [sp, #S_PSR]		@ Save CPSR
···
 	bne	__sys_trace

 	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	adr	lr, ret_fast_syscall		@ return address
+	adr	lr, BSYM(ret_fast_syscall)	@ return address
 	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

 	add	r1, sp, #S_OFF
···
 	mov	r0, #0				@ trace entry [IP = 0]
 	bl	syscall_trace

-	adr	lr, __sys_trace_return		@ return address
+	adr	lr, BSYM(__sys_trace_return)	@ return address
 	mov	scno, r0			@ syscall number (possibly new)
 	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
 	cmp	scno, #NR_syscalls		@ check upper syscall limit
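The two open-coded epilogues collapse into the `restore_user_regs` macro added to entry-header.S below; in a non-Thumb-2 build it expands to essentially the deleted sequence (minus the `mov r0, r0` padding), so ARM kernels are effectively unchanged:

	restore_user_regs fast = 1, offset = S_OFF
	@ ARM expansion, for reference:
	@	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
	@	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
	@	msr	spsr_cxsf, r1			@ save in spsr_svc
	@	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	@	add	sp, sp, #S_FRAME_SIZE - S_PC
	@	movs	pc, lr				@ return & move spsr_svc into cpsr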
+87 -5
arch/arm/kernel/entry-header.S
···
 #endif
 	.endm

-	.macro	get_thread_info, rd
-	mov	\rd, sp, lsr #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
 	.macro	alignment_trap, rtemp
 #ifdef CONFIG_ALIGNMENT_TRAP
 	ldr	\rtemp, .LCcralign
···
 #endif
 	.endm

+	@
+	@ Store/load the USER SP and LR registers by switching to the SYS
+	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
+	@ available. Should only be called from SVC mode
+	@
+	.macro	store_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	str	sp, [\rd, #\offset]		@ save sp_usr
+	str	lr, [\rd, #\offset + 4]		@ save lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+	.macro	load_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	ldr	sp, [\rd, #\offset]		@ load sp_usr
+	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+#ifndef CONFIG_THUMB2_KERNEL
+	.macro	svc_exit, rpsr
+	msr	spsr_cxsf, \rpsr
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	.if	\fast
+	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	.else
+	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	.endif
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp, lsr #13
+	mov	\rd, \rd, lsl #13
+	.endm
+#else	/* CONFIG_THUMB2_KERNEL */
+	.macro	svc_exit, rpsr
+	ldr	r0, [sp, #S_SP]			@ top of the stack
+	ldr	r1, [sp, #S_PC]			@ return address
+	tst	r0, #4				@ orig stack 8-byte aligned?
+	stmdb	r0, {r1, \rpsr}			@ rfe context
+	ldmia	sp, {r0 - r12}
+	ldr	lr, [sp, #S_LR]
+	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned
+	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned
+	rfeia	sp!
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	mov	r2, sp
+	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]	@ get pc
+	add	sp, sp, #\offset + S_SP
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	.if	\fast
+	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
+	.else
+	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
+	.endif
+	add	sp, sp, #S_FRAME_SIZE - S_SP
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp
+	lsr	\rd, \rd, #13
+	mov	\rd, \rd, lsl #13
+	.endm
+#endif	/* !CONFIG_THUMB2_KERNEL */

 /*
 * These are the registers used in the syscall handler, and allow us to
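Two Thumb-2 gaps drive the shape of these macros. The `^` forms of ldm/stm (user-bank transfer and exception return) have no Thumb-2 encoding, so `store_user_sp_lr`/`load_user_sp_lr` reach the user sp/lr by briefly switching to SYS mode, which shares those banked registers, and the Thumb-2 `svc_exit` returns with RFE instead of `ldmia sp, {r0 - pc}^`. The RFE semantics it relies on, sketched from the macro body:

	@ rfeia sp!	: pc <- [sp], cpsr <- [sp, #4], sp <- sp + 8
	stmdb	r0, {r1, \rpsr}		@ park {return pc, spsr} just below the
					@ frame top, then pop both with rfeia sp!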
+8 -5
arch/arm/kernel/head-common.S
···
 	strcc	fp, [r6],#4
 	bcc	1b

-	ldmia	r3, {r4, r5, r6, r7, sp}
+ ARM(	ldmia	r3, {r4, r5, r6, r7, sp})
+ THUMB(	ldmia	r3, {r4, r5, r6, r7}	)
+ THUMB(	ldr	sp, [r3, #16]		)
 	str	r9, [r4]			@ Save processor ID
 	str	r1, [r5]			@ Save machine type
 	str	r2, [r6]			@ Save atags pointer
···
 */
 __lookup_processor_type:
 	adr	r3, 3f
-	ldmda	r3, {r5 - r7}
+	ldmia	r3, {r5 - r7}
+	add	r3, r3, #8
 	sub	r3, r3, r7			@ get offset between virt&phys
 	add	r5, r5, r3			@ convert virt addresses to
 	add	r6, r6, r3			@ physical address space
···
 * more information about the __proc_info and __arch_info structures.
 */
 	.align	2
-	.long	__proc_info_begin
+3:	.long	__proc_info_begin
 	.long	__proc_info_end
-3:	.long	.
+4:	.long	.
 	.long	__arch_info_begin
 	.long	__arch_info_end
···
 * r5 = mach_info pointer in physical address space
 */
 __lookup_machine_type:
-	adr	r3, 3b
+	adr	r3, 4b
 	ldmia	r3, {r4, r5, r6}
 	sub	r3, r3, r4			@ get offset between virt&phys
 	add	r5, r5, r3			@ convert virt addresses to
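`ldmda` ("decrement after") is another ARM-only casualty: Thumb-2 keeps only the IA and DB multiple-load modes. Rather than emulate it, the patch moves the numeric label to the front of the three-word block so a plain `ldmia` reads the same data, with `add r3, r3, #8` recreating the pointer to the `.long .` word that the virt/phys offset arithmetic needs:

	adr	r3, 3f
	ldmia	r3, {r5 - r7}		@ same three words "ldmda r3, {r5 - r7}"
	add	r3, r3, #8		@ fetched when the label sat on ".long .";
					@ r3 again points at the ".long ." word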
+7 -4
arch/arm/kernel/head-nommu.S
···
 */
 	.section ".text.head", "ax"
 ENTRY(stext)
-	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
 #ifndef CONFIG_CPU_CP15
 	ldr	r9, =CONFIG_PROCESSOR_ID
···
 	ldr	r13, __switch_data		@ address to jump to after
 						@ the initialization is done
-	adr	lr, __after_proc_init		@ return (PIC) address
-	add	pc, r10, #PROCINFO_INITFUNC
+	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
 ENDPROC(stext)

 /*
···
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 #endif /* CONFIG_CPU_CP15 */

-	mov	pc, r13				@ clear the BSS and jump
+	mov	r3, r13
+	mov	pc, r3				@ clear the BSS and jump
 						@ to start_kernel
 ENDPROC(__after_proc_init)
 	.ltorg
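Both rewrites here route around Thumb-2 encoding limits rather than changing logic: `add pc, r10, #PROCINFO_INITFUNC` has no Thumb-2 form with pc as destination, so the sum goes through r12, and `mov pc, r13` (sp as the source of a pc-destination mov) is not a predictable Thumb-2 instruction, so the jump target is copied through r3 first:

	mov	r3, r13			@ legal in both instruction sets
	mov	pc, r3			@ ...whereas "mov pc, r13" is ARM-only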
+17 -11
arch/arm/kernel/head.S
···
 */
 	.section ".text.head", "ax"
 ENTRY(stext)
-	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
···
 */
 	ldr	r13, __switch_data		@ address to jump to after
 						@ mmu has been enabled
-	adr	lr, __enable_mmu		@ return (PIC) address
-	add	pc, r10, #PROCINFO_INITFUNC
+	adr	lr, BSYM(__enable_mmu)		@ return (PIC) address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
 ENDPROC(stext)

 #if defined(CONFIG_SMP)
···
 * the processor type - there is no need to check the machine type
 * as it has already been validated by the primary processor.
 */
-	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type
 	movs	r10, r5				@ invalid processor?
···
 * Use the page tables supplied from  __cpu_up.
 */
 	adr	r4, __secondary_data
-	ldmia	r4, {r5, r7, r13}		@ address to jump to after
+	ldmia	r4, {r5, r7, r12}		@ address to jump to after
 	sub	r4, r4, r5			@ mmu has been enabled
 	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
-	adr	lr, __enable_mmu		@ return address
-	add	pc, r10, #PROCINFO_INITFUNC	@ initialise processor
-						@ (return control reg)
+	adr	lr, BSYM(__enable_mmu)		@ return address
+	mov	r13, r12			@ __secondary_switched address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
+						  @ (return control reg)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
 ENDPROC(secondary_startup)

 /*
···
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
 	mov	r3, r3
-	mov	r3, r3
-	mov	pc, r13
+	mov	r3, r13
+	mov	pc, r3
 ENDPROC(__turn_mmu_on)
···
 * will be removed by paging_init().  We use our current program
 * counter to determine corresponding section base address.
 */
-	mov	r6, pc, lsr #20			@ start of kernel section
+	mov	r6, pc
+	mov	r6, r6, lsr #20			@ start of kernel section
 	orr	r3, r7, r6, lsl #20		@ flags + kernel base
 	str	r3, [r4, r6, lsl #2]		@ identity mapping
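head.S collects the remaining pc/sp encoding restrictions: Thumb-2 `ldm` may not load sp, so the `__secondary_data` pointer lands in r12 and is copied to r13 with a separate `mov`, and a flexible shifted operand may not use pc, which splits the identity-mapping address computation in two:

	mov	r6, pc			@ read pc first (Thumb-2: no shifted pc operand)
	mov	r6, r6, lsr #20		@ then shift; ARM could do both in one insn
	orr	r3, r7, r6, lsl #20	@ flags + kernel base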
+1 -1
arch/arm/kernel/process.c
···
 	regs.ARM_r2 = (unsigned long)fn;
 	regs.ARM_r3 = (unsigned long)kernel_thread_exit;
 	regs.ARM_pc = (unsigned long)kernel_thread_helper;
-	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
+	regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;

 	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }
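`PSR_ISETSTATE` makes new kernel threads start in the instruction set the kernel was compiled for. Its definition is not part of this diff; a sketch of the assumed <asm/ptrace.h> definition:

/* sketch of the assumed <asm/ptrace.h> definition */
#ifdef CONFIG_THUMB2_KERNEL
#define PSR_ISETSTATE	PSR_T_BIT	/* kernel threads execute in Thumb state */
#else
#define PSR_ISETSTATE	0		/* kernel threads execute in ARM state */
#endif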
+21 -7
arch/arm/kernel/setup.c
···
 #include <linux/smp.h>
 #include <linux/fs.h>

+#include <asm/unified.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
···
 }

 /*
+ * Define the placement constraint for the inline asm directive below.
+ * In Thumb-2, msr with an immediate value is not allowed.
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+#define PLC	"r"
+#else
+#define PLC	"I"
+#endif
+
+/*
  * setup stacks for re-entrant exception handlers
  */
 __asm__ (
 	"msr	cpsr_c, %1\n\t"
-	"add	sp, %0, %2\n\t"
+	"add	r14, %0, %2\n\t"
+	"mov	sp, r14\n\t"
 	"msr	cpsr_c, %3\n\t"
-	"add	sp, %0, %4\n\t"
+	"add	r14, %0, %4\n\t"
+	"mov	sp, r14\n\t"
 	"msr	cpsr_c, %5\n\t"
-	"add	sp, %0, %6\n\t"
+	"add	r14, %0, %6\n\t"
+	"mov	sp, r14\n\t"
 	"msr	cpsr_c, %7"
 	    :
 	    : "r" (stk),
-	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
 	      "I" (offsetof(struct stack, irq[0])),
-	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
 	      "I" (offsetof(struct stack, abt[0])),
-	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
 	      "I" (offsetof(struct stack, und[0])),
-	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
 	    : "r14");
 }
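The `PLC` ("placement constraint") switch works because the immediate form of `msr` is ARM-only: with the `"I"` constraint GCC substitutes the mode mask directly (for the first operand above, `msr cpsr_c, #0xd2`), while `"r"` materialises it in a register first, which Thumb-2 can encode. The `add sp, %0, %2` sequences are rewritten through r14 for a similar reason — Thumb-2 restricts `add` with sp as the destination — and r14 is already in the clobber list. A minimal sketch of the constraint difference:

	/* "I" emits:  msr cpsr_c, #0xd2              (ARM only)       */
	/* "r" emits:  mov rN, #0xd2; msr cpsr_c, rN  (Thumb-2 capable) */
	__asm__("msr	cpsr_c, %0" : : PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE));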
+4
arch/arm/kernel/unwind.c
···
 };

 enum regs {
+#ifdef CONFIG_THUMB2_KERNEL
+	FP = 7,
+#else
 	FP = 11,
+#endif
 	SP = 13,
 	LR = 14,
 	PC = 15
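FP differs because the two instruction sets use different frame-pointer conventions: ARM-state code keeps its frame pointer in r11, while Thumb code conventionally uses r7, since r11 is unreachable from most 16-bit encodings. The unwinder only needs the enum to match what the compiler emits; a sketch of the assumed typical prologues behind this choice:

/* ARM   (-marm):   frame chained through fp (r11)
 * Thumb (-mthumb): frame chained through r7, e.g. "push {r7, lr}; add r7, sp, #0"
 */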