Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 6384/1: Remove the domain switching on ARMv6k/v7 CPUs

This patch removes the domain switching functionality via the set_fs and
__switch_to functions on cores that have a TLS register.

Currently, the ioremap and vmalloc areas share the same level 1 page
tables and therefore have the same domain (DOMAIN_KERNEL). When the
kernel domain is modified from Client to Manager (via the __set_fs or in
the __switch_to function), the XN (eXecute Never) bit is overridden and
newer CPUs can speculatively prefetch the ioremap'ed memory.

Linux performs the kernel domain switching to allow user-specific
functions (copy_to/from_user, get/put_user etc.) to access kernel
memory. In order for these functions to work with the kernel domain set
to Client, the patch modifies the LDRT/STRT and related instructions to
the LDR/STR ones.

The user pages access rights are also modified for kernel read-only
access rather than read/write so that the copy-on-write mechanism still
works. CPU_USE_DOMAINS gets disabled only if the hardware has a TLS register
(CPU_32v6K is defined) since writing the TLS value to the high vectors page
isn't possible.

The user addresses passed to the kernel are checked by the access_ok()
function so that they do not point to the kernel space.

Tested-by: Anton Vorontsov <cbouatmailru@gmail.com>
Cc: Tony Lindgren <tony@atomide.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Catalin Marinas; committed by Russell King.
247055aa ff8b16d7

+153 -92
+7 -6
arch/arm/include/asm/assembler.h
··· 18 18 #endif 19 19 20 20 #include <asm/ptrace.h> 21 + #include <asm/domain.h> 21 22 22 23 /* 23 24 * Endian independent macros for shifting bytes within registers. ··· 207 206 */ 208 207 #ifdef CONFIG_THUMB2_KERNEL 209 208 210 - .macro usraccoff, instr, reg, ptr, inc, off, cond, abort 209 + .macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T() 211 210 9999: 212 211 .if \inc == 1 213 - \instr\cond\()bt \reg, [\ptr, #\off] 212 + \instr\cond\()b\()\t\().w \reg, [\ptr, #\off] 214 213 .elseif \inc == 4 215 - \instr\cond\()t \reg, [\ptr, #\off] 214 + \instr\cond\()\t\().w \reg, [\ptr, #\off] 216 215 .else 217 216 .error "Unsupported inc macro argument" 218 217 .endif ··· 247 246 248 247 #else /* !CONFIG_THUMB2_KERNEL */ 249 248 250 - .macro usracc, instr, reg, ptr, inc, cond, rept, abort 249 + .macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T() 251 250 .rept \rept 252 251 9999: 253 252 .if \inc == 1 254 - \instr\cond\()bt \reg, [\ptr], #\inc 253 + \instr\cond\()b\()\t \reg, [\ptr], #\inc 255 254 .elseif \inc == 4 256 - \instr\cond\()t \reg, [\ptr], #\inc 255 + \instr\cond\()\t \reg, [\ptr], #\inc 257 256 .else 258 257 .error "Unsupported inc macro argument" 259 258 .endif
+29 -2
arch/arm/include/asm/domain.h
··· 45 45 */ 46 46 #define DOMAIN_NOACCESS 0 47 47 #define DOMAIN_CLIENT 1 48 + #ifdef CONFIG_CPU_USE_DOMAINS 48 49 #define DOMAIN_MANAGER 3 50 + #else 51 + #define DOMAIN_MANAGER 1 52 + #endif 49 53 50 54 #define domain_val(dom,type) ((type) << (2*(dom))) 51 55 52 56 #ifndef __ASSEMBLY__ 53 57 54 - #ifdef CONFIG_MMU 58 + #ifdef CONFIG_CPU_USE_DOMAINS 55 59 #define set_domain(x) \ 56 60 do { \ 57 61 __asm__ __volatile__( \ ··· 78 74 #define modify_domain(dom,type) do { } while (0) 79 75 #endif 80 76 77 + /* 78 + * Generate the T (user) versions of the LDR/STR and related 79 + * instructions (inline assembly) 80 + */ 81 + #ifdef CONFIG_CPU_USE_DOMAINS 82 + #define T(instr) #instr "t" 83 + #else 84 + #define T(instr) #instr 81 85 #endif 82 - #endif /* !__ASSEMBLY__ */ 86 + 87 + #else /* __ASSEMBLY__ */ 88 + 89 + /* 90 + * Generate the T (user) versions of the LDR/STR and related 91 + * instructions 92 + */ 93 + #ifdef CONFIG_CPU_USE_DOMAINS 94 + #define T(instr) instr ## t 95 + #else 96 + #define T(instr) instr 97 + #endif 98 + 99 + #endif /* __ASSEMBLY__ */ 100 + 101 + #endif /* !__ASM_PROC_DOMAIN_H */
+5 -4
arch/arm/include/asm/futex.h
··· 13 13 #include <linux/preempt.h> 14 14 #include <linux/uaccess.h> 15 15 #include <asm/errno.h> 16 + #include <asm/domain.h> 16 17 17 18 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 18 19 __asm__ __volatile__( \ 19 - "1: ldrt %1, [%2]\n" \ 20 + "1: " T(ldr) " %1, [%2]\n" \ 20 21 " " insn "\n" \ 21 - "2: strt %0, [%2]\n" \ 22 + "2: " T(str) " %0, [%2]\n" \ 22 23 " mov %0, #0\n" \ 23 24 "3:\n" \ 24 25 " .pushsection __ex_table,\"a\"\n" \ ··· 98 97 pagefault_disable(); /* implies preempt_disable() */ 99 98 100 99 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" 101 - "1: ldrt %0, [%3]\n" 100 + "1: " T(ldr) " %0, [%3]\n" 102 101 " teq %0, %1\n" 103 102 " it eq @ explicit IT needed for the 2b label\n" 104 - "2: streqt %2, [%3]\n" 103 + "2: " T(streq) " %2, [%3]\n" 105 104 "3:\n" 106 105 " .pushsection __ex_table,\"a\"\n" 107 106 " .align 3\n"
+2
arch/arm/include/asm/traps.h
··· 27 27 extern void __init early_trap_init(void); 28 28 extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); 29 29 30 + extern void *vectors_page; 31 + 30 32 #endif
+8 -8
arch/arm/include/asm/uaccess.h
··· 227 227 228 228 #define __get_user_asm_byte(x,addr,err) \ 229 229 __asm__ __volatile__( \ 230 - "1: ldrbt %1,[%2]\n" \ 230 + "1: " T(ldrb) " %1,[%2],#0\n" \ 231 231 "2:\n" \ 232 232 " .pushsection .fixup,\"ax\"\n" \ 233 233 " .align 2\n" \ ··· 263 263 264 264 #define __get_user_asm_word(x,addr,err) \ 265 265 __asm__ __volatile__( \ 266 - "1: ldrt %1,[%2]\n" \ 266 + "1: " T(ldr) " %1,[%2],#0\n" \ 267 267 "2:\n" \ 268 268 " .pushsection .fixup,\"ax\"\n" \ 269 269 " .align 2\n" \ ··· 308 308 309 309 #define __put_user_asm_byte(x,__pu_addr,err) \ 310 310 __asm__ __volatile__( \ 311 - "1: strbt %1,[%2]\n" \ 311 + "1: " T(strb) " %1,[%2],#0\n" \ 312 312 "2:\n" \ 313 313 " .pushsection .fixup,\"ax\"\n" \ 314 314 " .align 2\n" \ ··· 341 341 342 342 #define __put_user_asm_word(x,__pu_addr,err) \ 343 343 __asm__ __volatile__( \ 344 - "1: strt %1,[%2]\n" \ 344 + "1: " T(str) " %1,[%2],#0\n" \ 345 345 "2:\n" \ 346 346 " .pushsection .fixup,\"ax\"\n" \ 347 347 " .align 2\n" \ ··· 366 366 367 367 #define __put_user_asm_dword(x,__pu_addr,err) \ 368 368 __asm__ __volatile__( \ 369 - ARM( "1: strt " __reg_oper1 ", [%1], #4\n" ) \ 370 - ARM( "2: strt " __reg_oper0 ", [%1]\n" ) \ 371 - THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \ 372 - THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \ 369 + ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \ 370 + ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \ 371 + THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \ 372 + THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \ 373 373 "3:\n" \ 374 374 " .pushsection .fixup,\"ax\"\n" \ 375 375 " .align 2\n" \
+2 -2
arch/arm/kernel/entry-armv.S
··· 735 735 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack 736 736 THUMB( str sp, [ip], #4 ) 737 737 THUMB( str lr, [ip], #4 ) 738 - #ifdef CONFIG_MMU 738 + #ifdef CONFIG_CPU_USE_DOMAINS 739 739 ldr r6, [r2, #TI_CPU_DOMAIN] 740 740 #endif 741 741 set_tls r3, r4, r5 ··· 744 744 ldr r8, =__stack_chk_guard 745 745 ldr r7, [r7, #TSK_STACK_CANARY] 746 746 #endif 747 - #ifdef CONFIG_MMU 747 + #ifdef CONFIG_CPU_USE_DOMAINS 748 748 mcr p15, 0, r6, c3, c0, 0 @ Set domain register 749 749 #endif 750 750 mov r5, r0
+5
arch/arm/kernel/fiq.c
··· 45 45 #include <asm/fiq.h> 46 46 #include <asm/irq.h> 47 47 #include <asm/system.h> 48 + #include <asm/traps.h> 48 49 49 50 static unsigned long no_fiq_insn; 50 51 ··· 78 77 79 78 void set_fiq_handler(void *start, unsigned int length) 80 79 { 80 + #if defined(CONFIG_CPU_USE_DOMAINS) 81 81 memcpy((void *)0xffff001c, start, length); 82 + #else 83 + memcpy(vectors_page + 0x1c, start, length); 84 + #endif 82 85 flush_icache_range(0xffff001c, 0xffff001c + length); 83 86 if (!vectors_high()) 84 87 flush_icache_range(0x1c, 0x1c + length);
+10 -4
arch/arm/kernel/traps.c
··· 37 37 38 38 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; 39 39 40 + void *vectors_page; 41 + 40 42 #ifdef CONFIG_DEBUG_USER 41 43 unsigned int user_debug; 42 44 ··· 761 759 762 760 void __init early_trap_init(void) 763 761 { 762 + #if defined(CONFIG_CPU_USE_DOMAINS) 764 763 unsigned long vectors = CONFIG_VECTORS_BASE; 764 + #else 765 + unsigned long vectors = (unsigned long)vectors_page; 766 + #endif 765 767 extern char __stubs_start[], __stubs_end[]; 766 768 extern char __vectors_start[], __vectors_end[]; 767 769 extern char __kuser_helper_start[], __kuser_helper_end[]; ··· 789 783 * Copy signal return handlers into the vector page, and 790 784 * set sigreturn to be a pointer to these. 791 785 */ 792 - memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes, 793 - sizeof(sigreturn_codes)); 794 - memcpy((void *)KERN_RESTART_CODE, syscall_restart_code, 795 - sizeof(syscall_restart_code)); 786 + memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), 787 + sigreturn_codes, sizeof(sigreturn_codes)); 788 + memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE), 789 + syscall_restart_code, sizeof(syscall_restart_code)); 796 790 797 791 flush_icache_range(vectors, vectors + PAGE_SIZE); 798 792 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+7 -6
arch/arm/lib/getuser.S
··· 28 28 */ 29 29 #include <linux/linkage.h> 30 30 #include <asm/errno.h> 31 + #include <asm/domain.h> 31 32 32 33 ENTRY(__get_user_1) 33 - 1: ldrbt r2, [r0] 34 + 1: T(ldrb) r2, [r0] 34 35 mov r0, #0 35 36 mov pc, lr 36 37 ENDPROC(__get_user_1) 37 38 38 39 ENTRY(__get_user_2) 39 40 #ifdef CONFIG_THUMB2_KERNEL 40 - 2: ldrbt r2, [r0] 41 - 3: ldrbt r3, [r0, #1] 41 + 2: T(ldrb) r2, [r0] 42 + 3: T(ldrb) r3, [r0, #1] 42 43 #else 43 - 2: ldrbt r2, [r0], #1 44 - 3: ldrbt r3, [r0] 44 + 2: T(ldrb) r2, [r0], #1 45 + 3: T(ldrb) r3, [r0] 45 46 #endif 46 47 #ifndef __ARMEB__ 47 48 orr r2, r2, r3, lsl #8 ··· 54 53 ENDPROC(__get_user_2) 55 54 56 55 ENTRY(__get_user_4) 57 - 4: ldrt r2, [r0] 56 + 4: T(ldr) r2, [r0] 58 57 mov r0, #0 59 58 mov pc, lr 60 59 ENDPROC(__get_user_4)
+15 -14
arch/arm/lib/putuser.S
··· 28 28 */ 29 29 #include <linux/linkage.h> 30 30 #include <asm/errno.h> 31 + #include <asm/domain.h> 31 32 32 33 ENTRY(__put_user_1) 33 - 1: strbt r2, [r0] 34 + 1: T(strb) r2, [r0] 34 35 mov r0, #0 35 36 mov pc, lr 36 37 ENDPROC(__put_user_1) ··· 40 39 mov ip, r2, lsr #8 41 40 #ifdef CONFIG_THUMB2_KERNEL 42 41 #ifndef __ARMEB__ 43 - 2: strbt r2, [r0] 44 - 3: strbt ip, [r0, #1] 42 + 2: T(strb) r2, [r0] 43 + 3: T(strb) ip, [r0, #1] 45 44 #else 46 - 2: strbt ip, [r0] 47 - 3: strbt r2, [r0, #1] 45 + 2: T(strb) ip, [r0] 46 + 3: T(strb) r2, [r0, #1] 48 47 #endif 49 48 #else /* !CONFIG_THUMB2_KERNEL */ 50 49 #ifndef __ARMEB__ 51 - 2: strbt r2, [r0], #1 52 - 3: strbt ip, [r0] 50 + 2: T(strb) r2, [r0], #1 51 + 3: T(strb) ip, [r0] 53 52 #else 54 - 2: strbt ip, [r0], #1 55 - 3: strbt r2, [r0] 53 + 2: T(strb) ip, [r0], #1 54 + 3: T(strb) r2, [r0] 56 55 #endif 57 56 #endif /* CONFIG_THUMB2_KERNEL */ 58 57 mov r0, #0 ··· 60 59 ENDPROC(__put_user_2) 61 60 62 61 ENTRY(__put_user_4) 63 - 4: strt r2, [r0] 62 + 4: T(str) r2, [r0] 64 63 mov r0, #0 65 64 mov pc, lr 66 65 ENDPROC(__put_user_4) 67 66 68 67 ENTRY(__put_user_8) 69 68 #ifdef CONFIG_THUMB2_KERNEL 70 - 5: strt r2, [r0] 71 - 6: strt r3, [r0, #4] 69 + 5: T(str) r2, [r0] 70 + 6: T(str) r3, [r0, #4] 72 71 #else 73 - 5: strt r2, [r0], #4 74 - 6: strt r3, [r0] 72 + 5: T(str) r2, [r0], #4 73 + 6: T(str) r3, [r0] 75 74 #endif 76 75 mov r0, #0 77 76 mov pc, lr
+42 -41
arch/arm/lib/uaccess.S
··· 14 14 #include <linux/linkage.h> 15 15 #include <asm/assembler.h> 16 16 #include <asm/errno.h> 17 + #include <asm/domain.h> 17 18 18 19 .text 19 20 ··· 32 31 rsb ip, ip, #4 33 32 cmp ip, #2 34 33 ldrb r3, [r1], #1 35 - USER( strbt r3, [r0], #1) @ May fault 34 + USER( T(strb) r3, [r0], #1) @ May fault 36 35 ldrgeb r3, [r1], #1 37 - USER( strgebt r3, [r0], #1) @ May fault 36 + USER( T(strgeb) r3, [r0], #1) @ May fault 38 37 ldrgtb r3, [r1], #1 39 - USER( strgtbt r3, [r0], #1) @ May fault 38 + USER( T(strgtb) r3, [r0], #1) @ May fault 40 39 sub r2, r2, ip 41 40 b .Lc2u_dest_aligned 42 41 ··· 59 58 addmi ip, r2, #4 60 59 bmi .Lc2u_0nowords 61 60 ldr r3, [r1], #4 62 - USER( strt r3, [r0], #4) @ May fault 61 + USER( T(str) r3, [r0], #4) @ May fault 63 62 mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction 64 63 rsb ip, ip, #0 65 64 movs ip, ip, lsr #32 - PAGE_SHIFT ··· 88 87 stmneia r0!, {r3 - r4} @ Shouldnt fault 89 88 tst ip, #4 90 89 ldrne r3, [r1], #4 91 - strnet r3, [r0], #4 @ Shouldnt fault 90 + T(strne) r3, [r0], #4 @ Shouldnt fault 92 91 ands ip, ip, #3 93 92 beq .Lc2u_0fupi 94 93 .Lc2u_0nowords: teq ip, #0 95 94 beq .Lc2u_finished 96 95 .Lc2u_nowords: cmp ip, #2 97 96 ldrb r3, [r1], #1 98 - USER( strbt r3, [r0], #1) @ May fault 97 + USER( T(strb) r3, [r0], #1) @ May fault 99 98 ldrgeb r3, [r1], #1 100 - USER( strgebt r3, [r0], #1) @ May fault 99 + USER( T(strgeb) r3, [r0], #1) @ May fault 101 100 ldrgtb r3, [r1], #1 102 - USER( strgtbt r3, [r0], #1) @ May fault 101 + USER( T(strgtb) r3, [r0], #1) @ May fault 103 102 b .Lc2u_finished 104 103 105 104 .Lc2u_not_enough: ··· 120 119 mov r3, r7, pull #8 121 120 ldr r7, [r1], #4 122 121 orr r3, r3, r7, push #24 123 - USER( strt r3, [r0], #4) @ May fault 122 + USER( T(str) r3, [r0], #4) @ May fault 124 123 mov ip, r0, lsl #32 - PAGE_SHIFT 125 124 rsb ip, ip, #0 126 125 movs ip, ip, lsr #32 - PAGE_SHIFT ··· 155 154 movne r3, r7, pull #8 156 155 ldrne r7, [r1], #4 157 156 orrne r3, r3, r7, push #24 158 - strnet r3, [r0], #4 @ Shouldnt fault 157 + T(strne) r3, [r0], #4 @ Shouldnt fault 159 158 ands ip, ip, #3 160 159 beq .Lc2u_1fupi 161 160 .Lc2u_1nowords: mov r3, r7, get_byte_1 162 161 teq ip, #0 163 162 beq .Lc2u_finished 164 163 cmp ip, #2 165 - USER( strbt r3, [r0], #1) @ May fault 164 + USER( T(strb) r3, [r0], #1) @ May fault 166 165 movge r3, r7, get_byte_2 167 - USER( strgebt r3, [r0], #1) @ May fault 166 + USER( T(strgeb) r3, [r0], #1) @ May fault 168 167 movgt r3, r7, get_byte_3 169 - USER( strgtbt r3, [r0], #1) @ May fault 168 + USER( T(strgtb) r3, [r0], #1) @ May fault 170 169 b .Lc2u_finished 171 170 172 171 .Lc2u_2fupi: subs r2, r2, #4 ··· 175 174 mov r3, r7, pull #16 176 175 ldr r7, [r1], #4 177 176 orr r3, r3, r7, push #16 178 - USER( strt r3, [r0], #4) @ May fault 177 + USER( T(str) r3, [r0], #4) @ May fault 179 178 mov ip, r0, lsl #32 - PAGE_SHIFT 180 179 rsb ip, ip, #0 181 180 movs ip, ip, lsr #32 - PAGE_SHIFT ··· 210 209 movne r3, r7, pull #16 211 210 ldrne r7, [r1], #4 212 211 orrne r3, r3, r7, push #16 213 - strnet r3, [r0], #4 @ Shouldnt fault 212 + T(strne) r3, [r0], #4 @ Shouldnt fault 214 213 ands ip, ip, #3 215 214 beq .Lc2u_2fupi 216 215 .Lc2u_2nowords: mov r3, r7, get_byte_2 217 216 teq ip, #0 218 217 beq .Lc2u_finished 219 218 cmp ip, #2 220 - USER( strbt r3, [r0], #1) @ May fault 219 + USER( T(strb) r3, [r0], #1) @ May fault 221 220 movge r3, r7, get_byte_3 222 - USER( strgebt r3, [r0], #1) @ May fault 221 + USER( T(strgeb) r3, [r0], #1) @ May fault 223 222 ldrgtb r3, [r1], #0 224 - USER( strgtbt r3, [r0], #1) @ May fault 223 + USER( T(strgtb) r3, [r0], #1) @ May fault 225 224 b .Lc2u_finished 226 225 227 226 .Lc2u_3fupi: subs r2, r2, #4 ··· 230 229 mov r3, r7, pull #24 231 230 ldr r7, [r1], #4 232 231 orr r3, r3, r7, push #8 233 - USER( strt r3, [r0], #4) @ May fault 232 + USER( T(str) r3, [r0], #4) @ May fault 234 233 mov ip, r0, lsl #32 - PAGE_SHIFT 235 234 rsb ip, ip, #0 236 235 movs ip, ip, lsr #32 - PAGE_SHIFT ··· 265 264 movne r3, r7, pull #24 266 265 ldrne r7, [r1], #4 267 266 orrne r3, r3, r7, push #8 268 - strnet r3, [r0], #4 @ Shouldnt fault 267 + T(strne) r3, [r0], #4 @ Shouldnt fault 269 268 ands ip, ip, #3 270 269 beq .Lc2u_3fupi 271 270 .Lc2u_3nowords: mov r3, r7, get_byte_3 272 271 teq ip, #0 273 272 beq .Lc2u_finished 274 273 cmp ip, #2 275 - USER( strbt r3, [r0], #1) @ May fault 274 + USER( T(strb) r3, [r0], #1) @ May fault 276 275 ldrgeb r3, [r1], #1 277 - USER( strgebt r3, [r0], #1) @ May fault 276 + USER( T(strgeb) r3, [r0], #1) @ May fault 278 277 ldrgtb r3, [r1], #0 279 - USER( strgtbt r3, [r0], #1) @ May fault 278 + USER( T(strgtb) r3, [r0], #1) @ May fault 280 279 b .Lc2u_finished 281 280 ENDPROC(__copy_to_user) 282 281 ··· 295 294 .Lcfu_dest_not_aligned: 296 295 rsb ip, ip, #4 297 296 cmp ip, #2 298 - USER( ldrbt r3, [r1], #1) @ May fault 297 + USER( T(ldrb) r3, [r1], #1) @ May fault 299 298 strb r3, [r0], #1 300 - USER( ldrgebt r3, [r1], #1) @ May fault 299 + USER( T(ldrgeb) r3, [r1], #1) @ May fault 301 300 strgeb r3, [r0], #1 302 - USER( ldrgtbt r3, [r1], #1) @ May fault 301 + USER( T(ldrgtb) r3, [r1], #1) @ May fault 303 302 strgtb r3, [r0], #1 304 303 sub r2, r2, ip 305 304 b .Lcfu_dest_aligned ··· 322 321 .Lcfu_0fupi: subs r2, r2, #4 323 322 addmi ip, r2, #4 324 323 bmi .Lcfu_0nowords 325 - USER( ldrt r3, [r1], #4) 324 + USER( T(ldr) r3, [r1], #4) 326 325 str r3, [r0], #4 327 326 mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction 328 327 rsb ip, ip, #0 ··· 351 350 ldmneia r1!, {r3 - r4} @ Shouldnt fault 352 351 stmneia r0!, {r3 - r4} 353 352 tst ip, #4 354 - ldrnet r3, [r1], #4 @ Shouldnt fault 353 + T(ldrne) r3, [r1], #4 @ Shouldnt fault 355 354 strne r3, [r0], #4 356 355 ands ip, ip, #3 357 356 beq .Lcfu_0fupi 358 357 .Lcfu_0nowords: teq ip, #0 359 358 beq .Lcfu_finished 360 359 .Lcfu_nowords: cmp ip, #2 361 - USER( ldrbt r3, [r1], #1) @ May fault 360 + USER( T(ldrb) r3, [r1], #1) @ May fault 362 361 strb r3, [r0], #1 363 - USER( ldrgebt r3, [r1], #1) @ May fault 362 + USER( T(ldrgeb) r3, [r1], #1) @ May fault 364 363 strgeb r3, [r0], #1 365 - USER( ldrgtbt r3, [r1], #1) @ May fault 364 + USER( T(ldrgtb) r3, [r1], #1) @ May fault 366 365 strgtb r3, [r0], #1 367 366 b .Lcfu_finished 368 367 ··· 375 374 376 375 .Lcfu_src_not_aligned: 377 376 bic r1, r1, #3 378 - USER( ldrt r7, [r1], #4) @ May fault 377 + USER( T(ldr) r7, [r1], #4) @ May fault 379 378 cmp ip, #2 380 379 bgt .Lcfu_3fupi 381 380 beq .Lcfu_2fupi ··· 383 382 addmi ip, r2, #4 384 383 bmi .Lcfu_1nowords 385 384 mov r3, r7, pull #8 386 - USER( ldrt r7, [r1], #4) @ May fault 385 + USER( T(ldr) r7, [r1], #4) @ May fault 387 386 orr r3, r3, r7, push #24 388 387 str r3, [r0], #4 389 388 mov ip, r1, lsl #32 - PAGE_SHIFT ··· 418 417 stmneia r0!, {r3 - r4} 419 418 tst ip, #4 420 419 movne r3, r7, pull #8 421 - USER( ldrnet r7, [r1], #4) @ May fault 420 + USER( T(ldrne) r7, [r1], #4) @ May fault 422 421 orrne r3, r3, r7, push #24 423 422 strne r3, [r0], #4 424 423 ands ip, ip, #3 ··· 438 437 addmi ip, r2, #4 439 438 bmi .Lcfu_2nowords 440 439 mov r3, r7, pull #16 441 - USER( ldrt r7, [r1], #4) @ May fault 440 + USER( T(ldr) r7, [r1], #4) @ May fault 442 441 orr r3, r3, r7, push #16 443 442 str r3, [r0], #4 444 443 mov ip, r1, lsl #32 - PAGE_SHIFT ··· 474 473 stmneia r0!, {r3 - r4} 475 474 tst ip, #4 476 475 movne r3, r7, pull #16 477 - USER( ldrnet r7, [r1], #4) @ May fault 476 + USER( T(ldrne) r7, [r1], #4) @ May fault 478 477 orrne r3, r3, r7, push #16 479 478 strne r3, [r0], #4 480 479 ands ip, ip, #3 ··· 486 485 strb r3, [r0], #1 487 486 movge r3, r7, get_byte_3 488 487 strgeb r3, [r0], #1 489 - USER( ldrgtbt r3, [r1], #0) @ May fault 488 + USER( T(ldrgtb) r3, [r1], #0) @ May fault 490 489 strgtb r3, [r0], #1 491 490 b .Lcfu_finished 492 491 ··· 494 493 addmi ip, r2, #4 495 494 bmi .Lcfu_3nowords 496 495 mov r3, r7, pull #24 497 - USER( ldrt r7, [r1], #4) @ May fault 496 + USER( T(ldr) r7, [r1], #4) @ May fault 498 497 orr r3, r3, r7, push #8 499 498 str r3, [r0], #4 500 499 mov ip, r1, lsl #32 - PAGE_SHIFT ··· 529 528 stmneia r0!, {r3 - r4} 530 529 tst ip, #4 531 530 movne r3, r7, pull #24 532 - USER( ldrnet r7, [r1], #4) @ May fault 531 + USER( T(ldrne) r7, [r1], #4) @ May fault 533 532 orrne r3, r3, r7, push #8 534 533 strne r3, [r0], #4 535 534 ands ip, ip, #3 ··· 539 538 beq .Lcfu_finished 540 539 cmp ip, #2 541 540 strb r3, [r0], #1 542 - USER( ldrgebt r3, [r1], #1) @ May fault 541 + USER( T(ldrgeb) r3, [r1], #1) @ May fault 543 542 strgeb r3, [r0], #1 544 - USER( ldrgtbt r3, [r1], #1) @ May fault 543 + USER( T(ldrgtb) r3, [r1], #1) @ May fault 545 544 strgtb r3, [r0], #1 546 545 b .Lcfu_finished 547 546 ENDPROC(__copy_from_user)
+8
arch/arm/mm/Kconfig
··· 599 599 help 600 600 Processor has the CP15 register, which has MPU related registers. 601 601 602 + config CPU_USE_DOMAINS 603 + bool 604 + depends on MMU 605 + default y if !CPU_32v6K 606 + help 607 + This option enables or disables the use of domain switching 608 + via the set_fs() function. 609 + 602 610 # 603 611 # CPU supports 36-bit I/O 604 612 #
+3 -3
arch/arm/mm/mmu.c
··· 24 24 #include <asm/smp_plat.h> 25 25 #include <asm/tlb.h> 26 26 #include <asm/highmem.h> 27 + #include <asm/traps.h> 27 28 28 29 #include <asm/mach/arch.h> 29 30 #include <asm/mach/map.h> ··· 915 914 { 916 915 struct map_desc map; 917 916 unsigned long addr; 918 - void *vectors; 919 917 920 918 /* 921 919 * Allocate the vector page early. 922 920 */ 923 - vectors = early_alloc(PAGE_SIZE); 921 + vectors_page = early_alloc(PAGE_SIZE); 924 922 925 923 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) 926 924 pmd_clear(pmd_off_k(addr)); ··· 959 959 * location (0xffff0000). If we aren't using high-vectors, also 960 960 * create a mapping at the low-vectors virtual address. 961 961 */ 962 - map.pfn = __phys_to_pfn(virt_to_phys(vectors)); 962 + map.pfn = __phys_to_pfn(virt_to_phys(vectors_page)); 963 963 map.virtual = 0xffff0000; 964 964 map.length = PAGE_SIZE; 965 965 map.type = MT_HIGH_VECTORS;
+7
arch/arm/mm/proc-macros.S
··· 99 99 * 110x 0 1 0 r/w r/o 100 100 * 11x0 0 1 0 r/w r/o 101 101 * 1111 0 1 1 r/w r/w 102 + * 103 + * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed: 104 + * 110x 1 1 1 r/o r/o 105 + * 11x0 1 1 1 r/o r/o 102 106 */ 103 107 .macro armv6_mt_table pfx 104 108 \pfx\()_mt_table: ··· 142 138 143 139 tst r1, #L_PTE_USER 144 140 orrne r3, r3, #PTE_EXT_AP1 141 + #ifdef CONFIG_CPU_USE_DOMAINS 142 + @ allow kernel read/write access to read-only user pages 145 143 tstne r3, #PTE_EXT_APX 146 144 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 145 + #endif 147 146 148 147 tst r1, #L_PTE_EXEC 149 148 orreq r3, r3, #PTE_EXT_XN
+3 -2
arch/arm/mm/proc-v7.S
··· 148 148 149 149 tst r1, #L_PTE_USER 150 150 orrne r3, r3, #PTE_EXT_AP1 151 + #ifdef CONFIG_CPU_USE_DOMAINS 152 + @ allow kernel read/write access to read-only user pages 151 153 tstne r3, #PTE_EXT_APX 152 154 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 155 + #endif 153 156 154 157 tst r1, #L_PTE_EXEC 155 158 orreq r3, r3, #PTE_EXT_XN ··· 276 273 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) 277 274 ALT_UP(orr r4, r4, #TTB_FLAGS_UP) 278 275 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 279 - mov r10, #0x1f @ domains 0, 1 = manager 280 - mcr p15, 0, r10, c3, c0, 0 @ load domain access register 281 276 /* 282 277 * Memory region attributes with SCTLR.TRE=1 283 278 *