
ARM: Allow SMP kernels to boot on UP systems

UP systems do not implement all the instructions that SMP systems have,
so in order to boot an SMP kernel on a UP system, we need to rewrite
parts of the kernel.

Do this using an 'alternatives' scheme, where the kernel code and data
are modified prior to initialization to replace the SMP instructions,
thereby rendering the problematic code ineffectual. We use the linker
to generate a list of 32-bit word locations and their replacement values,
and run through these replacements when we detect a UP system.
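
As a rough sketch in C (hypothetical names; the patch implements this in
assembly as __fixup_smp_on_up in arch/arm/kernel/head.S, which additionally
applies the load-address offset omitted here), the fixup pass is just a walk
over (address, value) pairs:

#include <stdint.h>

/*
 * One entry per patched location: ALT_UP()/ALT_UP_B() emit the address
 * of the SMP-only word followed by its UP replacement into the
 * .alt.smp.init section, which the linker brackets with
 * __smpalt_begin/__smpalt_end.
 */
struct smp_alt {
	uint32_t *addr;		/* location of the SMP-only word */
	uint32_t up_val;	/* replacement word for UP systems */
};

static void fixup_smp_on_up(struct smp_alt *begin, struct smp_alt *end)
{
	struct smp_alt *a;

	for (a = begin; a < end; a++)
		*a->addr = a->up_val;	/* patch each word in place */
}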

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

+237 -102
+13
arch/arm/Kconfig
···
 	  If you don't know what to do here, say N.
 
+config SMP_ON_UP
+	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on SMP && !XIP && !THUMB2_KERNEL
+	default y
+	help
+	  SMP kernels contain instructions which fail on non-SMP processors.
+	  Enabling this option allows the kernel to modify itself to make
+	  these instructions safe.  Disabling it allows about 1K of space
+	  savings.
+
+	  If you don't know what to do here, say Y.
+
 config HAVE_ARM_SCU
 	bool
 	depends on SMP
+25 -2
arch/arm/include/asm/assembler.h
···
 		.long	9999b,9001f;			\
 		.popsection
 
+#ifdef CONFIG_SMP
+#define ALT_SMP(instr...)			\
+9998:	instr
+#define ALT_UP(instr...)			\
+	.pushsection ".alt.smp.init", "a"	;\
+	.long	9998b				;\
+	instr					;\
+	.popsection
+#define ALT_UP_B(label)				\
+	.equ	up_b_offset, label - 9998b	;\
+	.pushsection ".alt.smp.init", "a"	;\
+	.long	9998b				;\
+	b	. + up_b_offset			;\
+	.popsection
+#else
+#define ALT_SMP(instr...)
+#define ALT_UP(instr...) instr
+#define ALT_UP_B(label) b label
+#endif
+
 /*
  * SMP data memory barrier
  */
 	.macro	smp_dmb
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
-	dmb
+	ALT_SMP(dmb)
 #elif __LINUX_ARM_ARCH__ == 6
-	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
+#else
+#error Incompatible SMP platform
 #endif
+	ALT_UP(nop)
 #endif
 	.endm
+6 -1
arch/arm/include/asm/smp_mpidr.h
···
 #define hard_smp_processor_id()			\
 	({						\
 		unsigned int cpunum;		\
-		__asm__("mrc p15, 0, %0, c0, c0, 5\n"	\
+		__asm__("\n"			\
+			"1:	mrc p15, 0, %0, c0, c0, 5\n"	\
+			"	.pushsection \".alt.smp.init\", \"a\"\n"\
+			"	.long	1b\n"	\
+			"	mov	%0, #0\n"	\
+			"	.popsection"	\
 			: "=r" (cpunum));	\
 		cpunum &= 0x0F;			\
 	})
+15
arch/arm/include/asm/smp_plat.h
···
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
 }
 
+/*
+ * Return true if we are running on a SMP platform
+ */
+static inline bool is_smp(void)
+{
+#ifndef CONFIG_SMP
+	return false;
+#elif defined(CONFIG_SMP_ON_UP)
+	extern unsigned int smp_on_up;
+	return !!smp_on_up;
+#else
+	return true;
+#endif
+}
+
 #endif
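
Note the cost model: in a pure-SMP or pure-UP configuration is_smp() folds to
a compile-time constant and the dead branch disappears; only SMP_ON_UP kernels
pay for a load of the smp_on_up word that head.S patches. An illustrative
caller (not part of the patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/smp_plat.h>

static void __init report_platform(void)
{
	/* Constant-folded unless CONFIG_SMP_ON_UP is enabled. */
	if (is_smp())
		printk(KERN_INFO "using SMP code paths\n");
	else
		printk(KERN_INFO "using UP code paths\n");
}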
+17 -7
arch/arm/include/asm/tlbflush.h
···
 #undef _TLB
 #undef MULTI_TLB
 
+#ifdef CONFIG_SMP_ON_UP
+#define MULTI_TLB 1
+#endif
+
 #define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)
 
 #ifdef CONFIG_CPU_TLB_V3
···
 # define v6wbi_always_flags	(-1UL)
 #endif
 
-#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
+#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
 			 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
-#else
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
 			 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
-#endif
 
 #ifdef CONFIG_CPU_TLB_V7
-# define v7wbi_possible_flags	v7wbi_tlb_flags
-# define v7wbi_always_flags	v7wbi_tlb_flags
+
+# ifdef CONFIG_SMP_ON_UP
+#  define v7wbi_possible_flags	(v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
+#  define v7wbi_always_flags	(v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
+# elif defined(CONFIG_SMP)
+#  define v7wbi_possible_flags	v7wbi_tlb_flags_smp
+#  define v7wbi_always_flags	v7wbi_tlb_flags_smp
+# else
+#  define v7wbi_possible_flags	v7wbi_tlb_flags_up
+#  define v7wbi_always_flags	v7wbi_tlb_flags_up
+# endif
 # ifdef _TLB
 #  define MULTI_TLB 1
 # else
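
Since the SMP/UP decision is now made at runtime, the v7 TLB flags can no
longer be a single compile-time constant: possible_flags must cover either
outcome (bitwise OR), while always_flags keeps only what both variants share
(bitwise AND). A standalone C illustration of that invariant, with made-up
bits standing in for TLB_V7_IS_BTB and friends:

#include <assert.h>

#define FLAG_COMMON	(1u << 0)	/* set in both variants */
#define FLAG_SMP_ONLY	(1u << 1)
#define FLAG_UP_ONLY	(1u << 2)

int main(void)
{
	unsigned int smp = FLAG_COMMON | FLAG_SMP_ONLY;
	unsigned int up  = FLAG_COMMON | FLAG_UP_ONLY;

	unsigned int possible = smp | up;	/* any kernel might use these */
	unsigned int always   = smp & up;	/* every kernel will use these */

	/* Support code may be omitted only for flags outside 'possible',
	 * and tests skipped only for flags inside 'always'. */
	assert(always == FLAG_COMMON);
	assert(possible == (FLAG_COMMON | FLAG_SMP_ONLY | FLAG_UP_ONLY));
	return 0;
}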
+5 -6
arch/arm/kernel/entry-armv.S
···
 	 * this macro assumes that irqstat (r6) and base (r5) are
 	 * preserved from get_irqnr_and_base above
 	 */
-	test_for_ipi r0, r6, r5, lr
+	ALT_SMP(test_for_ipi r0, r6, r5, lr)
+	ALT_UP_B(9997f)
 	movne	r0, sp
 	adrne	lr, BSYM(1b)
 	bne	do_IPI
···
 	adrne	lr, BSYM(1b)
 	bne	do_local_timer
 #endif
+9997:
 #endif
 
 	.endm
···
 	beq	1b
 	rsbs	r0, r3, #0
 	/* beware -- each __kuser slot must be 8 instructions max */
-#ifdef CONFIG_SMP
-	b	__kuser_memory_barrier
-#else
-	usr_ret	lr
-#endif
+	ALT_SMP(b	__kuser_memory_barrier)
+	ALT_UP(usr_ret	lr)
 
 #endif
+50
arch/arm/kernel/head.S
···
 	movs	r8, r5				@ invalid machine (r5=0)?
 	beq	__error_a			@ yes, error 'a'
 	bl	__vet_atags
+#ifdef CONFIG_SMP_ON_UP
+	bl	__fixup_smp
+#endif
 	bl	__create_page_tables
 
 	/*
···
 	mov	pc, lr
 ENDPROC(__create_page_tables)
 	.ltorg
+
+#ifdef CONFIG_SMP_ON_UP
+__fixup_smp:
+	mov	r7, #0x00070000
+	orr	r6, r7, #0xff000000	@ mask 0xff070000
+	orr	r7, r7, #0x41000000	@ val 0x41070000
+	and	r0, r9, r6
+	teq	r0, r7			@ ARM CPU and ARMv6/v7?
+	bne	__fixup_smp_on_up	@ no, assume UP
+
+	orr	r6, r6, #0x0000ff00
+	orr	r6, r6, #0x000000f0	@ mask 0xff07fff0
+	orr	r7, r7, #0x0000b000
+	orr	r7, r7, #0x00000020	@ val 0x4107b020
+	and	r0, r9, r6
+	teq	r0, r7			@ ARM 11MPCore?
+	moveq	pc, lr			@ yes, assume SMP
+
+	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
+	tst	r0, #1 << 31
+	movne	pc, lr			@ bit 31 => SMP
+
+__fixup_smp_on_up:
+	adr	r0, 1f
+	ldmia	r0, {r3, r6, r7}
+	sub	r3, r0, r3
+	add	r6, r6, r3
+	add	r7, r7, r3
+2:	cmp	r6, r7
+	ldmia	r6!, {r0, r4}
+	strlo	r4, [r0, r3]
+	blo	2b
+	mov	pc, lr
+ENDPROC(__fixup_smp)
+
+1:	.word	.
+	.word	__smpalt_begin
+	.word	__smpalt_end
+
+	.pushsection .data
+	.globl	smp_on_up
+smp_on_up:
+	ALT_SMP(.long	1)
+	ALT_UP(.long	0)
+	.popsection
+
+#endif
 
 #include "head-common.S"
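
The magic constants decode the MIDR left in r9 by the earlier CPU probe: mask
0xff070000 with value 0x41070000 accepts ARM Ltd. parts (implementer 0x41)
whose architecture field matches the v6/v7 CPUID schemes, and the tighter mask
0xff07fff0 with value 0x4107b020 singles out the ARM 11MPCore, which is
SMP-capable but predates the MPIDR bit-31 convention. A hedged C restatement
of the decision (register values would come from the CP15 c0 ID registers):

#include <stdbool.h>
#include <stdint.h>

static bool probably_smp(uint32_t midr, uint32_t mpidr)
{
	if ((midr & 0xff070000) != 0x41070000)
		return false;		/* not an ARM v6/v7 part: assume UP */
	if ((midr & 0xff07fff0) == 0x4107b020)
		return true;		/* ARM 11MPCore: SMP */
	return (mpidr & (UINT32_C(1) << 31)) != 0;	/* bit 31 => SMP */
}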
+3 -1
arch/arm/kernel/setup.c
···
 #include <asm/procinfo.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/smp_plat.h>
 #include <asm/mach-types.h>
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
···
 	request_standard_resources(&meminfo, mdesc);
 
 #ifdef CONFIG_SMP
-	smp_init_cpus();
+	if (is_smp())
+		smp_init_cpus();
 #endif
 	reserve_crashkernel();
+11
arch/arm/kernel/vmlinux.lds.S
···
 		__tagtable_begin = .;
 			*(.taglist.init)
 		__tagtable_end = .;
+#ifdef CONFIG_SMP_ON_UP
+		__smpalt_begin = .;
+			*(.alt.smp.init)
+		__smpalt_end = .;
+#endif
 
 		INIT_SETUP(16)
···
 
 	/* Default discards */
 	DISCARDS
+
+#ifndef CONFIG_SMP_ON_UP
+	/DISCARD/ : {
+		*(.alt.smp.init)
+	}
+#endif
 }
 
 /*
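
For reference, C code could reach the same linker-provided bounds like this (a
sketch reusing the illustrative pair layout from above; the patch itself only
walks the section from assembly in head.S):

#include <stddef.h>

struct smp_alt_entry {
	unsigned long addr;	/* word to patch */
	unsigned long up_val;	/* replacement for UP systems */
};

/* Symbols defined by the linker script around .alt.smp.init. */
extern struct smp_alt_entry __smpalt_begin[], __smpalt_end[];

static size_t nr_smp_alternatives(void)
{
	return __smpalt_end - __smpalt_begin;
}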
+4 -10
arch/arm/mm/cache-v7.S
···
  THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bl	v7_flush_dcache_all
 	mov	r0, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c1, 0		@ invalidate I-cache inner shareable
-#else
-	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
  ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
  THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	mov	pc, lr
···
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c1, 6		@ invalidate BTB Inner Shareable
-#else
-	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
 	dsb
 	isb
 	mov	pc, lr
+21 -25
arch/arm/mm/mmu.c
···
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-#ifdef CONFIG_SMP
-	cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+	if (is_smp())
+		cachepolicy = CPOLICY_WRITEALLOC;
 
 	/*
 	 * Strip out features not present on earlier architectures.
···
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
-#ifndef CONFIG_SMP
 	/*
 	 * Only use write-through for non-SMP systems
 	 */
-	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
 		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
···
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-#ifdef CONFIG_SMP
-		/*
-		 * Mark memory with the "shared" attribute for SMP systems
-		 */
-		user_pgprot |= L_PTE_SHARED;
-		kern_pgprot |= L_PTE_SHARED;
-		vecs_pgprot |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-#endif
+		if (is_smp()) {
+			/*
+			 * Mark memory with the "shared" attribute
+			 * for SMP systems
+			 */
+			user_pgprot |= L_PTE_SHARED;
+			kern_pgprot |= L_PTE_SHARED;
+			vecs_pgprot |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+		}
 	}
 
 	/*
···
 		 * rather difficult.
 		 */
 		reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
-	} else if (tlb_ops_need_broadcast()) {
+	} else if (is_smp() && tlb_ops_need_broadcast()) {
 		/*
 		 * kmap_high needs to occasionally flush TLB entries,
 		 * however, if the TLB entries need to be broadcast
···
 		 * (must not be called with irqs off)
 		 */
 		reason = "without hardware TLB ops broadcasting";
-#endif
 	}
 	if (reason) {
 		printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
+28 -15
arch/arm/mm/proc-v6.S
···
 #define TTB_RGN_WT	(2 << 3)
 #define TTB_RGN_WB	(3 << 3)
 
-#ifndef CONFIG_SMP
-#define TTB_FLAGS	TTB_RGN_WBWA
-#define PMD_FLAGS	PMD_SECT_WB
-#else
-#define TTB_FLAGS	TTB_RGN_WBWA|TTB_S
-#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_UP	TTB_RGN_WBWA
+#define PMD_FLAGS_UP	PMD_SECT_WB
+#define TTB_FLAGS_SMP	TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
 
 ENTRY(cpu_v6_proc_init)
 	mov	pc, lr
···
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_FLAGS
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
···
  */
 __v6_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
+	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)	@ Enable SMP/nAMP mode
+	ALT_UP(nop)
 	orr	r0, r0, #0x20
-	mcr	p15, 0, r0, c1, c0, 1
+	ALT_SMP(mcr	p15, 0, r0, c1, c0, 1)
+	ALT_UP(nop)
 #endif
 
 	mov	r0, #0
···
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_FLAGS
+	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
···
 __v6_proc_info:
 	.long	0x0007b000
 	.long	0x0007f000
-	.long	PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
···
 __pj4_v6_proc_info:
 	.long	0x560f5810
 	.long	0xff0ffff0
-	.long	PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
+27 -14
arch/arm/mm/proc-v7.S
···
 #define TTB_IRGN_WT	((1 << 0) | (0 << 6))
 #define TTB_IRGN_WB	((1 << 0) | (1 << 6))
 
-#ifndef CONFIG_SMP
 /* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS	TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS	PMD_SECT_WB
-#else
+#define TTB_FLAGS_UP	TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP	PMD_SECT_WB
+
 /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_SMP	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
 
 ENTRY(cpu_v7_proc_init)
 	mov	pc, lr
···
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_FLAGS
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
···
  */
 __v7_ca9mp_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1
+	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
+	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
 	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
 	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
 	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
···
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_FLAGS
+	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
···
 __v7_ca9mp_proc_info:
 	.long	0x410fc090		@ Required ID value
 	.long	0xff0ffff0		@ Mask for ID
-	.long	PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
···
 __v7_proc_info:
 	.long	0x000f0000		@ Required ID value
 	.long	0x000f0000		@ Mask for ID
-	.long	PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
+12 -21
arch/arm/mm/tlb-v7.S
···
  */
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
···
 	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
-#else
-	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
+
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
 	mov	ip, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, ip, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
-#else
-	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
-#endif
+	ALT_SMP(mcr	p15, 0, ip, c7, c1, 6)	@ flush BTAC/BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, ip, c7, c5, 6)	@ flush BTAC/BTB
 	dsb
 	mov	pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
···
 	mov	r0, r0, lsl #PAGE_SHIFT
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
-#else
-	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
 	mov	r2, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r2, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
-#else
-	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
-#endif
+	ALT_SMP(mcr	p15, 0, r2, c7, c1, 6)	@ flush BTAC/BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, r2, c7, c5, 6)	@ flush BTAC/BTB
 	dsb
 	isb
 	mov	pc, lr
···
 ENTRY(v7wbi_tlb_fns)
 	.long	v7wbi_flush_user_tlb_range
 	.long	v7wbi_flush_kern_tlb_range
-	.long	v7wbi_tlb_flags
+	ALT_SMP(.long	v7wbi_tlb_flags_smp)
+	ALT_UP(.long	v7wbi_tlb_flags_up)
 	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns