Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'kexec/idmap' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into devel-stable

+144 -93
+14
arch/arm/include/asm/idmap.h
··· 1 + #ifndef __ASM_IDMAP_H 2 + #define __ASM_IDMAP_H 3 + 4 + #include <linux/compiler.h> 5 + #include <asm/pgtable.h> 6 + 7 + /* Tag a function as requiring to be executed via an identity mapping. */ 8 + #define __idmap __section(.idmap.text) noinline notrace 9 + 10 + extern pgd_t *idmap_pgd; 11 + 12 + void setup_mm_for_reboot(void); 13 + 14 + #endif /* __ASM_IDMAP_H */
-3
arch/arm/include/asm/pgtable.h
··· 347 347 348 348 #define pgtable_cache_init() do { } while (0) 349 349 350 - void identity_mapping_add(pgd_t *, unsigned long, unsigned long); 351 - void identity_mapping_del(pgd_t *, unsigned long, unsigned long); 352 - 353 350 #endif /* !__ASSEMBLY__ */ 354 351 355 352 #endif /* CONFIG_MMU */
+10 -8
arch/arm/kernel/head.S
··· 170 170 * Create identity mapping to cater for __enable_mmu. 171 171 * This identity mapping will be removed by paging_init(). 172 172 */ 173 - adr r0, __enable_mmu_loc 173 + adr r0, __turn_mmu_on_loc 174 174 ldmia r0, {r3, r5, r6} 175 175 sub r0, r0, r3 @ virt->phys offset 176 - add r5, r5, r0 @ phys __enable_mmu 177 - add r6, r6, r0 @ phys __enable_mmu_end 176 + add r5, r5, r0 @ phys __turn_mmu_on 177 + add r6, r6, r0 @ phys __turn_mmu_on_end 178 178 mov r5, r5, lsr #SECTION_SHIFT 179 179 mov r6, r6, lsr #SECTION_SHIFT 180 180 ··· 287 287 ENDPROC(__create_page_tables) 288 288 .ltorg 289 289 .align 290 - __enable_mmu_loc: 290 + __turn_mmu_on_loc: 291 291 .long . 292 - .long __enable_mmu 293 - .long __enable_mmu_end 292 + .long __turn_mmu_on 293 + .long __turn_mmu_on_end 294 294 295 295 #if defined(CONFIG_SMP) 296 296 __CPUINIT ··· 398 398 * other registers depend on the function called upon completion 399 399 */ 400 400 .align 5 401 - __turn_mmu_on: 401 + .pushsection .idmap.text, "ax" 402 + ENTRY(__turn_mmu_on) 402 403 mov r0, r0 403 404 mcr p15, 0, r0, c1, c0, 0 @ write control reg 404 405 mrc p15, 0, r3, c0, c0, 0 @ read id reg 405 406 mov r3, r3 406 407 mov r3, r13 407 408 mov pc, r3 408 - __enable_mmu_end: 409 + __turn_mmu_on_end: 409 410 ENDPROC(__turn_mmu_on) 411 + .popsection 410 412 411 413 412 414 #ifdef CONFIG_SMP_ON_UP
+2
arch/arm/kernel/sleep.S
··· 54 54 * r0 = control register value 55 55 */ 56 56 .align 5 57 + .pushsection .idmap.text,"ax" 57 58 ENTRY(cpu_resume_mmu) 58 59 ldr r3, =cpu_resume_after_mmu 59 60 mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc ··· 63 62 mov r0, r0 64 63 mov pc, r3 @ jump to virtual address 65 64 ENDPROC(cpu_resume_mmu) 65 + .popsection 66 66 cpu_resume_after_mmu: 67 67 bl cpu_init @ restore the und/abt/irq banked regs 68 68 mov r0, #0 @ return zero on success
+2 -30
arch/arm/kernel/smp.c
··· 31 31 #include <asm/cpu.h> 32 32 #include <asm/cputype.h> 33 33 #include <asm/exception.h> 34 + #include <asm/idmap.h> 34 35 #include <asm/topology.h> 35 36 #include <asm/mmu_context.h> 36 37 #include <asm/pgtable.h> ··· 62 61 { 63 62 struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); 64 63 struct task_struct *idle = ci->idle; 65 - pgd_t *pgd; 66 64 int ret; 67 65 68 66 /* ··· 84 84 } 85 85 86 86 /* 87 - * Allocate initial page tables to allow the new CPU to 88 - * enable the MMU safely. This essentially means a set 89 - * of our "standard" page tables, with the addition of 90 - * a 1:1 mapping for the physical address of the kernel. 91 - */ 92 - pgd = pgd_alloc(&init_mm); 93 - if (!pgd) 94 - return -ENOMEM; 95 - 96 - if (PHYS_OFFSET != PAGE_OFFSET) { 97 - #ifndef CONFIG_HOTPLUG_CPU 98 - identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end)); 99 - #endif 100 - identity_mapping_add(pgd, __pa(_stext), __pa(_etext)); 101 - identity_mapping_add(pgd, __pa(_sdata), __pa(_edata)); 102 - } 103 - 104 - /* 105 87 * We need to tell the secondary core where to find 106 88 * its stack and the page tables. 107 89 */ 108 90 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; 109 - secondary_data.pgdir = virt_to_phys(pgd); 91 + secondary_data.pgdir = virt_to_phys(idmap_pgd); 110 92 secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); 111 93 __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); 112 94 outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); ··· 123 141 124 142 secondary_data.stack = NULL; 125 143 secondary_data.pgdir = 0; 126 - 127 - if (PHYS_OFFSET != PAGE_OFFSET) { 128 - #ifndef CONFIG_HOTPLUG_CPU 129 - identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end)); 130 - #endif 131 - identity_mapping_del(pgd, __pa(_stext), __pa(_etext)); 132 - identity_mapping_del(pgd, __pa(_sdata), __pa(_edata)); 133 - } 134 - 135 - pgd_free(&init_mm, pgd); 136 144 137 145 return ret; 138 146 }
+3 -15
arch/arm/kernel/suspend.c
··· 1 1 #include <linux/init.h> 2 2 3 + #include <asm/idmap.h> 3 4 #include <asm/pgalloc.h> 4 5 #include <asm/pgtable.h> 5 6 #include <asm/memory.h> 6 7 #include <asm/suspend.h> 7 8 #include <asm/tlbflush.h> 8 - 9 - static pgd_t *suspend_pgd; 10 9 11 10 extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); 12 11 extern void cpu_resume_mmu(void); ··· 20 21 *save_ptr = virt_to_phys(ptr); 21 22 22 23 /* This must correspond to the LDM in cpu_resume() assembly */ 23 - *ptr++ = virt_to_phys(suspend_pgd); 24 + *ptr++ = virt_to_phys(idmap_pgd); 24 25 *ptr++ = sp; 25 26 *ptr++ = virt_to_phys(cpu_do_resume); 26 27 ··· 41 42 struct mm_struct *mm = current->active_mm; 42 43 int ret; 43 44 44 - if (!suspend_pgd) 45 + if (!idmap_pgd) 45 46 return -EINVAL; 46 47 47 48 /* ··· 58 59 59 60 return ret; 60 61 } 61 - 62 - static int __init cpu_suspend_init(void) 63 - { 64 - suspend_pgd = pgd_alloc(&init_mm); 65 - if (suspend_pgd) { 66 - unsigned long addr = virt_to_phys(cpu_resume_mmu); 67 - identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE); 68 - } 69 - return suspend_pgd ? 0 : -ENOMEM; 70 - } 71 - core_initcall(cpu_suspend_init);
+7
arch/arm/kernel/vmlinux.lds.S
··· 13 13 *(.proc.info.init) \ 14 14 VMLINUX_SYMBOL(__proc_info_end) = .; 15 15 16 + #define IDMAP_TEXT \ 17 + ALIGN_FUNCTION(); \ 18 + VMLINUX_SYMBOL(__idmap_text_start) = .; \ 19 + *(.idmap.text) \ 20 + VMLINUX_SYMBOL(__idmap_text_end) = .; 21 + 16 22 #ifdef CONFIG_HOTPLUG_CPU 17 23 #define ARM_CPU_DISCARD(x) 18 24 #define ARM_CPU_KEEP(x) x ··· 98 92 SCHED_TEXT 99 93 LOCK_TEXT 100 94 KPROBES_TEXT 95 + IDMAP_TEXT 101 96 #ifdef CONFIG_MMU 102 97 *(.fixup) 103 98 #endif
+34 -37
arch/arm/mm/idmap.c
··· 1 1 #include <linux/kernel.h> 2 2 3 3 #include <asm/cputype.h> 4 + #include <asm/idmap.h> 4 5 #include <asm/pgalloc.h> 5 6 #include <asm/pgtable.h> 7 + #include <asm/sections.h> 8 + 9 + pgd_t *idmap_pgd; 6 10 7 11 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, 8 12 unsigned long prot) ··· 32 28 } while (pud++, addr = next, addr != end); 33 29 } 34 30 35 - void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) 31 + static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) 36 32 { 37 33 unsigned long prot, next; 38 34 ··· 47 43 } while (pgd++, addr = next, addr != end); 48 44 } 49 45 50 - #ifdef CONFIG_SMP 51 - static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end) 46 + extern char __idmap_text_start[], __idmap_text_end[]; 47 + 48 + static int __init init_static_idmap(void) 52 49 { 53 - pmd_t *pmd = pmd_offset(pud, addr); 54 - pmd_clear(pmd); 50 + phys_addr_t idmap_start, idmap_end; 51 + 52 + idmap_pgd = pgd_alloc(&init_mm); 53 + if (!idmap_pgd) 54 + return -ENOMEM; 55 + 56 + /* Add an identity mapping for the physical address of the section. */ 57 + idmap_start = virt_to_phys((void *)__idmap_text_start); 58 + idmap_end = virt_to_phys((void *)__idmap_text_end); 59 + 60 + pr_info("Setting up static identity map for 0x%llx - 0x%llx\n", 61 + (long long)idmap_start, (long long)idmap_end); 62 + identity_mapping_add(idmap_pgd, idmap_start, idmap_end); 63 + 64 + return 0; 65 }
+3
arch/arm/mm/proc-arm1020.S
··· 95 95 * loc: location to jump to for soft reset 96 96 */ 97 97 .align 5 98 + .pushsection .idmap.text, "ax" 98 99 ENTRY(cpu_arm1020_reset) 99 100 mov ip, #0 100 101 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 108 107 bic ip, ip, #0x1100 @ ...i...s........ 109 108 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 110 109 mov pc, r0 110 + ENDPROC(cpu_arm1020_reset) 111 + .popsection 111 112 112 113 /* 113 114 * cpu_arm1020_do_idle()
+3
arch/arm/mm/proc-arm1020e.S
··· 95 95 * loc: location to jump to for soft reset 96 96 */ 97 97 .align 5 98 + .pushsection .idmap.text, "ax" 98 99 ENTRY(cpu_arm1020e_reset) 99 100 mov ip, #0 100 101 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 108 107 bic ip, ip, #0x1100 @ ...i...s........ 109 108 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 110 109 mov pc, r0 110 + ENDPROC(cpu_arm1020e_reset) 111 + .popsection 111 112 112 113 /* 113 114 * cpu_arm1020e_do_idle()
+3
arch/arm/mm/proc-arm1022.S
··· 84 84 * loc: location to jump to for soft reset 85 85 */ 86 86 .align 5 87 + .pushsection .idmap.text, "ax" 87 88 ENTRY(cpu_arm1022_reset) 88 89 mov ip, #0 89 90 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 97 96 bic ip, ip, #0x1100 @ ...i...s........ 98 97 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 99 98 mov pc, r0 99 + ENDPROC(cpu_arm1022_reset) 100 + .popsection 100 101 101 102 /* 102 103 * cpu_arm1022_do_idle()
+3
arch/arm/mm/proc-arm1026.S
··· 84 84 * loc: location to jump to for soft reset 85 85 */ 86 86 .align 5 87 + .pushsection .idmap.text, "ax" 87 88 ENTRY(cpu_arm1026_reset) 88 89 mov ip, #0 89 90 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 97 96 bic ip, ip, #0x1100 @ ...i...s........ 98 97 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 99 98 mov pc, r0 99 + ENDPROC(cpu_arm1026_reset) 100 + .popsection 100 101 101 102 /* 102 103 * cpu_arm1026_do_idle()
+4
arch/arm/mm/proc-arm6_7.S
··· 225 225 * Params : r0 = address to jump to 226 226 * Notes : This sets up everything for a reset 227 227 */ 228 + .pushsection .idmap.text, "ax" 228 229 ENTRY(cpu_arm6_reset) 229 230 ENTRY(cpu_arm7_reset) 230 231 mov r1, #0 ··· 236 235 mov r1, #0x30 237 236 mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc 238 237 mov pc, r0 238 + ENDPROC(cpu_arm6_reset) 239 + ENDPROC(cpu_arm7_reset) 240 + .popsection 239 241 240 242 __CPUINIT 241 243
+3
arch/arm/mm/proc-arm720.S
··· 101 101 * Params : r0 = address to jump to 102 102 * Notes : This sets up everything for a reset 103 103 */ 104 + .pushsection .idmap.text, "ax" 104 105 ENTRY(cpu_arm720_reset) 105 106 mov ip, #0 106 107 mcr p15, 0, ip, c7, c7, 0 @ invalidate cache ··· 113 112 bic ip, ip, #0x2100 @ ..v....s........ 114 113 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 115 114 mov pc, r0 115 + ENDPROC(cpu_arm720_reset) 116 + .popsection 116 117 117 118 __CPUINIT 118 119
+3
arch/arm/mm/proc-arm740.S
··· 49 49 * Params : r0 = address to jump to 50 50 * Notes : This sets up everything for a reset 51 51 */ 52 + .pushsection .idmap.text, "ax" 52 53 ENTRY(cpu_arm740_reset) 53 54 mov ip, #0 54 55 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache ··· 57 56 bic ip, ip, #0x0000000c @ ............wc.. 58 57 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 59 58 mov pc, r0 59 + ENDPROC(cpu_arm740_reset) 60 + .popsection 60 61 61 62 __CPUINIT 62 63
+3
arch/arm/mm/proc-arm7tdmi.S
··· 45 45 * Params : loc(r0) address to jump to 46 46 * Purpose : Sets up everything for a reset and jump to the location for soft reset. 47 47 */ 48 + .pushsection .idmap.text, "ax" 48 49 ENTRY(cpu_arm7tdmi_reset) 49 50 mov pc, r0 51 + ENDPROC(cpu_arm7tdmi_reset) 52 + .popsection 50 53 51 54 __CPUINIT 52 55
+3
arch/arm/mm/proc-arm920.S
··· 85 85 * loc: location to jump to for soft reset 86 86 */ 87 87 .align 5 88 + .pushsection .idmap.text, "ax" 88 89 ENTRY(cpu_arm920_reset) 89 90 mov ip, #0 90 91 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 98 97 bic ip, ip, #0x1100 @ ...i...s........ 99 98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 100 99 mov pc, r0 100 + ENDPROC(cpu_arm920_reset) 101 + .popsection 101 102 102 103 /* 103 104 * cpu_arm920_do_idle()
+3
arch/arm/mm/proc-arm922.S
··· 87 87 * loc: location to jump to for soft reset 88 88 */ 89 89 .align 5 90 + .pushsection .idmap.text, "ax" 90 91 ENTRY(cpu_arm922_reset) 91 92 mov ip, #0 92 93 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 100 99 bic ip, ip, #0x1100 @ ...i...s........ 101 100 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 102 101 mov pc, r0 102 + ENDPROC(cpu_arm922_reset) 103 + .popsection 103 104 104 105 /* 105 106 * cpu_arm922_do_idle()
+3
arch/arm/mm/proc-arm925.S
··· 108 108 * loc: location to jump to for soft reset 109 109 */ 110 110 .align 5 111 + .pushsection .idmap.text, "ax" 111 112 ENTRY(cpu_arm925_reset) 112 113 /* Send software reset to MPU and DSP */ 113 114 mov ip, #0xff000000 ··· 116 115 orr ip, ip, #0x0000ce00 117 116 mov r4, #1 118 117 strh r4, [ip, #0x10] 118 + ENDPROC(cpu_arm925_reset) 119 + .popsection 119 120 120 121 mov ip, #0 121 122 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
+3
arch/arm/mm/proc-arm926.S
··· 77 77 * loc: location to jump to for soft reset 78 78 */ 79 79 .align 5 80 + .pushsection .idmap.text, "ax" 80 81 ENTRY(cpu_arm926_reset) 81 82 mov ip, #0 82 83 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 90 89 bic ip, ip, #0x1100 @ ...i...s........ 91 90 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 92 91 mov pc, r0 92 + ENDPROC(cpu_arm926_reset) 93 + .popsection 93 94 94 95 /* 95 96 * cpu_arm926_do_idle()
+3
arch/arm/mm/proc-arm940.S
··· 48 48 * Params : r0 = address to jump to 49 49 * Notes : This sets up everything for a reset 50 50 */ 51 + .pushsection .idmap.text, "ax" 51 52 ENTRY(cpu_arm940_reset) 52 53 mov ip, #0 53 54 mcr p15, 0, ip, c7, c5, 0 @ flush I cache ··· 59 58 bic ip, ip, #0x00001000 @ i-cache 60 59 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 61 60 mov pc, r0 61 + ENDPROC(cpu_arm940_reset) 62 + .popsection 62 63 63 64 /* 64 65 * cpu_arm940_do_idle()
+3
arch/arm/mm/proc-arm946.S
··· 55 55 * Params : r0 = address to jump to 56 56 * Notes : This sets up everything for a reset 57 57 */ 58 + .pushsection .idmap.text, "ax" 58 59 ENTRY(cpu_arm946_reset) 59 60 mov ip, #0 60 61 mcr p15, 0, ip, c7, c5, 0 @ flush I cache ··· 66 65 bic ip, ip, #0x00001000 @ i-cache 67 66 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 68 67 mov pc, r0 68 + ENDPROC(cpu_arm946_reset) 69 + .popsection 69 70 70 71 /* 71 72 * cpu_arm946_do_idle()
+3
arch/arm/mm/proc-arm9tdmi.S
··· 45 45 * Params : loc(r0) address to jump to 46 46 * Purpose : Sets up everything for a reset and jump to the location for soft reset. 47 47 */ 48 + .pushsection .idmap.text, "ax" 48 49 ENTRY(cpu_arm9tdmi_reset) 49 50 mov pc, r0 51 + ENDPROC(cpu_arm9tdmi_reset) 52 + .popsection 50 53 51 54 __CPUINIT 52 55
+3
arch/arm/mm/proc-fa526.S
··· 57 57 * loc: location to jump to for soft reset 58 58 */ 59 59 .align 4 60 + .pushsection .idmap.text, "ax" 60 61 ENTRY(cpu_fa526_reset) 61 62 /* TODO: Use CP8 if possible... */ 62 63 mov ip, #0 ··· 74 73 nop 75 74 nop 76 75 mov pc, r0 76 + ENDPROC(cpu_fa526_reset) 77 + .popsection 77 78 78 79 /* 79 80 * cpu_fa526_do_idle()
+3
arch/arm/mm/proc-feroceon.S
··· 98 98 * loc: location to jump to for soft reset 99 99 */ 100 100 .align 5 101 + .pushsection .idmap.text, "ax" 101 102 ENTRY(cpu_feroceon_reset) 102 103 mov ip, #0 103 104 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 111 110 bic ip, ip, #0x1100 @ ...i...s........ 112 111 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 113 112 mov pc, r0 113 + ENDPROC(cpu_feroceon_reset) 114 + .popsection 114 115 115 116 /* 116 117 * cpu_feroceon_do_idle()
+3
arch/arm/mm/proc-mohawk.S
··· 69 69 * (same as arm926) 70 70 */ 71 71 .align 5 72 + .pushsection .idmap.text, "ax" 72 73 ENTRY(cpu_mohawk_reset) 73 74 mov ip, #0 74 75 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 80 79 bic ip, ip, #0x1100 @ ...i...s........ 81 80 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 82 81 mov pc, r0 82 + ENDPROC(cpu_mohawk_reset) 83 + .popsection 83 84 84 85 /* 85 86 * cpu_mohawk_do_idle()
+3
arch/arm/mm/proc-sa110.S
··· 62 62 * loc: location to jump to for soft reset 63 63 */ 64 64 .align 5 65 + .pushsection .idmap.text, "ax" 65 66 ENTRY(cpu_sa110_reset) 66 67 mov ip, #0 67 68 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 75 74 bic ip, ip, #0x1100 @ ...i...s........ 76 75 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 77 76 mov pc, r0 77 + ENDPROC(cpu_sa110_reset) 78 + .popsection 78 79 79 80 /* 80 81 * cpu_sa110_do_idle(type)
+3
arch/arm/mm/proc-sa1100.S
··· 70 70 * loc: location to jump to for soft reset 71 71 */ 72 72 .align 5 73 + .pushsection .idmap.text, "ax" 73 74 ENTRY(cpu_sa1100_reset) 74 75 mov ip, #0 75 76 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 83 82 bic ip, ip, #0x1100 @ ...i...s........ 84 83 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 85 84 mov pc, r0 85 + ENDPROC(cpu_sa1100_reset) 86 + .popsection 86 87 87 88 /* 88 89 * cpu_sa1100_do_idle(type)
+3
arch/arm/mm/proc-v6.S
··· 55 55 * - loc - location to jump to for soft reset 56 56 */ 57 57 .align 5 58 + .pushsection .idmap.text, "ax" 58 59 ENTRY(cpu_v6_reset) 59 60 mrc p15, 0, r1, c1, c0, 0 @ ctrl register 60 61 bic r1, r1, #0x1 @ ...............m ··· 63 62 mov r1, #0 64 63 mcr p15, 0, r1, c7, c5, 4 @ ISB 65 64 mov pc, r0 65 + ENDPROC(cpu_v6_reset) 66 + .popsection 66 67 67 68 /* 68 69 * cpu_v6_do_idle()
+2
arch/arm/mm/proc-v7.S
··· 63 63 * caches disabled. 64 64 */ 65 65 .align 5 66 + .pushsection .idmap.text, "ax" 66 67 ENTRY(cpu_v7_reset) 67 68 mrc p15, 0, r1, c1, c0, 0 @ ctrl register 68 69 bic r1, r1, #0x1 @ ...............m ··· 72 71 isb 73 72 mov pc, r0 74 73 ENDPROC(cpu_v7_reset) 74 + .popsection 75 75 76 76 /* 77 77 * cpu_v7_do_idle()
+3
arch/arm/mm/proc-xsc3.S
··· 105 105 * loc: location to jump to for soft reset 106 106 */ 107 107 .align 5 108 + .pushsection .idmap.text, "ax" 108 109 ENTRY(cpu_xsc3_reset) 109 110 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 110 111 msr cpsr_c, r1 @ reset CPSR ··· 120 119 @ already containing those two last instructions to survive. 121 120 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs 122 121 mov pc, r0 122 + ENDPROC(cpu_xsc3_reset) 123 + .popsection 123 124 124 125 /* 125 126 * cpu_xsc3_do_idle()
+3
arch/arm/mm/proc-xscale.S
··· 142 142 * Beware PXA270 erratum E7. 143 143 */ 144 144 .align 5 145 + .pushsection .idmap.text, "ax" 145 146 ENTRY(cpu_xscale_reset) 146 147 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 147 148 msr cpsr_c, r1 @ reset CPSR ··· 161 160 @ already containing those two last instructions to survive. 162 161 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 163 162 mov pc, r0 163 + ENDPROC(cpu_xscale_reset) 164 + .popsection 164 165 165 166 /* 166 167 * cpu_xscale_do_idle()