Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

arm64/kexec: Add core kexec support

Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S, to the
arm64 architecture, adding support for the kexec reboot mechanism
(CONFIG_KEXEC) on arm64 platforms.

Signed-off-by: Geoff Levand <geoff@infradead.org>
Reviewed-by: James Morse <james.morse@arm.com>
[catalin.marinas@arm.com: removed dead code following James Morse's comments]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
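
With CONFIG_KEXEC enabled, userspace drives this code through the kexec_load
and reboot syscalls, normally via the kexec-tools package. A minimal,
hypothetical C sketch of that sequence (not part of this patch; all image
parsing and segment setup is elided, so the values below would be rejected
by the kernel's sanity checks as written):

/*
 * Hypothetical sketch: stage a new kernel and reboot into it.
 * Real users should use kexec-tools; segment contents are elided.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>
#include <linux/reboot.h>

int main(void)
{
	struct kexec_segment seg = {
		/* .buf/.bufsz: userspace copy of the image (elided) */
		/* .mem/.memsz: physical destination of the image (elided) */
	};

	/* Stage the new kernel; machine_kexec_prepare() runs here. */
	if (syscall(SYS_kexec_load, /* entry point */ 0UL, 1UL, &seg,
		    (unsigned long)KEXEC_ARCH_AARCH64)) {
		perror("kexec_load");
		return 1;
	}

	/* Jump into it; machine_kexec() runs here. */
	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		LINUX_REBOOT_CMD_KEXEC, NULL);
	return 0;	/* not reached on success */
}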

Authored by Geoff Levand, committed by Catalin Marinas
commit d28f6df1, parent f9076ecf

6 files changed, 361 insertions(+)
arch/arm64/Kconfig (+10)

 	  If in doubt, say N here.

+config KEXEC
+	depends on PM_SLEEP_SMP
+	select KEXEC_CORE
+	bool "kexec system call"
+	---help---
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.  And like a reboot
+	  you can start any kernel with it, not just Linux.
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
arch/arm64/include/asm/kexec.h (new file, +48)

/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ARM64_KEXEC_H
#define _ARM64_KEXEC_H

/* Maximum physical address we can use pages from */

#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)

/* Maximum address we can reach in physical address mode */

#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)

/* Maximum address we can use for the control code buffer */

#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)

#define KEXEC_CONTROL_PAGE_SIZE 4096

#define KEXEC_ARCH KEXEC_ARCH_AARCH64

#ifndef __ASSEMBLY__

/**
 * crash_setup_regs() - save registers for the panic kernel
 *
 * @newregs: registers are saved here
 * @oldregs: registers to be saved (may be %NULL)
 */

static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	/* Empty routine needed to avoid build errors. */
}

#endif /* __ASSEMBLY__ */

#endif
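
KEXEC_CONTROL_PAGE_SIZE above is consumed by the core kexec code when it
sets aside the page that machine_kexec() will later copy the relocation
stub into. A simplified paraphrase of that allocation, trimmed for
illustration from kimage_alloc_init() in kernel/kexec_core.c:

	/* Reserve the control page(s) the relocation stub is copied into. */
	image->control_code_page = kimage_alloc_control_pages(image,
					get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}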
arch/arm64/kernel/Makefile (+2)

 arm64-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
 arm64-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
 arm64-obj-$(CONFIG_HIBERNATION)	+= hibernate.o hibernate-asm.o
+arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o	\
+					   cpu-reset.o

 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)
arch/arm64/kernel/machine_kexec.c (new file, +170)

/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kexec.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>

#include "cpu-reset.h"

/* Global variables for the arm64_relocate_new_kernel routine. */
extern const unsigned char arm64_relocate_new_kernel[];
extern const unsigned long arm64_relocate_new_kernel_size;

static unsigned long kimage_start;

void machine_kexec_cleanup(struct kimage *kimage)
{
	/* Empty routine needed to avoid build errors. */
}

/**
 * machine_kexec_prepare - Prepare for a kexec reboot.
 *
 * Called from the core kexec code when a kernel image is loaded.
 * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
 * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
 */
int machine_kexec_prepare(struct kimage *kimage)
{
	kimage_start = kimage->start;

	if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
 */
static void kexec_list_flush(struct kimage *kimage)
{
	kimage_entry_t *entry;

	for (entry = &kimage->head; ; entry++) {
		unsigned int flag;
		void *addr;

		/* flush the list entries. */
		__flush_dcache_area(entry, sizeof(kimage_entry_t));

		flag = *entry & IND_FLAGS;
		if (flag == IND_DONE)
			break;

		addr = phys_to_virt(*entry & PAGE_MASK);

		switch (flag) {
		case IND_INDIRECTION:
			/* Set entry point just before the new list page. */
			entry = (kimage_entry_t *)addr - 1;
			break;
		case IND_SOURCE:
			/* flush the source pages. */
			__flush_dcache_area(addr, PAGE_SIZE);
			break;
		case IND_DESTINATION:
			break;
		default:
			BUG();
		}
	}
}

/**
 * kexec_segment_flush - Helper to flush the kimage segments to PoC.
 */
static void kexec_segment_flush(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("%s:\n", __func__);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			kimage->segment[i].memsz,
			kimage->segment[i].memsz / PAGE_SIZE);

		__flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
			kimage->segment[i].memsz);
	}
}

/**
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 */
void machine_kexec(struct kimage *kimage)
{
	phys_addr_t reboot_code_buffer_phys;
	void *reboot_code_buffer;

	/*
	 * New cpus may have become stuck_in_kernel after we loaded the image.
	 */
	BUG_ON(cpus_are_stuck_in_kernel() || (num_online_cpus() > 1));

	reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);

	/*
	 * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
	 * after the kernel is shut down.
	 */
	memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
		arm64_relocate_new_kernel_size);

	/* Flush the reboot_code_buffer in preparation for its execution. */
	__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
	flush_icache_range((uintptr_t)reboot_code_buffer,
		(uintptr_t)reboot_code_buffer + arm64_relocate_new_kernel_size);

	/* Flush the kimage list and its buffers. */
	kexec_list_flush(kimage);

	/* Flush the new image if already in place. */
	if (kimage->head & IND_DONE)
		kexec_segment_flush(kimage);

	pr_info("Bye!\n");

	/* Disable all DAIF exceptions. */
	asm volatile ("msr daifset, #0xf" : : : "memory");

	/*
	 * cpu_soft_restart will shutdown the MMU, disable data caches, then
	 * transfer control to the reboot_code_buffer which contains a copy of
	 * the arm64_relocate_new_kernel routine.  arm64_relocate_new_kernel
	 * uses physical addressing to relocate the new image to its final
	 * position and transfers control to the image entry point when the
	 * relocation is complete.
	 */

	cpu_soft_restart(1, reboot_code_buffer_phys, kimage->head,
		kimage_start, 0);

	BUG(); /* Should never get here. */
}

void machine_crash_shutdown(struct pt_regs *regs)
{
	/* Empty routine needed to avoid build errors. */
}
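
The kimage list that kexec_list_flush() walks, and that the assembly stub
below replays with the MMU off, is a flat array of physical addresses
carrying IND_* tags in their low bits. Purely for illustration, with
made-up addresses, a two-page image could be described as:

	/* Illustration only; the addresses are invented. */
	kimage_entry_t example[] = {
		0x80080000UL | IND_DESTINATION,	/* start copying to this PA */
		0x9fe00000UL | IND_SOURCE,	/* first page of the new image */
		0x9fe01000UL | IND_SOURCE,	/* next page, copied right after */
		IND_DONE,			/* terminate the walk */
	};
	/* An IND_INDIRECTION entry instead points at a further page of entries. */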
arch/arm64/kernel/relocate_kernel.S (new file, +130)

/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>

/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location.  To assure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must be between the
 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end.  The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
 * control_code_page, a special page which has been set up to be preserved
 * during the copy operation.
 */
ENTRY(arm64_relocate_new_kernel)

	/* Setup the list loop variables. */
	mov	x17, x1				/* x17 = kimage_start */
	mov	x16, x0				/* x16 = kimage_head */
	dcache_line_size x15, x0		/* x15 = dcache line size */
	mov	x14, xzr			/* x14 = entry ptr */
	mov	x13, xzr			/* x13 = copy dest */

	/* Clear the sctlr_el2 flags. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
	ldr	x1, =SCTLR_ELx_FLAGS
	bic	x0, x0, x1
	msr	sctlr_el2, x0
	isb
1:

	/* Check if the new image needs relocation. */
	tbnz	x16, IND_DONE_BIT, .Ldone

.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */

	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Invalidate dest page to PoC. */
	mov	x0, x13
	add	x20, x0, #PAGE_SIZE
	sub	x1, x15, #1
	bic	x0, x0, x1
2:	dc	ivac, x0
	add	x0, x0, x15
	cmp	x0, x20
	b.lo	2b
	dsb	sy

	mov	x20, x13
	mov	x21, x12
	copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7

	/* dest += PAGE_SIZE */
	add	x13, x13, PAGE_SIZE
	b	.Lnext

.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination

	/* ptr = addr */
	mov	x14, x12
	b	.Lnext

.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext

	/* dest = addr */
	mov	x13, x12

.Lnext:
	/* entry = *ptr++ */
	ldr	x16, [x14], #8

	/* while (!(entry & DONE)) */
	tbz	x16, IND_DONE_BIT, .Lloop

.Ldone:
	/* wait for writes from copy_page to finish */
	dsb	nsh
	ic	iallu
	dsb	nsh
	isb

	/* Start new image. */
	mov	x0, xzr
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	br	x17

ENDPROC(arm64_relocate_new_kernel)

.ltorg

.align 3	/* To keep the 64-bit values below naturally aligned. */

.Lcopy_end:
.org	KEXEC_CONTROL_PAGE_SIZE

/*
 * arm64_relocate_new_kernel_size - Number of bytes to copy to the
 * control_code_page.
 */
.globl arm64_relocate_new_kernel_size
arm64_relocate_new_kernel_size:
	.quad	.Lcopy_end - arm64_relocate_new_kernel
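
Because the stub runs with the MMU and caches off, it cannot call kernel
helpers; everything it needs sits between the two symbols above. Roughly,
and only as an illustrative C rendering of the loop (not code from the
patch; it assumes the IND_* and PAGE_* definitions from the headers above):

	/* Illustrative C rendering of arm64_relocate_new_kernel. */
	void relocate(unsigned long entry /* kimage_head */,
		      unsigned long kimage_start)
	{
		unsigned long *ptr = NULL;	/* x14: position in the list */
		char *dest = NULL;		/* x13: copy destination */

		while (!(entry & IND_DONE)) {
			char *addr = (char *)(entry & PAGE_MASK);

			if (entry & IND_SOURCE) {
				/* the dc ivac loop plus copy_page above */
				memcpy(dest, addr, PAGE_SIZE);
				dest += PAGE_SIZE;
			} else if (entry & IND_INDIRECTION) {
				ptr = (unsigned long *)addr; /* next list page */
			} else if (entry & IND_DESTINATION) {
				dest = addr;
			}
			entry = *ptr++;		/* ldr x16, [x14], #8 */
		}

		/* dsb; ic iallu; isb; then br x17 with x0-x3 zeroed. */
		((void (*)(void))kimage_start)();
	}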
include/uapi/linux/kexec.h (+1)

 #define KEXEC_ARCH_SH      (42 << 16)
 #define KEXEC_ARCH_MIPS_LE (10 << 16)
 #define KEXEC_ARCH_MIPS    ( 8 << 16)
+#define KEXEC_ARCH_AARCH64 (183 << 16)

 /* The artificial cap on the number of segments passed to kexec_load. */
 #define KEXEC_SEGMENT_MAX 16
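
The value 183 is the AArch64 ELF e_machine number, shifted into the top half
of the kexec_load flags word. The core code compares it before accepting an
image; a paraphrase of that check from do_kexec_load() in kernel/kexec.c:

	/* Reject images built for some other architecture. */
	if ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH &&
	    (flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)
		return -EINVAL;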