Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: kernel: suspend/resume registers save/restore

Power management software requires the kernel to save and restore
CPU registers while going through suspend and resume operations
triggered by kernel subsystems like CPU idle and suspend to RAM.

This patch implements code that provides a save and restore mechanism
for the ARMv8 implementation. Memory for the context is passed as a
parameter to both the cpu_do_suspend and cpu_do_resume functions, which
allows the callers to implement context allocation as they deem fit.

The registers that are saved and restored correspond to the register set
actually required by the kernel to be up and running, which represents
a subset of the v8 ISA.

Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>

+90
+3
arch/arm64/include/asm/proc-fns.h
#include <asm/page.h>

struct mm_struct;
/* Opaque here; defined in <asm/suspend.h> (16-byte-aligned register context). */
struct cpu_suspend_ctx;

extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
/* Save the kernel-visible CPU register context into *ptr (virtual address). */
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
/*
 * Restore the context saved by cpu_do_suspend.
 * @ptr: physical address of the saved context (may run with MMU off)
 * @idmap_ttbr: value to program into ttbr0_el1
 * Returns the saved sctlr_el1 value, for the caller to re-enable the MMU.
 */
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);

#include <asm/memory.h>
+18
arch/arm64/include/asm/suspend.h
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H

/* Number of system registers saved/restored; must match the mrs/msr
 * sequences in cpu_do_{suspend,resume} (mm/proc.S). */
#define NR_CTX_REGS 11

/*
 * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
 * the stack, which must be 16-byte aligned on v8
 */
struct cpu_suspend_ctx {
	/*
	 * This struct must be kept in sync with
	 * cpu_do_{suspend/resume} in mm/proc.S
	 */
	u64 ctx_regs[NR_CTX_REGS];
	u64 sp;
} __aligned(16);
#endif
+69
arch/arm64/mm/proc.S
··· 80 80 ret 81 81 ENDPROC(cpu_do_idle) 82 82 83 + #ifdef CONFIG_ARM64_CPU_SUSPEND 84 + /** 85 + * cpu_do_suspend - save CPU registers context 86 + * 87 + * x0: virtual address of context pointer 88 + */ 89 + ENTRY(cpu_do_suspend) 90 + mrs x2, tpidr_el0 91 + mrs x3, tpidrro_el0 92 + mrs x4, contextidr_el1 93 + mrs x5, mair_el1 94 + mrs x6, cpacr_el1 95 + mrs x7, ttbr1_el1 96 + mrs x8, tcr_el1 97 + mrs x9, vbar_el1 98 + mrs x10, mdscr_el1 99 + mrs x11, oslsr_el1 100 + mrs x12, sctlr_el1 101 + stp x2, x3, [x0] 102 + stp x4, x5, [x0, #16] 103 + stp x6, x7, [x0, #32] 104 + stp x8, x9, [x0, #48] 105 + stp x10, x11, [x0, #64] 106 + str x12, [x0, #80] 107 + ret 108 + ENDPROC(cpu_do_suspend) 109 + 110 + /** 111 + * cpu_do_resume - restore CPU register context 112 + * 113 + * x0: Physical address of context pointer 114 + * x1: ttbr0_el1 to be restored 115 + * 116 + * Returns: 117 + * sctlr_el1 value in x0 118 + */ 119 + ENTRY(cpu_do_resume) 120 + /* 121 + * Invalidate local tlb entries before turning on MMU 122 + */ 123 + tlbi vmalle1 124 + ldp x2, x3, [x0] 125 + ldp x4, x5, [x0, #16] 126 + ldp x6, x7, [x0, #32] 127 + ldp x8, x9, [x0, #48] 128 + ldp x10, x11, [x0, #64] 129 + ldr x12, [x0, #80] 130 + msr tpidr_el0, x2 131 + msr tpidrro_el0, x3 132 + msr contextidr_el1, x4 133 + msr mair_el1, x5 134 + msr cpacr_el1, x6 135 + msr ttbr0_el1, x1 136 + msr ttbr1_el1, x7 137 + msr tcr_el1, x8 138 + msr vbar_el1, x9 139 + msr mdscr_el1, x10 140 + /* 141 + * Restore oslsr_el1 by writing oslar_el1 142 + */ 143 + ubfx x11, x11, #1, #1 144 + msr oslar_el1, x11 145 + mov x0, x12 146 + dsb nsh // Make sure local tlb invalidation completed 147 + isb 148 + ret 149 + ENDPROC(cpu_do_resume) 150 + #endif 151 + 83 152 /* 84 153 * cpu_switch_mm(pgd_phys, tsk) 85 154 *