Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86-32, hibernate: Set up temporary text mapping for 32-bit systems

Set up the temporary text mapping for the final jump address
so that the system can jump to the right address after all
the pages have been copied back to their original addresses —
otherwise the final mapping for the jump address is invalid.

Analogous changes were made for 64-bit in commit 65c0554b73c9
(x86/power/64: Fix kernel text mapping corruption during image
restoration).

Signed-off-by: Zhimin Gu <kookoo.gu@intel.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

authored by

Zhimin Gu and committed by
Rafael J. Wysocki
5331d2c7 6bae499a

+34 -4
-4
arch/x86/power/hibernate.c
··· 157 157 if (max_size < sizeof(struct restore_data_record)) 158 158 return -EOVERFLOW; 159 159 rdr->magic = RESTORE_MAGIC; 160 - #ifdef CONFIG_X86_64 161 160 rdr->jump_address = (unsigned long)restore_registers; 162 161 rdr->jump_address_phys = __pa_symbol(restore_registers); 163 - #endif 164 162 165 163 /* 166 164 * The restore code fixes up CR3 and CR4 in the following sequence: ··· 196 198 return -EINVAL; 197 199 } 198 200 199 - #ifdef CONFIG_X86_64 200 201 restore_jump_address = rdr->jump_address; 201 202 jump_address_phys = rdr->jump_address_phys; 202 - #endif 203 203 restore_cr3 = rdr->cr3; 204 204 205 205 if (hibernation_e820_mismatch(rdr->e820_digest)) {
+31
arch/x86/power/hibernate_32.c
··· 143 143 #endif 144 144 } 145 145 146 + static int set_up_temporary_text_mapping(pgd_t *pgd_base) 147 + { 148 + pgd_t *pgd; 149 + pmd_t *pmd; 150 + pte_t *pte; 151 + 152 + pgd = pgd_base + pgd_index(restore_jump_address); 153 + 154 + pmd = resume_one_md_table_init(pgd); 155 + if (!pmd) 156 + return -ENOMEM; 157 + 158 + if (boot_cpu_has(X86_FEATURE_PSE)) { 159 + set_pmd(pmd + pmd_index(restore_jump_address), 160 + __pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC))); 161 + } else { 162 + pte = resume_one_page_table_init(pmd); 163 + if (!pte) 164 + return -ENOMEM; 165 + set_pte(pte + pte_index(restore_jump_address), 166 + __pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC))); 167 + } 168 + 169 + return 0; 170 + } 171 + 146 172 asmlinkage int swsusp_arch_resume(void) 147 173 { 148 174 int error; ··· 178 152 return -ENOMEM; 179 153 180 154 resume_init_first_level_page_table(resume_pg_dir); 155 + 156 + error = set_up_temporary_text_mapping(resume_pg_dir); 157 + if (error) 158 + return error; 159 + 181 160 error = resume_physical_mapping_init(resume_pg_dir); 182 161 if (error) 183 162 return error;
+3
arch/x86/power/hibernate_asm_32.S
··· 36 36 ENDPROC(swsusp_arch_suspend) 37 37 38 38 ENTRY(restore_image) 39 + /* prepare to jump to the image kernel */ 40 + movl restore_jump_address, %ebx 39 41 movl restore_cr3, %ebp 40 42 41 43 movl mmu_cr4_features, %ecx ··· 76 74 .p2align 4,,7 77 75 78 76 done: 77 + jmpl *%ebx 79 78 80 79 /* code below belongs to the image kernel */ 81 80 .align PAGE_SIZE