Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] hibernation: fix guest page hinting related crash

On resume, the system that loads the to-be-resumed image might have
unstable pages.
When the resume image is copied back and a write access happens to an
unstable page, this causes an exception and the system crashes.

To fix this, set all free pages to stable before copying the resumed
image data. Also, after everything has been restored, set all free
pages of the resumed system to unstable again.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

authored by

Heiko Carstens and committed by
Martin Schwidefsky
846955c8 2e50195f

+53 -10
+7
arch/s390/kernel/swsusp_asm64.S
··· 102 102 aghi %r15,-STACK_FRAME_OVERHEAD 103 103 stg %r1,__SF_BACKCHAIN(%r15) 104 104 105 + /* Make all free pages stable */ 106 + lghi %r2,1 107 + brasl %r14,arch_set_page_states 105 108 #ifdef CONFIG_SMP 106 109 /* Save boot cpu number */ 107 110 brasl %r14,smp_get_phys_cpu_id ··· 180 177 181 178 /* Activate DAT */ 182 179 stosm __SF_EMPTY(%r15),0x04 180 + 181 + /* Make all free pages unstable */ 182 + lghi %r2,0 183 + brasl %r14,arch_set_page_states 183 184 184 185 /* Return 0 */ 185 186 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+46 -10
arch/s390/mm/page-states.c
··· 50 50 cmma_flag = 0; 51 51 } 52 52 53 - void arch_free_page(struct page *page, int order) 53 + static inline void set_page_unstable(struct page *page, int order) 54 54 { 55 55 int i, rc; 56 56 57 - if (!cmma_flag) 58 - return; 59 57 for (i = 0; i < (1 << order); i++) 60 58 asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0" 61 59 : "=&d" (rc) 62 - : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT), 60 + : "a" (page_to_phys(page + i)), 63 61 "i" (ESSA_SET_UNUSED)); 62 + } 63 + 64 + void arch_free_page(struct page *page, int order) 65 + { 66 + if (!cmma_flag) 67 + return; 68 + set_page_unstable(page, order); 69 + } 70 + 71 + static inline void set_page_stable(struct page *page, int order) 72 + { 73 + int i, rc; 74 + 75 + for (i = 0; i < (1 << order); i++) 76 + asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0" 77 + : "=&d" (rc) 78 + : "a" (page_to_phys(page + i)), 79 + "i" (ESSA_SET_STABLE)); 64 80 } 65 81 66 82 void arch_alloc_page(struct page *page, int order) 67 83 { 68 - int i, rc; 84 + if (!cmma_flag) 85 + return; 86 + set_page_stable(page, order); 87 + } 88 + 89 + void arch_set_page_states(int make_stable) 90 + { 91 + unsigned long flags, order, t; 92 + struct list_head *l; 93 + struct page *page; 94 + struct zone *zone; 69 95 70 96 if (!cmma_flag) 71 97 return; 72 - for (i = 0; i < (1 << order); i++) 73 - asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0" 74 - : "=&d" (rc) 75 - : "a" ((page_to_pfn(page) + i) << PAGE_SHIFT), 76 - "i" (ESSA_SET_STABLE)); 98 + if (make_stable) 99 + drain_local_pages(NULL); 100 + for_each_populated_zone(zone) { 101 + spin_lock_irqsave(&zone->lock, flags); 102 + for_each_migratetype_order(order, t) { 103 + list_for_each(l, &zone->free_area[order].free_list[t]) { 104 + page = list_entry(l, struct page, lru); 105 + if (make_stable) 106 + set_page_stable(page, order); 107 + else 108 + set_page_unstable(page, order); 109 + } 110 + } 111 + spin_unlock_irqrestore(&zone->lock, flags); 112 + } 77 113 }