Merge branches 'pm-sleep' and 'pm-cpufreq'

* pm-sleep:
PM / hibernate: Restore processor state before using per-CPU variables
x86/power/64: Always create temporary identity mapping correctly

* pm-cpufreq:
cpufreq: powernv: Fix crash in gpstate_timer_handler()

 arch/x86/include/asm/init.h       |  4 ++--
 arch/x86/mm/ident_map.c           | 19 +++++++++++--------
 arch/x86/power/hibernate_64.c     |  2 +-
 drivers/cpufreq/powernv-cpufreq.c | 21 ++++++++++++++++++++-
 kernel/power/hibernate.c          |  4 ++--
 5 files changed, 36 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,10 +5,10 @@ struct x86_mapping_info {
 	void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
 	void *context;			 /* context for alloc_pgt_page */
 	unsigned long pmd_flag;		 /* page flag for PMD entry */
-	bool kernel_mapping;		 /* kernel mapping or ident mapping */
+	unsigned long offset;		 /* ident mapping offset */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-				unsigned long addr, unsigned long end);
+				unsigned long pstart, unsigned long pend);
 
 #endif /* _ASM_X86_INIT_H */
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,17 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
 	addr &= PMD_MASK;
 	for (; addr < end; addr += PMD_SIZE) {
 		pmd_t *pmd = pmd_page + pmd_index(addr);
 
-		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+		if (pmd_present(*pmd))
+			continue;
+
+		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
 	}
 }
 
@@ -30,13 +32,13 @@
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, addr, next);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, addr, next);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
 
@@ -44,14 +46,15 @@
 }
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-			      unsigned long addr, unsigned long end)
+			      unsigned long pstart, unsigned long pend)
 {
+	unsigned long addr = pstart + info->offset;
+	unsigned long end = pend + info->offset;
 	unsigned long next;
 	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 
 	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+		pgd_t *pgd = pgd_page + pgd_index(addr);
 		pud_t *pud;
 
 		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
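The ident_map.c change above replaces the kernel_mapping boolean with an explicit offset, so one walker can build either a pure identity mapping (offset == 0) or the kernel's direct mapping (offset == __PAGE_OFFSET): callers pass physical bounds, the walker shifts them into virtual space, and each PMD entry maps virtual addr back to physical addr - offset. The user-space sketch below illustrates just that arithmetic; the PMD constants match the x86-64 2 MiB large-page values, but the __PAGE_OFFSET value and everything else here are illustrative, not the kernel's paging code.

#include <stdio.h>

#define PMD_SIZE (1UL << 21)            /* 2 MiB large pages */
#define PMD_MASK (~(PMD_SIZE - 1))

struct mapping_info { unsigned long offset; };

static void show_mappings(const struct mapping_info *info,
                          unsigned long pstart, unsigned long pend)
{
        /* Shift physical bounds into virtual space, as the patch does. */
        unsigned long addr = (pstart + info->offset) & PMD_MASK;
        unsigned long end  = pend + info->offset;

        /* Each "entry" maps virtual addr to physical addr - offset. */
        for (; addr < end; addr += PMD_SIZE)
                printf("virt %#018lx -> phys %#018lx\n",
                       addr, addr - info->offset);
}

int main(void)
{
        struct mapping_info ident  = { .offset = 0 };
        struct mapping_info direct = { .offset = 0xffff880000000000UL };

        puts("identity mapping (offset = 0):");
        show_mappings(&ident, 0x0, 2 * PMD_SIZE);
        puts("direct mapping (offset = __PAGE_OFFSET):");
        show_mappings(&direct, 0x0, 2 * PMD_SIZE);
        return 0;
}

With offset == 0 the two columns coincide, which is why the old kernel_mapping flag could be dropped rather than kept as a special case.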
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void)
 	struct x86_mapping_info info = {
 		.alloc_pgt_page	= alloc_pgt_page,
 		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
-		.kernel_mapping = true,
+		.offset		= __PAGE_OFFSET,
 	};
 	unsigned long mstart, mend;
 	pgd_t *pgd;
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -145,11 +145,30 @@
 /* Use following macros for conversions between pstate_id and index */
 static inline int idx_to_pstate(unsigned int i)
 {
+	if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
+		pr_warn_once("index %u is out of bound\n", i);
+		return powernv_freqs[powernv_pstate_info.nominal].driver_data;
+	}
+
 	return powernv_freqs[i].driver_data;
 }
 
 static inline unsigned int pstate_to_idx(int pstate)
 {
+	int min = powernv_freqs[powernv_pstate_info.min].driver_data;
+	int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+
+	if (min > 0) {
+		if (unlikely((pstate < max) || (pstate > min))) {
+			pr_warn_once("pstate %d is out of bound\n", pstate);
+			return powernv_pstate_info.nominal;
+		}
+	} else {
+		if (unlikely((pstate > max) || (pstate < min))) {
+			pr_warn_once("pstate %d is out of bound\n", pstate);
+			return powernv_pstate_info.nominal;
+		}
+	}
 	/*
 	 * abs() is deliberately used so that is works with
 	 * both monotonically increasing and decreasing
@@ -593,7 +612,7 @@ void gpstate_timer_handler(unsigned long data)
 	} else {
 		gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
 						 gpstates->highest_lpstate_idx,
-						 freq_data.pstate_id);
+						 gpstates->last_lpstate_idx);
 	}
 
 	/*
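The powernv-cpufreq fix has two parts: gpstate_timer_handler() now passes calc_global_pstate() the last local pstate index rather than a raw pstate id, and the conversion helpers reject out-of-range values instead of indexing past powernv_freqs[]. The stand-alone sketch below shows that defensive-lookup pattern with an invented table and fallback index (nothing here is the driver's powernv_pstate_info); as the driver's own comment notes, abs() keeps one formula valid whether the pstate table runs monotonically up or down.

#include <stdio.h>
#include <stdlib.h>

#define NR_PSTATES  4
#define NOMINAL_IDX 1                   /* safe fallback entry */

/* Toy pstate table; may run high-to-low or low-to-high. */
static const int pstates[NR_PSTATES] = { 3, 2, 1, 0 };

static unsigned int pstate_to_idx(int pstate)
{
        int first = pstates[0];
        int last  = pstates[NR_PSTATES - 1];
        int lo = first < last ? first : last;
        int hi = first < last ? last : first;

        /* Out-of-range input falls back to the nominal entry instead
         * of indexing past the table (the source of the crash). */
        if (pstate < lo || pstate > hi) {
                fprintf(stderr, "pstate %d is out of bound\n", pstate);
                return NOMINAL_IDX;
        }

        /* abs() so the same formula works for either table direction. */
        return (unsigned int)abs(pstate - first);
}

int main(void)
{
        printf("idx for pstate 2:  %u\n", pstate_to_idx(2));   /* 1 */
        printf("idx for pstate 42: %u\n", pstate_to_idx(42));  /* nominal */
        return 0;
}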
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -300,12 +300,12 @@ static int create_image(int platform_mode)
 	save_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
 	error = swsusp_arch_suspend();
+	/* Restore control flow magically appears here */
+	restore_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
 	if (error)
 		printk(KERN_ERR "PM: Error %d creating hibernation image\n",
 			error);
-	/* Restore control flow magically appears here */
-	restore_processor_state();
 	if (!in_suspend)
 		events_check_enabled = false;
 
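This reordering matters because trace_suspend_resume() touches per-CPU data, and on x86-64 per-CPU variables are reached through a segment base that is only valid again once restore_processor_state() has run on the resume path. The toy user-space model below (gs_base standing in for that register; every name here is invented for illustration) shows how reading per-CPU data before the restore goes through a stale base, which is the bug the patch fixes.

#include <stdio.h>

static int cpu0_data = 42;          /* "per-CPU" data of the hibernated kernel */
static int boot_image_data = -1;    /* junk state left by the boot kernel */

static int *gs_base;                /* stand-in for the per-CPU base register */
static int *saved_gs_base;

static void save_processor_state(void)    { saved_gs_base = gs_base; }
static void restore_processor_state(void) { gs_base = saved_gs_base; }

/* Stand-in for a per-CPU read, e.g. from inside a tracepoint. */
static int this_cpu_read(void) { return *gs_base; }

int main(void)
{
        gs_base = &cpu0_data;
        save_processor_state();

        /* swsusp_arch_suspend() "returns" here on resume, with the
         * boot kernel's processor state still in the registers. */
        gs_base = &boot_image_data;

        /* Old ordering: per-CPU read through the stale base -> wrong. */
        printf("before restore: %d\n", this_cpu_read());   /* -1 */

        restore_processor_state();
        printf("after restore:  %d\n", this_cpu_read());   /* 42 */
        return 0;
}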