Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"This update contains:

   - Hopefully the last ASM CLAC fixups

   - A fix for the Quark family related to the IMR lock which makes
     kexec work again

   - An off-by-one fix in the MPX code. Ironic, isn't it?

   - A fix for X86_PAE which once more addresses an unsigned long vs
     phys_addr_t hiccup"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mpx: Fix off-by-one comparison with nr_registers
x86/mm: Fix slow_virt_to_phys() for X86_PAE again
x86/entry/compat: Add missing CLAC to entry_INT80_32
x86/entry/32: Add an ASM_CLAC to entry_SYSENTER_32
x86/platform/intel/quark: Change the kernel's IMR lock bit to false

Changed files:

 arch/x86/entry/entry_32.S           |  1 +
 arch/x86/entry/entry_64_compat.S    |  1 +
 arch/x86/mm/mpx.c                   |  2 +-
 arch/x86/mm/pageattr.c              | 14 ++++++++++----
 arch/x86/platform/intel-quark/imr.c |  4 ++--
 5 files changed, 15 insertions(+), 7 deletions(-)

arch/x86/entry/entry_32.S

@@ -294,6 +294,7 @@
 	pushl	$__USER_DS		/* pt_regs->ss */
 	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
 	pushfl				/* pt_regs->flags (except IF = 0) */
+	ASM_CLAC			/* Clear AC after saving FLAGS */
 	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
 	pushl	$__USER_CS		/* pt_regs->cs */
 	pushl	$0			/* pt_regs->ip = 0 (placeholder) */

arch/x86/entry/entry_64_compat.S

@@ -261,6 +261,7 @@
 	 * Interrupts are off on entry.
 	 */
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	ASM_CLAC			/* Do this early to minimize exposure */
 	SWAPGS
 
 	/*
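
Both entry-path changes close the same hole: with SMAP enabled, a set
EFLAGS.AC inherited from user space lets kernel code touch user memory
implicitly, so AC must be cleared (CLAC) as early as possible on entry
and only set again around deliberate user accesses. A minimal sketch of
that discipline follows, using hypothetical helper names rather than
the kernel's actual uaccess implementation:

	/*
	 * Hypothetical helpers illustrating the SMAP discipline the
	 * early ASM_CLAC enforces: EFLAGS.AC is set only around
	 * deliberate user-memory accesses and cleared everywhere else.
	 */
	static inline void user_access_allow(void)
	{
		asm volatile("stac" ::: "memory");	/* set EFLAGS.AC */
	}

	static inline void user_access_forbid(void)
	{
		asm volatile("clac" ::: "memory");	/* clear EFLAGS.AC */
	}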

arch/x86/mm/mpx.c

@@ -123,7 +123,7 @@
 		break;
 	}
 
-	if (regno > nr_registers) {
+	if (regno >= nr_registers) {
 		WARN_ONCE(1, "decoded an instruction with an invalid register");
 		return -EINVAL;
 	}
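
The off-by-one: regno indexes an array with nr_registers entries, so
valid indices are 0 .. nr_registers - 1, and regno == nr_registers must
be rejected; the old ">" test let exactly that one-past-the-end index
through. A standalone illustration (hypothetical register array, not
the MPX decoder itself):

	#include <stdio.h>

	#define NR_REGISTERS 16

	static long regs[NR_REGISTERS];		/* valid indices: 0 .. 15 */

	static int read_reg(int regno, long *val)
	{
		if (regno >= NR_REGISTERS)	/* ">" would accept regno == 16 */
			return -1;
		*val = regs[regno];
		return 0;
	}

	int main(void)
	{
		long v;

		/* one past the end: must fail, reads out of bounds with ">" */
		printf("%d\n", read_reg(NR_REGISTERS, &v));
		return 0;
	}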

arch/x86/mm/pageattr.c

@@ -419,24 +419,30 @@
 phys_addr_t slow_virt_to_phys(void *__virt_addr)
 {
 	unsigned long virt_addr = (unsigned long)__virt_addr;
-	unsigned long phys_addr, offset;
+	phys_addr_t phys_addr;
+	unsigned long offset;
 	enum pg_level level;
 	pte_t *pte;
 
 	pte = lookup_address(virt_addr, &level);
 	BUG_ON(!pte);
 
+	/*
+	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
+	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
+	 * make 32-PAE kernel work correctly.
+	 */
 	switch (level) {
 	case PG_LEVEL_1G:
-		phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PUD_PAGE_MASK;
 		break;
 	case PG_LEVEL_2M:
-		phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PMD_PAGE_MASK;
 		break;
 	default:
-		phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
 		offset = virt_addr & ~PAGE_MASK;
 	}
 
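
The PAE problem is plain 32-bit truncation: on i386, unsigned long is
32 bits while PAE physical addresses can exceed 4 GiB, so shifting a
pfn left by PAGE_SHIFT in unsigned long silently drops the high bits;
widening to phys_addr_t (64-bit under PAE) before the shift preserves
them. A self-contained demonstration of the arithmetic (userspace C,
fixed-width types standing in for the kernel's):

	#include <inttypes.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		/* a page frame whose physical address lies above 4 GiB */
		uint32_t pfn = 0x123456;	/* phys 0x123456000 */

		/* shift performed in 32 bits: the high bits wrap away */
		uint32_t truncated = pfn << PAGE_SHIFT;

		/* widen first, as the fix does with the (phys_addr_t) cast */
		uint64_t correct = (uint64_t)pfn << PAGE_SHIFT;

		printf("truncated: 0x%" PRIx32 "\n", truncated);  /* 0x23456000 */
		printf("correct:   0x%" PRIx64 "\n", correct);    /* 0x123456000 */
		return 0;
	}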

arch/x86/platform/intel-quark/imr.c

@@ -592,14 +592,14 @@
 	end = (unsigned long)__end_rodata - 1;
 
 	/*
-	 * Setup a locked IMR around the physical extent of the kernel
+	 * Setup an unlocked IMR around the physical extent of the kernel
 	 * from the beginning of the .text secton to the end of the
 	 * .rodata section as one physically contiguous block.
 	 *
 	 * We don't round up @size since it is already PAGE_SIZE aligned.
 	 * See vmlinux.lds.S for details.
 	 */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
 	if (ret < 0) {
 		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
 			size / 1024, start, end);
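
The lock bit is the difference between an IMR that can be torn down and
one that is immutable until platform reset: kexec has to remove the IMR
covering the old kernel image before handing control to a new one,
which a locked IMR forbids. A hedged sketch of the intended pairing,
assuming the imr_remove_range() API from the same driver:

	/*
	 * Sketch only (kernel-context pseudocode): an IMR added with
	 * lock=false can later be removed, which is what kexec needs.
	 * imr_remove_range() is assumed from the same driver; a locked
	 * IMR would refuse removal until platform reset.
	 */
	static int quark_imr_protect_then_release(phys_addr_t base, size_t size)
	{
		int ret;

		ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
		if (ret < 0)
			return ret;

		/* ... later, e.g. before kexec hands over to a new image ... */
		return imr_remove_range(base, size);
	}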