Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"This update contains:

- Hopefully the last ASM CLAC fixups

- A fix for the Quark family related to the IMR lock which makes
kexec work again

- An off-by-one fix in the MPX code. Ironic, isn't it?

- A fix for X86_PAE which addresses once more an unsigned long vs
phys_addr_t hiccup"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mpx: Fix off-by-one comparison with nr_registers
x86/mm: Fix slow_virt_to_phys() for X86_PAE again
x86/entry/compat: Add missing CLAC to entry_INT80_32
x86/entry/32: Add an ASM_CLAC to entry_SYSENTER_32
x86/platform/intel/quark: Change the kernel's IMR lock bit to false

+15 -7
+1
arch/x86/entry/entry_32.S
··· 294 pushl $__USER_DS /* pt_regs->ss */ 295 pushl %ebp /* pt_regs->sp (stashed in bp) */ 296 pushfl /* pt_regs->flags (except IF = 0) */ 297 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ 298 pushl $__USER_CS /* pt_regs->cs */ 299 pushl $0 /* pt_regs->ip = 0 (placeholder) */
··· 294 pushl $__USER_DS /* pt_regs->ss */ 295 pushl %ebp /* pt_regs->sp (stashed in bp) */ 296 pushfl /* pt_regs->flags (except IF = 0) */ 297 + ASM_CLAC /* Clear AC after saving FLAGS */ 298 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ 299 pushl $__USER_CS /* pt_regs->cs */ 300 pushl $0 /* pt_regs->ip = 0 (placeholder) */
+1
arch/x86/entry/entry_64_compat.S
··· 261 * Interrupts are off on entry. 262 */ 263 PARAVIRT_ADJUST_EXCEPTION_FRAME 264 SWAPGS 265 266 /*
··· 261 * Interrupts are off on entry. 262 */ 263 PARAVIRT_ADJUST_EXCEPTION_FRAME 264 + ASM_CLAC /* Do this early to minimize exposure */ 265 SWAPGS 266 267 /*
+1 -1
arch/x86/mm/mpx.c
··· 123 break; 124 } 125 126 - if (regno > nr_registers) { 127 WARN_ONCE(1, "decoded an instruction with an invalid register"); 128 return -EINVAL; 129 }
··· 123 break; 124 } 125 126 + if (regno >= nr_registers) { 127 WARN_ONCE(1, "decoded an instruction with an invalid register"); 128 return -EINVAL; 129 }
+10 -4
arch/x86/mm/pageattr.c
··· 419 phys_addr_t slow_virt_to_phys(void *__virt_addr) 420 { 421 unsigned long virt_addr = (unsigned long)__virt_addr; 422 - unsigned long phys_addr, offset; 423 enum pg_level level; 424 pte_t *pte; 425 426 pte = lookup_address(virt_addr, &level); 427 BUG_ON(!pte); 428 429 switch (level) { 430 case PG_LEVEL_1G: 431 - phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT; 432 offset = virt_addr & ~PUD_PAGE_MASK; 433 break; 434 case PG_LEVEL_2M: 435 - phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT; 436 offset = virt_addr & ~PMD_PAGE_MASK; 437 break; 438 default: 439 - phys_addr = pte_pfn(*pte) << PAGE_SHIFT; 440 offset = virt_addr & ~PAGE_MASK; 441 } 442
··· 419 phys_addr_t slow_virt_to_phys(void *__virt_addr) 420 { 421 unsigned long virt_addr = (unsigned long)__virt_addr; 422 + phys_addr_t phys_addr; 423 + unsigned long offset; 424 enum pg_level level; 425 pte_t *pte; 426 427 pte = lookup_address(virt_addr, &level); 428 BUG_ON(!pte); 429 430 + /* 431 + * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t 432 + * before being left-shifted PAGE_SHIFT bits -- this trick is to 433 + * make 32-PAE kernel work correctly. 434 + */ 435 switch (level) { 436 case PG_LEVEL_1G: 437 + phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT; 438 offset = virt_addr & ~PUD_PAGE_MASK; 439 break; 440 case PG_LEVEL_2M: 441 + phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT; 442 offset = virt_addr & ~PMD_PAGE_MASK; 443 break; 444 default: 445 + phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; 446 offset = virt_addr & ~PAGE_MASK; 447 } 448
+2 -2
arch/x86/platform/intel-quark/imr.c
··· 592 end = (unsigned long)__end_rodata - 1; 593 594 /* 595 - * Setup a locked IMR around the physical extent of the kernel 596 * from the beginning of the .text secton to the end of the 597 * .rodata section as one physically contiguous block. 598 * 599 * We don't round up @size since it is already PAGE_SIZE aligned. 600 * See vmlinux.lds.S for details. 601 */ 602 - ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true); 603 if (ret < 0) { 604 pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n", 605 size / 1024, start, end);
··· 592 end = (unsigned long)__end_rodata - 1; 593 594 /* 595 + * Setup an unlocked IMR around the physical extent of the kernel 596 * from the beginning of the .text secton to the end of the 597 * .rodata section as one physically contiguous block. 598 * 599 * We don't round up @size since it is already PAGE_SIZE aligned. 600 * See vmlinux.lds.S for details. 601 */ 602 + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); 603 if (ret < 0) { 604 pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n", 605 size / 1024, start, end);