Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"A set of fixes for x86:

- Make the unwinder more robust when it encounters a NULL pointer
call, so the backtrace becomes more useful

- Fix the bogus ORC unwind table alignment

- Prevent a kernel panic during kexec on Hyper-V, caused by a hypercall
page that was cleared but not disabled.

- Remove the now pointless stacksize increase for KASAN_EXTRA, as
KASAN_EXTRA is gone.

- Remove unused variables from the x86 memory management code"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/hyperv: Fix kernel panic when kexec on HyperV
x86/mm: Remove unused variable 'old_pte'
x86/mm: Remove unused variable 'cpu'
Revert "x86_64: Increase stack size for KASAN_EXTRA"
x86/unwind: Add hardcoded ORC entry for NULL
x86/unwind: Handle NULL pointer calls better in frame unwinder
x86/unwind/orc: Fix ORC unwind table alignment

+55 -10
+7
arch/x86/hyperv/hv_init.c
··· 407 407 /* Reset our OS id */ 408 408 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); 409 409 410 + /* 411 + * Reset hypercall page reference before resetting the page, 412 + * let hypercall operations fail safely rather than 413 + * panic the kernel for using invalid hypercall page 414 + */ 415 + hv_hypercall_pg = NULL; 416 + 410 417 /* Reset the hypercall page */ 411 418 hypercall_msr.as_uint64 = 0; 412 419 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
-4
arch/x86/include/asm/page_64_types.h
··· 7 7 #endif 8 8 9 9 #ifdef CONFIG_KASAN 10 - #ifdef CONFIG_KASAN_EXTRA 11 - #define KASAN_STACK_ORDER 2 12 - #else 13 10 #define KASAN_STACK_ORDER 1 14 - #endif 15 11 #else 16 12 #define KASAN_STACK_ORDER 0 17 13 #endif
+6
arch/x86/include/asm/unwind.h
··· 23 23 #elif defined(CONFIG_UNWINDER_FRAME_POINTER) 24 24 bool got_irq; 25 25 unsigned long *bp, *orig_sp, ip; 26 + /* 27 + * If non-NULL: The current frame is incomplete and doesn't contain a 28 + * valid BP. When looking for the next frame, use this instead of the 29 + * non-existent saved BP. 30 + */ 31 + unsigned long *next_bp; 26 32 struct pt_regs *regs; 27 33 #else 28 34 unsigned long *sp;
+22 -3
arch/x86/kernel/unwind_frame.c
··· 320 320 } 321 321 322 322 /* Get the next frame pointer: */ 323 - if (state->regs) 323 + if (state->next_bp) { 324 + next_bp = state->next_bp; 325 + state->next_bp = NULL; 326 + } else if (state->regs) { 324 327 next_bp = (unsigned long *)state->regs->bp; 325 - else 328 + } else { 326 329 next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp); 330 + } 327 331 328 332 /* Move to the next frame if it's safe: */ 329 333 if (!update_stack_state(state, next_bp)) ··· 402 398 403 399 bp = get_frame_pointer(task, regs); 404 400 401 + /* 402 + * If we crash with IP==0, the last successfully executed instruction 403 + * was probably an indirect function call with a NULL function pointer. 404 + * That means that SP points into the middle of an incomplete frame: 405 + * *SP is a return pointer, and *(SP-sizeof(unsigned long)) is where we 406 + * would have written a frame pointer if we hadn't crashed. 407 + * Pretend that the frame is complete and that BP points to it, but save 408 + * the real BP so that we can use it when looking for the next frame. 409 + */ 410 + if (regs && regs->ip == 0 && 411 + (unsigned long *)kernel_stack_pointer(regs) >= first_frame) { 412 + state->next_bp = bp; 413 + bp = ((unsigned long *)kernel_stack_pointer(regs)) - 1; 414 + } 415 + 405 416 /* Initialize stack info and make sure the frame data is accessible: */ 406 417 get_stack_info(bp, state->task, &state->stack_info, 407 418 &state->stack_mask); ··· 429 410 */ 430 411 while (!unwind_done(state) && 431 412 (!on_stack(&state->stack_info, first_frame, sizeof(long)) || 432 - state->bp < first_frame)) 413 + (state->next_bp == NULL && state->bp < first_frame))) 433 414 unwind_next_frame(state); 434 415 } 435 416 EXPORT_SYMBOL_GPL(__unwind_start);
+17
arch/x86/kernel/unwind_orc.c
··· 113 113 } 114 114 #endif 115 115 116 + /* 117 + * If we crash with IP==0, the last successfully executed instruction 118 + * was probably an indirect function call with a NULL function pointer, 119 + * and we don't have unwind information for NULL. 120 + * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function 121 + * pointer into its parent and then continue normally from there. 122 + */ 123 + static struct orc_entry null_orc_entry = { 124 + .sp_offset = sizeof(long), 125 + .sp_reg = ORC_REG_SP, 126 + .bp_reg = ORC_REG_UNDEFINED, 127 + .type = ORC_TYPE_CALL 128 + }; 129 + 116 130 static struct orc_entry *orc_find(unsigned long ip) 117 131 { 118 132 static struct orc_entry *orc; 119 133 120 134 if (!orc_init) 121 135 return NULL; 136 + 137 + if (ip == 0) 138 + return &null_orc_entry; 122 139 123 140 /* For non-init vmlinux addresses, use the fast lookup table: */ 124 141 if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
+2 -2
arch/x86/mm/pageattr.c
··· 738 738 { 739 739 unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn; 740 740 pgprot_t old_prot, new_prot, req_prot, chk_prot; 741 - pte_t new_pte, old_pte, *tmp; 741 + pte_t new_pte, *tmp; 742 742 enum pg_level level; 743 743 744 744 /* ··· 781 781 * Convert protection attributes to 4k-format, as cpa->mask* are set 782 782 * up accordingly. 783 783 */ 784 - old_pte = *kpte; 784 + 785 785 /* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */ 786 786 req_prot = pgprot_large_2_4k(old_prot); 787 787
+1 -1
include/asm-generic/vmlinux.lds.h
··· 733 733 KEEP(*(.orc_unwind_ip)) \ 734 734 __stop_orc_unwind_ip = .; \ 735 735 } \ 736 - . = ALIGN(6); \ 736 + . = ALIGN(2); \ 737 737 .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \ 738 738 __start_orc_unwind = .; \ 739 739 KEEP(*(.orc_unwind)) \