Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"Another set of x86 related updates:

- Fix the long-broken x32 version of the IPC user space headers, which
was noticed by Arnd Bergmann in the course of his ongoing y2038 work.
GLIBC seems to have working private copies of these headers, so this
went unnoticed.

- Two microcode fixlets which address some more fallout from the
recent modifications in that area:

- Unconditionally save the microcode patch; it was previously only
saved when CONFIG_HOTPLUG_CPU was enabled, which caused failures in
the late loading mechanism

- Make the late loader synchronization finally work under all
circumstances. It was exiting early, causing timeout failures due to
a missing synchronization point.

- Do not use mwait_play_dead() on AMD systems, to prevent excessive
power consumption; the CPU cannot enter deep power states from MWAIT
there.

- Address an annoying sparse warning caused by the missing 'UL' type
suffix on the vmemmap and vmalloc base address constants.

- Prevent reserving a crash kernel region on Xen PV, as this gives the
wrong impression that crash kernels actually work there, which is not
the case. Xen PV has its own crash mechanism handled by the
hypervisor.

- Add missing TLB CPUID descriptor values to the table so that the
printout on certain machines is correct.

- Enumerate the new CLDEMOTE instruction

- Fix an incorrect SPDX identifier

- Remove stale macros"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/ipc: Fix x32 version of shmid64_ds and msqid64_ds
x86/setup: Do not reserve a crash kernel region if booted on Xen PV
x86/cpu/intel: Add missing TLB cpuid values
x86/smpboot: Don't use mwait_play_dead() on AMD systems
x86/mm: Make vmemmap and vmalloc base address constants unsigned long
x86/vector: Remove the unused macro FPU_IRQ
x86/vector: Remove the macro VECTOR_OFFSET_START
x86/cpufeatures: Enumerate cldemote instruction
x86/microcode: Do not exit early from __reload_late()
x86/microcode/intel: Save microcode patch unconditionally
x86/jailhouse: Fix incorrect SPDX identifier

+1
arch/x86/include/asm/cpufeatures.h
···
 #define X86_FEATURE_AVX512_VPOPCNTDQ   (16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57               (16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID              (16*32+22) /* RDPID instruction */
+#define X86_FEATURE_CLDEMOTE           (16*32+25) /* CLDEMOTE instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV     (17*32+ 0) /* MCA overflow recovery support */
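For reference, the new flag encodes CPUID.(EAX=7,ECX=0):ECX bit 25 (word 16, bit 25 above). A minimal user-space sketch of probing that same bit; it relies on the compiler's <cpuid.h> helpers, which are an assumption of this example and not part of the kernel change:

/* Sketch only: check CPUID.(EAX=7,ECX=0):ECX[25], the bit that
 * X86_FEATURE_CLDEMOTE (16*32+25) corresponds to. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("CLDEMOTE %ssupported\n", (ecx & (1u << 25)) ? "" : "not ");
	return 0;
}

With the kernel change applied, the same capability should also be visible as a feature flag in /proc/cpuinfo on hardware that supports it.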
-7
arch/x86/include/asm/irq_vectors.h
···
  * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
  */
 #define FIRST_EXTERNAL_VECTOR          0x20
-/*
- * We start allocating at 0x21 to spread out vectors evenly between
- * priority levels. (0x80 is the syscall vector)
- */
-#define VECTOR_OFFSET_START            1
 
 /*
  * Reserve the lowest usable vector (and hence lowest priority)  0x20 for
···
 #else
 #define FIRST_SYSTEM_VECTOR            NR_VECTORS
 #endif
-
-#define FPU_IRQ                        13
 
 /*
  * Size the maximum number of interrupts.
+1 -1
arch/x86/include/asm/jailhouse_para.h
-/* SPDX-License-Identifier: GPL2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
 
 /*
  * Jailhouse paravirt detection
+4 -4
arch/x86/include/asm/pgtable_64_types.h
···
 #define LDT_PGD_ENTRY          (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
 #define LDT_BASE_ADDR          (LDT_PGD_ENTRY << PGDIR_SHIFT)
 
-#define __VMALLOC_BASE_L4      0xffffc90000000000
-#define __VMALLOC_BASE_L5      0xffa0000000000000
+#define __VMALLOC_BASE_L4      0xffffc90000000000UL
+#define __VMALLOC_BASE_L5      0xffa0000000000000UL
 
 #define VMALLOC_SIZE_TB_L4     32UL
 #define VMALLOC_SIZE_TB_L5     12800UL
 
-#define __VMEMMAP_BASE_L4      0xffffea0000000000
-#define __VMEMMAP_BASE_L5      0xffd4000000000000
+#define __VMEMMAP_BASE_L4      0xffffea0000000000UL
+#define __VMEMMAP_BASE_L5      0xffd4000000000000UL
 
 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 # define VMALLOC_START         vmalloc_base
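A hedged illustration (not kernel code, hypothetical macro names) of why the bare constants were a problem: a hex literal above LONG_MAX carries no explicit type annotation, so its type falls out of the C integer-constant rules, and sparse warns about it roughly as "constant ... is so big it is unsigned long". Spelling out UL, as the hunk above does, states the intent and silences the warning:

/* Illustration only; these macro names are made up for this example. */
#define VMEMMAP_BASE_IMPLICIT  0xffffea0000000000    /* sparse warns: constant is so big it is unsigned long */
#define VMEMMAP_BASE_EXPLICIT  0xffffea0000000000UL  /* explicit unsigned long, no warning */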
+31
arch/x86/include/uapi/asm/msgbuf.h
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X64_MSGBUF_H
+#define __ASM_X64_MSGBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/msgbuf.h>
+#else
+/*
+ * The msqid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is differnet
+ * from other 32-bit architectures.
+ */
+
+struct msqid64_ds {
+	struct ipc64_perm msg_perm;
+	__kernel_time_t msg_stime;	/* last msgsnd time */
+	__kernel_time_t msg_rtime;	/* last msgrcv time */
+	__kernel_time_t msg_ctime;	/* last change time */
+	__kernel_ulong_t msg_cbytes;	/* current number of bytes on queue */
+	__kernel_ulong_t msg_qnum;	/* number of messages in queue */
+	__kernel_ulong_t msg_qbytes;	/* max number of bytes on queue */
+	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
+	__kernel_pid_t msg_lrpid;	/* last receive pid */
+	__kernel_ulong_t __unused4;
+	__kernel_ulong_t __unused5;
+};
+
+#endif
+
+#endif /* __ASM_GENERIC_MSGBUF_H */
+42
arch/x86/include/uapi/asm/shmbuf.h
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X86_SHMBUF_H
+#define __ASM_X86_SHMBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/shmbuf.h>
+#else
+/*
+ * The shmid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is differnet
+ * from other 32-bit architectures.
+ */
+
+struct shmid64_ds {
+	struct ipc64_perm	shm_perm;	/* operation perms */
+	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_time_t		shm_atime;	/* last attach time */
+	__kernel_time_t		shm_dtime;	/* last detach time */
+	__kernel_time_t		shm_ctime;	/* last change time */
+	__kernel_pid_t		shm_cpid;	/* pid of creator */
+	__kernel_pid_t		shm_lpid;	/* pid of last operator */
+	__kernel_ulong_t	shm_nattch;	/* no. of current attaches */
+	__kernel_ulong_t	__unused4;
+	__kernel_ulong_t	__unused5;
+};
+
+struct shminfo64 {
+	__kernel_ulong_t	shmmax;
+	__kernel_ulong_t	shmmin;
+	__kernel_ulong_t	shmmni;
+	__kernel_ulong_t	shmseg;
+	__kernel_ulong_t	shmall;
+	__kernel_ulong_t	__unused1;
+	__kernel_ulong_t	__unused2;
+	__kernel_ulong_t	__unused3;
+	__kernel_ulong_t	__unused4;
+};
+
+#endif
+
+#endif /* __ASM_X86_SHMBUF_H */
+3
arch/x86/kernel/cpu/intel.c
···
 	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
 	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, full associative" },
 	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
+	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
+	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
+	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
 	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
 	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
 	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
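These table entries decode descriptor bytes reported by CPUID leaf 0x2. A rough user-space sketch of dumping those raw descriptors; it uses the compiler's <cpuid.h>, which is an assumption of this example and not part of the kernel change:

/* Sketch only: print the CPUID leaf 2 descriptor bytes that
 * intel_tlb_table[] decodes in the kernel. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int regs[4];

	if (!__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]))
		return 1;

	for (int i = 0; i < 4; i++) {
		if (regs[i] & (1u << 31))	/* bit 31 set: register holds no valid descriptors */
			continue;
		/* the low byte of EAX is the leaf's repeat count, not a descriptor */
		for (int b = (i == 0); b < 4; b++) {
			unsigned int desc = (regs[i] >> (8 * b)) & 0xff;

			if (desc)
				printf("descriptor 0x%02x\n", desc);
		}
	}
	return 0;
}

Descriptors 0x6b, 0x6c and 0x6d previously had no match in the table, which is what made the TLB printout wrong on the affected machines.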
+2 -4
arch/x86/kernel/cpu/microcode/core.c
···
 	apply_microcode_local(&err);
 	spin_unlock(&update_lock);
 
+	/* siblings return UCODE_OK because their engine got updated already */
 	if (err > UCODE_NFOUND) {
 		pr_warn("Error reloading microcode on CPU %d\n", cpu);
-		return -1;
-	/* siblings return UCODE_OK because their engine got updated already */
+		ret = -1;
 	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
 		ret = 1;
-	} else {
-		return ret;
 	}
 
 	/*
-2
arch/x86/kernel/cpu/microcode/intel.c
···
  */
 static void save_mc_for_early(u8 *mc, unsigned int size)
 {
-#ifdef CONFIG_HOTPLUG_CPU
 	/* Synchronization during CPU hotplug. */
 	static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 
···
 	show_saved_mc();
 
 	mutex_unlock(&x86_cpu_microcode_mutex);
-#endif
 }
 
 static bool load_builtin_intel_microcode(struct cpio_data *cp)
+1 -1
arch/x86/kernel/jailhouse.c
-// SPDX-License-Identifier: GPL2.0
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Jailhouse paravirt_ops implementation
  *
+6
arch/x86/kernel/setup.c
···
 #include <linux/init_ohci1394_dma.h>
 #include <linux/kvm_para.h>
 #include <linux/dma-contiguous.h>
+#include <xen/xen.h>
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
···
 		if (ret != 0 || crash_size <= 0)
 			return;
 		high = true;
+	}
+
+	if (xen_pv_domain()) {
+		pr_info("Ignoring crashkernel for a Xen PV domain\n");
+		return;
 	}
 
 	/* 0 means: find the address automatically */
+2
arch/x86/kernel/smpboot.c
···
 	void *mwait_ptr;
 	int i;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
 	if (!this_cpu_has(X86_FEATURE_CLFLUSH))