Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 apic updates from Thomas Gleixner:

- Cleanup the apic IPI implementation by removing duplicated code and
consolidating the functions into the APIC core.

- Implement a safe variant of the IPI broadcast mode. Contrary to
earlier attempts this uses the core tracking of which CPUs have been
brought online at least once so that a broadcast does not end up in
some dead end in BIOS/SMM code when the CPU is still waiting for
init. Once all CPUs have been brought up once, IPI broadcasting is
enabled. Before that regular one by one IPIs are issued.

- Drop the paravirt CR8 related functions as they have no user anymore

- Initialize the APIC TPR to block interrupt 16-31 as they are reserved
for CPU exceptions and should never be raised by any well behaving
device.

- Emit a warning when vector space exhaustion breaks the admin set
affinity of an interrupt.

- Make sure to use the NMI fallback when shutdown via reboot vector IPI
fails. The original code had conditions which prevent the code path
to be reached.

- Annotate various APIC config variables as RO after init.

[ The ipi broadcast change came in earlier through the cpu hotplug
branch, but I left the explanation in the commit message since it was
shared between the two different branches - Linus ]

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (28 commits)
x86/apic/vector: Warn when vector space exhaustion breaks affinity
x86/apic: Annotate global config variables as "read-only after init"
x86/apic/x2apic: Implement IPI shorthands support
x86/apic/flat64: Remove the IPI shorthand decision logic
x86/apic: Share common IPI helpers
x86/apic: Remove the shorthand decision logic
x86/smp: Enhance native_send_call_func_ipi()
x86/smp: Move smp_function_call implementations into IPI code
x86/apic: Provide and use helper for send_IPI_allbutself()
x86/apic: Add static key to control IPI shorthands
x86/apic: Move no_ipi_broadcast() out of 32bit
x86/apic: Add NMI_VECTOR wait to IPI shorthand
x86/apic: Remove dest argument from __default_send_IPI_shortcut()
x86/hotplug: Silence APIC and NMI when CPU is dead
x86/cpu: Move arch_smt_update() to a neutral place
x86/apic/uv: Make x2apic_extra_bits static
x86/apic: Consolidate the apic local headers
x86/apic: Move apic_flat_64 header into apic directory
x86/apic: Move ipi header into apic directory
x86/apic: Cleanup the include maze
...

+436 -576
+5 -6
arch/x86/include/asm/apic.h
··· 136 136 extern void clear_local_APIC(void); 137 137 extern void disconnect_bsp_APIC(int virt_wire_setup); 138 138 extern void disable_local_APIC(void); 139 + extern void apic_soft_disable(void); 139 140 extern void lapic_shutdown(void); 140 141 extern void sync_Arb_IDs(void); 141 142 extern void init_bsp_APIC(void); ··· 176 175 extern void lapic_online(void); 177 176 extern void lapic_offline(void); 178 177 extern bool apic_needs_pit(void); 178 + 179 + extern void apic_send_IPI_allbutself(unsigned int vector); 179 180 180 181 #else /* !CONFIG_X86_LOCAL_APIC */ 181 182 static inline void lapic_shutdown(void) { } ··· 468 465 #define TRAMPOLINE_PHYS_LOW 0x467 469 466 #define TRAMPOLINE_PHYS_HIGH 0x469 470 467 471 - #ifdef CONFIG_X86_64 472 - extern void apic_send_IPI_self(int vector); 473 - 474 - DECLARE_PER_CPU(int, x2apic_extra_bits); 475 - #endif 476 - 477 468 extern void generic_bigsmp_probe(void); 478 469 479 470 #ifdef CONFIG_X86_LOCAL_APIC ··· 503 506 504 507 #ifdef CONFIG_SMP 505 508 bool apic_id_is_primary_thread(unsigned int id); 509 + void apic_smt_update(void); 506 510 #else 507 511 static inline bool apic_id_is_primary_thread(unsigned int id) { return false; } 512 + static inline void apic_smt_update(void) { } 508 513 #endif 509 514 510 515 extern void irq_enter(void);
-8
arch/x86/include/asm/apic_flat_64.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_X86_APIC_FLAT_64_H 3 - #define _ASM_X86_APIC_FLAT_64_H 4 - 5 - extern void flat_init_apic_ldr(void); 6 - 7 - #endif 8 -
+2
arch/x86/include/asm/bugs.h
··· 18 18 static inline int ppro_with_ram_bug(void) { return 0; } 19 19 #endif 20 20 21 + extern void cpu_bugs_smt_update(void); 22 + 21 23 #endif /* _ASM_X86_BUGS_H */
-109
arch/x86/include/asm/ipi.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - #ifndef _ASM_X86_IPI_H 3 - #define _ASM_X86_IPI_H 4 - 5 - #ifdef CONFIG_X86_LOCAL_APIC 6 - 7 - /* 8 - * Copyright 2004 James Cleverdon, IBM. 9 - * 10 - * Generic APIC InterProcessor Interrupt code. 11 - * 12 - * Moved to include file by James Cleverdon from 13 - * arch/x86-64/kernel/smp.c 14 - * 15 - * Copyrights from kernel/smp.c: 16 - * 17 - * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> 18 - * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> 19 - * (c) 2002,2003 Andi Kleen, SuSE Labs. 20 - */ 21 - 22 - #include <asm/hw_irq.h> 23 - #include <asm/apic.h> 24 - #include <asm/smp.h> 25 - 26 - /* 27 - * the following functions deal with sending IPIs between CPUs. 28 - * 29 - * We use 'broadcast', CPU->CPU IPIs and self-IPIs too. 30 - */ 31 - 32 - static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector, 33 - unsigned int dest) 34 - { 35 - unsigned int icr = shortcut | dest; 36 - 37 - switch (vector) { 38 - default: 39 - icr |= APIC_DM_FIXED | vector; 40 - break; 41 - case NMI_VECTOR: 42 - icr |= APIC_DM_NMI; 43 - break; 44 - } 45 - return icr; 46 - } 47 - 48 - static inline int __prepare_ICR2(unsigned int mask) 49 - { 50 - return SET_APIC_DEST_FIELD(mask); 51 - } 52 - 53 - static inline void __xapic_wait_icr_idle(void) 54 - { 55 - while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY) 56 - cpu_relax(); 57 - } 58 - 59 - void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest); 60 - 61 - /* 62 - * This is used to send an IPI with no shorthand notation (the destination is 63 - * specified in bits 56 to 63 of the ICR). 
64 - */ 65 - void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest); 66 - 67 - extern void default_send_IPI_single(int cpu, int vector); 68 - extern void default_send_IPI_single_phys(int cpu, int vector); 69 - extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, 70 - int vector); 71 - extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, 72 - int vector); 73 - 74 - /* Avoid include hell */ 75 - #define NMI_VECTOR 0x02 76 - 77 - extern int no_broadcast; 78 - 79 - static inline void __default_local_send_IPI_allbutself(int vector) 80 - { 81 - if (no_broadcast || vector == NMI_VECTOR) 82 - apic->send_IPI_mask_allbutself(cpu_online_mask, vector); 83 - else 84 - __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical); 85 - } 86 - 87 - static inline void __default_local_send_IPI_all(int vector) 88 - { 89 - if (no_broadcast || vector == NMI_VECTOR) 90 - apic->send_IPI_mask(cpu_online_mask, vector); 91 - else 92 - __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical); 93 - } 94 - 95 - #ifdef CONFIG_X86_32 96 - extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, 97 - int vector); 98 - extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, 99 - int vector); 100 - extern void default_send_IPI_mask_logical(const struct cpumask *mask, 101 - int vector); 102 - extern void default_send_IPI_allbutself(int vector); 103 - extern void default_send_IPI_all(int vector); 104 - extern void default_send_IPI_self(int vector); 105 - #endif 106 - 107 - #endif 108 - 109 - #endif /* _ASM_X86_IPI_H */
-12
arch/x86/include/asm/paravirt.h
··· 139 139 PVOP_VCALL1(cpu.write_cr4, x); 140 140 } 141 141 142 - #ifdef CONFIG_X86_64 143 - static inline unsigned long read_cr8(void) 144 - { 145 - return PVOP_CALL0(unsigned long, cpu.read_cr8); 146 - } 147 - 148 - static inline void write_cr8(unsigned long x) 149 - { 150 - PVOP_VCALL1(cpu.write_cr8, x); 151 - } 152 - #endif 153 - 154 142 static inline void arch_safe_halt(void) 155 143 { 156 144 PVOP_VCALL0(irq.safe_halt);
-5
arch/x86/include/asm/paravirt_types.h
··· 119 119 120 120 void (*write_cr4)(unsigned long); 121 121 122 - #ifdef CONFIG_X86_64 123 - unsigned long (*read_cr8)(void); 124 - void (*write_cr8)(unsigned long); 125 - #endif 126 - 127 122 /* Segment descriptor handling */ 128 123 void (*load_tr_desc)(void); 129 124 void (*load_gdt)(const struct desc_ptr *);
+1
arch/x86/include/asm/smp.h
··· 143 143 void wbinvd_on_cpu(int cpu); 144 144 int wbinvd_on_all_cpus(void); 145 145 146 + void native_smp_send_reschedule(int cpu); 146 147 void native_send_call_func_ipi(const struct cpumask *mask); 147 148 void native_send_call_func_single_ipi(int cpu); 148 149 void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
-24
arch/x86/include/asm/special_insns.h
··· 73 73 74 74 void native_write_cr4(unsigned long val); 75 75 76 - #ifdef CONFIG_X86_64 77 - static inline unsigned long native_read_cr8(void) 78 - { 79 - unsigned long cr8; 80 - asm volatile("movq %%cr8,%0" : "=r" (cr8)); 81 - return cr8; 82 - } 83 - 84 - static inline void native_write_cr8(unsigned long val) 85 - { 86 - asm volatile("movq %0,%%cr8" :: "r" (val) : "memory"); 87 - } 88 - #endif 89 - 90 76 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 91 77 static inline u32 rdpkru(void) 92 78 { ··· 185 199 } 186 200 187 201 #ifdef CONFIG_X86_64 188 - 189 - static inline unsigned long read_cr8(void) 190 - { 191 - return native_read_cr8(); 192 - } 193 - 194 - static inline void write_cr8(unsigned long x) 195 - { 196 - native_write_cr8(x); 197 - } 198 202 199 203 static inline void load_gs_index(unsigned selector) 200 204 {
+1 -1
arch/x86/include/asm/suspend_64.h
··· 34 34 */ 35 35 unsigned long kernelmode_gs_base, usermode_gs_base, fs_base; 36 36 37 - unsigned long cr0, cr2, cr3, cr4, cr8; 37 + unsigned long cr0, cr2, cr3, cr4; 38 38 u64 misc_enable; 39 39 bool misc_enable_saved; 40 40 struct saved_msrs saved_msrs;
+116 -74
arch/x86/kernel/apic/apic.c
··· 65 65 unsigned disabled_cpus; 66 66 67 67 /* Processor that is doing the boot up */ 68 - unsigned int boot_cpu_physical_apicid = -1U; 68 + unsigned int boot_cpu_physical_apicid __ro_after_init = -1U; 69 69 EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid); 70 70 71 - u8 boot_cpu_apic_version; 71 + u8 boot_cpu_apic_version __ro_after_init; 72 72 73 73 /* 74 74 * The highest APIC ID seen during enumeration. ··· 85 85 * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to 86 86 * avoid undefined behaviour caused by sending INIT from AP to BSP. 87 87 */ 88 - static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID; 88 + static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID; 89 89 90 90 /* 91 91 * This variable controls which CPUs receive external NMIs. By default, 92 92 * external NMIs are delivered only to the BSP. 93 93 */ 94 - static int apic_extnmi = APIC_EXTNMI_BSP; 94 + static int apic_extnmi __ro_after_init = APIC_EXTNMI_BSP; 95 95 96 96 /* 97 97 * Map cpu index to physical APIC ID ··· 114 114 DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID); 115 115 116 116 /* Local APIC was disabled by the BIOS and enabled by the kernel */ 117 - static int enabled_via_apicbase; 117 + static int enabled_via_apicbase __ro_after_init; 118 118 119 119 /* 120 120 * Handle interrupt mode configuration register (IMCR). 
··· 172 172 __setup("apicpmtimer", setup_apicpmtimer); 173 173 #endif 174 174 175 - unsigned long mp_lapic_addr; 176 - int disable_apic; 175 + unsigned long mp_lapic_addr __ro_after_init; 176 + int disable_apic __ro_after_init; 177 177 /* Disable local APIC timer from the kernel commandline or via dmi quirk */ 178 178 static int disable_apic_timer __initdata; 179 179 /* Local APIC timer works in C2 */ 180 - int local_apic_timer_c2_ok; 180 + int local_apic_timer_c2_ok __ro_after_init; 181 181 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); 182 182 183 183 /* 184 184 * Debug level, exported for io_apic.c 185 185 */ 186 - int apic_verbosity; 186 + int apic_verbosity __ro_after_init; 187 187 188 - int pic_mode; 188 + int pic_mode __ro_after_init; 189 189 190 190 /* Have we found an MP table */ 191 - int smp_found_config; 191 + int smp_found_config __ro_after_init; 192 192 193 193 static struct resource lapic_resource = { 194 194 .name = "Local APIC", ··· 199 199 200 200 static void apic_pm_activate(void); 201 201 202 - static unsigned long apic_phys; 202 + static unsigned long apic_phys __ro_after_init; 203 203 204 204 /* 205 205 * Get the LAPIC version ··· 1224 1224 } 1225 1225 1226 1226 /** 1227 + * apic_soft_disable - Clears and software disables the local APIC on hotplug 1228 + * 1229 + * Contrary to disable_local_APIC() this does not touch the enable bit in 1230 + * MSR_IA32_APICBASE. Clearing that bit on systems based on the 3 wire APIC 1231 + * bus would require a hardware reset as the APIC would lose track of bus 1232 + * arbitration. On systems with FSB delivery APICBASE could be disabled, 1233 + * but it has to be guaranteed that no interrupt is sent to the APIC while 1234 + * in that state and it's not clear from the SDM whether it still responds 1235 + * to INIT/SIPI messages. Stay on the safe side and use software disable. 
1236 + */ 1237 + void apic_soft_disable(void) 1238 + { 1239 + u32 value; 1240 + 1241 + clear_local_APIC(); 1242 + 1243 + /* Soft disable APIC (implies clearing of registers for 82489DX!). */ 1244 + value = apic_read(APIC_SPIV); 1245 + value &= ~APIC_SPIV_APIC_ENABLED; 1246 + apic_write(APIC_SPIV, value); 1247 + } 1248 + 1249 + /** 1227 1250 * disable_local_APIC - clear and disable the local APIC 1228 1251 */ 1229 1252 void disable_local_APIC(void) 1230 1253 { 1231 - unsigned int value; 1232 - 1233 1254 /* APIC hasn't been mapped yet */ 1234 1255 if (!x2apic_mode && !apic_phys) 1235 1256 return; 1236 1257 1237 - clear_local_APIC(); 1238 - 1239 - /* 1240 - * Disable APIC (implies clearing of registers 1241 - * for 82489DX!). 1242 - */ 1243 - value = apic_read(APIC_SPIV); 1244 - value &= ~APIC_SPIV_APIC_ENABLED; 1245 - apic_write(APIC_SPIV, value); 1258 + apic_soft_disable(); 1246 1259 1247 1260 #ifdef CONFIG_X86_32 1248 1261 /* ··· 1320 1307 APIC_INT_LEVELTRIG | APIC_DM_INIT); 1321 1308 } 1322 1309 1323 - enum apic_intr_mode_id apic_intr_mode; 1310 + enum apic_intr_mode_id apic_intr_mode __ro_after_init; 1324 1311 1325 1312 static int __init apic_intr_mode_select(void) 1326 1313 { ··· 1508 1495 oldvalue, value); 1509 1496 } 1510 1497 1498 + #define APIC_IR_REGS APIC_ISR_NR 1499 + #define APIC_IR_BITS (APIC_IR_REGS * 32) 1500 + #define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG) 1501 + 1502 + union apic_ir { 1503 + unsigned long map[APIC_IR_MAPSIZE]; 1504 + u32 regs[APIC_IR_REGS]; 1505 + }; 1506 + 1507 + static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr) 1508 + { 1509 + int i, bit; 1510 + 1511 + /* Read the IRRs */ 1512 + for (i = 0; i < APIC_IR_REGS; i++) 1513 + irr->regs[i] = apic_read(APIC_IRR + i * 0x10); 1514 + 1515 + /* Read the ISRs */ 1516 + for (i = 0; i < APIC_IR_REGS; i++) 1517 + isr->regs[i] = apic_read(APIC_ISR + i * 0x10); 1518 + 1519 + /* 1520 + * If the ISR map is not empty. 
ACK the APIC and run another round 1521 + * to verify whether a pending IRR has been unblocked and turned 1522 + * into a ISR. 1523 + */ 1524 + if (!bitmap_empty(isr->map, APIC_IR_BITS)) { 1525 + /* 1526 + * There can be multiple ISR bits set when a high priority 1527 + * interrupt preempted a lower priority one. Issue an ACK 1528 + * per set bit. 1529 + */ 1530 + for_each_set_bit(bit, isr->map, APIC_IR_BITS) 1531 + ack_APIC_irq(); 1532 + return true; 1533 + } 1534 + 1535 + return !bitmap_empty(irr->map, APIC_IR_BITS); 1536 + } 1537 + 1538 + /* 1539 + * After a crash, we no longer service the interrupts and a pending 1540 + * interrupt from previous kernel might still have ISR bit set. 1541 + * 1542 + * Most probably by now the CPU has serviced that pending interrupt and it 1543 + * might not have done the ack_APIC_irq() because it thought, interrupt 1544 + * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear 1545 + * the ISR bit and cpu thinks it has already serivced the interrupt. Hence 1546 + * a vector might get locked. It was noticed for timer irq (vector 1547 + * 0x31). Issue an extra EOI to clear ISR. 1548 + * 1549 + * If there are pending IRR bits they turn into ISR bits after a higher 1550 + * priority ISR bit has been acked. 1551 + */ 1511 1552 static void apic_pending_intr_clear(void) 1512 1553 { 1513 - long long max_loops = cpu_khz ? cpu_khz : 1000000; 1514 - unsigned long long tsc = 0, ntsc; 1515 - unsigned int queued; 1516 - unsigned long value; 1517 - int i, j, acked = 0; 1554 + union apic_ir irr, isr; 1555 + unsigned int i; 1518 1556 1519 - if (boot_cpu_has(X86_FEATURE_TSC)) 1520 - tsc = rdtsc(); 1521 - /* 1522 - * After a crash, we no longer service the interrupts and a pending 1523 - * interrupt from previous kernel might still have ISR bit set. 
1524 - * 1525 - * Most probably by now CPU has serviced that pending interrupt and 1526 - * it might not have done the ack_APIC_irq() because it thought, 1527 - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it 1528 - * does not clear the ISR bit and cpu thinks it has already serivced 1529 - * the interrupt. Hence a vector might get locked. It was noticed 1530 - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. 1531 - */ 1532 - do { 1533 - queued = 0; 1534 - for (i = APIC_ISR_NR - 1; i >= 0; i--) 1535 - queued |= apic_read(APIC_IRR + i*0x10); 1536 - 1537 - for (i = APIC_ISR_NR - 1; i >= 0; i--) { 1538 - value = apic_read(APIC_ISR + i*0x10); 1539 - for_each_set_bit(j, &value, 32) { 1540 - ack_APIC_irq(); 1541 - acked++; 1542 - } 1543 - } 1544 - if (acked > 256) { 1545 - pr_err("LAPIC pending interrupts after %d EOI\n", acked); 1546 - break; 1547 - } 1548 - if (queued) { 1549 - if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { 1550 - ntsc = rdtsc(); 1551 - max_loops = (long long)cpu_khz << 10; 1552 - max_loops -= ntsc - tsc; 1553 - } else { 1554 - max_loops--; 1555 - } 1556 - } 1557 - } while (queued && max_loops > 0); 1558 - WARN_ON(max_loops <= 0); 1557 + /* 512 loops are way oversized and give the APIC a chance to obey. */ 1558 + for (i = 0; i < 512; i++) { 1559 + if (!apic_check_and_ack(&irr, &isr)) 1560 + return; 1561 + } 1562 + /* Dump the IRR/ISR content if that failed */ 1563 + pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map); 1559 1564 } 1560 1565 1561 1566 /** ··· 1590 1559 int logical_apicid, ldr_apicid; 1591 1560 #endif 1592 1561 1593 - 1594 1562 if (disable_apic) { 1595 1563 disable_ioapic_support(); 1596 1564 return; 1597 1565 } 1566 + 1567 + /* 1568 + * If this comes from kexec/kcrash the APIC might be enabled in 1569 + * SPIV. Soft disable it before doing further initialization. 
1570 + */ 1571 + value = apic_read(APIC_SPIV); 1572 + value &= ~APIC_SPIV_APIC_ENABLED; 1573 + apic_write(APIC_SPIV, value); 1598 1574 1599 1575 #ifdef CONFIG_X86_32 1600 1576 /* Pound the ESR really hard over the head with a big hammer - mbligh */ ··· 1612 1574 apic_write(APIC_ESR, 0); 1613 1575 } 1614 1576 #endif 1615 - perf_events_lapic_init(); 1616 - 1617 1577 /* 1618 1578 * Double-check whether this APIC is really registered. 1619 1579 * This is meaningless in clustered apic mode, so we skip it. ··· 1639 1603 #endif 1640 1604 1641 1605 /* 1642 - * Set Task Priority to 'accept all'. We never change this 1643 - * later on. 1606 + * Set Task Priority to 'accept all except vectors 0-31'. An APIC 1607 + * vector in the 16-31 range could be delivered if TPR == 0, but we 1608 + * would think it's an exception and terrible things will happen. We 1609 + * never change this later on. 1644 1610 */ 1645 1611 value = apic_read(APIC_TASKPRI); 1646 1612 value &= ~APIC_TPRI_MASK; 1613 + value |= 0x10; 1647 1614 apic_write(APIC_TASKPRI, value); 1648 1615 1616 + /* Clear eventually stale ISR/IRR bits */ 1649 1617 apic_pending_intr_clear(); 1650 1618 1651 1619 /* ··· 1695 1655 */ 1696 1656 value |= SPURIOUS_APIC_VECTOR; 1697 1657 apic_write(APIC_SPIV, value); 1658 + 1659 + perf_events_lapic_init(); 1698 1660 1699 1661 /* 1700 1662 * Set up LVT0, LVT1:
+10 -56
arch/x86/kernel/apic/apic_flat_64.c
··· 8 8 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and 9 9 * James Cleverdon. 10 10 */ 11 - #include <linux/acpi.h> 12 - #include <linux/errno.h> 13 - #include <linux/threads.h> 14 11 #include <linux/cpumask.h> 15 - #include <linux/string.h> 16 - #include <linux/kernel.h> 17 - #include <linux/ctype.h> 18 - #include <linux/hardirq.h> 19 12 #include <linux/export.h> 13 + #include <linux/acpi.h> 20 14 21 - #include <asm/smp.h> 22 - #include <asm/ipi.h> 23 - #include <asm/apic.h> 24 - #include <asm/apic_flat_64.h> 25 15 #include <asm/jailhouse_para.h> 16 + #include <asm/apic.h> 17 + 18 + #include "local.h" 26 19 27 20 static struct apic apic_physflat; 28 21 static struct apic apic_flat; ··· 74 81 __clear_bit(cpu, &mask); 75 82 76 83 _flat_send_IPI_mask(mask, vector); 77 - } 78 - 79 - static void flat_send_IPI_allbutself(int vector) 80 - { 81 - int cpu = smp_processor_id(); 82 - 83 - if (IS_ENABLED(CONFIG_HOTPLUG_CPU) || vector == NMI_VECTOR) { 84 - if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { 85 - unsigned long mask = cpumask_bits(cpu_online_mask)[0]; 86 - 87 - if (cpu < BITS_PER_LONG) 88 - __clear_bit(cpu, &mask); 89 - 90 - _flat_send_IPI_mask(mask, vector); 91 - } 92 - } else if (num_online_cpus() > 1) { 93 - __default_send_IPI_shortcut(APIC_DEST_ALLBUT, 94 - vector, apic->dest_logical); 95 - } 96 - } 97 - 98 - static void flat_send_IPI_all(int vector) 99 - { 100 - if (vector == NMI_VECTOR) { 101 - flat_send_IPI_mask(cpu_online_mask, vector); 102 - } else { 103 - __default_send_IPI_shortcut(APIC_DEST_ALLINC, 104 - vector, apic->dest_logical); 105 - } 106 84 } 107 85 108 86 static unsigned int flat_get_apic_id(unsigned long x) ··· 137 173 .send_IPI = default_send_IPI_single, 138 174 .send_IPI_mask = flat_send_IPI_mask, 139 175 .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, 140 - .send_IPI_allbutself = flat_send_IPI_allbutself, 141 - .send_IPI_all = flat_send_IPI_all, 142 - .send_IPI_self = apic_send_IPI_self, 176 + 
.send_IPI_allbutself = default_send_IPI_allbutself, 177 + .send_IPI_all = default_send_IPI_all, 178 + .send_IPI_self = default_send_IPI_self, 143 179 144 180 .inquire_remote_apic = default_inquire_remote_apic, 145 181 ··· 189 225 */ 190 226 } 191 227 192 - static void physflat_send_IPI_allbutself(int vector) 193 - { 194 - default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); 195 - } 196 - 197 - static void physflat_send_IPI_all(int vector) 198 - { 199 - default_send_IPI_mask_sequence_phys(cpu_online_mask, vector); 200 - } 201 - 202 228 static int physflat_probe(void) 203 229 { 204 230 if (apic == &apic_physflat || num_possible_cpus() > 8 || ··· 230 276 .send_IPI = default_send_IPI_single_phys, 231 277 .send_IPI_mask = default_send_IPI_mask_sequence_phys, 232 278 .send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_phys, 233 - .send_IPI_allbutself = physflat_send_IPI_allbutself, 234 - .send_IPI_all = physflat_send_IPI_all, 235 - .send_IPI_self = apic_send_IPI_self, 279 + .send_IPI_allbutself = default_send_IPI_allbutself, 280 + .send_IPI_all = default_send_IPI_all, 281 + .send_IPI_self = default_send_IPI_self, 236 282 237 283 .inquire_remote_apic = default_inquire_remote_apic, 238 284
+1 -17
arch/x86/kernel/apic/apic_noop.c
··· 9 9 * to not uglify the caller's code and allow to call (some) apic routines 10 10 * like self-ipi, etc... 11 11 */ 12 - 13 - #include <linux/threads.h> 14 12 #include <linux/cpumask.h> 15 - #include <linux/string.h> 16 - #include <linux/kernel.h> 17 - #include <linux/ctype.h> 18 - #include <linux/errno.h> 19 - #include <asm/fixmap.h> 20 - #include <asm/mpspec.h> 21 - #include <asm/apicdef.h> 13 + 22 14 #include <asm/apic.h> 23 - #include <asm/setup.h> 24 - 25 - #include <linux/smp.h> 26 - #include <asm/ipi.h> 27 - 28 - #include <linux/interrupt.h> 29 - #include <asm/acpi.h> 30 - #include <asm/e820/api.h> 31 15 32 16 static void noop_init_apic_ldr(void) { } 33 17 static void noop_send_IPI(int cpu, int vector) { }
+4 -4
arch/x86/kernel/apic/apic_numachip.c
··· 10 10 * Send feedback to <support@numascale.com> 11 11 * 12 12 */ 13 - 13 + #include <linux/types.h> 14 14 #include <linux/init.h> 15 15 16 16 #include <asm/numachip/numachip.h> 17 17 #include <asm/numachip/numachip_csr.h> 18 - #include <asm/ipi.h> 19 - #include <asm/apic_flat_64.h> 18 + 20 19 #include <asm/pgtable.h> 21 - #include <asm/pci_x86.h> 20 + 21 + #include "local.h" 22 22 23 23 u8 numachip_system __read_mostly; 24 24 static const struct apic apic_numachip1;
+2 -7
arch/x86/kernel/apic/bigsmp_32.c
··· 4 4 * 5 5 * Drives the local APIC in "clustered mode". 6 6 */ 7 - #include <linux/threads.h> 8 7 #include <linux/cpumask.h> 9 - #include <linux/kernel.h> 10 - #include <linux/init.h> 11 8 #include <linux/dmi.h> 12 9 #include <linux/smp.h> 13 10 14 - #include <asm/apicdef.h> 15 - #include <asm/fixmap.h> 16 - #include <asm/mpspec.h> 17 11 #include <asm/apic.h> 18 - #include <asm/ipi.h> 12 + 13 + #include "local.h" 19 14 20 15 static unsigned bigsmp_get_apic_id(unsigned long x) 21 16 {
+127 -41
arch/x86/kernel/apic/ipi.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + 2 3 #include <linux/cpumask.h> 3 - #include <linux/interrupt.h> 4 + #include <linux/smp.h> 4 5 5 - #include <linux/mm.h> 6 - #include <linux/delay.h> 7 - #include <linux/spinlock.h> 8 - #include <linux/kernel_stat.h> 9 - #include <linux/mc146818rtc.h> 10 - #include <linux/cache.h> 11 - #include <linux/cpu.h> 6 + #include "local.h" 12 7 13 - #include <asm/smp.h> 14 - #include <asm/mtrr.h> 15 - #include <asm/tlbflush.h> 16 - #include <asm/mmu_context.h> 17 - #include <asm/apic.h> 18 - #include <asm/proto.h> 19 - #include <asm/ipi.h> 8 + DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand); 20 9 21 - void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest) 10 + #ifdef CONFIG_SMP 11 + static int apic_ipi_shorthand_off __ro_after_init; 12 + 13 + static __init int apic_ipi_shorthand(char *str) 14 + { 15 + get_option(&str, &apic_ipi_shorthand_off); 16 + return 1; 17 + } 18 + __setup("no_ipi_broadcast=", apic_ipi_shorthand); 19 + 20 + static int __init print_ipi_mode(void) 21 + { 22 + pr_info("IPI shorthand broadcast: %s\n", 23 + apic_ipi_shorthand_off ? "disabled" : "enabled"); 24 + return 0; 25 + } 26 + late_initcall(print_ipi_mode); 27 + 28 + void apic_smt_update(void) 29 + { 30 + /* 31 + * Do not switch to broadcast mode if: 32 + * - Disabled on the command line 33 + * - Only a single CPU is online 34 + * - Not all present CPUs have been at least booted once 35 + * 36 + * The latter is important as the local APIC might be in some 37 + * random state and a broadcast might cause havoc. That's 38 + * especially true for NMI broadcasting. 
39 + */ 40 + if (apic_ipi_shorthand_off || num_online_cpus() == 1 || 41 + !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) { 42 + static_branch_disable(&apic_use_ipi_shorthand); 43 + } else { 44 + static_branch_enable(&apic_use_ipi_shorthand); 45 + } 46 + } 47 + 48 + void apic_send_IPI_allbutself(unsigned int vector) 49 + { 50 + if (num_online_cpus() < 2) 51 + return; 52 + 53 + if (static_branch_likely(&apic_use_ipi_shorthand)) 54 + apic->send_IPI_allbutself(vector); 55 + else 56 + apic->send_IPI_mask_allbutself(cpu_online_mask, vector); 57 + } 58 + 59 + /* 60 + * Send a 'reschedule' IPI to another CPU. It goes straight through and 61 + * wastes no time serializing anything. Worst case is that we lose a 62 + * reschedule ... 63 + */ 64 + void native_smp_send_reschedule(int cpu) 65 + { 66 + if (unlikely(cpu_is_offline(cpu))) { 67 + WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu); 68 + return; 69 + } 70 + apic->send_IPI(cpu, RESCHEDULE_VECTOR); 71 + } 72 + 73 + void native_send_call_func_single_ipi(int cpu) 74 + { 75 + apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR); 76 + } 77 + 78 + void native_send_call_func_ipi(const struct cpumask *mask) 79 + { 80 + if (static_branch_likely(&apic_use_ipi_shorthand)) { 81 + unsigned int cpu = smp_processor_id(); 82 + 83 + if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask)) 84 + goto sendmask; 85 + 86 + if (cpumask_test_cpu(cpu, mask)) 87 + apic->send_IPI_all(CALL_FUNCTION_VECTOR); 88 + else if (num_online_cpus() > 1) 89 + apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR); 90 + return; 91 + } 92 + 93 + sendmask: 94 + apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 95 + } 96 + 97 + #endif /* CONFIG_SMP */ 98 + 99 + static inline int __prepare_ICR2(unsigned int mask) 100 + { 101 + return SET_APIC_DEST_FIELD(mask); 102 + } 103 + 104 + static inline void __xapic_wait_icr_idle(void) 105 + { 106 + while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY) 107 + cpu_relax(); 108 + } 109 + 110 + void 
__default_send_IPI_shortcut(unsigned int shortcut, int vector) 22 111 { 23 112 /* 24 113 * Subtle. In the case of the 'never do double writes' workaround ··· 121 32 /* 122 33 * Wait for idle. 123 34 */ 124 - __xapic_wait_icr_idle(); 35 + if (unlikely(vector == NMI_VECTOR)) 36 + safe_apic_wait_icr_idle(); 37 + else 38 + __xapic_wait_icr_idle(); 125 39 126 40 /* 127 - * No need to touch the target chip field 41 + * No need to touch the target chip field. Also the destination 42 + * mode is ignored when a shorthand is used. 128 43 */ 129 - cfg = __prepare_ICR(shortcut, vector, dest); 44 + cfg = __prepare_ICR(shortcut, vector, 0); 130 45 131 46 /* 132 47 * Send the IPI. The write to APIC_ICR fires this off. ··· 226 133 apic->send_IPI_mask(cpumask_of(cpu), vector); 227 134 } 228 135 136 + void default_send_IPI_allbutself(int vector) 137 + { 138 + __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector); 139 + } 140 + 141 + void default_send_IPI_all(int vector) 142 + { 143 + __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector); 144 + } 145 + 146 + void default_send_IPI_self(int vector) 147 + { 148 + __default_send_IPI_shortcut(APIC_DEST_SELF, vector); 149 + } 150 + 229 151 #ifdef CONFIG_X86_32 230 152 231 153 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, ··· 298 190 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); 299 191 __default_send_IPI_dest_field(mask, vector, apic->dest_logical); 300 192 local_irq_restore(flags); 301 - } 302 - 303 - void default_send_IPI_allbutself(int vector) 304 - { 305 - /* 306 - * if there are no other CPUs in the system then we get an APIC send 307 - * error if we try to broadcast, thus avoid sending IPIs in this case. 
308 - */ 309 - if (!(num_online_cpus() > 1)) 310 - return; 311 - 312 - __default_local_send_IPI_allbutself(vector); 313 - } 314 - 315 - void default_send_IPI_all(int vector) 316 - { 317 - __default_local_send_IPI_all(vector); 318 - } 319 - 320 - void default_send_IPI_self(int vector) 321 - { 322 - __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical); 323 193 } 324 194 325 195 /* must come after the send_IPI functions above for inlining */
+68
arch/x86/kernel/apic/local.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Historical copyright notices: 4 + * 5 + * Copyright 2004 James Cleverdon, IBM. 6 + * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> 7 + * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> 8 + * (c) 2002,2003 Andi Kleen, SuSE Labs. 9 + */ 10 + 11 + #include <linux/jump_label.h> 12 + 13 + #include <asm/apic.h> 14 + 15 + /* APIC flat 64 */ 16 + void flat_init_apic_ldr(void); 17 + 18 + /* X2APIC */ 19 + int x2apic_apic_id_valid(u32 apicid); 20 + int x2apic_apic_id_registered(void); 21 + void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest); 22 + unsigned int x2apic_get_apic_id(unsigned long id); 23 + u32 x2apic_set_apic_id(unsigned int id); 24 + int x2apic_phys_pkg_id(int initial_apicid, int index_msb); 25 + void x2apic_send_IPI_self(int vector); 26 + void __x2apic_send_IPI_shorthand(int vector, u32 which); 27 + 28 + /* IPI */ 29 + 30 + DECLARE_STATIC_KEY_FALSE(apic_use_ipi_shorthand); 31 + 32 + static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector, 33 + unsigned int dest) 34 + { 35 + unsigned int icr = shortcut | dest; 36 + 37 + switch (vector) { 38 + default: 39 + icr |= APIC_DM_FIXED | vector; 40 + break; 41 + case NMI_VECTOR: 42 + icr |= APIC_DM_NMI; 43 + break; 44 + } 45 + return icr; 46 + } 47 + 48 + void __default_send_IPI_shortcut(unsigned int shortcut, int vector); 49 + 50 + /* 51 + * This is used to send an IPI with no shorthand notation (the destination is 52 + * specified in bits 56 to 63 of the ICR). 
53 + */ 54 + void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest); 55 + 56 + void default_send_IPI_single(int cpu, int vector); 57 + void default_send_IPI_single_phys(int cpu, int vector); 58 + void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector); 59 + void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector); 60 + void default_send_IPI_allbutself(int vector); 61 + void default_send_IPI_all(int vector); 62 + void default_send_IPI_self(int vector); 63 + 64 + #ifdef CONFIG_X86_32 65 + void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector); 66 + void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector); 67 + void default_send_IPI_mask_logical(const struct cpumask *mask, int vector); 68 + #endif
+2 -39
arch/x86/kernel/apic/probe_32.c
··· 6 6 * 7 7 * Generic x86 APIC driver probe layer. 8 8 */ 9 - #include <linux/threads.h> 10 - #include <linux/cpumask.h> 11 9 #include <linux/export.h> 12 - #include <linux/string.h> 13 - #include <linux/kernel.h> 14 - #include <linux/ctype.h> 15 - #include <linux/init.h> 16 10 #include <linux/errno.h> 17 - #include <asm/fixmap.h> 18 - #include <asm/mpspec.h> 19 - #include <asm/apicdef.h> 20 - #include <asm/apic.h> 21 - #include <asm/setup.h> 22 - 23 11 #include <linux/smp.h> 24 - #include <asm/ipi.h> 25 12 26 - #include <linux/interrupt.h> 13 + #include <asm/apic.h> 27 14 #include <asm/acpi.h> 28 - #include <asm/e820/api.h> 29 15 30 - #ifdef CONFIG_HOTPLUG_CPU 31 - #define DEFAULT_SEND_IPI (1) 32 - #else 33 - #define DEFAULT_SEND_IPI (0) 34 - #endif 35 - 36 - int no_broadcast = DEFAULT_SEND_IPI; 37 - 38 - static __init int no_ipi_broadcast(char *str) 39 - { 40 - get_option(&str, &no_broadcast); 41 - pr_info("Using %s mode\n", 42 - no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); 43 - return 1; 44 - } 45 - __setup("no_ipi_broadcast=", no_ipi_broadcast); 46 - 47 - static int __init print_ipi_mode(void) 48 - { 49 - pr_info("Using IPI %s mode\n", 50 - no_broadcast ? "No-Shortcut" : "Shortcut"); 51 - return 0; 52 - } 53 - late_initcall(print_ipi_mode); 16 + #include "local.h" 54 17 55 18 static int default_x86_32_early_logical_apicid(int cpu) 56 19 {
+2 -19
arch/x86/kernel/apic/probe_64.c
··· 8 8 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and 9 9 * James Cleverdon. 10 10 */ 11 - #include <linux/threads.h> 12 - #include <linux/cpumask.h> 13 - #include <linux/string.h> 14 - #include <linux/init.h> 15 - #include <linux/kernel.h> 16 - #include <linux/ctype.h> 17 - #include <linux/hardirq.h> 18 - #include <linux/dmar.h> 19 - 20 - #include <asm/smp.h> 21 11 #include <asm/apic.h> 22 - #include <asm/ipi.h> 23 - #include <asm/setup.h> 12 + 13 + #include "local.h" 24 14 25 15 /* 26 16 * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode. ··· 34 44 35 45 if (x86_platform.apic_post_init) 36 46 x86_platform.apic_post_init(); 37 - } 38 - 39 - /* Same for both flat and physical. */ 40 - 41 - void apic_send_IPI_self(int vector) 42 - { 43 - __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL); 44 47 } 45 48 46 49 int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+11
arch/x86/kernel/apic/vector.c
··· 398 398 if (!irqd_can_reserve(irqd)) 399 399 apicd->can_reserve = false; 400 400 } 401 + 402 + /* 403 + * Check to ensure that the effective affinity mask is a subset 404 + * the user supplied affinity mask, and warn the user if it is not 405 + */ 406 + if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd), 407 + irq_data_get_affinity_mask(irqd))) { 408 + pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n", 409 + irqd->irq); 410 + } 411 + 401 412 return ret; 402 413 } 403 414
-9
arch/x86/kernel/apic/x2apic.h
··· 1 - /* Common bits for X2APIC cluster/physical modes. */ 2 - 3 - int x2apic_apic_id_valid(u32 apicid); 4 - int x2apic_apic_id_registered(void); 5 - void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest); 6 - unsigned int x2apic_get_apic_id(unsigned long id); 7 - u32 x2apic_set_apic_id(unsigned int id); 8 - int x2apic_phys_pkg_id(int initial_apicid, int index_msb); 9 - void x2apic_send_IPI_self(int vector);
+10 -12
arch/x86/kernel/apic/x2apic_cluster.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #include <linux/threads.h> 3 - #include <linux/cpumask.h> 4 - #include <linux/string.h> 5 - #include <linux/kernel.h> 6 - #include <linux/ctype.h> 7 - #include <linux/dmar.h> 8 - #include <linux/irq.h> 9 - #include <linux/cpu.h> 10 2 11 - #include <asm/smp.h> 12 - #include "x2apic.h" 3 + #include <linux/cpuhotplug.h> 4 + #include <linux/cpumask.h> 5 + #include <linux/slab.h> 6 + #include <linux/mm.h> 7 + 8 + #include <asm/apic.h> 9 + 10 + #include "local.h" 13 11 14 12 struct cluster_mask { 15 13 unsigned int clusterid; ··· 82 84 83 85 static void x2apic_send_IPI_allbutself(int vector) 84 86 { 85 - __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT); 87 + __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT); 86 88 } 87 89 88 90 static void x2apic_send_IPI_all(int vector) 89 91 { 90 - __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC); 92 + __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC); 91 93 } 92 94 93 95 static u32 x2apic_calc_apicid(unsigned int cpu)
+14 -11
arch/x86/kernel/apic/x2apic_phys.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #include <linux/threads.h> 3 - #include <linux/cpumask.h> 4 - #include <linux/string.h> 5 - #include <linux/kernel.h> 6 - #include <linux/ctype.h> 7 - #include <linux/dmar.h> 8 2 9 - #include <asm/smp.h> 10 - #include <asm/ipi.h> 11 - #include "x2apic.h" 3 + #include <linux/cpumask.h> 4 + #include <linux/acpi.h> 5 + 6 + #include "local.h" 12 7 13 8 int x2apic_phys; 14 9 ··· 75 80 76 81 static void x2apic_send_IPI_allbutself(int vector) 77 82 { 78 - __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT); 83 + __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT); 79 84 } 80 85 81 86 static void x2apic_send_IPI_all(int vector) 82 87 { 83 - __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC); 88 + __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC); 84 89 } 85 90 86 91 static void init_x2apic_ldr(void) ··· 110 115 { 111 116 unsigned long cfg = __prepare_ICR(0, vector, dest); 112 117 native_x2apic_icr_write(cfg, apicid); 118 + } 119 + 120 + void __x2apic_send_IPI_shorthand(int vector, u32 which) 121 + { 122 + unsigned long cfg = __prepare_ICR(which, vector, 0); 123 + 124 + x2apic_wrmsr_fence(); 125 + native_x2apic_icr_write(cfg, 0); 113 126 } 114 127 115 128 unsigned int x2apic_get_apic_id(unsigned long id)
+7 -27
arch/x86/kernel/apic/x2apic_uv_x.c
··· 7 7 * 8 8 * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved. 9 9 */ 10 - #include <linux/cpumask.h> 11 - #include <linux/hardirq.h> 12 - #include <linux/proc_fs.h> 13 - #include <linux/threads.h> 14 - #include <linux/kernel.h> 15 - #include <linux/export.h> 16 - #include <linux/string.h> 17 - #include <linux/ctype.h> 18 - #include <linux/sched.h> 19 - #include <linux/timer.h> 20 - #include <linux/slab.h> 21 - #include <linux/cpu.h> 22 - #include <linux/init.h> 23 - #include <linux/io.h> 24 - #include <linux/pci.h> 25 - #include <linux/kdebug.h> 26 - #include <linux/delay.h> 27 10 #include <linux/crash_dump.h> 28 - #include <linux/reboot.h> 11 + #include <linux/cpuhotplug.h> 12 + #include <linux/cpumask.h> 13 + #include <linux/proc_fs.h> 29 14 #include <linux/memory.h> 30 - #include <linux/numa.h> 15 + #include <linux/export.h> 16 + #include <linux/pci.h> 31 17 18 + #include <asm/e820/api.h> 32 19 #include <asm/uv/uv_mmrs.h> 33 20 #include <asm/uv/uv_hub.h> 34 - #include <asm/current.h> 35 - #include <asm/pgtable.h> 36 21 #include <asm/uv/bios.h> 37 22 #include <asm/uv/uv.h> 38 23 #include <asm/apic.h> 39 - #include <asm/e820/api.h> 40 - #include <asm/ipi.h> 41 - #include <asm/smp.h> 42 - #include <asm/x86_init.h> 43 - #include <asm/nmi.h> 44 24 45 - DEFINE_PER_CPU(int, x2apic_extra_bits); 25 + static DEFINE_PER_CPU(int, x2apic_extra_bits); 46 26 47 27 static enum uv_system_type uv_system_type; 48 28 static bool uv_hubless_system;
-1
arch/x86/kernel/asm-offsets_64.c
··· 77 77 ENTRY(cr2); 78 78 ENTRY(cr3); 79 79 ENTRY(cr4); 80 - ENTRY(cr8); 81 80 ENTRY(gdt_desc); 82 81 BLANK(); 83 82 #undef ENTRY
+1 -1
arch/x86/kernel/cpu/bugs.c
··· 787 787 788 788 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" 789 789 790 - void arch_smt_update(void) 790 + void cpu_bugs_smt_update(void) 791 791 { 792 792 /* Enhanced IBRS implies STIBP. No update required. */ 793 793 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+11
arch/x86/kernel/cpu/common.c
··· 1958 1958 pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); 1959 1959 pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); 1960 1960 } 1961 + 1962 + /* 1963 + * Invoked from core CPU hotplug code after hotplug operations 1964 + */ 1965 + void arch_smt_update(void) 1966 + { 1967 + /* Handle the speculative execution misfeatures */ 1968 + cpu_bugs_smt_update(); 1969 + /* Check whether IPI broadcasting can be enabled */ 1970 + apic_smt_update(); 1971 + }
+1 -1
arch/x86/kernel/kgdb.c
··· 416 416 */ 417 417 void kgdb_roundup_cpus(void) 418 418 { 419 - apic->send_IPI_allbutself(APIC_DM_NMI); 419 + apic_send_IPI_allbutself(NMI_VECTOR); 420 420 } 421 421 #endif 422 422
+3
arch/x86/kernel/nmi.c
··· 512 512 dotraplinkage notrace void 513 513 do_nmi(struct pt_regs *regs, long error_code) 514 514 { 515 + if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id())) 516 + return; 517 + 515 518 if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { 516 519 this_cpu_write(nmi_state, NMI_LATCHED); 517 520 return;
-4
arch/x86/kernel/paravirt.c
··· 311 311 .cpu.read_cr0 = native_read_cr0, 312 312 .cpu.write_cr0 = native_write_cr0, 313 313 .cpu.write_cr4 = native_write_cr4, 314 - #ifdef CONFIG_X86_64 315 - .cpu.read_cr8 = native_read_cr8, 316 - .cpu.write_cr8 = native_write_cr8, 317 - #endif 318 314 .cpu.wbinvd = native_wbinvd, 319 315 .cpu.read_msr = native_read_msr, 320 316 .cpu.write_msr = native_write_msr,
+1 -6
arch/x86/kernel/reboot.c
··· 828 828 return NMI_HANDLED; 829 829 } 830 830 831 - static void smp_send_nmi_allbutself(void) 832 - { 833 - apic->send_IPI_allbutself(NMI_VECTOR); 834 - } 835 - 836 831 /* 837 832 * Halt all other CPUs, calling the specified function on each of them 838 833 * ··· 856 861 */ 857 862 wmb(); 858 863 859 - smp_send_nmi_allbutself(); 864 + apic_send_IPI_allbutself(NMI_VECTOR); 860 865 861 866 /* Kick CPUs looping in NMI context. */ 862 867 WRITE_ONCE(crash_ipi_issued, 1);
+30 -62
arch/x86/kernel/smp.c
··· 115 115 static atomic_t stopping_cpu = ATOMIC_INIT(-1); 116 116 static bool smp_no_nmi_ipi = false; 117 117 118 - /* 119 - * this function sends a 'reschedule' IPI to another CPU. 120 - * it goes straight through and wastes no time serializing 121 - * anything. Worst case is that we lose a reschedule ... 122 - */ 123 - static void native_smp_send_reschedule(int cpu) 124 - { 125 - if (unlikely(cpu_is_offline(cpu))) { 126 - WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu); 127 - return; 128 - } 129 - apic->send_IPI(cpu, RESCHEDULE_VECTOR); 130 - } 131 - 132 - void native_send_call_func_single_ipi(int cpu) 133 - { 134 - apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR); 135 - } 136 - 137 - void native_send_call_func_ipi(const struct cpumask *mask) 138 - { 139 - cpumask_var_t allbutself; 140 - 141 - if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) { 142 - apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 143 - return; 144 - } 145 - 146 - cpumask_copy(allbutself, cpu_online_mask); 147 - __cpumask_clear_cpu(smp_processor_id(), allbutself); 148 - 149 - if (cpumask_equal(mask, allbutself) && 150 - cpumask_equal(cpu_online_mask, cpu_callout_mask)) 151 - apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR); 152 - else 153 - apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR); 154 - 155 - free_cpumask_var(allbutself); 156 - } 157 - 158 118 static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) 159 119 { 160 120 /* We are registered on stopping cpu too, avoid spurious NMI */ ··· 137 177 cpu_emergency_vmxoff(); 138 178 stop_this_cpu(NULL); 139 179 irq_exit(); 180 + } 181 + 182 + static int register_stop_handler(void) 183 + { 184 + return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, 185 + NMI_FLAG_FIRST, "smp_stop"); 140 186 } 141 187 142 188 static void native_stop_other_cpus(int wait) ··· 175 209 /* sync above data before sending IRQ */ 176 210 wmb(); 177 211 178 - apic->send_IPI_allbutself(REBOOT_VECTOR); 212 + 
apic_send_IPI_allbutself(REBOOT_VECTOR); 179 213 180 214 /* 181 - * Don't wait longer than a second if the caller 182 - * didn't ask us to wait. 215 + * Don't wait longer than a second for IPI completion. The 216 + * wait request is not checked here because that would 217 + * prevent an NMI shutdown attempt in case that not all 218 + * CPUs reach shutdown state. 183 219 */ 184 220 timeout = USEC_PER_SEC; 185 - while (num_online_cpus() > 1 && (wait || timeout--)) 221 + while (num_online_cpus() > 1 && timeout--) 186 222 udelay(1); 187 223 } 188 - 224 + 189 225 /* if the REBOOT_VECTOR didn't work, try with the NMI */ 190 - if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) { 191 - if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback, 192 - NMI_FLAG_FIRST, "smp_stop")) 193 - /* Note: we ignore failures here */ 194 - /* Hope the REBOOT_IRQ is good enough */ 195 - goto finish; 196 - 197 - /* sync above data before sending IRQ */ 198 - wmb(); 199 - 200 - pr_emerg("Shutting down cpus with NMI\n"); 201 - 202 - apic->send_IPI_allbutself(NMI_VECTOR); 203 - 226 + if (num_online_cpus() > 1) { 204 227 /* 205 - * Don't wait longer than a 10 ms if the caller 206 - * didn't ask us to wait. 228 + * If NMI IPI is enabled, try to register the stop handler 229 + * and send the IPI. In any case try to wait for the other 230 + * CPUs to stop. 231 + */ 232 + if (!smp_no_nmi_ipi && !register_stop_handler()) { 233 + /* Sync above data before sending IRQ */ 234 + wmb(); 235 + 236 + pr_emerg("Shutting down cpus with NMI\n"); 237 + 238 + apic_send_IPI_allbutself(NMI_VECTOR); 239 + } 240 + /* 241 + * Don't wait longer than 10 ms if the caller didn't 242 + * reqeust it. If wait is true, the machine hangs here if 243 + * one or more CPUs do not reach shutdown state. 
207 244 */ 208 245 timeout = USEC_PER_MSEC * 10; 209 246 while (num_online_cpus() > 1 && (wait || timeout--)) 210 247 udelay(1); 211 248 } 212 249 213 - finish: 214 250 local_irq_save(flags); 215 251 disable_local_APIC(); 216 252 mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
+6 -1
arch/x86/kernel/smpboot.c
··· 1591 1591 if (ret) 1592 1592 return ret; 1593 1593 1594 - clear_local_APIC(); 1594 + /* 1595 + * Disable the local APIC. Otherwise IPI broadcasts will reach 1596 + * it. It still responds normally to INIT, NMI, SMI, and SIPI 1597 + * messages. 1598 + */ 1599 + apic_soft_disable(); 1595 1600 cpu_disable_common(); 1596 1601 1597 1602 return 0;
-4
arch/x86/power/cpu.c
··· 123 123 ctxt->cr2 = read_cr2(); 124 124 ctxt->cr3 = __read_cr3(); 125 125 ctxt->cr4 = __read_cr4(); 126 - #ifdef CONFIG_X86_64 127 - ctxt->cr8 = read_cr8(); 128 - #endif 129 126 ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE, 130 127 &ctxt->misc_enable); 131 128 msr_save_context(ctxt); ··· 205 208 #else 206 209 /* CONFIG X86_64 */ 207 210 wrmsrl(MSR_EFER, ctxt->efer); 208 - write_cr8(ctxt->cr8); 209 211 __write_cr4(ctxt->cr4); 210 212 #endif 211 213 write_cr3(ctxt->cr3);
-15
arch/x86/xen/enlighten_pv.c
··· 877 877 878 878 native_write_cr4(cr4); 879 879 } 880 - #ifdef CONFIG_X86_64 881 - static inline unsigned long xen_read_cr8(void) 882 - { 883 - return 0; 884 - } 885 - static inline void xen_write_cr8(unsigned long val) 886 - { 887 - BUG_ON(val); 888 - } 889 - #endif 890 880 891 881 static u64 xen_read_msr_safe(unsigned int msr, int *err) 892 882 { ··· 1012 1022 .write_cr0 = xen_write_cr0, 1013 1023 1014 1024 .write_cr4 = xen_write_cr4, 1015 - 1016 - #ifdef CONFIG_X86_64 1017 - .read_cr8 = xen_read_cr8, 1018 - .write_cr8 = xen_write_cr8, 1019 - #endif 1020 1025 1021 1026 .wbinvd = native_wbinvd, 1022 1027