Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fix from Thomas Gleixner:
"A single fix for a cpu hotplug race vs. interrupt descriptors:

Prevent irq setup/teardown across the cpu starting/dying parts of cpu
hotplug so that the starting/dying cpu has a stable view of the
descriptor space. This has been an issue for all architectures in the
cpu dying phase, where interrupts are migrated away from the dying
cpu. In the starting phase it's mostly an x86 issue vs. the vector space
update"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
hotplug: Prevent alloc/free of irq descriptors during cpu up/down
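
For context, irq_lock_sparse()/irq_unlock_sparse() are not introduced by this
patch; it only makes them visible outside kernel/irq (see the internals.h hunk
below). Their definitions in kernel/irq/irqdesc.c amount to taking the mutex
that already serializes descriptor allocation and free. A simplified sketch
(not part of this diff):

  /* kernel/irq/irqdesc.c (sketch, CONFIG_SPARSE_IRQ=y) */
  static DEFINE_MUTEX(sparse_irq_lock);

  /*
   * Held by irq_alloc_descs()/irq_free_descs() while they modify the
   * descriptor tree, and now across cpu bringup/teardown, so the
   * starting/dying cpu sees a stable irq descriptor space.
   */
  void irq_lock_sparse(void)
  {
          mutex_lock(&sparse_irq_lock);
  }

  void irq_unlock_sparse(void)
  {
          mutex_unlock(&sparse_irq_lock);
  }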

 include/linux/irqdesc.h |  7 ++++++-
 kernel/cpu.c            | 22 +++++++++++++++++++++++-
 kernel/irq/internals.h  |  4 ----
 3 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -87,7 +87,12 @@
 	const char *name;
 } ____cacheline_internodealigned_in_smp;
 
-#ifndef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_SPARSE_IRQ
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
+#else
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
 
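
Note the polarity flip from #ifndef to #ifdef: with CONFIG_SPARSE_IRQ the lock
functions are real and the static irq_desc[] array does not exist; without it
the stubs are empty static inlines, so the new call sites in kernel/cpu.c
below need no #ifdef guards and the calls compile away entirely.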
diff --git a/kernel/cpu.c b/kernel/cpu.c
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include <linux/lockdep.h>
 #include <linux/tick.h>
+#include <linux/irq.h>
 #include <trace/events/power.h>
 
 #include "smpboot.h"
@@ -393,13 +392,19 @@
 	smpboot_park_threads(cpu);
 
 	/*
+	 * Prevent irq alloc/free while the dying cpu reorganizes the
+	 * interrupt affinities.
+	 */
+	irq_lock_sparse();
+
+	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
-
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+		irq_unlock_sparse();
 		goto out_release;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -421,6 +414,9 @@
 		cpu_relax();
 	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
 	per_cpu(cpu_dead_idle, cpu) = false;
+
+	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
+	irq_unlock_sparse();
 
 	hotplug_cpu__broadcast_tick_pull(cpu);
 	/* This actually kills the CPU. */
@@ -527,8 +517,18 @@
 		goto out_notify;
 	}
 
+	/*
+	 * Some architectures have to walk the irq descriptors to
+	 * setup the vector space for the cpu which comes online.
+	 * Prevent irq alloc/free across the bringup.
+	 */
+	irq_lock_sparse();
+
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
+
+	irq_unlock_sparse();
+
 	if (ret != 0)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
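
The "walk the irq descriptors" in the bringup comment refers to x86 populating
the incoming cpu's per-cpu vector table. A rough sketch of the shape of such a
consumer (setup_vectors_for_cpu() is a made-up name standing in for arch code
such as x86's __setup_vector_irq(); simplified, not part of this diff):

  /*
   * Sketch only, hypothetical helper. With irq_lock_sparse() held
   * around __cpu_up(), no irq descriptor can be allocated or freed
   * while this walk is in flight.
   */
  static void setup_vectors_for_cpu(int cpu)
  {
          unsigned int irq;

          for_each_active_irq(irq) {
                  struct irq_desc *desc = irq_to_desc(irq);

                  /* if 'desc' can target 'cpu', record its vector in
                   * the per-cpu vector_irq[] table */
          }
  }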
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -76,12 +76,8 @@
 
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
-extern void irq_lock_sparse(void);
-extern void irq_unlock_sparse(void);
 #else
 extern void irq_mark_irq(unsigned int irq);
-static inline void irq_lock_sparse(void) { }
-static inline void irq_unlock_sparse(void) { }
 #endif
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);