Merge branch irq/lpi-resend into irq/irqchip-next

* irq/lpi-resend:
: .
: Patch series from James Gowans, working around an issue with
: GICv3 LPIs that can fire concurrently on multiple CPUs when an
: affinity change races with the handling of a previous occurrence.
: .
irqchip/gic-v3-its: Enable RESEND_WHEN_IN_PROGRESS for LPIs
genirq: Allow fasteoi handler to resend interrupts on concurrent handling
genirq: Expand doc for PENDING and REPLAY flags
genirq: Use BIT() for the IRQD_* state flags

Signed-off-by: Marc Zyngier <maz@kernel.org>

+60 -26
+2
drivers/irqchip/irq-gic-v3-its.c
···
 		irqd = irq_get_irq_data(virq + i);
 		irqd_set_single_target(irqd);
 		irqd_set_affinity_on_activate(irqd);
+		irqd_set_resend_when_in_progress(irqd);
 		pr_debug("ID:%d pID:%d vID:%d\n",
 			 (int)(hwirq + i - its_dev->event_map.lpi_base),
 			 (int)(hwirq + i), virq + i);
···
 		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
 					      irqchip, vm->vpes[i]);
 		set_bit(i, bitmap);
+		irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
 	}

 	if (err) {
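As the two hunks above show, opting in is a single call in the domain's
allocation path. For illustration only, here is a minimal sketch of how
another irqchip driver could do the same; the foo_* names and the callback
itself are hypothetical and not part of this series:

static int foo_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		/* Hypothetical per-irq setup for a fasteoi-flow irqchip */
		irq_domain_set_info(domain, virq + i, i, &foo_chip, NULL,
				    handle_fasteoi_irq, NULL, NULL);
		/*
		 * Ask the core to mark the interrupt pending and resend it
		 * if it fires while still in progress on another CPU.
		 */
		irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
	}

	return 0;
}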
+36 -23
include/linux/irq.h
···
  * irq_chip::irq_set_affinity() when deactivated.
  * IRQD_IRQ_ENABLED_ON_SUSPEND	- Interrupt is enabled on suspend by irq pm if
  *				  irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
+ * IRQD_RESEND_WHEN_IN_PROGRESS	- Interrupt may fire when already in progress in which
+ *				  case it must be resent at the next available opportunity.
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
-	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
-	IRQD_ACTIVATED			= (1 <<  9),
-	IRQD_NO_BALANCING		= (1 << 10),
-	IRQD_PER_CPU			= (1 << 11),
-	IRQD_AFFINITY_SET		= (1 << 12),
-	IRQD_LEVEL			= (1 << 13),
-	IRQD_WAKEUP_STATE		= (1 << 14),
-	IRQD_MOVE_PCNTXT		= (1 << 15),
-	IRQD_IRQ_DISABLED		= (1 << 16),
-	IRQD_IRQ_MASKED			= (1 << 17),
-	IRQD_IRQ_INPROGRESS		= (1 << 18),
-	IRQD_WAKEUP_ARMED		= (1 << 19),
-	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
-	IRQD_AFFINITY_MANAGED		= (1 << 21),
-	IRQD_IRQ_STARTED		= (1 << 22),
-	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
-	IRQD_SINGLE_TARGET		= (1 << 24),
-	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
-	IRQD_CAN_RESERVE		= (1 << 26),
-	IRQD_MSI_NOMASK_QUIRK		= (1 << 27),
-	IRQD_HANDLE_ENFORCE_IRQCTX	= (1 << 28),
-	IRQD_AFFINITY_ON_ACTIVATE	= (1 << 29),
-	IRQD_IRQ_ENABLED_ON_SUSPEND	= (1 << 30),
+	IRQD_SETAFFINITY_PENDING	= BIT(8),
+	IRQD_ACTIVATED			= BIT(9),
+	IRQD_NO_BALANCING		= BIT(10),
+	IRQD_PER_CPU			= BIT(11),
+	IRQD_AFFINITY_SET		= BIT(12),
+	IRQD_LEVEL			= BIT(13),
+	IRQD_WAKEUP_STATE		= BIT(14),
+	IRQD_MOVE_PCNTXT		= BIT(15),
+	IRQD_IRQ_DISABLED		= BIT(16),
+	IRQD_IRQ_MASKED			= BIT(17),
+	IRQD_IRQ_INPROGRESS		= BIT(18),
+	IRQD_WAKEUP_ARMED		= BIT(19),
+	IRQD_FORWARDED_TO_VCPU		= BIT(20),
+	IRQD_AFFINITY_MANAGED		= BIT(21),
+	IRQD_IRQ_STARTED		= BIT(22),
+	IRQD_MANAGED_SHUTDOWN		= BIT(23),
+	IRQD_SINGLE_TARGET		= BIT(24),
+	IRQD_DEFAULT_TRIGGER_SET	= BIT(25),
+	IRQD_CAN_RESERVE		= BIT(26),
+	IRQD_MSI_NOMASK_QUIRK		= BIT(27),
+	IRQD_HANDLE_ENFORCE_IRQCTX	= BIT(28),
+	IRQD_AFFINITY_ON_ACTIVATE	= BIT(29),
+	IRQD_IRQ_ENABLED_ON_SUSPEND	= BIT(30),
+	IRQD_RESEND_WHEN_IN_PROGRESS	= BIT(31),
 };

 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
···
 static inline bool irqd_affinity_on_activate(struct irq_data *d)
 {
 	return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
 }
+
+static inline void irqd_set_resend_when_in_progress(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS;
+}
+
+static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS;
+}

 #undef __irqd_to_state
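A small note on the conversion: BIT() is the kernel's standard bit macro
(from <linux/bits.h>, ultimately <vdso/bits.h>):

#define BIT(nr)			(UL(1) << (nr))

and the state word behind __irqd_to_state() is a 32-bit field, so the new
IRQD_RESEND_WHEN_IN_PROGRESS flag at BIT(31) occupies the top bit of that
word.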
+15 -1
kernel/irq/chip.c
···
 	raw_spin_lock(&desc->lock);

+	/*
+	 * When an affinity change races with IRQ handling, the next interrupt
+	 * can arrive on the new CPU before the original CPU has completed
+	 * handling the previous one - it may need to be resent.
+	 */
-	if (!irq_may_run(desc))
+	if (!irq_may_run(desc)) {
+		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
+			desc->istate |= IRQS_PENDING;
 		goto out;
+	}

 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
···
 	handle_irq_event(desc);

 	cond_unmask_eoi_irq(desc, chip);
+
+	/*
+	 * When the race described above happens this will resend the interrupt.
+	 */
+	if (unlikely(desc->istate & IRQS_PENDING))
+		check_irq_resend(desc, false);

 	raw_spin_unlock(&desc->lock);
 	return;
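Concretely, the race these two hunks cooperate to handle plays out as below.
This timeline is only an illustration of the code above, with CPU numbers
chosen arbitrarily:

CPU0 (old affinity)                      CPU1 (new affinity)
-------------------                      -------------------
handle_fasteoi_irq()
  irq_may_run() -> true
  handle_irq_event() runs the handler    handle_fasteoi_irq()
                                           irq_may_run() -> false (INPROGRESS)
                                           istate |= IRQS_PENDING
                                           goto out (EOI, no handler run)
  handler completes
  IRQS_PENDING found set
  check_irq_resend() re-raises the
  interrupt for delivery per the
  new affinity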
+2
kernel/irq/debugfs.c
···
 	BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX),

 	BIT_MASK_DESCR(IRQD_IRQ_ENABLED_ON_SUSPEND),
+
+	BIT_MASK_DESCR(IRQD_RESEND_WHEN_IN_PROGRESS),
 };

 static const struct irq_bit_descr irqdesc_states[] = {
+5 -2
kernel/irq/internals.h
···
  *				  detection
  * IRQS_POLL_INPROGRESS		- polling in progress
  * IRQS_ONESHOT			- irq is not unmasked in primary handler
- * IRQS_REPLAY			- irq is replayed
+ * IRQS_REPLAY			- irq has been resent and will not be resent
+ *				  again until the handler has run and cleared
+ *				  this flag.
  * IRQS_WAITING			- irq is waiting
- * IRQS_PENDING			- irq is pending and replayed later
+ * IRQS_PENDING			- irq needs to be resent and should be resent
+ *				  at the next available opportunity.
  * IRQS_SUSPENDED		- irq is suspended
  * IRQS_NMI			- irq line is used to deliver NMIs
  * IRQS_SYSFS			- descriptor has been added to sysfs
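For reference, both flags are consumed by check_irq_resend() in
kernel/irq/resend.c. The following is a simplified paraphrase of that
function, not the verbatim source; try_retrigger() and irq_sw_resend() are
internal helpers in the same file:

int check_irq_resend(struct irq_desc *desc, bool inject)
{
	int err = 0;

	/* Level interrupts are re-raised by the hardware while active */
	if (irq_settings_is_level(desc)) {
		desc->istate &= ~IRQS_PENDING;
		return -EINVAL;
	}

	/* A resend is already in flight - don't queue another */
	if (desc->istate & IRQS_REPLAY)
		return -EBUSY;

	if (!(desc->istate & IRQS_PENDING) && !inject)
		return 0;

	desc->istate &= ~IRQS_PENDING;

	/* Prefer a hardware retrigger; fall back to a software resend */
	if (!try_retrigger(desc))
		err = irq_sw_resend(desc);

	/* Mark resent; cleared once the handler actually runs */
	if (!err)
		desc->istate |= IRQS_REPLAY;

	return err;
}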