genirq/msi: Activate Multi-MSI early when MSI_FLAG_ACTIVATE_EARLY is set

When MSI_FLAG_ACTIVATE_EARLY is set (which is the case for PCI),
__msi_domain_alloc_irqs() performs the activation of the interrupt (which
in the case of PCI results in the endpoint being programmed) as soon as the
interrupt is allocated.

But it appears that this is only done for the first vector, introducing an
inconsistent behaviour for PCI Multi-MSI.

Fix it by iterating over the number of vectors allocated to each MSI
descriptor. This is easily achieved by introducing a new
"for_each_msi_vector" iterator, together with a tiny bit of refactoring.

Fixes: f3b0946d629c ("genirq/msi: Make sure PCI MSIs are activated early")
Reported-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20210123122759.1781359-1-maz@kernel.org

authored by Marc Zyngier and committed by Thomas Gleixner 4c457e8c 13391c60

Changed files
+26 -24
include
linux
kernel
irq
+6
include/linux/msi.h
```diff
@@ -178,6 +178,12 @@
 	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
 #define for_each_msi_entry_safe(desc, tmp, dev)	\
 	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
+#define for_each_msi_vector(desc, __irq, dev)			\
+	for_each_msi_entry((desc), (dev))			\
+		if ((desc)->irq)				\
+			for (__irq = (desc)->irq;		\
+			     __irq < ((desc)->irq + (desc)->nvec_used);	\
+			     __irq++)
 
 #ifdef CONFIG_IRQ_MSI_IOMMU
 static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
```
+20 -24
kernel/irq/msi.c
```diff
@@ -436,22 +436,22 @@
 
 	can_reserve = msi_check_reservation_mode(domain, info, dev);
 
-	for_each_msi_entry(desc, dev) {
-		virq = desc->irq;
-		if (desc->nvec_used == 1)
-			dev_dbg(dev, "irq %d for MSI\n", virq);
-		else
+	/*
+	 * This flag is set by the PCI layer as we need to activate
+	 * the MSI entries before the PCI layer enables MSI in the
+	 * card. Otherwise the card latches a random msi message.
+	 */
+	if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+		goto skip_activate;
+
+	for_each_msi_vector(desc, i, dev) {
+		if (desc->irq == i) {
+			virq = desc->irq;
 			dev_dbg(dev, "irq [%d-%d] for MSI\n",
 				virq, virq + desc->nvec_used - 1);
-		/*
-		 * This flag is set by the PCI layer as we need to activate
-		 * the MSI entries before the PCI layer enables MSI in the
-		 * card. Otherwise the card latches a random msi message.
-		 */
-		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
-			continue;
+		}
 
-		irq_data = irq_domain_get_irq_data(domain, desc->irq);
+		irq_data = irq_domain_get_irq_data(domain, i);
 		if (!can_reserve) {
 			irqd_clr_can_reserve(irq_data);
 			if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
@@ -462,28 +462,25 @@
 			goto cleanup;
 	}
 
+skip_activate:
 	/*
 	 * If these interrupts use reservation mode, clear the activated bit
 	 * so request_irq() will assign the final vector.
 	 */
 	if (can_reserve) {
-		for_each_msi_entry(desc, dev) {
-			irq_data = irq_domain_get_irq_data(domain, desc->irq);
+		for_each_msi_vector(desc, i, dev) {
+			irq_data = irq_domain_get_irq_data(domain, i);
 			irqd_clr_activated(irq_data);
 		}
 	}
 	return 0;
 
 cleanup:
-	for_each_msi_entry(desc, dev) {
-		struct irq_data *irqd;
-
-		if (desc->irq == virq)
-			break;
-
-		irqd = irq_domain_get_irq_data(domain, desc->irq);
-		if (irqd_is_activated(irqd))
-			irq_domain_deactivate_irq(irqd);
+	for_each_msi_vector(desc, i, dev) {
+		irq_data = irq_domain_get_irq_data(domain, i);
+		if (irqd_is_activated(irq_data))
+			irq_domain_deactivate_irq(irq_data);
 	}
 	msi_domain_free_irqs(domain, dev);
 	return ret;
```