Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
"A rather large update after the kaisered maintainer finally found time
to handle regression reports.

- The larger part addresses a regression caused by the x86 vector
management rework.

The reservation-based model does not work reliably for MSI
interrupts if they cannot be masked (yes, yet another hw
engineering trainwreck). The reason is that the reservation mode
assigns a dummy vector when the interrupt is allocated and switches
to a real vector when the interrupt is requested.

If the MSI entry cannot be masked then the initialization might
raise an interrupt before the interrupt is requested, which ends up
as a spurious interrupt and causes device malfunction and worse. The
fix is to exclude MSI interrupts which do not support masking from
reservation mode and assign a real vector right away.

- Extend the extra lockdep class setup for nested interrupts with a
class for the recently added irq_desc::request_mutex so lockdep can
differentiate and does not emit false positive warnings.

- A ratelimit guard for the bad irq printout, so that a bad irq which
keeps firing does not drown the system in dmesg spam"
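
The first item boils down to a check in the MSI core plus a changed
activation call. A condensed sketch of the genirq/msi hunk at the end of
this diff (the MSI_FLAG_ACTIVATE_EARLY test, the error unwinding and the
"clear activated" pass are omitted here; example_msi_activate is a
hypothetical wrapper name, not a function in the patch):

    /* Simplified from the kernel/irq/msi.c change below */
    static int example_msi_activate(struct irq_domain *domain,
                                    struct device *dev,
                                    struct msi_domain_info *info)
    {
        struct irq_data *irq_data;
        struct msi_desc *desc;
        bool can_reserve;
        int ret;

        /* Reservation mode is only safe if every MSI entry can be masked */
        can_reserve = msi_check_reservation_mode(domain, info, dev);

        for_each_msi_entry(desc, dev) {
            irq_data = irq_domain_get_irq_data(domain, desc->irq);
            if (!can_reserve)
                irqd_clr_can_reserve(irq_data);  /* assign a real vector now */
            ret = irq_domain_activate_irq(irq_data, can_reserve);
            if (ret)
                return ret;
        }
        return 0;
    }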

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
genirq/msi, x86/vector: Prevent reservation mode for non maskable MSI
genirq/irqdomain: Rename early argument of irq_domain_activate_irq()
x86/vector: Use IRQD_CAN_RESERVE flag
genirq: Introduce IRQD_CAN_RESERVE flag
genirq/msi: Handle reactivation only on success
gpio: brcmstb: Make really use of the new lockdep class
genirq: Guard handle_bad_irq log messages
kernel/irq: Extend lockdep class for request mutex

+196 -72
+3 -1
arch/powerpc/sysdev/fsl_msi.c
··· 354 } 355 356 static struct lock_class_key fsl_msi_irq_class; 357 358 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, 359 int offset, int irq_index) ··· 374 dev_err(&dev->dev, "No memory for MSI cascade data\n"); 375 return -ENOMEM; 376 } 377 - irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class); 378 cascade_data->index = offset; 379 cascade_data->msi_data = msi; 380 cascade_data->virq = virt_msir;
··· 354 } 355 356 static struct lock_class_key fsl_msi_irq_class; 357 + static struct lock_class_key fsl_msi_irq_request_class; 358 359 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, 360 int offset, int irq_index) ··· 373 dev_err(&dev->dev, "No memory for MSI cascade data\n"); 374 return -ENOMEM; 375 } 376 + irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class, 377 + &fsl_msi_irq_request_class); 378 cascade_data->index = offset; 379 cascade_data->msi_data = msi; 380 cascade_data->virq = virt_msir;
+1 -1
arch/x86/include/asm/irqdomain.h
··· 44 extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, 45 unsigned int nr_irqs); 46 extern int mp_irqdomain_activate(struct irq_domain *domain, 47 - struct irq_data *irq_data, bool early); 48 extern void mp_irqdomain_deactivate(struct irq_domain *domain, 49 struct irq_data *irq_data); 50 extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
··· 44 extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, 45 unsigned int nr_irqs); 46 extern int mp_irqdomain_activate(struct irq_domain *domain, 47 + struct irq_data *irq_data, bool reserve); 48 extern void mp_irqdomain_deactivate(struct irq_domain *domain, 49 struct irq_data *irq_data); 50 extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
+8 -8
arch/x86/include/asm/trace/irq_vectors.h
··· 283 DECLARE_EVENT_CLASS(vector_activate, 284 285 TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve, 286 - bool early), 287 288 - TP_ARGS(irq, is_managed, can_reserve, early), 289 290 TP_STRUCT__entry( 291 __field( unsigned int, irq ) 292 __field( bool, is_managed ) 293 __field( bool, can_reserve ) 294 - __field( bool, early ) 295 ), 296 297 TP_fast_assign( 298 __entry->irq = irq; 299 __entry->is_managed = is_managed; 300 __entry->can_reserve = can_reserve; 301 - __entry->early = early; 302 ), 303 304 - TP_printk("irq=%u is_managed=%d can_reserve=%d early=%d", 305 __entry->irq, __entry->is_managed, __entry->can_reserve, 306 - __entry->early) 307 ); 308 309 #define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name) \ 310 DEFINE_EVENT_FN(vector_activate, name, \ 311 TP_PROTO(unsigned int irq, bool is_managed, \ 312 - bool can_reserve, bool early), \ 313 - TP_ARGS(irq, is_managed, can_reserve, early), NULL, NULL); \ 314 315 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate); 316 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
··· 283 DECLARE_EVENT_CLASS(vector_activate, 284 285 TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve, 286 + bool reserve), 287 288 + TP_ARGS(irq, is_managed, can_reserve, reserve), 289 290 TP_STRUCT__entry( 291 __field( unsigned int, irq ) 292 __field( bool, is_managed ) 293 __field( bool, can_reserve ) 294 + __field( bool, reserve ) 295 ), 296 297 TP_fast_assign( 298 __entry->irq = irq; 299 __entry->is_managed = is_managed; 300 __entry->can_reserve = can_reserve; 301 + __entry->reserve = reserve; 302 ), 303 304 + TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d", 305 __entry->irq, __entry->is_managed, __entry->can_reserve, 306 + __entry->reserve) 307 ); 308 309 #define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name) \ 310 DEFINE_EVENT_FN(vector_activate, name, \ 311 TP_PROTO(unsigned int irq, bool is_managed, \ 312 + bool can_reserve, bool reserve), \ 313 + TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL); \ 314 315 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate); 316 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
+1 -1
arch/x86/kernel/apic/io_apic.c
··· 2988 } 2989 2990 int mp_irqdomain_activate(struct irq_domain *domain, 2991 - struct irq_data *irq_data, bool early) 2992 { 2993 unsigned long flags; 2994
··· 2988 } 2989 2990 int mp_irqdomain_activate(struct irq_domain *domain, 2991 + struct irq_data *irq_data, bool reserve) 2992 { 2993 unsigned long flags; 2994
+16 -4
arch/x86/kernel/apic/vector.c
··· 184 irq_matrix_reserve(vector_matrix); 185 apicd->can_reserve = true; 186 apicd->has_reserved = true; 187 trace_vector_reserve(irqd->irq, 0); 188 vector_assign_managed_shutdown(irqd); 189 } ··· 369 int ret; 370 371 ret = assign_irq_vector_any_locked(irqd); 372 - if (!ret) 373 apicd->has_reserved = false; 374 return ret; 375 } 376 ··· 409 } 410 411 static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, 412 - bool early) 413 { 414 struct apic_chip_data *apicd = apic_chip_data(irqd); 415 unsigned long flags; 416 int ret = 0; 417 418 trace_vector_activate(irqd->irq, apicd->is_managed, 419 - apicd->can_reserve, early); 420 421 /* Nothing to do for fixed assigned vectors */ 422 if (!apicd->can_reserve && !apicd->is_managed) 423 return 0; 424 425 raw_spin_lock_irqsave(&vector_lock, flags); 426 - if (early || irqd_is_managed_and_shutdown(irqd)) 427 vector_assign_managed_shutdown(irqd); 428 else if (apicd->is_managed) 429 ret = activate_managed(irqd); ··· 489 } else { 490 /* Release the vector */ 491 apicd->can_reserve = true; 492 clear_irq_vector(irqd); 493 realloc = true; 494 }
··· 184 irq_matrix_reserve(vector_matrix); 185 apicd->can_reserve = true; 186 apicd->has_reserved = true; 187 + irqd_set_can_reserve(irqd); 188 trace_vector_reserve(irqd->irq, 0); 189 vector_assign_managed_shutdown(irqd); 190 } ··· 368 int ret; 369 370 ret = assign_irq_vector_any_locked(irqd); 371 + if (!ret) { 372 apicd->has_reserved = false; 373 + /* 374 + * Core might have disabled reservation mode after 375 + * allocating the irq descriptor. Ideally this should 376 + * happen before allocation time, but that would require 377 + * completely convoluted ways of transporting that 378 + * information. 379 + */ 380 + if (!irqd_can_reserve(irqd)) 381 + apicd->can_reserve = false; 382 + } 383 return ret; 384 } 385 ··· 398 } 399 400 static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, 401 + bool reserve) 402 { 403 struct apic_chip_data *apicd = apic_chip_data(irqd); 404 unsigned long flags; 405 int ret = 0; 406 407 trace_vector_activate(irqd->irq, apicd->is_managed, 408 + apicd->can_reserve, reserve); 409 410 /* Nothing to do for fixed assigned vectors */ 411 if (!apicd->can_reserve && !apicd->is_managed) 412 return 0; 413 414 raw_spin_lock_irqsave(&vector_lock, flags); 415 + if (reserve || irqd_is_managed_and_shutdown(irqd)) 416 vector_assign_managed_shutdown(irqd); 417 else if (apicd->is_managed) 418 ret = activate_managed(irqd); ··· 478 } else { 479 /* Release the vector */ 480 apicd->can_reserve = true; 481 + irqd_set_can_reserve(irqd); 482 clear_irq_vector(irqd); 483 realloc = true; 484 }
+1 -1
arch/x86/platform/uv/uv_irq.c
··· 128 * on the specified blade to allow the sending of MSIs to the specified CPU. 129 */ 130 static int uv_domain_activate(struct irq_domain *domain, 131 - struct irq_data *irq_data, bool early) 132 { 133 uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data); 134 return 0;
··· 128 * on the specified blade to allow the sending of MSIs to the specified CPU. 129 */ 130 static int uv_domain_activate(struct irq_domain *domain, 131 + struct irq_data *irq_data, bool reserve) 132 { 133 uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data); 134 return 0;
+2 -1
drivers/gpio/gpio-bcm-kona.c
··· 522 * category than their parents, so it won't report false recursion. 523 */ 524 static struct lock_class_key gpio_lock_class; 525 526 static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq, 527 irq_hw_number_t hwirq) ··· 532 ret = irq_set_chip_data(irq, d->host_data); 533 if (ret < 0) 534 return ret; 535 - irq_set_lockdep_class(irq, &gpio_lock_class); 536 irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq); 537 irq_set_noprobe(irq); 538
··· 522 * category than their parents, so it won't report false recursion. 523 */ 524 static struct lock_class_key gpio_lock_class; 525 + static struct lock_class_key gpio_request_class; 526 527 static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq, 528 irq_hw_number_t hwirq) ··· 531 ret = irq_set_chip_data(irq, d->host_data); 532 if (ret < 0) 533 return ret; 534 + irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class); 535 irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq); 536 irq_set_noprobe(irq); 537
+3 -1
drivers/gpio/gpio-brcmstb.c
··· 327 * category than their parents, so it won't report false recursion. 328 */ 329 static struct lock_class_key brcmstb_gpio_irq_lock_class; 330 331 332 static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq, ··· 347 ret = irq_set_chip_data(irq, &bank->gc); 348 if (ret < 0) 349 return ret; 350 - irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class); 351 irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq); 352 irq_set_noprobe(irq); 353 return 0;
··· 327 * category than their parents, so it won't report false recursion. 328 */ 329 static struct lock_class_key brcmstb_gpio_irq_lock_class; 330 + static struct lock_class_key brcmstb_gpio_irq_request_class; 331 332 333 static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq, ··· 346 ret = irq_set_chip_data(irq, &bank->gc); 347 if (ret < 0) 348 return ret; 349 + irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class, 350 + &brcmstb_gpio_irq_request_class); 351 irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq); 352 irq_set_noprobe(irq); 353 return 0;
+3 -1
drivers/gpio/gpio-tegra.c
··· 565 * than their parents, so it won't report false recursion. 566 */ 567 static struct lock_class_key gpio_lock_class; 568 569 static int tegra_gpio_probe(struct platform_device *pdev) 570 { ··· 671 672 bank = &tgi->bank_info[GPIO_BANK(gpio)]; 673 674 - irq_set_lockdep_class(irq, &gpio_lock_class); 675 irq_set_chip_data(irq, bank); 676 irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq); 677 }
··· 565 * than their parents, so it won't report false recursion. 566 */ 567 static struct lock_class_key gpio_lock_class; 568 + static struct lock_class_key gpio_request_class; 569 570 static int tegra_gpio_probe(struct platform_device *pdev) 571 { ··· 670 671 bank = &tgi->bank_info[GPIO_BANK(gpio)]; 672 673 + irq_set_lockdep_class(irq, &gpio_lock_class, 674 + &gpio_request_class); 675 irq_set_chip_data(irq, bank); 676 irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq); 677 }
+1 -1
drivers/gpio/gpio-xgene-sb.c
··· 139 140 static int xgene_gpio_sb_domain_activate(struct irq_domain *d, 141 struct irq_data *irq_data, 142 - bool early) 143 { 144 struct xgene_gpio_sb *priv = d->host_data; 145 u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
··· 139 140 static int xgene_gpio_sb_domain_activate(struct irq_domain *d, 141 struct irq_data *irq_data, 142 + bool reserve) 143 { 144 struct xgene_gpio_sb *priv = d->host_data; 145 u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
+18 -9
drivers/gpio/gpiolib.c
··· 73 74 static void gpiochip_free_hogs(struct gpio_chip *chip); 75 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, 76 - struct lock_class_key *key); 77 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); 78 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip); 79 static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip); ··· 1101 } 1102 1103 int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, 1104 - struct lock_class_key *key) 1105 { 1106 unsigned long flags; 1107 int status = 0; ··· 1248 if (status) 1249 goto err_remove_from_list; 1250 1251 - status = gpiochip_add_irqchip(chip, key); 1252 if (status) 1253 goto err_remove_chip; 1254 ··· 1634 * This lock class tells lockdep that GPIO irqs are in a different 1635 * category than their parents, so it won't report false recursion. 1636 */ 1637 - irq_set_lockdep_class(irq, chip->irq.lock_key); 1638 irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler); 1639 /* Chips that use nested thread handlers have them marked */ 1640 if (chip->irq.threaded) ··· 1714 /** 1715 * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip 1716 * @gpiochip: the GPIO chip to add the IRQ chip to 1717 - * @lock_key: lockdep class 1718 */ 1719 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, 1720 - struct lock_class_key *lock_key) 1721 { 1722 struct irq_chip *irqchip = gpiochip->irq.chip; 1723 const struct irq_domain_ops *ops; ··· 1757 gpiochip->to_irq = gpiochip_to_irq; 1758 gpiochip->irq.default_type = type; 1759 gpiochip->irq.lock_key = lock_key; 1760 1761 if (gpiochip->irq.domain_ops) 1762 ops = gpiochip->irq.domain_ops; ··· 1855 * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE 1856 * to have the core avoid setting up any default type in the hardware. 1857 * @threaded: whether this irqchip uses a nested thread handler 1858 - * @lock_key: lockdep class 1859 * 1860 * This function closely associates a certain irqchip with a certain 1861 * gpiochip, providing an irq domain to translate the local IRQs to ··· 1878 irq_flow_handler_t handler, 1879 unsigned int type, 1880 bool threaded, 1881 - struct lock_class_key *lock_key) 1882 { 1883 struct device_node *of_node; 1884 ··· 1920 gpiochip->irq.default_type = type; 1921 gpiochip->to_irq = gpiochip_to_irq; 1922 gpiochip->irq.lock_key = lock_key; 1923 gpiochip->irq.domain = irq_domain_add_simple(of_node, 1924 gpiochip->ngpio, first_irq, 1925 &gpiochip_domain_ops, gpiochip); ··· 1948 #else /* CONFIG_GPIOLIB_IRQCHIP */ 1949 1950 static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip, 1951 - struct lock_class_key *key) 1952 { 1953 return 0; 1954 }
··· 73 74 static void gpiochip_free_hogs(struct gpio_chip *chip); 75 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, 76 + struct lock_class_key *lock_key, 77 + struct lock_class_key *request_key); 78 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); 79 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip); 80 static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip); ··· 1100 } 1101 1102 int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, 1103 + struct lock_class_key *lock_key, 1104 + struct lock_class_key *request_key) 1105 { 1106 unsigned long flags; 1107 int status = 0; ··· 1246 if (status) 1247 goto err_remove_from_list; 1248 1249 + status = gpiochip_add_irqchip(chip, lock_key, request_key); 1250 if (status) 1251 goto err_remove_chip; 1252 ··· 1632 * This lock class tells lockdep that GPIO irqs are in a different 1633 * category than their parents, so it won't report false recursion. 1634 */ 1635 + irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key); 1636 irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler); 1637 /* Chips that use nested thread handlers have them marked */ 1638 if (chip->irq.threaded) ··· 1712 /** 1713 * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip 1714 * @gpiochip: the GPIO chip to add the IRQ chip to 1715 + * @lock_key: lockdep class for IRQ lock 1716 + * @request_key: lockdep class for IRQ request 1717 */ 1718 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, 1719 + struct lock_class_key *lock_key, 1720 + struct lock_class_key *request_key) 1721 { 1722 struct irq_chip *irqchip = gpiochip->irq.chip; 1723 const struct irq_domain_ops *ops; ··· 1753 gpiochip->to_irq = gpiochip_to_irq; 1754 gpiochip->irq.default_type = type; 1755 gpiochip->irq.lock_key = lock_key; 1756 + gpiochip->irq.request_key = request_key; 1757 1758 if (gpiochip->irq.domain_ops) 1759 ops = gpiochip->irq.domain_ops; ··· 1850 * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE 1851 * to have the core avoid setting up any default type in the hardware. 1852 * @threaded: whether this irqchip uses a nested thread handler 1853 + * @lock_key: lockdep class for IRQ lock 1854 + * @request_key: lockdep class for IRQ request 1855 * 1856 * This function closely associates a certain irqchip with a certain 1857 * gpiochip, providing an irq domain to translate the local IRQs to ··· 1872 irq_flow_handler_t handler, 1873 unsigned int type, 1874 bool threaded, 1875 + struct lock_class_key *lock_key, 1876 + struct lock_class_key *request_key) 1877 { 1878 struct device_node *of_node; 1879 ··· 1913 gpiochip->irq.default_type = type; 1914 gpiochip->to_irq = gpiochip_to_irq; 1915 gpiochip->irq.lock_key = lock_key; 1916 + gpiochip->irq.request_key = request_key; 1917 gpiochip->irq.domain = irq_domain_add_simple(of_node, 1918 gpiochip->ngpio, first_irq, 1919 &gpiochip_domain_ops, gpiochip); ··· 1940 #else /* CONFIG_GPIOLIB_IRQCHIP */ 1941 1942 static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip, 1943 + struct lock_class_key *lock_key, 1944 + struct lock_class_key *request_key) 1945 { 1946 return 0; 1947 }
+1 -1
drivers/iommu/amd_iommu.c
··· 4184 struct irq_cfg *cfg); 4185 4186 static int irq_remapping_activate(struct irq_domain *domain, 4187 - struct irq_data *irq_data, bool early) 4188 { 4189 struct amd_ir_data *data = irq_data->chip_data; 4190 struct irq_2_irte *irte_info = &data->irq_2_irte;
··· 4184 struct irq_cfg *cfg); 4185 4186 static int irq_remapping_activate(struct irq_domain *domain, 4187 + struct irq_data *irq_data, bool reserve) 4188 { 4189 struct amd_ir_data *data = irq_data->chip_data; 4190 struct irq_2_irte *irte_info = &data->irq_2_irte;
+1 -1
drivers/iommu/intel_irq_remapping.c
··· 1397 } 1398 1399 static int intel_irq_remapping_activate(struct irq_domain *domain, 1400 - struct irq_data *irq_data, bool early) 1401 { 1402 intel_ir_reconfigure_irte(irq_data, true); 1403 return 0;
··· 1397 } 1398 1399 static int intel_irq_remapping_activate(struct irq_domain *domain, 1400 + struct irq_data *irq_data, bool reserve) 1401 { 1402 intel_ir_reconfigure_irte(irq_data, true); 1403 return 0;
+2 -2
drivers/irqchip/irq-gic-v3-its.c
··· 2303 } 2304 2305 static int its_irq_domain_activate(struct irq_domain *domain, 2306 - struct irq_data *d, bool early) 2307 { 2308 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2309 u32 event = its_get_event_id(d); ··· 2818 } 2819 2820 static int its_vpe_irq_domain_activate(struct irq_domain *domain, 2821 - struct irq_data *d, bool early) 2822 { 2823 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2824 struct its_node *its;
··· 2303 } 2304 2305 static int its_irq_domain_activate(struct irq_domain *domain, 2306 + struct irq_data *d, bool reserve) 2307 { 2308 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2309 u32 event = its_get_event_id(d); ··· 2818 } 2819 2820 static int its_vpe_irq_domain_activate(struct irq_domain *domain, 2821 + struct irq_data *d, bool reserve) 2822 { 2823 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 2824 struct its_node *its;
+5 -1
drivers/irqchip/irq-renesas-intc-irqpin.c
··· 342 */ 343 static struct lock_class_key intc_irqpin_irq_lock_class; 344 345 static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, 346 irq_hw_number_t hw) 347 { ··· 355 356 intc_irqpin_dbg(&p->irq[hw], "map"); 357 irq_set_chip_data(virq, h->host_data); 358 - irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class); 359 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); 360 return 0; 361 }
··· 342 */ 343 static struct lock_class_key intc_irqpin_irq_lock_class; 344 345 + /* And this is for the request mutex */ 346 + static struct lock_class_key intc_irqpin_irq_request_class; 347 + 348 static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, 349 irq_hw_number_t hw) 350 { ··· 352 353 intc_irqpin_dbg(&p->irq[hw], "map"); 354 irq_set_chip_data(virq, h->host_data); 355 + irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class, 356 + &intc_irqpin_irq_request_class); 357 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); 358 return 0; 359 }
+3 -1
drivers/mfd/arizona-irq.c
··· 184 }; 185 186 static struct lock_class_key arizona_irq_lock_class; 187 188 static int arizona_irq_map(struct irq_domain *h, unsigned int virq, 189 irq_hw_number_t hw) ··· 192 struct arizona *data = h->host_data; 193 194 irq_set_chip_data(virq, data); 195 - irq_set_lockdep_class(virq, &arizona_irq_lock_class); 196 irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq); 197 irq_set_nested_thread(virq, 1); 198 irq_set_noprobe(virq);
··· 184 }; 185 186 static struct lock_class_key arizona_irq_lock_class; 187 + static struct lock_class_key arizona_irq_request_class; 188 189 static int arizona_irq_map(struct irq_domain *h, unsigned int virq, 190 irq_hw_number_t hw) ··· 191 struct arizona *data = h->host_data; 192 193 irq_set_chip_data(virq, data); 194 + irq_set_lockdep_class(virq, &arizona_irq_lock_class, 195 + &arizona_irq_request_class); 196 irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq); 197 irq_set_nested_thread(virq, 1); 198 irq_set_noprobe(virq);
+4 -1
drivers/pinctrl/pinctrl-single.c
··· 222 */ 223 static struct lock_class_key pcs_lock_class; 224 225 /* 226 * REVISIT: Reads and writes could eventually use regmap or something 227 * generic. But at least on omaps, some mux registers are performance ··· 1489 irq_set_chip_data(irq, pcs_soc); 1490 irq_set_chip_and_handler(irq, &pcs->chip, 1491 handle_level_irq); 1492 - irq_set_lockdep_class(irq, &pcs_lock_class); 1493 irq_set_noprobe(irq); 1494 1495 return 0;
··· 222 */ 223 static struct lock_class_key pcs_lock_class; 224 225 + /* Class for the IRQ request mutex */ 226 + static struct lock_class_key pcs_request_class; 227 + 228 /* 229 * REVISIT: Reads and writes could eventually use regmap or something 230 * generic. But at least on omaps, some mux registers are performance ··· 1486 irq_set_chip_data(irq, pcs_soc); 1487 irq_set_chip_and_handler(irq, &pcs->chip, 1488 handle_level_irq); 1489 + irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class); 1490 irq_set_noprobe(irq); 1491 1492 return 0;
+1 -1
drivers/pinctrl/stm32/pinctrl-stm32.c
··· 290 } 291 292 static int stm32_gpio_domain_activate(struct irq_domain *d, 293 - struct irq_data *irq_data, bool early) 294 { 295 struct stm32_gpio_bank *bank = d->host_data; 296 struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
··· 290 } 291 292 static int stm32_gpio_domain_activate(struct irq_domain *d, 293 + struct irq_data *irq_data, bool reserve) 294 { 295 struct stm32_gpio_bank *bank = d->host_data; 296 struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+21 -12
include/linux/gpio/driver.h
··· 66 /** 67 * @lock_key: 68 * 69 - * Per GPIO IRQ chip lockdep class. 70 */ 71 struct lock_class_key *lock_key; 72 73 /** 74 * @parent_handler: ··· 324 325 /* add/remove chips */ 326 extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, 327 - struct lock_class_key *lock_key); 328 329 /** 330 * gpiochip_add_data() - register a gpio_chip ··· 352 */ 353 #ifdef CONFIG_LOCKDEP 354 #define gpiochip_add_data(chip, data) ({ \ 355 - static struct lock_class_key key; \ 356 - gpiochip_add_data_with_key(chip, data, &key); \ 357 }) 358 #else 359 - #define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL) 360 #endif 361 362 static inline int gpiochip_add(struct gpio_chip *chip) ··· 433 irq_flow_handler_t handler, 434 unsigned int type, 435 bool threaded, 436 - struct lock_class_key *lock_key); 437 438 #ifdef CONFIG_LOCKDEP 439 ··· 450 irq_flow_handler_t handler, 451 unsigned int type) 452 { 453 - static struct lock_class_key key; 454 455 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 456 - handler, type, false, &key); 457 } 458 459 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, ··· 465 unsigned int type) 466 { 467 468 - static struct lock_class_key key; 469 470 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 471 - handler, type, true, &key); 472 } 473 #else 474 static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, ··· 480 unsigned int type) 481 { 482 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 483 - handler, type, false, NULL); 484 } 485 486 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, ··· 490 unsigned int type) 491 { 492 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 493 - handler, type, true, NULL); 494 } 495 #endif /* CONFIG_LOCKDEP */ 496
··· 66 /** 67 * @lock_key: 68 * 69 + * Per GPIO IRQ chip lockdep classes. 70 */ 71 struct lock_class_key *lock_key; 72 + struct lock_class_key *request_key; 73 74 /** 75 * @parent_handler: ··· 323 324 /* add/remove chips */ 325 extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, 326 + struct lock_class_key *lock_key, 327 + struct lock_class_key *request_key); 328 329 /** 330 * gpiochip_add_data() - register a gpio_chip ··· 350 */ 351 #ifdef CONFIG_LOCKDEP 352 #define gpiochip_add_data(chip, data) ({ \ 353 + static struct lock_class_key lock_key; \ 354 + static struct lock_class_key request_key; \ 355 + gpiochip_add_data_with_key(chip, data, &lock_key, \ 356 + &request_key); \ 357 }) 358 #else 359 + #define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL) 360 #endif 361 362 static inline int gpiochip_add(struct gpio_chip *chip) ··· 429 irq_flow_handler_t handler, 430 unsigned int type, 431 bool threaded, 432 + struct lock_class_key *lock_key, 433 + struct lock_class_key *request_key); 434 435 #ifdef CONFIG_LOCKDEP 436 ··· 445 irq_flow_handler_t handler, 446 unsigned int type) 447 { 448 + static struct lock_class_key lock_key; 449 + static struct lock_class_key request_key; 450 451 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 452 + handler, type, false, 453 + &lock_key, &request_key); 454 } 455 456 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, ··· 458 unsigned int type) 459 { 460 461 + static struct lock_class_key lock_key; 462 + static struct lock_class_key request_key; 463 464 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 465 + handler, type, true, 466 + &lock_key, &request_key); 467 } 468 #else 469 static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, ··· 471 unsigned int type) 472 { 473 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 474 + handler, type, false, NULL, NULL); 475 } 476 477 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, ··· 481 unsigned int type) 482 { 483 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, 484 + handler, type, true, NULL, NULL); 485 } 486 #endif /* CONFIG_LOCKDEP */ 487
+17
include/linux/irq.h
··· 212 * mask. Applies only to affinity managed irqs. 213 * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target 214 * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set 215 */ 216 enum { 217 IRQD_TRIGGER_MASK = 0xf, ··· 234 IRQD_MANAGED_SHUTDOWN = (1 << 23), 235 IRQD_SINGLE_TARGET = (1 << 24), 236 IRQD_DEFAULT_TRIGGER_SET = (1 << 25), 237 }; 238 239 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) ··· 377 static inline bool irqd_is_managed_and_shutdown(struct irq_data *d) 378 { 379 return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; 380 } 381 382 #undef __irqd_to_state
··· 212 * mask. Applies only to affinity managed irqs. 213 * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target 214 * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set 215 + * IRQD_CAN_RESERVE - Can use reservation mode 216 */ 217 enum { 218 IRQD_TRIGGER_MASK = 0xf, ··· 233 IRQD_MANAGED_SHUTDOWN = (1 << 23), 234 IRQD_SINGLE_TARGET = (1 << 24), 235 IRQD_DEFAULT_TRIGGER_SET = (1 << 25), 236 + IRQD_CAN_RESERVE = (1 << 26), 237 }; 238 239 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) ··· 375 static inline bool irqd_is_managed_and_shutdown(struct irq_data *d) 376 { 377 return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; 378 + } 379 + 380 + static inline void irqd_set_can_reserve(struct irq_data *d) 381 + { 382 + __irqd_to_state(d) |= IRQD_CAN_RESERVE; 383 + } 384 + 385 + static inline void irqd_clr_can_reserve(struct irq_data *d) 386 + { 387 + __irqd_to_state(d) &= ~IRQD_CAN_RESERVE; 388 + } 389 + 390 + static inline bool irqd_can_reserve(struct irq_data *d) 391 + { 392 + return __irqd_to_state(d) & IRQD_CAN_RESERVE; 393 } 394 395 #undef __irqd_to_state
+6 -3
include/linux/irqdesc.h
··· 255 } 256 257 static inline void 258 - irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) 259 { 260 struct irq_desc *desc = irq_to_desc(irq); 261 262 - if (desc) 263 - lockdep_set_class(&desc->lock, class); 264 } 265 266 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
··· 255 } 256 257 static inline void 258 + irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, 259 + struct lock_class_key *request_class) 260 { 261 struct irq_desc *desc = irq_to_desc(irq); 262 263 + if (desc) { 264 + lockdep_set_class(&desc->lock, lock_class); 265 + lockdep_set_class(&desc->request_mutex, request_class); 266 + } 267 } 268 269 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
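
All of the driver updates in this series follow the same pattern against the
two-argument irq_set_lockdep_class() shown above; a minimal sketch (the chip,
domain-map and key names are illustrative, not taken from any single driver):

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    static struct irq_chip example_irq_chip;   /* the driver's irq_chip */

    /* One class for the irq_desc lock, one for the new request_mutex */
    static struct lock_class_key example_irq_lock_class;
    static struct lock_class_key example_irq_request_class;

    static int example_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                      irq_hw_number_t hw)
    {
        irq_set_chip_data(virq, d->host_data);
        irq_set_lockdep_class(virq, &example_irq_lock_class,
                              &example_irq_request_class);
        irq_set_chip_and_handler(virq, &example_irq_chip, handle_level_irq);
        return 0;
    }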
+1 -1
include/linux/irqdomain.h
··· 113 unsigned int nr_irqs, void *arg); 114 void (*free)(struct irq_domain *d, unsigned int virq, 115 unsigned int nr_irqs); 116 - int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early); 117 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); 118 int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, 119 unsigned long *out_hwirq, unsigned int *out_type);
··· 113 unsigned int nr_irqs, void *arg); 114 void (*free)(struct irq_domain *d, unsigned int virq, 115 unsigned int nr_irqs); 116 + int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve); 117 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); 118 int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, 119 unsigned long *out_hwirq, unsigned int *out_type);
+5
kernel/irq/debug.h
··· 12 13 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) 14 { 15 printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", 16 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); 17 printk("->handle_irq(): %p, ", desc->handle_irq);
··· 12 13 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) 14 { 15 + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); 16 + 17 + if (!__ratelimit(&ratelimit)) 18 + return; 19 + 20 printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", 21 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); 22 printk("->handle_irq(): %p, ", desc->handle_irq);
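
For reference, DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5) as used above
permits at most five messages in any five-second window; once the burst is
exhausted __ratelimit() returns false and the print is skipped. The same
idiom in a generic, stand-alone form (an illustrative sketch, not part of
the diff):

    #include <linux/printk.h>
    #include <linux/ratelimit.h>

    static void report_bad_event(unsigned int irq)
    {
        /* At most 5 messages per 5 second window, silently dropped after */
        static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 5);

        if (!__ratelimit(&rs))
            return;

        pr_warn("unexpected event on irq %u\n", irq);
    }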
+1
kernel/irq/debugfs.c
··· 113 BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING), 114 BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED), 115 BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN), 116 117 BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU), 118
··· 113 BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING), 114 BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED), 115 BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN), 116 + BIT_MASK_DESCR(IRQD_CAN_RESERVE), 117 118 BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU), 119
+7 -4
kernel/irq/generic-chip.c
··· 364 EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip); 365 366 /* 367 - * Separate lockdep class for interrupt chip which can nest irq_desc 368 - * lock. 369 */ 370 static struct lock_class_key irq_nested_lock_class; 371 372 /* 373 * irq_map_generic_chip - Map a generic chip for an irq domain ··· 410 set_bit(idx, &gc->installed); 411 412 if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK) 413 - irq_set_lockdep_class(virq, &irq_nested_lock_class); 414 415 if (chip->irq_calc_mask) 416 chip->irq_calc_mask(data); ··· 481 continue; 482 483 if (flags & IRQ_GC_INIT_NESTED_LOCK) 484 - irq_set_lockdep_class(i, &irq_nested_lock_class); 485 486 if (!(flags & IRQ_GC_NO_MASK)) { 487 struct irq_data *d = irq_get_irq_data(i);
··· 364 EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip); 365 366 /* 367 + * Separate lockdep classes for interrupt chip which can nest irq_desc 368 + * lock and request mutex. 369 */ 370 static struct lock_class_key irq_nested_lock_class; 371 + static struct lock_class_key irq_nested_request_class; 372 373 /* 374 * irq_map_generic_chip - Map a generic chip for an irq domain ··· 409 set_bit(idx, &gc->installed); 410 411 if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK) 412 + irq_set_lockdep_class(virq, &irq_nested_lock_class, 413 + &irq_nested_request_class); 414 415 if (chip->irq_calc_mask) 416 chip->irq_calc_mask(data); ··· 479 continue; 480 481 if (flags & IRQ_GC_INIT_NESTED_LOCK) 482 + irq_set_lockdep_class(i, &irq_nested_lock_class, 483 + &irq_nested_request_class); 484 485 if (!(flags & IRQ_GC_NO_MASK)) { 486 struct irq_data *d = irq_get_irq_data(i);
+1 -1
kernel/irq/internals.h
··· 440 #endif /* !CONFIG_GENERIC_PENDING_IRQ */ 441 442 #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) 443 - static inline int irq_domain_activate_irq(struct irq_data *data, bool early) 444 { 445 irqd_set_activated(data); 446 return 0;
··· 440 #endif /* !CONFIG_GENERIC_PENDING_IRQ */ 441 442 #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) 443 + static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve) 444 { 445 irqd_set_activated(data); 446 return 0;
+7 -6
kernel/irq/irqdomain.c
··· 1693 } 1694 } 1695 1696 - static int __irq_domain_activate_irq(struct irq_data *irqd, bool early) 1697 { 1698 int ret = 0; 1699 ··· 1702 1703 if (irqd->parent_data) 1704 ret = __irq_domain_activate_irq(irqd->parent_data, 1705 - early); 1706 if (!ret && domain->ops->activate) { 1707 - ret = domain->ops->activate(domain, irqd, early); 1708 /* Rollback in case of error */ 1709 if (ret && irqd->parent_data) 1710 __irq_domain_deactivate_irq(irqd->parent_data); ··· 1716 /** 1717 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate 1718 * interrupt 1719 - * @irq_data: outermost irq_data associated with interrupt 1720 * 1721 * This is the second step to call domain_ops->activate to program interrupt 1722 * controllers, so the interrupt could actually get delivered. 1723 */ 1724 - int irq_domain_activate_irq(struct irq_data *irq_data, bool early) 1725 { 1726 int ret = 0; 1727 1728 if (!irqd_is_activated(irq_data)) 1729 - ret = __irq_domain_activate_irq(irq_data, early); 1730 if (!ret) 1731 irqd_set_activated(irq_data); 1732 return ret;
··· 1693 } 1694 } 1695 1696 + static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve) 1697 { 1698 int ret = 0; 1699 ··· 1702 1703 if (irqd->parent_data) 1704 ret = __irq_domain_activate_irq(irqd->parent_data, 1705 + reserve); 1706 if (!ret && domain->ops->activate) { 1707 + ret = domain->ops->activate(domain, irqd, reserve); 1708 /* Rollback in case of error */ 1709 if (ret && irqd->parent_data) 1710 __irq_domain_deactivate_irq(irqd->parent_data); ··· 1716 /** 1717 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate 1718 * interrupt 1719 + * @irq_data: Outermost irq_data associated with interrupt 1720 + * @reserve: If set only reserve an interrupt vector instead of assigning one 1721 * 1722 * This is the second step to call domain_ops->activate to program interrupt 1723 * controllers, so the interrupt could actually get delivered. 1724 */ 1725 + int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve) 1726 { 1727 int ret = 0; 1728 1729 if (!irqd_is_activated(irq_data)) 1730 + ret = __irq_domain_activate_irq(irq_data, reserve); 1731 if (!ret) 1732 irqd_set_activated(irq_data); 1733 return ret;
+56 -8
kernel/irq/msi.c
··· 339 return ret; 340 } 341 342 /** 343 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain 344 * @domain: The domain to allocate from ··· 387 { 388 struct msi_domain_info *info = domain->host_data; 389 struct msi_domain_ops *ops = info->ops; 390 - msi_alloc_info_t arg; 391 struct msi_desc *desc; 392 int i, ret, virq; 393 394 ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg); 395 if (ret) ··· 421 if (ops->msi_finish) 422 ops->msi_finish(&arg, 0); 423 424 for_each_msi_entry(desc, dev) { 425 virq = desc->irq; 426 if (desc->nvec_used == 1) ··· 435 * the MSI entries before the PCI layer enables MSI in the 436 * card. Otherwise the card latches a random msi message. 437 */ 438 - if (info->flags & MSI_FLAG_ACTIVATE_EARLY) { 439 - struct irq_data *irq_data; 440 441 irq_data = irq_domain_get_irq_data(domain, desc->irq); 442 - ret = irq_domain_activate_irq(irq_data, true); 443 - if (ret) 444 - goto cleanup; 445 - if (info->flags & MSI_FLAG_MUST_REACTIVATE) 446 - irqd_clr_activated(irq_data); 447 } 448 } 449 return 0;
··· 339 return ret; 340 } 341 342 + /* 343 + * Carefully check whether the device can use reservation mode. If 344 + * reservation mode is enabled then the early activation will assign a 345 + * dummy vector to the device. If the PCI/MSI device does not support 346 + * masking of the entry then this can result in spurious interrupts when 347 + * the device driver is not absolutely careful. But even then a malfunction 348 + * of the hardware could result in a spurious interrupt on the dummy vector 349 + * and render the device unusable. If the entry can be masked then the core 350 + * logic will prevent the spurious interrupt and reservation mode can be 351 + * used. For now reservation mode is restricted to PCI/MSI. 352 + */ 353 + static bool msi_check_reservation_mode(struct irq_domain *domain, 354 + struct msi_domain_info *info, 355 + struct device *dev) 356 + { 357 + struct msi_desc *desc; 358 + 359 + if (domain->bus_token != DOMAIN_BUS_PCI_MSI) 360 + return false; 361 + 362 + if (!(info->flags & MSI_FLAG_MUST_REACTIVATE)) 363 + return false; 364 + 365 + if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask) 366 + return false; 367 + 368 + /* 369 + * Checking the first MSI descriptor is sufficient. MSIX supports 370 + * masking and MSI does so when the maskbit is set. 371 + */ 372 + desc = first_msi_entry(dev); 373 + return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit; 374 + } 375 + 376 /** 377 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain 378 * @domain: The domain to allocate from ··· 353 { 354 struct msi_domain_info *info = domain->host_data; 355 struct msi_domain_ops *ops = info->ops; 356 + struct irq_data *irq_data; 357 struct msi_desc *desc; 358 + msi_alloc_info_t arg; 359 int i, ret, virq; 360 + bool can_reserve; 361 362 ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg); 363 if (ret) ··· 385 if (ops->msi_finish) 386 ops->msi_finish(&arg, 0); 387 388 + can_reserve = msi_check_reservation_mode(domain, info, dev); 389 + 390 for_each_msi_entry(desc, dev) { 391 virq = desc->irq; 392 if (desc->nvec_used == 1) ··· 397 * the MSI entries before the PCI layer enables MSI in the 398 * card. Otherwise the card latches a random msi message. 399 */ 400 + if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) 401 + continue; 402 403 + irq_data = irq_domain_get_irq_data(domain, desc->irq); 404 + if (!can_reserve) 405 + irqd_clr_can_reserve(irq_data); 406 + ret = irq_domain_activate_irq(irq_data, can_reserve); 407 + if (ret) 408 + goto cleanup; 409 + } 410 + 411 + /* 412 + * If these interrupts use reservation mode, clear the activated bit 413 + * so request_irq() will assign the final vector. 414 + */ 415 + if (can_reserve) { 416 + for_each_msi_entry(desc, dev) { 417 irq_data = irq_domain_get_irq_data(domain, desc->irq); 418 + irqd_clr_activated(irq_data); 419 } 420 } 421 return 0;