Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
"A rather large update after the kaisered maintainer finally found time
to handle regression reports.

- The larger part addresses a regression caused by the x86 vector
management rework.

The reservation-based model does not work reliably for MSI
interrupts if they cannot be masked (yes, yet another hw
engineering trainwreck). The reason is that the reservation mode
assigns a dummy vector when the interrupt is allocated and switches
to a real vector when the interrupt is requested.

If the MSI entry cannot be masked then the initialization might
raise an interrupt before the interrupt is requested, which ends up
as a spurious interrupt and causes device malfunction and worse. The
fix is to exclude MSI interrupts which do not support masking from
reservation mode and assign a real vector right away.

- Extend the extra lockdep class setup for nested interrupts with a
class for the recently added irq_desc::request_mutex so lockdep can
differentiate and does not emit false positive warnings.

- A ratelimit guard for the bad irq printout, so the system does not
drown in dmesg spam when a bad irq comes back immediately"
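
The core of the first item is a new eligibility check in kernel/irq/msi.c (the full hunk is at the end of the diff below); condensed here for orientation:

static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
		return false;

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the maskbit is set.
	 */
	desc = first_msi_entry(dev);
	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
}

When this check fails, the MSI core clears IRQD_CAN_RESERVE on the irq_data and activates with reserve=false, so the device gets a real vector right away instead of the dummy one.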

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
genirq/msi, x86/vector: Prevent reservation mode for non maskable MSI
genirq/irqdomain: Rename early argument of irq_domain_activate_irq()
x86/vector: Use IRQD_CAN_RESERVE flag
genirq: Introduce IRQD_CAN_RESERVE flag
genirq/msi: Handle reactivation only on success
gpio: brcmstb: Make really use of the new lockdep class
genirq: Guard handle_bad_irq log messages
kernel/irq: Extend lockdep class for request mutex

+196 -72
+3 -1
arch/powerpc/sysdev/fsl_msi.c
···
 }
 
 static struct lock_class_key fsl_msi_irq_class;
+static struct lock_class_key fsl_msi_irq_request_class;
 
 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
 			       int offset, int irq_index)
···
 		dev_err(&dev->dev, "No memory for MSI cascade data\n");
 		return -ENOMEM;
 	}
-	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
+	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
+			      &fsl_msi_irq_request_class);
 	cascade_data->index = offset;
 	cascade_data->msi_data = msi;
 	cascade_data->virq = virt_msir;
+1 -1
arch/x86/include/asm/irqdomain.h
···
 extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
 			      unsigned int nr_irqs);
 extern int mp_irqdomain_activate(struct irq_domain *domain,
-				 struct irq_data *irq_data, bool early);
+				 struct irq_data *irq_data, bool reserve);
 extern void mp_irqdomain_deactivate(struct irq_domain *domain,
 				    struct irq_data *irq_data);
 extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
+8 -8
arch/x86/include/asm/trace/irq_vectors.h
···
 DECLARE_EVENT_CLASS(vector_activate,
 
 	TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve,
-		 bool early),
+		 bool reserve),
 
-	TP_ARGS(irq, is_managed, can_reserve, early),
+	TP_ARGS(irq, is_managed, can_reserve, reserve),
 
 	TP_STRUCT__entry(
 		__field( unsigned int, irq )
 		__field( bool, is_managed )
 		__field( bool, can_reserve )
-		__field( bool, early )
+		__field( bool, reserve )
 	),
 
 	TP_fast_assign(
 		__entry->irq = irq;
 		__entry->is_managed = is_managed;
 		__entry->can_reserve = can_reserve;
-		__entry->early = early;
+		__entry->reserve = reserve;
 	),
 
-	TP_printk("irq=%u is_managed=%d can_reserve=%d early=%d",
+	TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d",
 		  __entry->irq, __entry->is_managed, __entry->can_reserve,
-		  __entry->early)
+		  __entry->reserve)
 );
 
 #define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name)				\
 DEFINE_EVENT_FN(vector_activate, name,					\
 	TP_PROTO(unsigned int irq, bool is_managed,			\
-		 bool can_reserve, bool early),				\
-	TP_ARGS(irq, is_managed, can_reserve, early), NULL, NULL);	\
+		 bool can_reserve, bool reserve),			\
+	TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL);	\
 
 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate);
 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
+1 -1
arch/x86/kernel/apic/io_apic.c
···
 }
 
 int mp_irqdomain_activate(struct irq_domain *domain,
-			  struct irq_data *irq_data, bool early)
+			  struct irq_data *irq_data, bool reserve)
 {
 	unsigned long flags;
 
+16 -4
arch/x86/kernel/apic/vector.c
···
 	irq_matrix_reserve(vector_matrix);
 	apicd->can_reserve = true;
 	apicd->has_reserved = true;
+	irqd_set_can_reserve(irqd);
 	trace_vector_reserve(irqd->irq, 0);
 	vector_assign_managed_shutdown(irqd);
 }
···
 	int ret;
 
 	ret = assign_irq_vector_any_locked(irqd);
-	if (!ret)
+	if (!ret) {
 		apicd->has_reserved = false;
+		/*
+		 * Core might have disabled reservation mode after
+		 * allocating the irq descriptor. Ideally this should
+		 * happen before allocation time, but that would require
+		 * completely convoluted ways of transporting that
+		 * information.
+		 */
+		if (!irqd_can_reserve(irqd))
+			apicd->can_reserve = false;
+	}
 	return ret;
 }
 
···
 }
 
 static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
-			       bool early)
+			       bool reserve)
 {
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	unsigned long flags;
 	int ret = 0;
 
 	trace_vector_activate(irqd->irq, apicd->is_managed,
-			      apicd->can_reserve, early);
+			      apicd->can_reserve, reserve);
 
 	/* Nothing to do for fixed assigned vectors */
 	if (!apicd->can_reserve && !apicd->is_managed)
 		return 0;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	if (early || irqd_is_managed_and_shutdown(irqd))
+	if (reserve || irqd_is_managed_and_shutdown(irqd))
 		vector_assign_managed_shutdown(irqd);
 	else if (apicd->is_managed)
 		ret = activate_managed(irqd);
···
 	} else {
 		/* Release the vector */
 		apicd->can_reserve = true;
+		irqd_set_can_reserve(irqd);
 		clear_irq_vector(irqd);
 		realloc = true;
 	}
+1 -1
arch/x86/platform/uv/uv_irq.c
···
  * on the specified blade to allow the sending of MSIs to the specified CPU.
  */
 static int uv_domain_activate(struct irq_domain *domain,
-			      struct irq_data *irq_data, bool early)
+			      struct irq_data *irq_data, bool reserve)
 {
 	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
 	return 0;
+2 -1
drivers/gpio/gpio-bcm-kona.c
···
  * category than their parents, so it won't report false recursion.
  */
 static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
 
 static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
 				 irq_hw_number_t hwirq)
···
 	ret = irq_set_chip_data(irq, d->host_data);
 	if (ret < 0)
 		return ret;
-	irq_set_lockdep_class(irq, &gpio_lock_class);
+	irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
 	irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
 	irq_set_noprobe(irq);
 
+3 -1
drivers/gpio/gpio-brcmstb.c
···
  * category than their parents, so it won't report false recursion.
  */
 static struct lock_class_key brcmstb_gpio_irq_lock_class;
+static struct lock_class_key brcmstb_gpio_irq_request_class;
 
 
 static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
···
 	ret = irq_set_chip_data(irq, &bank->gc);
 	if (ret < 0)
 		return ret;
-	irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+	irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
+			      &brcmstb_gpio_irq_request_class);
 	irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
 	irq_set_noprobe(irq);
 	return 0;
+3 -1
drivers/gpio/gpio-tegra.c
···
  * than their parents, so it won't report false recursion.
  */
 static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
 
 static int tegra_gpio_probe(struct platform_device *pdev)
 {
···
 
 		bank = &tgi->bank_info[GPIO_BANK(gpio)];
 
-		irq_set_lockdep_class(irq, &gpio_lock_class);
+		irq_set_lockdep_class(irq, &gpio_lock_class,
+				      &gpio_request_class);
 		irq_set_chip_data(irq, bank);
 		irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
 	}
+1 -1
drivers/gpio/gpio-xgene-sb.c
···
 
 static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
 					 struct irq_data *irq_data,
-					 bool early)
+					 bool reserve)
 {
 	struct xgene_gpio_sb *priv = d->host_data;
 	u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
+18 -9
drivers/gpio/gpiolib.c
···
 
 static void gpiochip_free_hogs(struct gpio_chip *chip);
 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-				struct lock_class_key *key);
+				struct lock_class_key *lock_key,
+				struct lock_class_key *request_key);
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
 static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
···
 }
 
 int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
-			       struct lock_class_key *key)
+			       struct lock_class_key *lock_key,
+			       struct lock_class_key *request_key)
 {
 	unsigned long flags;
 	int status = 0;
···
 	if (status)
 		goto err_remove_from_list;
 
-	status = gpiochip_add_irqchip(chip, key);
+	status = gpiochip_add_irqchip(chip, lock_key, request_key);
 	if (status)
 		goto err_remove_chip;
 
···
 	 * This lock class tells lockdep that GPIO irqs are in a different
 	 * category than their parents, so it won't report false recursion.
 	 */
-	irq_set_lockdep_class(irq, chip->irq.lock_key);
+	irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
 	irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
 	/* Chips that use nested thread handlers have them marked */
 	if (chip->irq.threaded)
···
 /**
  * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
  * @gpiochip: the GPIO chip to add the IRQ chip to
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
  */
 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-				struct lock_class_key *lock_key)
+				struct lock_class_key *lock_key,
+				struct lock_class_key *request_key)
 {
 	struct irq_chip *irqchip = gpiochip->irq.chip;
 	const struct irq_domain_ops *ops;
···
 	gpiochip->to_irq = gpiochip_to_irq;
 	gpiochip->irq.default_type = type;
 	gpiochip->irq.lock_key = lock_key;
+	gpiochip->irq.request_key = request_key;
 
 	if (gpiochip->irq.domain_ops)
 		ops = gpiochip->irq.domain_ops;
···
  * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
  * to have the core avoid setting up any default type in the hardware.
  * @threaded: whether this irqchip uses a nested thread handler
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
  *
  * This function closely associates a certain irqchip with a certain
  * gpiochip, providing an irq domain to translate the local IRQs to
···
 			     irq_flow_handler_t handler,
 			     unsigned int type,
 			     bool threaded,
-			     struct lock_class_key *lock_key)
+			     struct lock_class_key *lock_key,
+			     struct lock_class_key *request_key)
 {
 	struct device_node *of_node;
 
···
 	gpiochip->irq.default_type = type;
 	gpiochip->to_irq = gpiochip_to_irq;
 	gpiochip->irq.lock_key = lock_key;
+	gpiochip->irq.request_key = request_key;
 	gpiochip->irq.domain = irq_domain_add_simple(of_node,
 					gpiochip->ngpio, first_irq,
 					&gpiochip_domain_ops, gpiochip);
···
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
 static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-				       struct lock_class_key *key)
+				       struct lock_class_key *lock_key,
+				       struct lock_class_key *request_key)
 {
 	return 0;
 }
+1 -1
drivers/iommu/amd_iommu.c
···
 			       struct irq_cfg *cfg);
 
 static int irq_remapping_activate(struct irq_domain *domain,
-				  struct irq_data *irq_data, bool early)
+				  struct irq_data *irq_data, bool reserve)
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
+1 -1
drivers/iommu/intel_irq_remapping.c
···
 }
 
 static int intel_irq_remapping_activate(struct irq_domain *domain,
-					struct irq_data *irq_data, bool early)
+					struct irq_data *irq_data, bool reserve)
 {
 	intel_ir_reconfigure_irte(irq_data, true);
 	return 0;
+2 -2
drivers/irqchip/irq-gic-v3-its.c
···
 }
 
 static int its_irq_domain_activate(struct irq_domain *domain,
-				   struct irq_data *d, bool early)
+				   struct irq_data *d, bool reserve)
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
···
 }
 
 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-				       struct irq_data *d, bool early)
+				       struct irq_data *d, bool reserve)
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	struct its_node *its;
+5 -1
drivers/irqchip/irq-renesas-intc-irqpin.c
···
  */
 static struct lock_class_key intc_irqpin_irq_lock_class;
 
+/* And this is for the request mutex */
+static struct lock_class_key intc_irqpin_irq_request_class;
+
 static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
 				      irq_hw_number_t hw)
 {
···
 
 	intc_irqpin_dbg(&p->irq[hw], "map");
 	irq_set_chip_data(virq, h->host_data);
-	irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
+	irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
+			      &intc_irqpin_irq_request_class);
 	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
 	return 0;
 }
+3 -1
drivers/mfd/arizona-irq.c
···
 };
 
 static struct lock_class_key arizona_irq_lock_class;
+static struct lock_class_key arizona_irq_request_class;
 
 static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
 			   irq_hw_number_t hw)
···
 	struct arizona *data = h->host_data;
 
 	irq_set_chip_data(virq, data);
-	irq_set_lockdep_class(virq, &arizona_irq_lock_class);
+	irq_set_lockdep_class(virq, &arizona_irq_lock_class,
+			      &arizona_irq_request_class);
 	irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
 	irq_set_noprobe(virq);
+4 -1
drivers/pinctrl/pinctrl-single.c
···
  */
 static struct lock_class_key pcs_lock_class;
 
+/* Class for the IRQ request mutex */
+static struct lock_class_key pcs_request_class;
+
 /*
  * REVISIT: Reads and writes could eventually use regmap or something
  * generic. But at least on omaps, some mux registers are performance
···
 	irq_set_chip_data(irq, pcs_soc);
 	irq_set_chip_and_handler(irq, &pcs->chip,
 				 handle_level_irq);
-	irq_set_lockdep_class(irq, &pcs_lock_class);
+	irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class);
 	irq_set_noprobe(irq);
 
 	return 0;
+1 -1
drivers/pinctrl/stm32/pinctrl-stm32.c
···
 }
 
 static int stm32_gpio_domain_activate(struct irq_domain *d,
-				      struct irq_data *irq_data, bool early)
+				      struct irq_data *irq_data, bool reserve)
 {
 	struct stm32_gpio_bank *bank = d->host_data;
 	struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+21 -12
include/linux/gpio/driver.h
···
 	/**
 	 * @lock_key:
 	 *
-	 * Per GPIO IRQ chip lockdep class.
+	 * Per GPIO IRQ chip lockdep classes.
 	 */
 	struct lock_class_key *lock_key;
+	struct lock_class_key *request_key;
 
 	/**
 	 * @parent_handler:
···
 
 /* add/remove chips */
 extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
-				      struct lock_class_key *lock_key);
+				      struct lock_class_key *lock_key,
+				      struct lock_class_key *request_key);
 
 /**
  * gpiochip_add_data() - register a gpio_chip
···
  */
 #ifdef CONFIG_LOCKDEP
 #define gpiochip_add_data(chip, data) ({		\
-		static struct lock_class_key key;	\
-		gpiochip_add_data_with_key(chip, data, &key); \
+		static struct lock_class_key lock_key;	\
+		static struct lock_class_key request_key;	  \
+		gpiochip_add_data_with_key(chip, data, &lock_key, \
+					   &request_key);	  \
 	})
 #else
-#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL)
+#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
 #endif
 
 static inline int gpiochip_add(struct gpio_chip *chip)
···
 			     irq_flow_handler_t handler,
 			     unsigned int type,
 			     bool threaded,
-			     struct lock_class_key *lock_key);
+			     struct lock_class_key *lock_key,
+			     struct lock_class_key *request_key);
 
 #ifdef CONFIG_LOCKDEP
 
···
 					  irq_flow_handler_t handler,
 					  unsigned int type)
 {
-	static struct lock_class_key key;
+	static struct lock_class_key lock_key;
+	static struct lock_class_key request_key;
 
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, false, &key);
+					handler, type, false,
+					&lock_key, &request_key);
 }
 
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
···
 					  unsigned int type)
 {
 
-	static struct lock_class_key key;
+	static struct lock_class_key lock_key;
+	static struct lock_class_key request_key;
 
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, true, &key);
+					handler, type, true,
+					&lock_key, &request_key);
 }
 #else
 static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
···
 					  unsigned int type)
 {
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, false, NULL);
+					handler, type, false, NULL, NULL);
 }
 
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
···
 					  unsigned int type)
 {
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, true, NULL);
+					handler, type, true, NULL, NULL);
 }
 #endif /* CONFIG_LOCKDEP */
 
+17
include/linux/irq.h
···
  *				  mask. Applies only to affinity managed irqs.
  * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
  * IRQD_DEFAULT_TRIGGER_SET	- Expected trigger already been set
+ * IRQD_CAN_RESERVE		- Can use reservation mode
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
···
 	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
 	IRQD_SINGLE_TARGET		= (1 << 24),
 	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
+	IRQD_CAN_RESERVE		= (1 << 26),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
···
 static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
 {
 	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
+}
+
+static inline void irqd_set_can_reserve(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_CAN_RESERVE;
+}
+
+static inline void irqd_clr_can_reserve(struct irq_data *d)
+{
+	__irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
+}
+
+static inline bool irqd_can_reserve(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_CAN_RESERVE;
 }
 
 #undef __irqd_to_state
+6 -3
include/linux/irqdesc.h
···
 }
 
 static inline void
-irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
+irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+		      struct lock_class_key *request_class)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (desc)
-		lockdep_set_class(&desc->lock, class);
+	if (desc) {
+		lockdep_set_class(&desc->lock, lock_class);
+		lockdep_set_class(&desc->request_mutex, request_class);
+	}
 }
 
 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
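
With irq_set_lockdep_class() now taking the request-mutex class as a second argument, every caller needs a second static key. Drivers not covered by this series follow the same pattern as the hunks above; a minimal sketch with illustrative names (the foo_* identifiers are placeholders, not part of this pull):

static struct lock_class_key foo_irq_lock_class;
static struct lock_class_key foo_irq_request_class;	/* for irq_desc::request_mutex */

static int foo_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	irq_set_chip_data(virq, d->host_data);
	/* One class for desc->lock, one for the new request_mutex */
	irq_set_lockdep_class(virq, &foo_irq_lock_class,
			      &foo_irq_request_class);
	/* foo_irq_chip is assumed to be the driver's struct irq_chip */
	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
	irq_set_noprobe(virq);
	return 0;
}
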
+1 -1
include/linux/irqdomain.h
···
 		     unsigned int nr_irqs, void *arg);
 	void (*free)(struct irq_domain *d, unsigned int virq,
 		     unsigned int nr_irqs);
-	int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
+	int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
 	void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
 	int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
 			 unsigned long *out_hwirq, unsigned int *out_type);
+5
kernel/irq/debug.h
···
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
+	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+	if (!__ratelimit(&ratelimit))
+		return;
+
 	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
 		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
 	printk("->handle_irq(): %p, ", desc->handle_irq);
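
For reference, DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5) allows at most five of these dumps per five-second window. The same guard pattern works anywhere a noisy diagnostic needs throttling; a minimal sketch (the foo_* names are illustrative, not part of this pull):

#include <linux/ratelimit.h>

static void foo_report_bad_event(int event)
{
	/* Allow at most 5 reports per 5 second window, drop the rest. */
	static DEFINE_RATELIMIT_STATE(foo_rs, 5 * HZ, 5);

	if (!__ratelimit(&foo_rs))
		return;

	pr_err("foo: unexpected event %d\n", event);
}
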
+1
kernel/irq/debugfs.c
···
 	BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
 	BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
 	BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+	BIT_MASK_DESCR(IRQD_CAN_RESERVE),
 
 	BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
 
+7 -4
kernel/irq/generic-chip.c
···
 EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
 
 /*
- * Separate lockdep class for interrupt chip which can nest irq_desc
- * lock.
+ * Separate lockdep classes for interrupt chip which can nest irq_desc
+ * lock and request mutex.
  */
 static struct lock_class_key irq_nested_lock_class;
+static struct lock_class_key irq_nested_request_class;
 
 /*
  * irq_map_generic_chip - Map a generic chip for an irq domain
···
 	set_bit(idx, &gc->installed);
 
 	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
-		irq_set_lockdep_class(virq, &irq_nested_lock_class);
+		irq_set_lockdep_class(virq, &irq_nested_lock_class,
+				      &irq_nested_request_class);
 
 	if (chip->irq_calc_mask)
 		chip->irq_calc_mask(data);
···
 			continue;
 
 		if (flags & IRQ_GC_INIT_NESTED_LOCK)
-			irq_set_lockdep_class(i, &irq_nested_lock_class);
+			irq_set_lockdep_class(i, &irq_nested_lock_class,
+					      &irq_nested_request_class);
 
 		if (!(flags & IRQ_GC_NO_MASK)) {
 			struct irq_data *d = irq_get_irq_data(i);
+1 -1
kernel/irq/internals.h
···
 #endif /* !CONFIG_GENERIC_PENDING_IRQ */
 
 #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
-static inline int irq_domain_activate_irq(struct irq_data *data, bool early)
+static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
 {
 	irqd_set_activated(data);
 	return 0;
+7 -6
kernel/irq/irqdomain.c
···
 	}
 }
 
-static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
+static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
 {
 	int ret = 0;
 
···
 
 		if (irqd->parent_data)
 			ret = __irq_domain_activate_irq(irqd->parent_data,
-							early);
+							reserve);
 		if (!ret && domain->ops->activate) {
-			ret = domain->ops->activate(domain, irqd, early);
+			ret = domain->ops->activate(domain, irqd, reserve);
 			/* Rollback in case of error */
 			if (ret && irqd->parent_data)
 				__irq_domain_deactivate_irq(irqd->parent_data);
···
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *			     interrupt
- * @irq_data:	outermost irq_data associated with interrupt
+ * @irq_data:	Outermost irq_data associated with interrupt
+ * @reserve:	If set only reserve an interrupt vector instead of assigning one
  *
  * This is the second step to call domain_ops->activate to program interrupt
  * controllers, so the interrupt could actually get delivered.
  */
-int irq_domain_activate_irq(struct irq_data *irq_data, bool early)
+int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
 {
 	int ret = 0;
 
 	if (!irqd_is_activated(irq_data))
-		ret = __irq_domain_activate_irq(irq_data, early);
+		ret = __irq_domain_activate_irq(irq_data, reserve);
 	if (!ret)
 		irqd_set_activated(irq_data);
 	return ret;
+56 -8
kernel/irq/msi.c
···
 	return ret;
 }
 
+/*
+ * Carefully check whether the device can use reservation mode. If
+ * reservation mode is enabled then the early activation will assign a
+ * dummy vector to the device. If the PCI/MSI device does not support
+ * masking of the entry then this can result in spurious interrupts when
+ * the device driver is not absolutely careful. But even then a malfunction
+ * of the hardware could result in a spurious interrupt on the dummy vector
+ * and render the device unusable. If the entry can be masked then the core
+ * logic will prevent the spurious interrupt and reservation mode can be
+ * used. For now reservation mode is restricted to PCI/MSI.
+ */
+static bool msi_check_reservation_mode(struct irq_domain *domain,
+				       struct msi_domain_info *info,
+				       struct device *dev)
+{
+	struct msi_desc *desc;
+
+	if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
+		return false;
+
+	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
+		return false;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
+		return false;
+
+	/*
+	 * Checking the first MSI descriptor is sufficient. MSIX supports
+	 * masking and MSI does so when the maskbit is set.
+	 */
+	desc = first_msi_entry(dev);
+	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
+}
+
 /**
  * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
  * @domain:	The domain to allocate from
···
 {
 	struct msi_domain_info *info = domain->host_data;
 	struct msi_domain_ops *ops = info->ops;
-	msi_alloc_info_t arg;
+	struct irq_data *irq_data;
 	struct msi_desc *desc;
+	msi_alloc_info_t arg;
 	int i, ret, virq;
+	bool can_reserve;
 
 	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
 	if (ret)
···
 	if (ops->msi_finish)
 		ops->msi_finish(&arg, 0);
 
+	can_reserve = msi_check_reservation_mode(domain, info, dev);
+
 	for_each_msi_entry(desc, dev) {
 		virq = desc->irq;
 		if (desc->nvec_used == 1)
···
 		 * the MSI entries before the PCI layer enables MSI in the
 		 * card. Otherwise the card latches a random msi message.
 		 */
-		if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
-			struct irq_data *irq_data;
+		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+			continue;
 
+		irq_data = irq_domain_get_irq_data(domain, desc->irq);
+		if (!can_reserve)
+			irqd_clr_can_reserve(irq_data);
+		ret = irq_domain_activate_irq(irq_data, can_reserve);
+		if (ret)
+			goto cleanup;
+	}
+
+	/*
+	 * If these interrupts use reservation mode, clear the activated bit
+	 * so request_irq() will assign the final vector.
+	 */
+	if (can_reserve) {
+		for_each_msi_entry(desc, dev) {
 			irq_data = irq_domain_get_irq_data(domain, desc->irq);
-			ret = irq_domain_activate_irq(irq_data, true);
-			if (ret)
-				goto cleanup;
-			if (info->flags & MSI_FLAG_MUST_REACTIVATE)
-				irqd_clr_activated(irq_data);
+			irqd_clr_activated(irq_data);
 		}
 	}
 	return 0;