Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'irq-core-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq core updates from Thomas Gleixner:
"Updates for the generic interrupt subsystem core code:

- Address a long standing subtle problem in the CPU hotplug code for
affinity-managed interrupts.

Affinity-managed interrupts are shut down by the core code when the
last CPU in the affinity set goes offline and started up again when
the first CPU in the affinity set becomes online again.

This unfortunately does not take into account whether an interrupt
has been disabled before the last CPU goes offline and starts up
the interrupt unconditionally when the first CPU becomes online
again.

That's obviously not what drivers expect.

Address this by preserving the disabled state for affinity-managed
interrupts across these CPU hotplug operations. All non-managed
interrupts are not affected by this because startup/shutdown is
coupled to request/free_irq() which obviously has to reset state.

- Support three-cell scheme interrupts to allow GPIO drivers to
specify interrupts from an already existing scheme

- Switch the interrupt subsystem core to lock guards. This gets rid
of quite some copy & pasta boilerplate code all over the place.

- The usual small cleanups and improvements all over the place"

* tag 'irq-core-2025-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (59 commits)
genirq/irqdesc: Remove double locking in hwirq_show()
genirq: Retain disable depth for managed interrupts across CPU hotplug
genirq: Bump the size of the local variable for sprintf()
genirq/manage: Use the correct lock guard in irq_set_irq_wake()
genirq: Consistently use '%u' format specifier for unsigned int variables
genirq: Ensure flags in lock guard is consistently initialized
genirq: Fix inverted condition in handle_nested_irq()
genirq/cpuhotplug: Fix up lock guards conversion brainf..t
genirq: Use scoped_guard() to shut clang up
genirq: Remove unused remove_percpu_irq()
genirq: Remove irq_[get|put]_desc*()
genirq/manage: Rework irq_set_irqchip_state()
genirq/manage: Rework irq_get_irqchip_state()
genirq/manage: Rework teardown_percpu_nmi()
genirq/manage: Rework prepare_percpu_nmi()
genirq/manage: Rework disable_percpu_irq()
genirq/manage: Rework irq_percpu_is_enabled()
genirq/manage: Rework enable_percpu_irq()
genirq/manage: Rework irq_set_parent()
genirq/manage: Rework can_request_irq()
...

+1008 -1362
+1 -1
include/linux/interrupt.h
··· 140 140 /* 141 141 * If a (PCI) device interrupt is not connected we set dev->irq to 142 142 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we 143 - * can distingiush that case from other error returns. 143 + * can distinguish that case from other error returns. 144 144 * 145 145 * 0x80000000 is guaranteed to be outside the available range of interrupts 146 146 * and easy to distinguish from other possible incorrect values.
+1 -2
include/linux/irq.h
··· 597 597 598 598 struct irqaction; 599 599 extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); 600 - extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); 601 600 602 601 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE 603 602 extern void irq_cpu_online(void); ··· 699 700 extern int noirqdebug_setup(char *str); 700 701 701 702 /* Checks whether the interrupt can be requested by request_irq(): */ 702 - extern int can_request_irq(unsigned int irq, unsigned long irqflags); 703 + extern bool can_request_irq(unsigned int irq, unsigned long irqflags); 703 704 704 705 /* Dummy irq-chip implementations: */ 705 706 extern struct irq_chip no_irq_chip;
+9 -9
include/linux/irqdomain.h
··· 571 571 int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, 572 572 const u32 *intspec, unsigned int intsize, 573 573 irq_hw_number_t *out_hwirq, unsigned int *out_type); 574 + int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr, 575 + const u32 *intspec, unsigned int intsize, 576 + irq_hw_number_t *out_hwirq, unsigned int *out_type); 574 577 575 - int irq_domain_translate_twocell(struct irq_domain *d, 576 - struct irq_fwspec *fwspec, 577 - unsigned long *out_hwirq, 578 - unsigned int *out_type); 579 - 580 - int irq_domain_translate_onecell(struct irq_domain *d, 581 - struct irq_fwspec *fwspec, 582 - unsigned long *out_hwirq, 583 - unsigned int *out_type); 578 + int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec, 579 + unsigned long *out_hwirq, unsigned int *out_type); 580 + int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec, 581 + unsigned long *out_hwirq, unsigned int *out_type); 582 + int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec, 583 + unsigned long *out_hwirq, unsigned int *out_type); 584 584 585 585 /* IPI functions */ 586 586 int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
+9 -17
kernel/irq/autoprobe.c
··· 43 43 * flush such a longstanding irq before considering it as spurious. 44 44 */ 45 45 for_each_irq_desc_reverse(i, desc) { 46 - raw_spin_lock_irq(&desc->lock); 46 + guard(raw_spinlock_irq)(&desc->lock); 47 47 if (!desc->action && irq_settings_can_probe(desc)) { 48 48 /* 49 49 * Some chips need to know about probing in 50 50 * progress: 51 51 */ 52 52 if (desc->irq_data.chip->irq_set_type) 53 - desc->irq_data.chip->irq_set_type(&desc->irq_data, 54 - IRQ_TYPE_PROBE); 53 + desc->irq_data.chip->irq_set_type(&desc->irq_data, IRQ_TYPE_PROBE); 55 54 irq_activate_and_startup(desc, IRQ_NORESEND); 56 55 } 57 - raw_spin_unlock_irq(&desc->lock); 58 56 } 59 57 60 58 /* Wait for longstanding interrupts to trigger. */ ··· 64 66 * happened in the previous stage, it may have masked itself) 65 67 */ 66 68 for_each_irq_desc_reverse(i, desc) { 67 - raw_spin_lock_irq(&desc->lock); 69 + guard(raw_spinlock_irq)(&desc->lock); 68 70 if (!desc->action && irq_settings_can_probe(desc)) { 69 71 desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; 70 72 if (irq_activate_and_startup(desc, IRQ_NORESEND)) 71 73 desc->istate |= IRQS_PENDING; 72 74 } 73 - raw_spin_unlock_irq(&desc->lock); 74 75 } 75 76 76 77 /* ··· 81 84 * Now filter out any obviously spurious interrupts 82 85 */ 83 86 for_each_irq_desc(i, desc) { 84 - raw_spin_lock_irq(&desc->lock); 85 - 87 + guard(raw_spinlock_irq)(&desc->lock); 86 88 if (desc->istate & IRQS_AUTODETECT) { 87 89 /* It triggered already - consider it spurious. 
*/ 88 90 if (!(desc->istate & IRQS_WAITING)) { 89 91 desc->istate &= ~IRQS_AUTODETECT; 90 92 irq_shutdown_and_deactivate(desc); 91 - } else 92 - if (i < 32) 93 - mask |= 1 << i; 93 + } else if (i < 32) { 94 + mask |= 1 << i; 95 + } 94 96 } 95 - raw_spin_unlock_irq(&desc->lock); 96 97 } 97 98 98 99 return mask; ··· 116 121 int i; 117 122 118 123 for_each_irq_desc(i, desc) { 119 - raw_spin_lock_irq(&desc->lock); 124 + guard(raw_spinlock_irq)(&desc->lock); 120 125 if (desc->istate & IRQS_AUTODETECT) { 121 126 if (i < 16 && !(desc->istate & IRQS_WAITING)) 122 127 mask |= 1 << i; ··· 124 129 desc->istate &= ~IRQS_AUTODETECT; 125 130 irq_shutdown_and_deactivate(desc); 126 131 } 127 - raw_spin_unlock_irq(&desc->lock); 128 132 } 129 133 mutex_unlock(&probing_active); 130 134 ··· 154 160 struct irq_desc *desc; 155 161 156 162 for_each_irq_desc(i, desc) { 157 - raw_spin_lock_irq(&desc->lock); 158 - 163 + guard(raw_spinlock_irq)(&desc->lock); 159 164 if (desc->istate & IRQS_AUTODETECT) { 160 165 if (!(desc->istate & IRQS_WAITING)) { 161 166 if (!nr_of_irqs) ··· 164 171 desc->istate &= ~IRQS_AUTODETECT; 165 172 irq_shutdown_and_deactivate(desc); 166 173 } 167 - raw_spin_unlock_irq(&desc->lock); 168 174 } 169 175 mutex_unlock(&probing_active); 170 176
+265 -368
kernel/irq/chip.c
··· 34 34 }; 35 35 36 36 /** 37 - * irq_set_chip - set the irq chip for an irq 38 - * @irq: irq number 39 - * @chip: pointer to irq chip description structure 37 + * irq_set_chip - set the irq chip for an irq 38 + * @irq: irq number 39 + * @chip: pointer to irq chip description structure 40 40 */ 41 41 int irq_set_chip(unsigned int irq, const struct irq_chip *chip) 42 42 { 43 - unsigned long flags; 44 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 43 + int ret = -EINVAL; 45 44 46 - if (!desc) 47 - return -EINVAL; 48 - 49 - desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); 50 - irq_put_desc_unlock(desc, flags); 51 - /* 52 - * For !CONFIG_SPARSE_IRQ make the irq show up in 53 - * allocated_irqs. 54 - */ 55 - irq_mark_irq(irq); 56 - return 0; 45 + scoped_irqdesc_get_and_lock(irq, 0) { 46 + scoped_irqdesc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); 47 + ret = 0; 48 + } 49 + /* For !CONFIG_SPARSE_IRQ make the irq show up in allocated_irqs. */ 50 + if (!ret) 51 + irq_mark_irq(irq); 52 + return ret; 57 53 } 58 54 EXPORT_SYMBOL(irq_set_chip); 59 55 60 56 /** 61 - * irq_set_irq_type - set the irq trigger type for an irq 62 - * @irq: irq number 63 - * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 57 + * irq_set_irq_type - set the irq trigger type for an irq 58 + * @irq: irq number 59 + * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 64 60 */ 65 61 int irq_set_irq_type(unsigned int irq, unsigned int type) 66 62 { 67 - unsigned long flags; 68 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 69 - int ret = 0; 70 - 71 - if (!desc) 72 - return -EINVAL; 73 - 74 - ret = __irq_set_trigger(desc, type); 75 - irq_put_desc_busunlock(desc, flags); 76 - return ret; 63 + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) 64 + return __irq_set_trigger(scoped_irqdesc, type); 65 + return -EINVAL; 77 66 } 78 67 EXPORT_SYMBOL(irq_set_irq_type); 79 68 80 69 /** 81 - * 
irq_set_handler_data - set irq handler data for an irq 82 - * @irq: Interrupt number 83 - * @data: Pointer to interrupt specific data 70 + * irq_set_handler_data - set irq handler data for an irq 71 + * @irq: Interrupt number 72 + * @data: Pointer to interrupt specific data 84 73 * 85 - * Set the hardware irq controller data for an irq 74 + * Set the hardware irq controller data for an irq 86 75 */ 87 76 int irq_set_handler_data(unsigned int irq, void *data) 88 77 { 89 - unsigned long flags; 90 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 91 - 92 - if (!desc) 93 - return -EINVAL; 94 - desc->irq_common_data.handler_data = data; 95 - irq_put_desc_unlock(desc, flags); 96 - return 0; 78 + scoped_irqdesc_get_and_lock(irq, 0) { 79 + scoped_irqdesc->irq_common_data.handler_data = data; 80 + return 0; 81 + } 82 + return -EINVAL; 97 83 } 98 84 EXPORT_SYMBOL(irq_set_handler_data); 99 85 100 86 /** 101 - * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset 102 - * @irq_base: Interrupt number base 103 - * @irq_offset: Interrupt number offset 104 - * @entry: Pointer to MSI descriptor data 87 + * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset 88 + * @irq_base: Interrupt number base 89 + * @irq_offset: Interrupt number offset 90 + * @entry: Pointer to MSI descriptor data 105 91 * 106 - * Set the MSI descriptor entry for an irq at offset 92 + * Set the MSI descriptor entry for an irq at offset 107 93 */ 108 - int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, 109 - struct msi_desc *entry) 94 + int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry) 110 95 { 111 - unsigned long flags; 112 - struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 113 - 114 - if (!desc) 115 - return -EINVAL; 116 - desc->irq_common_data.msi_desc = entry; 117 - if (entry && !irq_offset) 118 - entry->irq = irq_base; 119 - 
irq_put_desc_unlock(desc, flags); 120 - return 0; 96 + scoped_irqdesc_get_and_lock(irq_base + irq_offset, IRQ_GET_DESC_CHECK_GLOBAL) { 97 + scoped_irqdesc->irq_common_data.msi_desc = entry; 98 + if (entry && !irq_offset) 99 + entry->irq = irq_base; 100 + return 0; 101 + } 102 + return -EINVAL; 121 103 } 122 104 123 105 /** 124 - * irq_set_msi_desc - set MSI descriptor data for an irq 125 - * @irq: Interrupt number 126 - * @entry: Pointer to MSI descriptor data 106 + * irq_set_msi_desc - set MSI descriptor data for an irq 107 + * @irq: Interrupt number 108 + * @entry: Pointer to MSI descriptor data 127 109 * 128 - * Set the MSI descriptor entry for an irq 110 + * Set the MSI descriptor entry for an irq 129 111 */ 130 112 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) 131 113 { ··· 115 133 } 116 134 117 135 /** 118 - * irq_set_chip_data - set irq chip data for an irq 119 - * @irq: Interrupt number 120 - * @data: Pointer to chip specific data 136 + * irq_set_chip_data - set irq chip data for an irq 137 + * @irq: Interrupt number 138 + * @data: Pointer to chip specific data 121 139 * 122 - * Set the hardware irq chip data for an irq 140 + * Set the hardware irq chip data for an irq 123 141 */ 124 142 int irq_set_chip_data(unsigned int irq, void *data) 125 143 { 126 - unsigned long flags; 127 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 128 - 129 - if (!desc) 130 - return -EINVAL; 131 - desc->irq_data.chip_data = data; 132 - irq_put_desc_unlock(desc, flags); 133 - return 0; 144 + scoped_irqdesc_get_and_lock(irq, 0) { 145 + scoped_irqdesc->irq_data.chip_data = data; 146 + return 0; 147 + } 148 + return -EINVAL; 134 149 } 135 150 EXPORT_SYMBOL(irq_set_chip_data); 136 151 ··· 202 223 return IRQ_STARTUP_ABORT; 203 224 return IRQ_STARTUP_MANAGED; 204 225 } 226 + 227 + void irq_startup_managed(struct irq_desc *desc) 228 + { 229 + /* 230 + * Only start it up when the disable depth is 1, so that a disable, 231 + * hotunplug, hotplug sequence does 
not end up enabling it during 232 + * hotplug unconditionally. 233 + */ 234 + desc->depth--; 235 + if (!desc->depth) 236 + irq_startup(desc, IRQ_RESEND, IRQ_START_COND); 237 + } 238 + 205 239 #else 206 240 static __always_inline int 207 241 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, ··· 282 290 ret = __irq_startup(desc); 283 291 break; 284 292 case IRQ_STARTUP_ABORT: 293 + desc->depth = 1; 285 294 irqd_set_managed_shutdown(d); 286 295 return 0; 287 296 } ··· 315 322 { 316 323 if (irqd_is_started(&desc->irq_data)) { 317 324 clear_irq_resend(desc); 318 - desc->depth = 1; 325 + /* 326 + * Increment disable depth, so that a managed shutdown on 327 + * CPU hotunplug preserves the actual disabled state when the 328 + * CPU comes back online. See irq_startup_managed(). 329 + */ 330 + desc->depth++; 331 + 319 332 if (desc->irq_data.chip->irq_shutdown) { 320 333 desc->irq_data.chip->irq_shutdown(&desc->irq_data); 321 334 irq_state_set_disabled(desc); ··· 449 450 unmask_irq(desc); 450 451 } 451 452 452 - /* 453 - * handle_nested_irq - Handle a nested irq from a irq thread 454 - * @irq: the interrupt number 455 - * 456 - * Handle interrupts which are nested into a threaded interrupt 457 - * handler. The handler function is called inside the calling 458 - * threads context. 
459 - */ 460 - void handle_nested_irq(unsigned int irq) 461 - { 462 - struct irq_desc *desc = irq_to_desc(irq); 463 - struct irqaction *action; 464 - irqreturn_t action_ret; 465 - 466 - might_sleep(); 467 - 468 - raw_spin_lock_irq(&desc->lock); 469 - 470 - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 471 - 472 - action = desc->action; 473 - if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { 474 - desc->istate |= IRQS_PENDING; 475 - raw_spin_unlock_irq(&desc->lock); 476 - return; 477 - } 478 - 479 - kstat_incr_irqs_this_cpu(desc); 480 - atomic_inc(&desc->threads_active); 481 - raw_spin_unlock_irq(&desc->lock); 482 - 483 - action_ret = IRQ_NONE; 484 - for_each_action_of_desc(desc, action) 485 - action_ret |= action->thread_fn(action->irq, action->dev_id); 486 - 487 - if (!irq_settings_no_debug(desc)) 488 - note_interrupt(desc, action_ret); 489 - 490 - wake_threads_waitq(desc); 491 - } 492 - EXPORT_SYMBOL_GPL(handle_nested_irq); 493 - 494 453 static bool irq_check_poll(struct irq_desc *desc) 495 454 { 496 455 if (!(desc->istate & IRQS_POLL_INPROGRESS)) ··· 456 499 return irq_wait_for_poll(desc); 457 500 } 458 501 459 - static bool irq_may_run(struct irq_desc *desc) 502 + static bool irq_can_handle_pm(struct irq_desc *desc) 460 503 { 461 504 unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED; 462 505 ··· 481 524 return irq_check_poll(desc); 482 525 } 483 526 484 - /** 485 - * handle_simple_irq - Simple and software-decoded IRQs. 486 - * @desc: the interrupt description structure for this irq 487 - * 488 - * Simple interrupts are either sent from a demultiplexing interrupt 489 - * handler or come from hardware, where no interrupt hardware control 490 - * is necessary. 491 - * 492 - * Note: The caller is expected to handle the ack, clear, mask and 493 - * unmask issues if necessary. 
494 - */ 495 - void handle_simple_irq(struct irq_desc *desc) 527 + static inline bool irq_can_handle_actions(struct irq_desc *desc) 496 528 { 497 - raw_spin_lock(&desc->lock); 498 - 499 - if (!irq_may_run(desc)) 500 - goto out_unlock; 501 - 502 529 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 503 530 504 531 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 505 532 desc->istate |= IRQS_PENDING; 506 - goto out_unlock; 533 + return false; 507 534 } 535 + return true; 536 + } 537 + 538 + static inline bool irq_can_handle(struct irq_desc *desc) 539 + { 540 + if (!irq_can_handle_pm(desc)) 541 + return false; 542 + 543 + return irq_can_handle_actions(desc); 544 + } 545 + 546 + /** 547 + * handle_nested_irq - Handle a nested irq from a irq thread 548 + * @irq: the interrupt number 549 + * 550 + * Handle interrupts which are nested into a threaded interrupt 551 + * handler. The handler function is called inside the calling threads 552 + * context. 553 + */ 554 + void handle_nested_irq(unsigned int irq) 555 + { 556 + struct irq_desc *desc = irq_to_desc(irq); 557 + struct irqaction *action; 558 + irqreturn_t action_ret; 559 + 560 + might_sleep(); 561 + 562 + scoped_guard(raw_spinlock_irq, &desc->lock) { 563 + if (!irq_can_handle_actions(desc)) 564 + return; 565 + 566 + action = desc->action; 567 + kstat_incr_irqs_this_cpu(desc); 568 + atomic_inc(&desc->threads_active); 569 + } 570 + 571 + action_ret = IRQ_NONE; 572 + for_each_action_of_desc(desc, action) 573 + action_ret |= action->thread_fn(action->irq, action->dev_id); 574 + 575 + if (!irq_settings_no_debug(desc)) 576 + note_interrupt(desc, action_ret); 577 + 578 + wake_threads_waitq(desc); 579 + } 580 + EXPORT_SYMBOL_GPL(handle_nested_irq); 581 + 582 + /** 583 + * handle_simple_irq - Simple and software-decoded IRQs. 
584 + * @desc: the interrupt description structure for this irq 585 + * 586 + * Simple interrupts are either sent from a demultiplexing interrupt 587 + * handler or come from hardware, where no interrupt hardware control is 588 + * necessary. 589 + * 590 + * Note: The caller is expected to handle the ack, clear, mask and unmask 591 + * issues if necessary. 592 + */ 593 + void handle_simple_irq(struct irq_desc *desc) 594 + { 595 + guard(raw_spinlock)(&desc->lock); 596 + 597 + if (!irq_can_handle(desc)) 598 + return; 508 599 509 600 kstat_incr_irqs_this_cpu(desc); 510 601 handle_irq_event(desc); 511 - 512 - out_unlock: 513 - raw_spin_unlock(&desc->lock); 514 602 } 515 603 EXPORT_SYMBOL_GPL(handle_simple_irq); 516 604 517 605 /** 518 - * handle_untracked_irq - Simple and software-decoded IRQs. 519 - * @desc: the interrupt description structure for this irq 606 + * handle_untracked_irq - Simple and software-decoded IRQs. 607 + * @desc: the interrupt description structure for this irq 520 608 * 521 - * Untracked interrupts are sent from a demultiplexing interrupt 522 - * handler when the demultiplexer does not know which device it its 523 - * multiplexed irq domain generated the interrupt. IRQ's handled 524 - * through here are not subjected to stats tracking, randomness, or 525 - * spurious interrupt detection. 609 + * Untracked interrupts are sent from a demultiplexing interrupt handler 610 + * when the demultiplexer does not know which device it its multiplexed irq 611 + * domain generated the interrupt. IRQ's handled through here are not 612 + * subjected to stats tracking, randomness, or spurious interrupt 613 + * detection. 526 614 * 527 - * Note: Like handle_simple_irq, the caller is expected to handle 528 - * the ack, clear, mask and unmask issues if necessary. 615 + * Note: Like handle_simple_irq, the caller is expected to handle the ack, 616 + * clear, mask and unmask issues if necessary. 
529 617 */ 530 618 void handle_untracked_irq(struct irq_desc *desc) 531 619 { 532 - raw_spin_lock(&desc->lock); 620 + scoped_guard(raw_spinlock, &desc->lock) { 621 + if (!irq_can_handle(desc)) 622 + return; 533 623 534 - if (!irq_may_run(desc)) 535 - goto out_unlock; 536 - 537 - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 538 - 539 - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 540 - desc->istate |= IRQS_PENDING; 541 - goto out_unlock; 624 + desc->istate &= ~IRQS_PENDING; 625 + irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); 542 626 } 543 - 544 - desc->istate &= ~IRQS_PENDING; 545 - irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); 546 - raw_spin_unlock(&desc->lock); 547 627 548 628 __handle_irq_event_percpu(desc); 549 629 550 - raw_spin_lock(&desc->lock); 551 - irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); 552 - 553 - out_unlock: 554 - raw_spin_unlock(&desc->lock); 630 + scoped_guard(raw_spinlock, &desc->lock) 631 + irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); 555 632 } 556 633 EXPORT_SYMBOL_GPL(handle_untracked_irq); 557 634 ··· 608 617 } 609 618 610 619 /** 611 - * handle_level_irq - Level type irq handler 612 - * @desc: the interrupt description structure for this irq 620 + * handle_level_irq - Level type irq handler 621 + * @desc: the interrupt description structure for this irq 613 622 * 614 - * Level type interrupts are active as long as the hardware line has 615 - * the active level. This may require to mask the interrupt and unmask 616 - * it after the associated handler has acknowledged the device, so the 617 - * interrupt line is back to inactive. 623 + * Level type interrupts are active as long as the hardware line has the 624 + * active level. This may require to mask the interrupt and unmask it after 625 + * the associated handler has acknowledged the device, so the interrupt 626 + * line is back to inactive. 
618 627 */ 619 628 void handle_level_irq(struct irq_desc *desc) 620 629 { 621 - raw_spin_lock(&desc->lock); 630 + guard(raw_spinlock)(&desc->lock); 622 631 mask_ack_irq(desc); 623 632 624 - if (!irq_may_run(desc)) 625 - goto out_unlock; 626 - 627 - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 628 - 629 - /* 630 - * If its disabled or no action available 631 - * keep it masked and get out of here 632 - */ 633 - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 634 - desc->istate |= IRQS_PENDING; 635 - goto out_unlock; 636 - } 633 + if (!irq_can_handle(desc)) 634 + return; 637 635 638 636 kstat_incr_irqs_this_cpu(desc); 639 637 handle_irq_event(desc); 640 638 641 639 cond_unmask_irq(desc); 642 - 643 - out_unlock: 644 - raw_spin_unlock(&desc->lock); 645 640 } 646 641 EXPORT_SYMBOL_GPL(handle_level_irq); 647 642 ··· 652 675 } 653 676 } 654 677 678 + static inline void cond_eoi_irq(struct irq_chip *chip, struct irq_data *data) 679 + { 680 + if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 681 + chip->irq_eoi(data); 682 + } 683 + 655 684 /** 656 - * handle_fasteoi_irq - irq handler for transparent controllers 657 - * @desc: the interrupt description structure for this irq 685 + * handle_fasteoi_irq - irq handler for transparent controllers 686 + * @desc: the interrupt description structure for this irq 658 687 * 659 - * Only a single callback will be issued to the chip: an ->eoi() 660 - * call when the interrupt has been serviced. This enables support 661 - * for modern forms of interrupt handlers, which handle the flow 662 - * details in hardware, transparently. 688 + * Only a single callback will be issued to the chip: an ->eoi() call when 689 + * the interrupt has been serviced. This enables support for modern forms 690 + * of interrupt handlers, which handle the flow details in hardware, 691 + * transparently. 
663 692 */ 664 693 void handle_fasteoi_irq(struct irq_desc *desc) 665 694 { 666 695 struct irq_chip *chip = desc->irq_data.chip; 667 696 668 - raw_spin_lock(&desc->lock); 697 + guard(raw_spinlock)(&desc->lock); 669 698 670 699 /* 671 700 * When an affinity change races with IRQ handling, the next interrupt 672 701 * can arrive on the new CPU before the original CPU has completed 673 702 * handling the previous one - it may need to be resent. 674 703 */ 675 - if (!irq_may_run(desc)) { 704 + if (!irq_can_handle_pm(desc)) { 676 705 if (irqd_needs_resend_when_in_progress(&desc->irq_data)) 677 706 desc->istate |= IRQS_PENDING; 678 - goto out; 707 + cond_eoi_irq(chip, &desc->irq_data); 708 + return; 679 709 } 680 710 681 - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 682 - 683 - /* 684 - * If its disabled or no action available 685 - * then mask it and get out of here: 686 - */ 687 - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 688 - desc->istate |= IRQS_PENDING; 711 + if (!irq_can_handle_actions(desc)) { 689 712 mask_irq(desc); 690 - goto out; 713 + cond_eoi_irq(chip, &desc->irq_data); 714 + return; 691 715 } 692 716 693 717 kstat_incr_irqs_this_cpu(desc); ··· 704 726 */ 705 727 if (unlikely(desc->istate & IRQS_PENDING)) 706 728 check_irq_resend(desc, false); 707 - 708 - raw_spin_unlock(&desc->lock); 709 - return; 710 - out: 711 - if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 712 - chip->irq_eoi(&desc->irq_data); 713 - raw_spin_unlock(&desc->lock); 714 729 } 715 730 EXPORT_SYMBOL_GPL(handle_fasteoi_irq); 716 731 ··· 741 770 EXPORT_SYMBOL_GPL(handle_fasteoi_nmi); 742 771 743 772 /** 744 - * handle_edge_irq - edge type IRQ handler 745 - * @desc: the interrupt description structure for this irq 773 + * handle_edge_irq - edge type IRQ handler 774 + * @desc: the interrupt description structure for this irq 746 775 * 747 - * Interrupt occurs on the falling and/or rising edge of a hardware 748 - * signal. 
The occurrence is latched into the irq controller hardware 749 - * and must be acked in order to be reenabled. After the ack another 750 - * interrupt can happen on the same source even before the first one 751 - * is handled by the associated event handler. If this happens it 752 - * might be necessary to disable (mask) the interrupt depending on the 753 - * controller hardware. This requires to reenable the interrupt inside 754 - * of the loop which handles the interrupts which have arrived while 755 - * the handler was running. If all pending interrupts are handled, the 756 - * loop is left. 776 + * Interrupt occurs on the falling and/or rising edge of a hardware 777 + * signal. The occurrence is latched into the irq controller hardware and 778 + * must be acked in order to be reenabled. After the ack another interrupt 779 + * can happen on the same source even before the first one is handled by 780 + * the associated event handler. If this happens it might be necessary to 781 + * disable (mask) the interrupt depending on the controller hardware. This 782 + * requires to reenable the interrupt inside of the loop which handles the 783 + * interrupts which have arrived while the handler was running. If all 784 + * pending interrupts are handled, the loop is left. 757 785 */ 758 786 void handle_edge_irq(struct irq_desc *desc) 759 787 { 760 - raw_spin_lock(&desc->lock); 788 + guard(raw_spinlock)(&desc->lock); 761 789 762 - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 763 - 764 - if (!irq_may_run(desc)) { 790 + if (!irq_can_handle(desc)) { 765 791 desc->istate |= IRQS_PENDING; 766 792 mask_ack_irq(desc); 767 - goto out_unlock; 768 - } 769 - 770 - /* 771 - * If its disabled or no action available then mask it and get 772 - * out of here. 
773 - */ 774 - if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { 775 - desc->istate |= IRQS_PENDING; 776 - mask_ack_irq(desc); 777 - goto out_unlock; 793 + return; 778 794 } 779 795 780 796 kstat_incr_irqs_this_cpu(desc); ··· 772 814 do { 773 815 if (unlikely(!desc->action)) { 774 816 mask_irq(desc); 775 - goto out_unlock; 817 + return; 776 818 } 777 819 778 820 /* ··· 788 830 789 831 handle_irq_event(desc); 790 832 791 - } while ((desc->istate & IRQS_PENDING) && 792 - !irqd_irq_disabled(&desc->irq_data)); 793 - 794 - out_unlock: 795 - raw_spin_unlock(&desc->lock); 833 + } while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data)); 796 834 } 797 835 EXPORT_SYMBOL(handle_edge_irq); 798 836 ··· 961 1007 } 962 1008 } 963 1009 964 - void 965 - __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 966 - const char *name) 1010 + void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 1011 + const char *name) 967 1012 { 968 - unsigned long flags; 969 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); 970 - 971 - if (!desc) 972 - return; 973 - 974 - __irq_do_set_handler(desc, handle, is_chained, name); 975 - irq_put_desc_busunlock(desc, flags); 1013 + scoped_irqdesc_get_and_lock(irq, 0) 1014 + __irq_do_set_handler(scoped_irqdesc, handle, is_chained, name); 976 1015 } 977 1016 EXPORT_SYMBOL_GPL(__irq_set_handler); 978 1017 979 - void 980 - irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, 981 - void *data) 1018 + void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, 1019 + void *data) 982 1020 { 983 - unsigned long flags; 984 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); 1021 + scoped_irqdesc_get_and_buslock(irq, 0) { 1022 + struct irq_desc *desc = scoped_irqdesc; 985 1023 986 - if (!desc) 987 - return; 988 - 989 - desc->irq_common_data.handler_data = data; 990 - __irq_do_set_handler(desc, handle, 1, NULL); 
991 - 992 - irq_put_desc_busunlock(desc, flags); 1024 + desc->irq_common_data.handler_data = data; 1025 + __irq_do_set_handler(desc, handle, 1, NULL); 1026 + } 993 1027 } 994 1028 EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); 995 1029 ··· 992 1050 993 1051 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) 994 1052 { 995 - unsigned long flags, trigger, tmp; 996 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 1053 + scoped_irqdesc_get_and_lock(irq, 0) { 1054 + struct irq_desc *desc = scoped_irqdesc; 1055 + unsigned long trigger, tmp; 1056 + /* 1057 + * Warn when a driver sets the no autoenable flag on an already 1058 + * active interrupt. 1059 + */ 1060 + WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); 997 1061 998 - if (!desc) 999 - return; 1062 + irq_settings_clr_and_set(desc, clr, set); 1000 1063 1001 - /* 1002 - * Warn when a driver sets the no autoenable flag on an already 1003 - * active interrupt. 1004 - */ 1005 - WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); 1064 + trigger = irqd_get_trigger_type(&desc->irq_data); 1006 1065 1007 - irq_settings_clr_and_set(desc, clr, set); 1066 + irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | 1067 + IRQD_TRIGGER_MASK | IRQD_LEVEL); 1068 + if (irq_settings_has_no_balance_set(desc)) 1069 + irqd_set(&desc->irq_data, IRQD_NO_BALANCING); 1070 + if (irq_settings_is_per_cpu(desc)) 1071 + irqd_set(&desc->irq_data, IRQD_PER_CPU); 1072 + if (irq_settings_is_level(desc)) 1073 + irqd_set(&desc->irq_data, IRQD_LEVEL); 1008 1074 1009 - trigger = irqd_get_trigger_type(&desc->irq_data); 1075 + tmp = irq_settings_get_trigger_mask(desc); 1076 + if (tmp != IRQ_TYPE_NONE) 1077 + trigger = tmp; 1010 1078 1011 - irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | 1012 - IRQD_TRIGGER_MASK | IRQD_LEVEL); 1013 - if (irq_settings_has_no_balance_set(desc)) 1014 - irqd_set(&desc->irq_data, IRQD_NO_BALANCING); 1015 - if (irq_settings_is_per_cpu(desc)) 1016 - 
irqd_set(&desc->irq_data, IRQD_PER_CPU); 1017 - if (irq_settings_is_level(desc)) 1018 - irqd_set(&desc->irq_data, IRQD_LEVEL); 1019 - 1020 - tmp = irq_settings_get_trigger_mask(desc); 1021 - if (tmp != IRQ_TYPE_NONE) 1022 - trigger = tmp; 1023 - 1024 - irqd_set(&desc->irq_data, trigger); 1025 - 1026 - irq_put_desc_unlock(desc, flags); 1079 + irqd_set(&desc->irq_data, trigger); 1080 + } 1027 1081 } 1028 1082 EXPORT_SYMBOL_GPL(irq_modify_status); 1029 1083 ··· 1032 1094 */ 1033 1095 void irq_cpu_online(void) 1034 1096 { 1035 - struct irq_desc *desc; 1036 - struct irq_chip *chip; 1037 - unsigned long flags; 1038 1097 unsigned int irq; 1039 1098 1040 1099 for_each_active_irq(irq) { 1041 - desc = irq_to_desc(irq); 1100 + struct irq_desc *desc = irq_to_desc(irq); 1101 + struct irq_chip *chip; 1102 + 1042 1103 if (!desc) 1043 1104 continue; 1044 1105 1045 - raw_spin_lock_irqsave(&desc->lock, flags); 1046 - 1106 + guard(raw_spinlock_irqsave)(&desc->lock); 1047 1107 chip = irq_data_get_irq_chip(&desc->irq_data); 1048 1108 if (chip && chip->irq_cpu_online && 1049 1109 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || 1050 1110 !irqd_irq_disabled(&desc->irq_data))) 1051 1111 chip->irq_cpu_online(&desc->irq_data); 1052 - 1053 - raw_spin_unlock_irqrestore(&desc->lock, flags); 1054 1112 } 1055 1113 } 1056 1114 ··· 1058 1124 */ 1059 1125 void irq_cpu_offline(void) 1060 1126 { 1061 - struct irq_desc *desc; 1062 - struct irq_chip *chip; 1063 - unsigned long flags; 1064 1127 unsigned int irq; 1065 1128 1066 1129 for_each_active_irq(irq) { 1067 - desc = irq_to_desc(irq); 1130 + struct irq_desc *desc = irq_to_desc(irq); 1131 + struct irq_chip *chip; 1132 + 1068 1133 if (!desc) 1069 1134 continue; 1070 1135 1071 - raw_spin_lock_irqsave(&desc->lock, flags); 1072 - 1136 + guard(raw_spinlock_irqsave)(&desc->lock); 1073 1137 chip = irq_data_get_irq_chip(&desc->irq_data); 1074 1138 if (chip && chip->irq_cpu_offline && 1075 1139 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || 1076 1140 
!irqd_irq_disabled(&desc->irq_data))) 1077 1141 chip->irq_cpu_offline(&desc->irq_data); 1078 - 1079 - raw_spin_unlock_irqrestore(&desc->lock, flags); 1080 1142 } 1081 1143 } 1082 1144 #endif ··· 1081 1151 1082 1152 #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS 1083 1153 /** 1084 - * handle_fasteoi_ack_irq - irq handler for edge hierarchy 1085 - * stacked on transparent controllers 1154 + * handle_fasteoi_ack_irq - irq handler for edge hierarchy stacked on 1155 + * transparent controllers 1086 1156 * 1087 - * @desc: the interrupt description structure for this irq 1157 + * @desc: the interrupt description structure for this irq 1088 1158 * 1089 - * Like handle_fasteoi_irq(), but for use with hierarchy where 1090 - * the irq_chip also needs to have its ->irq_ack() function 1091 - * called. 1159 + * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip 1160 + * also needs to have its ->irq_ack() function called. 1092 1161 */ 1093 1162 void handle_fasteoi_ack_irq(struct irq_desc *desc) 1094 1163 { 1095 1164 struct irq_chip *chip = desc->irq_data.chip; 1096 1165 1097 - raw_spin_lock(&desc->lock); 1166 + guard(raw_spinlock)(&desc->lock); 1098 1167 1099 - if (!irq_may_run(desc)) 1100 - goto out; 1168 + if (!irq_can_handle_pm(desc)) { 1169 + cond_eoi_irq(chip, &desc->irq_data); 1170 + return; 1171 + } 1101 1172 1102 - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 1103 - 1104 - /* 1105 - * If its disabled or no action available 1106 - * then mask it and get out of here: 1107 - */ 1108 - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 1109 - desc->istate |= IRQS_PENDING; 1173 + if (unlikely(!irq_can_handle_actions(desc))) { 1110 1174 mask_irq(desc); 1111 - goto out; 1175 + cond_eoi_irq(chip, &desc->irq_data); 1176 + return; 1112 1177 } 1113 1178 1114 1179 kstat_incr_irqs_this_cpu(desc); 1115 1180 if (desc->istate & IRQS_ONESHOT) 1116 1181 mask_irq(desc); 1117 1182 1118 - /* Start handling the irq */ 1119 1183 
desc->irq_data.chip->irq_ack(&desc->irq_data); 1120 1184 1121 1185 handle_irq_event(desc); 1122 1186 1123 1187 cond_unmask_eoi_irq(desc, chip); 1124 - 1125 - raw_spin_unlock(&desc->lock); 1126 - return; 1127 - out: 1128 - if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 1129 - chip->irq_eoi(&desc->irq_data); 1130 - raw_spin_unlock(&desc->lock); 1131 1188 } 1132 1189 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); 1133 1190 1134 1191 /** 1135 - * handle_fasteoi_mask_irq - irq handler for level hierarchy 1136 - * stacked on transparent controllers 1192 + * handle_fasteoi_mask_irq - irq handler for level hierarchy stacked on 1193 + * transparent controllers 1137 1194 * 1138 - * @desc: the interrupt description structure for this irq 1195 + * @desc: the interrupt description structure for this irq 1139 1196 * 1140 - * Like handle_fasteoi_irq(), but for use with hierarchy where 1141 - * the irq_chip also needs to have its ->irq_mask_ack() function 1142 - * called. 1197 + * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip 1198 + * also needs to have its ->irq_mask_ack() function called. 
1143 1199 */ 1144 1200 void handle_fasteoi_mask_irq(struct irq_desc *desc) 1145 1201 { 1146 1202 struct irq_chip *chip = desc->irq_data.chip; 1147 1203 1148 - raw_spin_lock(&desc->lock); 1204 + guard(raw_spinlock)(&desc->lock); 1149 1205 mask_ack_irq(desc); 1150 1206 1151 - if (!irq_may_run(desc)) 1152 - goto out; 1153 - 1154 - desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 1155 - 1156 - /* 1157 - * If its disabled or no action available 1158 - * then mask it and get out of here: 1159 - */ 1160 - if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 1161 - desc->istate |= IRQS_PENDING; 1162 - mask_irq(desc); 1163 - goto out; 1207 + if (!irq_can_handle(desc)) { 1208 + cond_eoi_irq(chip, &desc->irq_data); 1209 + return; 1164 1210 } 1165 1211 1166 1212 kstat_incr_irqs_this_cpu(desc); 1167 - if (desc->istate & IRQS_ONESHOT) 1168 - mask_irq(desc); 1169 1213 1170 1214 handle_irq_event(desc); 1171 1215 1172 1216 cond_unmask_eoi_irq(desc, chip); 1173 - 1174 - raw_spin_unlock(&desc->lock); 1175 - return; 1176 - out: 1177 - if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 1178 - chip->irq_eoi(&desc->irq_data); 1179 - raw_spin_unlock(&desc->lock); 1180 1217 } 1181 1218 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); 1182 1219
+5 -7
kernel/irq/cpuhotplug.c
··· 177 177 bool affinity_broken; 178 178 179 179 desc = irq_to_desc(irq); 180 - raw_spin_lock(&desc->lock); 181 - affinity_broken = migrate_one_irq(desc); 182 - raw_spin_unlock(&desc->lock); 180 + scoped_guard(raw_spinlock, &desc->lock) 181 + affinity_broken = migrate_one_irq(desc); 183 182 184 183 if (affinity_broken) { 185 184 pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n", ··· 218 219 return; 219 220 220 221 if (irqd_is_managed_and_shutdown(data)) 221 - irq_startup(desc, IRQ_RESEND, IRQ_START_COND); 222 + irq_startup_managed(desc); 222 223 223 224 /* 224 225 * If the interrupt can only be directed to a single target ··· 243 244 irq_lock_sparse(); 244 245 for_each_active_irq(irq) { 245 246 desc = irq_to_desc(irq); 246 - raw_spin_lock_irq(&desc->lock); 247 - irq_restore_affinity_of_irq(desc, cpu); 248 - raw_spin_unlock_irq(&desc->lock); 247 + scoped_guard(raw_spinlock_irq, &desc->lock) 248 + irq_restore_affinity_of_irq(desc, cpu); 249 249 } 250 250 irq_unlock_sparse(); 251 251
+3 -4
kernel/irq/debugfs.c
··· 160 160 struct irq_desc *desc = m->private; 161 161 struct irq_data *data; 162 162 163 - raw_spin_lock_irq(&desc->lock); 163 + guard(raw_spinlock_irq)(&desc->lock); 164 164 data = irq_desc_get_irq_data(desc); 165 165 seq_printf(m, "handler: %ps\n", desc->handle_irq); 166 166 seq_printf(m, "device: %s\n", desc->dev_name); ··· 178 178 seq_printf(m, "node: %d\n", irq_data_get_node(data)); 179 179 irq_debug_show_masks(m, desc); 180 180 irq_debug_show_data(m, data, 0); 181 - raw_spin_unlock_irq(&desc->lock); 182 181 return 0; 183 182 } 184 183 ··· 225 226 226 227 void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc) 227 228 { 228 - char name [10]; 229 + char name [12]; 229 230 230 231 if (!irq_dir || !desc || desc->debugfs_file) 231 232 return; 232 233 233 - sprintf(name, "%d", irq); 234 + sprintf(name, "%u", irq); 234 235 desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc, 235 236 &dfs_irq_ops); 236 237 }
+24 -24
kernel/irq/internals.h
··· 87 87 extern int irq_activate(struct irq_desc *desc); 88 88 extern int irq_activate_and_startup(struct irq_desc *desc, bool resend); 89 89 extern int irq_startup(struct irq_desc *desc, bool resend, bool force); 90 + extern void irq_startup_managed(struct irq_desc *desc); 90 91 91 92 extern void irq_shutdown(struct irq_desc *desc); 92 93 extern void irq_shutdown_and_deactivate(struct irq_desc *desc); ··· 142 141 static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; } 143 142 #endif 144 143 144 + 145 + #define for_each_action_of_desc(desc, act) \ 146 + for (act = desc->action; act; act = act->next) 147 + 145 148 /* Inline functions for support of irq chips on slow busses */ 146 149 static inline void chip_bus_lock(struct irq_desc *desc) 147 150 { ··· 165 160 #define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK) 166 161 #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) 167 162 168 - #define for_each_action_of_desc(desc, act) \ 169 - for (act = desc->action; act; act = act->next) 170 - 171 - struct irq_desc * 172 - __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, 173 - unsigned int check); 163 + struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, 164 + unsigned int check); 174 165 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus); 175 166 176 - static inline struct irq_desc * 177 - irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check) 167 + __DEFINE_CLASS_IS_CONDITIONAL(irqdesc_lock, true); 168 + __DEFINE_UNLOCK_GUARD(irqdesc_lock, struct irq_desc, 169 + __irq_put_desc_unlock(_T->lock, _T->flags, _T->bus), 170 + unsigned long flags; bool bus); 171 + 172 + static inline class_irqdesc_lock_t class_irqdesc_lock_constructor(unsigned int irq, bool bus, 173 + unsigned int check) 178 174 { 179 - return __irq_get_desc_lock(irq, flags, true, check); 175 + class_irqdesc_lock_t _t = { .bus = bus, }; 176 + 177 + _t.lock = 
__irq_get_desc_lock(irq, &_t.flags, bus, check); 178 + 179 + return _t; 180 180 } 181 181 182 - static inline void 183 - irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags) 184 - { 185 - __irq_put_desc_unlock(desc, flags, true); 186 - } 182 + #define scoped_irqdesc_get_and_lock(_irq, _check) \ 183 + scoped_guard(irqdesc_lock, _irq, false, _check) 187 184 188 - static inline struct irq_desc * 189 - irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check) 190 - { 191 - return __irq_get_desc_lock(irq, flags, false, check); 192 - } 185 + #define scoped_irqdesc_get_and_buslock(_irq, _check) \ 186 + scoped_guard(irqdesc_lock, _irq, true, _check) 193 187 194 - static inline void 195 - irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags) 196 - { 197 - __irq_put_desc_unlock(desc, flags, false); 198 - } 188 + #define scoped_irqdesc ((struct irq_desc *)(__guard_ptr(irqdesc_lock)(&scope))) 199 189 200 190 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) 201 191
+64 -110
kernel/irq/irqdesc.c
··· 246 246 #define IRQ_ATTR_RO(_name) \ 247 247 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 248 248 249 - static ssize_t per_cpu_count_show(struct kobject *kobj, 250 - struct kobj_attribute *attr, char *buf) 249 + static ssize_t per_cpu_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 251 250 { 252 251 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 253 252 ssize_t ret = 0; ··· 256 257 for_each_possible_cpu(cpu) { 257 258 unsigned int c = irq_desc_kstat_cpu(desc, cpu); 258 259 259 - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c); 260 + ret += sysfs_emit_at(buf, ret, "%s%u", p, c); 260 261 p = ","; 261 262 } 262 263 263 - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); 264 + ret += sysfs_emit_at(buf, ret, "\n"); 264 265 return ret; 265 266 } 266 267 IRQ_ATTR_RO(per_cpu_count); 267 268 268 - static ssize_t chip_name_show(struct kobject *kobj, 269 - struct kobj_attribute *attr, char *buf) 269 + static ssize_t chip_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 270 270 { 271 271 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 272 - ssize_t ret = 0; 273 272 274 - raw_spin_lock_irq(&desc->lock); 275 - if (desc->irq_data.chip && desc->irq_data.chip->name) { 276 - ret = scnprintf(buf, PAGE_SIZE, "%s\n", 277 - desc->irq_data.chip->name); 278 - } 279 - raw_spin_unlock_irq(&desc->lock); 280 - 281 - return ret; 273 + guard(raw_spinlock_irq)(&desc->lock); 274 + if (desc->irq_data.chip && desc->irq_data.chip->name) 275 + return sysfs_emit(buf, "%s\n", desc->irq_data.chip->name); 276 + return 0; 282 277 } 283 278 IRQ_ATTR_RO(chip_name); 284 279 285 - static ssize_t hwirq_show(struct kobject *kobj, 286 - struct kobj_attribute *attr, char *buf) 280 + static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 287 281 { 288 282 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 289 - ssize_t ret = 0; 290 283 291 - 
raw_spin_lock_irq(&desc->lock); 284 + guard(raw_spinlock_irq)(&desc->lock); 292 285 if (desc->irq_data.domain) 293 - ret = sprintf(buf, "%lu\n", desc->irq_data.hwirq); 294 - raw_spin_unlock_irq(&desc->lock); 295 - 296 - return ret; 286 + return sysfs_emit(buf, "%lu\n", desc->irq_data.hwirq); 287 + return 0; 297 288 } 298 289 IRQ_ATTR_RO(hwirq); 299 290 300 - static ssize_t type_show(struct kobject *kobj, 301 - struct kobj_attribute *attr, char *buf) 291 + static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 302 292 { 303 293 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 304 - ssize_t ret = 0; 305 294 306 - raw_spin_lock_irq(&desc->lock); 307 - ret = sprintf(buf, "%s\n", 308 - irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); 309 - raw_spin_unlock_irq(&desc->lock); 310 - 311 - return ret; 295 + guard(raw_spinlock_irq)(&desc->lock); 296 + return sysfs_emit(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); 312 297 313 298 } 314 299 IRQ_ATTR_RO(type); 315 300 316 - static ssize_t wakeup_show(struct kobject *kobj, 317 - struct kobj_attribute *attr, char *buf) 301 + static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 318 302 { 319 303 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 320 - ssize_t ret = 0; 321 304 322 - raw_spin_lock_irq(&desc->lock); 323 - ret = sprintf(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); 324 - raw_spin_unlock_irq(&desc->lock); 325 - 326 - return ret; 327 - 305 + guard(raw_spinlock_irq)(&desc->lock); 306 + return sysfs_emit(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); 328 307 } 329 308 IRQ_ATTR_RO(wakeup); 330 309 331 - static ssize_t name_show(struct kobject *kobj, 332 - struct kobj_attribute *attr, char *buf) 310 + static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 333 311 { 334 312 struct irq_desc *desc = 
container_of(kobj, struct irq_desc, kobj); 335 - ssize_t ret = 0; 336 313 337 - raw_spin_lock_irq(&desc->lock); 314 + guard(raw_spinlock_irq)(&desc->lock); 338 315 if (desc->name) 339 - ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name); 340 - raw_spin_unlock_irq(&desc->lock); 341 - 342 - return ret; 316 + return sysfs_emit(buf, "%s\n", desc->name); 317 + return 0; 343 318 } 344 319 IRQ_ATTR_RO(name); 345 320 346 - static ssize_t actions_show(struct kobject *kobj, 347 - struct kobj_attribute *attr, char *buf) 321 + static ssize_t actions_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 348 322 { 349 323 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 350 324 struct irqaction *action; 351 325 ssize_t ret = 0; 352 326 char *p = ""; 353 327 354 - raw_spin_lock_irq(&desc->lock); 355 - for_each_action_of_desc(desc, action) { 356 - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s", 357 - p, action->name); 358 - p = ","; 328 + scoped_guard(raw_spinlock_irq, &desc->lock) { 329 + for_each_action_of_desc(desc, action) { 330 + ret += sysfs_emit_at(buf, ret, "%s%s", p, action->name); 331 + p = ","; 332 + } 359 333 } 360 - raw_spin_unlock_irq(&desc->lock); 361 334 362 335 if (ret) 363 - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); 364 - 336 + ret += sysfs_emit_at(buf, ret, "\n"); 365 337 return ret; 366 338 } 367 339 IRQ_ATTR_RO(actions); ··· 388 418 int irq; 389 419 390 420 /* Prevent concurrent irq alloc/free */ 391 - irq_lock_sparse(); 392 - 421 + guard(mutex)(&sparse_irq_lock); 393 422 irq_kobj_base = kobject_create_and_add("irq", kernel_kobj); 394 - if (!irq_kobj_base) { 395 - irq_unlock_sparse(); 423 + if (!irq_kobj_base) 396 424 return -ENOMEM; 397 - } 398 425 399 426 /* Add the already allocated interrupts */ 400 427 for_each_irq_desc(irq, desc) 401 428 irq_sysfs_add(irq, desc); 402 - irq_unlock_sparse(); 403 - 404 429 return 0; 405 430 } 406 431 postcore_initcall(irq_sysfs_init); ··· 538 573 return -ENOMEM; 539 574 } 540 
575 541 - static int irq_expand_nr_irqs(unsigned int nr) 576 + static bool irq_expand_nr_irqs(unsigned int nr) 542 577 { 543 578 if (nr > MAX_SPARSE_IRQS) 544 - return -ENOMEM; 579 + return false; 545 580 nr_irqs = nr; 546 - return 0; 581 + return true; 547 582 } 548 583 549 584 int __init early_irq_init(void) ··· 621 656 static void free_desc(unsigned int irq) 622 657 { 623 658 struct irq_desc *desc = irq_to_desc(irq); 624 - unsigned long flags; 625 659 626 - raw_spin_lock_irqsave(&desc->lock, flags); 627 - desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); 628 - raw_spin_unlock_irqrestore(&desc->lock, flags); 660 + scoped_guard(raw_spinlock_irqsave, &desc->lock) 661 + desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); 629 662 delete_irq_desc(irq); 630 663 } 631 664 ··· 642 679 return start; 643 680 } 644 681 645 - static int irq_expand_nr_irqs(unsigned int nr) 682 + static inline bool irq_expand_nr_irqs(unsigned int nr) 646 683 { 647 - return -ENOMEM; 684 + return false; 648 685 } 649 686 650 687 void irq_mark_irq(unsigned int irq) 651 688 { 652 - mutex_lock(&sparse_irq_lock); 689 + guard(mutex)(&sparse_irq_lock); 653 690 irq_insert_desc(irq, irq_desc + irq); 654 - mutex_unlock(&sparse_irq_lock); 655 691 } 656 692 657 693 #ifdef CONFIG_GENERIC_IRQ_LEGACY ··· 789 827 if (from >= nr_irqs || (from + cnt) > nr_irqs) 790 828 return; 791 829 792 - mutex_lock(&sparse_irq_lock); 830 + guard(mutex)(&sparse_irq_lock); 793 831 for (i = 0; i < cnt; i++) 794 832 free_desc(from + i); 795 - 796 - mutex_unlock(&sparse_irq_lock); 797 833 } 798 834 EXPORT_SYMBOL_GPL(irq_free_descs); 799 835 ··· 808 848 * 809 849 * Returns the first irq number or error code 810 850 */ 811 - int __ref 812 - __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 813 - struct module *owner, const struct irq_affinity_desc *affinity) 851 + int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 852 + struct module *owner, 
const struct irq_affinity_desc *affinity) 814 853 { 815 - int start, ret; 854 + int start; 816 855 817 856 if (!cnt) 818 857 return -EINVAL; ··· 829 870 from = arch_dynirq_lower_bound(from); 830 871 } 831 872 832 - mutex_lock(&sparse_irq_lock); 873 + guard(mutex)(&sparse_irq_lock); 833 874 834 875 start = irq_find_free_area(from, cnt); 835 - ret = -EEXIST; 836 876 if (irq >=0 && start != irq) 837 - goto unlock; 877 + return -EEXIST; 838 878 839 879 if (start + cnt > nr_irqs) { 840 - ret = irq_expand_nr_irqs(start + cnt); 841 - if (ret) 842 - goto unlock; 880 + if (!irq_expand_nr_irqs(start + cnt)) 881 + return -ENOMEM; 843 882 } 844 - ret = alloc_descs(start, cnt, node, affinity, owner); 845 - unlock: 846 - mutex_unlock(&sparse_irq_lock); 847 - return ret; 883 + return alloc_descs(start, cnt, node, affinity, owner); 848 884 } 849 885 EXPORT_SYMBOL_GPL(__irq_alloc_descs); 850 886 ··· 854 900 return irq_find_at_or_after(offset); 855 901 } 856 902 857 - struct irq_desc * 858 - __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, 859 - unsigned int check) 903 + struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, 904 + unsigned int check) 860 905 { 861 - struct irq_desc *desc = irq_to_desc(irq); 906 + struct irq_desc *desc; 862 907 863 - if (desc) { 864 - if (check & _IRQ_DESC_CHECK) { 865 - if ((check & _IRQ_DESC_PERCPU) && 866 - !irq_settings_is_per_cpu_devid(desc)) 867 - return NULL; 908 + desc = irq_to_desc(irq); 909 + if (!desc) 910 + return NULL; 868 911 869 - if (!(check & _IRQ_DESC_PERCPU) && 870 - irq_settings_is_per_cpu_devid(desc)) 871 - return NULL; 872 - } 912 + if (check & _IRQ_DESC_CHECK) { 913 + if ((check & _IRQ_DESC_PERCPU) && !irq_settings_is_per_cpu_devid(desc)) 914 + return NULL; 873 915 874 - if (bus) 875 - chip_bus_lock(desc); 876 - raw_spin_lock_irqsave(&desc->lock, *flags); 916 + if (!(check & _IRQ_DESC_PERCPU) && irq_settings_is_per_cpu_devid(desc)) 917 + return NULL; 877 918 } 919 + 920 + if 
(bus) 921 + chip_bus_lock(desc); 922 + raw_spin_lock_irqsave(&desc->lock, *flags); 923 + 878 924 return desc; 879 925 } 880 926
+56
kernel/irq/irqdomain.c
··· 1133 1133 EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell); 1134 1134 1135 1135 /** 1136 + * irq_domain_xlate_twothreecell() - Generic xlate for direct two or three cell bindings 1137 + * @d: Interrupt domain involved in the translation 1138 + * @ctrlr: The device tree node for the device whose interrupt is translated 1139 + * @intspec: The interrupt specifier data from the device tree 1140 + * @intsize: The number of entries in @intspec 1141 + * @out_hwirq: Pointer to storage for the hardware interrupt number 1142 + * @out_type: Pointer to storage for the interrupt type 1143 + * 1144 + * Device Tree interrupt specifier translation function for two or three 1145 + * cell bindings, where the cell values map directly to the hardware 1146 + * interrupt number and the type specifier. 1147 + */ 1148 + int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr, 1149 + const u32 *intspec, unsigned int intsize, 1150 + irq_hw_number_t *out_hwirq, unsigned int *out_type) 1151 + { 1152 + struct irq_fwspec fwspec; 1153 + 1154 + of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec); 1155 + 1156 + return irq_domain_translate_twothreecell(d, &fwspec, out_hwirq, out_type); 1157 + } 1158 + EXPORT_SYMBOL_GPL(irq_domain_xlate_twothreecell); 1159 + 1160 + /** 1136 1161 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings 1137 1162 * @d: Interrupt domain involved in the translation 1138 1163 * @ctrlr: The device tree node for the device whose interrupt is translated ··· 1240 1215 return 0; 1241 1216 } 1242 1217 EXPORT_SYMBOL_GPL(irq_domain_translate_twocell); 1218 + 1219 + /** 1220 + * irq_domain_translate_twothreecell() - Generic translate for direct two or three cell 1221 + * bindings 1222 + * @d: Interrupt domain involved in the translation 1223 + * @fwspec: The firmware interrupt specifier to translate 1224 + * @out_hwirq: Pointer to storage for the hardware interrupt number 1225 + * @out_type: Pointer to storage for the interrupt 
type 1226 + * 1227 + * Firmware interrupt specifier translation function for two or three cell 1228 + * specifications, where the parameter values map directly to the hardware 1229 + * interrupt number and the type specifier. 1230 + */ 1231 + int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec, 1232 + unsigned long *out_hwirq, unsigned int *out_type) 1233 + { 1234 + if (fwspec->param_count == 2) { 1235 + *out_hwirq = fwspec->param[0]; 1236 + *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; 1237 + return 0; 1238 + } 1239 + 1240 + if (fwspec->param_count == 3) { 1241 + *out_hwirq = fwspec->param[1]; 1242 + *out_type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 1243 + return 0; 1244 + } 1245 + 1246 + return -EINVAL; 1247 + } 1248 + EXPORT_SYMBOL_GPL(irq_domain_translate_twothreecell); 1243 1249 1244 1250 int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq, 1245 1251 int node, const struct irq_affinity_desc *affinity)
+472 -660
kernel/irq/manage.c
··· 43 43 bool inprogress; 44 44 45 45 do { 46 - unsigned long flags; 47 - 48 46 /* 49 47 * Wait until we're out of the critical section. This might 50 48 * give the wrong answer due to the lack of memory barriers. ··· 51 53 cpu_relax(); 52 54 53 55 /* Ok, that indicated we're done: double-check carefully. */ 54 - raw_spin_lock_irqsave(&desc->lock, flags); 56 + guard(raw_spinlock_irqsave)(&desc->lock); 55 57 inprogress = irqd_irq_inprogress(&desc->irq_data); 56 58 57 59 /* ··· 67 69 __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE, 68 70 &inprogress); 69 71 } 70 - raw_spin_unlock_irqrestore(&desc->lock, flags); 71 - 72 72 /* Oops, that failed? */ 73 73 } while (inprogress); 74 74 } 75 75 76 76 /** 77 - * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) 78 - * @irq: interrupt number to wait for 77 + * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) 78 + * @irq: interrupt number to wait for 79 79 * 80 - * This function waits for any pending hard IRQ handlers for this 81 - * interrupt to complete before returning. If you use this 82 - * function while holding a resource the IRQ handler may need you 83 - * will deadlock. It does not take associated threaded handlers 84 - * into account. 80 + * This function waits for any pending hard IRQ handlers for this interrupt 81 + * to complete before returning. If you use this function while holding a 82 + * resource the IRQ handler may need you will deadlock. It does not take 83 + * associated threaded handlers into account. 85 84 * 86 - * Do not use this for shutdown scenarios where you must be sure 87 - * that all parts (hardirq and threaded handler) have completed. 85 + * Do not use this for shutdown scenarios where you must be sure that all 86 + * parts (hardirq and threaded handler) have completed. 88 87 * 89 - * Returns: false if a threaded handler is active. 88 + * Returns: false if a threaded handler is active. 
90 89 * 91 - * This function may be called - with care - from IRQ context. 90 + * This function may be called - with care - from IRQ context. 92 91 * 93 - * It does not check whether there is an interrupt in flight at the 94 - * hardware level, but not serviced yet, as this might deadlock when 95 - * called with interrupts disabled and the target CPU of the interrupt 96 - * is the current CPU. 92 + * It does not check whether there is an interrupt in flight at the 93 + * hardware level, but not serviced yet, as this might deadlock when called 94 + * with interrupts disabled and the target CPU of the interrupt is the 95 + * current CPU. 97 96 */ 98 97 bool synchronize_hardirq(unsigned int irq) 99 98 { ··· 116 121 } 117 122 118 123 /** 119 - * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 120 - * @irq: interrupt number to wait for 124 + * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 125 + * @irq: interrupt number to wait for 121 126 * 122 - * This function waits for any pending IRQ handlers for this interrupt 123 - * to complete before returning. If you use this function while 124 - * holding a resource the IRQ handler may need you will deadlock. 127 + * This function waits for any pending IRQ handlers for this interrupt to 128 + * complete before returning. If you use this function while holding a 129 + * resource the IRQ handler may need you will deadlock. 125 130 * 126 - * Can only be called from preemptible code as it might sleep when 127 - * an interrupt thread is associated to @irq. 131 + * Can only be called from preemptible code as it might sleep when 132 + * an interrupt thread is associated to @irq. 128 133 * 129 - * It optionally makes sure (when the irq chip supports that method) 130 - * that the interrupt is not pending in any CPU and waiting for 131 - * service. 
134 + * It optionally makes sure (when the irq chip supports that method) 135 + * that the interrupt is not pending in any CPU and waiting for 136 + * service. 132 137 */ 133 138 void synchronize_irq(unsigned int irq) 134 139 { ··· 151 156 } 152 157 153 158 /** 154 - * irq_can_set_affinity - Check if the affinity of a given irq can be set 155 - * @irq: Interrupt to check 159 + * irq_can_set_affinity - Check if the affinity of a given irq can be set 160 + * @irq: Interrupt to check 156 161 * 157 162 */ 158 163 int irq_can_set_affinity(unsigned int irq) ··· 176 181 } 177 182 178 183 /** 179 - * irq_set_thread_affinity - Notify irq threads to adjust affinity 180 - * @desc: irq descriptor which has affinity changed 184 + * irq_set_thread_affinity - Notify irq threads to adjust affinity 185 + * @desc: irq descriptor which has affinity changed 181 186 * 182 - * We just set IRQTF_AFFINITY and delegate the affinity setting 183 - * to the interrupt thread itself. We can not call 184 - * set_cpus_allowed_ptr() here as we hold desc->lock and this 185 - * code can be called from hard interrupt context. 187 + * Just set IRQTF_AFFINITY and delegate the affinity setting to the 188 + * interrupt thread itself. We can not call set_cpus_allowed_ptr() here as 189 + * we hold desc->lock and this code can be called from hard interrupt 190 + * context. 186 191 */ 187 192 static void irq_set_thread_affinity(struct irq_desc *desc) 188 193 { ··· 395 400 * an interrupt which is already started or which has already been configured 396 401 * as managed will also fail, as these mean invalid init state or double init. 
397 402 */ 398 - int irq_update_affinity_desc(unsigned int irq, 399 - struct irq_affinity_desc *affinity) 403 + int irq_update_affinity_desc(unsigned int irq, struct irq_affinity_desc *affinity) 400 404 { 401 - struct irq_desc *desc; 402 - unsigned long flags; 403 - bool activated; 404 - int ret = 0; 405 - 406 405 /* 407 406 * Supporting this with the reservation scheme used by x86 needs 408 407 * some more thought. Fail it for now. ··· 404 415 if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) 405 416 return -EOPNOTSUPP; 406 417 407 - desc = irq_get_desc_buslock(irq, &flags, 0); 408 - if (!desc) 409 - return -EINVAL; 418 + scoped_irqdesc_get_and_buslock(irq, 0) { 419 + struct irq_desc *desc = scoped_irqdesc; 420 + bool activated; 410 421 411 - /* Requires the interrupt to be shut down */ 412 - if (irqd_is_started(&desc->irq_data)) { 413 - ret = -EBUSY; 414 - goto out_unlock; 422 + /* Requires the interrupt to be shut down */ 423 + if (irqd_is_started(&desc->irq_data)) 424 + return -EBUSY; 425 + 426 + /* Interrupts which are already managed cannot be modified */ 427 + if (irqd_affinity_is_managed(&desc->irq_data)) 428 + return -EBUSY; 429 + /* 430 + * Deactivate the interrupt. That's required to undo 431 + * anything an earlier activation has established. 
432 + */ 433 + activated = irqd_is_activated(&desc->irq_data); 434 + if (activated) 435 + irq_domain_deactivate_irq(&desc->irq_data); 436 + 437 + if (affinity->is_managed) { 438 + irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); 439 + irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); 440 + } 441 + 442 + cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); 443 + 444 + /* Restore the activation state */ 445 + if (activated) 446 + irq_domain_activate_irq(&desc->irq_data, false); 447 + return 0; 415 448 } 416 - 417 - /* Interrupts which are already managed cannot be modified */ 418 - if (irqd_affinity_is_managed(&desc->irq_data)) { 419 - ret = -EBUSY; 420 - goto out_unlock; 421 - } 422 - 423 - /* 424 - * Deactivate the interrupt. That's required to undo 425 - * anything an earlier activation has established. 426 - */ 427 - activated = irqd_is_activated(&desc->irq_data); 428 - if (activated) 429 - irq_domain_deactivate_irq(&desc->irq_data); 430 - 431 - if (affinity->is_managed) { 432 - irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); 433 - irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); 434 - } 435 - 436 - cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); 437 - 438 - /* Restore the activation state */ 439 - if (activated) 440 - irq_domain_activate_irq(&desc->irq_data, false); 441 - 442 - out_unlock: 443 - irq_put_desc_busunlock(desc, flags); 444 - return ret; 449 + return -EINVAL; 445 450 } 446 451 447 452 static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, 448 453 bool force) 449 454 { 450 455 struct irq_desc *desc = irq_to_desc(irq); 451 - unsigned long flags; 452 - int ret; 453 456 454 457 if (!desc) 455 458 return -EINVAL; 456 459 457 - raw_spin_lock_irqsave(&desc->lock, flags); 458 - ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); 459 - raw_spin_unlock_irqrestore(&desc->lock, flags); 460 - return ret; 460 + guard(raw_spinlock_irqsave)(&desc->lock); 461 + return 
irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); 461 462 } 462 463 463 464 /** ··· 480 501 } 481 502 EXPORT_SYMBOL_GPL(irq_force_affinity); 482 503 483 - int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m, 484 - bool setaffinity) 504 + int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m, bool setaffinity) 485 505 { 486 - unsigned long flags; 487 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 506 + int ret = -EINVAL; 488 507 489 - if (!desc) 490 - return -EINVAL; 491 - desc->affinity_hint = m; 492 - irq_put_desc_unlock(desc, flags); 493 - if (m && setaffinity) 508 + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 509 + scoped_irqdesc->affinity_hint = m; 510 + ret = 0; 511 + } 512 + 513 + if (!ret && m && setaffinity) 494 514 __irq_set_affinity(irq, m, false); 495 - return 0; 515 + return ret; 496 516 } 497 517 EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint); 498 518 499 519 static void irq_affinity_notify(struct work_struct *work) 500 520 { 501 - struct irq_affinity_notify *notify = 502 - container_of(work, struct irq_affinity_notify, work); 521 + struct irq_affinity_notify *notify = container_of(work, struct irq_affinity_notify, work); 503 522 struct irq_desc *desc = irq_to_desc(notify->irq); 504 523 cpumask_var_t cpumask; 505 - unsigned long flags; 506 524 507 525 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) 508 526 goto out; 509 527 510 - raw_spin_lock_irqsave(&desc->lock, flags); 511 - if (irq_move_pending(&desc->irq_data)) 512 - irq_get_pending(cpumask, desc); 513 - else 514 - cpumask_copy(cpumask, desc->irq_common_data.affinity); 515 - raw_spin_unlock_irqrestore(&desc->lock, flags); 528 + scoped_guard(raw_spinlock_irqsave, &desc->lock) { 529 + if (irq_move_pending(&desc->irq_data)) 530 + irq_get_pending(cpumask, desc); 531 + else 532 + cpumask_copy(cpumask, desc->irq_common_data.affinity); 533 + } 516 534 517 535 notify->notify(notify, 
cpumask); 518 536 ··· 519 543 } 520 544 521 545 /** 522 - * irq_set_affinity_notifier - control notification of IRQ affinity changes 523 - * @irq: Interrupt for which to enable/disable notification 524 - * @notify: Context for notification, or %NULL to disable 525 - * notification. Function pointers must be initialised; 526 - * the other fields will be initialised by this function. 546 + * irq_set_affinity_notifier - control notification of IRQ affinity changes 547 + * @irq: Interrupt for which to enable/disable notification 548 + * @notify: Context for notification, or %NULL to disable 549 + * notification. Function pointers must be initialised; 550 + * the other fields will be initialised by this function. 527 551 * 528 - * Must be called in process context. Notification may only be enabled 529 - * after the IRQ is allocated and must be disabled before the IRQ is 530 - * freed using free_irq(). 552 + * Must be called in process context. Notification may only be enabled 553 + * after the IRQ is allocated and must be disabled before the IRQ is freed 554 + * using free_irq(). 
531 555 */ 532 - int 533 - irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) 556 + int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) 534 557 { 535 558 struct irq_desc *desc = irq_to_desc(irq); 536 559 struct irq_affinity_notify *old_notify; 537 - unsigned long flags; 538 560 539 561 /* The release function is promised process context */ 540 562 might_sleep(); ··· 547 573 INIT_WORK(&notify->work, irq_affinity_notify); 548 574 } 549 575 550 - raw_spin_lock_irqsave(&desc->lock, flags); 551 - old_notify = desc->affinity_notify; 552 - desc->affinity_notify = notify; 553 - raw_spin_unlock_irqrestore(&desc->lock, flags); 576 + scoped_guard(raw_spinlock_irqsave, &desc->lock) { 577 + old_notify = desc->affinity_notify; 578 + desc->affinity_notify = notify; 579 + } 554 580 555 581 if (old_notify) { 556 582 if (cancel_work_sync(&old_notify->work)) { ··· 571 597 int irq_setup_affinity(struct irq_desc *desc) 572 598 { 573 599 struct cpumask *set = irq_default_affinity; 574 - int ret, node = irq_desc_get_node(desc); 600 + int node = irq_desc_get_node(desc); 601 + 575 602 static DEFINE_RAW_SPINLOCK(mask_lock); 576 603 static struct cpumask mask; 577 604 ··· 580 605 if (!__irq_can_set_affinity(desc)) 581 606 return 0; 582 607 583 - raw_spin_lock(&mask_lock); 608 + guard(raw_spinlock)(&mask_lock); 584 609 /* 585 610 * Preserve the managed affinity setting and a userspace affinity 586 611 * setup, but make sure that one of the targets is online. 
··· 605 630 if (cpumask_intersects(&mask, nodemask)) 606 631 cpumask_and(&mask, &mask, nodemask); 607 632 } 608 - ret = irq_do_set_affinity(&desc->irq_data, &mask, false); 609 - raw_spin_unlock(&mask_lock); 610 - return ret; 633 + return irq_do_set_affinity(&desc->irq_data, &mask, false); 611 634 } 612 635 #else 613 636 /* Wrapper for ALPHA specific affinity selector magic */ ··· 618 645 619 646 620 647 /** 621 - * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt 622 - * @irq: interrupt number to set affinity 623 - * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU 624 - * specific data for percpu_devid interrupts 648 + * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt 649 + * @irq: interrupt number to set affinity 650 + * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU 651 + * specific data for percpu_devid interrupts 625 652 * 626 - * This function uses the vCPU specific data to set the vCPU 627 - * affinity for an irq. The vCPU specific data is passed from 628 - * outside, such as KVM. One example code path is as below: 629 - * KVM -> IOMMU -> irq_set_vcpu_affinity(). 653 + * This function uses the vCPU specific data to set the vCPU affinity for 654 + * an irq. The vCPU specific data is passed from outside, such as KVM. One 655 + * example code path is as below: KVM -> IOMMU -> irq_set_vcpu_affinity(). 
630 656 */ 631 657 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) 632 658 { 633 - unsigned long flags; 634 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 635 - struct irq_data *data; 636 - struct irq_chip *chip; 637 - int ret = -ENOSYS; 659 + scoped_irqdesc_get_and_lock(irq, 0) { 660 + struct irq_desc *desc = scoped_irqdesc; 661 + struct irq_data *data; 662 + struct irq_chip *chip; 638 663 639 - if (!desc) 640 - return -EINVAL; 664 + data = irq_desc_get_irq_data(desc); 665 + do { 666 + chip = irq_data_get_irq_chip(data); 667 + if (chip && chip->irq_set_vcpu_affinity) 668 + break; 641 669 642 - data = irq_desc_get_irq_data(desc); 643 - do { 644 - chip = irq_data_get_irq_chip(data); 645 - if (chip && chip->irq_set_vcpu_affinity) 646 - break; 647 - #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 648 - data = data->parent_data; 649 - #else 650 - data = NULL; 651 - #endif 652 - } while (data); 670 + data = irqd_get_parent_data(data); 671 + } while (data); 653 672 654 - if (data) 655 - ret = chip->irq_set_vcpu_affinity(data, vcpu_info); 656 - irq_put_desc_unlock(desc, flags); 657 - 658 - return ret; 673 + if (!data) 674 + return -ENOSYS; 675 + return chip->irq_set_vcpu_affinity(data, vcpu_info); 676 + } 677 + return -EINVAL; 659 678 } 660 679 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); 661 680 ··· 659 694 660 695 static int __disable_irq_nosync(unsigned int irq) 661 696 { 662 - unsigned long flags; 663 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 664 - 665 - if (!desc) 666 - return -EINVAL; 667 - __disable_irq(desc); 668 - irq_put_desc_busunlock(desc, flags); 669 - return 0; 697 + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 698 + __disable_irq(scoped_irqdesc); 699 + return 0; 700 + } 701 + return -EINVAL; 670 702 } 671 703 672 704 /** 673 - * disable_irq_nosync - disable an irq without waiting 674 - * @irq: Interrupt to disable 705 + * disable_irq_nosync - disable an irq without waiting 706 + * 
@irq: Interrupt to disable 675 707 * 676 - * Disable the selected interrupt line. Disables and Enables are 677 - * nested. 678 - * Unlike disable_irq(), this function does not ensure existing 679 - * instances of the IRQ handler have completed before returning. 708 + * Disable the selected interrupt line. Disables and Enables are 709 + * nested. 710 + * Unlike disable_irq(), this function does not ensure existing 711 + * instances of the IRQ handler have completed before returning. 680 712 * 681 - * This function may be called from IRQ context. 713 + * This function may be called from IRQ context. 682 714 */ 683 715 void disable_irq_nosync(unsigned int irq) 684 716 { ··· 684 722 EXPORT_SYMBOL(disable_irq_nosync); 685 723 686 724 /** 687 - * disable_irq - disable an irq and wait for completion 688 - * @irq: Interrupt to disable 725 + * disable_irq - disable an irq and wait for completion 726 + * @irq: Interrupt to disable 689 727 * 690 - * Disable the selected interrupt line. Enables and Disables are 691 - * nested. 692 - * This function waits for any pending IRQ handlers for this interrupt 693 - * to complete before returning. If you use this function while 694 - * holding a resource the IRQ handler may need you will deadlock. 728 + * Disable the selected interrupt line. Enables and Disables are nested. 695 729 * 696 - * Can only be called from preemptible code as it might sleep when 697 - * an interrupt thread is associated to @irq. 730 + * This function waits for any pending IRQ handlers for this interrupt to 731 + * complete before returning. If you use this function while holding a 732 + * resource the IRQ handler may need you will deadlock. 733 + * 734 + * Can only be called from preemptible code as it might sleep when an 735 + * interrupt thread is associated to @irq. 
698 736 * 699 737 */ 700 738 void disable_irq(unsigned int irq) ··· 706 744 EXPORT_SYMBOL(disable_irq); 707 745 708 746 /** 709 - * disable_hardirq - disables an irq and waits for hardirq completion 710 - * @irq: Interrupt to disable 747 + * disable_hardirq - disables an irq and waits for hardirq completion 748 + * @irq: Interrupt to disable 711 749 * 712 - * Disable the selected interrupt line. Enables and Disables are 713 - * nested. 714 - * This function waits for any pending hard IRQ handlers for this 715 - * interrupt to complete before returning. If you use this function while 716 - * holding a resource the hard IRQ handler may need you will deadlock. 750 + * Disable the selected interrupt line. Enables and Disables are nested. 717 751 * 718 - * When used to optimistically disable an interrupt from atomic context 719 - * the return value must be checked. 752 + * This function waits for any pending hard IRQ handlers for this interrupt 753 + * to complete before returning. If you use this function while holding a 754 + * resource the hard IRQ handler may need you will deadlock. 720 755 * 721 - * Returns: false if a threaded handler is active. 756 + * When used to optimistically disable an interrupt from atomic context the 757 + * return value must be checked. 722 758 * 723 - * This function may be called - with care - from IRQ context. 759 + * Returns: false if a threaded handler is active. 760 + * 761 + * This function may be called - with care - from IRQ context. 724 762 */ 725 763 bool disable_hardirq(unsigned int irq) 726 764 { 727 765 if (!__disable_irq_nosync(irq)) 728 766 return synchronize_hardirq(irq); 729 - 730 767 return false; 731 768 } 732 769 EXPORT_SYMBOL_GPL(disable_hardirq); 733 770 734 771 /** 735 - * disable_nmi_nosync - disable an nmi without waiting 736 - * @irq: Interrupt to disable 772 + * disable_nmi_nosync - disable an nmi without waiting 773 + * @irq: Interrupt to disable 737 774 * 738 - * Disable the selected interrupt line. 
Disables and enables are 739 - * nested. 740 - * The interrupt to disable must have been requested through request_nmi. 741 - * Unlike disable_nmi(), this function does not ensure existing 742 - * instances of the IRQ handler have completed before returning. 775 + * Disable the selected interrupt line. Disables and enables are nested. 776 + * 777 + * The interrupt to disable must have been requested through request_nmi. 778 + * Unlike disable_nmi(), this function does not ensure existing 779 + * instances of the IRQ handler have completed before returning. 743 780 */ 744 781 void disable_nmi_nosync(unsigned int irq) 745 782 { ··· 778 817 } 779 818 780 819 /** 781 - * enable_irq - enable handling of an irq 782 - * @irq: Interrupt to enable 820 + * enable_irq - enable handling of an irq 821 + * @irq: Interrupt to enable 783 822 * 784 - * Undoes the effect of one call to disable_irq(). If this 785 - * matches the last disable, processing of interrupts on this 786 - * IRQ line is re-enabled. 823 + * Undoes the effect of one call to disable_irq(). If this matches the 824 + * last disable, processing of interrupts on this IRQ line is re-enabled. 787 825 * 788 - * This function may be called from IRQ context only when 789 - * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 826 + * This function may be called from IRQ context only when 827 + * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 
790 828 */ 791 829 void enable_irq(unsigned int irq) 792 830 { 793 - unsigned long flags; 794 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 831 + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 832 + struct irq_desc *desc = scoped_irqdesc; 795 833 796 - if (!desc) 797 - return; 798 - if (WARN(!desc->irq_data.chip, 799 - KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) 800 - goto out; 801 - 802 - __enable_irq(desc); 803 - out: 804 - irq_put_desc_busunlock(desc, flags); 834 + if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq)) 835 + return; 836 + __enable_irq(desc); 837 + } 805 838 } 806 839 EXPORT_SYMBOL(enable_irq); 807 840 808 841 /** 809 - * enable_nmi - enable handling of an nmi 810 - * @irq: Interrupt to enable 842 + * enable_nmi - enable handling of an nmi 843 + * @irq: Interrupt to enable 811 844 * 812 - * The interrupt to enable must have been requested through request_nmi. 813 - * Undoes the effect of one call to disable_nmi(). If this 814 - * matches the last disable, processing of interrupts on this 815 - * IRQ line is re-enabled. 845 + * The interrupt to enable must have been requested through request_nmi. 846 + * Undoes the effect of one call to disable_nmi(). If this matches the last 847 + * disable, processing of interrupts on this IRQ line is re-enabled. 816 848 */ 817 849 void enable_nmi(unsigned int irq) 818 850 { ··· 827 873 } 828 874 829 875 /** 830 - * irq_set_irq_wake - control irq power management wakeup 831 - * @irq: interrupt to control 832 - * @on: enable/disable power management wakeup 876 + * irq_set_irq_wake - control irq power management wakeup 877 + * @irq: interrupt to control 878 + * @on: enable/disable power management wakeup 833 879 * 834 - * Enable/disable power management wakeup mode, which is 835 - * disabled by default. Enables and disables must match, 836 - * just as they match for non-wakeup mode support. 
880 + * Enable/disable power management wakeup mode, which is disabled by 881 + * default. Enables and disables must match, just as they match for 882 + * non-wakeup mode support. 837 883 * 838 - * Wakeup mode lets this IRQ wake the system from sleep 839 - * states like "suspend to RAM". 884 + * Wakeup mode lets this IRQ wake the system from sleep states like 885 + * "suspend to RAM". 840 886 * 841 - * Note: irq enable/disable state is completely orthogonal 842 - * to the enable/disable state of irq wake. An irq can be 843 - * disabled with disable_irq() and still wake the system as 844 - * long as the irq has wake enabled. If this does not hold, 845 - * then the underlying irq chip and the related driver need 846 - * to be investigated. 887 + * Note: irq enable/disable state is completely orthogonal to the 888 + * enable/disable state of irq wake. An irq can be disabled with 889 + * disable_irq() and still wake the system as long as the irq has wake 890 + * enabled. If this does not hold, then the underlying irq chip and the 891 + * related driver need to be investigated. 847 892 */ 848 893 int irq_set_irq_wake(unsigned int irq, unsigned int on) 849 894 { 850 - unsigned long flags; 851 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 852 - int ret = 0; 895 + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 896 + struct irq_desc *desc = scoped_irqdesc; 897 + int ret = 0; 853 898 854 - if (!desc) 855 - return -EINVAL; 899 + /* Don't use NMIs as wake up interrupts please */ 900 + if (irq_is_nmi(desc)) 901 + return -EINVAL; 856 902 857 - /* Don't use NMIs as wake up interrupts please */ 858 - if (irq_is_nmi(desc)) { 859 - ret = -EINVAL; 860 - goto out_unlock; 861 - } 862 - 863 - /* wakeup-capable irqs can be shared between drivers that 864 - * don't need to have the same sleep mode behaviors. 
865 - */ 866 - if (on) { 867 - if (desc->wake_depth++ == 0) { 868 - ret = set_irq_wake_real(irq, on); 869 - if (ret) 870 - desc->wake_depth = 0; 871 - else 872 - irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); 903 + /* 904 + * wakeup-capable irqs can be shared between drivers that 905 + * don't need to have the same sleep mode behaviors. 906 + */ 907 + if (on) { 908 + if (desc->wake_depth++ == 0) { 909 + ret = set_irq_wake_real(irq, on); 910 + if (ret) 911 + desc->wake_depth = 0; 912 + else 913 + irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); 914 + } 915 + } else { 916 + if (desc->wake_depth == 0) { 917 + WARN(1, "Unbalanced IRQ %d wake disable\n", irq); 918 + } else if (--desc->wake_depth == 0) { 919 + ret = set_irq_wake_real(irq, on); 920 + if (ret) 921 + desc->wake_depth = 1; 922 + else 923 + irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); 924 + } 873 925 } 874 - } else { 875 - if (desc->wake_depth == 0) { 876 - WARN(1, "Unbalanced IRQ %d wake disable\n", irq); 877 - } else if (--desc->wake_depth == 0) { 878 - ret = set_irq_wake_real(irq, on); 879 - if (ret) 880 - desc->wake_depth = 1; 881 - else 882 - irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); 883 - } 926 + return ret; 884 927 } 885 - 886 - out_unlock: 887 - irq_put_desc_busunlock(desc, flags); 888 - return ret; 928 + return -EINVAL; 889 929 } 890 930 EXPORT_SYMBOL(irq_set_irq_wake); 891 931 ··· 888 940 * particular irq has been exclusively allocated or is available 889 941 * for driver use. 
890 942 */ 891 - int can_request_irq(unsigned int irq, unsigned long irqflags) 943 + bool can_request_irq(unsigned int irq, unsigned long irqflags) 892 944 { 893 - unsigned long flags; 894 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 895 - int canrequest = 0; 945 + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 946 + struct irq_desc *desc = scoped_irqdesc; 896 947 897 - if (!desc) 898 - return 0; 899 - 900 - if (irq_settings_can_request(desc)) { 901 - if (!desc->action || 902 - irqflags & desc->action->flags & IRQF_SHARED) 903 - canrequest = 1; 948 + if (irq_settings_can_request(desc)) { 949 + if (!desc->action || irqflags & desc->action->flags & IRQF_SHARED) 950 + return true; 951 + } 904 952 } 905 - irq_put_desc_unlock(desc, flags); 906 - return canrequest; 953 + return false; 907 954 } 908 955 909 956 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) ··· 959 1016 #ifdef CONFIG_HARDIRQS_SW_RESEND 960 1017 int irq_set_parent(int irq, int parent_irq) 961 1018 { 962 - unsigned long flags; 963 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 964 - 965 - if (!desc) 966 - return -EINVAL; 967 - 968 - desc->parent_irq = parent_irq; 969 - 970 - irq_put_desc_unlock(desc, flags); 971 - return 0; 1019 + scoped_irqdesc_get_and_lock(irq, 0) { 1020 + scoped_irqdesc->parent_irq = parent_irq; 1021 + return 0; 1022 + } 1023 + return -EINVAL; 972 1024 } 973 1025 EXPORT_SYMBOL_GPL(irq_set_parent); 974 1026 #endif ··· 1017 1079 return; 1018 1080 } 1019 1081 1020 - raw_spin_lock_irq(&desc->lock); 1021 - /* 1022 - * This code is triggered unconditionally. Check the affinity 1023 - * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 1024 - */ 1025 - if (cpumask_available(desc->irq_common_data.affinity)) { 1026 - const struct cpumask *m; 1082 + scoped_guard(raw_spinlock_irq, &desc->lock) { 1083 + /* 1084 + * This code is triggered unconditionally. Check the affinity 1085 + * mask pointer. 
For CPU_MASK_OFFSTACK=n this is optimized out. 1086 + */ 1087 + if (cpumask_available(desc->irq_common_data.affinity)) { 1088 + const struct cpumask *m; 1027 1089 1028 - m = irq_data_get_effective_affinity_mask(&desc->irq_data); 1029 - cpumask_copy(mask, m); 1030 - valid = true; 1090 + m = irq_data_get_effective_affinity_mask(&desc->irq_data); 1091 + cpumask_copy(mask, m); 1092 + valid = true; 1093 + } 1031 1094 } 1032 - raw_spin_unlock_irq(&desc->lock); 1033 1095 1034 1096 if (valid) 1035 1097 set_cpus_allowed_ptr(current, mask); ··· 1197 1259 if (WARN_ON_ONCE(!secondary)) 1198 1260 return; 1199 1261 1200 - raw_spin_lock_irq(&desc->lock); 1262 + guard(raw_spinlock_irq)(&desc->lock); 1201 1263 __irq_wake_thread(desc, secondary); 1202 - raw_spin_unlock_irq(&desc->lock); 1203 1264 } 1204 1265 1205 1266 /* ··· 1271 1334 } 1272 1335 1273 1336 /** 1274 - * irq_wake_thread - wake the irq thread for the action identified by dev_id 1275 - * @irq: Interrupt line 1276 - * @dev_id: Device identity for which the thread should be woken 1277 - * 1337 + * irq_wake_thread - wake the irq thread for the action identified by dev_id 1338 + * @irq: Interrupt line 1339 + * @dev_id: Device identity for which the thread should be woken 1278 1340 */ 1279 1341 void irq_wake_thread(unsigned int irq, void *dev_id) 1280 1342 { 1281 1343 struct irq_desc *desc = irq_to_desc(irq); 1282 1344 struct irqaction *action; 1283 - unsigned long flags; 1284 1345 1285 1346 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1286 1347 return; 1287 1348 1288 - raw_spin_lock_irqsave(&desc->lock, flags); 1349 + guard(raw_spinlock_irqsave)(&desc->lock); 1289 1350 for_each_action_of_desc(desc, action) { 1290 1351 if (action->dev_id == dev_id) { 1291 1352 if (action->thread) ··· 1291 1356 break; 1292 1357 } 1293 1358 } 1294 - raw_spin_unlock_irqrestore(&desc->lock, flags); 1295 1359 } 1296 1360 EXPORT_SYMBOL_GPL(irq_wake_thread); 1297 1361 ··· 1921 1987 * There is no interrupt on the fly anymore. 
Deactivate it 1922 1988 * completely. 1923 1989 */ 1924 - raw_spin_lock_irqsave(&desc->lock, flags); 1925 - irq_domain_deactivate_irq(&desc->irq_data); 1926 - raw_spin_unlock_irqrestore(&desc->lock, flags); 1990 + scoped_guard(raw_spinlock_irqsave, &desc->lock) 1991 + irq_domain_deactivate_irq(&desc->irq_data); 1927 1992 1928 1993 irq_release_resources(desc); 1929 1994 chip_bus_sync_unlock(desc); ··· 1938 2005 } 1939 2006 1940 2007 /** 1941 - * free_irq - free an interrupt allocated with request_irq 1942 - * @irq: Interrupt line to free 1943 - * @dev_id: Device identity to free 2008 + * free_irq - free an interrupt allocated with request_irq 2009 + * @irq: Interrupt line to free 2010 + * @dev_id: Device identity to free 1944 2011 * 1945 - * Remove an interrupt handler. The handler is removed and if the 1946 - * interrupt line is no longer in use by any driver it is disabled. 1947 - * On a shared IRQ the caller must ensure the interrupt is disabled 1948 - * on the card it drives before calling this function. The function 1949 - * does not return until any executing interrupts for this IRQ 1950 - * have completed. 2012 + * Remove an interrupt handler. The handler is removed and if the interrupt 2013 + * line is no longer in use by any driver it is disabled. On a shared IRQ 2014 + * the caller must ensure the interrupt is disabled on the card it drives 2015 + * before calling this function. The function does not return until any 2016 + * executing interrupts for this IRQ have completed. 1951 2017 * 1952 - * This function must not be called from interrupt context. 2018 + * This function must not be called from interrupt context. 1953 2019 * 1954 - * Returns the devname argument passed to request_irq. 2020 + * Returns the devname argument passed to request_irq. 
1955 2021 */ 1956 2022 const void *free_irq(unsigned int irq, void *dev_id) 1957 2023 { ··· 2007 2075 const void *free_nmi(unsigned int irq, void *dev_id) 2008 2076 { 2009 2077 struct irq_desc *desc = irq_to_desc(irq); 2010 - unsigned long flags; 2011 - const void *devname; 2012 2078 2013 2079 if (!desc || WARN_ON(!irq_is_nmi(desc))) 2014 2080 return NULL; ··· 2018 2088 if (WARN_ON(desc->depth == 0)) 2019 2089 disable_nmi_nosync(irq); 2020 2090 2021 - raw_spin_lock_irqsave(&desc->lock, flags); 2022 - 2091 + guard(raw_spinlock_irqsave)(&desc->lock); 2023 2092 irq_nmi_teardown(desc); 2024 - devname = __cleanup_nmi(irq, desc); 2025 - 2026 - raw_spin_unlock_irqrestore(&desc->lock, flags); 2027 - 2028 - return devname; 2093 + return __cleanup_nmi(irq, desc); 2029 2094 } 2030 2095 2031 2096 /** 2032 - * request_threaded_irq - allocate an interrupt line 2033 - * @irq: Interrupt line to allocate 2034 - * @handler: Function to be called when the IRQ occurs. 2035 - * Primary handler for threaded interrupts. 2036 - * If handler is NULL and thread_fn != NULL 2037 - * the default primary handler is installed. 2038 - * @thread_fn: Function called from the irq handler thread 2039 - * If NULL, no irq thread is created 2040 - * @irqflags: Interrupt type flags 2041 - * @devname: An ascii name for the claiming device 2042 - * @dev_id: A cookie passed back to the handler function 2097 + * request_threaded_irq - allocate an interrupt line 2098 + * @irq: Interrupt line to allocate 2099 + * @handler: Function to be called when the IRQ occurs. 2100 + * Primary handler for threaded interrupts. 2101 + * If handler is NULL and thread_fn != NULL 2102 + * the default primary handler is installed. 
2103 + * @thread_fn: Function called from the irq handler thread 2104 + * If NULL, no irq thread is created 2105 + * @irqflags: Interrupt type flags 2106 + * @devname: An ascii name for the claiming device 2107 + * @dev_id: A cookie passed back to the handler function 2043 2108 * 2044 - * This call allocates interrupt resources and enables the 2045 - * interrupt line and IRQ handling. From the point this 2046 - * call is made your handler function may be invoked. Since 2047 - * your handler function must clear any interrupt the board 2048 - * raises, you must take care both to initialise your hardware 2049 - * and to set up the interrupt handler in the right order. 2109 + * This call allocates interrupt resources and enables the interrupt line 2110 + * and IRQ handling. From the point this call is made your handler function 2111 + * may be invoked. Since your handler function must clear any interrupt the 2112 + * board raises, you must take care both to initialise your hardware and to 2113 + * set up the interrupt handler in the right order. 2050 2114 * 2051 - * If you want to set up a threaded irq handler for your device 2052 - * then you need to supply @handler and @thread_fn. @handler is 2053 - * still called in hard interrupt context and has to check 2054 - * whether the interrupt originates from the device. If yes it 2055 - * needs to disable the interrupt on the device and return 2056 - * IRQ_WAKE_THREAD which will wake up the handler thread and run 2057 - * @thread_fn. This split handler design is necessary to support 2058 - * shared interrupts. 2115 + * If you want to set up a threaded irq handler for your device then you 2116 + * need to supply @handler and @thread_fn. @handler is still called in hard 2117 + * interrupt context and has to check whether the interrupt originates from 2118 + * the device. 
If yes it needs to disable the interrupt on the device and 2119 + * return IRQ_WAKE_THREAD which will wake up the handler thread and run 2120 + * @thread_fn. This split handler design is necessary to support shared 2121 + * interrupts. 2059 2122 * 2060 - * Dev_id must be globally unique. Normally the address of the 2061 - * device data structure is used as the cookie. Since the handler 2062 - * receives this value it makes sense to use it. 2123 + * @dev_id must be globally unique. Normally the address of the device data 2124 + * structure is used as the cookie. Since the handler receives this value 2125 + * it makes sense to use it. 2063 2126 * 2064 - * If your interrupt is shared you must pass a non NULL dev_id 2065 - * as this is required when freeing the interrupt. 2127 + * If your interrupt is shared you must pass a non NULL dev_id as this is 2128 + * required when freeing the interrupt. 2066 2129 * 2067 - * Flags: 2130 + * Flags: 2068 2131 * 2069 2132 * IRQF_SHARED Interrupt is shared 2070 2133 * IRQF_TRIGGER_* Specify active edge(s) or level ··· 2155 2232 EXPORT_SYMBOL(request_threaded_irq); 2156 2233 2157 2234 /** 2158 - * request_any_context_irq - allocate an interrupt line 2159 - * @irq: Interrupt line to allocate 2160 - * @handler: Function to be called when the IRQ occurs. 2161 - * Threaded handler for threaded interrupts. 2162 - * @flags: Interrupt type flags 2163 - * @name: An ascii name for the claiming device 2164 - * @dev_id: A cookie passed back to the handler function 2235 + * request_any_context_irq - allocate an interrupt line 2236 + * @irq: Interrupt line to allocate 2237 + * @handler: Function to be called when the IRQ occurs. 2238 + * Threaded handler for threaded interrupts. 
2239 + * @flags: Interrupt type flags 2240 + * @name: An ascii name for the claiming device 2241 + * @dev_id: A cookie passed back to the handler function 2165 2242 * 2166 - * This call allocates interrupt resources and enables the 2167 - * interrupt line and IRQ handling. It selects either a 2168 - * hardirq or threaded handling method depending on the 2169 - * context. 2243 + * This call allocates interrupt resources and enables the interrupt line 2244 + * and IRQ handling. It selects either a hardirq or threaded handling 2245 + * method depending on the context. 2170 2246 * 2171 - * On failure, it returns a negative value. On success, 2172 - * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. 2247 + * Returns: On failure, it returns a negative value. On success, it returns either 2248 + * IRQC_IS_HARDIRQ or IRQC_IS_NESTED. 2173 2249 */ 2174 2250 int request_any_context_irq(unsigned int irq, irq_handler_t handler, 2175 2251 unsigned long flags, const char *name, void *dev_id) ··· 2195 2273 EXPORT_SYMBOL_GPL(request_any_context_irq); 2196 2274 2197 2275 /** 2198 - * request_nmi - allocate an interrupt line for NMI delivery 2199 - * @irq: Interrupt line to allocate 2200 - * @handler: Function to be called when the IRQ occurs. 2201 - * Threaded handler for threaded interrupts. 2202 - * @irqflags: Interrupt type flags 2203 - * @name: An ascii name for the claiming device 2204 - * @dev_id: A cookie passed back to the handler function 2276 + * request_nmi - allocate an interrupt line for NMI delivery 2277 + * @irq: Interrupt line to allocate 2278 + * @handler: Function to be called when the IRQ occurs. 2279 + * Threaded handler for threaded interrupts. 2280 + * @irqflags: Interrupt type flags 2281 + * @name: An ascii name for the claiming device 2282 + * @dev_id: A cookie passed back to the handler function 2205 2283 * 2206 - * This call allocates interrupt resources and enables the 2207 - * interrupt line and IRQ handling. 
It sets up the IRQ line 2208 - * to be handled as an NMI. 2284 + * This call allocates interrupt resources and enables the interrupt line 2285 + * and IRQ handling. It sets up the IRQ line to be handled as an NMI. 2209 2286 * 2210 - * An interrupt line delivering NMIs cannot be shared and IRQ handling 2211 - * cannot be threaded. 2287 + * An interrupt line delivering NMIs cannot be shared and IRQ handling 2288 + * cannot be threaded. 2212 2289 * 2213 - * Interrupt lines requested for NMI delivering must produce per cpu 2214 - * interrupts and have auto enabling setting disabled. 2290 + * Interrupt lines requested for NMI delivering must produce per cpu 2291 + * interrupts and have auto enabling setting disabled. 2215 2292 * 2216 - * Dev_id must be globally unique. Normally the address of the 2217 - * device data structure is used as the cookie. Since the handler 2218 - * receives this value it makes sense to use it. 2293 + * @dev_id must be globally unique. Normally the address of the device data 2294 + * structure is used as the cookie. Since the handler receives this value 2295 + * it makes sense to use it. 2219 2296 * 2220 - * If the interrupt line cannot be used to deliver NMIs, function 2221 - * will fail and return a negative value. 2297 + * If the interrupt line cannot be used to deliver NMIs, function will fail 2298 + * and return a negative value. 
2222 2299 */ 2223 2300 int request_nmi(unsigned int irq, irq_handler_t handler, 2224 2301 unsigned long irqflags, const char *name, void *dev_id) 2225 2302 { 2226 2303 struct irqaction *action; 2227 2304 struct irq_desc *desc; 2228 - unsigned long flags; 2229 2305 int retval; 2230 2306 2231 2307 if (irq == IRQ_NOTCONNECTED) ··· 2265 2345 if (retval) 2266 2346 goto err_irq_setup; 2267 2347 2268 - raw_spin_lock_irqsave(&desc->lock, flags); 2269 - 2270 - /* Setup NMI state */ 2271 - desc->istate |= IRQS_NMI; 2272 - retval = irq_nmi_setup(desc); 2273 - if (retval) { 2274 - __cleanup_nmi(irq, desc); 2275 - raw_spin_unlock_irqrestore(&desc->lock, flags); 2276 - return -EINVAL; 2348 + scoped_guard(raw_spinlock_irqsave, &desc->lock) { 2349 + /* Setup NMI state */ 2350 + desc->istate |= IRQS_NMI; 2351 + retval = irq_nmi_setup(desc); 2352 + if (retval) { 2353 + __cleanup_nmi(irq, desc); 2354 + return -EINVAL; 2355 + } 2356 + return 0; 2277 2357 } 2278 - 2279 - raw_spin_unlock_irqrestore(&desc->lock, flags); 2280 - 2281 - return 0; 2282 2358 2283 2359 err_irq_setup: 2284 2360 irq_chip_pm_put(&desc->irq_data); ··· 2286 2370 2287 2371 void enable_percpu_irq(unsigned int irq, unsigned int type) 2288 2372 { 2289 - unsigned int cpu = smp_processor_id(); 2290 - unsigned long flags; 2291 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 2373 + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { 2374 + struct irq_desc *desc = scoped_irqdesc; 2292 2375 2293 - if (!desc) 2294 - return; 2376 + /* 2377 + * If the trigger type is not specified by the caller, then 2378 + * use the default for this interrupt. 2379 + */ 2380 + type &= IRQ_TYPE_SENSE_MASK; 2381 + if (type == IRQ_TYPE_NONE) 2382 + type = irqd_get_trigger_type(&desc->irq_data); 2295 2383 2296 - /* 2297 - * If the trigger type is not specified by the caller, then 2298 - * use the default for this interrupt. 
2299 - */ 2300 - type &= IRQ_TYPE_SENSE_MASK; 2301 - if (type == IRQ_TYPE_NONE) 2302 - type = irqd_get_trigger_type(&desc->irq_data); 2303 - 2304 - if (type != IRQ_TYPE_NONE) { 2305 - int ret; 2306 - 2307 - ret = __irq_set_trigger(desc, type); 2308 - 2309 - if (ret) { 2310 - WARN(1, "failed to set type for IRQ%d\n", irq); 2311 - goto out; 2384 + if (type != IRQ_TYPE_NONE) { 2385 + if (__irq_set_trigger(desc, type)) { 2386 + WARN(1, "failed to set type for IRQ%d\n", irq); 2387 + return; 2388 + } 2312 2389 } 2390 + irq_percpu_enable(desc, smp_processor_id()); 2313 2391 } 2314 - 2315 - irq_percpu_enable(desc, cpu); 2316 - out: 2317 - irq_put_desc_unlock(desc, flags); 2318 2392 } 2319 2393 EXPORT_SYMBOL_GPL(enable_percpu_irq); 2320 2394 ··· 2322 2416 */ 2323 2417 bool irq_percpu_is_enabled(unsigned int irq) 2324 2418 { 2325 - unsigned int cpu = smp_processor_id(); 2326 - struct irq_desc *desc; 2327 - unsigned long flags; 2328 - bool is_enabled; 2329 - 2330 - desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 2331 - if (!desc) 2332 - return false; 2333 - 2334 - is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); 2335 - irq_put_desc_unlock(desc, flags); 2336 - 2337 - return is_enabled; 2419 + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) 2420 + return cpumask_test_cpu(smp_processor_id(), scoped_irqdesc->percpu_enabled); 2421 + return false; 2338 2422 } 2339 2423 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); 2340 2424 2341 2425 void disable_percpu_irq(unsigned int irq) 2342 2426 { 2343 - unsigned int cpu = smp_processor_id(); 2344 - unsigned long flags; 2345 - struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 2346 - 2347 - if (!desc) 2348 - return; 2349 - 2350 - irq_percpu_disable(desc, cpu); 2351 - irq_put_desc_unlock(desc, flags); 2427 + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) 2428 + irq_percpu_disable(scoped_irqdesc, smp_processor_id()); 2352 2429 } 2353 2430 
EXPORT_SYMBOL_GPL(disable_percpu_irq); 2354 2431 ··· 2347 2458 { 2348 2459 struct irq_desc *desc = irq_to_desc(irq); 2349 2460 struct irqaction *action; 2350 - unsigned long flags; 2351 2461 2352 2462 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 2353 2463 2354 2464 if (!desc) 2355 2465 return NULL; 2356 2466 2357 - raw_spin_lock_irqsave(&desc->lock, flags); 2467 + scoped_guard(raw_spinlock_irqsave, &desc->lock) { 2468 + action = desc->action; 2469 + if (!action || action->percpu_dev_id != dev_id) { 2470 + WARN(1, "Trying to free already-free IRQ %d\n", irq); 2471 + return NULL; 2472 + } 2358 2473 2359 - action = desc->action; 2360 - if (!action || action->percpu_dev_id != dev_id) { 2361 - WARN(1, "Trying to free already-free IRQ %d\n", irq); 2362 - goto bad; 2474 + if (!cpumask_empty(desc->percpu_enabled)) { 2475 + WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", 2476 + irq, cpumask_first(desc->percpu_enabled)); 2477 + return NULL; 2478 + } 2479 + 2480 + /* Found it - now remove it from the list of entries: */ 2481 + desc->action = NULL; 2482 + desc->istate &= ~IRQS_NMI; 2363 2483 } 2364 - 2365 - if (!cpumask_empty(desc->percpu_enabled)) { 2366 - WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", 2367 - irq, cpumask_first(desc->percpu_enabled)); 2368 - goto bad; 2369 - } 2370 - 2371 - /* Found it - now remove it from the list of entries: */ 2372 - desc->action = NULL; 2373 - 2374 - desc->istate &= ~IRQS_NMI; 2375 - 2376 - raw_spin_unlock_irqrestore(&desc->lock, flags); 2377 2484 2378 2485 unregister_handler_proc(irq, action); 2379 - 2380 2486 irq_chip_pm_put(&desc->irq_data); 2381 2487 module_put(desc->owner); 2382 2488 return action; 2383 - 2384 - bad: 2385 - raw_spin_unlock_irqrestore(&desc->lock, flags); 2386 - return NULL; 2387 2489 } 2388 2490 2389 2491 /** 2390 - * remove_percpu_irq - free a per-cpu interrupt 2391 - * @irq: Interrupt line to free 2392 - * @act: irqaction for the interrupt 2492 + * free_percpu_irq - free an 
interrupt allocated with request_percpu_irq 2493 + * @irq: Interrupt line to free 2494 + * @dev_id: Device identity to free 2393 2495 * 2394 - * Used to remove interrupts statically setup by the early boot process. 2395 - */ 2396 - void remove_percpu_irq(unsigned int irq, struct irqaction *act) 2397 - { 2398 - struct irq_desc *desc = irq_to_desc(irq); 2399 - 2400 - if (desc && irq_settings_is_per_cpu_devid(desc)) 2401 - __free_percpu_irq(irq, act->percpu_dev_id); 2402 - } 2403 - 2404 - /** 2405 - * free_percpu_irq - free an interrupt allocated with request_percpu_irq 2406 - * @irq: Interrupt line to free 2407 - * @dev_id: Device identity to free 2496 + * Remove a percpu interrupt handler. The handler is removed, but the 2497 + * interrupt line is not disabled. This must be done on each CPU before 2498 + * calling this function. The function does not return until any executing 2499 + * interrupts for this IRQ have completed. 2408 2500 * 2409 - * Remove a percpu interrupt handler. The handler is removed, but 2410 - * the interrupt line is not disabled. This must be done on each 2411 - * CPU before calling this function. The function does not return 2412 - * until any executing interrupts for this IRQ have completed. 2413 - * 2414 - * This function must not be called from interrupt context. 2501 + * This function must not be called from interrupt context. 2415 2502 */ 2416 2503 void free_percpu_irq(unsigned int irq, void __percpu *dev_id) 2417 2504 { ··· 2416 2551 } 2417 2552 2418 2553 /** 2419 - * setup_percpu_irq - setup a per-cpu interrupt 2420 - * @irq: Interrupt line to setup 2421 - * @act: irqaction for the interrupt 2554 + * setup_percpu_irq - setup a per-cpu interrupt 2555 + * @irq: Interrupt line to setup 2556 + * @act: irqaction for the interrupt 2422 2557 * 2423 2558 * Used to statically setup per-cpu interrupts in the early boot process. 
2424 2559 */ ··· 2443 2578 } 2444 2579 2445 2580 /** 2446 - * __request_percpu_irq - allocate a percpu interrupt line 2447 - * @irq: Interrupt line to allocate 2448 - * @handler: Function to be called when the IRQ occurs. 2449 - * @flags: Interrupt type flags (IRQF_TIMER only) 2450 - * @devname: An ascii name for the claiming device 2451 - * @dev_id: A percpu cookie passed back to the handler function 2581 + * __request_percpu_irq - allocate a percpu interrupt line 2582 + * @irq: Interrupt line to allocate 2583 + * @handler: Function to be called when the IRQ occurs. 2584 + * @flags: Interrupt type flags (IRQF_TIMER only) 2585 + * @devname: An ascii name for the claiming device 2586 + * @dev_id: A percpu cookie passed back to the handler function 2452 2587 * 2453 - * This call allocates interrupt resources and enables the 2454 - * interrupt on the local CPU. If the interrupt is supposed to be 2455 - * enabled on other CPUs, it has to be done on each CPU using 2456 - * enable_percpu_irq(). 2588 + * This call allocates interrupt resources and enables the interrupt on the 2589 + * local CPU. If the interrupt is supposed to be enabled on other CPUs, it 2590 + * has to be done on each CPU using enable_percpu_irq(). 2457 2591 * 2458 - * Dev_id must be globally unique. It is a per-cpu variable, and 2459 - * the handler gets called with the interrupted CPU's instance of 2460 - * that variable. 2592 + * @dev_id must be globally unique. It is a per-cpu variable, and 2593 + * the handler gets called with the interrupted CPU's instance of 2594 + * that variable. 2461 2595 */ 2462 2596 int __request_percpu_irq(unsigned int irq, irq_handler_t handler, 2463 2597 unsigned long flags, const char *devname, ··· 2504 2640 EXPORT_SYMBOL_GPL(__request_percpu_irq); 2505 2641 2506 2642 /** 2507 - * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery 2508 - * @irq: Interrupt line to allocate 2509 - * @handler: Function to be called when the IRQ occurs. 
2510 - * @name: An ascii name for the claiming device 2511 - * @dev_id: A percpu cookie passed back to the handler function 2643 + * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery 2644 + * @irq: Interrupt line to allocate 2645 + * @handler: Function to be called when the IRQ occurs. 2646 + * @name: An ascii name for the claiming device 2647 + * @dev_id: A percpu cookie passed back to the handler function 2512 2648 * 2513 - * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs 2514 - * have to be setup on each CPU by calling prepare_percpu_nmi() before 2515 - * being enabled on the same CPU by using enable_percpu_nmi(). 2649 + * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs 2650 + * have to be setup on each CPU by calling prepare_percpu_nmi() before 2651 + * being enabled on the same CPU by using enable_percpu_nmi(). 2516 2652 * 2517 - * Dev_id must be globally unique. It is a per-cpu variable, and 2518 - * the handler gets called with the interrupted CPU's instance of 2519 - * that variable. 2653 + * @dev_id must be globally unique. It is a per-cpu variable, and the 2654 + * handler gets called with the interrupted CPU's instance of that 2655 + * variable. 2520 2656 * 2521 - * Interrupt lines requested for NMI delivering should have auto enabling 2522 - * setting disabled. 2657 + * Interrupt lines requested for NMI delivering should have auto enabling 2658 + * setting disabled. 2523 2659 * 2524 - * If the interrupt line cannot be used to deliver NMIs, function 2525 - * will fail returning a negative value. 2660 + * If the interrupt line cannot be used to deliver NMIs, function 2661 + * will fail returning a negative value. 
2526 2662 */ 2527 2663 int request_percpu_nmi(unsigned int irq, irq_handler_t handler, 2528 2664 const char *name, void __percpu *dev_id) 2529 2665 { 2530 2666 struct irqaction *action; 2531 2667 struct irq_desc *desc; 2532 - unsigned long flags; 2533 2668 int retval; 2534 2669 2535 2670 if (!handler) ··· 2564 2701 if (retval) 2565 2702 goto err_irq_setup; 2566 2703 2567 - raw_spin_lock_irqsave(&desc->lock, flags); 2568 - desc->istate |= IRQS_NMI; 2569 - raw_spin_unlock_irqrestore(&desc->lock, flags); 2570 - 2704 + scoped_guard(raw_spinlock_irqsave, &desc->lock) 2705 + desc->istate |= IRQS_NMI; 2571 2706 return 0; 2572 2707 2573 2708 err_irq_setup: ··· 2577 2716 } 2578 2717 2579 2718 /** 2580 - * prepare_percpu_nmi - performs CPU local setup for NMI delivery 2581 - * @irq: Interrupt line to prepare for NMI delivery 2719 + * prepare_percpu_nmi - performs CPU local setup for NMI delivery 2720 + * @irq: Interrupt line to prepare for NMI delivery 2582 2721 * 2583 - * This call prepares an interrupt line to deliver NMI on the current CPU, 2584 - * before that interrupt line gets enabled with enable_percpu_nmi(). 2722 + * This call prepares an interrupt line to deliver NMI on the current CPU, 2723 + * before that interrupt line gets enabled with enable_percpu_nmi(). 2585 2724 * 2586 - * As a CPU local operation, this should be called from non-preemptible 2587 - * context. 2725 + * As a CPU local operation, this should be called from non-preemptible 2726 + * context. 2588 2727 * 2589 - * If the interrupt line cannot be used to deliver NMIs, function 2590 - * will fail returning a negative value. 2728 + * If the interrupt line cannot be used to deliver NMIs, function will fail 2729 + * returning a negative value. 
2591 2730 */ 2592 2731 int prepare_percpu_nmi(unsigned int irq) 2593 2732 { 2594 - unsigned long flags; 2595 - struct irq_desc *desc; 2596 - int ret = 0; 2733 + int ret = -EINVAL; 2597 2734 2598 2735 WARN_ON(preemptible()); 2599 2736 2600 - desc = irq_get_desc_lock(irq, &flags, 2601 - IRQ_GET_DESC_CHECK_PERCPU); 2602 - if (!desc) 2603 - return -EINVAL; 2737 + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { 2738 + if (WARN(!irq_is_nmi(scoped_irqdesc), 2739 + "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", irq)) 2740 + return -EINVAL; 2604 2741 2605 - if (WARN(!irq_is_nmi(desc), 2606 - KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", 2607 - irq)) { 2608 - ret = -EINVAL; 2609 - goto out; 2742 + ret = irq_nmi_setup(scoped_irqdesc); 2743 + if (ret) 2744 + pr_err("Failed to setup NMI delivery: irq %u\n", irq); 2610 2745 } 2611 - 2612 - ret = irq_nmi_setup(desc); 2613 - if (ret) { 2614 - pr_err("Failed to setup NMI delivery: irq %u\n", irq); 2615 - goto out; 2616 - } 2617 - 2618 - out: 2619 - irq_put_desc_unlock(desc, flags); 2620 2746 return ret; 2621 2747 } 2622 2748 2623 2749 /** 2624 - * teardown_percpu_nmi - undoes NMI setup of IRQ line 2625 - * @irq: Interrupt line from which CPU local NMI configuration should be 2626 - * removed 2750 + * teardown_percpu_nmi - undoes NMI setup of IRQ line 2751 + * @irq: Interrupt line from which CPU local NMI configuration should be removed 2627 2752 * 2628 - * This call undoes the setup done by prepare_percpu_nmi(). 2753 + * This call undoes the setup done by prepare_percpu_nmi(). 2629 2754 * 2630 - * IRQ line should not be enabled for the current CPU. 2631 - * 2632 - * As a CPU local operation, this should be called from non-preemptible 2633 - * context. 2755 + * IRQ line should not be enabled for the current CPU. 2756 + * As a CPU local operation, this should be called from non-preemptible 2757 + * context. 
2634 2758 */ 2635 2759 void teardown_percpu_nmi(unsigned int irq) 2636 2760 { 2637 - unsigned long flags; 2638 - struct irq_desc *desc; 2639 - 2640 2761 WARN_ON(preemptible()); 2641 2762 2642 - desc = irq_get_desc_lock(irq, &flags, 2643 - IRQ_GET_DESC_CHECK_PERCPU); 2644 - if (!desc) 2645 - return; 2646 - 2647 - if (WARN_ON(!irq_is_nmi(desc))) 2648 - goto out; 2649 - 2650 - irq_nmi_teardown(desc); 2651 - out: 2652 - irq_put_desc_unlock(desc, flags); 2763 + scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { 2764 + if (WARN_ON(!irq_is_nmi(scoped_irqdesc))) 2765 + return; 2766 + irq_nmi_teardown(scoped_irqdesc); 2767 + } 2653 2768 } 2654 2769 2655 2770 static int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, bool *state) ··· 2652 2815 } 2653 2816 2654 2817 /** 2655 - * irq_get_irqchip_state - returns the irqchip state of a interrupt. 2656 - * @irq: Interrupt line that is forwarded to a VM 2657 - * @which: One of IRQCHIP_STATE_* the caller wants to know about 2658 - * @state: a pointer to a boolean where the state is to be stored 2818 + * irq_get_irqchip_state - returns the irqchip state of a interrupt. 2819 + * @irq: Interrupt line that is forwarded to a VM 2820 + * @which: One of IRQCHIP_STATE_* the caller wants to know about 2821 + * @state: a pointer to a boolean where the state is to be stored 2659 2822 * 2660 - * This call snapshots the internal irqchip state of an 2661 - * interrupt, returning into @state the bit corresponding to 2662 - * stage @which 2823 + * This call snapshots the internal irqchip state of an interrupt, 2824 + * returning into @state the bit corresponding to stage @which 2663 2825 * 2664 - * This function should be called with preemption disabled if the 2665 - * interrupt controller has per-cpu registers. 2826 + * This function should be called with preemption disabled if the interrupt 2827 + * controller has per-cpu registers. 
2666 2828 */ 2667 - int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 2668 - bool *state) 2829 + int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool *state) 2669 2830 { 2670 - struct irq_desc *desc; 2671 - struct irq_data *data; 2672 - unsigned long flags; 2673 - int err = -EINVAL; 2831 + scoped_irqdesc_get_and_buslock(irq, 0) { 2832 + struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); 2674 2833 2675 - desc = irq_get_desc_buslock(irq, &flags, 0); 2676 - if (!desc) 2677 - return err; 2678 - 2679 - data = irq_desc_get_irq_data(desc); 2680 - 2681 - err = __irq_get_irqchip_state(data, which, state); 2682 - 2683 - irq_put_desc_busunlock(desc, flags); 2684 - return err; 2834 + return __irq_get_irqchip_state(data, which, state); 2835 + } 2836 + return -EINVAL; 2685 2837 } 2686 2838 EXPORT_SYMBOL_GPL(irq_get_irqchip_state); 2687 2839 2688 2840 /** 2689 - * irq_set_irqchip_state - set the state of a forwarded interrupt. 2690 - * @irq: Interrupt line that is forwarded to a VM 2691 - * @which: State to be restored (one of IRQCHIP_STATE_*) 2692 - * @val: Value corresponding to @which 2841 + * irq_set_irqchip_state - set the state of a forwarded interrupt. 2842 + * @irq: Interrupt line that is forwarded to a VM 2843 + * @which: State to be restored (one of IRQCHIP_STATE_*) 2844 + * @val: Value corresponding to @which 2693 2845 * 2694 - * This call sets the internal irqchip state of an interrupt, 2695 - * depending on the value of @which. 2846 + * This call sets the internal irqchip state of an interrupt, depending on 2847 + * the value of @which. 2696 2848 * 2697 - * This function should be called with migration disabled if the 2698 - * interrupt controller has per-cpu registers. 2849 + * This function should be called with migration disabled if the interrupt 2850 + * controller has per-cpu registers. 
2699 2851 */ 2700 - int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 2701 - bool val) 2852 + int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool val) 2702 2853 { 2703 - struct irq_desc *desc; 2704 - struct irq_data *data; 2705 - struct irq_chip *chip; 2706 - unsigned long flags; 2707 - int err = -EINVAL; 2854 + scoped_irqdesc_get_and_buslock(irq, 0) { 2855 + struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); 2856 + struct irq_chip *chip; 2708 2857 2709 - desc = irq_get_desc_buslock(irq, &flags, 0); 2710 - if (!desc) 2711 - return err; 2858 + do { 2859 + chip = irq_data_get_irq_chip(data); 2712 2860 2713 - data = irq_desc_get_irq_data(desc); 2861 + if (WARN_ON_ONCE(!chip)) 2862 + return -ENODEV; 2714 2863 2715 - do { 2716 - chip = irq_data_get_irq_chip(data); 2717 - if (WARN_ON_ONCE(!chip)) { 2718 - err = -ENODEV; 2719 - goto out_unlock; 2720 - } 2721 - if (chip->irq_set_irqchip_state) 2722 - break; 2723 - #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 2724 - data = data->parent_data; 2725 - #else 2726 - data = NULL; 2727 - #endif 2728 - } while (data); 2864 + if (chip->irq_set_irqchip_state) 2865 + break; 2729 2866 2730 - if (data) 2731 - err = chip->irq_set_irqchip_state(data, which, val); 2867 + data = irqd_get_parent_data(data); 2868 + } while (data); 2732 2869 2733 - out_unlock: 2734 - irq_put_desc_busunlock(desc, flags); 2735 - return err; 2870 + if (data) 2871 + return chip->irq_set_irqchip_state(data, which, val); 2872 + } 2873 + return -EINVAL; 2736 2874 } 2737 2875 EXPORT_SYMBOL_GPL(irq_set_irqchip_state); 2738 2876
+13 -25
kernel/irq/pm.c
··· 46 46 desc->cond_suspend_depth++; 47 47 48 48 WARN_ON_ONCE(desc->no_suspend_depth && 49 - (desc->no_suspend_depth + 50 - desc->cond_suspend_depth) != desc->nr_actions); 49 + (desc->no_suspend_depth + desc->cond_suspend_depth) != desc->nr_actions); 51 50 } 52 51 53 52 /* ··· 133 134 int irq; 134 135 135 136 for_each_irq_desc(irq, desc) { 136 - unsigned long flags; 137 137 bool sync; 138 138 139 139 if (irq_settings_is_nested_thread(desc)) 140 140 continue; 141 - raw_spin_lock_irqsave(&desc->lock, flags); 142 - sync = suspend_device_irq(desc); 143 - raw_spin_unlock_irqrestore(&desc->lock, flags); 141 + scoped_guard(raw_spinlock_irqsave, &desc->lock) 142 + sync = suspend_device_irq(desc); 144 143 145 144 if (sync) 146 145 synchronize_irq(irq); ··· 183 186 int irq; 184 187 185 188 for_each_irq_desc(irq, desc) { 186 - unsigned long flags; 187 - bool is_early = desc->action && 188 - desc->action->flags & IRQF_EARLY_RESUME; 189 + bool is_early = desc->action && desc->action->flags & IRQF_EARLY_RESUME; 189 190 190 191 if (!is_early && want_early) 191 192 continue; 192 193 if (irq_settings_is_nested_thread(desc)) 193 194 continue; 194 195 195 - raw_spin_lock_irqsave(&desc->lock, flags); 196 + guard(raw_spinlock_irqsave)(&desc->lock); 196 197 resume_irq(desc); 197 - raw_spin_unlock_irqrestore(&desc->lock, flags); 198 198 } 199 199 } 200 200 ··· 201 207 */ 202 208 void rearm_wake_irq(unsigned int irq) 203 209 { 204 - unsigned long flags; 205 - struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 210 + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 211 + struct irq_desc *desc = scoped_irqdesc; 206 212 207 - if (!desc) 208 - return; 213 + if (!(desc->istate & IRQS_SUSPENDED) || !irqd_is_wakeup_set(&desc->irq_data)) 214 + return; 209 215 210 - if (!(desc->istate & IRQS_SUSPENDED) || 211 - !irqd_is_wakeup_set(&desc->irq_data)) 212 - goto unlock; 213 - 214 - desc->istate &= ~IRQS_SUSPENDED; 215 - irqd_set(&desc->irq_data, 
IRQD_WAKEUP_ARMED); 216 - __enable_irq(desc); 217 - 218 - unlock: 219 - irq_put_desc_busunlock(desc, flags); 216 + desc->istate &= ~IRQS_SUSPENDED; 217 + irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); 218 + __enable_irq(desc); 219 + } 220 220 } 221 221 222 222 /**
+25 -42
kernel/irq/proc.c
··· 81 81 static int irq_affinity_hint_proc_show(struct seq_file *m, void *v) 82 82 { 83 83 struct irq_desc *desc = irq_to_desc((long)m->private); 84 - unsigned long flags; 85 84 cpumask_var_t mask; 86 85 87 86 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 88 87 return -ENOMEM; 89 88 90 - raw_spin_lock_irqsave(&desc->lock, flags); 91 - if (desc->affinity_hint) 92 - cpumask_copy(mask, desc->affinity_hint); 93 - raw_spin_unlock_irqrestore(&desc->lock, flags); 89 + scoped_guard(raw_spinlock_irq, &desc->lock) { 90 + if (desc->affinity_hint) 91 + cpumask_copy(mask, desc->affinity_hint); 92 + } 94 93 95 94 seq_printf(m, "%*pb\n", cpumask_pr_args(mask)); 96 95 free_cpumask_var(mask); 97 - 98 96 return 0; 99 97 } 100 98 ··· 293 295 294 296 #define MAX_NAMELEN 128 295 297 296 - static int name_unique(unsigned int irq, struct irqaction *new_action) 298 + static bool name_unique(unsigned int irq, struct irqaction *new_action) 297 299 { 298 300 struct irq_desc *desc = irq_to_desc(irq); 299 301 struct irqaction *action; 300 - unsigned long flags; 301 - int ret = 1; 302 302 303 - raw_spin_lock_irqsave(&desc->lock, flags); 303 + guard(raw_spinlock_irq)(&desc->lock); 304 304 for_each_action_of_desc(desc, action) { 305 305 if ((action != new_action) && action->name && 306 - !strcmp(new_action->name, action->name)) { 307 - ret = 0; 308 - break; 309 - } 306 + !strcmp(new_action->name, action->name)) 307 + return false; 310 308 } 311 - raw_spin_unlock_irqrestore(&desc->lock, flags); 312 - return ret; 309 + return true; 313 310 } 314 311 315 312 void register_handler_proc(unsigned int irq, struct irqaction *action) 316 313 { 317 - char name [MAX_NAMELEN]; 314 + char name[MAX_NAMELEN]; 318 315 struct irq_desc *desc = irq_to_desc(irq); 319 316 320 - if (!desc->dir || action->dir || !action->name || 321 - !name_unique(irq, action)) 317 + if (!desc->dir || action->dir || !action->name || !name_unique(irq, action)) 322 318 return; 323 319 324 320 snprintf(name, MAX_NAMELEN, "%s", 
action->name); ··· 339 347 * added, not when the descriptor is created, so multiple 340 348 * tasks might try to register at the same time. 341 349 */ 342 - mutex_lock(&register_lock); 350 + guard(mutex)(&register_lock); 343 351 344 352 if (desc->dir) 345 - goto out_unlock; 346 - 347 - sprintf(name, "%d", irq); 353 + return; 348 354 349 355 /* create /proc/irq/1234 */ 356 + sprintf(name, "%u", irq); 350 357 desc->dir = proc_mkdir(name, root_irq_dir); 351 358 if (!desc->dir) 352 - goto out_unlock; 359 + return; 353 360 354 361 #ifdef CONFIG_SMP 355 362 umode_t umode = S_IRUGO; ··· 357 366 umode |= S_IWUSR; 358 367 359 368 /* create /proc/irq/<irq>/smp_affinity */ 360 - proc_create_data("smp_affinity", umode, desc->dir, 361 - &irq_affinity_proc_ops, irqp); 369 + proc_create_data("smp_affinity", umode, desc->dir, &irq_affinity_proc_ops, irqp); 362 370 363 371 /* create /proc/irq/<irq>/affinity_hint */ 364 372 proc_create_single_data("affinity_hint", 0444, desc->dir, 365 - irq_affinity_hint_proc_show, irqp); 373 + irq_affinity_hint_proc_show, irqp); 366 374 367 375 /* create /proc/irq/<irq>/smp_affinity_list */ 368 376 proc_create_data("smp_affinity_list", umode, desc->dir, 369 377 &irq_affinity_list_proc_ops, irqp); 370 378 371 - proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show, 372 - irqp); 379 + proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show, irqp); 373 380 # ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 374 381 proc_create_single_data("effective_affinity", 0444, desc->dir, 375 - irq_effective_aff_proc_show, irqp); 382 + irq_effective_aff_proc_show, irqp); 376 383 proc_create_single_data("effective_affinity_list", 0444, desc->dir, 377 - irq_effective_aff_list_proc_show, irqp); 384 + irq_effective_aff_list_proc_show, irqp); 378 385 # endif 379 386 #endif 380 387 proc_create_single_data("spurious", 0444, desc->dir, 381 - irq_spurious_proc_show, (void *)(long)irq); 388 + irq_spurious_proc_show, (void *)(long)irq); 382 389 383 - 
out_unlock: 384 - mutex_unlock(&register_lock); 385 390 } 386 391 387 392 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) ··· 455 468 int i = *(loff_t *) v, j; 456 469 struct irqaction *action; 457 470 struct irq_desc *desc; 458 - unsigned long flags; 459 471 460 472 if (i > ACTUAL_NR_IRQS) 461 473 return 0; ··· 473 487 seq_putc(p, '\n'); 474 488 } 475 489 476 - rcu_read_lock(); 490 + guard(rcu)(); 477 491 desc = irq_to_desc(i); 478 492 if (!desc || irq_settings_is_hidden(desc)) 479 - goto outsparse; 493 + return 0; 480 494 481 495 if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs) 482 - goto outsparse; 496 + return 0; 483 497 484 498 seq_printf(p, "%*d:", prec, i); 485 499 for_each_online_cpu(j) { ··· 489 503 } 490 504 seq_putc(p, ' '); 491 505 492 - raw_spin_lock_irqsave(&desc->lock, flags); 506 + guard(raw_spinlock_irq)(&desc->lock); 493 507 if (desc->irq_data.chip) { 494 508 if (desc->irq_data.chip->irq_print_chip) 495 509 desc->irq_data.chip->irq_print_chip(&desc->irq_data, p); ··· 518 532 } 519 533 520 534 seq_putc(p, '\n'); 521 - raw_spin_unlock_irqrestore(&desc->lock, flags); 522 - outsparse: 523 - rcu_read_unlock(); 524 535 return 0; 525 536 } 526 537 #endif
+21 -29
kernel/irq/resend.c
··· 30 30 */ 31 31 static void resend_irqs(struct tasklet_struct *unused) 32 32 { 33 - struct irq_desc *desc; 34 - 35 - raw_spin_lock_irq(&irq_resend_lock); 33 + guard(raw_spinlock_irq)(&irq_resend_lock); 36 34 while (!hlist_empty(&irq_resend_list)) { 37 - desc = hlist_entry(irq_resend_list.first, struct irq_desc, 38 - resend_node); 35 + struct irq_desc *desc; 36 + 37 + desc = hlist_entry(irq_resend_list.first, struct irq_desc, resend_node); 39 38 hlist_del_init(&desc->resend_node); 39 + 40 40 raw_spin_unlock(&irq_resend_lock); 41 41 desc->handle_irq(desc); 42 42 raw_spin_lock(&irq_resend_lock); 43 43 } 44 - raw_spin_unlock_irq(&irq_resend_lock); 45 44 } 46 45 47 46 /* Tasklet to handle resend: */ ··· 74 75 } 75 76 76 77 /* Add to resend_list and activate the softirq: */ 77 - raw_spin_lock(&irq_resend_lock); 78 - if (hlist_unhashed(&desc->resend_node)) 79 - hlist_add_head(&desc->resend_node, &irq_resend_list); 80 - raw_spin_unlock(&irq_resend_lock); 78 + scoped_guard(raw_spinlock, &irq_resend_lock) { 79 + if (hlist_unhashed(&desc->resend_node)) 80 + hlist_add_head(&desc->resend_node, &irq_resend_list); 81 + } 81 82 tasklet_schedule(&resend_tasklet); 82 83 return 0; 83 84 } 84 85 85 86 void clear_irq_resend(struct irq_desc *desc) 86 87 { 87 - raw_spin_lock(&irq_resend_lock); 88 + guard(raw_spinlock)(&irq_resend_lock); 88 89 hlist_del_init(&desc->resend_node); 89 - raw_spin_unlock(&irq_resend_lock); 90 90 } 91 91 92 92 void irq_resend_init(struct irq_desc *desc) ··· 170 172 */ 171 173 int irq_inject_interrupt(unsigned int irq) 172 174 { 173 - struct irq_desc *desc; 174 - unsigned long flags; 175 - int err; 175 + int err = -EINVAL; 176 176 177 177 /* Try the state injection hardware interface first */ 178 178 if (!irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true)) 179 179 return 0; 180 180 181 181 /* That failed, try via the resend mechanism */ 182 - desc = irq_get_desc_buslock(irq, &flags, 0); 183 - if (!desc) 184 - return -EINVAL; 182 + 
scoped_irqdesc_get_and_buslock(irq, 0) { 183 + struct irq_desc *desc = scoped_irqdesc; 185 184 186 - /* 187 - * Only try to inject when the interrupt is: 188 - * - not NMI type 189 - * - activated 190 - */ 191 - if (irq_is_nmi(desc) || !irqd_is_activated(&desc->irq_data)) 192 - err = -EINVAL; 193 - else 194 - err = check_irq_resend(desc, true); 195 - 196 - irq_put_desc_busunlock(desc, flags); 185 + /* 186 + * Only try to inject when the interrupt is: 187 + * - not NMI type 188 + * - activated 189 + */ 190 + if (!irq_is_nmi(desc) && irqd_is_activated(&desc->irq_data)) 191 + err = check_irq_resend(desc, true); 192 + } 197 193 return err; 198 194 } 199 195 EXPORT_SYMBOL_GPL(irq_inject_interrupt);
+40 -64
kernel/irq/spurious.c
··· 34 34 * true and let the handler run. 35 35 */ 36 36 bool irq_wait_for_poll(struct irq_desc *desc) 37 - __must_hold(&desc->lock) 38 37 { 38 + lockdep_assert_held(&desc->lock); 39 + 39 40 if (WARN_ONCE(irq_poll_cpu == smp_processor_id(), 40 41 "irq poll in progress on cpu %d for irq %d\n", 41 42 smp_processor_id(), desc->irq_data.irq)) ··· 60 59 /* 61 60 * Recovery handler for misrouted interrupts. 62 61 */ 63 - static int try_one_irq(struct irq_desc *desc, bool force) 62 + static bool try_one_irq(struct irq_desc *desc, bool force) 64 63 { 65 - irqreturn_t ret = IRQ_NONE; 66 64 struct irqaction *action; 65 + bool ret = false; 67 66 68 - raw_spin_lock(&desc->lock); 67 + guard(raw_spinlock)(&desc->lock); 69 68 70 69 /* 71 70 * PER_CPU, nested thread interrupts and interrupts explicitly 72 71 * marked polled are excluded from polling. 73 72 */ 74 - if (irq_settings_is_per_cpu(desc) || 75 - irq_settings_is_nested_thread(desc) || 73 + if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc) || 76 74 irq_settings_is_polled(desc)) 77 - goto out; 75 + return false; 78 76 79 77 /* 80 78 * Do not poll disabled interrupts unless the spurious 81 79 * disabled poller asks explicitly. 82 80 */ 83 81 if (irqd_irq_disabled(&desc->irq_data) && !force) 84 - goto out; 82 + return false; 85 83 86 84 /* 87 85 * All handlers must agree on IRQF_SHARED, so we test just the 88 86 * first. 
89 87 */ 90 88 action = desc->action; 91 - if (!action || !(action->flags & IRQF_SHARED) || 92 - (action->flags & __IRQF_TIMER)) 93 - goto out; 89 + if (!action || !(action->flags & IRQF_SHARED) || (action->flags & __IRQF_TIMER)) 90 + return false; 94 91 95 92 /* Already running on another processor */ 96 93 if (irqd_irq_inprogress(&desc->irq_data)) { ··· 97 98 * CPU to go looking for our mystery interrupt too 98 99 */ 99 100 desc->istate |= IRQS_PENDING; 100 - goto out; 101 + return false; 101 102 } 102 103 103 104 /* Mark it poll in progress */ 104 105 desc->istate |= IRQS_POLL_INPROGRESS; 105 106 do { 106 107 if (handle_irq_event(desc) == IRQ_HANDLED) 107 - ret = IRQ_HANDLED; 108 + ret = true; 108 109 /* Make sure that there is still a valid action */ 109 110 action = desc->action; 110 111 } while ((desc->istate & IRQS_PENDING) && action); 111 112 desc->istate &= ~IRQS_POLL_INPROGRESS; 112 - out: 113 - raw_spin_unlock(&desc->lock); 114 - return ret == IRQ_HANDLED; 113 + return ret; 115 114 } 116 115 117 116 static int misrouted_irq(int irq) ··· 154 157 continue; 155 158 156 159 /* Racy but it doesn't matter */ 157 - state = desc->istate; 158 - barrier(); 160 + state = READ_ONCE(desc->istate); 159 161 if (!(state & IRQS_SPURIOUS_DISABLED)) 160 162 continue; 161 163 ··· 164 168 } 165 169 out: 166 170 atomic_dec(&irq_poll_active); 167 - mod_timer(&poll_spurious_irq_timer, 168 - jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 171 + mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 169 172 } 170 173 171 174 static inline int bad_action_ret(irqreturn_t action_ret) ··· 188 193 { 189 194 unsigned int irq = irq_desc_get_irq(desc); 190 195 struct irqaction *action; 191 - unsigned long flags; 192 196 193 - if (bad_action_ret(action_ret)) { 194 - printk(KERN_ERR "irq event %d: bogus return value %x\n", 195 - irq, action_ret); 196 - } else { 197 - printk(KERN_ERR "irq %d: nobody cared (try booting with " 198 - "the \"irqpoll\" option)\n", irq); 199 - } 197 + 
if (bad_action_ret(action_ret)) 198 + pr_err("irq event %d: bogus return value %x\n", irq, action_ret); 199 + else 200 + pr_err("irq %d: nobody cared (try booting with the \"irqpoll\" option)\n", irq); 200 201 dump_stack(); 201 - printk(KERN_ERR "handlers:\n"); 202 + pr_err("handlers:\n"); 202 203 203 204 /* 204 205 * We need to take desc->lock here. note_interrupt() is called ··· 202 211 * with something else removing an action. It's ok to take 203 212 * desc->lock here. See synchronize_irq(). 204 213 */ 205 - raw_spin_lock_irqsave(&desc->lock, flags); 214 + guard(raw_spinlock_irqsave)(&desc->lock); 206 215 for_each_action_of_desc(desc, action) { 207 - printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler); 216 + pr_err("[<%p>] %ps", action->handler, action->handler); 208 217 if (action->thread_fn) 209 - printk(KERN_CONT " threaded [<%p>] %ps", 210 - action->thread_fn, action->thread_fn); 211 - printk(KERN_CONT "\n"); 218 + pr_cont(" threaded [<%p>] %ps", action->thread_fn, action->thread_fn); 219 + pr_cont("\n"); 212 220 } 213 - raw_spin_unlock_irqrestore(&desc->lock, flags); 214 221 } 215 222 216 223 static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) ··· 221 232 } 222 233 } 223 234 224 - static inline int 225 - try_misrouted_irq(unsigned int irq, struct irq_desc *desc, 226 - irqreturn_t action_ret) 235 + static inline bool try_misrouted_irq(unsigned int irq, struct irq_desc *desc, 236 + irqreturn_t action_ret) 227 237 { 228 238 struct irqaction *action; 229 239 230 240 if (!irqfixup) 231 - return 0; 241 + return false; 232 242 233 243 /* We didn't actually handle the IRQ - see if it was misrouted? */ 234 244 if (action_ret == IRQ_NONE) 235 - return 1; 245 + return true; 236 246 237 247 /* 238 248 * But for 'irqfixup == 2' we also do it for handled interrupts if ··· 239 251 * traditional PC timer interrupt.. 
Legacy) 240 252 */ 241 253 if (irqfixup < 2) 242 - return 0; 254 + return false; 243 255 244 256 if (!irq) 245 - return 1; 257 + return true; 246 258 247 259 /* 248 260 * Since we don't get the descriptor lock, "action" can 249 - * change under us. We don't really care, but we don't 250 - * want to follow a NULL pointer. So tell the compiler to 251 - * just load it once by using a barrier. 261 + * change under us. 252 262 */ 253 - action = desc->action; 254 - barrier(); 263 + action = READ_ONCE(desc->action); 255 264 return action && (action->flags & IRQF_IRQPOLL); 256 265 } 257 266 ··· 258 273 { 259 274 unsigned int irq; 260 275 261 - if (desc->istate & IRQS_POLL_INPROGRESS || 262 - irq_settings_is_polled(desc)) 276 + if (desc->istate & IRQS_POLL_INPROGRESS || irq_settings_is_polled(desc)) 263 277 return; 264 278 265 279 if (bad_action_ret(action_ret)) { ··· 404 420 /* 405 421 * Now kill the IRQ 406 422 */ 407 - printk(KERN_EMERG "Disabling IRQ #%d\n", irq); 423 + pr_emerg("Disabling IRQ #%d\n", irq); 408 424 desc->istate |= IRQS_SPURIOUS_DISABLED; 409 425 desc->depth++; 410 426 irq_disable(desc); 411 427 412 - mod_timer(&poll_spurious_irq_timer, 413 - jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 428 + mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 414 429 } 415 430 desc->irqs_unhandled = 0; 416 431 } ··· 419 436 int noirqdebug_setup(char *str) 420 437 { 421 438 noirqdebug = 1; 422 - printk(KERN_INFO "IRQ lockup detection disabled\n"); 423 - 439 + pr_info("IRQ lockup detection disabled\n"); 424 440 return 1; 425 441 } 426 - 427 442 __setup("noirqdebug", noirqdebug_setup); 428 443 module_param(noirqdebug, bool, 0644); 429 444 MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); ··· 433 452 return 1; 434 453 } 435 454 irqfixup = 1; 436 - printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); 437 - printk(KERN_WARNING "This may impact system performance.\n"); 438 - 455 + pr_warn("Misrouted IRQ fixup support 
enabled.\n"); 456 + pr_warn("This may impact system performance.\n"); 439 457 return 1; 440 458 } 441 - 442 459 __setup("irqfixup", irqfixup_setup); 443 460 module_param(irqfixup, int, 0644); 444 461 ··· 447 468 return 1; 448 469 } 449 470 irqfixup = 2; 450 - printk(KERN_WARNING "Misrouted IRQ fixup and polling support " 451 - "enabled\n"); 452 - printk(KERN_WARNING "This may significantly impact system " 453 - "performance\n"); 471 + pr_warn("Misrouted IRQ fixup and polling support enabled\n"); 472 + pr_warn("This may significantly impact system performance\n"); 454 473 return 1; 455 474 } 456 - 457 475 __setup("irqpoll", irqpoll_setup);