Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next

Pull core irq updates from Thomas Gleixner:
"The irq department delivers:

- Another tree wide update to get rid of the horrible create_irq
interface along with its even more horrible variants. That also
gets rid of the last leftovers of the initial sparse irq hackery.
arch/driver specific changes have been either acked or ignored.

- A fix for the spurious interrupt detection logic with threaded
interrupts.

- A new ARM SoC interrupt controller

- The usual pile of fixes and improvements all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
Documentation: brcmstb-l2: Add Broadcom STB Level-2 interrupt controller binding
irqchip: brcmstb-l2: Add Broadcom Set Top Box Level-2 interrupt controller
genirq: Improve documentation to match current implementation
ARM: iop13xx: fix msi support with sparse IRQ
genirq: Provide !SMP stub for irq_set_affinity_notifier()
irqchip: armada-370-xp: Move the devicetree binding documentation
irqchip: gic: Use mask field in GICC_IAR
genirq: Remove dynamic_irq mess
ia64: Use irq_init_desc
genirq: Replace dynamic_irq_init/cleanup
genirq: Remove irq_reserve_irq[s]
genirq: Replace reserve_irqs in core code
s390: Avoid call to irq_reserve_irqs()
s390: Remove pointless arch_show_interrupts()
s390: pci: Check return value of alloc_irq_desc() proper
sh: intc: Remove pointless irq_reserve_irqs() invocation
x86, irq: Remove pointless irq_reserve_irqs() call
genirq: Make create/destroy_irq() ia64 private
tile: Use SPARSE_IRQ
tile: pci: Use irq_alloc/free_hwirq()
...

+597 -396
+1 -2
Documentation/IRQ-domain.txt
··· 41 41 calling one of the irq_domain_add_*() functions (each mapping method 42 42 has a different allocator function, more on that later). The function 43 43 will return a pointer to the irq_domain on success. The caller must 44 - provide the allocator function with an irq_domain_ops structure with 45 - the .map callback populated as a minimum. 44 + provide the allocator function with an irq_domain_ops structure. 46 45 47 46 In most cases, the irq_domain will begin empty without any mappings 48 47 between hwirq and IRQ numbers. Mappings are added to the irq_domain
Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt Documentation/devicetree/bindings/interrupt-controller/marvell,armada-370-xp-mpic.txt
+29
Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
··· 1 + Broadcom Generic Level 2 Interrupt Controller 2 + 3 + Required properties: 4 + 5 + - compatible: should be "brcm,l2-intc" 6 + - reg: specifies the base physical address and size of the registers 7 + - interrupt-controller: identifies the node as an interrupt controller 8 + - #interrupt-cells: specifies the number of cells needed to encode an 9 + interrupt source. Should be 1. 10 + - interrupt-parent: specifies the phandle to the parent interrupt controller 11 + this controller is cascaded from 12 + - interrupts: specifies the interrupt line in the interrupt-parent irq space 13 + to be used for cascading 14 + 15 + Optional properties: 16 + 17 + - brcm,irq-can-wake: If present, this means the L2 controller can be used as a 18 + wakeup source for system suspend/resume. 19 + 20 + Example: 21 + 22 + hif_intr2_intc: interrupt-controller@f0441000 { 23 + compatible = "brcm,l2-intc"; 24 + reg = <0xf0441000 0x30>; 25 + interrupt-controller; 26 + #interrupt-cells = <1>; 27 + interrupt-parent = <&intc>; 28 + interrupts = <0x0 0x20 0x0>; 29 + };
+1
arch/arm/Kconfig
··· 480 480 select PCI 481 481 select PLAT_IOP 482 482 select VMSPLIT_1G 483 + select SPARSE_IRQ 483 484 help 484 485 Support for Intel's IOP13XX (XScale) family of processors. 485 486
-2
arch/arm/mach-iop13xx/include/mach/irqs.h
··· 191 191 #define NR_IOP13XX_IRQS (IRQ_IOP13XX_HPI + 1) 192 192 #endif 193 193 194 - #define NR_IRQS NR_IOP13XX_IRQS 195 - 196 194 #endif /* _IOP13XX_IRQ_H_ */
+3
arch/arm/mach-iop13xx/include/mach/time.h
··· 1 1 #ifndef _IOP13XX_TIME_H_ 2 2 #define _IOP13XX_TIME_H_ 3 + 4 + #include <mach/irqs.h> 5 + 3 6 #define IRQ_IOP_TIMER0 IRQ_IOP13XX_TIMER0 4 7 5 8 #define IOP_TMR_EN 0x02
+1
arch/arm/mach-iop13xx/iq81340mc.c
··· 93 93 .init_time = iq81340mc_timer_init, 94 94 .init_machine = iq81340mc_init, 95 95 .restart = iop13xx_restart, 96 + .nr_irqs = NR_IOP13XX_IRQS, 96 97 MACHINE_END
+1
arch/arm/mach-iop13xx/iq81340sc.c
··· 95 95 .init_time = iq81340sc_timer_init, 96 96 .init_machine = iq81340sc_init, 97 97 .restart = iop13xx_restart, 98 + .nr_irqs = NR_IOP13XX_IRQS, 98 99 MACHINE_END
+12 -40
arch/arm/mach-iop13xx/msi.c
··· 23 23 #include <linux/msi.h> 24 24 #include <asm/mach/irq.h> 25 25 #include <asm/irq.h> 26 - 27 - 28 - #define IOP13XX_NUM_MSI_IRQS 128 29 - static DECLARE_BITMAP(msi_irq_in_use, IOP13XX_NUM_MSI_IRQS); 26 + #include <mach/irqs.h> 30 27 31 28 /* IMIPR0 CP6 R8 Page 1 32 29 */ ··· 118 121 irq_set_chained_handler(IRQ_IOP13XX_INBD_MSI, iop13xx_msi_handler); 119 122 } 120 123 121 - /* 122 - * Dynamic irq allocate and deallocation 123 - */ 124 - int create_irq(void) 125 - { 126 - int irq, pos; 127 - 128 - again: 129 - pos = find_first_zero_bit(msi_irq_in_use, IOP13XX_NUM_MSI_IRQS); 130 - irq = IRQ_IOP13XX_MSI_0 + pos; 131 - if (irq > NR_IRQS) 132 - return -ENOSPC; 133 - /* test_and_set_bit operates on 32-bits at a time */ 134 - if (test_and_set_bit(pos, msi_irq_in_use)) 135 - goto again; 136 - 137 - dynamic_irq_init(irq); 138 - 139 - return irq; 140 - } 141 - 142 - void destroy_irq(unsigned int irq) 143 - { 144 - int pos = irq - IRQ_IOP13XX_MSI_0; 145 - 146 - dynamic_irq_cleanup(irq); 147 - 148 - clear_bit(pos, msi_irq_in_use); 149 - } 150 - 151 - void arch_teardown_msi_irq(unsigned int irq) 152 - { 153 - destroy_irq(irq); 154 - } 155 - 156 124 static void iop13xx_msi_nop(struct irq_data *d) 157 125 { 158 126 return; ··· 134 172 135 173 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) 136 174 { 137 - int id, irq = create_irq(); 175 + int id, irq = irq_alloc_desc_from(IRQ_IOP13XX_MSI_0, -1); 138 176 struct msi_msg msg; 139 177 140 178 if (irq < 0) 141 179 return irq; 180 + 181 + if (irq >= NR_IOP13XX_IRQS) { 182 + irq_free_desc(irq); 183 + return -ENOSPC; 184 + } 142 185 143 186 irq_set_msi_desc(irq, desc); 144 187 ··· 157 190 irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); 158 191 159 192 return 0; 193 + } 194 + 195 + void arch_teardown_msi_irq(unsigned int irq) 196 + { 197 + irq_free_desc(irq); 160 198 }
+1
arch/arm/mach-iop13xx/setup.c
··· 27 27 #include <mach/hardware.h> 28 28 #include <asm/irq.h> 29 29 #include <asm/hardware/iop_adma.h> 30 + #include <mach/irqs.h> 30 31 31 32 #define IOP13XX_UART_XTAL 33334000 32 33 #define IOP13XX_SETUP_DEBUG 0
+1
arch/arm/mach-iop13xx/tpmi.c
··· 24 24 #include <linux/io.h> 25 25 #include <asm/irq.h> 26 26 #include <asm/sizes.h> 27 + #include <mach/irqs.h> 27 28 28 29 /* assumes CONTROLLER_ONLY# is never asserted in the ESSR register */ 29 30 #define IOP13XX_TPMI_MMR(dev) IOP13XX_REG_ADDR32_PHYS(0x48000 + (dev << 12))
+1
arch/ia64/Kconfig
··· 32 32 select GENERIC_IRQ_PROBE 33 33 select GENERIC_PENDING_IRQ if SMP 34 34 select GENERIC_IRQ_SHOW 35 + select GENERIC_IRQ_LEGACY 35 36 select ARCH_WANT_OPTIONAL_GPIOLIB 36 37 select ARCH_HAVE_NMI_SAFE_CMPXCHG 37 38 select GENERIC_IOMAP
-1
arch/ia64/include/asm/hw_irq.h
··· 132 132 extern void __setup_vector_irq(int cpu); 133 133 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); 134 134 extern void ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action); 135 - extern int check_irq_used (int irq); 136 135 extern void destroy_and_reserve_irq (unsigned int irq); 137 136 138 137 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+3
arch/ia64/include/asm/irq.h
··· 31 31 32 32 #define is_affinity_mask_valid is_affinity_mask_valid 33 33 34 + int create_irq(void); 35 + void destroy_irq(unsigned int irq); 36 + 34 37 #endif /* _ASM_IA64_IRQ_H */
+2
arch/ia64/include/asm/irq_remapping.h
··· 1 1 #ifndef __IA64_INTR_REMAPPING_H 2 2 #define __IA64_INTR_REMAPPING_H 3 3 #define irq_remapping_enabled 0 4 + #define dmar_alloc_hwirq create_irq 5 + #define dmar_free_hwirq destroy_irq 4 6 #endif
+1 -1
arch/ia64/kernel/iosapic.c
··· 735 735 rte = find_rte(irq, gsi); 736 736 if(iosapic_intr_info[irq].count == 0) { 737 737 assign_irq_vector(irq); 738 - dynamic_irq_init(irq); 738 + irq_init_desc(irq); 739 739 } else if (rte->refcnt != NO_REF_RTE) { 740 740 rte->refcnt++; 741 741 goto unlock_iosapic_lock;
+3 -12
arch/ia64/kernel/irq_ia64.c
··· 93 93 [0 ... NR_IRQS -1] = IRQ_UNUSED 94 94 }; 95 95 96 - int check_irq_used(int irq) 97 - { 98 - if (irq_status[irq] == IRQ_USED) 99 - return 1; 100 - 101 - return -1; 102 - } 103 - 104 96 static inline int find_unassigned_irq(void) 105 97 { 106 98 int irq; ··· 382 390 { 383 391 unsigned long flags; 384 392 385 - dynamic_irq_cleanup(irq); 386 - 393 + irq_init_desc(irq); 387 394 spin_lock_irqsave(&vector_lock, flags); 388 395 __clear_irq_vector(irq); 389 396 irq_status[irq] = IRQ_RSVD; ··· 415 424 out: 416 425 spin_unlock_irqrestore(&vector_lock, flags); 417 426 if (irq >= 0) 418 - dynamic_irq_init(irq); 427 + irq_init_desc(irq); 419 428 return irq; 420 429 } 421 430 422 431 void destroy_irq(unsigned int irq) 423 432 { 424 - dynamic_irq_cleanup(irq); 433 + irq_init_desc(irq); 425 434 clear_irq_vector(irq); 426 435 } 427 436
+1 -9
arch/mips/pci/msi-xlp.c
··· 206 206 .irq_unmask = unmask_msi_irq, 207 207 }; 208 208 209 - void destroy_irq(unsigned int irq) 210 - { 211 - /* nothing to do yet */ 212 - } 213 - 214 209 void arch_teardown_msi_irq(unsigned int irq) 215 210 { 216 - destroy_irq(irq); 217 211 } 218 212 219 213 /* ··· 292 298 293 299 xirq = xirq + msivec; /* msi mapped to global irq space */ 294 300 ret = irq_set_msi_desc(xirq, desc); 295 - if (ret < 0) { 296 - destroy_irq(xirq); 301 + if (ret < 0) 297 302 return ret; 298 - } 299 303 300 304 write_msi_msg(xirq, &msg); 301 305 return 0;
+1 -9
arch/mips/pci/pci-xlr.c
··· 214 214 } 215 215 216 216 #ifdef CONFIG_PCI_MSI 217 - void destroy_irq(unsigned int irq) 218 - { 219 - /* nothing to do yet */ 220 - } 221 - 222 217 void arch_teardown_msi_irq(unsigned int irq) 223 218 { 224 - destroy_irq(irq); 225 219 } 226 220 227 221 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) ··· 257 263 MSI_DATA_DELIVERY_FIXED; 258 264 259 265 ret = irq_set_msi_desc(irq, desc); 260 - if (ret < 0) { 261 - destroy_irq(irq); 266 + if (ret < 0) 262 267 return ret; 263 - } 264 268 265 269 write_msi_msg(irq, &msg); 266 270 return 0;
+2 -3
arch/s390/kernel/irq.c
··· 92 92 93 93 void __init init_IRQ(void) 94 94 { 95 - irq_reserve_irqs(0, THIN_INTERRUPT); 96 95 init_cio_interrupts(); 97 96 init_airq_interrupts(); 98 97 init_ext_interrupts(); ··· 150 151 return 0; 151 152 } 152 153 153 - int arch_show_interrupts(struct seq_file *p, int prec) 154 + unsigned int arch_dynirq_lower_bound(unsigned int from) 154 155 { 155 - return 0; 156 + return from < THIN_INTERRUPT ? THIN_INTERRUPT : from; 156 157 } 157 158 158 159 /*
+3 -3
arch/s390/pci/pci.c
··· 401 401 int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 402 402 { 403 403 struct zpci_dev *zdev = get_zdev(pdev); 404 - unsigned int hwirq, irq, msi_vecs; 404 + unsigned int hwirq, msi_vecs; 405 405 unsigned long aisb; 406 406 struct msi_desc *msi; 407 407 struct msi_msg msg; 408 - int rc; 408 + int rc, irq; 409 409 410 410 if (type == PCI_CAP_ID_MSI && nvec > 1) 411 411 return 1; ··· 433 433 list_for_each_entry(msi, &pdev->msi_list, list) { 434 434 rc = -EIO; 435 435 irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ 436 - if (irq == NO_IRQ) 436 + if (irq < 0) 437 437 goto out_msi; 438 438 rc = irq_set_msi_desc(irq, msi); 439 439 if (rc)
+2
arch/tile/Kconfig
··· 125 125 126 126 config TILEGX 127 127 bool "Building for TILE-Gx (64-bit) processor" 128 + select SPARSE_IRQ 129 + select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ 128 130 select HAVE_FUNCTION_TRACER 129 131 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 130 132 select HAVE_FUNCTION_GRAPH_TRACER
+4 -2
arch/tile/include/asm/irq.h
··· 18 18 #include <linux/hardirq.h> 19 19 20 20 /* The hypervisor interface provides 32 IRQs. */ 21 - #define NR_IRQS 32 21 + #define NR_IRQS 32 22 22 23 23 /* IRQ numbers used for linux IPIs. */ 24 - #define IRQ_RESCHEDULE 0 24 + #define IRQ_RESCHEDULE 0 25 + /* Interrupts for dynamic allocation start at 1. Let the core allocate irq0 */ 26 + #define NR_IRQS_LEGACY 1 25 27 26 28 #define irq_canonicalize(irq) (irq) 27 29
+3 -37
arch/tile/kernel/irq.c
··· 54 54 */ 55 55 static DEFINE_PER_CPU(int, irq_depth); 56 56 57 - /* State for allocating IRQs on Gx. */ 58 - #if CHIP_HAS_IPI() 59 - static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) & 60 - (~(1UL << IRQ_RESCHEDULE)); 61 - static DEFINE_SPINLOCK(available_irqs_lock); 62 - #endif 63 - 64 57 #if CHIP_HAS_IPI() 65 58 /* Use SPRs to manipulate device interrupts. */ 66 59 #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask) ··· 271 278 return 0; 272 279 } 273 280 274 - /* 275 - * Generic, controller-independent functions: 276 - */ 277 - 278 281 #if CHIP_HAS_IPI() 279 - int create_irq(void) 282 + int arch_setup_hwirq(unsigned int irq, int node) 280 283 { 281 - unsigned long flags; 282 - int result; 283 - 284 - spin_lock_irqsave(&available_irqs_lock, flags); 285 - if (available_irqs == 0) 286 - result = -ENOMEM; 287 - else { 288 - result = __ffs(available_irqs); 289 - available_irqs &= ~(1UL << result); 290 - dynamic_irq_init(result); 291 - } 292 - spin_unlock_irqrestore(&available_irqs_lock, flags); 293 - 294 - return result; 284 + return irq >= NR_IRQS ? -EINVAL : 0; 295 285 } 296 - EXPORT_SYMBOL(create_irq); 297 286 298 - void destroy_irq(unsigned int irq) 299 - { 300 - unsigned long flags; 301 - 302 - spin_lock_irqsave(&available_irqs_lock, flags); 303 - available_irqs |= (1UL << irq); 304 - dynamic_irq_cleanup(irq); 305 - spin_unlock_irqrestore(&available_irqs_lock, flags); 306 - } 307 - EXPORT_SYMBOL(destroy_irq); 287 + void arch_teardown_hwirq(unsigned int irq) { } 308 288 #endif
+8 -9
arch/tile/kernel/pci_gx.c
··· 350 350 int cpu; 351 351 352 352 /* Ask the kernel to allocate an IRQ. */ 353 - irq = create_irq(); 354 - if (irq < 0) { 353 + irq = irq_alloc_hwirq(-1); 354 + if (!irq) { 355 355 pr_err("PCI: no free irq vectors, failed for %d\n", i); 356 - 357 356 goto free_irqs; 358 357 } 359 358 controller->irq_intx_table[i] = irq; ··· 381 382 382 383 free_irqs: 383 384 for (j = 0; j < i; j++) 384 - destroy_irq(controller->irq_intx_table[j]); 385 + irq_free_hwirq(controller->irq_intx_table[j]); 385 386 386 387 return -1; 387 388 } ··· 1499 1500 int irq; 1500 1501 int ret; 1501 1502 1502 - irq = create_irq(); 1503 - if (irq < 0) 1504 - return irq; 1503 + irq = irq_alloc_hwirq(-1); 1504 + if (!irq) 1505 + return -ENOSPC; 1505 1506 1506 1507 /* 1507 1508 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail ··· 1600 1601 /* Free mem-map */ 1601 1602 msi_mem_map_alloc_failure: 1602 1603 is_64_failure: 1603 - destroy_irq(irq); 1604 + irq_free_hwirq(irq); 1604 1605 return ret; 1605 1606 } 1606 1607 1607 1608 void arch_teardown_msi_irq(unsigned int irq) 1608 1609 { 1609 - destroy_irq(irq); 1610 + irq_free_hwirq(irq); 1610 1611 }
+1
arch/x86/Kconfig
··· 833 833 config X86_IO_APIC 834 834 def_bool y 835 835 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI 836 + select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ 836 837 837 838 config X86_REROUTE_FOR_BROKEN_BOOT_IRQS 838 839 bool "Reroute for broken boot IRQs"
-2
arch/x86/include/asm/io_apic.h
··· 168 168 extern void mask_ioapic_entries(void); 169 169 extern int restore_ioapic_entries(void); 170 170 171 - extern int get_nr_irqs_gsi(void); 172 - 173 171 extern void setup_ioapic_ids_from_mpc(void); 174 172 extern void setup_ioapic_ids_from_mpc_nocheck(void); 175 173
+3
arch/x86/include/asm/irq_remapping.h
··· 103 103 } 104 104 #endif /* CONFIG_IRQ_REMAP */ 105 105 106 + #define dmar_alloc_hwirq() irq_alloc_hwirq(-1) 107 + #define dmar_free_hwirq irq_free_hwirq 108 + 106 109 #endif /* __X86_IRQ_REMAPPING_H */
+24 -106
arch/x86/kernel/apic/io_apic.c
··· 206 206 count = ARRAY_SIZE(irq_cfgx); 207 207 node = cpu_to_node(0); 208 208 209 - /* Make sure the legacy interrupts are marked in the bitmap */ 210 - irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); 211 - 212 209 for (i = 0; i < count; i++) { 213 210 irq_set_chip_data(i, &cfg[i]); 214 211 zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); ··· 277 280 irq_free_desc(at); 278 281 return cfg; 279 282 } 280 - 281 - static int alloc_irqs_from(unsigned int from, unsigned int count, int node) 282 - { 283 - return irq_alloc_descs_from(from, count, node); 284 - } 285 - 286 - static void free_irq_at(unsigned int at, struct irq_cfg *cfg) 287 - { 288 - free_irq_cfg(at, cfg); 289 - irq_free_desc(at); 290 - } 291 - 292 283 293 284 struct io_apic { 294 285 unsigned int index; ··· 2901 2916 device_initcall(ioapic_init_ops); 2902 2917 2903 2918 /* 2904 - * Dynamic irq allocate and deallocation 2919 + * Dynamic irq allocate and deallocation. Should be replaced by irq domains! 2905 2920 */ 2906 - unsigned int __create_irqs(unsigned int from, unsigned int count, int node) 2921 + int arch_setup_hwirq(unsigned int irq, int node) 2907 2922 { 2908 - struct irq_cfg **cfg; 2923 + struct irq_cfg *cfg; 2909 2924 unsigned long flags; 2910 - int irq, i; 2925 + int ret; 2911 2926 2912 - if (from < nr_irqs_gsi) 2913 - from = nr_irqs_gsi; 2914 - 2915 - cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node); 2927 + cfg = alloc_irq_cfg(irq, node); 2916 2928 if (!cfg) 2917 - return 0; 2918 - 2919 - irq = alloc_irqs_from(from, count, node); 2920 - if (irq < 0) 2921 - goto out_cfgs; 2922 - 2923 - for (i = 0; i < count; i++) { 2924 - cfg[i] = alloc_irq_cfg(irq + i, node); 2925 - if (!cfg[i]) 2926 - goto out_irqs; 2927 - } 2929 + return -ENOMEM; 2928 2930 2929 2931 raw_spin_lock_irqsave(&vector_lock, flags); 2930 - for (i = 0; i < count; i++) 2931 - if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus())) 2932 - goto out_vecs; 2932 + ret = __assign_irq_vector(irq, cfg, 
apic->target_cpus()); 2933 2933 raw_spin_unlock_irqrestore(&vector_lock, flags); 2934 2934 2935 - for (i = 0; i < count; i++) { 2936 - irq_set_chip_data(irq + i, cfg[i]); 2937 - irq_clear_status_flags(irq + i, IRQ_NOREQUEST); 2938 - } 2939 - 2940 - kfree(cfg); 2941 - return irq; 2942 - 2943 - out_vecs: 2944 - for (i--; i >= 0; i--) 2945 - __clear_irq_vector(irq + i, cfg[i]); 2946 - raw_spin_unlock_irqrestore(&vector_lock, flags); 2947 - out_irqs: 2948 - for (i = 0; i < count; i++) 2949 - free_irq_at(irq + i, cfg[i]); 2950 - out_cfgs: 2951 - kfree(cfg); 2952 - return 0; 2935 + if (!ret) 2936 + irq_set_chip_data(irq, cfg); 2937 + else 2938 + free_irq_cfg(irq, cfg); 2939 + return ret; 2953 2940 } 2954 2941 2955 - unsigned int create_irq_nr(unsigned int from, int node) 2956 - { 2957 - return __create_irqs(from, 1, node); 2958 - } 2959 - 2960 - int create_irq(void) 2961 - { 2962 - int node = cpu_to_node(0); 2963 - unsigned int irq_want; 2964 - int irq; 2965 - 2966 - irq_want = nr_irqs_gsi; 2967 - irq = create_irq_nr(irq_want, node); 2968 - 2969 - if (irq == 0) 2970 - irq = -1; 2971 - 2972 - return irq; 2973 - } 2974 - 2975 - void destroy_irq(unsigned int irq) 2942 + void arch_teardown_hwirq(unsigned int irq) 2976 2943 { 2977 2944 struct irq_cfg *cfg = irq_get_chip_data(irq); 2978 2945 unsigned long flags; 2979 2946 2980 - irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 2981 - 2982 2947 free_remapped_irq(irq); 2983 - 2984 2948 raw_spin_lock_irqsave(&vector_lock, flags); 2985 2949 __clear_irq_vector(irq, cfg); 2986 2950 raw_spin_unlock_irqrestore(&vector_lock, flags); 2987 - free_irq_at(irq, cfg); 2988 - } 2989 - 2990 - void destroy_irqs(unsigned int irq, unsigned int count) 2991 - { 2992 - unsigned int i; 2993 - 2994 - for (i = 0; i < count; i++) 2995 - destroy_irq(irq + i); 2951 + free_irq_cfg(irq, cfg); 2996 2952 } 2997 2953 2998 2954 /* ··· 3062 3136 3063 3137 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3064 3138 { 3065 - unsigned int irq, 
irq_want; 3066 3139 struct msi_desc *msidesc; 3140 + unsigned int irq; 3067 3141 int node, ret; 3068 3142 3069 3143 /* Multiple MSI vectors only supported with interrupt remapping */ ··· 3071 3145 return 1; 3072 3146 3073 3147 node = dev_to_node(&dev->dev); 3074 - irq_want = nr_irqs_gsi; 3148 + 3075 3149 list_for_each_entry(msidesc, &dev->msi_list, list) { 3076 - irq = create_irq_nr(irq_want, node); 3077 - if (irq == 0) 3150 + irq = irq_alloc_hwirq(node); 3151 + if (!irq) 3078 3152 return -ENOSPC; 3079 3153 3080 - irq_want = irq + 1; 3081 - 3082 3154 ret = setup_msi_irq(dev, msidesc, irq, 0); 3083 - if (ret < 0) 3084 - goto error; 3155 + if (ret < 0) { 3156 + irq_free_hwirq(irq); 3157 + return ret; 3158 + } 3159 + 3085 3160 } 3086 3161 return 0; 3087 - 3088 - error: 3089 - destroy_irq(irq); 3090 - return ret; 3091 3162 } 3092 3163 3093 3164 void native_teardown_msi_irq(unsigned int irq) 3094 3165 { 3095 - destroy_irq(irq); 3166 + irq_free_hwirq(irq); 3096 3167 } 3097 3168 3098 3169 #ifdef CONFIG_DMAR_TABLE ··· 3341 3418 nr_irqs_gsi = nr; 3342 3419 3343 3420 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3344 - } 3345 - 3346 - int get_nr_irqs_gsi(void) 3347 - { 3348 - return nr_irqs_gsi; 3349 3421 } 3350 3422 3351 3423 unsigned int arch_dynirq_lower_bound(unsigned int from)
+2 -3
arch/x86/kernel/hpet.c
··· 479 479 static int hpet_setup_msi_irq(unsigned int irq) 480 480 { 481 481 if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) { 482 - destroy_irq(irq); 482 + irq_free_hwirq(irq); 483 483 return -EINVAL; 484 484 } 485 485 return 0; ··· 487 487 488 488 static int hpet_assign_irq(struct hpet_dev *dev) 489 489 { 490 - unsigned int irq; 490 + unsigned int irq = irq_alloc_hwirq(-1); 491 491 492 - irq = create_irq_nr(0, -1); 493 492 if (!irq) 494 493 return -EINVAL; 495 494
+4 -6
arch/x86/platform/uv/uv_irq.c
··· 238 238 int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, 239 239 unsigned long mmr_offset, int limit) 240 240 { 241 - int irq, ret; 241 + int ret, irq = irq_alloc_hwirq(uv_blade_to_memory_nid(mmr_blade)); 242 242 243 - irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade)); 244 - 245 - if (irq <= 0) 243 + if (!irq) 246 244 return -EBUSY; 247 245 248 246 ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, ··· 248 250 if (ret == irq) 249 251 uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); 250 252 else 251 - destroy_irq(irq); 253 + irq_free_hwirq(irq); 252 254 253 255 return ret; 254 256 } ··· 283 285 n = n->rb_right; 284 286 } 285 287 spin_unlock_irqrestore(&uv_irq_lock, irqflags); 286 - destroy_irq(irq); 288 + irq_free_hwirq(irq); 287 289 } 288 290 EXPORT_SYMBOL_GPL(uv_teardown_irq);
+4 -4
drivers/iommu/dmar.c
··· 994 994 if (iommu->irq) { 995 995 free_irq(iommu->irq, iommu); 996 996 irq_set_handler_data(iommu->irq, NULL); 997 - destroy_irq(iommu->irq); 997 + dmar_free_hwirq(iommu->irq); 998 998 } 999 999 1000 1000 if (iommu->qi) { ··· 1550 1550 if (iommu->irq) 1551 1551 return 0; 1552 1552 1553 - irq = create_irq(); 1554 - if (!irq) { 1553 + irq = dmar_alloc_hwirq(); 1554 + if (irq <= 0) { 1555 1555 pr_err("IOMMU: no free vectors\n"); 1556 1556 return -EINVAL; 1557 1557 } ··· 1563 1563 if (ret) { 1564 1564 irq_set_handler_data(irq, NULL); 1565 1565 iommu->irq = 0; 1566 - destroy_irq(irq); 1566 + dmar_free_hwirq(irq); 1567 1567 return ret; 1568 1568 } 1569 1569
+5 -7
drivers/iommu/irq_remapping.c
··· 51 51 52 52 static int do_setup_msi_irqs(struct pci_dev *dev, int nvec) 53 53 { 54 - int node, ret, sub_handle, nvec_pow2, index = 0; 54 + int ret, sub_handle, nvec_pow2, index = 0; 55 55 unsigned int irq; 56 56 struct msi_desc *msidesc; 57 57 ··· 61 61 WARN_ON(msidesc->msi_attrib.multiple); 62 62 WARN_ON(msidesc->nvec_used); 63 63 64 - node = dev_to_node(&dev->dev); 65 - irq = __create_irqs(get_nr_irqs_gsi(), nvec, node); 64 + irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev)); 66 65 if (irq == 0) 67 66 return -ENOSPC; 68 67 ··· 88 89 return 0; 89 90 90 91 error: 91 - destroy_irqs(irq, nvec); 92 + irq_free_hwirqs(irq, nvec); 92 93 93 94 /* 94 95 * Restore altered MSI descriptor fields and prevent just destroyed ··· 108 109 unsigned int irq; 109 110 110 111 node = dev_to_node(&dev->dev); 111 - irq = get_nr_irqs_gsi(); 112 112 sub_handle = 0; 113 113 114 114 list_for_each_entry(msidesc, &dev->msi_list, list) { 115 115 116 - irq = create_irq_nr(irq, node); 116 + irq = irq_alloc_hwirq(node); 117 117 if (irq == 0) 118 118 return -1; 119 119 ··· 135 137 return 0; 136 138 137 139 error: 138 - destroy_irq(irq); 140 + irq_free_hwirq(irq); 139 141 return ret; 140 142 } 141 143
+6
drivers/irqchip/Kconfig
··· 30 30 The maximum number of VICs available in the system, for 31 31 power management. 32 32 33 + config BRCMSTB_L2_IRQ 34 + bool 35 + depends on ARM 36 + select GENERIC_IRQ_CHIP 37 + select IRQ_DOMAIN 38 + 33 39 config DW_APB_ICTL 34 40 bool 35 41 select IRQ_DOMAIN
+1
drivers/irqchip/Makefile
··· 29 29 obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o 30 30 obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o 31 31 obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o 32 + obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
+202
drivers/irqchip/irq-brcmstb-l2.c
··· 1 + /* 2 + * Generic Broadcom Set Top Box Level 2 Interrupt controller driver 3 + * 4 + * Copyright (C) 2014 Broadcom Corporation 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + */ 15 + 16 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 + 18 + #include <linux/init.h> 19 + #include <linux/slab.h> 20 + #include <linux/module.h> 21 + #include <linux/platform_device.h> 22 + #include <linux/of.h> 23 + #include <linux/of_irq.h> 24 + #include <linux/of_address.h> 25 + #include <linux/of_platform.h> 26 + #include <linux/interrupt.h> 27 + #include <linux/irq.h> 28 + #include <linux/io.h> 29 + #include <linux/irqdomain.h> 30 + #include <linux/irqchip.h> 31 + #include <linux/irqchip/chained_irq.h> 32 + 33 + #include <asm/mach/irq.h> 34 + 35 + #include "irqchip.h" 36 + 37 + /* Register offsets in the L2 interrupt controller */ 38 + #define CPU_STATUS 0x00 39 + #define CPU_SET 0x04 40 + #define CPU_CLEAR 0x08 41 + #define CPU_MASK_STATUS 0x0c 42 + #define CPU_MASK_SET 0x10 43 + #define CPU_MASK_CLEAR 0x14 44 + 45 + /* L2 intc private data structure */ 46 + struct brcmstb_l2_intc_data { 47 + int parent_irq; 48 + void __iomem *base; 49 + struct irq_domain *domain; 50 + bool can_wake; 51 + u32 saved_mask; /* for suspend/resume */ 52 + }; 53 + 54 + static void brcmstb_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) 55 + { 56 + struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); 57 + struct irq_chip *chip = irq_desc_get_chip(desc); 58 + u32 status; 59 + 60 + chained_irq_enter(chip, desc); 61 + 62 + status = __raw_readl(b->base + 
CPU_STATUS) & 63 + ~(__raw_readl(b->base + CPU_MASK_STATUS)); 64 + 65 + if (status == 0) { 66 + do_bad_IRQ(irq, desc); 67 + goto out; 68 + } 69 + 70 + do { 71 + irq = ffs(status) - 1; 72 + /* ack at our level */ 73 + __raw_writel(1 << irq, b->base + CPU_CLEAR); 74 + status &= ~(1 << irq); 75 + generic_handle_irq(irq_find_mapping(b->domain, irq)); 76 + } while (status); 77 + out: 78 + chained_irq_exit(chip, desc); 79 + } 80 + 81 + static void brcmstb_l2_intc_suspend(struct irq_data *d) 82 + { 83 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 84 + struct brcmstb_l2_intc_data *b = gc->private; 85 + 86 + irq_gc_lock(gc); 87 + /* Save the current mask */ 88 + b->saved_mask = __raw_readl(b->base + CPU_MASK_STATUS); 89 + 90 + if (b->can_wake) { 91 + /* Program the wakeup mask */ 92 + __raw_writel(~gc->wake_active, b->base + CPU_MASK_SET); 93 + __raw_writel(gc->wake_active, b->base + CPU_MASK_CLEAR); 94 + } 95 + irq_gc_unlock(gc); 96 + } 97 + 98 + static void brcmstb_l2_intc_resume(struct irq_data *d) 99 + { 100 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 101 + struct brcmstb_l2_intc_data *b = gc->private; 102 + 103 + irq_gc_lock(gc); 104 + /* Clear unmasked non-wakeup interrupts */ 105 + __raw_writel(~b->saved_mask & ~gc->wake_active, b->base + CPU_CLEAR); 106 + 107 + /* Restore the saved mask */ 108 + __raw_writel(b->saved_mask, b->base + CPU_MASK_SET); 109 + __raw_writel(~b->saved_mask, b->base + CPU_MASK_CLEAR); 110 + irq_gc_unlock(gc); 111 + } 112 + 113 + int __init brcmstb_l2_intc_of_init(struct device_node *np, 114 + struct device_node *parent) 115 + { 116 + unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 117 + struct brcmstb_l2_intc_data *data; 118 + struct irq_chip_generic *gc; 119 + struct irq_chip_type *ct; 120 + int ret; 121 + 122 + data = kzalloc(sizeof(*data), GFP_KERNEL); 123 + if (!data) 124 + return -ENOMEM; 125 + 126 + data->base = of_iomap(np, 0); 127 + if (!data->base) { 128 + pr_err("failed to remap 
intc L2 registers\n"); 129 + ret = -ENOMEM; 130 + goto out_free; 131 + } 132 + 133 + /* Disable all interrupts by default */ 134 + __raw_writel(0xffffffff, data->base + CPU_MASK_SET); 135 + __raw_writel(0xffffffff, data->base + CPU_CLEAR); 136 + 137 + data->parent_irq = irq_of_parse_and_map(np, 0); 138 + if (data->parent_irq < 0) { 139 + pr_err("failed to find parent interrupt\n"); 140 + ret = data->parent_irq; 141 + goto out_unmap; 142 + } 143 + 144 + data->domain = irq_domain_add_linear(np, 32, 145 + &irq_generic_chip_ops, NULL); 146 + if (!data->domain) { 147 + ret = -ENOMEM; 148 + goto out_unmap; 149 + } 150 + 151 + /* Allocate a single Generic IRQ chip for this node */ 152 + ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, 153 + np->full_name, handle_level_irq, clr, 0, 0); 154 + if (ret) { 155 + pr_err("failed to allocate generic irq chip\n"); 156 + goto out_free_domain; 157 + } 158 + 159 + /* Set the IRQ chaining logic */ 160 + irq_set_handler_data(data->parent_irq, data); 161 + irq_set_chained_handler(data->parent_irq, brcmstb_l2_intc_irq_handle); 162 + 163 + gc = irq_get_domain_generic_chip(data->domain, 0); 164 + gc->reg_base = data->base; 165 + gc->private = data; 166 + ct = gc->chip_types; 167 + 168 + ct->chip.irq_ack = irq_gc_ack_set_bit; 169 + ct->regs.ack = CPU_CLEAR; 170 + 171 + ct->chip.irq_mask = irq_gc_mask_disable_reg; 172 + ct->regs.disable = CPU_MASK_SET; 173 + 174 + ct->chip.irq_unmask = irq_gc_unmask_enable_reg; 175 + ct->regs.enable = CPU_MASK_CLEAR; 176 + 177 + ct->chip.irq_suspend = brcmstb_l2_intc_suspend; 178 + ct->chip.irq_resume = brcmstb_l2_intc_resume; 179 + 180 + if (of_property_read_bool(np, "brcm,irq-can-wake")) { 181 + data->can_wake = true; 182 + /* This IRQ chip can wake the system, set all child interrupts 183 + * in wake_enabled mask 184 + */ 185 + gc->wake_enabled = 0xffffffff; 186 + ct->chip.irq_set_wake = irq_gc_set_wake; 187 + } 188 + 189 + pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", 190 + 
data->base, data->parent_irq); 191 + 192 + return 0; 193 + 194 + out_free_domain: 195 + irq_domain_remove(data->domain); 196 + out_unmap: 197 + iounmap(data->base); 198 + out_free: 199 + kfree(data); 200 + return ret; 201 + } 202 + IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init);
+1 -1
drivers/irqchip/irq-gic.c
··· 291 291 292 292 do { 293 293 irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); 294 - irqnr = irqstat & ~0x1c00; 294 + irqnr = irqstat & GICC_IAR_INT_ID_MASK; 295 295 296 296 if (likely(irqnr > 15 && irqnr < 1021)) { 297 297 irqnr = irq_find_mapping(gic->domain, irqnr);
+3 -3
drivers/net/ethernet/tile/tilegx.c
··· 1208 1208 1209 1209 irq = md->ingress_irq; 1210 1210 if (irq < 0) { 1211 - irq = create_irq(); 1212 - if (irq < 0) { 1211 + irq = irq_alloc_hwirq(-1); 1212 + if (!irq) { 1213 1213 netdev_err(dev, 1214 1214 "create_irq failed: mpipe[%d] %d\n", 1215 1215 instance, irq); ··· 1223 1223 if (rc != 0) { 1224 1224 netdev_err(dev, "request_irq failed: mpipe[%d] %d\n", 1225 1225 instance, rc); 1226 - destroy_irq(irq); 1226 + irq_free_hwirq(irq); 1227 1227 return rc; 1228 1228 } 1229 1229 md->ingress_irq = irq;
+4 -9
drivers/pci/htirq.c
··· 87 87 int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) 88 88 { 89 89 struct ht_irq_cfg *cfg; 90 + int max_irq, pos, irq; 90 91 unsigned long flags; 91 92 u32 data; 92 - int max_irq; 93 - int pos; 94 - int irq; 95 - int node; 96 93 97 94 pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ); 98 95 if (!pos) ··· 117 120 cfg->msg.address_lo = 0xffffffff; 118 121 cfg->msg.address_hi = 0xffffffff; 119 122 120 - node = dev_to_node(&dev->dev); 121 - irq = create_irq_nr(0, node); 122 - 123 - if (irq <= 0) { 123 + irq = irq_alloc_hwirq(dev_to_node(&dev->dev)); 124 + if (!irq) { 124 125 kfree(cfg); 125 126 return -EBUSY; 126 127 } ··· 161 166 cfg = irq_get_handler_data(irq); 162 167 irq_set_chip(irq, NULL); 163 168 irq_set_handler_data(irq, NULL); 164 - destroy_irq(irq); 169 + irq_free_hwirq(irq); 165 170 166 171 kfree(cfg); 167 172 }
-6
drivers/sh/intc/core.c
··· 80 80 unsigned int data[2], primary; 81 81 unsigned long flags; 82 82 83 - /* 84 - * Register the IRQ position with the global IRQ map, then insert 85 - * it in to the radix tree. 86 - */ 87 - irq_reserve_irq(irq); 88 - 89 83 raw_spin_lock_irqsave(&intc_big_lock, flags); 90 84 radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq)); 91 85 raw_spin_unlock_irqrestore(&intc_big_lock, flags);
+4 -4
drivers/tty/hvc/hvc_tile.c
··· 133 133 int tile_hvc_irq; 134 134 135 135 /* Create our IRQ and register it. */ 136 - tile_hvc_irq = create_irq(); 137 - if (tile_hvc_irq < 0) 136 + tile_hvc_irq = irq_alloc_hwirq(-1); 137 + if (!tile_hvc_irq) 138 138 return -ENXIO; 139 139 140 140 tile_irq_activate(tile_hvc_irq, TILE_IRQ_PERCPU); 141 141 hp = hvc_alloc(0, tile_hvc_irq, &hvc_tile_get_put_ops, 128); 142 142 if (IS_ERR(hp)) { 143 - destroy_irq(tile_hvc_irq); 143 + irq_free_hwirq(tile_hvc_irq); 144 144 return PTR_ERR(hp); 145 145 } 146 146 dev_set_drvdata(&pdev->dev, hp); ··· 155 155 156 156 rc = hvc_remove(hp); 157 157 if (rc == 0) 158 - destroy_irq(hp->data); 158 + irq_free_hwirq(hp->data); 159 159 160 160 return rc; 161 161 }
+4 -4
drivers/tty/serial/tilegx.c
··· 359 359 } 360 360 361 361 /* Create our IRQs. */ 362 - port->irq = create_irq(); 363 - if (port->irq < 0) 362 + port->irq = irq_alloc_hwirq(-1); 363 + if (!port->irq) 364 364 goto err_uart_dest; 365 365 tile_irq_activate(port->irq, TILE_IRQ_PERCPU); 366 366 ··· 395 395 err_free_irq: 396 396 free_irq(port->irq, port); 397 397 err_dest_irq: 398 - destroy_irq(port->irq); 398 + irq_free_hwirq(port->irq); 399 399 err_uart_dest: 400 400 gxio_uart_destroy(context); 401 401 ret = -ENXIO; ··· 435 435 436 436 if (port->irq > 0) { 437 437 free_irq(port->irq, port); 438 - destroy_irq(port->irq); 438 + irq_free_hwirq(port->irq); 439 439 port->irq = 0; 440 440 } 441 441
+4 -4
drivers/usb/host/ehci-tilegx.c
··· 142 142 ehci->hcs_params = readl(&ehci->caps->hcs_params); 143 143 144 144 /* Create our IRQs and register them. */ 145 - pdata->irq = create_irq(); 146 - if (pdata->irq < 0) { 145 + pdata->irq = irq_alloc_hwirq(-1); 146 + if (!pdata->irq) { 147 147 ret = -ENXIO; 148 148 goto err_no_irq; 149 149 } ··· 175 175 } 176 176 177 177 err_have_irq: 178 - destroy_irq(pdata->irq); 178 + irq_free_hwirq(pdata->irq); 179 179 err_no_irq: 180 180 tilegx_stop_ehc(); 181 181 usb_put_hcd(hcd); ··· 193 193 usb_put_hcd(hcd); 194 194 tilegx_stop_ehc(); 195 195 gxio_usb_host_destroy(&pdata->usb_ctx); 196 - destroy_irq(pdata->irq); 196 + irq_free_hwirq(pdata->irq); 197 197 198 198 return 0; 199 199 }
+4 -4
drivers/usb/host/ohci-tilegx.c
··· 129 129 tilegx_start_ohc(); 130 130 131 131 /* Create our IRQs and register them. */ 132 - pdata->irq = create_irq(); 133 - if (pdata->irq < 0) { 132 + pdata->irq = irq_alloc_hwirq(-1); 133 + if (!pdata->irq) { 134 134 ret = -ENXIO; 135 135 goto err_no_irq; 136 136 } ··· 164 164 } 165 165 166 166 err_have_irq: 167 - destroy_irq(pdata->irq); 167 + irq_free_hwirq(pdata->irq); 168 168 err_no_irq: 169 169 tilegx_stop_ohc(); 170 170 usb_put_hcd(hcd); ··· 182 182 usb_put_hcd(hcd); 183 183 tilegx_stop_ohc(); 184 184 gxio_usb_host_destroy(&pdata->usb_ctx); 185 - destroy_irq(pdata->irq); 185 + irq_free_hwirq(pdata->irq); 186 186 187 187 return 0; 188 188 }
+1 -16
drivers/xen/events/events_base.c
··· 390 390 391 391 static int __must_check xen_allocate_irqs_dynamic(int nvec) 392 392 { 393 - int first = 0; 394 - int i, irq; 395 - 396 - #ifdef CONFIG_X86_IO_APIC 397 - /* 398 - * For an HVM guest or domain 0 which see "real" (emulated or 399 - * actual respectively) GSIs we allocate dynamic IRQs 400 - * e.g. those corresponding to event channels or MSIs 401 - * etc. from the range above those "real" GSIs to avoid 402 - * collisions. 403 - */ 404 - if (xen_initial_domain() || xen_hvm_domain()) 405 - first = get_nr_irqs_gsi(); 406 - #endif 407 - 408 - irq = irq_alloc_descs_from(first, nvec, -1); 393 + int i, irq = irq_alloc_descs(-1, 0, nvec, -1); 409 394 410 395 if (irq >= 0) { 411 396 for (i = 0; i < nvec; i++)
+26 -20
include/linux/interrupt.h
··· 199 199 static inline int check_wakeup_irqs(void) { return 0; } 200 200 #endif 201 201 202 + /** 203 + * struct irq_affinity_notify - context for notification of IRQ affinity changes 204 + * @irq: Interrupt to which notification applies 205 + * @kref: Reference count, for internal use 206 + * @work: Work item, for internal use 207 + * @notify: Function to be called on change. This will be 208 + * called in process context. 209 + * @release: Function to be called on release. This will be 210 + * called in process context. Once registered, the 211 + * structure must only be freed when this function is 212 + * called or later. 213 + */ 214 + struct irq_affinity_notify { 215 + unsigned int irq; 216 + struct kref kref; 217 + struct work_struct work; 218 + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); 219 + void (*release)(struct kref *ref); 220 + }; 221 + 202 222 #if defined(CONFIG_SMP) 203 223 204 224 extern cpumask_var_t irq_default_affinity; ··· 262 242 263 243 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); 264 244 265 - /** 266 - * struct irq_affinity_notify - context for notification of IRQ affinity changes 267 - * @irq: Interrupt to which notification applies 268 - * @kref: Reference count, for internal use 269 - * @work: Work item, for internal use 270 - * @notify: Function to be called on change. This will be 271 - * called in process context. 272 - * @release: Function to be called on release. This will be 273 - * called in process context. Once registered, the 274 - * structure must only be freed when this function is 275 - * called or later. 
276 - */ 277 - struct irq_affinity_notify { 278 - unsigned int irq; 279 - struct kref kref; 280 - struct work_struct work; 281 - void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); 282 - void (*release)(struct kref *ref); 283 - }; 284 - 285 245 extern int 286 246 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); 287 247 ··· 288 288 const struct cpumask *m) 289 289 { 290 290 return -EINVAL; 291 + } 292 + 293 + static inline int 294 + irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) 295 + { 296 + return 0; 291 297 } 292 298 #endif /* CONFIG_SMP */ 293 299
+16 -22
include/linux/irq.h
··· 525 525 IRQ_NOPROBE | IRQ_PER_CPU_DEVID); 526 526 } 527 527 528 - /* Handle dynamic irq creation and destruction */ 529 - extern unsigned int create_irq_nr(unsigned int irq_want, int node); 530 - extern unsigned int __create_irqs(unsigned int from, unsigned int count, 531 - int node); 532 - extern int create_irq(void); 533 - extern void destroy_irq(unsigned int irq); 534 - extern void destroy_irqs(unsigned int irq, unsigned int count); 535 - 536 - /* 537 - * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and 538 - * irq_free_desc instead. 539 - */ 540 - extern void dynamic_irq_cleanup(unsigned int irq); 541 - static inline void dynamic_irq_init(unsigned int irq) 542 - { 543 - dynamic_irq_cleanup(irq); 544 - } 545 - 546 528 /* Set/get chip/data for an IRQ: */ 547 529 extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); 548 530 extern int irq_set_handler_data(unsigned int irq, void *data); ··· 607 625 irq_alloc_descs(-1, from, cnt, node) 608 626 609 627 void irq_free_descs(unsigned int irq, unsigned int cnt); 610 - int irq_reserve_irqs(unsigned int from, unsigned int cnt); 611 - 612 628 static inline void irq_free_desc(unsigned int irq) 613 629 { 614 630 irq_free_descs(irq, 1); 615 631 } 616 632 617 - static inline int irq_reserve_irq(unsigned int irq) 633 + #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ 634 + unsigned int irq_alloc_hwirqs(int cnt, int node); 635 + static inline unsigned int irq_alloc_hwirq(int node) 618 636 { 619 - return irq_reserve_irqs(irq, 1); 637 + return irq_alloc_hwirqs(1, node); 620 638 } 639 + void irq_free_hwirqs(unsigned int from, int cnt); 640 + static inline void irq_free_hwirq(unsigned int irq) 641 + { 642 + return irq_free_hwirqs(irq, 1); 643 + } 644 + int arch_setup_hwirq(unsigned int irq, int node); 645 + void arch_teardown_hwirq(unsigned int irq); 646 + #endif 647 + 648 + #ifdef CONFIG_GENERIC_IRQ_LEGACY 649 + void irq_init_desc(unsigned int irq); 650 + #endif 621 651 622 652 #ifndef irq_reg_writel 623 
653 # define irq_reg_writel(val, addr) writel(val, addr)
+2
include/linux/irqchip/arm-gic.h
··· 21 21 #define GIC_CPU_ACTIVEPRIO 0xd0 22 22 #define GIC_CPU_IDENT 0xfc 23 23 24 + #define GICC_IAR_INT_ID_MASK 0x3ff 25 + 24 26 #define GIC_DIST_CTRL 0x000 25 27 #define GIC_DIST_CTR 0x004 26 28 #define GIC_DIST_IGROUP 0x080
+4
include/linux/irqdesc.h
··· 27 27 * @irq_count: stats field to detect stalled irqs 28 28 * @last_unhandled: aging timer for unhandled count 29 29 * @irqs_unhandled: stats field for spurious unhandled interrupts 30 + * @threads_handled: stats field for deferred spurious detection of threaded handlers 31 + * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers 30 32 * @lock: locking for SMP 31 33 * @affinity_hint: hint to user space for preferred irq affinity 32 34 * @affinity_notify: context for notification of affinity changes ··· 54 52 unsigned int irq_count; /* For detecting broken IRQs */ 55 53 unsigned long last_unhandled; /* Aging timer for unhandled count */ 56 54 unsigned int irqs_unhandled; 55 + atomic_t threads_handled; 56 + int threads_handled_last; 57 57 raw_spinlock_t lock; 58 58 struct cpumask *percpu_enabled; 59 59 #ifdef CONFIG_SMP
+9
kernel/irq/Kconfig
··· 5 5 config MAY_HAVE_SPARSE_IRQ 6 6 bool 7 7 8 + # Legacy support, required for itanic 9 + config GENERIC_IRQ_LEGACY 10 + bool 11 + 8 12 # Enable the generic irq autoprobe mechanism 9 13 config GENERIC_IRQ_PROBE 10 14 bool ··· 19 15 20 16 # Print level/edge extra information 21 17 config GENERIC_IRQ_SHOW_LEVEL 18 + bool 19 + 20 + # Facility to allocate a hardware interrupt. This is legacy support 21 + # and should not be used in new code. Use irq domains instead. 22 + config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ 22 23 bool 23 24 24 25 # Support for delayed migration from interrupt context
+2 -3
kernel/irq/chip.c
··· 40 40 irq_put_desc_unlock(desc, flags); 41 41 /* 42 42 * For !CONFIG_SPARSE_IRQ make the irq show up in 43 - * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is 44 - * already marked, and this call is harmless. 43 + * allocated_irqs. 45 44 */ 46 - irq_reserve_irq(irq); 45 + irq_mark_irq(irq); 47 46 return 0; 48 47 } 49 48 EXPORT_SYMBOL(irq_set_chip);
+7 -1
kernel/irq/internals.h
··· 33 33 }; 34 34 35 35 /* 36 - * Bit masks for desc->state 36 + * Bit masks for desc->core_internal_state__do_not_mess_with_it 37 37 * 38 38 * IRQS_AUTODETECT - autodetection in progress 39 39 * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt ··· 75 75 extern void mask_irq(struct irq_desc *desc); 76 76 extern void unmask_irq(struct irq_desc *desc); 77 77 extern void unmask_threaded_irq(struct irq_desc *desc); 78 + 79 + #ifdef CONFIG_SPARSE_IRQ 80 + static inline void irq_mark_irq(unsigned int irq) { } 81 + #else 82 + extern void irq_mark_irq(unsigned int irq); 83 + #endif 78 84 79 85 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 80 86
+63 -32
kernel/irq/irqdesc.c
··· 278 278 279 279 static void free_desc(unsigned int irq) 280 280 { 281 - dynamic_irq_cleanup(irq); 281 + struct irq_desc *desc = irq_to_desc(irq); 282 + unsigned long flags; 283 + 284 + raw_spin_lock_irqsave(&desc->lock, flags); 285 + desc_set_defaults(irq, desc, desc_node(desc), NULL); 286 + raw_spin_unlock_irqrestore(&desc->lock, flags); 282 287 } 283 288 284 289 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, ··· 303 298 { 304 299 return -ENOMEM; 305 300 } 301 + 302 + void irq_mark_irq(unsigned int irq) 303 + { 304 + mutex_lock(&sparse_irq_lock); 305 + bitmap_set(allocated_irqs, irq, 1); 306 + mutex_unlock(&sparse_irq_lock); 307 + } 308 + 309 + #ifdef CONFIG_GENERIC_IRQ_LEGACY 310 + void irq_init_desc(unsigned int irq) 311 + { 312 + free_desc(irq); 313 + } 314 + #endif 306 315 307 316 #endif /* !CONFIG_SPARSE_IRQ */ 308 317 ··· 415 396 } 416 397 EXPORT_SYMBOL_GPL(__irq_alloc_descs); 417 398 399 + #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ 418 400 /** 419 - * irq_reserve_irqs - mark irqs allocated 420 - * @from: mark from irq number 421 - * @cnt: number of irqs to mark 401 + * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware 402 + * @cnt: number of interrupts to allocate 403 + * @node: node on which to allocate 422 404 * 423 - * Returns 0 on success or an appropriate error code 405 + * Returns an interrupt number > 0 or 0, if the allocation fails. 
424 406 */ 425 - int irq_reserve_irqs(unsigned int from, unsigned int cnt) 407 + unsigned int irq_alloc_hwirqs(int cnt, int node) 426 408 { 427 - unsigned int start; 428 - int ret = 0; 409 + int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL); 429 410 430 - if (!cnt || (from + cnt) > nr_irqs) 431 - return -EINVAL; 411 + if (irq < 0) 412 + return 0; 432 413 433 - mutex_lock(&sparse_irq_lock); 434 - start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); 435 - if (start == from) 436 - bitmap_set(allocated_irqs, start, cnt); 437 - else 438 - ret = -EEXIST; 439 - mutex_unlock(&sparse_irq_lock); 440 - return ret; 414 + for (i = irq; cnt > 0; i++, cnt--) { 415 + if (arch_setup_hwirq(i, node)) 416 + goto err; 417 + irq_clear_status_flags(i, _IRQ_NOREQUEST); 418 + } 419 + return irq; 420 + 421 + err: 422 + for (i--; i >= irq; i--) { 423 + irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); 424 + arch_teardown_hwirq(i); 425 + } 426 + irq_free_descs(irq, cnt); 427 + return 0; 441 428 } 429 + EXPORT_SYMBOL_GPL(irq_alloc_hwirqs); 430 + 431 + /** 432 + * irq_free_hwirqs - Free irq descriptor and cleanup the hardware 433 + * @from: Free from irq number 434 + * @cnt: number of interrupts to free 435 + * 436 + */ 437 + void irq_free_hwirqs(unsigned int from, int cnt) 438 + { 439 + int i; 440 + 441 + for (i = from; cnt > 0; i++, cnt--) { 442 + irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); 443 + arch_teardown_hwirq(i); 444 + } 445 + irq_free_descs(from, cnt); 446 + } 447 + EXPORT_SYMBOL_GPL(irq_free_hwirqs); 448 + #endif 442 449 443 450 /** 444 451 * irq_get_next_irq - get next allocated irq number ··· 525 480 526 481 irq_set_percpu_devid_flags(irq); 527 482 return 0; 528 - } 529 - 530 - /** 531 - * dynamic_irq_cleanup - cleanup a dynamically allocated irq 532 - * @irq: irq number to initialize 533 - */ 534 - void dynamic_irq_cleanup(unsigned int irq) 535 - { 536 - struct irq_desc *desc = irq_to_desc(irq); 537 - unsigned long flags; 538 - 539 - 
raw_spin_lock_irqsave(&desc->lock, flags); 540 - desc_set_defaults(irq, desc, desc_node(desc), NULL); 541 - raw_spin_unlock_irqrestore(&desc->lock, flags); 542 483 } 543 484 544 485 void kstat_incr_irq_this_cpu(unsigned int irq)
+3 -3
kernel/irq/irqdomain.c
··· 27 27 * __irq_domain_add() - Allocate a new irq_domain data structure 28 28 * @of_node: optional device-tree node of the interrupt controller 29 29 * @size: Size of linear map; 0 for radix mapping only 30 + * @hwirq_max: Maximum number of interrupts supported by controller 30 31 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no 31 32 * direct mapping 32 33 * @ops: map/unmap domain callbacks 33 34 * @host_data: Controller private data pointer 34 35 * 35 - * Allocates and initialize and irq_domain structure. Caller is expected to 36 - * register allocated irq_domain with irq_domain_register(). Returns pointer 37 - * to IRQ domain, or NULL on failure. 36 + * Allocates and initializes an irq_domain structure. 37 + * Returns pointer to IRQ domain, or NULL on failure. 38 38 */ 39 39 struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, 40 40 irq_hw_number_t hwirq_max, int direct_max,
+2 -2
kernel/irq/manage.c
··· 886 886 irq_thread_check_affinity(desc, action); 887 887 888 888 action_ret = handler_fn(desc, action); 889 - if (!noirqdebug) 890 - note_interrupt(action->irq, desc, action_ret); 889 + if (action_ret == IRQ_HANDLED) 890 + atomic_inc(&desc->threads_handled); 891 891 892 892 wake_threads_waitq(desc); 893 893 }
+102 -4
kernel/irq/spurious.c
··· 270 270 return action && (action->flags & IRQF_IRQPOLL); 271 271 } 272 272 273 + #define SPURIOUS_DEFERRED 0x80000000 274 + 273 275 void note_interrupt(unsigned int irq, struct irq_desc *desc, 274 276 irqreturn_t action_ret) 275 277 { ··· 279 277 irq_settings_is_polled(desc)) 280 278 return; 281 279 282 - /* we get here again via the threaded handler */ 283 - if (action_ret == IRQ_WAKE_THREAD) 284 - return; 285 - 286 280 if (bad_action_ret(action_ret)) { 287 281 report_bad_irq(irq, desc, action_ret); 288 282 return; 283 + } 284 + 285 + /* 286 + * We cannot call note_interrupt from the threaded handler 287 + * because we need to look at the compound of all handlers 288 + * (primary and threaded). Aside of that in the threaded 289 + * shared case we have no serialization against an incoming 290 + * hardware interrupt while we are dealing with a threaded 291 + * result. 292 + * 293 + * So in case a thread is woken, we just note the fact and 294 + * defer the analysis to the next hardware interrupt. 295 + * 296 + * The threaded handlers store whether they successfully 297 + * handled an interrupt and we check whether that number 298 + * changed versus the last invocation. 299 + * 300 + * We could handle all interrupts with the delayed by one 301 + * mechanism, but for the non forced threaded case we'd just 302 + * add pointless overhead to the straight hardirq interrupts 303 + * for the sake of a few lines less code. 304 + */ 305 + if (action_ret & IRQ_WAKE_THREAD) { 306 + /* 307 + * There is a thread woken. Check whether one of the 308 + * shared primary handlers returned IRQ_HANDLED. If 309 + * not we defer the spurious detection to the next 310 + * interrupt. 311 + */ 312 + if (action_ret == IRQ_WAKE_THREAD) { 313 + int handled; 314 + /* 315 + * We use bit 31 of thread_handled_last to 316 + * denote the deferred spurious detection 317 + * active. 
No locking necessary as 318 + * thread_handled_last is only accessed here 319 + * and we have the guarantee that hard 320 + * interrupts are not reentrant. 321 + */ 322 + if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) { 323 + desc->threads_handled_last |= SPURIOUS_DEFERRED; 324 + return; 325 + } 326 + /* 327 + * Check whether one of the threaded handlers 328 + * returned IRQ_HANDLED since the last 329 + * interrupt happened. 330 + * 331 + * For simplicity we just set bit 31, as it is 332 + * set in threads_handled_last as well. So we 333 + * avoid extra masking. And we really do not 334 + * care about the high bits of the handled 335 + * count. We just care about the count being 336 + * different than the one we saw before. 337 + */ 338 + handled = atomic_read(&desc->threads_handled); 339 + handled |= SPURIOUS_DEFERRED; 340 + if (handled != desc->threads_handled_last) { 341 + action_ret = IRQ_HANDLED; 342 + /* 343 + * Note: We keep the SPURIOUS_DEFERRED 344 + * bit set. We are handling the 345 + * previous invocation right now. 346 + * Keep it for the current one, so the 347 + * next hardware interrupt will 348 + * account for it. 349 + */ 350 + desc->threads_handled_last = handled; 351 + } else { 352 + /* 353 + * None of the threaded handlers felt 354 + * responsible for the last interrupt 355 + * 356 + * We keep the SPURIOUS_DEFERRED bit 357 + * set in threads_handled_last as we 358 + * need to account for the current 359 + * interrupt as well. 360 + */ 361 + action_ret = IRQ_NONE; 362 + } 363 + } else { 364 + /* 365 + * One of the primary handlers returned 366 + * IRQ_HANDLED. So we don't care about the 367 + * threaded handlers on the same line. Clear 368 + * the deferred detection bit. 369 + * 370 + * In theory we could/should check whether the 371 + * deferred bit is set and take the result of 372 + * the previous run into account here as 373 + * well. But it's really not worth the 374 + * trouble. 
If every other interrupt is 375 + * handled we never trigger the spurious 376 + * detector. And if this is just the one out 377 + * of 100k unhandled ones which is handled 378 + * then we merely delay the spurious detection 379 + * by one hard interrupt. Not a real problem. 380 + */ 381 + desc->threads_handled_last &= ~SPURIOUS_DEFERRED; 382 + } 289 383 } 290 384 291 385 if (unlikely(action_ret == IRQ_NONE)) {