Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (96 commits)
apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
apic, x86: Check if EILVT APIC registers are available (AMD only)
x86: ioapic: Call free_irte only if interrupt remapping enabled
arm: Use ARCH_IRQ_INIT_FLAGS
genirq, ARM: Fix boot on ARM platforms
genirq: Fix CONFIG_GENIRQ_NO_DEPRECATED=y build
x86: Switch sparse_irq allocations to GFP_KERNEL
genirq: Switch sparse_irq allocator to GFP_KERNEL
genirq: Make sparse_lock a mutex
x86: lguest: Use new irq allocator
genirq: Remove the now unused sparse irq leftovers
genirq: Sanitize dynamic irq handling
genirq: Remove arch_init_chip_data()
x86: xen: Sanitise sparse_irq handling
x86: Use sane enumeration
x86: uv: Clean up the direct access to irq_desc
x86: Make io_apic.c local functions static
genirq: Remove irq_2_iommu
x86: Speed up the irq_remapped check in hot paths
intr_remap: Simplify the code further
...

Fix up trivial conflicts in arch/x86/Kconfig

+2177 -2268
+52 -32
Documentation/DocBook/genericirq.tmpl
··· 28 </authorgroup> 29 30 <copyright> 31 - <year>2005-2006</year> 32 <holder>Thomas Gleixner</holder> 33 </copyright> 34 <copyright> ··· 100 <listitem><para>Edge type</para></listitem> 101 <listitem><para>Simple type</para></listitem> 102 </itemizedlist> 103 In the SMP world of the __do_IRQ() super-handler another type 104 was identified: 105 <itemizedlist> ··· 157 is still available. This leads to a kind of duality for the time 158 being. Over time the new model should be used in more and more 159 architectures, as it enables smaller and cleaner IRQ subsystems. 160 </para> 161 </chapter> 162 <chapter id="bugs"> ··· 222 <itemizedlist> 223 <listitem><para>handle_level_irq</para></listitem> 224 <listitem><para>handle_edge_irq</para></listitem> 225 <listitem><para>handle_simple_irq</para></listitem> 226 <listitem><para>handle_percpu_irq</para></listitem> 227 </itemizedlist> ··· 239 are used by the default flow implementations. 240 The following helper functions are implemented (simplified excerpt): 241 <programlisting> 242 - default_enable(irq) 243 { 244 - desc->chip->unmask(irq); 245 } 246 247 - default_disable(irq) 248 { 249 - if (!delay_disable(irq)) 250 - desc->chip->mask(irq); 251 } 252 253 - default_ack(irq) 254 { 255 - chip->ack(irq); 256 } 257 258 - default_mask_ack(irq) 259 { 260 - if (chip->mask_ack) { 261 - chip->mask_ack(irq); 262 } else { 263 - chip->mask(irq); 264 - chip->ack(irq); 265 } 266 } 267 268 - noop(irq) 269 { 270 } 271 ··· 284 <para> 285 The following control flow is implemented (simplified excerpt): 286 <programlisting> 287 - desc->chip->start(); 288 handle_IRQ_event(desc->action); 289 - desc->chip->end(); 290 </programlisting> 291 </para> 292 - </sect3> 293 <sect3 id="Default_Edge_IRQ_flow_handler"> 294 <title>Default Edge IRQ flow handler</title> 295 <para> ··· 315 The following control flow is implemented (simplified excerpt): 316 <programlisting> 317 if (desc->status &amp; running) { 318 - desc->chip->hold(); 319 desc->status |= pending | masked; 320 return; 321 } 322 - desc->chip->start(); 323 desc->status |= running; 324 do { 325 if (desc->status &amp; masked) 326 - desc->chip->enable(); 327 desc->status &amp;= ~pending; 328 handle_IRQ_event(desc->action); 329 } while (status &amp; pending); 330 desc->status &amp;= ~running; 331 - desc->chip->end(); 332 </programlisting> 333 </para> 334 </sect3> ··· 362 <para> 363 The following control flow is implemented (simplified excerpt): 364 <programlisting> 365 - desc->chip->start(); 366 handle_IRQ_event(desc->action); 367 - desc->chip->end(); 368 </programlisting> 369 </para> 370 </sect3> ··· 395 mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when 396 you want to use the delayed interrupt disable feature and your 397 hardware is not capable of retriggering an interrupt.) 398 - The delayed interrupt disable can be runtime enabled, per interrupt, 399 - by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field. 400 </para> 401 </sect2> 402 </sect1> ··· 406 contains all the direct chip relevant functions, which 407 can be utilized by the irq flow implementations. 
408 <itemizedlist> 409 - <listitem><para>ack()</para></listitem> 410 - <listitem><para>mask_ack() - Optional, recommended for performance</para></listitem> 411 - <listitem><para>mask()</para></listitem> 412 - <listitem><para>unmask()</para></listitem> 413 - <listitem><para>retrigger() - Optional</para></listitem> 414 - <listitem><para>set_type() - Optional</para></listitem> 415 - <listitem><para>set_wake() - Optional</para></listitem> 416 </itemizedlist> 417 These primitives are strictly intended to mean what they say: ack means 418 ACK, masking means masking of an IRQ line, etc. It is up to the flow ··· 477 <para> 478 This chapter contains the autogenerated documentation of the internal functions. 479 </para> 480 !Ikernel/irq/handle.c 481 !Ikernel/irq/chip.c 482 </chapter>
··· 28 </authorgroup> 29 30 <copyright> 31 + <year>2005-2010</year> 32 <holder>Thomas Gleixner</holder> 33 </copyright> 34 <copyright> ··· 100 <listitem><para>Edge type</para></listitem> 101 <listitem><para>Simple type</para></listitem> 102 </itemizedlist> 103 + During the implementation we identified another type: 104 + <itemizedlist> 105 + <listitem><para>Fast EOI type</para></listitem> 106 + </itemizedlist> 107 In the SMP world of the __do_IRQ() super-handler another type 108 was identified: 109 <itemizedlist> ··· 153 is still available. This leads to a kind of duality for the time 154 being. Over time the new model should be used in more and more 155 architectures, as it enables smaller and cleaner IRQ subsystems. 156 + It has been deprecated for three years now and is about to be removed. 157 </para> 158 </chapter> 159 <chapter id="bugs"> ··· 217 <itemizedlist> 218 <listitem><para>handle_level_irq</para></listitem> 219 <listitem><para>handle_edge_irq</para></listitem> 220 + <listitem><para>handle_fasteoi_irq</para></listitem> 221 <listitem><para>handle_simple_irq</para></listitem> 222 <listitem><para>handle_percpu_irq</para></listitem> 223 </itemizedlist> ··· 233 are used by the default flow implementations. 234 The following helper functions are implemented (simplified excerpt): 235 <programlisting> 236 + default_enable(struct irq_data *data) 237 { 238 + desc->chip->irq_unmask(data); 239 } 240 241 + default_disable(struct irq_data *data) 242 { 243 + if (!delay_disable(data)) 244 + desc->chip->irq_mask(data); 245 } 246 247 + default_ack(struct irq_data *data) 248 { 249 + chip->irq_ack(data); 250 } 251 252 + default_mask_ack(struct irq_data *data) 253 { 254 + if (chip->irq_mask_ack) { 255 + chip->irq_mask_ack(data); 256 } else { 257 + chip->irq_mask(data); 258 + chip->irq_ack(data); 259 } 260 } 261 262 + noop(struct irq_data *data) 263 { 264 } 265 ··· 278 <para> 279 The following control flow is implemented (simplified excerpt): 280 <programlisting> 281 + desc->chip->irq_mask(); 282 handle_IRQ_event(desc->action); 283 + desc->chip->irq_unmask(); 284 </programlisting> 285 </para> 286 + </sect3> 287 + <sect3 id="Default_FASTEOI_IRQ_flow_handler"> 288 + <title>Default Fast EOI IRQ flow handler</title> 289 + <para> 290 + handle_fasteoi_irq provides a generic implementation 291 + for interrupts which only need an EOI at the end of 292 + the handler. 293 + </para> 294 + <para> 295 + The following control flow is implemented (simplified excerpt): 296 + <programlisting> 297 + handle_IRQ_event(desc->action); 298 + desc->chip->irq_eoi(); 299 + </programlisting> 300 + </para> 301 + </sect3> 302 <sect3 id="Default_Edge_IRQ_flow_handler"> 303 <title>Default Edge IRQ flow handler</title> 304 <para> ··· 294 The following control flow is implemented (simplified excerpt): 295 <programlisting> 296 if (desc->status &amp; running) { 297 + desc->chip->irq_mask(); 298 desc->status |= pending | masked; 299 return; 300 } 301 + desc->chip->irq_ack(); 302 desc->status |= running; 303 do { 304 if (desc->status &amp; masked) 305 + desc->chip->irq_unmask(); 306 desc->status &amp;= ~pending; 307 handle_IRQ_event(desc->action); 308 } while (status &amp; pending); 309 desc->status &amp;= ~running; 310 </programlisting> 311 </para> 312 </sect3> ··· 342 <para> 343 The following control flow is implemented (simplified excerpt): 344 <programlisting> 345 handle_IRQ_event(desc->action); 346 + if (desc->chip->irq_eoi) 347 + desc->chip->irq_eoi(); 348 </programlisting> 349 </para> 350 </sect3> ··· 375 mechanism.
(It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when 376 you want to use the delayed interrupt disable feature and your 377 hardware is not capable of retriggering an interrupt.) 378 + The delayed interrupt disable is not configurable. 379 </para> 380 </sect2> 381 </sect1> ··· 387 contains all the direct chip relevant functions, which 388 can be utilized by the irq flow implementations. 389 <itemizedlist> 390 + <listitem><para>irq_ack()</para></listitem> 391 + <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem> 392 + <listitem><para>irq_mask()</para></listitem> 393 + <listitem><para>irq_unmask()</para></listitem> 394 + <listitem><para>irq_retrigger() - Optional</para></listitem> 395 + <listitem><para>irq_set_type() - Optional</para></listitem> 396 + <listitem><para>irq_set_wake() - Optional</para></listitem> 397 </itemizedlist> 398 These primitives are strictly intended to mean what they say: ack means 399 ACK, masking means masking of an IRQ line, etc. It is up to the flow ··· 458 <para> 459 This chapter contains the autogenerated documentation of the internal functions. 460 </para> 461 + !Ikernel/irq/irqdesc.c 462 !Ikernel/irq/handle.c 463 !Ikernel/irq/chip.c 464 </chapter>
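For orientation alongside the documentation change above, a minimal sketch of what a driver-side irq_chip looks like after this conversion. All names here (foo_*, the FOO_* register offsets) are hypothetical, for illustration only; a real chip wires up whichever of the optional callbacks its hardware needs:

	#include <linux/irq.h>
	#include <linux/io.h>

	#define FOO_ACK		0x00	/* hypothetical register offsets */
	#define FOO_MASK_SET	0x04
	#define FOO_MASK_CLR	0x08

	static void __iomem *foo_base;	/* mapped elsewhere at probe time */

	static void foo_irq_ack(struct irq_data *data)
	{
		/* the irq number now arrives via struct irq_data; this
		 * assumes Linux irq == controller bit, as on a simple
		 * root controller */
		writel(1 << data->irq, foo_base + FOO_ACK);
	}

	static void foo_irq_mask(struct irq_data *data)
	{
		writel(1 << data->irq, foo_base + FOO_MASK_SET);
	}

	static void foo_irq_unmask(struct irq_data *data)
	{
		writel(1 << data->irq, foo_base + FOO_MASK_CLR);
	}

	static struct irq_chip foo_irq_chip = {
		.name		= "FOO",	/* was .typename */
		.irq_ack	= foo_irq_ack,
		.irq_mask	= foo_irq_mask,
		.irq_unmask	= foo_irq_unmask,
	};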
+6
MAINTAINERS
··· 3241 F: include/net/irda/ 3242 F: net/irda/ 3243 3244 ISAPNP 3245 M: Jaroslav Kysela <perex@perex.cz> 3246 S: Maintained
··· 3241 F: include/net/irda/ 3242 F: net/irda/ 3243 3244 + IRQ SUBSYSTEM 3245 + M: Thomas Gleixner <tglx@linutronix.de> 3246 + S: Maintained 3247 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core 3248 + F: kernel/irq/ 3249 + 3250 ISAPNP 3251 M: Jaroslav Kysela <perex@perex.cz> 3252 S: Maintained
+2
arch/arm/include/asm/hw_irq.h
··· 24 #define IRQF_PROBE (1 << 1) 25 #define IRQF_NOAUTOEN (1 << 2) 26 27 #endif
··· 24 #define IRQF_PROBE (1 << 1) 25 #define IRQF_NOAUTOEN (1 << 2) 26 27 + #define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) 28 + 29 #endif
+1 -9
arch/arm/kernel/irq.c
··· 154 155 void __init init_IRQ(void) 156 { 157 - struct irq_desc *desc; 158 - int irq; 159 - 160 - for (irq = 0; irq < nr_irqs; irq++) { 161 - desc = irq_to_desc_alloc_node(irq, 0); 162 - desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 163 - } 164 - 165 init_arch_irq(); 166 } 167 ··· 161 int __init arch_probe_nr_irqs(void) 162 { 163 nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS; 164 - return 0; 165 } 166 #endif 167
··· 154 155 void __init init_IRQ(void) 156 { 157 init_arch_irq(); 158 } 159 ··· 169 int __init arch_probe_nr_irqs(void) 170 { 171 nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS; 172 + return nr_irqs; 173 } 174 #endif 175
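The loop deleted from init_IRQ() above is exactly what the new ARCH_IRQ_INIT_FLAGS in asm/hw_irq.h replaces: the genirq core now seeds every freshly allocated descriptor with the arch-supplied flags. A simplified sketch of the idea, not the exact core code:

	/* genirq core, simplified: applied once when a descriptor is
	 * allocated, instead of in every architecture's init_IRQ() */
	#ifndef ARCH_IRQ_INIT_FLAGS
	# define ARCH_IRQ_INIT_FLAGS	0
	#endif

	desc->status = IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS;

The changed return value is related: arch_probe_nr_irqs() now reports the number of preallocated legacy interrupts instead of 0, which is why ARM returns nr_irqs here and SH returns NR_IRQS_LEGACY further down.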
+3 -3
arch/arm/mach-bcmring/irq.c
··· 67 } 68 69 static struct irq_chip bcmring_irq0_chip = { 70 - .typename = "ARM-INTC0", 71 .ack = bcmring_mask_irq0, 72 .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */ 73 .unmask = bcmring_unmask_irq0, /* unmaks an interrupt */ 74 }; 75 76 static struct irq_chip bcmring_irq1_chip = { 77 - .typename = "ARM-INTC1", 78 .ack = bcmring_mask_irq1, 79 .mask = bcmring_mask_irq1, 80 .unmask = bcmring_unmask_irq1, 81 }; 82 83 static struct irq_chip bcmring_irq2_chip = { 84 - .typename = "ARM-SINTC", 85 .ack = bcmring_mask_irq2, 86 .mask = bcmring_mask_irq2, 87 .unmask = bcmring_unmask_irq2,
··· 67 } 68 69 static struct irq_chip bcmring_irq0_chip = { 70 + .name = "ARM-INTC0", 71 .ack = bcmring_mask_irq0, 72 .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */ 73 .unmask = bcmring_unmask_irq0, /* unmasks an interrupt */ 74 }; 75 76 static struct irq_chip bcmring_irq1_chip = { 77 + .name = "ARM-INTC1", 78 .ack = bcmring_mask_irq1, 79 .mask = bcmring_mask_irq1, 80 .unmask = bcmring_unmask_irq1, 81 }; 82 83 static struct irq_chip bcmring_irq2_chip = { 84 + .name = "ARM-SINTC", 85 .ack = bcmring_mask_irq2, 86 .mask = bcmring_mask_irq2, 87 .unmask = bcmring_unmask_irq2,
+4 -4
arch/arm/mach-iop13xx/msi.c
··· 164 static struct irq_chip iop13xx_msi_chip = { 165 .name = "PCI-MSI", 166 .ack = iop13xx_msi_nop, 167 - .enable = unmask_msi_irq, 168 - .disable = mask_msi_irq, 169 - .mask = mask_msi_irq, 170 - .unmask = unmask_msi_irq, 171 }; 172 173 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
··· 164 static struct irq_chip iop13xx_msi_chip = { 165 .name = "PCI-MSI", 166 .ack = iop13xx_msi_nop, 167 + .irq_enable = unmask_msi_irq, 168 + .irq_disable = mask_msi_irq, 169 + .irq_mask = mask_msi_irq, 170 + .irq_unmask = unmask_msi_irq, 171 }; 172 173 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
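This and the following MSI hunks all fall out of one signature change in the generic MSI layer; the helpers now take an irq_data pointer (a sketch of the prototypes as the converted chips use them):

	struct irq_data;
	void mask_msi_irq(struct irq_data *data);
	void unmask_msi_irq(struct irq_data *data);

so they can be wired straight into the irq_mask/irq_unmask (and irq_enable/irq_disable) slots of an irq_chip, as done above.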
+4 -4
arch/ia64/kernel/msi_ia64.c
··· 104 */ 105 static struct irq_chip ia64_msi_chip = { 106 .name = "PCI-MSI", 107 - .mask = mask_msi_irq, 108 - .unmask = unmask_msi_irq, 109 .ack = ia64_ack_msi_irq, 110 #ifdef CONFIG_SMP 111 .set_affinity = ia64_set_msi_irq_affinity, ··· 160 161 static struct irq_chip dmar_msi_type = { 162 .name = "DMAR_MSI", 163 - .unmask = dmar_msi_unmask, 164 - .mask = dmar_msi_mask, 165 .ack = ia64_ack_msi_irq, 166 #ifdef CONFIG_SMP 167 .set_affinity = dmar_msi_set_affinity,
··· 104 */ 105 static struct irq_chip ia64_msi_chip = { 106 .name = "PCI-MSI", 107 + .irq_mask = mask_msi_irq, 108 + .irq_unmask = unmask_msi_irq, 109 .ack = ia64_ack_msi_irq, 110 #ifdef CONFIG_SMP 111 .set_affinity = ia64_set_msi_irq_affinity, ··· 160 161 static struct irq_chip dmar_msi_type = { 162 .name = "DMAR_MSI", 163 + .irq_unmask = dmar_msi_unmask, 164 + .irq_mask = dmar_msi_mask, 165 .ack = ia64_ack_msi_irq, 166 #ifdef CONFIG_SMP 167 .set_affinity = dmar_msi_set_affinity,
+2 -2
arch/ia64/sn/kernel/msi_sn.c
··· 228 229 static struct irq_chip sn_msi_chip = { 230 .name = "PCI-MSI", 231 - .mask = mask_msi_irq, 232 - .unmask = unmask_msi_irq, 233 .ack = sn_ack_msi_irq, 234 #ifdef CONFIG_SMP 235 .set_affinity = sn_set_msi_irq_affinity,
··· 228 229 static struct irq_chip sn_msi_chip = { 230 .name = "PCI-MSI", 231 + .irq_mask = mask_msi_irq, 232 + .irq_unmask = unmask_msi_irq, 233 .ack = sn_ack_msi_irq, 234 #ifdef CONFIG_SMP 235 .set_affinity = sn_set_msi_irq_affinity,
+1 -1
arch/m32r/kernel/irq.c
··· 51 for_each_online_cpu(j) 52 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 53 #endif 54 - seq_printf(p, " %14s", irq_desc[i].chip->typename); 55 seq_printf(p, " %s", action->name); 56 57 for (action=action->next; action; action = action->next)
··· 51 for_each_online_cpu(j) 52 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 53 #endif 54 + seq_printf(p, " %14s", irq_desc[i].chip->name); 55 seq_printf(p, " %s", action->name); 56 57 for (action=action->next; action; action = action->next)
+1 -1
arch/m32r/platforms/m32104ut/setup.c
··· 65 66 static struct irq_chip m32104ut_irq_type = 67 { 68 - .typename = "M32104UT-IRQ", 69 .startup = startup_m32104ut_irq, 70 .shutdown = shutdown_m32104ut_irq, 71 .enable = enable_m32104ut_irq,
··· 65 66 static struct irq_chip m32104ut_irq_type = 67 { 68 + .name = "M32104UT-IRQ", 69 .startup = startup_m32104ut_irq, 70 .shutdown = shutdown_m32104ut_irq, 71 .enable = enable_m32104ut_irq,
+4 -4
arch/m32r/platforms/m32700ut/setup.c
··· 71 72 static struct irq_chip m32700ut_irq_type = 73 { 74 - .typename = "M32700UT-IRQ", 75 .startup = startup_m32700ut_irq, 76 .shutdown = shutdown_m32700ut_irq, 77 .enable = enable_m32700ut_irq, ··· 148 149 static struct irq_chip m32700ut_pld_irq_type = 150 { 151 - .typename = "M32700UT-PLD-IRQ", 152 .startup = startup_m32700ut_pld_irq, 153 .shutdown = shutdown_m32700ut_pld_irq, 154 .enable = enable_m32700ut_pld_irq, ··· 217 218 static struct irq_chip m32700ut_lanpld_irq_type = 219 { 220 - .typename = "M32700UT-PLD-LAN-IRQ", 221 .startup = startup_m32700ut_lanpld_irq, 222 .shutdown = shutdown_m32700ut_lanpld_irq, 223 .enable = enable_m32700ut_lanpld_irq, ··· 286 287 static struct irq_chip m32700ut_lcdpld_irq_type = 288 { 289 - .typename = "M32700UT-PLD-LCD-IRQ", 290 .startup = startup_m32700ut_lcdpld_irq, 291 .shutdown = shutdown_m32700ut_lcdpld_irq, 292 .enable = enable_m32700ut_lcdpld_irq,
··· 71 72 static struct irq_chip m32700ut_irq_type = 73 { 74 + .name = "M32700UT-IRQ", 75 .startup = startup_m32700ut_irq, 76 .shutdown = shutdown_m32700ut_irq, 77 .enable = enable_m32700ut_irq, ··· 148 149 static struct irq_chip m32700ut_pld_irq_type = 150 { 151 + .name = "M32700UT-PLD-IRQ", 152 .startup = startup_m32700ut_pld_irq, 153 .shutdown = shutdown_m32700ut_pld_irq, 154 .enable = enable_m32700ut_pld_irq, ··· 217 218 static struct irq_chip m32700ut_lanpld_irq_type = 219 { 220 + .name = "M32700UT-PLD-LAN-IRQ", 221 .startup = startup_m32700ut_lanpld_irq, 222 .shutdown = shutdown_m32700ut_lanpld_irq, 223 .enable = enable_m32700ut_lanpld_irq, ··· 286 287 static struct irq_chip m32700ut_lcdpld_irq_type = 288 { 289 + .name = "M32700UT-PLD-LCD-IRQ", 290 .startup = startup_m32700ut_lcdpld_irq, 291 .shutdown = shutdown_m32700ut_lcdpld_irq, 292 .enable = enable_m32700ut_lcdpld_irq,
+1 -1
arch/m32r/platforms/mappi/setup.c
··· 65 66 static struct irq_chip mappi_irq_type = 67 { 68 - .typename = "MAPPI-IRQ", 69 .startup = startup_mappi_irq, 70 .shutdown = shutdown_mappi_irq, 71 .enable = enable_mappi_irq,
··· 65 66 static struct irq_chip mappi_irq_type = 67 { 68 + .name = "MAPPI-IRQ", 69 .startup = startup_mappi_irq, 70 .shutdown = shutdown_mappi_irq, 71 .enable = enable_mappi_irq,
+1 -1
arch/m32r/platforms/mappi2/setup.c
··· 72 73 static struct irq_chip mappi2_irq_type = 74 { 75 - .typename = "MAPPI2-IRQ", 76 .startup = startup_mappi2_irq, 77 .shutdown = shutdown_mappi2_irq, 78 .enable = enable_mappi2_irq,
··· 72 73 static struct irq_chip mappi2_irq_type = 74 { 75 + .name = "MAPPI2-IRQ", 76 .startup = startup_mappi2_irq, 77 .shutdown = shutdown_mappi2_irq, 78 .enable = enable_mappi2_irq,
+1 -1
arch/m32r/platforms/mappi3/setup.c
··· 72 73 static struct irq_chip mappi3_irq_type = 74 { 75 - .typename = "MAPPI3-IRQ", 76 .startup = startup_mappi3_irq, 77 .shutdown = shutdown_mappi3_irq, 78 .enable = enable_mappi3_irq,
··· 72 73 static struct irq_chip mappi3_irq_type = 74 { 75 + .name = "MAPPI3-IRQ", 76 .startup = startup_mappi3_irq, 77 .shutdown = shutdown_mappi3_irq, 78 .enable = enable_mappi3_irq,
+1 -1
arch/m32r/platforms/oaks32r/setup.c
··· 63 64 static struct irq_chip oaks32r_irq_type = 65 { 66 - .typename = "OAKS32R-IRQ", 67 .startup = startup_oaks32r_irq, 68 .shutdown = shutdown_oaks32r_irq, 69 .enable = enable_oaks32r_irq,
··· 63 64 static struct irq_chip oaks32r_irq_type = 65 { 66 + .name = "OAKS32R-IRQ", 67 .startup = startup_oaks32r_irq, 68 .shutdown = shutdown_oaks32r_irq, 69 .enable = enable_oaks32r_irq,
+3 -3
arch/m32r/platforms/opsput/setup.c
··· 72 73 static struct irq_chip opsput_irq_type = 74 { 75 - .typename = "OPSPUT-IRQ", 76 .startup = startup_opsput_irq, 77 .shutdown = shutdown_opsput_irq, 78 .enable = enable_opsput_irq, ··· 149 150 static struct irq_chip opsput_pld_irq_type = 151 { 152 - .typename = "OPSPUT-PLD-IRQ", 153 .startup = startup_opsput_pld_irq, 154 .shutdown = shutdown_opsput_pld_irq, 155 .enable = enable_opsput_pld_irq, ··· 218 219 static struct irq_chip opsput_lanpld_irq_type = 220 { 221 - .typename = "OPSPUT-PLD-LAN-IRQ", 222 .startup = startup_opsput_lanpld_irq, 223 .shutdown = shutdown_opsput_lanpld_irq, 224 .enable = enable_opsput_lanpld_irq,
··· 72 73 static struct irq_chip opsput_irq_type = 74 { 75 + .name = "OPSPUT-IRQ", 76 .startup = startup_opsput_irq, 77 .shutdown = shutdown_opsput_irq, 78 .enable = enable_opsput_irq, ··· 149 150 static struct irq_chip opsput_pld_irq_type = 151 { 152 + .name = "OPSPUT-PLD-IRQ", 153 .startup = startup_opsput_pld_irq, 154 .shutdown = shutdown_opsput_pld_irq, 155 .enable = enable_opsput_pld_irq, ··· 218 219 static struct irq_chip opsput_lanpld_irq_type = 220 { 221 + .name = "OPSPUT-PLD-LAN-IRQ", 222 .startup = startup_opsput_lanpld_irq, 223 .shutdown = shutdown_opsput_lanpld_irq, 224 .enable = enable_opsput_lanpld_irq,
+2 -2
arch/m32r/platforms/usrv/setup.c
··· 63 64 static struct irq_chip mappi_irq_type = 65 { 66 - .typename = "M32700-IRQ", 67 .startup = startup_mappi_irq, 68 .shutdown = shutdown_mappi_irq, 69 .enable = enable_mappi_irq, ··· 136 137 static struct irq_chip m32700ut_pld_irq_type = 138 { 139 - .typename = "USRV-PLD-IRQ", 140 .startup = startup_m32700ut_pld_irq, 141 .shutdown = shutdown_m32700ut_pld_irq, 142 .enable = enable_m32700ut_pld_irq,
··· 63 64 static struct irq_chip mappi_irq_type = 65 { 66 + .name = "M32700-IRQ", 67 .startup = startup_mappi_irq, 68 .shutdown = shutdown_mappi_irq, 69 .enable = enable_mappi_irq, ··· 136 137 static struct irq_chip m32700ut_pld_irq_type = 138 { 139 + .name = "USRV-PLD-IRQ", 140 .startup = startup_m32700ut_pld_irq, 141 .shutdown = shutdown_m32700ut_pld_irq, 142 .enable = enable_m32700ut_pld_irq,
+3 -3
arch/powerpc/platforms/cell/axon_msi.c
··· 310 } 311 312 static struct irq_chip msic_irq_chip = { 313 - .mask = mask_msi_irq, 314 - .unmask = unmask_msi_irq, 315 - .shutdown = unmask_msi_irq, 316 .name = "AXON-MSI", 317 }; 318
··· 310 } 311 312 static struct irq_chip msic_irq_chip = { 313 + .irq_mask = mask_msi_irq, 314 + .irq_unmask = unmask_msi_irq, 315 + .irq_shutdown = mask_msi_irq, 316 .name = "AXON-MSI", 317 }; 318
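Note the fix hiding in this hunk beyond the rename: the old chip wired .shutdown to unmask_msi_irq, so shutting the interrupt down would have unmasked it; the new .irq_shutdown correctly points at mask_msi_irq.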
+1 -1
arch/powerpc/platforms/pseries/xics.c
··· 243 * at that level, so we do it here by hand. 244 */ 245 if (irq_to_desc(virq)->msi_desc) 246 - unmask_msi_irq(virq); 247 248 /* unmask it */ 249 xics_unmask_irq(virq);
··· 243 * at that level, so we do it here by hand. 244 */ 245 if (irq_to_desc(virq)->msi_desc) 246 + unmask_msi_irq(irq_get_irq_data(virq)); 247 248 /* unmask it */ 249 xics_unmask_irq(virq);
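A caller holding only a Linux irq number bridges to the new irq_data-based helpers with irq_get_irq_data(), as xics does above. A usage sketch:

	struct irq_data *d = irq_get_irq_data(virq);

	if (d && d->msi_desc)
		unmask_msi_irq(d);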
+2 -2
arch/powerpc/sysdev/fsl_msi.c
··· 51 } 52 53 static struct irq_chip fsl_msi_chip = { 54 - .mask = mask_msi_irq, 55 - .unmask = unmask_msi_irq, 56 .ack = fsl_msi_end_irq, 57 .name = "FSL-MSI", 58 };
··· 51 } 52 53 static struct irq_chip fsl_msi_chip = { 54 + .irq_mask = mask_msi_irq, 55 + .irq_unmask = unmask_msi_irq, 56 .ack = fsl_msi_end_irq, 57 .name = "FSL-MSI", 58 };
+11 -11
arch/powerpc/sysdev/mpic_pasemi_msi.c
··· 39 static struct mpic *msi_mpic; 40 41 42 - static void mpic_pasemi_msi_mask_irq(unsigned int irq) 43 { 44 - pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq); 45 - mask_msi_irq(irq); 46 - mpic_mask_irq(irq); 47 } 48 49 - static void mpic_pasemi_msi_unmask_irq(unsigned int irq) 50 { 51 - pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq); 52 - mpic_unmask_irq(irq); 53 - unmask_msi_irq(irq); 54 } 55 56 static struct irq_chip mpic_pasemi_msi_chip = { 57 - .shutdown = mpic_pasemi_msi_mask_irq, 58 - .mask = mpic_pasemi_msi_mask_irq, 59 - .unmask = mpic_pasemi_msi_unmask_irq, 60 .eoi = mpic_end_irq, 61 .set_type = mpic_set_irq_type, 62 .set_affinity = mpic_set_affinity,
··· 39 static struct mpic *msi_mpic; 40 41 42 + static void mpic_pasemi_msi_mask_irq(struct irq_data *data) 43 { 44 + pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); 45 + mask_msi_irq(data); 46 + mpic_mask_irq(data->irq); 47 } 48 49 + static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) 50 { 51 + pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); 52 + mpic_unmask_irq(data->irq); 53 + unmask_msi_irq(data); 54 } 55 56 static struct irq_chip mpic_pasemi_msi_chip = { 57 + .irq_shutdown = mpic_pasemi_msi_mask_irq, 58 + .irq_mask = mpic_pasemi_msi_mask_irq, 59 + .irq_unmask = mpic_pasemi_msi_unmask_irq, 60 .eoi = mpic_end_irq, 61 .set_type = mpic_set_irq_type, 62 .set_affinity = mpic_set_affinity,
+9 -9
arch/powerpc/sysdev/mpic_u3msi.c
··· 23 /* A bit ugly, can we get this from the pci_dev somehow? */ 24 static struct mpic *msi_mpic; 25 26 - static void mpic_u3msi_mask_irq(unsigned int irq) 27 { 28 - mask_msi_irq(irq); 29 - mpic_mask_irq(irq); 30 } 31 32 - static void mpic_u3msi_unmask_irq(unsigned int irq) 33 { 34 - mpic_unmask_irq(irq); 35 - unmask_msi_irq(irq); 36 } 37 38 static struct irq_chip mpic_u3msi_chip = { 39 - .shutdown = mpic_u3msi_mask_irq, 40 - .mask = mpic_u3msi_mask_irq, 41 - .unmask = mpic_u3msi_unmask_irq, 42 .eoi = mpic_end_irq, 43 .set_type = mpic_set_irq_type, 44 .set_affinity = mpic_set_affinity,
··· 23 /* A bit ugly, can we get this from the pci_dev somehow? */ 24 static struct mpic *msi_mpic; 25 26 + static void mpic_u3msi_mask_irq(struct irq_data *data) 27 { 28 + mask_msi_irq(data); 29 + mpic_mask_irq(data->irq); 30 } 31 32 + static void mpic_u3msi_unmask_irq(struct irq_data *data) 33 { 34 + mpic_unmask_irq(data->irq); 35 + unmask_msi_irq(data); 36 } 37 38 static struct irq_chip mpic_u3msi_chip = { 39 + .irq_shutdown = mpic_u3msi_mask_irq, 40 + .irq_mask = mpic_u3msi_mask_irq, 41 + .irq_unmask = mpic_u3msi_unmask_irq, 42 .eoi = mpic_end_irq, 43 .set_type = mpic_set_irq_type, 44 .set_affinity = mpic_set_affinity,
+1 -1
arch/sh/kernel/irq.c
··· 290 int __init arch_probe_nr_irqs(void) 291 { 292 nr_irqs = sh_mv.mv_nr_irqs; 293 - return 0; 294 } 295 #endif 296
··· 290 int __init arch_probe_nr_irqs(void) 291 { 292 nr_irqs = sh_mv.mv_nr_irqs; 293 + return NR_IRQS_LEGACY; 294 } 295 #endif 296
+4 -4
arch/sparc/kernel/pci_msi.c
··· 114 115 static struct irq_chip msi_irq = { 116 .name = "PCI-MSI", 117 - .mask = mask_msi_irq, 118 - .unmask = unmask_msi_irq, 119 - .enable = unmask_msi_irq, 120 - .disable = mask_msi_irq, 121 /* XXX affinity XXX */ 122 }; 123
··· 114 115 static struct irq_chip msi_irq = { 116 .name = "PCI-MSI", 117 + .irq_mask = mask_msi_irq, 118 + .irq_unmask = unmask_msi_irq, 119 + .irq_enable = unmask_msi_irq, 120 + .irq_disable = mask_msi_irq, 121 /* XXX affinity XXX */ 122 }; 123
+2 -2
arch/tile/kernel/irq.c
··· 208 } 209 210 static struct irq_chip tile_irq_chip = { 211 - .typename = "tile_irq_chip", 212 .ack = tile_irq_chip_ack, 213 .eoi = tile_irq_chip_eoi, 214 .mask = tile_irq_chip_mask, ··· 288 for_each_online_cpu(j) 289 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 290 #endif 291 - seq_printf(p, " %14s", irq_desc[i].chip->typename); 292 seq_printf(p, " %s", action->name); 293 294 for (action = action->next; action; action = action->next)
··· 208 } 209 210 static struct irq_chip tile_irq_chip = { 211 + .name = "tile_irq_chip", 212 .ack = tile_irq_chip_ack, 213 .eoi = tile_irq_chip_eoi, 214 .mask = tile_irq_chip_mask, ··· 288 for_each_online_cpu(j) 289 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 290 #endif 291 + seq_printf(p, " %14s", irq_desc[i].chip->name); 292 seq_printf(p, " %s", action->name); 293 294 for (action = action->next; action; action = action->next)
+3 -3
arch/um/kernel/irq.c
··· 46 for_each_online_cpu(j) 47 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 48 #endif 49 - seq_printf(p, " %14s", irq_desc[i].chip->typename); 50 seq_printf(p, " %s", action->name); 51 52 for (action=action->next; action; action = action->next) ··· 369 370 /* This is used for everything else than the timer. */ 371 static struct irq_chip normal_irq_type = { 372 - .typename = "SIGIO", 373 .release = free_irq_by_irq_and_dev, 374 .disable = dummy, 375 .enable = dummy, ··· 378 }; 379 380 static struct irq_chip SIGVTALRM_irq_type = { 381 - .typename = "SIGVTALRM", 382 .release = free_irq_by_irq_and_dev, 383 .shutdown = dummy, /* never called */ 384 .disable = dummy,
··· 46 for_each_online_cpu(j) 47 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 48 #endif 49 + seq_printf(p, " %14s", irq_desc[i].chip->name); 50 seq_printf(p, " %s", action->name); 51 52 for (action=action->next; action; action = action->next) ··· 369 370 /* This is used for everything other than the timer. */ 371 static struct irq_chip normal_irq_type = { 372 + .name = "SIGIO", 373 .release = free_irq_by_irq_and_dev, 374 .disable = dummy, 375 .enable = dummy, ··· 378 }; 379 380 static struct irq_chip SIGVTALRM_irq_type = { 381 + .name = "SIGVTALRM", 382 .release = free_irq_by_irq_and_dev, 383 .shutdown = dummy, /* never called */ 384 .disable = dummy,
+4 -31
arch/x86/Kconfig
··· 63 select HAVE_USER_RETURN_NOTIFIER 64 select HAVE_ARCH_JUMP_LABEL 65 select HAVE_TEXT_POKE_SMP 66 67 config INSTRUCTION_DECODER 68 def_bool (KPROBES || PERF_EVENTS) ··· 208 def_bool y 209 depends on EXPERIMENTAL && DMAR && ACPI 210 211 - # Use the generic interrupt handling code in kernel/irq/: 212 - config GENERIC_HARDIRQS 213 - def_bool y 214 - 215 - config GENERIC_HARDIRQS_NO__DO_IRQ 216 - def_bool y 217 - 218 - config GENERIC_IRQ_PROBE 219 - def_bool y 220 - 221 - config GENERIC_PENDING_IRQ 222 - def_bool y 223 - depends on GENERIC_HARDIRQS && SMP 224 - 225 config USE_GENERIC_SMP_HELPERS 226 def_bool y 227 depends on SMP ··· 289 and accesses the local apic via MSRs not via mmio. 290 291 If you don't know what to do here, say N. 292 - 293 - config SPARSE_IRQ 294 - bool "Support sparse irq numbering" 295 - depends on PCI_MSI || HT_IRQ 296 - ---help--- 297 - This enables support for sparse irqs. This is useful for distro 298 - kernels that want to define a high CONFIG_NR_CPUS value but still 299 - want to have low kernel memory footprint on smaller machines. 300 - 301 - ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread 302 - out the irq_desc[] array in a more NUMA-friendly way. ) 303 - 304 - If you don't know what to do here, say N. 305 - 306 - config NUMA_IRQ_DESC 307 - def_bool y 308 - depends on SPARSE_IRQ && NUMA 309 310 config X86_MPPARSE 311 bool "Enable MPS table" if ACPI
··· 63 select HAVE_USER_RETURN_NOTIFIER 64 select HAVE_ARCH_JUMP_LABEL 65 select HAVE_TEXT_POKE_SMP 66 + select HAVE_GENERIC_HARDIRQS 67 + select HAVE_SPARSE_IRQ 68 + select GENERIC_IRQ_PROBE 69 + select GENERIC_PENDING_IRQ if SMP 70 71 config INSTRUCTION_DECODER 72 def_bool (KPROBES || PERF_EVENTS) ··· 204 def_bool y 205 depends on EXPERIMENTAL && DMAR && ACPI 206 207 config USE_GENERIC_SMP_HELPERS 208 def_bool y 209 depends on SMP ··· 299 and accesses the local apic via MSRs not via mmio. 300 301 If you don't know what to do here, say N. 302 303 config X86_MPPARSE 304 bool "Enable MPS table" if ACPI
+1 -3
arch/x86/include/asm/apic.h
··· 252 } 253 #endif 254 255 - extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); 256 - extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask); 257 - 258 259 #else /* !CONFIG_X86_LOCAL_APIC */ 260 static inline void lapic_shutdown(void) { }
··· 252 } 253 #endif 254 255 + extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); 256 257 #else /* !CONFIG_X86_LOCAL_APIC */ 258 static inline void lapic_shutdown(void) { }
+1
arch/x86/include/asm/apicdef.h
··· 131 #define APIC_EILVTn(n) (0x500 + 0x10 * n) 132 #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ 133 #define APIC_EILVT_NR_AMD_10H 4 134 #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) 135 #define APIC_EILVT_MSG_FIX 0x0 136 #define APIC_EILVT_MSG_SMI 0x2
··· 131 #define APIC_EILVTn(n) (0x500 + 0x10 * n) 132 #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ 133 #define APIC_EILVT_NR_AMD_10H 4 134 + #define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H 135 #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) 136 #define APIC_EILVT_MSG_FIX 0x0 137 #define APIC_EILVT_MSG_SMI 0x2
+6 -4
arch/x86/include/asm/hpet.h
··· 74 extern unsigned int hpet_readl(unsigned int a); 75 extern void force_hpet_resume(void); 76 77 - extern void hpet_msi_unmask(unsigned int irq); 78 - extern void hpet_msi_mask(unsigned int irq); 79 - extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg); 80 - extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg); 81 82 #ifdef CONFIG_PCI_MSI 83 extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
··· 74 extern unsigned int hpet_readl(unsigned int a); 75 extern void force_hpet_resume(void); 76 77 + struct irq_data; 78 + extern void hpet_msi_unmask(struct irq_data *data); 79 + extern void hpet_msi_mask(struct irq_data *data); 80 + struct hpet_dev; 81 + extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg); 82 + extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); 83 84 #ifdef CONFIG_PCI_MSI 85 extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
+13 -4
arch/x86/include/asm/hw_irq.h
··· 78 irq_attr->polarity = polarity; 79 } 80 81 /* 82 * This is performance-critical, we want to do it O(1) 83 * ··· 96 cpumask_var_t old_domain; 97 u8 vector; 98 u8 move_in_progress : 1; 99 }; 100 101 - extern struct irq_cfg *irq_cfg(unsigned int); 102 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); 103 extern void send_cleanup_vector(struct irq_cfg *); 104 105 - struct irq_desc; 106 - extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *, 107 - unsigned int *dest_id); 108 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); 109 extern void setup_ioapic_dest(void); 110
··· 78 irq_attr->polarity = polarity; 79 } 80 81 + struct irq_2_iommu { 82 + struct intel_iommu *iommu; 83 + u16 irte_index; 84 + u16 sub_handle; 85 + u8 irte_mask; 86 + }; 87 + 88 /* 89 * This is performance-critical, we want to do it O(1) 90 * ··· 89 cpumask_var_t old_domain; 90 u8 vector; 91 u8 move_in_progress : 1; 92 + #ifdef CONFIG_INTR_REMAP 93 + struct irq_2_iommu irq_2_iommu; 94 + #endif 95 }; 96 97 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); 98 extern void send_cleanup_vector(struct irq_cfg *); 99 100 + struct irq_data; 101 + int __ioapic_set_affinity(struct irq_data *, const struct cpumask *, 102 + unsigned int *dest_id); 103 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); 104 extern void setup_ioapic_dest(void); 105
+2
arch/x86/include/asm/i8259.h
··· 55 struct legacy_pic { 56 int nr_legacy_irqs; 57 struct irq_chip *chip; 58 void (*mask_all)(void); 59 void (*restore_mask)(void); 60 void (*init)(int auto_eoi);
··· 55 struct legacy_pic { 56 int nr_legacy_irqs; 57 struct irq_chip *chip; 58 + void (*mask)(unsigned int irq); 59 + void (*unmask)(unsigned int irq); 60 void (*mask_all)(void); 61 void (*restore_mask)(void); 62 void (*init)(int auto_eoi);
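The two new hooks give callers that only have an irq number a plain entry point while the 8259 irq_chip callbacks themselves move to the irq_data convention. A usage sketch from the io_apic side:

	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->mask(irq);	/* rather than legacy_pic->chip->mask(irq) */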
-6
arch/x86/include/asm/io_apic.h
··· 170 171 extern void probe_nr_irqs_gsi(void); 172 173 - extern int setup_ioapic_entry(int apic, int irq, 174 - struct IO_APIC_route_entry *entry, 175 - unsigned int destination, int trigger, 176 - int polarity, int vector, int pin); 177 - extern void ioapic_write_entry(int apic, int pin, 178 - struct IO_APIC_route_entry e); 179 extern void setup_ioapic_ids_from_mpc(void); 180 181 struct mp_ioapic_gsi{
··· 170 171 extern void probe_nr_irqs_gsi(void); 172 173 extern void setup_ioapic_ids_from_mpc(void); 174 175 struct mp_ioapic_gsi{
+8
arch/x86/include/asm/irq_remapping.h
··· 24 irte->dest_id = IRTE_DEST(dest); 25 irte->redir_hint = 1; 26 } 27 #else 28 static void prepare_irte(struct irte *irte, int vector, unsigned int dest) 29 { 30 } 31 #endif 32
··· 24 irte->dest_id = IRTE_DEST(dest); 25 irte->redir_hint = 1; 26 } 27 + static inline bool irq_remapped(struct irq_cfg *cfg) 28 + { 29 + return cfg->irq_2_iommu.iommu != NULL; 30 + } 31 #else 32 static void prepare_irte(struct irte *irte, int vector, unsigned int dest) 33 { 34 + } 35 + static inline bool irq_remapped(struct irq_cfg *cfg) 36 + { 37 + return false; 38 } 39 #endif 40
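With irq_2_iommu embedded in irq_cfg (see the asm/hw_irq.h hunk above), the remapping test on hot paths collapses to a single pointer check on data the caller already holds. A usage sketch, as in the io_apic EOI path later in this series:

	if (irq_remapped(cfg))
		io_apic_eoi(entry->apic, entry->pin);
	else
		io_apic_eoi(entry->apic, cfg->vector);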
+24 -30
arch/x86/kernel/apb_timer.c
··· 231 apbt_start_counter(phy_cs_timer_id); 232 } 233 234 - /* Setup IRQ routing via IOAPIC */ 235 - #ifdef CONFIG_SMP 236 - static void apbt_setup_irq(struct apbt_dev *adev) 237 - { 238 - struct irq_chip *chip; 239 - struct irq_desc *desc; 240 - 241 - /* timer0 irq has been setup early */ 242 - if (adev->irq == 0) 243 - return; 244 - desc = irq_to_desc(adev->irq); 245 - chip = get_irq_chip(adev->irq); 246 - disable_irq(adev->irq); 247 - desc->status |= IRQ_MOVE_PCNTXT; 248 - irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); 249 - /* APB timer irqs are set up as mp_irqs, timer is edge triggerred */ 250 - set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge"); 251 - enable_irq(adev->irq); 252 - if (system_state == SYSTEM_BOOTING) 253 - if (request_irq(adev->irq, apbt_interrupt_handler, 254 - IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, 255 - adev->name, adev)) { 256 - printk(KERN_ERR "Failed request IRQ for APBT%d\n", 257 - adev->num); 258 - } 259 - } 260 - #endif 261 - 262 static void apbt_enable_int(int n) 263 { 264 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); ··· 306 } 307 308 #ifdef CONFIG_SMP 309 /* Should be called with per cpu */ 310 void apbt_setup_secondary_clock(void) 311 { ··· 382 383 switch (action & 0xf) { 384 case CPU_DEAD: 385 apbt_disable_int(cpu); 386 - if (system_state == SYSTEM_RUNNING) 387 pr_debug("skipping APBT CPU %lu offline\n", cpu); 388 - else if (adev) { 389 pr_debug("APBT clockevent for cpu %lu offline\n", cpu); 390 free_irq(adev->irq, adev); 391 }
··· 231 apbt_start_counter(phy_cs_timer_id); 232 } 233 234 static void apbt_enable_int(int n) 235 { 236 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); ··· 334 } 335 336 #ifdef CONFIG_SMP 337 + 338 + static void apbt_setup_irq(struct apbt_dev *adev) 339 + { 340 + /* timer0 irq has been setup early */ 341 + if (adev->irq == 0) 342 + return; 343 + 344 + if (system_state == SYSTEM_BOOTING) { 345 + irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); 346 + /* APB timer irqs are set up as mp_irqs, timer is edge type */ 347 + __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge"); 348 + if (request_irq(adev->irq, apbt_interrupt_handler, 349 + IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, 350 + adev->name, adev)) { 351 + printk(KERN_ERR "Failed request IRQ for APBT%d\n", 352 + adev->num); 353 + } 354 + } else 355 + enable_irq(adev->irq); 356 + } 357 + 358 /* Should be called with per cpu */ 359 void apbt_setup_secondary_clock(void) 360 { ··· 389 390 switch (action & 0xf) { 391 case CPU_DEAD: 392 + disable_irq(adev->irq); 393 apbt_disable_int(cpu); 394 + if (system_state == SYSTEM_RUNNING) { 395 pr_debug("skipping APBT CPU %lu offline\n", cpu); 396 + } else if (adev) { 397 pr_debug("APBT clockevent for cpu %lu offline\n", cpu); 398 free_irq(adev->irq, adev); 399 }
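The rewritten apbt_setup_irq() also demonstrates the new idiom for descriptor status bits: irq_modify_status(irq, clr, set) replaces open-coded writes to desc->status, so the driver never dereferences irq_desc directly:

	/* was: desc = irq_to_desc(adev->irq); desc->status |= IRQ_MOVE_PCNTXT; */
	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);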
+71 -21
arch/x86/kernel/apic/apic.c
··· 52 #include <asm/mce.h> 53 #include <asm/kvm_para.h> 54 #include <asm/tsc.h> 55 56 unsigned int num_processors; 57 ··· 371 } 372 373 /* 374 - * Setup extended LVT, AMD specific (K8, family 10h) 375 * 376 - * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and 377 - * MCE interrupts are supported. Thus MCE offset must be set to 0. 378 * 379 * If mask=1, the LVT entry does not generate interrupts while mask=0 380 * enables the vector. See also the BKDGs. 381 */ 382 383 - #define APIC_EILVT_LVTOFF_MCE 0 384 - #define APIC_EILVT_LVTOFF_IBS 1 385 - 386 - static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) 387 { 388 - unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0); 389 - unsigned int v = (mask << 16) | (msg_type << 8) | vector; 390 391 - apic_write(reg, v); 392 - } 393 394 - u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) 395 - { 396 - setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); 397 - return APIC_EILVT_LVTOFF_MCE; 398 - } 399 400 - u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) 401 - { 402 - setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); 403 - return APIC_EILVT_LVTOFF_IBS; 404 } 405 - EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs); 406 407 /* 408 * Program the next event, relative to now
··· 52 #include <asm/mce.h> 53 #include <asm/kvm_para.h> 54 #include <asm/tsc.h> 55 + #include <asm/atomic.h> 56 57 unsigned int num_processors; 58 ··· 370 } 371 372 /* 373 + * Setup extended LVT, AMD specific 374 * 375 + * Software should use the LVT offsets the BIOS provides. The offsets 376 + * are determined by the subsystems using it like those for MCE 377 + * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts 378 + * are supported. Beginning with family 10h at least 4 offsets are 379 + * available. 380 + * 381 + * Since the offsets must be consistent for all cores, we keep track 382 + * of the LVT offsets in software and reserve the offset for the same 383 + * vector also to be used on other cores. An offset is freed by 384 + * setting the entry to APIC_EILVT_MASKED. 385 + * 386 + * If the BIOS is right, there should be no conflicts. Otherwise a 387 + * "[Firmware Bug]: ..." error message is generated. However, if 388 + * software does not properly determine the offsets, it is not 389 + * necessarily a BIOS bug. 390 + */ 391 + 392 + static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX]; 393 + 394 + static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new) 395 + { 396 + return (old & APIC_EILVT_MASKED) 397 + || (new == APIC_EILVT_MASKED) 398 + || ((new & ~APIC_EILVT_MASKED) == old); 399 + } 400 + 401 + static unsigned int reserve_eilvt_offset(int offset, unsigned int new) 402 + { 403 + unsigned int rsvd; /* 0: uninitialized */ 404 + 405 + if (offset >= APIC_EILVT_NR_MAX) 406 + return ~0; 407 + 408 + rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED; 409 + do { 410 + if (rsvd && 411 + !eilvt_entry_is_changeable(rsvd, new)) 412 + /* may not change if vectors are different */ 413 + return rsvd; 414 + rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new); 415 + } while (rsvd != new); 416 + 417 + return new; 418 + } 419 + 420 + /* 421 * If mask=1, the LVT entry does not generate interrupts while mask=0 422 * enables the vector. See also the BKDGs. 423 */ 424 425 + int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) 426 { 427 + unsigned long reg = APIC_EILVTn(offset); 428 + unsigned int new, old, reserved; 429 430 + new = (mask << 16) | (msg_type << 8) | vector; 431 + old = apic_read(reg); 432 + reserved = reserve_eilvt_offset(offset, new); 433 434 + if (reserved != new) { 435 + pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but " 436 + "vector 0x%x was already reserved by another core, " 437 + "APIC%lX=0x%x\n", 438 + smp_processor_id(), new, reserved, reg, old); 439 + return -EINVAL; 440 + } 441 442 + if (!eilvt_entry_is_changeable(old, new)) { 443 + pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but " 444 + "register already in use, APIC%lX=0x%x\n", 445 + smp_processor_id(), new, reg, old); 446 + return -EBUSY; 447 + } 448 + 449 + apic_write(reg, new); 450 + 451 + return 0; 452 } 453 + EXPORT_SYMBOL_GPL(setup_APIC_eilvt); 454 455 /* 456 * Program the next event, relative to now
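A caller-side sketch for the new setup_APIC_eilvt(); the offset and vector values here are hypothetical (the real users are the MCE threshold and IBS code, which take the offset from the BIOS-programmed LVT entry):

	int err = setup_APIC_eilvt(offset, vector, APIC_EILVT_MSG_NMI, 0);

	if (err)	/* offset already reserved with a different vector */
		pr_err("cannot reserve LVT offset %d\n", offset);

The atomic_cmpxchg() loop in reserve_eilvt_offset() makes the reservation lock-free, so all cores can program their local APICs concurrently without serializing.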
+338 -539
arch/x86/kernel/apic/io_apic.c
··· 131 struct irq_pin_list *next; 132 }; 133 134 - static struct irq_pin_list *get_one_free_irq_2_pin(int node) 135 { 136 - struct irq_pin_list *pin; 137 - 138 - pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); 139 - 140 - return pin; 141 } 142 143 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ ··· 146 int __init arch_early_irq_init(void) 147 { 148 struct irq_cfg *cfg; 149 - struct irq_desc *desc; 150 - int count; 151 - int node; 152 - int i; 153 154 if (!legacy_pic->nr_legacy_irqs) { 155 nr_irqs_gsi = 0; ··· 157 count = ARRAY_SIZE(irq_cfgx); 158 node = cpu_to_node(0); 159 160 for (i = 0; i < count; i++) { 161 - desc = irq_to_desc(i); 162 - desc->chip_data = &cfg[i]; 163 - zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); 164 - zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); 165 /* 166 * For legacy IRQ's, start with assigning irq0 to irq15 to 167 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. ··· 178 } 179 180 #ifdef CONFIG_SPARSE_IRQ 181 - struct irq_cfg *irq_cfg(unsigned int irq) 182 { 183 - struct irq_cfg *cfg = NULL; 184 - struct irq_desc *desc; 185 186 - desc = irq_to_desc(irq); 187 - if (desc) 188 - cfg = desc->chip_data; 189 190 return cfg; 191 } 192 193 - static struct irq_cfg *get_one_free_irq_cfg(int node) 194 { 195 - struct irq_cfg *cfg; 196 - 197 - cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 198 - if (cfg) { 199 - if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { 200 - kfree(cfg); 201 - cfg = NULL; 202 - } else if (!zalloc_cpumask_var_node(&cfg->old_domain, 203 - GFP_ATOMIC, node)) { 204 - free_cpumask_var(cfg->domain); 205 - kfree(cfg); 206 - cfg = NULL; 207 - } 208 - } 209 - 210 - return cfg; 211 - } 212 - 213 - int arch_init_chip_data(struct irq_desc *desc, int node) 214 - { 215 - struct irq_cfg *cfg; 216 - 217 - cfg = desc->chip_data; 218 - if (!cfg) { 219 - desc->chip_data = get_one_free_irq_cfg(node); 220 - if (!desc->chip_data) { 221 - printk(KERN_ERR "can not alloc irq_cfg\n"); 222 - BUG_ON(1); 223 - } 224 - } 225 - 226 - return 0; 227 - } 228 - 229 - /* for move_irq_desc */ 230 - static void 231 - init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) 232 - { 233 - struct irq_pin_list *old_entry, *head, *tail, *entry; 234 - 235 - cfg->irq_2_pin = NULL; 236 - old_entry = old_cfg->irq_2_pin; 237 - if (!old_entry) 238 - return; 239 - 240 - entry = get_one_free_irq_2_pin(node); 241 - if (!entry) 242 - return; 243 - 244 - entry->apic = old_entry->apic; 245 - entry->pin = old_entry->pin; 246 - head = entry; 247 - tail = entry; 248 - old_entry = old_entry->next; 249 - while (old_entry) { 250 - entry = get_one_free_irq_2_pin(node); 251 - if (!entry) { 252 - entry = head; 253 - while (entry) { 254 - head = entry->next; 255 - kfree(entry); 256 - entry = head; 257 - } 258 - /* still use the old one */ 259 - return; 260 - } 261 - entry->apic = old_entry->apic; 262 - entry->pin = old_entry->pin; 263 - tail->next = entry; 264 - tail = entry; 265 - old_entry = old_entry->next; 266 - } 267 - 268 - tail->next = NULL; 269 - cfg->irq_2_pin = head; 270 - } 271 - 272 - static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) 273 - { 274 - struct irq_pin_list *entry, *next; 275 - 276 - if (old_cfg->irq_2_pin == cfg->irq_2_pin) 277 - return; 278 - 279 - entry = old_cfg->irq_2_pin; 280 - 281 - while (entry) { 282 - next = entry->next; 283 - kfree(entry); 284 - entry = next; 285 - } 286 - old_cfg->irq_2_pin = NULL; 287 - } 288 - 289 - void arch_init_copy_chip_data(struct irq_desc *old_desc, 290 - 
struct irq_desc *desc, int node) 291 - { 292 - struct irq_cfg *cfg; 293 - struct irq_cfg *old_cfg; 294 - 295 - cfg = get_one_free_irq_cfg(node); 296 - 297 if (!cfg) 298 return; 299 - 300 - desc->chip_data = cfg; 301 - 302 - old_cfg = old_desc->chip_data; 303 - 304 - cfg->vector = old_cfg->vector; 305 - cfg->move_in_progress = old_cfg->move_in_progress; 306 - cpumask_copy(cfg->domain, old_cfg->domain); 307 - cpumask_copy(cfg->old_domain, old_cfg->old_domain); 308 - 309 - init_copy_irq_2_pin(old_cfg, cfg, node); 310 - } 311 - 312 - static void free_irq_cfg(struct irq_cfg *cfg) 313 - { 314 free_cpumask_var(cfg->domain); 315 free_cpumask_var(cfg->old_domain); 316 kfree(cfg); 317 } 318 319 - void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) 320 - { 321 - struct irq_cfg *old_cfg, *cfg; 322 - 323 - old_cfg = old_desc->chip_data; 324 - cfg = desc->chip_data; 325 - 326 - if (old_cfg == cfg) 327 - return; 328 - 329 - if (old_cfg) { 330 - free_irq_2_pin(old_cfg, cfg); 331 - free_irq_cfg(old_cfg); 332 - old_desc->chip_data = NULL; 333 - } 334 - } 335 - /* end for move_irq_desc */ 336 - 337 #else 338 struct irq_cfg *irq_cfg(unsigned int irq) 339 { 340 return irq < nr_irqs ? irq_cfgx + irq : NULL; 341 } 342 343 #endif 344 345 struct io_apic { 346 unsigned int index; ··· 364 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 365 } 366 367 - void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 368 { 369 unsigned long flags; 370 raw_spin_lock_irqsave(&ioapic_lock, flags); ··· 394 * fast in the common case, and fast for shared ISA-space IRQs. 395 */ 396 static int 397 - add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) 398 { 399 struct irq_pin_list **last, *entry; 400 ··· 406 last = &entry->next; 407 } 408 409 - entry = get_one_free_irq_2_pin(node); 410 if (!entry) { 411 printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", 412 node, apic, pin); ··· 421 422 static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 423 { 424 - if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) 425 panic("IO-APIC: failed to add irq-pin. 
Can not proceed\n"); 426 } 427 ··· 484 IO_APIC_REDIR_LEVEL_TRIGGER, NULL); 485 } 486 487 - static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) 488 - { 489 - io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); 490 - } 491 - 492 static void io_apic_sync(struct irq_pin_list *entry) 493 { 494 /* ··· 495 readl(&io_apic->data); 496 } 497 498 - static void __mask_IO_APIC_irq(struct irq_cfg *cfg) 499 { 500 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); 501 - } 502 - 503 - static void mask_IO_APIC_irq_desc(struct irq_desc *desc) 504 - { 505 - struct irq_cfg *cfg = desc->chip_data; 506 - unsigned long flags; 507 - 508 - BUG_ON(!cfg); 509 - 510 - raw_spin_lock_irqsave(&ioapic_lock, flags); 511 - __mask_IO_APIC_irq(cfg); 512 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 513 } 514 515 - static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) 516 { 517 - struct irq_cfg *cfg = desc->chip_data; 518 unsigned long flags; 519 520 raw_spin_lock_irqsave(&ioapic_lock, flags); 521 - __unmask_IO_APIC_irq(cfg); 522 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 523 } 524 525 - static void mask_IO_APIC_irq(unsigned int irq) 526 { 527 - struct irq_desc *desc = irq_to_desc(irq); 528 - 529 - mask_IO_APIC_irq_desc(desc); 530 - } 531 - static void unmask_IO_APIC_irq(unsigned int irq) 532 - { 533 - struct irq_desc *desc = irq_to_desc(irq); 534 - 535 - unmask_IO_APIC_irq_desc(desc); 536 } 537 538 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) ··· 595 struct IO_APIC_route_entry **ioapic_entries; 596 597 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, 598 - GFP_ATOMIC); 599 if (!ioapic_entries) 600 return 0; 601 602 for (apic = 0; apic < nr_ioapics; apic++) { 603 ioapic_entries[apic] = 604 kzalloc(sizeof(struct IO_APIC_route_entry) * 605 - nr_ioapic_registers[apic], GFP_ATOMIC); 606 if (!ioapic_entries[apic]) 607 goto nomem; 608 } ··· 1160 /* Initialize vector_irq on a new cpu */ 1161 int irq, vector; 1162 struct irq_cfg *cfg; 1163 - struct irq_desc *desc; 1164 1165 /* 1166 * vector_lock will make sure that we don't run into irq vector ··· 1168 */ 1169 raw_spin_lock(&vector_lock); 1170 /* Mark the inuse vectors */ 1171 - for_each_irq_desc(irq, desc) { 1172 - cfg = desc->chip_data; 1173 - 1174 /* 1175 * If it is a legacy IRQ handled by the legacy PIC, this cpu 1176 * will be part of the irq_cfg's domain. 
··· 1228 } 1229 #endif 1230 1231 - static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) 1232 { 1233 1234 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1235 trigger == IOAPIC_LEVEL) 1236 - desc->status |= IRQ_LEVEL; 1237 else 1238 - desc->status &= ~IRQ_LEVEL; 1239 1240 - if (irq_remapped(irq)) { 1241 - desc->status |= IRQ_MOVE_PCNTXT; 1242 if (trigger) 1243 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, 1244 handle_fasteoi_irq, ··· 1259 handle_edge_irq, "edge"); 1260 } 1261 1262 - int setup_ioapic_entry(int apic_id, int irq, 1263 - struct IO_APIC_route_entry *entry, 1264 - unsigned int destination, int trigger, 1265 - int polarity, int vector, int pin) 1266 { 1267 /* 1268 * add it to the IO-APIC irq-routing table: ··· 1318 return 0; 1319 } 1320 1321 - static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, 1322 - int trigger, int polarity) 1323 { 1324 - struct irq_cfg *cfg; 1325 struct IO_APIC_route_entry entry; 1326 unsigned int dest; 1327 1328 if (!IO_APIC_IRQ(irq)) 1329 return; 1330 - 1331 - cfg = desc->chip_data; 1332 - 1333 /* 1334 * For legacy irqs, cfg->domain starts with cpu 0 for legacy 1335 * controllers like 8259. Now that IO-APIC can handle this irq, update ··· 1354 return; 1355 } 1356 1357 - ioapic_register_intr(irq, desc, trigger); 1358 if (irq < legacy_pic->nr_legacy_irqs) 1359 - legacy_pic->chip->mask(irq); 1360 1361 ioapic_write_entry(apic_id, pin, entry); 1362 } ··· 1367 1368 static void __init setup_IO_APIC_irqs(void) 1369 { 1370 - int apic_id, pin, idx, irq; 1371 - int notcon = 0; 1372 - struct irq_desc *desc; 1373 - struct irq_cfg *cfg; 1374 int node = cpu_to_node(0); 1375 1376 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1377 ··· 1406 apic->multi_timer_check(apic_id, irq)) 1407 continue; 1408 1409 - desc = irq_to_desc_alloc_node(irq, node); 1410 - if (!desc) { 1411 - printk(KERN_INFO "can not get irq_desc for %d\n", irq); 1412 continue; 1413 - } 1414 - cfg = desc->chip_data; 1415 add_pin_to_irq_node(cfg, node, apic_id, pin); 1416 /* 1417 * don't mark it in pin_programmed, so later acpi could 1418 * set it correctly when irq < 16 1419 */ 1420 - setup_IO_APIC_irq(apic_id, pin, irq, desc, 1421 - irq_trigger(idx), irq_polarity(idx)); 1422 } 1423 1424 if (notcon) ··· 1431 */ 1432 void setup_IO_APIC_irq_extra(u32 gsi) 1433 { 1434 - int apic_id = 0, pin, idx, irq; 1435 - int node = cpu_to_node(0); 1436 - struct irq_desc *desc; 1437 struct irq_cfg *cfg; 1438 1439 /* ··· 1447 return; 1448 1449 irq = pin_2_irq(idx, apic_id, pin); 1450 - #ifdef CONFIG_SPARSE_IRQ 1451 - desc = irq_to_desc(irq); 1452 - if (desc) 1453 - return; 1454 - #endif 1455 - desc = irq_to_desc_alloc_node(irq, node); 1456 - if (!desc) { 1457 - printk(KERN_INFO "can not get irq_desc for %d\n", irq); 1458 - return; 1459 - } 1460 1461 - cfg = desc->chip_data; 1462 add_pin_to_irq_node(cfg, node, apic_id, pin); 1463 1464 if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { ··· 1465 } 1466 set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); 1467 1468 - setup_IO_APIC_irq(apic_id, pin, irq, desc, 1469 irq_trigger(idx), irq_polarity(idx)); 1470 } 1471 ··· 1516 union IO_APIC_reg_03 reg_03; 1517 unsigned long flags; 1518 struct irq_cfg *cfg; 1519 - struct irq_desc *desc; 1520 unsigned int irq; 1521 1522 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); ··· 1602 } 1603 } 1604 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1605 - for_each_irq_desc(irq, desc) { 1606 struct 
irq_pin_list *entry; 1607 1608 - cfg = desc->chip_data; 1609 if (!cfg) 1610 continue; 1611 entry = cfg->irq_2_pin; ··· 2112 * an edge even if it isn't on the 8259A... 2113 */ 2114 2115 - static unsigned int startup_ioapic_irq(unsigned int irq) 2116 { 2117 - int was_pending = 0; 2118 unsigned long flags; 2119 - struct irq_cfg *cfg; 2120 2121 raw_spin_lock_irqsave(&ioapic_lock, flags); 2122 if (irq < legacy_pic->nr_legacy_irqs) { 2123 - legacy_pic->chip->mask(irq); 2124 if (legacy_pic->irq_pending(irq)) 2125 was_pending = 1; 2126 } 2127 - cfg = irq_cfg(irq); 2128 - __unmask_IO_APIC_irq(cfg); 2129 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2130 2131 return was_pending; 2132 } 2133 2134 - static int ioapic_retrigger_irq(unsigned int irq) 2135 { 2136 - 2137 - struct irq_cfg *cfg = irq_cfg(irq); 2138 unsigned long flags; 2139 2140 raw_spin_lock_irqsave(&vector_lock, flags); ··· 2182 * With interrupt-remapping, destination information comes 2183 * from interrupt-remapping table entry. 2184 */ 2185 - if (!irq_remapped(irq)) 2186 io_apic_write(apic, 0x11 + pin*2, dest); 2187 reg = io_apic_read(apic, 0x10 + pin*2); 2188 reg &= ~IO_APIC_REDIR_VECTOR_MASK; ··· 2192 } 2193 2194 /* 2195 - * Either sets desc->affinity to a valid value, and returns 2196 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2197 - * leaves desc->affinity untouched. 2198 */ 2199 - unsigned int 2200 - set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask, 2201 - unsigned int *dest_id) 2202 { 2203 - struct irq_cfg *cfg; 2204 - unsigned int irq; 2205 2206 if (!cpumask_intersects(mask, cpu_online_mask)) 2207 return -1; 2208 2209 - irq = desc->irq; 2210 - cfg = desc->chip_data; 2211 - if (assign_irq_vector(irq, cfg, mask)) 2212 return -1; 2213 2214 - cpumask_copy(desc->affinity, mask); 2215 2216 - *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); 2217 return 0; 2218 } 2219 2220 static int 2221 - set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) 2222 { 2223 - struct irq_cfg *cfg; 2224 unsigned long flags; 2225 - unsigned int dest; 2226 - unsigned int irq; 2227 - int ret = -1; 2228 - 2229 - irq = desc->irq; 2230 - cfg = desc->chip_data; 2231 2232 raw_spin_lock_irqsave(&ioapic_lock, flags); 2233 - ret = set_desc_affinity(desc, mask, &dest); 2234 if (!ret) { 2235 /* Only the high 8 bits are valid. */ 2236 dest = SET_APIC_LOGICAL_ID(dest); 2237 - __target_IO_APIC_irq(irq, dest, cfg); 2238 } 2239 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2240 - 2241 return ret; 2242 - } 2243 - 2244 - static int 2245 - set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) 2246 - { 2247 - struct irq_desc *desc; 2248 - 2249 - desc = irq_to_desc(irq); 2250 - 2251 - return set_ioapic_affinity_irq_desc(desc, mask); 2252 } 2253 2254 #ifdef CONFIG_INTR_REMAP ··· 2246 * the interrupt-remapping table entry. 
2247 */ 2248 static int 2249 - migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) 2250 { 2251 - struct irq_cfg *cfg; 2252 struct irte irte; 2253 - unsigned int dest; 2254 - unsigned int irq; 2255 - int ret = -1; 2256 2257 if (!cpumask_intersects(mask, cpu_online_mask)) 2258 - return ret; 2259 2260 - irq = desc->irq; 2261 if (get_irte(irq, &irte)) 2262 - return ret; 2263 2264 - cfg = desc->chip_data; 2265 if (assign_irq_vector(irq, cfg, mask)) 2266 - return ret; 2267 2268 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 2269 ··· 2275 if (cfg->move_in_progress) 2276 send_cleanup_vector(cfg); 2277 2278 - cpumask_copy(desc->affinity, mask); 2279 - 2280 return 0; 2281 } 2282 2283 - /* 2284 - * Migrates the IRQ destination in the process context. 2285 - */ 2286 - static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, 2287 - const struct cpumask *mask) 2288 - { 2289 - return migrate_ioapic_irq_desc(desc, mask); 2290 - } 2291 - static int set_ir_ioapic_affinity_irq(unsigned int irq, 2292 - const struct cpumask *mask) 2293 - { 2294 - struct irq_desc *desc = irq_to_desc(irq); 2295 - 2296 - return set_ir_ioapic_affinity_irq_desc(desc, mask); 2297 - } 2298 #else 2299 - static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, 2300 - const struct cpumask *mask) 2301 { 2302 return 0; 2303 } ··· 2344 irq_exit(); 2345 } 2346 2347 - static void __irq_complete_move(struct irq_desc **descp, unsigned vector) 2348 { 2349 - struct irq_desc *desc = *descp; 2350 - struct irq_cfg *cfg = desc->chip_data; 2351 unsigned me; 2352 2353 if (likely(!cfg->move_in_progress)) ··· 2357 send_cleanup_vector(cfg); 2358 } 2359 2360 - static void irq_complete_move(struct irq_desc **descp) 2361 { 2362 - __irq_complete_move(descp, ~get_irq_regs()->orig_ax); 2363 } 2364 2365 void irq_force_complete_move(int irq) 2366 { 2367 - struct irq_desc *desc = irq_to_desc(irq); 2368 - struct irq_cfg *cfg = desc->chip_data; 2369 2370 if (!cfg) 2371 return; 2372 2373 - __irq_complete_move(&desc, cfg->vector); 2374 } 2375 #else 2376 - static inline void irq_complete_move(struct irq_desc **descp) {} 2377 #endif 2378 2379 - static void ack_apic_edge(unsigned int irq) 2380 { 2381 - struct irq_desc *desc = irq_to_desc(irq); 2382 - 2383 - irq_complete_move(&desc); 2384 - move_native_irq(irq); 2385 ack_APIC_irq(); 2386 } 2387 ··· 2400 * Otherwise, we simulate the EOI message manually by changing the trigger 2401 * mode to edge and then back to level, with RTE being masked during this. 2402 */ 2403 - static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2404 { 2405 struct irq_pin_list *entry; 2406 2407 for_each_irq_pin(entry, cfg->irq_2_pin) { 2408 if (mp_ioapics[entry->apic].apicver >= 0x20) { 2409 /* ··· 2414 * intr-remapping table entry. Hence for the io-apic 2415 * EOI we use the pin number. 
2416 */ 2417 - if (irq_remapped(irq)) 2418 io_apic_eoi(entry->apic, entry->pin); 2419 else 2420 io_apic_eoi(entry->apic, cfg->vector); ··· 2423 __unmask_and_level_IO_APIC_irq(entry); 2424 } 2425 } 2426 - } 2427 - 2428 - static void eoi_ioapic_irq(struct irq_desc *desc) 2429 - { 2430 - struct irq_cfg *cfg; 2431 - unsigned long flags; 2432 - unsigned int irq; 2433 - 2434 - irq = desc->irq; 2435 - cfg = desc->chip_data; 2436 - 2437 - raw_spin_lock_irqsave(&ioapic_lock, flags); 2438 - __eoi_ioapic_irq(irq, cfg); 2439 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2440 } 2441 2442 - static void ack_apic_level(unsigned int irq) 2443 { 2444 struct irq_desc *desc = irq_to_desc(irq); 2445 unsigned long v; 2446 - int i; 2447 - struct irq_cfg *cfg; 2448 - int do_unmask_irq = 0; 2449 2450 - irq_complete_move(&desc); 2451 #ifdef CONFIG_GENERIC_PENDING_IRQ 2452 /* If we are moving the irq we need to mask it */ 2453 if (unlikely(desc->status & IRQ_MOVE_PENDING)) { 2454 do_unmask_irq = 1; 2455 - mask_IO_APIC_irq_desc(desc); 2456 } 2457 #endif 2458 ··· 2474 * we use the above logic (mask+edge followed by unmask+level) from 2475 * Manfred Spraul to clear the remote IRR. 2476 */ 2477 - cfg = desc->chip_data; 2478 i = cfg->vector; 2479 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2480 ··· 2493 if (!(v & (1 << (i & 0x1f)))) { 2494 atomic_inc(&irq_mis_count); 2495 2496 - eoi_ioapic_irq(desc); 2497 } 2498 2499 /* Now we can move and renable the irq */ ··· 2524 * accurate and is causing problems then it is a hardware bug 2525 * and you can go talk to the chipset vendor about it. 2526 */ 2527 - cfg = desc->chip_data; 2528 if (!io_apic_level_ack_pending(cfg)) 2529 move_masked_irq(irq); 2530 - unmask_IO_APIC_irq_desc(desc); 2531 } 2532 } 2533 2534 #ifdef CONFIG_INTR_REMAP 2535 - static void ir_ack_apic_edge(unsigned int irq) 2536 { 2537 ack_APIC_irq(); 2538 } 2539 2540 - static void ir_ack_apic_level(unsigned int irq) 2541 { 2542 - struct irq_desc *desc = irq_to_desc(irq); 2543 - 2544 ack_APIC_irq(); 2545 - eoi_ioapic_irq(desc); 2546 } 2547 #endif /* CONFIG_INTR_REMAP */ 2548 2549 static struct irq_chip ioapic_chip __read_mostly = { 2550 - .name = "IO-APIC", 2551 - .startup = startup_ioapic_irq, 2552 - .mask = mask_IO_APIC_irq, 2553 - .unmask = unmask_IO_APIC_irq, 2554 - .ack = ack_apic_edge, 2555 - .eoi = ack_apic_level, 2556 #ifdef CONFIG_SMP 2557 - .set_affinity = set_ioapic_affinity_irq, 2558 #endif 2559 - .retrigger = ioapic_retrigger_irq, 2560 }; 2561 2562 static struct irq_chip ir_ioapic_chip __read_mostly = { 2563 - .name = "IR-IO-APIC", 2564 - .startup = startup_ioapic_irq, 2565 - .mask = mask_IO_APIC_irq, 2566 - .unmask = unmask_IO_APIC_irq, 2567 #ifdef CONFIG_INTR_REMAP 2568 - .ack = ir_ack_apic_edge, 2569 - .eoi = ir_ack_apic_level, 2570 #ifdef CONFIG_SMP 2571 - .set_affinity = set_ir_ioapic_affinity_irq, 2572 #endif 2573 #endif 2574 - .retrigger = ioapic_retrigger_irq, 2575 }; 2576 2577 static inline void init_IO_APIC_traps(void) 2578 { 2579 - int irq; 2580 - struct irq_desc *desc; 2581 struct irq_cfg *cfg; 2582 2583 /* 2584 * NOTE! The local APIC isn't very good at handling ··· 2587 * Also, we've got to be careful not to trash gate 2588 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2589 */ 2590 - for_each_irq_desc(irq, desc) { 2591 - cfg = desc->chip_data; 2592 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2593 /* 2594 * Hmm.. We don't have an entry for this, ··· 2599 legacy_pic->make_irq(irq); 2600 else 2601 /* Strange. Oh, well.. 
*/ 2602 - desc->chip = &no_irq_chip; 2603 } 2604 } 2605 } ··· 2608 * The local APIC irq-chip implementation: 2609 */ 2610 2611 - static void mask_lapic_irq(unsigned int irq) 2612 { 2613 unsigned long v; 2614 ··· 2616 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2617 } 2618 2619 - static void unmask_lapic_irq(unsigned int irq) 2620 { 2621 unsigned long v; 2622 ··· 2624 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2625 } 2626 2627 - static void ack_lapic_irq(unsigned int irq) 2628 { 2629 ack_APIC_irq(); 2630 } 2631 2632 static struct irq_chip lapic_chip __read_mostly = { 2633 .name = "local-APIC", 2634 - .mask = mask_lapic_irq, 2635 - .unmask = unmask_lapic_irq, 2636 - .ack = ack_lapic_irq, 2637 }; 2638 2639 - static void lapic_register_intr(int irq, struct irq_desc *desc) 2640 { 2641 - desc->status &= ~IRQ_LEVEL; 2642 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2643 "edge"); 2644 } ··· 2741 */ 2742 static inline void __init check_timer(void) 2743 { 2744 - struct irq_desc *desc = irq_to_desc(0); 2745 - struct irq_cfg *cfg = desc->chip_data; 2746 int node = cpu_to_node(0); 2747 int apic1, pin1, apic2, pin2; 2748 unsigned long flags; ··· 2752 /* 2753 * get/set the timer IRQ vector: 2754 */ 2755 - legacy_pic->chip->mask(0); 2756 assign_irq_vector(0, cfg, apic->target_cpus()); 2757 2758 /* ··· 2811 add_pin_to_irq_node(cfg, node, apic1, pin1); 2812 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2813 } else { 2814 - /* for edge trigger, setup_IO_APIC_irq already 2815 * leave it unmasked. 2816 * so only need to unmask if it is level-trigger 2817 * do we really have level trigger timer? ··· 2819 int idx; 2820 idx = find_irq_entry(apic1, pin1, mp_INT); 2821 if (idx != -1 && irq_trigger(idx)) 2822 - unmask_IO_APIC_irq_desc(desc); 2823 } 2824 if (timer_irq_works()) { 2825 if (nmi_watchdog == NMI_IO_APIC) { 2826 setup_nmi(); 2827 - legacy_pic->chip->unmask(0); 2828 } 2829 if (disable_timer_pin_1 > 0) 2830 clear_IO_APIC_pin(0, pin1); ··· 2847 */ 2848 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2849 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2850 - legacy_pic->chip->unmask(0); 2851 if (timer_irq_works()) { 2852 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2853 timer_through_8259 = 1; 2854 if (nmi_watchdog == NMI_IO_APIC) { 2855 - legacy_pic->chip->mask(0); 2856 setup_nmi(); 2857 - legacy_pic->chip->unmask(0); 2858 } 2859 goto out; 2860 } ··· 2862 * Cleanup, just in case ... 2863 */ 2864 local_irq_disable(); 2865 - legacy_pic->chip->mask(0); 2866 clear_IO_APIC_pin(apic2, pin2); 2867 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2868 } ··· 2879 apic_printk(APIC_QUIET, KERN_INFO 2880 "...trying to set up timer as Virtual Wire IRQ...\n"); 2881 2882 - lapic_register_intr(0, desc); 2883 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2884 - legacy_pic->chip->unmask(0); 2885 2886 if (timer_irq_works()) { 2887 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2888 goto out; 2889 } 2890 local_irq_disable(); 2891 - legacy_pic->chip->mask(0); 2892 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2893 apic_printk(APIC_QUIET, KERN_INFO "..... 
failed.\n"); 2894 ··· 3054 /* 3055 * Dynamic irq allocate and deallocation 3056 */ 3057 - unsigned int create_irq_nr(unsigned int irq_want, int node) 3058 { 3059 - /* Allocate an unused irq */ 3060 - unsigned int irq; 3061 - unsigned int new; 3062 unsigned long flags; 3063 - struct irq_cfg *cfg_new = NULL; 3064 - struct irq_desc *desc_new = NULL; 3065 3066 - irq = 0; 3067 - if (irq_want < nr_irqs_gsi) 3068 - irq_want = nr_irqs_gsi; 3069 3070 raw_spin_lock_irqsave(&vector_lock, flags); 3071 - for (new = irq_want; new < nr_irqs; new++) { 3072 - desc_new = irq_to_desc_alloc_node(new, node); 3073 - if (!desc_new) { 3074 - printk(KERN_INFO "can not get irq_desc for %d\n", new); 3075 - continue; 3076 - } 3077 - cfg_new = desc_new->chip_data; 3078 - 3079 - if (cfg_new->vector != 0) 3080 - continue; 3081 - 3082 - desc_new = move_irq_desc(desc_new, node); 3083 - cfg_new = desc_new->chip_data; 3084 - 3085 - if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) 3086 - irq = new; 3087 - break; 3088 - } 3089 raw_spin_unlock_irqrestore(&vector_lock, flags); 3090 3091 - if (irq > 0) 3092 - dynamic_irq_init_keep_chip_data(irq); 3093 - 3094 - return irq; 3095 } 3096 3097 int create_irq(void) ··· 3104 3105 void destroy_irq(unsigned int irq) 3106 { 3107 unsigned long flags; 3108 3109 - dynamic_irq_cleanup_keep_chip_data(irq); 3110 3111 - free_irte(irq); 3112 raw_spin_lock_irqsave(&vector_lock, flags); 3113 - __clear_irq_vector(irq, get_irq_chip_data(irq)); 3114 raw_spin_unlock_irqrestore(&vector_lock, flags); 3115 } 3116 3117 /* ··· 3138 3139 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3140 3141 - if (irq_remapped(irq)) { 3142 struct irte irte; 3143 int ir_index; 3144 u16 sub_handle; ··· 3191 } 3192 3193 #ifdef CONFIG_SMP 3194 - static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) 3195 { 3196 - struct irq_desc *desc = irq_to_desc(irq); 3197 - struct irq_cfg *cfg; 3198 struct msi_msg msg; 3199 unsigned int dest; 3200 3201 - if (set_desc_affinity(desc, mask, &dest)) 3202 return -1; 3203 3204 - cfg = desc->chip_data; 3205 - 3206 - get_cached_msi_msg_desc(desc, &msg); 3207 3208 msg.data &= ~MSI_DATA_VECTOR_MASK; 3209 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3210 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3211 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3212 3213 - write_msi_msg_desc(desc, &msg); 3214 3215 return 0; 3216 } ··· 3218 * done in the process context using interrupt-remapping hardware. 3219 */ 3220 static int 3221 - ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) 3222 { 3223 - struct irq_desc *desc = irq_to_desc(irq); 3224 - struct irq_cfg *cfg = desc->chip_data; 3225 - unsigned int dest; 3226 struct irte irte; 3227 3228 if (get_irte(irq, &irte)) 3229 return -1; 3230 3231 - if (set_desc_affinity(desc, mask, &dest)) 3232 return -1; 3233 3234 irte.vector = cfg->vector; ··· 3258 * which implement the MSI or MSI-X Capability Structure. 
3259 */ 3260 static struct irq_chip msi_chip = { 3261 - .name = "PCI-MSI", 3262 - .unmask = unmask_msi_irq, 3263 - .mask = mask_msi_irq, 3264 - .ack = ack_apic_edge, 3265 #ifdef CONFIG_SMP 3266 - .set_affinity = set_msi_irq_affinity, 3267 #endif 3268 - .retrigger = ioapic_retrigger_irq, 3269 }; 3270 3271 static struct irq_chip msi_ir_chip = { 3272 - .name = "IR-PCI-MSI", 3273 - .unmask = unmask_msi_irq, 3274 - .mask = mask_msi_irq, 3275 #ifdef CONFIG_INTR_REMAP 3276 - .ack = ir_ack_apic_edge, 3277 #ifdef CONFIG_SMP 3278 - .set_affinity = ir_set_msi_irq_affinity, 3279 #endif 3280 #endif 3281 - .retrigger = ioapic_retrigger_irq, 3282 }; 3283 3284 /* ··· 3310 3311 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3312 { 3313 - int ret; 3314 struct msi_msg msg; 3315 3316 ret = msi_compose_msg(dev, irq, &msg, -1); 3317 if (ret < 0) ··· 3320 set_irq_msi(irq, msidesc); 3321 write_msi_msg(irq, &msg); 3322 3323 - if (irq_remapped(irq)) { 3324 - struct irq_desc *desc = irq_to_desc(irq); 3325 - /* 3326 - * irq migration in process context 3327 - */ 3328 - desc->status |= IRQ_MOVE_PCNTXT; 3329 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); 3330 } else 3331 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); ··· 3333 3334 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3335 { 3336 - unsigned int irq; 3337 - int ret, sub_handle; 3338 struct msi_desc *msidesc; 3339 - unsigned int irq_want; 3340 struct intel_iommu *iommu = NULL; 3341 - int index = 0; 3342 - int node; 3343 3344 /* x86 doesn't support multiple MSI yet */ 3345 if (type == PCI_CAP_ID_MSI && nvec > 1) ··· 3396 3397 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) 3398 #ifdef CONFIG_SMP 3399 - static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 3400 { 3401 - struct irq_desc *desc = irq_to_desc(irq); 3402 - struct irq_cfg *cfg; 3403 struct msi_msg msg; 3404 - unsigned int dest; 3405 3406 - if (set_desc_affinity(desc, mask, &dest)) 3407 return -1; 3408 - 3409 - cfg = desc->chip_data; 3410 3411 dmar_msi_read(irq, &msg); 3412 ··· 3422 #endif /* CONFIG_SMP */ 3423 3424 static struct irq_chip dmar_msi_type = { 3425 - .name = "DMAR_MSI", 3426 - .unmask = dmar_msi_unmask, 3427 - .mask = dmar_msi_mask, 3428 - .ack = ack_apic_edge, 3429 #ifdef CONFIG_SMP 3430 - .set_affinity = dmar_msi_set_affinity, 3431 #endif 3432 - .retrigger = ioapic_retrigger_irq, 3433 }; 3434 3435 int arch_setup_dmar_msi(unsigned int irq) ··· 3450 #ifdef CONFIG_HPET_TIMER 3451 3452 #ifdef CONFIG_SMP 3453 - static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 3454 { 3455 - struct irq_desc *desc = irq_to_desc(irq); 3456 - struct irq_cfg *cfg; 3457 struct msi_msg msg; 3458 unsigned int dest; 3459 3460 - if (set_desc_affinity(desc, mask, &dest)) 3461 return -1; 3462 3463 - cfg = desc->chip_data; 3464 - 3465 - hpet_msi_read(irq, &msg); 3466 3467 msg.data &= ~MSI_DATA_VECTOR_MASK; 3468 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3469 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3470 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3471 3472 - hpet_msi_write(irq, &msg); 3473 3474 return 0; 3475 } ··· 3475 #endif /* CONFIG_SMP */ 3476 3477 static struct irq_chip ir_hpet_msi_type = { 3478 - .name = "IR-HPET_MSI", 3479 - .unmask = hpet_msi_unmask, 3480 - .mask = hpet_msi_mask, 3481 #ifdef CONFIG_INTR_REMAP 3482 - .ack = ir_ack_apic_edge, 3483 #ifdef CONFIG_SMP 3484 - .set_affinity = ir_set_msi_irq_affinity, 3485 #endif 3486 #endif 3487 - .retrigger 
= ioapic_retrigger_irq, 3488 }; 3489 3490 static struct irq_chip hpet_msi_type = { 3491 .name = "HPET_MSI", 3492 - .unmask = hpet_msi_unmask, 3493 - .mask = hpet_msi_mask, 3494 - .ack = ack_apic_edge, 3495 #ifdef CONFIG_SMP 3496 - .set_affinity = hpet_msi_set_affinity, 3497 #endif 3498 - .retrigger = ioapic_retrigger_irq, 3499 }; 3500 3501 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3502 { 3503 - int ret; 3504 struct msi_msg msg; 3505 - struct irq_desc *desc = irq_to_desc(irq); 3506 3507 if (intr_remapping_enabled) { 3508 struct intel_iommu *iommu = map_hpet_to_ir(id); ··· 3519 if (ret < 0) 3520 return ret; 3521 3522 - hpet_msi_write(irq, &msg); 3523 - desc->status |= IRQ_MOVE_PCNTXT; 3524 - if (irq_remapped(irq)) 3525 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, 3526 handle_edge_irq, "edge"); 3527 else ··· 3554 write_ht_irq_msg(irq, &msg); 3555 } 3556 3557 - static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) 3558 { 3559 - struct irq_desc *desc = irq_to_desc(irq); 3560 - struct irq_cfg *cfg; 3561 unsigned int dest; 3562 3563 - if (set_desc_affinity(desc, mask, &dest)) 3564 return -1; 3565 3566 - cfg = desc->chip_data; 3567 - 3568 - target_ht_irq(irq, dest, cfg->vector); 3569 - 3570 return 0; 3571 } 3572 3573 #endif 3574 3575 static struct irq_chip ht_irq_chip = { 3576 - .name = "PCI-HT", 3577 - .mask = mask_ht_irq, 3578 - .unmask = unmask_ht_irq, 3579 - .ack = ack_apic_edge, 3580 #ifdef CONFIG_SMP 3581 - .set_affinity = set_ht_irq_affinity, 3582 #endif 3583 - .retrigger = ioapic_retrigger_irq, 3584 }; 3585 3586 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) ··· 3668 if (nr < nr_irqs) 3669 nr_irqs = nr; 3670 3671 - return 0; 3672 } 3673 #endif 3674 3675 static int __io_apic_set_pci_routing(struct device *dev, int irq, 3676 struct io_apic_irq_attr *irq_attr) 3677 { 3678 - struct irq_desc *desc; 3679 struct irq_cfg *cfg; 3680 int node; 3681 int ioapic, pin; ··· 3692 else 3693 node = cpu_to_node(0); 3694 3695 - desc = irq_to_desc_alloc_node(irq, node); 3696 - if (!desc) { 3697 - printk(KERN_INFO "can not get irq_desc %d\n", irq); 3698 return 0; 3699 - } 3700 3701 pin = irq_attr->ioapic_pin; 3702 trigger = irq_attr->trigger; ··· 3704 * IRQs < 16 are already in the irq_2_pin[] map 3705 */ 3706 if (irq >= legacy_pic->nr_legacy_irqs) { 3707 - cfg = desc->chip_data; 3708 - if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { 3709 printk(KERN_INFO "can not add pin %d for irq %d\n", 3710 pin, irq); 3711 return 0; 3712 } 3713 } 3714 3715 - setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); 3716 3717 return 0; 3718 } ··· 3904 */ 3905 if (desc->status & 3906 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 3907 - mask = desc->affinity; 3908 else 3909 mask = apic->target_cpus(); 3910 3911 if (intr_remapping_enabled) 3912 - set_ir_ioapic_affinity_irq_desc(desc, mask); 3913 else 3914 - set_ioapic_affinity_irq_desc(desc, mask); 3915 } 3916 3917 } ··· 4095 void __init pre_init_apic_IRQ0(void) 4096 { 4097 struct irq_cfg *cfg; 4098 - struct irq_desc *desc; 4099 4100 printk(KERN_INFO "Early APIC setup for system timer0\n"); 4101 #ifndef CONFIG_SMP 4102 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); 4103 #endif 4104 - desc = irq_to_desc_alloc_node(0, 0); 4105 4106 setup_local_APIC(); 4107 4108 - cfg = irq_cfg(0); 4109 add_pin_to_irq_node(cfg, 0, 0, 0); 4110 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); 4111 4112 - setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); 4113 }
··· 131 struct irq_pin_list *next; 132 }; 133 134 + static struct irq_pin_list *alloc_irq_pin_list(int node) 135 { 136 + return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); 137 } 138 139 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ ··· 150 int __init arch_early_irq_init(void) 151 { 152 struct irq_cfg *cfg; 153 + int count, node, i; 154 155 if (!legacy_pic->nr_legacy_irqs) { 156 nr_irqs_gsi = 0; ··· 164 count = ARRAY_SIZE(irq_cfgx); 165 node = cpu_to_node(0); 166 167 + /* Make sure the legacy interrupts are marked in the bitmap */ 168 + irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); 169 + 170 for (i = 0; i < count; i++) { 171 + set_irq_chip_data(i, &cfg[i]); 172 + zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); 173 + zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); 174 /* 175 * For legacy IRQ's, start with assigning irq0 to irq15 to 176 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. ··· 183 } 184 185 #ifdef CONFIG_SPARSE_IRQ 186 + static struct irq_cfg *irq_cfg(unsigned int irq) 187 { 188 + return get_irq_chip_data(irq); 189 + } 190 191 + static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) 192 + { 193 + struct irq_cfg *cfg; 194 195 + cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); 196 + if (!cfg) 197 + return NULL; 198 + if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) 199 + goto out_cfg; 200 + if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) 201 + goto out_domain; 202 return cfg; 203 + out_domain: 204 + free_cpumask_var(cfg->domain); 205 + out_cfg: 206 + kfree(cfg); 207 + return NULL; 208 } 209 210 + static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) 211 { 212 if (!cfg) 213 return; 214 + set_irq_chip_data(at, NULL); 215 free_cpumask_var(cfg->domain); 216 free_cpumask_var(cfg->old_domain); 217 kfree(cfg); 218 } 219 220 #else 221 + 222 struct irq_cfg *irq_cfg(unsigned int irq) 223 { 224 return irq < nr_irqs ? irq_cfgx + irq : NULL; 225 } 226 227 + static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) 228 + { 229 + return irq_cfgx + irq; 230 + } 231 + 232 + static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { } 233 + 234 #endif 235 + 236 + static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) 237 + { 238 + int res = irq_alloc_desc_at(at, node); 239 + struct irq_cfg *cfg; 240 + 241 + if (res < 0) { 242 + if (res != -EEXIST) 243 + return NULL; 244 + cfg = get_irq_chip_data(at); 245 + if (cfg) 246 + return cfg; 247 + } 248 + 249 + cfg = alloc_irq_cfg(at, node); 250 + if (cfg) 251 + set_irq_chip_data(at, cfg); 252 + else 253 + irq_free_desc(at); 254 + return cfg; 255 + } 256 + 257 + static int alloc_irq_from(unsigned int from, int node) 258 + { 259 + return irq_alloc_desc_from(from, node); 260 + } 261 + 262 + static void free_irq_at(unsigned int at, struct irq_cfg *cfg) 263 + { 264 + free_irq_cfg(at, cfg); 265 + irq_free_desc(at); 266 + } 267 268 struct io_apic { 269 unsigned int index; ··· 451 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 452 } 453 454 + static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 455 { 456 unsigned long flags; 457 raw_spin_lock_irqsave(&ioapic_lock, flags); ··· 481 * fast in the common case, and fast for shared ISA-space IRQs. 
482 */ 483 static int 484 + __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 485 { 486 struct irq_pin_list **last, *entry; 487 ··· 493 last = &entry->next; 494 } 495 496 + entry = alloc_irq_pin_list(node); 497 if (!entry) { 498 printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", 499 node, apic, pin); ··· 508 509 static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 510 { 511 + if (__add_pin_to_irq_node(cfg, node, apic, pin)) 512 panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); 513 } 514 ··· 571 IO_APIC_REDIR_LEVEL_TRIGGER, NULL); 572 } 573 574 static void io_apic_sync(struct irq_pin_list *entry) 575 { 576 /* ··· 587 readl(&io_apic->data); 588 } 589 590 + static void mask_ioapic(struct irq_cfg *cfg) 591 { 592 + unsigned long flags; 593 + 594 + raw_spin_lock_irqsave(&ioapic_lock, flags); 595 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); 596 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 597 } 598 599 + static void mask_ioapic_irq(struct irq_data *data) 600 { 601 + mask_ioapic(data->chip_data); 602 + } 603 + 604 + static void __unmask_ioapic(struct irq_cfg *cfg) 605 + { 606 + io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); 607 + } 608 + 609 + static void unmask_ioapic(struct irq_cfg *cfg) 610 + { 611 unsigned long flags; 612 613 raw_spin_lock_irqsave(&ioapic_lock, flags); 614 + __unmask_ioapic(cfg); 615 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 616 } 617 618 + static void unmask_ioapic_irq(struct irq_data *data) 619 { 620 + unmask_ioapic(data->chip_data); 621 } 622 623 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) ··· 694 struct IO_APIC_route_entry **ioapic_entries; 695 696 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, 697 + GFP_KERNEL); 698 if (!ioapic_entries) 699 return 0; 700 701 for (apic = 0; apic < nr_ioapics; apic++) { 702 ioapic_entries[apic] = 703 kzalloc(sizeof(struct IO_APIC_route_entry) * 704 + nr_ioapic_registers[apic], GFP_KERNEL); 705 if (!ioapic_entries[apic]) 706 goto nomem; 707 } ··· 1259 /* Initialize vector_irq on a new cpu */ 1260 int irq, vector; 1261 struct irq_cfg *cfg; 1262 1263 /* 1264 * vector_lock will make sure that we don't run into irq vector ··· 1268 */ 1269 raw_spin_lock(&vector_lock); 1270 /* Mark the inuse vectors */ 1271 + for_each_active_irq(irq) { 1272 + cfg = get_irq_chip_data(irq); 1273 + if (!cfg) 1274 + continue; 1275 /* 1276 * If it is a legacy IRQ handled by the legacy PIC, this cpu 1277 * will be part of the irq_cfg's domain. 
··· 1327 } 1328 #endif 1329 1330 + static void ioapic_register_intr(unsigned int irq, unsigned long trigger) 1331 { 1332 1333 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1334 trigger == IOAPIC_LEVEL) 1335 + irq_set_status_flags(irq, IRQ_LEVEL); 1336 else 1337 + irq_clear_status_flags(irq, IRQ_LEVEL); 1338 1339 + if (irq_remapped(get_irq_chip_data(irq))) { 1340 + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 1341 if (trigger) 1342 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, 1343 handle_fasteoi_irq, ··· 1358 handle_edge_irq, "edge"); 1359 } 1360 1361 + static int setup_ioapic_entry(int apic_id, int irq, 1362 + struct IO_APIC_route_entry *entry, 1363 + unsigned int destination, int trigger, 1364 + int polarity, int vector, int pin) 1365 { 1366 /* 1367 * add it to the IO-APIC irq-routing table: ··· 1417 return 0; 1418 } 1419 1420 + static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, 1421 + struct irq_cfg *cfg, int trigger, int polarity) 1422 { 1423 struct IO_APIC_route_entry entry; 1424 unsigned int dest; 1425 1426 if (!IO_APIC_IRQ(irq)) 1427 return; 1428 /* 1429 * For legacy irqs, cfg->domain starts with cpu 0 for legacy 1430 * controllers like 8259. Now that IO-APIC can handle this irq, update ··· 1457 return; 1458 } 1459 1460 + ioapic_register_intr(irq, trigger); 1461 if (irq < legacy_pic->nr_legacy_irqs) 1462 + legacy_pic->mask(irq); 1463 1464 ioapic_write_entry(apic_id, pin, entry); 1465 } ··· 1470 1471 static void __init setup_IO_APIC_irqs(void) 1472 { 1473 + int apic_id, pin, idx, irq, notcon = 0; 1474 int node = cpu_to_node(0); 1475 + struct irq_cfg *cfg; 1476 1477 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1478 ··· 1511 apic->multi_timer_check(apic_id, irq)) 1512 continue; 1513 1514 + cfg = alloc_irq_and_cfg_at(irq, node); 1515 + if (!cfg) 1516 continue; 1517 + 1518 add_pin_to_irq_node(cfg, node, apic_id, pin); 1519 /* 1520 * don't mark it in pin_programmed, so later acpi could 1521 * set it correctly when irq < 16 1522 */ 1523 + setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), 1524 + irq_polarity(idx)); 1525 } 1526 1527 if (notcon) ··· 1538 */ 1539 void setup_IO_APIC_irq_extra(u32 gsi) 1540 { 1541 + int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); 1542 struct irq_cfg *cfg; 1543 1544 /* ··· 1556 return; 1557 1558 irq = pin_2_irq(idx, apic_id, pin); 1559 1560 + /* Only handle the non legacy irqs on secondary ioapics */ 1561 + if (apic_id == 0 || irq < NR_IRQS_LEGACY) 1562 + return; 1563 + 1564 + cfg = alloc_irq_and_cfg_at(irq, node); 1565 + if (!cfg) 1566 + return; 1567 + 1568 add_pin_to_irq_node(cfg, node, apic_id, pin); 1569 1570 if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { ··· 1577 } 1578 set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); 1579 1580 + setup_ioapic_irq(apic_id, pin, irq, cfg, 1581 irq_trigger(idx), irq_polarity(idx)); 1582 } 1583 ··· 1628 union IO_APIC_reg_03 reg_03; 1629 unsigned long flags; 1630 struct irq_cfg *cfg; 1631 unsigned int irq; 1632 1633 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); ··· 1715 } 1716 } 1717 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1718 + for_each_active_irq(irq) { 1719 struct irq_pin_list *entry; 1720 1721 + cfg = get_irq_chip_data(irq); 1722 if (!cfg) 1723 continue; 1724 entry = cfg->irq_2_pin; ··· 2225 * an edge even if it isn't on the 8259A... 
2226 */ 2227 2228 + static unsigned int startup_ioapic_irq(struct irq_data *data) 2229 { 2230 + int was_pending = 0, irq = data->irq; 2231 unsigned long flags; 2232 2233 raw_spin_lock_irqsave(&ioapic_lock, flags); 2234 if (irq < legacy_pic->nr_legacy_irqs) { 2235 + legacy_pic->mask(irq); 2236 if (legacy_pic->irq_pending(irq)) 2237 was_pending = 1; 2238 } 2239 + __unmask_ioapic(data->chip_data); 2240 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2241 2242 return was_pending; 2243 } 2244 2245 + static int ioapic_retrigger_irq(struct irq_data *data) 2246 { 2247 + struct irq_cfg *cfg = data->chip_data; 2248 unsigned long flags; 2249 2250 raw_spin_lock_irqsave(&vector_lock, flags); ··· 2298 * With interrupt-remapping, destination information comes 2299 * from interrupt-remapping table entry. 2300 */ 2301 + if (!irq_remapped(cfg)) 2302 io_apic_write(apic, 0x11 + pin*2, dest); 2303 reg = io_apic_read(apic, 0x10 + pin*2); 2304 reg &= ~IO_APIC_REDIR_VECTOR_MASK; ··· 2308 } 2309 2310 /* 2311 + * Either sets data->affinity to a valid value, and returns 2312 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2313 + * leaves data->affinity untouched. 2314 */ 2315 + int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2316 + unsigned int *dest_id) 2317 { 2318 + struct irq_cfg *cfg = data->chip_data; 2319 2320 if (!cpumask_intersects(mask, cpu_online_mask)) 2321 return -1; 2322 2323 + if (assign_irq_vector(data->irq, data->chip_data, mask)) 2324 return -1; 2325 2326 + cpumask_copy(data->affinity, mask); 2327 2328 + *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); 2329 return 0; 2330 } 2331 2332 static int 2333 + ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2334 + bool force) 2335 { 2336 + unsigned int dest, irq = data->irq; 2337 unsigned long flags; 2338 + int ret; 2339 2340 raw_spin_lock_irqsave(&ioapic_lock, flags); 2341 + ret = __ioapic_set_affinity(data, mask, &dest); 2342 if (!ret) { 2343 /* Only the high 8 bits are valid. */ 2344 dest = SET_APIC_LOGICAL_ID(dest); 2345 + __target_IO_APIC_irq(irq, dest, data->chip_data); 2346 } 2347 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2348 return ret; 2349 } 2350 2351 #ifdef CONFIG_INTR_REMAP ··· 2381 * the interrupt-remapping table entry. 
2382 */ 2383 static int 2384 + ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2385 + bool force) 2386 { 2387 + struct irq_cfg *cfg = data->chip_data; 2388 + unsigned int dest, irq = data->irq; 2389 struct irte irte; 2390 2391 if (!cpumask_intersects(mask, cpu_online_mask)) 2392 + return -EINVAL; 2393 2394 if (get_irte(irq, &irte)) 2395 + return -EBUSY; 2396 2397 if (assign_irq_vector(irq, cfg, mask)) 2398 + return -EBUSY; 2399 2400 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 2401 ··· 2413 if (cfg->move_in_progress) 2414 send_cleanup_vector(cfg); 2415 2416 + cpumask_copy(data->affinity, mask); 2417 return 0; 2418 } 2419 2420 #else 2421 + static inline int 2422 + ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2423 + bool force) 2424 { 2425 return 0; 2426 } ··· 2497 irq_exit(); 2498 } 2499 2500 + static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) 2501 { 2502 unsigned me; 2503 2504 if (likely(!cfg->move_in_progress)) ··· 2512 send_cleanup_vector(cfg); 2513 } 2514 2515 + static void irq_complete_move(struct irq_cfg *cfg) 2516 { 2517 + __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); 2518 } 2519 2520 void irq_force_complete_move(int irq) 2521 { 2522 + struct irq_cfg *cfg = get_irq_chip_data(irq); 2523 2524 if (!cfg) 2525 return; 2526 2527 + __irq_complete_move(cfg, cfg->vector); 2528 } 2529 #else 2530 + static inline void irq_complete_move(struct irq_cfg *cfg) { } 2531 #endif 2532 2533 + static void ack_apic_edge(struct irq_data *data) 2534 { 2535 + irq_complete_move(data->chip_data); 2536 + move_native_irq(data->irq); 2537 ack_APIC_irq(); 2538 } 2539 ··· 2558 * Otherwise, we simulate the EOI message manually by changing the trigger 2559 * mode to edge and then back to level, with RTE being masked during this. 2560 */ 2561 + static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2562 { 2563 struct irq_pin_list *entry; 2564 + unsigned long flags; 2565 2566 + raw_spin_lock_irqsave(&ioapic_lock, flags); 2567 for_each_irq_pin(entry, cfg->irq_2_pin) { 2568 if (mp_ioapics[entry->apic].apicver >= 0x20) { 2569 /* ··· 2570 * intr-remapping table entry. Hence for the io-apic 2571 * EOI we use the pin number. 2572 */ 2573 + if (irq_remapped(cfg)) 2574 io_apic_eoi(entry->apic, entry->pin); 2575 else 2576 io_apic_eoi(entry->apic, cfg->vector); ··· 2579 __unmask_and_level_IO_APIC_irq(entry); 2580 } 2581 } 2582 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2583 } 2584 2585 + static void ack_apic_level(struct irq_data *data) 2586 { 2587 + struct irq_cfg *cfg = data->chip_data; 2588 + int i, do_unmask_irq = 0, irq = data->irq; 2589 struct irq_desc *desc = irq_to_desc(irq); 2590 unsigned long v; 2591 2592 + irq_complete_move(cfg); 2593 #ifdef CONFIG_GENERIC_PENDING_IRQ 2594 /* If we are moving the irq we need to mask it */ 2595 if (unlikely(desc->status & IRQ_MOVE_PENDING)) { 2596 do_unmask_irq = 1; 2597 + mask_ioapic(cfg); 2598 } 2599 #endif 2600 ··· 2644 * we use the above logic (mask+edge followed by unmask+level) from 2645 * Manfred Spraul to clear the remote IRR. 2646 */ 2647 i = cfg->vector; 2648 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2649 ··· 2664 if (!(v & (1 << (i & 0x1f)))) { 2665 atomic_inc(&irq_mis_count); 2666 2667 + eoi_ioapic_irq(irq, cfg); 2668 } 2669 2670 /* Now we can move and renable the irq */ ··· 2695 * accurate and is causing problems then it is a hardware bug 2696 * and you can go talk to the chipset vendor about it. 
2697 */ 2698 if (!io_apic_level_ack_pending(cfg)) 2699 move_masked_irq(irq); 2700 + unmask_ioapic(cfg); 2701 } 2702 } 2703 2704 #ifdef CONFIG_INTR_REMAP 2705 + static void ir_ack_apic_edge(struct irq_data *data) 2706 { 2707 ack_APIC_irq(); 2708 } 2709 2710 + static void ir_ack_apic_level(struct irq_data *data) 2711 { 2712 ack_APIC_irq(); 2713 + eoi_ioapic_irq(data->irq, data->chip_data); 2714 } 2715 #endif /* CONFIG_INTR_REMAP */ 2716 2717 static struct irq_chip ioapic_chip __read_mostly = { 2718 + .name = "IO-APIC", 2719 + .irq_startup = startup_ioapic_irq, 2720 + .irq_mask = mask_ioapic_irq, 2721 + .irq_unmask = unmask_ioapic_irq, 2722 + .irq_ack = ack_apic_edge, 2723 + .irq_eoi = ack_apic_level, 2724 #ifdef CONFIG_SMP 2725 + .irq_set_affinity = ioapic_set_affinity, 2726 #endif 2727 + .irq_retrigger = ioapic_retrigger_irq, 2728 }; 2729 2730 static struct irq_chip ir_ioapic_chip __read_mostly = { 2731 + .name = "IR-IO-APIC", 2732 + .irq_startup = startup_ioapic_irq, 2733 + .irq_mask = mask_ioapic_irq, 2734 + .irq_unmask = unmask_ioapic_irq, 2735 #ifdef CONFIG_INTR_REMAP 2736 + .irq_ack = ir_ack_apic_edge, 2737 + .irq_eoi = ir_ack_apic_level, 2738 #ifdef CONFIG_SMP 2739 + .irq_set_affinity = ir_ioapic_set_affinity, 2740 #endif 2741 #endif 2742 + .irq_retrigger = ioapic_retrigger_irq, 2743 }; 2744 2745 static inline void init_IO_APIC_traps(void) 2746 { 2747 struct irq_cfg *cfg; 2748 + unsigned int irq; 2749 2750 /* 2751 * NOTE! The local APIC isn't very good at handling ··· 2762 * Also, we've got to be careful not to trash gate 2763 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2764 */ 2765 + for_each_active_irq(irq) { 2766 + cfg = get_irq_chip_data(irq); 2767 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2768 /* 2769 * Hmm.. We don't have an entry for this, ··· 2774 legacy_pic->make_irq(irq); 2775 else 2776 /* Strange. Oh, well.. */ 2777 + set_irq_chip(irq, &no_irq_chip); 2778 } 2779 } 2780 } ··· 2783 * The local APIC irq-chip implementation: 2784 */ 2785 2786 + static void mask_lapic_irq(struct irq_data *data) 2787 { 2788 unsigned long v; 2789 ··· 2791 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2792 } 2793 2794 + static void unmask_lapic_irq(struct irq_data *data) 2795 { 2796 unsigned long v; 2797 ··· 2799 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2800 } 2801 2802 + static void ack_lapic_irq(struct irq_data *data) 2803 { 2804 ack_APIC_irq(); 2805 } 2806 2807 static struct irq_chip lapic_chip __read_mostly = { 2808 .name = "local-APIC", 2809 + .irq_mask = mask_lapic_irq, 2810 + .irq_unmask = unmask_lapic_irq, 2811 + .irq_ack = ack_lapic_irq, 2812 }; 2813 2814 + static void lapic_register_intr(int irq) 2815 { 2816 + irq_clear_status_flags(irq, IRQ_LEVEL); 2817 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2818 "edge"); 2819 } ··· 2916 */ 2917 static inline void __init check_timer(void) 2918 { 2919 + struct irq_cfg *cfg = get_irq_chip_data(0); 2920 int node = cpu_to_node(0); 2921 int apic1, pin1, apic2, pin2; 2922 unsigned long flags; ··· 2928 /* 2929 * get/set the timer IRQ vector: 2930 */ 2931 + legacy_pic->mask(0); 2932 assign_irq_vector(0, cfg, apic->target_cpus()); 2933 2934 /* ··· 2987 add_pin_to_irq_node(cfg, node, apic1, pin1); 2988 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2989 } else { 2990 + /* for edge trigger, setup_ioapic_irq already 2991 * leave it unmasked. 2992 * so only need to unmask if it is level-trigger 2993 * do we really have level trigger timer? 
··· 2995 int idx; 2996 idx = find_irq_entry(apic1, pin1, mp_INT); 2997 if (idx != -1 && irq_trigger(idx)) 2998 + unmask_ioapic(cfg); 2999 } 3000 if (timer_irq_works()) { 3001 if (nmi_watchdog == NMI_IO_APIC) { 3002 setup_nmi(); 3003 + legacy_pic->unmask(0); 3004 } 3005 if (disable_timer_pin_1 > 0) 3006 clear_IO_APIC_pin(0, pin1); ··· 3023 */ 3024 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 3025 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 3026 + legacy_pic->unmask(0); 3027 if (timer_irq_works()) { 3028 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 3029 timer_through_8259 = 1; 3030 if (nmi_watchdog == NMI_IO_APIC) { 3031 + legacy_pic->mask(0); 3032 setup_nmi(); 3033 + legacy_pic->unmask(0); 3034 } 3035 goto out; 3036 } ··· 3038 * Cleanup, just in case ... 3039 */ 3040 local_irq_disable(); 3041 + legacy_pic->mask(0); 3042 clear_IO_APIC_pin(apic2, pin2); 3043 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 3044 } ··· 3055 apic_printk(APIC_QUIET, KERN_INFO 3056 "...trying to set up timer as Virtual Wire IRQ...\n"); 3057 3058 + lapic_register_intr(0); 3059 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 3060 + legacy_pic->unmask(0); 3061 3062 if (timer_irq_works()) { 3063 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 3064 goto out; 3065 } 3066 local_irq_disable(); 3067 + legacy_pic->mask(0); 3068 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 3069 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 3070 ··· 3230 /* 3231 * Dynamic irq allocate and deallocation 3232 */ 3233 + unsigned int create_irq_nr(unsigned int from, int node) 3234 { 3235 + struct irq_cfg *cfg; 3236 unsigned long flags; 3237 + unsigned int ret = 0; 3238 + int irq; 3239 3240 + if (from < nr_irqs_gsi) 3241 + from = nr_irqs_gsi; 3242 + 3243 + irq = alloc_irq_from(from, node); 3244 + if (irq < 0) 3245 + return 0; 3246 + cfg = alloc_irq_cfg(irq, node); 3247 + if (!cfg) { 3248 + free_irq_at(irq, NULL); 3249 + return 0; 3250 + } 3251 3252 raw_spin_lock_irqsave(&vector_lock, flags); 3253 + if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 3254 + ret = irq; 3255 raw_spin_unlock_irqrestore(&vector_lock, flags); 3256 3257 + if (ret) { 3258 + set_irq_chip_data(irq, cfg); 3259 + irq_clear_status_flags(irq, IRQ_NOREQUEST); 3260 + } else { 3261 + free_irq_at(irq, cfg); 3262 + } 3263 + return ret; 3264 } 3265 3266 int create_irq(void) ··· 3287 3288 void destroy_irq(unsigned int irq) 3289 { 3290 + struct irq_cfg *cfg = get_irq_chip_data(irq); 3291 unsigned long flags; 3292 3293 + irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3294 3295 + if (intr_remapping_enabled) 3296 + free_irte(irq); 3297 raw_spin_lock_irqsave(&vector_lock, flags); 3298 + __clear_irq_vector(irq, cfg); 3299 raw_spin_unlock_irqrestore(&vector_lock, flags); 3300 + free_irq_at(irq, cfg); 3301 } 3302 3303 /* ··· 3318 3319 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3320 3321 + if (irq_remapped(get_irq_chip_data(irq))) { 3322 struct irte irte; 3323 int ir_index; 3324 u16 sub_handle; ··· 3371 } 3372 3373 #ifdef CONFIG_SMP 3374 + static int 3375 + msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3376 { 3377 + struct irq_cfg *cfg = data->chip_data; 3378 struct msi_msg msg; 3379 unsigned int dest; 3380 3381 + if (__ioapic_set_affinity(data, mask, &dest)) 3382 return -1; 3383 3384 + __get_cached_msi_msg(data->msi_desc, &msg); 3385 3386 msg.data &= ~MSI_DATA_VECTOR_MASK; 3387 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3388 
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3389 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3390 3391 + __write_msi_msg(data->msi_desc, &msg); 3392 3393 return 0; 3394 } ··· 3400 * done in the process context using interrupt-remapping hardware. 3401 */ 3402 static int 3403 + ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3404 + bool force) 3405 { 3406 + struct irq_cfg *cfg = data->chip_data; 3407 + unsigned int dest, irq = data->irq; 3408 struct irte irte; 3409 3410 if (get_irte(irq, &irte)) 3411 return -1; 3412 3413 + if (__ioapic_set_affinity(data, mask, &dest)) 3414 return -1; 3415 3416 irte.vector = cfg->vector; ··· 3440 * which implement the MSI or MSI-X Capability Structure. 3441 */ 3442 static struct irq_chip msi_chip = { 3443 + .name = "PCI-MSI", 3444 + .irq_unmask = unmask_msi_irq, 3445 + .irq_mask = mask_msi_irq, 3446 + .irq_ack = ack_apic_edge, 3447 #ifdef CONFIG_SMP 3448 + .irq_set_affinity = msi_set_affinity, 3449 #endif 3450 + .irq_retrigger = ioapic_retrigger_irq, 3451 }; 3452 3453 static struct irq_chip msi_ir_chip = { 3454 + .name = "IR-PCI-MSI", 3455 + .irq_unmask = unmask_msi_irq, 3456 + .irq_mask = mask_msi_irq, 3457 #ifdef CONFIG_INTR_REMAP 3458 + .irq_ack = ir_ack_apic_edge, 3459 #ifdef CONFIG_SMP 3460 + .irq_set_affinity = ir_msi_set_affinity, 3461 #endif 3462 #endif 3463 + .irq_retrigger = ioapic_retrigger_irq, 3464 }; 3465 3466 /* ··· 3492 3493 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3494 { 3495 struct msi_msg msg; 3496 + int ret; 3497 3498 ret = msi_compose_msg(dev, irq, &msg, -1); 3499 if (ret < 0) ··· 3502 set_irq_msi(irq, msidesc); 3503 write_msi_msg(irq, &msg); 3504 3505 + if (irq_remapped(get_irq_chip_data(irq))) { 3506 + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3507 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); 3508 } else 3509 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); ··· 3519 3520 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3521 { 3522 + int node, ret, sub_handle, index = 0; 3523 + unsigned int irq, irq_want; 3524 struct msi_desc *msidesc; 3525 struct intel_iommu *iommu = NULL; 3526 3527 /* x86 doesn't support multiple MSI yet */ 3528 if (type == PCI_CAP_ID_MSI && nvec > 1) ··· 3585 3586 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) 3587 #ifdef CONFIG_SMP 3588 + static int 3589 + dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3590 + bool force) 3591 { 3592 + struct irq_cfg *cfg = data->chip_data; 3593 + unsigned int dest, irq = data->irq; 3594 struct msi_msg msg; 3595 3596 + if (__ioapic_set_affinity(data, mask, &dest)) 3597 return -1; 3598 3599 dmar_msi_read(irq, &msg); 3600 ··· 3612 #endif /* CONFIG_SMP */ 3613 3614 static struct irq_chip dmar_msi_type = { 3615 + .name = "DMAR_MSI", 3616 + .irq_unmask = dmar_msi_unmask, 3617 + .irq_mask = dmar_msi_mask, 3618 + .irq_ack = ack_apic_edge, 3619 #ifdef CONFIG_SMP 3620 + .irq_set_affinity = dmar_msi_set_affinity, 3621 #endif 3622 + .irq_retrigger = ioapic_retrigger_irq, 3623 }; 3624 3625 int arch_setup_dmar_msi(unsigned int irq) ··· 3640 #ifdef CONFIG_HPET_TIMER 3641 3642 #ifdef CONFIG_SMP 3643 + static int hpet_msi_set_affinity(struct irq_data *data, 3644 + const struct cpumask *mask, bool force) 3645 { 3646 + struct irq_cfg *cfg = data->chip_data; 3647 struct msi_msg msg; 3648 unsigned int dest; 3649 3650 + if (__ioapic_set_affinity(data, mask, &dest)) 3651 return -1; 3652 3653 + hpet_msi_read(data->handler_data, &msg); 3654 3655 
msg.data &= ~MSI_DATA_VECTOR_MASK; 3656 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3657 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3658 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3659 3660 + hpet_msi_write(data->handler_data, &msg); 3661 3662 return 0; 3663 } ··· 3667 #endif /* CONFIG_SMP */ 3668 3669 static struct irq_chip ir_hpet_msi_type = { 3670 + .name = "IR-HPET_MSI", 3671 + .irq_unmask = hpet_msi_unmask, 3672 + .irq_mask = hpet_msi_mask, 3673 #ifdef CONFIG_INTR_REMAP 3674 + .irq_ack = ir_ack_apic_edge, 3675 #ifdef CONFIG_SMP 3676 + .irq_set_affinity = ir_msi_set_affinity, 3677 #endif 3678 #endif 3679 + .irq_retrigger = ioapic_retrigger_irq, 3680 }; 3681 3682 static struct irq_chip hpet_msi_type = { 3683 .name = "HPET_MSI", 3684 + .irq_unmask = hpet_msi_unmask, 3685 + .irq_mask = hpet_msi_mask, 3686 + .irq_ack = ack_apic_edge, 3687 #ifdef CONFIG_SMP 3688 + .irq_set_affinity = hpet_msi_set_affinity, 3689 #endif 3690 + .irq_retrigger = ioapic_retrigger_irq, 3691 }; 3692 3693 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3694 { 3695 struct msi_msg msg; 3696 + int ret; 3697 3698 if (intr_remapping_enabled) { 3699 struct intel_iommu *iommu = map_hpet_to_ir(id); ··· 3712 if (ret < 0) 3713 return ret; 3714 3715 + hpet_msi_write(get_irq_data(irq), &msg); 3716 + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3717 + if (irq_remapped(get_irq_chip_data(irq))) 3718 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, 3719 handle_edge_irq, "edge"); 3720 else ··· 3747 write_ht_irq_msg(irq, &msg); 3748 } 3749 3750 + static int 3751 + ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3752 { 3753 + struct irq_cfg *cfg = data->chip_data; 3754 unsigned int dest; 3755 3756 + if (__ioapic_set_affinity(data, mask, &dest)) 3757 return -1; 3758 3759 + target_ht_irq(data->irq, dest, cfg->vector); 3760 return 0; 3761 } 3762 3763 #endif 3764 3765 static struct irq_chip ht_irq_chip = { 3766 + .name = "PCI-HT", 3767 + .irq_mask = mask_ht_irq, 3768 + .irq_unmask = unmask_ht_irq, 3769 + .irq_ack = ack_apic_edge, 3770 #ifdef CONFIG_SMP 3771 + .irq_set_affinity = ht_set_affinity, 3772 #endif 3773 + .irq_retrigger = ioapic_retrigger_irq, 3774 }; 3775 3776 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) ··· 3864 if (nr < nr_irqs) 3865 nr_irqs = nr; 3866 3867 + return NR_IRQS_LEGACY; 3868 } 3869 #endif 3870 3871 static int __io_apic_set_pci_routing(struct device *dev, int irq, 3872 struct io_apic_irq_attr *irq_attr) 3873 { 3874 struct irq_cfg *cfg; 3875 int node; 3876 int ioapic, pin; ··· 3889 else 3890 node = cpu_to_node(0); 3891 3892 + cfg = alloc_irq_and_cfg_at(irq, node); 3893 + if (!cfg) 3894 return 0; 3895 3896 pin = irq_attr->ioapic_pin; 3897 trigger = irq_attr->trigger; ··· 3903 * IRQs < 16 are already in the irq_2_pin[] map 3904 */ 3905 if (irq >= legacy_pic->nr_legacy_irqs) { 3906 + if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { 3907 printk(KERN_INFO "can not add pin %d for irq %d\n", 3908 pin, irq); 3909 return 0; 3910 } 3911 } 3912 3913 + setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); 3914 3915 return 0; 3916 } ··· 4104 */ 4105 if (desc->status & 4106 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 4107 + mask = desc->irq_data.affinity; 4108 else 4109 mask = apic->target_cpus(); 4110 4111 if (intr_remapping_enabled) 4112 + ir_ioapic_set_affinity(&desc->irq_data, mask, false); 4113 else 4114 + ioapic_set_affinity(&desc->irq_data, mask, false); 4115 } 4116 4117 } ··· 4295 void __init pre_init_apic_IRQ0(void) 4296 { 4297 struct irq_cfg *cfg; 4298 4299 
printk(KERN_INFO "Early APIC setup for system timer0\n"); 4300 #ifndef CONFIG_SMP 4301 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); 4302 #endif 4303 + /* Make sure the irq descriptor is set up */ 4304 + cfg = alloc_irq_and_cfg_at(0, 0); 4305 4306 setup_local_APIC(); 4307 4308 add_pin_to_irq_node(cfg, 0, 0, 0); 4309 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); 4310 4311 + setup_ioapic_irq(0, 0, 0, cfg, 0, 0); 4312 }
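Annotation: the dominant change in io_apic.c above is the genirq conversion — every irq_chip callback now receives a struct irq_data * instead of a bare irq number, so the chip_data (the irq_cfg) travels with the interrupt and hot paths such as the irq_remapped() check no longer need an irq_to_desc() lookup. Below is a minimal standalone sketch of that calling convention; it is ordinary userspace C, not kernel code, and every name (my_irq_chip, my_irq_data, my_cfg) is illustrative only.

    #include <stdio.h>

    struct my_irq_data {
        unsigned int irq;   /* interrupt number, still available */
        void *chip_data;    /* e.g. a struct irq_cfg in the kernel */
    };

    struct my_irq_chip {
        const char *name;
        /* new-style: callbacks take the data handle, not an int */
        void (*irq_mask)(struct my_irq_data *data);
        void (*irq_unmask)(struct my_irq_data *data);
    };

    struct my_cfg {
        int vector;
    };

    static void sketch_mask(struct my_irq_data *data)
    {
        struct my_cfg *cfg = data->chip_data;   /* direct, no lookup */
        printf("mask irq %u (vector %d)\n", data->irq, cfg->vector);
    }

    static void sketch_unmask(struct my_irq_data *data)
    {
        struct my_cfg *cfg = data->chip_data;
        printf("unmask irq %u (vector %d)\n", data->irq, cfg->vector);
    }

    static struct my_irq_chip sketch_chip = {
        .name       = "sketch",
        .irq_mask   = sketch_mask,
        .irq_unmask = sketch_unmask,
    };

    int main(void)
    {
        struct my_cfg cfg = { .vector = 48 };
        struct my_irq_data d = { .irq = 9, .chip_data = &cfg };

        sketch_chip.irq_mask(&d);
        sketch_chip.irq_unmask(&d);
        return 0;
    }

The same shape explains the allocator rework in this file: create_irq_nr() and alloc_irq_and_cfg_at() attach the cfg as chip_data at allocation time precisely so the callbacks above can find it through the handle.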
+1 -1
arch/x86/kernel/apic/nmi.c
··· 178 error: 179 if (nmi_watchdog == NMI_IO_APIC) { 180 if (!timer_through_8259) 181 - legacy_pic->chip->mask(0); 182 on_each_cpu(__acpi_nmi_disable, NULL, 1); 183 } 184
··· 178 error: 179 if (nmi_watchdog == NMI_IO_APIC) { 180 if (!timer_through_8259) 181 + legacy_pic->mask(0); 182 on_each_cpu(__acpi_nmi_disable, NULL, 1); 183 } 184
+24 -3
arch/x86/kernel/cpu/mcheck/mce_amd.c
··· 131 u32 low = 0, high = 0, address = 0; 132 unsigned int bank, block; 133 struct thresh_restart tr; 134 - u8 lvt_off; 135 136 for (bank = 0; bank < NR_BANKS; ++bank) { 137 for (block = 0; block < NR_BLOCKS; ++block) { ··· 163 if (shared_bank[bank] && c->cpu_core_id) 164 break; 165 #endif 166 - lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR, 167 - APIC_EILVT_MSG_FIX, 0); 168 169 high &= ~MASK_LVTOFF_HI; 170 high |= lvt_off << 20;
··· 131 u32 low = 0, high = 0, address = 0; 132 unsigned int bank, block; 133 struct thresh_restart tr; 134 + int lvt_off = -1; 135 + u8 offset; 136 137 for (bank = 0; bank < NR_BANKS; ++bank) { 138 for (block = 0; block < NR_BLOCKS; ++block) { ··· 162 if (shared_bank[bank] && c->cpu_core_id) 163 break; 164 #endif 165 + offset = (high & MASK_LVTOFF_HI) >> 20; 166 + if (lvt_off < 0) { 167 + if (setup_APIC_eilvt(offset, 168 + THRESHOLD_APIC_VECTOR, 169 + APIC_EILVT_MSG_FIX, 0)) { 170 + pr_err(FW_BUG "cpu %d, failed to " 171 + "setup threshold interrupt " 172 + "for bank %d, block %d " 173 + "(MSR%08X=0x%x%08x)", 174 + smp_processor_id(), bank, block, 175 + address, high, low); 176 + continue; 177 + } 178 + lvt_off = offset; 179 + } else if (lvt_off != offset) { 180 + pr_err(FW_BUG "cpu %d, invalid threshold " 181 + "interrupt offset %d for bank %d," 182 + "block %d (MSR%08X=0x%x%08x)", 183 + smp_processor_id(), lvt_off, bank, 184 + block, address, high, low); 185 + continue; 186 + } 187 188 high &= ~MASK_LVTOFF_HI; 189 high |= lvt_off << 20;
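Annotation: the mce_amd.c hunk replaces the unconditional setup_APIC_eilvt_mce() call with a check against the BIOS-programmed LVT offset — the first offset that registers successfully is remembered in lvt_off, and any bank/block advertising a different offset is reported as a firmware bug and skipped. A standalone sketch of just that consistency check follows; claim_offset() is a stand-in I invented for setup_APIC_eilvt(), assumed to always succeed.

    #include <stdio.h>

    /* Stand-in for setup_APIC_eilvt(); 0 == success, as in the kernel. */
    static int claim_offset(int offset)
    {
        (void)offset;
        return 0;
    }

    int main(void)
    {
        int bios_offsets[] = { 1, 1, 2, 1 };    /* per-block BIOS values */
        int lvt_off = -1;
        size_t i;

        for (i = 0; i < sizeof(bios_offsets) / sizeof(bios_offsets[0]); i++) {
            int offset = bios_offsets[i];

            if (lvt_off < 0) {
                if (claim_offset(offset)) {
                    printf("block %zu: setup failed for offset %d\n",
                           i, offset);
                    continue;
                }
                lvt_off = offset;       /* first working offset wins */
            } else if (lvt_off != offset) {
                printf("block %zu: firmware bug, offset %d != %d\n",
                       i, offset, lvt_off);
                continue;               /* inconsistent BIOS data: skip */
            }
            printf("block %zu: threshold interrupt uses offset %d\n",
                   i, lvt_off);
        }
        return 0;
    }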
+6 -10
arch/x86/kernel/hpet.c
··· 440 static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); 441 static struct hpet_dev *hpet_devs; 442 443 - void hpet_msi_unmask(unsigned int irq) 444 { 445 - struct hpet_dev *hdev = get_irq_data(irq); 446 unsigned int cfg; 447 448 /* unmask it */ ··· 451 hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); 452 } 453 454 - void hpet_msi_mask(unsigned int irq) 455 { 456 unsigned int cfg; 457 - struct hpet_dev *hdev = get_irq_data(irq); 458 459 /* mask it */ 460 cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); ··· 462 hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); 463 } 464 465 - void hpet_msi_write(unsigned int irq, struct msi_msg *msg) 466 { 467 - struct hpet_dev *hdev = get_irq_data(irq); 468 - 469 hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); 470 hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); 471 } 472 473 - void hpet_msi_read(unsigned int irq, struct msi_msg *msg) 474 { 475 - struct hpet_dev *hdev = get_irq_data(irq); 476 - 477 msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); 478 msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); 479 msg->address_hi = 0;
··· 440 static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); 441 static struct hpet_dev *hpet_devs; 442 443 + void hpet_msi_unmask(struct irq_data *data) 444 { 445 + struct hpet_dev *hdev = data->handler_data; 446 unsigned int cfg; 447 448 /* unmask it */ ··· 451 hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); 452 } 453 454 + void hpet_msi_mask(struct irq_data *data) 455 { 456 + struct hpet_dev *hdev = data->handler_data; 457 unsigned int cfg; 458 459 /* mask it */ 460 cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); ··· 462 hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); 463 } 464 465 + void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg) 466 { 467 hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); 468 hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); 469 } 470 471 + void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg) 472 { 473 msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); 474 msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); 475 msg->address_hi = 0;
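Annotation: in hpet.c, hpet_msi_mask/unmask now receive the irq_data and pull the hpet_dev out of handler_data, while hpet_msi_read/write take the hpet_dev directly — the per-call irq-number lookup is hoisted to the caller, which resolves the handle once. A toy sketch of that hoisting, with purely illustrative names:

    #include <stdio.h>

    struct hpet_timer { int num; };     /* stands in for struct hpet_dev */

    /* before: write_route(unsigned int irq) looked the device up itself;
     * after: the already-resolved handle is an explicit parameter. */
    static void write_route(struct hpet_timer *t, unsigned int data)
    {
        printf("route 0x%x to HPET timer %d\n", data, t->num);
    }

    int main(void)
    {
        struct hpet_timer t2 = { .num = 2 };

        /* caller resolves the handle once (get_irq_data() in the kernel) */
        write_route(&t2, 0xab);
        return 0;
    }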
+32 -31
arch/x86/kernel/i8259.c
··· 29 * plus some generic x86 specific things if generic specifics makes 30 * any sense at all. 31 */ 32 33 static int i8259A_auto_eoi; 34 DEFINE_RAW_SPINLOCK(i8259A_lock); 35 - static void mask_and_ack_8259A(unsigned int); 36 - static void mask_8259A(void); 37 - static void unmask_8259A(void); 38 - static void disable_8259A_irq(unsigned int irq); 39 - static void enable_8259A_irq(unsigned int irq); 40 - static void init_8259A(int auto_eoi); 41 - static int i8259A_irq_pending(unsigned int irq); 42 - 43 - struct irq_chip i8259A_chip = { 44 - .name = "XT-PIC", 45 - .mask = disable_8259A_irq, 46 - .disable = disable_8259A_irq, 47 - .unmask = enable_8259A_irq, 48 - .mask_ack = mask_and_ack_8259A, 49 - }; 50 51 /* 52 * 8259A PIC functions to handle ISA devices: ··· 54 */ 55 unsigned long io_apic_irqs; 56 57 - static void disable_8259A_irq(unsigned int irq) 58 { 59 unsigned int mask = 1 << irq; 60 unsigned long flags; ··· 68 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 69 } 70 71 - static void enable_8259A_irq(unsigned int irq) 72 { 73 unsigned int mask = ~(1 << irq); 74 unsigned long flags; ··· 85 else 86 outb(cached_master_mask, PIC_MASTER_IMR); 87 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 88 } 89 90 static int i8259A_irq_pending(unsigned int irq) ··· 113 disable_irq_nosync(irq); 114 io_apic_irqs &= ~(1<<irq); 115 set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, 116 - "XT"); 117 enable_irq(irq); 118 } 119 ··· 146 * first, _then_ send the EOI, and the order of EOI 147 * to the two 8259s is important! 148 */ 149 - static void mask_and_ack_8259A(unsigned int irq) 150 { 151 unsigned int irqmask = 1 << irq; 152 unsigned long flags; 153 ··· 219 goto handle_real_irq; 220 } 221 } 222 223 static char irq_trigger[2]; 224 /** ··· 347 * In AEOI mode we just have to mask the interrupt 348 * when acking. 349 */ 350 - i8259A_chip.mask_ack = disable_8259A_irq; 351 else 352 - i8259A_chip.mask_ack = mask_and_ack_8259A; 353 354 udelay(100); /* wait for 8259A to initialize */ 355 ··· 368 static void legacy_pic_noop(void) { }; 369 static void legacy_pic_uint_noop(unsigned int unused) { }; 370 static void legacy_pic_int_noop(int unused) { }; 371 - 372 - static struct irq_chip dummy_pic_chip = { 373 - .name = "dummy pic", 374 - .mask = legacy_pic_uint_noop, 375 - .unmask = legacy_pic_uint_noop, 376 - .disable = legacy_pic_uint_noop, 377 - .mask_ack = legacy_pic_uint_noop, 378 - }; 379 static int legacy_pic_irq_pending_noop(unsigned int irq) 380 { 381 return 0; ··· 375 376 struct legacy_pic null_legacy_pic = { 377 .nr_legacy_irqs = 0, 378 - .chip = &dummy_pic_chip, 379 .mask_all = legacy_pic_noop, 380 .restore_mask = legacy_pic_noop, 381 .init = legacy_pic_int_noop, ··· 388 struct legacy_pic default_legacy_pic = { 389 .nr_legacy_irqs = NR_IRQS_LEGACY, 390 .chip = &i8259A_chip, 391 - .mask_all = mask_8259A, 392 .restore_mask = unmask_8259A, 393 .init = init_8259A, 394 .irq_pending = i8259A_irq_pending,
··· 29 * plus some generic x86 specific things if generic specifics makes 30 * any sense at all. 31 */ 32 + static void init_8259A(int auto_eoi); 33 34 static int i8259A_auto_eoi; 35 DEFINE_RAW_SPINLOCK(i8259A_lock); 36 37 /* 38 * 8259A PIC functions to handle ISA devices: ··· 68 */ 69 unsigned long io_apic_irqs; 70 71 + static void mask_8259A_irq(unsigned int irq) 72 { 73 unsigned int mask = 1 << irq; 74 unsigned long flags; ··· 82 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 83 } 84 85 + static void disable_8259A_irq(struct irq_data *data) 86 + { 87 + mask_8259A_irq(data->irq); 88 + } 89 + 90 + static void unmask_8259A_irq(unsigned int irq) 91 { 92 unsigned int mask = ~(1 << irq); 93 unsigned long flags; ··· 94 else 95 outb(cached_master_mask, PIC_MASTER_IMR); 96 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 97 + } 98 + 99 + static void enable_8259A_irq(struct irq_data *data) 100 + { 101 + unmask_8259A_irq(data->irq); 102 } 103 104 static int i8259A_irq_pending(unsigned int irq) ··· 117 disable_irq_nosync(irq); 118 io_apic_irqs &= ~(1<<irq); 119 set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, 120 + i8259A_chip.name); 121 enable_irq(irq); 122 } 123 ··· 150 * first, _then_ send the EOI, and the order of EOI 151 * to the two 8259s is important! 152 */ 153 + static void mask_and_ack_8259A(struct irq_data *data) 154 { 155 + unsigned int irq = data->irq; 156 unsigned int irqmask = 1 << irq; 157 unsigned long flags; 158 ··· 222 goto handle_real_irq; 223 } 224 } 225 + 226 + struct irq_chip i8259A_chip = { 227 + .name = "XT-PIC", 228 + .irq_mask = disable_8259A_irq, 229 + .irq_disable = disable_8259A_irq, 230 + .irq_unmask = enable_8259A_irq, 231 + .irq_mask_ack = mask_and_ack_8259A, 232 + }; 233 234 static char irq_trigger[2]; 235 /** ··· 342 * In AEOI mode we just have to mask the interrupt 343 * when acking. 344 */ 345 + i8259A_chip.irq_mask_ack = disable_8259A_irq; 346 else 347 + i8259A_chip.irq_mask_ack = mask_and_ack_8259A; 348 349 udelay(100); /* wait for 8259A to initialize */ 350 ··· 363 static void legacy_pic_noop(void) { }; 364 static void legacy_pic_uint_noop(unsigned int unused) { }; 365 static void legacy_pic_int_noop(int unused) { }; 366 static int legacy_pic_irq_pending_noop(unsigned int irq) 367 { 368 return 0; ··· 378 379 struct legacy_pic null_legacy_pic = { 380 .nr_legacy_irqs = 0, 381 + .chip = &dummy_irq_chip, 382 + .mask = legacy_pic_uint_noop, 383 + .unmask = legacy_pic_uint_noop, 384 .mask_all = legacy_pic_noop, 385 .restore_mask = legacy_pic_noop, 386 .init = legacy_pic_int_noop, ··· 389 struct legacy_pic default_legacy_pic = { 390 .nr_legacy_irqs = NR_IRQS_LEGACY, 391 .chip = &i8259A_chip, 392 + .mask = mask_8259A_irq, 393 + .unmask = unmask_8259A_irq, 394 + .mask_all = mask_8259A, 395 .restore_mask = unmask_8259A, 396 .init = init_8259A, 397 .irq_pending = i8259A_irq_pending,
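Annotation: note the split the i8259.c hunk introduces — the raw unsigned-int helpers (mask_8259A_irq, unmask_8259A_irq) are kept and exported through legacy_pic->mask/unmask for direct callers such as check_timer(), while thin wrappers adapt them to the new irq_data-based chip callbacks. A compilable sketch of that adapter pattern, all names illustrative:

    #include <stdio.h>

    struct my_irq_data { unsigned int irq; };

    /* raw helper, legacy_pic->mask() style: takes the line number */
    static void mask_raw(unsigned int irq)
    {
        printf("mask line %u on the 8259\n", irq);
    }

    /* adapter, .irq_mask style: unwraps the handle and delegates */
    static void mask_wrapper(struct my_irq_data *data)
    {
        mask_raw(data->irq);
    }

    int main(void)
    {
        struct my_irq_data d = { .irq = 0 };

        mask_raw(0);        /* direct caller, e.g. timer setup */
        mask_wrapper(&d);   /* genirq core going through the chip */
        return 0;
    }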
+13 -11
arch/x86/kernel/irq.c
··· 159 seq_printf(p, "%*d: ", prec, i); 160 for_each_online_cpu(j) 161 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 162 - seq_printf(p, " %8s", desc->chip->name); 163 seq_printf(p, "-%-8s", desc->name); 164 165 if (action) { ··· 282 unsigned int irq, vector; 283 static int warned; 284 struct irq_desc *desc; 285 286 for_each_irq_desc(irq, desc) { 287 int break_affinity = 0; ··· 297 /* interrupt's are disabled at this point */ 298 raw_spin_lock(&desc->lock); 299 300 - affinity = desc->affinity; 301 if (!irq_has_action(irq) || 302 cpumask_equal(affinity, cpu_online_mask)) { 303 raw_spin_unlock(&desc->lock); ··· 317 affinity = cpu_all_mask; 318 } 319 320 - if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) 321 - desc->chip->mask(irq); 322 323 - if (desc->chip->set_affinity) 324 - desc->chip->set_affinity(irq, affinity); 325 else if (!(warned++)) 326 set_affinity = 0; 327 328 - if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) 329 - desc->chip->unmask(irq); 330 331 raw_spin_unlock(&desc->lock); 332 ··· 357 if (irr & (1 << (vector % 32))) { 358 irq = __get_cpu_var(vector_irq)[vector]; 359 360 - desc = irq_to_desc(irq); 361 raw_spin_lock(&desc->lock); 362 - if (desc->chip->retrigger) 363 - desc->chip->retrigger(irq); 364 raw_spin_unlock(&desc->lock); 365 } 366 }
··· 159 seq_printf(p, "%*d: ", prec, i); 160 for_each_online_cpu(j) 161 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 162 + seq_printf(p, " %8s", desc->irq_data.chip->name); 163 seq_printf(p, "-%-8s", desc->name); 164 165 if (action) { ··· 282 unsigned int irq, vector; 283 static int warned; 284 struct irq_desc *desc; 285 + struct irq_data *data; 286 287 for_each_irq_desc(irq, desc) { 288 int break_affinity = 0; ··· 296 /* interrupt's are disabled at this point */ 297 raw_spin_lock(&desc->lock); 298 299 + data = &desc->irq_data; 300 + affinity = data->affinity; 301 if (!irq_has_action(irq) || 302 cpumask_equal(affinity, cpu_online_mask)) { 303 raw_spin_unlock(&desc->lock); ··· 315 affinity = cpu_all_mask; 316 } 317 318 + if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) 319 + data->chip->irq_mask(data); 320 321 + if (data->chip->irq_set_affinity) 322 + data->chip->irq_set_affinity(data, affinity, true); 323 else if (!(warned++)) 324 set_affinity = 0; 325 326 + if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) 327 + data->chip->irq_unmask(data); 328 329 raw_spin_unlock(&desc->lock); 330 ··· 355 if (irr & (1 << (vector % 32))) { 356 irq = __get_cpu_var(vector_irq)[vector]; 357 358 + data = irq_get_irq_data(irq); 359 raw_spin_lock(&desc->lock); 360 + if (data->chip->irq_retrigger) 361 + data->chip->irq_retrigger(data); 362 raw_spin_unlock(&desc->lock); 363 } 364 }
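The fixup_irqs() hunk shows the matching caller-side idiom: fetch &desc->irq_data once and hand the same pointer to every irq_chip method. A self-contained sketch of that idiom, using toy types in place of the kernel structures:

#include <stdio.h>

struct irq_chip;

struct irq_data {
	unsigned int irq;
	struct irq_chip *chip;
};

struct irq_chip {
	void (*irq_mask)(struct irq_data *);
	void (*irq_unmask)(struct irq_data *);
};

struct irq_desc {
	struct irq_data irq_data;
};

static void demo_mask(struct irq_data *d)   { printf("mask irq %u\n", d->irq); }
static void demo_unmask(struct irq_data *d) { printf("unmask irq %u\n", d->irq); }

static struct irq_chip demo_chip = { demo_mask, demo_unmask };

/* models the fixup_irqs() body: one irq_data fetch, then chip calls */
static void fixup_one(struct irq_desc *desc)
{
	struct irq_data *data = &desc->irq_data;

	if (data->chip->irq_mask)
		data->chip->irq_mask(data);
	/* ... the affinity would be moved here ... */
	if (data->chip->irq_unmask)
		data->chip->irq_unmask(data);
}

int main(void)
{
	struct irq_desc desc = { { 9, &demo_chip } };

	fixup_one(&desc);
	return 0;
}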
+4 -13
arch/x86/kernel/irqinit.c
··· 100 101 void __init init_ISA_irqs(void) 102 { 103 int i; 104 105 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) ··· 109 #endif 110 legacy_pic->init(0); 111 112 - /* 113 - * 16 old-style INTA-cycle interrupts: 114 - */ 115 - for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) { 116 - struct irq_desc *desc = irq_to_desc(i); 117 - 118 - desc->status = IRQ_DISABLED; 119 - desc->action = NULL; 120 - desc->depth = 1; 121 - 122 - set_irq_chip_and_handler_name(i, &i8259A_chip, 123 - handle_level_irq, "XT"); 124 - } 125 } 126 127 void __init init_IRQ(void)
··· 100 101 void __init init_ISA_irqs(void) 102 { 103 + struct irq_chip *chip = legacy_pic->chip; 104 + const char *name = chip->name; 105 int i; 106 107 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) ··· 107 #endif 108 legacy_pic->init(0); 109 110 + for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) 111 + set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); 112 } 113 114 void __init init_IRQ(void)
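With the chip and its name both taken from legacy_pic, the ISA setup loop no longer hardcodes the 8259, and on platforms using null_legacy_pic the loop body simply never runs because nr_legacy_irqs is 0. A toy model of that data-driven setup (types and helpers here are illustrative, not the kernel's):

#include <stdio.h>

struct irq_chip {
	const char *name;
};

struct legacy_pic {
	int nr_legacy_irqs;
	struct irq_chip *chip;
};

static void set_chip_and_handler(int i, struct irq_chip *chip, const char *name)
{
	printf("irq %d: chip %s, handler name %s\n", i, chip->name, name);
}

static void init_isa_irqs(struct legacy_pic *pic)
{
	struct irq_chip *chip = pic->chip;
	const char *name = chip->name;
	int i;

	for (i = 0; i < pic->nr_legacy_irqs; i++)
		set_chip_and_handler(i, chip, name);
}

int main(void)
{
	static struct irq_chip xt_pic = { "XT-PIC" };
	struct legacy_pic pic = { 16, &xt_pic };
	struct legacy_pic null_pic = { 0, &xt_pic };	/* loop never runs */

	init_isa_irqs(&pic);
	init_isa_irqs(&null_pic);
	return 0;
}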
+2 -2
arch/x86/kernel/smpboot.c
··· 323 check_tsc_sync_target(); 324 325 if (nmi_watchdog == NMI_IO_APIC) { 326 - legacy_pic->chip->mask(0); 327 enable_NMI_through_LVT0(); 328 - legacy_pic->chip->unmask(0); 329 } 330 331 /* This must be done before setting cpu_online_mask */
··· 323 check_tsc_sync_target(); 324 325 if (nmi_watchdog == NMI_IO_APIC) { 326 + legacy_pic->mask(0); 327 enable_NMI_through_LVT0(); 328 + legacy_pic->unmask(0); 329 } 330 331 /* This must be done before setting cpu_online_mask */
+19 -36
arch/x86/kernel/uv_irq.c
··· 28 static spinlock_t uv_irq_lock; 29 static struct rb_root uv_irq_root; 30 31 - static int uv_set_irq_affinity(unsigned int, const struct cpumask *); 32 33 - static void uv_noop(unsigned int irq) 34 - { 35 - } 36 37 - static unsigned int uv_noop_ret(unsigned int irq) 38 - { 39 - return 0; 40 - } 41 - 42 - static void uv_ack_apic(unsigned int irq) 43 { 44 ack_APIC_irq(); 45 } 46 47 static struct irq_chip uv_irq_chip = { 48 - .name = "UV-CORE", 49 - .startup = uv_noop_ret, 50 - .shutdown = uv_noop, 51 - .enable = uv_noop, 52 - .disable = uv_noop, 53 - .ack = uv_noop, 54 - .mask = uv_noop, 55 - .unmask = uv_noop, 56 - .eoi = uv_ack_apic, 57 - .end = uv_noop, 58 - .set_affinity = uv_set_irq_affinity, 59 }; 60 61 /* ··· 131 unsigned long mmr_offset, int limit) 132 { 133 const struct cpumask *eligible_cpu = cpumask_of(cpu); 134 - struct irq_desc *desc = irq_to_desc(irq); 135 - struct irq_cfg *cfg; 136 - int mmr_pnode; 137 unsigned long mmr_value; 138 struct uv_IO_APIC_route_entry *entry; 139 - int err; 140 141 BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != 142 sizeof(unsigned long)); 143 - 144 - cfg = irq_cfg(irq); 145 146 err = assign_irq_vector(irq, cfg, eligible_cpu); 147 if (err != 0) 148 return err; 149 150 if (limit == UV_AFFINITY_CPU) 151 - desc->status |= IRQ_NO_BALANCING; 152 else 153 - desc->status |= IRQ_MOVE_PCNTXT; 154 155 set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, 156 irq_name); ··· 189 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 190 } 191 192 - static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) 193 { 194 - struct irq_desc *desc = irq_to_desc(irq); 195 - struct irq_cfg *cfg = desc->chip_data; 196 unsigned int dest; 197 - unsigned long mmr_value; 198 struct uv_IO_APIC_route_entry *entry; 199 - unsigned long mmr_offset; 200 int mmr_pnode; 201 202 - if (set_desc_affinity(desc, mask, &dest)) 203 return -1; 204 205 mmr_value = 0; ··· 214 entry->dest = dest; 215 216 /* Get previously stored MMR and pnode of hub sourcing interrupts */ 217 - if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) 218 return -1; 219 220 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
··· 28 static spinlock_t uv_irq_lock; 29 static struct rb_root uv_irq_root; 30 31 + static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool); 32 33 + static void uv_noop(struct irq_data *data) { } 34 35 + static void uv_ack_apic(struct irq_data *data) 36 { 37 ack_APIC_irq(); 38 } 39 40 static struct irq_chip uv_irq_chip = { 41 + .name = "UV-CORE", 42 + .irq_mask = uv_noop, 43 + .irq_unmask = uv_noop, 44 + .irq_eoi = uv_ack_apic, 45 + .irq_set_affinity = uv_set_irq_affinity, 46 }; 47 48 /* ··· 144 unsigned long mmr_offset, int limit) 145 { 146 const struct cpumask *eligible_cpu = cpumask_of(cpu); 147 + struct irq_cfg *cfg = get_irq_chip_data(irq); 148 unsigned long mmr_value; 149 struct uv_IO_APIC_route_entry *entry; 150 + int mmr_pnode, err; 151 152 BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != 153 sizeof(unsigned long)); 154 155 err = assign_irq_vector(irq, cfg, eligible_cpu); 156 if (err != 0) 157 return err; 158 159 if (limit == UV_AFFINITY_CPU) 160 + irq_set_status_flags(irq, IRQ_NO_BALANCING); 161 else 162 + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 163 164 set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, 165 irq_name); ··· 206 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 207 } 208 209 + static int 210 + uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, 211 + bool force) 212 { 213 + struct irq_cfg *cfg = data->chip_data; 214 unsigned int dest; 215 + unsigned long mmr_value, mmr_offset; 216 struct uv_IO_APIC_route_entry *entry; 217 int mmr_pnode; 218 219 + if (__ioapic_set_affinity(data, mask, &dest)) 220 return -1; 221 222 mmr_value = 0; ··· 231 entry->dest = dest; 232 233 /* Get previously stored MMR and pnode of hub sourcing interrupts */ 234 + if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode)) 235 return -1; 236 237 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
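uv_irq.c also picks up the new affinity callback shape, which takes the irq_data plus a force flag and no longer keys off a bare irq number. A sketch of that signature, with a plain bitmask standing in for struct cpumask and all names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long cpumask_t;	/* stand-in for struct cpumask */

struct irq_data {
	unsigned int irq;
	void *chip_data;
};

static int model_set_affinity(struct irq_data *data, const cpumask_t *mask,
			      bool force)
{
	printf("irq %u -> cpus %#lx%s\n", data->irq, *mask,
	       force ? " (forced)" : "");
	return 0;	/* 0: accepted, negative would mean failure */
}

int main(void)
{
	struct irq_data d = { .irq = 42, .chip_data = 0 };
	cpumask_t mask = 0x3;	/* cpus 0 and 1 */

	return model_set_affinity(&d, &mask, false);
}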
+42 -94
arch/x86/kernel/visws_quirks.c
··· 66 } 67 68 /* Replaces the default init_ISA_irqs in the generic setup */ 69 - static void __init visws_pre_intr_init(void) 70 - { 71 - init_VISWS_APIC_irqs(); 72 - } 73 74 /* Quirk for machine specific memory setup. */ 75 ··· 426 /* 427 * This is the SGI Cobalt (IO-)APIC: 428 */ 429 - 430 - static void enable_cobalt_irq(unsigned int irq) 431 { 432 - co_apic_set(is_co_apic(irq), irq); 433 } 434 435 - static void disable_cobalt_irq(unsigned int irq) 436 { 437 - int entry = is_co_apic(irq); 438 439 co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); 440 co_apic_read(CO_APIC_LO(entry)); 441 } 442 443 - /* 444 - * "irq" really just serves to identify the device. Here is where we 445 - * map this to the Cobalt APIC entry where it's physically wired. 446 - * This is called via request_irq -> setup_irq -> irq_desc->startup() 447 - */ 448 - static unsigned int startup_cobalt_irq(unsigned int irq) 449 - { 450 - unsigned long flags; 451 - struct irq_desc *desc = irq_to_desc(irq); 452 - 453 - spin_lock_irqsave(&cobalt_lock, flags); 454 - if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) 455 - desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING); 456 - enable_cobalt_irq(irq); 457 - spin_unlock_irqrestore(&cobalt_lock, flags); 458 - return 0; 459 - } 460 - 461 - static void ack_cobalt_irq(unsigned int irq) 462 { 463 unsigned long flags; 464 465 spin_lock_irqsave(&cobalt_lock, flags); 466 - disable_cobalt_irq(irq); 467 apic_write(APIC_EOI, APIC_EIO_ACK); 468 spin_unlock_irqrestore(&cobalt_lock, flags); 469 } 470 471 - static void end_cobalt_irq(unsigned int irq) 472 - { 473 - unsigned long flags; 474 - struct irq_desc *desc = irq_to_desc(irq); 475 - 476 - spin_lock_irqsave(&cobalt_lock, flags); 477 - if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) 478 - enable_cobalt_irq(irq); 479 - spin_unlock_irqrestore(&cobalt_lock, flags); 480 - } 481 - 482 static struct irq_chip cobalt_irq_type = { 483 - .name = "Cobalt-APIC", 484 - .startup = startup_cobalt_irq, 485 - .shutdown = disable_cobalt_irq, 486 - .enable = enable_cobalt_irq, 487 - .disable = disable_cobalt_irq, 488 - .ack = ack_cobalt_irq, 489 - .end = end_cobalt_irq, 490 }; 491 492 ··· 467 * interrupt controller type, and through a special virtual interrupt- 468 * controller. Device drivers only see the virtual interrupt sources. 469 */ 470 - static unsigned int startup_piix4_master_irq(unsigned int irq) 471 { 472 legacy_pic->init(0); 473 - 474 - return startup_cobalt_irq(irq); 475 } 476 477 - static void end_piix4_master_irq(unsigned int irq) 478 { 479 unsigned long flags; 480 481 spin_lock_irqsave(&cobalt_lock, flags); 482 - enable_cobalt_irq(irq); 483 spin_unlock_irqrestore(&cobalt_lock, flags); 484 } 485 486 static struct irq_chip piix4_master_irq_type = { 487 - .name = "PIIX4-master", 488 - .startup = startup_piix4_master_irq, 489 - .ack = ack_cobalt_irq, 490 - .end = end_piix4_master_irq, 491 }; 492 493 494 static struct irq_chip piix4_virtual_irq_type = { 495 - .name = "PIIX4-virtual", 496 }; 497 - 498 499 /* 500 * PIIX4-8259 master/virtual functions to handle interrupt requests ··· 512 */ 513 static irqreturn_t piix4_master_intr(int irq, void *dev_id) 514 { 515 - int realirq; 516 - struct irq_desc *desc; 517 unsigned long flags; 518 519 raw_spin_lock_irqsave(&i8259A_lock, flags); 520 ··· 554 555 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 556 557 - desc = irq_to_desc(realirq); 558 - 559 /* 560 * handle this 'virtual interrupt' as a Cobalt one now. 
561 */ 562 - kstat_incr_irqs_this_cpu(realirq, desc); 563 - 564 - if (likely(desc->action != NULL)) 565 - handle_IRQ_event(realirq, desc->action); 566 - 567 - if (!(desc->status & IRQ_DISABLED)) 568 - legacy_pic->chip->unmask(realirq); 569 570 return IRQ_HANDLED; 571 ··· 578 579 static inline void set_piix4_virtual_irq_type(void) 580 { 581 - piix4_virtual_irq_type.shutdown = i8259A_chip.mask; 582 piix4_virtual_irq_type.enable = i8259A_chip.unmask; 583 piix4_virtual_irq_type.disable = i8259A_chip.mask; 584 } 585 586 - void init_VISWS_APIC_irqs(void) 587 { 588 int i; 589 590 for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { 591 - struct irq_desc *desc = irq_to_desc(i); 592 593 - desc->status = IRQ_DISABLED; 594 - desc->action = 0; 595 - desc->depth = 1; 596 597 - if (i == 0) { 598 - desc->chip = &cobalt_irq_type; 599 - } 600 - else if (i == CO_IRQ_IDE0) { 601 - desc->chip = &cobalt_irq_type; 602 - } 603 - else if (i == CO_IRQ_IDE1) { 604 - desc->chip = &cobalt_irq_type; 605 - } 606 - else if (i == CO_IRQ_8259) { 607 - desc->chip = &piix4_master_irq_type; 608 - } 609 - else if (i < CO_IRQ_APIC0) { 610 - set_piix4_virtual_irq_type(); 611 - desc->chip = &piix4_virtual_irq_type; 612 - } 613 - else if (IS_CO_APIC(i)) { 614 - desc->chip = &cobalt_irq_type; 615 - } 616 } 617 618 setup_irq(CO_IRQ_8259, &master_action);
··· 66 } 67 68 /* Replaces the default init_ISA_irqs in the generic setup */ 69 + static void __init visws_pre_intr_init(void); 70 71 /* Quirk for machine specific memory setup. */ 72 ··· 429 /* 430 * This is the SGI Cobalt (IO-)APIC: 431 */ 432 + static void enable_cobalt_irq(struct irq_data *data) 433 { 434 + co_apic_set(is_co_apic(data->irq), data->irq); 435 } 436 437 + static void disable_cobalt_irq(struct irq_data *data) 438 { 439 + int entry = is_co_apic(data->irq); 440 441 co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); 442 co_apic_read(CO_APIC_LO(entry)); 443 } 444 445 + static void ack_cobalt_irq(struct irq_data *data) 446 { 447 unsigned long flags; 448 449 spin_lock_irqsave(&cobalt_lock, flags); 450 + disable_cobalt_irq(data); 451 apic_write(APIC_EOI, APIC_EIO_ACK); 452 spin_unlock_irqrestore(&cobalt_lock, flags); 453 } 454 455 static struct irq_chip cobalt_irq_type = { 456 + .name = "Cobalt-APIC", 457 + .irq_enable = enable_cobalt_irq, 458 + .irq_disable = disable_cobalt_irq, 459 + .irq_ack = ack_cobalt_irq, 460 }; 461 462 ··· 503 * interrupt controller type, and through a special virtual interrupt- 504 * controller. Device drivers only see the virtual interrupt sources. 505 */ 506 + static unsigned int startup_piix4_master_irq(struct irq_data *data) 507 { 508 legacy_pic->init(0); 509 + enable_cobalt_irq(data); 510 } 511 512 + static void end_piix4_master_irq(struct irq_data *data) 513 { 514 unsigned long flags; 515 516 spin_lock_irqsave(&cobalt_lock, flags); 517 + enable_cobalt_irq(data); 518 spin_unlock_irqrestore(&cobalt_lock, flags); 519 } 520 521 static struct irq_chip piix4_master_irq_type = { 522 + .name = "PIIX4-master", 523 + .irq_startup = startup_piix4_master_irq, 524 + .irq_ack = ack_cobalt_irq, 525 }; 526 527 + static void pii4_mask(struct irq_data *data) { } 528 529 static struct irq_chip piix4_virtual_irq_type = { 530 + .name = "PIIX4-virtual", 531 + .irq_mask = pii4_mask, 532 }; 533 534 /* 535 * PIIX4-8259 master/virtual functions to handle interrupt requests ··· 549 */ 550 static irqreturn_t piix4_master_intr(int irq, void *dev_id) 551 { 552 unsigned long flags; 553 + int realirq; 554 555 raw_spin_lock_irqsave(&i8259A_lock, flags); 556 ··· 592 593 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 594 595 /* 596 * handle this 'virtual interrupt' as a Cobalt one now. 597 */ 598 + generic_handle_irq(realirq); 599 600 return IRQ_HANDLED; 601 ··· 624 625 static inline void set_piix4_virtual_irq_type(void) 626 { 627 piix4_virtual_irq_type.enable = i8259A_chip.unmask; 628 piix4_virtual_irq_type.disable = i8259A_chip.mask; 629 + piix4_virtual_irq_type.unmask = i8259A_chip.unmask; 630 } 631 632 + static void __init visws_pre_intr_init(void) 633 { 634 int i; 635 636 + set_piix4_virtual_irq_type(); 637 + 638 for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { 639 + struct irq_chip *chip = NULL; 640 641 + if (i == 0) 642 + chip = &cobalt_irq_type; 643 + else if (i == CO_IRQ_IDE0) 644 + chip = &cobalt_irq_type; 645 + else if (i == CO_IRQ_IDE1) 646 + chip = &cobalt_irq_type; 647 + else if (i == CO_IRQ_8259) 648 + chip = &piix4_master_irq_type; 649 + else if (i < CO_IRQ_APIC0) 650 + chip = &piix4_virtual_irq_type; 651 + else if (IS_CO_APIC(i)) 652 + chip = &cobalt_irq_type; 653 654 + if (chip) 655 + set_irq_chip(i, chip); 656 } 657 658 setup_irq(CO_IRQ_8259, &master_action);

+9 -9
arch/x86/lguest/boot.c
··· 791 * simple as setting a bit. We don't actually "ack" interrupts as such, we 792 * just mask and unmask them. I wonder if we should be cleverer? 793 */ 794 - static void disable_lguest_irq(unsigned int irq) 795 { 796 - set_bit(irq, lguest_data.blocked_interrupts); 797 } 798 799 - static void enable_lguest_irq(unsigned int irq) 800 { 801 - clear_bit(irq, lguest_data.blocked_interrupts); 802 } 803 804 /* This structure describes the lguest IRQ controller. */ 805 static struct irq_chip lguest_irq_controller = { 806 .name = "lguest", 807 - .mask = disable_lguest_irq, 808 - .mask_ack = disable_lguest_irq, 809 - .unmask = enable_lguest_irq, 810 }; 811 812 /* ··· 838 * rather than set them in lguest_init_IRQ we are called here every time an 839 * lguest device needs an interrupt. 840 * 841 - * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should 842 * pass that up! 843 */ 844 void lguest_setup_irq(unsigned int irq) 845 { 846 - irq_to_desc_alloc_node(irq, 0); 847 set_irq_chip_and_handler_name(irq, &lguest_irq_controller, 848 handle_level_irq, "level"); 849 }
··· 791 * simple as setting a bit. We don't actually "ack" interrupts as such, we 792 * just mask and unmask them. I wonder if we should be cleverer? 793 */ 794 + static void disable_lguest_irq(struct irq_data *data) 795 { 796 + set_bit(data->irq, lguest_data.blocked_interrupts); 797 } 798 799 + static void enable_lguest_irq(struct irq_data *data) 800 { 801 + clear_bit(data->irq, lguest_data.blocked_interrupts); 802 } 803 804 /* This structure describes the lguest IRQ controller. */ 805 static struct irq_chip lguest_irq_controller = { 806 .name = "lguest", 807 + .irq_mask = disable_lguest_irq, 808 + .irq_mask_ack = disable_lguest_irq, 809 + .irq_unmask = enable_lguest_irq, 810 }; 811 812 /* ··· 838 * rather than set them in lguest_init_IRQ we are called here every time an 839 * lguest device needs an interrupt. 840 * 841 + * FIXME: irq_alloc_desc_at() can fail due to lack of memory, we should 842 * pass that up! 843 */ 844 void lguest_setup_irq(unsigned int irq) 845 { 846 + irq_alloc_desc_at(irq, 0); 847 set_irq_chip_and_handler_name(irq, &lguest_irq_controller, 848 handle_level_irq, "level"); 849 }
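lguest now relies on the irq_alloc_desc_at() contract: request a specific descriptor slot and get either that irq number back or a negative errno. The toy allocator below models that contract; the exact error codes are an assumption of this sketch, not taken from the kernel:

#include <errno.h>
#include <stdio.h>

#define NR_DESCS 64
static unsigned char allocated[NR_DESCS];

/* toy version of the "allocate a descriptor at a fixed irq" contract */
static int model_irq_alloc_desc_at(unsigned int at, int node)
{
	(void)node;		/* NUMA hint, ignored by this toy */
	if (at >= NR_DESCS)
		return -EINVAL;
	if (allocated[at])
		return -EEXIST;	/* slot already occupied (assumed code) */
	allocated[at] = 1;
	return at;		/* success: the irq number itself */
}

int main(void)
{
	printf("first:  %d\n", model_irq_alloc_desc_at(5, 0));	/* 5 */
	printf("second: %d\n", model_irq_alloc_desc_at(5, 0));	/* negative */
	return 0;
}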
+127 -18
arch/x86/oprofile/op_model_amd.c
··· 64 * IBS cpuid feature detection 65 */ 66 67 - #define IBS_CPUID_FEATURES 0x8000001b 68 69 /* 70 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but 71 * bit 0 is used to indicate the existence of IBS. 72 */ 73 - #define IBS_CAPS_AVAIL (1LL<<0) 74 - #define IBS_CAPS_RDWROPCNT (1LL<<3) 75 - #define IBS_CAPS_OPCNT (1LL<<4) 76 77 /* 78 * IBS randomization macros ··· 273 wrmsrl(MSR_AMD64_IBSOPCTL, 0); 274 } 275 276 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 277 278 static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, ··· 451 } 452 453 if (ibs_caps) 454 - setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0); 455 } 456 457 static void op_amd_cpu_shutdown(void) 458 { 459 if (ibs_caps) 460 - setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); 461 } 462 463 static int op_amd_check_ctrs(struct pt_regs * const regs, ··· 520 op_amd_stop_ibs(); 521 } 522 523 - static int __init_ibs_nmi(void) 524 { 525 - #define IBSCTL_LVTOFFSETVAL (1 << 8) 526 - #define IBSCTL 0x1cc 527 struct pci_dev *cpu_cfg; 528 int nodes; 529 u32 value = 0; 530 - u8 ibs_eilvt_off; 531 - 532 - ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); 533 534 nodes = 0; 535 cpu_cfg = NULL; ··· 536 break; 537 ++nodes; 538 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off 539 - | IBSCTL_LVTOFFSETVAL); 540 pci_read_config_dword(cpu_cfg, IBSCTL, &value); 541 - if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { 542 pci_dev_put(cpu_cfg); 543 printk(KERN_DEBUG "Failed to setup IBS LVT offset, " 544 - "IBSCTL = 0x%08x", value); 545 - return 1; 546 } 547 } while (1); 548 549 if (!nodes) { 550 - printk(KERN_DEBUG "No CPU node configured for IBS"); 551 - return 1; 552 } 553 554 return 0; 555 }
··· 64 * IBS cpuid feature detection 65 */ 66 67 + #define IBS_CPUID_FEATURES 0x8000001b 68 69 /* 70 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but 71 * bit 0 is used to indicate the existence of IBS. 72 */ 73 + #define IBS_CAPS_AVAIL (1U<<0) 74 + #define IBS_CAPS_RDWROPCNT (1U<<3) 75 + #define IBS_CAPS_OPCNT (1U<<4) 76 + 77 + /* 78 + * IBS APIC setup 79 + */ 80 + #define IBSCTL 0x1cc 81 + #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) 82 + #define IBSCTL_LVT_OFFSET_MASK 0x0F 83 84 /* 85 * IBS randomization macros ··· 266 wrmsrl(MSR_AMD64_IBSOPCTL, 0); 267 } 268 269 + static inline int eilvt_is_available(int offset) 270 + { 271 + /* check if we may assign a vector */ 272 + return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); 273 + } 274 + 275 + static inline int ibs_eilvt_valid(void) 276 + { 277 + u64 val; 278 + int offset; 279 + 280 + rdmsrl(MSR_AMD64_IBSCTL, val); 281 + if (!(val & IBSCTL_LVT_OFFSET_VALID)) { 282 + pr_err(FW_BUG "cpu %d, invalid IBS " 283 + "interrupt offset %d (MSR%08X=0x%016llx)", 284 + smp_processor_id(), offset, 285 + MSR_AMD64_IBSCTL, val); 286 + return 0; 287 + } 288 + 289 + offset = val & IBSCTL_LVT_OFFSET_MASK; 290 + 291 + if (eilvt_is_available(offset)) 292 + return !0; 293 + 294 + pr_err(FW_BUG "cpu %d, IBS interrupt offset %d " 295 + "not available (MSR%08X=0x%016llx)", 296 + smp_processor_id(), offset, 297 + MSR_AMD64_IBSCTL, val); 298 + 299 + return 0; 300 + } 301 + 302 + static inline int get_ibs_offset(void) 303 + { 304 + u64 val; 305 + 306 + rdmsrl(MSR_AMD64_IBSCTL, val); 307 + if (!(val & IBSCTL_LVT_OFFSET_VALID)) 308 + return -EINVAL; 309 + 310 + return val & IBSCTL_LVT_OFFSET_MASK; 311 + } 312 + 313 + static void setup_APIC_ibs(void) 314 + { 315 + int offset; 316 + 317 + offset = get_ibs_offset(); 318 + if (offset < 0) 319 + goto failed; 320 + 321 + if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0)) 322 + return; 323 + failed: 324 + pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n", 325 + smp_processor_id()); 326 + } 327 + 328 + static void clear_APIC_ibs(void) 329 + { 330 + int offset; 331 + 332 + offset = get_ibs_offset(); 333 + if (offset >= 0) 334 + setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); 335 + } 336 + 337 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 338 339 static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, ··· 376 } 377 378 if (ibs_caps) 379 + setup_APIC_ibs(); 380 } 381 382 static void op_amd_cpu_shutdown(void) 383 { 384 if (ibs_caps) 385 + clear_APIC_ibs(); 386 } 387 388 static int op_amd_check_ctrs(struct pt_regs * const regs, ··· 445 op_amd_stop_ibs(); 446 } 447 448 + static int setup_ibs_ctl(int ibs_eilvt_off) 449 { 450 struct pci_dev *cpu_cfg; 451 int nodes; 452 u32 value = 0; 453 454 nodes = 0; 455 cpu_cfg = NULL; ··· 466 break; 467 ++nodes; 468 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off 469 + | IBSCTL_LVT_OFFSET_VALID); 470 pci_read_config_dword(cpu_cfg, IBSCTL, &value); 471 + if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { 472 pci_dev_put(cpu_cfg); 473 printk(KERN_DEBUG "Failed to setup IBS LVT offset, " 474 + "IBSCTL = 0x%08x\n", value); 475 + return -EINVAL; 476 } 477 } while (1); 478 479 if (!nodes) { 480 + printk(KERN_DEBUG "No CPU node configured for IBS\n"); 481 + return -ENODEV; 482 } 483 + 484 + return 0; 485 + } 486 + 487 + static int force_ibs_eilvt_setup(void) 488 + { 489 + int i; 490 + int ret; 491 + 492 + /* find the next free available EILVT entry */ 493 + for (i = 1; i < 4; i++) { 494 + if (!eilvt_is_available(i)) 495 + continue; 496 + ret = 
setup_ibs_ctl(i); 497 + if (ret) 498 + return ret; 499 + return 0; 500 + } 501 + 502 + printk(KERN_DEBUG "No EILVT entry available\n"); 503 + 504 + return -EBUSY; 505 + } 506 + 507 + static int __init_ibs_nmi(void) 508 + { 509 + int ret; 510 + 511 + if (ibs_eilvt_valid()) 512 + return 0; 513 + 514 + ret = force_ibs_eilvt_setup(); 515 + if (ret) 516 + return ret; 517 + 518 + if (!ibs_eilvt_valid()) 519 + return -EFAULT; 520 + 521 + pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); 522 523 return 0; 524 }
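The new IBS code keys everything off the IBSCTL MSR: bit 8 (IBSCTL_LVT_OFFSET_VALID) says whether the BIOS programmed an LVT offset and the low four bits carry the offset itself, which is what ibs_eilvt_valid() and get_ibs_offset() above decode. A worked stand-alone example of that decoding, with a constant simulating the rdmsrl() result:

#include <stdint.h>
#include <stdio.h>

#define IBSCTL_LVT_OFFSET_VALID	(1ULL << 8)
#define IBSCTL_LVT_OFFSET_MASK	0x0F

/* returns the BIOS-programmed LVT offset, or -1 if the valid bit is clear */
static int decode_ibsctl(uint64_t val)
{
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -1;
	return (int)(val & IBSCTL_LVT_OFFSET_MASK);
}

int main(void)
{
	/* pretend these came back from rdmsrl(MSR_AMD64_IBSCTL, val) */
	printf("offset = %d\n", decode_ibsctl(0x101));	/* valid, offset 1 */
	printf("offset = %d\n", decode_ibsctl(0x001));	/* invalid -> -1 */
	return 0;
}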
+1 -1
arch/xtensa/kernel/irq.c
··· 92 for_each_online_cpu(j) 93 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 94 #endif 95 - seq_printf(p, " %14s", irq_desc[i].chip->typename); 96 seq_printf(p, " %s", action->name); 97 98 for (action=action->next; action; action = action->next)
··· 92 for_each_online_cpu(j) 93 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 94 #endif 95 + seq_printf(p, " %14s", irq_desc[i].chip->name); 96 seq_printf(p, " %s", action->name); 97 98 for (action=action->next; action; action = action->next)
+3 -3
drivers/isdn/act2000/act2000.h
··· 141 __u8 rcvhdr[8]; 142 } irq_data_isa; 143 144 - typedef union irq_data { 145 irq_data_isa isa; 146 - } irq_data; 147 148 /* 149 * Per card driver data ··· 176 char *status_buf_read; 177 char *status_buf_write; 178 char *status_buf_end; 179 - irq_data idat; /* Data used for IRQ handler */ 180 isdn_if interface; /* Interface to upper layer */ 181 char regname[35]; /* Name used for request_region */ 182 } act2000_card;
··· 141 __u8 rcvhdr[8]; 142 } irq_data_isa; 143 144 + typedef union act2000_irq_data { 145 irq_data_isa isa; 146 + } act2000_irq_data; 147 148 /* 149 * Per card driver data ··· 176 char *status_buf_read; 177 char *status_buf_write; 178 char *status_buf_end; 179 + act2000_irq_data idat; /* Data used for IRQ handler */ 180 isdn_if interface; /* Interface to upper layer */ 181 char regname[35]; /* Name used for request_region */ 182 } act2000_card;
+14 -4
drivers/isdn/hisax/config.c
··· 801 ll_unload(csta); 802 } 803 804 static int init_card(struct IsdnCardState *cs) 805 { 806 int irq_cnt, cnt = 3, ret; ··· 819 ret = cs->cardmsg(cs, CARD_INIT, NULL); 820 return(ret); 821 } 822 - irq_cnt = kstat_irqs(cs->irq); 823 printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], 824 cs->irq, irq_cnt); 825 - if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { 826 printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", 827 cs->irq); 828 return 1; ··· 832 /* Timeout 10ms */ 833 msleep(10); 834 printk(KERN_INFO "%s: IRQ %d count %d\n", 835 - CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); 836 - if (kstat_irqs(cs->irq) == irq_cnt) { 837 printk(KERN_WARNING 838 "%s: IRQ(%d) getting no interrupts during init %d\n", 839 CardType[cs->typ], cs->irq, 4 - cnt);
··· 801 ll_unload(csta); 802 } 803 804 + static irqreturn_t card_irq(int intno, void *dev_id) 805 + { 806 + struct IsdnCardState *cs = dev_id; 807 + irqreturn_t ret = cs->irq_func(intno, cs); 808 + 809 + if (ret == IRQ_HANDLED) 810 + cs->irq_cnt++; 811 + return ret; 812 + } 813 + 814 static int init_card(struct IsdnCardState *cs) 815 { 816 int irq_cnt, cnt = 3, ret; ··· 809 ret = cs->cardmsg(cs, CARD_INIT, NULL); 810 return(ret); 811 } 812 + irq_cnt = cs->irq_cnt = 0; 813 printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], 814 cs->irq, irq_cnt); 815 + if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) { 816 printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", 817 cs->irq); 818 return 1; ··· 822 /* Timeout 10ms */ 823 msleep(10); 824 printk(KERN_INFO "%s: IRQ %d count %d\n", 825 + CardType[cs->typ], cs->irq, cs->irq_cnt); 826 + if (cs->irq_cnt == irq_cnt) { 827 printk(KERN_WARNING 828 "%s: IRQ(%d) getting no interrupts during init %d\n", 829 CardType[cs->typ], cs->irq, 4 - cnt);
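Instead of peeking at the genirq-internal kstat_irqs() statistics, HiSax now interposes card_irq() and counts only the invocations the card handler actually claimed. A compact model of that wrapper-and-counter pattern, with stand-in types rather than the driver's:

#include <stdio.h>

typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;

struct card {
	irqreturn_t (*irq_func)(int, void *);
	unsigned int irq_cnt;
};

static irqreturn_t real_handler(int intno, void *dev_id)
{
	(void)intno;
	(void)dev_id;
	return IRQ_HANDLED;	/* pretend the card raised this one */
}

/* wrapper registered with request_irq() in place of the card handler */
static irqreturn_t card_irq(int intno, void *dev_id)
{
	struct card *cs = dev_id;
	irqreturn_t ret = cs->irq_func(intno, cs);

	if (ret == IRQ_HANDLED)
		cs->irq_cnt++;	/* private count, kstat_irqs() not needed */
	return ret;
}

int main(void)
{
	struct card cs = { real_handler, 0 };

	card_irq(5, &cs);
	printf("irq_cnt = %u\n", cs.irq_cnt);	/* 1 */
	return 0;
}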
+1
drivers/isdn/hisax/hisax.h
··· 959 u_long event; 960 struct work_struct tqueue; 961 struct timer_list dbusytimer; 962 #ifdef ERROR_STATISTIC 963 int err_crc; 964 int err_tx;
··· 959 u_long event; 960 struct work_struct tqueue; 961 struct timer_list dbusytimer; 962 + unsigned int irq_cnt; 963 #ifdef ERROR_STATISTIC 964 int err_crc; 965 int err_tx;
+2 -2
drivers/mfd/twl4030-irq.c
··· 78 u8 irq_lines; /* number of supported irq lines */ 79 80 /* SIR ignored -- set interrupt, for testing only */ 81 - struct irq_data { 82 u8 isr_offset; 83 u8 imr_offset; 84 } mask[2]; ··· 810 twl4030_irq_chip = dummy_irq_chip; 811 twl4030_irq_chip.name = "twl4030"; 812 813 - twl4030_sih_irq_chip.ack = dummy_irq_chip.ack; 814 815 for (i = irq_base; i < irq_end; i++) { 816 set_irq_chip_and_handler(i, &twl4030_irq_chip,
··· 78 u8 irq_lines; /* number of supported irq lines */ 79 80 /* SIR ignored -- set interrupt, for testing only */ 81 + struct sih_irq_data { 82 u8 isr_offset; 83 u8 imr_offset; 84 } mask[2]; ··· 810 twl4030_irq_chip = dummy_irq_chip; 811 twl4030_irq_chip.name = "twl4030"; 812 813 + twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; 814 815 for (i = irq_base; i < irq_end; i++) { 816 set_irq_chip_and_handler(i, &twl4030_irq_chip,
+4 -4
drivers/pci/dmar.c
··· 1221 } 1222 } 1223 1224 - void dmar_msi_unmask(unsigned int irq) 1225 { 1226 - struct intel_iommu *iommu = get_irq_data(irq); 1227 unsigned long flag; 1228 1229 /* unmask it */ ··· 1234 spin_unlock_irqrestore(&iommu->register_lock, flag); 1235 } 1236 1237 - void dmar_msi_mask(unsigned int irq) 1238 { 1239 unsigned long flag; 1240 - struct intel_iommu *iommu = get_irq_data(irq); 1241 1242 /* mask it */ 1243 spin_lock_irqsave(&iommu->register_lock, flag);
··· 1221 } 1222 } 1223 1224 + void dmar_msi_unmask(struct irq_data *data) 1225 { 1226 + struct intel_iommu *iommu = irq_data_get_irq_data(data); 1227 unsigned long flag; 1228 1229 /* unmask it */ ··· 1234 spin_unlock_irqrestore(&iommu->register_lock, flag); 1235 } 1236 1237 + void dmar_msi_mask(struct irq_data *data) 1238 { 1239 unsigned long flag; 1240 + struct intel_iommu *iommu = irq_data_get_irq_data(data); 1241 1242 /* mask it */ 1243 spin_lock_irqsave(&iommu->register_lock, flag);
+8 -14
drivers/pci/htirq.c
··· 57 *msg = cfg->msg; 58 } 59 60 - void mask_ht_irq(unsigned int irq) 61 { 62 - struct ht_irq_cfg *cfg; 63 - struct ht_irq_msg msg; 64 65 - cfg = get_irq_data(irq); 66 - 67 - msg = cfg->msg; 68 msg.address_lo |= 1; 69 - write_ht_irq_msg(irq, &msg); 70 } 71 72 - void unmask_ht_irq(unsigned int irq) 73 { 74 - struct ht_irq_cfg *cfg; 75 - struct ht_irq_msg msg; 76 77 - cfg = get_irq_data(irq); 78 - 79 - msg = cfg->msg; 80 msg.address_lo &= ~1; 81 - write_ht_irq_msg(irq, &msg); 82 } 83 84 /**
··· 57 *msg = cfg->msg; 58 } 59 60 + void mask_ht_irq(struct irq_data *data) 61 { 62 + struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); 63 + struct ht_irq_msg msg = cfg->msg; 64 65 msg.address_lo |= 1; 66 + write_ht_irq_msg(data->irq, &msg); 67 } 68 69 + void unmask_ht_irq(struct irq_data *data) 70 { 71 + struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); 72 + struct ht_irq_msg msg = cfg->msg; 73 74 msg.address_lo &= ~1; 75 + write_ht_irq_msg(data->irq, &msg); 76 } 77 78 /**
+28 -186
drivers/pci/intr_remapping.c
··· 46 } 47 early_param("intremap", setup_intremap); 48 49 - struct irq_2_iommu { 50 - struct intel_iommu *iommu; 51 - u16 irte_index; 52 - u16 sub_handle; 53 - u8 irte_mask; 54 - }; 55 - 56 - #ifdef CONFIG_GENERIC_HARDIRQS 57 - static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) 58 - { 59 - struct irq_2_iommu *iommu; 60 - 61 - iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); 62 - printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); 63 - 64 - return iommu; 65 - } 66 - 67 - static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 68 - { 69 - struct irq_desc *desc; 70 - 71 - desc = irq_to_desc(irq); 72 - 73 - if (WARN_ON_ONCE(!desc)) 74 - return NULL; 75 - 76 - return desc->irq_2_iommu; 77 - } 78 - 79 - static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) 80 - { 81 - struct irq_desc *desc; 82 - struct irq_2_iommu *irq_iommu; 83 - 84 - desc = irq_to_desc(irq); 85 - if (!desc) { 86 - printk(KERN_INFO "can not get irq_desc for %d\n", irq); 87 - return NULL; 88 - } 89 - 90 - irq_iommu = desc->irq_2_iommu; 91 - 92 - if (!irq_iommu) 93 - desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq)); 94 - 95 - return desc->irq_2_iommu; 96 - } 97 - 98 - #else /* !CONFIG_SPARSE_IRQ */ 99 - 100 - static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; 101 - 102 - static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 103 - { 104 - if (irq < nr_irqs) 105 - return &irq_2_iommuX[irq]; 106 - 107 - return NULL; 108 - } 109 - static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) 110 - { 111 - return irq_2_iommu(irq); 112 - } 113 - #endif 114 - 115 static DEFINE_SPINLOCK(irq_2_ir_lock); 116 117 - static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) 118 { 119 - struct irq_2_iommu *irq_iommu; 120 - 121 - irq_iommu = irq_2_iommu(irq); 122 - 123 - if (!irq_iommu) 124 - return NULL; 125 - 126 - if (!irq_iommu->iommu) 127 - return NULL; 128 - 129 - return irq_iommu; 130 - } 131 - 132 - int irq_remapped(int irq) 133 - { 134 - return valid_irq_2_iommu(irq) != NULL; 135 } 136 137 int get_irte(int irq, struct irte *entry) 138 { 139 - int index; 140 - struct irq_2_iommu *irq_iommu; 141 unsigned long flags; 142 143 - if (!entry) 144 return -1; 145 146 spin_lock_irqsave(&irq_2_ir_lock, flags); 147 - irq_iommu = valid_irq_2_iommu(irq); 148 - if (!irq_iommu) { 149 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 150 - return -1; 151 - } 152 153 index = irq_iommu->irte_index + irq_iommu->sub_handle; 154 *entry = *(irq_iommu->iommu->ir_table->base + index); ··· 75 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) 76 { 77 struct ir_table *table = iommu->ir_table; 78 - struct irq_2_iommu *irq_iommu; 79 u16 index, start_index; 80 unsigned int mask = 0; 81 unsigned long flags; 82 int i; 83 84 - if (!count) 85 return -1; 86 - 87 - #ifndef CONFIG_SPARSE_IRQ 88 - /* protect irq_2_iommu_alloc later */ 89 - if (irq >= nr_irqs) 90 - return -1; 91 - #endif 92 93 /* 94 * start the IRTE search from index 0. 
··· 123 for (i = index; i < index + count; i++) 124 table->base[i].present = 1; 125 126 - irq_iommu = irq_2_iommu_alloc(irq); 127 - if (!irq_iommu) { 128 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 129 - printk(KERN_ERR "can't allocate irq_2_iommu\n"); 130 - return -1; 131 - } 132 - 133 irq_iommu->iommu = iommu; 134 irq_iommu->irte_index = index; 135 irq_iommu->sub_handle = 0; ··· 146 147 int map_irq_to_irte_handle(int irq, u16 *sub_handle) 148 { 149 - int index; 150 - struct irq_2_iommu *irq_iommu; 151 unsigned long flags; 152 153 spin_lock_irqsave(&irq_2_ir_lock, flags); 154 - irq_iommu = valid_irq_2_iommu(irq); 155 - if (!irq_iommu) { 156 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 157 - return -1; 158 - } 159 - 160 *sub_handle = irq_iommu->sub_handle; 161 index = irq_iommu->irte_index; 162 spin_unlock_irqrestore(&irq_2_ir_lock, flags); ··· 162 163 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) 164 { 165 - struct irq_2_iommu *irq_iommu; 166 unsigned long flags; 167 168 - spin_lock_irqsave(&irq_2_ir_lock, flags); 169 - 170 - irq_iommu = irq_2_iommu_alloc(irq); 171 - 172 - if (!irq_iommu) { 173 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 174 - printk(KERN_ERR "can't allocate irq_2_iommu\n"); 175 return -1; 176 - } 177 178 irq_iommu->iommu = iommu; 179 irq_iommu->irte_index = index; ··· 180 return 0; 181 } 182 183 - int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) 184 - { 185 - struct irq_2_iommu *irq_iommu; 186 - unsigned long flags; 187 - 188 - spin_lock_irqsave(&irq_2_ir_lock, flags); 189 - irq_iommu = valid_irq_2_iommu(irq); 190 - if (!irq_iommu) { 191 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 192 - return -1; 193 - } 194 - 195 - irq_iommu->iommu = NULL; 196 - irq_iommu->irte_index = 0; 197 - irq_iommu->sub_handle = 0; 198 - irq_2_iommu(irq)->irte_mask = 0; 199 - 200 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 201 - 202 - return 0; 203 - } 204 - 205 int modify_irte(int irq, struct irte *irte_modified) 206 { 207 - int rc; 208 - int index; 209 - struct irte *irte; 210 struct intel_iommu *iommu; 211 - struct irq_2_iommu *irq_iommu; 212 unsigned long flags; 213 214 spin_lock_irqsave(&irq_2_ir_lock, flags); 215 - irq_iommu = valid_irq_2_iommu(irq); 216 - if (!irq_iommu) { 217 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 218 - return -1; 219 - } 220 221 iommu = irq_iommu->iommu; 222 ··· 203 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 204 205 rc = qi_flush_iec(iommu, index, 0); 206 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 207 - 208 - return rc; 209 - } 210 - 211 - int flush_irte(int irq) 212 - { 213 - int rc; 214 - int index; 215 - struct intel_iommu *iommu; 216 - struct irq_2_iommu *irq_iommu; 217 - unsigned long flags; 218 - 219 - spin_lock_irqsave(&irq_2_ir_lock, flags); 220 - irq_iommu = valid_irq_2_iommu(irq); 221 - if (!irq_iommu) { 222 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 223 - return -1; 224 - } 225 - 226 - iommu = irq_iommu->iommu; 227 - 228 - index = irq_iommu->irte_index + irq_iommu->sub_handle; 229 - 230 - rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); 231 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 232 233 return rc; ··· 264 265 int free_irte(int irq) 266 { 267 - int rc = 0; 268 - struct irq_2_iommu *irq_iommu; 269 unsigned long flags; 270 271 spin_lock_irqsave(&irq_2_ir_lock, flags); 272 - irq_iommu = valid_irq_2_iommu(irq); 273 - if (!irq_iommu) { 274 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 275 - return -1; 276 - } 277 278 rc = clear_entries(irq_iommu); 
279
··· 46 } 47 early_param("intremap", setup_intremap); 48 49 static DEFINE_SPINLOCK(irq_2_ir_lock); 50 51 + static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 52 { 53 + struct irq_cfg *cfg = get_irq_chip_data(irq); 54 + return cfg ? &cfg->irq_2_iommu : NULL; 55 } 56 57 int get_irte(int irq, struct irte *entry) 58 { 59 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 60 unsigned long flags; 61 + int index; 62 63 + if (!entry || !irq_iommu) 64 return -1; 65 66 spin_lock_irqsave(&irq_2_ir_lock, flags); 67 68 index = irq_iommu->irte_index + irq_iommu->sub_handle; 69 *entry = *(irq_iommu->iommu->ir_table->base + index); ··· 160 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) 161 { 162 struct ir_table *table = iommu->ir_table; 163 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 164 u16 index, start_index; 165 unsigned int mask = 0; 166 unsigned long flags; 167 int i; 168 169 + if (!count || !irq_iommu) 170 return -1; 171 172 /* 173 * start the IRTE search from index 0. ··· 214 for (i = index; i < index + count; i++) 215 table->base[i].present = 1; 216 217 irq_iommu->iommu = iommu; 218 irq_iommu->irte_index = index; 219 irq_iommu->sub_handle = 0; ··· 244 245 int map_irq_to_irte_handle(int irq, u16 *sub_handle) 246 { 247 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 248 unsigned long flags; 249 + int index; 250 + 251 + if (!irq_iommu) 252 + return -1; 253 254 spin_lock_irqsave(&irq_2_ir_lock, flags); 255 *sub_handle = irq_iommu->sub_handle; 256 index = irq_iommu->irte_index; 257 spin_unlock_irqrestore(&irq_2_ir_lock, flags); ··· 263 264 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) 265 { 266 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 267 unsigned long flags; 268 269 + if (!irq_iommu) 270 return -1; 271 + 272 + spin_lock_irqsave(&irq_2_ir_lock, flags); 273 274 irq_iommu->iommu = iommu; 275 irq_iommu->irte_index = index; ··· 286 return 0; 287 } 288 289 int modify_irte(int irq, struct irte *irte_modified) 290 { 291 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 292 struct intel_iommu *iommu; 293 unsigned long flags; 294 + struct irte *irte; 295 + int rc, index; 296 + 297 + if (!irq_iommu) 298 + return -1; 299 300 spin_lock_irqsave(&irq_2_ir_lock, flags); 301 302 iommu = irq_iommu->iommu; 303 ··· 334 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 335 336 rc = qi_flush_iec(iommu, index, 0); 337 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 338 339 return rc; ··· 420 421 int free_irte(int irq) 422 { 423 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 424 unsigned long flags; 425 + int rc; 426 + 427 + if (!irq_iommu) 428 + return -1; 429 430 spin_lock_irqsave(&irq_2_ir_lock, flags); 431 432 rc = clear_entries(irq_iommu); 433
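After the rework, irq_2_iommu is embedded in the per-irq chip data, so every lookup collapses to a NULL-checked pointer chase and callers can bail out before taking the lock. A sketch of that accessor pattern, with a toy table standing in for get_irq_chip_data():

#include <stddef.h>
#include <stdio.h>

struct irq_2_iommu {
	unsigned short irte_index;
	unsigned short sub_handle;
};

struct irq_cfg {
	struct irq_2_iommu irq_2_iommu;	/* embedded, no extra allocation */
};

#define NR_IRQS 16
static struct irq_cfg *chip_data[NR_IRQS];	/* models get_irq_chip_data() */

/* struct tag and function may share the name, as in the kernel */
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq < NR_IRQS ? chip_data[irq] : NULL;

	return cfg ? &cfg->irq_2_iommu : NULL;
}

int main(void)
{
	static struct irq_cfg cfg = { { 7, 0 } };
	struct irq_2_iommu *ii;

	chip_data[3] = &cfg;
	ii = irq_2_iommu(3);
	printf("irq 3 irte_index: %d\n", ii ? ii->irte_index : -1);	/* 7 */
	printf("irq 4 present:    %d\n", irq_2_iommu(4) != NULL);	/* 0 */
	return 0;
}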
+16 -22
drivers/pci/msi.c
··· 170 desc->masked = __msix_mask_irq(desc, flag); 171 } 172 173 - static void msi_set_mask_bit(unsigned irq, u32 flag) 174 { 175 - struct msi_desc *desc = get_irq_msi(irq); 176 177 if (desc->msi_attrib.is_msix) { 178 msix_mask_irq(desc, flag); 179 readl(desc->mask_base); /* Flush write to device */ 180 } else { 181 - unsigned offset = irq - desc->dev->irq; 182 msi_mask_irq(desc, 1 << offset, flag << offset); 183 } 184 } 185 186 - void mask_msi_irq(unsigned int irq) 187 { 188 - msi_set_mask_bit(irq, 1); 189 } 190 191 - void unmask_msi_irq(unsigned int irq) 192 { 193 - msi_set_mask_bit(irq, 0); 194 } 195 196 - void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 197 { 198 - struct msi_desc *entry = get_irq_desc_msi(desc); 199 - 200 BUG_ON(entry->dev->current_state != PCI_D0); 201 202 if (entry->msi_attrib.is_msix) { ··· 225 226 void read_msi_msg(unsigned int irq, struct msi_msg *msg) 227 { 228 - struct irq_desc *desc = irq_to_desc(irq); 229 230 - read_msi_msg_desc(desc, msg); 231 } 232 233 - void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 234 { 235 - struct msi_desc *entry = get_irq_desc_msi(desc); 236 - 237 /* Assert that the cache is valid, assuming that 238 * valid messages are not all-zeroes. */ 239 BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | ··· 242 243 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) 244 { 245 - struct irq_desc *desc = irq_to_desc(irq); 246 247 - get_cached_msi_msg_desc(desc, msg); 248 } 249 250 - void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 251 { 252 - struct msi_desc *entry = get_irq_desc_msi(desc); 253 - 254 if (entry->dev->current_state != PCI_D0) { 255 /* Don't touch the hardware now */ 256 } else if (entry->msi_attrib.is_msix) { ··· 286 287 void write_msi_msg(unsigned int irq, struct msi_msg *msg) 288 { 289 - struct irq_desc *desc = irq_to_desc(irq); 290 291 - write_msi_msg_desc(desc, msg); 292 } 293 294 static void free_msi_irqs(struct pci_dev *dev)
··· 170 desc->masked = __msix_mask_irq(desc, flag); 171 } 172 173 + static void msi_set_mask_bit(struct irq_data *data, u32 flag) 174 { 175 + struct msi_desc *desc = irq_data_get_msi(data); 176 177 if (desc->msi_attrib.is_msix) { 178 msix_mask_irq(desc, flag); 179 readl(desc->mask_base); /* Flush write to device */ 180 } else { 181 + unsigned offset = data->irq - desc->dev->irq; 182 msi_mask_irq(desc, 1 << offset, flag << offset); 183 } 184 } 185 186 + void mask_msi_irq(struct irq_data *data) 187 { 188 + msi_set_mask_bit(data, 1); 189 } 190 191 + void unmask_msi_irq(struct irq_data *data) 192 { 193 + msi_set_mask_bit(data, 0); 194 } 195 196 + void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 197 { 198 BUG_ON(entry->dev->current_state != PCI_D0); 199 200 if (entry->msi_attrib.is_msix) { ··· 227 228 void read_msi_msg(unsigned int irq, struct msi_msg *msg) 229 { 230 + struct msi_desc *entry = get_irq_msi(irq); 231 232 + __read_msi_msg(entry, msg); 233 } 234 235 + void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 236 { 237 /* Assert that the cache is valid, assuming that 238 * valid messages are not all-zeroes. */ 239 BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | ··· 246 247 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) 248 { 249 + struct msi_desc *entry = get_irq_msi(irq); 250 251 + __get_cached_msi_msg(entry, msg); 252 } 253 254 + void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 255 { 256 if (entry->dev->current_state != PCI_D0) { 257 /* Don't touch the hardware now */ 258 } else if (entry->msi_attrib.is_msix) { ··· 292 293 void write_msi_msg(unsigned int irq, struct msi_msg *msg) 294 { 295 + struct msi_desc *entry = get_irq_msi(irq); 296 297 + __write_msi_msg(entry, msg); 298 } 299 300 static void free_msi_irqs(struct pci_dev *dev)
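msi.c splits each operation into a helper that works on the msi_desc directly (__read_msi_msg() and friends) plus a thin irq-number wrapper, so callers that already hold the descriptor skip the lookup. A minimal model of that layering; the registry array is an illustration, not the kernel's storage:

#include <stdio.h>

struct msi_msg {
	unsigned int address_lo;
	unsigned int data;
};

struct msi_desc {
	struct msi_msg msg;	/* cached copy of what the device got */
};

#define NR_IRQS 8
static struct msi_desc *msi_of[NR_IRQS];	/* models get_irq_msi() */

/* desc-based helper: no irq lookup, callers may already hold the desc */
static void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	entry->msg = *msg;
}

/* irq-based entry point stays as a thin wrapper */
static void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	__write_msi_msg(msi_of[irq], msg);
}

int main(void)
{
	static struct msi_desc desc;
	struct msi_msg msg = { 0xfee00000u, 0x41 };

	msi_of[1] = &desc;
	write_msi_msg(1, &msg);
	printf("cached data: %#x\n", desc.msg.data);	/* 0x41 */
	return 0;
}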
+11 -12
drivers/xen/events.c
··· 338 339 static int find_unbound_irq(void) 340 { 341 - int irq; 342 - struct irq_desc *desc; 343 344 for (irq = 0; irq < nr_irqs; irq++) { 345 - desc = irq_to_desc(irq); 346 /* only 0->15 have init'd desc; handle irq > 16 */ 347 - if (desc == NULL) 348 break; 349 - if (desc->chip == &no_irq_chip) 350 break; 351 - if (desc->chip != &xen_dynamic_chip) 352 continue; 353 if (irq_info[irq].type == IRQT_UNBOUND) 354 - break; 355 } 356 357 if (irq == nr_irqs) 358 panic("No available IRQ to bind to: increase nr_irqs!\n"); 359 360 - desc = irq_to_desc_alloc_node(irq, 0); 361 - if (WARN_ON(desc == NULL)) 362 - return -1; 363 364 - dynamic_irq_init_keep_chip_data(irq); 365 366 return irq; 367 } ··· 494 if (irq_info[irq].type != IRQT_UNBOUND) { 495 irq_info[irq] = mk_unbound_info(); 496 497 - dynamic_irq_cleanup(irq); 498 } 499 500 spin_unlock(&irq_mapping_update_lock);
··· 338 339 static int find_unbound_irq(void) 340 { 341 + struct irq_data *data; 342 + int irq, res; 343 344 for (irq = 0; irq < nr_irqs; irq++) { 345 + data = irq_get_irq_data(irq); 346 /* only 0->15 have init'd desc; handle irq > 16 */ 347 + if (!data) 348 break; 349 + if (data->chip == &no_irq_chip) 350 break; 351 + if (data->chip != &xen_dynamic_chip) 352 continue; 353 if (irq_info[irq].type == IRQT_UNBOUND) 354 + return irq; 355 } 356 357 if (irq == nr_irqs) 358 panic("No available IRQ to bind to: increase nr_irqs!\n"); 359 360 + res = irq_alloc_desc_at(irq, 0); 361 362 + if (WARN_ON(res != irq)) 363 + return -1; 364 365 return irq; 366 } ··· 495 if (irq_info[irq].type != IRQT_UNBOUND) { 496 irq_info[irq] = mk_unbound_info(); 497 498 + irq_free_desc(irq); 499 } 500 501 spin_unlock(&irq_mapping_update_lock);
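find_unbound_irq() now prefers recycling a descriptor the Xen chip already owns and only falls back to irq_alloc_desc_at() for a virgin slot. The following sketch models just that scan policy; the arrays and constants are invented for illustration:

#include <stdio.h>

enum chip  { CHIP_OTHER, CHIP_XEN };
enum state { IRQT_BOUND, IRQT_UNBOUND };

#define NR_IRQS 8
static enum chip  chip_of[NR_IRQS] = { CHIP_OTHER, CHIP_XEN, CHIP_XEN };
static enum state type_of[NR_IRQS] = { IRQT_BOUND, IRQT_BOUND, IRQT_UNBOUND };
static int nr_initialized = 3;		/* descriptors 0..2 exist */

static int find_unbound_irq(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		if (irq >= nr_initialized)
			break;		/* virgin slot, allocate here */
		if (chip_of[irq] != CHIP_XEN)
			continue;	/* belongs to someone else */
		if (type_of[irq] == IRQT_UNBOUND)
			return irq;	/* recycle our own unbound slot */
	}
	return irq;			/* caller runs the allocator here */
}

int main(void)
{
	printf("picked irq %d\n", find_unbound_irq());	/* 2: recycled */
	return 0;
}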
+4 -6
include/linux/dmar.h
··· 106 __u64 high; 107 }; 108 }; 109 #ifdef CONFIG_INTR_REMAP 110 extern int intr_remapping_enabled; 111 extern int intr_remapping_supported(void); ··· 120 extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, 121 u16 sub_handle); 122 extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); 123 - extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index); 124 - extern int flush_irte(int irq); 125 extern int free_irte(int irq); 126 127 - extern int irq_remapped(int irq); 128 extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); 129 extern struct intel_iommu *map_ioapic_to_ir(int apic); 130 extern struct intel_iommu *map_hpet_to_ir(u8 id); ··· 175 return 0; 176 } 177 178 - #define irq_remapped(irq) (0) 179 #define enable_intr_remapping(mode) (-1) 180 #define disable_intr_remapping() (0) 181 #define reenable_intr_remapping(mode) (0) ··· 184 /* Can't use the common MSI interrupt functions 185 * since DMAR is not a pci device 186 */ 187 - extern void dmar_msi_unmask(unsigned int irq); 188 - extern void dmar_msi_mask(unsigned int irq); 189 extern void dmar_msi_read(int irq, struct msi_msg *msg); 190 extern void dmar_msi_write(int irq, struct msi_msg *msg); 191 extern int dmar_set_interrupt(struct intel_iommu *iommu);
··· 106 __u64 high; 107 }; 108 }; 109 + 110 #ifdef CONFIG_INTR_REMAP 111 extern int intr_remapping_enabled; 112 extern int intr_remapping_supported(void); ··· 119 extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, 120 u16 sub_handle); 121 extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); 122 extern int free_irte(int irq); 123 124 extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); 125 extern struct intel_iommu *map_ioapic_to_ir(int apic); 126 extern struct intel_iommu *map_hpet_to_ir(u8 id); ··· 177 return 0; 178 } 179 180 #define enable_intr_remapping(mode) (-1) 181 #define disable_intr_remapping() (0) 182 #define reenable_intr_remapping(mode) (0) ··· 187 /* Can't use the common MSI interrupt functions 188 * since DMAR is not a pci device 189 */ 190 + struct irq_data; 191 + extern void dmar_msi_unmask(struct irq_data *data); 192 + extern void dmar_msi_mask(struct irq_data *data); 193 extern void dmar_msi_read(int irq, struct msi_msg *msg); 194 extern void dmar_msi_write(int irq, struct msi_msg *msg); 195 extern int dmar_set_interrupt(struct intel_iommu *iommu);
+3 -2
include/linux/htirq.h
··· 9 /* Helper functions.. */ 10 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 11 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 12 - void mask_ht_irq(unsigned int irq); 13 - void unmask_ht_irq(unsigned int irq); 14 15 /* The arch hook for getting things started */ 16 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
··· 9 /* Helper functions.. */ 10 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 11 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 12 + struct irq_data; 13 + void mask_ht_irq(struct irq_data *data); 14 + void unmask_ht_irq(struct irq_data *data); 15 16 /* The arch hook for getting things started */ 17 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
-3
include/linux/interrupt.h
··· 647 struct seq_file; 648 int show_interrupts(struct seq_file *p, void *v); 649 650 - struct irq_desc; 651 - 652 extern int early_irq_init(void); 653 extern int arch_probe_nr_irqs(void); 654 extern int arch_early_irq_init(void); 655 - extern int arch_init_chip_data(struct irq_desc *desc, int node); 656 657 #endif
··· 647 struct seq_file; 648 int show_interrupts(struct seq_file *p, void *v); 649 650 extern int early_irq_init(void); 651 extern int arch_probe_nr_irqs(void); 652 extern int arch_early_irq_init(void); 653 654 #endif
+196 -301
include/linux/irq.h
··· 72 #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ 73 #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ 74 75 #ifdef CONFIG_IRQ_PER_CPU 76 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 77 # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) ··· 84 # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING 85 #endif 86 87 - struct proc_dir_entry; 88 struct msi_desc; 89 90 /** 91 * struct irq_chip - hardware interrupt chip descriptor 92 * 93 * @name: name for /proc/interrupts 94 - * @startup: start up the interrupt (defaults to ->enable if NULL) 95 - * @shutdown: shut down the interrupt (defaults to ->disable if NULL) 96 - * @enable: enable the interrupt (defaults to chip->unmask if NULL) 97 - * @disable: disable the interrupt 98 - * @ack: start of a new interrupt 99 - * @mask: mask an interrupt source 100 - * @mask_ack: ack and mask an interrupt source 101 - * @unmask: unmask an interrupt source 102 - * @eoi: end of interrupt - chip level 103 - * @end: end of interrupt - flow level 104 - * @set_affinity: set the CPU affinity on SMP machines 105 - * @retrigger: resend an IRQ to the CPU 106 - * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ 107 - * @set_wake: enable/disable power-management wake-on of an IRQ 108 * 109 - * @bus_lock: function to lock access to slow bus (i2c) chips 110 - * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips 111 * 112 * @release: release function solely used by UML 113 - * @typename: obsoleted by name, kept as migration helper 114 */ 115 struct irq_chip { 116 const char *name; 117 unsigned int (*startup)(unsigned int irq); 118 void (*shutdown)(unsigned int irq); 119 void (*enable)(unsigned int irq); ··· 175 176 void (*bus_lock)(unsigned int irq); 177 void (*bus_sync_unlock)(unsigned int irq); 178 179 /* Currently used only by UML, might disappear one day.*/ 180 #ifdef CONFIG_IRQ_RELEASE_METHOD 181 void (*release)(unsigned int irq, void *dev_id); 182 #endif 183 - /* 184 - * For compatibility, ->typename is copied into ->name. 185 - * Will disappear. 
186 - */ 187 - const char *typename; 188 }; 189 190 - struct timer_rand_state; 191 - struct irq_2_iommu; 192 - /** 193 - * struct irq_desc - interrupt descriptor 194 - * @irq: interrupt number for this descriptor 195 - * @timer_rand_state: pointer to timer rand state struct 196 - * @kstat_irqs: irq stats per cpu 197 - * @irq_2_iommu: iommu with this irq 198 - * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] 199 - * @chip: low level interrupt hardware access 200 - * @msi_desc: MSI descriptor 201 - * @handler_data: per-IRQ data for the irq_chip methods 202 - * @chip_data: platform-specific per-chip private data for the chip 203 - * methods, to allow shared chip implementations 204 - * @action: the irq action chain 205 - * @status: status information 206 - * @depth: disable-depth, for nested irq_disable() calls 207 - * @wake_depth: enable depth, for multiple set_irq_wake() callers 208 - * @irq_count: stats field to detect stalled irqs 209 - * @last_unhandled: aging timer for unhandled count 210 - * @irqs_unhandled: stats field for spurious unhandled interrupts 211 - * @lock: locking for SMP 212 - * @affinity: IRQ affinity on SMP 213 - * @node: node index useful for balancing 214 - * @pending_mask: pending rebalanced interrupts 215 - * @threads_active: number of irqaction threads currently running 216 - * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers 217 - * @dir: /proc/irq/ procfs entry 218 - * @name: flow handler name for /proc/interrupts output 219 - */ 220 - struct irq_desc { 221 - unsigned int irq; 222 - struct timer_rand_state *timer_rand_state; 223 - unsigned int *kstat_irqs; 224 - #ifdef CONFIG_INTR_REMAP 225 - struct irq_2_iommu *irq_2_iommu; 226 - #endif 227 - irq_flow_handler_t handle_irq; 228 - struct irq_chip *chip; 229 - struct msi_desc *msi_desc; 230 - void *handler_data; 231 - void *chip_data; 232 - struct irqaction *action; /* IRQ action list */ 233 - unsigned int status; /* IRQ status */ 234 - 235 - unsigned int depth; /* nested irq disables */ 236 - unsigned int wake_depth; /* nested wake enables */ 237 - unsigned int irq_count; /* For detecting broken IRQs */ 238 - unsigned long last_unhandled; /* Aging timer for unhandled count */ 239 - unsigned int irqs_unhandled; 240 - raw_spinlock_t lock; 241 - #ifdef CONFIG_SMP 242 - cpumask_var_t affinity; 243 - const struct cpumask *affinity_hint; 244 - unsigned int node; 245 - #ifdef CONFIG_GENERIC_PENDING_IRQ 246 - cpumask_var_t pending_mask; 247 - #endif 248 - #endif 249 - atomic_t threads_active; 250 - wait_queue_head_t wait_for_threads; 251 - #ifdef CONFIG_PROC_FS 252 - struct proc_dir_entry *dir; 253 - #endif 254 - const char *name; 255 - } ____cacheline_internodealigned_in_smp; 256 - 257 - extern void arch_init_copy_chip_data(struct irq_desc *old_desc, 258 - struct irq_desc *desc, int node); 259 - extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); 260 - 261 - #ifndef CONFIG_SPARSE_IRQ 262 - extern struct irq_desc irq_desc[NR_IRQS]; 263 - #endif 264 - 265 - #ifdef CONFIG_NUMA_IRQ_DESC 266 - extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); 267 - #else 268 - static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) 269 - { 270 - return desc; 271 - } 272 - #endif 273 - 274 - extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); 275 276 /* 277 * Pick up the arch-dependent methods: 278 */ 279 #include <asm/hw_irq.h> 280 281 extern int setup_irq(unsigned int irq, struct irqaction *new); 282 
extern void remove_irq(unsigned int irq, struct irqaction *act); 283 284 #ifdef CONFIG_GENERIC_HARDIRQS 285 286 - #ifdef CONFIG_SMP 287 - 288 - #ifdef CONFIG_GENERIC_PENDING_IRQ 289 - 290 void move_native_irq(int irq); 291 void move_masked_irq(int irq); 292 - 293 - #else /* CONFIG_GENERIC_PENDING_IRQ */ 294 - 295 - static inline void move_irq(int irq) 296 - { 297 - } 298 - 299 - static inline void move_native_irq(int irq) 300 - { 301 - } 302 - 303 - static inline void move_masked_irq(int irq) 304 - { 305 - } 306 - 307 - #endif /* CONFIG_GENERIC_PENDING_IRQ */ 308 - 309 - #else /* CONFIG_SMP */ 310 - 311 - #define move_native_irq(x) 312 - #define move_masked_irq(x) 313 - 314 - #endif /* CONFIG_SMP */ 315 316 extern int no_irq_affinity; 317 - 318 - static inline int irq_balancing_disabled(unsigned int irq) 319 - { 320 - struct irq_desc *desc; 321 - 322 - desc = irq_to_desc(irq); 323 - return desc->status & IRQ_NO_BALANCING_MASK; 324 - } 325 326 /* Handle irq action chains: */ 327 extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); ··· 250 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 251 extern void handle_nested_irq(unsigned int irq); 252 253 - /* 254 - * Monolithic do_IRQ implementation. 255 - */ 256 - #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 257 - extern unsigned int __do_IRQ(unsigned int irq); 258 - #endif 259 - 260 - /* 261 - * Architectures call this to let the generic IRQ layer 262 - * handle an interrupt. If the descriptor is attached to an 263 - * irqchip-style controller then we call the ->handle_irq() handler, 264 - * and it calls __do_IRQ() if it's attached to an irqtype-style controller. 265 - */ 266 - static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) 267 - { 268 - #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 269 - desc->handle_irq(irq, desc); 270 - #else 271 - if (likely(desc->handle_irq)) 272 - desc->handle_irq(irq, desc); 273 - else 274 - __do_IRQ(irq); 275 - #endif 276 - } 277 - 278 - static inline void generic_handle_irq(unsigned int irq) 279 - { 280 - generic_handle_irq_desc(irq, irq_to_desc(irq)); 281 - } 282 - 283 /* Handling of unhandled and spurious interrupts: */ 284 extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 285 irqreturn_t action_ret); 286 287 - /* Resending of interrupts :*/ 288 - void check_irq_resend(struct irq_desc *desc, unsigned int irq); 289 290 /* Enable/disable irq debugging output: */ 291 extern int noirqdebug_setup(char *str); ··· 275 extern void 276 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 277 const char *name); 278 - 279 - /* caller has locked the irq_desc and both params are valid */ 280 - static inline void __set_irq_handler_unlocked(int irq, 281 - irq_flow_handler_t handler) 282 - { 283 - struct irq_desc *desc; 284 - 285 - desc = irq_to_desc(irq); 286 - desc->handle_irq = handler; 287 - } 288 289 /* 290 * Set a highlevel flow handler for a given IRQ: ··· 299 300 extern void set_irq_nested_thread(unsigned int irq, int nest); 301 302 - extern void set_irq_noprobe(unsigned int irq); 303 - extern void set_irq_probe(unsigned int irq); 304 305 /* Handle dynamic irq creation and destruction */ 306 extern unsigned int create_irq_nr(unsigned int irq_want, int node); 307 extern int create_irq(void); 308 extern void destroy_irq(unsigned int irq); 309 310 - /* Test to see if a driver has successfully requested an irq */ 311 - static inline int irq_has_action(unsigned int irq) 312 - { 313 - struct irq_desc *desc = 
irq_to_desc(irq); 314 - return desc->action != NULL; 315 - } 316 - 317 - /* Dynamic irq helper functions */ 318 - extern void dynamic_irq_init(unsigned int irq); 319 - void dynamic_irq_init_keep_chip_data(unsigned int irq); 320 extern void dynamic_irq_cleanup(unsigned int irq); 321 - void dynamic_irq_cleanup_keep_chip_data(unsigned int irq); 322 323 /* Set/get chip/data for an IRQ: */ 324 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); ··· 342 extern int set_irq_chip_data(unsigned int irq, void *data); 343 extern int set_irq_type(unsigned int irq, unsigned int type); 344 extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); 345 346 - #define get_irq_chip(irq) (irq_to_desc(irq)->chip) 347 - #define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data) 348 - #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) 349 - #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) 350 351 - #define get_irq_desc_chip(desc) ((desc)->chip) 352 - #define get_irq_desc_chip_data(desc) ((desc)->chip_data) 353 - #define get_irq_desc_data(desc) ((desc)->handler_data) 354 - #define get_irq_desc_msi(desc) ((desc)->msi_desc) 355 356 #endif /* CONFIG_GENERIC_HARDIRQS */ 357 358 #endif /* !CONFIG_S390 */ 359 - 360 - #ifdef CONFIG_SMP 361 - /** 362 - * alloc_desc_masks - allocate cpumasks for irq_desc 363 - * @desc: pointer to irq_desc struct 364 - * @node: node which will be handling the cpumasks 365 - * @boot: true if need bootmem 366 - * 367 - * Allocates affinity and pending_mask cpumask if required. 368 - * Returns true if successful (or not required). 369 - */ 370 - static inline bool alloc_desc_masks(struct irq_desc *desc, int node, 371 - bool boot) 372 - { 373 - gfp_t gfp = GFP_ATOMIC; 374 - 375 - if (boot) 376 - gfp = GFP_NOWAIT; 377 - 378 - #ifdef CONFIG_CPUMASK_OFFSTACK 379 - if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) 380 - return false; 381 - 382 - #ifdef CONFIG_GENERIC_PENDING_IRQ 383 - if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { 384 - free_cpumask_var(desc->affinity); 385 - return false; 386 - } 387 - #endif 388 - #endif 389 - return true; 390 - } 391 - 392 - static inline void init_desc_masks(struct irq_desc *desc) 393 - { 394 - cpumask_setall(desc->affinity); 395 - #ifdef CONFIG_GENERIC_PENDING_IRQ 396 - cpumask_clear(desc->pending_mask); 397 - #endif 398 - } 399 - 400 - /** 401 - * init_copy_desc_masks - copy cpumasks for irq_desc 402 - * @old_desc: pointer to old irq_desc struct 403 - * @new_desc: pointer to new irq_desc struct 404 - * 405 - * Insures affinity and pending_masks are copied to new irq_desc. 406 - * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the 407 - * irq_desc struct so the copy is redundant. 
408 - */ 409 - 410 - static inline void init_copy_desc_masks(struct irq_desc *old_desc, 411 - struct irq_desc *new_desc) 412 - { 413 - #ifdef CONFIG_CPUMASK_OFFSTACK 414 - cpumask_copy(new_desc->affinity, old_desc->affinity); 415 - 416 - #ifdef CONFIG_GENERIC_PENDING_IRQ 417 - cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); 418 - #endif 419 - #endif 420 - } 421 - 422 - static inline void free_desc_masks(struct irq_desc *old_desc, 423 - struct irq_desc *new_desc) 424 - { 425 - free_cpumask_var(old_desc->affinity); 426 - 427 - #ifdef CONFIG_GENERIC_PENDING_IRQ 428 - free_cpumask_var(old_desc->pending_mask); 429 - #endif 430 - } 431 - 432 - #else /* !CONFIG_SMP */ 433 - 434 - static inline bool alloc_desc_masks(struct irq_desc *desc, int node, 435 - bool boot) 436 - { 437 - return true; 438 - } 439 - 440 - static inline void init_desc_masks(struct irq_desc *desc) 441 - { 442 - } 443 - 444 - static inline void init_copy_desc_masks(struct irq_desc *old_desc, 445 - struct irq_desc *new_desc) 446 - { 447 - } 448 - 449 - static inline void free_desc_masks(struct irq_desc *old_desc, 450 - struct irq_desc *new_desc) 451 - { 452 - } 453 - #endif /* CONFIG_SMP */ 454 455 #endif /* _LINUX_IRQ_H */
··· 72 #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ 73 #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ 74 75 + #define IRQF_MODIFY_MASK \ 76 + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ 77 + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL) 78 + 79 #ifdef CONFIG_IRQ_PER_CPU 80 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 81 # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) ··· 80 # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING 81 #endif 82 83 struct msi_desc; 84 + 85 + /** 86 + * struct irq_data - per irq and irq chip data passed down to chip functions 87 + * @irq: interrupt number 88 + * @node: node index useful for balancing 89 + * @chip: low level interrupt hardware access 90 + * @handler_data: per-IRQ data for the irq_chip methods 91 + * @chip_data: platform-specific per-chip private data for the chip 92 + * methods, to allow shared chip implementations 93 + * @msi_desc: MSI descriptor 94 + * @affinity: IRQ affinity on SMP 95 + * 96 + * The fields here need to overlay the ones in irq_desc until we 97 + * cleaned up the direct references and switched everything over to 98 + * irq_data. 99 + */ 100 + struct irq_data { 101 + unsigned int irq; 102 + unsigned int node; 103 + struct irq_chip *chip; 104 + void *handler_data; 105 + void *chip_data; 106 + struct msi_desc *msi_desc; 107 + #ifdef CONFIG_SMP 108 + cpumask_var_t affinity; 109 + #endif 110 + }; 111 112 /** 113 * struct irq_chip - hardware interrupt chip descriptor 114 * 115 * @name: name for /proc/interrupts 116 + * @startup: deprecated, replaced by irq_startup 117 + * @shutdown: deprecated, replaced by irq_shutdown 118 + * @enable: deprecated, replaced by irq_enable 119 + * @disable: deprecated, replaced by irq_disable 120 + * @ack: deprecated, replaced by irq_ack 121 + * @mask: deprecated, replaced by irq_mask 122 + * @mask_ack: deprecated, replaced by irq_mask_ack 123 + * @unmask: deprecated, replaced by irq_unmask 124 + * @eoi: deprecated, replaced by irq_eoi 125 + * @end: deprecated, will go away with __do_IRQ() 126 + * @set_affinity: deprecated, replaced by irq_set_affinity 127 + * @retrigger: deprecated, replaced by irq_retrigger 128 + * @set_type: deprecated, replaced by irq_set_type 129 + * @set_wake: deprecated, replaced by irq_wake 130 + * @bus_lock: deprecated, replaced by irq_bus_lock 131 + * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock 132 * 133 + * @irq_startup: start up the interrupt (defaults to ->enable if NULL) 134 + * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) 135 + * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) 136 + * @irq_disable: disable the interrupt 137 + * @irq_ack: start of a new interrupt 138 + * @irq_mask: mask an interrupt source 139 + * @irq_mask_ack: ack and mask an interrupt source 140 + * @irq_unmask: unmask an interrupt source 141 + * @irq_eoi: end of interrupt 142 + * @irq_set_affinity: set the CPU affinity on SMP machines 143 + * @irq_retrigger: resend an IRQ to the CPU 144 + * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) 
of an IRQ 145 + * @irq_set_wake: enable/disable power-management wake-on of an IRQ 146 + * @irq_bus_lock: function to lock access to slow bus (i2c) chips 147 + * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips 148 * 149 * @release: release function solely used by UML 150 */ 151 struct irq_chip { 152 const char *name; 153 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 154 unsigned int (*startup)(unsigned int irq); 155 void (*shutdown)(unsigned int irq); 156 void (*enable)(unsigned int irq); ··· 130 131 void (*bus_lock)(unsigned int irq); 132 void (*bus_sync_unlock)(unsigned int irq); 133 + #endif 134 + unsigned int (*irq_startup)(struct irq_data *data); 135 + void (*irq_shutdown)(struct irq_data *data); 136 + void (*irq_enable)(struct irq_data *data); 137 + void (*irq_disable)(struct irq_data *data); 138 + 139 + void (*irq_ack)(struct irq_data *data); 140 + void (*irq_mask)(struct irq_data *data); 141 + void (*irq_mask_ack)(struct irq_data *data); 142 + void (*irq_unmask)(struct irq_data *data); 143 + void (*irq_eoi)(struct irq_data *data); 144 + 145 + int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); 146 + int (*irq_retrigger)(struct irq_data *data); 147 + int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); 148 + int (*irq_set_wake)(struct irq_data *data, unsigned int on); 149 + 150 + void (*irq_bus_lock)(struct irq_data *data); 151 + void (*irq_bus_sync_unlock)(struct irq_data *data); 152 153 /* Currently used only by UML, might disappear one day.*/ 154 #ifdef CONFIG_IRQ_RELEASE_METHOD 155 void (*release)(unsigned int irq, void *dev_id); 156 #endif 157 }; 158 159 + /* This include will go away once we isolated irq_desc usage to core code */ 160 + #include <linux/irqdesc.h> 161 162 /* 163 * Pick up the arch-dependent methods: 164 */ 165 #include <asm/hw_irq.h> 166 167 + #ifndef NR_IRQS_LEGACY 168 + # define NR_IRQS_LEGACY 0 169 + #endif 170 + 171 + #ifndef ARCH_IRQ_INIT_FLAGS 172 + # define ARCH_IRQ_INIT_FLAGS 0 173 + #endif 174 + 175 + #define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) 176 + 177 + struct irqaction; 178 extern int setup_irq(unsigned int irq, struct irqaction *new); 179 extern void remove_irq(unsigned int irq, struct irqaction *act); 180 181 #ifdef CONFIG_GENERIC_HARDIRQS 182 183 + #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) 184 void move_native_irq(int irq); 185 void move_masked_irq(int irq); 186 + #else 187 + static inline void move_native_irq(int irq) { } 188 + static inline void move_masked_irq(int irq) { } 189 + #endif 190 191 extern int no_irq_affinity; 192 193 /* Handle irq action chains: */ 194 extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); ··· 293 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 294 extern void handle_nested_irq(unsigned int irq); 295 296 /* Handling of unhandled and spurious interrupts: */ 297 extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 298 irqreturn_t action_ret); 299 300 301 /* Enable/disable irq debugging output: */ 302 extern int noirqdebug_setup(char *str); ··· 350 extern void 351 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 352 const char *name); 353 354 /* 355 * Set a highlevel flow handler for a given IRQ: ··· 384 385 extern void set_irq_nested_thread(unsigned int irq, int nest); 386 387 + void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); 388 + 389 + static inline void 
irq_set_status_flags(unsigned int irq, unsigned long set) 390 + { 391 + irq_modify_status(irq, 0, set); 392 + } 393 + 394 + static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) 395 + { 396 + irq_modify_status(irq, clr, 0); 397 + } 398 + 399 + static inline void set_irq_noprobe(unsigned int irq) 400 + { 401 + irq_modify_status(irq, 0, IRQ_NOPROBE); 402 + } 403 + 404 + static inline void set_irq_probe(unsigned int irq) 405 + { 406 + irq_modify_status(irq, IRQ_NOPROBE, 0); 407 + } 408 409 /* Handle dynamic irq creation and destruction */ 410 extern unsigned int create_irq_nr(unsigned int irq_want, int node); 411 extern int create_irq(void); 412 extern void destroy_irq(unsigned int irq); 413 414 + /* 415 + * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and 416 + * irq_free_desc instead. 417 + */ 418 extern void dynamic_irq_cleanup(unsigned int irq); 419 + static inline void dynamic_irq_init(unsigned int irq) 420 + { 421 + dynamic_irq_cleanup(irq); 422 + } 423 424 /* Set/get chip/data for an IRQ: */ 425 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); ··· 411 extern int set_irq_chip_data(unsigned int irq, void *data); 412 extern int set_irq_type(unsigned int irq, unsigned int type); 413 extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); 414 + extern struct irq_data *irq_get_irq_data(unsigned int irq); 415 416 + static inline struct irq_chip *get_irq_chip(unsigned int irq) 417 + { 418 + struct irq_data *d = irq_get_irq_data(irq); 419 + return d ? d->chip : NULL; 420 + } 421 422 + static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) 423 + { 424 + return d->chip; 425 + } 426 + 427 + static inline void *get_irq_chip_data(unsigned int irq) 428 + { 429 + struct irq_data *d = irq_get_irq_data(irq); 430 + return d ? d->chip_data : NULL; 431 + } 432 + 433 + static inline void *irq_data_get_irq_chip_data(struct irq_data *d) 434 + { 435 + return d->chip_data; 436 + } 437 + 438 + static inline void *get_irq_data(unsigned int irq) 439 + { 440 + struct irq_data *d = irq_get_irq_data(irq); 441 + return d ? d->handler_data : NULL; 442 + } 443 + 444 + static inline void *irq_data_get_irq_data(struct irq_data *d) 445 + { 446 + return d->handler_data; 447 + } 448 + 449 + static inline struct msi_desc *get_irq_msi(unsigned int irq) 450 + { 451 + struct irq_data *d = irq_get_irq_data(irq); 452 + return d ? d->msi_desc : NULL; 453 + } 454 + 455 + static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) 456 + { 457 + return d->msi_desc; 458 + } 459 + 460 + int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); 461 + void irq_free_descs(unsigned int irq, unsigned int cnt); 462 + int irq_reserve_irqs(unsigned int from, unsigned int cnt); 463 + 464 + static inline int irq_alloc_desc(int node) 465 + { 466 + return irq_alloc_descs(-1, 0, 1, node); 467 + } 468 + 469 + static inline int irq_alloc_desc_at(unsigned int at, int node) 470 + { 471 + return irq_alloc_descs(at, at, 1, node); 472 + } 473 + 474 + static inline int irq_alloc_desc_from(unsigned int from, int node) 475 + { 476 + return irq_alloc_descs(-1, from, 1, node); 477 + } 478 + 479 + static inline void irq_free_desc(unsigned int irq) 480 + { 481 + irq_free_descs(irq, 1); 482 + } 483 484 #endif /* CONFIG_GENERIC_HARDIRQS */ 485 486 #endif /* !CONFIG_S390 */ 487 488 #endif /* _LINUX_IRQ_H */
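For a sense of how the new interface reads from the driver side, here is a minimal sketch of an irq_chip written directly against struct irq_data, with descriptors obtained through the new irq_alloc_descs() allocator. All foo_* names, registers and the line count are invented for illustration; only the irq_* and set_irq_* calls come from the header above.

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

#define FOO_NR_IRQS	4		/* hypothetical controller with 4 lines */
#define FOO_MASK_REG	0x04		/* hypothetical mask register */
#define FOO_ACK_REG	0x08		/* hypothetical ack register */

static void __iomem *foo_base;		/* assumed ioremap()ed during probe */

static void foo_irq_mask(struct irq_data *data)
{
	/* the line number was stashed as chip_data in foo_setup_irqs() */
	unsigned long bit = (unsigned long)irq_data_get_irq_chip_data(data);

	writel(readl(foo_base + FOO_MASK_REG) | BIT(bit), foo_base + FOO_MASK_REG);
}

static void foo_irq_unmask(struct irq_data *data)
{
	unsigned long bit = (unsigned long)irq_data_get_irq_chip_data(data);

	writel(readl(foo_base + FOO_MASK_REG) & ~BIT(bit), foo_base + FOO_MASK_REG);
}

static void foo_irq_ack(struct irq_data *data)
{
	unsigned long bit = (unsigned long)irq_data_get_irq_chip_data(data);

	writel(BIT(bit), foo_base + FOO_ACK_REG);
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_ack	= foo_irq_ack,
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};

static int foo_setup_irqs(void)
{
	unsigned long i;
	int irq;

	/* four consecutive descriptors, any irq number, no node preference */
	irq = irq_alloc_descs(-1, 0, FOO_NR_IRQS, -1);
	if (irq < 0)
		return irq;

	for (i = 0; i < FOO_NR_IRQS; i++) {
		set_irq_chip_and_handler(irq + i, &foo_irq_chip, handle_level_irq);
		set_irq_chip_data(irq + i, (void *)i);
	}
	return irq;
}

The callbacks left NULL are filled in by irq_chip_set_defaults() when the chip is registered; an unconverted chip that still provides .mask()/.unmask() keeps working through the compat_irq_*() wrappers in kernel/irq/chip.c further down.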
+159
include/linux/irqdesc.h
···
··· 1 + #ifndef _LINUX_IRQDESC_H 2 + #define _LINUX_IRQDESC_H 3 + 4 + /* 5 + * Core internal functions to deal with irq descriptors 6 + * 7 + * This include will move to kernel/irq once we cleaned up the tree. 8 + * For now it's included from <linux/irq.h> 9 + */ 10 + 11 + struct proc_dir_entry; 12 + struct timer_rand_state; 13 + /** 14 + * struct irq_desc - interrupt descriptor 15 + * @irq_data: per irq and chip data passed down to chip functions 16 + * @timer_rand_state: pointer to timer rand state struct 17 + * @kstat_irqs: irq stats per cpu 18 + * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] 19 + * @action: the irq action chain 20 + * @status: status information 21 + * @depth: disable-depth, for nested irq_disable() calls 22 + * @wake_depth: enable depth, for multiple set_irq_wake() callers 23 + * @irq_count: stats field to detect stalled irqs 24 + * @last_unhandled: aging timer for unhandled count 25 + * @irqs_unhandled: stats field for spurious unhandled interrupts 26 + * @lock: locking for SMP 27 + * @pending_mask: pending rebalanced interrupts 28 + * @threads_active: number of irqaction threads currently running 29 + * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers 30 + * @dir: /proc/irq/ procfs entry 31 + * @name: flow handler name for /proc/interrupts output 32 + */ 33 + struct irq_desc { 34 + 35 + #ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 36 + struct irq_data irq_data; 37 + #else 38 + /* 39 + * This union will go away, once we fixed the direct access to 40 + * irq_desc all over the place. The direct fields are a 1:1 41 + * overlay of irq_data. 42 + */ 43 + union { 44 + struct irq_data irq_data; 45 + struct { 46 + unsigned int irq; 47 + unsigned int node; 48 + struct irq_chip *chip; 49 + void *handler_data; 50 + void *chip_data; 51 + struct msi_desc *msi_desc; 52 + #ifdef CONFIG_SMP 53 + cpumask_var_t affinity; 54 + #endif 55 + }; 56 + }; 57 + #endif 58 + 59 + struct timer_rand_state *timer_rand_state; 60 + unsigned int *kstat_irqs; 61 + irq_flow_handler_t handle_irq; 62 + struct irqaction *action; /* IRQ action list */ 63 + unsigned int status; /* IRQ status */ 64 + 65 + unsigned int depth; /* nested irq disables */ 66 + unsigned int wake_depth; /* nested wake enables */ 67 + unsigned int irq_count; /* For detecting broken IRQs */ 68 + unsigned long last_unhandled; /* Aging timer for unhandled count */ 69 + unsigned int irqs_unhandled; 70 + raw_spinlock_t lock; 71 + #ifdef CONFIG_SMP 72 + const struct cpumask *affinity_hint; 73 + #ifdef CONFIG_GENERIC_PENDING_IRQ 74 + cpumask_var_t pending_mask; 75 + #endif 76 + #endif 77 + atomic_t threads_active; 78 + wait_queue_head_t wait_for_threads; 79 + #ifdef CONFIG_PROC_FS 80 + struct proc_dir_entry *dir; 81 + #endif 82 + const char *name; 83 + } ____cacheline_internodealigned_in_smp; 84 + 85 + #ifndef CONFIG_SPARSE_IRQ 86 + extern struct irq_desc irq_desc[NR_IRQS]; 87 + #endif 88 + 89 + /* Will be removed once the last users in power and sh are gone */ 90 + extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); 91 + static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) 92 + { 93 + return desc; 94 + } 95 + 96 + #ifdef CONFIG_GENERIC_HARDIRQS 97 + 98 + #define get_irq_desc_chip(desc) ((desc)->irq_data.chip) 99 + #define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) 100 + #define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) 101 + #define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) 102 + 103 + /* 104 + * Monolithic 
do_IRQ implementation. 105 + */ 106 + #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 107 + extern unsigned int __do_IRQ(unsigned int irq); 108 + #endif 109 + 110 + /* 111 + * Architectures call this to let the generic IRQ layer 112 + * handle an interrupt. If the descriptor is attached to an 113 + * irqchip-style controller then we call the ->handle_irq() handler, 114 + * and it calls __do_IRQ() if it's attached to an irqtype-style controller. 115 + */ 116 + static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) 117 + { 118 + #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 119 + desc->handle_irq(irq, desc); 120 + #else 121 + if (likely(desc->handle_irq)) 122 + desc->handle_irq(irq, desc); 123 + else 124 + __do_IRQ(irq); 125 + #endif 126 + } 127 + 128 + static inline void generic_handle_irq(unsigned int irq) 129 + { 130 + generic_handle_irq_desc(irq, irq_to_desc(irq)); 131 + } 132 + 133 + /* Test to see if a driver has successfully requested an irq */ 134 + static inline int irq_has_action(unsigned int irq) 135 + { 136 + struct irq_desc *desc = irq_to_desc(irq); 137 + return desc->action != NULL; 138 + } 139 + 140 + static inline int irq_balancing_disabled(unsigned int irq) 141 + { 142 + struct irq_desc *desc; 143 + 144 + desc = irq_to_desc(irq); 145 + return desc->status & IRQ_NO_BALANCING_MASK; 146 + } 147 + 148 + /* caller has locked the irq_desc and both params are valid */ 149 + static inline void __set_irq_handler_unlocked(int irq, 150 + irq_flow_handler_t handler) 151 + { 152 + struct irq_desc *desc; 153 + 154 + desc = irq_to_desc(irq); 155 + desc->handle_irq = handler; 156 + } 157 + #endif 158 + 159 + #endif
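The header keeps generic_handle_irq() available to architecture code and to chained flow handlers. As a rough sketch of the common consumer, a demultiplexing handler for a hypothetical 8-line secondary controller (the foo_* names and the pending register are invented) could look like this:

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

#define FOO_PENDING_REG	0x00		/* hypothetical pending-status register */

static void __iomem *foo_base;		/* assumed mapped elsewhere */
static unsigned int foo_irq_base;	/* first child irq, e.g. from irq_alloc_descs() */

static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = get_irq_desc_chip(desc);
	unsigned long pending = readl(foo_base + FOO_PENDING_REG);
	int bit;

	/* mask/ack the parent line through the new irq_data based callbacks */
	chip->irq_mask(&desc->irq_data);
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	for_each_set_bit(bit, &pending, 8)
		generic_handle_irq(foo_irq_base + bit);

	chip->irq_unmask(&desc->irq_data);
}

The handler would be installed on the parent line with set_irq_chained_handler(parent_irq, foo_demux_handler); the child lines can use dummy_irq_chip (see kernel/irq/dummychip.c below).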
+5
include/linux/irqnr.h
···
25 
26 extern int nr_irqs;
27 extern struct irq_desc *irq_to_desc(unsigned int irq);
28 
29 # define for_each_irq_desc(irq, desc)					\
30 	for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs;	\
···
47 #else
48 #define irq_node(irq) 0
49 #endif
50 
51 #endif /* CONFIG_GENERIC_HARDIRQS */
52 
···
25 
26 extern int nr_irqs;
27 extern struct irq_desc *irq_to_desc(unsigned int irq);
28 + unsigned int irq_get_next_irq(unsigned int offset);
29 
30 # define for_each_irq_desc(irq, desc)					\
31 	for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs;	\
···
46 #else
47 #define irq_node(irq) 0
48 #endif
49 + 
50 + # define for_each_active_irq(irq)			\
51 + 	for (irq = irq_get_next_irq(0); irq < nr_irqs;	\
52 + 		irq = irq_get_next_irq(irq + 1))
53 
54 #endif /* CONFIG_GENERIC_HARDIRQS */
55 
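for_each_active_irq() visits only allocated descriptors, which matters under SPARSE_IRQ where the irq number space has holes that for_each_irq_desc() users previously had to skip by hand. A small hypothetical debugging helper shows the intended use:

#include <linux/irq.h>
#include <linux/kernel.h>

static void foo_dump_requested_irqs(void)	/* invented helper, for illustration */
{
	unsigned int irq;

	for_each_active_irq(irq) {
		if (irq_has_action(irq))
			printk(KERN_DEBUG "irq %u has a handler requested\n", irq);
	}
}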
-8
include/linux/lockdep.h
···
435 
436 #endif /* CONFIG_LOCKDEP */
437 
438 - #ifdef CONFIG_GENERIC_HARDIRQS
439 - extern void early_init_irq_lock_class(void);
440 - #else
441 - static inline void early_init_irq_lock_class(void)
442 - {
443 - }
444 - #endif
445 - 
446 #ifdef CONFIG_TRACE_IRQFLAGS
447 extern void early_boot_irqs_off(void);
448 extern void early_boot_irqs_on(void);
···
435 
436 #endif /* CONFIG_LOCKDEP */
437 
438 #ifdef CONFIG_TRACE_IRQFLAGS
439 extern void early_boot_irqs_off(void);
440 extern void early_boot_irqs_on(void);
+7 -6
include/linux/msi.h
···
10 };
11 
12 /* Helper functions */
13 - struct irq_desc;
14 - extern void mask_msi_irq(unsigned int irq);
15 - extern void unmask_msi_irq(unsigned int irq);
16 - extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
17 - extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
18 - extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
19 extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
20 extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
21 extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
···
10 };
11 
12 /* Helper functions */
13 + struct irq_data;
14 + struct msi_desc;
15 + extern void mask_msi_irq(struct irq_data *data);
16 + extern void unmask_msi_irq(struct irq_data *data);
17 + extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
18 + extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
19 + extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
20 extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
21 extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
22 extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
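With this change the chip-facing MSI helpers operate on struct irq_data and struct msi_desc instead of irq numbers, so mask_msi_irq()/unmask_msi_irq() can be plugged straight into the new callbacks. A hedged sketch (the ack callback and the address/data rewrite are invented placeholders for what an architecture would do):

#include <linux/irq.h>
#include <linux/msi.h>

static void foo_msi_ack(struct irq_data *data)
{
	/* hypothetical architecture specific ack */
}

static struct irq_chip foo_msi_chip = {
	.name		= "FOO-MSI",
	.irq_ack	= foo_msi_ack,
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
};

/* retargeting an MSI: rewrite the cached message via its msi_desc */
static void foo_msi_retarget(struct irq_data *data)
{
	struct msi_desc *msi = irq_data_get_msi(data);
	struct msi_msg msg;

	__get_cached_msi_msg(msi, &msg);
	/* ... adjust msg.address_lo / msg.data for the new target here ... */
	__write_msi_msg(msi, &msg);
}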
+2
init/Kconfig
···
339 	depends on AUDITSYSCALL
340 	select FSNOTIFY
341 
342 menu "RCU Subsystem"
343 
344 choice
···
339 	depends on AUDITSYSCALL
340 	select FSNOTIFY
341 
342 + source "kernel/irq/Kconfig"
343 + 
344 menu "RCU Subsystem"
345 
346 choice
-1
init/main.c
···
556 
557 	local_irq_disable();
558 	early_boot_irqs_off();
559 - 	early_init_irq_lock_class();
560 
561 	/*
562 	 * Interrupts are still disabled. Do necessary setups, then
···
556 
557 	local_irq_disable();
558 	early_boot_irqs_off();
559 
560 	/*
561 	 * Interrupts are still disabled. Do necessary setups, then
+53
kernel/irq/Kconfig
···
···
1 + config HAVE_GENERIC_HARDIRQS
2 + 	def_bool n
3 + 
4 + if HAVE_GENERIC_HARDIRQS
5 + menu "IRQ subsystem"
6 + #
7 + # Interrupt subsystem related configuration options
8 + #
9 + config GENERIC_HARDIRQS
10 + 	def_bool y
11 + 
12 + config GENERIC_HARDIRQS_NO__DO_IRQ
13 + 	def_bool y
14 + 
15 + # Select this to disable the deprecated stuff
16 + config GENERIC_HARDIRQS_NO_DEPRECATED
17 + 	def_bool n
18 + 
19 + # Options selectable by the architecture code
20 + config HAVE_SPARSE_IRQ
21 + 	def_bool n
22 + 
23 + config GENERIC_IRQ_PROBE
24 + 	def_bool n
25 + 
26 + config GENERIC_PENDING_IRQ
27 + 	def_bool n
28 + 
29 + config AUTO_IRQ_AFFINITY
30 + 	def_bool n
31 + 
32 + config IRQ_PER_CPU
33 + 	def_bool n
34 + 
35 + config HARDIRQS_SW_RESEND
36 + 	def_bool n
37 + 
38 + config SPARSE_IRQ
39 + 	bool "Support sparse irq numbering"
40 + 	depends on HAVE_SPARSE_IRQ
41 + 	---help---
42 + 
43 + 	  Sparse irq numbering is useful for distro kernels that want
44 + 	  to define a high CONFIG_NR_CPUS value but still want to have
45 + 	  low kernel memory footprint on smaller machines.
46 + 
47 + 	  ( Sparse irqs can also be beneficial on NUMA boxes, as they spread
48 + 	    out the interrupt descriptors in a more NUMA-friendly way. )
49 + 
50 + 	  If you don't know what to do here, say N.
51 + 
52 + endmenu
53 + endif
+1 -2
kernel/irq/Makefile
···
1 
2 - obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
3 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
4 obj-$(CONFIG_PROC_FS) += proc.o
5 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
6 - obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
7 obj-$(CONFIG_PM_SLEEP) += pm.o
···
1 
2 + obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
3 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
4 obj-$(CONFIG_PROC_FS) += proc.o
5 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
6 obj-$(CONFIG_PM_SLEEP) += pm.o
+8 -7
kernel/irq/autoprobe.c
··· 57 * Some chips need to know about probing in 58 * progress: 59 */ 60 - if (desc->chip->set_type) 61 - desc->chip->set_type(i, IRQ_TYPE_PROBE); 62 - desc->chip->startup(i); 63 } 64 raw_spin_unlock_irq(&desc->lock); 65 } ··· 77 raw_spin_lock_irq(&desc->lock); 78 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 79 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; 80 - if (desc->chip->startup(i)) 81 desc->status |= IRQ_PENDING; 82 } 83 raw_spin_unlock_irq(&desc->lock); ··· 99 /* It triggered already - consider it spurious. */ 100 if (!(status & IRQ_WAITING)) { 101 desc->status = status & ~IRQ_AUTODETECT; 102 - desc->chip->shutdown(i); 103 } else 104 if (i < 32) 105 mask |= 1 << i; ··· 138 mask |= 1 << i; 139 140 desc->status = status & ~IRQ_AUTODETECT; 141 - desc->chip->shutdown(i); 142 } 143 raw_spin_unlock_irq(&desc->lock); 144 } ··· 182 nr_of_irqs++; 183 } 184 desc->status = status & ~IRQ_AUTODETECT; 185 - desc->chip->shutdown(i); 186 } 187 raw_spin_unlock_irq(&desc->lock); 188 }
··· 57 * Some chips need to know about probing in 58 * progress: 59 */ 60 + if (desc->irq_data.chip->irq_set_type) 61 + desc->irq_data.chip->irq_set_type(&desc->irq_data, 62 + IRQ_TYPE_PROBE); 63 + desc->irq_data.chip->irq_startup(&desc->irq_data); 64 } 65 raw_spin_unlock_irq(&desc->lock); 66 } ··· 76 raw_spin_lock_irq(&desc->lock); 77 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 78 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; 79 + if (desc->irq_data.chip->irq_startup(&desc->irq_data)) 80 desc->status |= IRQ_PENDING; 81 } 82 raw_spin_unlock_irq(&desc->lock); ··· 98 /* It triggered already - consider it spurious. */ 99 if (!(status & IRQ_WAITING)) { 100 desc->status = status & ~IRQ_AUTODETECT; 101 + desc->irq_data.chip->irq_shutdown(&desc->irq_data); 102 } else 103 if (i < 32) 104 mask |= 1 << i; ··· 137 mask |= 1 << i; 138 139 desc->status = status & ~IRQ_AUTODETECT; 140 + desc->irq_data.chip->irq_shutdown(&desc->irq_data); 141 } 142 raw_spin_unlock_irq(&desc->lock); 143 } ··· 181 nr_of_irqs++; 182 } 183 desc->status = status & ~IRQ_AUTODETECT; 184 + desc->irq_data.chip->irq_shutdown(&desc->irq_data); 185 } 186 raw_spin_unlock_irq(&desc->lock); 187 }
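Only autoprobe.c's view of the chip changes here; the probing interface that drivers see is untouched. For reference, the classic usage pattern (foo_hw_trigger_irq() is an invented stand-in for making the device raise its interrupt once):

#include <linux/interrupt.h>

static void foo_hw_trigger_irq(void);	/* hypothetical */

static int foo_find_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unclaimed, probeable lines */
	foo_hw_trigger_irq();		/* make the hardware fire once */
	irq = probe_irq_off(mask);	/* >0: the irq; 0: none seen; <0: several fired */

	return irq;
}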
+198 -182
kernel/irq/chip.c
··· 18 19 #include "internals.h" 20 21 - static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) 22 - { 23 - struct irq_desc *desc; 24 - unsigned long flags; 25 - 26 - desc = irq_to_desc(irq); 27 - if (!desc) { 28 - WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); 29 - return; 30 - } 31 - 32 - /* Ensure we don't have left over values from a previous use of this irq */ 33 - raw_spin_lock_irqsave(&desc->lock, flags); 34 - desc->status = IRQ_DISABLED; 35 - desc->chip = &no_irq_chip; 36 - desc->handle_irq = handle_bad_irq; 37 - desc->depth = 1; 38 - desc->msi_desc = NULL; 39 - desc->handler_data = NULL; 40 - if (!keep_chip_data) 41 - desc->chip_data = NULL; 42 - desc->action = NULL; 43 - desc->irq_count = 0; 44 - desc->irqs_unhandled = 0; 45 - #ifdef CONFIG_SMP 46 - cpumask_setall(desc->affinity); 47 - #ifdef CONFIG_GENERIC_PENDING_IRQ 48 - cpumask_clear(desc->pending_mask); 49 - #endif 50 - #endif 51 - raw_spin_unlock_irqrestore(&desc->lock, flags); 52 - } 53 - 54 - /** 55 - * dynamic_irq_init - initialize a dynamically allocated irq 56 - * @irq: irq number to initialize 57 - */ 58 - void dynamic_irq_init(unsigned int irq) 59 - { 60 - dynamic_irq_init_x(irq, false); 61 - } 62 - 63 - /** 64 - * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq 65 - * @irq: irq number to initialize 66 - * 67 - * does not set irq_to_desc(irq)->chip_data to NULL 68 - */ 69 - void dynamic_irq_init_keep_chip_data(unsigned int irq) 70 - { 71 - dynamic_irq_init_x(irq, true); 72 - } 73 - 74 - static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data) 75 - { 76 - struct irq_desc *desc = irq_to_desc(irq); 77 - unsigned long flags; 78 - 79 - if (!desc) { 80 - WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); 81 - return; 82 - } 83 - 84 - raw_spin_lock_irqsave(&desc->lock, flags); 85 - if (desc->action) { 86 - raw_spin_unlock_irqrestore(&desc->lock, flags); 87 - WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", 88 - irq); 89 - return; 90 - } 91 - desc->msi_desc = NULL; 92 - desc->handler_data = NULL; 93 - if (!keep_chip_data) 94 - desc->chip_data = NULL; 95 - desc->handle_irq = handle_bad_irq; 96 - desc->chip = &no_irq_chip; 97 - desc->name = NULL; 98 - clear_kstat_irqs(desc); 99 - raw_spin_unlock_irqrestore(&desc->lock, flags); 100 - } 101 - 102 - /** 103 - * dynamic_irq_cleanup - cleanup a dynamically allocated irq 104 - * @irq: irq number to initialize 105 - */ 106 - void dynamic_irq_cleanup(unsigned int irq) 107 - { 108 - dynamic_irq_cleanup_x(irq, false); 109 - } 110 - 111 - /** 112 - * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq 113 - * @irq: irq number to initialize 114 - * 115 - * does not set irq_to_desc(irq)->chip_data to NULL 116 - */ 117 - void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) 118 - { 119 - dynamic_irq_cleanup_x(irq, true); 120 - } 121 - 122 - 123 /** 124 * set_irq_chip - set the irq chip for an irq 125 * @irq: irq number ··· 38 39 raw_spin_lock_irqsave(&desc->lock, flags); 40 irq_chip_set_defaults(chip); 41 - desc->chip = chip; 42 raw_spin_unlock_irqrestore(&desc->lock, flags); 43 44 return 0; ··· 91 } 92 93 raw_spin_lock_irqsave(&desc->lock, flags); 94 - desc->handler_data = data; 95 raw_spin_unlock_irqrestore(&desc->lock, flags); 96 return 0; 97 } ··· 116 } 117 118 raw_spin_lock_irqsave(&desc->lock, flags); 119 - desc->msi_desc = entry; 120 if (entry) 121 entry->irq = irq; 122 raw_spin_unlock_irqrestore(&desc->lock, flags); ··· 141 return -EINVAL; 142 } 143 144 - if 
(!desc->chip) { 145 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); 146 return -EINVAL; 147 } 148 149 raw_spin_lock_irqsave(&desc->lock, flags); 150 - desc->chip_data = data; 151 raw_spin_unlock_irqrestore(&desc->lock, flags); 152 153 return 0; 154 } 155 EXPORT_SYMBOL(set_irq_chip_data); 156 157 /** 158 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq ··· 193 /* 194 * default enable function 195 */ 196 - static void default_enable(unsigned int irq) 197 { 198 - struct irq_desc *desc = irq_to_desc(irq); 199 200 - desc->chip->unmask(irq); 201 desc->status &= ~IRQ_MASKED; 202 } 203 204 /* 205 * default disable function 206 */ 207 - static void default_disable(unsigned int irq) 208 { 209 } 210 211 /* 212 * default startup function 213 */ 214 - static unsigned int default_startup(unsigned int irq) 215 { 216 - struct irq_desc *desc = irq_to_desc(irq); 217 218 - desc->chip->enable(irq); 219 return 0; 220 } 221 222 /* 223 * default shutdown function 224 */ 225 - static void default_shutdown(unsigned int irq) 226 { 227 - struct irq_desc *desc = irq_to_desc(irq); 228 229 - desc->chip->mask(irq); 230 desc->status |= IRQ_MASKED; 231 } 232 233 /* 234 * Fixup enable/disable function pointers 235 */ 236 void irq_chip_set_defaults(struct irq_chip *chip) 237 { 238 - if (!chip->enable) 239 - chip->enable = default_enable; 240 - if (!chip->disable) 241 - chip->disable = default_disable; 242 - if (!chip->startup) 243 - chip->startup = default_startup; 244 /* 245 - * We use chip->disable, when the user provided its own. When 246 - * we have default_disable set for chip->disable, then we need 247 * to use default_shutdown, otherwise the irq line is not 248 * disabled on free_irq(): 249 */ 250 - if (!chip->shutdown) 251 - chip->shutdown = chip->disable != default_disable ? 
252 - chip->disable : default_shutdown; 253 - if (!chip->name) 254 - chip->name = chip->typename; 255 if (!chip->end) 256 chip->end = dummy_irq_chip.end; 257 } 258 259 - static inline void mask_ack_irq(struct irq_desc *desc, int irq) 260 { 261 - if (desc->chip->mask_ack) 262 - desc->chip->mask_ack(irq); 263 else { 264 - desc->chip->mask(irq); 265 - if (desc->chip->ack) 266 - desc->chip->ack(irq); 267 } 268 desc->status |= IRQ_MASKED; 269 } 270 271 - static inline void mask_irq(struct irq_desc *desc, int irq) 272 { 273 - if (desc->chip->mask) { 274 - desc->chip->mask(irq); 275 desc->status |= IRQ_MASKED; 276 } 277 } 278 279 - static inline void unmask_irq(struct irq_desc *desc, int irq) 280 { 281 - if (desc->chip->unmask) { 282 - desc->chip->unmask(irq); 283 desc->status &= ~IRQ_MASKED; 284 } 285 } ··· 505 irqreturn_t action_ret; 506 507 raw_spin_lock(&desc->lock); 508 - mask_ack_irq(desc, irq); 509 510 if (unlikely(desc->status & IRQ_INPROGRESS)) 511 goto out_unlock; ··· 531 desc->status &= ~IRQ_INPROGRESS; 532 533 if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) 534 - unmask_irq(desc, irq); 535 out_unlock: 536 raw_spin_unlock(&desc->lock); 537 } ··· 568 action = desc->action; 569 if (unlikely(!action || (desc->status & IRQ_DISABLED))) { 570 desc->status |= IRQ_PENDING; 571 - mask_irq(desc, irq); 572 goto out; 573 } 574 ··· 583 raw_spin_lock(&desc->lock); 584 desc->status &= ~IRQ_INPROGRESS; 585 out: 586 - desc->chip->eoi(irq); 587 588 raw_spin_unlock(&desc->lock); 589 } ··· 619 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || 620 !desc->action)) { 621 desc->status |= (IRQ_PENDING | IRQ_MASKED); 622 - mask_ack_irq(desc, irq); 623 goto out_unlock; 624 } 625 kstat_incr_irqs_this_cpu(irq, desc); 626 627 /* Start handling the irq */ 628 - if (desc->chip->ack) 629 - desc->chip->ack(irq); 630 631 /* Mark the IRQ currently in progress.*/ 632 desc->status |= IRQ_INPROGRESS; ··· 635 irqreturn_t action_ret; 636 637 if (unlikely(!action)) { 638 - mask_irq(desc, irq); 639 goto out_unlock; 640 } 641 ··· 647 if (unlikely((desc->status & 648 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == 649 (IRQ_PENDING | IRQ_MASKED))) { 650 - unmask_irq(desc, irq); 651 } 652 653 desc->status &= ~IRQ_PENDING; ··· 678 679 kstat_incr_irqs_this_cpu(irq, desc); 680 681 - if (desc->chip->ack) 682 - desc->chip->ack(irq); 683 684 action_ret = handle_IRQ_event(irq, desc->action); 685 if (!noirqdebug) 686 note_interrupt(irq, desc, action_ret); 687 688 - if (desc->chip->eoi) 689 - desc->chip->eoi(irq); 690 } 691 692 void ··· 704 705 if (!handle) 706 handle = handle_bad_irq; 707 - else if (desc->chip == &no_irq_chip) { 708 printk(KERN_WARNING "Trying to install %sinterrupt handler " 709 "for IRQ%d\n", is_chained ? "chained " : "", irq); 710 /* ··· 714 * prevent us to setup the interrupt at all. Switch it to 715 * dummy_irq_chip for easy transition. 716 */ 717 - desc->chip = &dummy_irq_chip; 718 } 719 720 - chip_bus_lock(irq, desc); 721 raw_spin_lock_irqsave(&desc->lock, flags); 722 723 /* Uninstall? 
*/ 724 if (handle == handle_bad_irq) { 725 - if (desc->chip != &no_irq_chip) 726 - mask_ack_irq(desc, irq); 727 desc->status |= IRQ_DISABLED; 728 desc->depth = 1; 729 } ··· 734 desc->status &= ~IRQ_DISABLED; 735 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 736 desc->depth = 0; 737 - desc->chip->startup(irq); 738 } 739 raw_spin_unlock_irqrestore(&desc->lock, flags); 740 - chip_bus_sync_unlock(irq, desc); 741 } 742 EXPORT_SYMBOL_GPL(__set_irq_handler); 743 ··· 757 __set_irq_handler(irq, handle, 0, name); 758 } 759 760 - void set_irq_noprobe(unsigned int irq) 761 { 762 struct irq_desc *desc = irq_to_desc(irq); 763 unsigned long flags; 764 765 - if (!desc) { 766 - printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq); 767 return; 768 - } 769 770 raw_spin_lock_irqsave(&desc->lock, flags); 771 - desc->status |= IRQ_NOPROBE; 772 - raw_spin_unlock_irqrestore(&desc->lock, flags); 773 - } 774 - 775 - void set_irq_probe(unsigned int irq) 776 - { 777 - struct irq_desc *desc = irq_to_desc(irq); 778 - unsigned long flags; 779 - 780 - if (!desc) { 781 - printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq); 782 - return; 783 - } 784 - 785 - raw_spin_lock_irqsave(&desc->lock, flags); 786 - desc->status &= ~IRQ_NOPROBE; 787 raw_spin_unlock_irqrestore(&desc->lock, flags); 788 }
··· 18 19 #include "internals.h" 20 21 /** 22 * set_irq_chip - set the irq chip for an irq 23 * @irq: irq number ··· 140 141 raw_spin_lock_irqsave(&desc->lock, flags); 142 irq_chip_set_defaults(chip); 143 + desc->irq_data.chip = chip; 144 raw_spin_unlock_irqrestore(&desc->lock, flags); 145 146 return 0; ··· 193 } 194 195 raw_spin_lock_irqsave(&desc->lock, flags); 196 + desc->irq_data.handler_data = data; 197 raw_spin_unlock_irqrestore(&desc->lock, flags); 198 return 0; 199 } ··· 218 } 219 220 raw_spin_lock_irqsave(&desc->lock, flags); 221 + desc->irq_data.msi_desc = entry; 222 if (entry) 223 entry->irq = irq; 224 raw_spin_unlock_irqrestore(&desc->lock, flags); ··· 243 return -EINVAL; 244 } 245 246 + if (!desc->irq_data.chip) { 247 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); 248 return -EINVAL; 249 } 250 251 raw_spin_lock_irqsave(&desc->lock, flags); 252 + desc->irq_data.chip_data = data; 253 raw_spin_unlock_irqrestore(&desc->lock, flags); 254 255 return 0; 256 } 257 EXPORT_SYMBOL(set_irq_chip_data); 258 + 259 + struct irq_data *irq_get_irq_data(unsigned int irq) 260 + { 261 + struct irq_desc *desc = irq_to_desc(irq); 262 + 263 + return desc ? &desc->irq_data : NULL; 264 + } 265 + EXPORT_SYMBOL_GPL(irq_get_irq_data); 266 267 /** 268 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq ··· 287 /* 288 * default enable function 289 */ 290 + static void default_enable(struct irq_data *data) 291 { 292 + struct irq_desc *desc = irq_data_to_desc(data); 293 294 + desc->irq_data.chip->irq_unmask(&desc->irq_data); 295 desc->status &= ~IRQ_MASKED; 296 } 297 298 /* 299 * default disable function 300 */ 301 + static void default_disable(struct irq_data *data) 302 { 303 } 304 305 /* 306 * default startup function 307 */ 308 + static unsigned int default_startup(struct irq_data *data) 309 { 310 + struct irq_desc *desc = irq_data_to_desc(data); 311 312 + desc->irq_data.chip->irq_enable(data); 313 return 0; 314 } 315 316 /* 317 * default shutdown function 318 */ 319 + static void default_shutdown(struct irq_data *data) 320 { 321 + struct irq_desc *desc = irq_data_to_desc(data); 322 323 + desc->irq_data.chip->irq_mask(&desc->irq_data); 324 desc->status |= IRQ_MASKED; 325 } 326 + 327 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 328 + /* Temporary migration helpers */ 329 + static void compat_irq_mask(struct irq_data *data) 330 + { 331 + data->chip->mask(data->irq); 332 + } 333 + 334 + static void compat_irq_unmask(struct irq_data *data) 335 + { 336 + data->chip->unmask(data->irq); 337 + } 338 + 339 + static void compat_irq_ack(struct irq_data *data) 340 + { 341 + data->chip->ack(data->irq); 342 + } 343 + 344 + static void compat_irq_mask_ack(struct irq_data *data) 345 + { 346 + data->chip->mask_ack(data->irq); 347 + } 348 + 349 + static void compat_irq_eoi(struct irq_data *data) 350 + { 351 + data->chip->eoi(data->irq); 352 + } 353 + 354 + static void compat_irq_enable(struct irq_data *data) 355 + { 356 + data->chip->enable(data->irq); 357 + } 358 + 359 + static void compat_irq_disable(struct irq_data *data) 360 + { 361 + data->chip->disable(data->irq); 362 + } 363 + 364 + static void compat_irq_shutdown(struct irq_data *data) 365 + { 366 + data->chip->shutdown(data->irq); 367 + } 368 + 369 + static unsigned int compat_irq_startup(struct irq_data *data) 370 + { 371 + return data->chip->startup(data->irq); 372 + } 373 + 374 + static int compat_irq_set_affinity(struct irq_data *data, 375 + const struct cpumask *dest, bool force) 376 + { 377 + return 
data->chip->set_affinity(data->irq, dest); 378 + } 379 + 380 + static int compat_irq_set_type(struct irq_data *data, unsigned int type) 381 + { 382 + return data->chip->set_type(data->irq, type); 383 + } 384 + 385 + static int compat_irq_set_wake(struct irq_data *data, unsigned int on) 386 + { 387 + return data->chip->set_wake(data->irq, on); 388 + } 389 + 390 + static int compat_irq_retrigger(struct irq_data *data) 391 + { 392 + return data->chip->retrigger(data->irq); 393 + } 394 + 395 + static void compat_bus_lock(struct irq_data *data) 396 + { 397 + data->chip->bus_lock(data->irq); 398 + } 399 + 400 + static void compat_bus_sync_unlock(struct irq_data *data) 401 + { 402 + data->chip->bus_sync_unlock(data->irq); 403 + } 404 + #endif 405 406 /* 407 * Fixup enable/disable function pointers 408 */ 409 void irq_chip_set_defaults(struct irq_chip *chip) 410 { 411 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 412 /* 413 + * Compat fixup functions need to be before we set the 414 + * defaults for enable/disable/startup/shutdown 415 + */ 416 + if (chip->enable) 417 + chip->irq_enable = compat_irq_enable; 418 + if (chip->disable) 419 + chip->irq_disable = compat_irq_disable; 420 + if (chip->shutdown) 421 + chip->irq_shutdown = compat_irq_shutdown; 422 + if (chip->startup) 423 + chip->irq_startup = compat_irq_startup; 424 + #endif 425 + /* 426 + * The real defaults 427 + */ 428 + if (!chip->irq_enable) 429 + chip->irq_enable = default_enable; 430 + if (!chip->irq_disable) 431 + chip->irq_disable = default_disable; 432 + if (!chip->irq_startup) 433 + chip->irq_startup = default_startup; 434 + /* 435 + * We use chip->irq_disable, when the user provided its own. When 436 + * we have default_disable set for chip->irq_disable, then we need 437 * to use default_shutdown, otherwise the irq line is not 438 * disabled on free_irq(): 439 */ 440 + if (!chip->irq_shutdown) 441 + chip->irq_shutdown = chip->irq_disable != default_disable ? 
442 + chip->irq_disable : default_shutdown; 443 + 444 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 445 if (!chip->end) 446 chip->end = dummy_irq_chip.end; 447 + 448 + /* 449 + * Now fix up the remaining compat handlers 450 + */ 451 + if (chip->bus_lock) 452 + chip->irq_bus_lock = compat_bus_lock; 453 + if (chip->bus_sync_unlock) 454 + chip->irq_bus_sync_unlock = compat_bus_sync_unlock; 455 + if (chip->mask) 456 + chip->irq_mask = compat_irq_mask; 457 + if (chip->unmask) 458 + chip->irq_unmask = compat_irq_unmask; 459 + if (chip->ack) 460 + chip->irq_ack = compat_irq_ack; 461 + if (chip->mask_ack) 462 + chip->irq_mask_ack = compat_irq_mask_ack; 463 + if (chip->eoi) 464 + chip->irq_eoi = compat_irq_eoi; 465 + if (chip->set_affinity) 466 + chip->irq_set_affinity = compat_irq_set_affinity; 467 + if (chip->set_type) 468 + chip->irq_set_type = compat_irq_set_type; 469 + if (chip->set_wake) 470 + chip->irq_set_wake = compat_irq_set_wake; 471 + if (chip->retrigger) 472 + chip->irq_retrigger = compat_irq_retrigger; 473 + #endif 474 } 475 476 + static inline void mask_ack_irq(struct irq_desc *desc) 477 { 478 + if (desc->irq_data.chip->irq_mask_ack) 479 + desc->irq_data.chip->irq_mask_ack(&desc->irq_data); 480 else { 481 + desc->irq_data.chip->irq_mask(&desc->irq_data); 482 + if (desc->irq_data.chip->irq_ack) 483 + desc->irq_data.chip->irq_ack(&desc->irq_data); 484 } 485 desc->status |= IRQ_MASKED; 486 } 487 488 + static inline void mask_irq(struct irq_desc *desc) 489 { 490 + if (desc->irq_data.chip->irq_mask) { 491 + desc->irq_data.chip->irq_mask(&desc->irq_data); 492 desc->status |= IRQ_MASKED; 493 } 494 } 495 496 + static inline void unmask_irq(struct irq_desc *desc) 497 { 498 + if (desc->irq_data.chip->irq_unmask) { 499 + desc->irq_data.chip->irq_unmask(&desc->irq_data); 500 desc->status &= ~IRQ_MASKED; 501 } 502 } ··· 476 irqreturn_t action_ret; 477 478 raw_spin_lock(&desc->lock); 479 + mask_ack_irq(desc); 480 481 if (unlikely(desc->status & IRQ_INPROGRESS)) 482 goto out_unlock; ··· 502 desc->status &= ~IRQ_INPROGRESS; 503 504 if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) 505 + unmask_irq(desc); 506 out_unlock: 507 raw_spin_unlock(&desc->lock); 508 } ··· 539 action = desc->action; 540 if (unlikely(!action || (desc->status & IRQ_DISABLED))) { 541 desc->status |= IRQ_PENDING; 542 + mask_irq(desc); 543 goto out; 544 } 545 ··· 554 raw_spin_lock(&desc->lock); 555 desc->status &= ~IRQ_INPROGRESS; 556 out: 557 + desc->irq_data.chip->irq_eoi(&desc->irq_data); 558 559 raw_spin_unlock(&desc->lock); 560 } ··· 590 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || 591 !desc->action)) { 592 desc->status |= (IRQ_PENDING | IRQ_MASKED); 593 + mask_ack_irq(desc); 594 goto out_unlock; 595 } 596 kstat_incr_irqs_this_cpu(irq, desc); 597 598 /* Start handling the irq */ 599 + desc->irq_data.chip->irq_ack(&desc->irq_data); 600 601 /* Mark the IRQ currently in progress.*/ 602 desc->status |= IRQ_INPROGRESS; ··· 607 irqreturn_t action_ret; 608 609 if (unlikely(!action)) { 610 + mask_irq(desc); 611 goto out_unlock; 612 } 613 ··· 619 if (unlikely((desc->status & 620 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == 621 (IRQ_PENDING | IRQ_MASKED))) { 622 + unmask_irq(desc); 623 } 624 625 desc->status &= ~IRQ_PENDING; ··· 650 651 kstat_incr_irqs_this_cpu(irq, desc); 652 653 + if (desc->irq_data.chip->irq_ack) 654 + desc->irq_data.chip->irq_ack(&desc->irq_data); 655 656 action_ret = handle_IRQ_event(irq, desc->action); 657 if (!noirqdebug) 658 note_interrupt(irq, desc, action_ret); 659 660 + if 
(desc->irq_data.chip->irq_eoi) 661 + desc->irq_data.chip->irq_eoi(&desc->irq_data); 662 } 663 664 void ··· 676 677 if (!handle) 678 handle = handle_bad_irq; 679 + else if (desc->irq_data.chip == &no_irq_chip) { 680 printk(KERN_WARNING "Trying to install %sinterrupt handler " 681 "for IRQ%d\n", is_chained ? "chained " : "", irq); 682 /* ··· 686 * prevent us to setup the interrupt at all. Switch it to 687 * dummy_irq_chip for easy transition. 688 */ 689 + desc->irq_data.chip = &dummy_irq_chip; 690 } 691 692 + chip_bus_lock(desc); 693 raw_spin_lock_irqsave(&desc->lock, flags); 694 695 /* Uninstall? */ 696 if (handle == handle_bad_irq) { 697 + if (desc->irq_data.chip != &no_irq_chip) 698 + mask_ack_irq(desc); 699 desc->status |= IRQ_DISABLED; 700 desc->depth = 1; 701 } ··· 706 desc->status &= ~IRQ_DISABLED; 707 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 708 desc->depth = 0; 709 + desc->irq_data.chip->irq_startup(&desc->irq_data); 710 } 711 raw_spin_unlock_irqrestore(&desc->lock, flags); 712 + chip_bus_sync_unlock(desc); 713 } 714 EXPORT_SYMBOL_GPL(__set_irq_handler); 715 ··· 729 __set_irq_handler(irq, handle, 0, name); 730 } 731 732 + void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) 733 { 734 struct irq_desc *desc = irq_to_desc(irq); 735 unsigned long flags; 736 737 + if (!desc) 738 return; 739 + 740 + /* Sanitize flags */ 741 + set &= IRQF_MODIFY_MASK; 742 + clr &= IRQF_MODIFY_MASK; 743 744 raw_spin_lock_irqsave(&desc->lock, flags); 745 + desc->status &= ~clr; 746 + desc->status |= set; 747 raw_spin_unlock_irqrestore(&desc->lock, flags); 748 }
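irq_modify_status() is now the single, sanitized entry point for status-flag updates (only bits in IRQF_MODIFY_MASK get through), and set_irq_noprobe()/set_irq_probe() shrink to the inline wrappers shown in include/linux/irq.h above. A sketch of a platform reserving a level-triggered line:

#include <linux/irq.h>

static void foo_reserve_irq(unsigned int irq)	/* hypothetical platform code */
{
	/* level triggered, not autoprobable, not available to request_irq() */
	irq_set_status_flags(irq, IRQ_LEVEL | IRQ_NOPROBE | IRQ_NOREQUEST);
}

static void foo_release_irq(unsigned int irq)
{
	irq_clear_status_flags(irq, IRQ_NOPROBE | IRQ_NOREQUEST);
}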
+68
kernel/irq/dummychip.c
···
···
1 + /*
2 +  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
3 +  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
4 +  *
5 +  * This file contains the dummy interrupt chip implementation
6 +  */
7 + #include <linux/interrupt.h>
8 + #include <linux/irq.h>
9 + 
10 + #include "internals.h"
11 + 
12 + /*
13 +  * What should we do if we get a hw irq event on an illegal vector?
14 +  * Each architecture has to answer this itself.
15 +  */
16 + static void ack_bad(struct irq_data *data)
17 + {
18 + 	struct irq_desc *desc = irq_data_to_desc(data);
19 + 
20 + 	print_irq_desc(data->irq, desc);
21 + 	ack_bad_irq(data->irq);
22 + }
23 + 
24 + /*
25 +  * NOP functions
26 +  */
27 + static void noop(struct irq_data *data) { }
28 + 
29 + static unsigned int noop_ret(struct irq_data *data)
30 + {
31 + 	return 0;
32 + }
33 + 
34 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
35 + static void compat_noop(unsigned int irq) { }
36 + #define END_INIT .end = compat_noop
37 + #else
38 + #define END_INIT
39 + #endif
40 + 
41 + /*
42 +  * Generic no controller implementation
43 +  */
44 + struct irq_chip no_irq_chip = {
45 + 	.name		= "none",
46 + 	.irq_startup	= noop_ret,
47 + 	.irq_shutdown	= noop,
48 + 	.irq_enable	= noop,
49 + 	.irq_disable	= noop,
50 + 	.irq_ack	= ack_bad,
51 + 	END_INIT
52 + };
53 + 
54 + /*
55 +  * Generic dummy implementation which can be used for
56 +  * real dumb interrupt sources
57 +  */
58 + struct irq_chip dummy_irq_chip = {
59 + 	.name		= "dummy",
60 + 	.irq_startup	= noop_ret,
61 + 	.irq_shutdown	= noop,
62 + 	.irq_enable	= noop,
63 + 	.irq_disable	= noop,
64 + 	.irq_ack	= noop,
65 + 	.irq_mask	= noop,
66 + 	.irq_unmask	= noop,
67 + 	END_INIT
68 + };
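dummy_irq_chip is meant for lines with no real hardware to mask or ack, typically the child irqs behind a chained demultiplexer like the foo_demux_handler() sketched under irqdesc.h above:

#include <linux/irq.h>

static void foo_init_child_irqs(unsigned int first, unsigned int count)
{
	unsigned int i;

	for (i = first; i < first + count; i++)
		set_irq_chip_and_handler(i, &dummy_irq_chip, handle_simple_irq);
}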
+8 -333
kernel/irq/handle.c
··· 11 */ 12 13 #include <linux/irq.h> 14 - #include <linux/sched.h> 15 - #include <linux/slab.h> 16 - #include <linux/module.h> 17 #include <linux/random.h> 18 #include <linux/interrupt.h> 19 #include <linux/kernel_stat.h> 20 - #include <linux/rculist.h> 21 - #include <linux/hash.h> 22 - #include <linux/radix-tree.h> 23 #include <trace/events/irq.h> 24 25 #include "internals.h" 26 - 27 - /* 28 - * lockdep: we want to handle all irq_desc locks as a single lock-class: 29 - */ 30 - struct lock_class_key irq_desc_lock_class; 31 32 /** 33 * handle_bad_irq - handle spurious and unhandled irqs ··· 33 kstat_incr_irqs_this_cpu(irq, desc); 34 ack_bad_irq(irq); 35 } 36 - 37 - #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 38 - static void __init init_irq_default_affinity(void) 39 - { 40 - alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 41 - cpumask_setall(irq_default_affinity); 42 - } 43 - #else 44 - static void __init init_irq_default_affinity(void) 45 - { 46 - } 47 - #endif 48 - 49 - /* 50 - * Linux has a controller-independent interrupt architecture. 51 - * Every controller has a 'controller-template', that is used 52 - * by the main code to do the right thing. Each driver-visible 53 - * interrupt source is transparently wired to the appropriate 54 - * controller. Thus drivers need not be aware of the 55 - * interrupt-controller. 56 - * 57 - * The code is designed to be easily extended with new/different 58 - * interrupt controllers, without having to do assembly magic or 59 - * having to touch the generic code. 60 - * 61 - * Controller mappings for all interrupt sources: 62 - */ 63 - int nr_irqs = NR_IRQS; 64 - EXPORT_SYMBOL_GPL(nr_irqs); 65 - 66 - #ifdef CONFIG_SPARSE_IRQ 67 - 68 - static struct irq_desc irq_desc_init = { 69 - .irq = -1, 70 - .status = IRQ_DISABLED, 71 - .chip = &no_irq_chip, 72 - .handle_irq = handle_bad_irq, 73 - .depth = 1, 74 - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 75 - }; 76 - 77 - void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) 78 - { 79 - void *ptr; 80 - 81 - ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), 82 - GFP_ATOMIC, node); 83 - 84 - /* 85 - * don't overwite if can not get new one 86 - * init_copy_kstat_irqs() could still use old one 87 - */ 88 - if (ptr) { 89 - printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); 90 - desc->kstat_irqs = ptr; 91 - } 92 - } 93 - 94 - static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) 95 - { 96 - memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); 97 - 98 - raw_spin_lock_init(&desc->lock); 99 - desc->irq = irq; 100 - #ifdef CONFIG_SMP 101 - desc->node = node; 102 - #endif 103 - lockdep_set_class(&desc->lock, &irq_desc_lock_class); 104 - init_kstat_irqs(desc, node, nr_cpu_ids); 105 - if (!desc->kstat_irqs) { 106 - printk(KERN_ERR "can not alloc kstat_irqs\n"); 107 - BUG_ON(1); 108 - } 109 - if (!alloc_desc_masks(desc, node, false)) { 110 - printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); 111 - BUG_ON(1); 112 - } 113 - init_desc_masks(desc); 114 - arch_init_chip_data(desc, node); 115 - } 116 - 117 - /* 118 - * Protect the sparse_irqs: 119 - */ 120 - DEFINE_RAW_SPINLOCK(sparse_irq_lock); 121 - 122 - static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); 123 - 124 - static void set_irq_desc(unsigned int irq, struct irq_desc *desc) 125 - { 126 - radix_tree_insert(&irq_desc_tree, irq, desc); 127 - } 128 - 129 - struct irq_desc *irq_to_desc(unsigned int irq) 130 - { 131 - return radix_tree_lookup(&irq_desc_tree, irq); 132 - } 133 - 134 - void 
replace_irq_desc(unsigned int irq, struct irq_desc *desc) 135 - { 136 - void **ptr; 137 - 138 - ptr = radix_tree_lookup_slot(&irq_desc_tree, irq); 139 - if (ptr) 140 - radix_tree_replace_slot(ptr, desc); 141 - } 142 - 143 - static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { 144 - [0 ... NR_IRQS_LEGACY-1] = { 145 - .irq = -1, 146 - .status = IRQ_DISABLED, 147 - .chip = &no_irq_chip, 148 - .handle_irq = handle_bad_irq, 149 - .depth = 1, 150 - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 151 - } 152 - }; 153 - 154 - static unsigned int *kstat_irqs_legacy; 155 - 156 - int __init early_irq_init(void) 157 - { 158 - struct irq_desc *desc; 159 - int legacy_count; 160 - int node; 161 - int i; 162 - 163 - init_irq_default_affinity(); 164 - 165 - /* initialize nr_irqs based on nr_cpu_ids */ 166 - arch_probe_nr_irqs(); 167 - printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); 168 - 169 - desc = irq_desc_legacy; 170 - legacy_count = ARRAY_SIZE(irq_desc_legacy); 171 - node = first_online_node; 172 - 173 - /* allocate based on nr_cpu_ids */ 174 - kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * 175 - sizeof(int), GFP_NOWAIT, node); 176 - 177 - for (i = 0; i < legacy_count; i++) { 178 - desc[i].irq = i; 179 - #ifdef CONFIG_SMP 180 - desc[i].node = node; 181 - #endif 182 - desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; 183 - lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 184 - alloc_desc_masks(&desc[i], node, true); 185 - init_desc_masks(&desc[i]); 186 - set_irq_desc(i, &desc[i]); 187 - } 188 - 189 - return arch_early_irq_init(); 190 - } 191 - 192 - struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) 193 - { 194 - struct irq_desc *desc; 195 - unsigned long flags; 196 - 197 - if (irq >= nr_irqs) { 198 - WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", 199 - irq, nr_irqs); 200 - return NULL; 201 - } 202 - 203 - desc = irq_to_desc(irq); 204 - if (desc) 205 - return desc; 206 - 207 - raw_spin_lock_irqsave(&sparse_irq_lock, flags); 208 - 209 - /* We have to check it to avoid races with another CPU */ 210 - desc = irq_to_desc(irq); 211 - if (desc) 212 - goto out_unlock; 213 - 214 - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); 215 - 216 - printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); 217 - if (!desc) { 218 - printk(KERN_ERR "can not alloc irq_desc\n"); 219 - BUG_ON(1); 220 - } 221 - init_one_irq_desc(irq, desc, node); 222 - 223 - set_irq_desc(irq, desc); 224 - 225 - out_unlock: 226 - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 227 - 228 - return desc; 229 - } 230 - 231 - #else /* !CONFIG_SPARSE_IRQ */ 232 - 233 - struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 234 - [0 ... 
NR_IRQS-1] = { 235 - .status = IRQ_DISABLED, 236 - .chip = &no_irq_chip, 237 - .handle_irq = handle_bad_irq, 238 - .depth = 1, 239 - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), 240 - } 241 - }; 242 - 243 - static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; 244 - int __init early_irq_init(void) 245 - { 246 - struct irq_desc *desc; 247 - int count; 248 - int i; 249 - 250 - init_irq_default_affinity(); 251 - 252 - printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); 253 - 254 - desc = irq_desc; 255 - count = ARRAY_SIZE(irq_desc); 256 - 257 - for (i = 0; i < count; i++) { 258 - desc[i].irq = i; 259 - alloc_desc_masks(&desc[i], 0, true); 260 - init_desc_masks(&desc[i]); 261 - desc[i].kstat_irqs = kstat_irqs_all[i]; 262 - } 263 - return arch_early_irq_init(); 264 - } 265 - 266 - struct irq_desc *irq_to_desc(unsigned int irq) 267 - { 268 - return (irq < NR_IRQS) ? irq_desc + irq : NULL; 269 - } 270 - 271 - struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) 272 - { 273 - return irq_to_desc(irq); 274 - } 275 - #endif /* !CONFIG_SPARSE_IRQ */ 276 - 277 - void clear_kstat_irqs(struct irq_desc *desc) 278 - { 279 - memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); 280 - } 281 - 282 - /* 283 - * What should we do if we get a hw irq event on an illegal vector? 284 - * Each architecture has to answer this themself. 285 - */ 286 - static void ack_bad(unsigned int irq) 287 - { 288 - struct irq_desc *desc = irq_to_desc(irq); 289 - 290 - print_irq_desc(irq, desc); 291 - ack_bad_irq(irq); 292 - } 293 - 294 - /* 295 - * NOP functions 296 - */ 297 - static void noop(unsigned int irq) 298 - { 299 - } 300 - 301 - static unsigned int noop_ret(unsigned int irq) 302 - { 303 - return 0; 304 - } 305 - 306 - /* 307 - * Generic no controller implementation 308 - */ 309 - struct irq_chip no_irq_chip = { 310 - .name = "none", 311 - .startup = noop_ret, 312 - .shutdown = noop, 313 - .enable = noop, 314 - .disable = noop, 315 - .ack = ack_bad, 316 - .end = noop, 317 - }; 318 - 319 - /* 320 - * Generic dummy implementation which can be used for 321 - * real dumb interrupt sources 322 - */ 323 - struct irq_chip dummy_irq_chip = { 324 - .name = "dummy", 325 - .startup = noop_ret, 326 - .shutdown = noop, 327 - .enable = noop, 328 - .disable = noop, 329 - .ack = noop, 330 - .mask = noop, 331 - .unmask = noop, 332 - .end = noop, 333 - }; 334 335 /* 336 * Special, empty irq handler: ··· 150 /* 151 * No locking required for CPU-local interrupts: 152 */ 153 - if (desc->chip->ack) 154 - desc->chip->ack(irq); 155 if (likely(!(desc->status & IRQ_DISABLED))) { 156 action_ret = handle_IRQ_event(irq, desc->action); 157 if (!noirqdebug) 158 note_interrupt(irq, desc, action_ret); 159 } 160 - desc->chip->end(irq); 161 return 1; 162 } 163 164 raw_spin_lock(&desc->lock); 165 - if (desc->chip->ack) 166 - desc->chip->ack(irq); 167 /* 168 * REPLAY is when Linux resends an IRQ that was dropped earlier 169 * WAITING is used by probe to mark irqs that are being tested ··· 223 * The ->end() handler has to deal with interrupts which got 224 * disabled while the handler was running. 
225 */ 226 - desc->chip->end(irq); 227 raw_spin_unlock(&desc->lock); 228 229 return 1; 230 } 231 #endif 232 - 233 - void early_init_irq_lock_class(void) 234 - { 235 - struct irq_desc *desc; 236 - int i; 237 - 238 - for_each_irq_desc(i, desc) { 239 - lockdep_set_class(&desc->lock, &irq_desc_lock_class); 240 - } 241 - } 242 - 243 - unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 244 - { 245 - struct irq_desc *desc = irq_to_desc(irq); 246 - return desc ? desc->kstat_irqs[cpu] : 0; 247 - } 248 - EXPORT_SYMBOL(kstat_irqs_cpu); 249 -
··· 11 */ 12 13 #include <linux/irq.h> 14 #include <linux/random.h> 15 + #include <linux/sched.h> 16 #include <linux/interrupt.h> 17 #include <linux/kernel_stat.h> 18 + 19 #include <trace/events/irq.h> 20 21 #include "internals.h" 22 23 /** 24 * handle_bad_irq - handle spurious and unhandled irqs ··· 42 kstat_incr_irqs_this_cpu(irq, desc); 43 ack_bad_irq(irq); 44 } 45 46 /* 47 * Special, empty irq handler: ··· 457 /* 458 * No locking required for CPU-local interrupts: 459 */ 460 + if (desc->irq_data.chip->ack) 461 + desc->irq_data.chip->ack(irq); 462 if (likely(!(desc->status & IRQ_DISABLED))) { 463 action_ret = handle_IRQ_event(irq, desc->action); 464 if (!noirqdebug) 465 note_interrupt(irq, desc, action_ret); 466 } 467 + desc->irq_data.chip->end(irq); 468 return 1; 469 } 470 471 raw_spin_lock(&desc->lock); 472 + if (desc->irq_data.chip->ack) 473 + desc->irq_data.chip->ack(irq); 474 /* 475 * REPLAY is when Linux resends an IRQ that was dropped earlier 476 * WAITING is used by probe to mark irqs that are being tested ··· 530 * The ->end() handler has to deal with interrupts which got 531 * disabled while the handler was running. 532 */ 533 + desc->irq_data.chip->end(irq); 534 raw_spin_unlock(&desc->lock); 535 536 return 1; 537 } 538 #endif
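For illustration only (not part of this merge): the per-CPU fast path above now reaches the chip through desc->irq_data rather than the removed desc->chip pointer. A minimal sketch of the new access pattern, with an invented helper name; note that ack/end are still the legacy unsigned-int callbacks at this stage of the conversion:

static void sketch_percpu_path(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;
	irqreturn_t action_ret;

	if (chip->ack)
		chip->ack(irq);
	action_ret = handle_IRQ_event(irq, desc->action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);
	if (chip->end)
		chip->end(irq);
}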
+26 -15
kernel/irq/internals.h
··· 1 /* 2 * IRQ subsystem internal functions and variables: 3 */ 4 5 extern int noirqdebug; 6 7 /* Set default functions for irq_chip structures: */ 8 extern void irq_chip_set_defaults(struct irq_chip *chip); ··· 18 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); 19 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); 20 21 - extern struct lock_class_key irq_desc_lock_class; 22 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 23 - extern void clear_kstat_irqs(struct irq_desc *desc); 24 - extern raw_spinlock_t sparse_irq_lock; 25 26 - #ifdef CONFIG_SPARSE_IRQ 27 - void replace_irq_desc(unsigned int irq, struct irq_desc *desc); 28 - #endif 29 30 #ifdef CONFIG_PROC_FS 31 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); 32 extern void register_handler_proc(unsigned int irq, struct irqaction *action); 33 extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); 34 #else 35 static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } 36 static inline void register_handler_proc(unsigned int irq, 37 struct irqaction *action) { } 38 static inline void unregister_handler_proc(unsigned int irq, ··· 41 42 extern void irq_set_thread_affinity(struct irq_desc *desc); 43 44 - /* Inline functions for support of irq chips on slow busses */ 45 - static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) 46 { 47 - if (unlikely(desc->chip->bus_lock)) 48 - desc->chip->bus_lock(irq); 49 } 50 51 - static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) 52 { 53 - if (unlikely(desc->chip->bus_sync_unlock)) 54 - desc->chip->bus_sync_unlock(irq); 55 } 56 57 /* ··· 78 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); 79 printk("->handle_irq(): %p, ", desc->handle_irq); 80 print_symbol("%s\n", (unsigned long)desc->handle_irq); 81 - printk("->chip(): %p, ", desc->chip); 82 - print_symbol("%s\n", (unsigned long)desc->chip); 83 printk("->action(): %p\n", desc->action); 84 if (desc->action) { 85 printk("->action->handler(): %p, ", desc->action->handler);
··· 1 /* 2 * IRQ subsystem internal functions and variables: 3 */ 4 + #include <linux/irqdesc.h> 5 6 extern int noirqdebug; 7 + 8 + #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) 9 10 /* Set default functions for irq_chip structures: */ 11 extern void irq_chip_set_defaults(struct irq_chip *chip); ··· 15 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); 16 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); 17 18 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 19 20 + /* Resending of interrupts :*/ 21 + void check_irq_resend(struct irq_desc *desc, unsigned int irq); 22 23 #ifdef CONFIG_PROC_FS 24 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); 25 + extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); 26 extern void register_handler_proc(unsigned int irq, struct irqaction *action); 27 extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); 28 #else 29 static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } 30 + static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { } 31 static inline void register_handler_proc(unsigned int irq, 32 struct irqaction *action) { } 33 static inline void unregister_handler_proc(unsigned int irq, ··· 40 41 extern void irq_set_thread_affinity(struct irq_desc *desc); 42 43 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 44 + static inline void irq_end(unsigned int irq, struct irq_desc *desc) 45 { 46 + if (desc->irq_data.chip && desc->irq_data.chip->end) 47 + desc->irq_data.chip->end(irq); 48 + } 49 + #else 50 + static inline void irq_end(unsigned int irq, struct irq_desc *desc) { } 51 + #endif 52 + 53 + /* Inline functions for support of irq chips on slow busses */ 54 + static inline void chip_bus_lock(struct irq_desc *desc) 55 + { 56 + if (unlikely(desc->irq_data.chip->irq_bus_lock)) 57 + desc->irq_data.chip->irq_bus_lock(&desc->irq_data); 58 } 59 60 + static inline void chip_bus_sync_unlock(struct irq_desc *desc) 61 { 62 + if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) 63 + desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); 64 } 65 66 /* ··· 67 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); 68 printk("->handle_irq(): %p, ", desc->handle_irq); 69 print_symbol("%s\n", (unsigned long)desc->handle_irq); 70 + printk("->irq_data.chip(): %p, ", desc->irq_data.chip); 71 + print_symbol("%s\n", (unsigned long)desc->irq_data.chip); 72 printk("->action(): %p\n", desc->action); 73 if (desc->action) { 74 printk("->action->handler(): %p, ", desc->action->handler);
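For illustration: chip_bus_lock()/chip_bus_sync_unlock() above now take only the descriptor and forward to the chip's irq_data based hooks. A hedged sketch of a slow-bus chip (say, an I2C GPIO expander) filling those hooks in; the expander structure, its mutex and the cached mask are all invented, only the callback signatures come from the diff above:

struct sketch_expander {
	struct mutex	buslock;
	u32		mask_cache;	/* flushed over the slow bus on unlock */
};

static void sketch_bus_lock(struct irq_data *data)
{
	struct sketch_expander *exp = data->chip_data;

	mutex_lock(&exp->buslock);
}

static void sketch_bus_sync_unlock(struct irq_data *data)
{
	struct sketch_expander *exp = data->chip_data;

	/* ... write exp->mask_cache out over I2C here ... */
	mutex_unlock(&exp->buslock);
}

static struct irq_chip sketch_expander_chip = {
	.name			= "sketch-expander",
	.irq_bus_lock		= sketch_bus_lock,
	.irq_bus_sync_unlock	= sketch_bus_sync_unlock,
};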
+395
kernel/irq/irqdesc.c
···
··· 1 + /* 2 + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 3 + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King 4 + * 5 + * This file contains the interrupt descriptor management code 6 + * 7 + * Detailed information is available in Documentation/DocBook/genericirq 8 + * 9 + */ 10 + #include <linux/irq.h> 11 + #include <linux/slab.h> 12 + #include <linux/module.h> 13 + #include <linux/interrupt.h> 14 + #include <linux/kernel_stat.h> 15 + #include <linux/radix-tree.h> 16 + #include <linux/bitmap.h> 17 + 18 + #include "internals.h" 19 + 20 + /* 21 + * lockdep: we want to handle all irq_desc locks as a single lock-class: 22 + */ 23 + static struct lock_class_key irq_desc_lock_class; 24 + 25 + #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 26 + static void __init init_irq_default_affinity(void) 27 + { 28 + alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 29 + cpumask_setall(irq_default_affinity); 30 + } 31 + #else 32 + static void __init init_irq_default_affinity(void) 33 + { 34 + } 35 + #endif 36 + 37 + #ifdef CONFIG_SMP 38 + static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) 39 + { 40 + if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) 41 + return -ENOMEM; 42 + 43 + #ifdef CONFIG_GENERIC_PENDING_IRQ 44 + if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { 45 + free_cpumask_var(desc->irq_data.affinity); 46 + return -ENOMEM; 47 + } 48 + #endif 49 + return 0; 50 + } 51 + 52 + static void desc_smp_init(struct irq_desc *desc, int node) 53 + { 54 + desc->irq_data.node = node; 55 + cpumask_copy(desc->irq_data.affinity, irq_default_affinity); 56 + #ifdef CONFIG_GENERIC_PENDING_IRQ 57 + cpumask_clear(desc->pending_mask); 58 + #endif 59 + } 60 + 61 + static inline int desc_node(struct irq_desc *desc) 62 + { 63 + return desc->irq_data.node; 64 + } 65 + 66 + #else 67 + static inline int 68 + alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } 69 + static inline void desc_smp_init(struct irq_desc *desc, int node) { } 70 + static inline int desc_node(struct irq_desc *desc) { return 0; } 71 + #endif 72 + 73 + static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) 74 + { 75 + desc->irq_data.irq = irq; 76 + desc->irq_data.chip = &no_irq_chip; 77 + desc->irq_data.chip_data = NULL; 78 + desc->irq_data.handler_data = NULL; 79 + desc->irq_data.msi_desc = NULL; 80 + desc->status = IRQ_DEFAULT_INIT_FLAGS; 81 + desc->handle_irq = handle_bad_irq; 82 + desc->depth = 1; 83 + desc->irq_count = 0; 84 + desc->irqs_unhandled = 0; 85 + desc->name = NULL; 86 + memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); 87 + desc_smp_init(desc, node); 88 + } 89 + 90 + int nr_irqs = NR_IRQS; 91 + EXPORT_SYMBOL_GPL(nr_irqs); 92 + 93 + static DEFINE_MUTEX(sparse_irq_lock); 94 + static DECLARE_BITMAP(allocated_irqs, NR_IRQS); 95 + 96 + #ifdef CONFIG_SPARSE_IRQ 97 + 98 + static RADIX_TREE(irq_desc_tree, GFP_KERNEL); 99 + 100 + static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) 101 + { 102 + radix_tree_insert(&irq_desc_tree, irq, desc); 103 + } 104 + 105 + struct irq_desc *irq_to_desc(unsigned int irq) 106 + { 107 + return radix_tree_lookup(&irq_desc_tree, irq); 108 + } 109 + 110 + static void delete_irq_desc(unsigned int irq) 111 + { 112 + radix_tree_delete(&irq_desc_tree, irq); 113 + } 114 + 115 + #ifdef CONFIG_SMP 116 + static void free_masks(struct irq_desc *desc) 117 + { 118 + #ifdef CONFIG_GENERIC_PENDING_IRQ 119 + free_cpumask_var(desc->pending_mask); 120 + #endif 
121 + free_cpumask_var(desc->irq_data.affinity); 122 + } 123 + #else 124 + static inline void free_masks(struct irq_desc *desc) { } 125 + #endif 126 + 127 + static struct irq_desc *alloc_desc(int irq, int node) 128 + { 129 + struct irq_desc *desc; 130 + gfp_t gfp = GFP_KERNEL; 131 + 132 + desc = kzalloc_node(sizeof(*desc), gfp, node); 133 + if (!desc) 134 + return NULL; 135 + /* allocate based on nr_cpu_ids */ 136 + desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs), 137 + gfp, node); 138 + if (!desc->kstat_irqs) 139 + goto err_desc; 140 + 141 + if (alloc_masks(desc, gfp, node)) 142 + goto err_kstat; 143 + 144 + raw_spin_lock_init(&desc->lock); 145 + lockdep_set_class(&desc->lock, &irq_desc_lock_class); 146 + 147 + desc_set_defaults(irq, desc, node); 148 + 149 + return desc; 150 + 151 + err_kstat: 152 + kfree(desc->kstat_irqs); 153 + err_desc: 154 + kfree(desc); 155 + return NULL; 156 + } 157 + 158 + static void free_desc(unsigned int irq) 159 + { 160 + struct irq_desc *desc = irq_to_desc(irq); 161 + 162 + unregister_irq_proc(irq, desc); 163 + 164 + mutex_lock(&sparse_irq_lock); 165 + delete_irq_desc(irq); 166 + mutex_unlock(&sparse_irq_lock); 167 + 168 + free_masks(desc); 169 + kfree(desc->kstat_irqs); 170 + kfree(desc); 171 + } 172 + 173 + static int alloc_descs(unsigned int start, unsigned int cnt, int node) 174 + { 175 + struct irq_desc *desc; 176 + int i; 177 + 178 + for (i = 0; i < cnt; i++) { 179 + desc = alloc_desc(start + i, node); 180 + if (!desc) 181 + goto err; 182 + mutex_lock(&sparse_irq_lock); 183 + irq_insert_desc(start + i, desc); 184 + mutex_unlock(&sparse_irq_lock); 185 + } 186 + return start; 187 + 188 + err: 189 + for (i--; i >= 0; i--) 190 + free_desc(start + i); 191 + 192 + mutex_lock(&sparse_irq_lock); 193 + bitmap_clear(allocated_irqs, start, cnt); 194 + mutex_unlock(&sparse_irq_lock); 195 + return -ENOMEM; 196 + } 197 + 198 + struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) 199 + { 200 + int res = irq_alloc_descs(irq, irq, 1, node); 201 + 202 + if (res == -EEXIST || res == irq) 203 + return irq_to_desc(irq); 204 + return NULL; 205 + } 206 + 207 + int __init early_irq_init(void) 208 + { 209 + int i, initcnt, node = first_online_node; 210 + struct irq_desc *desc; 211 + 212 + init_irq_default_affinity(); 213 + 214 + /* Let arch update nr_irqs and return the nr of preallocated irqs */ 215 + initcnt = arch_probe_nr_irqs(); 216 + printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); 217 + 218 + for (i = 0; i < initcnt; i++) { 219 + desc = alloc_desc(i, node); 220 + set_bit(i, allocated_irqs); 221 + irq_insert_desc(i, desc); 222 + } 223 + return arch_early_irq_init(); 224 + } 225 + 226 + #else /* !CONFIG_SPARSE_IRQ */ 227 + 228 + struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 229 + [0 ... 
NR_IRQS-1] = { 230 + .status = IRQ_DEFAULT_INIT_FLAGS, 231 + .handle_irq = handle_bad_irq, 232 + .depth = 1, 233 + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), 234 + } 235 + }; 236 + 237 + static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; 238 + int __init early_irq_init(void) 239 + { 240 + int count, i, node = first_online_node; 241 + struct irq_desc *desc; 242 + 243 + init_irq_default_affinity(); 244 + 245 + printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); 246 + 247 + desc = irq_desc; 248 + count = ARRAY_SIZE(irq_desc); 249 + 250 + for (i = 0; i < count; i++) { 251 + desc[i].irq_data.irq = i; 252 + desc[i].irq_data.chip = &no_irq_chip; 253 + desc[i].kstat_irqs = kstat_irqs_all[i]; 254 + alloc_masks(desc + i, GFP_KERNEL, node); 255 + desc_smp_init(desc + i, node); 256 + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 257 + } 258 + return arch_early_irq_init(); 259 + } 260 + 261 + struct irq_desc *irq_to_desc(unsigned int irq) 262 + { 263 + return (irq < NR_IRQS) ? irq_desc + irq : NULL; 264 + } 265 + 266 + struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) 267 + { 268 + return irq_to_desc(irq); 269 + } 270 + 271 + static void free_desc(unsigned int irq) 272 + { 273 + dynamic_irq_cleanup(irq); 274 + } 275 + 276 + static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) 277 + { 278 + return start; 279 + } 280 + #endif /* !CONFIG_SPARSE_IRQ */ 281 + 282 + /* Dynamic interrupt handling */ 283 + 284 + /** 285 + * irq_free_descs - free irq descriptors 286 + * @from: Start of descriptor range 287 + * @cnt: Number of consecutive irqs to free 288 + */ 289 + void irq_free_descs(unsigned int from, unsigned int cnt) 290 + { 291 + int i; 292 + 293 + if (from >= nr_irqs || (from + cnt) > nr_irqs) 294 + return; 295 + 296 + for (i = 0; i < cnt; i++) 297 + free_desc(from + i); 298 + 299 + mutex_lock(&sparse_irq_lock); 300 + bitmap_clear(allocated_irqs, from, cnt); 301 + mutex_unlock(&sparse_irq_lock); 302 + } 303 + 304 + /** 305 + * irq_alloc_descs - allocate and initialize a range of irq descriptors 306 + * @irq: Allocate for specific irq number if irq >= 0 307 + * @from: Start the search from this irq number 308 + * @cnt: Number of consecutive irqs to allocate. 
309 + * @node: Preferred node on which the irq descriptor should be allocated 310 + * 311 + * Returns the first irq number or error code 312 + */ 313 + int __ref 314 + irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) 315 + { 316 + int start, ret; 317 + 318 + if (!cnt) 319 + return -EINVAL; 320 + 321 + mutex_lock(&sparse_irq_lock); 322 + 323 + start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); 324 + ret = -EEXIST; 325 + if (irq >=0 && start != irq) 326 + goto err; 327 + 328 + ret = -ENOMEM; 329 + if (start >= nr_irqs) 330 + goto err; 331 + 332 + bitmap_set(allocated_irqs, start, cnt); 333 + mutex_unlock(&sparse_irq_lock); 334 + return alloc_descs(start, cnt, node); 335 + 336 + err: 337 + mutex_unlock(&sparse_irq_lock); 338 + return ret; 339 + } 340 + 341 + /** 342 + * irq_reserve_irqs - mark irqs allocated 343 + * @from: mark from irq number 344 + * @cnt: number of irqs to mark 345 + * 346 + * Returns 0 on success or an appropriate error code 347 + */ 348 + int irq_reserve_irqs(unsigned int from, unsigned int cnt) 349 + { 350 + unsigned int start; 351 + int ret = 0; 352 + 353 + if (!cnt || (from + cnt) > nr_irqs) 354 + return -EINVAL; 355 + 356 + mutex_lock(&sparse_irq_lock); 357 + start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); 358 + if (start == from) 359 + bitmap_set(allocated_irqs, start, cnt); 360 + else 361 + ret = -EEXIST; 362 + mutex_unlock(&sparse_irq_lock); 363 + return ret; 364 + } 365 + 366 + /** 367 + * irq_get_next_irq - get next allocated irq number 368 + * @offset: where to start the search 369 + * 370 + * Returns next irq number after offset or nr_irqs if none is found. 371 + */ 372 + unsigned int irq_get_next_irq(unsigned int offset) 373 + { 374 + return find_next_bit(allocated_irqs, nr_irqs, offset); 375 + } 376 + 377 + /** 378 + * dynamic_irq_cleanup - cleanup a dynamically allocated irq 379 + * @irq: irq number to initialize 380 + */ 381 + void dynamic_irq_cleanup(unsigned int irq) 382 + { 383 + struct irq_desc *desc = irq_to_desc(irq); 384 + unsigned long flags; 385 + 386 + raw_spin_lock_irqsave(&desc->lock, flags); 387 + desc_set_defaults(irq, desc, desc_node(desc)); 388 + raw_spin_unlock_irqrestore(&desc->lock, flags); 389 + } 390 + 391 + unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 392 + { 393 + struct irq_desc *desc = irq_to_desc(irq); 394 + return desc ? desc->kstat_irqs[cpu] : 0; 395 + }
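For illustration: irq_alloc_descs()/irq_free_descs() above form the whole new dynamic interface. A usage sketch from a hypothetical driver (function names invented; the signatures and return convention are the ones defined above):

/* allocate 8 consecutive descriptors, searching upward from 64 */
int sketch_setup_block(int node)
{
	int irq_base = irq_alloc_descs(-1, 64, 8, node);

	if (irq_base < 0)	/* -EEXIST or -ENOMEM, per the code above */
		return irq_base;

	/* ... install chips and handlers for irq_base .. irq_base + 7 ... */
	return irq_base;
}

void sketch_teardown_block(unsigned int irq_base)
{
	irq_free_descs(irq_base, 8);
}

Consumers can then walk the allocated space with irq_get_next_irq(), e.g. for (i = irq_get_next_irq(0); i < nr_irqs; i = irq_get_next_irq(i + 1)), since it returns nr_irqs when nothing further is allocated.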
+44 -43
kernel/irq/manage.c
··· 73 { 74 struct irq_desc *desc = irq_to_desc(irq); 75 76 - if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || 77 - !desc->chip->set_affinity) 78 return 0; 79 80 return 1; ··· 109 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 110 { 111 struct irq_desc *desc = irq_to_desc(irq); 112 unsigned long flags; 113 114 - if (!desc->chip->set_affinity) 115 return -EINVAL; 116 117 raw_spin_lock_irqsave(&desc->lock, flags); 118 119 #ifdef CONFIG_GENERIC_PENDING_IRQ 120 if (desc->status & IRQ_MOVE_PCNTXT) { 121 - if (!desc->chip->set_affinity(irq, cpumask)) { 122 - cpumask_copy(desc->affinity, cpumask); 123 irq_set_thread_affinity(desc); 124 } 125 } ··· 129 cpumask_copy(desc->pending_mask, cpumask); 130 } 131 #else 132 - if (!desc->chip->set_affinity(irq, cpumask)) { 133 - cpumask_copy(desc->affinity, cpumask); 134 irq_set_thread_affinity(desc); 135 } 136 #endif ··· 169 * one of the targets is online. 170 */ 171 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 172 - if (cpumask_any_and(desc->affinity, cpu_online_mask) 173 < nr_cpu_ids) 174 goto set_affinity; 175 else 176 desc->status &= ~IRQ_AFFINITY_SET; 177 } 178 179 - cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); 180 set_affinity: 181 - desc->chip->set_affinity(irq, desc->affinity); 182 183 return 0; 184 } ··· 224 225 if (!desc->depth++) { 226 desc->status |= IRQ_DISABLED; 227 - desc->chip->disable(irq); 228 } 229 } 230 ··· 247 if (!desc) 248 return; 249 250 - chip_bus_lock(irq, desc); 251 raw_spin_lock_irqsave(&desc->lock, flags); 252 __disable_irq(desc, irq, false); 253 raw_spin_unlock_irqrestore(&desc->lock, flags); 254 - chip_bus_sync_unlock(irq, desc); 255 } 256 EXPORT_SYMBOL(disable_irq_nosync); 257 ··· 314 * IRQ line is re-enabled. 315 * 316 * This function may be called from IRQ context only when 317 - * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 318 */ 319 void enable_irq(unsigned int irq) 320 { ··· 324 if (!desc) 325 return; 326 327 - chip_bus_lock(irq, desc); 328 raw_spin_lock_irqsave(&desc->lock, flags); 329 __enable_irq(desc, irq, false); 330 raw_spin_unlock_irqrestore(&desc->lock, flags); 331 - chip_bus_sync_unlock(irq, desc); 332 } 333 EXPORT_SYMBOL(enable_irq); 334 ··· 337 struct irq_desc *desc = irq_to_desc(irq); 338 int ret = -ENXIO; 339 340 - if (desc->chip->set_wake) 341 - ret = desc->chip->set_wake(irq, on); 342 343 return ret; 344 } ··· 430 } 431 432 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 433 - unsigned long flags) 434 { 435 int ret; 436 - struct irq_chip *chip = desc->chip; 437 438 - if (!chip || !chip->set_type) { 439 /* 440 * IRQF_TRIGGER_* but the PIC does not support multiple 441 * flow-types? 
··· 446 } 447 448 /* caller masked out all except trigger mode flags */ 449 - ret = chip->set_type(irq, flags); 450 451 if (ret) 452 - pr_err("setting trigger mode %d for irq %u failed (%pF)\n", 453 - (int)flags, irq, chip->set_type); 454 else { 455 if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 456 flags |= IRQ_LEVEL; ··· 458 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); 459 desc->status |= flags; 460 461 - if (chip != desc->chip) 462 - irq_chip_set_defaults(desc->chip); 463 } 464 465 return ret; ··· 508 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) 509 { 510 again: 511 - chip_bus_lock(irq, desc); 512 raw_spin_lock_irq(&desc->lock); 513 514 /* ··· 522 */ 523 if (unlikely(desc->status & IRQ_INPROGRESS)) { 524 raw_spin_unlock_irq(&desc->lock); 525 - chip_bus_sync_unlock(irq, desc); 526 cpu_relax(); 527 goto again; 528 } 529 530 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { 531 desc->status &= ~IRQ_MASKED; 532 - desc->chip->unmask(irq); 533 } 534 raw_spin_unlock_irq(&desc->lock); 535 - chip_bus_sync_unlock(irq, desc); 536 } 537 538 #ifdef CONFIG_SMP ··· 557 } 558 559 raw_spin_lock_irq(&desc->lock); 560 - cpumask_copy(mask, desc->affinity); 561 raw_spin_unlock_irq(&desc->lock); 562 563 set_cpus_allowed_ptr(current, mask); ··· 658 if (!desc) 659 return -EINVAL; 660 661 - if (desc->chip == &no_irq_chip) 662 return -ENOSYS; 663 /* 664 * Some drivers like serial.c use request_irq() heavily, ··· 753 } 754 755 if (!shared) { 756 - irq_chip_set_defaults(desc->chip); 757 758 init_waitqueue_head(&desc->wait_for_threads); 759 ··· 780 if (!(desc->status & IRQ_NOAUTOEN)) { 781 desc->depth = 0; 782 desc->status &= ~IRQ_DISABLED; 783 - desc->chip->startup(irq); 784 } else 785 /* Undo nested disables: */ 786 desc->depth = 1; ··· 913 914 /* Currently used only by UML, might disappear one day: */ 915 #ifdef CONFIG_IRQ_RELEASE_METHOD 916 - if (desc->chip->release) 917 - desc->chip->release(irq, dev_id); 918 #endif 919 920 /* If this was the last handler, shut down the IRQ line: */ 921 if (!desc->action) { 922 desc->status |= IRQ_DISABLED; 923 - if (desc->chip->shutdown) 924 - desc->chip->shutdown(irq); 925 else 926 - desc->chip->disable(irq); 927 } 928 929 #ifdef CONFIG_SMP ··· 998 if (!desc) 999 return; 1000 1001 - chip_bus_lock(irq, desc); 1002 kfree(__free_irq(irq, dev_id)); 1003 - chip_bus_sync_unlock(irq, desc); 1004 } 1005 EXPORT_SYMBOL(free_irq); 1006 ··· 1087 action->name = devname; 1088 action->dev_id = dev_id; 1089 1090 - chip_bus_lock(irq, desc); 1091 retval = __setup_irq(irq, desc, action); 1092 - chip_bus_sync_unlock(irq, desc); 1093 1094 if (retval) 1095 kfree(action);
··· 73 { 74 struct irq_desc *desc = irq_to_desc(irq); 75 76 + if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || 77 + !desc->irq_data.chip->irq_set_affinity) 78 return 0; 79 80 return 1; ··· 109 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 110 { 111 struct irq_desc *desc = irq_to_desc(irq); 112 + struct irq_chip *chip = desc->irq_data.chip; 113 unsigned long flags; 114 115 + if (!chip->irq_set_affinity) 116 return -EINVAL; 117 118 raw_spin_lock_irqsave(&desc->lock, flags); 119 120 #ifdef CONFIG_GENERIC_PENDING_IRQ 121 if (desc->status & IRQ_MOVE_PCNTXT) { 122 + if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { 123 + cpumask_copy(desc->irq_data.affinity, cpumask); 124 irq_set_thread_affinity(desc); 125 } 126 } ··· 128 cpumask_copy(desc->pending_mask, cpumask); 129 } 130 #else 131 + if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { 132 + cpumask_copy(desc->irq_data.affinity, cpumask); 133 irq_set_thread_affinity(desc); 134 } 135 #endif ··· 168 * one of the targets is online. 169 */ 170 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 171 + if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) 172 < nr_cpu_ids) 173 goto set_affinity; 174 else 175 desc->status &= ~IRQ_AFFINITY_SET; 176 } 177 178 + cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); 179 set_affinity: 180 + desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false); 181 182 return 0; 183 } ··· 223 224 if (!desc->depth++) { 225 desc->status |= IRQ_DISABLED; 226 + desc->irq_data.chip->irq_disable(&desc->irq_data); 227 } 228 } 229 ··· 246 if (!desc) 247 return; 248 249 + chip_bus_lock(desc); 250 raw_spin_lock_irqsave(&desc->lock, flags); 251 __disable_irq(desc, irq, false); 252 raw_spin_unlock_irqrestore(&desc->lock, flags); 253 + chip_bus_sync_unlock(desc); 254 } 255 EXPORT_SYMBOL(disable_irq_nosync); 256 ··· 313 * IRQ line is re-enabled. 314 * 315 * This function may be called from IRQ context only when 316 + * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 317 */ 318 void enable_irq(unsigned int irq) 319 { ··· 323 if (!desc) 324 return; 325 326 + chip_bus_lock(desc); 327 raw_spin_lock_irqsave(&desc->lock, flags); 328 __enable_irq(desc, irq, false); 329 raw_spin_unlock_irqrestore(&desc->lock, flags); 330 + chip_bus_sync_unlock(desc); 331 } 332 EXPORT_SYMBOL(enable_irq); 333 ··· 336 struct irq_desc *desc = irq_to_desc(irq); 337 int ret = -ENXIO; 338 339 + if (desc->irq_data.chip->irq_set_wake) 340 + ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); 341 342 return ret; 343 } ··· 429 } 430 431 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 432 + unsigned long flags) 433 { 434 int ret; 435 + struct irq_chip *chip = desc->irq_data.chip; 436 437 + if (!chip || !chip->irq_set_type) { 438 /* 439 * IRQF_TRIGGER_* but the PIC does not support multiple 440 * flow-types? 
··· 445 } 446 447 /* caller masked out all except trigger mode flags */ 448 + ret = chip->irq_set_type(&desc->irq_data, flags); 449 450 if (ret) 451 + pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", 452 + flags, irq, chip->irq_set_type); 453 else { 454 if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 455 flags |= IRQ_LEVEL; ··· 457 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); 458 desc->status |= flags; 459 460 + if (chip != desc->irq_data.chip) 461 + irq_chip_set_defaults(desc->irq_data.chip); 462 } 463 464 return ret; ··· 507 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) 508 { 509 again: 510 + chip_bus_lock(desc); 511 raw_spin_lock_irq(&desc->lock); 512 513 /* ··· 521 */ 522 if (unlikely(desc->status & IRQ_INPROGRESS)) { 523 raw_spin_unlock_irq(&desc->lock); 524 + chip_bus_sync_unlock(desc); 525 cpu_relax(); 526 goto again; 527 } 528 529 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { 530 desc->status &= ~IRQ_MASKED; 531 + desc->irq_data.chip->irq_unmask(&desc->irq_data); 532 } 533 raw_spin_unlock_irq(&desc->lock); 534 + chip_bus_sync_unlock(desc); 535 } 536 537 #ifdef CONFIG_SMP ··· 556 } 557 558 raw_spin_lock_irq(&desc->lock); 559 + cpumask_copy(mask, desc->irq_data.affinity); 560 raw_spin_unlock_irq(&desc->lock); 561 562 set_cpus_allowed_ptr(current, mask); ··· 657 if (!desc) 658 return -EINVAL; 659 660 + if (desc->irq_data.chip == &no_irq_chip) 661 return -ENOSYS; 662 /* 663 * Some drivers like serial.c use request_irq() heavily, ··· 752 } 753 754 if (!shared) { 755 + irq_chip_set_defaults(desc->irq_data.chip); 756 757 init_waitqueue_head(&desc->wait_for_threads); 758 ··· 779 if (!(desc->status & IRQ_NOAUTOEN)) { 780 desc->depth = 0; 781 desc->status &= ~IRQ_DISABLED; 782 + desc->irq_data.chip->irq_startup(&desc->irq_data); 783 } else 784 /* Undo nested disables: */ 785 desc->depth = 1; ··· 912 913 /* Currently used only by UML, might disappear one day: */ 914 #ifdef CONFIG_IRQ_RELEASE_METHOD 915 + if (desc->irq_data.chip->release) 916 + desc->irq_data.chip->release(irq, dev_id); 917 #endif 918 919 /* If this was the last handler, shut down the IRQ line: */ 920 if (!desc->action) { 921 desc->status |= IRQ_DISABLED; 922 + if (desc->irq_data.chip->irq_shutdown) 923 + desc->irq_data.chip->irq_shutdown(&desc->irq_data); 924 else 925 + desc->irq_data.chip->irq_disable(&desc->irq_data); 926 } 927 928 #ifdef CONFIG_SMP ··· 997 if (!desc) 998 return; 999 1000 + chip_bus_lock(desc); 1001 kfree(__free_irq(irq, dev_id)); 1002 + chip_bus_sync_unlock(desc); 1003 } 1004 EXPORT_SYMBOL(free_irq); 1005 ··· 1086 action->name = devname; 1087 action->dev_id = dev_id; 1088 1089 + chip_bus_lock(desc); 1090 retval = __setup_irq(irq, desc, action); 1091 + chip_bus_sync_unlock(desc); 1092 1093 if (retval) 1094 kfree(action);
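For illustration: every chip callback invoked above now receives a struct irq_data. A sketch of the new affinity hook for an invented controller; sketch_hw_route() is hypothetical, while the signature (mask plus a force flag) and the 0-on-success convention follow the calls in the diff above:

static int sketch_irq_set_affinity(struct irq_data *data,
				   const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_any_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	sketch_hw_route(data->irq, cpu);	/* invented hardware poke */
	return 0;	/* core then copies mask into irq_data.affinity */
}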
+7 -5
kernel/irq/migration.c
··· 7 void move_masked_irq(int irq) 8 { 9 struct irq_desc *desc = irq_to_desc(irq); 10 11 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 12 return; ··· 25 if (unlikely(cpumask_empty(desc->pending_mask))) 26 return; 27 28 - if (!desc->chip->set_affinity) 29 return; 30 31 assert_raw_spin_locked(&desc->lock); ··· 44 */ 45 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) 46 < nr_cpu_ids)) 47 - if (!desc->chip->set_affinity(irq, desc->pending_mask)) { 48 - cpumask_copy(desc->affinity, desc->pending_mask); 49 irq_set_thread_affinity(desc); 50 } 51 ··· 63 if (unlikely(desc->status & IRQ_DISABLED)) 64 return; 65 66 - desc->chip->mask(irq); 67 move_masked_irq(irq); 68 - desc->chip->unmask(irq); 69 } 70
··· 7 void move_masked_irq(int irq) 8 { 9 struct irq_desc *desc = irq_to_desc(irq); 10 + struct irq_chip *chip = desc->irq_data.chip; 11 12 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 13 return; ··· 24 if (unlikely(cpumask_empty(desc->pending_mask))) 25 return; 26 27 + if (!chip->irq_set_affinity) 28 return; 29 30 assert_raw_spin_locked(&desc->lock); ··· 43 */ 44 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) 45 < nr_cpu_ids)) 46 + if (!chip->irq_set_affinity(&desc->irq_data, 47 + desc->pending_mask, false)) { 48 + cpumask_copy(desc->irq_data.affinity, desc->pending_mask); 49 irq_set_thread_affinity(desc); 50 } 51 ··· 61 if (unlikely(desc->status & IRQ_DISABLED)) 62 return; 63 64 + desc->irq_data.chip->irq_mask(&desc->irq_data); 65 move_masked_irq(irq); 66 + desc->irq_data.chip->irq_unmask(&desc->irq_data); 67 } 68
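For illustration: move_native_irq() above brackets the migration with the chip's irq_mask()/irq_unmask() pair. A minimal sketch of those methods, assuming an invented memory-mapped controller (base pointer and register offsets are made up):

static void __iomem *sketch_base;

#define SKETCH_ENABLE_SET	0x00	/* write 1 to unmask a line */
#define SKETCH_ENABLE_CLR	0x04	/* write 1 to mask a line */

static void sketch_irq_mask(struct irq_data *data)
{
	writel(BIT(data->irq & 31), sketch_base + SKETCH_ENABLE_CLR);
}

static void sketch_irq_unmask(struct irq_data *data)
{
	writel(BIT(data->irq & 31), sketch_base + SKETCH_ENABLE_SET);
}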
-120
kernel/irq/numa_migrate.c
··· 1 - /* 2 - * NUMA irq-desc migration code 3 - * 4 - * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to 5 - * the new "home node" of the IRQ. 6 - */ 7 - 8 - #include <linux/irq.h> 9 - #include <linux/slab.h> 10 - #include <linux/module.h> 11 - #include <linux/random.h> 12 - #include <linux/interrupt.h> 13 - #include <linux/kernel_stat.h> 14 - 15 - #include "internals.h" 16 - 17 - static void init_copy_kstat_irqs(struct irq_desc *old_desc, 18 - struct irq_desc *desc, 19 - int node, int nr) 20 - { 21 - init_kstat_irqs(desc, node, nr); 22 - 23 - if (desc->kstat_irqs != old_desc->kstat_irqs) 24 - memcpy(desc->kstat_irqs, old_desc->kstat_irqs, 25 - nr * sizeof(*desc->kstat_irqs)); 26 - } 27 - 28 - static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) 29 - { 30 - if (old_desc->kstat_irqs == desc->kstat_irqs) 31 - return; 32 - 33 - kfree(old_desc->kstat_irqs); 34 - old_desc->kstat_irqs = NULL; 35 - } 36 - 37 - static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, 38 - struct irq_desc *desc, int node) 39 - { 40 - memcpy(desc, old_desc, sizeof(struct irq_desc)); 41 - if (!alloc_desc_masks(desc, node, false)) { 42 - printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " 43 - "for migration.\n", irq); 44 - return false; 45 - } 46 - raw_spin_lock_init(&desc->lock); 47 - desc->node = node; 48 - lockdep_set_class(&desc->lock, &irq_desc_lock_class); 49 - init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); 50 - init_copy_desc_masks(old_desc, desc); 51 - arch_init_copy_chip_data(old_desc, desc, node); 52 - return true; 53 - } 54 - 55 - static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) 56 - { 57 - free_kstat_irqs(old_desc, desc); 58 - free_desc_masks(old_desc, desc); 59 - arch_free_chip_data(old_desc, desc); 60 - } 61 - 62 - static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, 63 - int node) 64 - { 65 - struct irq_desc *desc; 66 - unsigned int irq; 67 - unsigned long flags; 68 - 69 - irq = old_desc->irq; 70 - 71 - raw_spin_lock_irqsave(&sparse_irq_lock, flags); 72 - 73 - /* We have to check it to avoid races with another CPU */ 74 - desc = irq_to_desc(irq); 75 - 76 - if (desc && old_desc != desc) 77 - goto out_unlock; 78 - 79 - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); 80 - if (!desc) { 81 - printk(KERN_ERR "irq %d: can not get new irq_desc " 82 - "for migration.\n", irq); 83 - /* still use old one */ 84 - desc = old_desc; 85 - goto out_unlock; 86 - } 87 - if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) { 88 - /* still use old one */ 89 - kfree(desc); 90 - desc = old_desc; 91 - goto out_unlock; 92 - } 93 - 94 - replace_irq_desc(irq, desc); 95 - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 96 - 97 - /* free the old one */ 98 - free_one_irq_desc(old_desc, desc); 99 - kfree(old_desc); 100 - 101 - return desc; 102 - 103 - out_unlock: 104 - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 105 - 106 - return desc; 107 - } 108 - 109 - struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) 110 - { 111 - /* those static or target node is -1, do not move them */ 112 - if (desc->irq < NR_IRQS_LEGACY || node == -1) 113 - return desc; 114 - 115 - if (desc->node != node) 116 - desc = __real_move_irq_desc(desc, node); 117 - 118 - return desc; 119 - } 120 -
···
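For illustration: with the removal above, NUMA placement happens once, at allocation time, instead of by migrating a live descriptor to a new node. A one-line sketch, assuming a hypothetical PCI driver with pdev in scope:

	int irq = irq_alloc_descs(-1, 0, 1, dev_to_node(&pdev->dev));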
+22 -4
kernel/irq/proc.c
··· 21 static int irq_affinity_proc_show(struct seq_file *m, void *v) 22 { 23 struct irq_desc *desc = irq_to_desc((long)m->private); 24 - const struct cpumask *mask = desc->affinity; 25 26 #ifdef CONFIG_GENERIC_PENDING_IRQ 27 if (desc->status & IRQ_MOVE_PENDING) ··· 65 cpumask_var_t new_value; 66 int err; 67 68 - if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || 69 irq_balancing_disabled(irq)) 70 return -EIO; 71 ··· 185 { 186 struct irq_desc *desc = irq_to_desc((long) m->private); 187 188 - seq_printf(m, "%d\n", desc->node); 189 return 0; 190 } 191 ··· 269 { 270 char name [MAX_NAMELEN]; 271 272 - if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) 273 return; 274 275 memset(name, 0, MAX_NAMELEN); ··· 295 296 proc_create_data("spurious", 0444, desc->dir, 297 &irq_spurious_proc_fops, (void *)(long)irq); 298 } 299 300 #undef MAX_NAMELEN
··· 21 static int irq_affinity_proc_show(struct seq_file *m, void *v) 22 { 23 struct irq_desc *desc = irq_to_desc((long)m->private); 24 + const struct cpumask *mask = desc->irq_data.affinity; 25 26 #ifdef CONFIG_GENERIC_PENDING_IRQ 27 if (desc->status & IRQ_MOVE_PENDING) ··· 65 cpumask_var_t new_value; 66 int err; 67 68 + if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity || 69 irq_balancing_disabled(irq)) 70 return -EIO; 71 ··· 185 { 186 struct irq_desc *desc = irq_to_desc((long) m->private); 187 188 + seq_printf(m, "%d\n", desc->irq_data.node); 189 return 0; 190 } 191 ··· 269 { 270 char name [MAX_NAMELEN]; 271 272 + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) 273 return; 274 275 memset(name, 0, MAX_NAMELEN); ··· 295 296 proc_create_data("spurious", 0444, desc->dir, 297 &irq_spurious_proc_fops, (void *)(long)irq); 298 + } 299 + 300 + void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) 301 + { 302 + char name [MAX_NAMELEN]; 303 + 304 + if (!root_irq_dir || !desc->dir) 305 + return; 306 + #ifdef CONFIG_SMP 307 + remove_proc_entry("smp_affinity", desc->dir); 308 + remove_proc_entry("affinity_hint", desc->dir); 309 + remove_proc_entry("node", desc->dir); 310 + #endif 311 + remove_proc_entry("spurious", desc->dir); 312 + 313 + memset(name, 0, MAX_NAMELEN); 314 + sprintf(name, "%u", irq); 315 + remove_proc_entry(name, root_irq_dir); 316 } 317 318 #undef MAX_NAMELEN
+3 -2
kernel/irq/resend.c
··· 60 /* 61 * Make sure the interrupt is enabled, before resending it: 62 */ 63 - desc->chip->enable(irq); 64 65 /* 66 * We do not resend level type interrupts. Level type ··· 70 if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { 71 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; 72 73 - if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { 74 #ifdef CONFIG_HARDIRQS_SW_RESEND 75 /* Set it pending and activate the softirq: */ 76 set_bit(irq, irqs_resend);
··· 60 /* 61 * Make sure the interrupt is enabled, before resending it: 62 */ 63 + desc->irq_data.chip->irq_enable(&desc->irq_data); 64 65 /* 66 * We do not resend level type interrupts. Level type ··· 70 if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { 71 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; 72 73 + if (!desc->irq_data.chip->irq_retrigger || 74 + !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { 75 #ifdef CONFIG_HARDIRQS_SW_RESEND 76 /* Set it pending and activate the softirq: */ 77 set_bit(irq, irqs_resend);
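For illustration: the resend path prefers a hardware retrigger when the chip provides one; a non-zero return from irq_retrigger() short-circuits the software resend above. A sketch for an invented controller with a set-pending register (sketch_base and the offset are hypothetical, as in the earlier mask/unmask sketch):

static int sketch_irq_retrigger(struct irq_data *data)
{
	writel(BIT(data->irq & 31), sketch_base + SKETCH_SET_PENDING);
	return 1;	/* non-zero: hardware will replay the edge */
}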
+5 -3
kernel/irq/spurious.c
··· 14 #include <linux/moduleparam.h> 15 #include <linux/timer.h> 16 17 static int irqfixup __read_mostly; 18 19 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) ··· 80 * If we did actual work for the real IRQ line we must let the 81 * IRQ controller clean up too 82 */ 83 - if (work && desc->chip && desc->chip->end) 84 - desc->chip->end(irq); 85 raw_spin_unlock(&desc->lock); 86 87 return ok; ··· 256 printk(KERN_EMERG "Disabling IRQ #%d\n", irq); 257 desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; 258 desc->depth++; 259 - desc->chip->disable(irq); 260 261 mod_timer(&poll_spurious_irq_timer, 262 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
··· 14 #include <linux/moduleparam.h> 15 #include <linux/timer.h> 16 17 + #include "internals.h" 18 + 19 static int irqfixup __read_mostly; 20 21 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) ··· 78 * If we did actual work for the real IRQ line we must let the 79 * IRQ controller clean up too 80 */ 81 + if (work) 82 + irq_end(irq, desc); 83 raw_spin_unlock(&desc->lock); 84 85 return ok; ··· 254 printk(KERN_EMERG "Disabling IRQ #%d\n", irq); 255 desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; 256 desc->depth++; 257 + desc->irq_data.chip->irq_disable(&desc->irq_data); 258 259 mod_timer(&poll_spurious_irq_timer, 260 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
+3 -6
kernel/softirq.c
··· 910 return 0; 911 } 912 913 int __init __weak arch_probe_nr_irqs(void) 914 { 915 - return 0; 916 } 917 918 int __init __weak arch_early_irq_init(void) 919 { 920 return 0; 921 } 922 - 923 - int __weak arch_init_chip_data(struct irq_desc *desc, int node) 924 - { 925 - return 0; 926 - }
··· 910 return 0; 911 } 912 913 + #ifdef CONFIG_GENERIC_HARDIRQS 914 int __init __weak arch_probe_nr_irqs(void) 915 { 916 + return NR_IRQS_LEGACY; 917 } 918 919 int __init __weak arch_early_irq_init(void) 920 { 921 return 0; 922 } 923 + #endif
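For illustration: arch_probe_nr_irqs() now doubles as the answer to "how many descriptors should early_irq_init() preallocate", which is why the weak default above returns NR_IRQS_LEGACY instead of 0. A sketch of an architecture override; the platform query is invented:

int __init arch_probe_nr_irqs(void)
{
	nr_irqs = sketch_platform_max_irqs();	/* invented board query */
	return NR_IRQS_LEGACY;			/* preallocate the legacy range */
}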